Merge "usb : dwc3: Initialize kernel stack variables properly"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 74f1b0d..3ab1fd8 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -6,6 +6,11 @@
 INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
 endif
 
+ifneq ($(TARGET_KERNEL_APPEND_DTB), true)
+$(info Using DTB Image)
+INSTALLED_DTBIMAGE_TARGET := $(PRODUCT_OUT)/dtb.img
+endif
+
 TARGET_KERNEL_MAKE_ENV := $(strip $(TARGET_KERNEL_MAKE_ENV))
 ifeq ($(TARGET_KERNEL_MAKE_ENV),)
 KERNEL_MAKE_ENV :=
@@ -184,6 +189,10 @@
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
 			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) oldconfig; fi
 
+# Creating a dtb.img once the kernel is compiled if TARGET_KERNEL_APPEND_DTB is set to be false
+$(INSTALLED_DTBIMAGE_TARGET): $(TARGET_PREBUILT_INT_KERNEL)
+	cat $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts/vendor/qcom/*.dtb > $@
+
 .PHONY: kerneltags
 kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
 	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) tags
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 7331822..8718d4a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -477,6 +477,7 @@
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 		/sys/devices/system/cpu/vulnerabilities/l1tf
+		/sys/devices/system/cpu/vulnerabilities/mds
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -489,8 +490,7 @@
 		"Vulnerable"	  CPU is affected and no mitigation in effect
 		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
 
-		Details about the l1tf file can be found in
-		Documentation/admin-guide/l1tf.rst
+		See also: Documentation/admin-guide/hw-vuln/index.rst
 
 What:		/sys/devices/system/cpu/smt
 		/sys/devices/system/cpu/smt/active
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
new file mode 100644
index 0000000..ffc064c
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -0,0 +1,13 @@
+========================
+Hardware vulnerabilities
+========================
+
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+   :maxdepth: 1
+
+   l1tf
+   mds
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/hw-vuln/l1tf.rst
similarity index 99%
rename from Documentation/admin-guide/l1tf.rst
rename to Documentation/admin-guide/hw-vuln/l1tf.rst
index 9f5924f..31653a9 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/hw-vuln/l1tf.rst
@@ -445,6 +445,7 @@
 line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
 module parameter is ignored and writes to the sysfs file are rejected.
 
+.. _mitigation_selection:
 
 Mitigation selection guide
 --------------------------
@@ -556,7 +557,7 @@
 the bare metal hypervisor, the nested hypervisor and the nested virtual
 machine.  VMENTER operations from the nested hypervisor into the nested
 guest will always be processed by the bare metal hypervisor. If KVM is the
-bare metal hypervisor it wiil:
+bare metal hypervisor it will:
 
  - Flush the L1D cache on every switch from the nested hypervisor to the
    nested virtual machine, so that the nested hypervisor's secrets are not
diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
new file mode 100644
index 0000000..e3a796c
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/mds.rst
@@ -0,0 +1,308 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+   - Processors from AMD, Centaur and other non Intel vendors
+
+   - Older processor models, where the CPU family is < 6
+
+   - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+   - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+     IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+   ==============  =====  ===================================================
+   CVE-2018-12126  MSBDS  Microarchitectural Store Buffer Data Sampling
+   CVE-2018-12130  MFBDS  Microarchitectural Fill Buffer Data Sampling
+   CVE-2018-12127  MLPDS  Microarchitectural Load Port Data Sampling
+   CVE-2019-11091  MDSUM  Microarchitectural Data Sampling Uncacheable Memory
+   ==============  =====  ===================================================
+
+Problem
+-------
+
+When performing store, load, L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in the
+buffer can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget which
+allows in turn to infer the value via a cache side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious non
+privileged user space applications running on hosts or guests. Malicious
+guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation based vulnerabilities the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence the attacks are purely sampling based, but as demonstrated with
+the TLBleed attack, samples can be postprocessed successfully.
+
+Web-Browsers
+^^^^^^^^^^^^
+
+  It's unclear whether attacks through Web-Browsers are possible at
+  all. The exploitation through Java-Script is considered very unlikely,
+  but other widely used web technologies like Webassembly could possibly be
+  abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable
+     * - 'Vulnerable'
+       - The processor is vulnerable, but no mitigation enabled
+     * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+       - The processor is vulnerable but microcode is not updated.
+
+         The mitigation is enabled on a best effort basis. See :ref:`vmwerv`
+     * - 'Mitigation: Clear CPU buffers'
+       - The processor is vulnerable and the CPU buffer clearing mitigation is
+         enabled.
+
+If the processor is vulnerable then the following information is appended
+to the above information:
+
+    ========================  ============================================
+    'SMT vulnerable'          SMT is enabled
+    'SMT mitigated'           SMT is enabled and mitigated
+    'SMT disabled'            SMT is disabled
+    'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
+    ========================  ============================================
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  If the processor is vulnerable, but the availability of the microcode based
+  mitigation mechanism is not advertised via CPUID the kernel selects a best
+  effort mitigation mode.  This mode invokes the mitigation instructions
+  without a guarantee that they clear the CPU buffers.
+
+  This is done to address virtualization scenarios where the host has the
+  microcode update applied, but the hypervisor is not yet updated to expose
+  the CPUID to the guest. If the host has updated microcode the protection
+  takes effect otherwise a few cpu cycles are wasted pointlessly.
+
+  The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+-------------------------
+
+The kernel detects the affected CPUs and the presence of the microcode
+which is required.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+  The mitigation for MDS clears the affected CPU buffers on return to user
+  space and when entering a guest.
+
+  If SMT is enabled it also clears the buffers on idle entry when the CPU
+  is only affected by MSBDS and not any other MDS variant, because the
+  other variants cannot be protected against cross Hyper-Thread attacks.
+
+  For CPUs which are only affected by MSBDS the user space, guest and idle
+  transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  The protection for host to guest transition depends on the L1TF
+  vulnerability of the CPU:
+
+  - CPU is affected by L1TF:
+
+    If the L1D flush mitigation is enabled and up to date microcode is
+    available, the L1D flush mitigation is automatically protecting the
+    guest transition.
+
+    If the L1D flush mitigation is disabled then the MDS mitigation is
+    invoked explicit when the host MDS mitigation is enabled.
+
+    For details on L1TF and virtualization see:
+    :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+  - CPU is not affected by L1TF:
+
+    CPU buffers are flushed before entering the guest when the host MDS
+    mitigation is enabled.
+
+  The resulting MDS protection matrix for the host to guest transition:
+
+  ============ ===== ============= ============ =================
+   L1TF         MDS   VMX-L1FLUSH   Host MDS     MDS-State
+
+   Don't care   No    Don't care    N/A          Not affected
+
+   Yes          Yes   Disabled      Off          Vulnerable
+
+   Yes          Yes   Disabled      Full         Mitigated
+
+   Yes          Yes   Enabled       Don't care   Mitigated
+
+   No           Yes   N/A           Off          Vulnerable
+
+   No           Yes   N/A           Full         Mitigated
+  ============ ===== ============= ============ =================
+
+  This only covers the host to guest transition, i.e. prevents leakage from
+  host to guest, but does not protect the guest internally. Guests need to
+  have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  The XEON PHI processor family is affected by MSBDS which can be exploited
+  cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+  to use MWAIT in user space (Ring 3) which opens a potential attack vector
+  for malicious user space. The exposure can be disabled on the kernel
+  command line with the 'ring3mwait=disable' command line option.
+
+  XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+  before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+  either disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+  All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+  means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+  disable SMT for full protection. These are most of the affected CPUs; the
+  exception is XEON PHI, see :ref:`xeon_phi`.
+
+  Disabling SMT can have a significant performance impact, but the impact
+  depends on the type of workloads.
+
+  See the relevant chapter in the L1TF mitigation documentation for details:
+  :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows to control the MDS mitigations at boot
+time with the option "mds=". The valid arguments for this option are:
+
+  ============  =============================================================
+  full		If the CPU is vulnerable, enable all available mitigations
+		for the MDS vulnerability, CPU buffer clearing on exit to
+		userspace and when entering a VM. Idle transitions are
+		protected as well if SMT is enabled.
+
+		It does not automatically disable SMT.
+
+  full,nosmt	The same as mds=full, with SMT disabled on vulnerable
+		CPUs.  This is the complete mitigation.
+
+  off		Disables MDS mitigations completely.
+
+  ============  =============================================================
+
+Not specifying this option is equivalent to "mds=full".
+
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+   If all userspace applications are from a trusted source and do not
+   execute untrusted code which is supplied externally, then the mitigation
+   can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The same considerations as above versus trusted user space apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The protection depends on the state of the L1TF mitigations.
+   See :ref:`virt_mechanism`.
+
+   If the MDS mitigation is enabled and SMT is disabled, guest to host and
+   guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+  The kernel default mitigations for vulnerable processors are:
+
+  - Enable CPU buffer clearing
+
+  The kernel does not by default enforce the disabling of SMT, which leaves
+  SMT systems vulnerable when running untrusted code. The same rationale as
+  for L1TF applies.
+  See :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <default_mitigations>`.
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 0873685..89abc50 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -17,14 +17,12 @@
    kernel-parameters
    devices
 
-This section describes CPU vulnerabilities and provides an overview of the
-possible mitigations along with guidance for selecting mitigations if they
-are configurable at compile, boot or run time.
+This section describes CPU vulnerabilities and their mitigations.
 
 .. toctree::
    :maxdepth: 1
 
-   l1tf
+   hw-vuln/index
 
 Here is a set of documents aimed at users who are trying to track down
 problems and bugs in particular.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0a55b3e..0706064 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2086,7 +2086,7 @@
 
 			Default is 'flush'.
 
-			For details see: Documentation/admin-guide/l1tf.rst
+			For details see: Documentation/admin-guide/hw-vuln/l1tf.rst
 
 	l2cr=		[PPC]
 
@@ -2326,6 +2326,32 @@
 			Format: <first>,<last>
 			Specifies range of consoles to be captured by the MDA.
 
+	mds=		[X86,INTEL]
+			Control mitigation for the Micro-architectural Data
+			Sampling (MDS) vulnerability.
+
+			Certain CPUs are vulnerable to an exploit against CPU
+			internal buffers which can forward information to a
+			disclosure gadget under certain conditions.
+
+			In vulnerable processors, the speculatively
+			forwarded data can be used in a cache side channel
+			attack, to access data to which the attacker does
+			not have direct access.
+
+			This parameter controls the MDS mitigation. The
+			options are:
+
+			full       - Enable MDS mitigation on vulnerable CPUs
+			full,nosmt - Enable MDS mitigation and disable
+				     SMT on vulnerable CPUs
+			off        - Unconditionally disable MDS mitigation
+
+			Not specifying this option is equivalent to
+			mds=full.
+
+			For details see: Documentation/admin-guide/hw-vuln/mds.rst
+
 	mem=nn[KMG]	[KNL,BOOT] Force usage of a specific amount of memory
 			Amount of memory to be used when the kernel is not able
 			to see the whole system memory or for test.
@@ -2483,6 +2509,40 @@
 			in the "bleeding edge" mini2440 support kernel at
 			http://repo.or.cz/w/linux-2.6/mini2440.git
 
+	mitigations=
+			[X86,PPC,S390] Control optional mitigations for CPU
+			vulnerabilities.  This is a set of curated,
+			arch-independent options, each of which is an
+			aggregation of existing arch-specific options.
+
+			off
+				Disable all optional CPU mitigations.  This
+				improves system performance, but it may also
+				expose users to several CPU vulnerabilities.
+				Equivalent to: nopti [X86,PPC]
+					       nospectre_v1 [PPC]
+					       nobp=0 [S390]
+					       nospectre_v2 [X86,PPC,S390]
+					       spectre_v2_user=off [X86]
+					       spec_store_bypass_disable=off [X86,PPC]
+					       l1tf=off [X86]
+					       mds=off [X86]
+
+			auto (default)
+				Mitigate all CPU vulnerabilities, but leave SMT
+				enabled, even if it's vulnerable.  This is for
+				users who don't want to be surprised by SMT
+				getting disabled across kernel upgrades, or who
+				have other ways of avoiding SMT-based attacks.
+				Equivalent to: (default behavior)
+
+			auto,nosmt
+				Mitigate all CPU vulnerabilities, disabling SMT
+				if needed.  This is for users who always want to
+				be fully mitigated, even if it means losing SMT.
+				Equivalent to: l1tf=flush,nosmt [X86]
+					       mds=full,nosmt [X86]
+
 	mminit_loglevel=
 			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
 			parameter allows control of the logging verbosity for
@@ -2812,7 +2872,7 @@
 			check bypass). With this option data leaks are possible
 			in the system.
 
-	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
 			(indirect branch prediction) vulnerability. System may
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.
@@ -3911,7 +3971,9 @@
 				[[,]s[mp]#### \
 				[[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
 				[[,]f[orce]
-			Where reboot_mode is one of warm (soft) or cold (hard) or gpio,
+			Where reboot_mode is one of warm (soft) or cold (hard) or gpio
+					(prefix with 'panic_' to set mode for panic
+					reboot only),
 			      reboot_type is one of bios, acpi, kbd, triple, efi, or pci,
 			      reboot_force is either force or not specified,
 			      reboot_cpu is s[mp]#### with #### being the processor
diff --git a/Documentation/arm/msm/remote_debug_drv.txt b/Documentation/arm/msm/remote_debug_drv.txt
new file mode 100644
index 0000000..13a35f4
--- /dev/null
+++ b/Documentation/arm/msm/remote_debug_drv.txt
@@ -0,0 +1,468 @@
+Introduction
+============
+
+The goal of this debug feature is to provide a reliable, responsive,
+accurate and secure debug capability to developers interested in
+debugging MSM subsystem processor images without the use of a hardware
+debugger.
+
+The Debug Agent along with the Remote Debug Driver implements a shared
+memory based transport mechanism that allows for a debugger (ex. GDB)
+running on a host PC to communicate with a remote stub running on
+peripheral subsystems such as the ADSP, MODEM etc.
+
+The diagram below depicts end to end the components involved to
+support remote debugging:
+
+
+:               :
+:    HOST (PC)  :  MSM
+:  ,--------,   :   ,-------,
+:  |        |   :   | Debug |                         ,--------,
+:  |Debugger|<--:-->| Agent |                         | Remote |
+:  |        |   :   |  App  |                  +----->| Debug  |
+:  `--------`   :   |-------|    ,--------,    |      | Stub   |
+:               :   | Remote|    |        |<---+      `--------`
+:               :   | Debug |<-->|--------|
+:               :   | Driver|    |        |<---+      ,--------,
+:               :   `-------`    `--------`    |      | Remote |
+:               :       LA         Shared      +----->| Debug  |
+:               :                  Memory             | Stub   |
+:               :                                     `--------`
+:               :                               Peripheral Subsystems
+:               :                                 (ADSP, MODEM, ...)
+
+
+Debugger:       Debugger application running on the host PC that
+                communicates with the remote stub.
+                Examples: GDB, LLDB
+
+Debug Agent:    Software that runs on the Linux Android platform
+                that provides connectivity from the MSM to the
+                host PC. This involves two portions:
+                1) User mode Debug Agent application that discovers
+                processes running on the subsystems and creates
+                TCP/IP sockets for the host to connect to. In addition
+                to this, it creates an info (or meta) port that
+                users can connect to discover the various
+                processes and their corresponding debug ports.
+
+Remote Debug    A character based driver that the Debug
+Driver:         Agent uses to transport the payload received from the
+                host to the debug stub running on the subsystem
+                processor over shared memory and vice versa.
+
+Shared Memory:  Shared memory from the SMEM pool that is accessible
+                from the Applications Processor (AP) and the
+                subsystem processors.
+
+Remote Debug    Privileged code that runs in the kernels of the
+Stub:           subsystem processors that receives debug commands
+                from the debugger running on the host and
+                acts on these commands. These commands include reading
+                and writing to registers and memory belonging to the
+                subsystem's address space, setting breakpoints,
+                single stepping etc.
+
+Hardware description
+====================
+
+The Remote Debug Driver interfaces with the Remote Debug stubs
+running on the subsystem processors and does not drive or
+manage any hardware resources.
+
+Software description
+====================
+
+The debugger and the remote stubs use Remote Serial Protocol (RSP)
+to communicate with each other. This is a widely used protocol by both
+software and hardware debuggers. RSP is an ASCII based protocol
+and used when it is not possible to run GDB server on the target under
+debug.
+
+The Debug Agent application along with the Remote Debug Driver
+is responsible for establishing a bi-directional connection from
+the debugger application running on the host to the remote debug
+stub running on a subsystem. The Debug Agent establishes connectivity
+to the host PC via TCP/IP sockets.
+
+This feature uses ADB port forwarding to establish connectivity
+between the debugger running on the host and the target under debug.
+
+Please note the Debug Agent does not expose HLOS memory to the
+remote subsystem processors.
+
+Design
+======
+
+Here is the overall flow:
+
+1) When the Debug Agent application starts up, it opens up a shared memory
+based transport channel to the various subsystem processor images.
+
+2) The Debug Agent application sends messages across to the remote stubs
+to discover the various processes that are running on the subsystem and
+creates debug sockets for each of them.
+
+3) Whenever a process running on a subsystem exits, the Debug Agent
+is notified by the stub so that the debug port and other resources
+can be reclaimed.
+
+4) The Debug Agent uses the services of the Remote Debug Driver to
+transport payload from the host debugger to the remote stub and vice versa.
+
+5) Communication between the Remote Debug Driver and the Remote Debug stub
+running on the subsystem processor is done over shared memory (see figure).
+SMEM services are used to allocate the shared memory that will
+be readable and writeable by the AP and the subsystem image under debug.
+
+A separate SMEM allocation takes place for each subsystem processor
+involved in remote debugging. The remote stub running on each of the
+subsystems allocates a SMEM buffer using a unique identifier so that both
+the AP and subsystem get the same physical block of memory. It should be
+noted that subsystem images can be restarted at any time.
+However, when a subsystem comes back up, its stub uses the same unique
+SMEM identifier to allocate the SMEM block. This would not result in a
+new allocation rather the same block of memory in the first bootup instance
+is provided back to the stub running on the subsystem.
+
+An 8KB chunk of shared memory is allocated and used for communication
+per subsystem. For multi-process capable subsystems, 16KB chunk of shared
+memory is allocated to allow for simultaneous debugging of more than one
+process running on a single subsystem.
+
+The shared memory is used as a circular ring buffer in each direction.
+Thus we have a bi-directional shared memory channel between the AP
+and a subsystem. We call this SMQ. Each memory channel contains a header,
+data and a control mechanism that is used to synchronize read and write
+of data between the AP and the remote subsystem.
+
+Overall SMQ memory view:
+:
+:    +------------------------------------------------+
+:    | SMEM buffer                                    |
+:    |-----------------------+------------------------|
+:    |Producer: LA           | Producer: Remote       |
+:    |Consumer: Remote       |           subsystem    |
+:    |          subsystem    | Consumer: LA           |
+:    |                       |                        |
+:    |               Producer|                Consumer|
+:    +-----------------------+------------------------+
+:    |                       |
+:    |                       |
+:    |                       +--------------------------------------+
+:    |                                                              |
+:    |                                                              |
+:    v                                                              v
+:    +--------------------------------------------------------------+
+:    |   Header  |       Data      |            Control             |
+:    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+:    |           | b | b | b |     | S  |n |n |     | S |n |n |     |
+:    |  Producer | l | l | l |     | M  |o |o |     | M |o |o |     |
+:    |    Ver    | o | o | o |     | Q  |d |d |     | Q |d |d |     |
+:    |-----------| c | c | c | ... |    |e |e | ... |   |e |e | ... |
+:    |           | k | k | k |     | O  |  |  |     | I |  |  |     |
+:    |  Consumer |   |   |   |     | u  |0 |1 |     | n |0 |1 |     |
+:    |    Ver    | 0 | 1 | 2 |     | t  |  |  |     |   |  |  |     |
+:    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+:                                       |           |
+:                                       +           |
+:                                                   |
+:                          +------------------------+
+:                          |
+:                          v
+:                        +----+----+----+----+
+:                        | SMQ Nodes         |
+:                        |----|----|----|----|
+:                 Node # |  0 |  1 |  2 | ...|
+:                        |----|----|----|----|
+: Starting Block Index # |  0 |  3 |  8 | ...|
+:                        |----|----|----|----|
+:            # of blocks |  3 |  5 |  1 | ...|
+:                        +----+----+----+----+
+:
+
+Header: Contains version numbers for software compatibility to ensure
+that both producers and consumers on the AP and subsystems know how to
+read from and write to the queue.
+Both the producer and consumer versions are 1.
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 1 byte  | Producer Version  |
+:     +---------+-------------------+
+:     | 1 byte  | Consumer Version  |
+:     +---------+-------------------+
+
+
+Data: The data portion contains multiple blocks [0..N] of a fixed size.
+The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+Payload sent from the debug agent app is split (if necessary) and placed
+in these blocks. The first data block is placed at the next 8 byte aligned
+address after the header.
+
+The number of blocks for a given SMEM allocation is derived as follows:
+  Number of Blocks = ((Total Size - Alignment - Size of Header
+                      - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+
+The producer maintains a private block map of each of these blocks to
+determine which of these blocks in the queue is available and which are free.
+
+Control:
+The control portion contains a list of nodes [0..N] where N is number
+of available data blocks. Each node identifies the data
+block indexes that contain a particular debug message to be transferred,
+and the number of blocks it took to hold the contents of the message.
+
+Each node has the following structure:
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 2 bytes |Staring Block Index|
+:     +---------+-------------------+
+:     | 2 bytes |Number of Blocks   |
+:     +---------+-------------------+
+
+The producer and the consumer update different parts of the control channel
+(SMQOut / SMQIn) respectively. Each of these control data structures contains
+information about the last node that was written / read, and the actual nodes
+that were written/read.
+
+SMQOut Structure (R/W by producer, R by consumer):
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 4 bytes | Magic Init Number |
+:     +---------+-------------------+
+:     | 4 bytes | Reset             |
+:     +---------+-------------------+
+:     | 4 bytes | Last Sent Index   |
+:     +---------+-------------------+
+:     | 4 bytes | Index Free Read   |
+:     +---------+-------------------+
+
+SMQIn Structure (R/W by consumer, R by producer):
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 4 bytes | Magic Init Number |
+:     +---------+-------------------+
+:     | 4 bytes | Reset ACK         |
+:     +---------+-------------------+
+:     | 4 bytes | Last Read Index   |
+:     +---------+-------------------+
+:     | 4 bytes | Index Free Write  |
+:     +---------+-------------------+
+
+Magic Init Number:
+Both SMQ Out and SMQ In initialize this field with a predefined magic
+number so as to make sure that both the consumer and producer blocks
+have fully initialized and have valid data in the shared memory control area.
+  Producer Magic #: 0xFF00FF01
+  Consumer Magic #: 0xFF00FF02
+
+SMQ Out's Last Sent Index and Index Free Read:
+  Only a producer can write to these indexes and they are updated whenever
+  there is new payload to be inserted into the SMQ in order to be sent to a
+  consumer.
+
+  The number of blocks required for the SMQ allocation is determined as:
+   (payload size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE
+
+  The private block map is searched for a large enough continuous set of blocks
+  and the user data is copied into the data blocks.
+
+  The starting index of the free block(s) is updated in the SMQOut's Last Sent
+  Index. This update keeps track of which index was last written to and the
+  producer uses it to determine where the next allocation could be done.
+
+  Every allocation, a producer updates the Index Free Read from its
+  collaborating consumer's Index Free Write field (if they are unequal).
+  This index value indicates that the consumer has read all blocks associated
+  with allocation on the SMQ and that the producer can reuse these blocks for
+  subsequent allocations since this is a circular queue.
+
+  At cold boot and restart, these indexes are initialized to zero and all
+  blocks are marked as available for allocation.
+
+SMQ In's Last Read Index and Index Free Write:
+  These indexes are written to only by a consumer and are updated whenever
+  there is new payload to be read from the SMQ. The Last Read Index keeps
+  track of which index was last read by the consumer and using this, it
+  determines where the next read should be done.
+  After completing a read, Last Read Index is incremented to the
+  next block index. A consumer updates Index Free Write to the starting
+  index of an allocation whenever it has completed processing the blocks.
+  This is an optimization that can be used to prevent an additional copy
+  of data from the queue into a client's data buffer and the data in the queue
+  itself can be used.
+  Once Index Free Write is updated, the collaborating producer (on the next
+  data allocation) reads the updated Index Free Write value and it then
+  updates its corresponding SMQ Out's Index Free Read and marks the blocks
+  associated with that index as available for allocation. At cold boot and
+  restart, these indexes are initialized to zero.
+
+SMQ Out Reset# and SMQ In Reset ACK #:
+  Since subsystems can restart at anytime, the data blocks and control channel
+  can be in an inconsistent state when a producer or consumer comes up.
+  We use Reset and Reset ACK to manage this. At cold boot, the producer
+  initializes the Reset# to a known number ex. 1. Every other reset that the
+  producer undergoes, the Reset# is simply incremented by 1. All the producer
+  indexes are reset.
+  When the producer notifies the consumer of data availability, the consumer
+  reads the producer's Reset # and copies that into its SMQ In Reset ACK#
+  field when they differ. When that occurs, the consumer resets its
+  indexes to 0.
+
+6) Asynchronous notifications between a producer and consumer are
+done using the SMP2P service which is interrupt based.
+
+Power Management
+================
+
+None
+
+SMP/multi-core
+==============
+
+The driver uses completion to wake up the Debug Agent client threads.
+
+Security
+========
+
+From the perspective of the subsystem, the AP is untrusted. The remote
+stubs consult the secure debug fuses to determine whether or not the
+remote debugging will be enabled at the subsystem.
+
+If the hardware debug fuses indicate that debugging is disabled, the
+remote stubs will not be functional on the subsystem. Writes to the
+queue will only be done if the driver sees that the remote stub has been
+initialized on the subsystem.
+
+Therefore even if any untrusted software running on the AP requests
+the services of the Remote Debug Driver and injects RSP messages
+into the shared memory buffer, these RSP messages will be discarded and
+an appropriate error code will be sent up to the invoking application.
+
+Performance
+===========
+
+During operation, the Remote Debug Driver copies RSP messages
+asynchronously sent from the host debugger to the remote stub and vice
+versa. The debug messages are ASCII based and relatively short
+(<25 bytes) and may once in a while go up to a maximum 700 bytes
+depending on the command the user requested. Thus we do not
+anticipate any major performance impact. Moreover, in a typical
+functional debug scenario performance should not be a concern.
+
+Interface
+=========
+
+The Remote Debug Driver is a character based device that manages
+a piece of shared memory that is used as a bi-directional
+single producer/consumer circular queue using a next fit allocator.
+Every subsystem has its own shared memory buffer that is managed
+like a separate device.
+
+The driver distinguishes each subsystem processor's buffer by
+registering a node with a different minor number.
+
+For each subsystem that is supported, the driver exposes a user space
+interface through the following node:
+    - /dev/rdbg-<subsystem>
+    Ex. /dev/rdbg-adsp (for the ADSP subsystem)
+
+The standard open(), close(), read() and write() API set is
+implemented.
+
+The open() syscall will fail if a subsystem is not present or supported
+by the driver or a shared memory buffer cannot be allocated for the
+AP - subsystem communication. It will also fail if the subsystem has
+not initialized the queue on its side. Here are the error codes returned
+in case a call to open() fails:
+ENODEV - memory was not yet allocated for the device
+EEXIST - device is already opened
+ENOMEM - SMEM allocation failed
+ECOMM - Subsystem queue is not yet setup
+ENOMEM - Failure to initialize SMQ
+
+read() is a blocking call that will return with the number of bytes written
+by the subsystem whenever the subsystem sends it some payload. Here are the
+error codes returned in case a call to read() fails:
+EINVAL - Invalid input
+ENODEV - Device has not been opened yet
+ERESTARTSYS - call to wait_for_completion_interruptible is interrupted
+ENODATA - call to smq_receive failed
+
+write() attempts to send user mode payload out to the subsystem. It can fail
+if the SMQ is full. The number of bytes written is returned back to the user.
+Here are the error codes returned in case a call to write() fails:
+EINVAL - Invalid input
+ECOMM - SMQ send failed
+
+In the close() syscall, the control information state of the SMQ is
+initialized to zero thereby preventing any further communication between
+the AP and the subsystem. Here is the error code returned in case
+a call to close() fails:
+ENODEV - device wasn't opened/initialized
+
+The Remote Debug driver uses SMP2P for bi-directional AP to subsystem
+notification. Notifications are sent to indicate that there are new
+debug messages available for processing. Each subsystem that is
+supported will need to add a device tree entry per the usage
+specification of SMP2P driver.
+
+In case the remote stub becomes non operational or the security configuration
+on the subsystem does not permit debugging, any messages put in the SMQ will
+not be responded to. It is the responsibility of the Debug Agent app and the
+host debugger application such as GDB to timeout and notify the user of the
+non availability of remote debugging.
+
+Driver parameters
+=================
+
+None
+
+Config options
+==============
+
+The driver is configured with a device tree entry to map an SMP2P entry
+to the device. The SMP2P entry name used is "rdbg". Please see
+kernel/Documentation/arm/msm/msm_smp2p.txt for information about the
+device tree entry required to configure SMP2P.
+
+The driver uses the SMEM allocation type SMEM_LC_DEBUGGER to allocate memory
+for the queue that is used to share data with the subsystems.
+
+Dependencies
+============
+
+The Debug Agent driver requires services of SMEM to
+allocate shared memory buffers.
+
+SMP2P is used as a bi-directional notification
+mechanism between the AP and a subsystem processor.
+
+User space utilities
+====================
+
+This driver is meant to be used in conjunction with the user mode
+Remote Debug Agent application.
+
+Other
+=====
+
+None
+
+Known issues
+============
+For targets with an external subsystem, we cannot use
+shared memory for communication and would have to use the prevailing
+transport mechanisms that exists between the AP and the external subsystem.
+
+This driver cannot be leveraged for such targets.
+
+To do
+=====
+
+None
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 3c6fc2e..eeb3fc9 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -58,6 +58,7 @@
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 | ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
+| ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
index 69a7d90..46aae52 100644
--- a/Documentation/dev-tools/gcov.rst
+++ b/Documentation/dev-tools/gcov.rst
@@ -34,10 +34,6 @@
         CONFIG_DEBUG_FS=y
         CONFIG_GCOV_KERNEL=y
 
-select the gcc's gcov format, default is autodetect based on gcc version::
-
-        CONFIG_GCOV_FORMAT_AUTODETECT=y
-
 and to get coverage data for the entire kernel::
 
         CONFIG_GCOV_PROFILE_ALL=y
@@ -169,6 +165,20 @@
       [user@build] gcov -o /tmp/coverage/tmp/out/init main.c
 
 
+Note on compilers
+-----------------
+
+GCC and LLVM gcov tools are not necessarily compatible. Use gcov_ to work with
+GCC-generated .gcno and .gcda files, and use llvm-cov_ for Clang.
+
+.. _gcov: http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
+.. _llvm-cov: https://llvm.org/docs/CommandGuide/llvm-cov.html
+
+Build differences between GCC and Clang gcov are handled by Kconfig. It
+automatically selects the appropriate gcov format depending on the detected
+toolchain.
+
+
 Troubleshooting
 ---------------
 
diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst
index 79beb80..4a74cf6 100644
--- a/Documentation/driver-api/usb/power-management.rst
+++ b/Documentation/driver-api/usb/power-management.rst
@@ -370,11 +370,15 @@
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface.  As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter.  Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again.  On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
 
 Drivers using the async routines are responsible for their own
 synchronization and mutual exclusion.
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 3a7b6052..08c23b6 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -343,9 +343,9 @@
 - ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
 - ``ENOTTY``: this type of filesystem does not implement encryption
 - ``EOPNOTSUPP``: the kernel was not configured with encryption
-  support for this filesystem, or the filesystem superblock has not
+  support for filesystems, or the filesystem superblock has not
   had encryption enabled on it.  (For example, to use encryption on an
-  ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
+  ext4 filesystem, CONFIG_FS_ENCRYPTION must be enabled in the
   kernel config, and the superblock must have had the "encrypt"
   feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
   encrypt``.)
@@ -451,10 +451,18 @@
 - Unencrypted files, or files encrypted with a different encryption
   policy (i.e. different key, modes, or flags), cannot be renamed or
   linked into an encrypted directory; see `Encryption policy
-  enforcement`_.  Attempts to do so will fail with EPERM.  However,
+  enforcement`_.  Attempts to do so will fail with EXDEV.  However,
   encrypted files can be renamed within an encrypted directory, or
   into an unencrypted directory.
 
+  Note: "moving" an unencrypted file into an encrypted directory, e.g.
+  with the `mv` program, is implemented in userspace by a copy
+  followed by a delete.  Be aware that the original unencrypted data
+  may remain recoverable from free space on the disk; prefer to keep
+  all files encrypted from the very beginning.  The `shred` program
+  may be used to overwrite the source files but isn't guaranteed to be
+  effective on all filesystems and storage devices.
+
 - Direct I/O is not supported on encrypted files.  Attempts to use
   direct I/O on such files will fall back to buffered I/O.
 
@@ -541,7 +549,7 @@
 Except for those special files, it is forbidden to have unencrypted
 files, or files encrypted with a different encryption policy, in an
 encrypted directory tree.  Attempts to link or rename such a file into
-an encrypted directory will fail with EPERM.  This is also enforced
+an encrypted directory will fail with EXDEV.  This is also enforced
 during ->lookup() to provide limited protection against offline
 attacks that try to disable or downgrade encryption in known locations
 where applications may later write sensitive data.  It is recommended
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 7b7b845..32b5186 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -622,3 +622,8 @@
 	alloc_file_clone(file, flags, ops) does not affect any caller's references.
 	On success you get a new struct file sharing the mount/dentry with the
 	original, on failure - ERR_PTR().
+--
+[mandatory]
+	DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
+	default.  DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
+	business doing so.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d1ee484..ee9984f 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 5db7e87..1cdc139 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -104,6 +104,7 @@
    :maxdepth: 2
 
    sh/index
+   x86/index
 
 Filesystem Documentation
 ------------------------
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index b138b2a..90ccb22 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -410,6 +410,7 @@
 	minimum RTT when it is moved to a longer path (e.g., due to traffic
 	engineering). A longer window makes the filter more resistant to RTT
 	inflations such as transient congestion. The unit is seconds.
+	Possible values: 0 - 86400 (1 day)
 	Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 9ecde51..2793d4e 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -92,6 +92,14 @@
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
 
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
 dev_weight
 --------------
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 4cd84b1..d22c468 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -65,6 +65,7 @@
 - swappiness
 - user_reserve_kbytes
 - vfs_cache_pressure
+- watermark_boost_factor
 - watermark_scale_factor
 - zone_reclaim_mode
 - want_old_faultaround_pte
@@ -892,6 +893,26 @@
 
 =============================================================
 
+watermark_boost_factor:
+
+This factor controls the level of reclaim when memory is being fragmented.
+It defines the percentage of the high watermark of a zone that will be
+reclaimed if pages of different mobility are being mixed within pageblocks.
+The intent is that compaction has less work to do in the future and to
+increase the success rate of future high-order allocations such as SLUB
+allocations, THP and hugetlbfs pages.
+
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g.  2MB on 64-bit x86). A boost factor of 0 will disable the feature.
+
+=============================================================
+
 watermark_scale_factor:
 
 This factor controls the aggressiveness of kswapd. It defines the
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
new file mode 100644
index 0000000..33c5c31
--- /dev/null
+++ b/Documentation/x86/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "X86 architecture specific documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'x86.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
new file mode 100644
index 0000000..ef389dc
--- /dev/null
+++ b/Documentation/x86/index.rst
@@ -0,0 +1,8 @@
+==========================
+x86 architecture specifics
+==========================
+
+.. toctree::
+   :maxdepth: 1
+
+   mds
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
new file mode 100644
index 0000000..5d4330b
--- /dev/null
+++ b/Documentation/x86/mds.rst
@@ -0,0 +1,193 @@
+Microarchitectural Data Sampling (MDS) mitigation
+=================================================
+
+.. _mds:
+
+Overview
+--------
+
+Microarchitectural Data Sampling (MDS) is a family of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+ - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from
+memory that takes a fault or assist can leave data in a microarchitectural
+structure that may later be observed using one of the same methods used by
+MSBDS, MFBDS or MLPDS.
+
+Exposure assumptions
+--------------------
+
+It is assumed that attack code resides in user space or in a guest with one
+exception. The rationale behind this assumption is that the code construct
+needed for exploiting MDS requires:
+
+ - to control the load to trigger a fault or assist
+
+ - to have a disclosure gadget which exposes the speculatively accessed
+   data for consumption through a side channel.
+
+ - to control the pointer through which the disclosure gadget exposes the
+   data
+
+The existence of such a construct in the kernel cannot be excluded with
+100% certainty, but the complexity involved makes it extremely unlikely.
+
+There is one exception, which is untrusted BPF. The functionality of
+untrusted BPF is limited, but it needs to be thoroughly investigated
+whether it can be used to create such a construct.
+
+
+Mitigation strategy
+-------------------
+
+All variants have the same mitigation strategy at least for the single CPU
+thread case (SMT off): Force the CPU to clear the affected buffers.
+
+This is achieved by using the otherwise unused and obsolete VERW
+instruction in combination with a microcode update. The microcode clears
+the affected CPU buffers when the VERW instruction is executed.
+
+For virtualization there are two ways to achieve CPU buffer
+clearing. Either the modified VERW instruction or via the L1D Flush
+command. The latter is issued when L1TF mitigation is enabled so the extra
+VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to
+be issued.
+
+If the VERW instruction with the supplied segment selector argument is
+executed on a CPU without the microcode update there is no side effect
+other than a small number of pointlessly wasted CPU cycles.
+
+This does not protect against cross Hyper-Thread attacks except for MSBDS
+which is only exploitable cross Hyper-thread when one of the Hyper-Threads
+enters a C-state.
+
+The kernel provides a function to invoke the buffer clearing:
+
+    mds_clear_cpu_buffers()
+
+The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+(idle) transitions.
+
+As a special quirk to address virtualization scenarios where the host has
+the microcode updated, but the hypervisor does not (yet) expose the
+MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
+hope that it might actually clear the buffers. The state is reflected
+accordingly.
+
+According to current knowledge additional mitigations inside the kernel
+itself are not required because the necessary gadgets to expose the leaked
+data cannot be controlled in a way which allows exploitation from malicious
+user space or VM guests.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ======= ============================================================
+ off      Mitigation is disabled. Either the CPU is not affected or
+          mds=off is supplied on the kernel command line
+
+ full     Mitigation is enabled. CPU is affected and MD_CLEAR is
+          advertised in CPUID.
+
+ vmwerv	  Mitigation is enabled. CPU is affected and MD_CLEAR is not
+	  advertised in CPUID. That is mainly for virtualization
+	  scenarios where the host has the updated microcode but the
+	  hypervisor does not expose MD_CLEAR in CPUID. It's a best
+	  effort approach without guarantee.
+ ======= ============================================================
+
+If the CPU is affected and mds=off is not supplied on the kernel command
+line then the kernel selects the appropriate mitigation mode depending on
+the availability of the MD_CLEAR CPUID bit.
+
+Mitigation points
+-----------------
+
+1. Return to user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   When transitioning from kernel to user space the CPU buffers are flushed
+   on affected CPUs when the mitigation is not disabled on the kernel
+   command line. The mitigation is enabled through the static key
+   mds_user_clear.
+
+   The mitigation is invoked in prepare_exit_to_usermode() which covers
+   all but one of the kernel to user space transitions.  The exception
+   is when we return from a Non Maskable Interrupt (NMI), which is
+   handled directly in do_nmi().
+
+   (The reason that NMI is special is that prepare_exit_to_usermode() can
+    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
+    enable IRQs with NMIs blocked.)
+
+
+2. C-State transition
+^^^^^^^^^^^^^^^^^^^^^
+
+   When a CPU goes idle and enters a C-State the CPU buffers need to be
+   cleared on affected CPUs when SMT is active. This addresses the
+   repartitioning of the store buffer when one of the Hyper-Threads enters
+   a C-State.
+
+   When SMT is inactive, i.e. either the CPU does not support it or all
+   sibling threads are offline CPU buffer clearing is not required.
+
+   The idle clearing is enabled on CPUs which are only affected by MSBDS
+   and not by any other MDS variant. The other MDS variants cannot be
+   protected against cross Hyper-Thread attacks because the Fill Buffer and
+   the Load Ports are shared. So on CPUs affected by other variants, the
+   idle clearing would be a window dressing exercise and is therefore not
+   activated.
+
+   The invocation is controlled by the static key mds_idle_clear which is
+   switched depending on the chosen mitigation mode and the SMT state of
+   the system.
+
+   The buffer clear is only invoked before entering the C-State to prevent
+   that stale data from the idling CPU from spilling to the Hyper-Thread
+   sibling after the store buffer got repartitioned and all entries are
+   available to the non idle sibling.
+
+   When coming out of idle the store buffer is partitioned again so each
+   sibling has half of it available. The back from idle CPU could be then
+   speculatively exposed to contents of the sibling. The buffers are
+   flushed either on exit to user space or on VMENTER so malicious code
+   in user space or the guest cannot speculatively access them.
+
+   The mitigation is hooked into all variants of halt()/mwait(), but does
+   not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver
+   has been superseded by the intel_idle driver around 2010 and is
+   preferred on all affected CPUs which are expected to gain the MD_CLEAR
+   functionality in microcode. Aside of that the IO-Port mechanism is a
+   legacy interface which is only used on older systems which are either
+   not affected or do not receive microcode updates anymore.
diff --git a/Makefile b/Makefile
index 6ad7da8..0859e67 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 36
+SUBLEVEL = 47
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -612,6 +612,16 @@
   CC_FLAGS_FTRACE := -pg
 endif
 
+# Make toolchain changes before including arch/$(SRCARCH)/Makefile to ensure
+# ar/cc/ld-* macros return correct values.
+ifdef CONFIG_LTO_CLANG
+# use llvm-ar for building symbol tables from IR files, and llvm-nm instead
+# of objdump for processing symbol versions and exports
+LLVM_AR		:= llvm-ar
+LLVM_NM		:= llvm-nm
+export LLVM_AR LLVM_NM
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -624,7 +634,7 @@
 # Read in dependencies to all Kconfig* files, make sure to run syncconfig if
 # changes are detected. This should be included after arch/$(SRCARCH)/Makefile
 # because some architectures define CROSS_COMPILE there.
--include include/config/auto.conf.cmd
+include include/config/auto.conf.cmd
 
 # To avoid any implicit rule to kick in, define an empty command
 $(KCONFIG_CONFIG): ;
@@ -662,8 +672,7 @@
 KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= $(call cc-option,-Oz,-Os)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
 KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
@@ -820,8 +829,24 @@
 LDFLAGS_vmlinux += --gc-sections
 endif
 
+ifdef CONFIG_LTO_CLANG
+lto-clang-flags	:= -flto -fvisibility=hidden
+
+# allow disabling only clang LTO where needed
+DISABLE_LTO_CLANG := -fno-lto -fvisibility=default
+export DISABLE_LTO_CLANG
+endif
+
+ifdef CONFIG_LTO
+LTO_CFLAGS	:= $(lto-clang-flags)
+KBUILD_CFLAGS	+= $(LTO_CFLAGS)
+
+DISABLE_LTO	:= $(DISABLE_LTO_CLANG)
+export LTO_CFLAGS DISABLE_LTO
+endif
+
 ifdef CONFIG_CFI_CLANG
-cfi-clang-flags	+= -fsanitize=cfi
+cfi-clang-flags	+= -fsanitize=cfi $(call cc-option, -fsplit-lto-unit)
 DISABLE_CFI_CLANG := -fno-sanitize=cfi
 ifdef CONFIG_MODULES
 cfi-clang-flags	+= -fsanitize-cfi-cross-dso
@@ -831,17 +856,19 @@
 cfi-clang-flags	+= -fsanitize-recover=cfi -fno-sanitize-trap=cfi
 endif
 
+# also disable CFI when LTO is disabled
+DISABLE_LTO_CLANG += $(DISABLE_CFI_CLANG)
 # allow disabling only clang CFI where needed
 export DISABLE_CFI_CLANG
 endif
 
 ifdef CONFIG_CFI
-# cfi-flags are re-tested in prepare-compiler-check
-cfi-flags	:= $(cfi-clang-flags)
-KBUILD_CFLAGS	+= $(cfi-flags)
+CFI_CFLAGS	:= $(cfi-clang-flags)
+KBUILD_CFLAGS	+= $(CFI_CFLAGS)
 
 DISABLE_CFI	:= $(DISABLE_CFI_CLANG)
-export DISABLE_CFI
+DISABLE_LTO	+= $(DISABLE_CFI)
+export CFI_CFLAGS DISABLE_CFI
 endif
 
 # arch Makefile may override CC so keep this after arch Makefile is included
@@ -1640,7 +1667,8 @@
 		-o -name modules.builtin -o -name '.tmp_*.o.*' \
 		-o -name '*.c.[012]*.*' \
 		-o -name '*.ll' \
-		-o -name '*.gcno' \) -type f -print | xargs rm -f
+		-o -name '*.gcno' \
+		-o -name '*.*.symversions' \) -type f -print | xargs rm -f
 
 # Generate tags for editors
 # ---------------------------------------------------------------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index dc96df7..53b62c6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -474,6 +474,45 @@
 	  about 20% of all kernel functions, which increases the kernel code
 	  size by about 2%.
 
+config LTO
+	def_bool n
+
+config ARCH_SUPPORTS_LTO_CLANG
+	bool
+	help
+	  An architecture should select this option if it supports:
+	  - compiling with clang,
+	  - compiling inline assembly with clang's integrated assembler,
+	  - and linking with LLD.
+
+choice
+	prompt "Link-Time Optimization (LTO) (EXPERIMENTAL)"
+	default LTO_NONE
+	help
+	  This option turns on Link-Time Optimization (LTO).
+
+config LTO_NONE
+	bool "None"
+
+config LTO_CLANG
+	bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
+	depends on ARCH_SUPPORTS_LTO_CLANG
+	depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
+	depends on !KASAN
+	depends on CC_IS_CLANG && LD_IS_LLD
+	select LTO
+	help
+	  This option enables clang's Link Time Optimization (LTO), which allows
+	  the compiler to optimize the kernel globally at link time. If you
+	  enable this option, the compiler generates LLVM IR instead of object
+	  files, and the actual compilation from IR occurs at the LTO link step,
+	  which may take several minutes.
+
+	  If you select this option, you must compile the kernel with clang and
+	  LLD.
+
+endchoice
+
 config CFI
 	bool
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8ef577f..f201cf3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -612,6 +612,7 @@
 	select HAVE_IDE
 	select PM_GENERIC_DOMAINS if PM
 	select PM_GENERIC_DOMAINS_OF if PM && OF
+	select REGMAP_MMIO
 	select RESET_CONTROLLER
 	select USE_OF
 	select ZONE_DMA
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 517e0e1..e205bbb 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1395,7 +1395,21 @@
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 5641d16..28e7513 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -93,7 +93,7 @@
 };
 
 &hdmi {
-	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
 
 &pwm {
diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
index 5516785..33a085f 100644
--- a/arch/arm/boot/dts/exynos5260.dtsi
+++ b/arch/arm/boot/dts/exynos5260.dtsi
@@ -223,7 +223,7 @@
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index e84544b..b90cea8 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -22,7 +22,7 @@
 			"Headphone Jack", "HPL",
 			"Headphone Jack", "HPR",
 			"Headphone Jack", "MICBIAS",
-			"IN1", "Headphone Jack",
+			"IN12", "Headphone Jack",
 			"Speakers", "SPKL",
 			"Speakers", "SPKR";
 
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
index acc3b11f..cde3025 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
@@ -298,7 +298,7 @@
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	vmcc-supply = <&reg_sd3_vmmc>;
 	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-	bus-witdh = <4>;
+	bus-width = <4>;
 	no-1-8-v;
 	status = "okay";
 };
@@ -309,7 +309,7 @@
 	pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
 	vmcc-supply = <&reg_sd4_vmmc>;
-	bus-witdh = <8>;
+	bus-width = <8>;
 	no-1-8-v;
 	non-removable;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index ed1aafd..fe4e89d 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -89,6 +89,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
+	phy-reset-duration = <10>; /* in msecs */
 	phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
 	phy-supply = <&vdd_eth_io_reg>;
 	status = "disabled";
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index e6a36a7..c706adf 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -1261,27 +1261,27 @@
 	gpu_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp@100000000 {
+		opp-100000000 {
 			opp-hz = /bits/ 64 <100000000>;
 			opp-microvolt = <950000>;
 		};
-		opp@200000000 {
+		opp-200000000 {
 			opp-hz = /bits/ 64 <200000000>;
 			opp-microvolt = <950000>;
 		};
-		opp@300000000 {
+		opp-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <1000000>;
 		};
-		opp@400000000 {
+		opp-400000000 {
 			opp-hz = /bits/ 64 <400000000>;
 			opp-microvolt = <1100000>;
 		};
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			opp-microvolt = <1200000>;
 		};
-		opp@600000000 {
+		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1250000>;
 		};
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 07e3194..617c2c9 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
 
 	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 07e27f2..d2453e2d 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -68,6 +68,8 @@
 #define BPIALL				__ACCESS_CP15(c7, 0, c5, 6)
 #define ICIALLU				__ACCESS_CP15(c7, 0, c5, 0)
 
+#define CNTVCT				__ACCESS_CP15_64(1, c14)
+
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 265ea9c..523c499 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -317,6 +317,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 	return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+				       const void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_write_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	switch(read_cpuid_part()) {
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index ec29de2..cab8947 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -133,9 +133,9 @@
  */
 	.text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr	r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 32fae4d..0921e2c 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -594,13 +594,13 @@ static int __init at91_pm_backup_init(void)
 
 	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
 	if (!np)
-		goto securam_fail;
+		goto securam_fail_no_ref_dev;
 
 	pdev = of_find_device_by_node(np);
 	of_node_put(np);
 	if (!pdev) {
 		pr_warn("%s: failed to find securam device!\n", __func__);
-		goto securam_fail;
+		goto securam_fail_no_ref_dev;
 	}
 
 	sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -623,6 +623,8 @@ static int __init at91_pm_backup_init(void)
 	return 0;
 
 securam_fail:
+	put_device(&pdev->dev);
+securam_fail_no_ref_dev:
 	iounmap(pm_data.sfrbu);
 	pm_data.sfrbu = NULL;
 	return ret;
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index be1f20f..fbe1db6 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -196,6 +196,7 @@ void __init exynos_firmware_init(void)
 		return;
 
 	addr = of_get_address(nd, 0, NULL, NULL);
+	of_node_put(nd);
 	if (!addr) {
 		pr_err("%s: No address specified.\n", __func__);
 		return;
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 7ead3ac..b1fe53e 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -639,8 +639,10 @@ void __init exynos_pm_init(void)
 
 	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
 		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+		of_node_put(np);
 		return;
 	}
+	of_node_put(np);
 
 	pm_data = (const struct exynos_pm_data *) match->data;
 
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index c7169c2..08c7892 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
 		return;
 
 	m4if_base = of_iomap(np, 0);
+	of_node_put(np);
 	if (!m4if_base) {
 		pr_err("Unable to map M4IF registers\n");
 		return;
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f..fe4932f 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 	}
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 	.hw_id = 0,
 	.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
 	.resource = iop13xx_adma_0_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_0_data,
 	},
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
 	.resource = iop13xx_adma_1_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_1_data,
 	},
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
 	.resource = iop13xx_adma_2_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_2_data,
 	},
 };
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec..116feb6 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
 	}
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
 	.name = "iop-tpmi",
 	.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
 	.resource = iop13xx_tpmi_0_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
 	.resource = iop13xx_tpmi_1_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
 	.resource = iop13xx_tpmi_2_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
 	.resource = iop13xx_tpmi_3_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8d..d961222 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
 	.resource = iop3xx_dma_0_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_0_data,
 	},
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
 	.resource = iop3xx_dma_1_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_1_data,
 	},
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
 	.resource = iop3xx_aau_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_aau_data,
 	},
 };
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a2399fd..1e97087 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
 	.resource	= orion_xor0_shared_resources,
 	.dev            = {
 		.dma_mask               = &orion_xor_dmamask,
-		.coherent_dma_mask      = DMA_BIT_MASK(64),
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
 		.platform_data          = &orion_xor0_pdata,
 	},
 };
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
 	.resource	= orion_xor1_shared_resources,
 	.dev            = {
 		.dma_mask               = &orion_xor_dmamask,
-		.coherent_dma_mask      = DMA_BIT_MASK(64),
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
 		.platform_data          = &orion_xor1_pdata,
 	},
 };
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index a9dd619..7bdbf5d5 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -18,9 +18,9 @@
 #include <linux/compiler.h>
 #include <linux/hrtimer.h>
 #include <linux/time.h>
-#include <asm/arch_timer.h>
 #include <asm/barrier.h>
 #include <asm/bug.h>
+#include <asm/cp15.h>
 #include <asm/page.h>
 #include <asm/unistd.h>
 #include <asm/vdso_datapage.h>
@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
 	u64 cycle_now;
 	u64 nsec;
 
-	cycle_now = arch_counter_get_cntvct();
+	isb();
+	cycle_now = read_sysreg(CNTVCT);
 
 	cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6599e3e..dbee061 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -57,6 +57,7 @@
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_SUPPORTS_MEMORY_FAILURE
+	select ARCH_SUPPORTS_LTO_CLANG
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
@@ -108,7 +109,7 @@
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
-	select HAVE_ARCH_PREL32_RELOCATIONS
+	select HAVE_ARCH_PREL32_RELOCATIONS if !LTO_CLANG
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_STACKLEAK
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -506,6 +507,24 @@
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1463225
+	bool "Cortex-A76: Software Step might prevent interrupt recognition"
+	default y
+	help
+	  This option adds a workaround for Arm Cortex-A76 erratum 1463225.
+
+	  On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
+	  of a system call instruction (SVC) can prevent recognition of
+	  subsequent interrupts when software stepping is disabled in the
+	  exception handler of the system call and either kernel debugging
+	  is enabled or VHE is in use.
+
+	  Work around the erratum by triggering a dummy step exception
+	  when handling a system call from a task that is being stepped
+	  in a VHE configuration of the kernel.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 932be86..8e65950 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -166,6 +166,14 @@
 	  This enables support for the LITO chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_BENGAL
+	bool "Enable Support for Qualcomm Technologies, Inc. BENGAL"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	help
+	  This enables support for the BENGAL chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_REALTEK
 	bool "Realtek Platforms"
 	help
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 246c317..91061d9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -94,8 +94,8 @@
 	snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 50000>;
-	tx_delay = <0x25>;
-	rx_delay = <0x11>;
+	tx_delay = <0x24>;
+	rx_delay = <0x18>;
 	status = "okay";
 };
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index c88e603..df7e62d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -305,6 +305,7 @@
 		phys = <&emmc_phy>;
 		phy-names = "phy_arasan";
 		power-domains = <&power RK3399_PD_EMMC>;
+		disable-cqe-dcmd;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 1e4155c..ced9e4f 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -72,6 +72,8 @@
 # CONFIG_ARM_SCPI_POWER_DOMAIN is not set
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_LTO_CLANG=y
+CONFIG_CFI_CLANG=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
@@ -85,6 +87,7 @@
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
 CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -196,7 +199,6 @@
 # CONFIG_MAC80211_RC_MINSTREL is not set
 CONFIG_RFKILL=y
 # CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
 # CONFIG_ALLOW_DEV_COREDUMP is not set
 CONFIG_DEBUG_DEVRES=y
 CONFIG_OF_UNITTEST=y
@@ -396,6 +398,7 @@
 CONFIG_ASHMEM=y
 CONFIG_ANDROID_VSOC=y
 CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
 CONFIG_COMMON_CLK_SCPI=y
 # CONFIG_COMMON_CLK_XGENE is not set
 CONFIG_MAILBOX=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
new file mode 100644
index 0000000..28a67bc
--- /dev/null
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -0,0 +1,553 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_PRINT_VMEMLAYOUT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_TORTURE_TEST=m
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_MEMTEST=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 1d02cb2..c952077 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -54,9 +54,11 @@
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_SECCOMP=y
+CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_SETEND_EMULATION=y
 # CONFIG_ARM64_VHE is not set
 CONFIG_RANDOMIZE_BASE=y
@@ -73,6 +75,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -106,6 +109,7 @@
 CONFIG_CMA=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -273,6 +277,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_OKL4_USER_VIRQ=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -311,6 +316,7 @@
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS2=y
 CONFIG_CNSS2_QMI=y
+CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
 CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
@@ -342,6 +348,8 @@
 # CONFIG_DEVPORT is not set
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_OKL4_PIPE=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_I3C=y
@@ -362,6 +370,7 @@
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_QNOVO5=y
 CONFIG_QPNP_FG_GEN4=y
+CONFIG_HL6111R=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -469,6 +478,7 @@
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
@@ -489,6 +499,7 @@
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -568,6 +579,8 @@
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
@@ -578,10 +591,12 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTI_DDR_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -592,6 +607,7 @@
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -613,12 +629,9 @@
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -640,6 +653,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CCM=y
 CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 1f8ad89..123a55d 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -54,6 +54,7 @@
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_SECCOMP=y
+CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
 CONFIG_PRINT_VMEMLAYOUT=y
 CONFIG_ARMV8_DEPRECATED=y
@@ -75,6 +76,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -113,6 +115,7 @@
 CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -283,6 +286,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_OKL4_USER_VIRQ=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -320,6 +324,7 @@
 CONFIG_CNSS2=y
 CONFIG_CNSS2_DEBUG=y
 CONFIG_CNSS2_QMI=y
+CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
 CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
@@ -354,6 +359,8 @@
 # CONFIG_DEVPORT is not set
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_OKL4_PIPE=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_I3C=y
@@ -374,6 +381,7 @@
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_QNOVO5=y
 CONFIG_QPNP_FG_GEN4=y
+CONFIG_HL6111R=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -482,7 +490,9 @@
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -503,10 +513,12 @@
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_GPI_DMA=y
 CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_DEBUG_DMA_BUF_REF=y
 CONFIG_UIO=y
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -538,6 +550,7 @@
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_IOMMU_DEBUGFS=y
 CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
@@ -599,10 +612,12 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTI_DDR_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -613,6 +628,7 @@
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -637,12 +653,9 @@
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -667,6 +680,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CCM=y
 CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
@@ -679,6 +693,7 @@
 CONFIG_XZ_DEC=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index c2cb03f..d9fc53c 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -44,6 +44,7 @@
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_PROFILING=y
 # CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_LITO=y
 CONFIG_PCI=y
@@ -60,7 +61,6 @@
 # CONFIG_ARM64_VHE is not set
 CONFIG_RANDOMIZE_BASE=y
 # CONFIG_EFI is not set
-CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_COMPAT=y
 CONFIG_PM_AUTOSLEEP=y
@@ -98,10 +98,17 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_CMA=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -248,10 +255,14 @@
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -325,7 +336,9 @@
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 # CONFIG_DEVPORT is not set
 CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
@@ -357,6 +370,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
@@ -381,6 +395,7 @@
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_NPU=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=m
 CONFIG_VIDEO_VICODEC=y
@@ -397,6 +412,7 @@
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -448,9 +464,11 @@
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -505,11 +523,14 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_LITO_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
 CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_QMI_RMNET=y
@@ -535,6 +556,7 @@
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -547,9 +569,13 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTI_DDR_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
@@ -557,6 +583,7 @@
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_GOV_CDSPL3=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
@@ -573,14 +600,13 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index e770dc3..ab8b52d 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -9,6 +9,7 @@
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_PSI=y
+CONFIG_PSI_FTRACE=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -18,6 +19,7 @@
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
@@ -35,7 +37,6 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 # CONFIG_FHANDLE is not set
-CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
@@ -43,6 +44,7 @@
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_PROFILING=y
 # CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_LITO=y
 CONFIG_PCI=y
@@ -59,7 +61,6 @@
 CONFIG_SETEND_EMULATION=y
 # CONFIG_ARM64_VHE is not set
 CONFIG_RANDOMIZE_BASE=y
-CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_COMPAT=y
 CONFIG_PM_AUTOSLEEP=y
@@ -99,12 +100,19 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -252,10 +260,14 @@
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -331,7 +343,9 @@
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 # CONFIG_DEVPORT is not set
 CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
@@ -363,6 +377,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
@@ -387,6 +402,7 @@
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_NPU=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=y
 CONFIG_VIDEO_VICODEC=y
@@ -403,6 +419,7 @@
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -454,9 +471,12 @@
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -508,6 +528,7 @@
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
@@ -516,11 +537,14 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_LITO_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
 CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_QMI_RMNET=y
@@ -548,6 +572,7 @@
 CONFIG_QCOM_MINIDUMP=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -560,9 +585,13 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTI_DDR_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
@@ -571,6 +600,7 @@
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_GOV_CDSPL3=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
@@ -587,14 +617,13 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -631,12 +660,16 @@
 CONFIG_XZ_DEC=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PANIC_ON_OOM=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
 CONFIG_PAGE_POISONING=y
 CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
@@ -650,10 +683,10 @@
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=-1
 CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
+CONFIG_PROVE_LOCKING=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_LOCK_TORTURE_TEST=m
 CONFIG_DEBUG_SG=y
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index e7a95a5..5cc2489 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
 
 	kernel_neon_begin();
 	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 067d893..bee3027 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -60,6 +60,22 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
 				      struct ghash_key const *k,
 				      const char *head);
 
+#ifdef CONFIG_CFI_CLANG
+static inline void __cfi_pmull_ghash_update_p64(int blocks, u64 dg[],
+                const char *src, struct ghash_key const *k, const char *head)
+{
+        return pmull_ghash_update_p64(blocks, dg, src, k, head);
+}
+#define pmull_ghash_update_p64 __cfi_pmull_ghash_update_p64
+
+static inline void __cfi_pmull_ghash_update_p8(int blocks, u64 dg[],
+                const char *src, struct ghash_key const *k, const char *head)
+{
+        return pmull_ghash_update_p8(blocks, dg, src, k, head);
+}
+#define pmull_ghash_update_p8 __cfi_pmull_ghash_update_p8
+#endif
+
 static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
 				  struct ghash_key const *k,
 				  const char *head);
@@ -418,9 +434,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;
 
 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -430,9 +448,9 @@ static int gcm_encrypt(struct aead_request *req)
 
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);
 
-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL);
 
@@ -553,7 +571,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
 
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index f2a234d..93e0751 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	isb();
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {				\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 static inline u64 arch_counter_get_cntpct(void)
 {
+	u64 cnt;
+
 	isb();
-	return arch_timer_reg_read_stable(cntpct_el0);
+	cnt = arch_timer_reg_read_stable(cntpct_el0);
+	arch_counter_enforce_ordering(cnt);
+	return cnt;
 }
 
 static inline u64 arch_counter_get_cntvct(void)
 {
+	u64 cnt;
+
 	isb();
-	return arch_timer_reg_read_stable(cntvct_el0);
+	cnt = arch_timer_reg_read_stable(cntvct_el0);
+	arch_counter_enforce_ordering(cnt);
+	return cnt;
 }
 
+#undef arch_counter_enforce_ordering
+
 static inline int arch_timer_arch_init(void)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f37920b4..ba45a37 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -51,6 +51,7 @@
 #define ARM64_SSBD				30
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
+#define ARM64_WORKAROUND_1463225		33
 #define ARM64_SSBS				34
 #define ARM64_WORKAROUND_1188873		35
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index b447b4d..c7e30a6 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -23,26 +23,34 @@
 
 #include <asm/errno.h>
 
+#define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 do {									\
+	unsigned int loops = FUTEX_MAX_LOOPS;				\
+									\
 	uaccess_enable();						\
 	asm volatile(							\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w0, %w3, %2\n"						\
-"	cbnz	%w0, 1b\n"						\
-"	dmb	ish\n"							\
+"	cbz	%w0, 3f\n"						\
+"	sub	%w4, %w4, %w0\n"					\
+"	cbnz	%w4, 1b\n"						\
+"	mov	%w0, %w7\n"						\
 "3:\n"									\
+"	dmb	ish\n"							\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
-"4:	mov	%w0, %w5\n"						\
+"4:	mov	%w0, %w6\n"						\
 "	b	3b\n"							\
 "	.popsection\n"							\
 	_ASM_EXTABLE(1b, 4b)						\
 	_ASM_EXTABLE(2b, 4b)						\
-	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
-	: "r" (oparg), "Ir" (-EFAULT)					\
+	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),	\
+	  "+r" (loops)							\
+	: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN)			\
 	: "memory");							\
 	uaccess_disable();						\
 } while (0)
@@ -50,30 +58,30 @@ do {									\
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-	int oldval, ret, tmp;
+	int oldval = 0, ret, tmp;
 	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
 	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w3, %w4",
+		__futex_atomic_op("mov	%w3, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w3, %w1, %w4",
+		__futex_atomic_op("add	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w3, %w1, %w4",
+		__futex_atomic_op("orr	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w3, %w1, %w4",
+		__futex_atomic_op("and	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w3, %w1, %w4",
+		__futex_atomic_op("eor	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default:
@@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 			      u32 oldval, u32 newval)
 {
 	int ret = 0;
+	unsigned int loops = FUTEX_MAX_LOOPS;
 	u32 val, tmp;
 	u32 __user *uaddr;
 
@@ -104,20 +113,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
-"	sub	%w3, %w1, %w4\n"
-"	cbnz	%w3, 3f\n"
-"2:	stlxr	%w3, %w5, %2\n"
-"	cbnz	%w3, 1b\n"
-"	dmb	ish\n"
+"	sub	%w3, %w1, %w5\n"
+"	cbnz	%w3, 4f\n"
+"2:	stlxr	%w3, %w6, %2\n"
+"	cbz	%w3, 3f\n"
+"	sub	%w4, %w4, %w3\n"
+"	cbnz	%w4, 1b\n"
+"	mov	%w0, %w8\n"
 "3:\n"
+"	dmb	ish\n"
+"4:\n"
 "	.pushsection .fixup,\"ax\"\n"
-"4:	mov	%w0, %w6\n"
-"	b	3b\n"
+"5:	mov	%w0, %w7\n"
+"	b	4b\n"
 "	.popsection\n"
-	_ASM_EXTABLE(1b, 4b)
-	_ASM_EXTABLE(2b, 4b)
-	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
-	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+	_ASM_EXTABLE(1b, 5b)
+	_ASM_EXTABLE(2b, 5b)
+	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+	: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
 	: "memory");
 	uaccess_disable();
 
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 384c343..f150c0f 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -28,7 +28,7 @@
 	({								\
 		u64 reg;						\
 		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
-					 "mrs_s %0, " __stringify(r##vh),\
+					 __mrs_s("%0", r##vh),		\
 					 ARM64_HAS_VIRT_HOST_EXTN)	\
 			     : "=r" (reg));				\
 		reg;							\
@@ -38,7 +38,7 @@
 	do {								\
 		u64 __val = (u64)(v);					\
 		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
-					 "msr_s " __stringify(r##vh) ", %x0",\
+					 __msr_s(r##vh, "%x0"),		\
 					 ARM64_HAS_VIRT_HOST_EXTN)	\
 					 : : "rZ" (__val));		\
 	} while (0)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d6fff7d..b255844 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -394,6 +394,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 	return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+				       const void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_write_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 8262325..8be3b0f 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -20,7 +20,12 @@
 
 #else	/* __ASSEMBLER__ */
 
+#ifdef CONFIG_LTO_CLANG
+#define __LSE_PREAMBLE	".arch armv8-a+lse\n"
+#else
 __asm__(".arch_extension	lse");
+#define __LSE_PREAMBLE
+#endif
 
 /* Move the ll/sc atomics out-of-line */
 #define __LL_SC_INLINE		notrace
@@ -33,7 +38,7 @@ __asm__(".arch_extension	lse");
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
-	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+	ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
 #endif	/* __ASSEMBLER__ */
 #else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 82fcef9..f64d4e3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -478,6 +478,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 	return (unsigned long) __va(pmd_page_paddr(pmd));
 }
 
+static inline void pte_unmap(pte_t *pte) { }
+
 /* Find an entry in the third-level page table. */
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
@@ -486,7 +488,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)			do { } while (0)
 #define pte_unmap_nested(pte)		do { } while (0)
 
 #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 233995a..ede4218 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -53,7 +53,15 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 #ifdef CONFIG_COMPAT
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
 #define TASK_SIZE_32		UL(0x100000000)
+#else
+#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
 #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
 				TASK_SIZE_32 : TASK_SIZE_64)
 #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
@@ -172,6 +180,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 	regs->pc = pc;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -179,7 +197,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -198,7 +216,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index ad8be16..5810265 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -20,7 +20,7 @@
 #include <linux/compat.h>
 #include <linux/err.h>
 
-typedef long (*syscall_fn_t)(struct pt_regs *regs);
+typedef long (*syscall_fn_t)(const struct pt_regs *regs);
 
 extern const syscall_fn_t sys_call_table[];
 
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index a4477e5..507d0ee 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -30,10 +30,10 @@
 	}										\
 	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#define COMPAT_SYSCALL_DEFINE0(sname)					\
-	asmlinkage long __arm64_compat_sys_##sname(void);		\
-	ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO);	\
-	asmlinkage long __arm64_compat_sys_##sname(void)
+#define COMPAT_SYSCALL_DEFINE0(sname)							\
+	asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused);	\
+	ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO);			\
+	asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
 
 #define COND_SYSCALL_COMPAT(name) \
 	cond_syscall(__arm64_compat_sys_##name);
@@ -62,11 +62,11 @@
 	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
 #ifndef SYSCALL_DEFINE0
-#define SYSCALL_DEFINE0(sname)					\
-	SYSCALL_METADATA(_##sname, 0);				\
-	asmlinkage long __arm64_sys_##sname(void);		\
-	ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO);	\
-	asmlinkage long __arm64_sys_##sname(void)
+#define SYSCALL_DEFINE0(sname)							\
+	SYSCALL_METADATA(_##sname, 0);						\
+	asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused);	\
+	ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO);			\
+	asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
 #endif
 
 #ifndef COND_SYSCALL
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7e9ab1f..a3e6c10 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -705,20 +705,39 @@
 #include <linux/build_bug.h>
 #include <linux/types.h>
 
-asm(
-"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-"	.equ	.L__reg_num_x\\num, \\num\n"
-"	.endr\n"
+#define __DEFINE_MRS_MSR_S_REGNUM				\
+"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+"	.equ	.L__reg_num_x\\num, \\num\n"			\
+"	.endr\n"						\
 "	.equ	.L__reg_num_xzr, 31\n"
-"\n"
-"	.macro	mrs_s, rt, sreg\n"
-	__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MRS_S						\
+	__DEFINE_MRS_MSR_S_REGNUM				\
+"	.macro	mrs_s, rt, sreg\n"				\
+	__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))	\
 "	.endm\n"
-"\n"
-"	.macro	msr_s, sreg, rt\n"
-	__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MSR_S						\
+	__DEFINE_MRS_MSR_S_REGNUM				\
+"	.macro	msr_s, sreg, rt\n"				\
+	__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))	\
 "	.endm\n"
-);
+
+#define UNDEFINE_MRS_S						\
+"	.purgem	mrs_s\n"
+
+#define UNDEFINE_MSR_S						\
+"	.purgem	msr_s\n"
+
+#define __mrs_s(v, r)						\
+	DEFINE_MRS_S						\
+"	mrs_s " v ", " __stringify(r) "\n"			\
+	UNDEFINE_MRS_S
+
+#define __msr_s(r, v)						\
+	DEFINE_MSR_S						\
+"	msr_s " __stringify(r) ", " v "\n"			\
+	UNDEFINE_MSR_S
 
 /*
  * Unlike read_cpuid, calls to read_sysreg are never expected to be
@@ -746,13 +765,13 @@ asm(
  */
 #define read_sysreg_s(r) ({						\
 	u64 __val;							\
-	asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val));	\
+	asm volatile(__mrs_s("%0", r) : "=r" (__val));			\
 	__val;								\
 })
 
 #define write_sysreg_s(v, r) do {					\
 	u64 __val = (u64)(v);						\
-	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
+	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));		\
 } while (0)
 
 /*
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a637..f89263c 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -38,6 +38,7 @@ struct vdso_data {
 	__u32 tz_minuteswest;	/* Whacky timezone stuff */
 	__u32 tz_dsttime;
 	__u32 use_syscall;
+	__u32 hrtimer_res;
 };
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 323aeb5..92fba85 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -99,7 +99,7 @@ int main(void)
   DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
   DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
   DEFINE(CLOCK_MONOTONIC_RAW,	CLOCK_MONOTONIC_RAW);
-  DEFINE(CLOCK_REALTIME_RES,	MONOTONIC_RES_NSEC);
+  DEFINE(CLOCK_REALTIME_RES,	offsetof(struct vdso_data, hrtimer_res));
   DEFINE(CLOCK_REALTIME_COARSE,	CLOCK_REALTIME_COARSE);
   DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
   DEFINE(CLOCK_COARSE_RES,	LOW_RES_NSEC);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 32b19b6..774e828 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -433,6 +433,22 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 }
 #endif	/* CONFIG_ARM64_SSBD */
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+			       int scope)
+{
+	u32 midr = read_cpuid_id();
+	/* Cortex-A76 r0p0 - r3p1 */
+	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
 	.matches = is_affected_midr_range,			\
 	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -721,6 +737,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_workaround_1188873_cpus),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+	{
+		.desc = "ARM erratum 1463225",
+		.capability = ARM64_WORKAROUND_1463225,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = has_cortex_a76_erratum_1463225,
+	},
+#endif
 	{
 	}
 };
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index ea00124..00f8b86 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
 				pr_err("%pOF: missing enable-method property\n",
 					dn);
 		}
+		of_node_put(dn);
 	} else {
 		enable_method = acpi_get_enable_method(cpu);
 		if (!enable_method) {
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 06ca574..262925b 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
  */
 static int clear_os_lock(unsigned int cpu)
 {
+	write_sysreg(0, osdlr_el1);
 	write_sysreg(0, oslar_el1);
 	isb();
 	return 0;
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b09b6f7..06941c1 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
 		/*
-		 * Randomize the module region over a 4 GB window covering the
+		 * Randomize the module region over a 2 GB window covering the
 		 * kernel. This reduces the risk of modules leaking information
 		 * about the address of the kernel itself, but results in
 		 * branches between modules and the core kernel that are
 		 * resolved via PLTs. (Branches between modules will be
 		 * resolved normally.)
 		 */
-		module_range = SZ_4G - (u64)(_end - _stext);
-		module_alloc_base = max((u64)_end + offset - SZ_4G,
+		module_range = SZ_2G - (u64)(_end - _stext);
+		module_alloc_base = max((u64)_end + offset - SZ_2G,
 					(u64)MODULES_VADDR);
 	} else {
 		/*
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f0f27ae..0b368ce 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
 		 * can simply omit this fallback in that case.
 		 */
 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + SZ_4G, GFP_KERNEL,
+				module_alloc_base + SZ_2G, GFP_KERNEL,
 				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 12a3e59..c13674f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -480,6 +480,20 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	if (likely(!(next->flags & PF_KTHREAD)) &&
+	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+	    !test_tsk_thread_flag(next, TIF_SSBD)) {
+		struct pt_regs *regs = task_pt_regs(next);
+
+		if (compat_user_mode(regs))
+			set_compat_ssbs_bit(regs);
+		else if (user_mode(regs))
+			set_ssbs_bit(regs);
+	}
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -508,6 +522,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
 	uao_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 5ba4465..ea94cf8 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
+	if (!low)
+		return false;
+
 	if (sp < low || sp >= high)
 		return false;
 
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
+	if (!low)
+		return false;
+
 	if (sp < low || sp >= high)
 		return false;
 
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b44065f..71d9921 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -31,7 +31,7 @@
 
 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
-		unsigned long, fd, off_t, off)
+		unsigned long, fd, unsigned long, off)
 {
 	if (offset_in_page(off) != 0)
 		return -EINVAL;
@@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
 	return ksys_personality(personality);
 }
 
+asmlinkage long sys_ni_syscall(void);
+
+SYSCALL_DEFINE0(ni_syscall)
+{
+	return sys_ni_syscall();
+}
+
 /*
  * Wrappers to pass the pt_regs argument.
  */
 #define sys_personality		sys_arm64_personality
 
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall	sys_ni_syscall
-
 #undef __SYSCALL
 #define __SYSCALL(nr, sym)	asmlinkage long __arm64_##sym(const struct pt_regs *);
 #include <asm/unistd.h>
 
 #undef __SYSCALL
-#define __SYSCALL(nr, sym)	[nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym)	[nr] = __arm64_##sym,
 
 const syscall_fn_t sys_call_table[__NR_syscalls] = {
-	[0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+	[0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
 #include <asm/unistd.h>
 };
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 0f8bcb7..f8f6c26 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -133,17 +133,21 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
 	return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
 }
 
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall	sys_ni_syscall
+asmlinkage long sys_ni_syscall(void);
+
+COMPAT_SYSCALL_DEFINE0(ni_syscall)
+{
+	return sys_ni_syscall();
+}
 
 #undef __SYSCALL
 #define __SYSCALL(nr, sym)	asmlinkage long __arm64_##sym(const struct pt_regs *);
 #include <asm/unistd32.h>
 
 #undef __SYSCALL
-#define __SYSCALL(nr, sym)	[nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym)	[nr] = __arm64_##sym,
 
 const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
-	[0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+	[0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
 #include <asm/unistd32.h>
 };
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5610ac0..871c739 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
 int syscall_trace_enter(struct pt_regs *regs);
 void syscall_trace_exit(struct pt_regs *regs);
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+	u32 reg, val;
+
+	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+		return;
+
+	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+		return;
+
+	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+	reg = read_sysreg(mdscr_el1);
+	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+	write_sysreg(val, mdscr_el1);
+	asm volatile("msr daifclr, #8");
+	isb();
+
+	/* We will have taken a single-step exception by this point */
+
+	write_sysreg(reg, mdscr_el1);
+	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 			   const syscall_fn_t syscall_table[])
 {
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	regs->orig_x0 = regs->regs[0];
 	regs->syscallno = scno;
 
+	cortex_a76_erratum_1463225_svc_handler();
 	local_daif_restore(DAIF_PROCCTX);
 	user_exit();
 
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d41900..ec0bb58 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
+	/* Read without the seqlock held by clock_getres() */
+	WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
+
 	if (!use_syscall) {
 		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b215c71..ef3f9d9 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,7 @@
 ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ccflags-y += $(DISABLE_LTO)
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a..856fee6 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -73,6 +73,13 @@
 	movn	x_tmp, #0xff00, lsl #48
 	and	\res, x_tmp, \res
 	mul	\res, \res, \mult
+	/*
+	 * Fake address dependency from the value computed from the counter
+	 * register to subsequent data page accesses so that the sequence
+	 * locking also orders the read of the counter.
+	 */
+	and	x_tmp, \res, xzr
+	add	vdso_data, vdso_data, x_tmp
 	.endm
 
 	/*
@@ -147,12 +154,12 @@
 	/* w11 = cs_mono_mult, w12 = cs_shift */
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
-	seqcnt_check fail=1b
 
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=1b
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
 
@@ -211,13 +218,13 @@
 	/* w11 = cs_mono_mult, w12 = cs_shift */
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
-	seqcnt_check fail=realtime
 
 	/* All computations are done with left-shifted nsecs. */
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=realtime
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
 	clock_gettime_return, shift=1
@@ -231,7 +238,6 @@
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
 	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
-	seqcnt_check fail=monotonic
 
 	/* All computations are done with left-shifted nsecs. */
 	lsl	x4, x4, x12
@@ -239,6 +245,7 @@
 	lsl	x9, x9, x12
 
 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=monotonic
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
 
@@ -253,13 +260,13 @@
 	/* w11 = cs_raw_mult, w12 = cs_shift */
 	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
 	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
-	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=monotonic_raw
 	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, nsec_to_sec=x9
 
@@ -301,13 +308,14 @@
 	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
 	b.ne	1f
 
-	ldr	x2, 5f
+	adr	vdso_data, _vdso_data
+	ldr	w2, [vdso_data, #CLOCK_REALTIME_RES]
 	b	2f
 1:
 	cmp	w0, #CLOCK_REALTIME_COARSE
 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
 	b.ne	4f
-	ldr	x2, 6f
+	ldr	x2, 5f
 2:
 	cbz	x1, 3f
 	stp	xzr, x2, [x1]
@@ -321,8 +329,6 @@
 	svc	#0
 	ret
 5:
-	.quad	CLOCK_REALTIME_RES
-6:
 	.quad	CLOCK_COARSE_RES
 	.cfi_endproc
 ENDPROC(__kernel_clock_getres)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index d9028bb..c769fe5 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -4,7 +4,8 @@
 #
 
 ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
-		$(DISABLE_STACKLEAK_PLUGIN) $(DISABLE_CFI)
+		$(DISABLE_STACKLEAK_PLUGIN) \
+		$(DISABLE_CFI)
 
 ifeq ($(cc-name),clang)
 ccflags-y += -fno-jump-tables
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 18b9a52..0688816 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -117,6 +117,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	int ret = -EINVAL;
 	bool loaded;
 
+	/* Reset PMU outside of the non-preemptible section */
+	kvm_pmu_vcpu_reset(vcpu);
+
 	preempt_disable();
 	loaded = (vcpu->cpu != -1);
 	if (loaded)
@@ -164,9 +167,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}
 
-	/* Reset PMU */
-	kvm_pmu_vcpu_reset(vcpu);
-
 	/* Default workaround setup is enabled (if supported) */
 	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
 		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 5df2d61..33ce9a5 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -11,7 +11,12 @@
 # patching of the bl instruction in the caller with an atomic instruction
 # when supported by the CPU. Result and argument registers are handled
 # correctly, based on the function prototype.
+ifeq ($(CONFIG_LD_IS_LLD), y)
+# https://bugs.llvm.org/show_bug.cgi?id=35841
+obj-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+else
 lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+endif
 CFLAGS_atomic_ll_sc.o	:= -ffixed-x1 -ffixed-x2        		\
 		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
 		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6c3b040f..ef4d78a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -791,6 +791,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	struct vm_struct *area;
 	int ret;
+	unsigned long pfn = 0;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_dma_coherent(dev, attrs));
@@ -798,20 +799,23 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
-		 */
-		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
-		return __swiotlb_mmap_pfn(vma, pfn, size);
-	}
-
 	area = find_vm_area(cpu_addr);
-	if (WARN_ON(!area || !area->pages))
-		return -ENXIO;
 
-	return iommu_dma_mmap(area->pages, size, vma);
+	if (area && area->pages)
+		return iommu_dma_mmap(area->pages, size, vma);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (is_vmalloc_addr(cpu_addr))
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations are
+		 * always remapped, hence in the vmalloc space.
+		 */
+		pfn = vmalloc_to_pfn(cpu_addr);
+
+	if (pfn)
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+
+	return -ENXIO;
 }
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -819,22 +823,24 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 			       size_t size, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+	if (area && area->pages)
+		return sg_alloc_table_from_pages(sgt, area->pages, count, 0,
+					size, GFP_KERNEL);
+	else if (!is_vmalloc_addr(cpu_addr))
+		page = virt_to_page(cpu_addr);
+	else if (is_vmalloc_addr(cpu_addr))
 		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations
+		 * are always remapped, hence in the vmalloc space.
 		 */
-		struct page *page = vmalloc_to_page(cpu_addr);
+		page = vmalloc_to_page(cpu_addr);
+
+	if (page)
 		return __swiotlb_get_sgtable_page(sgt, page, size);
-	}
-
-	if (WARN_ON(!area || !area->pages))
-		return -ENXIO;
-
-	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
-					 GFP_KERNEL);
+	return -ENXIO;
 }
 
 static void __iommu_sync_single_for_cpu(struct device *dev,
@@ -1050,18 +1056,14 @@ iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
 static int arm_iommu_get_dma_cookie(struct device *dev,
 				    struct dma_iommu_mapping *mapping)
 {
-	int s1_bypass = 0, is_fast = 0;
+	int is_fast = 0;
 	int err = 0;
 
 	mutex_lock(&iommu_dma_init_mutex);
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
 
-	if (s1_bypass)
-		mapping->ops = &arm64_swiotlb_dma_ops;
-	else if (is_fast)
+	if (is_fast)
 		err = fast_smmu_init_mapping(dev, mapping);
 	else
 		err = iommu_init_mapping(dev, mapping);
@@ -1070,20 +1072,6 @@ static int arm_iommu_get_dma_cookie(struct device *dev,
 	return err;
 }
 
-void arm_iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-	int s1_bypass = 0, is_fast = 0;
-
-	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
-	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &is_fast);
-
-	if (is_fast)
-		fast_smmu_put_dma_cookie(domain);
-	else if (!s1_bypass)
-		iommu_put_dma_cookie(domain);
-}
-
 /*
  * Checks for "qcom,iommu-dma-addr-pool" property.
  * If not present, leaves dma_addr and dma_size unmodified.
@@ -1129,17 +1117,22 @@ static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 	struct iommu_domain *domain;
 	struct iommu_group *group;
 	struct dma_iommu_mapping mapping = {0};
+	int s1_bypass;
 
 	group = dev->iommu_group;
 	if (!group)
 		return;
 
-	arm_iommu_get_dma_window(dev, &dma_base, &size);
-
 	domain = iommu_get_domain_for_dev(dev);
 	if (!domain)
 		return;
 
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+	if (s1_bypass)
+		return;
+
+	arm_iommu_get_dma_window(dev, &dma_base, &size);
+
 	/* Allow iommu-debug to call arch_setup_dma_ops to reconfigure itself */
 	if (domain->type != IOMMU_DOMAIN_DMA &&
 	    !of_device_is_compatible(dev->of_node, "iommu-debug-test")) {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ef004dd..9c94715 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -864,14 +864,47 @@ void __init hook_debug_fault_code(int nr,
 	debug_fault_info[nr].name	= name;
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+		return 0;
+
+	/*
+	 * We've taken a dummy step exception from the kernel to ensure
+	 * that interrupts are re-enabled on the syscall path. Return back
+	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+	 * masked so that we can safely restore the mdscr and get on with
+	 * handling the syscall.
+	 */
+	regs->pstate |= PSR_D_BIT;
+	return 1;
+}
+#else
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+	return 0;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
-					      unsigned int esr,
-					      struct pt_regs *regs)
+					       unsigned int esr,
+					       struct pt_regs *regs)
 {
 	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
 	unsigned long pc = instruction_pointer(regs);
 	int rv;
 
+	if (cortex_a76_erratum_1463225_debug_handler(regs))
+		return 0;
+
 	/*
 	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
 	 * already disabled to preserve the last enabled/disabled addresses.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fdf213a..0f987ff 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -399,6 +399,14 @@ static phys_addr_t pgd_pgtable_alloc(void)
 	return __pa(ptr);
 }
 
+void create_pgtable_mapping(phys_addr_t start, phys_addr_t end)
+{
+	unsigned long virt = (unsigned long)phys_to_virt(start);
+
+	__create_pgd_mapping(init_mm.pgd, start, virt, end - start,
+				PAGE_KERNEL, NULL, 0);
+}
+
 /*
  * This function can only be used to modify existing table entries,
  * without allocating new levels of table. Note that this permits the
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index dda41ce..76fd72f 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -116,24 +116,25 @@
 	mrs	x2, tpidr_el0
 	mrs	x3, tpidrro_el0
 	mrs	x4, contextidr_el1
-	mrs	x5, cpacr_el1
-	mrs	x6, tcr_el1
-	mrs	x7, vbar_el1
-	mrs	x8, mdscr_el1
-	mrs	x9, oslsr_el1
-	mrs	x10, sctlr_el1
+	mrs	x5, osdlr_el1
+	mrs	x6, cpacr_el1
+	mrs	x7, tcr_el1
+	mrs	x8, vbar_el1
+	mrs	x9, mdscr_el1
+	mrs	x10, oslsr_el1
+	mrs	x11, sctlr_el1
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	x11, tpidr_el1
+	mrs	x12, tpidr_el1
 alternative_else
-	mrs	x11, tpidr_el2
+	mrs	x12, tpidr_el2
 alternative_endif
-	mrs	x12, sp_el0
+	mrs	x13, sp_el0
 	stp	x2, x3, [x0]
-	stp	x4, xzr, [x0, #16]
-	stp	x5, x6, [x0, #32]
-	stp	x7, x8, [x0, #48]
-	stp	x9, x10, [x0, #64]
-	stp	x11, x12, [x0, #80]
+	stp	x4, x5, [x0, #16]
+	stp	x6, x7, [x0, #32]
+	stp	x8, x9, [x0, #48]
+	stp	x10, x11, [x0, #64]
+	stp	x12, x13, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -156,8 +157,8 @@
 	msr	cpacr_el1, x6
 
 	/* Don't change t0sz here, mask those bits when restoring */
-	mrs	x5, tcr_el1
-	bfi	x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+	mrs	x7, tcr_el1
+	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
@@ -181,6 +182,7 @@
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
+	msr	osdlr_el1, x5
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 783de51..6c88165 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,12 +100,6 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
 	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
 
-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
-	aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
-				  AARCH64_INSN_PRFM_TARGET_##target, \
-				  AARCH64_INSN_PRFM_POLICY_##policy)
-
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
 	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a6fdaea..2eef156 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -736,7 +736,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_STX | BPF_XADD | BPF_DW:
 		emit_a64_mov_i(1, tmp, off, ctx);
 		emit(A64_ADD(1, tmp, tmp, dst), ctx);
-		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
 		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 4c7a93f..7c0b2e6 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -211,12 +211,6 @@ const char *get_system_type(void)
 	return ath79_sys_type;
 }
 
-int get_c0_perfcount_int(void)
-{
-	return ATH79_MISC_IRQ(5);
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
 unsigned int get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 684c9dc..b8a21b9 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -63,7 +63,7 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_FANOTIFY=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index eb6c0d5..2c1e30c 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>
 
 static struct hard_trap_info {
 	unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ static void kgdb_call_nmi_hook(void *ignored)
 	old_fs = get_fs();
 	set_fs(get_ds());
 
-	kgdb_nmicallback(raw_smp_processor_id(), NULL);
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
 
 	set_fs(old_fs);
 }
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4138635..d67fb64 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -64,17 +64,11 @@ struct mips_perf_event {
 	#define CNTR_EVEN	0x55555555
 	#define CNTR_ODD	0xaaaaaaaa
 	#define CNTR_ALL	0xffffffff
-#ifdef CONFIG_MIPS_MT_SMP
 	enum {
 		T  = 0,
 		V  = 1,
 		P  = 2,
 	} range;
-#else
-	#define T
-	#define V
-	#define P
-#endif
 };
 
 static struct mips_perf_event raw_event;
@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 {
 	struct perf_event *event = container_of(evt, struct perf_event, hw);
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-#ifdef CONFIG_MIPS_MT_SMP
 	unsigned int range = evt->event_base >> 24;
-#endif /* CONFIG_MIPS_MT_SMP */
 
 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 
@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 		/* Make sure interrupt enabled. */
 		MIPS_PERFCTRL_IE;
 
-#ifdef CONFIG_CPU_BMIPS5000
-	{
+	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
 		/* enable the counter for the calling thread */
 		cpuc->saved_ctrl[idx] |=
 			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
-	}
-#else
-#ifdef CONFIG_MIPS_MT_SMP
-	if (range > V) {
+	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
 		/* The counter is processor wide. Set it up to count all TCs. */
 		pr_debug("Enabling perf counter for all TCs\n");
 		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
-	} else
-#endif /* CONFIG_MIPS_MT_SMP */
-	{
+	} else {
 		unsigned int cpu, ctrl;
 
 		/*
@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 		cpuc->saved_ctrl[idx] |= ctrl;
 		pr_debug("Enabling perf counter for CPU%d\n", cpu);
 	}
-#endif /* CONFIG_CPU_BMIPS5000 */
 	/*
 	 * We do not actually let the counter run. Leave it until start().
 	 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 73913f0..5796083 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -125,7 +125,7 @@
 	subu	t1, v0,  __NR_O32_Linux
 	move	a1, v0
 	bnez	t1, 1f /* __NR_syscall at offset 0 */
-	lw	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+	ld	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
 	.set	pop
 
 1:	jal	syscall_trace_enter
diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig
index 2546d87..65ce925 100644
--- a/arch/nds32/configs/defconfig
+++ b/arch/nds32/configs/defconfig
@@ -74,7 +74,7 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8e6d83f..7644b65 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -35,6 +35,7 @@
 	select GENERIC_STRNCPY_FROM_USER
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	select SYSCTL_EXCEPTION_TRACE
+	select ARCH_DISCARD_MEMBLOCK
 	select HAVE_MOD_ARCH_SPECIFIC
 	select VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
@@ -311,21 +312,16 @@
 	def_bool y
 	depends on 64BIT
 
-config ARCH_DISCONTIGMEM_ENABLE
+config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on 64BIT
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
-	depends on ARCH_DISCONTIGMEM_ENABLE
-
-config NODES_SHIFT
-	int
-	default "3"
-	depends on NEED_MULTIPLE_NODES
+	depends on ARCH_SPARSEMEM_ENABLE
 
 source "kernel/Kconfig.hz"
 
diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
index 5aba20f..e8b798f 100644
--- a/arch/parisc/boot/compressed/head.S
+++ b/arch/parisc/boot/compressed/head.S
@@ -22,7 +22,7 @@
 	__HEAD
 
 ENTRY(startup)
-	 .level LEVEL
+	 .level PA_ASM_LEVEL
 
 #define PSW_W_SM	0x200
 #define PSW_W_BIT       36
@@ -63,7 +63,7 @@
 	load32	BOOTADDR(decompress_kernel),%r3
 
 #ifdef CONFIG_64BIT
-	.level LEVEL
+	.level PA_ASM_LEVEL
 	ssm	PSW_W_SM, %r0		/* set W-bit */
 	depdi	0, 31, 32, %r3
 #endif
@@ -72,7 +72,7 @@
 
 startup_continue:
 #ifdef CONFIG_64BIT
-	.level LEVEL
+	.level PA_ASM_LEVEL
 	rsm	PSW_W_SM, %r0		/* clear W-bit */
 #endif
 
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index e9c6385..6f30fa5 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -61,14 +61,14 @@
 #define LDCW		ldcw,co
 #define BL		b,l
 # ifdef CONFIG_64BIT
-#  define LEVEL		2.0w
+#  define PA_ASM_LEVEL	2.0w
 # else
-#  define LEVEL		2.0
+#  define PA_ASM_LEVEL	2.0
 # endif
 #else
 #define LDCW		ldcw
 #define BL		bl
-#define LEVEL		1.1
+#define PA_ASM_LEVEL	1.1
 #endif
 
 #ifdef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index fafa389..8d39040 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -2,62 +2,6 @@
 #ifndef _PARISC_MMZONE_H
 #define _PARISC_MMZONE_H
 
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */
 
-#ifdef CONFIG_DISCONTIGMEM
-
-extern int npmem_ranges;
-
-struct node_map_data {
-    pg_data_t pg_data;
-};
-
-extern struct node_map_data node_data[];
-
-#define NODE_DATA(nid)          (&node_data[nid].pg_data)
-
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map 
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX  512     /* support 512GB */
-extern signed char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef CONFIG_64BIT
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
-	unsigned int i;
-
-	if (unlikely(pfn_is_io(pfn)))
-		return 0;
-
-	i = pfn >> PFNNID_SHIFT;
-	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-
-	return pfnnid_map[i];
-}
-
-static inline int pfn_valid(int pfn)
-{
-	int nid = pfn_to_nid(pfn);
-
-	if (nid >= 0)
-		return (pfn < node_end_pfn(nid));
-	return 0;
-}
-
-#endif
 #endif /* _PARISC_MMZONE_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index af00fe9..936d439 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,9 +145,9 @@ extern int npmem_ranges;
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
diff --git a/arch/parisc/include/asm/sparsemem.h b/arch/parisc/include/asm/sparsemem.h
new file mode 100644
index 0000000..b5c3a79
--- /dev/null
+++ b/arch/parisc/include/asm/sparsemem.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PARISC_SPARSEMEM_H
+#define ASM_PARISC_SPARSEMEM_H
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+#define MAX_PHYSMEM_BITS	39	/* 512 GB */
+#define SECTION_SIZE_BITS	27	/* 128 MB */
+
+#endif
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index fbb4e43..f56cbab 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	.level	LEVEL
+	.level	PA_ASM_LEVEL
 
 	__INITDATA
 ENTRY(boot_args)
@@ -258,7 +258,7 @@
 	ldo		R%PA(fault_vector_11)(%r10),%r10
 
 $is_pa20:
-	.level		LEVEL /* restore 1.1 || 2.0w */
+	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
 #endif /*!CONFIG_64BIT*/
 	load32		PA(fault_vector_20),%r10
 
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7baa226..174213b 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -138,12 +138,6 @@ extern void $$dyncall(void);
 EXPORT_SYMBOL($$dyncall);
 #endif
 
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(pfnnid_map);
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 extern void _mcount(void);
 EXPORT_SYMBOL(_mcount);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 841db71..97c2067 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
  */
 
 int running_on_qemu __read_mostly;
+EXPORT_SYMBOL(running_on_qemu);
 
 void __cpuidle arch_cpu_idle_dead(void)
 {
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f453997..61a647a 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -48,7 +48,7 @@
 	 */
 #define KILL_INSN	break	0,0
 
-	.level          LEVEL
+	.level          PA_ASM_LEVEL
 
 	.text
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index aae9b0d..6c11a26 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -33,6 +33,7 @@
 #include <asm/mmzone.h>
 #include <asm/sections.h>
 #include <asm/msgbuf.h>
+#include <asm/sparsemem.h>
 
 extern int  data_start;
 extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
@@ -49,11 +50,6 @@ pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
 pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 
-#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
-#endif
-
 static struct resource data_resource = {
 	.name	= "Kernel data",
 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -77,8 +73,8 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
  * information retrieved in kernel/inventory.c.
  */
 
-physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
-int npmem_ranges __read_mostly;
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
+int npmem_ranges __initdata;
 
 /*
  * get_memblock() allocates pages via memblock.
@@ -111,7 +107,7 @@ static void * __init get_memblock(unsigned long size)
 }
 
 #ifdef CONFIG_64BIT
-#define MAX_MEM         (~0UL)
+#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
 #else /* !CONFIG_64BIT */
 #define MAX_MEM         (3584U*1024U*1024U)
 #endif /* !CONFIG_64BIT */
@@ -150,7 +146,7 @@ static void __init mem_limit_func(void)
 static void __init setup_bootmem(void)
 {
 	unsigned long mem_max;
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
 	int npmem_holes;
 #endif
@@ -168,23 +164,20 @@ static void __init setup_bootmem(void)
 		int j;
 
 		for (j = i; j > 0; j--) {
-			unsigned long tmp;
+			physmem_range_t tmp;
 
 			if (pmem_ranges[j-1].start_pfn <
 			    pmem_ranges[j].start_pfn) {
 
 				break;
 			}
-			tmp = pmem_ranges[j-1].start_pfn;
-			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
-			pmem_ranges[j].start_pfn = tmp;
-			tmp = pmem_ranges[j-1].pages;
-			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
-			pmem_ranges[j].pages = tmp;
+			tmp = pmem_ranges[j-1];
+			pmem_ranges[j-1] = pmem_ranges[j];
+			pmem_ranges[j] = tmp;
 		}
 	}
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 	/*
 	 * Throw out ranges that are too far apart (controlled by
 	 * MAX_GAP).
@@ -196,7 +189,7 @@ static void __init setup_bootmem(void)
 			 pmem_ranges[i-1].pages) > MAX_GAP) {
 			npmem_ranges = i;
 			printk("Large gap in memory detected (%ld pages). "
-			       "Consider turning on CONFIG_DISCONTIGMEM\n",
+			       "Consider turning on CONFIG_SPARSEMEM\n",
 			       pmem_ranges[i].start_pfn -
 			       (pmem_ranges[i-1].start_pfn +
 			        pmem_ranges[i-1].pages));
@@ -261,9 +254,8 @@ static void __init setup_bootmem(void)
 
 	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 	/* Merge the ranges, keeping track of the holes */
-
 	{
 		unsigned long end_pfn;
 		unsigned long hole_pages;
@@ -286,18 +278,6 @@ static void __init setup_bootmem(void)
 	}
 #endif
 
-#ifdef CONFIG_DISCONTIGMEM
-	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
-		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
-	}
-	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
-
-	for (i = 0; i < npmem_ranges; i++) {
-		node_set_state(i, N_NORMAL_MEMORY);
-		node_set_online(i);
-	}
-#endif
-
 	/*
 	 * Initialize and free the full range of memory in each range.
 	 */
@@ -338,7 +318,7 @@ static void __init setup_bootmem(void)
 	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
 			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 
 	/* reserve the holes */
 
@@ -384,6 +364,9 @@ static void __init setup_bootmem(void)
 
 	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
 	pdc_pdt_init();
+
+	memblock_allow_resize();
+	memblock_dump_all();
 }
 
 static int __init parisc_text_address(unsigned long vaddr)
@@ -711,37 +694,46 @@ static void __init gateway_init(void)
 		  PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
-void __init paging_init(void)
+static void __init parisc_bootmem_free(void)
 {
+	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
+	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
 	int i;
 
+	for (i = 0; i < npmem_ranges; i++) {
+		unsigned long start = pmem_ranges[i].start_pfn;
+		unsigned long size = pmem_ranges[i].pages;
+		unsigned long end = start + size;
+
+		if (mem_start_pfn > start)
+			mem_start_pfn = start;
+		if (mem_end_pfn < end)
+			mem_end_pfn = end;
+		mem_size_pfn += size;
+	}
+
+	zones_size[0] = mem_end_pfn - mem_start_pfn;
+	holes_size[0] = zones_size[0] - mem_size_pfn;
+
+	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
+}
+
+void __init paging_init(void)
+{
 	setup_bootmem();
 	pagetable_init();
 	gateway_init();
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);
 
-	for (i = 0; i < npmem_ranges; i++) {
-		unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-
-		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
-
-#ifdef CONFIG_DISCONTIGMEM
-		/* Need to initialize the pfnnid_map before we can initialize
-		   the zone */
-		{
-		    int j;
-		    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
-			 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
-			 j++) {
-			pfnnid_map[j] = i;
-		    }
-		}
-#endif
-
-		free_area_init_node(i, zones_size,
-				pmem_ranges[i].start_pfn, NULL);
-	}
+	/*
+	 * Mark all memblocks as present for sparsemem using
+	 * memory_present() and then initialize sparsemem.
+	 */
+	memblocks_present();
+	sparse_init();
+	parisc_bootmem_free();
 }
 
 #ifdef CONFIG_PA20
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index 9d9f6f3..3da3e2b 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -223,7 +223,11 @@ main(int ac, char **av)
 	PUT_16(E_PHNUM, np + 2);
 
 	/* write back */
-	lseek(fd, (long) 0, SEEK_SET);
+	i = lseek(fd, (long) 0, SEEK_SET);
+	if (i < 0) {
+		perror("lseek");
+		exit(1);
+	}
 	i = write(fd, buf, n);
 	if (i < 0) {
 		perror("write");
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 6bd5e72..ffeaed6 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -195,6 +195,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_MISC_FILESYSTEMS is not set
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 391ed2c..f9019b5 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -83,6 +83,9 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
 			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (unlikely(!pgd))
+		return pgd;
+
 	/*
 	 * Don't scan the PGD for pointers, it contains references to PUDs but
 	 * those references are not full pointers and so can't be recognised by
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index eb2a33d..e382bd6 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -41,7 +41,7 @@
 #if defined(CONFIG_PPC_BOOK3E_64)
 #define MSR_64BIT	MSR_CM
 
-#define MSR_		(MSR_ME | MSR_CE)
+#define MSR_		(MSR_ME | MSR_RI | MSR_CE)
 #define MSR_KERNEL	(MSR_ | MSR_64BIT)
 #define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
 #define MSR_USER64	(MSR_USER32 | MSR_64BIT)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4898e94..9168a24 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -970,7 +970,9 @@
 
 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
-	bl	early_setup		/* also sets r13 and SPRG_PACA */
+	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
+	mtctr	r12
+	bctrl		/* also sets r13 and SPRG_PACA */
 
 	LOAD_REG_ADDR(r3, start_here_common)
 	ld	r4,PACAKMSR(r13)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 7f5ac2e..3617800 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -170,6 +170,9 @@
 	bne-	core_idle_lock_held
 	blr
 
+/* Reuse an unused pt_regs slot for IAMR */
+#define PNV_POWERSAVE_IAMR	_DAR
+
 /*
  * Pass requested state in r3:
  *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
@@ -200,6 +203,12 @@
 	/* Continue saving state */
 	SAVE_GPR(2, r1)
 	SAVE_NVGPRS(r1)
+
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_IAMR
+	std	r5, PNV_POWERSAVE_IAMR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	mfcr	r5
 	std	r5,_CCR(r1)
 	std	r1,PACAR1(r13)
@@ -924,6 +933,17 @@
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	REST_NVGPRS(r1)
 	REST_GPR(2, r1)
+
+BEGIN_FTR_SECTION
+	/* IAMR was saved in pnv_powersave_common() */
+	ld	r5, PNV_POWERSAVE_IAMR(r1)
+	mtspr	SPRN_IAMR, r5
+	/*
+	 * We don't need an isync here because the upcoming mtmsrd is
+	 * execution synchronizing.
+	 */
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	ld	r4,PACAKMSR(r13)
 	ld	r5,_LINK(r1)
 	ld	r6,_CCR(r1)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 683b5b3..cd381e2 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+	/*
+	 * Inform kmemleak about the hole in the .bss section since the
+	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+	 */
+	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
 	free_reserved_area(&kvm_tmp[kvm_tmp_index],
 			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 1341325..70568cc 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -4,6 +4,7 @@
 //
 // Copyright 2018, Michael Ellerman, IBM Corporation.
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/seq_buf.h>
@@ -56,7 +57,7 @@ void setup_barrier_nospec(void)
 	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
 		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
 
-	if (!no_nospec)
+	if (!no_nospec && !cpu_mitigations_off())
 		enable_barrier_nospec(enable);
 }
 
@@ -115,7 +116,7 @@ static int __init handle_nospectre_v2(char *p)
 early_param("nospectre_v2", handle_nospectre_v2);
 void setup_spectre_v2(void)
 {
-	if (no_spectrev2)
+	if (no_spectrev2 || cpu_mitigations_off())
 		do_btb_flush_fixups();
 	else
 		btb_flush_enabled = true;
@@ -299,7 +300,7 @@ void setup_stf_barrier(void)
 
 	stf_enabled_flush_types = type;
 
-	if (!no_stf_barrier)
+	if (!no_stf_barrier && !cpu_mitigations_off())
 		stf_barrier_enable(enable);
 }
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index faf0022..eaf7300 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -955,7 +955,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
 
 	enabled_flush_types = types;
 
-	if (!no_rfi_flush)
+	if (!no_rfi_flush && !cpu_mitigations_off())
 		rfi_flush_enable(enable);
 }
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 61c1fad..6dc4320 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -338,13 +338,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
  * a running system. They can be used for crash, debug, halt/reboot, etc.
  *
- * NMI IPIs are globally single threaded. No more than one in progress at
- * any time.
- *
  * The IPI call waits with interrupts disabled until all targets enter the
- * NMI handler, then the call returns.
+ * NMI handler, then returns. Subsequent IPIs can be issued before targets
+ * have returned from their handlers, so there is no guarantee about
+ * concurrency or re-entrancy.
  *
- * No new NMI can be initiated until targets exit the handler.
+ * A new NMI can be issued before all targets exit the handler.
  *
  * The IPI call may time out without all targets entering the NMI handler.
  * In that case, there is some logic to recover (and ignore subsequent
@@ -355,7 +354,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 static struct cpumask nmi_ipi_pending_mask;
-static int nmi_ipi_busy_count = 0;
+static bool nmi_ipi_busy = false;
 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 
 static void nmi_ipi_lock_start(unsigned long *flags)
@@ -394,7 +393,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
  */
 int smp_handle_nmi_ipi(struct pt_regs *regs)
 {
-	void (*fn)(struct pt_regs *);
+	void (*fn)(struct pt_regs *) = NULL;
 	unsigned long flags;
 	int me = raw_smp_processor_id();
 	int ret = 0;
@@ -405,29 +404,17 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
 	 * because the caller may have timed out.
 	 */
 	nmi_ipi_lock_start(&flags);
-	if (!nmi_ipi_busy_count)
-		goto out;
-	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
-		goto out;
-
-	fn = nmi_ipi_function;
-	if (!fn)
-		goto out;
-
-	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
-	nmi_ipi_busy_count++;
-	nmi_ipi_unlock();
-
-	ret = 1;
-
-	fn(regs);
-
-	nmi_ipi_lock();
-	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
-		nmi_ipi_busy_count--;
-out:
+	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
+		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+		fn = READ_ONCE(nmi_ipi_function);
+		WARN_ON_ONCE(!fn);
+		ret = 1;
+	}
 	nmi_ipi_unlock_end(&flags);
 
+	if (fn)
+		fn(regs);
+
 	return ret;
 }
 
@@ -453,7 +440,7 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
  * - fn is the target callback function.
  * - delay_us > 0 is the delay before giving up waiting for targets to
- *   complete executing the handler, == 0 specifies indefinite delay.
+ *   begin executing the handler, == 0 specifies indefinite delay.
  */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -467,31 +454,33 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 	if (unlikely(!smp_ops))
 		return 0;
 
-	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
 	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
+	while (nmi_ipi_busy) {
 		nmi_ipi_unlock_end(&flags);
-		spin_until_cond(nmi_ipi_busy_count == 0);
+		spin_until_cond(!nmi_ipi_busy);
 		nmi_ipi_lock_start(&flags);
 	}
-
+	nmi_ipi_busy = true;
 	nmi_ipi_function = fn;
 
+	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
+
 	if (cpu < 0) {
 		/* ALL_OTHERS */
 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 	} else {
-		/* cpumask starts clear */
 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 	}
-	nmi_ipi_busy_count++;
+
 	nmi_ipi_unlock();
 
+	/* Interrupts remain hard disabled */
+
 	do_smp_send_nmi_ipi(cpu, safe);
 
 	nmi_ipi_lock();
-	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
+	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 		nmi_ipi_unlock();
 		udelay(1);
@@ -503,29 +492,15 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 		}
 	}
 
-	while (nmi_ipi_busy_count > 1) {
-		nmi_ipi_unlock();
-		udelay(1);
-		nmi_ipi_lock();
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				break;
-		}
-	}
-
 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 		ret = 0;
 		cpumask_clear(&nmi_ipi_pending_mask);
 	}
-	if (nmi_ipi_busy_count > 1) {
-		/* Timeout waiting for CPUs to execute fn */
-		ret = 0;
-		nmi_ipi_busy_count = 1;
-	}
 
-	nmi_ipi_busy_count--;
+	nmi_ipi_function = NULL;
+	nmi_ipi_busy = false;
+
 	nmi_ipi_unlock_end(&flags);
 
 	return ret;
@@ -593,17 +568,8 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 static void nmi_stop_this_cpu(struct pt_regs *regs)
 {
 	/*
-	 * This is a special case because it never returns, so the NMI IPI
-	 * handling would never mark it as done, which makes any later
-	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
-	 *
 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
-	nmi_ipi_lock();
-	if (nmi_ipi_busy_count > 1)
-		nmi_ipi_busy_count--;
-	nmi_ipi_unlock();
-
 	spin_begin();
 	while (1)
 		spin_cpu_relax();
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 769c262..75cff3f 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -98,7 +98,7 @@
 	 * can be used, r7 contains NSEC_PER_SEC.
 	 */
 
-	lwz	r5,WTOM_CLOCK_SEC(r9)
+	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
 	lwz	r6,WTOM_CLOCK_NSEC(r9)
 
 	/* We now have our offset in r5,r6. We create a fake dependency
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 3c6ab22..af3c15a 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
 
 static u64 wd_timer_period_ms __read_mostly;  /* interval between heartbeat */
 
-static DEFINE_PER_CPU(struct timer_list, wd_timer);
+static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
 static DEFINE_PER_CPU(u64, wd_timer_tb);
 
 /* SMP checker bits */
@@ -293,21 +293,21 @@ void soft_nmi_interrupt(struct pt_regs *regs)
 	nmi_exit();
 }
 
-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
-{
-	t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
-	if (wd_timer_period_ms > 1000)
-		t->expires = __round_jiffies_up(t->expires, cpu);
-	add_timer_on(t, cpu);
-}
-
-static void wd_timer_fn(struct timer_list *t)
+static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
 	int cpu = smp_processor_id();
 
+	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+		return HRTIMER_NORESTART;
+
+	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+		return HRTIMER_NORESTART;
+
 	watchdog_timer_interrupt(cpu);
 
-	wd_timer_reset(cpu, t);
+	hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
+
+	return HRTIMER_RESTART;
 }
 
 void arch_touch_nmi_watchdog(void)
@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
-static void start_watchdog_timer_on(unsigned int cpu)
+static void start_watchdog(void *arg)
 {
-	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
-
-	per_cpu(wd_timer_tb, cpu) = get_tb();
-
-	timer_setup(t, wd_timer_fn, TIMER_PINNED);
-	wd_timer_reset(cpu, t);
-}
-
-static void stop_watchdog_timer_on(unsigned int cpu)
-{
-	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
-
-	del_timer_sync(t);
-}
-
-static int start_wd_on_cpu(unsigned int cpu)
-{
+	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
+	int cpu = smp_processor_id();
 	unsigned long flags;
 
 	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
 		WARN_ON(1);
-		return 0;
+		return;
 	}
 
 	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		return 0;
+		return;
 
 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
-		return 0;
+		return;
 
 	wd_smp_lock(&flags);
 	cpumask_set_cpu(cpu, &wd_cpus_enabled);
@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
 	}
 	wd_smp_unlock(&flags);
 
-	start_watchdog_timer_on(cpu);
+	*this_cpu_ptr(&wd_timer_tb) = get_tb();
 
-	return 0;
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = watchdog_timer_fn;
+	hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
+		      HRTIMER_MODE_REL_PINNED);
 }
 
-static int stop_wd_on_cpu(unsigned int cpu)
+static int start_watchdog_on_cpu(unsigned int cpu)
 {
+	return smp_call_function_single(cpu, start_watchdog, NULL, true);
+}
+
+static void stop_watchdog(void *arg)
+{
+	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
+	int cpu = smp_processor_id();
 	unsigned long flags;
 
 	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
-		return 0; /* Can happen in CPU unplug case */
+		return; /* Can happen in CPU unplug case */
 
-	stop_watchdog_timer_on(cpu);
+	hrtimer_cancel(hrtimer);
 
 	wd_smp_lock(&flags);
 	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
 	wd_smp_unlock(&flags);
 
 	wd_smp_clear_cpu_pending(cpu, get_tb());
+}
 
-	return 0;
+static int stop_watchdog_on_cpu(unsigned int cpu)
+{
+	return smp_call_function_single(cpu, stop_watchdog, NULL, true);
 }
 
 static void watchdog_calc_timeouts(void)
@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
 	int cpu;
 
 	for_each_cpu(cpu, &wd_cpus_enabled)
-		stop_wd_on_cpu(cpu);
+		stop_watchdog_on_cpu(cpu);
 }
 
 void watchdog_nmi_start(void)
@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
 
 	watchdog_calc_timeouts();
 	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
-		start_wd_on_cpu(cpu);
+		start_watchdog_on_cpu(cpu);
 }
 
 /*
@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
 
 	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 					"powerpc/watchdog:online",
-					start_wd_on_cpu, stop_wd_on_cpu);
+					start_watchdog_on_cpu,
+					stop_watchdog_on_cpu);
 	if (err < 0) {
 		pr_warn("could not be initialized");
 		return err;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 10fb43e..f473c05 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1495,6 +1495,9 @@ int start_topology_update(void)
 {
 	int rc = 0;
 
+	if (!topology_updates_enabled)
+		return 0;
+
 	if (firmware_has_feature(FW_FEATURE_PRRN)) {
 		if (!prrn_enabled) {
 			prrn_enabled = 1;
@@ -1524,6 +1527,9 @@ int stop_topology_update(void)
 {
 	int rc = 0;
 
+	if (!topology_updates_enabled)
+		return 0;
+
 	if (prrn_enabled) {
 		prrn_enabled = 0;
 #ifdef CONFIG_SMP
@@ -1579,11 +1585,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
 
 	kbuf[read_len] = '\0';
 
-	if (!strncmp(kbuf, "on", 2))
+	if (!strncmp(kbuf, "on", 2)) {
+		topology_updates_enabled = true;
 		start_topology_update();
-	else if (!strncmp(kbuf, "off", 3))
+	} else if (!strncmp(kbuf, "off", 3)) {
 		stop_topology_update();
-	else
+		topology_updates_enabled = false;
+	} else
 		return -EINVAL;
 
 	return count;
@@ -1598,9 +1606,7 @@ static const struct file_operations topology_ops = {
 
 static int topology_update_init(void)
 {
-	/* Do not poll for changes if disabled at boot */
-	if (topology_updates_enabled)
-		start_topology_update();
+	start_topology_update();
 
 	if (vphn_enabled)
 		topology_schedule_update();
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 4f213ba..53e9b58 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/hugetlb.h>
+#include <linux/security.h>
 #include <asm/mman.h>
 #include <asm/mmu.h>
 #include <asm/copro.h>
@@ -376,6 +377,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
 	unsigned long addr, found, prev;
 	struct vm_unmapped_area_info info;
+	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
@@ -392,7 +394,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 	if (high_limit > DEFAULT_MAP_WINDOW)
 		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
 
-	while (addr > PAGE_SIZE) {
+	while (addr > min_addr) {
 		info.high_limit = addr;
 		if (!slice_scan_available(addr - 1, available, 0, &addr))
 			continue;
@@ -404,8 +406,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 		 * Check if we need to reduce the range, or if we can
 		 * extend it to cover the previous available slice.
 		 */
-		if (addr < PAGE_SIZE)
-			addr = PAGE_SIZE;
+		if (addr < min_addr)
+			addr = min_addr;
 		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
 			addr = prev;
 			goto prev_slice;
@@ -527,7 +529,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 		addr = _ALIGN_UP(addr, page_size);
 		slice_dbg(" aligned addr=%lx\n", addr);
 		/* Ignore hint if it's too large or overlaps a VMA */
-		if (addr > high_limit - len ||
+		if (addr > high_limit - len || addr < mmap_min_addr ||
 		    !slice_area_is_free(mm, addr, len))
 			addr = 0;
 	}
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 1fafc32b..5553226 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -496,6 +496,11 @@ static int nest_imc_event_init(struct perf_event *event)
 	 * Get the base memory addresss for this cpu.
 	 */
 	chip_id = cpu_to_chip_id(event->cpu);
+
+	/* Return, if chip_id is not valid */
+	if (chip_id < 0)
+		return -ENODEV;
+
 	pcni = pmu->mem_info;
 	do {
 		if (pcni->id == chip_id) {
@@ -503,7 +508,7 @@ static int nest_imc_event_init(struct perf_event *event)
 			break;
 		}
 		pcni++;
-	} while (pcni);
+	} while (pcni->vbase != 0);
 
 	if (!flag)
 		return -ENODEV;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 6c6a7c7..ad0216c 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -330,7 +330,7 @@
 
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
-	depends on PPC_BOOK3S_64
+	depends on PPC_BOOK3S_64 && HUGETLB_PAGE
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	default y
 	help
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 58a0794..3d27f02 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
 								nr_chips))
 		goto error;
 
-	pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
+	pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
 				    GFP_KERNEL);
 	if (!pmu_ptr->mem_info)
 		goto error;
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 8c3e3e3..f0ea315 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -307,7 +307,7 @@ do {								\
 		"	.balign 4\n"				\
 		"4:\n"						\
 		"	li %0, %6\n"				\
-		"	jump 2b, %1\n"				\
+		"	jump 3b, %1\n"				\
 		"	.previous\n"				\
 		"	.section __ex_table,\"a\"\n"		\
 		"	.balign " RISCV_SZPTR "\n"			\
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc..15cdad2 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -492,7 +492,6 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -512,6 +511,7 @@
 CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f..e670d99 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -489,7 +489,6 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -507,6 +506,7 @@
 CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a47..f74639a 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -252,11 +252,14 @@ do {								\
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
index 5a286b0..602e7cc 100644
--- a/arch/s390/kernel/kexec_elf.c
+++ b/arch/s390/kernel/kexec_elf.c
@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
 	struct kexec_buf buf;
 	const Elf_Ehdr *ehdr;
 	const Elf_Phdr *phdr;
+	Elf_Addr entry;
 	int i, ret;
 
 	ehdr = (Elf_Ehdr *)kernel;
 	buf.image = image;
+	if (image->type == KEXEC_TYPE_CRASH)
+		entry = STARTUP_KDUMP_OFFSET;
+	else
+		entry = ehdr->e_entry;
 
 	phdr = (void *)ehdr + ehdr->e_phoff;
 	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
 		buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
 		buf.memsz = phdr->p_memsz;
 
-		if (phdr->p_paddr == 0) {
+		if (entry - phdr->p_paddr < phdr->p_memsz) {
 			data->kernel_buf = buf.buffer;
 			data->memsz += STARTUP_NORMAL_OFFSET;
 
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index bdddaae..649135c 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
 #include <asm/nospec-branch.h>
 
 static int __init nobp_setup_early(char *str)
@@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
 
 void __init nospec_auto_detect(void)
 {
-	if (test_facility(156)) {
+	if (test_facility(156) || cpu_mitigations_off()) {
 		/*
 		 * The machine supports etokens.
 		 * Disable expolines and disable nobp.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f2cc7da..ae894ac 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 	return old;
 }
 
+#ifdef CONFIG_PGSTE
 static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
 	pmd = pmd_alloc(mm, pud, addr);
 	return pmd;
 }
+#endif
 
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t new)
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 26789ad..cb99df5 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -175,10 +175,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
index 96b8cb1..029bbad 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
@@ -135,7 +135,7 @@ enum {
 
 static inline u32 sh7786_mm_sel(void)
 {
-	return __raw_readl(0xFC400020) & 0x7;
+	return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
 }
 
 #endif /* __CPU_SH7786_H__ */
diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c
index 9a8e1b6..5f56d11 100644
--- a/arch/um/drivers/port_user.c
+++ b/arch/um/drivers/port_user.c
@@ -168,7 +168,7 @@ int port_connection(int fd, int *socket, int *pid_out)
 {
 	int new, err;
 	char *argv[] = { "/usr/sbin/in.telnetd", "-L",
-			 "/usr/lib/uml/port-helper", NULL };
+			 OS_LIB_PATH "/uml/port-helper", NULL };
 	struct port_pre_exec_data data;
 
 	new = accept(fd, NULL, 0);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e76d16a..f85253f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -78,6 +78,7 @@
 	select ARCH_SUPPORTS_ACPI
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING	if X86_64
+	select ARCH_SUPPORTS_LTO_CLANG		if X86_64
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
@@ -124,7 +125,7 @@
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
-	select HAVE_ARCH_PREL32_RELOCATIONS
+	select HAVE_ARCH_PREL32_RELOCATIONS	if !LTO_CLANG
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
@@ -185,7 +186,7 @@
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_STACKPROTECTOR		if CC_HAS_SANE_STACKPROTECTOR
-	select HAVE_STACK_VALIDATION		if X86_64
+	select HAVE_STACK_VALIDATION		if X86_64 && !LTO_CLANG
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3afc909..723bffd 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -47,7 +47,7 @@
 export BITS
 
 ifdef CONFIG_X86_NEED_RELOCS
-        LDFLAGS_vmlinux := --emit-relocs
+        LDFLAGS_vmlinux := --emit-relocs --discard-none
 endif
 
 #
@@ -214,6 +214,11 @@
 KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
 endif
 
+ifdef CONFIG_LTO_CLANG
+KBUILD_LDFLAGS	+= -plugin-opt=-code-model=kernel \
+		   -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+
 # Speed up the build
 KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
@@ -224,6 +229,15 @@
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+  # Additionally, avoid generating expensive indirect jumps which
+  # are subject to retpolines for small number of switch cases.
+  # clang turns off jump table generation by default when under
+  # retpoline builds, however, gcc does not for x86. This has
+  # only been fixed starting from gcc stable version 8.4.0 and
+  # onwards, but not for older ones. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index f8ee9e4..837829d 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -23,6 +23,7 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
+CONFIG_SCHED_TUNE=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_LZ4 is not set
 # CONFIG_FHANDLE is not set
@@ -68,6 +69,8 @@
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_LTO_CLANG=y
+CONFIG_CFI_CLANG=y
 CONFIG_REFCOUNT_FULL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -84,6 +87,7 @@
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
 CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -204,7 +208,6 @@
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_OF=y
 CONFIG_OF_UNITTEST=y
@@ -414,6 +417,7 @@
 CONFIG_ASHMEM=y
 CONFIG_ANDROID_VSOC=y
 CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
 # CONFIG_X86_PLATFORM_DEVICES is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_ANDROID=y
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index cd4df93..7bbfe7d 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
 	if (irq_fpu_usable()) {
 		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
 		kernel_fpu_end();
 	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index 3b6e70d..8457cdd 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -323,6 +323,12 @@
 	vpaddq		t2,t1,t1
 	vmovq		t1x,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -361,16 +367,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index c88c670..5851c74 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -253,16 +253,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
@@ -520,6 +520,12 @@
 	paddq		t2,t1
 	movq		t1,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -558,16 +564,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3b2490b..8353348 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -31,6 +31,7 @@
 #include <asm/vdso.h>
 #include <linux/uaccess.h>
 #include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -212,6 +213,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 #endif
 
 	user_enter_irqoff();
+
+	mds_user_clear_cpu_buffers();
 }
 
 #define SYSCALL_EXIT_WORK_FLAGS				\
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index fbbf1ba..b5c2b10 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -648,6 +648,7 @@
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -670,6 +671,7 @@
 #endif
 
 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 617df50a..c90e00d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -352,6 +352,7 @@
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+	pushfq
 
 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -374,6 +375,7 @@
 #endif
 
 	/* restore callee-saved registers */
+	popfq
 	popq	%r15
 	popq	%r14
 	popq	%r13
@@ -903,7 +905,7 @@
  */
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
 ENTRY(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
@@ -923,6 +925,20 @@
 	jnz	.Lfrom_usermode_switch_stack_\@
 	.endif
 
+	.if \create_gap == 1
+	/*
+	 * If coming from kernel space, create a 6-word gap to allow the
+	 * int3 handler to emulate a call instruction.
+	 */
+	testb	$3, CS-ORIG_RAX(%rsp)
+	jnz	.Lfrom_usermode_no_gap_\@
+	.rept	6
+	pushq	5*8(%rsp)
+	.endr
+	UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
+	.endif
+
 	.if \paranoid
 	call	paranoid_entry
 	.else
@@ -1152,7 +1168,7 @@
 #endif /* CONFIG_HYPERV */
 
 idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3			do_int3			has_error_code=0
+idtentry int3			do_int3			has_error_code=0	create_gap=1
 idtentry stack_segment		do_stack_segment	has_error_code=1
 
 #ifdef CONFIG_XEN
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index aa3336a..7d17b3a 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -10,13 +10,11 @@
 #ifdef CONFIG_IA32_EMULATION
 /* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
 #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
-
-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
-
+#define __sys_ni_syscall __ia32_sys_ni_syscall
 #else /* CONFIG_IA32_EMULATION */
 #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#define __sys_ni_syscall sys_ni_syscall
 #endif /* CONFIG_IA32_EMULATION */
 
 #include <asm/syscalls_32.h>
@@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] =
 	 * Smells like a compiler bug -- it doesn't work
 	 * when the & below is removed.
 	 */
-	[0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
+	[0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
 #include <asm/syscalls_32.h>
 };
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index d5252bc..eb82ad9 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -4,11 +4,17 @@
 #include <linux/linkage.h>
 #include <linux/sys.h>
 #include <linux/cache.h>
+#include <linux/syscalls.h>
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
+extern asmlinkage long sys_ni_syscall(void);
+
+SYSCALL_DEFINE0(ni_syscall)
+{
+	return sys_ni_syscall();
+}
+
 #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
@@ -20,6 +26,6 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 	 * Smells like a compiler bug -- it doesn't work
 	 * when the & below is removed.
 	 */
-	[0 ... __NR_syscall_max] = &sys_ni_syscall,
+	[0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
 #include <asm/syscalls_64.h>
 };
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 859e919..57d188e 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -124,7 +124,7 @@
 110	i386	iopl			sys_iopl			__ia32_sys_iopl
 111	i386	vhangup			sys_vhangup
 112	i386	idle
-113	i386	vm86old			sys_vm86old			sys_ni_syscall
+113	i386	vm86old			sys_vm86old			__ia32_sys_ni_syscall
 114	i386	wait4			sys_wait4			__ia32_compat_sys_wait4
 115	i386	swapoff			sys_swapoff			__ia32_sys_swapoff
 116	i386	sysinfo			sys_sysinfo			__ia32_compat_sys_sysinfo
@@ -177,7 +177,7 @@
 163	i386	mremap			sys_mremap			__ia32_sys_mremap
 164	i386	setresuid		sys_setresuid16			__ia32_sys_setresuid16
 165	i386	getresuid		sys_getresuid16			__ia32_sys_getresuid16
-166	i386	vm86			sys_vm86			sys_ni_syscall
+166	i386	vm86			sys_vm86			__ia32_sys_ni_syscall
 167	i386	query_module
 168	i386	poll			sys_poll			__ia32_sys_poll
 169	i386	nfsservctl
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 5bfe224..c66b333 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -3,7 +3,6 @@
 # Building vDSO images for x86.
 #
 
-KBUILD_CFLAGS += $(DISABLE_LTO)
 KASAN_SANITIZE			:= n
 UBSAN_SANITIZE			:= n
 OBJECT_FILES_NON_STANDARD	:= y
@@ -68,7 +67,7 @@
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(DISABLE_LTO)
 
 ifdef CONFIG_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)
@@ -139,6 +138,8 @@
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(LTO_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(CFI_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 3e5dd85..27ade3cb 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -116,23 +116,144 @@ static __initconst const u64 amd_hw_cache_event_ids
  },
 };
 
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
+		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
+		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+[C(NODE)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+};
+
 /*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
  */
 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x077d,
-  [PERF_COUNT_HW_CACHE_MISSES]			= 0x077e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
-  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
-  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
-  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
 {
+	if (boot_cpu_data.x86 >= 0x17)
+		return amd_f17h_perfmon_event_map[hw_event];
+
 	return amd_perfmon_event_map[hw_event];
 }
 
@@ -844,9 +965,10 @@ __init int amd_pmu_init(void)
 		x86_pmu.amd_nb_constraints = 0;
 	}
 
-	/* Events are common for all AMDs */
-	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
-	       sizeof(hw_cache_event_ids));
+	if (boot_cpu_data.x86 >= 0x17)
+		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+	else
+		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 
 	return 0;
 }
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 12453cf7..09c53bc 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2074,15 +2074,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
-	if (unlikely(event->attr.precise_ip))
-		intel_pmu_pebs_disable(event);
-
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
 	x86_pmu_disable_event(event);
+
+	/*
+	 * Needs to be called after x86_pmu_disable_event,
+	 * so we don't trigger the event without PEBS bit set.
+	 */
+	if (unlikely(event->attr.precise_ip))
+		intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -3014,7 +3018,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
 		flags &= ~PERF_SAMPLE_TIME;
 	if (!event->attr.exclude_kernel)
 		flags &= ~PERF_SAMPLE_REGS_USER;
-	if (event->attr.sample_regs_user & ~PEBS_REGS)
+	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
 		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
 	return flags;
 }
@@ -3068,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		return ret;
 
 	if (event->attr.precise_ip) {
-		if (!event->attr.freq) {
+		if (!(event->attr.freq || event->attr.wakeup_events)) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 			if (!(event->attr.sample_type &
 			      ~intel_pmu_large_pebs_flags(event)))
@@ -3447,6 +3451,12 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	cpuc->lbr_sel = NULL;
 
+	if (x86_pmu.flags & PMU_FL_TFA) {
+		WARN_ON_ONCE(cpuc->tfa_shadow);
+		cpuc->tfa_shadow = ~0ULL;
+		intel_set_tfa(cpuc, false);
+	}
+
 	if (x86_pmu.version > 1)
 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
@@ -4126,11 +4136,11 @@ __init int intel_pmu_init(void)
 		name = "nehalem";
 		break;
 
-	case INTEL_FAM6_ATOM_PINEVIEW:
-	case INTEL_FAM6_ATOM_LINCROFT:
-	case INTEL_FAM6_ATOM_PENWELL:
-	case INTEL_FAM6_ATOM_CLOVERVIEW:
-	case INTEL_FAM6_ATOM_CEDARVIEW:
+	case INTEL_FAM6_ATOM_BONNELL:
+	case INTEL_FAM6_ATOM_BONNELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL:
+	case INTEL_FAM6_ATOM_SALTWELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -4143,9 +4153,11 @@ __init int intel_pmu_init(void)
 		name = "bonnell";
 		break;
 
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
+	case INTEL_FAM6_ATOM_SILVERMONT_MID:
 	case INTEL_FAM6_ATOM_AIRMONT:
+	case INTEL_FAM6_ATOM_AIRMONT_MID:
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 			sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -4164,7 +4176,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_DENVERTON:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4190,7 +4202,7 @@ __init int intel_pmu_init(void)
 		name = "goldmont";
 		break;
 
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9f8084f..4a650eb 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -76,15 +76,15 @@
  *			       Scope: Package (physical package)
  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *			       perf code: 0x04
- *			       Available model: HSW ULT,CNL
+ *			       Available model: HSW ULT,KBL,CNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *			       perf code: 0x05
- *			       Available model: HSW ULT,CNL
+ *			       Available model: HSW ULT,KBL,CNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *			       perf code: 0x06
- *			       Available model: HSW ULT,GLM,CNL
+ *			       Available model: HSW ULT,KBL,GLM,CNL
  *			       Scope: Package (physical package)
  *
  */
@@ -559,8 +559,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
@@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
@@ -581,9 +581,11 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+
+	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 32f3e94..2413169 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -777,9 +777,11 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE,  skl_rapl_init),
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
 
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
+
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,  skl_rapl_init),
 	{},
 };
 
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index b4771a6..ace6c1e 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -69,14 +69,14 @@ static bool test_intel(int idx)
 	case INTEL_FAM6_BROADWELL_GT3E:
 	case INTEL_FAM6_BROADWELL_X:
 
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
 	case INTEL_FAM6_ATOM_AIRMONT:
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_DENVERTON:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
 
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 
 	case INTEL_FAM6_XEON_PHI_KNL:
 	case INTEL_FAM6_XEON_PHI_KNM:
@@ -89,6 +89,7 @@ static bool test_intel(int idx)
 	case INTEL_FAM6_SKYLAKE_X:
 	case INTEL_FAM6_KABYLAKE_MOBILE:
 	case INTEL_FAM6_KABYLAKE_DESKTOP:
+	case INTEL_FAM6_ICELAKE_MOBILE:
 		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
 			return true;
 		break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 42a3628..05659c7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -96,25 +96,25 @@ struct amd_nb {
 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
 	PERF_SAMPLE_PERIOD)
 
-#define PEBS_REGS \
-	(PERF_REG_X86_AX | \
-	 PERF_REG_X86_BX | \
-	 PERF_REG_X86_CX | \
-	 PERF_REG_X86_DX | \
-	 PERF_REG_X86_DI | \
-	 PERF_REG_X86_SI | \
-	 PERF_REG_X86_SP | \
-	 PERF_REG_X86_BP | \
-	 PERF_REG_X86_IP | \
-	 PERF_REG_X86_FLAGS | \
-	 PERF_REG_X86_R8 | \
-	 PERF_REG_X86_R9 | \
-	 PERF_REG_X86_R10 | \
-	 PERF_REG_X86_R11 | \
-	 PERF_REG_X86_R12 | \
-	 PERF_REG_X86_R13 | \
-	 PERF_REG_X86_R14 | \
-	 PERF_REG_X86_R15)
+#define PEBS_GP_REGS			\
+	((1ULL << PERF_REG_X86_AX)    | \
+	 (1ULL << PERF_REG_X86_BX)    | \
+	 (1ULL << PERF_REG_X86_CX)    | \
+	 (1ULL << PERF_REG_X86_DX)    | \
+	 (1ULL << PERF_REG_X86_DI)    | \
+	 (1ULL << PERF_REG_X86_SI)    | \
+	 (1ULL << PERF_REG_X86_SP)    | \
+	 (1ULL << PERF_REG_X86_BP)    | \
+	 (1ULL << PERF_REG_X86_IP)    | \
+	 (1ULL << PERF_REG_X86_FLAGS) | \
+	 (1ULL << PERF_REG_X86_R8)    | \
+	 (1ULL << PERF_REG_X86_R9)    | \
+	 (1ULL << PERF_REG_X86_R10)   | \
+	 (1ULL << PERF_REG_X86_R11)   | \
+	 (1ULL << PERF_REG_X86_R12)   | \
+	 (1ULL << PERF_REG_X86_R13)   | \
+	 (1ULL << PERF_REG_X86_R14)   | \
+	 (1ULL << PERF_REG_X86_R15))
 
 /*
  * Per register state.
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 86b1341..d8b7398 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -61,9 +61,8 @@
 } while (0)
 
 #define RELOAD_SEG(seg)		{		\
-	unsigned int pre = GET_SEG(seg);	\
+	unsigned int pre = (seg) | 3;		\
 	unsigned int cur = get_user_seg(seg);	\
-	pre |= 3;				\
 	if (pre != cur)				\
 		set_user_seg(seg, pre);		\
 }
@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   struct sigcontext_32 __user *sc)
 {
 	unsigned int tmpflags, err = 0;
+	u16 gs, fs, es, ds;
 	void __user *buf;
 	u32 tmp;
 
@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	current->restart_block.fn = do_no_restart_syscall;
 
 	get_user_try {
-		/*
-		 * Reload fs and gs if they have changed in the signal
-		 * handler.  This does not handle long fs/gs base changes in
-		 * the handler, but does not clobber them at least in the
-		 * normal case.
-		 */
-		RELOAD_SEG(gs);
-		RELOAD_SEG(fs);
-		RELOAD_SEG(ds);
-		RELOAD_SEG(es);
+		gs = GET_SEG(gs);
+		fs = GET_SEG(fs);
+		ds = GET_SEG(ds);
+		es = GET_SEG(es);
 
 		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 		COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		buf = compat_ptr(tmp);
 	} get_user_catch(err);
 
+	/*
+	 * Reload fs and gs if they have changed in the signal
+	 * handler.  This does not handle long fs/gs base changes in
+	 * the handler, but does not clobber them at least in the
+	 * normal case.
+	 */
+	RELOAD_SEG(gs);
+	RELOAD_SEG(fs);
+	RELOAD_SEG(ds);
+	RELOAD_SEG(es);
+
 	err |= fpu__restore_sig(buf, 1);
 
 	force_iret();
@@ -113,7 +118,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	return err;
 }
 
-asmlinkage long sys32_sigreturn(void)
+asmlinkage long sys32_sigreturn(const struct pt_regs *__unused)
 {
 	struct pt_regs *regs = current_pt_regs();
 	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
@@ -139,7 +144,7 @@ asmlinkage long sys32_sigreturn(void)
 	return 0;
 }
 
-asmlinkage long sys32_rt_sigreturn(void)
+asmlinkage long sys32_rt_sigreturn(const struct pt_regs *__unused)
 {
 	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe_ia32 __user *frame;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 7b31ee5..69037da 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -341,6 +341,7 @@
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -378,5 +379,7 @@
 #define X86_BUG_SPECTRE_V2		X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS	X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 #define X86_BUG_L1TF			X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS			X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY		X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index cec5fae..baa549f 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -82,8 +82,7 @@ struct efi_scratch {
 #define arch_efi_call_virt_setup()					\
 ({									\
 	efi_sync_low_kernel_mappings();					\
-	preempt_disable();						\
-	__kernel_fpu_begin();						\
+	kernel_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 									\
 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
@@ -99,8 +98,7 @@ struct efi_scratch {
 		efi_switch_mm(efi_scratch.prev_mm);			\
 									\
 	firmware_restrict_branch_speculation_end();			\
-	__kernel_fpu_end();						\
-	preempt_enable();						\
+	kernel_fpu_end();						\
 })
 
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 6390bd8..5e12b23 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -159,7 +159,7 @@ extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
-void native_set_fixmap(enum fixed_addresses idx,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
 		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a9caac9..b56d504 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -12,17 +12,12 @@
 #define _ASM_X86_FPU_API_H
 
 /*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+ * If you intend to use the FPU in softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
  */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0ad25cc..058b1a1 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -8,9 +8,6 @@
  * The "_X" parts are generally the EP and EX Xeons, or the
  * "Extreme" ones, like Broadwell-E.
  *
- * Things ending in "2" are usually because we have no better
- * name for them.  There's no processor called "SILVERMONT2".
- *
  * While adding a new CPUID for a new microarchitecture, add a new
  * group to keep logically sorted out in chronological order. Within
  * that group keep the CPUID for the variants sorted by model number.
@@ -59,19 +56,23 @@
 
 /* "Small Core" Processors (Atom) */
 
-#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
-#define INTEL_FAM6_ATOM_LINCROFT	0x26
-#define INTEL_FAM6_ATOM_PENWELL		0x27
-#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
-#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
-#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
-#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD	0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Anniedale */
-#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
-#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
-#define INTEL_FAM6_ATOM_GEMINI_LAKE	0x7A
+#define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+#define INTEL_FAM6_ATOM_BONNELL_MID	0x26 /* Silverthorne, Lincroft */
+
+#define INTEL_FAM6_ATOM_SALTWELL	0x36 /* Cedarview */
+#define INTEL_FAM6_ATOM_SALTWELL_MID	0x27 /* Penwell */
+#define INTEL_FAM6_ATOM_SALTWELL_TABLET	0x35 /* Cloverview */
+
+#define INTEL_FAM6_ATOM_SILVERMONT	0x37 /* Bay Trail, Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT_X	0x4D /* Avaton, Rangely */
+#define INTEL_FAM6_ATOM_SILVERMONT_MID	0x4A /* Merrifield */
+
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* Cherry Trail, Braswell */
+#define INTEL_FAM6_ATOM_AIRMONT_MID	0x5A /* Moorefield */
+
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C /* Apollo Lake */
+#define INTEL_FAM6_ATOM_GOLDMONT_X	0x5F /* Denverton */
+#define INTEL_FAM6_ATOM_GOLDMONT_PLUS	0x7A /* Gemini Lake */
 
 /* Xeon Phi */
 
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 15450a675..c99c66b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -6,6 +6,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/nospec-branch.h>
+
 /* Provide __cpuidle; we can't safely include <linux/cpu.h> */
 #define __cpuidle __attribute__((__section__(".cpuidle.text")))
 
@@ -54,11 +56,13 @@ static inline void native_irq_enable(void)
 
 static inline __cpuidle void native_safe_halt(void)
 {
+	mds_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }
 
 static inline __cpuidle void native_halt(void)
 {
+	mds_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f14ca0b..f85f43d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_MSR_INDEX_H
 #define _ASM_X86_MSR_INDEX_H
 
+#include <linux/bits.h>
+
 /*
  * CPU model specific register (MSR) numbers.
  *
@@ -40,14 +42,14 @@
 /* Intel MSRs. Some also available on other CPUs */
 
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
-#define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_IBRS			BIT(0)	   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP_SHIFT		1	   /* Single Thread Indirect Branch Predictor (STIBP) bit */
-#define SPEC_CTRL_STIBP			(1 << SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
+#define SPEC_CTRL_STIBP			BIT(SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
-#define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
@@ -69,20 +71,25 @@
 #define MSR_MTRRcap			0x000000fe
 
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
-#define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
-#define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1 << 3)   /* Skip L1D flush on vmentry */
-#define ARCH_CAP_SSB_NO			(1 << 4)   /*
-						    * Not susceptible to Speculative Store Bypass
-						    * attack, so no Speculative Store Bypass
-						    * control required.
-						    */
+#define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO			BIT(4)	/*
+						 * Not susceptible to Speculative Store Bypass
+						 * attack, so no Speculative Store Bypass
+						 * control required.
+						 */
+#define ARCH_CAP_MDS_NO			BIT(5)   /*
+						  * Not susceptible to
+						  * Microarchitectural Data
+						  * Sampling (MDS) vulnerabilities.
+						  */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
-#define L1D_FLUSH			(1 << 0)   /*
-						    * Writeback and invalidate the
-						    * L1 data cache.
-						    */
+#define L1D_FLUSH			BIT(0)	/*
+						 * Writeback and invalidate the
+						 * L1 data cache.
+						 */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 39a2fb2..eb0f80c 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -6,6 +6,7 @@
 #include <linux/sched/idle.h>
 
 #include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
 
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
+	mds_idle_clear_cpu_buffers();
+
 	/* "mwait %eax, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xc9;"
 		     :: "a" (eax), "c" (ecx));
@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
 static inline void __mwaitx(unsigned long eax, unsigned long ebx,
 			    unsigned long ecx)
 {
+	/* No MDS buffer clear as this is AMD/HYGON only */
+
 	/* "mwaitx %eax, %ebx, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xfb;"
 		     :: "a" (eax), "b" (ebx), "c" (ecx));
@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
+	mds_idle_clear_cpu_buffers();
+
 	trace_hardirqs_on();
 	/* "mwait %eax, %ecx;" */
 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 032b600..599c273 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -317,6 +317,56 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
+DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+#include <asm/segment.h>
+
+/**
+ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+static inline void mds_clear_cpu_buffers(void)
+{
+	static const u16 ds = __KERNEL_DS;
+
+	/*
+	 * Has to be the memory-operand variant because only that
+	 * guarantees the CPU buffer flush functionality according to
+	 * documentation. The register-operand variant does not.
+	 * Works with any segment selector, but a valid writable
+	 * data segment is the fastest variant.
+	 *
+	 * "cc" clobber is required because VERW modifies ZF.
+	 */
+	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+}
+
+/**
+ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_user_clear_cpu_buffers(void)
+{
+	if (static_branch_likely(&mds_user_clear))
+		mds_clear_cpu_buffers();
+}
+
+/**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_idle_clear_cpu_buffers(void)
+{
+	if (static_branch_likely(&mds_idle_clear))
+		mds_clear_cpu_buffers();
+}
+
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d53c54b..b54f256 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -997,4 +997,10 @@ enum l1tf_mitigations {
 
 extern enum l1tf_mitigations l1tf_mitigation;
 
+enum mds_mitigations {
+	MDS_MITIGATION_OFF,
+	MDS_MITIGATION_FULL,
+	MDS_MITIGATION_VMWERV,
+};
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 7cf1a27..157149d 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
+	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index d0adec6..71e58c8 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -44,6 +44,18 @@
 		return __se_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
 	}
 
+/*
+ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
+ * named __ia32_sys_*()
+ */
+
+#define SYSCALL_DEFINE0(sname)						\
+	SYSCALL_METADATA(_##sname, 0);					\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);		\
+	SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname);		\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+
 #define COND_SYSCALL(name)						\
 	cond_syscall(sys_##name);					\
 	cond_syscall(__ia32_sys_##name)
@@ -169,6 +181,30 @@
 	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
 /*
+ * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
+ * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
+ * hurt, we only need to re-define it here to keep the naming congruent to
+ * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
+ * macros to work correctly.
+ */
+#ifndef SYSCALL_DEFINE0
+#define SYSCALL_DEFINE0(sname)					\
+	SYSCALL_METADATA(_##sname, 0);				\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);	\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+#endif
+
+#ifndef COND_SYSCALL
+#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
+#endif
+
+#ifndef SYS_NI
+#define SYS_NI(name) SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers);
+#endif
+
+
+/*
  * For VSYSCALLS, we need to declare these three syscalls with the new
  * pt_regs-based calling convention for in-kernel use.
  */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index e85ff65..0bbb07e 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -39,4 +39,34 @@ extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
 extern int after_bootmem;
 
+#ifndef CONFIG_UML_X86
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+	regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+	/*
+	 * The int3 handler in entry_64.S adds a gap between the
+	 * stack where the break point happened, and the saving of
+	 * pt_regs. We can extend the original stack because of
+	 * this gap. See the idtentry macro's create_gap option.
+	 */
+	regs->sp -= sizeof(unsigned long);
+	*(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+	int3_emulate_jmp(regs, func);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
+
 #endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index fd23d57..f164557 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -378,6 +378,7 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED	(1 << 1)
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP	(1 << 3)
 
 #define KVM_STATE_NESTED_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index b9d5e7c..918a237 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -662,15 +662,29 @@ void __init alternative_instructions(void)
  * handlers seeing an inconsistent instruction while you patch.
  */
 void *__init_or_module text_poke_early(void *addr, const void *opcode,
-					      size_t len)
+				       size_t len)
 {
 	unsigned long flags;
-	local_irq_save(flags);
-	memcpy(addr, opcode, len);
-	local_irq_restore(flags);
-	sync_core();
-	/* Could also do a CLFLUSH here to speed up CPU recovery; but
-	   that causes hangs on some VIA CPUs. */
+
+	if (boot_cpu_has(X86_FEATURE_NX) &&
+	    is_module_text_address((unsigned long)addr)) {
+		/*
+		 * Modules text is marked initially as non-executable, so the
+		 * code cannot be running and speculative code-fetches are
+		 * prevented. Just change the code.
+		 */
+		memcpy(addr, opcode, len);
+	} else {
+		local_irq_save(flags);
+		memcpy(addr, opcode, len);
+		local_irq_restore(flags);
+		sync_core();
+
+		/*
+		 * Could also do a CLFLUSH here to speed up CPU recovery; but
+		 * that causes hangs on some VIA CPUs.
+		 */
+	}
 	return addr;
 }
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1e0c4c7..9b096f2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -35,6 +35,7 @@
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -61,6 +62,13 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -99,6 +107,10 @@ void __init check_bugs(void)
 
 	l1tf_select_mitigation();
 
+	mds_select_mitigation();
+
+	arch_smt_update();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -205,6 +217,61 @@ static void x86_amd_ssb_disable(void)
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+	[MDS_MITIGATION_OFF]	= "Vulnerable",
+	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
+	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void __init mds_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+		mds_mitigation = MDS_MITIGATION_OFF;
+		return;
+	}
+
+	if (mds_mitigation == MDS_MITIGATION_FULL) {
+		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+			mds_mitigation = MDS_MITIGATION_VMWERV;
+
+		static_branch_enable(&mds_user_clear);
+
+		if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY) &&
+		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
+			cpu_smt_disable(false);
+	}
+
+	pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_MDS))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		mds_mitigation = MDS_MITIGATION_OFF;
+	else if (!strcmp(str, "full"))
+		mds_mitigation = MDS_MITIGATION_FULL;
+	else if (!strcmp(str, "full,nosmt")) {
+		mds_mitigation = MDS_MITIGATION_FULL;
+		mds_nosmt = true;
+	}
+
+	return 0;
+}
+early_param("mds", mds_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -272,7 +339,7 @@ static const struct {
 	const char			*option;
 	enum spectre_v2_user_cmd	cmd;
 	bool				secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
 	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
 	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
 	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
@@ -407,7 +474,7 @@ static const struct {
 	const char *option;
 	enum spectre_v2_mitigation_cmd cmd;
 	bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
@@ -428,7 +495,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	char arg[20];
 	int ret, i;
 
-	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+	    cpu_mitigations_off())
 		return SPECTRE_V2_CMD_NONE;
 
 	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -560,9 +628,6 @@ static void __init spectre_v2_select_mitigation(void)
 
 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
 	spectre_v2_user_select_mitigation(cmd);
-
-	/* Enable STIBP if appropriate */
-	arch_smt_update();
 }
 
 static void update_stibp_msr(void * __unused)
@@ -596,6 +661,31 @@ static void update_indir_branch_cond(void)
 		static_branch_disable(&switch_to_cond_stibp);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+	/*
+	 * Enable the idle clearing if SMT is active on CPUs which are
+	 * affected only by MSBDS and not any other MDS variant.
+	 *
+	 * The other variants cannot be mitigated when SMT is enabled, so
+	 * clearing the buffers on idle just to prevent the Store Buffer
+	 * repartitioning leak would be a window dressing exercise.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+		return;
+
+	if (sched_smt_active())
+		static_branch_enable(&mds_idle_clear);
+	else
+		static_branch_disable(&mds_idle_clear);
+}
+
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+
 void arch_smt_update(void)
 {
 	/* Enhanced IBRS implies STIBP. No update required. */
@@ -616,6 +706,17 @@ void arch_smt_update(void)
 		break;
 	}
 
+	switch (mds_mitigation) {
+	case MDS_MITIGATION_FULL:
+	case MDS_MITIGATION_VMWERV:
+		if (sched_smt_active() && !boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+			pr_warn_once(MDS_MSG_SMT);
+		update_mds_branch_idle();
+		break;
+	case MDS_MITIGATION_OFF:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -643,7 +744,7 @@ static const char * const ssb_strings[] = {
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[]  __initdata = {
+} ssb_mitigation_options[]  __initconst = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -657,7 +758,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
 	char arg[20];
 	int ret, i;
 
-	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+	    cpu_mitigations_off()) {
 		return SPEC_STORE_BYPASS_CMD_NONE;
 	} else {
 		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -978,6 +1080,11 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	if (cpu_mitigations_off())
+		l1tf_mitigation = L1TF_MITIGATION_OFF;
+	else if (cpu_mitigations_auto_nosmt())
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
 	override_cache_bits(&boot_cpu_data);
 
 	switch (l1tf_mitigation) {
@@ -1006,7 +1113,7 @@ static void __init l1tf_select_mitigation(void)
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 				half_pa);
 		pr_info("However, doing so will make a part of your RAM unusable.\n");
-		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
 		return;
 	}
 
@@ -1039,6 +1146,7 @@ static int __init l1tf_cmdline(char *str)
 early_param("l1tf", l1tf_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt) fmt
 
 #ifdef CONFIG_SYSFS
 
@@ -1077,6 +1185,23 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+static ssize_t mds_show_state(char *buf)
+{
+	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+		return sprintf(buf, "%s; SMT Host state unknown\n",
+			       mds_strings[mds_mitigation]);
+	}
+
+	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+			        sched_smt_active() ? "mitigated" : "disabled"));
+	}
+
+	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+		       sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1141,6 +1266,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
 			return l1tf_show_state(buf);
 		break;
+
+	case X86_BUG_MDS:
+		return mds_show_state(buf);
+
 	default:
 		break;
 	}
@@ -1172,4 +1301,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
 }
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44c4ef3..1073118 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -948,60 +948,73 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_CENTAUR,	5 },
-	{ X86_VENDOR_INTEL,	5 },
-	{ X86_VENDOR_NSC,	5 },
-	{ X86_VENDOR_ANY,	4 },
+#define NO_SPECULATION	BIT(0)
+#define NO_MELTDOWN	BIT(1)
+#define NO_SSB		BIT(2)
+#define NO_L1TF		BIT(3)
+#define NO_MDS		BIT(4)
+#define MSBDS_ONLY	BIT(5)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)	\
+	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)		\
+	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)		\
+	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
+
+	/* Intel Family 6 */
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+
+	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
+
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY),
+
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF),
+
+	/* AMD Family 0xf - 0x12 */
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
 	{}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-	{ X86_VENDOR_AMD },
-	{}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{ X86_VENDOR_AMD,	0x12,					},
-	{ X86_VENDOR_AMD,	0x11,					},
-	{ X86_VENDOR_AMD,	0x10,					},
-	{ X86_VENDOR_AMD,	0xf,					},
-	{}
-};
-
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-	/* in addition to cpu_no_speculation */
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MOOREFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_DENVERTON	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GEMINI_LAKE	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{}
-};
+	return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_speculation))
+	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1010,15 +1023,20 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-	if (x86_match_cpu(cpu_no_meltdown))
+	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+		setup_force_cpu_bug(X86_BUG_MDS);
+		if (cpu_matches(MSBDS_ONLY))
+			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+	}
+
+	if (cpu_matches(NO_MELTDOWN))
 		return;
 
 	/* Rogue Data Cache Load? No! */
@@ -1027,7 +1045,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-	if (x86_match_cpu(cpu_no_l1tf))
+	if (cpu_matches(NO_L1TF))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_L1TF);
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index f8c260d..912d539 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -91,7 +91,7 @@ static u64 get_prefetch_disable_bits(void)
 		 */
 		return 0xF;
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		/*
 		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
 		 * as:
@@ -995,7 +995,7 @@ static int measure_cycles_perf_fn(void *_plr)
 
 	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
 		l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index c805a06..ff1c00b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -46,8 +46,6 @@
 static struct mce i_mce;
 static struct dentry *dfs_inj;
 
-static u8 n_banks;
-
 #define MAX_FLAG_OPT_SIZE	4
 #define NBCFG			0x44
 
@@ -567,9 +565,15 @@ static void do_inject(void)
 static int inj_bank_set(void *data, u64 val)
 {
 	struct mce *m = (struct mce *)data;
+	u8 n_banks;
+	u64 cap;
+
+	/* Get bank count on target CPU so we can handle non-uniform values. */
+	rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
+	n_banks = cap & MCG_BANKCNT_MASK;
 
 	if (val >= n_banks) {
-		pr_err("Non-existent MCE bank: %llu\n", val);
+		pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
 		return -EINVAL;
 	}
 
@@ -659,10 +663,6 @@ static struct dfs_node {
 static int __init debugfs_init(void)
 {
 	unsigned int i;
-	u64 cap;
-
-	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	n_banks = cap & MCG_BANKCNT_MASK;
 
 	dfs_inj = debugfs_create_dir("mce-inject", NULL);
 	if (!dfs_inj)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index f34d89c..06f7c04 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -165,6 +165,11 @@ static struct severity {
 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
 		KERNEL
 		),
+	MCESEV(
+		PANIC, "Instruction fetch error in kernel",
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+		KERNEL
+		),
 #endif
 	MCESEV(
 		PANIC, "Action required: unknown MCACOD",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index f9e7096..fee118b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -711,19 +711,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
 		barrier();
 		m.status = mce_rdmsrl(msr_ops.status(i));
+
+		/* If this entry is not valid, ignore it */
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
 		/*
-		 * Uncorrected or signalled events are handled by the exception
-		 * handler when it is enabled, so don't process those here.
-		 *
-		 * TBD do the same check for MCI_STATUS_EN here?
+		 * If we are logging everything (at CPU online) or this
+		 * is a corrected error, then we must log it.
 		 */
-		if (!(flags & MCP_UC) &&
-		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
-			continue;
+		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
+			goto log_it;
 
+		/*
+		 * Newer Intel systems that support software error
+		 * recovery need to make additional checks. Other
+		 * CPUs should skip over uncorrected errors, but log
+		 * everything else.
+		 */
+		if (!mca_cfg.ser) {
+			if (m.status & MCI_STATUS_UC)
+				continue;
+			goto log_it;
+		}
+
+		/* Log "not enabled" (speculative) errors */
+		if (!(m.status & MCI_STATUS_EN))
+			goto log_it;
+
+		/*
+		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
+		 * UC == 1 && PCC == 0 && S == 0
+		 */
+		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
+			goto log_it;
+
+		/*
+		 * Skip anything else. Presumption is that our read of this
+		 * bank is racing with a machine check. Leave the log alone
+		 * for do_machine_check() to deal with it.
+		 */
+		continue;
+
+log_it:
 		error_seen = true;
 
 		mce_read_aux(&m, i);
@@ -1450,13 +1480,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
 static int __mcheck_cpu_mce_banks_init(void)
 {
 	int i;
-	u8 num_banks = mca_cfg.banks;
 
-	mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
+	mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
 	if (!mce_banks)
 		return -ENOMEM;
 
-	for (i = 0; i < num_banks; i++) {
+	for (i = 0; i < MAX_NR_BANKS; i++) {
 		struct mce_bank *b = &mce_banks[i];
 
 		b->ctl = -1ULL;
@@ -1470,28 +1499,19 @@ static int __mcheck_cpu_mce_banks_init(void)
  */
 static int __mcheck_cpu_cap_init(void)
 {
-	unsigned b;
 	u64 cap;
+	u8 b;
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 
 	b = cap & MCG_BANKCNT_MASK;
-	if (!mca_cfg.banks)
-		pr_info("CPU supports %d MCE banks\n", b);
-
-	if (b > MAX_NR_BANKS) {
-		pr_warn("Using only %u machine check banks out of %u\n",
-			MAX_NR_BANKS, b);
+	if (WARN_ON_ONCE(b > MAX_NR_BANKS))
 		b = MAX_NR_BANKS;
-	}
 
-	/* Don't support asymmetric configurations today */
-	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
-	mca_cfg.banks = b;
+	mca_cfg.banks = max(mca_cfg.banks, b);
 
 	if (!mce_banks) {
 		int err = __mcheck_cpu_mce_banks_init();
-
 		if (err)
 			return err;
 	}
@@ -2473,6 +2493,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
 
 static int __init mcheck_late_init(void)
 {
+	pr_info("Using %d MCE banks\n", mca_cfg.banks);
+
 	if (mca_cfg.recovery)
 		static_branch_inc(&mcsafe_key);
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b9bc8a1..b43ddef 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
 		if (ustate == UCODE_ERROR) {
 			error = -1;
 			break;
-		} else if (ustate == UCODE_OK)
+		} else if (ustate == UCODE_NEW) {
 			apply_microcode_on_target(cpu);
+		}
 	}
 
 	return error;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d805202..917840e 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -45,7 +45,7 @@
 #define VMWARE_PORT_CMD_VCPU_RESERVED	31
 
 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
-	__asm__("inl (%%dx)" :						\
+	__asm__("inl (%%dx), %%eax" :					\
 			"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :	\
 			"0"(VMWARE_HYPERVISOR_MAGIC),			\
 			"1"(VMWARE_PORT_CMD_##cmd),			\
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 2ea85b3..2e5003f 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
 		__cpu_invalidate_fpregs_state();
 	}
 }
-EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
 
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL(__kernel_fpu_end);
 
 void kernel_fpu_begin(void)
 {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7ee8067..4d2a401 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
 #include <asm/kprobes.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
+#include <asm/text-patching.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -228,6 +229,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 }
 
 static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
 
 static int update_ftrace_func(unsigned long ip, void *new)
 {
@@ -256,6 +258,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	unsigned char *new;
 	int ret;
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
 
@@ -291,13 +295,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
 	if (WARN_ON_ONCE(!regs))
 		return 0;
 
-	ip = regs->ip - 1;
-	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
-		return 0;
+	ip = regs->ip - INT3_INSN_SIZE;
 
-	regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+	if (ftrace_location(ip)) {
+		int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+		return 1;
+	} else if (is_ftrace_caller(ip)) {
+		if (!ftrace_update_func_call) {
+			int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+			return 1;
+		}
+		int3_emulate_call(regs, ftrace_update_func_call);
+		return 1;
+	}
+#else
+	if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+		int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+		return 1;
+	}
+#endif
 
-	return 1;
+	return 0;
 }
 
 static int ftrace_write(unsigned long ip, const char *val, int size)
@@ -868,6 +887,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 	func = ftrace_ops_get_func(ops);
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	/* Do a safe modify in case the trampoline is executing */
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
@@ -964,6 +985,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
 	unsigned char *new;
 
+	ftrace_update_func_call = 0UL;
 	new = ftrace_jmp_replace(ip, (unsigned long)func);
 
 	return update_ftrace_func(ip, new);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 0469cd0..b50ac9c 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
 /*
  * Probabilistic stack overflow check:
  *
- * Only check the stack in process context, because everything else
- * runs on the big interrupt stacks. Checking reliably is too expensive,
- * so we just check from interrupts.
+ * Regular device interrupts can enter on the following stacks:
+ *
+ * - User stack
+ *
+ * - Kernel task stack
+ *
+ * - Interrupt stack if a device driver reenables interrupts
+ *   which should only happen in really old drivers.
+ *
+ * - Debug IST stack
+ *
+ * All other contexts are invalid.
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 		return;
 
 	oist = this_cpu_ptr(&orig_ist);
-	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
-	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
+	estack_bottom = (u64)oist->ist[DEBUG_STACK];
+	estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
 	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
 		return;
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b0d1e81..544bc2d 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 	unsigned long *sara = stack_addr(regs);
 
 	ri->ret_addr = (kprobe_opcode_t *) *sara;
+	ri->fp = sara;
 
 	/* Replace the return addr with trampoline addr */
 	*sara = (unsigned long) &kretprobe_trampoline;
@@ -748,26 +749,48 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+static struct kprobe kretprobe_kprobe = {
+	.addr = (void *)kretprobe_trampoline,
+};
+
 /*
  * Called from kretprobe_trampoline
  */
 __visible __used void *trampoline_handler(struct pt_regs *regs)
 {
+	struct kprobe_ctlblk *kcb;
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
+	void *frame_pointer;
+	bool skipped = false;
+
+	preempt_disable();
+
+	/*
+	 * Set a dummy kprobe for avoiding kretprobe recursion.
+	 * Since kretprobe never run in kprobe handler, kprobe must not
+	 * be running at this point.
+	 */
+	kcb = get_kprobe_ctlblk();
+	__this_cpu_write(current_kprobe, &kretprobe_kprobe);
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
 	/* fixup registers */
 #ifdef CONFIG_X86_64
 	regs->cs = __KERNEL_CS;
+	/* On x86-64, we use pt_regs->sp for return address holder. */
+	frame_pointer = &regs->sp;
 #else
 	regs->cs = __KERNEL_CS | get_kernel_rpl();
 	regs->gs = 0;
+	/* On x86-32, we use pt_regs->flags for return address holder. */
+	frame_pointer = &regs->flags;
 #endif
 	regs->ip = trampoline_address;
 	regs->orig_ax = ~0UL;
@@ -789,8 +812,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
+		/*
+		 * Return probes must be pushed on this hash list in the
+		 * correct order (same as return order) so that they can
+		 * be popped correctly. However, if we find one pushed in
+		 * the wrong order, it means we found a function which
+		 * should not be probed, because the wrong-order entry was
+		 * pushed on the path of processing another kretprobe.
+		 */
+		if (ri->fp != frame_pointer) {
+			if (!skipped)
+				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+			skipped = true;
+			continue;
+		}
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (skipped)
+			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+				ri->rp->kp.addr);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -808,14 +848,15 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
+		if (ri->fp != frame_pointer)
+			continue;
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
+			__this_cpu_write(current_kprobe, &kretprobe_kprobe);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -831,6 +872,9 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
+	__this_cpu_write(current_kprobe, NULL);
+	preempt_enable();
+
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f58336a..6645f12 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
 	p = __vmalloc_node_range(size, MODULE_ALIGN,
 				    MODULES_VADDR + get_module_load_offset(),
 				    MODULES_END, GFP_KERNEL,
-				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				    PAGE_KERNEL, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 	if (p && (kasan_module_alloc(p, size) < 0)) {
 		vfree(p);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 18bc9b5..086cf1d 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -34,6 +34,7 @@
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
 #include <asm/cache.h>
+#include <asm/nospec-branch.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
@@ -533,6 +534,9 @@ do_nmi(struct pt_regs *regs, long error_code)
 		write_cr2(this_cpu_read(nmi_cr2));
 	if (this_cpu_dec_return(nmi_state))
 		goto nmi_restart;
+
+	if (user_mode(regs))
+		mds_user_clear_cpu_buffers();
 }
 NOKPROBE_SYMBOL(do_nmi);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7d31192..b8b08e6 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -411,6 +411,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	u64 msr = x86_spec_ctrl_base;
 	bool updmsr = false;
 
+	lockdep_assert_irqs_disabled();
+
 	/*
 	 * If TIF_SSBD is different, select the proper mitigation
 	 * method. Note that if SSBD mitigation is disabled or permanentely
@@ -462,10 +464,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 
 void speculation_ctrl_update(unsigned long tif)
 {
+	unsigned long flags;
+
 	/* Forced update. Make sure all relevant TIF flags are different */
-	preempt_disable();
+	local_irq_save(flags);
 	__speculation_ctrl_update(~tif, tif);
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d3e593e..020efe0 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a0854f2..59f71d0 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 725624b..8fd3ced 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
 	return 0;
 }
 
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+	if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+		reboot_type = BOOT_EFI;
+		pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+	}
+	return 0;
+}
+
 void __noreturn machine_real_restart(unsigned int type)
 {
 	local_irq_disable();
@@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
 		},
 	},
+	{	/* Handle reboot issue on Acer TravelMate X514-51T */
+		.callback = set_efi_reboot,
+		.ident = "Acer TravelMate X514-51T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+		},
+	},
 
 	/* Apple */
 	{	/* Handle problems with rebooting on Apple MacBook5 */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 92a3b31..44e647a 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
 		COPY_SEG_CPL3(cs);
 		COPY_SEG_CPL3(ss);
 
-#ifdef CONFIG_X86_64
-		/*
-		 * Fix up SS if needed for the benefit of old DOSEMU and
-		 * CRIU.
-		 */
-		if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
-			     user_64bit_mode(regs)))
-			force_valid_ss(regs);
-#endif
-
 		get_user_ex(tmpflags, &sc->flags);
 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
 		regs->orig_ax = -1;		/* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
 		buf = (void __user *)buf_val;
 	} get_user_catch(err);
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Fix up SS if needed for the benefit of old DOSEMU and
+	 * CRIU.
+	 */
+	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+		force_valid_ss(regs);
+#endif
+
 	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
 
 	force_iret();
@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	void __user *fp = NULL;
+	unsigned long uc_flags;
 	int err = 0;
 
 	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 			return -EFAULT;
 	}
 
+	uc_flags = frame_uc_flags(regs);
+
 	put_user_try {
 		/* Create the ucontext.  */
-		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+		put_user_ex(uc_flags, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
 {
 #ifdef CONFIG_X86_X32_ABI
 	struct rt_sigframe_x32 __user *frame;
+	unsigned long uc_flags;
 	void __user *restorer;
 	int err = 0;
 	void __user *fpstate = NULL;
@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
 			return -EFAULT;
 	}
 
+	uc_flags = frame_uc_flags(regs);
+
 	put_user_try {
 		/* Create the ucontext.  */
-		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+		put_user_ex(uc_flags, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 		put_user_ex(0, &frame->uc.uc__pad0);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6d5dc5d..03b7529 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -636,7 +636,7 @@ unsigned long native_calibrate_tsc(void)
 		case INTEL_FAM6_KABYLAKE_DESKTOP:
 			crystal_khz = 24000;	/* 24.0 MHz */
 			break;
-		case INTEL_FAM6_ATOM_DENVERTON:
+		case INTEL_FAM6_ATOM_GOLDMONT_X:
 			crystal_khz = 25000;	/* 25.0 MHz */
 			break;
 		case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 27ef714..3d0e9aee 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -59,12 +59,12 @@ static const struct freq_desc freq_desc_ann = {
 };
 
 static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
-	INTEL_CPU_FAM6(ATOM_PENWELL,		freq_desc_pnw),
-	INTEL_CPU_FAM6(ATOM_CLOVERVIEW,		freq_desc_clv),
-	INTEL_CPU_FAM6(ATOM_SILVERMONT1,	freq_desc_byt),
+	INTEL_CPU_FAM6(ATOM_SALTWELL_MID,	freq_desc_pnw),
+	INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET,	freq_desc_clv),
+	INTEL_CPU_FAM6(ATOM_SILVERMONT,		freq_desc_byt),
+	INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,	freq_desc_tng),
 	INTEL_CPU_FAM6(ATOM_AIRMONT,		freq_desc_cht),
-	INTEL_CPU_FAM6(ATOM_MERRIFIELD,		freq_desc_tng),
-	INTEL_CPU_FAM6(ATOM_MOOREFIELD,		freq_desc_ann),
+	INTEL_CPU_FAM6(ATOM_AIRMONT_MID,	freq_desc_ann),
 	{}
 };
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c63bab9..2fb152d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -151,11 +151,11 @@
 		*(.text.__x86.indirect_thunk)
 		__indirect_thunk_end = .;
 #endif
-
-		/* End of text section */
-		_etext = .;
 	} :text = 0x9090
 
+	/* End of text section */
+	_etext = .;
+
 	NOTES :text :note
 
 	EXCEPTION_TABLE(16) :text = 0x9090
@@ -372,7 +372,7 @@
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 		__bss_start = .;
 		*(.bss..page_aligned)
-		*(.bss)
+		*(BSS_MAIN)
 		BSS_DECRYPTED
 		. = ALIGN(PAGE_SIZE);
 		__bss_stop = .;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 98d13c6..b810102 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
 		F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
-		F(AMD_SSB_NO);
+		F(AMD_SSB_NO) | F(AMD_STIBP);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -412,7 +412,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
 		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
-		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
+		F(MD_CLEAR);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106482d..860bd27 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2575,15 +2575,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-	cr4 = ctxt->ops->get_cr(ctxt, 4);
 	if (emulator_has_longmode(ctxt)) {
 		struct desc_struct cs_desc;
 
 		/* Zero CR4.PCIDE before CR0.PG.  */
-		if (cr4 & X86_CR4_PCIDE) {
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PCIDE)
 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-			cr4 &= ~X86_CR4_PCIDE;
-		}
 
 		/* A 32-bit code segment is required to clear EFER.LMA.  */
 		memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,13 +2595,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (cr0 & X86_CR0_PE)
 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-	if (cr4 & X86_CR4_PAE)
-		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+	if (emulator_has_longmode(ctxt)) {
+		/* Clear CR4.PAE before clearing EFER.LME. */
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PAE)
+			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-	/* And finally go back to 32-bit mode.  */
-	efer = 0;
-	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+		/* And finally go back to 32-bit mode.  */
+		efer = 0;
+		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+	}
 
 	smbase = ctxt->ops->get_smbase(ctxt);
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 01d209a..229d996 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1291,7 +1291,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 				       flush.address_space, flush.flags);
 
 		sparse_banks[0] = flush.processor_mask;
-		all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
+
+		/*
+		 * Work around possible WS2012 bug: it sends hypercalls
+		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
+		 * while also expecting us to flush something and crashing if
+		 * we don't. Let's treat processor_mask == 0 same as
+		 * HV_FLUSH_ALL_PROCESSORS.
+		 */
+		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
+			flush.processor_mask == 0;
 	} else {
 		if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
 					    sizeof(flush_ex))))
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3692de8..cba414d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -133,6 +133,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
 		if (offset <= max_apic_id) {
 			u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 
+			offset = array_index_nospec(offset, map->max_apic_id + 1);
 			*cluster = &map->phys_map[offset];
 			*mask = dest_id & (0xffff >> (16 - cluster_size));
 		} else {
@@ -896,7 +897,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
 		if (irq->dest_id > map->max_apic_id) {
 			*bitmap = 0;
 		} else {
-			*dst = &map->phys_map[irq->dest_id];
+			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+			*dst = &map->phys_map[dest_id];
 			*bitmap = 1;
 		}
 		return true;
@@ -1447,7 +1449,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
 	if (swait_active(q))
 		swake_up_one(q);
 
-	if (apic_lvtt_tscdeadline(apic))
+	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6dc7280..ea454d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2022,7 +2022,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
-	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+	/*
+	 * Since the host physical APIC id is 8 bits,
+	 * we can support host APIC ID upto 255.
+	 */
+	if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
 		return;
 
 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
@@ -2679,6 +2683,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 
 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2689,6 +2694,8 @@ static int db_interception(struct vcpu_svm *svm)
 
 	if (svm->nmi_singlestep) {
 		disable_nmi_singlestep(svm);
+		/* Make sure we check for pending NMIs upon entry */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}
 
 	if (svm->vcpu.guest_debug &
@@ -4493,14 +4500,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+		int i;
+		struct kvm_vcpu *vcpu;
+		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * Update ICR high and low, then emulate sending IPI,
-		 * which is handled when writing APIC_ICR.
+		 * At this point, we expect that the AVIC HW has already
+		 * set the appropriate IRR bits on the valid target
+		 * vcpus. So, we just need to kick the appropriate vcpu.
 		 */
-		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			bool m = kvm_apic_match_dest(vcpu, apic,
+						     icrl & KVM_APIC_SHORT_MASK,
+						     GET_APIC_DEST_FIELD(icrh),
+						     icrl & KVM_APIC_DEST_MASK);
+
+			if (m && !avic_vcpu_is_running(vcpu))
+				kvm_vcpu_wake_up(vcpu);
+		}
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -6775,7 +6793,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	struct page **src_p, **dst_p;
 	struct kvm_sev_dbg debug;
 	unsigned long n;
-	int ret, size;
+	unsigned int size;
+	int ret;
 
 	if (!sev_guest(kvm))
 		return -ENOTTY;
@@ -6783,6 +6802,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
 		return -EFAULT;
 
+	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+		return -EINVAL;
+	if (!debug.dst_uaddr)
+		return -EINVAL;
+
 	vaddr = debug.src_uaddr;
 	size = debug.len;
 	vaddr_end = vaddr + size;
@@ -6833,8 +6857,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 						     dst_vaddr,
 						     len, &argp->error);
 
-		sev_unpin_memory(kvm, src_p, 1);
-		sev_unpin_memory(kvm, dst_p, 1);
+		sev_unpin_memory(kvm, src_p, n);
+		sev_unpin_memory(kvm, dst_p, n);
 
 		if (ret)
 			goto err;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0f99768..b3f219b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 	    TP_ARGS(apicid, dm, tm, vec),
 
 	TP_STRUCT__entry(
 		__field(	__u32,		apicid		)
 		__field(	__u16,		dm		)
-		__field(	__u8,		tm		)
+		__field(	__u16,		tm		)
 		__field(	__u8,		vec		)
 	),
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3380a31..73d6d58 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10765,8 +10765,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
+	/* L1D Flush includes CPU buffer clear to mitigate MDS */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
+	else if (static_branch_unlikely(&mds_user_clear))
+		mds_clear_cpu_buffers();
 
 	asm(
 		/* Store host registers */
@@ -11127,8 +11130,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }
 
-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
 
 static int vmx_vm_init(struct kvm *kvm)
 {
@@ -14236,7 +14239,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/* Empty 'VMXON' state is permitted */
-	if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
 		return 0;
 
 	if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
@@ -14269,7 +14272,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
-		if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
+		if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
 			return -EINVAL;
 
 		if (copy_from_user(shadow_vmcs12,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4a61e16..be4ba09 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return 0;
 }
 
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
+		return false;
+
+	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return false;
+
+	return true;
+
+}
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)
 		return false;
 
-	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
-			return false;
-
-	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-			return false;
-
-	return true;
+	return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
+	u64 efer = msr_info->data;
 
-	if (!kvm_valid_efer(vcpu, efer))
+	if (efer & efer_reserved_bits)
 		return 1;
 
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+	if (!msr_info->host_initiated) {
+		if (!__kvm_valid_efer(vcpu, efer))
+			return 1;
+
+		if (is_paging(vcpu) &&
+		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+			return 1;
+	}
 
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
@@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.arch_capabilities = data;
 		break;
 	case MSR_EFER:
-		return set_efer(vcpu, data);
+		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
@@ -6328,6 +6339,12 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
+static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pio.count = 0;
+	return 1;
+}
+
 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pio.count = 0;
@@ -6344,12 +6361,23 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
 					    size, port, &val, 1);
+	if (ret)
+		return ret;
 
-	if (!ret) {
+	/*
+	 * Workaround userspace that relies on old KVM behavior of %rip being
+	 * incremented prior to exiting to userspace to handle "OUT 0x7e".
+	 */
+	if (port == 0x7e &&
+	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
+		vcpu->arch.complete_userspace_io =
+			complete_fast_pio_out_port_0x7e;
+		kvm_skip_emulated_instruction(vcpu);
+	} else {
 		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
 		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
 	}
-	return ret;
+	return 0;
 }
 
 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 25a972c..3c19d60 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,6 +6,18 @@
 # Produces uninteresting flaky coverage.
 KCOV_INSTRUMENT_delay.o	:= n
 
+# Early boot use of cmdline; don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KCOV_INSTRUMENT_cmdline.o := n
+KASAN_SANITIZE_cmdline.o  := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cmdline.o = -pg
+endif
+
+CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
+endif
+
 inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
 inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
 quiet_cmd_inat_tables = GEN     $@
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc0..9d05572 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -257,6 +257,7 @@
 	/* Copy successful. Return zero */
 .L_done_memcpy_trap:
 	xorl %eax, %eax
+.L_done:
 	ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@
 	addl	%edx, %ecx
 .E_trailing_bytes:
 	mov	%ecx, %eax
-	ret
+	jmp	.L_done
 
 	/*
 	 * For write fault handling, given the destination is unaligned,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 47bebfe6..9d9765e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -427,8 +427,6 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
-	WARN_ON_ONCE(in_nmi());
-
 	/*
 	 * Copy kernel mappings over when needed. This can also
 	 * happen within a race in page table update. In the later
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d883869..fb5f29c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -6,6 +6,7 @@
 #include <linux/bootmem.h>	/* for max_low_pfn */
 #include <linux/swapfile.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -767,6 +768,11 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	if (debug_pagealloc_enabled()) {
 		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
 			begin, end - 1);
+		/*
+		 * Inform kmemleak about the hole in the memory since the
+		 * corresponding pages will be unmapped.
+		 */
+		kmemleak_free_part((void *)begin, end - begin);
 		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 	} else {
 		/*
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 61db77b..0988971 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -93,7 +93,7 @@ void __init kernel_randomize_memory(void)
 	if (!kaslr_memory_enabled())
 		return;
 
-	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
 	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
 
 	/*
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1e95d57..b69f7d4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
 /* Can we access it for direct reading/writing? Must be RAM: */
 int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
-	return addr + count <= __pa(high_memory);
+	return addr + count - 1 <= __pa(high_memory - 1);
 }
 
 /* Can we access it through mmap? Must be a valid physical address: */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 59274e2..619101a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -660,7 +660,7 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 	fixmaps_set++;
 }
 
-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys,
 		       pgprot_t flags)
 {
 	/* Sanitize 'prot' against any unsupported bits: */
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index c1fc1ae..4df3e5c 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/cpu.h>
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void)
 		}
 	}
 
-	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+	    cpu_mitigations_off()) {
 		pti_mode = PTI_FORCE_OFF;
 		pti_print_if_insecure("disabled on command line.");
 		return;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a6d1b02..a6836ab 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -694,7 +694,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
 	int cpu;
 
-	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
+	struct flush_tlb_info info = {
 		.mm = mm,
 	};
 
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index 034813d..41dae0f 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -143,8 +143,8 @@ static void punit_dbgfs_unregister(void)
 	  (kernel_ulong_t)&drv_data }
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,  punit_device_tng),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,  punit_device_tng),
 	ICPU(INTEL_FAM6_ATOM_AIRMONT,	  punit_device_cht),
 	{}
 };
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index 5a0483e..31dce78 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_data __initdata = {
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
 
 static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data),
 	{}
 };
 
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index d101058..47d0979 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -20,8 +20,6 @@ void __init set_real_mode_mem(phys_addr_t mem, size_t size)
 	void *base = __va(mem);
 
 	real_mode_header = (struct real_mode_header *) base;
-	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-	       base, (unsigned long long)mem, size);
 }
 
 void __init reserve_real_mode(void)
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index f7f7702..dab0782 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
 	}
 
 	xen_pvh = 1;
+	xen_domain_type = XEN_HVM_DOMAIN;
 	xen_start_flags = pvh_start_info.flags;
 
 	msr = cpuid_ebx(xen_cpuid_base() + 2);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c5e2c5a..15e8c99 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5226,7 +5226,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	return min_shallow;
 }
 
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5234,6 +5234,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
 
 	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+	bfq_depth_updated(hctx);
 	return 0;
 }
 
@@ -5656,6 +5661,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.requests_merged	= bfq_requests_merged,
 		.request_merged		= bfq_request_merged,
 		.has_work		= bfq_has_work,
+		.depth_updated		= bfq_depth_updated,
 		.init_hctx		= bfq_init_hctx,
 		.init_sched		= bfq_init_queue,
 		.exit_sched		= bfq_exit_queue,
diff --git a/block/blk-core.c b/block/blk-core.c
index 2ab7cf3..44a510c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -980,6 +980,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
 	kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work_dummy(struct work_struct *work)
+{
+}
+
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -1034,7 +1038,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-	INIT_WORK(&q->timeout_work, NULL);
+	INIT_WORK(&q->timeout_work, blk_timeout_work_dummy);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d53f23..4e563ee 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2236,7 +2236,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	return 0;
 
  free_fq:
-	kfree(hctx->fq);
+	blk_free_flush_queue(hctx->fq);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -2887,6 +2887,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		}
 		if (ret)
 			break;
+		if (q->elevator && q->elevator->type->ops.mq.depth_updated)
+			q->elevator->type->ops.mq.depth_updated(hctx);
 	}
 
 	if (!ret)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2eb8744..9ad5211 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -225,6 +225,7 @@ struct cfq_group_data {
 
 	unsigned int weight;
 	unsigned int leaf_weight;
+	u64 group_idle;
 };
 
 /* This is per cgroup per device grouping structure */
@@ -310,6 +311,7 @@ struct cfq_group {
 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 	struct cfq_queue *async_idle_cfqq;
 
+	u64 group_idle;
 };
 
 struct cfq_io_cq {
@@ -805,6 +807,17 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
+static inline u64 get_group_idle(struct cfq_data *cfqd)
+{
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	struct cfq_queue *cfqq = cfqd->active_queue;
+
+	if (cfqq && cfqq->cfqg)
+		return cfqq->cfqg->group_idle;
+#endif
+	return cfqd->cfq_group_idle;
+}
+
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -825,7 +838,7 @@ static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 	if (!sample_valid(ttime->ttime_samples))
 		return false;
 	if (group_idle)
-		slice = cfqd->cfq_group_idle;
+		slice = get_group_idle(cfqd);
 	else
 		slice = cfqd->cfq_slice_idle;
 	return ttime->ttime_mean > slice;
@@ -1592,6 +1605,7 @@ static void cfq_cpd_init(struct blkcg_policy_data *cpd)
 
 	cgd->weight = weight;
 	cgd->leaf_weight = weight;
+	cgd->group_idle = cfq_group_idle;
 }
 
 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
@@ -1636,6 +1650,7 @@ static void cfq_pd_init(struct blkg_policy_data *pd)
 
 	cfqg->weight = cgd->weight;
 	cfqg->leaf_weight = cgd->leaf_weight;
+	cfqg->group_idle = cgd->group_idle;
 }
 
 static void cfq_pd_offline(struct blkg_policy_data *pd)
@@ -1757,6 +1772,19 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
 	return 0;
 }
 
+static int cfq_print_group_idle(struct seq_file *sf, void *v)
+{
+	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
+	u64 val = 0;
+
+	if (cgd)
+		val = cgd->group_idle;
+
+	seq_printf(sf, "%llu\n", div_u64(val, NSEC_PER_USEC));
+	return 0;
+}
+
 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
 					char *buf, size_t nbytes, loff_t off,
 					bool on_dfl, bool is_leaf_weight)
@@ -1878,6 +1906,37 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
 	return __cfq_set_weight(css, val, false, false, true);
 }
 
+static int cfq_set_group_idle(struct cgroup_subsys_state *css,
+			       struct cftype *cft, u64 val)
+{
+	struct blkcg *blkcg = css_to_blkcg(css);
+	struct cfq_group_data *cfqgd;
+	struct blkcg_gq *blkg;
+	int ret = 0;
+
+	spin_lock_irq(&blkcg->lock);
+	cfqgd = blkcg_to_cfqgd(blkcg);
+	if (!cfqgd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cfqgd->group_idle = val * NSEC_PER_USEC;
+
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+		if (!cfqg)
+			continue;
+
+		cfqg->group_idle = cfqgd->group_idle;
+	}
+
+out:
+	spin_unlock_irq(&blkcg->lock);
+	return ret;
+}
+
 static int cfqg_print_stat(struct seq_file *sf, void *v)
 {
 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -2023,6 +2082,11 @@ static struct cftype cfq_blkcg_legacy_files[] = {
 		.seq_show = cfq_print_leaf_weight,
 		.write_u64 = cfq_set_leaf_weight,
 	},
+	{
+		.name = "group_idle",
+		.seq_show = cfq_print_group_idle,
+		.write_u64 = cfq_set_group_idle,
+	},
 
 	/* statistics, covers only the tasks in the cfqg */
 	{
@@ -2917,7 +2981,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * with sync vs async workloads.
 	 */
 	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
-		!cfqd->cfq_group_idle)
+		!get_group_idle(cfqd))
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -2928,9 +2992,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 */
 	if (!cfq_should_idle(cfqd, cfqq)) {
 		/* no queue idling. Check for group idling */
-		if (cfqd->cfq_group_idle)
-			group_idle = cfqd->cfq_group_idle;
-		else
+		group_idle = get_group_idle(cfqd);
+		if (!group_idle)
 			return;
 	}
 
@@ -2971,7 +3034,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	if (group_idle)
-		sl = cfqd->cfq_group_idle;
+		sl = group_idle;
 	else
 		sl = cfqd->cfq_slice_idle;
 
@@ -3320,7 +3383,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * this group, wait for requests to complete.
 	 */
 check_group_idle:
-	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+	if (get_group_idle(cfqd) && cfqq->cfqg->nr_cfqq == 1 &&
 	    cfqq->cfqg->dispatched &&
 	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
 		cfqq = NULL;
@@ -3884,7 +3947,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfqd->cfq_slice_idle);
 	}
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, get_group_idle(cfqd));
 #endif
 }
 
@@ -4273,7 +4336,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		if (cfq_should_wait_busy(cfqd, cfqq)) {
 			u64 extend_sl = cfqd->cfq_slice_idle;
 			if (!cfqd->cfq_slice_idle)
-				extend_sl = cfqd->cfq_group_idle;
+				extend_sl = get_group_idle(cfqd);
 			cfqq->slice_end = now + extend_sl;
 			cfq_mark_cfqq_wait_busy(cfqq);
 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
diff --git a/block/elevator.c b/block/elevator.c
index a54870f..3d3316f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -989,11 +989,15 @@ int elevator_init_mq(struct request_queue *q)
 	mutex_lock(&q->sysfs_lock);
 	if (unlikely(q->elevator))
 		goto out_unlock;
-
-	e = elevator_get(q, "mq-deadline", false);
-	if (!e)
-		goto out_unlock;
-
+	if (IS_ENABLED(CONFIG_IOSCHED_BFQ)) {
+		e = elevator_get(q, "bfq", false);
+		if (!e)
+			goto out_unlock;
+	} else {
+		e = elevator_get(q, "mq-deadline", false);
+		if (!e)
+			goto out_unlock;
+	}
 	err = blk_mq_init_sched(q, e);
 	if (err)
 		elevator_put(e);
diff --git a/block/genhd.c b/block/genhd.c
index be5bab2..2b2a936 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -518,6 +518,18 @@ void blk_free_devt(dev_t devt)
 	}
 }
 
+/**
+ *	We invalidate devt by assigning NULL pointer for devt in idr.
+ */
+void blk_invalidate_devt(dev_t devt)
+{
+	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+		spin_lock_bh(&ext_devt_lock);
+		idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
+		spin_unlock_bh(&ext_devt_lock);
+	}
+}
+
 static char *bdevt_str(dev_t devt, char *buf)
 {
 	if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
@@ -769,6 +781,13 @@ void del_gendisk(struct gendisk *disk)
 
 	if (!(disk->flags & GENHD_FL_HIDDEN))
 		blk_unregister_region(disk_devt(disk), disk->minors);
+	/*
+	 * Remove gendisk pointer from idr so that it cannot be looked up
+	 * while RCU period before freeing gendisk is running to prevent
+	 * use-after-free issues. Note that the device number stays
+	 * "in-use" until we really free the gendisk.
+	 */
+	blk_invalidate_devt(disk_devt(disk));
 
 	kobject_put(disk->part0.holder_dir);
 	kobject_put(disk->slave_dir);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5f8db5c5..98d60a5 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -289,6 +289,13 @@ void delete_partition(struct gendisk *disk, int partno)
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
+	/*
+	 * Remove gendisk pointer from idr so that it cannot be looked up
+	 * while RCU period before freeing gendisk is running to prevent
+	 * use-after-free issues. Note that the device number stays
+	 * "in-use" until we really free the gendisk.
+	 */
+	blk_invalidate_devt(part_devt(part));
 	hd_struct_kill(part);
 }
 
diff --git a/block/sed-opal.c b/block/sed-opal.c
index e0de4dd..1196408 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
 static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
 					  struct opal_mbr_data *opal_mbr)
 {
+	u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+		OPAL_TRUE : OPAL_FALSE;
+
 	const struct opal_step mbr_steps[] = {
 		{ opal_discovery0, },
 		{ start_admin1LSP_opal_session, &opal_mbr->key },
-		{ set_mbr_done, &opal_mbr->enable_disable },
+		{ set_mbr_done, &enable_disable },
 		{ end_opal_session, },
 		{ start_admin1LSP_opal_session, &opal_mbr->key },
-		{ set_mbr_enable_disable, &opal_mbr->enable_disable },
+		{ set_mbr_enable_disable, &enable_disable },
 		{ end_opal_session, },
 		{ NULL, }
 	};
@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
 
 static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
 {
-	u8 mbr_done_tf = 1;
+	u8 mbr_done_tf = OPAL_TRUE;
 	const struct opal_step mbrdone_step [] = {
 		{ opal_discovery0, },
 		{ start_admin1LSP_opal_session, key },
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index fe921b4..da780202 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -1,12 +1,14 @@
 ARCH=arm64
 BRANCH=android-4.19
+CC=clang
 CLANG_TRIPLE=aarch64-linux-gnu-
 CROSS_COMPILE=aarch64-linux-androidkernel-
 DEFCONFIG=cuttlefish_defconfig
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
+LD=ld.lld
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
 FILES="
 arch/arm64/boot/Image.gz
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index 31e4057..da47330 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -1,12 +1,14 @@
 ARCH=x86_64
 BRANCH=android-4.19
+CC=clang
 CLANG_TRIPLE=x86_64-linux-gnu-
 CROSS_COMPILE=x86_64-linux-androidkernel-
 DEFCONFIG=x86_64_cuttlefish_defconfig
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
+LD=ld.lld
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
 FILES="
 arch/x86/boot/bzImage
diff --git a/crypto/ccm.c b/crypto/ccm.c
index b242fd0..1bee010 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
 
 static int crypto_ccm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *mac_name)
 {
@@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 
 	mac = __crypto_hash_alg_common(mac_alg);
 	err = -EINVAL;
-	if (mac->digestsize != 16)
+	if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+	    mac->digestsize != 16)
 		goto out_put_mac;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
 
-	/* Not a stream cipher? */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto err_drop_ctr;
 
-	/* We want the real thing! */
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	/* ctr and cbcmac must use the same underlying block cipher. */
+	if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
 		goto err_drop_ctr;
 
 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto err_drop_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
 		     mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (mac->base.cra_priority +
 				       ctr->base.cra_priority) / 2;
@@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
 	char mac_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -581,12 +584,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					mac_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static struct crypto_template crypto_ccm_tmpl = {
@@ -599,23 +597,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
 	const char *ctr_name;
-	const char *cipher_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
+	const char *mac_name;
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
 		return PTR_ERR(ctr_name);
 
-	cipher_name = crypto_attr_alg_name(tb[2]);
-	if (IS_ERR(cipher_name))
-		return PTR_ERR(cipher_name);
+	mac_name = crypto_attr_alg_name(tb[2]);
+	if (IS_ERR(mac_name))
+		return PTR_ERR(mac_name);
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
-		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					cipher_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static struct crypto_template crypto_ccm_base_tmpl = {
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 573c07e..f2b1588 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha_name,
-		     poly_name) >= CRYPTO_MAX_ALG_NAME)
+		     "%s(%s,%s)", name, chacha->base.cra_name,
+		     poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->base.cra_driver_name,
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29..d08048a 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e438492..cfad6704 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
 
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *ghash_name)
 {
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	err = -EINVAL;
-	if (ghash->digestsize != 16)
+	if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+	    ghash->digestsize != 16)
 		goto err_drop_ghash;
 
 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
 
-	/* We only support 16-byte blocks. */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
-		goto out_put_ctr;
-
-	/* Not a stream cipher? */
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;
 
 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto out_put_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = (ghash->base.cra_flags |
 				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	    CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, "ghash");
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
 }
 
 static struct crypto_template crypto_gcm_tmpl = {
@@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 {
 	const char *ctr_name;
 	const char *ghash_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
@@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 	if (IS_ERR(ghash_name))
 		return PTR_ERR(ghash_name);
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
-		     ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, ghash_name);
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
 }
 
 static struct crypto_template crypto_gcm_base_tmpl = {
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 8c77bc7..df8fc0f5 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req)
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	salsa20_init(state, ctx, walk.iv);
+	salsa20_init(state, ctx, req->iv);
 
 	while (walk.nbytes > 0) {
 		unsigned int nbytes = walk.nbytes;
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f816afb..fff74f1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (WARN_ON(err)) {
-			/* unexpected case; didn't process all bytes */
+		if (err) {
+			/*
+			 * Didn't process all bytes.  Either the algorithm is
+			 * broken, or this was the last step and it turned out
+			 * the message wasn't evenly divisible into blocks but
+			 * the algorithm requires it.
+			 */
 			err = -EINVAL;
 			goto finish;
 		}
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 7431d3e..39652cd 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -5592,7 +5592,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
 		.psize		= 80,
 		.digest		= "\x13\x00\x00\x00\x00\x00\x00\x00"
 				  "\x00\x00\x00\x00\x00\x00\x00\x00",
-	},
+	}, { /* Regression test for overflow in AVX2 implementation */
+		.plaintext	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff",
+		.psize		= 300,
+		.digest		= "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+				  "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+	}
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
diff --git a/drivers/Makefile b/drivers/Makefile
index 120f0a0..9a258f1 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -79,6 +79,7 @@
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_CRYPTO)		+= crypto/
+obj-$(CONFIG_EXTCON)		+= extcon/
 obj-y				+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
@@ -166,7 +167,6 @@
 obj-$(CONFIG_HYPERV)		+= hv/
 
 obj-$(CONFIG_PM_DEVFREQ)	+= devfreq/
-obj-$(CONFIG_EXTCON)		+= extcon/
 obj-$(CONFIG_MEMORY)		+= memory/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_VME_BUS)		+= vme/
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 969bf8d..c651e20 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -292,7 +292,7 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
 #define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
 
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	/* Valleyview, Bay Trail */
 	ICPU(INTEL_FAM6_ATOM_AIRMONT),	/* Braswell, Cherry Trail */
 	{}
 };
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 4424997e..e10fec9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-	/* Clear the GPE status */
-	status = acpi_hw_clear_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status))
-		return_ACPI_STATUS(status);
-
 	/* Enable the requested GPE */
+
 	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e48eebc..43c2615 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1231,18 +1231,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
 /*
  * set numa proximity domain for smmuv3 device
  */
-static void  __init arm_smmu_v3_set_proximity(struct device *dev,
+static int  __init arm_smmu_v3_set_proximity(struct device *dev,
 					      struct acpi_iort_node *node)
 {
 	struct acpi_iort_smmu_v3 *smmu;
 
 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
-		set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+		int node = acpi_map_pxm_to_node(smmu->pxm);
+
+		if (node != NUMA_NO_NODE && !node_online(node))
+			return -EINVAL;
+
+		set_dev_node(dev, node);
 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
 			smmu->base_address,
 			smmu->pxm);
 	}
+	return 0;
 }
 #else
 #define arm_smmu_v3_set_proximity NULL
@@ -1317,7 +1323,7 @@ struct iort_dev_config {
 	int (*dev_count_resources)(struct acpi_iort_node *node);
 	void (*dev_init_resources)(struct resource *res,
 				     struct acpi_iort_node *node);
-	void (*dev_set_proximity)(struct device *dev,
+	int (*dev_set_proximity)(struct device *dev,
 				    struct acpi_iort_node *node);
 };
 
@@ -1368,8 +1374,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
 	if (!pdev)
 		return -ENOMEM;
 
-	if (ops->dev_set_proximity)
-		ops->dev_set_proximity(&pdev->dev, node);
+	if (ops->dev_set_proximity) {
+		ret = ops->dev_set_proximity(&pdev->dev, node);
+		if (ret)
+			goto dev_put;
+	}
 
 	count = ops->dev_count_resources(node);
 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index df2175b..8340c81b 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -542,6 +542,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		goto out;
 	}
 
+	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+			cmd_name, out_obj->buffer.length);
+	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+			out_obj->buffer.pointer,
+			min_t(u32, 128, out_obj->buffer.length), true);
+
 	if (call_pkg) {
 		call_pkg->nd_fw_size = out_obj->buffer.length;
 		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -560,12 +566,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		return 0;
 	}
 
-	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-			cmd_name, out_obj->buffer.length);
-	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-			out_obj->buffer.pointer,
-			min_t(u32, 128, out_obj->buffer.length), true);
-
 	for (i = 0, offset = 0; i < desc->out_num; i++) {
 		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
 				(u32 *) out_obj->buffer.pointer,
@@ -1298,19 +1298,30 @@ static ssize_t scrub_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct nvdimm_bus_descriptor *nd_desc;
+	struct acpi_nfit_desc *acpi_desc;
 	ssize_t rc = -ENXIO;
+	bool busy;
 
 	device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
-	if (nd_desc) {
-		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
-
-		mutex_lock(&acpi_desc->init_mutex);
-		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				acpi_desc->scrub_busy
-				&& !acpi_desc->cancel ? "+\n" : "\n");
-		mutex_unlock(&acpi_desc->init_mutex);
+	if (!nd_desc) {
+		device_unlock(dev);
+		return rc;
 	}
+	acpi_desc = to_acpi_desc(nd_desc);
+
+	mutex_lock(&acpi_desc->init_mutex);
+	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
+		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
+	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+	/* Allow an admin to poll the busy state at a higher rate */
+	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
+				&acpi_desc->scrub_flags)) {
+		acpi_desc->scrub_tmo = 1;
+		mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
+	}
+
+	mutex_unlock(&acpi_desc->init_mutex);
 	device_unlock(dev);
 	return rc;
 }
@@ -2529,7 +2540,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
 
 	if (rc < 0)
 		return rc;
-	return cmd_rc;
+	if (cmd_rc < 0)
+		return cmd_rc;
+	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
+	return 0;
 }
 
 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
@@ -2539,11 +2553,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
 	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
 
-	memset(&ars_start, 0, sizeof(ars_start));
-	ars_start.address = ars_status->restart_address;
-	ars_start.length = ars_status->restart_length;
-	ars_start.type = ars_status->type;
-	ars_start.flags = acpi_desc->ars_start_flags;
+	ars_start = (struct nd_cmd_ars_start) {
+		.address = ars_status->restart_address,
+		.length = ars_status->restart_length,
+		.type = ars_status->type,
+	};
 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
 			sizeof(ars_start), &cmd_rc);
 	if (rc < 0)
@@ -2622,6 +2636,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
 	 */
 	if (ars_status->out_length < 44)
 		return 0;
+
+	/*
+	 * Ignore potentially stale results that are only refreshed
+	 * after a start-ARS event.
+	 */
+	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
+		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
+				ars_status->num_records);
+		return 0;
+	}
+
 	for (i = 0; i < ars_status->num_records; i++) {
 		/* only process full records */
 		if (ars_status->out_length
@@ -2960,7 +2985,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 
 	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	if (acpi_desc->cancel)
+	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
 		return 0;
 
 	if (query_rc == -EBUSY) {
@@ -3034,7 +3059,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
 {
 	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	acpi_desc->scrub_busy = 1;
+	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
 	/* note this should only be set from within the workqueue */
 	if (tmo)
 		acpi_desc->scrub_tmo = tmo;
@@ -3050,7 +3075,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
 {
 	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	acpi_desc->scrub_busy = 0;
+	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
 	acpi_desc->scrub_count++;
 	if (acpi_desc->scrub_count_state)
 		sysfs_notify_dirent(acpi_desc->scrub_count_state);
@@ -3071,6 +3096,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	else
 		notify_ars_done(acpi_desc);
 	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
 
@@ -3105,6 +3131,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 	struct nfit_spa *nfit_spa;
 	int rc;
 
+	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
 		switch (nfit_spa_type(nfit_spa->spa)) {
 		case NFIT_SPA_VOLATILE:
@@ -3322,7 +3349,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
 	struct nfit_spa *nfit_spa;
 
 	mutex_lock(&acpi_desc->init_mutex);
-	if (acpi_desc->cancel) {
+	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
 		mutex_unlock(&acpi_desc->init_mutex);
 		return 0;
 	}
@@ -3401,7 +3428,7 @@ void acpi_nfit_shutdown(void *data)
 	mutex_unlock(&acpi_desc_lock);
 
 	mutex_lock(&acpi_desc->init_mutex);
-	acpi_desc->cancel = 1;
+	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
 	cancel_delayed_work_sync(&acpi_desc->dwork);
 	mutex_unlock(&acpi_desc->init_mutex);
 
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 02c10de..68848fc 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -181,6 +181,13 @@ struct nfit_mem {
 	bool has_lsw;
 };
 
+enum scrub_flags {
+	ARS_BUSY,
+	ARS_CANCEL,
+	ARS_VALID,
+	ARS_POLL,
+};
+
 struct acpi_nfit_desc {
 	struct nvdimm_bus_descriptor nd_desc;
 	struct acpi_table_header acpi_header;
@@ -194,7 +201,6 @@ struct acpi_nfit_desc {
 	struct list_head idts;
 	struct nvdimm_bus *nvdimm_bus;
 	struct device *dev;
-	u8 ars_start_flags;
 	struct nd_cmd_ars_status *ars_status;
 	struct nfit_spa *scrub_spa;
 	struct delayed_work dwork;
@@ -203,8 +209,7 @@ struct acpi_nfit_desc {
 	unsigned int max_ars;
 	unsigned int scrub_count;
 	unsigned int scrub_mode;
-	unsigned int scrub_busy:1;
-	unsigned int cancel:1;
+	unsigned long scrub_flags;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
 	unsigned long bus_nfit_cmd_force_en;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 693cf05..288673c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -975,6 +975,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 		const struct acpi_data_node *data = to_acpi_data_node(fwnode);
 		struct acpi_data_node *dn;
 
+		/*
+		 * We can have a combination of device and data nodes, e.g. with
+		 * hierarchical _DSD properties. Make sure the adev pointer is
+		 * restored before going through data nodes, otherwise we will
+		 * be looking for data_nodes below the last device found instead
+		 * of the common fwnode shared by device_nodes and data_nodes.
+		 */
+		adev = to_acpi_device_node(fwnode);
 		if (adev)
 			head = &adev->data.subnodes;
 		else if (data)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 74c4890..847db3e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);
 
+	acpi_enable_wakeup_devices(ACPI_STATE_S0);
+
 	/* Change the configuration of GPEs to avoid spurious wakeup. */
 	acpi_enable_all_wakeup_gpes();
 	acpi_os_wait_events_complete();
@@ -1026,6 +1028,8 @@ static void acpi_s2idle_restore(void)
 {
 	acpi_enable_all_runtime_gpes();
 
+	acpi_disable_wakeup_devices(ACPI_STATE_S0);
+
 	if (acpi_sci_irq_valid())
 		disable_irq_wake(acpi_sci_irq);
 
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 06c31ec..9a8e286 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -54,7 +54,7 @@ static const struct always_present_id always_present_ids[] = {
 	 * Bay / Cherry Trail PWM directly poked by GPU driver in win10,
 	 * but Linux uses a separate PWM driver, harmless if not used.
 	 */
-	ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}),
+	ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
 	ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
 	/*
 	 * The INT0002 device is necessary to clear wakeup interrupt sources
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index af22c46..27bd921 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -926,14 +926,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_write_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -944,10 +943,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_write(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9..173e6f2 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-	char buf[16];
+	char *buf;
 	unsigned int ret;
-	struct rm_feature_desc *desc = (void *)(buf + 8);
+	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
 	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
-			0, sizeof(buf),
+			0, 16,
 			0, 0, 0,
 	};
 
+	buf = kzalloc(16, GFP_KERNEL);
+	if (!buf)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+	desc = (void *)(buf + 8);
+
 	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_PIO;
-	tf.lbam = sizeof(buf);
+	tf.lbam = 16;
 
 	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-				buf, sizeof(buf), 0);
-	if (ret)
+				buf, 16, 0);
+	if (ret) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (be16_to_cpu(desc->feature_code) != 3)
+	if (be16_to_cpu(desc->feature_code) != 3) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_SLOT;
-	else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+	} else if (desc->mech_type == 1 && desc->load == 0 &&
+		   desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_DRAWER;
-	else
+	} else {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 }
 
 /* Test if ODD is zero power ready by sense code */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 0ef8197..392932f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -637,11 +637,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_mds(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -649,6 +656,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_spectre_v2.attr,
 	&dev_attr_spec_store_bypass.attr,
 	&dev_attr_l1tf.attr,
+	&dev_attr_mds.attr,
 	NULL
 };
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e378af5..7d53342 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -503,7 +503,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 
 	ret = dma_configure(dev);
 	if (ret)
-		goto dma_failed;
+		goto probe_failed;
 
 	if (driver_sysfs_add(dev)) {
 		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
@@ -558,14 +558,13 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 	goto done;
 
 probe_failed:
-	dma_deconfigure(dev);
-dma_failed:
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
 pinctrl_bind_failed:
 	device_links_no_driver(dev);
 	devres_release_all(dev);
+	dma_deconfigure(dev);
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
 	dev_set_drvdata(dev, NULL);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 797ada9..7a86f09 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1744,6 +1744,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	/* Avoid direct_complete to let wakeup_path propagate. */
+	if (device_may_wakeup(dev) || dev->power.wakeup_path)
+		dev->power.direct_complete = false;
+
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c18586f..17defbf 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	/*
 	 * Must use NOIO because we don't want to recurse back into the
 	 * block or filesystem layers from page reclaim.
-	 *
-	 * Cannot support DAX and highmem, because our ->direct_access
-	 * routine for DAX must return memory that is always addressable.
-	 * If DAX was reworked to use pfns and kmap throughout, this
-	 * restriction might be able to be lifted.
 	 */
-	gfp_flags = GFP_NOIO | __GFP_ZERO;
+	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
 	page = alloc_page(gfp_flags);
 	if (!page)
 		return NULL;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a63da9e..f1e63eb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1112,8 +1112,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 			err = __blkdev_reread_part(bdev);
 		else
 			err = blkdev_reread_part(bdev);
-		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
-			__func__, lo_number, err);
+		if (err)
+			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+				__func__, lo_number, err);
 		/* Device is gone, no point in returning error */
 		err = 0;
 	}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 23752dc..dd64f58 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -446,6 +446,8 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
+	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
diff --git a/drivers/block/vs_block_server.c b/drivers/block/vs_block_server.c
index 53f5fd3..74f2617 100644
--- a/drivers/block/vs_block_server.c
+++ b/drivers/block/vs_block_server.c
@@ -1044,6 +1044,10 @@ vs_block_server_alloc(struct vs_service_device *service)
 	 * 4 in all mainline kernels). That possibility is the only reason we
 	 * can't enable rx_atomic for this driver.
 	 */
+	server->bioset = kzalloc(sizeof(struct bio_set), GFP_KERNEL);
+	if (!server->bioset)
+		goto fail_create_bioset;
+
 	err = bioset_init(server->bioset, min_t(unsigned, service->recv_quota,
 				VSERVICE_BLOCK_IO_READ_MAX_PENDING +
 				VSERVICE_BLOCK_IO_WRITE_MAX_PENDING),
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c245894..0f36db0 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1063,6 +1063,8 @@ static int ace_setup(struct ace_device *ace)
 	return 0;
 
 err_read:
+	/* prevent double queue cleanup */
+	ace->gd->queue = NULL;
 	put_disk(ace->gd);
 err_alloc_disk:
 	blk_cleanup_queue(ace->queue);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 50045f0..6c6a10e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -794,18 +794,18 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
+	struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-	struct bio_vec bvec;
 	struct zram_work *zw = container_of(work, struct zram_work, work);
 	struct zram *zram = zw->zram;
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &bvec, entry, bio);
+	read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -818,6 +818,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
 	struct zram_work work;
 
+	work.bvec = *bvec;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 597c21c..a9dc17c 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -318,6 +318,13 @@ static int bluetooth_power(int on)
 				goto vdd_rfa2_fail;
 			}
 		}
+		if (bt_power_pdata->bt_vdd_asd) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_asd);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddasd config failed");
+				goto vdd_asd_fail;
+			}
+		}
 		if (bt_power_pdata->bt_chip_pwd) {
 			rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
 			if (rc < 0) {
@@ -354,6 +361,9 @@ static int bluetooth_power(int on)
 		if (bt_power_pdata->bt_chip_pwd)
 			bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
 chip_pwd_fail:
+		if (bt_power_pdata->bt_vdd_asd)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_asd);
+vdd_asd_fail:
 		if (bt_power_pdata->bt_vdd_rfa2)
 			bt_vreg_disable(bt_power_pdata->bt_vdd_rfa2);
 vdd_rfa2_fail:
@@ -657,6 +667,11 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
 					"qca,bt-vdd-rfa2");
 		if (rc < 0)
 			BT_PWR_ERR("bt-vdd-rfa2 not provided in device tree");
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_asd,
+					"qca,bt-vdd-asd");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-asd not provided in device tree");
 		rc = bt_dt_parse_clk_info(&pdev->dev,
 					&bt_power_pdata->bt_chip_clk);
 		if (rc < 0)
diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h
index 6640090..26d9c1d 100644
--- a/drivers/bluetooth/btfm_slim_slave.h
+++ b/drivers/bluetooth/btfm_slim_slave.h
@@ -95,8 +95,8 @@ enum {
 	QCA_APACHE_SOC_ID_0102  = 0x40020122,
 	QCA_APACHE_SOC_ID_0103  = 0x40020123,
 	QCA_APACHE_SOC_ID_0110  = 0x40020130,
-	QCA_APACHE_SOC_ID_0111  = 0x40020140,
-	QCA_APACHE_SOC_ID_0120  = 0x40020240,
+	QCA_APACHE_SOC_ID_0120  = 0x40020140,
+	QCA_APACHE_SOC_ID_0121  = 0x40020150,
 };
 
 enum {
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 4593baf..19eecf1 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -115,11 +115,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
 				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
 	if (err == -EINTR) {
 		bt_dev_err(hdev, "Execution of wmt command interrupted");
+		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 		return err;
 	}
 
 	if (err) {
 		bt_dev_err(hdev, "Execution of wmt command timed out");
+		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 		return -ETIMEDOUT;
 	}
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 77b67a5..40a4f95 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2888,6 +2888,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
 		return 0;
 	}
 
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
 			       0, "OOB Wake-on-BT", data);
 	if (ret) {
@@ -2902,7 +2903,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
 	}
 
 	data->oob_wake_irq = irq;
-	disable_irq(irq);
 	bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
 	return 0;
 }
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f0d593c..77004c2 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -504,6 +504,8 @@ static int qca_open(struct hci_uart *hu)
 		qcadev = serdev_device_get_drvdata(hu->serdev);
 		if (qcadev->btsoc_type != QCA_WCN3990) {
 			gpiod_set_value_cansleep(qcadev->bt_en, 1);
+			/* Controller needs time to boot up. */
+			msleep(150);
 		} else {
 			hu->init_speed = qcadev->init_speed;
 			hu->oper_speed = qcadev->oper_speed;
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index c9cc3cd..cdb334b 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -15,6 +15,7 @@
 #include <linux/msm_pcie.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/suspend.h>
 #include <linux/mhi.h>
 #include "mhi_qcom.h"
 
@@ -28,16 +29,21 @@ struct arch_info {
 	struct pci_saved_state *pcie_state;
 	async_cookie_t cookie;
 	void *boot_ipc_log;
+	void *tsync_ipc_log;
 	struct mhi_device *boot_dev;
 	struct mhi_link_info current_link_info;
 	struct work_struct bw_scale_work;
 	bool drv_connected;
+	struct notifier_block pm_notifier;
+	struct completion pm_completion;
 };
 
 /* ipc log markings */
 #define DLOG "Dev->Host: "
 #define HLOG "Host: "
 
+#define MHI_TSYNC_LOG_PAGES (10)
+
 #ifdef CONFIG_MHI_DEBUG
 
 #define MHI_IPC_LOG_PAGES (100)
@@ -50,6 +56,37 @@ enum MHI_DEBUG_LEVEL  mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
 
 #endif
 
+static int mhi_arch_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	struct arch_info *arch_info =
+		container_of(nb, struct arch_info, pm_notifier);
+
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		reinit_completion(&arch_info->pm_completion);
+		break;
+
+	case PM_POST_SUSPEND:
+		complete_all(&arch_info->pm_completion);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+void mhi_arch_timesync_log(struct mhi_controller *mhi_cntrl, u64 remote_time)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	if (remote_time != U64_MAX)
+		ipc_log_string(arch_info->tsync_ipc_log, "%6u.%06lu 0x%llx",
+			       REMOTE_TICKS_TO_SEC(remote_time),
+			       REMOTE_TIME_REMAINDER_US(remote_time),
+			       remote_time);
+}
+
 static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
 {
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
@@ -95,12 +132,11 @@ static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)
 
 		arch_info->drv_connected = true;
 
-		pm_runtime_allow(&pci_dev->dev);
-
 		mutex_lock(&mhi_cntrl->pm_mutex);
 
 		/* if we're in amss attempt a suspend */
 		if (mhi_dev->powered_on && mhi_cntrl->ee == MHI_EE_AMSS) {
+			pm_runtime_allow(&pci_dev->dev);
 			pm_runtime_mark_last_busy(&pci_dev->dev);
 			pm_request_autosuspend(&pci_dev->dev);
 		}
@@ -121,7 +157,7 @@ static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)
 	}
 }
 
-static int mhi_arch_esoc_ops_power_on(void *priv, bool mdm_state)
+static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
 {
 	struct mhi_controller *mhi_cntrl = priv;
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
@@ -175,17 +211,39 @@ static void mhi_arch_link_off(struct mhi_controller *mhi_cntrl)
 	MHI_LOG("Exited\n");
 }
 
-void mhi_arch_esoc_ops_power_off(void *priv, bool mdm_state)
+static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)
 {
 	struct mhi_controller *mhi_cntrl = priv;
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	bool mdm_state = (flags & ESOC_HOOK_MDM_CRASH);
 
 	MHI_LOG("Enter: mdm_crashed:%d\n", mdm_state);
+
+	/*
+	 * Grab a wakeup source to abort system suspend if the system is
+	 * preparing to suspend.
+	 * If the system is already suspended, wait for the PM notifier
+	 * callback to signal that resume has occurred (PM_POST_SUSPEND event).
+	 */
+	pm_stay_awake(&mhi_cntrl->mhi_dev->dev);
+	wait_for_completion(&arch_info->pm_completion);
+
+	/* if link is in drv suspend, wake it up */
+	pm_runtime_get_sync(&pci_dev->dev);
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
 	if (!mhi_dev->powered_on) {
 		MHI_LOG("Not in active state\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		pm_runtime_put_noidle(&pci_dev->dev);
 		return;
 	}
+	mhi_dev->powered_on = false;
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	pm_runtime_put_noidle(&pci_dev->dev);
 
 	MHI_LOG("Triggering shutdown process\n");
 	mhi_power_down(mhi_cntrl, !mdm_state);
@@ -199,7 +257,20 @@ void mhi_arch_esoc_ops_power_off(void *priv, bool mdm_state)
 
 	mhi_arch_pcie_deinit(mhi_cntrl);
 	mhi_cntrl->dev = NULL;
-	mhi_dev->powered_on = false;
+
+	pm_relax(&mhi_cntrl->mhi_dev->dev);
+}
+
+static void mhi_arch_esoc_ops_mdm_error(void *priv)
+{
+	struct mhi_controller *mhi_cntrl = priv;
+
+	MHI_LOG("Enter: mdm asserted\n");
+
+	/* transition MHI state into error state */
+	mhi_control_error(mhi_cntrl);
+
+	MHI_LOG("Exit\n");
 }
 
 static void mhi_bl_dl_cb(struct mhi_device *mhi_device,
@@ -255,9 +326,6 @@ static void mhi_boot_monitor(void *data, async_cookie_t cookie)
 		if (boot_dev)
 			mhi_unprepare_from_transfer(boot_dev);
 
-		/* enable link inactivity timer to start auto suspend */
-		msm_pcie_l1ss_timeout_enable(mhi_dev->pci_dev);
-
 		if (!mhi_dev->drv_supported || arch_info->drv_connected)
 			pm_runtime_allow(&mhi_dev->pci_dev->dev);
 	}
@@ -411,6 +479,14 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 							    node, 0);
 		mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
 
+		snprintf(node, sizeof(node), "mhi_tsync_%04x_%02u.%02u.%02u",
+			 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+			 mhi_cntrl->slot);
+		arch_info->tsync_ipc_log = ipc_log_context_create(
+					   MHI_TSYNC_LOG_PAGES, node, 0);
+		if (arch_info->tsync_ipc_log)
+			mhi_cntrl->tsync_log = mhi_arch_timesync_log;
+
 		/* register for bus scale if defined */
 		arch_info->msm_bus_pdata = msm_bus_cl_get_pdata_from_dev(
 							&mhi_dev->pci_dev->dev);
@@ -446,6 +522,18 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 		if (ret)
 			MHI_LOG("Failed to reg. for link up notification\n");
 
+		init_completion(&arch_info->pm_completion);
+
+		/* register PM notifier to get post resume events */
+		arch_info->pm_notifier.notifier_call = mhi_arch_pm_notifier;
+		register_pm_notifier(&arch_info->pm_notifier);
+
+		/*
+		 * Mark as completed at initial boot-up to allow the ESOC
+		 * power-on callback to proceed if the system has not suspended
+		 */
+		complete_all(&arch_info->pm_completion);
+
 		arch_info->esoc_client = devm_register_esoc_client(
 						&mhi_dev->pci_dev->dev, "mdm");
 		if (IS_ERR_OR_NULL(arch_info->esoc_client)) {
@@ -461,6 +549,8 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 				mhi_arch_esoc_ops_power_on;
 			esoc_ops->esoc_link_power_off =
 				mhi_arch_esoc_ops_power_off;
+			esoc_ops->esoc_link_mdm_crash =
+				mhi_arch_esoc_ops_mdm_error;
 
 			ret = esoc_register_client_hook(arch_info->esoc_client,
 							esoc_ops);
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 25d4a25..c7002ec 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -221,6 +221,7 @@ static int mhi_runtime_suspend(struct device *dev)
 
 	if (ret) {
 		MHI_LOG("Abort due to ret:%d\n", ret);
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
 		goto exit_runtime_suspend;
 	}
 
@@ -364,6 +365,7 @@ int mhi_system_suspend(struct device *dev)
 
 	if (ret) {
 		MHI_LOG("Abort due to ret:%d\n", ret);
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
 		goto exit_system_suspend;
 	}
 
@@ -389,6 +391,47 @@ int mhi_system_suspend(struct device *dev)
 	return ret;
 }
 
+static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
+{
+	int ret = -EIO;
+	const u32 delayms = 100;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms, delayms);
+
+	MHI_LOG("Entered\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	for (; itr; itr--) {
+		/*
+		 * This function gets called as soon as the device enters
+		 * mission mode, so most of the channels are still in a
+		 * disabled state. However, SBL channels are active and clients
+		 * could be trying to close channels while we are trying to
+		 * suspend the link, so retry if MHI is busy.
+		 */
+		ret = mhi_pm_suspend(mhi_cntrl);
+		if (!ret || ret != -EBUSY)
+			break;
+
+		MHI_LOG("MHI busy, sleeping and retry\n");
+		msleep(delayms);
+	}
+
+	if (ret)
+		goto exit_force_suspend;
+
+	mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+	ret = mhi_arch_link_suspend(mhi_cntrl);
+
+exit_force_suspend:
+	MHI_LOG("Force suspend ret with %d\n", ret);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+
 /* checks if link is down */
 static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
 {
@@ -558,6 +601,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
 {
 	struct mhi_dev *mhi_dev = priv;
 	struct device *dev = &mhi_dev->pci_dev->dev;
+	int ret;
 
 	switch (reason) {
 	case MHI_CB_IDLE:
@@ -569,6 +613,17 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
 		if (mhi_dev->bw_scale)
 			mhi_dev->bw_scale(mhi_cntrl, mhi_dev);
 		break;
+	case MHI_CB_EE_MISSION_MODE:
+		/*
+		 * we need to force a suspend so device can switch to
+		 * mission mode pcie phy settings.
+		 */
+		pm_runtime_get(dev);
+		ret = mhi_force_suspend(mhi_cntrl);
+		if (!ret)
+			mhi_runtime_resume(dev);
+		pm_runtime_put(dev);
+		break;
 	default:
 		MHI_ERR("Unhandled cb:0x%x\n", reason);
 	}
@@ -712,6 +767,7 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 	mhi_cntrl->lpm_disable = mhi_lpm_disable;
 	mhi_cntrl->lpm_enable = mhi_lpm_enable;
 	mhi_cntrl->time_get = mhi_time_get;
+	mhi_cntrl->remote_timer_freq = 19200000;
 
 	ret = of_register_mhi_controller(mhi_cntrl);
 	if (ret)
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index 0a1680a..fdab799 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -18,6 +18,14 @@
 #define MHI_RPM_SUSPEND_TMR_MS (250)
 #define MHI_PCI_BAR_NUM (0)
 
+/* timesync time calculations */
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+			       div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_SEC(x) (div_u64((x), \
+				mhi_cntrl->remote_timer_freq))
+#define REMOTE_TIME_REMAINDER_US(x) (REMOTE_TICKS_TO_US((x)) % \
+					(REMOTE_TICKS_TO_SEC((x)) * 1000000ULL))
+
 extern const char * const mhi_ee_str[MHI_EE_MAX];
 #define TO_MHI_EXEC_STR(ee) (ee >= MHI_EE_MAX ? "INVALID_EE" : mhi_ee_str[ee])
 
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index 6649a9f..d840768 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -18,12 +18,14 @@
 #include "mhi_internal.h"
 
 
-/* setup rddm vector table for rddm transfer */
-static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+/* setup rddm vector table for rddm transfer and program rxvec */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 			     struct image_info *img_info)
 {
 	struct mhi_buf *mhi_buf = img_info->mhi_buf;
 	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+	void __iomem *base = mhi_cntrl->bhie;
+	u32 sequence_id;
 	int i = 0;
 
 	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
@@ -32,17 +34,35 @@ static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 		bhi_vec->dma_addr = mhi_buf->dma_addr;
 		bhi_vec->size = mhi_buf->len;
 	}
+
+	MHI_LOG("BHIe programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+	if (unlikely(!sequence_id))
+		sequence_id = 1;
+
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+			    sequence_id);
+
+	MHI_LOG("address:%pad len:0x%lx sequence:%u\n",
+		&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
 }
 
 /* collect rddm during kernel panic */
 static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 {
 	int ret;
-	struct mhi_buf *mhi_buf;
-	u32 sequence_id;
 	u32 rx_status;
 	enum mhi_ee ee;
-	struct image_info *rddm_image = mhi_cntrl->rddm_image;
 	const u32 delayus = 5000;
 	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
 	const u32 rddm_timeout_us = 200000;
@@ -68,29 +88,6 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 	/* update should take the effect immediately */
 	smp_wmb();
 
-	/* setup the RX vector table */
-	mhi_rddm_prepare(mhi_cntrl, rddm_image);
-	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
-
-	MHI_LOG("Starting BHIe programming for RDDM\n");
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
-		      upper_32_bits(mhi_buf->dma_addr));
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
-		      lower_32_bits(mhi_buf->dma_addr));
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
-	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
-	if (unlikely(!sequence_id))
-		sequence_id = 1;
-
-
-	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
-			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
-			    sequence_id);
-
 	/*
 	 * Make sure device is not already in RDDM.
 	 * In case device asserts and a kernel panic follows, device will
@@ -157,70 +154,15 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
 {
 	void __iomem *base = mhi_cntrl->bhie;
-	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
-	struct image_info *rddm_image = mhi_cntrl->rddm_image;
-	struct mhi_buf *mhi_buf;
-	int ret;
 	u32 rx_status;
-	u32 sequence_id;
-
-	if (!rddm_image)
-		return -ENOMEM;
 
 	if (in_panic)
 		return __mhi_download_rddm_in_panic(mhi_cntrl);
 
-	MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
-		TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
-	ret = wait_event_timeout(mhi_cntrl->state_event,
-				 mhi_cntrl->ee == MHI_EE_RDDM ||
-				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
-	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-		MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state),
-			TO_MHI_EXEC_STR(mhi_cntrl->ee));
-		return -EIO;
-	}
-
-	mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
-
-	/* vector table is the last entry */
-	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
-
-	read_lock_bh(pm_lock);
-	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
-		read_unlock_bh(pm_lock);
-		return -EIO;
-	}
-
-	MHI_LOG("Starting BHIe Programming for RDDM\n");
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
-		      upper_32_bits(mhi_buf->dma_addr));
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
-		      lower_32_bits(mhi_buf->dma_addr));
-
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
-
-	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
-			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
-			    sequence_id);
-	read_unlock_bh(pm_lock);
-
-	MHI_LOG("Upper:0x%x Lower:0x%x len:0x%lx sequence:%u\n",
-		upper_32_bits(mhi_buf->dma_addr),
-		lower_32_bits(mhi_buf->dma_addr),
-		mhi_buf->len, sequence_id);
 	MHI_LOG("Waiting for image download completion\n");
 
 	/* waiting for image download completion */
 	wait_event_timeout(mhi_cntrl->state_event,
-			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
 			   mhi_read_reg_field(mhi_cntrl, base,
 					      BHIE_RXVECSTATUS_OFFS,
 					      BHIE_RXVECSTATUS_STATUS_BMSK,
@@ -228,9 +170,6 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
 					      &rx_status) || rx_status,
 			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
 
-	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
-		return -EIO;
-
 	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
 }
 EXPORT_SYMBOL(mhi_download_rddm_img);
@@ -493,9 +432,9 @@ void mhi_fw_load_worker(struct work_struct *work)
 
 	MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
 
-	/* if device in pthru, we do not have to load firmware */
+	/* if device in pthru, do reset to ready state transition */
 	if (mhi_cntrl->ee == MHI_EE_PTHRU)
-		return;
+		goto fw_load_ee_pthru;
 
 	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
 		mhi_cntrl->edl_image : mhi_cntrl->fw_image;
@@ -559,6 +498,7 @@ void mhi_fw_load_worker(struct work_struct *work)
 		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
 	}
 
+fw_load_ee_pthru:
 	/* transitioning into MHI RESET->READY state */
 	ret = mhi_ready_state_transition(mhi_cntrl);
 
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 3bac124..c76d3a4 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -23,6 +23,7 @@ const char * const mhi_ee_str[MHI_EE_MAX] = {
 	[MHI_EE_PTHRU] = "PASS THRU",
 	[MHI_EE_EDL] = "EDL",
 	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
 };
 
 const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
@@ -534,6 +535,8 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
 	     !mhi_cntrl->lpm_enable)
 		return -EINVAL;
 
+	mhi_cntrl->local_timer_freq = arch_timer_get_cntfrq();
+
 	/* register method supported */
 	mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL);
 	if (!mhi_tsync)
@@ -1146,6 +1149,9 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl,
 		       struct device_node *of_node)
 {
 	int ret;
+	enum mhi_ee i;
+	u32 *ee;
+	u32 bhie_offset;
 
 	/* parse MHI channel configuration */
 	ret = of_parse_ch_cfg(mhi_cntrl, of_node);
@@ -1173,6 +1179,23 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl,
 	if (of_property_read_bool(of_node, "mhi,m2-no-db-access"))
 		mhi_cntrl->db_access &= ~MHI_PM_M2;
 
+	/* parse the device ee table */
+	for (i = MHI_EE_PBL, ee = mhi_cntrl->ee_table; i < MHI_EE_MAX;
+	     i++, ee++) {
+		/* setup the default ee before checking for override */
+		*ee = i;
+		ret = of_property_match_string(of_node, "mhi,ee-names",
+					       mhi_ee_str[i]);
+		if (ret < 0)
+			continue;
+
+		of_property_read_u32_index(of_node, "mhi,ee", ret, ee);
+	}
+
+	ret = of_property_read_u32(of_node, "mhi,bhie-offset", &bhie_offset);
+	if (!ret)
+		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_offset;
+
 	return 0;
 
 error_ev_cfg:
@@ -1287,6 +1310,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	mhi_dev->mhi_cntrl = mhi_cntrl;
 	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
 		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
+
+	/* init wake source */
+	device_init_wakeup(&mhi_dev->dev, true);
+
 	ret = device_add(&mhi_dev->dev);
 	if (ret)
 		goto error_add_dev;
@@ -1369,12 +1396,6 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
 		goto error_dev_ctxt;
 	}
 
-	ret = mhi_init_irq_setup(mhi_cntrl);
-	if (ret) {
-		MHI_ERR("Error setting up irq\n");
-		goto error_setup_irq;
-	}
-
 	/*
 	 * allocate rddm table if specified, this table is for debug purpose
 	 * so we'll ignore erros
@@ -1387,16 +1408,22 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
 		 * This controller supports rddm, we need to manually clear
 		 * BHIE RX registers since por values are undefined.
 		 */
-		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
-				   &bhie_off);
-		if (ret) {
-			MHI_ERR("Error getting bhie offset\n");
-			goto bhie_error;
+		if (!mhi_cntrl->bhie) {
+			ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+					   &bhie_off);
+			if (ret) {
+				MHI_ERR("Error getting bhie offset\n");
+				goto bhie_error;
+			}
+
+			mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
 		}
 
-		memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS,
-			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
-			  4);
+		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, 0,
+			  BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + 4);
+
+		if (mhi_cntrl->rddm_image)
+			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
 	}
 
 	mhi_cntrl->pre_init = true;
@@ -1410,10 +1437,6 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
 		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
 		mhi_cntrl->rddm_image = NULL;
 	}
-	mhi_deinit_free_irq(mhi_cntrl);
-
-error_setup_irq:
-	mhi_deinit_dev_ctxt(mhi_cntrl);
 
 error_dev_ctxt:
 	mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -1434,7 +1457,6 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
 		mhi_cntrl->rddm_image = NULL;
 	}
 
-	mhi_deinit_free_irq(mhi_cntrl);
 	mhi_deinit_dev_ctxt(mhi_cntrl);
 	mhi_cntrl->pre_init = false;
 }
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 3aef667..4ff381c 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -225,8 +225,11 @@ extern struct bus_type mhi_bus_type;
 #define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
 #define SOC_HW_VERSION_MINOR_VER_SHFT (0)
 
-/* convert ticks to micro seconds by dividing by 19.2 */
-#define TIME_TICKS_TO_US(x) (div_u64((x) * 10, 192))
+/* timesync time calculations */
+#define LOCAL_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+				div_u64(mhi_cntrl->local_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+			       div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
 
 struct mhi_event_ctxt {
 	u32 reserved : 8;
@@ -347,6 +350,7 @@ enum mhi_cmd_type {
 enum MHI_CMD {
 	MHI_CMD_RESET_CHAN,
 	MHI_CMD_START_CHAN,
+	MHI_CMD_STOP_CHAN,
 	MHI_CMD_TIMSYNC_CFG,
 };
 
@@ -754,6 +758,17 @@ int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
 int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_early_notify_device(struct device *dev, void *data);
+
+/* timesync log support */
+static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+
+	if (mhi_tsync && mhi_cntrl->tsync_log)
+		mhi_cntrl->tsync_log(mhi_cntrl,
+				     readq_no_log(mhi_tsync->time_reg));
+}
 
 /* memory allocation methods */
 static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
@@ -809,6 +824,8 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
 int mhi_dtr_init(void);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+		      struct image_info *img_info);
 
 /* isr handlers */
 irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index e9ba9ab..4d77341 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -173,12 +173,24 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
 				    db);
 }
 
+static enum mhi_ee mhi_translate_dev_ee(struct mhi_controller *mhi_cntrl,
+					u32 dev_ee)
+{
+	enum mhi_ee i;
+
+	for (i = MHI_EE_PBL; i < MHI_EE_MAX; i++)
+		if (mhi_cntrl->ee_table[i] == dev_ee)
+			return i;
+
+	return MHI_EE_NOT_SUPPORTED;
+}
+
 enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
 {
 	u32 exec;
 	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
 
-	return (ret) ? MHI_EE_MAX : exec;
+	return (ret) ? MHI_EE_MAX : mhi_translate_dev_ee(mhi_cntrl, exec);
 }
 
 enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
@@ -625,6 +637,22 @@ int mhi_destroy_device(struct device *dev, void *data)
 	return 0;
 }
 
+int mhi_early_notify_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* skip early notification */
+	if (!mhi_dev->early_notif)
+		return 0;
+
+	MHI_LOG("Early notification for dev:%s\n", mhi_dev->chan_name);
+
+	mhi_notify(mhi_dev, MHI_CB_FATAL_ERROR);
+
+	return 0;
+}
+
 void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason)
 {
 	struct mhi_driver *mhi_drv;
@@ -696,7 +724,8 @@ static ssize_t time_us_show(struct device *dev,
 	}
 
 	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n",
-			 TIME_TICKS_TO_US(t_host), TIME_TICKS_TO_US(t_device));
+			 LOCAL_TICKS_TO_US(t_host),
+			 REMOTE_TICKS_TO_US(t_device));
 }
 static DEVICE_ATTR_RO(time_us);
 
@@ -834,6 +863,13 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 		/* add if there is a matching DT node */
 		mhi_assign_of_node(mhi_cntrl, mhi_dev);
 
+		/*
+		 * if set, this device should get an early notification
+		 * during the early notification state
+		 */
+		mhi_dev->early_notif =
+			of_property_read_bool(mhi_dev->dev.of_node,
+					      "mhi,early-notify");
 		/* init wake source */
 		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
 			device_init_wakeup(&mhi_dev->dev, true);
@@ -1170,6 +1206,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX;
 			enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp);
 
+			/* convert device ee to host ee */
+			event = mhi_translate_dev_ee(mhi_cntrl, event);
+
 			MHI_LOG("MHI EE received event:%s\n",
 				TO_MHI_EXEC_STR(event));
 			switch (event) {
@@ -1442,15 +1481,17 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 	struct mhi_controller *mhi_cntrl = dev;
 	enum mhi_dev_state state = MHI_STATE_MAX;
 	enum MHI_PM_STATE pm_state = 0;
-	enum mhi_ee ee;
+	enum mhi_ee ee = 0;
 
 	MHI_VERB("Enter\n");
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
 		state = mhi_get_mhi_state(mhi_cntrl);
-		ee = mhi_get_exec_env(mhi_cntrl);
-		MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+		ee = mhi_cntrl->ee;
+		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+		MHI_LOG("device ee:%s dev_state:%s\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee),
 			TO_MHI_STATE_STR(state));
 	}
 
@@ -1460,6 +1501,17 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 					       MHI_PM_SYS_ERR_DETECT);
 	}
 	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* if the device is in RDDM, don't bother processing the sys error */
+	if (mhi_cntrl->ee == MHI_EE_RDDM) {
+		if (mhi_cntrl->ee != ee) {
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_EE_RDDM);
+			wake_up_all(&mhi_cntrl->state_event);
+		}
+		goto exit_intvec;
+	}
+
 	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
 		wake_up_all(&mhi_cntrl->state_event);
 
@@ -1471,6 +1523,7 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 			schedule_work(&mhi_cntrl->syserr_worker);
 	}
 
+exit_intvec:
 	MHI_VERB("Exit\n");
 
 	return IRQ_HANDLED;
@@ -1525,6 +1578,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
 		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
 		break;
+	case MHI_CMD_STOP_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+		break;
 	case MHI_CMD_TIMSYNC_CFG:
 		cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR;
 		cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0;
@@ -1978,6 +2036,120 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
 }
 EXPORT_SYMBOL(mhi_unprepare_from_transfer);
 
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan,
+				    enum MHI_CMD cmd)
+{
+	int ret = -EIO;
+
+	mutex_lock(&mhi_chan->mutex);
+
+	MHI_VERB("Changing chan:%d to state:%s\n",
+		 mhi_chan->chan, cmd == MHI_CMD_START_CHAN ? "START" : "STOP");
+
+	/* disallow the state change if the channel is not in the active state */
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+		ret = -EINVAL;
+		MHI_LOG("channel:%d is not in active state, ch_state%d\n",
+			mhi_chan->chan, mhi_chan->ch_state);
+		goto error_chan_state;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI host is not in active state\n");
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		ret = -EIO;
+		goto error_chan_state;
+	}
+
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+	if (ret) {
+		MHI_ERR("Failed to send start chan cmd\n");
+		goto error_chan_state;
+	}
+
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
+			mhi_chan->chan);
+		ret = -EIO;
+		goto error_chan_state;
+	}
+
+	ret = 0;
+
+	MHI_VERB("chan:%d successfully transition to state:%s\n",
+		 mhi_chan->chan, cmd == MHI_CMD_START_CHAN ? "START" : "STOP");
+
+error_chan_state:
+	mutex_unlock(&mhi_chan->mutex);
+
+	return ret;
+}
+
+int mhi_pause_transfer(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	int dir, ret;
+
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/*
+		 * If one channel stopped successfully but the second channel
+		 * failed to stop, we still bail out because there is no way
+		 * to recover it. Resuming the stopped channel won't be
+		 * helpful and is likely to fail.
+		 */
+		ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+					       MHI_CMD_STOP_CHAN);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_pause_transfer);
+
+int mhi_resume_transfer(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	int dir, ret;
+
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/*
+		 * Similar to pause, if one channel started and the other
+		 * channel failed to start, we bail out. No need to pause
+		 * the started channel; the client will be resetting both
+		 * channels upon failure.
+		 */
+		ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+					       MHI_CMD_START_CHAN);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_resume_transfer);
+
 int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
 				enum dma_data_direction dir)
 {
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index e8dffb25..fdff6cb 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -136,6 +136,9 @@ enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
 	MHI_VERB("Transition to pm state from:%s to:%s\n",
 		 to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state));
 
+	if (MHI_REG_ACCESS_VALID(cur_state) || MHI_REG_ACCESS_VALID(state))
+		mhi_timesync_log(mhi_cntrl);
+
 	mhi_cntrl->pm_state = state;
 	return mhi_cntrl->pm_state;
 }
@@ -433,27 +436,31 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 
 	MHI_LOG("Processing Mission Mode Transition\n");
 
-	/* force MHI to be in M0 state before continuing */
-	ret = __mhi_device_get_sync(mhi_cntrl);
-	if (ret)
-		return ret;
-
-	ret = -EIO;
-
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
-		goto error_mission_mode;
+		return -EIO;
 
 	wake_up_all(&mhi_cntrl->state_event);
 
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+			     MHI_CB_EE_MISSION_MODE);
+
+	/* force MHI to be in M0 state before continuing */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
 	/* add elements to all HW event rings */
-	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		ret = -EIO;
 		goto error_mission_mode;
+	}
 
 	mhi_event = mhi_cntrl->mhi_event;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
@@ -487,8 +494,6 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	/* setup sysfs nodes for userspace votes */
 	mhi_create_vote_sysfs(mhi_cntrl);
 
-	ret = 0;
-
 	read_lock_bh(&mhi_cntrl->pm_lock);
 
 error_mission_mode:
@@ -517,9 +522,20 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		to_mhi_pm_state_str(transition_state));
 
 	/* We must notify MHI control driver so it can clean up first */
-	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
+	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+		/*
+		 * if the controller supports RDDM, we do not process
+		 * the sys error state; instead we will jump directly
+		 * to the RDDM state
+		 */
+		if (mhi_cntrl->rddm_image) {
+			MHI_LOG(
+				"Controller Support RDDM, skipping SYS_ERR_PROCESS\n");
+			return;
+		}
 		mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 				     MHI_CB_SYS_ERROR);
+	}
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	write_lock_irq(&mhi_cntrl->pm_lock);
@@ -793,12 +809,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 			MHI_ERR("Error setting dev_context\n");
 			goto error_dev_ctxt;
 		}
+	}
 
-		ret = mhi_init_irq_setup(mhi_cntrl);
-		if (ret) {
-			MHI_ERR("Error setting up irq\n");
-			goto error_setup_irq;
-		}
+	ret = mhi_init_irq_setup(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error setting up irq\n");
+		goto error_setup_irq;
 	}
 
 	/* setup bhi offset & intvec */
@@ -812,8 +828,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 
 	mhi_cntrl->bhi = mhi_cntrl->regs + val;
 
-	/* setup bhie offset */
-	if (mhi_cntrl->fbc_download) {
+	/* setup bhie offset if not set */
+	if (mhi_cntrl->fbc_download && !mhi_cntrl->bhie) {
 		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
 		if (ret) {
 			write_unlock_irq(&mhi_cntrl->pm_lock);
@@ -855,8 +871,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	return 0;
 
 error_bhi_offset:
-	if (!mhi_cntrl->pre_init)
-		mhi_deinit_free_irq(mhi_cntrl);
+	mhi_deinit_free_irq(mhi_cntrl);
 
 error_setup_irq:
 	if (!mhi_cntrl->pre_init)
@@ -869,6 +884,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL(mhi_async_power_up);
 
+/* Transition MHI into error state and notify critical clients */
+void mhi_control_error(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+
+	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_LD_ERR_FATAL_DETECT);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) {
+		MHI_ERR("Failed to transition to state:%s from:%s\n",
+			to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+			to_mhi_pm_state_str(cur_state));
+		goto exit_control_error;
+	}
+
+	/* start notifying all clients who request early notification */
+	device_for_each_child(mhi_cntrl->dev, NULL, mhi_early_notify_device);
+
+exit_control_error:
+	MHI_LOG("Exit with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+}
+EXPORT_SYMBOL(mhi_control_error);
+
 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 {
 	enum MHI_PM_STATE cur_state;
@@ -890,13 +935,14 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 
 	mhi_deinit_debugfs(mhi_cntrl);
 
+	mhi_deinit_free_irq(mhi_cntrl);
+
 	if (!mhi_cntrl->pre_init) {
 		/* free all allocated resources */
 		if (mhi_cntrl->fbc_image) {
 			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
 			mhi_cntrl->fbc_image = NULL;
 		}
-		mhi_deinit_free_irq(mhi_cntrl);
 		mhi_deinit_dev_ctxt(mhi_cntrl);
 	}
 }
@@ -1279,6 +1325,7 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	mhi_cntrl->wake_get(mhi_cntrl, true);
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
 		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
 		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
 	}
@@ -1389,6 +1436,10 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_EXEC_STR(mhi_cntrl->ee));
 
+	/* device already in rddm */
+	if (mhi_cntrl->ee == MHI_EE_RDDM)
+		return 0;
+
 	MHI_LOG("Triggering SYS_ERR to force rddm state\n");
 	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
 
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index f3556ff..33338f4 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -353,7 +353,7 @@ static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len)
 		return false;
 
 	/* validate SAT IPC version */
-	if (hdr->major_ver != SAT_MAJOR_VERSION &&
+	if (hdr->major_ver != SAT_MAJOR_VERSION ||
 	    hdr->minor_ver != SAT_MINOR_VERSION)
 		return false;
 
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index fa8793d..978c627 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
 
 #include <linux/cdev.h>
 #include <linux/device.h>
@@ -641,7 +641,7 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
 	spin_unlock_irqrestore(&uci_chan->lock, flags);
 
 	if (mhi_dev->dev.power.wakeup)
-		__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
+		pm_wakeup_hard_event(&mhi_dev->dev);
 
 	wake_up(&uci_chan->wq);
 }
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 5c071c2..2819868 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -559,6 +559,14 @@
           applications/compute DSP processor.
 		  Say M if you want to enable this module.
 
+config MSM_RDBG
+	tristate "QTI Remote debug driver"
+	help
+	Implements a shared memory based transport mechanism that allows
+	for a debugger running on a host PC to communicate with a remote
+	stub running on peripheral subsystems such as the ADSP, MODEM etc.
+		Say M if you want to enable this module.
+
 config ADI
 	tristate "SPARC Privileged ADI driver"
 	depends on SPARC64
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 07ac833..e4e1f36 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -62,6 +62,7 @@
   obj-$(CONFIG_MSM_ADSPRPC)	+= adsprpc_compat.o
 endif
 obj-$(CONFIG_MSM_FASTCVPD)	+= fastcvpd.o
+obj-$(CONFIG_MSM_RDBG)		+= rdbg.o
 obj-$(CONFIG_ADI)		+= adi.o
 obj-$(CONFIG_DIAG_CHAR)		+= diag/
 obj-$(CONFIG_OKL4_PIPE)		+= okl4_pipe.o
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 86cf9af..f93d47c 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -60,8 +60,12 @@
 #define AUDIO_PDR_ADSP_SERVICE_NAME              "avs/audio"
 #define ADSP_AUDIOPD_NAME                        "msm/adsp/audio_pd"
 
-#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_sdsprpc"
-#define SENSORS_PDR_SLPI_SERVICE_NAME            "tms/servreg"
+#define SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME   "sensors_pdr_adsprpc"
+#define SENSORS_PDR_ADSP_SERVICE_NAME              "tms/servreg"
+#define ADSP_SENSORPD_NAME                       "msm/adsp/sensor_pd"
+
+#define SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_sdsprpc"
+#define SENSORS_PDR_SLPI_SERVICE_NAME            SENSORS_PDR_ADSP_SERVICE_NAME
 #define SLPI_SENSORPD_NAME                       "msm/slpi/sensor_pd"
 
 #define RPC_TIMEOUT	(5 * HZ)
@@ -379,6 +383,7 @@ struct fastrpc_file {
 	int pd;
 	char *servloc_name;
 	int file_close;
+	int dsp_process_init;
 	struct fastrpc_apps *apps;
 	struct hlist_head perf;
 	struct dentry *debugfs_file;
@@ -407,6 +412,14 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
 						fastrpc_pdr_notifier_cb,
 				.cid = ADSP_DOMAIN_ID,
 			},
+			{
+				.servloc_name =
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME,
+				.spdname = ADSP_SENSORPD_NAME,
+				.pdrnb.notifier_call =
+						fastrpc_pdr_notifier_cb,
+				.cid = ADSP_DOMAIN_ID,
+			}
 		},
 	},
 	{
@@ -424,7 +437,7 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
 		.spd = {
 			{
 				.servloc_name =
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME,
 				.spdname = SLPI_SENSORPD_NAME,
 				.pdrnb.notifier_call =
 						fastrpc_pdr_notifier_cb,
@@ -581,7 +594,9 @@ static void fastrpc_mmap_add(struct fastrpc_mmap *map)
 				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		struct fastrpc_apps *me = &gfa;
 
+		spin_lock(&me->hlock);
 		hlist_add_head(&map->hn, &me->maps);
+		spin_unlock(&me->hlock);
 	} else {
 		struct fastrpc_file *fl = map->fl;
 
@@ -601,23 +616,33 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
 		return -EOVERFLOW;
 	if (mflags == ADSP_MMAP_HEAP_ADDR ||
 				 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		spin_lock(&me->hlock);
 		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
 			if (va >= map->va &&
 				va + len <= map->va + map->len &&
 				map->fd == fd) {
-				if (refs)
+				if (refs) {
+					if (map->refs + 1 == INT_MAX) {
+						spin_unlock(&me->hlock);
+						return -ETOOMANYREFS;
+					}
 					map->refs++;
+				}
 				match = map;
 				break;
 			}
 		}
+		spin_unlock(&me->hlock);
 	} else {
 		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
 			if (va >= map->va &&
 				va + len <= map->va + map->len &&
 				map->fd == fd) {
-				if (refs)
+				if (refs) {
+					if (map->refs + 1 == INT_MAX)
+						return -ETOOMANYREFS;
 					map->refs++;
+				}
 				match = map;
 				break;
 			}
@@ -656,6 +681,7 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 	struct hlist_node *n;
 	struct fastrpc_apps *me = &gfa;
 
+	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
 		if (map->raddr == va &&
 			map->raddr + map->len == va + len &&
@@ -665,6 +691,7 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 			break;
 		}
 	}
+	spin_unlock(&me->hlock);
 	if (match) {
 		*ppmap = match;
 		return 0;
@@ -874,6 +901,12 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 				DMA_ATTR_SKIP_CPU_SYNC;
 		else if (map->attr & FASTRPC_ATTR_COHERENT)
 			map->attach->dma_map_attrs |= DMA_ATTR_FORCE_COHERENT;
+		/*
+		 * Skip CPU sync if IO coherency is not supported,
+		 * as we flush later
+		 */
+		else if (!sess->smmu.coherent)
+			map->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 
 		VERIFY(err, !IS_ERR_OR_NULL(map->table =
 			dma_buf_map_attachment(map->attach,
@@ -1609,11 +1642,11 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 		if (rpra && lrpra && rpra[i].buf.len &&
 			ctx->overps[oix]->mstart) {
-			if (map && map->handle) {
+			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 				dma_buf_end_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 			} else
 				dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
 					uint64_to_ptr(rpra[i].buf.pv
@@ -1715,11 +1748,11 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 			continue;
 		if (!IS_CACHE_ALIGNED((uintptr_t)
 				uint64_to_ptr(rpra[i].buf.pv))) {
-			if (map && map->handle) {
+			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 				dma_buf_end_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 			} else
 				dmac_flush_range(
 					uint64_to_ptr(rpra[i].buf.pv), (char *)
@@ -1729,11 +1762,11 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
 							rpra[i].buf.len);
 		if (!IS_CACHE_ALIGNED(end)) {
-			if (map && map->handle) {
+			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 				dma_buf_end_cpu_access(map->buf,
-					DMA_BIDIRECTIONAL);
+					DMA_TO_DEVICE);
 			} else
 				dmac_flush_range((char *)end,
 					(char *)end + 1);
@@ -1768,9 +1801,9 @@ static void inv_args(struct smq_invoke_ctx *ctx)
 		}
 		if (map && map->buf) {
 			dma_buf_begin_cpu_access(map->buf,
-				DMA_BIDIRECTIONAL);
+				DMA_FROM_DEVICE);
 			dma_buf_end_cpu_access(map->buf,
-				DMA_BIDIRECTIONAL);
+				DMA_FROM_DEVICE);
 		} else
 			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
 				(char *)uint64_to_ptr(rpra[i].buf.pv
@@ -2025,8 +2058,12 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 		if (init->flags == FASTRPC_INIT_ATTACH)
 			fl->pd = 0;
 		else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
-			fl->servloc_name =
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			if (fl->cid == ADSP_DOMAIN_ID)
+				fl->servloc_name =
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME;
+			else if (fl->cid == SDSP_DOMAIN_ID)
+				fl->servloc_name =
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME;
 			fl->pd = 2;
 		}
 		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
@@ -2083,8 +2120,10 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 		err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
 		if (err)
 			goto bail;
-		fl->init_mem = imem;
+		if (fl->init_mem)
+			fastrpc_buf_free(fl->init_mem, 0);
 
+		fl->init_mem = imem;
 		inbuf.pageslen = 1;
 		ra[0].buf.pv = (void *)&inbuf;
 		ra[0].buf.len = sizeof(inbuf);
@@ -2157,7 +2196,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 		if (!strcmp(proc_name, "audiopd")) {
 			fl->servloc_name =
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
-			err = fastrpc_mmap_remove_pdr(fl);
+			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
 			if (err)
 				goto bail;
 		}
@@ -2216,22 +2255,38 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 			goto bail;
 	} else {
 		err = -ENOTTY;
+		goto bail;
 	}
+	fl->dsp_process_init = 1;
 bail:
 	kfree(proc_name);
 	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
 		me->staticpd_flags = 0;
 	if (mem && err) {
 		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR
-			&& me->channel[fl->cid].rhvm.vmid && rh_hyp_done)
-			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
+			&& me->channel[fl->cid].rhvm.vmid && rh_hyp_done) {
+			int hyp_err = 0;
+
+			hyp_err = hyp_assign_phys(mem->phys,
+					(uint64_t)mem->size,
 					me->channel[fl->cid].rhvm.vmid,
 					me->channel[fl->cid].rhvm.vmcount,
 					hlosvm, hlosvmperm, 1);
+			if (hyp_err)
+				pr_warn("adsprpc: %s: %s: rh hyp unassign failed with %d for phys 0x%llx of size %zd\n",
+						__func__, current->comm,
+						hyp_err, mem->phys, mem->size);
+		}
 		mutex_lock(&fl->map_mutex);
 		fastrpc_mmap_free(mem, 0);
 		mutex_unlock(&fl->map_mutex);
 	}
+	if (err) {
+		if (!IS_ERR_OR_NULL(fl->init_mem)) {
+			fastrpc_buf_free(fl->init_mem, 0);
+			fl->init_mem = NULL;
+		}
+	}
 	if (file) {
 		mutex_lock(&fl->map_mutex);
 		fastrpc_mmap_free(file, 0);
@@ -2380,9 +2435,15 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
 	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
+	VERIFY(err, fl->sctx != NULL);
+	if (err)
+		goto bail;
 	VERIFY(err, fl->apps->channel[fl->cid].rpdev != NULL);
 	if (err)
 		goto bail;
+	VERIFY(err, fl->apps->channel[fl->cid].issubsystemup == 1);
+	if (err)
+		goto bail;
 	tgid = fl->tgid;
 	ra[0].buf.pv = (void *)&tgid;
 	ra[0].buf.len = sizeof(tgid);
@@ -2394,6 +2455,9 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
 	ioctl.crc = NULL;
 	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
 		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+	if (err && fl->dsp_process_init)
+		pr_err("adsprpc: %s: releasing DSP process failed with %d (0x%x) for %s\n",
+				__func__, err, err, current->comm);
 bail:
 	return err;
 }
@@ -2452,8 +2516,12 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
 				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
 				me->channel[fl->cid].rhvm.vmperm,
 				me->channel[fl->cid].rhvm.vmcount);
-		if (err)
+		if (err) {
+			pr_err("adsprpc: %s: %s: rh hyp assign failed with %d for phys 0x%llx, size %zd\n",
+					__func__, current->comm,
+					err, phys, size);
 			goto bail;
+		}
 	}
 bail:
 	return err;
@@ -2503,8 +2571,12 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
 					me->channel[fl->cid].rhvm.vmid,
 					me->channel[fl->cid].rhvm.vmcount,
 					destVM, destVMperm, 1);
-			if (err)
+			if (err) {
+				pr_err("adsprpc: %s: %s: rh hyp unassign failed with %d for phys 0x%llx, size %zd\n",
+					__func__, current->comm,
+					err, phys, size);
 				goto bail;
+			}
 		}
 	}
 
@@ -2620,8 +2692,8 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
 		me->channel[fl->cid].spd[session].prevpdrcount) {
 		err = fastrpc_mmap_remove_ssr(fl);
 		if (err)
-			pr_err("adsprpc: %s: SSR: failed to unmap remote heap (err %d)\n",
-					__func__, err);
+			pr_warn("adsprpc: %s: %s: failed to unmap remote heap (err %d)\n",
+					__func__, current->comm, err);
 		me->channel[fl->cid].spd[session].prevpdrcount =
 				me->channel[fl->cid].spd[session].pdrcount;
 	}
@@ -3284,7 +3356,6 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
 	struct fastrpc_apps *me = &gfa;
 	int cid, err = 0;
 
-
 	VERIFY(err, fl && fl->sctx);
 	if (err)
 		goto bail;
@@ -3316,9 +3387,11 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
 	if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount !=
 			 me->channel[cid].prevssrcount) {
 		mutex_lock(&fl->map_mutex);
-		if (fastrpc_mmap_remove_ssr(fl))
-			pr_err("adsprpc: %s: SSR: Failed to unmap remote heap for %s\n",
-				__func__, me->channel[cid].name);
+		err = fastrpc_mmap_remove_ssr(fl);
+		if (err)
+			pr_warn("adsprpc: %s: %s: failed to unmap remote heap for %s (err %d)\n",
+					__func__, current->comm,
+					me->channel[cid].subsys, err);
 		mutex_unlock(&fl->map_mutex);
 		me->channel[cid].prevssrcount =
 					me->channel[cid].ssrcount;
@@ -3385,6 +3458,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 		fl->debugfs_file = debugfs_file;
 	memset(&fl->perf, 0, sizeof(fl->perf));
 	fl->qos_request = 0;
+	fl->dsp_process_init = 0;
 	filp->private_data = fl;
 	mutex_init(&fl->internal_map_mutex);
 	mutex_init(&fl->map_mutex);
@@ -3477,6 +3551,25 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 	return err;
 }
 
+static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name)
+{
+	int err = 0, session = -1, cid = -1;
+	struct fastrpc_apps *me = &gfa;
+
+	if (fl->servloc_name && sloc_name
+		&& !strcmp(fl->servloc_name, sloc_name)) {
+		err = fastrpc_get_spd_session(sloc_name, &session, &cid);
+		if (err || cid != fl->cid)
+			goto bail;
+		if (!me->channel[cid].spd[session].ispdup) {
+			err = -ENOTCONN;
+			goto bail;
+		}
+	}
+bail:
+	return err;
+}
+
 static int fastrpc_setmode(unsigned long ioctl_param,
 				struct fastrpc_file *fl)
 {
@@ -3612,6 +3705,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
 	p.inv.fds = NULL;
 	p.inv.attrs = NULL;
 	p.inv.crc = NULL;
+
+	err = fastrpc_check_pd_status(fl,
+			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
+	if (err)
+		goto bail;
+
 	spin_lock(&fl->hlock);
 	if (fl->file_close == 1) {
 		err = EBADF;
@@ -3855,7 +3954,12 @@ static int fastrpc_get_service_location_notify(struct notifier_block *nb,
 				ADSP_AUDIOPD_NAME))) {
 			goto pdr_register;
 		} else if ((!strcmp(spd->servloc_name,
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME))
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME))
+				&& (!strcmp(pdr->domain_list[i].name,
+				ADSP_SENSORPD_NAME))) {
+			goto pdr_register;
+		} else if ((!strcmp(spd->servloc_name,
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME))
 				&& (!strcmp(pdr->domain_list[i].name,
 				SLPI_SENSORPD_NAME))) {
 			goto pdr_register;
@@ -4157,23 +4261,44 @@ static int fastrpc_probe(struct platform_device *pdev)
 	if (of_property_read_bool(dev->of_node,
 					"qcom,fastrpc-adsp-sensors-pdr")) {
 		err = fastrpc_get_spd_session(
-		SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
+		SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
 		if (err)
 			goto spdbail;
 		me->channel[cid].spd[session].get_service_nb.notifier_call =
 					fastrpc_get_service_location_notify;
 		ret = get_service_location(
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME,
+				SENSORS_PDR_ADSP_SERVICE_NAME,
+				&me->channel[cid].spd[session].get_service_nb);
+		if (ret)
+			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
+				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME);
+		else
+			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME);
+	}
+	if (of_property_read_bool(dev->of_node,
+					"qcom,fastrpc-slpi-sensors-pdr")) {
+		err = fastrpc_get_spd_session(
+		SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
+		if (err)
+			goto spdbail;
+		me->channel[cid].spd[session].get_service_nb.notifier_call =
+					fastrpc_get_service_location_notify;
+		ret = get_service_location(
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME,
 				SENSORS_PDR_SLPI_SERVICE_NAME,
 				&me->channel[cid].spd[session].get_service_nb);
 		if (ret)
 			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
 				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME);
 		else
 			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
 				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
+				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME);
 	}
 spdbail:
 	err = of_platform_populate(pdev->dev.of_node,
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index ac6a3ff..029ffd1 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1957,7 +1957,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
 	if (!buf)
 		return -EIO;
 
-	if (len <= (sizeof(struct dci_pkt_req_t) +
+	if (len < (sizeof(struct dci_pkt_req_t) +
 		sizeof(struct diag_pkt_header_t)) ||
 		len > DCI_REQ_BUF_SIZE) {
 		pr_err("diag: dci: Invalid length %d len in %s\n",
@@ -1971,7 +1971,6 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
 	req_len -= sizeof(struct dci_pkt_req_t);
 	req_buf = temp; /* Start of the Request */
 	header = (struct diag_pkt_header_t *)temp;
-	temp += sizeof(struct diag_pkt_header_t);
 	read_len += sizeof(struct diag_pkt_header_t);
 	if (read_len >= DCI_REQ_BUF_SIZE) {
 		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
@@ -2083,9 +2082,9 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
 	uint8_t *event_mask_ptr;
 	struct diag_dci_client_tbl *dci_entry = NULL;
 
-	if (!temp) {
-		pr_err("diag: Invalid buffer in %s\n", __func__);
-		return -ENOMEM;
+	if (!temp || len < sizeof(int)) {
+		pr_err("diag: Invalid input in %s\n", __func__);
+		return -EINVAL;
 	}
 
 	/* This is Pkt request/response transaction */
@@ -2141,7 +2140,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
 		count = 0; /* iterator for extracting log codes */
 
 		while (count < num_codes) {
-			if (read_len >= USER_SPACE_DATA) {
+			if (read_len + sizeof(uint16_t) > len) {
 				pr_err("diag: dci: Invalid length for log type in %s\n",
 								__func__);
 				mutex_unlock(&driver->dci_mutex);
@@ -2255,7 +2254,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
 		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
 		count = 0; /* iterator for extracting log codes */
 		while (count < num_codes) {
-			if (read_len >= USER_SPACE_DATA) {
+			if (read_len + sizeof(int) > len) {
 				pr_err("diag: dci: Invalid length for event type in %s\n",
 								__func__);
 				mutex_unlock(&driver->dci_mutex);
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 5817066..4a7534a 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -18,7 +18,7 @@
 #define DISABLE_LOG_MASK	0
 #define MAX_EVENT_SIZE		512
 #define DCI_CLIENT_INDEX_INVALID -1
-#define DCI_LOG_CON_MIN_LEN		14
+#define DCI_LOG_CON_MIN_LEN		16
 #define DCI_EVENT_CON_MIN_LEN		16
 
 #define EXT_HDR_LEN		8
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 92ebeb3..df6b72b 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -81,12 +81,12 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
 		driver->supports_apps_hdlc_encoding,
 		driver->supports_apps_header_untagging,
 		driver->supports_sockets,
-		driver->logging_mode,
+		driver->logging_mode[0],
 		driver->rsp_buf_busy,
 		driver->hdlc_disabled,
 		driver->time_sync_enabled,
-		driver->md_session_mode,
-		driver->md_session_mask,
+		driver->md_session_mode[0],
+		driver->md_session_mask[0],
 		driver->uses_time_api,
 		driver->supports_pd_buffering,
 		driver->supports_diagid_v2_feature_mask);
@@ -241,7 +241,7 @@ static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
 		driver->num_dci_client,
 		driver->md_ws.ref_count,
 		driver->md_ws.copy_count,
-		driver->logging_mode,
+		driver->logging_mode[0],
 		driver->diag_dev->power.wakeup->active_count,
 		driver->diag_dev->power.wakeup->relax_count);
 
@@ -488,7 +488,7 @@ static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
 	struct diag_socket_info *info = NULL;
 	struct diagfwd_info *fwd_ctxt = NULL;
 
-	if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+	if (diag_dbgfs_socketinfo_index >= NUM_TYPES) {
 		/* Done. Reset to prepare for future requests */
 		diag_dbgfs_socketinfo_index = 0;
 		return 0;
@@ -594,7 +594,7 @@ static ssize_t diag_dbgfs_read_rpmsginfo(struct file *file, char __user *ubuf,
 	struct diag_rpmsg_info *info = NULL;
 	struct diagfwd_info *fwd_ctxt = NULL;
 
-	if (diag_dbgfs_rpmsginfo_index >= NUM_PERIPHERALS) {
+	if (diag_dbgfs_rpmsginfo_index >= NUM_TYPES) {
 		/* Done. Reset to prepare for future requests */
 		diag_dbgfs_rpmsginfo_index = 0;
 		return 0;
@@ -632,7 +632,7 @@ static ssize_t diag_dbgfs_read_rpmsginfo(struct file *file, char __user *ubuf,
 
 			bytes_written = scnprintf(buf+bytes_in_buffer,
 				bytes_remaining,
-				"name\t\t:\t%s\n"
+				"name\t\t:\t%s:\t%s\n"
 				"hdl\t\t:\t%pK\n"
 				"inited\t\t:\t%d\n"
 				"opened\t\t:\t%d\n"
@@ -647,6 +647,7 @@ static ssize_t diag_dbgfs_read_rpmsginfo(struct file *file, char __user *ubuf,
 				"fwd inited\t:\t%d\n"
 				"fwd opened\t:\t%d\n"
 				"fwd ch_open\t:\t%d\n\n",
+				info->edge,
 				info->name,
 				info->hdl,
 				info->inited,
@@ -728,7 +729,7 @@ static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
 	unsigned int bytes_in_buffer = 0;
 	struct diag_hsic_info *hsic_info = NULL;
 
-	if (diag_dbgfs_hsicinfo_index >= NUM_DIAG_USB_DEV) {
+	if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
 		/* Done. Reset to prepare for future requests */
 		diag_dbgfs_hsicinfo_index = 0;
 		return 0;
@@ -879,7 +880,7 @@ static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
 	unsigned int bytes_in_buffer = 0;
 	struct diagfwd_bridge_info *info = NULL;
 
-	if (diag_dbgfs_bridgeinfo_index >= NUM_DIAG_USB_DEV) {
+	if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
 		/* Done. Reset to prepare for future requests */
 		diag_dbgfs_bridgeinfo_index = 0;
 		return 0;
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 8d9c922..66d7bd7 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -102,7 +102,8 @@ static int diag_check_update(int md_peripheral, int pid)
 	mutex_lock(&driver->md_session_lock);
 	info = diag_md_session_get_pid(pid);
 	ret = (!info || (info &&
-		(info->peripheral_mask & MD_PERIPHERAL_MASK(md_peripheral))));
+		(info->peripheral_mask[DIAG_LOCAL_PROC] &
+		MD_PERIPHERAL_MASK(md_peripheral))));
 	mutex_unlock(&driver->md_session_lock);
 
 	return ret;
@@ -134,14 +135,15 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 	int err = 0, send_once = 0, i;
 	int header_len = sizeof(struct diag_ctrl_log_mask);
 	uint8_t *buf = NULL, *temp = NULL;
-	uint8_t upd = 0, status;
-	uint32_t mask_size = 0, pd_mask = 0;
+	uint8_t upd = 0, status, eq_id;
+	uint32_t mask_size = 0, pd_mask = 0, num_items = 0;
 	struct diag_ctrl_log_mask ctrl_pkt;
 	struct diag_ctrl_log_mask_sub ctrl_pkt_sub;
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_log_mask_t *mask = NULL;
 	struct diagfwd_info *fwd_info = NULL;
 	struct diag_multisim_masks *ms_ptr = NULL;
+	int proc = DIAG_LOCAL_PROC;
 
 	if (peripheral >= NUM_PERIPHERALS)
 		return;
@@ -155,16 +157,18 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 
 	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
 
-	if (driver->md_session_mask != 0) {
-		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
-			if (driver->md_session_map[peripheral])
+	if (driver->md_session_mask[proc] != 0) {
+		if (driver->md_session_mask[proc] &
+			MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[proc][peripheral])
 				mask_info =
-				driver->md_session_map[peripheral]->log_mask;
-		} else if (driver->md_session_mask & pd_mask) {
-			upd = diag_mask_to_pd_value(driver->md_session_mask);
-			if (upd && driver->md_session_map[upd])
+			driver->md_session_map[proc][peripheral]->log_mask;
+		} else if (driver->md_session_mask[proc] & pd_mask) {
+			upd =
+			diag_mask_to_pd_value(driver->md_session_mask[proc]);
+			if (upd && driver->md_session_map[proc][upd])
 				mask_info =
-				driver->md_session_map[upd]->log_mask;
+				driver->md_session_map[proc][upd]->log_mask;
 		} else {
 			DIAG_LOG(DIAG_DEBUG_MASKS,
 			"asking for mask update with unknown session mask\n");
@@ -193,22 +197,6 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 		goto err;
 	buf = mask_info->update_buf;
 
-	switch (status) {
-	case DIAG_CTRL_MASK_ALL_DISABLED:
-	case DIAG_CTRL_MASK_ALL_ENABLED:
-		ctrl_pkt.equip_id = 0;
-		ctrl_pkt.num_items = 0;
-		ctrl_pkt.log_mask_size = 0;
-		send_once = 1;
-		break;
-	case DIAG_CTRL_MASK_VALID:
-		send_once = 0;
-		break;
-	default:
-		pr_debug("diag: In %s, invalid log_mask status\n", __func__);
-		return;
-	}
-
 	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
 		if (!mask->ptr)
 			continue;
@@ -217,22 +205,34 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 			continue;
 
 		mutex_lock(&mask->lock);
+		switch (status) {
+		case DIAG_CTRL_MASK_ALL_DISABLED:
+		case DIAG_CTRL_MASK_ALL_ENABLED:
+			eq_id = 0;
+			num_items = 0;
+			mask_size = 0;
+			send_once = 1;
+			break;
+		case DIAG_CTRL_MASK_VALID:
+			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+			eq_id = i;
+			num_items = mask->num_items_tools;
+			break;
+		default:
+			pr_debug("diag: In %s, invalid log_mask status\n",
+				__func__);
+			mutex_unlock(&mask->lock);
+			return;
+		}
 		if (sub_index >= 0 && preset_id > 0)
 			goto proceed_sub_pkt;
 
 		ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
 		ctrl_pkt.stream_id = 1;
 		ctrl_pkt.status = mask_info->status;
-		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
-			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
-			ctrl_pkt.equip_id = i;
-			ctrl_pkt.num_items = mask->num_items_tools;
-			ctrl_pkt.log_mask_size = mask_size;
-		} else {
-			ctrl_pkt.equip_id = 0;
-			ctrl_pkt.num_items = 0;
-			ctrl_pkt.log_mask_size = 0;
-		}
+		ctrl_pkt.equip_id = eq_id;
+		ctrl_pkt.num_items = num_items;
+		ctrl_pkt.log_mask_size = mask_size;
 		ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
 		header_len = sizeof(struct diag_ctrl_msg_mask);
 		goto send_cntrl_pkt;
@@ -249,16 +249,9 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 		}
 		ctrl_pkt_sub.stream_id = 1;
 		ctrl_pkt_sub.status = status;
-		if (status == DIAG_CTRL_MASK_VALID) {
-			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
-			ctrl_pkt_sub.equip_id = i;
-			ctrl_pkt_sub.num_items = mask->num_items_tools;
-			ctrl_pkt_sub.log_mask_size = mask_size;
-		} else {
-			ctrl_pkt_sub.equip_id = 0;
-			ctrl_pkt_sub.num_items = 0;
-			ctrl_pkt_sub.log_mask_size = 0;
-		}
+		ctrl_pkt_sub.equip_id = eq_id;
+		ctrl_pkt_sub.num_items = num_items;
+		ctrl_pkt_sub.log_mask_size = mask_size;
 		ctrl_pkt_sub.data_len = LOG_MASK_CTRL_HEADER_LEN_SUB +
 			mask_size;
 		header_len = sizeof(struct diag_ctrl_msg_mask_sub);
@@ -287,9 +280,9 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 		mutex_unlock(&mask->lock);
 
 		DIAG_LOG(DIAG_DEBUG_MASKS,
-			 "sending ctrl pkt to %d, e %d num_items %d size %d\n",
-			 peripheral, i, ctrl_pkt.num_items,
-			 ctrl_pkt.log_mask_size);
+			 "sending ctrl pkt to %d, equip_id %d num_items %d size %d\n",
+			 peripheral, eq_id, num_items,
+			 mask_size);
 
 		err = diagfwd_write(peripheral, TYPE_CNTL,
 				    buf, header_len + mask_size);
@@ -319,6 +312,7 @@ static void diag_send_event_mask_update(uint8_t peripheral, int sub_index,
 	struct diag_mask_info *mask_info = NULL;
 	struct diagfwd_info *fwd_info = NULL;
 	struct diag_multisim_masks *ms_ptr = NULL;
+	int proc = DIAG_LOCAL_PROC;
 
 	if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
 		pr_debug("diag: In %s, invalid event mask length %d\n",
@@ -338,16 +332,18 @@ static void diag_send_event_mask_update(uint8_t peripheral, int sub_index,
 
 	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
 
-	if (driver->md_session_mask != 0) {
-		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
-			if (driver->md_session_map[peripheral])
+	if (driver->md_session_mask[proc] != 0) {
+		if (driver->md_session_mask[proc] &
+			MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[proc][peripheral])
 				mask_info =
-				driver->md_session_map[peripheral]->event_mask;
-		} else if (driver->md_session_mask & pd_mask) {
-			upd = diag_mask_to_pd_value(driver->md_session_mask);
-			if (upd && driver->md_session_map[upd])
+			driver->md_session_map[proc][peripheral]->event_mask;
+		} else if (driver->md_session_mask[proc] & pd_mask) {
+			upd =
+			diag_mask_to_pd_value(driver->md_session_mask[proc]);
+			if (upd && driver->md_session_map[proc][upd])
 				mask_info =
-				driver->md_session_map[upd]->event_mask;
+				driver->md_session_map[proc][upd]->event_mask;
 		} else {
 			DIAG_LOG(DIAG_DEBUG_MASKS,
 			"asking for mask update with unknown session mask\n");
@@ -476,6 +472,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last,
 	struct diagfwd_info *fwd_info = NULL;
 	struct diag_md_session_t *md_session_info = NULL;
 	struct diag_multisim_masks *ms_ptr = NULL;
+	int proc = DIAG_LOCAL_PROC;
 
 	if (peripheral >= NUM_PERIPHERALS)
 		return;
@@ -489,20 +486,23 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last,
 
 	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
 
-	if (driver->md_session_mask != 0) {
-		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
-			if (driver->md_session_map[peripheral]) {
+	if (driver->md_session_mask[proc] != 0) {
+		if (driver->md_session_mask[proc] &
+			MD_PERIPHERAL_MASK(peripheral)) {
+			if (driver->md_session_map[proc][peripheral]) {
 				mask_info =
-				driver->md_session_map[peripheral]->msg_mask;
+			driver->md_session_map[proc][peripheral]->msg_mask;
 				md_session_info =
-					driver->md_session_map[peripheral];
+				driver->md_session_map[proc][peripheral];
 			}
-		} else if (driver->md_session_mask & pd_mask) {
-			upd = diag_mask_to_pd_value(driver->md_session_mask);
-			if (upd && driver->md_session_map[upd]) {
+		} else if (driver->md_session_mask[proc] & pd_mask) {
+			upd =
+			diag_mask_to_pd_value(driver->md_session_mask[proc]);
+			if (upd && driver->md_session_map[proc][upd]) {
 				mask_info =
-				driver->md_session_map[upd]->msg_mask;
-				md_session_info = driver->md_session_map[upd];
+				driver->md_session_map[proc][upd]->msg_mask;
+				md_session_info =
+				driver->md_session_map[proc][upd];
 			}
 		} else {
 			DIAG_LOG(DIAG_DEBUG_MASKS,
@@ -605,7 +605,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last,
 		header.msg_mask_size = mask_size;
 		mask_size *= sizeof(uint32_t);
 		header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
-		memcpy(buf, &header, header_len);
+		memcpy(buf, &header, sizeof(header));
 		if (mask_size > 0)
 			memcpy(buf + header_len, mask->ptr, mask_size);
 		mutex_unlock(&mask->lock);
@@ -640,7 +640,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last,
 		header_sub.msg_mask_size = mask_size;
 		mask_size *= sizeof(uint32_t);
 		header_sub.data_len = MSG_MASK_CTRL_HEADER_LEN_SUB + mask_size;
-		memcpy(buf, &header_sub, header_len);
+		memcpy(buf, &header_sub, sizeof(header_sub));
 		if (mask_size > 0)
 			memcpy(buf + header_len, mask->ptr, mask_size);
 		mutex_unlock(&mask->lock);
@@ -1223,6 +1223,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -1354,6 +1356,8 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -1495,11 +1499,11 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
 		ms_ptr = diag_get_ms_ptr_index(mask_info->ms_ptr, sub_index);
 		if (!ms_ptr)
 			goto err;
-		if (src_len >= header_len + mask_len)
+		if (src_len >= header_len + mask_len - 1)
 			memcpy(ms_ptr->sub_ptr, src_buf + header_len, mask_len);
 		ms_ptr->status = DIAG_CTRL_MASK_VALID;
 	} else {
-		if (src_len >= header_len + mask_len)
+		if (src_len >= header_len + mask_len - 1)
 			memcpy(mask_info->ptr, src_buf + header_len, mask_len);
 		mask_info->status = DIAG_CTRL_MASK_VALID;
 	}
@@ -1546,6 +1550,8 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -1651,6 +1657,8 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -1675,7 +1683,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
 	int rsp_header_len = 0;
 	uint32_t mask_size = 0, equip_id;
 	struct diag_log_mask_t *log_item = NULL;
-	struct diag_log_config_req_t *req;
+	struct diag_log_config_get_req_t *req;
 	struct diag_log_config_rsp_t rsp;
 	struct diag_log_config_rsp_sub_t *req_sub;
 	struct diag_log_config_rsp_sub_t rsp_sub;
@@ -1688,7 +1696,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
 
 	mask_info = (!info) ? &log_mask : info->log_mask;
 	if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
-		src_len < sizeof(struct diag_log_config_req_t)) {
+		src_len < sizeof(struct diag_log_config_get_req_t)) {
 		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
 		       __func__, src_buf, src_len, dest_buf, dest_len,
 		       mask_info);
@@ -1702,9 +1710,9 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
 
 	if (!cmd_ver) {
 		log_item = (struct diag_log_mask_t *)mask_info->ptr;
-		if (src_len < sizeof(struct diag_log_config_req_t))
+		if (src_len < sizeof(struct diag_log_config_get_req_t))
 			goto err;
-		req = (struct diag_log_config_req_t *)src_buf;
+		req = (struct diag_log_config_get_req_t *)src_buf;
 		rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
 		rsp.padding[0] = 0;
 		rsp.padding[1] = 0;
@@ -2077,6 +2085,8 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -2192,6 +2202,8 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 			peripheral = diag_search_peripheral_by_pd(i);
 		else
 			peripheral = i;
+		if (peripheral < 0 || peripheral >= NUM_PERIPHERALS)
+			continue;
 		if (sub_index >= 0 &&
 			!driver->feature[peripheral].multi_sim_support)
 			continue;
@@ -3190,7 +3202,8 @@ void diag_send_updates_peripheral(uint8_t peripheral)
 				driver->real_time_mode[DIAG_LOCAL_PROC]);
 		diag_send_peripheral_buffering_mode(
 					&driver->buffering_mode[peripheral]);
-
+		if (P_FMASK_DIAGID_V2(peripheral))
+			diag_send_hw_accel_status(peripheral);
 		/*
 		 * Clear mask_update variable afer updating
 		 * logging masks to peripheral.
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
index bffb1d6..1036ab7 100644
--- a/drivers/char/diag/diag_masks.h
+++ b/drivers/char/diag/diag_masks.h
@@ -36,6 +36,13 @@ struct diag_msg_mask_t {
 	uint32_t *ptr;
 };
 
+struct diag_log_config_get_req_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t equip_id;
+} __packed;
+
 struct diag_log_config_req_t {
 	uint8_t cmd_code;
 	uint8_t padding[3];
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 017fc9d..0481222 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -87,7 +87,18 @@ void diag_md_open_all(void)
 			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
 	}
 }
+void diag_md_open_device(int id)
+{
 
+	struct diag_md_info *ch = NULL;
+
+		ch = &diag_md[id];
+		if (!ch->md_info_inited)
+			return;
+		if (ch->ops && ch->ops->open)
+			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+}
 void diag_md_close_all(void)
 {
 	int i, j;
@@ -126,7 +137,42 @@ void diag_md_close_all(void)
 
 	diag_ws_reset(DIAG_WS_MUX);
 }
+void diag_md_close_device(int id)
+{
+	int  j;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
 
+		ch = &diag_md[id];
+		if (!ch->md_info_inited)
+			return;
+
+		if (ch->ops && ch->ops->close)
+			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+		/*
+		 * When we close the Memory device mode, make sure we flush the
+		 * internal buffers in the table so that there are no stale
+		 * entries.
+		 */
+		spin_lock_irqsave(&ch->lock, flags);
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+
+	diag_ws_reset(DIAG_WS_MUX);
+}
 int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 {
 	int i, peripheral, pid = 0;
@@ -140,17 +186,20 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 
 	if (!buf || len < 0)
 		return -EINVAL;
-
-	peripheral = diag_md_get_peripheral(ctx);
-	if (peripheral < 0)
-		return -EINVAL;
-
+	if (id == DIAG_LOCAL_PROC) {
+		peripheral = diag_md_get_peripheral(ctx);
+		if (peripheral < 0)
+			return -EINVAL;
+	} else {
+		peripheral = 0;
+	}
 	mutex_lock(&driver->md_session_lock);
-	session_info = diag_md_session_get_peripheral(peripheral);
+	session_info = diag_md_session_get_peripheral(id, peripheral);
 	if (!session_info) {
 		mutex_unlock(&driver->md_session_lock);
 		return -EIO;
 	}
+
 	pid = session_info->pid;
 
 	ch = &diag_md[id];
@@ -249,14 +298,14 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
 			if (peripheral < 0)
 				goto drop_data;
 			session_info =
-			diag_md_session_get_peripheral(peripheral);
+			diag_md_session_get_peripheral(i, peripheral);
 			if (!session_info)
 				goto drop_data;
 
 			if (session_info && info &&
 				(session_info->pid != info->pid))
 				continue;
-			if ((info && (info->peripheral_mask &
+			if ((info && (info->peripheral_mask[i] &
 			    MD_PERIPHERAL_MASK(peripheral)) == 0))
 				goto drop_data;
 			pid_struct = find_get_pid(session_info->pid);
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
index 8b6cda2..e8d4a51 100644
--- a/drivers/char/diag/diag_memorydevice.h
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -4,20 +4,7 @@
 
 #ifndef DIAG_MEMORYDEVICE_H
 #define DIAG_MEMORYDEVICE_H
-
-#define DIAG_MD_LOCAL		0
-#define DIAG_MD_LOCAL_LAST	1
-#define DIAG_MD_BRIDGE_BASE	DIAG_MD_LOCAL_LAST
-#define DIAG_MD_MDM		(DIAG_MD_BRIDGE_BASE)
-#define DIAG_MD_MDM2		(DIAG_MD_BRIDGE_BASE + 1)
-#define DIAG_MD_SMUX		(DIAG_MD_BRIDGE_BASE + 2)
-#define DIAG_MD_BRIDGE_LAST	(DIAG_MD_BRIDGE_BASE + 3)
-
-#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
-#define NUM_DIAG_MD_DEV		DIAG_MD_LOCAL_LAST
-#else
-#define NUM_DIAG_MD_DEV		DIAG_MD_BRIDGE_LAST
-#endif
+#include "diagchar.h"
 
 struct diag_buf_tbl_t {
 	unsigned char *buf;
@@ -44,6 +31,8 @@ void diag_md_exit(void);
 void diag_md_mdm_exit(void);
 void diag_md_open_all(void);
 void diag_md_close_all(void);
+void diag_md_open_device(int id);
+void diag_md_close_device(int id);
 int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
 int diag_md_close_peripheral(int id, uint8_t peripheral);
 int diag_md_write(int id, unsigned char *buf, int len, int ctx);
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index ec537f1..6606c5e 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -31,6 +31,8 @@ static struct diag_logger_ops usb_log_ops = {
 	.open = diag_usb_connect_all,
 	.close = diag_usb_disconnect_all,
 	.queue_read = diag_usb_queue_read,
+	.open_device = diag_usb_connect_device,
+	.close_device = diag_usb_disconnect_device,
 	.write = diag_usb_write,
 	.close_peripheral = NULL
 };
@@ -38,6 +40,8 @@ static struct diag_logger_ops usb_log_ops = {
 static struct diag_logger_ops md_log_ops = {
 	.open = diag_md_open_all,
 	.close = diag_md_close_all,
+	.open_device = diag_md_open_device,
+	.close_device = diag_md_close_device,
 	.queue_read = NULL,
 	.write = diag_md_write,
 	.close_peripheral = diag_md_close_peripheral,
@@ -45,6 +49,7 @@ static struct diag_logger_ops md_log_ops = {
 
 int diag_mux_init(void)
 {
+	int proc;
 	diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
 			 GFP_KERNEL);
 	if (!diag_mux)
@@ -64,9 +69,11 @@ int diag_mux_init(void)
 	 */
 	diag_mux->usb_ptr = &usb_logger;
 	diag_mux->md_ptr = &md_logger;
-	diag_mux->logger = &usb_logger;
-	diag_mux->mux_mask = 0;
-	diag_mux->mode = DIAG_USB_MODE;
+	for (proc = 0; proc < NUM_MUX_PROC; proc++) {
+		diag_mux->logger[proc] = &usb_logger;
+		diag_mux->mux_mask[proc] = 0;
+		diag_mux->mode[proc] = DIAG_USB_MODE;
+	}
 	return 0;
 }
 
@@ -114,10 +121,10 @@ int diag_mux_queue_read(int proc)
 	if (!diag_mux)
 		return -EIO;
 
-	if (diag_mux->mode == DIAG_MULTI_MODE)
+	if (diag_mux->mode[proc] == DIAG_MULTI_MODE)
 		logger = diag_mux->usb_ptr;
 	else
-		logger = diag_mux->logger;
+		logger = diag_mux->logger[proc];
 
 	if (logger && logger->log_ops && logger->log_ops->queue_read)
 		return logger->log_ops->queue_read(proc);
@@ -135,15 +142,20 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
 		return -EINVAL;
 	if (!diag_mux)
 		return -EIO;
+	if (proc == DIAG_LOCAL_PROC) {
+		peripheral = diag_md_get_peripheral(ctx);
+		if (peripheral < 0) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag:%s:%d invalid peripheral = %d\n",
+				__func__, __LINE__, peripheral);
+			return -EINVAL;
+		}
 
-	peripheral = diag_md_get_peripheral(ctx);
-	if (peripheral < 0) {
-		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-			"diag: invalid peripheral = %d\n", peripheral);
-		return -EINVAL;
+	} else {
+		peripheral = 0;
 	}
 
-	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask) {
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask[proc]) {
 		logger = diag_mux->md_ptr;
 		log_sink = DIAG_MEMORY_DEVICE_MODE;
 	} else {
@@ -193,17 +205,17 @@ int diag_mux_close_peripheral(int proc, uint8_t peripheral)
 	if (!diag_mux)
 		return -EIO;
 
-	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask[proc])
 		logger = diag_mux->md_ptr;
 	else
-		logger = diag_mux->logger;
+		logger = diag_mux->logger[proc];
 
 	if (logger && logger->log_ops && logger->log_ops->close_peripheral)
 		return logger->log_ops->close_peripheral(proc, peripheral);
 	return 0;
 }
 
-int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+int diag_mux_switch_logging(int proc, int *req_mode, int *peripheral_mask)
 {
 	unsigned int new_mask = 0;
 
@@ -218,14 +230,14 @@ int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
 
 	switch (*req_mode) {
 	case DIAG_USB_MODE:
-		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask[proc];
 		if (new_mask != DIAG_CON_NONE)
 			*req_mode = DIAG_MULTI_MODE;
 		if (new_mask == DIAG_CON_ALL)
 			*req_mode = DIAG_MEMORY_DEVICE_MODE;
 		break;
 	case DIAG_MEMORY_DEVICE_MODE:
-		new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+		new_mask = (*peripheral_mask) | diag_mux->mux_mask[proc];
 		if (new_mask != DIAG_CON_ALL)
 			*req_mode = DIAG_MULTI_MODE;
 		break;
@@ -234,39 +246,39 @@ int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
 		return -EINVAL;
 	}
 
-	switch (diag_mux->mode) {
+	switch (diag_mux->mode[proc]) {
 	case DIAG_USB_MODE:
 		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
-			diag_mux->usb_ptr->log_ops->close();
-			diag_mux->logger = diag_mux->md_ptr;
-			diag_mux->md_ptr->log_ops->open();
+			diag_mux->usb_ptr->log_ops->close_device(proc);
+			diag_mux->logger[proc] = diag_mux->md_ptr;
+			diag_mux->md_ptr->log_ops->open_device(proc);
 		} else if (*req_mode == DIAG_MULTI_MODE) {
-			diag_mux->md_ptr->log_ops->open();
-			diag_mux->logger = NULL;
+			diag_mux->md_ptr->log_ops->open_device(proc);
+			diag_mux->logger[proc] = NULL;
 		}
 		break;
 	case DIAG_MEMORY_DEVICE_MODE:
 		if (*req_mode == DIAG_USB_MODE) {
-			diag_mux->md_ptr->log_ops->close();
-			diag_mux->logger = diag_mux->usb_ptr;
-			diag_mux->usb_ptr->log_ops->open();
+			diag_mux->md_ptr->log_ops->close_device(proc);
+			diag_mux->logger[proc] = diag_mux->usb_ptr;
+			diag_mux->usb_ptr->log_ops->open_device(proc);
 		} else if (*req_mode == DIAG_MULTI_MODE) {
-			diag_mux->usb_ptr->log_ops->open();
-			diag_mux->logger = NULL;
+			diag_mux->usb_ptr->log_ops->open_device(proc);
+			diag_mux->logger[proc] = NULL;
 		}
 		break;
 	case DIAG_MULTI_MODE:
 		if (*req_mode == DIAG_USB_MODE) {
-			diag_mux->md_ptr->log_ops->close();
-			diag_mux->logger = diag_mux->usb_ptr;
+			diag_mux->md_ptr->log_ops->close_device(proc);
+			diag_mux->logger[proc] = diag_mux->usb_ptr;
 		} else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
-			diag_mux->usb_ptr->log_ops->close();
-			diag_mux->logger = diag_mux->md_ptr;
+			diag_mux->usb_ptr->log_ops->close_device(proc);
+			diag_mux->logger[proc] = diag_mux->md_ptr;
 		}
 		break;
 	}
-	diag_mux->mode = *req_mode;
-	diag_mux->mux_mask = new_mask;
+	diag_mux->mode[proc] = *req_mode;
+	diag_mux->mux_mask[proc] = new_mask;
 	*peripheral_mask = new_mask;
 	return 0;
 }
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
index 72eec53..24d33bb 100644
--- a/drivers/char/diag/diag_mux.h
+++ b/drivers/char/diag/diag_mux.h
@@ -1,26 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef DIAG_MUX_H
 #define DIAG_MUX_H
-#include "diagchar.h"
-
-struct diag_mux_state_t {
-	struct diag_logger_t *logger;
-	struct diag_logger_t *usb_ptr;
-	struct diag_logger_t *md_ptr;
-	unsigned int mux_mask;
-	unsigned int mode;
-};
-
-struct diag_mux_ops {
-	int (*open)(int id, int mode);
-	int (*close)(int id, int mode);
-	int (*read_done)(unsigned char *buf, int len, int id);
-	int (*write_done)(unsigned char *buf, int len, int buf_ctx,
-			      int id);
-};
-
 #define DIAG_USB_MODE			0
 #define DIAG_MEMORY_DEVICE_MODE		1
 #define DIAG_NO_LOGGING_MODE		2
@@ -40,9 +22,27 @@ struct diag_mux_ops {
 #define NUM_MUX_PROC		DIAG_MUX_BRIDGE_LAST
 #endif
 
+struct diag_mux_state_t {
+	struct diag_logger_t *logger[NUM_MUX_PROC];
+	struct diag_logger_t *usb_ptr;
+	struct diag_logger_t *md_ptr;
+	unsigned int mux_mask[NUM_MUX_PROC];
+	unsigned int mode[NUM_MUX_PROC];
+};
+
+struct diag_mux_ops {
+	int (*open)(int id, int mode);
+	int (*close)(int id, int mode);
+	int (*read_done)(unsigned char *buf, int len, int id);
+	int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+			      int id);
+};
+
 struct diag_logger_ops {
 	void (*open)(void);
 	void (*close)(void);
+	void (*open_device)(int id);
+	void (*close_device)(int id);
 	int (*queue_read)(int id);
 	int (*write)(int id, unsigned char *buf, int len, int ctx);
 	int (*close_peripheral)(int id, uint8_t peripheral);
@@ -64,5 +64,5 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
 int diag_mux_close_peripheral(int proc, uint8_t peripheral);
 int diag_mux_open_all(struct diag_logger_t *logger);
 int diag_mux_close_all(void);
-int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+int diag_mux_switch_logging(int proc, int *new_mode, int *peripheral_mask);
 #endif
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index b5c34d1..547ced8 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -245,7 +245,8 @@ static void usb_disconnect_work_fn(struct work_struct *work)
 	ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected));
 
 	if (!atomic_read(&ch->connected) &&
-		driver->usb_connected && diag_mask_param())
+		driver->usb_connected && diag_mask_param() &&
+		ch->id == DIAG_USB_LOCAL)
 		diag_clear_masks(0);
 
 	usb_disconnect(ch);
@@ -596,6 +597,16 @@ void diag_usb_connect_all(void)
 		usb_connect(usb_info);
 	}
 }
+void diag_usb_connect_device(int id)
+{
+	struct diag_usb_info *usb_info = NULL;
+
+		usb_info = &diag_usb[id];
+		if (!usb_info->enabled)
+			return;
+		atomic_set(&usb_info->diag_state, 1);
+		usb_connect(usb_info);
+}
 
 /*
  * This functions performs USB disconnect operations wrt Diag synchronously.
@@ -616,6 +627,17 @@ void diag_usb_disconnect_all(void)
 	}
 }
 
+void diag_usb_disconnect_device(int id)
+{
+	struct diag_usb_info *usb_info = NULL;
+
+		usb_info = &diag_usb[id];
+		if (!usb_info->enabled)
+			return;
+		atomic_set(&usb_info->diag_state, 0);
+		usb_disconnect(usb_info);
+
+}
 int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
 {
 	struct diag_usb_info *ch = NULL;
diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h
index 6339bf9..c91735c 100644
--- a/drivers/char/diag/diag_usb.h
+++ b/drivers/char/diag/diag_usb.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGUSB_H
@@ -73,6 +73,8 @@ int diag_usb_queue_read(int id);
 int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
 void diag_usb_connect_all(void);
 void diag_usb_disconnect_all(void);
+void diag_usb_connect_device(int id);
+void diag_usb_disconnect_device(int id);
 void diag_usb_exit(int id);
 #else
 int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
@@ -93,6 +95,12 @@ void diag_usb_connect_all(void)
 void diag_usb_disconnect_all(void)
 {
 }
+void diag_usb_connect_device(int id)
+{
+}
+void diag_usb_disconnect_device(int id)
+{
+}
 void diag_usb_exit(int id)
 {
 }
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 5de68c4d..1719a328 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -140,6 +140,7 @@
 #define DIAG_GET_TIME_API	0x21B
 #define DIAG_SET_TIME_API	0x21C
 #define DIAG_GET_DIAG_ID	0x222
+#define DIAG_HW_ACCEL_CMD	0x224
 #define DIAG_FEATURE_QUERY	0x225
 #define DIAG_SWITCH_COMMAND	0x081B
 #define DIAG_BUFFERING_MODE	0x080C
@@ -262,6 +263,9 @@ do {						\
 #define DIAGIDV2_STATUS(f_index)	\
 	driver->diagid_v2_status[f_index]
 
+#define P_FMASK_DIAGID_V2(peripheral)	\
+	driver->feature[peripheral].diagid_v2_feature_mask
+
 /*
  * Number of stm processors includes all the peripherals and
  * apps.Added 1 below to indicate apps
@@ -367,6 +371,19 @@ enum remote_procs {
 	MDM2 = 2,
 	QSC = 5,
 };
+#define DIAG_MD_LOCAL		0
+#define DIAG_MD_LOCAL_LAST	1
+#define DIAG_MD_BRIDGE_BASE	DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM		(DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2		(DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX		(DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST	(DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV		DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV		DIAG_MD_BRIDGE_LAST
+#endif
 
 struct diag_pkt_header_t {
 	uint8_t cmd_code;
@@ -630,12 +647,14 @@ struct diag_logging_mode_param_t {
 	uint8_t pd_val;
 	uint8_t reserved;
 	int peripheral;
+	int device_mask;
 } __packed;
 
 struct diag_query_pid_t {
 	uint32_t peripheral_mask;
 	uint32_t pd_mask;
 	int pid;
+	int device_mask;
 };
 
 struct diag_con_all_param_t {
@@ -646,7 +665,7 @@ struct diag_con_all_param_t {
 
 struct diag_md_session_t {
 	int pid;
-	int peripheral_mask;
+	int peripheral_mask[NUM_DIAG_MD_DEV];
 	uint8_t hdlc_disabled;
 	uint8_t msg_mask_tbl_count;
 	struct timer_list hdlc_reset_timer;
@@ -759,7 +778,7 @@ struct diagchar_dev {
 	int dci_tag;
 	int dci_client_id[MAX_DCI_CLIENTS];
 	struct mutex dci_mutex;
-	struct mutex rpmsginfo_mutex[NUM_PERIPHERALS];
+	spinlock_t rpmsginfo_lock[NUM_PERIPHERALS];
 	int num_dci_client;
 	unsigned char *apps_dci_buf;
 	int dci_state;
@@ -836,16 +855,17 @@ struct diagchar_dev {
 	uint8_t *dci_pkt_buf; /* For Apps DCI packets */
 	uint32_t dci_pkt_length;
 	int in_busy_dcipktdata;
-	int logging_mode;
-	int logging_mask;
+	int logging_mode[NUM_DIAG_MD_DEV];
+	int logging_mask[NUM_DIAG_MD_DEV];
 	int pd_logging_mode[NUM_UPD];
 	int pd_session_clear[NUM_UPD];
 	int num_pd_session;
 	int diag_id_sent[NUM_PERIPHERALS];
 	int mask_check;
-	uint32_t md_session_mask;
-	uint8_t md_session_mode;
-	struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+	uint32_t md_session_mask[NUM_DIAG_MD_DEV];
+	uint8_t md_session_mode[NUM_DIAG_MD_DEV];
+	struct diag_md_session_t *md_session_map[NUM_DIAG_MD_DEV]
+					[NUM_MD_SESSIONS];
 	struct mutex md_session_lock;
 	/* Power related variables */
 	struct diag_ws_ref_t dci_ws;
@@ -875,6 +895,7 @@ struct diagchar_dev {
 	uint8_t uses_time_api;
 	uint32_t diagid_v2_feature[DIAGID_V2_FEATURE_COUNT];
 	uint32_t diagid_v2_status[DIAGID_V2_FEATURE_COUNT];
+	uint32_t diag_hw_accel[DIAGID_V2_FEATURE_COUNT];
 };
 
 extern struct diagchar_dev *driver;
@@ -914,8 +935,12 @@ uint8_t diag_search_diagid_by_pd(uint8_t pd_val,
 void diag_record_stats(int type, int flag);
 
 struct diag_md_session_t *diag_md_session_get_pid(int pid);
-struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
-int diag_md_session_match_pid_peripheral(int pid, uint8_t peripheral);
+struct diag_md_session_t *diag_md_session_get_peripheral(int dev_id,
+							uint8_t peripheral);
+int diag_md_session_match_pid_peripheral(int proc, int pid,
+					uint8_t peripheral);
 int diag_map_hw_accel_type_ver(uint8_t hw_accel_type, uint8_t hw_accel_ver);
+void diag_map_index_to_hw_accel(uint8_t index, uint8_t *hw_accel_type,
+			uint8_t *hw_accel_ver);
 
 #endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 6f0a373..27565bf 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -432,7 +432,8 @@ void diag_clear_masks(int pid)
 static void diag_close_logging_process(const int pid)
 {
 	int i, j;
-	int session_mask;
+	int session_mask = 0;
+	int device_mask = 0;
 	uint32_t p_mask;
 	struct diag_md_session_t *session_info = NULL;
 	struct diag_logging_mode_param_t params;
@@ -443,15 +444,21 @@ static void diag_close_logging_process(const int pid)
 		mutex_unlock(&driver->md_session_lock);
 		return;
 	}
-	session_mask = session_info->peripheral_mask;
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		if (session_info->peripheral_mask[i]) {
+			session_mask = session_info->peripheral_mask[i];
+			device_mask = device_mask | (1 << i);
+		}
+	}
 	mutex_unlock(&driver->md_session_lock);
 
 	if (diag_mask_clear_param)
 		diag_clear_masks(pid);
 
 	mutex_lock(&driver->diagchar_mutex);
-	p_mask =
-	diag_translate_kernel_to_user_mask(session_mask);
+	if (session_mask)
+		p_mask =
+		diag_translate_kernel_to_user_mask(session_mask);
 
 	for (i = 0; i < NUM_MD_SESSIONS; i++)
 		if (MD_PERIPHERAL_MASK(i) & session_mask)
@@ -461,6 +468,7 @@ static void diag_close_logging_process(const int pid)
 	params.mode_param = 0;
 	params.pd_mask = 0;
 	params.peripheral_mask = p_mask;
+	params.device_mask = device_mask;
 
 	if (driver->num_pd_session > 0) {
 		for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
@@ -1245,41 +1253,45 @@ static int mask_request_validate(unsigned char mask_buf[], int len)
 
 static void diag_md_session_init(void)
 {
-	int i;
+	int i, proc;
 
 	mutex_init(&driver->md_session_lock);
-	driver->md_session_mask = 0;
-	driver->md_session_mode = DIAG_MD_NONE;
-	for (i = 0; i < NUM_MD_SESSIONS; i++)
-		driver->md_session_map[i] = NULL;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		driver->md_session_mask[proc] = 0;
+		driver->md_session_mode[proc] = DIAG_MD_NONE;
+		for (i = 0; i < NUM_MD_SESSIONS; i++)
+			driver->md_session_map[proc][i] = NULL;
+	}
 }
 
 static void diag_md_session_exit(void)
 {
-	int i;
+	int i, proc;
 	struct diag_md_session_t *session_info = NULL;
 
-	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (driver->md_session_map[i]) {
-			session_info = driver->md_session_map[i];
-			diag_log_mask_free(session_info->log_mask);
-			kfree(session_info->log_mask);
-			session_info->log_mask = NULL;
-			diag_msg_mask_free(session_info->msg_mask,
-				session_info);
-			kfree(session_info->msg_mask);
-			session_info->msg_mask = NULL;
-			diag_event_mask_free(session_info->event_mask);
-			kfree(session_info->event_mask);
-			session_info->event_mask = NULL;
-			kfree(session_info);
-			session_info = NULL;
-			driver->md_session_map[i] = NULL;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (driver->md_session_map[proc][i]) {
+				session_info = driver->md_session_map[proc][i];
+				diag_log_mask_free(session_info->log_mask);
+				kfree(session_info->log_mask);
+				session_info->log_mask = NULL;
+				diag_msg_mask_free(session_info->msg_mask,
+					session_info);
+				kfree(session_info->msg_mask);
+				session_info->msg_mask = NULL;
+				diag_event_mask_free(session_info->event_mask);
+				kfree(session_info->event_mask);
+				session_info->event_mask = NULL;
+				kfree(session_info);
+				session_info = NULL;
+				driver->md_session_map[proc][i] = NULL;
+			}
 		}
+		mutex_destroy(&driver->md_session_lock);
+		driver->md_session_mask[proc] = 0;
+		driver->md_session_mode[proc] = DIAG_MD_NONE;
 	}
-	mutex_destroy(&driver->md_session_lock);
-	driver->md_session_mask = 0;
-	driver->md_session_mode = DIAG_MD_NONE;
 }
 
 int diag_md_session_create(int mode, int peripheral_mask, int proc)
@@ -1293,76 +1305,81 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc)
 	 * request comes in with same peripheral mask value then return
 	 * invalid param
 	 */
-	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
-	    (driver->md_session_mask & peripheral_mask) != 0)
+	if (driver->md_session_mode[proc] == DIAG_MD_PERIPHERAL &&
+	    (driver->md_session_mask[proc] & peripheral_mask) != 0)
 		return -EINVAL;
 
 	mutex_lock(&driver->md_session_lock);
-	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+	new_session = diag_md_session_get_pid(current->tgid);
 	if (!new_session) {
-		mutex_unlock(&driver->md_session_lock);
-		return -ENOMEM;
-	}
-	new_session->peripheral_mask = 0;
-	new_session->pid = current->tgid;
-	new_session->task = current;
-	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+		new_session = kzalloc(sizeof(struct diag_md_session_t),
 					GFP_KERNEL);
-	if (!new_session->log_mask) {
-		err = -ENOMEM;
-		goto fail_peripheral;
-	}
-	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
-					  GFP_KERNEL);
-	if (!new_session->event_mask) {
-		err = -ENOMEM;
-		goto fail_peripheral;
-	}
-	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
-					GFP_KERNEL);
-	if (!new_session->msg_mask) {
-		err = -ENOMEM;
-		goto fail_peripheral;
-	}
+		if (!new_session) {
+			mutex_unlock(&driver->md_session_lock);
+			return -ENOMEM;
+		}
+		new_session->peripheral_mask[proc] = 0;
+		new_session->pid = current->tgid;
+		new_session->task = current;
+		new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+						GFP_KERNEL);
+		if (!new_session->log_mask) {
+			err = -ENOMEM;
+			goto fail_peripheral;
+		}
+		new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+						  GFP_KERNEL);
+		if (!new_session->event_mask) {
+			err = -ENOMEM;
+			goto fail_peripheral;
+		}
+		new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+						GFP_KERNEL);
+		if (!new_session->msg_mask) {
+			err = -ENOMEM;
+			goto fail_peripheral;
+		}
 
-	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
-	if (err) {
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			 "return value of log copy. err %d\n", err);
-		goto fail_peripheral;
-	}
-	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
-	if (err) {
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			 "return value of event copy. err %d\n", err);
-		goto fail_peripheral;
-	}
-	new_session->msg_mask_tbl_count = 0;
-	err = diag_msg_mask_copy(new_session, new_session->msg_mask,
-		&msg_mask);
-	if (err) {
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			 "return value of msg copy. err %d\n", err);
-		goto fail_peripheral;
+		err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+		if (err) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "return value of log copy. err %d\n", err);
+			goto fail_peripheral;
+		}
+		err = diag_event_mask_copy(new_session->event_mask,
+								   &event_mask);
+		if (err) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "return value of event copy. err %d\n", err);
+			goto fail_peripheral;
+		}
+		new_session->msg_mask_tbl_count = 0;
+		err = diag_msg_mask_copy(new_session, new_session->msg_mask,
+					&msg_mask);
+		if (err) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "return value of msg copy. err %d\n", err);
+			goto fail_peripheral;
+		}
 	}
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
 		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
 			continue;
-		if (driver->md_session_map[i] != NULL) {
+		if (driver->md_session_map[proc][i] != NULL) {
 			DIAG_LOG(DIAG_DEBUG_USERSPACE,
 				 "another instance present for %d\n", i);
 			err = -EEXIST;
 			goto fail_peripheral;
 		}
-		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
-		driver->md_session_map[i] = new_session;
-		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+		new_session->peripheral_mask[proc] |= MD_PERIPHERAL_MASK(i);
+		driver->md_session_map[proc][i] = new_session;
+		driver->md_session_mask[proc] |= MD_PERIPHERAL_MASK(i);
 	}
 	timer_setup(&new_session->hdlc_reset_timer,
 		diag_md_hdlc_reset_timer_func,
 		0);
 
-	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	driver->md_session_mode[proc] = DIAG_MD_PERIPHERAL;
 	mutex_unlock(&driver->md_session_lock);
 	DIAG_LOG(DIAG_DEBUG_USERSPACE,
 		 "created session in peripheral mode\n");
@@ -1390,16 +1407,19 @@ static void diag_md_session_close(int pid)
 	int i;
 	uint8_t found = 0;
 	struct diag_md_session_t *session_info = NULL;
+	int proc;
 
 	session_info = diag_md_session_get_pid(pid);
 	if (!session_info)
 		return;
-
-	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (driver->md_session_map[i] != session_info)
-			continue;
-		driver->md_session_map[i] = NULL;
-		driver->md_session_mask &= ~session_info->peripheral_mask;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (driver->md_session_map[proc][i] != session_info)
+				continue;
+			driver->md_session_map[proc][i] = NULL;
+			driver->md_session_mask[proc] &=
+					~session_info->peripheral_mask[proc];
+		}
 	}
 	diag_log_mask_free(session_info->log_mask);
 	kfree(session_info->log_mask);
@@ -1412,13 +1432,15 @@ static void diag_md_session_close(int pid)
 	kfree(session_info->event_mask);
 	session_info->event_mask = NULL;
 	del_timer(&session_info->hdlc_reset_timer);
-
-	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
-		if (driver->md_session_map[i] != NULL)
-			found = 1;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+			if (driver->md_session_map[proc][i] != NULL)
+				found = 1;
+		}
+		driver->md_session_mode[proc] = (found) ? DIAG_MD_PERIPHERAL :
+								DIAG_MD_NONE;
+		found = 0;
 	}
-
-	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
 	kfree(session_info);
 	session_info = NULL;
 	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
@@ -1427,22 +1449,27 @@ static void diag_md_session_close(int pid)
 struct diag_md_session_t *diag_md_session_get_pid(int pid)
 {
 	int i;
+	int proc;
 
 	if (pid <= 0)
 		return NULL;
-	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (driver->md_session_map[i] &&
-		    driver->md_session_map[i]->pid == pid)
-			return driver->md_session_map[i];
+
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (driver->md_session_map[proc][i] &&
+				driver->md_session_map[proc][i]->pid == pid)
+				return driver->md_session_map[proc][i];
+		}
 	}
 	return NULL;
 }
 
-struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+struct diag_md_session_t *diag_md_session_get_peripheral(int proc,
+				uint8_t peripheral)
 {
 	if (peripheral >= NUM_MD_SESSIONS)
 		return NULL;
-	return driver->md_session_map[peripheral];
+	return driver->md_session_map[proc][peripheral];
 }
 
 /*
@@ -1454,7 +1481,7 @@ struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
  *
  */
 
-int diag_md_session_match_pid_peripheral(int pid,
+int diag_md_session_match_pid_peripheral(int proc, int pid,
 	uint8_t peripheral)
 {
 	int i, flag = 0;
@@ -1463,26 +1490,29 @@ int diag_md_session_match_pid_peripheral(int pid,
 		return -EINVAL;
 
 	if (!peripheral) {
-		for (i = 0; i < NUM_MD_SESSIONS; i++) {
-			if (driver->md_session_map[i] &&
-			    driver->md_session_map[i]->pid == pid) {
-				peripheral |= 1 << i;
-				flag = 1;
+		for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+			for (i = 0; i < NUM_MD_SESSIONS; i++) {
+				if (driver->md_session_map[proc][i] &&
+					driver->md_session_map[proc][i]->pid ==
+								pid) {
+					peripheral |= 1 << i;
+					flag = 1;
+				}
 			}
+			if (flag)
+				return peripheral;
 		}
-		if (flag)
-			return peripheral;
 	}
 
 	if (!pid) {
-		if (driver->md_session_map[peripheral])
-			return driver->md_session_map[peripheral]->pid;
+		if (driver->md_session_map[proc][peripheral])
+			return driver->md_session_map[proc][peripheral]->pid;
 	}
 
 	return -EINVAL;
 }
 
-static int diag_md_peripheral_switch(int pid,
+static int diag_md_peripheral_switch(int proc, int pid,
 				int peripheral_mask, int req_mode)
 {
 	int i, bit = 0;
@@ -1491,7 +1521,7 @@ static int diag_md_peripheral_switch(int pid,
 	session_info = diag_md_session_get_pid(pid);
 	if (!session_info)
 		return -EINVAL;
-	if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
 		return -EINVAL;
 
 	/*
@@ -1503,28 +1533,29 @@ static int diag_md_peripheral_switch(int pid,
 		if (!bit)
 			continue;
 		if (req_mode == DIAG_USB_MODE) {
-			if (driver->md_session_map[i] != session_info)
+			if (driver->md_session_map[proc][i] != session_info)
 				return -EINVAL;
-			driver->md_session_map[i] = NULL;
-			driver->md_session_mask &= ~bit;
-			session_info->peripheral_mask &= ~bit;
+			driver->md_session_map[proc][i] = NULL;
+			driver->md_session_mask[proc] &= ~bit;
+			session_info->peripheral_mask[proc] &= ~bit;
 
 		} else {
-			if (driver->md_session_map[i] != NULL)
+			if (driver->md_session_map[proc][i] != NULL)
 				return -EINVAL;
-			driver->md_session_map[i] = session_info;
-			driver->md_session_mask |= bit;
-			session_info->peripheral_mask |= bit;
+			driver->md_session_map[proc][i] = session_info;
+			driver->md_session_mask[proc] |= bit;
+			session_info->peripheral_mask[proc] |= bit;
 
 		}
 	}
 
-	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	driver->md_session_mode[proc] = DIAG_MD_PERIPHERAL;
 	DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
 		peripheral_mask, req_mode);
+	return 0;
 }
 
-static int diag_md_session_check(int curr_mode, int req_mode,
+static int diag_md_session_check(int proc, int curr_mode, int req_mode,
 				 const struct diag_logging_mode_param_t *param,
 				 uint8_t *change_mode)
 {
@@ -1553,8 +1584,9 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 		if (curr_mode == DIAG_USB_MODE)
 			return 0;
 		mutex_lock(&driver->md_session_lock);
-		if (driver->md_session_mode == DIAG_MD_NONE
-		    && driver->md_session_mask == 0 && driver->logging_mask) {
+		if (driver->md_session_mode[proc] == DIAG_MD_NONE
+		    && driver->md_session_mask[proc] == 0 &&
+			driver->logging_mask[proc]) {
 			*change_mode = 1;
 			mutex_unlock(&driver->md_session_lock);
 			return 0;
@@ -1567,7 +1599,7 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
 			if (!bit)
 				continue;
-			if (bit & driver->logging_mask)
+			if (bit & driver->logging_mask[proc])
 				change_mask |= bit;
 		}
 		if (!change_mask) {
@@ -1588,7 +1620,7 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 			mutex_unlock(&driver->md_session_lock);
 			return 0;
 		}
-		peripheral_mask = session_info->peripheral_mask;
+		peripheral_mask = session_info->peripheral_mask[proc];
 		if ((change_mask & peripheral_mask)
 							!= change_mask) {
 			DIAG_LOG(DIAG_DEBUG_USERSPACE,
@@ -1600,7 +1632,7 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 
 		/* If all peripherals are being set to USB Mode, call close */
 		if (~change_mask & peripheral_mask) {
-			err = diag_md_peripheral_switch(current->tgid,
+			err = diag_md_peripheral_switch(proc, current->tgid,
 					change_mask, DIAG_USB_MODE);
 		} else
 			diag_md_session_close(current->tgid);
@@ -1614,18 +1646,20 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 		 * owned by this md session
 		 */
 		mutex_lock(&driver->md_session_lock);
-		change_mask = driver->md_session_mask & param->peripheral_mask;
+		change_mask = driver->md_session_mask[proc] &
+				param->peripheral_mask;
 		session_info = diag_md_session_get_pid(current->tgid);
 
-		if (session_info) {
-			if ((session_info->peripheral_mask & change_mask)
+		if (session_info && driver->md_session_mode[proc] !=
+							DIAG_MD_NONE) {
+			if ((session_info->peripheral_mask[proc] & change_mask)
 							!= change_mask) {
 				DIAG_LOG(DIAG_DEBUG_USERSPACE,
 				    "Another MD Session owns a requested peripheral\n");
 				mutex_unlock(&driver->md_session_lock);
 				return -EINVAL;
 			}
-			err = diag_md_peripheral_switch(current->tgid,
+			err = diag_md_peripheral_switch(proc, current->tgid,
 					change_mask, DIAG_USB_MODE);
 			mutex_unlock(&driver->md_session_lock);
 		} else {
@@ -1636,7 +1670,7 @@ static int diag_md_session_check(int curr_mode, int req_mode,
 				return -EINVAL;
 			}
 			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
-				param->peripheral_mask, DIAG_LOCAL_PROC);
+				param->peripheral_mask, proc);
 			mutex_lock(&driver->hdlc_disable_mutex);
 			for (i = 0; i < NUM_MD_SESSIONS; i++) {
 				if ((param->peripheral_mask > 0) &&
@@ -1724,12 +1758,81 @@ static void diag_switch_logging_clear_mask(
 		diag_clear_masks(pid);
 
 }
+static int diag_switch_logging_proc(struct diag_logging_mode_param_t *param,
+					int new_mode, uint32_t peripheral_mask)
+{
+	int proc = 0, local_proc = 0, err = 0, curr_mode;
+	uint8_t do_switch = 1;
 
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		local_proc = 1 << proc;
+		if (param->device_mask & (local_proc)) {
+			curr_mode = driver->logging_mode[proc];
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				"request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
+				curr_mode, driver->md_session_mask[proc],
+				new_mode, peripheral_mask);
+
+			err = diag_md_session_check(proc, curr_mode, new_mode,
+							param, &do_switch);
+			if (err) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+					"err from diag_md_session_check, err: %d\n",
+					err);
+				return err;
+			}
+
+			if (do_switch == 0) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+					"not switching modes c: %d n: %d\n",
+					curr_mode, new_mode);
+				return 0;
+			}
+
+			diag_ws_reset(DIAG_WS_MUX);
+			err = diag_mux_switch_logging(proc, &new_mode,
+					&peripheral_mask);
+			if (err) {
+				pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+					__func__, curr_mode, new_mode, err);
+				driver->logging_mode[proc] = curr_mode;
+				return err;
+			}
+			driver->logging_mode[proc] = new_mode;
+			driver->logging_mask[proc] = peripheral_mask;
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				"Switch logging to %d mask:%0x\n", new_mode,
+					peripheral_mask);
+
+			/* Update to take peripheral_mask */
+			if (new_mode != DIAG_MEMORY_DEVICE_MODE &&
+				new_mode != DIAG_MULTI_MODE) {
+				diag_update_real_time_vote(
+						DIAG_PROC_MEMORY_DEVICE,
+						MODE_REALTIME,
+						ALL_PROC);
+			} else {
+				diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE,
+							VOTE_UP,
+							ALL_PROC);
+			}
+
+			if (!((new_mode == DIAG_MEMORY_DEVICE_MODE ||
+				new_mode == DIAG_MULTI_MODE) &&
+				curr_mode == DIAG_USB_MODE)) {
+				queue_work(driver->diag_real_time_wq,
+					   &driver->diag_real_time_work);
+			}
+		}
+		peripheral_mask =
+			diag_translate_mask(param->peripheral_mask);
+	}
+	return err;
+}
 static int diag_switch_logging(struct diag_logging_mode_param_t *param)
 {
 	int new_mode, i = 0;
-	int curr_mode, err = 0, peripheral = 0;
-	uint8_t do_switch = 1;
+	int err = 0, peripheral = 0;
 	uint32_t peripheral_mask = 0, pd_mask = 0;
 
 	if (!param)
@@ -1740,6 +1843,11 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
 			"asking for mode switch with no peripheral mask set\n");
 		return -EINVAL;
 	}
+	if (!param->device_mask) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"asking for mode switch with no device mask set\n");
+		return -EINVAL;
+	}
 
 	if (param->pd_mask) {
 		pd_mask = diag_translate_mask(param->pd_mask);
@@ -1780,9 +1888,9 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
 		}
 		i = param->pd_val - UPD_WLAN;
 		mutex_lock(&driver->md_session_lock);
-		if (driver->md_session_map[peripheral] &&
+		if (driver->md_session_map[DIAG_LOCAL_PROC][peripheral] &&
 			(MD_PERIPHERAL_MASK(peripheral) &
-			diag_mux->mux_mask) &&
+			diag_mux->mux_mask[DIAG_LOCAL_PROC]) &&
 			!driver->pd_session_clear[i]) {
 			DIAG_LOG(DIAG_DEBUG_USERSPACE,
 			"diag_fr: User PD is already logging onto active peripheral logging\n");
@@ -1820,58 +1928,8 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
 		       __func__, param->req_mode);
 		return -EINVAL;
 	}
+	err = diag_switch_logging_proc(param, new_mode, peripheral_mask);
 
-	curr_mode = driver->logging_mode;
-	DIAG_LOG(DIAG_DEBUG_USERSPACE,
-		"request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
-		curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
-
-	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
-	if (err) {
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			 "err from diag_md_session_check, err: %d\n", err);
-		return err;
-	}
-
-	if (do_switch == 0) {
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			 "not switching modes c: %d n: %d\n",
-			 curr_mode, new_mode);
-		return 0;
-	}
-
-	diag_ws_reset(DIAG_WS_MUX);
-	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
-	if (err) {
-		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
-		       __func__, curr_mode, new_mode, err);
-		driver->logging_mode = curr_mode;
-		goto fail;
-	}
-	driver->logging_mode = new_mode;
-	driver->logging_mask = peripheral_mask;
-	DIAG_LOG(DIAG_DEBUG_USERSPACE,
-		"Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
-
-	/* Update to take peripheral_mask */
-	if (new_mode != DIAG_MEMORY_DEVICE_MODE &&
-		new_mode != DIAG_MULTI_MODE) {
-		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
-					   MODE_REALTIME, ALL_PROC);
-	} else {
-		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
-				      ALL_PROC);
-	}
-
-	if (!((new_mode == DIAG_MEMORY_DEVICE_MODE ||
-		new_mode == DIAG_MULTI_MODE) &&
-	    curr_mode == DIAG_USB_MODE)) {
-		queue_work(driver->diag_real_time_wq,
-			   &driver->diag_real_time_work);
-	}
-
-	return 0;
-fail:
 	return err;
 }
 
@@ -2158,7 +2216,7 @@ static int diag_ioctl_dci_support(unsigned long ioarg)
 static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
 {
 	uint8_t hdlc_support, i;
-	int peripheral = -EINVAL;
+	int peripheral = -EINVAL, proc = DIAG_LOCAL_PROC;
 	struct diag_md_session_t *session_info = NULL;
 
 	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
@@ -2174,18 +2232,19 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
 		driver->hdlc_disabled = hdlc_support;
 
 	peripheral =
-		diag_md_session_match_pid_peripheral(current->tgid,
+		diag_md_session_match_pid_peripheral(DIAG_LOCAL_PROC,
+							current->tgid,
 		0);
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
 		if (peripheral > 0 && session_info) {
 			if (peripheral & (1 << i))
 				driver->p_hdlc_disabled[i] =
 				session_info->hdlc_disabled;
-			else if (!diag_md_session_get_peripheral(i))
+			else if (!diag_md_session_get_peripheral(proc, i))
 				driver->p_hdlc_disabled[i] =
 				driver->hdlc_disabled;
 		} else {
-			if (!diag_md_session_get_peripheral(i))
+			if (!diag_md_session_get_peripheral(proc, i))
 				driver->p_hdlc_disabled[i] =
 				driver->hdlc_disabled;
 		}
@@ -2365,8 +2424,27 @@ static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
 	return ret;
 }
 
-int diag_map_hw_accel_type_ver(
-	uint8_t hw_accel_type, uint8_t hw_accel_ver)
+void diag_map_index_to_hw_accel(uint8_t index,
+	uint8_t *hw_accel_type, uint8_t *hw_accel_ver)
+{
+	*hw_accel_type = 0;
+	*hw_accel_ver = 0;
+
+	switch (index) {
+	case DIAG_HW_ACCEL_TYPE_STM:
+		*hw_accel_type = DIAG_HW_ACCEL_TYPE_STM;
+		*hw_accel_ver = DIAG_HW_ACCEL_VER_MIN;
+		break;
+	case DIAG_HW_ACCEL_TYPE_ATB:
+		*hw_accel_type = DIAG_HW_ACCEL_TYPE_ATB;
+		*hw_accel_ver = DIAG_HW_ACCEL_VER_MIN;
+		break;
+	default:
+		break;
+	}
+}
+
+int diag_map_hw_accel_type_ver(uint8_t hw_accel_type, uint8_t hw_accel_ver)
 {
 	int index = -EINVAL;
 
@@ -2425,26 +2503,22 @@ static int diag_ioctl_query_pd_featuremask(
 static int diag_ioctl_passthru_control_func(
 	struct diag_hw_accel_cmd_req_t *req_params)
 {
-	return diag_send_passtru_ctrl_pkt(req_params);
+	return diag_send_passthru_ctrl_pkt(req_params);
 }
 
-static void diag_ioctl_query_session_pid(struct diag_query_pid_t *param)
+static void diag_query_session_pid(struct diag_query_pid_t *param)
 {
 	int prev_pid = 0, test_pid = 0, i = 0, count = 0;
-	uint32_t pd_mask = 0, peripheral_mask = 0;
-	struct diag_md_session_t *info = NULL;
+	int local_proc = 0;
+	int proc = 0;
 
-	param->pid = 0;
-
-	if (param->pd_mask && param->peripheral_mask) {
-		param->pid = -EINVAL;
-		return;
-	} else if (param->peripheral_mask) {
-		if (param->peripheral_mask == DIAG_CON_ALL) {
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		local_proc = 1<<proc;
+		if (param->device_mask & (local_proc)) {
 			for (i = 0; i <= NUM_PERIPHERALS; i++) {
-				if (driver->md_session_map[i]) {
+				if (driver->md_session_map[proc][i]) {
 					test_pid =
-					driver->md_session_map[i]->pid;
+					driver->md_session_map[proc][i]->pid;
 					count++;
 					if (!prev_pid)
 						prev_pid = test_pid;
@@ -2457,16 +2531,35 @@ static void diag_ioctl_query_session_pid(struct diag_query_pid_t *param)
 			}
 			if (i == count && prev_pid)
 				param->pid = prev_pid;
+		}
+	}
+}
+static void diag_ioctl_query_session_pid(struct diag_query_pid_t *param)
+{
+	int prev_pid = 0, test_pid = 0, i = 0;
+	unsigned int proc = DIAG_LOCAL_PROC;
+	uint32_t pd_mask = 0, peripheral_mask = 0;
+	struct diag_md_session_t *info = NULL;
+
+	param->pid = 0;
+
+	if (param->pd_mask && param->peripheral_mask) {
+		param->pid = -EINVAL;
+		return;
+	} else if (param->peripheral_mask) {
+		if (param->peripheral_mask == DIAG_CON_ALL) {
+			diag_query_session_pid(param);
 		} else {
 			peripheral_mask =
 				diag_translate_mask(param->peripheral_mask);
 			for (i = 0; i <= NUM_PERIPHERALS; i++) {
-				if (driver->md_session_map[i] &&
+				if (driver->md_session_map[proc][i] &&
 					(peripheral_mask &
 					MD_PERIPHERAL_MASK(i))) {
-					info = driver->md_session_map[i];
+					info =
+					driver->md_session_map[proc][i];
 					if (peripheral_mask !=
-						info->peripheral_mask) {
+					info->peripheral_mask[proc]) {
 						DIAG_LOG(DIAG_DEBUG_USERSPACE,
 						"diag: Invalid Peripheral mask given as input\n");
 						param->pid = -EINVAL;
@@ -2489,10 +2582,12 @@ static void diag_ioctl_query_session_pid(struct diag_query_pid_t *param)
 		pd_mask =
 			diag_translate_mask(param->pd_mask);
 		for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
-			if (driver->md_session_map[i] &&
+			if (driver->md_session_map[proc][i] &&
 				(pd_mask & MD_PERIPHERAL_MASK(i))) {
-				info = driver->md_session_map[i];
-				if (pd_mask != info->peripheral_mask) {
+				info =
+				driver->md_session_map[proc][i];
+				if (pd_mask !=
+				info->peripheral_mask[proc]) {
 					DIAG_LOG(DIAG_DEBUG_USERSPACE,
 					"diag: Invalid PD mask given as input\n");
 					param->pid = -EINVAL;
@@ -2525,13 +2620,13 @@ static int diag_ioctl_register_callback(unsigned long ioarg)
 		return -EFAULT;
 	}
 
-	if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+	if (reg.proc < 0 || reg.proc >= NUM_DIAG_MD_DEV) {
 		pr_err("diag: In %s, invalid proc %d for callback registration\n",
 		       __func__, reg.proc);
 		return -EINVAL;
 	}
 
-	if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+	if (driver->md_session_mode[reg.proc] == DIAG_MD_PERIPHERAL)
 		return -EIO;
 
 	return err;
@@ -3482,6 +3577,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	struct diag_md_session_t *session_info = NULL;
 	struct pid *pid_struct = NULL;
 	struct task_struct *task_s = NULL;
+	int proc = 0;
 
 	mutex_lock(&driver->diagchar_mutex);
 	for (i = 0; i < driver->num_clients; i++)
@@ -3502,26 +3598,34 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 
 	mutex_lock(&driver->diagchar_mutex);
 
-	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
-	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
-	     driver->logging_mode == DIAG_MULTI_MODE)) {
-		pr_debug("diag: process woken up\n");
-		/*Copy the type of data being passed*/
-		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
-		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-		atomic_dec(&driver->data_ready_notif[index]);
-		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
-		if (ret == -EFAULT)
-			goto exit;
-		/* place holder for number of data field */
-		ret += sizeof(int);
-		mutex_lock(&driver->md_session_lock);
-		session_info = diag_md_session_get_pid(current->tgid);
-		exit_stat = diag_md_copy_to_user(buf, &ret, count,
+	if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+		for (proc = 0 ; proc < NUM_DIAG_MD_DEV; proc++) {
+			if (driver->logging_mode[proc] ==
+				DIAG_MEMORY_DEVICE_MODE ||
+			 driver->logging_mode[proc] == DIAG_MULTI_MODE) {
+				pr_debug("diag: process woken up\n");
+				/*Copy the type of data being passed*/
+				data_type = driver->data_ready[index] &
+						USER_SPACE_DATA_TYPE;
+				driver->data_ready[index] ^=
+						USER_SPACE_DATA_TYPE;
+				atomic_dec(&driver->data_ready_notif[index]);
+				COPY_USER_SPACE_OR_ERR(buf, data_type,
+							sizeof(int));
+				if (ret == -EFAULT)
+					goto exit;
+				/* place holder for number of data field */
+				ret += sizeof(int);
+				mutex_lock(&driver->md_session_lock);
+				session_info =
+					diag_md_session_get_pid(current->tgid);
+				exit_stat = diag_md_copy_to_user(buf, &ret,
+							count,
 						 session_info);
-		mutex_unlock(&driver->md_session_lock);
-		goto exit;
-	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		}
 		/* In case, the thread wakes up and the logging mode is not
 		 * memory device any more, the condition needs to be cleared.
 		 */
@@ -3569,7 +3673,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
 		mutex_lock(&driver->md_session_lock);
-		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
@@ -3589,7 +3694,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
 		mutex_lock(&driver->md_session_lock);
-		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
@@ -3623,7 +3729,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
 		mutex_lock(&driver->md_session_lock);
-		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
@@ -3809,6 +3916,7 @@ static ssize_t diagchar_write(struct file *file, const char __user *buf,
 	int err = 0;
 	int pkt_type = 0;
 	int payload_len = 0;
+	int token = 0;
 	const char __user *payload_buf = NULL;
 
 	/*
@@ -3828,7 +3936,18 @@ static ssize_t diagchar_write(struct file *file, const char __user *buf,
 		return -EIO;
 	}
 
-	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+	err = copy_from_user(&token, buf+4, sizeof(int));
+	if (err) {
+		pr_err("diag: copy failed for user space data\n");
+		return -EIO;
+	}
+	if (token < 0)
+		token = diag_get_remote(token);
+	else
+		token = 0;
+
+	if (driver->logging_mode[token] == DIAG_USB_MODE &&
+		!driver->usb_connected) {
 		if (!((pkt_type == DCI_DATA_TYPE) ||
 		    (pkt_type == DCI_PKT_TYPE) ||
 		    (pkt_type & DATA_TYPE_DCI_LOG) ||
@@ -3866,7 +3985,8 @@ static ssize_t diagchar_write(struct file *file, const char __user *buf,
 		 * stream. If USB is not connected and we are not in memory
 		 * device mode, we should not process these logs/events.
 		 */
-		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+		if (pkt_type && driver->logging_mode[DIAG_LOCAL_PROC] ==
+			DIAG_USB_MODE &&
 		    !driver->usb_connected)
 			return err;
 	}
@@ -4173,6 +4293,7 @@ static int __init diagchar_init(void)
 {
 	dev_t dev;
 	int ret, i;
+	int proc;
 
 	pr_debug("diagfwd initializing ..\n");
 	ret = 0;
@@ -4203,7 +4324,8 @@ static int __init diagchar_init(void)
 	diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
 			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
 	driver->num_clients = max_clients;
-	driver->logging_mode = DIAG_USB_MODE;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++)
+		driver->logging_mode[proc] = DIAG_USB_MODE;
 	for (i = 0; i < NUM_UPD; i++) {
 		driver->pd_logging_mode[i] = 0;
 		driver->pd_session_clear[i] = 0;
@@ -4228,7 +4350,7 @@ static int __init diagchar_init(void)
 	mutex_init(&driver->hdlc_recovery_mutex);
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
 		mutex_init(&driver->diagfwd_channel_mutex[i]);
-		mutex_init(&driver->rpmsginfo_mutex[i]);
+		spin_lock_init(&driver->rpmsginfo_lock[i]);
 		driver->diag_id_sent[i] = 0;
 	}
 	init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 0a03cc6..950d04c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -176,39 +176,45 @@ void chk_logging_wakeup(void)
 	int i;
 	int j;
 	int pid = 0;
+	int proc;
 
-	for (j = 0; j < NUM_MD_SESSIONS; j++) {
-		if (!driver->md_session_map[j])
-			continue;
-		pid = driver->md_session_map[j]->pid;
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++) {
+		for (j = 0; j < NUM_MD_SESSIONS; j++) {
+			if (!driver->md_session_map[proc][j])
+				continue;
+			pid = driver->md_session_map[proc][j]->pid;
 
-		/* Find the index of the logging process */
-		for (i = 0; i < driver->num_clients; i++) {
-			if (driver->client_map[i].pid != pid)
-				continue;
-			if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
-				continue;
+			/* Find the index of the logging process */
+			for (i = 0; i < driver->num_clients; i++) {
+				if (driver->client_map[i].pid != pid)
+					continue;
+				if (driver->data_ready[i] &
+					USER_SPACE_DATA_TYPE)
+					continue;
+				/*
+				 * At very high logging rates a race condition
+				 * can occur where the buffers containing the
+				 * data from a channel are all in use, but the
+				 * data_ready flag is cleared. In this case,
+				 * the buffers never have their data
+				 * read/logged. Detect and remedy this
+				 * situation.
+				 */
+				driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+				atomic_inc(&driver->data_ready_notif[i]);
+				pr_debug("diag: Force wakeup of logging process\n");
+				wake_up_interruptible(&driver->wait_q);
+				break;
+			}
 			/*
-			 * At very high logging rates a race condition can
-			 * occur where the buffers containing the data from
-			 * a channel are all in use, but the data_ready flag
-			 * is cleared. In this case, the buffers never have
-			 * their data read/logged. Detect and remedy this
-			 * situation.
+			 * Diag Memory Device is in normal mode. Check only the
+			 * first index as all the indices point to the same
+			 * session structure.
 			 */
-			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
-			atomic_inc(&driver->data_ready_notif[i]);
-			pr_debug("diag: Force wakeup of logging process\n");
-			wake_up_interruptible(&driver->wait_q);
-			break;
+			if ((driver->md_session_mask[proc] == DIAG_CON_ALL) &&
+				(j == 0))
+				break;
 		}
-		/*
-		 * Diag Memory Device is in normal. Check only for the first
-		 * index as all the indices point to the same session
-		 * structure.
-		 */
-		if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
-			break;
 	}
 }
 
@@ -235,16 +241,16 @@ static void pack_rsp_and_send(unsigned char *buf, int len,
 	mutex_lock(&driver->md_session_lock);
 	session_info = diag_md_session_get_pid(pid);
 	info = (session_info) ? session_info :
-				diag_md_session_get_peripheral(APPS_DATA);
+		diag_md_session_get_peripheral(DIAG_LOCAL_PROC, APPS_DATA);
 
 	/*
 	 * Explicitly check for the Peripheral Modem here
 	 * is necessary till a way to identify a peripheral
 	 * if its supporting qshrink4 feature.
 	 */
-	if (info && info->peripheral_mask) {
+	if (info && info->peripheral_mask[DIAG_LOCAL_PROC]) {
 		for (i = 0; i < NUM_MD_SESSIONS; i++) {
-			if (info->peripheral_mask & (1 << i))
+			if (info->peripheral_mask[DIAG_LOCAL_PROC] & (1 << i))
 				break;
 		}
 		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
@@ -273,8 +279,10 @@ static void pack_rsp_and_send(unsigned char *buf, int len,
 		 * for responses. Make sure we don't miss previous wakeups for
 		 * draining responses when we are in Memory Device Mode.
 		 */
-		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
-				driver->logging_mode == DIAG_MULTI_MODE) {
+		if (driver->logging_mode[DIAG_LOCAL_PROC] ==
+				DIAG_MEMORY_DEVICE_MODE ||
+			driver->logging_mode[DIAG_LOCAL_PROC] ==
+				DIAG_MULTI_MODE) {
 			mutex_lock(&driver->md_session_lock);
 			chk_logging_wakeup();
 			mutex_unlock(&driver->md_session_lock);
@@ -328,16 +336,16 @@ static void encode_rsp_and_send(unsigned char *buf, int len,
 	mutex_lock(&driver->md_session_lock);
 	session_info = diag_md_session_get_pid(pid);
 	info = (session_info) ? session_info :
-				diag_md_session_get_peripheral(APPS_DATA);
+		diag_md_session_get_peripheral(DIAG_LOCAL_PROC, APPS_DATA);
 
 	/*
 	 * Explicitly check for the Peripheral Modem here
 	 * is necessary till a way to identify a peripheral
 	 * if its supporting qshrink4 feature.
 	 */
-	if (info && info->peripheral_mask) {
+	if (info && info->peripheral_mask[DIAG_LOCAL_PROC]) {
 		for (i = 0; i < NUM_MD_SESSIONS; i++) {
-			if (info->peripheral_mask & (1 << i))
+			if (info->peripheral_mask[DIAG_LOCAL_PROC] & (1 << i))
 				break;
 		}
 		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
@@ -365,8 +373,10 @@ static void encode_rsp_and_send(unsigned char *buf, int len,
 		 * for responses. Make sure we don't miss previous wakeups for
 		 * draining responses when we are in Memory Device Mode.
 		 */
-		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
-				driver->logging_mode == DIAG_MULTI_MODE) {
+		if (driver->logging_mode[DIAG_LOCAL_PROC] ==
+			DIAG_MEMORY_DEVICE_MODE ||
+			driver->logging_mode[DIAG_LOCAL_PROC] ==
+				DIAG_MULTI_MODE) {
 			mutex_lock(&driver->md_session_lock);
 			chk_logging_wakeup();
 			mutex_unlock(&driver->md_session_lock);
@@ -410,7 +420,8 @@ static void diag_send_rsp(unsigned char *buf, int len,
 	mutex_lock(&driver->md_session_lock);
 	info = diag_md_session_get_pid(pid);
 	session_info = (info) ? info :
-				diag_md_session_get_peripheral(APPS_DATA);
+			diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+							APPS_DATA);
 	if (session_info)
 		hdlc_disabled = session_info->hdlc_disabled;
 	else
@@ -481,19 +492,16 @@ void diag_update_userspace_clients(unsigned int type)
 	wake_up_interruptible(&driver->wait_q);
 	mutex_unlock(&driver->diagchar_mutex);
 }
-
-void diag_update_md_clients(unsigned int type)
+void diag_update_md_clients_proc(unsigned int proc, unsigned int type)
 {
 	int i, j;
 
-	mutex_lock(&driver->diagchar_mutex);
-	mutex_lock(&driver->md_session_lock);
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (driver->md_session_map[i] != NULL)
+		if (driver->md_session_map[proc][i]) {
 			for (j = 0; j < driver->num_clients; j++) {
 				if (driver->client_map[j].pid != 0 &&
 					driver->client_map[j].pid ==
-					driver->md_session_map[i]->pid) {
+					driver->md_session_map[proc][i]->pid) {
 					if (!(driver->data_ready[j] & type)) {
 						driver->data_ready[j] |= type;
 						atomic_inc(
@@ -502,7 +510,19 @@ void diag_update_md_clients(unsigned int type)
 					break;
 				}
 			}
+		}
 	}
+}
+void diag_update_md_clients(unsigned int type)
+{
+	int proc;
+
+	mutex_lock(&driver->diagchar_mutex);
+	mutex_lock(&driver->md_session_lock);
+
+	for (proc = 0; proc < NUM_DIAG_MD_DEV; proc++)
+		diag_update_md_clients_proc(proc, type);
+
 	mutex_unlock(&driver->md_session_lock);
 	wake_up_interruptible(&driver->wait_q);
 	mutex_unlock(&driver->diagchar_mutex);
@@ -1023,6 +1043,7 @@ static void diag_init_apps_feature(void)
 	driver->apps_feature = 0;
 
 	SET_APPS_FEATURE(driver, F_DIAG_EVENT_REPORT);
+	SET_APPS_FEATURE(driver, F_DIAG_HW_ACCELERATION);
 }
 
 void diag_send_error_rsp(unsigned char *buf, int len,
@@ -1225,7 +1246,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
 		mutex_lock(&driver->md_session_lock);
 		info = diag_md_session_get_pid(pid);
 		if (info) {
-			p_mask = info->peripheral_mask;
+			p_mask = info->peripheral_mask[DIAG_LOCAL_PROC];
 			mutex_unlock(&driver->md_session_lock);
 			MD_PERIPHERAL_PD_MASK(TYPE_CMD, reg_item->proc,
 				pd_mask);
@@ -1235,7 +1256,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
 		} else {
 			mutex_unlock(&driver->md_session_lock);
 			if (MD_PERIPHERAL_MASK(reg_item->proc) &
-				driver->logging_mask) {
+				driver->logging_mask[DIAG_LOCAL_PROC]) {
 				mutex_unlock(&driver->cmd_reg_mutex);
 				diag_send_error_rsp(buf, len, pid);
 				return write_len;
@@ -1337,17 +1358,20 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
 		else
 			driver->hdlc_disabled = 1;
 		peripheral =
-			diag_md_session_match_pid_peripheral(pid, 0);
+			diag_md_session_match_pid_peripheral(DIAG_LOCAL_PROC,
+								pid, 0);
 		for (i = 0; i < NUM_MD_SESSIONS; i++) {
 			if (peripheral > 0 && info) {
 				if (peripheral & (1 << i))
 					driver->p_hdlc_disabled[i] =
 					info->hdlc_disabled;
-				else if (!diag_md_session_get_peripheral(i))
+				else if (!diag_md_session_get_peripheral(
+						DIAG_LOCAL_PROC, i))
 					driver->p_hdlc_disabled[i] =
 					driver->hdlc_disabled;
 			} else {
-				if (!diag_md_session_get_peripheral(i))
+				if (!diag_md_session_get_peripheral(
+						DIAG_LOCAL_PROC, i))
 					driver->p_hdlc_disabled[i] =
 					driver->hdlc_disabled;
 			}
@@ -1497,9 +1521,10 @@ static int diagfwd_mux_close(int id, int mode)
 		return -EINVAL;
 	}
 
-	if ((driver->logging_mode == DIAG_MULTI_MODE &&
-		driver->md_session_mode == DIAG_MD_NONE) ||
-		(driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+	if ((driver->logging_mode[DIAG_LOCAL_PROC] == DIAG_MULTI_MODE &&
+		driver->md_session_mode[DIAG_LOCAL_PROC] == DIAG_MD_NONE) ||
+		(driver->md_session_mode[DIAG_LOCAL_PROC] ==
+			DIAG_MD_PERIPHERAL)) {
 		/*
 		 * This case indicates that the USB is removed
 		 * but there is a client running in background
@@ -1527,7 +1552,7 @@ static int diagfwd_mux_close(int id, int mode)
 		pr_debug("diag: In %s, re-enabling HDLC encoding\n",
 		       __func__);
 		mutex_lock(&driver->hdlc_disable_mutex);
-		if (driver->md_session_mode == DIAG_MD_NONE) {
+		if (driver->md_session_mode[DIAG_LOCAL_PROC] == DIAG_MD_NONE) {
 			driver->hdlc_disabled = 0;
 			/*
 			 * HDLC encoding is re-enabled when
@@ -1584,7 +1609,8 @@ static void diag_timer_work_fn(struct work_struct *work)
 	driver->hdlc_disabled = 0;
 	mutex_lock(&driver->md_session_lock);
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		session_info = diag_md_session_get_peripheral(i);
+		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								i);
 		if (!session_info)
 			driver->p_hdlc_disabled[i] =
 			driver->hdlc_disabled;
@@ -1617,7 +1643,8 @@ static void diag_md_timer_work_fn(struct work_struct *work)
 	if (session_info)
 		session_info->hdlc_disabled = 0;
 	peripheral =
-		diag_md_session_match_pid_peripheral(hdlc_work->pid, 0);
+		diag_md_session_match_pid_peripheral(DIAG_LOCAL_PROC,
+							hdlc_work->pid, 0);
 	if (peripheral > 0 && session_info) {
 		for (i = 0; i < NUM_MD_SESSIONS; i++) {
 			if (peripheral & (1 << i))
@@ -1705,18 +1732,23 @@ static void diag_hdlc_start_recovery(unsigned char *buf, int len,
 				driver->hdlc_disabled = 0;
 
 			peripheral =
-				diag_md_session_match_pid_peripheral(pid, 0);
+				diag_md_session_match_pid_peripheral(
+								DIAG_LOCAL_PROC,
+								pid, 0);
 			for (i = 0; i < NUM_MD_SESSIONS; i++) {
 				if (peripheral > 0 && info) {
 					if (peripheral & (1 << i))
 						driver->p_hdlc_disabled[i] =
 						info->hdlc_disabled;
 					else if (
-					!diag_md_session_get_peripheral(i))
+					!diag_md_session_get_peripheral(
+							DIAG_LOCAL_PROC, i))
 						driver->p_hdlc_disabled[i] =
 						driver->hdlc_disabled;
 				} else {
-					if (!diag_md_session_get_peripheral(i))
+					if (
+					!diag_md_session_get_peripheral(
+							DIAG_LOCAL_PROC, i))
 						driver->p_hdlc_disabled[i] =
 						driver->hdlc_disabled;
 				}
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index c4d4306..9632bcc 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -99,7 +99,7 @@ static void diag_stm_update_work_fn(struct work_struct *work)
 
 void diag_notify_md_client(uint8_t peripheral, int data)
 {
-	int stat = 0;
+	int stat = 0, proc = DIAG_LOCAL_PROC;
 	struct siginfo info;
 	struct pid *pid_struct;
 	struct task_struct *result;
@@ -107,7 +107,7 @@ void diag_notify_md_client(uint8_t peripheral, int data)
 	if (peripheral > NUM_PERIPHERALS)
 		return;
 
-	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (driver->logging_mode[proc] != DIAG_MEMORY_DEVICE_MODE)
 		return;
 
 	mutex_lock(&driver->md_session_lock);
@@ -116,20 +116,20 @@ void diag_notify_md_client(uint8_t peripheral, int data)
 	info.si_int = (PERIPHERAL_MASK(peripheral) | data);
 	info.si_signo = SIGCONT;
 
-	if (!driver->md_session_map[peripheral] ||
-		driver->md_session_map[peripheral]->pid <= 0) {
+	if (!driver->md_session_map[proc][peripheral] ||
+		driver->md_session_map[proc][peripheral]->pid <= 0) {
 		pr_err("diag: md_session_map[%d] is invalid\n", peripheral);
 		mutex_unlock(&driver->md_session_lock);
 		return;
 	}
 
 	pid_struct = find_get_pid(
-			driver->md_session_map[peripheral]->pid);
+		driver->md_session_map[proc][peripheral]->pid);
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"md_session_map[%d] pid = %d task = %pK\n",
 		peripheral,
-		driver->md_session_map[peripheral]->pid,
-		driver->md_session_map[peripheral]->task);
+		driver->md_session_map[proc][peripheral]->pid,
+		driver->md_session_map[proc][peripheral]->task);
 
 	if (pid_struct) {
 		result = get_pid_task(pid_struct, PIDTYPE_PID);
@@ -138,13 +138,14 @@ void diag_notify_md_client(uint8_t peripheral, int data)
 			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 				"diag: md_session_map[%d] with pid = %d Exited..\n",
 				peripheral,
-				driver->md_session_map[peripheral]->pid);
+				driver->md_session_map[proc][peripheral]->pid);
 			mutex_unlock(&driver->md_session_lock);
 			return;
 		}
 
-		if (driver->md_session_map[peripheral] &&
-			driver->md_session_map[peripheral]->task == result) {
+		if (driver->md_session_map[proc][peripheral] &&
+			driver->md_session_map[proc][peripheral]->task ==
+								result) {
 			stat = send_sig_info(info.si_signo,
 					&info, result);
 			if (stat)
@@ -801,8 +802,8 @@ void process_diagid_v2_feature_mask(uint32_t diag_id,
 		uint32_t pd_feature_mask)
 {
 	int i = 0;
-	uint32_t diagid_mask_bit = 0;
-	uint32_t feature_id_mask = 0;
+	uint32_t diagid_mask_bit = 0, feature_id_mask = 0;
+	uint8_t hw_accel_type = 0, hw_accel_ver = 0;
 
 	if (!pd_feature_mask)
 		return;
@@ -813,6 +814,10 @@ void process_diagid_v2_feature_mask(uint32_t diag_id,
 		if (feature_id_mask)
 			driver->diagid_v2_feature[i] |= diagid_mask_bit;
 		feature_id_mask = 0;
+
+		diag_map_index_to_hw_accel(i, &hw_accel_type, &hw_accel_ver);
+		if (hw_accel_type && hw_accel_ver)
+			driver->diag_hw_accel[i] = 1;
 	}
 	mutex_unlock(&driver->diagid_v2_mutex);
 }
@@ -837,7 +842,7 @@ static void process_diagid(uint8_t *buf, uint32_t len,
 		return;
 
 	diagid_v2_feature_mask =
-		driver->feature[peripheral].diagid_v2_feature_mask;
+		P_FMASK_DIAGID_V2(peripheral);
 
 	if (len < sizeof(struct diag_ctrl_diagid_header)) {
 		pr_err("diag: Invalid control pkt len(%d) from peripheral: %d to parse packet header\n",
@@ -980,6 +985,8 @@ void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
 
 	while (read_len + header_len < len) {
 		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		if ((read_len + header_len + ctrl_pkt->len) > len)
+			return;
 		switch (ctrl_pkt->pkt_id) {
 		case DIAG_CTRL_MSG_REG:
 			process_command_registration(ptr, ctrl_pkt->len,
@@ -1496,6 +1503,68 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
 	return err;
 }
 
+void diag_send_hw_accel_status(uint8_t peripheral)
+{
+	struct diag_hw_accel_cmd_req_t req_params;
+	struct diagfwd_info *fwd_info = NULL;
+	struct diag_id_info *diagid_struct = NULL;
+	uint32_t diagid_mask_bit = 0;
+	uint8_t hw_accel_type = 0, hw_accel_ver = 0;
+	int feature = 0, pd = 0;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Send HW Acceleration Enabled status to peripheral: %d\n",
+		peripheral);
+
+	fwd_info = &peripheral_info[TYPE_CNTL][peripheral];
+	if (!fwd_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Error return for peripheral: %d\n", peripheral);
+		return;
+	}
+
+	for (feature = 0; feature < DIAGID_V2_FEATURE_COUNT - 1; feature++) {
+		if (!driver->diag_hw_accel[feature])
+			continue;
+		for (pd = 0; pd <= MAX_PERIPHERAL_UPD; pd++) {
+			if (!pd) {
+				diagid_struct = &fwd_info->root_diag_id;
+				diagid_mask_bit =
+					1 << (diagid_struct->diagid_val - 1);
+			}
+			if (!diagid_mask_bit)
+				continue;
+			if (driver->diagid_v2_feature[feature] &
+				driver->diagid_v2_status[feature] &
+				diagid_mask_bit) {
+				diag_map_index_to_hw_accel(feature,
+					&hw_accel_type, &hw_accel_ver);
+				req_params.header.cmd_code =
+					DIAG_CMD_DIAG_SUBSYS;
+				req_params.header.subsys_id =
+					DIAG_SS_DIAG;
+				req_params.header.subsys_cmd_code =
+					DIAG_HW_ACCEL_CMD;
+				req_params.version = 1;
+				req_params.reserved = 0;
+				req_params.operation = DIAG_HW_ACCEL_OP_ENABLE;
+				req_params.op_req.hw_accel_type = hw_accel_type;
+				req_params.op_req.hw_accel_ver = hw_accel_ver;
+				req_params.op_req.diagid_mask = diagid_mask_bit;
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+					"Sending passthru packet for diag_id: %d\n",
+					diagid_struct->diagid_val);
+				if (P_FMASK_DIAGID_V2(peripheral))
+					diag_send_passthru_ctrl_pkt(
+						&req_params);
+			}
+			diagid_struct = &fwd_info->upd_diag_id[pd];
+			diagid_mask_bit = 0;
+			diagid_mask_bit = 1 << (diagid_struct->diagid_val - 1);
+		}
+	}
+}
+
 int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
 {
 	struct diag_ctrl_msg_stm stm_msg;
@@ -1746,7 +1815,7 @@ int diag_send_buffering_wm_values(uint8_t peripheral,
 	return err;
 }
 
-int diag_send_passtru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params)
+int diag_send_passthru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params)
 {
 	struct diag_ctrl_passthru ctrl_pkt;
 	int f_index = -1, err = 0;
@@ -1782,10 +1851,17 @@ int diag_send_passtru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params)
 	diagid_mask = req_params->op_req.diagid_mask;
 	diagid_status = (DIAGIDV2_FEATURE(f_index) & diagid_mask);
 
-	if (req_params->operation == DIAG_HW_ACCEL_OP_DISABLE)
+	if (req_params->operation == DIAG_HW_ACCEL_OP_DISABLE) {
 		DIAGIDV2_STATUS(f_index) &= ~diagid_status;
-	else
+	} else {
 		DIAGIDV2_STATUS(f_index) |= diagid_status;
+		for (i = 0; i < DIAGID_V2_FEATURE_COUNT; i++) {
+			if (i == f_index || !driver->diag_hw_accel[i])
+				continue;
+			DIAGIDV2_STATUS(i) &=
+				~(DIAGIDV2_FEATURE(i) & diagid_mask);
+		}
+	}
 
 	req_params->op_req.diagid_mask = DIAGIDV2_STATUS(f_index);
 
@@ -1804,6 +1880,8 @@ int diag_send_passtru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params)
 		sizeof(ctrl_pkt.diagid_mask) + sizeof(ctrl_pkt.hw_accel_type) +
 		sizeof(ctrl_pkt.hw_accel_ver) + sizeof(ctrl_pkt.control_data);
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!P_FMASK_DIAGID_V2(i))
+			continue;
 		err = diagfwd_write(i, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
 		if (err && err != -ENODEV) {
 			pr_err("diag: Unable to send PASSTHRU ctrl packet to peripheral %d, err: %d\n",
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 7dd6285..b714d5e 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -396,6 +396,7 @@ void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
 int diag_send_real_time_update(uint8_t peripheral, int real_time);
 void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral);
 int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_send_hw_accel_status(uint8_t peripheral);
 void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
 void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
 void diag_real_time_work_fn(struct work_struct *work);
@@ -406,5 +407,5 @@ int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
 		    uint8_t diag_id, struct diag_buffering_mode_t *params);
 int diag_send_buffering_wm_values(uint8_t peripheral,
 		    uint8_t diag_id, struct diag_buffering_mode_t *params);
-int diag_send_passtru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params);
+int diag_send_passthru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params);
 #endif
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index 9b2a384..27a7d1f 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -278,9 +278,11 @@ static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
 	cancel_work_sync(&mhi_info->read_done_work);
 	flush_workqueue(mhi_info->mhi_wq);
 
-	if (close_flag == CLOSE_CHANNELS)
+	if (close_flag == CLOSE_CHANNELS) {
+		mutex_lock(&mhi_info->ch_mutex);
 		mhi_unprepare_from_transfer(mhi_info->mhi_dev);
-
+		mutex_unlock(&mhi_info->ch_mutex);
+	}
 	mhi_buf_tbl_clear(mhi_info);
 	diag_remote_dev_close(mhi_info->dev_id);
 	return 0;
@@ -334,7 +336,9 @@ static int __mhi_open(struct diag_mhi_info *mhi_info, int token, int open_flag)
 		if ((atomic_read(&(mhi_info->read_ch.opened))) &&
 			(atomic_read(&(mhi_info->write_ch.opened))))
 			return 0;
+		mutex_lock(&mhi_info->ch_mutex);
 		err = mhi_prepare_for_transfer(mhi_info->mhi_dev);
+		mutex_unlock(&mhi_info->ch_mutex);
 		if (err) {
 			pr_err("diag: In %s, unable to open ch, err: %d\n",
 				__func__, err);
@@ -850,6 +854,7 @@ int diag_mhi_init(void)
 			spin_lock_init(&mhi_info->lock);
 			spin_lock_init(&mhi_info->read_ch.lock);
 			spin_lock_init(&mhi_info->write_ch.lock);
+			mutex_init(&mhi_info->ch_mutex);
 			INIT_LIST_HEAD(&mhi_info->read_ch.buf_tbl);
 			INIT_LIST_HEAD(&mhi_info->write_ch.buf_tbl);
 			atomic_set(&(mhi_info->read_ch.opened), 0);
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
index 0fcc6af..21485cc 100644
--- a/drivers/char/diag/diagfwd_mhi.h
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -71,6 +71,7 @@ struct diag_mhi_info {
 	wait_queue_head_t mhi_wait_q;
 	struct diag_mhi_ch_t read_ch;
 	struct diag_mhi_ch_t write_ch;
+	struct mutex ch_mutex;
 	spinlock_t lock;
 };
 
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index e85acd7..ca0ccf5 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -197,8 +197,10 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
 
 		mutex_lock(&driver->md_session_lock);
 		if (buf->len < max_size) {
-			if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
-				driver->logging_mode == DIAG_MULTI_MODE) {
+			if (driver->logging_mode[DIAG_LOCAL_PROC] ==
+				DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode[DIAG_LOCAL_PROC] ==
+							DIAG_MULTI_MODE) {
 				ch = &diag_md[DIAG_LOCAL_PROC];
 				if (!ch || !ch->md_info_inited) {
 					mutex_unlock(&driver->md_session_lock);
@@ -1227,7 +1229,7 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
 	 * Keeping the buffers busy for Memory Device and Multi Mode.
 	 */
 
-	if (driver->logging_mode != DIAG_USB_MODE) {
+	if (driver->logging_mode[DIAG_LOCAL_PROC] != DIAG_USB_MODE) {
 		if (fwd_info->buf_1) {
 			atomic_set(&fwd_info->buf_1->in_busy, 0);
 			fwd_info->buffer_status[BUF_1_INDEX] = 0;
diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c
index 15a93fb..6dda72a 100644
--- a/drivers/char/diag/diagfwd_rpmsg.c
+++ b/drivers/char/diag/diagfwd_rpmsg.c
@@ -21,6 +21,8 @@
 #include "diagfwd_rpmsg.h"
 #include "diag_ipc_logging.h"
 
+#define PERI_RPMSG rpmsg_info->peripheral
+
 struct diag_rpmsg_read_work {
 	struct diag_rpmsg_info *rpmsg_info;
 	const void *ptr_read_done;
@@ -389,16 +391,17 @@ static void diag_state_open_rpmsg(void *ctxt)
 static void diag_rpmsg_queue_read(void *ctxt)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
+	unsigned long flags;
 
 	if (!ctxt)
 		return;
 
 	rpmsg_info = (struct diag_rpmsg_info *)ctxt;
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (rpmsg_info->hdl && rpmsg_info->wq &&
 		atomic_read(&rpmsg_info->opened))
 		queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 }
 
 static void diag_state_close_rpmsg(void *ctxt)
@@ -432,6 +435,7 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 	struct diag_rpmsg_info *rpmsg_info =  NULL;
 	struct diagfwd_info *fwd_info = NULL;
 	int ret_val = 0;
+	unsigned long flags;
 
 	if (!ctxt || !buf || buf_len <= 0)
 		return -EIO;
@@ -442,15 +446,16 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 		return -EIO;
 	}
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (!atomic_read(&rpmsg_info->opened) ||
 		!rpmsg_info->hdl || !rpmsg_info->inited) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"diag:RPMSG channel not opened");
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return -EIO;
 	}
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	fwd_info = rpmsg_info->fwd_ctxt;
 
@@ -474,22 +479,25 @@ static void diag_rpmsg_read_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							read_work);
+	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	if (!atomic_read(&rpmsg_info->opened)) {
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return;
 	}
 	if (!rpmsg_info->inited) {
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		diag_ws_release();
 		return;
 	}
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	diagfwd_channel_read(rpmsg_info->fwd_ctxt);
 }
@@ -499,6 +507,7 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 	struct diag_rpmsg_info *rpmsg_info = NULL;
 	int err = 0;
 	struct rpmsg_device *rpdev = NULL;
+	unsigned long flags;
 
 	if (!ctxt || !buf)
 		return -EIO;
@@ -510,15 +519,16 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 		return -EINVAL;
 	}
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl ||
 		!atomic_read(&rpmsg_info->opened)) {
 		pr_err_ratelimited("diag: In %s, rpmsg not inited, rpmsg_info: %pK, buf: %pK, len: %d\n",
 				 __func__, rpmsg_info, buf, len);
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return -ENODEV;
 	}
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	rpdev = (struct rpmsg_device *)rpmsg_info->hdl;
 	err = rpmsg_send(rpdev->ept, buf, len);
@@ -537,15 +547,18 @@ static void diag_rpmsg_late_init_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							late_init_work);
+	unsigned long flags;
+
 	if (!rpmsg_info)
 		return;
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (!rpmsg_info->hdl) {
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return;
 	}
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "rpmsg late init p: %d t: %d\n",
@@ -558,15 +571,18 @@ static void diag_rpmsg_open_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							open_work);
+	unsigned long flags;
+
 	if (!rpmsg_info)
 		return;
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (!rpmsg_info->inited) {
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return;
 	}
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	if (rpmsg_info->type != TYPE_CNTL) {
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
@@ -581,16 +597,19 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							close_work);
+	unsigned long flags;
+
 	if (!rpmsg_info)
 		return;
 
-	mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl) {
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		return;
 	}
 	rpmsg_info->hdl = NULL;
-	mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	diagfwd_channel_close(rpmsg_info->fwd_ctxt);
 }
 
@@ -703,18 +722,20 @@ static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info)
 
 int diag_rpmsg_init_peripheral(uint8_t peripheral)
 {
+	unsigned long flags;
+
 	if (peripheral >= NUM_PERIPHERALS) {
 		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
 			peripheral);
 		return -EINVAL;
 	}
 
-	mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
 	rpmsg_late_init(&rpmsg_data[peripheral]);
 	rpmsg_late_init(&rpmsg_dci[peripheral]);
 	rpmsg_late_init(&rpmsg_cmd[peripheral]);
 	rpmsg_late_init(&rpmsg_dci_cmd[peripheral]);
-	mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral], flags);
 
 	return 0;
 }
@@ -722,6 +743,7 @@ int diag_rpmsg_init_peripheral(uint8_t peripheral)
 static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 {
 	char wq_name[DIAG_RPMSG_NAME_SZ + 12];
+	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
@@ -741,6 +763,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 	INIT_WORK(&(rpmsg_info->close_work), diag_rpmsg_close_work_fn);
 	INIT_WORK(&(rpmsg_info->read_work), diag_rpmsg_read_work_fn);
 	INIT_WORK(&(rpmsg_info->late_init_work), diag_rpmsg_late_init_work_fn);
+	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 	rpmsg_info->hdl = NULL;
 	rpmsg_info->fwd_ctxt = NULL;
 	atomic_set(&rpmsg_info->opened, 0);
@@ -749,6 +772,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 		"%s initialized fwd_ctxt: %pK hdl: %pK\n",
 		rpmsg_info->name, rpmsg_info->fwd_ctxt,
 		rpmsg_info->hdl);
+	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 }
 
 void diag_rpmsg_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
@@ -766,28 +790,26 @@ int diag_rpmsg_init(void)
 {
 	uint8_t peripheral;
 	struct diag_rpmsg_info *rpmsg_info = NULL;
+	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
 			continue;
 		rpmsg_info = &rpmsg_cntl[peripheral];
-		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_init(rpmsg_info);
-		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 		diagfwd_cntl_register(TRANSPORT_RPMSG, rpmsg_info->peripheral,
 					(void *)rpmsg_info, &rpmsg_ops,
 					&(rpmsg_info->fwd_ctxt));
-		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
 		rpmsg_info->inited = 1;
-		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
+			flags);
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 		diagfwd_late_open(rpmsg_info->fwd_ctxt);
-		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_init(&rpmsg_data[peripheral]);
 		__diag_rpmsg_init(&rpmsg_cmd[peripheral]);
 		__diag_rpmsg_init(&rpmsg_dci[peripheral]);
 		__diag_rpmsg_init(&rpmsg_dci_cmd[peripheral]);
-		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 	}
 	return 0;
 }
@@ -814,27 +836,31 @@ static void __diag_rpmsg_exit(struct diag_rpmsg_info *rpmsg_info)
 void diag_rpmsg_early_exit(void)
 {
 	int peripheral = 0;
+	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
 			continue;
-		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
 		__diag_rpmsg_exit(&rpmsg_cntl[peripheral]);
-		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
+			flags);
 	}
 }
 
 void diag_rpmsg_exit(void)
 {
 	int peripheral = 0;
+	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
-		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
 		__diag_rpmsg_exit(&rpmsg_data[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_cmd[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci_cmd[peripheral]);
-		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
+			flags);
 	}
 }
 
@@ -860,6 +886,7 @@ static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name)
 static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
+	unsigned long flags;
 
 	if (!rpdev)
 		return 0;
@@ -869,10 +896,11 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
 
-		mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 		rpmsg_info->hdl = rpdev;
 		atomic_set(&rpmsg_info->opened, 1);
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 
 		dev_set_drvdata(&rpdev->dev, rpmsg_info);
 		diagfwd_channel_read(rpmsg_info->fwd_ctxt);
@@ -885,15 +913,17 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 static void diag_rpmsg_remove(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
+	unsigned long flags;
 
 	if (!rpdev)
 		return;
 
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
-		mutex_lock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 		atomic_set(&rpmsg_info->opened, 0);
-		mutex_unlock(&driver->rpmsginfo_mutex[rpmsg_info->peripheral]);
+		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
+			flags);
 		queue_work(rpmsg_info->wq, &rpmsg_info->close_work);
 	}
 }
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff69..e9b6ac6 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
 	priv->rng.read = omap_rng_do_read;
 	priv->rng.init = omap_rng_init;
 	priv->rng.cleanup = omap_rng_cleanup;
+	priv->rng.quality = 900;
 
 	priv->rng.priv = (unsigned long)priv;
 	platform_set_drvdata(pdev, priv);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d5f7a12..3fb297b5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -213,6 +213,9 @@ struct ipmi_user {
 
 	/* Does this interface receive IPMI events? */
 	bool gets_events;
+
+	/* Free must run in process context for RCU cleanup. */
+	struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1078,6 +1081,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
 }
 
 
+static void free_user_work(struct work_struct *work)
+{
+	struct ipmi_user *user = container_of(work, struct ipmi_user,
+					      remove_work);
+
+	cleanup_srcu_struct(&user->release_barrier);
+	kfree(user);
+}
+
 int ipmi_create_user(unsigned int          if_num,
 		     const struct ipmi_user_hndl *handler,
 		     void                  *handler_data,
@@ -1121,6 +1133,8 @@ int ipmi_create_user(unsigned int          if_num,
 	goto out_kfree;
 
  found:
+	INIT_WORK(&new_user->remove_work, free_user_work);
+
 	rv = init_srcu_struct(&new_user->release_barrier);
 	if (rv)
 		goto out_kfree;
@@ -1183,8 +1197,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-	cleanup_srcu_struct(&user->release_barrier);
-	kfree(user);
+
+	/* SRCU cleanup must happen in task context. */
+	schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index 9ae2405..0c28e87 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -200,6 +200,8 @@ void __init ipmi_hardcode_init(void)
 	char *str;
 	char *si_type[SI_MAX_PARMS];
 
+	memset(si_type, 0, sizeof(si_type));
+
 	/* Parse out the si_type string into its components. */
 	str = si_type_str;
 	if (*str != '\0') {
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 76c2010..af44db2 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -688,12 +688,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum != ssif_info->multi_pos) {
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
 			 * but multi_pos starts at one, so the +1.
 			 */
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				dev_dbg(&ssif_info->client->dev,
+					"Received message out of sequence, expected %u, got %u\n",
+					ssif_info->multi_pos - 1, blocknum);
 			result = -EIO;
 		} else {
 			ssif_inc_stat(ssif_info, received_message_parts);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38c6d1a..af6e240 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -777,6 +777,7 @@ static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
 static void invalidate_batched_entropy(void);
+static void numa_crng_init(void);
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
 static int __init parse_trust_cpu(char *arg)
@@ -805,7 +806,9 @@ static void crng_initialize(struct crng_state *crng)
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init) {
+	if (trust_cpu && arch_init && crng == &primary_crng) {
+		invalidate_batched_entropy();
+		numa_crng_init();
 		crng_init = 2;
 		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
 	}
@@ -2211,8 +2214,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2225,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2242,28 +2247,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2272,18 +2274,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2295,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
new file mode 100644
index 0000000..826260e
--- /dev/null
+++ b/drivers/char/rdbg.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define SMP2P_NUM_PROCS				16
+#define MAX_RETRIES				20
+
+#define SM_VERSION				1
+#define SM_BLOCKSIZE				128
+
+#define SMQ_MAGIC_INIT				0xFF00FF00
+#define SMQ_MAGIC_PRODUCER			(SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER			(SMQ_MAGIC_INIT | 0x2)
+
+#define SMEM_LC_DEBUGGER 470
+
+enum SMQ_STATUS {
+	SMQ_SUCCESS    =  0,
+	SMQ_ENOMEMORY  = -1,
+	SMQ_EBADPARM   = -2,
+	SMQ_UNDERFLOW  = -3,
+	SMQ_OVERFLOW   = -4
+};
+
+enum smq_type {
+	PRODUCER = 1,
+	CONSUMER = 2,
+	INVALID  = 3
+};
+
+struct smq_block_map {
+	uint32_t index_read;
+	uint32_t num_blocks;
+	uint8_t *map;
+};
+
+struct smq_node {
+	uint16_t index_block;
+	uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+	uint8_t producer_version;
+	uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset;
+	uint32_t index_sent_write;
+	uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+	struct smq_out_state s;
+	struct smq_node sent[1];
+};
+
+struct smq_in_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset_ack;
+	uint32_t index_sent_read;
+	uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+	struct smq_in_state s;
+	struct smq_node free[1];
+};
+
+struct smq {
+	struct smq_hdr *hdr;
+	struct smq_out *out;
+	struct smq_in *in;
+	uint8_t *blocks;
+	uint32_t num_blocks;
+	struct mutex *lock;
+	uint32_t initialized;
+	struct smq_block_map block_map;
+	enum smq_type type;
+};
+
+struct gpio_info {
+	int gpio_base_id;
+	int irq_base_id;
+	unsigned int smem_bit;
+	struct qcom_smem_state *smem_state;
+};
+
+struct rdbg_data {
+	struct device *device;
+	struct completion work;
+	struct gpio_info in;
+	struct gpio_info out;
+	bool   device_initialized;
+	int    gpio_out_offset;
+	bool   device_opened;
+	void   *smem_addr;
+	size_t smem_size;
+	struct smq    producer_smrb;
+	struct smq    consumer_smrb;
+	struct mutex  write_mutex;
+};
+
+struct rdbg_device {
+	struct cdev cdev;
+	struct class *class;
+	dev_t dev_no;
+	int num_devices;
+	struct rdbg_data *rdbg_data;
+};
+
+
+int registers[32] = {0};
+static struct rdbg_device g_rdbg_instance = {
+	{ {0} },
+	NULL,
+	0,
+	SMP2P_NUM_PROCS,
+	NULL
+};
+
+struct processor_specific_info {
+	char *name;
+	unsigned int smem_buffer_addr;
+	size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+		{0},	/*APPS*/
+		{"rdbg_modem", 0, 0},	/*MODEM*/
+		{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024},	/*ADSP*/
+		{0},	/*SMP2P_RESERVED_PROC_1*/
+		{"rdbg_wcnss", 0, 0},		/*WCNSS*/
+		{"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024},		/*CDSP*/
+		{NULL},	/*SMP2P_POWER_PROC*/
+		{NULL},	/*SMP2P_TZ_PROC*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL},	/*EMPTY*/
+		{NULL}	/*SMP2P_REMOTE_MOCK_PROC*/
+};
+
+static int smq_blockmap_get(struct smq_block_map *block_map,
+	uint32_t *block_index, uint32_t n)
+{
+	uint32_t start;
+	uint32_t mark = 0;
+	uint32_t found = 0;
+	uint32_t i = 0;
+
+	start = block_map->index_read;
+
+	if (n == 1) {
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				*block_index = block_map->index_read;
+				block_map->map[block_map->index_read] = 1;
+				block_map->index_read++;
+				block_map->index_read %= block_map->num_blocks;
+				return SMQ_SUCCESS;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	} else {
+		mark = block_map->num_blocks;
+
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				if (mark > block_map->index_read) {
+					mark = block_map->index_read;
+					start = block_map->index_read;
+					found = 0;
+				}
+
+				found++;
+				if (found == n) {
+					*block_index = mark;
+					for (i = 0; i < n; i++)
+						block_map->map[mark + i] =
+							(uint8_t)(n - i);
+					block_map->index_read += block_map->map
+						[block_map->index_read] - 1;
+					return SMQ_SUCCESS;
+				}
+			} else {
+				found = 0;
+				block_map->index_read += block_map->map
+					[block_map->index_read] - 1;
+				mark = block_map->num_blocks;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	}
+
+	return SMQ_ENOMEMORY;
+}
+
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+	uint32_t num_blocks = block_map->map[i];
+
+	while (num_blocks--) {
+		block_map->map[i] = 0;
+		i++;
+	}
+}
+
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+	memset(block_map->map, 0, block_map->num_blocks + 1);
+	block_map->index_read = 0;
+
+	return SMQ_SUCCESS;
+}
+
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+	uint32_t num_blocks)
+{
+	if (num_blocks <= 1)
+		return SMQ_ENOMEMORY;
+
+	block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+
+	block_map->num_blocks = num_blocks - 1;
+	smq_blockmap_reset(block_map);
+
+	return SMQ_SUCCESS;
+}
+
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+	kfree(block_map->map);
+	block_map->map = NULL;
+}
+
+static int smq_free(struct smq *smq, void *data)
+{
+	struct smq_node node;
+	uint32_t index_block;
+	int err = SMQ_SUCCESS;
+
+	if (smq->lock)
+		mutex_lock(smq->lock);
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+	if (index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	node.index_block = (uint16_t)index_block;
+	node.num_blocks = 0;
+	*((struct smq_node *)(smq->in->free +
+		smq->in->s.index_free_write)) = node;
+
+	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (smq->lock)
+		mutex_unlock(smq->lock);
+	return err;
+}
+
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+	struct smq_node *node;
+	int err = SMQ_SUCCESS;
+	int more = 0;
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER))
+		return SMQ_UNDERFLOW;
+
+	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+	if (node->index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+		% smq->num_blocks;
+
+	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+	*pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+	/*
+	 * Ensure that the reads and writes are updated in the memory
+	 * when they are done and not cached. Also, ensure that the reads
+	 * and writes are not reordered as they are shared between two cores.
+	 */
+	rmb();
+	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+		more = 1;
+
+bail:
+	*pbmore = more;
+	return err;
+}
+
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+	void *pv = 0;
+	int num_blocks;
+	uint32_t index_block = 0;
+	int err = SMQ_SUCCESS;
+	struct smq_node *node = NULL;
+
+	mutex_lock(smq->lock);
+
+	if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+	 (smq->hdr->consumer_version == SM_VERSION)) {
+		if (smq->out->s.index_check_queue_for_reset ==
+			smq->in->s.index_check_queue_for_reset_ack) {
+			while (smq->out->s.index_free_read !=
+				smq->in->s.index_free_write) {
+				node = (struct smq_node *)(
+					smq->in->free +
+					smq->out->s.index_free_read);
+				if (node->index_block >= smq->num_blocks) {
+					err = SMQ_EBADPARM;
+					goto bail;
+				}
+
+				smq->out->s.index_free_read =
+					(smq->out->s.index_free_read + 1)
+						% smq->num_blocks;
+
+				smq_blockmap_put(&smq->block_map,
+					node->index_block);
+				/*
+				 * Ensure that the reads and writes are
+				 * updated in the memory when they are done
+				 * and not cached. Also, ensure that the reads
+				 * and writes are not reordered as they are
+				 * shared between two cores.
+				 */
+				rmb();
+			}
+		}
+	}
+
+	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+	if (err != SMQ_SUCCESS)
+		goto bail;
+
+	pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+	err = copy_from_user((void *)pv, (void *)pcb, nsize);
+	if (err != 0)
+		goto bail;
+
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->index_block
+			= (uint16_t)index_block;
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->num_blocks
+			= (uint16_t)num_blocks;
+
+	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (err != SMQ_SUCCESS) {
+		if (pv)
+			smq_blockmap_put(&smq->block_map, index_block);
+	}
+	mutex_unlock(smq->lock);
+	return err;
+}
+
+static int smq_reset_producer_queue_internal(struct smq *smq,
+	uint32_t reset_num)
+{
+	int retval = 0;
+	uint32_t i;
+
+	if (smq->type != PRODUCER)
+		goto bail;
+
+	mutex_lock(smq->lock);
+	if (smq->out->s.index_check_queue_for_reset != reset_num) {
+		smq->out->s.index_check_queue_for_reset = reset_num;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		smq_blockmap_reset(&smq->block_map);
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		retval = 1;
+	}
+	mutex_unlock(smq->lock);
+
+bail:
+	return retval;
+}
+
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+	int retval = 0;
+	uint32_t reset_num, i;
+
+	if ((p_cons->type != CONSUMER) ||
+		(p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+		(p_cons->hdr->producer_version != SM_VERSION))
+		goto bail;
+
+	reset_num = p_cons->out->s.index_check_queue_for_reset;
+	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+		for (i = 0; i < p_cons->num_blocks; i++)
+			(p_cons->in->free + i)->index_block = 0xFFFF;
+
+		p_cons->in->s.index_sent_read = 0;
+		p_cons->in->s.index_free_write = 0;
+
+		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+	}
+
+bail:
+	return retval;
+}
+
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	struct smq smq;
+	int err = 0;
+
+	pb = pb_orig = (uint8_t *)base_addr;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq.out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq.in = (struct smq_in *)pb;
+
+	if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+		pr_err("%s, smq in consumer not initialized\n", __func__);
+		err = -ECOMM;
+	}
+
+bail:
+	return err;
+}
+
+static void smq_dtor(struct smq *smq)
+{
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		switch (smq->type) {
+		case PRODUCER:
+			smq->out->s.init = 0;
+			smq_blockmap_dtor(&smq->block_map);
+			break;
+		case CONSUMER:
+			smq->in->s.init = 0;
+			break;
+		default:
+		case INVALID:
+			break;
+		}
+
+		smq->initialized = 0;
+	}
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ *    +------------------------------------------------+
+ *    | SMEM buffer                                    |
+ *    |-----------------------+------------------------|
+ *    |Producer: LA           | Producer: Remote       |
+ *    |Consumer: Remote       |           subsystem    |
+ *    |          subsystem    | Consumer: LA           |
+ *    |                       |                        |
+ *    |               Producer|                Consumer|
+ *    +-----------------------+------------------------+
+ *    |                       |
+ *    |                       |
+ *    |                       +--------------------------------------+
+ *    |                                                              |
+ *    |                                                              |
+ *    v                                                              v
+ *    +--------------------------------------------------------------+
+ *    |   Header  |       Data      |            Control             |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *    |           | b | b | b |     | S  |n |n |     | S |n |n |     |
+ *    |  Producer | l | l | l |     | M  |o |o |     | M |o |o |     |
+ *    |    Ver    | o | o | o |     | Q  |d |d |     | Q |d |d |     |
+ *    |-----------| c | c | c | ... |    |e |e | ... |   |e |e | ... |
+ *    |           | k | k | k |     | O  |  |  |     | I |  |  |     |
+ *    |  Consumer |   |   |   |     | u  |0 |1 |     | n |0 |1 |     |
+ *    |    Ver    | 0 | 1 | 2 |     | t  |  |  |     |   |  |  |     |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *                                       |           |
+ *                                       +           |
+ *                                                   |
+ *                          +------------------------+
+ *                          |
+ *                          v
+ *                        +----+----+----+----+
+ *                        | SMQ Nodes         |
+ *                        |----|----|----|----|
+ *                 Node # |  0 |  1 |  2 | ...|
+ *                        |----|----|----|----|
+ * Starting Block Index # |  0 |  3 |  8 | ...|
+ *                        |----|----|----|----|
+ *            # of blocks |  3 |  5 |  1 | ...|
+ *                        +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 1 byte  | Producer Version  |
+ *     +---------+-------------------+
+ *     | 1 byte  | Consumer Version  |
+ *     +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ *   Number of Blocks = ((Total Size - Alignment - Size of Header
+ *		- Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+ *
+ * The producer maintains a private block map of each of these blocks to
+ * determine which of these blocks in the queue are in use and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ *     +---------+--------------------+
+ *     | Size    | Field              |
+ *     +---------+--------------------+
+ *     | 2 bytes |Starting Block Index|
+ *     +---------+--------------------+
+ *     | 2 bytes |Number of Blocks    |
+ *     +---------+--------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Sent Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Read   |
+ *     +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset ACK         |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Read Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Write  |
+ *     +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ *	Producer Magic #: 0xFF00FF01
+ *	Consumer Magic #: 0xFF00FF02
+ */
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+	enum smq_type type, struct mutex *lock_ptr)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	uint32_t i;
+	int err;
+
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (!base_addr || !size) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (type == PRODUCER)
+		smq->lock = lock_ptr;
+
+	pb_orig = (uint8_t *)base_addr;
+	smq->hdr = (struct smq_hdr *)pb_orig;
+	pb = pb_orig;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_ENOMEMORY;
+		goto bail;
+	}
+
+	smq->blocks = pb;
+	smq->num_blocks = num_blocks;
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq->out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq->in = (struct smq_in *)pb;
+	smq->type = type;
+	if (type == PRODUCER) {
+		smq->hdr->producer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+		if (err != SMQ_SUCCESS)
+			goto bail;
+
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->out->s.index_check_queue_for_reset += 1;
+		} else {
+			smq->out->s.index_check_queue_for_reset = 1;
+			smq->out->s.init = SMQ_MAGIC_PRODUCER;
+		}
+	} else {
+		smq->hdr->consumer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->in->free + i)->index_block = 0xFFFF;
+
+		smq->in->s.index_sent_read = 0;
+		smq->in->s.index_free_write = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->in->s.index_check_queue_for_reset_ack =
+				smq->out->s.index_check_queue_for_reset;
+		} else {
+			smq->in->s.index_check_queue_for_reset_ack = 0;
+		}
+
+		smq->in->s.init = SMQ_MAGIC_CONSUMER;
+	}
+	smq->initialized = SMQ_MAGIC_INIT;
+	err = SMQ_SUCCESS;
+
+bail:
+	return err;
+}
+
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+	unsigned int offset = rdbgdata->gpio_out_offset;
+	unsigned int val;
+
+	val = (registers[offset]) ^ (BIT(rdbgdata->out.smem_bit+offset));
+	qcom_smem_state_update_bits(rdbgdata->out.smem_state,
+				BIT(rdbgdata->out.smem_bit+offset), val);
+	registers[offset] = val;
+	rdbgdata->gpio_out_offset = (offset + 1) % 32;
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+	struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+	dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem\n",
+		__func__, irq);
+	complete(&(rdbgdata->work));
+	return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+	int err = 0;
+	unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+	smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+	if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+		((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+		dev_err(rdbgdata->device, "%s: smq producer allocation failed\n",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+		((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq consumer allocation failed\n",
+			__func__);
+		err = -ENOMEM;
+	}
+
+bail:
+	return err;
+
+}
+
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *device = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !device->rdbg_data) {
+		pr_err("Memory not allocated yet\n");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &device->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened) {
+		dev_err(rdbgdata->device, "%s: Device already opened\n",
+			__func__);
+		err = -EEXIST;
+		goto bail;
+	}
+
+	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+	if (!rdbgdata->smem_size) {
+		dev_err(rdbgdata->device, "%s: smem not initialized\n",
+			 __func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	rdbgdata->smem_addr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+			      proc_info[device_id].smem_buffer_addr,
+			      &(rdbgdata->smem_size));
+	if (!rdbgdata->smem_addr) {
+		dev_err(rdbgdata->device, "%s: Could not allocate smem memory\n",
+			__func__);
+		err = -ENOMEM;
+		pr_err("rdbg:Could not allocate smem memory\n");
+		goto bail;
+	}
+	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d\n",
+		__func__, (unsigned long)rdbgdata->smem_addr,
+		(unsigned int)rdbgdata->smem_size);
+
+	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+		rdbgdata->smem_size/2)) {
+		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled\n",
+			__func__, proc_info[device_id].name);
+		pr_err("rdbg:Sub system debug is not enabled\n");
+		err = -ECOMM;
+		goto bail;
+	}
+
+	init_completion(&rdbgdata->work);
+
+	err = request_threaded_irq(rdbgdata->in.irq_base_id, NULL,
+	      on_interrupt_from,
+	      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+	      proc_info[device_id].name, (void *)&device->rdbg_data[device_id]);
+	if (err) {
+		dev_err(rdbgdata->device,
+			"%s: Failed to register interrupt.Err=%d,irqid=%d.\n",
+			__func__, err, rdbgdata->in.irq_base_id);
+		pr_err("rdbg : Failed to register interrupt %d\n", err);
+		goto bail;
+	}
+
+	mutex_init(&rdbgdata->write_mutex);
+
+	err = initialize_smq(rdbgdata);
+	if (err) {
+		dev_err(rdbgdata->device, "Error initializing smq. Err=%d\n",
+			err);
+		pr_err("rdbg: initialize_smq() failed with err %d\n", err);
+		goto smq_bail;
+	}
+
+	rdbgdata->device_opened = true;
+
+	filp->private_data = (void *)rdbgdata;
+	return 0;
+
+smq_bail:
+	smq_dtor(&(rdbgdata->producer_smrb));
+	smq_dtor(&(rdbgdata->consumer_smrb));
+	mutex_destroy(&rdbgdata->write_mutex);
+bail:
+	return err;
+}
+
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !rdbgdevice->rdbg_data) {
+		pr_err("Memory not allocated yet\n");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened) {
+		dev_dbg(rdbgdata->device, "%s: Destroying %s.\n", __func__,
+			proc_info[device_id].name);
+		rdbgdata->device_opened = false;
+		complete(&(rdbgdata->work));
+		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+			smq_dtor(&(
+			rdbgdevice->rdbg_data[device_id].producer_smrb));
+		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+			smq_dtor(&(
+			rdbgdevice->rdbg_data[device_id].consumer_smrb));
+		mutex_destroy(&rdbgdata->write_mutex);
+	}
+
+	filp->private_data = NULL;
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+	loff_t *offset)
+{
+	int err = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+	void *p_sent_buffer = NULL;
+	int nsize = 0;
+	int more = 0;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument\n");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	dev_dbg(rdbgdata->device, "%s: In receive\n", __func__);
+	err = wait_for_completion_interruptible(&(rdbgdata->work));
+	if (err) {
+		dev_err(rdbgdata->device, "%s: Error in wait\n", __func__);
+		goto bail;
+	}
+
+	smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+		&(rdbgdata->producer_smrb));
+	if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+			&nsize, &more) != SMQ_SUCCESS) {
+		dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d\n",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+	size = ((size < nsize) ? size : nsize);
+	err = copy_to_user(buf, p_sent_buffer, size);
+	if (err != 0) {
+		dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d\n",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+	smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+	err = size;
+	dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx\n",
+		__func__, (unsigned long) buf);
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+	size_t size, loff_t *offset)
+{
+	int err = 0;
+	int num_retries = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument\n");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	do {
+		err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+		dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.\n",
+			__func__, err);
+	} while (err != 0 && num_retries++ < MAX_RETRIES);
+
+	if (err != 0) {
+		pr_err("rdbg: send_interrupt_to_subsystem failed\n");
+		err = -ECOMM;
+		goto bail;
+	}
+
+	send_interrupt_to_subsystem(rdbgdata);
+
+	err = size;
+
+bail:
+	return err;
+}
+
+static const struct file_operations rdbg_fops = {
+	.open = rdbg_open,
+	.read =  rdbg_read,
+	.write =  rdbg_write,
+	.release = rdbg_release,
+};
+
+static int register_smp2p_out(struct device *dev, char *node_name,
+			struct gpio_info *gpio_info_ptr)
+{
+	struct device_node *node = dev->of_node;
+
+	if (gpio_info_ptr) {
+		if (of_find_property(node, "qcom,smem-states", NULL)) {
+			gpio_info_ptr->smem_state =
+				    qcom_smem_state_get(dev, "rdbg-smp2p-out",
+						&gpio_info_ptr->smem_bit);
+			if (IS_ERR_OR_NULL(gpio_info_ptr->smem_state))
+				pr_err("rdbg: failed get smem state\n");
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int register_smp2p_in(struct device *dev, char *node_name,
+			struct gpio_info *gpio_info_ptr)
+{
+	int id = 0;
+	struct device_node *node = dev->of_node;
+
+	if (gpio_info_ptr) {
+		id = of_irq_get_byname(node, "rdbg-smp2p-in");
+		gpio_info_ptr->gpio_base_id = id;
+		gpio_info_ptr->irq_base_id = id;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int rdbg_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor = 0;
+	int err = 0;
+	char *rdbg_compatible_string = "qcom,smp2p-interrupt-rdbg-";
+	int max_len = strlen(rdbg_compatible_string) + strlen("xx-out");
+	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+	if (!node_name) {
+		err = -ENOMEM;
+		goto bail;
+	}
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (!proc_info[minor].name)
+			continue;
+		if (snprintf(node_name, max_len, "%s%d-out",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf\n");
+			err = -ENOMEM;
+			goto bail;
+		}
+
+		if (of_device_is_compatible(dev->of_node, node_name)) {
+			if (register_smp2p_out(dev, node_name,
+			  &rdbgdevice->rdbg_data[minor].out)) {
+				pr_err("register_smp2p_out failed for %s\n",
+				proc_info[minor].name);
+				err = -EINVAL;
+				goto bail;
+			}
+		}
+		if (snprintf(node_name, max_len, "%s%d-in",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf\n");
+			err = -ENOMEM;
+			goto bail;
+		}
+
+		if (of_device_is_compatible(dev->of_node, node_name)) {
+			if (register_smp2p_in(dev, node_name,
+			    &rdbgdevice->rdbg_data[minor].in)) {
+				pr_err("register_smp2p_in failed for %s\n",
+				proc_info[minor].name);
+			}
+		}
+	}
+bail:
+	kfree(node_name);
+	return err;
+}
+
+static const struct of_device_id rdbg_match_table[] = {
+	{ .compatible = "qcom,smp2p-interrupt-rdbg-2-out", },
+	{ .compatible = "qcom,smp2p-interrupt-rdbg-2-in", },
+	{ .compatible = "qcom,smp2p-interrupt-rdbg-5-out", },
+	{ .compatible = "qcom,smp2p-interrupt-rdbg-5-in", },
+	{}
+};
+
+static struct platform_driver rdbg_driver = {
+	.probe = rdbg_probe,
+	.driver = {
+		.name = "rdbg",
+		.of_match_table = rdbg_match_table,
+	},
+};
+
+static int __init rdbg_init(void)
+{
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor = 0;
+	int major = 0;
+	int minor_nodes_created = 0;
+	int err = 0;
+
+	if (rdbgdevice->num_devices < 1 ||
+		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+		pr_err("rgdb: invalid num_devices\n");
+		err = -EDOM;
+		goto bail;
+	}
+	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+		sizeof(struct rdbg_data), GFP_KERNEL);
+	if (!rdbgdevice->rdbg_data) {
+		err = -ENOMEM;
+		goto bail;
+	}
+	err = platform_driver_register(&rdbg_driver);
+	if (err)
+		goto bail;
+	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+		rdbgdevice->num_devices, "rdbgctl");
+	if (err) {
+		pr_err("Error in alloc_chrdev_region.\n");
+		goto data_bail;
+	}
+	major = MAJOR(rdbgdevice->dev_no);
+
+	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+	rdbgdevice->cdev.owner = THIS_MODULE;
+	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+		rdbgdevice->num_devices);
+	if (err) {
+		pr_err("Error in cdev_add\n");
+		goto chrdev_bail;
+	}
+	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+	if (IS_ERR(rdbgdevice->class)) {
+		err = PTR_ERR(rdbgdevice->class);
+		pr_err("Error in class_create\n");
+		goto cdev_bail;
+	}
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (!proc_info[minor].name)
+			continue;
+		rdbgdevice->rdbg_data[minor].device = device_create(
+			rdbgdevice->class, NULL, MKDEV(major, minor),
+			NULL, "%s", proc_info[minor].name);
+		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+			pr_err("Error in device_create\n");
+			goto device_bail;
+		}
+		rdbgdevice->rdbg_data[minor].device_initialized = true;
+		minor_nodes_created++;
+		dev_dbg(rdbgdevice->rdbg_data[minor].device,
+			"%s: created /dev/%s c %d %d'\n", __func__,
+			proc_info[minor].name, major, minor);
+	}
+	if (!minor_nodes_created) {
+		pr_err("No device tree entries found\n");
+		err = -EINVAL;
+		goto class_bail;
+	}
+
+	goto bail;
+
+device_bail:
+	for (--minor; minor >= 0; minor--) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized)
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+	}
+class_bail:
+	class_destroy(rdbgdevice->class);
+cdev_bail:
+	cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+	kfree(rdbgdevice->rdbg_data);
+bail:
+	return err;
+}
+module_init(rdbg_init);
+
+static void __exit rdbg_exit(void)
+{
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor;
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized) {
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+		}
+	}
+	class_destroy(rdbgdevice->class);
+	cdev_del(&rdbgdevice->cdev);
+	unregister_chrdev_region(rdbgdevice->dev_no, 1);
+	kfree(rdbgdevice->rdbg_data);
+}
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index 1b8fa9d..41b9f6c 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -37,8 +37,8 @@
  *
  * Returns size of the event. If it is an invalid event, returns 0.
  */
-static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
-				struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event,
+				   struct tcg_pcr_event *event_header)
 {
 	struct tcg_efi_specid_event *efispecid;
 	struct tcg_event_field *event_field;
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 32a8e27..cc4e642 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
 	if (status < 0)
 		return status;
 
+	/* The upper layer does not support incomplete sends. */
+	if (status != len)
+		return -E2BIG;
+
 	return 0;
 }
 
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5b5b5d7..c55f6ae 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -75,7 +75,7 @@ struct ports_driver_data {
 	/* All the console devices handled by this driver */
 	struct list_head consoles;
 };
-static struct ports_driver_data pdrvdata;
+static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
 
 static DEFINE_SPINLOCK(pdrvdata_lock);
 static DECLARE_COMPLETION(early_console_added);
@@ -1405,6 +1405,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 	port->async_queue = NULL;
 
 	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+	port->cons.vtermno = 0;
 
 	port->host_connected = port->guest_connected = false;
 	port->stats = (struct port_stats) { 0 };
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 71257a6..80ca7b1 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -606,7 +606,8 @@ static int clk_find_vdd_level(struct clk_core *clk, unsigned long rate)
 	 */
 	for (level = 0; level < clk->num_rate_max; level++)
 		if (DIV_ROUND_CLOSEST(rate, 1000) <=
-				DIV_ROUND_CLOSEST(clk->rate_max[level], 1000))
+				DIV_ROUND_CLOSEST(clk->rate_max[level], 1000) &&
+		    clk->rate_max[level] > 0)
 			break;
 
 	if (level == clk->num_rate_max) {
@@ -2393,6 +2394,8 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 	if (clk_core_rate_is_protected(core))
 		return -EBUSY;
 
+	set_rate_nesting_count++;
+
 	/* calculate new rates and get the topmost changed clock */
 	top = clk_calc_new_rates(core, req_rate);
 	if (!top) {
@@ -2416,7 +2419,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 	}
 
 	/* change the rates */
-	set_rate_nesting_count++;
 	ret = clk_change_rate(top);
 	set_rate_nesting_count--;
 	if (ret) {
@@ -2444,6 +2446,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 	return ret;
 
 pre_rate_change_err:
+	set_rate_nesting_count--;
 	if (set_rate_nesting_count == 0) {
 		clk_unvote_new_rate_vdd();
 		clk_cleanup_vdd_votes();
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index f404199..794eeff 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
 	  "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
 	{ HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
 	  "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
+	/*
+	 * clk_gate_ufs_subsys is a system bus clock, mark it as critical
+	 * clock and keep it on for system suspend and resume.
+	 */
 	{ HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
-	  CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+	  CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
 	{ HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
 	  CLK_SET_RATE_PARENT, 0x50, 28, 0, },
 	{ HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index f54e401..18842d6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
 	return ((unsigned long)vco + postdiv - 1) / postdiv;
 }
 
+static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
+{
+	u32 r;
+
+	if (pll->tuner_en_addr) {
+		r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+		writel(r, pll->tuner_en_addr);
+	} else if (pll->tuner_addr) {
+		r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+		writel(r, pll->tuner_addr);
+	}
+}
+
+static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
+{
+	u32 r;
+
+	if (pll->tuner_en_addr) {
+		r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+		writel(r, pll->tuner_en_addr);
+	} else if (pll->tuner_addr) {
+		r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+		writel(r, pll->tuner_addr);
+	}
+}
+
 static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
 		int postdiv)
 {
@@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
 
 	pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
 
+	/* disable tuner */
+	__mtk_pll_tuner_disable(pll);
+
 	/* set postdiv */
 	val = readl(pll->pd_addr);
 	val &= ~(POSTDIV_MASK << pll->data->pd_shift);
@@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
 	if (pll->tuner_addr)
 		writel(con1 + 1, pll->tuner_addr);
 
+	/* restore tuner_en */
+	__mtk_pll_tuner_enable(pll);
+
 	if (pll_en)
 		udelay(20);
 }
@@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
 	r |= pll->data->en_mask;
 	writel(r, pll->base_addr + REG_CON0);
 
-	if (pll->tuner_en_addr) {
-		r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
-		writel(r, pll->tuner_en_addr);
-	} else if (pll->tuner_addr) {
-		r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
-		writel(r, pll->tuner_addr);
-	}
+	__mtk_pll_tuner_enable(pll);
 
 	udelay(20);
 
@@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
 		writel(r, pll->base_addr + REG_CON0);
 	}
 
-	if (pll->tuner_en_addr) {
-		r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
-		writel(r, pll->tuner_en_addr);
-	} else if (pll->tuner_addr) {
-		r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
-		writel(r, pll->tuner_addr);
-	}
+	__mtk_pll_tuner_disable(pll);
 
 	r = readl(pll->base_addr + REG_CON0);
 	r &= ~CON0_BASE_EN;
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 6628ffa..4d4f6d8 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -1571,6 +1571,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
 		.offset = HHI_VDEC_CLK_CNTL,
 		.shift = 0,
 		.width = 7,
+		.flags = CLK_DIVIDER_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "vdec_1_div",
@@ -1616,6 +1617,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
 		.offset = HHI_VDEC2_CLK_CNTL,
 		.shift = 16,
 		.width = 7,
+		.flags = CLK_DIVIDER_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "vdec_hevc_div",
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 46498ee..a72c058 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,5 +1,3 @@
-source "drivers/clk/qcom/mdss/Kconfig"
-
 config QCOM_GDSC
 	bool
 	select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index f64d82d..40dca85 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -60,5 +60,3 @@
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
 obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
-
-obj-y += mdss/
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index 282c482..95637b2 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -66,7 +66,6 @@ static struct msm_bus_scale_pdata clk_debugfs_scale_table = {
 
 enum {
 	P_BI_TCXO,
-	P_BI_TCXO_MX,
 	P_CAM_CC_PLL0_OUT_EVEN,
 	P_CAM_CC_PLL0_OUT_MAIN,
 	P_CAM_CC_PLL0_OUT_ODD,
@@ -99,7 +98,7 @@ static const char * const cam_cc_parent_names_0[] = {
 };
 
 static const struct parent_map cam_cc_parent_map_1[] = {
-	{ P_BI_TCXO_MX, 0 },
+	{ P_BI_TCXO, 0 },
 	{ P_CAM_CC_PLL2_OUT_AUX2, 3 },
 	{ P_CAM_CC_PLL2_OUT_EARLY, 5 },
 	{ P_CORE_BI_PLL_TEST_SE, 7 },
@@ -322,6 +321,19 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
 	.user_ctl_hi1_val = 0x00000000,
 };
 
+static const struct alpha_pll_config cam_cc_pll2_config_sm8250_v2 = {
+	.l = 0x4B,
+	.cal_l = 0x4B,
+	.alpha = 0x0,
+	.config_ctl_val = 0x08200920,
+	.config_ctl_hi_val = 0x05008011,
+	.config_ctl_hi1_val = 0x00000000,
+	.test_ctl_val = 0x00010000,
+	.user_ctl_val = 0x00000100,
+	.user_ctl_hi_val = 0x00000000,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
 static struct clk_alpha_pll cam_cc_pll2 = {
 	.offset = 0x2000,
 	.vco_table = zonda_vco,
@@ -366,9 +378,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
 };
 
 static const struct alpha_pll_config cam_cc_pll3_config = {
-	.l = 0x24,
+	.l = 0xF,
 	.cal_l = 0x44,
-	.alpha = 0x7555,
+	.alpha = 0xA000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -421,9 +433,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
 };
 
 static const struct alpha_pll_config cam_cc_pll4_config = {
-	.l = 0x24,
+	.l = 0xF,
 	.cal_l = 0x44,
-	.alpha = 0x7555,
+	.alpha = 0xA000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -498,6 +510,16 @@ static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_bps_clk_src = {
 	.cmd_rcgr = 0x7010,
 	.mnd_width = 0,
@@ -505,6 +527,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_bps_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_bps_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -535,6 +558,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_camnoc_axi_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -561,6 +585,8 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cci_0_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -580,6 +606,8 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cci_1_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -605,6 +633,8 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cphy_rx_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -630,6 +660,8 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi0phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -649,6 +681,8 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi1phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -668,6 +702,8 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi2phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -687,6 +723,8 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi3phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -706,6 +744,8 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi4phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -725,6 +765,8 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi5phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -754,6 +796,8 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_fast_ahb_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -778,6 +822,14 @@ static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_fd_core_clk_src = {
 	.cmd_rcgr = 0xc098,
 	.mnd_width = 0,
@@ -785,6 +837,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_fd_core_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -807,6 +860,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_icp_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -823,11 +877,17 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(200000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(250000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(425000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(525000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(720000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(630000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(680000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	{ }
 };
 
@@ -838,6 +898,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
 	.parent_map = cam_cc_parent_map_2,
 	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_0_clk_src",
 		.parent_names = cam_cc_parent_names_2,
@@ -868,6 +929,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_0_csid_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -882,11 +944,17 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(200000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(250000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(425000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(525000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(720000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(680000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	{ }
 };
 
@@ -897,6 +965,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
 	.parent_map = cam_cc_parent_map_3,
 	.freq_tbl = ftbl_cam_cc_ife_1_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_1_clk_src",
 		.parent_names = cam_cc_parent_names_3,
@@ -920,6 +989,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_1_csid_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -940,12 +1010,21 @@ static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
 	.cmd_rcgr = 0xc000,
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_lite_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -967,6 +1046,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_lite_csid_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -996,6 +1076,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
 	.parent_map = cam_cc_parent_map_4,
 	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ipe_0_clk_src",
 		.parent_names = cam_cc_parent_names_4,
@@ -1019,6 +1100,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_bps_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_jpeg_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -1036,18 +1118,27 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
-	F(19200000, P_BI_TCXO_MX, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
 	F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 6),
+	F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 1, 1, 21),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_mclk0_clk_src = {
 	.cmd_rcgr = 0x5000,
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk0_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1067,6 +1158,8 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk1_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1086,6 +1179,8 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk2_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1105,6 +1200,8 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk3_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1124,6 +1221,8 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk4_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1143,6 +1242,8 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk5_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1162,6 +1263,8 @@ static struct clk_rcg2 cam_cc_mclk6_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk6_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1181,6 +1284,8 @@ static struct clk_rcg2 cam_cc_sbi_csid_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_sbi_csid_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -1205,6 +1310,8 @@ static struct clk_rcg2 cam_cc_sleep_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_5,
 	.freq_tbl = ftbl_cam_cc_sleep_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_sleep_clk_src",
 		.parent_names = cam_cc_parent_names_5,
@@ -1231,6 +1338,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_slow_ahb_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -1255,6 +1363,8 @@ static struct clk_rcg2 cam_cc_xo_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_6,
 	.freq_tbl = ftbl_cam_cc_xo_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_xo_clk_src",
 		.parent_names = cam_cc_parent_names_6_ao,
@@ -2593,10 +2703,31 @@ static const struct qcom_cc_desc cam_cc_kona_desc = {
 
 static const struct of_device_id cam_cc_kona_match_table[] = {
 	{ .compatible = "qcom,camcc-kona" },
+	{ .compatible = "qcom,camcc-kona-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cam_cc_kona_match_table);
 
+static void cam_cc_kona_fixup_konav2(struct regmap *regmap)
+{
+	clk_zonda_pll_configure(&cam_cc_pll2, regmap,
+		&cam_cc_pll2_config_sm8250_v2);
+	cam_cc_bps_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
+	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
+	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
+	cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
+	cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
+	cam_cc_ife_lite_clk_src.freq_tbl = ftbl_cam_cc_ife_lite_clk_src_kona_v2;
+	cam_cc_jpeg_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
+	cam_cc_mclk0_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk1_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk2_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk3_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk4_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk5_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+	cam_cc_mclk6_clk_src.freq_tbl = ftbl_cam_cc_mclk0_clk_src_kona_v2;
+}
+
 static int cam_cc_kona_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
@@ -2648,10 +2779,16 @@ static int cam_cc_kona_probe(struct platform_device *pdev)
 
 	clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
 	clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
-	clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
 	clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
 	clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				"qcom,camcc-kona-v2"))
+		cam_cc_kona_fixup_konav2(regmap);
+	else
+		clk_zonda_pll_configure(&cam_cc_pll2, regmap,
+					&cam_cc_pll2_config);
+
 	ret = qcom_cc_really_probe(pdev, &cam_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index f1eb220..125e082 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -64,7 +64,7 @@ static const char * const cam_cc_parent_names_0[] = {
 };
 
 static const struct parent_map cam_cc_parent_map_1[] = {
-	{ P_BI_TCXO_MX, 0 },
+	{ P_BI_TCXO, 0 },
 	{ P_CAM_CC_PLL2_OUT_AUX, 5 },
 	{ P_CORE_BI_PLL_TEST_SE, 7 },
 };
@@ -197,10 +197,11 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
 	.alpha = 0x8000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000007,
+	.config_ctl_hi1_val = 0x329A699C,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll cam_cc_pll0 = {
@@ -273,10 +274,11 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
 	.alpha = 0x4000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000007,
+	.config_ctl_hi1_val = 0x329A699C,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll cam_cc_pll1 = {
@@ -327,9 +329,12 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
 	.cal_l = 0x32,
 	.alpha = 0x0,
 	.config_ctl_val = 0x08200920,
-	.config_ctl_hi_val = 0x05008011,
+	.config_ctl_hi_val = 0x15008001,
 	.config_ctl_hi1_val = 0x00000000,
-	.user_ctl_val = 0x0000010F,
+	.user_ctl_val = 0xE0000101,
+	.test_ctl_val = 0x00010000,
+	.test_ctl_hi_val = 0x00000000,
+	.test_ctl_hi1_val = 0x00000000,
 };
 
 static struct clk_alpha_pll cam_cc_pll2 = {
@@ -402,10 +407,11 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
 	.alpha = 0x9555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000007,
+	.config_ctl_hi1_val = 0x329A699C,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll cam_cc_pll3 = {
@@ -457,10 +463,11 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
 	.alpha = 0x9555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000007,
+	.config_ctl_hi1_val = 0x329A699C,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll cam_cc_pll4 = {
@@ -585,6 +592,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cci_0_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -604,6 +612,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cci_1_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -631,6 +640,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_2,
 	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_cphy_rx_clk_src",
 		.parent_names = cam_cc_parent_names_2,
@@ -658,6 +668,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi0phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -677,6 +688,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi1phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -696,6 +708,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi2phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -715,6 +728,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi3phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -744,6 +758,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_fast_ahb_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -952,6 +967,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
 	.freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_ife_lite_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -1079,7 +1095,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
-	F(19200000, P_BI_TCXO_MX, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(24000000, P_CAM_CC_PLL2_OUT_AUX, 1, 1, 20),
 	F(34285714, P_CAM_CC_PLL2_OUT_AUX, 14, 0, 0),
 	{ }
@@ -1091,6 +1107,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk0_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1110,6 +1127,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk1_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1129,6 +1147,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk2_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1148,6 +1167,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk3_clk_src",
 		.parent_names = cam_cc_parent_names_1,
@@ -1167,6 +1187,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_1,
 	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_mclk4_clk_src",
 		.parent_names = cam_cc_parent_names_1,
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f28c5a9..a7e2109 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -158,10 +158,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
 
 /* LUCID PLL specific settings and offsets */
 #define LUCID_PLL_CAL_VAL	0x44
-#define LUCID_PCAL_DONE		BIT(26)
+#define LUCID_PCAL_DONE		BIT(27)
 
 /* ZONDA PLL specific offsets */
-#define ZONDA_PLL_OUT_MASK	0x9
+#define ZONDA_PLL_OUT_MASK	0xF
 #define ZONDA_STAY_IN_CFA	BIT(16)
 #define ZONDA_PLL_FREQ_LOCK_DET	BIT(29)
 
@@ -1720,7 +1720,7 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
 	int ret;
 
 	/* Return early if calibration is not needed. */
-	regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
+	regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
 	if (regval & LUCID_PCAL_DONE)
 		return 0;
 
@@ -1850,7 +1850,8 @@ static void clk_alpha_pll_lucid_list_registers(struct seq_file *f,
 		{"PLL_CONFIG_CTL_U", 0x1c},
 		{"PLL_CONFIG_CTL_U1", 0x20},
 		{"PLL_TEST_CTL", 0x24},
-		{"PLL_TEST_CTL_U1", 0x28},
+		{"PLL_TEST_CTL_U", 0x28},
+		{"PLL_TEST_CTL_U1", 0x2C},
 		{"PLL_STATUS", 0x30},
 		{"PLL_ALPHA_VAL", 0x40},
 	};
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 247f456..46e2e18 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -115,6 +115,12 @@ static int clk_branch_toggle(struct clk_hw *hw, bool en,
 		clk_disable_regmap(hw);
 	}
 
+	/*
+	 * Make sure enable/disable request goes through before waiting
+	 * for CLK_OFF status to get updated.
+	 */
+	mb();
+
 	return clk_branch_wait(br, en, check_halt);
 }
 
@@ -126,34 +132,36 @@ static int clk_branch_enable(struct clk_hw *hw)
 static int clk_cbcr_set_flags(struct regmap *regmap, unsigned int reg,
 				unsigned long flags)
 {
-	u32 cbcr_val;
-
-	regmap_read(regmap, reg, &cbcr_val);
+	u32 cbcr_val = 0;
+	u32 cbcr_mask;
+	int ret;
 
 	switch (flags) {
 	case CLKFLAG_PERIPH_OFF_SET:
-		cbcr_val |= BIT(12);
+		cbcr_val = cbcr_mask = BIT(12);
 		break;
 	case CLKFLAG_PERIPH_OFF_CLEAR:
-		cbcr_val &= ~BIT(12);
+		cbcr_mask = BIT(12);
 		break;
 	case CLKFLAG_RETAIN_PERIPH:
-		cbcr_val |= BIT(13);
+		cbcr_val = cbcr_mask = BIT(13);
 		break;
 	case CLKFLAG_NORETAIN_PERIPH:
-		cbcr_val &= ~BIT(13);
+		cbcr_mask = BIT(13);
 		break;
 	case CLKFLAG_RETAIN_MEM:
-		cbcr_val |= BIT(14);
+		cbcr_val = cbcr_mask = BIT(14);
 		break;
 	case CLKFLAG_NORETAIN_MEM:
-		cbcr_val &= ~BIT(14);
+		cbcr_mask = BIT(14);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	regmap_write(regmap, reg, cbcr_val);
+	ret = regmap_update_bits(regmap, reg, cbcr_mask, cbcr_val);
+	if (ret)
+		return ret;
 
 	/* Make sure power is enabled/disabled before returning. */
 	mb();
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
index 72fd771..e7af053 100644
--- a/drivers/clk/qcom/clk-debug.c
+++ b/drivers/clk/qcom/clk-debug.c
@@ -295,6 +295,10 @@ static int clk_debug_read_period(void *data, u64 *val)
 	ret = clk_find_and_set_parent(measure, hw);
 	if (!ret) {
 		parent = clk_hw_get_parent(measure);
+		if (!parent) {
+			mutex_unlock(&clk_debug_lock);
+			return -EINVAL;
+		}
 		mux = to_clk_measure(parent);
 		regmap_read(mux->regmap, mux->period_offset, &regval);
 		if (!regval) {
@@ -339,6 +343,8 @@ void clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry)
 	}
 
 	parent = clk_hw_get_parent(measure);
+	if (!parent)
+		return;
 	meas_parent = to_clk_measure(parent);
 
 	if (parent->init->flags & CLK_IS_MEASURE && !meas_parent->mux_sels) {
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 17c4ced..1a87ad0 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,6 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2014, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2017-2019, The Linux Foundation.
+ * All rights reserved.
  */
 
 #include <linux/export.h>
@@ -235,9 +236,11 @@ int qcom_cc_really_probe(struct platform_device *pdev,
 	reset->regmap = regmap;
 	reset->reset_map = desc->resets;
 
-	ret = devm_reset_controller_register(dev, &reset->rcdev);
-	if (ret)
-		return ret;
+	if (desc->num_resets) {
+		ret = devm_reset_controller_register(dev, &reset->rcdev);
+		if (ret)
+			return ret;
+	}
 
 	if (desc->gdscs && desc->num_gdscs) {
 		scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index 6019e57..35a7d34 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -149,10 +149,11 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
 	.alpha = 0xE000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll disp_cc_pll0 = {
@@ -253,6 +254,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_byte0_clk_src",
 		.parent_names = disp_cc_parent_names_0,
@@ -273,6 +275,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_byte1_clk_src",
 		.parent_names = disp_cc_parent_names_0,
@@ -326,6 +329,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_1,
 	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_crypto_clk_src",
 		.parent_names = disp_cc_parent_names_1,
@@ -355,6 +359,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_1,
 	.freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_link_clk_src",
 		.parent_names = disp_cc_parent_names_1,
@@ -375,6 +380,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_1,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel1_clk_src",
 		.parent_names = disp_cc_parent_names_1,
@@ -394,6 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_1,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel_clk_src",
 		.parent_names = disp_cc_parent_names_1,
@@ -414,6 +421,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_esc0_clk_src",
 		.parent_names = disp_cc_parent_names_0,
@@ -432,6 +440,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_esc1_clk_src",
 		.parent_names = disp_cc_parent_names_0,
@@ -480,6 +489,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_4,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_pclk0_clk_src",
 		.parent_names = disp_cc_parent_names_4,
@@ -500,6 +510,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_4,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_pclk1_clk_src",
 		.parent_names = disp_cc_parent_names_4,
@@ -544,6 +555,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_2,
 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_vsync_clk_src",
 		.parent_names = disp_cc_parent_names_2,
diff --git a/drivers/clk/qcom/gcc-kona.c b/drivers/clk/qcom/gcc-kona.c
index 7347cff..87bd776 100644
--- a/drivers/clk/qcom/gcc-kona.c
+++ b/drivers/clk/qcom/gcc-kona.c
@@ -25,6 +25,7 @@
 #include "clk-rcg.h"
 #include "clk-regmap.h"
 #include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
 #include "common.h"
 #include "reset.h"
 #include "vdd-level.h"
@@ -45,6 +46,8 @@ enum {
 	P_GPLL4_OUT_MAIN,
 	P_GPLL9_OUT_MAIN,
 	P_SLEEP_CLK,
+	P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+	P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK,
 };
 
 static const struct parent_map gcc_parent_map_0[] = {
@@ -139,6 +142,30 @@ static const char * const gcc_parent_names_5[] = {
 	"core_bi_pll_test_se",
 };
 
+static const struct parent_map gcc_parent_map_6[] = {
+	{ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 1 },
+	{ P_BI_TCXO, 2 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+	"usb3_phy_wrapper_gcc_usb30_pipe_clk",
+	"core_bi_pll_test_se",
+	"bi_tcxo",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+	{ P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 1 },
+	{ P_BI_TCXO, 2 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+	"usb3_uni_phy_sec_gcc_usb30_pipe_clk",
+	"core_bi_pll_test_se",
+	"bi_tcxo",
+};
+
 static struct pll_vco lucid_vco[] = {
 	{ 249600000, 2000000000, 0 },
 };
@@ -1408,6 +1435,61 @@ static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
 	},
 };
 
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+	.reg = 0xf060,
+	.shift = 0,
+	.width = 2,
+	.parent_map = gcc_parent_map_6,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_pipe_clk_src",
+			.parent_names = gcc_parent_names_6,
+			.num_parents = 3,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_dummy usb3_phy_wrapper_gcc_usb30_pipe_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "usb3_phy_wrapper_gcc_usb30_pipe_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+
+static struct clk_dummy core_bi_pll_test_se = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "core_bi_pll_test_se",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+	.reg = 0x10060,
+	.shift = 0,
+	.width = 2,
+	.parent_map = gcc_parent_map_7,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_sec_phy_pipe_clk_src",
+			.parent_names = gcc_parent_names_7,
+			.num_parents = 3,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_dummy usb3_uni_phy_sec_gcc_usb30_pipe_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "usb3_uni_phy_sec_gcc_usb30_pipe_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
 static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
 	.halt_reg = 0x9000c,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -3751,6 +3833,11 @@ static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_usb3_prim_phy_pipe_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_pipe_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -3813,6 +3900,11 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_usb3_sec_phy_pipe_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_sec_phy_pipe_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -4083,11 +4175,13 @@ static struct clk_regmap *gcc_kona_clocks[] = {
 	[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
 	[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
 	[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+	[GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
 	[GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr,
 	[GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
 	[GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
 	[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
 	[GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+	[GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
 	[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
 	[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
 	[GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
@@ -4145,6 +4239,14 @@ static const struct qcom_reset_map gcc_kona_resets[] = {
 	[GCC_VIDEO_AXI1_CLK_ARES] = { 0xb028, 2 },
 };
 
+struct clk_hw *gcc_kona_hws[] = {
+	[CORE_BI_PLL_TEST_SE] = &core_bi_pll_test_se.hw,
+	[USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK] =
+		&usb3_phy_wrapper_gcc_usb30_pipe_clk.hw,
+	[USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK] =
+		&usb3_uni_phy_sec_gcc_usb30_pipe_clk.hw,
+};
+
 static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
 	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
 	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
@@ -4182,6 +4284,8 @@ static const struct qcom_cc_desc gcc_kona_desc = {
 	.num_clks = ARRAY_SIZE(gcc_kona_clocks),
 	.resets = gcc_kona_resets,
 	.num_resets = ARRAY_SIZE(gcc_kona_resets),
+	.hwclks = gcc_kona_hws,
+	.num_hwclks = ARRAY_SIZE(gcc_kona_hws),
 };
 
 static const struct of_device_id gcc_kona_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index 52faf73..f5ec81f 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -1124,7 +1124,7 @@ static struct clk_branch gcc_cpuss_rbcpr_clk = {
 
 static struct clk_branch gcc_ddrss_gpu_axi_clk = {
 	.halt_reg = 0x71154,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_VOTED,
 	.clkr = {
 		.enable_reg = 0x71154,
 		.enable_mask = BIT(0),
@@ -1327,7 +1327,7 @@ static struct clk_branch gcc_gpu_iref_clk = {
 
 static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
 	.halt_reg = 0x7100c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_VOTED,
 	.clkr = {
 		.enable_reg = 0x7100c,
 		.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 9f0ae40..cd937ce 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1101,6 +1101,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
 
 static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
 	F(19200000, P_XO, 1, 0, 0),
+	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
 	F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
 	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
 	{ }
diff --git a/drivers/clk/qcom/gpucc-kona.c b/drivers/clk/qcom/gpucc-kona.c
index 0a5562c..d40b2f0 100644
--- a/drivers/clk/qcom/gpucc-kona.c
+++ b/drivers/clk/qcom/gpucc-kona.c
@@ -113,6 +113,8 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
 	.hid_width = 5,
 	.parent_map = gpu_cc_parent_map_0,
 	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpu_cc_gmu_clk_src",
 		.parent_names = gpu_cc_parent_names_0,
@@ -336,6 +338,19 @@ static struct clk_branch gpu_cc_sleep_clk = {
 	},
 };
 
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+	.halt_reg = 0x5000,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x5000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			 .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+			 .ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_regmap *gpu_cc_kona_clocks[] = {
 	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
 	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
@@ -353,6 +368,7 @@ static struct clk_regmap *gpu_cc_kona_clocks[] = {
 	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
 	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
 	[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+	[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
 };
 
 static const struct qcom_reset_map gpu_cc_kona_resets[] = {
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
index d854434..310d4bb 100644
--- a/drivers/clk/qcom/gpucc-lito.c
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -63,20 +63,6 @@ static const char * const gpu_cc_parent_names_0[] = {
 	"core_bi_pll_test_se",
 };
 
-static const struct parent_map gpu_cc_parent_map_1[] = {
-	{ P_BI_TCXO, 0 },
-	{ P_GPLL0_OUT_MAIN, 5 },
-	{ P_GPLL0_OUT_MAIN_DIV, 6 },
-	{ P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gpu_cc_parent_names_1[] = {
-	"bi_tcxo",
-	"gcc_gpu_gpll0_clk_src",
-	"gcc_gpu_gpll0_div_clk_src",
-	"core_bi_pll_test_se",
-};
-
 static struct pll_vco lucid_vco[] = {
 	{ 249600000, 2000000000, 0 },
 };
@@ -87,10 +73,11 @@ static const struct alpha_pll_config gpu_cc_pll1_config = {
 	.alpha = 0xAAA,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll gpu_cc_pll1 = {
@@ -128,6 +115,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
 	.hid_width = 5,
 	.parent_map = gpu_cc_parent_map_0,
 	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpu_cc_gmu_clk_src",
 		.parent_names = gpu_cc_parent_names_0,
@@ -142,40 +130,15 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
 	},
 };
 
-static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
-	F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
-	.cmd_rcgr = 0x10b0,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gpu_cc_parent_map_1,
-	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gpu_cc_rbcpr_clk_src",
-		.parent_names = gpu_cc_parent_names_1,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_cx,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_LOWER] = 19200000,
-			[VDD_NOMINAL] = 50000000},
-	},
-};
-
 static struct clk_branch gpu_cc_ahb_clk = {
 	.halt_reg = 0x1078,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x1078,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -295,24 +258,6 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
 	},
 };
 
-static struct clk_branch gpu_cc_rbcpr_clk = {
-	.halt_reg = 0x10f0,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f0,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_clk",
-			.parent_names = (const char *[]){
-				"gpu_cc_rbcpr_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gpu_cc_sleep_clk = {
 	.halt_reg = 0x1090,
 	.halt_check = BRANCH_HALT_DELAY,
@@ -326,6 +271,19 @@ static struct clk_branch gpu_cc_sleep_clk = {
 	},
 };
 
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+	.halt_reg = 0x5000,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x5000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			 .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+			 .ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_regmap *gpu_cc_lito_clocks[] = {
 	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
 	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
@@ -338,9 +296,8 @@ static struct clk_regmap *gpu_cc_lito_clocks[] = {
 	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
 	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
 	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
-	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
-	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
 	[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+	[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
 };
 
 static const struct regmap_config gpu_cc_lito_regmap_config = {
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
deleted file mode 100644
index 3b5b835..0000000
--- a/drivers/clk/qcom/mdss/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config QCOM_MDSS_PLL
-	bool "MDSS pll programming"
-	depends on COMMON_CLK_QCOM
-	help
-	It provides support for DSI, eDP and HDMI interface pll programming on MDSS
-	hardware. It also handles the pll specific resources and turn them on/off when
-	mdss pll client tries to enable/disable pll clocks.
-
-config QCOM_MDSS_DP_PLL
-	bool "MDSS DisplayPort PLL programming"
-	depends on QCOM_MDSS_PLL
-	default n
-	help
-	This flag enables the modules for DisplayPort (DP) PLL programming and is
-	required for all targets that support DP.
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
deleted file mode 100644
index a6d69af..0000000
--- a/drivers/clk/qcom/mdss/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-7nm.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28lpm.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28nm-util.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
-obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-hdmi-pll-28lpm.o
-obj-$(CONFIG_QCOM_MDSS_DP_PLL) += mdss-dp-pll-7nm.o \
-	mdss-dp-pll-7nm-util.o \
-	mdss-dp-pll-10nm.o \
-	mdss-dp-pll-10nm-util.o \
-	mdss-dp-pll-14nm.o \
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
deleted file mode 100644
index e0c6600..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
+++ /dev/null
@@ -1,757 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/usb/usbpd.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-10nm.h"
-
-#define DP_PHY_REVISION_ID0			0x0000
-#define DP_PHY_REVISION_ID1			0x0004
-#define DP_PHY_REVISION_ID2			0x0008
-#define DP_PHY_REVISION_ID3			0x000C
-
-#define DP_PHY_CFG				0x0010
-#define DP_PHY_PD_CTL				0x0018
-#define DP_PHY_MODE				0x001C
-
-#define DP_PHY_AUX_CFG0				0x0020
-#define DP_PHY_AUX_CFG1				0x0024
-#define DP_PHY_AUX_CFG2				0x0028
-#define DP_PHY_AUX_CFG3				0x002C
-#define DP_PHY_AUX_CFG4				0x0030
-#define DP_PHY_AUX_CFG5				0x0034
-#define DP_PHY_AUX_CFG6				0x0038
-#define DP_PHY_AUX_CFG7				0x003C
-#define DP_PHY_AUX_CFG8				0x0040
-#define DP_PHY_AUX_CFG9				0x0044
-#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
-#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
-#define DP_PHY_AUX_BIST_CFG			0x0050
-
-#define DP_PHY_VCO_DIV				0x0064
-#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
-#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
-
-#define DP_PHY_SPARE0				0x00AC
-#define DP_PHY_STATUS				0x00C0
-
-/* Tx registers */
-#define TXn_BIST_MODE_LANENO			0x0000
-#define TXn_CLKBUF_ENABLE			0x0008
-#define TXn_TX_EMP_POST1_LVL			0x000C
-
-#define TXn_TX_DRV_LVL				0x001C
-
-#define TXn_RESET_TSYNC_EN			0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
-#define TXn_TX_BAND				0x002C
-#define TXn_SLEW_CNTL				0x0030
-#define TXn_INTERFACE_SELECT			0x0034
-
-#define TXn_RES_CODE_LANE_TX			0x003C
-#define TXn_RES_CODE_LANE_RX			0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
-
-#define TXn_DEBUG_BUS_SEL			0x0058
-#define TXn_TRANSCEIVER_BIAS_EN			0x005C
-#define TXn_HIGHZ_DRVR_EN			0x0060
-#define TXn_TX_POL_INV				0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
-
-#define TXn_LANE_MODE_1				0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN			0x00C0
-#define TXn_TX_INTERFACE_MODE			0x00C4
-
-#define TXn_VMODE_CTRL1				0x00F0
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1			0x0000
-#define QSERDES_COM_ATB_SEL2			0x0004
-#define QSERDES_COM_FREQ_UPDATE			0x0008
-#define QSERDES_COM_BG_TIMER			0x000C
-#define QSERDES_COM_SSC_EN_CENTER		0x0010
-#define QSERDES_COM_SSC_ADJ_PER1		0x0014
-#define QSERDES_COM_SSC_ADJ_PER2		0x0018
-#define QSERDES_COM_SSC_PER1			0x001C
-#define QSERDES_COM_SSC_PER2			0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
-#define QSERDES_COM_POST_DIV			0x002C
-#define QSERDES_COM_POST_DIV_MUX		0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
-#define QSERDES_COM_CLK_ENABLE1			0x0038
-#define QSERDES_COM_SYS_CLK_CTRL		0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
-#define QSERDES_COM_PLL_EN			0x0044
-#define QSERDES_COM_PLL_IVCO			0x0048
-#define QSERDES_COM_CMN_IETRIM			0x004C
-#define QSERDES_COM_CMN_IPTRIM			0x0050
-
-#define QSERDES_COM_CP_CTRL_MODE0		0x0060
-#define QSERDES_COM_CP_CTRL_MODE1		0x0064
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
-#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
-#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
-#define QSERDES_COM_PLL_CNTRL			0x0078
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
-#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
-#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
-#define QSERDES_COM_RESETSM_CNTRL		0x0088
-#define QSERDES_COM_RESETSM_CNTRL2		0x008C
-#define QSERDES_COM_LOCK_CMP_EN			0x0090
-#define QSERDES_COM_LOCK_CMP_CFG		0x0094
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
-#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
-
-#define QSERDES_COM_DEC_START_MODE0		0x00B0
-#define QSERDES_COM_DEC_START_MODE1		0x00B4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
-#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
-#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
-#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
-#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
-#define QSERDES_COM_INTEGLOOP_EN		0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
-#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
-#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
-
-#define QSERDES_COM_CMN_STATUS			0x0124
-#define QSERDES_COM_RESET_SM_STATUS		0x0128
-
-#define QSERDES_COM_CLK_SEL			0x0138
-#define QSERDES_COM_HSCLK_SEL			0x013C
-
-#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
-
-#define QSERDES_COM_SW_RESET			0x0150
-#define QSERDES_COM_CORE_CLK_EN			0x0154
-#define QSERDES_COM_C_READY_STATUS		0x0158
-#define QSERDES_COM_CMN_CONFIG			0x015C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
-
-#define DP_PHY_PLL_POLL_SLEEP_US		500
-#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
-
-#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
-#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
-#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
-
-int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
-{
-	struct mdss_pll_resources *dp_res = context;
-	int rc;
-	u32 auxclk_div;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= ~0x03;	/* bits 0 to 1 */
-
-	if (val == 0) /* mux parent index = 0 */
-		auxclk_div |= 1;
-	else if (val == 1) /* mux parent index = 1 */
-		auxclk_div |= 2;
-	else if (val == 2) /* mux parent index = 2 */
-		auxclk_div |= 0;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_VCO_DIV, auxclk_div);
-	/* Make sure the PHY registers writes are done */
-	wmb();
-	pr_debug("%s: mux=%d auxclk_div=%x\n", __func__, val, auxclk_div);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return 0;
-}
-
-int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
-{
-	int rc;
-	u32 auxclk_div = 0;
-	struct mdss_pll_resources *dp_res = context;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= 0x03;
-
-	if (auxclk_div == 1) /* Default divider */
-		*val = 0;
-	else if (auxclk_div == 2)
-		*val = 1;
-	else if (auxclk_div == 0)
-		*val = 2;
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	pr_debug("%s: auxclk_div=%d, val=%d\n", __func__, auxclk_div, *val);
-
-	return 0;
-}
-
-static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
-		unsigned long rate)
-{
-	struct mdss_pll_resources *dp_res = pdb->pll;
-	u32 spare_value = 0;
-
-	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
-	pdb->lane_cnt = spare_value & 0x0F;
-	pdb->orientation = (spare_value & 0xF0) >> 4;
-
-	pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
-			__func__, spare_value, pdb->lane_cnt, pdb->orientation);
-
-	switch (rate) {
-	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_9720MHZDIV1000);
-		pdb->hsclk_sel = 0x0c;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->integloop_gain0_mode0 = 0x3f;
-		pdb->integloop_gain1_mode0 = 0x00;
-		pdb->vco_tune_map = 0x00;
-		pdb->lock_cmp1_mode0 = 0x6f;
-		pdb->lock_cmp2_mode0 = 0x08;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x00;
-		break;
-	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x04;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->integloop_gain0_mode0 = 0x3f;
-		pdb->integloop_gain1_mode0 = 0x00;
-		pdb->vco_tune_map = 0x00;
-		pdb->lock_cmp1_mode0 = 0x0f;
-		pdb->lock_cmp2_mode0 = 0x0e;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x00;
-		break;
-	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x00;
-		pdb->dec_start_mode0 = 0x8c;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x00;
-		pdb->div_frac_start3_mode0 = 0x0a;
-		pdb->integloop_gain0_mode0 = 0x3f;
-		pdb->integloop_gain1_mode0 = 0x00;
-		pdb->vco_tune_map = 0x00;
-		pdb->lock_cmp1_mode0 = 0x1f;
-		pdb->lock_cmp2_mode0 = 0x1c;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x2;
-		pdb->lock_cmp_en = 0x00;
-		break;
-	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_8100MHZDIV1000);
-		pdb->hsclk_sel = 0x03;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->integloop_gain0_mode0 = 0x3f;
-		pdb->integloop_gain1_mode0 = 0x00;
-		pdb->vco_tune_map = 0x00;
-		pdb->lock_cmp1_mode0 = 0x2f;
-		pdb->lock_cmp2_mode0 = 0x2a;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x0;
-		pdb->lock_cmp_en = 0x08;
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
-		unsigned long rate)
-{
-	u32 res = 0;
-	struct mdss_pll_resources *dp_res = vco->priv;
-	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
-
-	res = dp_vco_pll_init_db_10nm(pdb, rate);
-	if (res) {
-		pr_err("VCO Init DB failed\n");
-		return res;
-	}
-
-	if (pdb->lane_cnt != 4) {
-		if (pdb->orientation == ORIENTATION_CC2)
-			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
-		else
-			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
-	} else {
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
-	}
-
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
-
-	/* Different for each clock rates */
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	if (pdb->orientation == ORIENTATION_CC2)
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
-	else
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	/* TX Lane configuration */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
-	/* TX-0 register configuration */
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
-
-	/* TX-1 register configuration */
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	/* dependent on the vco frequency */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
-
-	return res;
-}
-
-static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL lock status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PHY_PLL_POLL_SLEEP_US,
-			DP_PHY_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: C_READY status is not high. Status=%x\n",
-				__func__, status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool phy_ready = true;
-
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & (BIT(1))) > 0),
-			DP_PHY_PLL_POLL_SLEEP_US,
-			DP_PHY_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		phy_ready = false;
-	}
-
-	return phy_ready;
-}
-
-static int dp_pll_enable_10nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
-	u32 bias_en, drvr_en;
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
-	wmb(); /* Make sure the PHY register writes are done */
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
-	wmb();	/* Make sure the PLL register writes are done */
-
-	if (!dp_10nm_pll_lock_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	/* poll for PHY ready status */
-	if (!dp_10nm_phy_rdy_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	pr_debug("%s: PLL is locked\n", __func__);
-
-	if (pdb->lane_cnt == 1) {
-		bias_en = 0x3e;
-		drvr_en = 0x13;
-	} else {
-		bias_en = 0x3f;
-		drvr_en = 0x10;
-	}
-
-	if (pdb->lane_cnt != 4) {
-		if (pdb->orientation == ORIENTATION_CC1) {
-			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-				TXn_HIGHZ_DRVR_EN, drvr_en);
-			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-				TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		} else {
-			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-				TXn_HIGHZ_DRVR_EN, drvr_en);
-			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-				TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		}
-	} else {
-		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-			TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-			TXn_TRANSCEIVER_BIAS_EN, bias_en);
-	}
-
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	/* poll for PHY ready status */
-	if (!dp_10nm_phy_rdy_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-lock_err:
-	return rc;
-}
-
-static int dp_pll_disable_10nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	/* Assert DP PHY power down */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
-	/*
-	 * Make sure all the register writes to disable PLL are
-	 * completed before doing any other operation
-	 */
-	wmb();
-
-	return 0;
-}
-
-
-int dp_vco_prepare_10nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	pr_debug("rate=%ld\n", vco->rate);
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll resources\n");
-		goto error;
-	}
-
-	if ((dp_res->vco_cached_rate != 0)
-		&& (dp_res->vco_cached_rate == vco->rate)) {
-		rc = vco->hw.init->ops->set_rate(hw,
-			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-				rc, dp_res->index);
-			mdss_pll_resource_enable(dp_res, false);
-			goto error;
-		}
-	}
-
-	rc = dp_pll_enable_10nm(hw);
-	if (rc) {
-		mdss_pll_resource_enable(dp_res, false);
-		pr_err("ndx=%d failed to enable dp pll\n",
-					dp_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(dp_res, false);
-error:
-	return rc;
-}
-
-void dp_vco_unprepare_10nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	if (!dp_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!dp_res->pll_on &&
-		mdss_pll_resource_enable(dp_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-	dp_res->vco_cached_rate = vco->rate;
-	dp_pll_disable_10nm(hw);
-
-	dp_res->handoff_resources = false;
-	mdss_pll_resource_enable(dp_res, false);
-	dp_res->pll_on = false;
-}
-
-int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-	int rc;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	pr_debug("DP lane CLK rate=%ld\n", rate);
-
-	rc = dp_config_vco_rate_10nm(vco, rate);
-	if (rc)
-		pr_err("%s: Failed to set clk rate\n", __func__);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	vco->rate = rate;
-
-	return 0;
-}
-
-unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	int rc;
-	u32 div, hsclk_div, link_clk_div = 0;
-	u64 vco_rate;
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
-	div &= 0x0f;
-
-	if (div == 12)
-		hsclk_div = 6; /* Default */
-	else if (div == 4)
-		hsclk_div = 4;
-	else if (div == 0)
-		hsclk_div = 2;
-	else if (div == 3)
-		hsclk_div = 1;
-	else {
-		pr_debug("unknown divider. forcing to default\n");
-		hsclk_div = 5;
-	}
-
-	div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
-	div >>= 2;
-
-	if ((div & 0x3) == 0)
-		link_clk_div = 5;
-	else if ((div & 0x3) == 1)
-		link_clk_div = 10;
-	else if ((div & 0x3) == 2)
-		link_clk_div = 20;
-	else
-		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
-
-	if (link_clk_div == 20) {
-		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	} else {
-		if (hsclk_div == 6)
-			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
-		else if (hsclk_div == 4)
-			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-		else if (hsclk_div == 2)
-			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-		else
-			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
-	}
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	dp_res->vco_cached_rate = vco->rate = vco_rate;
-	return (unsigned long)vco_rate;
-}
-
-long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
-			unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-
-	if (rate <= vco->min_rate)
-		rrate = vco->min_rate;
-	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-	else
-		rrate = vco->max_rate;
-
-	pr_debug("%s: rrate=%ld\n", __func__, rrate);
-
-	*parent_rate = rrate;
-	return rrate;
-}
-
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
deleted file mode 100644
index b3cfcba..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-/*
- * Display Port PLL driver block diagram for branch clocks
- *
- *		+------------------------------+
- *		|         DP_VCO_CLK           |
- *		|                              |
- *		|    +-------------------+     |
- *		|    |   (DP PLL/VCO)    |     |
- *		|    +---------+---------+     |
- *		|              v               |
- *		|   +----------+-----------+   |
- *		|   | hsclk_divsel_clk_src |   |
- *		|   +----------+-----------+   |
- *		+------------------------------+
- *				|
- *	 +------------<---------v------------>----------+
- *	 |                                              |
- * +-----v------------+                                 |
- * | dp_link_clk_src  |                                 |
- * |    divsel_ten    |                                 |
- * +---------+--------+                                 |
- *	|                                               |
- *	|                                               |
- *	v                                               v
- * Input to DISPCC block                                |
- * for link clk, crypto clk                             |
- * and interface clock                                  |
- *							|
- *							|
- *	+--------<------------+-----------------+---<---+
- *	|                     |                 |
- * +-------v------+  +--------v-----+  +--------v------+
- * | vco_divided  |  | vco_divided  |  | vco_divided   |
- * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
- * |              |  |              |  |               |
- * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
- * +-------+------+  +-----+--------+  +--------+------+
- *         |	           |		        |
- *	v------->----------v-------------<------v
- *                         |
- *		+----------+---------+
- *		|   vco_divided_clk  |
- *		|       _src_mux     |
- *		+---------+----------+
- *                        |
- *                        v
- *              Input to DISPCC block
- *              for DP pixel clock
- *
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-10nm.h"
-
-static struct dp_pll_db dp_pdb;
-static struct clk_ops mux_clk_ops;
-
-static struct regmap_config dp_pll_10nm_cfg = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register = 0x910,
-};
-
-static struct regmap_bus dp_pixel_mux_regmap_ops = {
-	.reg_write = dp_mux_set_parent_10nm,
-	.reg_read = dp_mux_get_parent_10nm,
-};
-
-/* Op structures */
-static const struct clk_ops dp_10nm_vco_clk_ops = {
-	.recalc_rate = dp_vco_recalc_rate_10nm,
-	.set_rate = dp_vco_set_rate_10nm,
-	.round_rate = dp_vco_round_rate_10nm,
-	.prepare = dp_vco_prepare_10nm,
-	.unprepare = dp_vco_unprepare_10nm,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
-	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
-	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_clk",
-		.parent_names = (const char *[]){ "xo_board" },
-		.num_parents = 1,
-		.ops = &dp_10nm_vco_clk_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_link_clk_divsel_ten = {
-	.div = 10,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_link_clk_divsel_ten",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_two_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
-	.div = 4,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_four_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
-	.div = 6,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_six_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-
-static int clk_mux_determine_rate(struct clk_hw *hw,
-				     struct clk_rate_request *req)
-{
-	int ret = 0;
-
-	ret = __clk_mux_determine_rate_closest(hw, req);
-	if (ret)
-		return ret;
-
-	/* Set the new parent of mux if there is a new valid parent */
-	if (hw->clk && req->best_parent_hw->clk)
-		clk_set_parent(hw->clk, req->best_parent_hw->clk);
-
-	return 0;
-}
-
-static unsigned long mux_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct clk *div_clk = NULL, *vco_clk = NULL;
-	struct dp_pll_vco_clk *vco = NULL;
-
-	div_clk = clk_get_parent(hw->clk);
-	if (!div_clk)
-		return 0;
-
-	vco_clk = clk_get_parent(div_clk);
-	if (!vco_clk)
-		return 0;
-
-	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
-	if (!vco)
-		return 0;
-
-	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
-		return (vco->rate / 6);
-	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		return (vco->rate / 4);
-	else
-		return (vco->rate / 2);
-}
-
-static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
-	.reg = 0x64,
-	.shift = 0,
-	.width = 2,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dp_vco_divided_clk_src_mux",
-			.parent_names =
-				(const char *[]){"dp_vco_divsel_two_clk_src",
-					"dp_vco_divsel_four_clk_src",
-					"dp_vco_divsel_six_clk_src"},
-			.num_parents = 3,
-			.ops = &mux_clk_ops,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		},
-	},
-};
-
-static struct clk_hw *mdss_dp_pllcc_10nm[] = {
-	[DP_VCO_CLK] = &dp_vco_clk.hw,
-	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
-	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
-	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
-	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
-	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
-};
-
-int dp_pll_clock_register_10nm(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP, i = 0;
-	struct clk_onecell_data *clk_data;
-	struct clk *clk;
-	struct regmap *regmap;
-	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	pll_res->priv = &dp_pdb;
-	dp_pdb.pll = pll_res;
-
-	/* Set client data for vco, mux and div clocks */
-	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
-			pll_res, &dp_pll_10nm_cfg);
-	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
-	mux_clk_ops = clk_regmap_mux_closest_ops;
-	mux_clk_ops.determine_rate = clk_mux_determine_rate;
-	mux_clk_ops.recalc_rate = mux_recalc_rate;
-
-	dp_vco_clk.priv = pll_res;
-
-	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
-		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
-		clk = devm_clk_register(&pdev->dev,
-				mdss_dp_pllcc_10nm[i]);
-		if (IS_ERR(clk)) {
-			pr_err("clk registration failed for DP: %d\n",
-					pll_res->index);
-			rc = -EINVAL;
-			goto clk_reg_fail;
-		}
-		clk_data->clks[i] = clk;
-	}
-
-	rc = of_clk_add_provider(pdev->dev.of_node,
-			of_clk_src_onecell_get, clk_data);
-	if (rc) {
-		pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		pr_debug("%s SUCCESS\n", __func__);
-	}
-	return 0;
-clk_reg_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
deleted file mode 100644
index 5135c87..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DP_PLL_10NM_H
-#define __MDSS_DP_PLL_10NM_H
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
-#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
-
-struct dp_pll_db {
-	struct mdss_pll_resources *pll;
-
-	/* lane and orientation settings */
-	u8 lane_cnt;
-	u8 orientation;
-
-	/* COM PHY settings */
-	u32 hsclk_sel;
-	u32 dec_start_mode0;
-	u32 div_frac_start1_mode0;
-	u32 div_frac_start2_mode0;
-	u32 div_frac_start3_mode0;
-	u32 integloop_gain0_mode0;
-	u32 integloop_gain1_mode0;
-	u32 vco_tune_map;
-	u32 lock_cmp1_mode0;
-	u32 lock_cmp2_mode0;
-	u32 lock_cmp3_mode0;
-	u32 lock_cmp_en;
-
-	/* PHY vco divider */
-	u32 phy_vco_div;
-};
-
-int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
-				unsigned long parent_rate);
-long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate);
-int dp_vco_prepare_10nm(struct clk_hw *hw);
-void dp_vco_unprepare_10nm(struct clk_hw *hw);
-int dp_mux_set_parent_10nm(void *context,
-				unsigned int reg, unsigned int val);
-int dp_mux_get_parent_10nm(void *context,
-				unsigned int reg, unsigned int *val);
-#endif /* __MDSS_DP_PLL_10NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c
deleted file mode 100644
index f1c745d..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-/*
- ***************************************************************************
- ******** Display Port PLL driver block diagram for branch clocks **********
- ***************************************************************************
-
-			+--------------------------+
-			|       DP_VCO_CLK         |
-			|			   |
-			|  +-------------------+   |
-			|  |   (DP PLL/VCO)    |   |
-			|  +---------+---------+   |
-			|	     v		   |
-			| +----------+-----------+ |
-			| | hsclk_divsel_clk_src | |
-			| +----------+-----------+ |
-			+--------------------------+
-				     |
-				     v
-	   +------------<------------|------------>-------------+
-	   |                         |                          |
-+----------v----------+	  +----------v----------+    +----------v----------+
-|   dp_link_2x_clk    |	  | vco_divided_clk_src	|    | vco_divided_clk_src |
-|     divsel_five     |	  |			|    |			   |
-v----------+----------v	  |	divsel_two	|    |	   divsel_four	   |
-	   |		  +----------+----------+    +----------+----------+
-	   |                         |                          |
-	   v			     v				v
-				     |	+---------------------+	|
-  Input to MMSSCC block		     |	|    (aux_clk_ops)    |	|
-  for link clk, crypto clk	     +-->   vco_divided_clk   <-+
-  and interface clock			|	_src_mux      |
-					+----------+----------+
-						   |
-						   v
-					 Input to MMSSCC block
-					 for DP pixel clock
-
- ******************************************************************************
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/usb/usbpd.h>
-
-#include <dt-bindings/clock/mdss-14nm-pll-clk.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-14nm.h"
-
-static struct dp_pll_db dp_pdb;
-static struct clk_ops mux_clk_ops;
-
-static struct regmap_config dp_pll_14nm_cfg = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register = 0x910,
-};
-
-static struct regmap_bus dp_pixel_mux_regmap_ops = {
-	.reg_write = dp_mux_set_parent_14nm,
-	.reg_read = dp_mux_get_parent_14nm,
-};
-
-/* Op structures */
-static const struct clk_ops dp_14nm_vco_clk_ops = {
-	.recalc_rate = dp_vco_recalc_rate_14nm,
-	.set_rate = dp_vco_set_rate_14nm,
-	.round_rate = dp_vco_round_rate_14nm,
-	.prepare = dp_vco_prepare_14nm,
-	.unprepare = dp_vco_unprepare_14nm,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
-	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
-	.max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_clk",
-		.parent_names = (const char *[]){ "xo_board" },
-		.num_parents = 1,
-		.ops = &dp_14nm_vco_clk_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_phy_pll_link_clk = {
-	.div = 10,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_phy_pll_link_clk",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_two_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
-	.div = 4,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_four_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static int clk_mux_determine_rate(struct clk_hw *hw,
-				     struct clk_rate_request *req)
-{
-	int ret = 0;
-
-	ret = __clk_mux_determine_rate_closest(hw, req);
-	if (ret)
-		return ret;
-
-	/* Set the new parent of mux if there is a new valid parent */
-	if (hw->clk && req->best_parent_hw->clk)
-		clk_set_parent(hw->clk, req->best_parent_hw->clk);
-
-	return 0;
-}
-
-
-static unsigned long mux_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct clk *div_clk = NULL, *vco_clk = NULL;
-	struct dp_pll_vco_clk *vco = NULL;
-
-	div_clk = clk_get_parent(hw->clk);
-	if (!div_clk)
-		return 0;
-
-	vco_clk = clk_get_parent(div_clk);
-	if (!vco_clk)
-		return 0;
-
-	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
-	if (!vco)
-		return 0;
-
-	if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		return (vco->rate / 4);
-	else
-		return (vco->rate / 2);
-}
-
-static struct clk_regmap_mux dp_phy_pll_vco_div_clk = {
-	.reg = 0x64,
-	.shift = 0,
-	.width = 1,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dp_phy_pll_vco_div_clk",
-			.parent_names =
-				(const char *[]){"dp_vco_divsel_two_clk_src",
-					"dp_vco_divsel_four_clk_src"},
-			.num_parents = 2,
-			.ops = &mux_clk_ops,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		},
-	},
-};
-
-static struct clk_hw *mdss_dp_pllcc_14nm[] = {
-	[DP_VCO_CLK] = &dp_vco_clk.hw,
-	[DP_PHY_PLL_LINK_CLK] = &dp_phy_pll_link_clk.hw,
-	[DP_VCO_DIVSEL_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
-	[DP_VCO_DIVSEL_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
-	[DP_PHY_PLL_VCO_DIV_CLK] = &dp_phy_pll_vco_div_clk.clkr.hw,
-};
-
-
-int dp_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
-{
-	struct mdss_pll_resources *dp_res = context;
-	int rc;
-	u32 auxclk_div;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= ~0x03;	/* bits 0 to 1 */
-
-	if (val == 0) /* mux parent index = 0 */
-		auxclk_div |= 1;
-	else if (val == 1) /* mux parent index = 1 */
-		auxclk_div |= 2;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_VCO_DIV, auxclk_div);
-	/* Make sure the PHY registers writes are done */
-	wmb();
-	pr_debug("mux=%d auxclk_div=%x\n", val, auxclk_div);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return 0;
-}
-
-int dp_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
-{
-	int rc;
-	u32 auxclk_div = 0;
-	struct mdss_pll_resources *dp_res = context;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= 0x03;
-
-	if (auxclk_div == 1) /* Default divider */
-		*val = 0;
-	else if (auxclk_div == 2)
-		*val = 1;
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	pr_debug("auxclk_div=%d, val=%d\n", auxclk_div, *val);
-
-	return 0;
-}
-
-static int dp_vco_pll_init_db_14nm(struct dp_pll_db *pdb,
-		unsigned long rate)
-{
-	struct mdss_pll_resources *dp_res = pdb->pll;
-	u32 spare_value = 0;
-
-	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
-	pdb->lane_cnt = spare_value & 0x0F;
-	pdb->orientation = (spare_value & 0xF0) >> 4;
-
-	pr_debug("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
-			spare_value, pdb->lane_cnt, pdb->orientation);
-
-	switch (rate) {
-	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
-		pdb->hsclk_sel = 0x2c;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0xbf;
-		pdb->lock_cmp2_mode0 = 0x21;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x1;
-		pdb->lane_mode_1 = 0xc6;
-		break;
-	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
-		pdb->hsclk_sel = 0x24;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x3f;
-		pdb->lock_cmp2_mode0 = 0x38;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x1;
-		pdb->lane_mode_1 = 0xc4;
-		break;
-	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
-		pdb->hsclk_sel = 0x20;
-		pdb->dec_start_mode0 = 0x8c;
-		pdb->div_frac_start1_mode0 = 0x00;
-		pdb->div_frac_start2_mode0 = 0x00;
-		pdb->div_frac_start3_mode0 = 0x0a;
-		pdb->lock_cmp1_mode0 = 0x7f;
-		pdb->lock_cmp2_mode0 = 0x70;
-		pdb->lock_cmp3_mode0 = 0x00;
-		pdb->phy_vco_div = 0x2;
-		pdb->lane_mode_1 = 0xc4;
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int dp_config_vco_rate_14nm(struct dp_pll_vco_clk *vco,
-		unsigned long rate)
-{
-	u32 res = 0;
-	struct mdss_pll_resources *dp_res = vco->priv;
-	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
-
-	res = dp_vco_pll_init_db_14nm(pdb, rate);
-	if (res) {
-		pr_err("VCO Init DB failed\n");
-		return res;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x3d);
-
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CLK_SELECT, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_SYS_CLK_CTRL, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CLK_ENABLE1, 0x0e);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_BG_CTRL, 0x0f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CLK_SELECT, 0x30);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_PLL_IVCO, 0x0f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CP_CTRL_MODE0, 0x0b);
-
-	/* Parameters dependent on vco clock frequency */
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x40);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE_MAP, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_BG_TIMER, 0x08);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CORECLK_DIV, 0x05);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE1_MODE0, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE2_MODE0, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	wmb(); /* make sure write happens */
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CORE_CLK_EN, 0x0f);
-	wmb(); /* make sure write happens */
-
-	if (pdb->orientation == ORIENTATION_CC2)
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0xc9);
-	else
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0xd9);
-	wmb(); /* make sure write happens */
-
-	/* TX Lane configuration */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		DP_PHY_TX0_TX1_LANE_CTL, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
-	/* TX-0 register configuration */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT, 0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL, 0x2b);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL, 0x2f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TX_BAND, 0x4);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX, 0x12);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX, 0x12);
-
-	/* TX-1 register configuration */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT, 0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL, 0x2b);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL, 0x2f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TX_BAND, 0x4);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX, 0x12);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX, 0x12);
-	wmb(); /* make sure write happens */
-
-	/* PHY VCO divider programming */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		DP_PHY_VCO_DIV, pdb->phy_vco_div);
-	wmb(); /* make sure write happens */
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CMN_CONFIG, 0x02);
-	wmb(); /* make sure write happens */
-
-	return res;
-}
-
-static bool dp_14nm_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL lock status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("C_READY status is not high. Status=%x\n", status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-static bool dp_14nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool phy_ready = true;
-
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & (BIT(1) | BIT(0))) > 0),
-			DP_PHY_POLL_SLEEP_US,
-			DP_PHY_POLL_TIMEOUT_US)) {
-		pr_err("Phy_ready is not high. Status=%x\n", status);
-		phy_ready = false;
-	}
-
-	return phy_ready;
-}
-
-static int dp_pll_enable_14nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
-	wmb(); /* Make sure the PHY register writes are done */
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_RESETSM_CNTRL, 0x20);
-	wmb();	/* Make sure the PLL register writes are done */
-
-	udelay(900); /* hw recommended delay for full PU */
-
-	if (!dp_14nm_pll_lock_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-	wmb();	/* Make sure the PHY register writes are done */
-
-	udelay(10); /* hw recommended delay */
-
-	if (!dp_14nm_phy_rdy_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	pr_debug("PLL is locked\n");
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN, 0x10);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN, 0x10);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX0_OFFSET + TXn_TX_POL_INV, 0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-		QSERDES_TX1_OFFSET + TXn_TX_POL_INV, 0x0a);
-
-	/*
-	 * Switch DP Mainlink clock (cc_dpphy_link_clk) from DP
-	 * controller side with final frequency
-	 */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
-	wmb();	/* Make sure the PHY register writes are done */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-	wmb();	/* Make sure the PHY register writes are done */
-
-lock_err:
-	return rc;
-}
-
-static int dp_pll_disable_14nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	/* Assert DP PHY power down */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
-	/*
-	 * Make sure all the register writes to disable PLL are
-	 * completed before doing any other operation
-	 */
-	wmb();
-
-	return 0;
-}
-
-
-int dp_vco_prepare_14nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	pr_debug("rate=%ld\n", vco->rate);
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll resources\n");
-		goto error;
-	}
-
-	if ((dp_res->vco_cached_rate != 0)
-		&& (dp_res->vco_cached_rate == vco->rate)) {
-		rc = vco->hw.init->ops->set_rate(hw,
-			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-				rc, dp_res->index);
-			mdss_pll_resource_enable(dp_res, false);
-			goto error;
-		}
-	}
-
-	rc = dp_pll_enable_14nm(hw);
-	if (rc) {
-		mdss_pll_resource_enable(dp_res, false);
-		pr_err("ndx=%d failed to enable dp pll\n",
-					dp_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(dp_res, false);
-error:
-	return rc;
-}
-
-void dp_vco_unprepare_14nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	if (!dp_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!dp_res->pll_on &&
-		mdss_pll_resource_enable(dp_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-	dp_res->vco_cached_rate = vco->rate;
-	dp_pll_disable_14nm(hw);
-
-	dp_res->handoff_resources = false;
-	mdss_pll_resource_enable(dp_res, false);
-	dp_res->pll_on = false;
-}
-
-int dp_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-	int rc;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	pr_debug("DP lane CLK rate=%ld\n", rate);
-
-	rc = dp_config_vco_rate_14nm(vco, rate);
-	if (rc)
-		pr_err("Failed to set clk rate\n");
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	vco->rate = rate;
-
-	return 0;
-}
-
-unsigned long dp_vco_recalc_rate_14nm(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	int rc;
-	u32 div, hsclk_div;
-	u64 vco_rate;
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	if (is_gdsc_disabled(dp_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
-	div &= 0x0f;
-
-	if (div == 12)
-		hsclk_div = 5; /* Default */
-	else if (div == 4)
-		hsclk_div = 3;
-	else if (div == 0)
-		hsclk_div = 2;
-	else {
-		pr_debug("unknown divider. forcing to default\n");
-		hsclk_div = 5;
-	}
-
-	if (hsclk_div == 5)
-		vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
-	else if (hsclk_div == 3)
-		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else
-		vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	dp_res->vco_cached_rate = vco->rate = vco_rate;
-	return (unsigned long)vco_rate;
-}
-
-long dp_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
-			unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-
-	if (rate <= vco->min_rate)
-		rrate = vco->min_rate;
-	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else
-		rrate = vco->max_rate;
-
-	pr_debug("rrate=%ld\n", rrate);
-
-	*parent_rate = rrate;
-	return rrate;
-}
-
-int dp_pll_clock_register_14nm(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP, i = 0;
-	struct clk_onecell_data *clk_data;
-	struct clk *clk;
-	struct regmap *regmap;
-	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_14nm);
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	pll_res->priv = &dp_pdb;
-	dp_pdb.pll = pll_res;
-
-	/* Set client data for vco, mux and div clocks */
-	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
-			pll_res, &dp_pll_14nm_cfg);
-	dp_phy_pll_vco_div_clk.clkr.regmap = regmap;
-	mux_clk_ops = clk_regmap_mux_closest_ops;
-	mux_clk_ops.determine_rate = clk_mux_determine_rate;
-	mux_clk_ops.recalc_rate = mux_recalc_rate;
-
-	dp_vco_clk.priv = pll_res;
-
-	for (i = DP_VCO_CLK; i <= DP_PHY_PLL_VCO_DIV_CLK; i++) {
-		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
-		clk = devm_clk_register(&pdev->dev,
-				mdss_dp_pllcc_14nm[i]);
-		if (IS_ERR(clk)) {
-			pr_err("clk registration failed for DP: %d\n",
-					pll_res->index);
-			rc = -EINVAL;
-			goto clk_reg_fail;
-		}
-		clk_data->clks[i] = clk;
-	}
-
-	rc = of_clk_add_provider(pdev->dev.of_node,
-			of_clk_src_onecell_get, clk_data);
-	if (rc) {
-		pr_err("Clock register failed rc=%d\n", rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		pr_debug("SUCCESS\n");
-	}
-	return 0;
-clk_reg_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h
deleted file mode 100644
index f8c9c30..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DP_PLL_14NM_H
-#define __MDSS_DP_PLL_14NM_H
-
-#define DP_PHY_REVISION_ID0			0x0000
-#define DP_PHY_REVISION_ID1			0x0004
-#define DP_PHY_REVISION_ID2			0x0008
-#define DP_PHY_REVISION_ID3			0x000C
-
-#define DP_PHY_CFG				0x0010
-#define DP_PHY_CFG_1				0x0014
-#define DP_PHY_PD_CTL				0x0018
-#define DP_PHY_MODE				0x001C
-
-#define DP_PHY_AUX_CFG0				0x0020
-#define DP_PHY_AUX_CFG1				0x0024
-#define DP_PHY_AUX_CFG2				0x0028
-#define DP_PHY_AUX_CFG3				0x002C
-#define DP_PHY_AUX_CFG4				0x0030
-#define DP_PHY_AUX_CFG5				0x0034
-#define DP_PHY_AUX_CFG6				0x0038
-#define DP_PHY_AUX_CFG7				0x003C
-#define DP_PHY_AUX_CFG8				0x0040
-#define DP_PHY_AUX_CFG9				0x0044
-#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
-#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
-#define DP_PHY_AUX_BIST_CFG			0x0050
-
-#define DP_PHY_VCO_DIV				0x0068
-#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
-
-#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
-#define DP_PHY_SPARE0				0x00AC
-#define DP_PHY_STATUS				0x00C0
-
-/* Tx registers */
-#define QSERDES_TX0_OFFSET			0x0400
-#define QSERDES_TX1_OFFSET			0x0800
-
-#define TXn_BIST_MODE_LANENO			0x0000
-#define TXn_CLKBUF_ENABLE			0x0008
-#define TXn_TX_EMP_POST1_LVL			0x000C
-
-#define TXn_TX_DRV_LVL				0x001C
-
-#define TXn_RESET_TSYNC_EN			0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
-#define TXn_TX_BAND				0x002C
-#define TXn_SLEW_CNTL				0x0030
-#define TXn_INTERFACE_SELECT			0x0034
-
-#define TXn_RES_CODE_LANE_TX			0x003C
-#define TXn_RES_CODE_LANE_RX			0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
-
-#define TXn_DEBUG_BUS_SEL			0x0058
-#define TXn_TRANSCEIVER_BIAS_EN			0x005C
-#define TXn_HIGHZ_DRVR_EN			0x0060
-#define TXn_TX_POL_INV				0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
-
-#define TXn_LANE_MODE_1				0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN			0x00C0
-#define TXn_TX_INTERFACE_MODE			0x00C4
-
-#define TXn_VMODE_CTRL1				0x00F0
-
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1			0x0000
-#define QSERDES_COM_ATB_SEL2			0x0004
-#define QSERDES_COM_FREQ_UPDATE			0x0008
-#define QSERDES_COM_BG_TIMER			0x000C
-#define QSERDES_COM_SSC_EN_CENTER		0x0010
-#define QSERDES_COM_SSC_ADJ_PER1		0x0014
-#define QSERDES_COM_SSC_ADJ_PER2		0x0018
-#define QSERDES_COM_SSC_PER1			0x001C
-#define QSERDES_COM_SSC_PER2			0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
-#define QSERDES_COM_POST_DIV			0x002C
-#define QSERDES_COM_POST_DIV_MUX		0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
-#define QSERDES_COM_CLK_ENABLE1			0x0038
-#define QSERDES_COM_SYS_CLK_CTRL		0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
-#define QSERDES_COM_PLL_EN			0x0044
-#define QSERDES_COM_PLL_IVCO			0x0048
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x004C
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x0050
-#define QSERDES_COM_LOCK_CMP3_MODE0		0x0054
-
-#define QSERDES_COM_CP_CTRL_MODE0		0x0078
-#define QSERDES_COM_CP_CTRL_MODE1		0x007C
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x0084
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x0090
-#define QSERDES_COM_PLL_CNTRL			0x009C
-
-#define QSERDES_COM_SYSCLK_EN_SEL		0x00AC
-#define QSERDES_COM_CML_SYSCLK_SEL		0x00B0
-#define QSERDES_COM_RESETSM_CNTRL		0x00B4
-#define QSERDES_COM_RESETSM_CNTRL2		0x00B8
-#define QSERDES_COM_LOCK_CMP_EN			0x00C8
-#define QSERDES_COM_LOCK_CMP_CFG		0x00CC
-
-
-#define QSERDES_COM_DEC_START_MODE0		0x00D0
-#define QSERDES_COM_DEC_START_MODE1		0x00D4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00DC
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00E0
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00E4
-
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x0108
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x010C
-#define QSERDES_COM_VCO_TUNE_CTRL		0x0124
-#define QSERDES_COM_VCO_TUNE_MAP		0x0128
-#define QSERDES_COM_VCO_TUNE1_MODE0		0x012C
-#define QSERDES_COM_VCO_TUNE2_MODE0		0x0130
-
-#define QSERDES_COM_CMN_STATUS			0x015C
-#define QSERDES_COM_RESET_SM_STATUS		0x0160
-
-#define QSERDES_COM_BG_CTRL			0x0170
-#define QSERDES_COM_CLK_SELECT			0x0174
-#define QSERDES_COM_HSCLK_SEL			0x0178
-#define QSERDES_COM_CORECLK_DIV			0x0184
-#define QSERDES_COM_SW_RESET			0x0188
-#define QSERDES_COM_CORE_CLK_EN			0x018C
-#define QSERDES_COM_C_READY_STATUS		0x0190
-#define QSERDES_COM_CMN_CONFIG			0x0194
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x019C
-
-#define DP_PLL_POLL_SLEEP_US			500
-#define DP_PLL_POLL_TIMEOUT_US			10000
-
-#define DP_PHY_POLL_SLEEP_US			500
-#define DP_PHY_POLL_TIMEOUT_US			10000
-
-#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
-#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
-
-struct dp_pll_db {
-	struct mdss_pll_resources *pll;
-
-	/* lane and orientation settings */
-	u8 lane_cnt;
-	u8 orientation;
-
-	/* COM PHY settings */
-	u32 hsclk_sel;
-	u32 dec_start_mode0;
-	u32 div_frac_start1_mode0;
-	u32 div_frac_start2_mode0;
-	u32 div_frac_start3_mode0;
-	u32 lock_cmp1_mode0;
-	u32 lock_cmp2_mode0;
-	u32 lock_cmp3_mode0;
-
-	/* PHY vco divider */
-	u32 phy_vco_div;
-
-	/* TX settings */
-	u32 lane_mode_1;
-};
-
-int dp_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-unsigned long dp_vco_recalc_rate_14nm(struct clk_hw *hw,
-				unsigned long parent_rate);
-long dp_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate);
-int dp_vco_prepare_14nm(struct clk_hw *hw);
-void dp_vco_unprepare_14nm(struct clk_hw *hw);
-int dp_mux_set_parent_14nm(void *context,
-				unsigned int reg, unsigned int val);
-int dp_mux_get_parent_14nm(void *context,
-				unsigned int reg, unsigned int *val);
-#endif /* __MDSS_DP_PLL_14NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
deleted file mode 100644
index a91e0eb..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[dp-pll] %s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/usb/usbpd.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-7nm.h"
-
-#define DP_PHY_CFG				0x0010
-#define DP_PHY_PD_CTL				0x0018
-#define DP_PHY_MODE				0x001C
-
-#define DP_PHY_AUX_CFG1				0x0024
-#define DP_PHY_AUX_CFG2				0x0028
-
-#define DP_PHY_VCO_DIV				0x0070
-#define DP_PHY_TX0_TX1_LANE_CTL			0x0078
-#define DP_PHY_TX2_TX3_LANE_CTL			0x009C
-
-#define DP_PHY_SPARE0				0x00C8
-#define DP_PHY_STATUS				0x00DC
-
-/* Tx registers */
-#define TXn_CLKBUF_ENABLE			0x0008
-#define TXn_TX_EMP_POST1_LVL			0x000C
-
-#define TXn_TX_DRV_LVL				0x0014
-
-#define TXn_RESET_TSYNC_EN			0x001C
-#define TXn_PRE_STALL_LDO_BOOST_EN		0x0020
-#define TXn_TX_BAND				0x0024
-#define TXn_INTERFACE_SELECT			0x002C
-
-#define TXn_RES_CODE_LANE_OFFSET_TX		0x003C
-#define TXn_RES_CODE_LANE_OFFSET_RX		0x0040
-
-#define TXn_TRANSCEIVER_BIAS_EN			0x0054
-#define TXn_HIGHZ_DRVR_EN			0x0058
-#define TXn_TX_POL_INV				0x005C
-#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0060
-
-#define TXn_TRAN_DRVR_EMP_EN			0x00B8
-#define TXn_TX_INTERFACE_MODE			0x00BC
-
-#define TXn_VMODE_CTRL1				0x00E8
-
-/* PLL register offset */
-#define QSERDES_COM_BG_TIMER			0x000C
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0044
-#define QSERDES_COM_CLK_ENABLE1			0x0048
-#define QSERDES_COM_SYS_CLK_CTRL		0x004C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0050
-#define QSERDES_COM_PLL_IVCO			0x0058
-
-#define QSERDES_COM_CP_CTRL_MODE0		0x0074
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x007C
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x0084
-#define QSERDES_COM_SYSCLK_EN_SEL		0x0094
-#define QSERDES_COM_RESETSM_CNTRL		0x009C
-#define QSERDES_COM_LOCK_CMP_EN			0x00A4
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x00AC
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x00B0
-
-#define QSERDES_COM_DEC_START_MODE0		0x00BC
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00CC
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00D0
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00EC
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00F0
-#define QSERDES_COM_VCO_TUNE_CTRL		0x0108
-#define QSERDES_COM_VCO_TUNE_MAP		0x010C
-
-#define QSERDES_COM_CLK_SEL			0x0154
-#define QSERDES_COM_HSCLK_SEL			0x0158
-
-#define QSERDES_COM_CORECLK_DIV_MODE0		0x0168
-
-#define QSERDES_COM_CORE_CLK_EN			0x0174
-#define QSERDES_COM_C_READY_STATUS		0x0178
-#define QSERDES_COM_CMN_CONFIG			0x017C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0184
-
-#define DP_PHY_PLL_POLL_SLEEP_US		500
-#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
-
-#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
-#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
-#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
-
-int dp_mux_set_parent_7nm(void *context, unsigned int reg, unsigned int val)
-{
-	struct mdss_pll_resources *dp_res = context;
-	int rc;
-	u32 auxclk_div;
-
-	if (!context) {
-		pr_err("invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= ~0x03;
-
-	if (val == 0)
-		auxclk_div |= 1;
-	else if (val == 1)
-		auxclk_div |= 2;
-	else if (val == 2)
-		auxclk_div |= 0;
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, auxclk_div);
-	/* Make sure the PHY registers writes are done */
-	wmb();
-	pr_debug("mux=%d auxclk_div=%x\n", val, auxclk_div);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return 0;
-}
-
-int dp_mux_get_parent_7nm(void *context, unsigned int reg, unsigned int *val)
-{
-	int rc;
-	u32 auxclk_div = 0;
-	struct mdss_pll_resources *dp_res = context;
-
-	if (!context || !val) {
-		pr_err("invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	if (is_gdsc_disabled(dp_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= 0x03;
-
-	if (auxclk_div == 1) /* Default divider */
-		*val = 0;
-	else if (auxclk_div == 2)
-		*val = 1;
-	else if (auxclk_div == 0)
-		*val = 2;
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	pr_debug("auxclk_div=%d, val=%d\n", auxclk_div, *val);
-
-	return 0;
-}
-
-static int dp_vco_pll_init_db_7nm(struct dp_pll_db_7nm *pdb,
-		unsigned long rate)
-{
-	struct mdss_pll_resources *dp_res = pdb->pll;
-	u32 spare_value = 0;
-
-	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
-	pdb->lane_cnt = spare_value & 0x0F;
-	pdb->orientation = (spare_value & 0xF0) >> 4;
-
-	pr_debug("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
-			spare_value, pdb->lane_cnt, pdb->orientation);
-
-	pdb->div_frac_start1_mode0 = 0x00;
-	pdb->integloop_gain0_mode0 = 0x3f;
-	pdb->integloop_gain1_mode0 = 0x00;
-	pdb->vco_tune_map = 0x00;
-	pdb->cmn_config = 0x02;
-	pdb->txn_tran_drv_emp_en = 0xf;
-
-	switch (rate) {
-	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
-		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
-		pdb->hsclk_sel = 0x05;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x6f;
-		pdb->lock_cmp2_mode0 = 0x08;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x04;
-		break;
-	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
-		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x03;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x0f;
-		pdb->lock_cmp2_mode0 = 0x0e;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x08;
-		break;
-	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
-		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x01;
-		pdb->dec_start_mode0 = 0x8c;
-		pdb->div_frac_start2_mode0 = 0x00;
-		pdb->div_frac_start3_mode0 = 0x0a;
-		pdb->lock_cmp1_mode0 = 0x1f;
-		pdb->lock_cmp2_mode0 = 0x1c;
-		pdb->phy_vco_div = 0x2;
-		pdb->lock_cmp_en = 0x08;
-		break;
-	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
-		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
-		pdb->hsclk_sel = 0x00;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x2f;
-		pdb->lock_cmp2_mode0 = 0x2a;
-		pdb->phy_vco_div = 0x0;
-		pdb->lock_cmp_en = 0x08;
-		break;
-	default:
-		pr_err("unsupported rate %ld\n", rate);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int dp_config_vco_rate_7nm(struct dp_pll_vco_clk *vco,
-		unsigned long rate)
-{
-	u32 res = 0;
-	struct mdss_pll_resources *dp_res = vco->priv;
-	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
-
-	res = dp_vco_pll_init_db_7nm(pdb, rate);
-	if (res) {
-		pr_err("VCO Init DB failed\n");
-		return res;
-	}
-
-	if (pdb->lane_cnt != 4) {
-		if (pdb->orientation == ORIENTATION_CC2)
-			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
-		else
-			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
-	} else {
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
-	}
-
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x05);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x3b);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0c);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x0f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_CMN_CONFIG, pdb->cmn_config);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	if (pdb->orientation == ORIENTATION_CC2)
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
-	else
-		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	/* TX Lane configuration */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_TX0_TX1_LANE_CTL, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
-	/* TX-0 register configuration */
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3b);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN,
-		pdb->txn_tran_drv_emp_en);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
-
-	/* TX-1 register configuration */
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3b);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN,
-		pdb->txn_tran_drv_emp_en);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
-
-	return res;
-}
-
-static bool dp_7nm_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PHY_PLL_POLL_SLEEP_US,
-			DP_PHY_PLL_POLL_TIMEOUT_US)) {
-		pr_err("C_READY status is not high. Status=%x\n", status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-static bool dp_7nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool phy_ready = true;
-
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & (BIT(1))) > 0),
-			DP_PHY_PLL_POLL_SLEEP_US,
-			DP_PHY_PLL_POLL_TIMEOUT_US)) {
-		pr_err("Phy_ready is not high. Status=%x\n", status);
-		phy_ready = false;
-	}
-
-	return phy_ready;
-}
-
-static int dp_pll_enable_7nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
-	u32 bias_en, drvr_en;
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG1, 0x13);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0xA4);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
-	wmb(); /* Make sure the PHY register writes are done */
-
-	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
-	wmb();	/* Make sure the PLL register writes are done */
-
-	if (!dp_7nm_pll_lock_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	/* poll for PHY ready status */
-	if (!dp_7nm_phy_rdy_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	pr_debug("PLL is locked\n");
-
-	if (pdb->lane_cnt == 1) {
-		bias_en = 0x3e;
-		drvr_en = 0x13;
-	} else {
-		bias_en = 0x3f;
-		drvr_en = 0x10;
-	}
-
-	if (pdb->lane_cnt != 4) {
-		if (pdb->orientation == ORIENTATION_CC1) {
-			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-				TXn_HIGHZ_DRVR_EN, drvr_en);
-			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-				TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		} else {
-			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-				TXn_HIGHZ_DRVR_EN, drvr_en);
-			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-				TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		}
-	} else {
-		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
-			TXn_TRANSCEIVER_BIAS_EN, bias_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
-		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
-			TXn_TRANSCEIVER_BIAS_EN, bias_en);
-	}
-
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
-
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	/* poll for PHY ready status */
-	if (!dp_7nm_phy_rdy_status(dp_res)) {
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x3f);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x3f);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x23);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x23);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x11);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x11);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
-	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3b);
-	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3b);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-lock_err:
-	return rc;
-}
-
-static int dp_pll_disable_7nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	/* Assert DP PHY power down */
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
-	/*
-	 * Make sure all the register writes to disable PLL are
-	 * completed before doing any other operation
-	 */
-	wmb();
-
-	return 0;
-}
-
-int dp_vco_prepare_7nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco;
-	struct mdss_pll_resources *dp_res;
-
-	if (!hw) {
-		pr_err("invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	vco = to_dp_vco_hw(hw);
-	dp_res = vco->priv;
-
-	pr_debug("rate=%ld\n", vco->rate);
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll resources\n");
-		goto error;
-	}
-
-	if ((dp_res->vco_cached_rate != 0)
-		&& (dp_res->vco_cached_rate == vco->rate)) {
-		rc = vco->hw.init->ops->set_rate(hw,
-			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-				rc, dp_res->index);
-			mdss_pll_resource_enable(dp_res, false);
-			goto error;
-		}
-	}
-
-	rc = dp_pll_enable_7nm(hw);
-	if (rc) {
-		mdss_pll_resource_enable(dp_res, false);
-		pr_err("ndx=%d failed to enable dp pll\n", dp_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(dp_res, false);
-error:
-	return rc;
-}
-
-void dp_vco_unprepare_7nm(struct clk_hw *hw)
-{
-	struct dp_pll_vco_clk *vco;
-	struct mdss_pll_resources *dp_res;
-
-	if (!hw) {
-		pr_err("invalid input parameters\n");
-		return;
-	}
-
-	vco = to_dp_vco_hw(hw);
-	dp_res = vco->priv;
-
-	if (!dp_res) {
-		pr_err("invalid input parameter\n");
-		return;
-	}
-
-	if (!dp_res->pll_on &&
-		mdss_pll_resource_enable(dp_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-	dp_res->vco_cached_rate = vco->rate;
-	dp_pll_disable_7nm(hw);
-
-	dp_res->handoff_resources = false;
-	mdss_pll_resource_enable(dp_res, false);
-	dp_res->pll_on = false;
-}
-
-int dp_vco_set_rate_7nm(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco;
-	struct mdss_pll_resources *dp_res;
-	int rc;
-
-	if (!hw) {
-		pr_err("invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	vco = to_dp_vco_hw(hw);
-	dp_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	pr_debug("DP lane CLK rate=%ld\n", rate);
-
-	rc = dp_config_vco_rate_7nm(vco, rate);
-	if (rc)
-		pr_err("Failed to set clk rate\n");
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	vco->rate = rate;
-
-	return 0;
-}
-
-unsigned long dp_vco_recalc_rate_7nm(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct dp_pll_vco_clk *vco;
-	int rc;
-	u32 hsclk_sel, link_clk_divsel, hsclk_div, link_clk_div = 0;
-	unsigned long vco_rate;
-	struct mdss_pll_resources *dp_res;
-
-	if (!hw) {
-		pr_err("invalid input parameters\n");
-		return 0;
-	}
-
-	vco = to_dp_vco_hw(hw);
-	dp_res = vco->priv;
-
-	if (is_gdsc_disabled(dp_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
-		return 0;
-	}
-
-	pr_debug("input rates: parent=%lu, vco=%lu\n", parent_rate, vco->rate);
-
-	hsclk_sel = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
-	hsclk_sel &= 0x0f;
-
-	if (hsclk_sel == 5)
-		hsclk_div = 5;
-	else if (hsclk_sel == 3)
-		hsclk_div = 3;
-	else if (hsclk_sel == 1)
-		hsclk_div = 2;
-	else if (hsclk_sel == 0)
-		hsclk_div = 1;
-	else {
-		pr_debug("unknown divider. forcing to default\n");
-		hsclk_div = 5;
-	}
-
-	link_clk_divsel = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
-	link_clk_divsel >>= 2;
-	link_clk_divsel &= 0x3;
-
-	if (link_clk_divsel == 0)
-		link_clk_div = 5;
-	else if (link_clk_divsel == 1)
-		link_clk_div = 10;
-	else if (link_clk_divsel == 2)
-		link_clk_div = 20;
-	else
-		pr_err("unsupported div. Phy_mode: %d\n", link_clk_divsel);
-
-	if (link_clk_div == 20) {
-		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	} else {
-		if (hsclk_div == 5)
-			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
-		else if (hsclk_div == 3)
-			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-		else if (hsclk_div == 2)
-			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-		else
-			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
-	}
-
-	pr_debug("hsclk: sel=0x%x, div=0x%x; lclk: sel=%u, div=%u, rate=%lu\n",
-		hsclk_sel, hsclk_div, link_clk_divsel, link_clk_div, vco_rate);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	dp_res->vco_cached_rate = vco->rate = vco_rate;
-	return vco_rate;
-}
-
-long dp_vco_round_rate_7nm(struct clk_hw *hw, unsigned long rate,
-			unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dp_pll_vco_clk *vco;
-
-	if (!hw) {
-		pr_err("invalid input parameters\n");
-		return 0;
-	}
-
-	vco = to_dp_vco_hw(hw);
-	if (rate <= vco->min_rate)
-		rrate = vco->min_rate;
-	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-	else
-		rrate = vco->max_rate;
-
-	pr_debug("rrate=%ld\n", rrate);
-
-	if (parent_rate)
-		*parent_rate = rrate;
-	return rrate;
-}
-
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c
deleted file mode 100644
index 29fcdf9..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-/*
- * Display Port PLL driver block diagram for branch clocks
- *
- *		+------------------------------+
- *		|         DP_VCO_CLK           |
- *		|                              |
- *		|    +-------------------+     |
- *		|    |   (DP PLL/VCO)    |     |
- *		|    +---------+---------+     |
- *		|              v               |
- *		|   +----------+-----------+   |
- *		|   | hsclk_divsel_clk_src |   |
- *		|   +----------+-----------+   |
- *		+------------------------------+
- *				|
- *	 +------------<---------v------------>----------+
- *	 |                                              |
- * +-----v------------+                                 |
- * | dp_link_clk_src  |                                 |
- * |    divsel_ten    |                                 |
- * +---------+--------+                                 |
- *	|                                               |
- *	|                                               |
- *	v                                               v
- * Input to DISPCC block                                |
- * for link clk, crypto clk                             |
- * and interface clock                                  |
- *							|
- *							|
- *	+--------<------------+-----------------+---<---+
- *	|                     |                 |
- * +-------v------+  +--------v-----+  +--------v------+
- * | vco_divided  |  | vco_divided  |  | vco_divided   |
- * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
- * |              |  |              |  |               |
- * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
- * +-------+------+  +-----+--------+  +--------+------+
- *         |	           |		        |
- *	v------->----------v-------------<------v
- *                         |
- *		+----------+---------+
- *		|   vco_divided_clk  |
- *		|       _src_mux     |
- *		+---------+----------+
- *                        |
- *                        v
- *              Input to DISPCC block
- *              for DP pixel clock
- *
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-7nm.h"
-
-static struct dp_pll_db_7nm dp_pdb_7nm;
-static struct clk_ops mux_clk_ops;
-
-static struct regmap_config dp_pll_7nm_cfg = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register = 0x910,
-};
-
-static struct regmap_bus dp_pixel_mux_regmap_ops = {
-	.reg_write = dp_mux_set_parent_7nm,
-	.reg_read = dp_mux_get_parent_7nm,
-};
-
-/* Op structures */
-static const struct clk_ops dp_7nm_vco_clk_ops = {
-	.recalc_rate = dp_vco_recalc_rate_7nm,
-	.set_rate = dp_vco_set_rate_7nm,
-	.round_rate = dp_vco_round_rate_7nm,
-	.prepare = dp_vco_prepare_7nm,
-	.unprepare = dp_vco_unprepare_7nm,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
-	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
-	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_clk",
-		.parent_names = (const char *[]){ "xo_board" },
-		.num_parents = 1,
-		.ops = &dp_7nm_vco_clk_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_link_clk_divsel_ten = {
-	.div = 10,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_link_clk_divsel_ten",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_two_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
-	.div = 4,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_four_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
-	.div = 6,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dp_vco_divsel_six_clk_src",
-		.parent_names =
-			(const char *[]){ "dp_vco_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-
-static int clk_mux_determine_rate(struct clk_hw *hw,
-				  struct clk_rate_request *req)
-{
-	int ret = 0;
-
-	if (!hw || !req) {
-		pr_err("Invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	ret = __clk_mux_determine_rate_closest(hw, req);
-	if (ret)
-		return ret;
-
-	/* Set the new parent of mux if there is a new valid parent */
-	if (hw->clk && req->best_parent_hw->clk)
-		clk_set_parent(hw->clk, req->best_parent_hw->clk);
-
-	return 0;
-}
-
-static unsigned long mux_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct clk *div_clk = NULL, *vco_clk = NULL;
-	struct dp_pll_vco_clk *vco = NULL;
-
-	if (!hw) {
-		pr_err("Invalid input parameter\n");
-		return 0;
-	}
-
-	div_clk = clk_get_parent(hw->clk);
-	if (!div_clk)
-		return 0;
-
-	vco_clk = clk_get_parent(div_clk);
-	if (!vco_clk)
-		return 0;
-
-	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
-	if (!vco)
-		return 0;
-
-	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
-		return (vco->rate / 6);
-	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		return (vco->rate / 4);
-	else
-		return (vco->rate / 2);
-}
-
-static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
-	.reg = 0x64,
-	.shift = 0,
-	.width = 2,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dp_vco_divided_clk_src_mux",
-			.parent_names =
-				(const char *[]){"dp_vco_divsel_two_clk_src",
-					"dp_vco_divsel_four_clk_src",
-					"dp_vco_divsel_six_clk_src"},
-			.num_parents = 3,
-			.ops = &mux_clk_ops,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		},
-	},
-};
-
-static struct clk_hw *mdss_dp_pllcc_7nm[] = {
-	[DP_VCO_CLK] = &dp_vco_clk.hw,
-	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
-	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
-	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
-	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
-	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
-};
-
-int dp_pll_clock_register_7nm(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP, i = 0;
-	struct clk_onecell_data *clk_data;
-	struct clk *clk;
-	struct regmap *regmap;
-	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_7nm);
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	pll_res->priv = &dp_pdb_7nm;
-	dp_pdb_7nm.pll = pll_res;
-
-	/* Set client data for vco, mux and div clocks */
-	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
-			pll_res, &dp_pll_7nm_cfg);
-	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
-	mux_clk_ops = clk_regmap_mux_closest_ops;
-	mux_clk_ops.determine_rate = clk_mux_determine_rate;
-	mux_clk_ops.recalc_rate = mux_recalc_rate;
-
-	dp_vco_clk.priv = pll_res;
-
-	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
-		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
-		clk = devm_clk_register(&pdev->dev, mdss_dp_pllcc_7nm[i]);
-		if (IS_ERR(clk)) {
-			pr_err("clk registration failed for DP: %d\n",
-					pll_res->index);
-			rc = -EINVAL;
-			goto clk_reg_fail;
-		}
-		clk_data->clks[i] = clk;
-	}
-
-	rc = of_clk_add_provider(pdev->dev.of_node,
-			of_clk_src_onecell_get, clk_data);
-	if (rc) {
-		pr_err("Clock register failed rc=%d\n", rc);
-		rc = -EPROBE_DEFER;
-		goto clk_reg_fail;
-	} else {
-		pr_debug("SUCCESS\n");
-	}
-	return rc;
-clk_reg_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h
deleted file mode 100644
index 336eb1f..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DP_PLL_7NM_H
-#define __MDSS_DP_PLL_7NM_H
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
-#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
-
-struct dp_pll_db_7nm {
-	struct mdss_pll_resources *pll;
-
-	/* lane and orientation settings */
-	u8 lane_cnt;
-	u8 orientation;
-
-	/* COM PHY settings */
-	u32 hsclk_sel;
-	u32 dec_start_mode0;
-	u32 div_frac_start1_mode0;
-	u32 div_frac_start2_mode0;
-	u32 div_frac_start3_mode0;
-	u32 integloop_gain0_mode0;
-	u32 integloop_gain1_mode0;
-	u32 vco_tune_map;
-	u32 lock_cmp1_mode0;
-	u32 lock_cmp2_mode0;
-	u32 lock_cmp_en;
-	u32 cmn_config;
-	u32 txn_tran_drv_emp_en;
-
-	/* PHY vco divider */
-	u32 phy_vco_div;
-};
-
-int dp_vco_set_rate_7nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-unsigned long dp_vco_recalc_rate_7nm(struct clk_hw *hw,
-				unsigned long parent_rate);
-long dp_vco_round_rate_7nm(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate);
-int dp_vco_prepare_7nm(struct clk_hw *hw);
-void dp_vco_unprepare_7nm(struct clk_hw *hw);
-int dp_mux_set_parent_7nm(void *context,
-				unsigned int reg, unsigned int val);
-int dp_mux_get_parent_7nm(void *context,
-				unsigned int reg, unsigned int *val);
-#endif /* __MDSS_DP_PLL_7NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
deleted file mode 100644
index 2ef7358..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DP_PLL_H
-#define __MDSS_DP_PLL_H
-
-struct dp_pll_vco_clk {
-	struct clk_hw hw;
-	unsigned long	rate;		/* current vco rate */
-	u64		min_rate;	/* min vco rate */
-	u64		max_rate;	/* max vco rate */
-	void		*priv;
-};
-
-static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
-{
-	return container_of(hw, struct dp_pll_vco_clk, hw);
-}
-
-#ifdef CONFIG_QCOM_MDSS_DP_PLL
-int dp_pll_clock_register_14nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int dp_pll_clock_register_10nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int dp_pll_clock_register_7nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-#else
-static inline int dp_pll_clock_register_14nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	return 0;
-}
-
-static inline int dp_pll_clock_register_10nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	return 0;
-}
-
-static inline int dp_pll_clock_register_7nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	return 0;
-}
-#endif
-#endif /* __MDSS_DP_PLL_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c b/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c
deleted file mode 100644
index f715e8a..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c
+++ /dev/null
@@ -1,1004 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-
-#define MMSS_DSI_PHY_PLL_SYS_CLK_CTRL			0x0000
-#define MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN			0x0004
-#define MMSS_DSI_PHY_PLL_CMN_MODE			0x0008
-#define MMSS_DSI_PHY_PLL_IE_TRIM			0x000C
-#define MMSS_DSI_PHY_PLL_IP_TRIM			0x0010
-
-#define MMSS_DSI_PHY_PLL_PLL_PHSEL_CONTROL		0x0018
-#define MMSS_DSI_PHY_PLL_IPTAT_TRIM_VCCA_TX_SEL		0x001C
-#define MMSS_DSI_PHY_PLL_PLL_PHSEL_DC			0x0020
-#define MMSS_DSI_PHY_PLL_PLL_IP_SETI			0x0024
-#define MMSS_DSI_PHY_PLL_CORE_CLK_IN_SYNC_SEL		0x0028
-
-#define MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN		0x0030
-#define MMSS_DSI_PHY_PLL_PLL_CP_SETI			0x0034
-#define MMSS_DSI_PHY_PLL_PLL_IP_SETP			0x0038
-#define MMSS_DSI_PHY_PLL_PLL_CP_SETP			0x003C
-#define MMSS_DSI_PHY_PLL_ATB_SEL1			0x0040
-#define MMSS_DSI_PHY_PLL_ATB_SEL2			0x0044
-#define MMSS_DSI_PHY_PLL_SYSCLK_EN_SEL_TXBAND		0x0048
-#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL			0x004C
-#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL2			0x0050
-#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL3			0x0054
-#define MMSS_DSI_PHY_PLL_RESETSM_PLL_CAL_COUNT1		0x0058
-#define MMSS_DSI_PHY_PLL_RESETSM_PLL_CAL_COUNT2		0x005C
-#define MMSS_DSI_PHY_PLL_DIV_REF1			0x0060
-#define MMSS_DSI_PHY_PLL_DIV_REF2			0x0064
-#define MMSS_DSI_PHY_PLL_KVCO_COUNT1			0x0068
-#define MMSS_DSI_PHY_PLL_KVCO_COUNT2			0x006C
-#define MMSS_DSI_PHY_PLL_KVCO_CAL_CNTRL			0x0070
-#define MMSS_DSI_PHY_PLL_KVCO_CODE			0x0074
-#define MMSS_DSI_PHY_PLL_VREF_CFG1			0x0078
-#define MMSS_DSI_PHY_PLL_VREF_CFG2			0x007C
-#define MMSS_DSI_PHY_PLL_VREF_CFG3			0x0080
-#define MMSS_DSI_PHY_PLL_VREF_CFG4			0x0084
-#define MMSS_DSI_PHY_PLL_VREF_CFG5			0x0088
-#define MMSS_DSI_PHY_PLL_VREF_CFG6			0x008C
-#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP1			0x0090
-#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP2			0x0094
-#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP3			0x0098
-
-#define MMSS_DSI_PHY_PLL_BGTC				0x00A0
-#define MMSS_DSI_PHY_PLL_PLL_TEST_UPDN			0x00A4
-#define MMSS_DSI_PHY_PLL_PLL_VCO_TUNE			0x00A8
-#define MMSS_DSI_PHY_PLL_DEC_START1			0x00AC
-#define MMSS_DSI_PHY_PLL_PLL_AMP_OS			0x00B0
-#define MMSS_DSI_PHY_PLL_SSC_EN_CENTER			0x00B4
-#define MMSS_DSI_PHY_PLL_SSC_ADJ_PER1			0x00B8
-#define MMSS_DSI_PHY_PLL_SSC_ADJ_PER2			0x00BC
-#define MMSS_DSI_PHY_PLL_SSC_PER1			0x00C0
-#define MMSS_DSI_PHY_PLL_SSC_PER2			0x00C4
-#define MMSS_DSI_PHY_PLL_SSC_STEP_SIZE1			0x00C8
-#define MMSS_DSI_PHY_PLL_SSC_STEP_SIZE2			0x00CC
-#define MMSS_DSI_PHY_PLL_RES_CODE_UP			0x00D0
-#define MMSS_DSI_PHY_PLL_RES_CODE_DN			0x00D4
-#define MMSS_DSI_PHY_PLL_RES_CODE_UP_OFFSET		0x00D8
-#define MMSS_DSI_PHY_PLL_RES_CODE_DN_OFFSET		0x00DC
-#define MMSS_DSI_PHY_PLL_RES_CODE_START_SEG1		0x00E0
-#define MMSS_DSI_PHY_PLL_RES_CODE_START_SEG2		0x00E4
-#define MMSS_DSI_PHY_PLL_RES_CODE_CAL_CSR		0x00E8
-#define MMSS_DSI_PHY_PLL_RES_CODE			0x00EC
-#define MMSS_DSI_PHY_PLL_RES_TRIM_CONTROL		0x00F0
-#define MMSS_DSI_PHY_PLL_RES_TRIM_CONTROL2		0x00F4
-#define MMSS_DSI_PHY_PLL_RES_TRIM_EN_VCOCALDONE		0x00F8
-#define MMSS_DSI_PHY_PLL_FAUX_EN			0x00FC
-
-#define MMSS_DSI_PHY_PLL_DIV_FRAC_START1		0x0100
-#define MMSS_DSI_PHY_PLL_DIV_FRAC_START2		0x0104
-#define MMSS_DSI_PHY_PLL_DIV_FRAC_START3		0x0108
-#define MMSS_DSI_PHY_PLL_DEC_START2			0x010C
-#define MMSS_DSI_PHY_PLL_PLL_RXTXEPCLK_EN		0x0110
-#define MMSS_DSI_PHY_PLL_PLL_CRCTRL			0x0114
-#define MMSS_DSI_PHY_PLL_LOW_POWER_RO_CONTROL		0x013C
-#define MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL		0x0140
-#define MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER		0x0144
-#define MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER		0x0148
-#define MMSS_DSI_PHY_PLL_PLL_VCO_HIGH			0x014C
-#define MMSS_DSI_PHY_PLL_RESET_SM			0x0150
-#define MMSS_DSI_PHY_PLL_MUXVAL			0x0154
-#define MMSS_DSI_PHY_PLL_CORE_RES_CODE_DN		0x0158
-#define MMSS_DSI_PHY_PLL_CORE_RES_CODE_UP		0x015C
-#define MMSS_DSI_PHY_PLL_CORE_VCO_TUNE			0x0160
-#define MMSS_DSI_PHY_PLL_CORE_VCO_TAIL			0x0164
-#define MMSS_DSI_PHY_PLL_CORE_KVCO_CODE		0x0168
-
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL0		0x014
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL1		0x018
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL2		0x01C
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL3		0x020
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL4		0x024
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL5		0x028
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL6		0x02C
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL7		0x030
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL8		0x034
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL9		0x038
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL10		0x03C
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL11		0x040
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL12		0x044
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL13		0x048
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL14		0x04C
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL16		0x054
-#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL17		0x058
-
-#define DSI_PLL_POLL_DELAY_US			1000
-#define DSI_PLL_POLL_TIMEOUT_US			15000
-
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel)
-{
-	return 0;
-}
-
-int get_mdss_byte_mux_sel(struct mux_clk *clk)
-{
-	return 0;
-}
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel)
-{
-	return 0;
-}
-
-int get_mdss_pixel_mux_sel(struct mux_clk *clk)
-{
-	return 0;
-}
-
-static void pll_20nm_cache_trim_codes(struct mdss_pll_resources *dsi_pll_res)
-{
-	int rc;
-
-	if (dsi_pll_res->reg_upd)
-		return;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return;
-	}
-
-	dsi_pll_res->cache_pll_trim_codes[0] =
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_CORE_KVCO_CODE);
-	dsi_pll_res->cache_pll_trim_codes[1] =
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_CORE_VCO_TUNE);
-
-	pr_debug("core_kvco_code=0x%x core_vco_turn=0x%x\n",
-		dsi_pll_res->cache_pll_trim_codes[0],
-		dsi_pll_res->cache_pll_trim_codes[1]);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	dsi_pll_res->reg_upd = true;
-}
-
-static void pll_20nm_override_trim_codes(struct mdss_pll_resources *dsi_pll_res)
-{
-	u32 reg_data;
-	void __iomem *pll_base = dsi_pll_res->pll_base;
-
-	/*
-	 * Override mux config for all cached trim codes from
-	 * saved config except for VCO Tune
-	 */
-	reg_data = (dsi_pll_res->cache_pll_trim_codes[0] & 0x3f) | BIT(5);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_CODE, reg_data);
-
-	reg_data = (dsi_pll_res->cache_pll_trim_codes[1] & 0x7f) | BIT(7);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCO_TUNE, reg_data);
-}
-
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-	int reg_data;
-
-	pr_debug("bypass_lp_div mux set to %s mode\n",
-				sel ? "indirect" : "direct");
-
-	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
-
-	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
-	reg_data |= BIT(7);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-			reg_data | (sel << 5));
-	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
-
-	return 0;
-}
-
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-	int reg_data, rem;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-
-	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
-	reg_data |= BIT(7);
-
-	pr_debug("%d: reg_data = %x\n", __LINE__, reg_data);
-
-	/* Repeat POST DIVIDER 2 times (4 writes)*/
-	for (rem = 0; rem < 2; rem++)
-		MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-			MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL16 + (4 * rem),
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-			(reg_data | (sel << 5)), (reg_data | (sel << 5)));
-
-	return 0;
-}
-
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk)
-{
-	int mux_mode, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL) & BIT(5);
-
-	pr_debug("bypass_lp_div mux mode = %s\n",
-				mux_mode ? "indirect" : "direct");
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return !!mux_mode;
-}
-
-int ndiv_set_div(struct div_clk *clk, int div)
-{
-	int rc, reg_data;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-				reg_data | div);
-
-	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int shadow_ndiv_set_div(struct div_clk *clk, int div)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-
-	pr_debug("%d div=%i\n", __LINE__, div);
-
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL14,
-		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-		0x07, (0xB | div));
-
-	return 0;
-}
-
-int ndiv_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(clk->priv, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL) & 0x0F;
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return div;
-}
-
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
-				(div - 1));
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-	pr_debug("%d div = %d\n", __LINE__, div);
-
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL5,
-		MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
-		MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
-		(div - 1), (div - 1));
-
-	return 0;
-}
-
-int fixed_hr_oclk2_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-				MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return div + 1;
-}
-
-int hr_oclk3_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	pr_debug("%d div = %d\n", __LINE__, div);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
-				(div - 1));
-	pr_debug("%s: HR_OCLK3_DIVIDER = 0x%x\n", __func__,
-		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER));
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-
-	pr_debug("%d div = %d\n", __LINE__, div);
-
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL6,
-		MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
-		MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
-		(div - 1), (div - 1));
-
-	return 0;
-}
-
-int hr_oclk3_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-				MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return div + 1;
-}
-
-static bool pll_20nm_is_pll_locked(struct mdss_pll_resources *dsi_pll_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
-			MMSS_DSI_PHY_PLL_RESET_SM),
-			status,
-			((status & BIT(5)) > 0),
-			DSI_PLL_POLL_DELAY_US,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("DSI PLL status=%x failed to Lock\n", status);
-		pll_locked = false;
-	} else if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
-			MMSS_DSI_PHY_PLL_RESET_SM),
-			status,
-			((status & BIT(6)) > 0),
-			DSI_PLL_POLL_DELAY_US,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("DSI PLL status=%x PLl not ready\n", status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-void __dsi_pll_disable(void __iomem *pll_base)
-{
-	if (!pll_base) {
-		pr_err("Invalid pll base\n");
-		return;
-	}
-	pr_debug("Disabling PHY PLL for PLL_BASE=%p\n", pll_base);
-
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x02);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x06);
-}
-
-static void pll_20nm_config_powerdown(void __iomem *pll_base)
-{
-	if (!pll_base) {
-		pr_err("Invalid pll base.\n");
-		return;
-	}
-
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYS_CLK_CTRL, 0x00);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_CMN_MODE, 0x01);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x82);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x02);
-}
-
-static int dsi_pll_enable(struct clk *c)
-{
-	int i, rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/* Try all enable sequences until one succeeds */
-	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
-		rc = vco->pll_enable_seqs[i](dsi_pll_res);
-		pr_debug("DSI PLL %s after sequence #%d\n",
-			rc ? "unlocked" : "locked", i + 1);
-		if (!rc)
-			break;
-	}
-	/* Disable PLL1 to avoid current leakage while toggling MDSS GDSC */
-	if (dsi_pll_res->pll_1_base)
-		pll_20nm_config_powerdown(dsi_pll_res->pll_1_base);
-
-	if (rc) {
-		mdss_pll_resource_enable(dsi_pll_res, false);
-		pr_err("DSI PLL failed to lock\n");
-	}
-	dsi_pll_res->pll_on = true;
-
-	return rc;
-}
-
-static void dsi_pll_disable(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res->pll_on &&
-		mdss_pll_resource_enable(dsi_pll_res, true)) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return;
-	}
-
-	dsi_pll_res->handoff_resources = false;
-
-	__dsi_pll_disable(dsi_pll_res->pll_base);
-
-	/* Disable PLL1 to avoid current leakage while toggling MDSS GDSC */
-	if (dsi_pll_res->pll_1_base)
-		pll_20nm_config_powerdown(dsi_pll_res->pll_1_base);
-
-	pll_20nm_config_powerdown(dsi_pll_res->pll_base);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	dsi_pll_res->pll_on = false;
-
-	pr_debug("DSI PLL Disabled\n");
-}
-
-static void pll_20nm_config_common_block_1(void __iomem *pll_base)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x82);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x2a);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x2b);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x02);
-}
-
-static void pll_20nm_config_common_block_2(void __iomem *pll_base)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYS_CLK_CTRL, 0x40);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IE_TRIM, 0x0F);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IP_TRIM, 0x0F);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_PHSEL_CONTROL, 0x08);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IPTAT_TRIM_VCCA_TX_SEL, 0x0E);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN, 0x08);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYSCLK_EN_SEL_TXBAND, 0x4A);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_REF1, 0x00);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_REF2, 0x01);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CNTRL, 0x07);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_CAL_CNTRL, 0x1f);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_COUNT1, 0x8A);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_VREF_CFG3, 0x10);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SSC_EN_CENTER, 0x00);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_FAUX_EN, 0x0C);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_RXTXEPCLK_EN, 0x0a);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_LOW_POWER_RO_CONTROL, 0x0f);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_CMN_MODE, 0x00);
-}
-
-static void pll_20nm_config_loop_bw(void __iomem *pll_base)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_IP_SETI, 0x03);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CP_SETI, 0x3F);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_IP_SETP, 0x03);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CP_SETP, 0x1F);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CRCTRL, 0x77);
-}
-
-static void pll_20nm_vco_rate_calc(struct mdss_pll_vco_calc *vco_calc,
-	s64 vco_clk_rate, s64 ref_clk_rate)
-{
-	s64 multiplier = (1 << 20);
-	s64 duration = 1024, pll_comp_val;
-	s64 dec_start_multiple, dec_start;
-	s32 div_frac_start;
-	s64 dec_start1, dec_start2;
-	s32 div_frac_start1, div_frac_start2, div_frac_start3;
-	s64 pll_plllock_cmp1, pll_plllock_cmp2, pll_plllock_cmp3;
-
-	memset(vco_calc, 0, sizeof(*vco_calc));
-	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n", vco_clk_rate,
-		ref_clk_rate);
-
-	dec_start_multiple = div_s64(vco_clk_rate * multiplier,
-					2 * ref_clk_rate);
-	div_s64_rem(dec_start_multiple,
-			multiplier, &div_frac_start);
-
-	dec_start = div_s64(dec_start_multiple, multiplier);
-	dec_start1 = (dec_start & 0x7f) | BIT(7);
-	dec_start2 = ((dec_start & 0x80) >> 7) | BIT(1);
-	div_frac_start1 = (div_frac_start & 0x7f) | BIT(7);
-	div_frac_start2 = ((div_frac_start >> 7) & 0x7f) | BIT(7);
-	div_frac_start3 = ((div_frac_start >> 14) & 0x3f) | BIT(6);
-	pll_comp_val = (div_s64(dec_start_multiple * 2 * duration,
-				10 * multiplier)) - 1;
-	pll_plllock_cmp1 = pll_comp_val & 0xff;
-	pll_plllock_cmp2 = (pll_comp_val >> 8) & 0xff;
-	pll_plllock_cmp3 = (pll_comp_val >> 16) & 0xff;
-
-	pr_debug("dec_start_multiple = 0x%llx\n", dec_start_multiple);
-	pr_debug("dec_start = 0x%llx, div_frac_start = 0x%x\n",
-			dec_start, div_frac_start);
-	pr_debug("dec_start1 = 0x%llx, dec_start2 = 0x%llx\n",
-			dec_start1, dec_start2);
-	pr_debug("div_frac_start1 = 0x%x, div_frac_start2 = 0x%x\n",
-			div_frac_start1, div_frac_start2);
-	pr_debug("div_frac_start3 = 0x%x\n", div_frac_start3);
-	pr_debug("pll_comp_val = 0x%llx\n", pll_comp_val);
-	pr_debug("pll_plllock_cmp1 = 0x%llx, pll_plllock_cmp2 =%llx\n",
-			pll_plllock_cmp1, pll_plllock_cmp2);
-	pr_debug("pll_plllock_cmp3 = 0x%llx\n",	pll_plllock_cmp3);
-
-	/* Assign to vco struct */
-	vco_calc->div_frac_start1 = div_frac_start1;
-	vco_calc->div_frac_start2 = div_frac_start2;
-	vco_calc->div_frac_start3 = div_frac_start3;
-	vco_calc->dec_start1 = dec_start1;
-	vco_calc->dec_start2 = dec_start2;
-	vco_calc->pll_plllock_cmp1 = pll_plllock_cmp1;
-	vco_calc->pll_plllock_cmp2 = pll_plllock_cmp2;
-	vco_calc->pll_plllock_cmp3 = pll_plllock_cmp3;
-}
-
-static void pll_20nm_config_vco_rate(void __iomem *pll_base,
-	struct mdss_pll_vco_calc *vco_calc)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START1,
-		vco_calc->div_frac_start1);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START2,
-		vco_calc->div_frac_start2);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START3,
-		vco_calc->div_frac_start3);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DEC_START1,
-		vco_calc->dec_start1);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DEC_START2,
-		vco_calc->dec_start2);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP1,
-		vco_calc->pll_plllock_cmp1);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP2,
-		vco_calc->pll_plllock_cmp2);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP3,
-		vco_calc->pll_plllock_cmp3);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN, 0x01);
-}
-
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
-{
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	dsi_pll_res->vco_current_rate = rate;
-	dsi_pll_res->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	return 0;
-}
-
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
-		unsigned long rate)
-{
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-	struct mdss_pll_vco_calc vco_calc;
-	s64 vco_clk_rate = rate;
-	u32 rem;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-
-	pr_debug("req vco set rate: %lld\n", vco_clk_rate);
-
-	pll_20nm_override_trim_codes(dsi_pll_res);
-
-	/* div fraction, start and comp calculations */
-	pll_20nm_vco_rate_calc(&vco_calc, vco_clk_rate,
-		dsi_pll_res->vco_ref_clk_rate);
-
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL0,
-		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-		MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN,
-		0xB1, 0);
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL1,
-		MMSS_DSI_PHY_PLL_PLLLOCK_CMP1,
-		MMSS_DSI_PHY_PLL_PLLLOCK_CMP2,
-		vco_calc.pll_plllock_cmp1, vco_calc.pll_plllock_cmp2);
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL2,
-		MMSS_DSI_PHY_PLL_PLLLOCK_CMP3,
-		MMSS_DSI_PHY_PLL_DEC_START1,
-		vco_calc.pll_plllock_cmp3, vco_calc.dec_start1);
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL3,
-		MMSS_DSI_PHY_PLL_DEC_START2,
-		MMSS_DSI_PHY_PLL_DIV_FRAC_START1,
-		vco_calc.dec_start2, vco_calc.div_frac_start1);
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL4,
-		MMSS_DSI_PHY_PLL_DIV_FRAC_START2,
-		MMSS_DSI_PHY_PLL_DIV_FRAC_START3,
-		vco_calc.div_frac_start2, vco_calc.div_frac_start3);
-	/* Method 2 - Auto PLL calibration */
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL7,
-		MMSS_DSI_PHY_PLL_PLL_VCO_TUNE,
-		MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN,
-		0, 0x0D);
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL8,
-		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
-		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-		0xF0, 0x07);
-
-	/*
-	 * RESETSM_CTRL3 has to be set for 12 times (6 reg writes),
-	 * Each register setting write 2 times, running in loop for 5
-	 * times (5 reg writes) and other two iterations are taken
-	 * care (one above and other in shadow_bypass
-	 */
-	for (rem = 0; rem < 5; rem++) {
-		MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-				MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL9 + (4 * rem),
-				MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-				MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-				0x07, 0x07);
-	}
-
-	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
-		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL15,
-		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
-		0x03, 0x03);
-
-	/* memory barrier */
-	wmb();
-	return 0;
-}
-
-unsigned long pll_20nm_vco_get_rate(struct clk *c)
-{
-	u64 vco_rate, multiplier = (1 << 20);
-	s32 div_frac_start;
-	u32 dec_start;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	u64 ref_clk = vco->ref_clk_rate;
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	dec_start = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_DEC_START2) & BIT(0)) << 7;
-	dec_start |= (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_DEC_START1) & 0x7f);
-	pr_debug("dec_start = 0x%x\n", dec_start);
-
-	div_frac_start = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_DIV_FRAC_START3) & 0x3f) << 14;
-	div_frac_start |= (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_DIV_FRAC_START2) & 0x7f) << 7;
-	div_frac_start |= MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			MMSS_DSI_PHY_PLL_DIV_FRAC_START1) & 0x7f;
-	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
-
-	vco_rate = ref_clk * 2 * dec_start;
-	vco_rate += ((ref_clk * 2 * div_frac_start) / multiplier);
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return (unsigned long)vco_rate;
-}
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-enum handoff pll_20nm_vco_handoff(struct clk *c)
-{
-	int rc;
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return HANDOFF_DISABLED_CLK;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return ret;
-	}
-
-	if (pll_20nm_is_pll_locked(dsi_pll_res)) {
-		dsi_pll_res->handoff_resources = true;
-		dsi_pll_res->pll_on = true;
-		c->rate = pll_20nm_vco_get_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-		dsi_pll_res->vco_locking_rate = c->rate;
-		dsi_pll_res->is_init_locked = true;
-		pll_20nm_cache_trim_codes(dsi_pll_res);
-		pr_debug("handoff vco_locking_rate=%llu\n",
-			dsi_pll_res->vco_locking_rate);
-	} else {
-		mdss_pll_resource_enable(dsi_pll_res, false);
-		dsi_pll_res->vco_locking_rate = 0;
-		dsi_pll_res->is_init_locked = false;
-	}
-
-	return ret;
-}
-
-int pll_20nm_vco_prepare(struct clk *c)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res) {
-		pr_err("Dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	if ((dsi_pll_res->vco_cached_rate != 0)
-	    && (dsi_pll_res->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate);
-		if (rc) {
-			pr_err("vco_set_rate failed. rc=%d\n", rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_pll_enable(c);
-
-error:
-	return rc;
-}
-
-void pll_20nm_vco_unprepare(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res) {
-		pr_err("Dsi pll resources are not available\n");
-		return;
-	}
-
-	dsi_pll_res->vco_cached_rate = c->rate;
-	dsi_pll_disable(c);
-}
-
-static void pll_20nm_config_resetsm(void __iomem *pll_base)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL, 0x24);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL2, 0x07);
-}
-
-static void pll_20nm_config_vco_start(void __iomem *pll_base)
-{
-
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x03);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x02);
-	udelay(10);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x03);
-}
-
-static void pll_20nm_config_bypass_cal(void __iomem *pll_base)
-{
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL, 0xac);
-	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN, 0x28);
-}
-
-static int pll_20nm_vco_relock(struct mdss_pll_resources *dsi_pll_res)
-{
-	int rc = 0;
-
-	pll_20nm_override_trim_codes(dsi_pll_res);
-	pll_20nm_config_bypass_cal(dsi_pll_res->pll_base);
-	pll_20nm_config_vco_start(dsi_pll_res->pll_base);
-
-	if (!pll_20nm_is_pll_locked(dsi_pll_res)) {
-		pr_err("DSI PLL re-lock failed\n");
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static int pll_20nm_vco_init_lock(struct mdss_pll_resources *dsi_pll_res)
-{
-	int rc = 0;
-
-	pll_20nm_config_resetsm(dsi_pll_res->pll_base);
-	pll_20nm_config_vco_start(dsi_pll_res->pll_base);
-
-	if (!pll_20nm_is_pll_locked(dsi_pll_res)) {
-		pr_err("DSI PLL init lock failed\n");
-		rc = -EINVAL;
-		goto init_lock_err;
-	}
-
-	pll_20nm_cache_trim_codes(dsi_pll_res);
-
-init_lock_err:
-	return rc;
-}
-
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res)
-{
-	int rc = 0;
-	struct mdss_pll_vco_calc vco_calc;
-
-	if (!dsi_pll_res) {
-		pr_err("Invalid PLL resources\n");
-		return -EINVAL;
-	}
-
-	pll_20nm_config_common_block_1(dsi_pll_res->pll_1_base);
-	pll_20nm_config_common_block_1(dsi_pll_res->pll_base);
-	pll_20nm_config_common_block_2(dsi_pll_res->pll_base);
-	pll_20nm_config_loop_bw(dsi_pll_res->pll_base);
-
-	pll_20nm_vco_rate_calc(&vco_calc, dsi_pll_res->vco_current_rate,
-		dsi_pll_res->vco_ref_clk_rate);
-	pll_20nm_config_vco_rate(dsi_pll_res->pll_base, &vco_calc);
-
-	pr_debug("init lock=%d prev vco_rate=%llu, new vco_rate=%llu\n",
-		dsi_pll_res->is_init_locked, dsi_pll_res->vco_locking_rate,
-		dsi_pll_res->vco_current_rate);
-	/*
-	 * Run auto-lock sequence if it is either bootup initial
-	 * locking or when the vco rate is changed. Otherwise, just
-	 * use stored codes and bypass caliberation.
-	 */
-	if (!dsi_pll_res->is_init_locked || (dsi_pll_res->vco_locking_rate !=
-			dsi_pll_res->vco_current_rate)) {
-		rc = pll_20nm_vco_init_lock(dsi_pll_res);
-		dsi_pll_res->is_init_locked = (rc) ? false : true;
-	} else {
-		rc = pll_20nm_vco_relock(dsi_pll_res);
-	}
-
-	dsi_pll_res->vco_locking_rate = (rc) ? 0 :
-		dsi_pll_res->vco_current_rate;
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
deleted file mode 100644
index 1eb4f6f..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ /dev/null
@@ -1,1643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include "mdss-dsi-pll.h"
-#include "mdss-pll.h"
-#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
-#define CREATE_TRACE_POINTS
-#include "mdss_pll_trace.h"
-
-#define VCO_DELAY_USEC 1
-
-#define MHZ_250		250000000UL
-#define MHZ_500		500000000UL
-#define MHZ_1000	1000000000UL
-#define MHZ_1100	1100000000UL
-#define MHZ_1900	1900000000UL
-#define MHZ_3000	3000000000UL
-
-/* Register Offsets from PLL base address */
-#define PLL_ANALOG_CONTROLS_ONE			0x000
-#define PLL_ANALOG_CONTROLS_TWO			0x004
-#define PLL_INT_LOOP_SETTINGS			0x008
-#define PLL_INT_LOOP_SETTINGS_TWO		0x00c
-#define PLL_ANALOG_CONTROLS_THREE		0x010
-#define PLL_ANALOG_CONTROLS_FOUR		0x014
-#define PLL_INT_LOOP_CONTROLS			0x018
-#define PLL_DSM_DIVIDER				0x01c
-#define PLL_FEEDBACK_DIVIDER			0x020
-#define PLL_SYSTEM_MUXES			0x024
-#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES	0x028
-#define PLL_CMODE				0x02c
-#define PLL_CALIBRATION_SETTINGS		0x030
-#define PLL_BAND_SEL_CAL_TIMER_LOW		0x034
-#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x038
-#define PLL_BAND_SEL_CAL_SETTINGS		0x03c
-#define PLL_BAND_SEL_MIN			0x040
-#define PLL_BAND_SEL_MAX			0x044
-#define PLL_BAND_SEL_PFILT			0x048
-#define PLL_BAND_SEL_IFILT			0x04c
-#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x050
-#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x054
-#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x058
-#define PLL_BAND_SEL_ICODE_HIGH			0x05c
-#define PLL_BAND_SEL_ICODE_LOW			0x060
-#define PLL_FREQ_DETECT_SETTINGS_ONE		0x064
-#define PLL_PFILT				0x07c
-#define PLL_IFILT				0x080
-#define PLL_GAIN				0x084
-#define PLL_ICODE_LOW				0x088
-#define PLL_ICODE_HIGH				0x08c
-#define PLL_LOCKDET				0x090
-#define PLL_OUTDIV				0x094
-#define PLL_FASTLOCK_CONTROL			0x098
-#define PLL_PASS_OUT_OVERRIDE_ONE		0x09c
-#define PLL_PASS_OUT_OVERRIDE_TWO		0x0a0
-#define PLL_CORE_OVERRIDE			0x0a4
-#define PLL_CORE_INPUT_OVERRIDE			0x0a8
-#define PLL_RATE_CHANGE				0x0ac
-#define PLL_PLL_DIGITAL_TIMERS			0x0b0
-#define PLL_PLL_DIGITAL_TIMERS_TWO		0x0b4
-#define PLL_DEC_FRAC_MUXES			0x0c8
-#define PLL_DECIMAL_DIV_START_1			0x0cc
-#define PLL_FRAC_DIV_START_LOW_1		0x0d0
-#define PLL_FRAC_DIV_START_MID_1		0x0d4
-#define PLL_FRAC_DIV_START_HIGH_1		0x0d8
-#define PLL_MASH_CONTROL			0x0ec
-#define PLL_SSC_MUX_CONTROL			0x108
-#define PLL_SSC_STEPSIZE_LOW_1			0x10c
-#define PLL_SSC_STEPSIZE_HIGH_1			0x110
-#define PLL_SSC_DIV_PER_LOW_1			0x114
-#define PLL_SSC_DIV_PER_HIGH_1			0x118
-#define PLL_SSC_DIV_ADJPER_LOW_1		0x11c
-#define PLL_SSC_DIV_ADJPER_HIGH_1		0x120
-#define PLL_SSC_CONTROL				0x13c
-#define PLL_PLL_OUTDIV_RATE			0x140
-#define PLL_PLL_LOCKDET_RATE_1			0x144
-#define PLL_PLL_PROP_GAIN_RATE_1		0x14c
-#define PLL_PLL_BAND_SET_RATE_1			0x154
-#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
-#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
-#define PLL_FASTLOCK_EN_BAND			0x16c
-#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
-#define PLL_PLL_LOCK_OVERRIDE			0x180
-#define PLL_PLL_LOCK_DELAY			0x184
-#define PLL_PLL_LOCK_MIN_DELAY			0x188
-#define PLL_CLOCK_INVERTERS			0x18c
-#define PLL_SPARE_AND_JPC_OVERRIDES		0x190
-#define PLL_BIAS_CONTROL_1			0x194
-#define PLL_BIAS_CONTROL_2			0x198
-#define PLL_ALOG_OBSV_BUS_CTRL_1		0x19c
-#define PLL_COMMON_STATUS_ONE			0x1a0
-
-/* Register Offsets from PHY base address */
-#define PHY_CMN_CLK_CFG0	0x010
-#define PHY_CMN_CLK_CFG1	0x014
-#define PHY_CMN_RBUF_CTRL	0x01c
-#define PHY_CMN_PLL_CNTRL	0x038
-#define PHY_CMN_CTRL_0		0x024
-
-/* Bit definition of SSC control registers */
-#define SSC_CENTER		BIT(0)
-#define SSC_EN			BIT(1)
-#define SSC_FREQ_UPDATE		BIT(2)
-#define SSC_FREQ_UPDATE_MUX	BIT(3)
-#define SSC_UPDATE_SSC		BIT(4)
-#define SSC_UPDATE_SSC_MUX	BIT(5)
-#define SSC_START		BIT(6)
-#define SSC_START_MUX		BIT(7)
-
-enum {
-	DSI_PLL_0,
-	DSI_PLL_1,
-	DSI_PLL_MAX
-};
-
-struct dsi_pll_regs {
-	u32 pll_prop_gain_rate;
-	u32 pll_lockdet_rate;
-	u32 decimal_div_start;
-	u32 frac_div_start_low;
-	u32 frac_div_start_mid;
-	u32 frac_div_start_high;
-	u32 pll_clock_inverters;
-	u32 ssc_stepsize_low;
-	u32 ssc_stepsize_high;
-	u32 ssc_div_per_low;
-	u32 ssc_div_per_high;
-	u32 ssc_adjper_low;
-	u32 ssc_adjper_high;
-	u32 ssc_control;
-};
-
-struct dsi_pll_config {
-	u32 ref_freq;
-	bool div_override;
-	u32 output_div;
-	bool ignore_frac;
-	bool disable_prescaler;
-	bool enable_ssc;
-	bool ssc_center;
-	u32 dec_bits;
-	u32 frac_bits;
-	u32 lock_timer;
-	u32 ssc_freq;
-	u32 ssc_offset;
-	u32 ssc_adj_per;
-	u32 thresh_cycles;
-	u32 refclk_cycles;
-};
-
-struct dsi_pll_10nm {
-	struct mdss_pll_resources *rsc;
-	struct dsi_pll_config pll_configuration;
-	struct dsi_pll_regs reg_setup;
-};
-
-static inline int pll_reg_read(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*val = MDSS_PLL_REG_R(rsc->pll_base, reg);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int pll_reg_write(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(rsc->pll_base, reg, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_read(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_write(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(rsc->phy_base, reg, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
-		unsigned int reg, unsigned int mask, unsigned int val)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	reg_val &= ~mask;
-	reg_val |= (val & mask);
-	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
-
-	return 0;
-}
-
-static inline int phy_reg_update_bits(void *context, unsigned int reg,
-				unsigned int mask, unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
-	if (!rc && rsc->slave)
-		rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int pclk_mux_read_sel(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc)
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-	else
-		*val = (MDSS_PLL_REG_R(rsc->phy_base, reg) & 0x3);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-
-static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
-				unsigned int reg, unsigned int val)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	reg_val &= ~0x03;
-	reg_val |= val;
-
-	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
-
-	return 0;
-}
-
-static inline int pclk_mux_write_sel(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = pclk_mux_write_sel_sub(rsc, reg, val);
-	if (!rc && rsc->slave)
-		rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	/*
-	 * cache the current parent index for cases where parent
-	 * is not changing but rate is changing. In that case
-	 * clock framework won't call parent_set and hence dsiclk_sel
-	 * bit won't be programmed. e.g. dfps update use case.
-	 */
-	rsc->cached_cfg1 = val;
-
-	return rc;
-}
-
-static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_10nm plls[DSI_PLL_MAX];
-
-static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
-{
-	u32 reg;
-	struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
-
-	if (!rsc)
-		return;
-
-	/* Only DSI PLL0 can act as a master */
-	if (rsc->index != DSI_PLL_0)
-		return;
-
-	/* default configuration: source is either internal or ref clock */
-	rsc->slave = NULL;
-
-	if (!orsc) {
-		pr_warn("slave PLL unavilable, assuming standalone config\n");
-		return;
-	}
-
-	/* check to see if the source of DSI1 PLL bitclk is set to external */
-	reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
-	reg &= (BIT(2) | BIT(3));
-	if (reg == 0x04)
-		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
-
-	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
-}
-
-static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
-				 struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-
-	config->ref_freq = 19200000;
-	config->output_div = 1;
-	config->dec_bits = 8;
-	config->frac_bits = 18;
-	config->lock_timer = 64;
-	config->ssc_freq = 31500;
-	config->ssc_offset = 5000;
-	config->ssc_adj_per = 2;
-	config->thresh_cycles = 32;
-	config->refclk_cycles = 256;
-
-	config->div_override = false;
-	config->ignore_frac = false;
-	config->disable_prescaler = false;
-	config->enable_ssc = rsc->ssc_en;
-	config->ssc_center = rsc->ssc_center;
-
-	if (config->enable_ssc) {
-		if (rsc->ssc_freq)
-			config->ssc_freq = rsc->ssc_freq;
-		if (rsc->ssc_ppm)
-			config->ssc_offset = rsc->ssc_ppm;
-	}
-
-	dsi_pll_config_slave(rsc);
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
-				  struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u64 fref = rsc->vco_ref_clk_rate;
-	u64 pll_freq;
-	u64 divider;
-	u64 dec, dec_multiple;
-	u32 frac;
-	u64 multiplier;
-
-	pll_freq = rsc->vco_current_rate;
-
-	if (config->disable_prescaler)
-		divider = fref;
-	else
-		divider = fref * 2;
-
-	multiplier = 1 << config->frac_bits;
-	dec_multiple = div_u64(pll_freq * multiplier, divider);
-	div_u64_rem(dec_multiple, multiplier, &frac);
-
-	dec = div_u64(dec_multiple, multiplier);
-
-	if (pll_freq <= MHZ_1900)
-		regs->pll_prop_gain_rate = 8;
-	else if (pll_freq <= MHZ_3000)
-		regs->pll_prop_gain_rate = 10;
-	else
-		regs->pll_prop_gain_rate = 12;
-	if (pll_freq < MHZ_1100)
-		regs->pll_clock_inverters = 8;
-	else
-		regs->pll_clock_inverters = 0;
-
-	regs->pll_lockdet_rate = config->lock_timer;
-	regs->decimal_div_start = dec;
-	regs->frac_div_start_low = (frac & 0xff);
-	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-	regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
-
-static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
-		  struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u32 ssc_per;
-	u32 ssc_mod;
-	u64 ssc_step_size;
-	u64 frac;
-
-	if (!config->enable_ssc) {
-		pr_debug("SSC not enabled\n");
-		return;
-	}
-
-	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-	ssc_per -= ssc_mod;
-
-	frac = regs->frac_div_start_low |
-			(regs->frac_div_start_mid << 8) |
-			(regs->frac_div_start_high << 16);
-	ssc_step_size = regs->decimal_div_start;
-	ssc_step_size *= (1 << config->frac_bits);
-	ssc_step_size += frac;
-	ssc_step_size *= config->ssc_offset;
-	ssc_step_size *= (config->ssc_adj_per + 1);
-	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-	regs->ssc_div_per_low = ssc_per & 0xFF;
-	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
-			regs->decimal_div_start, frac, config->frac_bits);
-	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
-
-static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
-		struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-
-	if (pll->pll_configuration.enable_ssc) {
-		pr_debug("SSC is enabled\n");
-
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
-				regs->ssc_stepsize_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
-				regs->ssc_stepsize_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
-				regs->ssc_div_per_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
-				regs->ssc_div_per_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_LOW_1,
-				regs->ssc_adjper_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_HIGH_1,
-				regs->ssc_adjper_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
-				SSC_EN | regs->ssc_control);
-	}
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
-				  struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
-	MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-	MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
-	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
-	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
-}
-
-static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x10);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x3f);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x80);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x02);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x82);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0xff);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x25);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x4f);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0a);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_GAIN, 0x42);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x30);
-	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x04);
-	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x01);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x08);
-	MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x03);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_EN_BAND, 0x03);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x19);
-	MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x0);
-	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x20);
-	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x0);
-}
-
-static void dsi_pll_commit(struct dsi_pll_10nm *pll,
-			   struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-	struct dsi_pll_regs *reg = &pll->reg_setup;
-
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
-	MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
-		       reg->decimal_div_start);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
-		       reg->frac_div_start_low);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
-		       reg->frac_div_start_mid);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
-		       reg->frac_div_start_high);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
-	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
-
-}
-
-static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
-			unsigned long parent_rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-	struct dsi_pll_10nm *pll;
-
-	if (!rsc) {
-		pr_err("pll resource not found\n");
-		return -EINVAL;
-	}
-
-	if (rsc->pll_on)
-		return 0;
-
-	pll = rsc->priv;
-	if (!pll) {
-		pr_err("pll configuration not found\n");
-		return -EINVAL;
-	}
-
-	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
-
-	rsc->vco_current_rate = rate;
-	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
-		       rsc->index, rc);
-		return rc;
-	}
-
-	dsi_pll_init_val(rsc);
-
-	dsi_pll_setup_config(pll, rsc);
-
-	dsi_pll_calc_dec_frac(pll, rsc);
-
-	dsi_pll_calc_ssc(pll, rsc);
-
-	dsi_pll_commit(pll, rsc);
-
-	dsi_pll_config_hzindep_reg(pll, rsc);
-
-	dsi_pll_ssc_commit(pll, rsc);
-
-	/* flush, ensure all register writes are done*/
-	wmb();
-
-	mdss_pll_resource_enable(rsc, false);
-
-	return 0;
-}
-
-static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
-{
-	int rc;
-	u32 status;
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-
-	rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
-				       status,
-				       ((status & BIT(0)) > 0),
-				       delay_us,
-				       timeout_us);
-	if (rc)
-		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
-			pll->index, status);
-
-	return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
-{
-	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
-	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
-	ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
-{
-	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
-	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
-	ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
-{
-	u32 data;
-
-	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
-}
-
-static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
-{
-	u32 data;
-
-	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
-}
-
-static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
-{
-	int rc;
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	dsi_pll_enable_pll_bias(rsc);
-	if (rsc->slave)
-		dsi_pll_enable_pll_bias(rsc->slave);
-
-	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
-	if (rsc->slave)
-		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
-	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
-
-	/* Start PLL */
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
-
-	/*
-	 * ensure all PLL configurations are written prior to checking
-	 * for PLL lock.
-	 */
-	wmb();
-
-	/* Check for PLL lock */
-	rc = dsi_pll_10nm_lock_status(rsc);
-	if (rc) {
-		pr_err("PLL(%d) lock failed\n", rsc->index);
-		goto error;
-	}
-
-	rsc->pll_on = true;
-
-	dsi_pll_enable_global_clk(rsc);
-	if (rsc->slave)
-		dsi_pll_enable_global_clk(rsc->slave);
-
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
-	if (rsc->slave)
-		MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
-
-error:
-	return rc;
-}
-
-static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
-{
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
-	dsi_pll_disable_pll_bias(rsc);
-}
-
-static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	if (!rsc->pll_on &&
-	    mdss_pll_resource_enable(rsc, true)) {
-		pr_err("failed to enable pll (%d) resources\n", rsc->index);
-		return;
-	}
-
-	rsc->handoff_resources = false;
-
-	pr_debug("stop PLL (%d)\n", rsc->index);
-
-	/*
-	 * To avoid any stray glitches while
-	 * abruptly powering down the PLL
-	 * make sure to gate the clock using
-	 * the clock enable bit before powering
-	 * down the PLL
-	 */
-	dsi_pll_disable_global_clk(rsc);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
-	dsi_pll_disable_sub(rsc);
-	if (rsc->slave) {
-		dsi_pll_disable_global_clk(rsc->slave);
-		dsi_pll_disable_sub(rsc->slave);
-	}
-	/* flush, ensure all register writes are done*/
-	wmb();
-	rsc->pll_on = false;
-}
-
-long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	*parent_rate = rrate;
-
-	return rrate;
-}
-
-static void vco_10nm_unprepare(struct clk_hw *hw)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("dsi pll resources not available\n");
-		return;
-	}
-
-	/*
-	 * During unprepare in continuous splash use case we want driver
-	 * to pick all dividers instead of retaining bootloader configurations.
-	 */
-	if (!pll->handoff_resources) {
-		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
-		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
-		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
-
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
-	}
-
-	/*
-	 * When continuous splash screen feature is enabled, we need to cache
-	 * the mux configuration for the pixel_clk_src mux clock. The clock
-	 * framework does not call back to re-configure the mux value if it is
-	 * does not change.For such usecases, we need to ensure that the cached
-	 * value is programmed prior to PLL being locked
-	 */
-	if (pll->handoff_resources)
-		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
-	dsi_pll_disable(vco);
-	mdss_pll_resource_enable(pll, false);
-}
-
-static int vco_10nm_prepare(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	/* Skip vco recalculation for continuous splash use case */
-	if (pll->handoff_resources)
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll (%d) resource, rc=%d\n",
-		       pll->index, rc);
-		return rc;
-	}
-
-	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
-		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
-				pll->vco_cached_rate);
-		if (rc) {
-			pr_err("pll(%d) set_rate failed, rc=%d\n",
-			       pll->index, rc);
-			mdss_pll_resource_enable(pll, false);
-			return rc;
-		}
-		pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
-			pll->cached_cfg1);
-		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
-					pll->cached_cfg0);
-		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
-					pll->cached_outdiv);
-	}
-	MDSS_PLL_ATRACE_BEGIN("pll_lock");
-	trace_mdss_pll_lock_start((u64)pll->vco_cached_rate,
-			pll->vco_current_rate,
-			pll->cached_cfg0, pll->cached_cfg1,
-			pll->cached_outdiv, pll->resource_ref_cnt);
-	rc = dsi_pll_enable(vco);
-	MDSS_PLL_ATRACE_END("pll_lock");
-	if (rc) {
-		mdss_pll_resource_enable(pll, false);
-		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
-		return rc;
-	}
-
-	return rc;
-}
-
-static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
-						unsigned long parent_rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	int rc;
-	u64 ref_clk = vco->ref_clk_rate;
-	u64 vco_rate;
-	u64 multiplier;
-	u32 frac;
-	u32 dec;
-	u32 outdiv;
-	u64 pll_freq, tmp64;
-
-	if (!vco->priv)
-		pr_err("vco priv is null\n");
-
-	if (!pll) {
-		pr_err("pll is null\n");
-		return 0;
-	}
-
-	/*
-	 * Calculate the vco rate from HW registers only for handoff cases.
-	 * For other cases where a vco_10nm_set_rate() has already been
-	 * called, just return the rate that was set earlier. This is due
-	 * to the fact that recalculating VCO rate requires us to read the
-	 * correct value of the pll_out_div divider clock, which is only set
-	 * afterwards.
-	 */
-	if (pll->vco_current_rate != 0) {
-		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
-		return pll->vco_current_rate;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll(%d) resource, rc=%d\n",
-		       pll->index, rc);
-		return 0;
-	}
-
-	if (!dsi_pll_10nm_lock_status(pll))
-		pll->handoff_resources = true;
-
-	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
-	dec &= 0xFF;
-
-	frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
-		  0xFF) <<
-		8);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
-		  0x3) <<
-		16);
-
-	/* OUTDIV_1:0 field is (log(outdiv, 2)) */
-	outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
-	outdiv &= 0x3;
-	outdiv = 1 << outdiv;
-
-	/*
-	 * TODO:
-	 *	1. Assumes prescaler is disabled
-	 *	2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
-	 **/
-	multiplier = 1 << 18;
-	pll_freq = dec * (ref_clk * 2);
-	tmp64 = (ref_clk * 2 * frac);
-	pll_freq += div_u64(tmp64, multiplier);
-
-	vco_rate = div_u64(pll_freq, outdiv);
-
-	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
-		 dec, frac, outdiv, vco_rate);
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return (unsigned long)vco_rate;
-}
-
-static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	*div = (reg_val & 0xF0) >> 4;
-
-	/**
-	 * Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	reg_val &= ~0xF0;
-	reg_val |= (div << 4);
-	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-	pixel_clk_set_div_sub(pll, div);
-	if (pll->slave)
-		pixel_clk_set_div_sub(pll->slave, div);
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return 0;
-}
-
-static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	*div = (reg_val & 0x0F);
-
-	/**
-	 *Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
-	reg_val &= ~0x0F;
-	reg_val |= div;
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	int rc;
-	struct mdss_pll_resources *rsc = context;
-	struct dsi_pll_8998 *pll;
-
-	if (!rsc) {
-		pr_err("pll resource not found\n");
-		return -EINVAL;
-	}
-
-	pll = rsc->priv;
-	if (!pll) {
-		pr_err("pll configuration not found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-
-	bit_clk_set_div_sub(rsc, div);
-	/* For slave PLL, this divider always should be set to 1 */
-	if (rsc->slave)
-		bit_clk_set_div_sub(rsc->slave, 1);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static struct regmap_config dsi_pll_10nm_config = {
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-	.max_register = 0x7c0,
-};
-
-static struct regmap_bus pll_regmap_bus = {
-	.reg_write = pll_reg_write,
-	.reg_read = pll_reg_read,
-};
-
-static struct regmap_bus pclk_src_mux_regmap_bus = {
-	.reg_read = pclk_mux_read_sel,
-	.reg_write = pclk_mux_write_sel,
-};
-
-static struct regmap_bus pclk_src_regmap_bus = {
-	.reg_write = pixel_clk_set_div,
-	.reg_read = pixel_clk_get_div,
-};
-
-static struct regmap_bus bitclk_src_regmap_bus = {
-	.reg_write = bit_clk_set_div,
-	.reg_read = bit_clk_get_div,
-};
-
-static const struct clk_ops clk_ops_vco_10nm = {
-	.recalc_rate = vco_10nm_recalc_rate,
-	.set_rate = vco_10nm_set_rate,
-	.round_rate = vco_10nm_round_rate,
-	.prepare = vco_10nm_prepare,
-	.unprepare = vco_10nm_unprepare,
-};
-
-static struct regmap_bus mdss_mux_regmap_bus = {
-	.reg_write = mdss_set_mux_sel,
-	.reg_read = mdss_get_mux_sel,
-};
-
-/*
- * Clock tree for generating DSI byte and pixel clocks.
- *
- *
- *                  +---------------+
- *                  |    vco_clk    |
- *                  +-------+-------+
- *                          |
- *                          |
- *                  +---------------+
- *                  |  pll_out_div  |
- *                  |  DIV(1,2,4,8) |
- *                  +-------+-------+
- *                          |
- *                          +-----------------------------+--------+
- *                          |                             |        |
- *                  +-------v-------+                     |        |
- *                  |  bitclk_src   |                     |        |
- *                  |  DIV(1..15)   |                     |        |
- *                  +-------+-------+                     |        |
- *                          |                             |        |
- *                          +----------+---------+        |        |
- *   Shadow Path            |          |         |        |        |
- *       +          +-------v-------+  |  +------v------+ | +------v-------+
- *       |          |  byteclk_src  |  |  |post_bit_div | | |post_vco_div  |
- *       |          |  DIV(8)       |  |  |DIV (2)      | | |DIV(4)        |
- *       |          +-------+-------+  |  +------+------+ | +------+-------+
- *       |                  |          |         |      | |        |
- *       |                  |          |         +------+ |        |
- *       |                  |          +-------------+  | |   +----+
- *       |         +--------+                        |  | |   |
- *       |         |                               +-v--v-v---v------+
- *     +-v---------v----+                           \  pclk_src_mux /
- *     \  byteclk_mux /                              \             /
- *      \            /                                +-----+-----+
- *       +----+-----+                                       |        Shadow Path
- *            |                                             |             +
- *            v                                       +-----v------+      |
- *       dsi_byte_clk                                 |  pclk_src  |      |
- *                                                    | DIV(1..15) |      |
- *                                                    +-----+------+      |
- *                                                          |             |
- *                                                          |             |
- *                                                          +--------+    |
- *                                                                   |    |
- *                                                               +---v----v----+
- *                                                                \  pclk_mux /
- *                                                                 \         /
- *                                                                  +---+---+
- *                                                                      |
- *                                                                      |
- *                                                                      v
- *                                                                   dsi_pclk
- *
- */
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1000000000UL,
-	.max_rate = 3500000000UL,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_vco_clk",
-			.parent_names = (const char *[]){"bi_tcxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_10nm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1000000000UL,
-	.max_rate = 3500000000UL,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_vco_clk",
-			.parent_names = (const char *[]){"bi_tcxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_10nm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct clk_regmap_div dsi0pll_pll_out_div = {
-	.reg = PLL_PLL_OUTDIV_RATE,
-	.shift = 0,
-	.width = 2,
-	.flags = CLK_DIVIDER_POWER_OF_TWO,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pll_out_div",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_pll_out_div = {
-	.reg = PLL_PLL_OUTDIV_RATE,
-	.shift = 0,
-	.width = 2,
-	.flags = CLK_DIVIDER_POWER_OF_TWO,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pll_out_div",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_bitclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_bitclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_post_vco_div = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_post_vco_div",
-		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_post_vco_div = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_post_vco_div",
-		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_byteclk_src = {
-	.div = 8,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_byteclk_src",
-		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_byteclk_src = {
-	.div = 8,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_byteclk_src",
-		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_post_bit_div = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_post_bit_div",
-		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_post_bit_div = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_post_bit_div",
-		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_byteclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_byteclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
-	.reg = PHY_CMN_CLK_CFG1,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi0pll_bitclk_src",
-					"dsi0pll_post_bit_div",
-					"dsi0pll_pll_out_div",
-					"dsi0pll_post_vco_div"},
-			.num_parents = 4,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
-	.reg = PHY_CMN_CLK_CFG1,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi1pll_bitclk_src",
-					"dsi1pll_post_bit_div",
-					"dsi1pll_pll_out_div",
-					"dsi1pll_post_vco_div"},
-			.num_parents = 4,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_pclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pclk_src",
-			.parent_names = (const char *[]){
-					"dsi0pll_pclk_src_mux"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_pclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pclk_src",
-			.parent_names = (const char *[]){
-					"dsi1pll_pclk_src_mux"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_pclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_pclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
-	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
-	[PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
-	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
-	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
-	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
-	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
-	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
-	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
-	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
-	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
-	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
-	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
-	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
-	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
-	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
-	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
-	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
-	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
-	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
-	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
-};
-
-int dsi_pll_clock_register_10nm(struct platform_device *pdev,
-				  struct mdss_pll_resources *pll_res)
-{
-	int rc = 0, ndx, i;
-	struct clk *clk;
-	struct clk_onecell_data *clk_data;
-	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
-	struct regmap *rmap;
-
-	ndx = pll_res->index;
-
-	if (ndx >= DSI_PLL_MAX) {
-		pr_err("pll index(%d) NOT supported\n", ndx);
-		return -EINVAL;
-	}
-
-	pll_rsc_db[ndx] = pll_res;
-	plls[ndx].rsc = pll_res;
-	pll_res->priv = &plls[ndx];
-	pll_res->vco_delay = VCO_DELAY_USEC;
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	/* Establish client data */
-	if (ndx == 0) {
-
-		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_pll_out_div.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_bitclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_pclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_pclk_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_byteclk_mux.clkr.regmap = rmap;
-
-		dsi0pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_10nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-							pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-
-
-	} else {
-		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_pll_out_div.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_bitclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_pclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_pclk_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_byteclk_mux.clkr.regmap = rmap;
-		dsi1pll_vco_clk.priv = pll_res;
-
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_10nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-						pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-	}
-	if (!rc) {
-		pr_info("Registered DSI PLL ndx=%d, clocks successfully\n",
-				ndx);
-
-		return rc;
-	}
-clk_register_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
deleted file mode 100644
index 455d2c0..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-14nm.h"
-
-#define DSI_PLL_POLL_MAX_READS                  15
-#define DSI_PLL_POLL_TIMEOUT_US                 1000
-#define MSM8996_DSI_PLL_REVISION_2		2
-
-#define VCO_REF_CLK_RATE 19200000
-
-#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
-
-static int mdss_pll_read_stored_trim_codes(
-		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
-{
-	int i;
-	int rc = 0;
-	bool found = false;
-
-	if (!dsi_pll_res->dfps) {
-		rc = -EINVAL;
-		goto end_read;
-	}
-
-	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
-		struct dfps_codes_info *codes_info =
-			&dsi_pll_res->dfps->codes_dfps[i];
-
-		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
-			codes_info->is_valid, codes_info->frame_rate,
-			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
-			codes_info->pll_codes.pll_codes_2);
-
-		if (vco_clk_rate != codes_info->clk_rate &&
-				codes_info->is_valid)
-			continue;
-
-		dsi_pll_res->cache_pll_trim_codes[0] =
-			codes_info->pll_codes.pll_codes_1;
-		dsi_pll_res->cache_pll_trim_codes[1] =
-			codes_info->pll_codes.pll_codes_2;
-		found = true;
-		break;
-	}
-
-	if (!found) {
-		rc = -EINVAL;
-		goto end_read;
-	}
-
-	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
-			dsi_pll_res->cache_pll_trim_codes[0],
-			dsi_pll_res->cache_pll_trim_codes[1]);
-
-end_read:
-	return rc;
-}
-
-int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	struct mdss_pll_resources *pll = context;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	int rc;
-	u32 n1div = 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/* in common clock framework the divider value provided is one less */
-	div++;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	/*
-	 * vco rate = bit_clk * postdiv * n1div
-	 * vco range from 1300 to 2600 Mhz
-	 * postdiv = 1
-	 * n1div = 1 to 15
-	 * n1div = roundup(1300Mhz / bit_clk)
-	 * support bit_clk above 86.67Mhz
-	 */
-
-	pout->pll_n1div  = div;
-
-	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n1div &= ~0xf;
-	n1div |= (div & 0xf);
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
-	/* ensure n1 divider is programed */
-	wmb();
-	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
-			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return 0;
-}
-
-int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/*
-	 * postdiv = 1/2/4/8
-	 * n1div = 1 - 15
-	 * fot the time being, assume postdiv = 1
-	 */
-
-	*div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	*div &= 0xF;
-
-	/*
-	 * initialize n1div here, it will get updated when
-	 * corresponding set_div is called.
-	 */
-	pout->pll_n1div = *div;
-
-	/* common clock framework will add one to the divider value sent */
-	if (*div == 0)
-		*div = 1; /* value of zero means div is 2 as per SWI */
-	else
-		*div -= 1;
-
-	pr_debug("post n1 get div = %d\n", *div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-int n2_div_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	int rc;
-	u32 n2div;
-	struct mdss_pll_resources *pll = context;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	struct mdss_pll_resources *slave;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/*
-	 * in common clock framework the actual divider value
-	 * provided is one less.
-	 */
-	div++;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	/* this is for pixel clock */
-	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n2div &= ~0xf0;	/* bits 4 to 7 */
-	n2div |= (div << 4);
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
-
-	/* commit slave if split display is enabled */
-	slave = pll->slave;
-	if (slave)
-		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
-
-	pout->pll_n2div = div;
-
-	/* set dsiclk_sel=1 so that n2div *= 2 */
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
-	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	struct mdss_pll_resources *pll = context;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	u32 data;
-
-	pdb = pll->priv;
-	pout = &pdb->out;
-
-	/*
-	 * in common clock framework the actual divider value
-	 * provided is one less.
-	 */
-	div++;
-
-	pout->pll_n2div = div;
-
-	data = (pout->pll_n1div | (pout->pll_n2div << 4));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
-			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
-			data, 1);
-	return 0;
-}
-
-int n2_div_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	u32 n2div;
-	struct mdss_pll_resources *pll = context;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll=%d resources\n",
-						pll->index);
-		return rc;
-	}
-
-	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n2div >>= 4;
-	n2div &= 0x0f;
-	/*
-	 * initialize n2div here, it will get updated when
-	 * corresponding set_div is called.
-	 */
-	pout->pll_n2div = n2div;
-	mdss_pll_resource_enable(pll, false);
-
-	*div = n2div;
-
-	/* common clock framework will add one to the divider value sent */
-	if (*div == 0)
-		*div = 1; /* value of zero means div is 2 as per SWI */
-	else
-		*div -= 1;
-
-	pr_debug("ndx=%d div=%d\n", pll->index, *div);
-
-	return rc;
-}
-
-static bool pll_is_pll_locked_14nm(struct mdss_pll_resources *pll)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((pll->pll_base +
-			DSIPHY_PLL_RESET_SM_READY_STATUS),
-			status,
-			((status & BIT(5)) > 0),
-			DSI_PLL_POLL_MAX_READS,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
-				pll->index, status);
-		pll_locked = false;
-	} else if (readl_poll_timeout_atomic((pll->pll_base +
-				DSIPHY_PLL_RESET_SM_READY_STATUS),
-				status,
-				((status & BIT(0)) > 0),
-				DSI_PLL_POLL_MAX_READS,
-				DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
-				pll->index, status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-static void dsi_pll_start_14nm(void __iomem *pll_base)
-{
-	pr_debug("start PLL at base=%pK\n", pll_base);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
-}
-
-static void dsi_pll_stop_14nm(void __iomem *pll_base)
-{
-	pr_debug("stop PLL at base=%pK\n", pll_base);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
-}
-
-int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll)
-{
-	int rc = 0;
-
-	if (!pll) {
-		pr_err("Invalid PLL resources\n");
-		return -EINVAL;
-	}
-
-	dsi_pll_start_14nm(pll->pll_base);
-
-	/*
-	 * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
-	 * enabled at mdss_dsi_14nm_phy_config()
-	 */
-
-	if (!pll_is_pll_locked_14nm(pll)) {
-		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
-		rc = -EINVAL;
-		goto init_lock_err;
-	}
-
-	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
-
-init_lock_err:
-	return rc;
-}
-
-static int dsi_pll_enable(struct clk_hw *hw)
-{
-	int i, rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	/* Try all enable sequences until one succeeds */
-	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
-		rc = vco->pll_enable_seqs[i](pll);
-		pr_debug("DSI PLL %s after sequence #%d\n",
-			rc ? "unlocked" : "locked", i + 1);
-		if (!rc)
-			break;
-	}
-
-	if (rc)
-		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
-	else
-		pll->pll_on = true;
-
-	return rc;
-}
-
-static void dsi_pll_disable(struct clk_hw *hw)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct mdss_pll_resources *slave;
-
-	if (!pll->pll_on &&
-		mdss_pll_resource_enable(pll, true)) {
-		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
-		return;
-	}
-
-	pll->handoff_resources = false;
-	slave = pll->slave;
-
-	dsi_pll_stop_14nm(pll->pll_base);
-
-	mdss_pll_resource_enable(pll, false);
-
-	pll->pll_on = false;
-
-	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
-}
-
-static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	pdb->in.fref = 19200000;	/* 19.2 Mhz*/
-	pdb->in.fdata = 0;		/* bit clock rate */
-	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
-	pdb->in.ssc_en = pll->ssc_en;		/* 1, reg: 0x0494, bit 0 */
-	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
-
-	/* fixed  input */
-	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
-	pdb->in.vco_measure_time = 5;	/* 5, unknown */
-	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
-	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
-	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
-	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
-	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
-	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
-	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
-	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
-	pdb->in.ssc_freq = pll->ssc_freq;
-
-	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
-	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
-	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
-	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
-	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
-	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
-	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
-	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
-	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
-	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
-	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
-	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
-	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
-	pdb->in.pll_iptat_trim = 7;
-	pdb->in.pll_c3ctrl = 2;		/* 2 */
-	pdb->in.pll_r3ctrl = 1;		/* 1 */
-	pdb->out.pll_postdiv = 1;
-}
-
-static void pll_14nm_ssc_calc(struct mdss_pll_resources *pll,
-				struct dsi_pll_db *pdb)
-{
-	u32 period, ssc_period;
-	u32 ref, rem;
-	s64 step_size;
-
-	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
-		pll->vco_current_rate, pll->vco_ref_clk_rate);
-
-	ssc_period = pdb->in.ssc_freq / 500;
-	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
-	ssc_period  = CEIL(period, ssc_period);
-	ssc_period -= 1;
-	pdb->out.ssc_period = ssc_period;
-
-	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
-	pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
-
-	step_size = (u32)pll->vco_current_rate;
-	ref = pll->vco_ref_clk_rate;
-	ref /= 1000;
-	step_size = div_s64(step_size, ref);
-	step_size <<= 20;
-	step_size = div_s64(step_size, 1000);
-	step_size *= pdb->in.ssc_spread;
-	step_size = div_s64(step_size, 1000);
-	step_size *= (pdb->in.ssc_adj_period + 1);
-
-	rem = 0;
-	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
-	if (rem)
-		step_size++;
-
-	pr_debug("%s: step_size=%lld\n", __func__, step_size);
-
-	step_size &= 0x0ffff;	/* take lower 16 bits */
-
-	pdb->out.ssc_step_size = step_size;
-}
-
-static void pll_14nm_dec_frac_calc(struct mdss_pll_resources *pll,
-				struct dsi_pll_db *pdb)
-{
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	u64 multiplier = BIT(20);
-	u64 dec_start_multiple, dec_start, pll_comp_val;
-	s32 duration, div_frac_start;
-	s64 vco_clk_rate = pll->vco_current_rate;
-	s64 fref = pll->vco_ref_clk_rate;
-
-	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
-				vco_clk_rate, fref);
-
-	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
-	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
-	dec_start = div_s64(dec_start_multiple, multiplier);
-
-	pout->dec_start = (u32)dec_start;
-	pout->div_frac_start = div_frac_start;
-
-	if (pin->plllock_cnt == 0)
-		duration = 1024;
-	else if (pin->plllock_cnt == 1)
-		duration = 256;
-	else if (pin->plllock_cnt == 2)
-		duration = 128;
-	else
-		duration = 32;
-
-	pll_comp_val =  duration * dec_start_multiple;
-	pll_comp_val =  div_u64(pll_comp_val, multiplier);
-	do_div(pll_comp_val, 10);
-
-	pout->plllock_cmp = (u32)pll_comp_val;
-
-	pout->pll_txclk_en = 1;
-	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
-		pout->cmn_ldo_cntrl = 0x3c;
-	else
-		pout->cmn_ldo_cntrl = 0x1c;
-}
-
-static u32 pll_14nm_kvco_slop(u32 vrate)
-{
-	u32 slop = 0;
-
-	if (vrate > 1300000000UL && vrate <= 1800000000UL)
-		slop =  600;
-	else if (vrate > 1800000000UL && vrate < 2300000000UL)
-		slop = 400;
-	else if (vrate > 2300000000UL && vrate < 2600000000UL)
-		slop = 280;
-
-	return slop;
-}
-
-static void pll_14nm_calc_vco_count(struct dsi_pll_db *pdb,
-			 s64 vco_clk_rate, s64 fref)
-{
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	u64 data;
-	u32 cnt;
-
-	data = fref * pin->vco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 2;
-	pout->pll_vco_div_ref = data;
-
-	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is Mhz */
-	data *= pin->vco_measure_time;
-	do_div(data, 10);
-	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
-
-	data = fref * pin->kvco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 1;
-	pout->pll_kvco_div_ref = data;
-
-	cnt = pll_14nm_kvco_slop(vco_clk_rate);
-	cnt *= 2;
-	cnt /= 100;
-	cnt *= pin->kvco_measure_time;
-	pout->pll_kvco_count = cnt;
-
-	pout->pll_misc1 = 16;
-	pout->pll_resetsm_cntrl = 48;
-	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
-	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
-	pout->pll_kvco_code = 0;
-}
-
-static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	data = pin->ssc_adj_period;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
-	data = (pin->ssc_adj_period >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
-
-	data = pout->ssc_period;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
-	data = (pout->ssc_period >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
-
-	data = pout->ssc_step_size;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
-	data = (pout->ssc_step_size >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
-
-	data = (pin->ssc_center & 0x01);
-	data <<= 1;
-	data |= 0x01; /* enable */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
-
-	wmb();	/* make sure register committed */
-}
-
-static void pll_db_commit_common(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	/* confgiure the non frequency dependent pll registers */
-	data = 0;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
-
-	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
-
-	data = pout->pll_txclk_en;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
-
-	data = pout->pll_resetsm_cntrl;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
-	data = pout->pll_resetsm_cntrl2;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
-	data = pout->pll_resetsm_cntrl5;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
-
-	data = pout->pll_vco_div_ref;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
-	data = (pout->pll_vco_div_ref >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
-
-	data = pout->pll_kvco_div_ref;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
-	data = (pout->pll_kvco_div_ref >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
-
-	data = pout->pll_misc1;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
-
-	data = pin->pll_ie_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
-
-	data = pin->pll_ip_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
-
-	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
-
-	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
-
-	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpcset_m);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
-
-	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
-
-	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
-
-	data = pin->pll_iptat_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
-
-	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
-}
-
-static void pll_db_commit_14nm(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	data = pout->cmn_ldo_cntrl;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
-
-	pll_db_commit_common(pll, pdb);
-
-	/* de assert pll start and apply pll sw reset */
-	/* stop pll */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
-
-	/* pll sw reset */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
-	wmb();	/* make sure register committed */
-	udelay(10);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
-	wmb();	/* make sure register committed */
-
-	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
-
-	data = 0xff; /* data, clk, pll normal operation */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
-
-	/* confgiure the frequency dependent pll registers */
-	data = pout->dec_start;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
-
-	data = pout->div_frac_start;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
-	data = (pout->div_frac_start >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
-	data = (pout->div_frac_start >> 16);
-	data &= 0x0f;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
-
-	data = pout->plllock_cmp;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
-	data = (pout->plllock_cmp >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
-	data = (pout->plllock_cmp >> 16);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
-
-	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
-
-	data = pout->pll_vco_count;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
-	data = (pout->pll_vco_count >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
-
-	data = pout->pll_kvco_count;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
-	data = (pout->pll_kvco_count >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
-
-	/*
-	 * tx_band = pll_postdiv
-	 * 0: divided by 1 <== for now
-	 * 1: divided by 2
-	 * 2: divided by 4
-	 * 3: divided by 8
-	 */
-	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
-
-	data = (pout->pll_n1div | (pout->pll_n2div << 4));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
-
-	if (pll->ssc_en)
-		pll_db_commit_ssc(pll, pdb);
-
-	wmb();	/* make sure register committed */
-}
-
-/*
- * pll_source_finding:
- * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
- * at mdss_dsi_14nm_phy_config()
- */
-static int pll_source_finding(struct mdss_pll_resources *pll)
-{
-	u32 clk_buf_en;
-	u32 glbl_test_ctrl;
-
-	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
-				DSIPHY_CMN_GLBL_TEST_CTRL);
-	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
-				DSIPHY_PLL_CLKBUFLR_EN);
-
-	glbl_test_ctrl &= BIT(2);
-	glbl_test_ctrl >>= 2;
-
-	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
-		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
-
-	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
-			(clk_buf_en == PLL_OUTPUT_BOTH))
-		return PLL_MASTER;
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
-			(clk_buf_en == PLL_OUTPUT_NONE))
-		return PLL_SLAVE;
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
-			(clk_buf_en == PLL_OUTPUT_RIGHT))
-		return PLL_STANDALONE;
-
-	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
-			__func__, clk_buf_en, glbl_test_ctrl);
-
-	return PLL_UNKNOWN;
-}
-
-static void pll_source_setup(struct mdss_pll_resources *pll)
-{
-	int status;
-	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
-	struct mdss_pll_resources *other;
-
-	if (pdb->source_setup_done)
-		return;
-
-	pdb->source_setup_done++;
-
-	status = pll_source_finding(pll);
-
-	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
-		return;
-
-	other = pdb->next->pll;
-	if (!other)
-		return;
-
-	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
-			status, pll->index, other->index);
-
-	if (status == PLL_MASTER)
-		pll->slave = other;
-	else
-		other->slave = pll;
-}
-
-unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	u64 vco_rate, multiplier = BIT(20);
-	s32 div_frac_start;
-	u32 dec_start;
-	u64 ref_clk = vco->ref_clk_rate;
-	int rc;
-
-	if (pll->vco_current_rate)
-		return (unsigned long)pll->vco_current_rate;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
-		return rc;
-	}
-
-	dec_start = MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DEC_START);
-	dec_start &= 0x0ff;
-	pr_debug("dec_start = 0x%x\n", dec_start);
-
-	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
-	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
-	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
-	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
-
-	vco_rate = ref_clk * dec_start;
-	vco_rate += ((ref_clk * div_frac_start) / multiplier);
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(pll, false);
-
-	pr_debug("%s: returning vco rate as %lu\n",
-			__func__, (unsigned long)vco_rate);
-	return (unsigned long)vco_rate;
-}
-
-int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct mdss_pll_resources *slave;
-	struct dsi_pll_db *pdb;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	if (!pdb) {
-		pr_err("No prov found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	pll_source_setup(pll);
-
-	pr_debug("%s: ndx=%d base=%pK rate=%lu slave=%pK\n", __func__,
-				pll->index, pll->pll_base, rate, pll->slave);
-
-	pll->vco_current_rate = rate;
-	pll->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	mdss_dsi_pll_14nm_input_init(pll, pdb);
-
-	pll_14nm_dec_frac_calc(pll, pdb);
-
-	if (pll->ssc_en)
-		pll_14nm_ssc_calc(pll, pdb);
-
-	pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
-					pll->vco_ref_clk_rate);
-
-	/* commit slave if split display is enabled */
-	slave = pll->slave;
-	if (slave)
-		pll_db_commit_14nm(slave, pdb);
-
-	/* commit master itself */
-	pll_db_commit_14nm(pll, pdb);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void shadow_pll_dynamic_refresh_14nm(struct mdss_pll_resources *pll,
-							struct dsi_pll_db *pdb)
-{
-	struct dsi_pll_output *pout = &pdb->out;
-
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
-		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
-		0xFF, 0x0);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
-		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
-		pout->dec_start, (pout->div_frac_start & 0x0FF));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
-		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
-		((pout->div_frac_start >> 8) & 0x0FF),
-		((pout->div_frac_start >> 16) & 0x0F));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
-		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
-		(pout->plllock_cmp & 0x0FF),
-		((pout->plllock_cmp >> 8) & 0x0FF));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
-		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
-		((pout->plllock_cmp >> 16) & 0x03),
-		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune*/
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
-		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
-		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
-		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
-		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
-	MDSS_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
-
-	/*
-	 * Ensure all the dynamic refresh registers are written before
-	 * dynamic refresh to change the fps is triggered
-	 */
-	wmb();
-}
-
-int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct dsi_pll_db *pdb;
-	s64 vco_clk_rate = (s64)rate;
-
-	if (!pll) {
-		pr_err("PLL data not found\n");
-		return -EINVAL;
-	}
-
-	pdb = pll->priv;
-	if (!pdb) {
-		pr_err("No priv data found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
-	if (rc) {
-		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	pr_debug("%s: ndx=%d base=%pK rate=%lu\n", __func__,
-			pll->index, pll->pll_base, rate);
-
-	pll->vco_current_rate = rate;
-	pll->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	mdss_dsi_pll_14nm_input_init(pll, pdb);
-
-	pll_14nm_dec_frac_calc(pll, pdb);
-
-	pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
-			pll->vco_ref_clk_rate);
-
-	shadow_pll_dynamic_refresh_14nm(pll, pdb);
-
-	rc = mdss_pll_resource_enable(pll, false);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	return rc;
-}
-
-long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
-						unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	u32 div;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-
-	div = vco->min_rate / rate;
-	if (div > 15) {
-		/* rate < 86.67 Mhz */
-		pr_err("rate=%lu NOT supportted\n", rate);
-		return -EINVAL;
-	}
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	*parent_rate = rrate;
-	return rrate;
-}
-
-int pll_vco_prepare_14nm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("Dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
-							pll->index);
-		return rc;
-	}
-
-	if ((pll->vco_cached_rate != 0)
-	    && (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
-		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
-						pll->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-					rc, pll->index);
-			mdss_pll_resource_enable(pll, false);
-			goto error;
-		}
-	}
-
-	rc = dsi_pll_enable(hw);
-
-	if (rc) {
-		mdss_pll_resource_enable(pll, false);
-		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
-	}
-
-error:
-	return rc;
-}
-
-void pll_vco_unprepare_14nm(struct clk_hw *hw)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("Dsi pll resources are not available\n");
-		return;
-	}
-
-	pll->vco_cached_rate = clk_hw_get_rate(hw);
-	dsi_pll_disable(hw);
-}
-
-int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
-{
-	return 0;
-}
-
-int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
-{
-	*val = 0;
-	return 0;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
deleted file mode 100644
index b23d117..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
+++ /dev/null
@@ -1,592 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-14nm.h"
-#include <dt-bindings/clock/mdss-14nm-pll-clk.h>
-
-#define VCO_DELAY_USEC		1
-
-static struct dsi_pll_db pll_db[DSI_PLL_NUM];
-
-static struct regmap_config dsi_pll_14nm_config = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register = 0x588,
-};
-
-static struct regmap_bus post_n1_div_regmap_bus = {
-	.reg_write = post_n1_div_set_div,
-	.reg_read = post_n1_div_get_div,
-};
-
-static struct regmap_bus n2_div_regmap_bus = {
-	.reg_write = n2_div_set_div,
-	.reg_read = n2_div_get_div,
-};
-
-static struct regmap_bus shadow_n2_div_regmap_bus = {
-	.reg_write = shadow_n2_div_set_div,
-	.reg_read = n2_div_get_div,
-};
-
-static struct regmap_bus dsi_mux_regmap_bus = {
-	.reg_write = dsi_mux_set_parent_14nm,
-	.reg_read = dsi_mux_get_parent_14nm,
-};
-
-/* Op structures */
-static const struct clk_ops clk_ops_dsi_vco = {
-	.recalc_rate = pll_vco_recalc_rate_14nm,
-	.set_rate = pll_vco_set_rate_14nm,
-	.round_rate = pll_vco_round_rate_14nm,
-	.prepare = pll_vco_prepare_14nm,
-	.unprepare = pll_vco_unprepare_14nm,
-};
-
-/* Shadow ops for dynamic refresh */
-static const struct clk_ops clk_ops_shadow_dsi_vco = {
-	.recalc_rate = pll_vco_recalc_rate_14nm,
-	.set_rate = shadow_pll_vco_set_rate_14nm,
-	.round_rate = pll_vco_round_rate_14nm,
-};
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1300000000UL,
-	.max_rate = 2600000000UL,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_vco_clk_14nm",
-			.parent_names = (const char *[]){ "bi_tcxo" },
-			.num_parents = 1,
-			.ops = &clk_ops_dsi_vco,
-		},
-};
-
-static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
-	.ref_clk_rate = 19200000u,
-	.min_rate = 1300000000u,
-	.max_rate = 2600000000u,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_shadow_vco_clk_14nm",
-			.parent_names = (const char *[]){ "bi_tcxo" },
-			.num_parents = 1,
-			.ops = &clk_ops_shadow_dsi_vco,
-		},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1300000000UL,
-	.max_rate = 2600000000UL,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_vco_clk_14nm",
-			.parent_names = (const char *[]){ "bi_tcxo" },
-			.num_parents = 1,
-			.ops = &clk_ops_dsi_vco,
-		},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
-	.ref_clk_rate = 19200000u,
-	.min_rate = 1300000000u,
-	.max_rate = 2600000000u,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_shadow_vco_clk_14nm",
-			.parent_names = (const char *[]){ "bi_tcxo" },
-			.num_parents = 1,
-			.ops = &clk_ops_shadow_dsi_vco,
-		},
-};
-
-static struct clk_regmap_div dsi0pll_post_n1_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_post_n1_div_clk",
-			.parent_names =
-				(const char *[]){ "dsi0pll_vco_clk_14nm" },
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_shadow_post_n1_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_shadow_post_n1_div_clk",
-			.parent_names =
-				(const char *[]){"dsi0pll_shadow_vco_clk_14nm"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_post_n1_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_post_n1_div_clk",
-			.parent_names =
-				(const char *[]){ "dsi1pll_vco_clk_14nm" },
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_shadow_post_n1_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_shadow_post_n1_div_clk",
-			.parent_names =
-				(const char *[]){"dsi1pll_shadow_vco_clk_14nm"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_n2_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_n2_div_clk",
-			.parent_names =
-				(const char *[]){ "dsi0pll_post_n1_div_clk" },
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_shadow_n2_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_shadow_n2_div_clk",
-			.parent_names =
-			(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_n2_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_n2_div_clk",
-			.parent_names =
-				(const char *[]){ "dsi1pll_post_n1_div_clk" },
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_shadow_n2_div_clk = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 4,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_shadow_n2_div_clk",
-			.parent_names =
-			(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_pixel_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_pixel_clk_src",
-		.parent_names = (const char *[]){ "dsi0pll_n2_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_shadow_pixel_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_shadow_pixel_clk_src",
-		.parent_names = (const char *[]){ "dsi0pll_shadow_n2_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_pixel_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_pixel_clk_src",
-		.parent_names = (const char *[]){ "dsi1pll_n2_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_shadow_pixel_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_shadow_pixel_clk_src",
-		.parent_names = (const char *[]){ "dsi1pll_shadow_n2_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_pixel_clk_mux = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 1,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names =
-				(const char *[]){ "dsi0pll_pixel_clk_src",
-					"dsi0pll_shadow_pixel_clk_src"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_pixel_clk_mux = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 1,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pixel_clk_mux",
-			.parent_names =
-				(const char *[]){ "dsi1pll_pixel_clk_src",
-					"dsi1pll_shadow_pixel_clk_src"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_byte_clk_src = {
-	.div = 8,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_byte_clk_src",
-		.parent_names = (const char *[]){ "dsi0pll_post_n1_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_shadow_byte_clk_src = {
-	.div = 8,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_shadow_byte_clk_src",
-		.parent_names =
-			(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_byte_clk_src = {
-	.div = 8,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_byte_clk_src",
-		.parent_names = (const char *[]){ "dsi1pll_post_n1_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_shadow_byte_clk_src = {
-	.div = 8,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_shadow_byte_clk_src",
-		.parent_names =
-			(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
-		.num_parents = 1,
-		.flags = (CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_byte_clk_mux = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 1,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names =
-				(const char *[]){"dsi0pll_byte_clk_src",
-					"dsi0pll_shadow_byte_clk_src"},
-			.num_parents = 2,
-			.ops = &clk_regmap_mux_closest_ops,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_byte_clk_mux = {
-	.reg = 0x48,
-	.shift = 0,
-	.width = 1,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_byte_clk_mux",
-			.parent_names =
-				(const char *[]){"dsi1pll_byte_clk_src",
-					"dsi1pll_shadow_byte_clk_src"},
-			.num_parents = 2,
-			.ops = &clk_regmap_mux_closest_ops,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		},
-	},
-};
-
-static struct clk_hw *mdss_dsi_pllcc_14nm[] = {
-	[BYTE0_MUX_CLK] = &dsi0pll_byte_clk_mux.clkr.hw,
-	[BYTE0_SRC_CLK] = &dsi0pll_byte_clk_src.hw,
-	[PIX0_MUX_CLK] = &dsi0pll_pixel_clk_mux.clkr.hw,
-	[PIX0_SRC_CLK] = &dsi0pll_pixel_clk_src.hw,
-	[N2_DIV_0_CLK] = &dsi0pll_n2_div_clk.clkr.hw,
-	[POST_N1_DIV_0_CLK] = &dsi0pll_post_n1_div_clk.clkr.hw,
-	[VCO_CLK_0_CLK] = &dsi0pll_vco_clk.hw,
-	[SHADOW_BYTE0_SRC_CLK] = &dsi0pll_shadow_byte_clk_src.hw,
-	[SHADOW_PIX0_SRC_CLK] = &dsi0pll_shadow_pixel_clk_src.hw,
-	[SHADOW_N2_DIV_0_CLK] = &dsi0pll_shadow_n2_div_clk.clkr.hw,
-	[SHADOW_POST_N1_DIV_0_CLK] = &dsi0pll_shadow_post_n1_div_clk.clkr.hw,
-	[SHADOW_VCO_CLK_0_CLK] = &dsi0pll_shadow_vco_clk.hw,
-	[BYTE1_MUX_CLK] = &dsi1pll_byte_clk_mux.clkr.hw,
-	[BYTE1_SRC_CLK] = &dsi1pll_byte_clk_src.hw,
-	[PIX1_MUX_CLK] = &dsi1pll_pixel_clk_mux.clkr.hw,
-	[PIX1_SRC_CLK] = &dsi1pll_pixel_clk_src.hw,
-	[N2_DIV_1_CLK] = &dsi1pll_n2_div_clk.clkr.hw,
-	[POST_N1_DIV_1_CLK] = &dsi1pll_post_n1_div_clk.clkr.hw,
-	[VCO_CLK_1_CLK] = &dsi1pll_vco_clk.hw,
-	[SHADOW_BYTE1_SRC_CLK] = &dsi1pll_shadow_byte_clk_src.hw,
-	[SHADOW_PIX1_SRC_CLK] = &dsi1pll_shadow_pixel_clk_src.hw,
-	[SHADOW_N2_DIV_1_CLK] = &dsi1pll_shadow_n2_div_clk.clkr.hw,
-	[SHADOW_POST_N1_DIV_1_CLK] = &dsi1pll_shadow_post_n1_div_clk.clkr.hw,
-	[SHADOW_VCO_CLK_1_CLK] = &dsi1pll_shadow_vco_clk.hw,
-};
-
-int dsi_pll_clock_register_14nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0, ndx, i;
-	int const ssc_freq_default = 31500; /* default h/w recommended value */
-	int const ssc_ppm_default = 5000; /* default h/w recommended value */
-	struct dsi_pll_db *pdb;
-	struct clk_onecell_data *clk_data;
-	struct clk *clk;
-	struct regmap *regmap;
-	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_14nm);
-
-	if (pll_res->index >= DSI_PLL_NUM) {
-		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
-		return -EINVAL;
-	}
-
-	ndx = pll_res->index;
-	pdb = &pll_db[ndx];
-	pll_res->priv = pdb;
-	pdb->pll = pll_res;
-	ndx++;
-	ndx %= DSI_PLL_NUM;
-	pdb->next = &pll_db[ndx];
-
-	if (pll_res->ssc_en) {
-		if (!pll_res->ssc_freq)
-			pll_res->ssc_freq = ssc_freq_default;
-		if (!pll_res->ssc_ppm)
-			pll_res->ssc_ppm = ssc_ppm_default;
-	}
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	/* Set client data to mux, div and vco clocks.  */
-	if (pll_res->index == DSI_PLL_1) {
-		regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
-					pll_res, &dsi_pll_14nm_config);
-		dsi1pll_post_n1_div_clk.clkr.regmap = regmap;
-		dsi1pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi1pll_n2_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi1pll_shadow_n2_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi1pll_byte_clk_mux.clkr.regmap = regmap;
-		dsi1pll_pixel_clk_mux.clkr.regmap = regmap;
-
-		dsi1pll_vco_clk.priv = pll_res;
-		dsi1pll_shadow_vco_clk.priv = pll_res;
-
-		pll_res->vco_delay = VCO_DELAY_USEC;
-
-		for (i = BYTE1_MUX_CLK; i <= SHADOW_VCO_CLK_1_CLK; i++) {
-			pr_debug("register clk: %d index: %d\n",
-							i, pll_res->index);
-			clk = devm_clk_register(&pdev->dev,
-					mdss_dsi_pllcc_14nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI: %d\n",
-						pll_res->index);
-				rc = -EINVAL;
-				goto clk_reg_fail;
-			}
-			clk_data->clks[i] = clk;
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-	} else {
-		regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
-					pll_res, &dsi_pll_14nm_config);
-		dsi0pll_post_n1_div_clk.clkr.regmap = regmap;
-		dsi0pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi0pll_n2_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi0pll_shadow_n2_div_clk.clkr.regmap = regmap;
-
-		regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
-				pll_res, &dsi_pll_14nm_config);
-		dsi0pll_byte_clk_mux.clkr.regmap = regmap;
-		dsi0pll_pixel_clk_mux.clkr.regmap = regmap;
-
-		dsi0pll_vco_clk.priv = pll_res;
-		dsi0pll_shadow_vco_clk.priv = pll_res;
-		pll_res->vco_delay = VCO_DELAY_USEC;
-
-		for (i = BYTE0_MUX_CLK; i <= SHADOW_VCO_CLK_0_CLK; i++) {
-			pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
-			clk = devm_clk_register(&pdev->dev,
-					mdss_dsi_pllcc_14nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI: %d\n",
-						pll_res->index);
-				rc = -EINVAL;
-				goto clk_reg_fail;
-			}
-			clk_data->clks[i] = clk;
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-	}
-
-	if (!rc) {
-		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
-						pll_res->index);
-		return rc;
-	}
-
-clk_reg_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
deleted file mode 100644
index 25587de..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef MDSS_DSI_PLL_14NM_H
-#define MDSS_DSI_PLL_14NM_H
-
-#define DSIPHY_CMN_CLK_CFG0		0x0010
-#define DSIPHY_CMN_CLK_CFG1		0x0014
-#define DSIPHY_CMN_GLBL_TEST_CTRL	0x0018
-
-#define DSIPHY_CMN_PLL_CNTRL		0x0048
-#define DSIPHY_CMN_CTRL_0		0x001c
-#define DSIPHY_CMN_CTRL_1		0x0020
-
-#define DSIPHY_CMN_LDO_CNTRL		0x004c
-
-#define DSIPHY_PLL_IE_TRIM		0x0400
-#define DSIPHY_PLL_IP_TRIM		0x0404
-
-#define DSIPHY_PLL_IPTAT_TRIM		0x0410
-
-#define DSIPHY_PLL_CLKBUFLR_EN		0x041c
-
-#define DSIPHY_PLL_SYSCLK_EN_RESET	0x0428
-#define DSIPHY_PLL_RESETSM_CNTRL	0x042c
-#define DSIPHY_PLL_RESETSM_CNTRL2	0x0430
-#define DSIPHY_PLL_RESETSM_CNTRL3	0x0434
-#define DSIPHY_PLL_RESETSM_CNTRL4	0x0438
-#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
-#define DSIPHY_PLL_KVCO_DIV_REF1	0x0440
-#define DSIPHY_PLL_KVCO_DIV_REF2	0x0444
-#define DSIPHY_PLL_KVCO_COUNT1		0x0448
-#define DSIPHY_PLL_KVCO_COUNT2		0x044c
-#define DSIPHY_PLL_VREF_CFG1		0x045c
-
-#define DSIPHY_PLL_KVCO_CODE		0x0458
-
-#define DSIPHY_PLL_VCO_DIV_REF1		0x046c
-#define DSIPHY_PLL_VCO_DIV_REF2		0x0470
-#define DSIPHY_PLL_VCO_COUNT1		0x0474
-#define DSIPHY_PLL_VCO_COUNT2		0x0478
-#define DSIPHY_PLL_PLLLOCK_CMP1		0x047c
-#define DSIPHY_PLL_PLLLOCK_CMP2		0x0480
-#define DSIPHY_PLL_PLLLOCK_CMP3		0x0484
-#define DSIPHY_PLL_PLLLOCK_CMP_EN	0x0488
-#define DSIPHY_PLL_PLL_VCO_TUNE		0x048C
-#define DSIPHY_PLL_DEC_START		0x0490
-#define DSIPHY_PLL_SSC_EN_CENTER	0x0494
-#define DSIPHY_PLL_SSC_ADJ_PER1		0x0498
-#define DSIPHY_PLL_SSC_ADJ_PER2		0x049c
-#define DSIPHY_PLL_SSC_PER1		0x04a0
-#define DSIPHY_PLL_SSC_PER2		0x04a4
-#define DSIPHY_PLL_SSC_STEP_SIZE1	0x04a8
-#define DSIPHY_PLL_SSC_STEP_SIZE2	0x04ac
-#define DSIPHY_PLL_DIV_FRAC_START1	0x04b4
-#define DSIPHY_PLL_DIV_FRAC_START2	0x04b8
-#define DSIPHY_PLL_DIV_FRAC_START3	0x04bc
-#define DSIPHY_PLL_TXCLK_EN		0x04c0
-#define DSIPHY_PLL_PLL_CRCTRL		0x04c4
-
-#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
-
-#define DSIPHY_PLL_PLL_MISC1		0x04e8
-
-#define DSIPHY_PLL_CP_SET_CUR		0x04f0
-#define DSIPHY_PLL_PLL_ICPMSET		0x04f4
-#define DSIPHY_PLL_PLL_ICPCSET		0x04f8
-#define DSIPHY_PLL_PLL_ICP_SET		0x04fc
-#define DSIPHY_PLL_PLL_LPF1		0x0500
-#define DSIPHY_PLL_PLL_LPF2_POSTDIV	0x0504
-#define DSIPHY_PLL_PLL_BANDGAP	0x0508
-
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x060
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x064
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x068
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x06C
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x070
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x074
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x078
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x07C
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x080
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x084
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x088
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x094
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x098
-
-struct dsi_pll_input {
-	u32 fref;	/* 19.2 Mhz, reference clk */
-	u32 fdata;	/* bit clock rate */
-	u32 dsiclk_sel; /* 1, reg: 0x0014 */
-	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
-	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
-	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
-
-	/* fixed  */
-	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
-	u32 vco_measure_time;	/* 5, unknown */
-	u32 kvco_measure_time;	/* 5, unknown */
-	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
-	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
-	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
-	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
-	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
-	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
-	u32 ssc_spread;		/* 0.005  */
-	u32 ssc_freq;		/* unknown */
-	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
-	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
-	u32 pll_iptat_trim;	/* reg: 0x0410 */
-	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
-	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
-
-	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
-	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
-
-	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
-	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
-
-	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
-	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
-
-	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
-	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
-	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
-	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
-	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
-};
-
-struct dsi_pll_output {
-	u32 pll_txclk_en;	/* reg: 0x04c0 */
-	u32 dec_start;		/* reg: 0x0490 */
-	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
-	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
-	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
-	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
-	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
-	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
-	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
-	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
-	u32 pll_misc1;		/* reg: 0x04e8 */
-	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
-	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
-	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
-	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
-	u32 pll_kvco_code;		/* reg: 0x0458 */
-
-	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
-	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
-	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
-
-	u32 pll_postdiv;	/* vco */
-	u32 pll_n1div;		/* vco */
-	u32 pll_n2div;		/* hr_oclk3, pixel */
-	u32 fcvo;
-};
-
-enum {
-	DSI_PLL_0,
-	DSI_PLL_1,
-	DSI_PLL_NUM
-};
-
-struct dsi_pll_db {
-	struct dsi_pll_db *next;
-	struct mdss_pll_resources *pll;
-	struct dsi_pll_input in;
-	struct dsi_pll_output out;
-	int source_setup_done;
-};
-
-enum {
-	PLL_OUTPUT_NONE,
-	PLL_OUTPUT_RIGHT,
-	PLL_OUTPUT_LEFT,
-	PLL_OUTPUT_BOTH
-};
-
-enum {
-	PLL_SOURCE_FROM_LEFT,
-	PLL_SOURCE_FROM_RIGHT
-};
-
-enum {
-	PLL_UNKNOWN,
-	PLL_STANDALONE,
-	PLL_SLAVE,
-	PLL_MASTER
-};
-
-int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-		unsigned long parent_rate);
-int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
-		unsigned long parent_rate);
-long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
-		unsigned long *parent_rate);
-unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
-		unsigned long parent_rate);
-
-int pll_vco_prepare_14nm(struct clk_hw *hw);
-void pll_vco_unprepare_14nm(struct clk_hw *hw);
-
-int shadow_post_n1_div_set_div(void *context,
-			unsigned int reg, unsigned int div);
-int shadow_post_n1_div_get_div(void *context,
-			unsigned int reg, unsigned int *div);
-int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div);
-int shadow_n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
-
-int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div);
-int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div);
-int n2_div_set_div(void *context, unsigned int reg, unsigned int div);
-int n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
-int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll);
-int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val);
-int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val);
-
-#endif  /* MDSS_DSI_PLL_14NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c
deleted file mode 100644
index aebcd26..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c
+++ /dev/null
@@ -1,590 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/workqueue.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8994.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-
-#define VCO_DELAY_USEC		1
-
-static const struct clk_ops bypass_lp_div_mux_clk_ops;
-static const struct clk_ops pixel_clk_src_ops;
-static const struct clk_ops byte_clk_src_ops;
-static const struct clk_ops ndiv_clk_ops;
-
-static const struct clk_ops shadow_pixel_clk_src_ops;
-static const struct clk_ops shadow_byte_clk_src_ops;
-static const struct clk_ops clk_ops_gen_mux_dsi;
-
-static int vco_set_rate_20nm(struct clk *c, unsigned long rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	pr_debug("Cancel pending pll off work\n");
-	cancel_work_sync(&dsi_pll_res->pll_off);
-	rc = pll_20nm_vco_set_rate(vco, rate);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-static int pll1_vco_set_rate_20nm(struct clk *c, unsigned long rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll_res = vco->priv;
-
-	mdss_pll_resource_enable(pll_res, true);
-	__dsi_pll_disable(pll_res->pll_base);
-	mdss_pll_resource_enable(pll_res, false);
-
-	pr_debug("Configuring PLL1 registers.\n");
-
-	return 0;
-}
-
-static int shadow_vco_set_rate_20nm(struct clk *c, unsigned long rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res->resource_enable) {
-		pr_err("PLL resources disabled. Dynamic fps invalid\n");
-		return -EINVAL;
-	}
-
-	rc = shadow_pll_20nm_vco_set_rate(vco, rate);
-
-	return rc;
-}
-
-/* Op structures */
-
-static const struct clk_ops pll1_clk_ops_dsi_vco = {
-	.set_rate = pll1_vco_set_rate_20nm,
-};
-
-static const struct clk_ops clk_ops_dsi_vco = {
-	.set_rate = vco_set_rate_20nm,
-	.round_rate = pll_20nm_vco_round_rate,
-	.handoff = pll_20nm_vco_handoff,
-	.prepare = pll_20nm_vco_prepare,
-	.unprepare = pll_20nm_vco_unprepare,
-};
-
-static struct clk_div_ops fixed_hr_oclk2_div_ops = {
-	.set_div = fixed_hr_oclk2_set_div,
-	.get_div = fixed_hr_oclk2_get_div,
-};
-
-static struct clk_div_ops ndiv_ops = {
-	.set_div = ndiv_set_div,
-	.get_div = ndiv_get_div,
-};
-
-static struct clk_div_ops hr_oclk3_div_ops = {
-	.set_div = hr_oclk3_set_div,
-	.get_div = hr_oclk3_get_div,
-};
-
-static struct clk_mux_ops bypass_lp_div_mux_ops = {
-	.set_mux_sel = set_bypass_lp_div_mux_sel,
-	.get_mux_sel = get_bypass_lp_div_mux_sel,
-};
-
-static const struct clk_ops shadow_clk_ops_dsi_vco = {
-	.set_rate = shadow_vco_set_rate_20nm,
-	.round_rate = pll_20nm_vco_round_rate,
-	.handoff = pll_20nm_vco_handoff,
-};
-
-static struct clk_div_ops shadow_fixed_hr_oclk2_div_ops = {
-	.set_div = shadow_fixed_hr_oclk2_set_div,
-	.get_div = fixed_hr_oclk2_get_div,
-};
-
-static struct clk_div_ops shadow_ndiv_ops = {
-	.set_div = shadow_ndiv_set_div,
-	.get_div = ndiv_get_div,
-};
-
-static struct clk_div_ops shadow_hr_oclk3_div_ops = {
-	.set_div = shadow_hr_oclk3_set_div,
-	.get_div = hr_oclk3_get_div,
-};
-
-static struct clk_mux_ops shadow_bypass_lp_div_mux_ops = {
-	.set_mux_sel = set_shadow_bypass_lp_div_mux_sel,
-	.get_mux_sel = get_bypass_lp_div_mux_sel,
-};
-
-static struct clk_mux_ops mdss_byte_mux_ops = {
-	.set_mux_sel = set_mdss_byte_mux_sel,
-	.get_mux_sel = get_mdss_byte_mux_sel,
-};
-
-static struct clk_mux_ops mdss_pixel_mux_ops = {
-	.set_mux_sel = set_mdss_pixel_mux_sel,
-	.get_mux_sel = get_mdss_pixel_mux_sel,
-};
-
-static struct dsi_pll_vco_clk mdss_dsi1_vco_clk_src = {
-	.c = {
-		.dbg_name = "mdss_dsi1_vco_clk_src",
-		.ops = &pll1_clk_ops_dsi_vco,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(mdss_dsi1_vco_clk_src.c),
-	},
-};
-
-static struct dsi_pll_vco_clk dsi_vco_clk_8994 = {
-	.ref_clk_rate = 19200000,
-	.min_rate = 300000000,
-	.max_rate = 1500000000,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = pll_20nm_vco_enable_seq,
-	.c = {
-		.dbg_name = "dsi_vco_clk_8994",
-		.ops = &clk_ops_dsi_vco,
-		CLK_INIT(dsi_vco_clk_8994.c),
-	},
-};
-
-static struct dsi_pll_vco_clk shadow_dsi_vco_clk_8994 = {
-	.ref_clk_rate = 19200000,
-	.min_rate = 300000000,
-	.max_rate = 1500000000,
-	.c = {
-		.dbg_name = "shadow_dsi_vco_clk_8994",
-		.ops = &shadow_clk_ops_dsi_vco,
-		CLK_INIT(shadow_dsi_vco_clk_8994.c),
-	},
-};
-
-static struct div_clk ndiv_clk_8994 = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &ndiv_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8994.c,
-		.dbg_name = "ndiv_clk_8994",
-		.ops = &ndiv_clk_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(ndiv_clk_8994.c),
-	},
-};
-
-static struct div_clk shadow_ndiv_clk_8994 = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &shadow_ndiv_ops,
-	.c = {
-		.parent = &shadow_dsi_vco_clk_8994.c,
-		.dbg_name = "shadow_ndiv_clk_8994",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(shadow_ndiv_clk_8994.c),
-	},
-};
-
-static struct div_clk indirect_path_div2_clk_8994 = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &ndiv_clk_8994.c,
-		.dbg_name = "indirect_path_div2_clk_8994",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(indirect_path_div2_clk_8994.c),
-	},
-};
-
-static struct div_clk shadow_indirect_path_div2_clk_8994 = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &shadow_ndiv_clk_8994.c,
-		.dbg_name = "shadow_indirect_path_div2_clk_8994",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(shadow_indirect_path_div2_clk_8994.c),
-	},
-};
-
-static struct div_clk hr_oclk3_div_clk_8994 = {
-	.data = {
-		.max_div = 255,
-		.min_div = 1,
-	},
-	.ops = &hr_oclk3_div_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8994.c,
-		.dbg_name = "hr_oclk3_div_clk_8994",
-		.ops = &pixel_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(hr_oclk3_div_clk_8994.c),
-	},
-};
-
-static struct div_clk shadow_hr_oclk3_div_clk_8994 = {
-	.data = {
-		.max_div = 255,
-		.min_div = 1,
-	},
-	.ops = &shadow_hr_oclk3_div_ops,
-	.c = {
-		.parent = &shadow_dsi_vco_clk_8994.c,
-		.dbg_name = "shadow_hr_oclk3_div_clk_8994",
-		.ops = &shadow_pixel_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(shadow_hr_oclk3_div_clk_8994.c),
-	},
-};
-
-static struct div_clk pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &hr_oclk3_div_clk_8994.c,
-		.dbg_name = "pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(pixel_clk_src.c),
-	},
-};
-
-static struct div_clk shadow_pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &shadow_hr_oclk3_div_clk_8994.c,
-		.dbg_name = "shadow_pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(shadow_pixel_clk_src.c),
-	},
-};
-
-static struct mux_clk bypass_lp_div_mux_8994 = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]){
-		{&dsi_vco_clk_8994.c, 0},
-		{&indirect_path_div2_clk_8994.c, 1},
-	},
-	.ops = &bypass_lp_div_mux_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8994.c,
-		.dbg_name = "bypass_lp_div_mux_8994",
-		.ops = &bypass_lp_div_mux_clk_ops,
-		CLK_INIT(bypass_lp_div_mux_8994.c),
-	},
-};
-
-static struct mux_clk shadow_bypass_lp_div_mux_8994 = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]){
-		{&shadow_dsi_vco_clk_8994.c, 0},
-		{&shadow_indirect_path_div2_clk_8994.c, 1},
-	},
-	.ops = &shadow_bypass_lp_div_mux_ops,
-	.c = {
-		.parent = &shadow_dsi_vco_clk_8994.c,
-		.dbg_name = "shadow_bypass_lp_div_mux_8994",
-		.ops = &clk_ops_gen_mux,
-		CLK_INIT(shadow_bypass_lp_div_mux_8994.c),
-	},
-};
-
-static struct div_clk fixed_hr_oclk2_div_clk_8994 = {
-	.ops = &fixed_hr_oclk2_div_ops,
-	.data = {
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.c = {
-		.parent = &bypass_lp_div_mux_8994.c,
-		.dbg_name = "fixed_hr_oclk2_div_clk_8994",
-		.ops = &byte_clk_src_ops,
-		CLK_INIT(fixed_hr_oclk2_div_clk_8994.c),
-	},
-};
-
-static struct div_clk shadow_fixed_hr_oclk2_div_clk_8994 = {
-	.ops = &shadow_fixed_hr_oclk2_div_ops,
-	.data = {
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.c = {
-		.parent = &shadow_bypass_lp_div_mux_8994.c,
-		.dbg_name = "shadow_fixed_hr_oclk2_div_clk_8994",
-		.ops = &shadow_byte_clk_src_ops,
-		CLK_INIT(shadow_fixed_hr_oclk2_div_clk_8994.c),
-	},
-};
-
-static struct div_clk byte_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &fixed_hr_oclk2_div_clk_8994.c,
-		.dbg_name = "byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(byte_clk_src.c),
-	},
-};
-
-static struct div_clk shadow_byte_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &shadow_fixed_hr_oclk2_div_clk_8994.c,
-		.dbg_name = "shadow_byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(shadow_byte_clk_src.c),
-	},
-};
-
-static struct mux_clk mdss_pixel_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&pixel_clk_src.c, 0},
-		{&shadow_pixel_clk_src.c, 1},
-	},
-	.ops = &mdss_pixel_mux_ops,
-	.c = {
-		.parent = &pixel_clk_src.c,
-		.dbg_name = "mdss_pixel_clk_mux",
-		.ops = &clk_ops_gen_mux,
-		CLK_INIT(mdss_pixel_clk_mux.c),
-	}
-};
-
-static struct mux_clk mdss_byte_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&byte_clk_src.c, 0},
-		{&shadow_byte_clk_src.c, 1},
-	},
-	.ops = &mdss_byte_mux_ops,
-	.c = {
-		.parent = &byte_clk_src.c,
-		.dbg_name = "mdss_byte_clk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		CLK_INIT(mdss_byte_clk_mux.c),
-	}
-};
-
-static struct clk_lookup mdss_dsi_pll_1_cc_8994[] = {
-	CLK_LIST(mdss_dsi1_vco_clk_src),
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8994[] = {
-	CLK_LIST(mdss_pixel_clk_mux),
-	CLK_LIST(mdss_byte_clk_mux),
-	CLK_LIST(pixel_clk_src),
-	CLK_LIST(byte_clk_src),
-	CLK_LIST(fixed_hr_oclk2_div_clk_8994),
-	CLK_LIST(bypass_lp_div_mux_8994),
-	CLK_LIST(hr_oclk3_div_clk_8994),
-	CLK_LIST(indirect_path_div2_clk_8994),
-	CLK_LIST(ndiv_clk_8994),
-	CLK_LIST(dsi_vco_clk_8994),
-	CLK_LIST(shadow_pixel_clk_src),
-	CLK_LIST(shadow_byte_clk_src),
-	CLK_LIST(shadow_fixed_hr_oclk2_div_clk_8994),
-	CLK_LIST(shadow_bypass_lp_div_mux_8994),
-	CLK_LIST(shadow_hr_oclk3_div_clk_8994),
-	CLK_LIST(shadow_indirect_path_div2_clk_8994),
-	CLK_LIST(shadow_ndiv_clk_8994),
-	CLK_LIST(shadow_dsi_vco_clk_8994),
-};
-
-static void dsi_pll_off_work(struct work_struct *work)
-{
-	struct mdss_pll_resources *pll_res;
-
-	if (!work) {
-		pr_err("pll_resource is invalid\n");
-		return;
-	}
-
-	pr_debug("Starting PLL off Worker%s\n", __func__);
-
-	pll_res = container_of(work, struct
-			mdss_pll_resources, pll_off);
-
-	mdss_pll_resource_enable(pll_res, true);
-	__dsi_pll_disable(pll_res->pll_base);
-	if (pll_res->pll_1_base)
-		__dsi_pll_disable(pll_res->pll_1_base);
-	mdss_pll_resource_enable(pll_res, false);
-}
-
-static int dsi_pll_regulator_notifier_call(struct notifier_block *self,
-		unsigned long event, void *data)
-{
-
-	struct mdss_pll_resources *pll_res;
-
-	if (!self) {
-		pr_err("pll_resource is invalid\n");
-		goto error;
-	}
-
-	pll_res = container_of(self, struct
-			mdss_pll_resources, gdsc_cb);
-
-	if (event & REGULATOR_EVENT_ENABLE) {
-		pr_debug("Regulator ON event. Scheduling pll off worker\n");
-		schedule_work(&pll_res->pll_off);
-	}
-
-	if (event & REGULATOR_EVENT_DISABLE)
-		pr_debug("Regulator OFF event.\n");
-
-error:
-	return NOTIFY_OK;
-}
-
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc;
-	struct dss_vreg *pll_reg;
-
-	/*
-	 * Set client data to mux, div and vco clocks.
-	 * This needs to be done only for PLL0 since, that is the one in
-	 * use.
-	 **/
-	if (!pll_res->index) {
-		byte_clk_src.priv = pll_res;
-		pixel_clk_src.priv = pll_res;
-		bypass_lp_div_mux_8994.priv = pll_res;
-		indirect_path_div2_clk_8994.priv = pll_res;
-		ndiv_clk_8994.priv = pll_res;
-		fixed_hr_oclk2_div_clk_8994.priv = pll_res;
-		hr_oclk3_div_clk_8994.priv = pll_res;
-		dsi_vco_clk_8994.priv = pll_res;
-
-		shadow_byte_clk_src.priv = pll_res;
-		shadow_pixel_clk_src.priv = pll_res;
-		shadow_bypass_lp_div_mux_8994.priv = pll_res;
-		shadow_indirect_path_div2_clk_8994.priv = pll_res;
-		shadow_ndiv_clk_8994.priv = pll_res;
-		shadow_fixed_hr_oclk2_div_clk_8994.priv = pll_res;
-		shadow_hr_oclk3_div_clk_8994.priv = pll_res;
-		shadow_dsi_vco_clk_8994.priv = pll_res;
-
-		pll_res->vco_delay = VCO_DELAY_USEC;
-
-		/* Set clock source operations */
-		pixel_clk_src_ops = clk_ops_slave_div;
-		pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
-
-		ndiv_clk_ops = clk_ops_div;
-		ndiv_clk_ops.prepare = dsi_pll_div_prepare;
-
-		byte_clk_src_ops = clk_ops_div;
-		byte_clk_src_ops.prepare = dsi_pll_div_prepare;
-
-		bypass_lp_div_mux_clk_ops = clk_ops_gen_mux;
-		bypass_lp_div_mux_clk_ops.prepare = dsi_pll_mux_prepare;
-
-		clk_ops_gen_mux_dsi = clk_ops_gen_mux;
-		clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
-		clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
-		shadow_pixel_clk_src_ops = clk_ops_slave_div;
-		shadow_pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
-
-		shadow_byte_clk_src_ops = clk_ops_div;
-		shadow_byte_clk_src_ops.prepare = dsi_pll_div_prepare;
-	} else {
-		mdss_dsi1_vco_clk_src.priv = pll_res;
-	}
-
-	if ((pll_res->target_id == MDSS_PLL_TARGET_8994) ||
-			(pll_res->target_id == MDSS_PLL_TARGET_8992)) {
-		if (pll_res->index) {
-			rc = of_msm_clock_register(pdev->dev.of_node,
-					mdss_dsi_pll_1_cc_8994,
-					ARRAY_SIZE(mdss_dsi_pll_1_cc_8994));
-			if (rc) {
-				pr_err("Clock register failed\n");
-				rc = -EPROBE_DEFER;
-			}
-		} else {
-			rc = of_msm_clock_register(pdev->dev.of_node,
-				mdss_dsi_pllcc_8994,
-				ARRAY_SIZE(mdss_dsi_pllcc_8994));
-			if (rc) {
-				pr_err("Clock register failed\n");
-				rc = -EPROBE_DEFER;
-			}
-			pll_res->gdsc_cb.notifier_call =
-				dsi_pll_regulator_notifier_call;
-			INIT_WORK(&pll_res->pll_off, dsi_pll_off_work);
-
-			pll_reg = mdss_pll_get_mp_by_reg_name(pll_res, "gdsc");
-			if (pll_reg) {
-				pr_debug("Registering for gdsc regulator events\n");
-				if (regulator_register_notifier(pll_reg->vreg,
-							&(pll_res->gdsc_cb)))
-					pr_err("Regulator notification registration failed!\n");
-			}
-		}
-
-	} else {
-		pr_err("Invalid target ID\n");
-		rc = -EINVAL;
-	}
-
-	if (!rc)
-		pr_info("Registered DSI PLL clocks successfully\n");
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c
deleted file mode 100644
index d39a6b8..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8974.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-
-#define VCO_DELAY_USEC		1
-
-static struct clk_div_ops fixed_2div_ops;
-static const struct clk_ops byte_mux_clk_ops;
-static const struct clk_ops pixel_clk_src_ops;
-static const struct clk_ops byte_clk_src_ops;
-static const struct clk_ops analog_postdiv_clk_ops;
-static struct lpfr_cfg lpfr_lut_struct[] = {
-	{479500000, 8},
-	{480000000, 11},
-	{575500000, 8},
-	{576000000, 12},
-	{610500000, 8},
-	{659500000, 9},
-	{671500000, 10},
-	{672000000, 14},
-	{708500000, 10},
-	{750000000, 11},
-};
-
-static void dsi_pll_software_reset(struct mdss_pll_resources *dsi_pll_res)
-{
-	/*
-	 * Add HW recommended delays after toggling the software
-	 * reset bit off and back on.
-	 */
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
-	udelay(1);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
-	udelay(1);
-}
-
-static int vco_set_rate_hpm(struct clk *c, unsigned long rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	rc = vco_set_rate(vco, rate);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-static int dsi_pll_enable_seq_8974(struct mdss_pll_resources *dsi_pll_res)
-{
-	int i, rc = 0;
-	int pll_locked;
-
-	dsi_pll_software_reset(dsi_pll_res);
-
-	/*
-	 * PLL power up sequence.
-	 * Add necessary delays recommeded by hardware.
-	 */
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-	udelay(1);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-	udelay(200);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
-	udelay(500);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-	udelay(500);
-
-	for (i = 0; i < 2; i++) {
-		udelay(100);
-		/* DSI Uniphy lock detect setting */
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0c);
-		udelay(100);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
-
-		pll_locked = dsi_pll_lock_status(dsi_pll_res);
-		if (pll_locked)
-			break;
-
-		dsi_pll_software_reset(dsi_pll_res);
-		/*
-		 * PLL power up sequence.
-		 * Add necessary delays recommeded by hardware.
-		 */
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x1);
-		udelay(1);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x5);
-		udelay(200);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x7);
-		udelay(250);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x5);
-		udelay(200);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x7);
-		udelay(500);
-		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0xf);
-		udelay(500);
-
-	}
-
-	if (!pll_locked) {
-		pr_err("DSI PLL lock failed\n");
-		rc = -EINVAL;
-	} else {
-		pr_debug("DSI PLL Lock success\n");
-	}
-
-	return rc;
-}
-
-/* Op structures */
-
-static const struct clk_ops clk_ops_dsi_vco = {
-	.set_rate = vco_set_rate_hpm,
-	.round_rate = vco_round_rate,
-	.handoff = vco_handoff,
-	.prepare = vco_prepare,
-	.unprepare = vco_unprepare,
-};
-
-
-static struct clk_div_ops fixed_4div_ops = {
-	.set_div = fixed_4div_set_div,
-	.get_div = fixed_4div_get_div,
-};
-
-static struct clk_div_ops analog_postdiv_ops = {
-	.set_div = analog_set_div,
-	.get_div = analog_get_div,
-};
-
-static struct clk_div_ops digital_postdiv_ops = {
-	.set_div = digital_set_div,
-	.get_div = digital_get_div,
-};
-
-static struct clk_mux_ops byte_mux_ops = {
-	.set_mux_sel = set_byte_mux_sel,
-	.get_mux_sel = get_byte_mux_sel,
-};
-
-static struct dsi_pll_vco_clk dsi_vco_clk_8974 = {
-	.ref_clk_rate = 19200000,
-	.min_rate = 350000000,
-	.max_rate = 750000000,
-	.pll_en_seq_cnt = 3,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_8974,
-	.pll_enable_seqs[1] = dsi_pll_enable_seq_8974,
-	.pll_enable_seqs[2] = dsi_pll_enable_seq_8974,
-	.lpfr_lut_size = 10,
-	.lpfr_lut = lpfr_lut_struct,
-	.c = {
-		.dbg_name = "dsi_vco_clk_8974",
-		.ops = &clk_ops_dsi_vco,
-		CLK_INIT(dsi_vco_clk_8974.c),
-	},
-};
-
-static struct div_clk analog_postdiv_clk_8974 = {
-	.data = {
-		.max_div = 255,
-		.min_div = 1,
-	},
-	.ops = &analog_postdiv_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8974.c,
-		.dbg_name = "analog_postdiv_clk",
-		.ops = &analog_postdiv_clk_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(analog_postdiv_clk_8974.c),
-	},
-};
-
-static struct div_clk indirect_path_div2_clk_8974 = {
-	.ops = &fixed_2div_ops,
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &analog_postdiv_clk_8974.c,
-		.dbg_name = "indirect_path_div2_clk",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(indirect_path_div2_clk_8974.c),
-	},
-};
-
-static struct div_clk pixel_clk_src_8974 = {
-	.data = {
-		.max_div = 255,
-		.min_div = 1,
-	},
-	.ops = &digital_postdiv_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8974.c,
-		.dbg_name = "pixel_clk_src_8974",
-		.ops = &pixel_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(pixel_clk_src_8974.c),
-	},
-};
-
-static struct mux_clk byte_mux_8974 = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]){
-		{&dsi_vco_clk_8974.c, 0},
-		{&indirect_path_div2_clk_8974.c, 1},
-	},
-	.ops = &byte_mux_ops,
-	.c = {
-		.parent = &dsi_vco_clk_8974.c,
-		.dbg_name = "byte_mux_8974",
-		.ops = &byte_mux_clk_ops,
-		CLK_INIT(byte_mux_8974.c),
-	},
-};
-
-static struct div_clk byte_clk_src_8974 = {
-	.ops = &fixed_4div_ops,
-	.data = {
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.c = {
-		.parent = &byte_mux_8974.c,
-		.dbg_name = "byte_clk_src_8974",
-		.ops = &byte_clk_src_ops,
-		CLK_INIT(byte_clk_src_8974.c),
-	},
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8974[] = {
-	CLK_LOOKUP_OF("pixel_src", pixel_clk_src_8974,
-						"fd8c0000.qcom,mmsscc-mdss"),
-	CLK_LOOKUP_OF("byte_src", byte_clk_src_8974,
-						"fd8c0000.qcom,mmsscc-mdss"),
-};
-
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc;
-
-	/* Set client data to mux, div and vco clocks */
-	byte_clk_src_8974.priv = pll_res;
-	pixel_clk_src_8974.priv = pll_res;
-	byte_mux_8974.priv = pll_res;
-	indirect_path_div2_clk_8974.priv = pll_res;
-	analog_postdiv_clk_8974.priv = pll_res;
-	dsi_vco_clk_8974.priv = pll_res;
-	pll_res->vco_delay = VCO_DELAY_USEC;
-
-	/* Set clock source operations */
-	pixel_clk_src_ops = clk_ops_slave_div;
-	pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
-
-	analog_postdiv_clk_ops = clk_ops_div;
-	analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare;
-
-	byte_clk_src_ops = clk_ops_div;
-	byte_clk_src_ops.prepare = dsi_pll_div_prepare;
-
-	byte_mux_clk_ops = clk_ops_gen_mux;
-	byte_mux_clk_ops.prepare = dsi_pll_mux_prepare;
-
-	if (pll_res->target_id == MDSS_PLL_TARGET_8974) {
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pllcc_8974, ARRAY_SIZE(mdss_dsi_pllcc_8974));
-		if (rc) {
-			pr_err("Clock register failed\n");
-			rc = -EPROBE_DEFER;
-		}
-	} else {
-		pr_err("Invalid target ID\n");
-		rc = -EINVAL;
-	}
-
-	if (!rc)
-		pr_info("Registered DSI PLL clocks successfully\n");
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c
deleted file mode 100644
index 68e9edf..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c
+++ /dev/null
@@ -1,545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <dt-bindings/clock/mdss-28nm-pll-clk.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-28nm.h"
-
-#define VCO_DELAY_USEC			1000
-
-enum {
-	DSI_PLL_0,
-	DSI_PLL_1,
-	DSI_PLL_MAX
-};
-
-static struct lpfr_cfg lpfr_lut_struct[] = {
-	{479500000, 8},
-	{480000000, 11},
-	{575500000, 8},
-	{576000000, 12},
-	{610500000, 8},
-	{659500000, 9},
-	{671500000, 10},
-	{672000000, 14},
-	{708500000, 10},
-	{750000000, 11},
-};
-
-static void dsi_pll_sw_reset(struct mdss_pll_resources *rsc)
-{
-	/*
-	 * DSI PLL software reset. Add HW recommended delays after toggling
-	 * the software reset bit off and back on.
-	 */
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
-	ndelay(500);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
-}
-
-static void dsi_pll_toggle_lock_detect(
-				struct mdss_pll_resources *rsc)
-{
-	/* DSI PLL toggle lock detect setting */
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04);
-	ndelay(500);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05);
-	udelay(512);
-}
-
-static int dsi_pll_check_lock_status(
-				struct mdss_pll_resources *rsc)
-{
-	int rc = 0;
-
-	rc = dsi_pll_lock_status(rsc);
-	if (rc)
-		pr_debug("PLL Locked\n");
-	else
-		pr_err("PLL failed to lock\n");
-
-	return rc;
-}
-
-
-static int dsi_pll_enable_seq_gf2(struct mdss_pll_resources *rsc)
-{
-	int pll_locked = 0;
-
-	dsi_pll_sw_reset(rsc);
-
-	/*
-	 * GF PART 2 PLL power up sequence.
-	 * Add necessary delays recommended by hardware.
-	 */
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x04);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-	udelay(3);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-	udelay(500);
-
-	dsi_pll_toggle_lock_detect(rsc);
-
-	pll_locked = dsi_pll_check_lock_status(rsc);
-	return pll_locked ? 0 : -EINVAL;
-}
-
-static int dsi_pll_enable_seq_gf1(struct mdss_pll_resources *rsc)
-{
-	int pll_locked = 0;
-
-	dsi_pll_sw_reset(rsc);
-	/*
-	 * GF PART 1 PLL power up sequence.
-	 * Add necessary delays recommended by hardware.
-	 */
-
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x14);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-	udelay(3);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-	udelay(500);
-
-	dsi_pll_toggle_lock_detect(rsc);
-
-	pll_locked = dsi_pll_check_lock_status(rsc);
-	return pll_locked ? 0 : -EINVAL;
-}
-
-static int dsi_pll_enable_seq_tsmc(struct mdss_pll_resources *rsc)
-{
-	int pll_locked = 0;
-
-	dsi_pll_sw_reset(rsc);
-	/*
-	 * TSMC PLL power up sequence.
-	 * Add necessary delays recommended by hardware.
-	 */
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-	udelay(500);
-
-	dsi_pll_toggle_lock_detect(rsc);
-
-	pll_locked = dsi_pll_check_lock_status(rsc);
-	return pll_locked ? 0 : -EINVAL;
-}
-
-static struct regmap_config dsi_pll_28lpm_config = {
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-	.max_register = 0xF4,
-};
-
-static struct regmap_bus analog_postdiv_regmap_bus = {
-	.reg_write = analog_postdiv_reg_write,
-	.reg_read = analog_postdiv_reg_read,
-};
-
-static struct regmap_bus byteclk_src_mux_regmap_bus = {
-	.reg_write = byteclk_mux_write_sel,
-	.reg_read = byteclk_mux_read_sel,
-};
-
-static struct regmap_bus pclk_src_regmap_bus = {
-	.reg_write = pixel_clk_set_div,
-	.reg_read = pixel_clk_get_div,
-};
-
-static const struct clk_ops clk_ops_vco_28lpm = {
-	.recalc_rate = vco_28nm_recalc_rate,
-	.set_rate = vco_28nm_set_rate,
-	.round_rate = vco_28nm_round_rate,
-	.prepare = vco_28nm_prepare,
-	.unprepare = vco_28nm_unprepare,
-};
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 350000000UL,
-	.max_rate = 750000000UL,
-	.pll_en_seq_cnt = 9,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
-	.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
-	.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
-	.lpfr_lut_size = 10,
-	.lpfr_lut = lpfr_lut_struct,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_vco_clk",
-			.parent_names = (const char *[]){"cxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_28lpm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 350000000UL,
-	.max_rate = 750000000UL,
-	.pll_en_seq_cnt = 9,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
-	.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
-	.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
-	.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
-	.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
-	.lpfr_lut_size = 10,
-	.lpfr_lut = lpfr_lut_struct,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_vco_clk",
-			.parent_names = (const char *[]){"cxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_28lpm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct clk_regmap_div dsi0pll_analog_postdiv = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_analog_postdiv",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_analog_postdiv = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_analog_postdiv",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_indirect_path_src = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_indirect_path_src",
-		.parent_names = (const char *[]){"dsi0pll_analog_postdiv"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_indirect_path_src = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_indirect_path_src",
-		.parent_names = (const char *[]){"dsi1pll_analog_postdiv"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_byteclk_src_mux = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
-	.shift = 1,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_byteclk_src_mux",
-			.parent_names = (const char *[]){
-				"dsi0pll_vco_clk",
-				"dsi0pll_indirect_path_src"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_byteclk_src_mux = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
-	.shift = 1,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_byteclk_src_mux",
-			.parent_names = (const char *[]){
-				"dsi1pll_vco_clk",
-				"dsi1pll_indirect_path_src"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_byteclk_src = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_byteclk_src",
-		.parent_names = (const char *[]){
-				"dsi0pll_byteclk_src_mux"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_byteclk_src = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_byteclk_src",
-		.parent_names = (const char *[]){
-				"dsi1pll_byteclk_src_mux"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_div dsi0pll_pclk_src = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
-	.shift = 0,
-	.width = 8,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pclk_src",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_pclk_src = {
-	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
-	.shift = 0,
-	.width = 8,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pclk_src",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_hw *mdss_dsi_pllcc_28lpm[] = {
-	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
-	[ANALOG_POSTDIV_0_CLK] = &dsi0pll_analog_postdiv.clkr.hw,
-	[INDIRECT_PATH_SRC_0_CLK] = &dsi0pll_indirect_path_src.hw,
-	[BYTECLK_SRC_MUX_0_CLK] = &dsi0pll_byteclk_src_mux.clkr.hw,
-	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
-	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
-	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
-	[ANALOG_POSTDIV_1_CLK] = &dsi1pll_analog_postdiv.clkr.hw,
-	[INDIRECT_PATH_SRC_1_CLK] = &dsi1pll_indirect_path_src.hw,
-	[BYTECLK_SRC_MUX_1_CLK] = &dsi1pll_byteclk_src_mux.clkr.hw,
-	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
-	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
-};
-
-int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0, ndx, i;
-	struct clk *clk;
-	struct clk_onecell_data *clk_data;
-	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_28lpm);
-	struct regmap *rmap;
-
-	int const ssc_freq_min = 30000; /* min. recommended freq. value */
-	int const ssc_freq_max = 33000; /* max. recommended freq. value */
-	int const ssc_ppm_max = 5000; /* max. recommended ppm */
-
-	ndx = pll_res->index;
-
-	if (ndx >= DSI_PLL_MAX) {
-		pr_err("pll index(%d) NOT supported\n", ndx);
-		return -EINVAL;
-	}
-
-	pll_res->vco_delay = VCO_DELAY_USEC;
-
-	if (pll_res->ssc_en) {
-		if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
-			(pll_res->ssc_freq > ssc_freq_max)) {
-			pll_res->ssc_freq = ssc_freq_min;
-			pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
-				pll_res->ssc_freq);
-		}
-
-		if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
-			pll_res->ssc_ppm = ssc_ppm_max;
-			pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
-				pll_res->ssc_ppm);
-		}
-	}
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data),
-					GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	/* Establish client data */
-	if (ndx == 0) {
-		rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi0pll_byteclk_src_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi0pll_analog_postdiv.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi0pll_pclk_src.clkr.regmap = rmap;
-
-		dsi0pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_0; i <= PCLK_SRC_0_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_28lpm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-							pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-
-	} else {
-		rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi1pll_byteclk_src_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi1pll_analog_postdiv.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_28lpm_config);
-		if (IS_ERR(rmap)) {
-			pr_err("regmap init failed for DSI clock:%d\n",
-					pll_res->index);
-			return -EINVAL;
-		}
-		dsi1pll_pclk_src.clkr.regmap = rmap;
-
-		dsi1pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_1; i <= PCLK_SRC_1_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_28lpm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-							pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-	}
-	if (!rc) {
-		pr_info("Registered DSI PLL ndx=%d, clocks successfully\n",
-				ndx);
-
-		return rc;
-	}
-
-clk_register_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c
deleted file mode 100644
index f168f637..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c
+++ /dev/null
@@ -1,652 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-28nm.h"
-
-#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
-#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG	(0x0008)
-#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
-#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG	(0x0014)
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG		(0x002C)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG	(0x0030)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG	(0x0034)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
-#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0		(0x004C)
-#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1		(0x0050)
-#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2		(0x0054)
-#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3		(0x0058)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3		(0x0078)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4		(0x007C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5		(0x0080)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
-#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG	(0x009C)
-#define DSI_PHY_PLL_UNIPHY_PLL_STATUS		(0x00C0)
-
-#define DSI_PLL_POLL_DELAY_US			50
-#define DSI_PLL_POLL_TIMEOUT_US			500
-
-int analog_postdiv_reg_read(void *context, unsigned int reg,
-		unsigned int *div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
-
-	pr_debug("analog_postdiv div = %d\n", *div);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-int analog_postdiv_reg_write(void *context, unsigned int reg,
-		unsigned int div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	pr_debug("analog_postdiv div = %d\n", div);
-
-	MDSS_PLL_REG_W(rsc->pll_base, reg, div);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-int byteclk_mux_read_sel(void *context, unsigned int reg,
-		unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & BIT(1));
-	pr_debug("byteclk mux mode = %s\n", *val ? "indirect" : "direct");
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-int byteclk_mux_write_sel(void *context, unsigned int reg,
-		unsigned int val)
-{
-	int rc = 0;
-	u32 reg_val = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	pr_debug("byteclk mux set to %s mode\n", val ? "indirect" : "direct");
-
-	reg_val = MDSS_PLL_REG_R(rsc->pll_base, reg);
-	reg_val &= ~0x02;
-	reg_val |= val;
-
-	MDSS_PLL_REG_W(rsc->pll_base, reg, reg_val);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-int pixel_clk_get_div(void *context, unsigned int reg,
-		unsigned int *div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
-
-	pr_debug("pclk_src div = %d\n", *div);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-int pixel_clk_set_div(void *context, unsigned int reg,
-		unsigned int div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	pr_debug("pclk_src div = %d\n", div);
-
-	MDSS_PLL_REG_W(rsc->pll_base, reg, div);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-int dsi_pll_lock_status(struct mdss_pll_resources *rsc)
-{
-	u32 status;
-	int pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((rsc->pll_base +
-			DSI_PHY_PLL_UNIPHY_PLL_STATUS),
-			status,
-			((status & BIT(0)) == 1),
-			DSI_PLL_POLL_DELAY_US,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("DSI PLL status=%x failed to Lock\n", status);
-		pll_locked = 0;
-	} else {
-		pll_locked = 1;
-	}
-
-	return pll_locked;
-}
-
-static int pll_28nm_vco_rate_calc(struct dsi_pll_vco_clk *vco,
-		struct mdss_dsi_vco_calc *vco_calc, unsigned long vco_clk_rate)
-{
-	s32 rem;
-	s64 frac_n_mode, ref_doubler_en_b;
-	s64 ref_clk_to_pll, div_fb, frac_n_value;
-	int i;
-
-	/* Configure the Loop filter resistance */
-	for (i = 0; i < vco->lpfr_lut_size; i++)
-		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
-			break;
-	if (i == vco->lpfr_lut_size) {
-		pr_err("unable to get loop filter resistance. vco=%ld\n",
-			vco_clk_rate);
-		return -EINVAL;
-	}
-	vco_calc->lpfr_lut_res = vco->lpfr_lut[i].r;
-
-	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
-	if (rem) {
-		vco_calc->refclk_cfg = 0x1;
-		frac_n_mode = 1;
-		ref_doubler_en_b = 0;
-	} else {
-		vco_calc->refclk_cfg = 0x0;
-		frac_n_mode = 0;
-		ref_doubler_en_b = 1;
-	}
-
-	pr_debug("refclk_cfg = %lld\n", vco_calc->refclk_cfg);
-
-	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (vco_calc->refclk_cfg))
-			  + (ref_doubler_en_b * vco->ref_clk_rate));
-
-	div_fb = div_s64_rem(vco_clk_rate, ref_clk_to_pll, &rem);
-	frac_n_value = div_s64(((s64)rem * (1 << 16)), ref_clk_to_pll);
-	vco_calc->gen_vco_clk = vco_clk_rate;
-
-	pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
-	pr_debug("div_fb = %lld\n", div_fb);
-	pr_debug("frac_n_value = %lld\n", frac_n_value);
-
-	pr_debug("Generated VCO Clock: %lld\n", vco_calc->gen_vco_clk);
-	rem = 0;
-	if (frac_n_mode) {
-		vco_calc->sdm_cfg0 = 0;
-		vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1;
-		vco_calc->sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
-		vco_calc->sdm_cfg2 = rem;
-	} else {
-		vco_calc->sdm_cfg0 = (0x1 << 5);
-		vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1;
-		vco_calc->sdm_cfg1 = 0;
-		vco_calc->sdm_cfg2 = 0;
-		vco_calc->sdm_cfg3 = 0;
-	}
-
-	pr_debug("sdm_cfg0=%lld\n", vco_calc->sdm_cfg0);
-	pr_debug("sdm_cfg1=%lld\n", vco_calc->sdm_cfg1);
-	pr_debug("sdm_cfg2=%lld\n", vco_calc->sdm_cfg2);
-	pr_debug("sdm_cfg3=%lld\n", vco_calc->sdm_cfg3);
-
-	vco_calc->cal_cfg11 = div_s64_rem(vco_calc->gen_vco_clk,
-			256 * 1000000, &rem);
-	vco_calc->cal_cfg10 = rem / 1000000;
-	pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n",
-		vco_calc->cal_cfg10, vco_calc->cal_cfg11);
-
-	return 0;
-}
-
-static void pll_28nm_ssc_param_calc(struct dsi_pll_vco_clk *vco,
-		struct mdss_dsi_vco_calc *vco_calc)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-	s64 ppm_freq, incr, spread_freq, div_rf, frac_n_value;
-	s32 rem;
-
-	if (!rsc->ssc_en) {
-		pr_debug("DSI PLL SSC not enabled\n");
-		return;
-	}
-
-	vco_calc->ssc.kdiv = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
-			1000000) - 1;
-	vco_calc->ssc.triang_steps = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
-			rsc->ssc_freq * (vco_calc->ssc.kdiv + 1));
-	ppm_freq = div_s64(vco_calc->gen_vco_clk * rsc->ssc_ppm,
-			1000000);
-	incr = div64_s64(ppm_freq * 65536, vco->ref_clk_rate * 2 *
-			vco_calc->ssc.triang_steps);
-
-	vco_calc->ssc.triang_inc_7_0 = incr & 0xff;
-	vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3;
-
-	if (!rsc->ssc_center)
-		spread_freq = vco_calc->gen_vco_clk - ppm_freq;
-	else
-		spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2);
-
-	div_rf = div_s64(spread_freq, 2 * vco->ref_clk_rate);
-	vco_calc->ssc.dc_offset = (div_rf - 1);
-
-	div_s64_rem(spread_freq, 2 * vco->ref_clk_rate, &rem);
-	frac_n_value = div_s64((s64)rem * 65536, 2 * vco->ref_clk_rate);
-
-	vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff;
-	vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff;
-}
-
-static void pll_28nm_vco_config(struct dsi_pll_vco_clk *vco,
-		struct mdss_dsi_vco_calc *vco_calc)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-	void __iomem *pll_base = rsc->pll_base;
-	u32 vco_delay_us = rsc->vco_delay;
-	bool ssc_en = rsc->ssc_en;
-
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG,
-		vco_calc->lpfr_lut_res);
-
-	/* Loop filter capacitance values : c1 and c2 */
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
-
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
-
-	if (!ssc_en) {
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
-			(u32)(vco_calc->sdm_cfg1 & 0xff));
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
-			(u32)(vco_calc->sdm_cfg2 & 0xff));
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
-			(u32)(vco_calc->sdm_cfg3 & 0xff));
-	} else {
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
-			(u32)vco_calc->ssc.dc_offset);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
-			(u32)vco_calc->ssc.freq_seed_7_0);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
-			(u32)vco_calc->ssc.freq_seed_15_8);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0,
-			(u32)vco_calc->ssc.kdiv);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1,
-			(u32)vco_calc->ssc.triang_inc_7_0);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2,
-			(u32)vco_calc->ssc.triang_inc_9_8);
-		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3,
-			(u32)vco_calc->ssc.triang_steps);
-	}
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
-
-	/* Add hardware recommended delay for correct PLL configuration */
-	if (vco_delay_us)
-		udelay(vco_delay_us);
-
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG,
-		(u32)vco_calc->refclk_cfg);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0,
-		(u32)vco_calc->sdm_cfg0);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10,
-		(u32)(vco_calc->cal_cfg10 & 0xff));
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11,
-		(u32)(vco_calc->cal_cfg11 & 0xff));
-	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
-	MDSS_PLL_REG_W(pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, 0x3); /* Fixed div-4 */
-}
-
-static int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
-{
-	struct mdss_dsi_vco_calc vco_calc = {0};
-	int rc = 0;
-
-	rc = pll_28nm_vco_rate_calc(vco, &vco_calc, rate);
-	if (rc) {
-		pr_err("vco rate calculation failed\n");
-		return rc;
-	}
-
-	pll_28nm_ssc_param_calc(vco, &vco_calc);
-	pll_28nm_vco_config(vco, &vco_calc);
-
-	return 0;
-}
-
-static unsigned long vco_get_rate(struct dsi_pll_vco_clk *vco)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-	int rc;
-	u32 sdm0, doubler, sdm_byp_div;
-	u64 vco_rate;
-	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
-	u64 ref_clk = vco->ref_clk_rate;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/* Check to see if the ref clk doubler is enabled */
-	doubler = MDSS_PLL_REG_R(rsc->pll_base,
-				 DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
-	ref_clk += (doubler * vco->ref_clk_rate);
-
-	/* see if it is integer mode or sdm mode */
-	sdm0 = MDSS_PLL_REG_R(rsc->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
-	if (sdm0 & BIT(6)) {
-		/* integer mode */
-		sdm_byp_div = (MDSS_PLL_REG_R(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
-		vco_rate = ref_clk * sdm_byp_div;
-	} else {
-		/* sdm mode */
-		sdm_dc_off = MDSS_PLL_REG_R(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
-		pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
-		sdm2 = MDSS_PLL_REG_R(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
-		sdm3 = MDSS_PLL_REG_R(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
-		sdm_freq_seed = (sdm3 << 8) | sdm2;
-		pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
-
-		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
-			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
-		pr_debug("vco rate = %lld\n", vco_rate);
-	}
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(rsc, false);
-
-	return (unsigned long)vco_rate;
-}
-
-static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
-{
-	int i, rc;
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("failed to enable dsi pll(%d) resources\n",
-				rsc->index);
-		return rc;
-	}
-
-	/* Try all enable sequences until one succeeds */
-	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
-		rc = vco->pll_enable_seqs[i](rsc);
-		pr_debug("DSI PLL %s after sequence #%d\n",
-			rc ? "unlocked" : "locked", i + 1);
-		if (!rc)
-			break;
-	}
-
-	if (rc) {
-		mdss_pll_resource_enable(rsc, false);
-		pr_err("DSI PLL failed to lock\n");
-	}
-	rsc->pll_on = true;
-
-	return rc;
-}
-
-static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	if (!rsc->pll_on &&
-		mdss_pll_resource_enable(rsc, true)) {
-		pr_err("failed to enable dsi pll(%d) resources\n",
-				rsc->index);
-		return;
-	}
-
-	rsc->handoff_resources = false;
-
-	MDSS_PLL_REG_W(rsc->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
-
-	mdss_pll_resource_enable(rsc, false);
-	rsc->pll_on = false;
-
-	pr_debug("DSI PLL Disabled\n");
-}
-
-int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-	int rc;
-
-	if (!rsc) {
-		pr_err("pll resource not found\n");
-		return -EINVAL;
-	}
-
-	if (rsc->pll_on)
-		return 0;
-
-	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
-		       rsc->index, rc);
-		return rc;
-	}
-
-	/*
-	 * DSI PLL software reset. Add HW recommended delays after toggling
-	 * the software reset bit off and back on.
-	 */
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
-	udelay(1000);
-	MDSS_PLL_REG_W(rsc->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
-	udelay(1000);
-
-	rc = vco_set_rate(vco, rate);
-	rsc->vco_current_rate = rate;
-
-	mdss_pll_resource_enable(rsc, false);
-
-	return 0;
-}
-
-long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	*parent_rate = rrate;
-
-	return rrate;
-}
-
-unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-	int rc;
-	u64 vco_rate = 0;
-
-	if (!rsc) {
-		pr_err("dsi pll resources not available\n");
-		return 0;
-	}
-
-	if (rsc->vco_current_rate)
-		return (unsigned long)rsc->vco_current_rate;
-
-	if (is_gdsc_disabled(rsc))
-		return 0;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("failed to enable dsi pll(%d) resources\n",
-				rsc->index);
-		return 0;
-	}
-
-	if (dsi_pll_lock_status(rsc)) {
-		rsc->handoff_resources = true;
-		rsc->pll_on = true;
-		vco_rate = vco_get_rate(vco);
-	} else {
-		mdss_pll_resource_enable(rsc, false);
-	}
-
-	return (unsigned long)vco_rate;
-}
-
-int vco_28nm_prepare(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	if (!rsc) {
-		pr_err("dsi pll resources not available\n");
-		return -EINVAL;
-	}
-
-	if ((rsc->vco_cached_rate != 0)
-	    && (rsc->vco_cached_rate == clk_hw_get_rate(hw))) {
-		rc = hw->init->ops->set_rate(hw, rsc->vco_cached_rate,
-				rsc->vco_cached_rate);
-		if (rc) {
-			pr_err("pll(%d ) set_rate failed. rc=%d\n",
-					rsc->index, rc);
-			goto error;
-		}
-
-		MDSS_PLL_REG_W(rsc->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
-				rsc->cached_postdiv1);
-		MDSS_PLL_REG_W(rsc->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
-				rsc->cached_postdiv3);
-		MDSS_PLL_REG_W(rsc->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
-				rsc->cached_vreg_cfg);
-	}
-
-	rc = dsi_pll_enable(vco);
-
-error:
-	return rc;
-}
-
-void vco_28nm_unprepare(struct clk_hw *hw)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	if (!rsc) {
-		pr_err("dsi pll resources not available\n");
-		return;
-	}
-
-	rsc->cached_postdiv1 = MDSS_PLL_REG_R(rsc->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG);
-	rsc->cached_postdiv3 = MDSS_PLL_REG_R(rsc->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
-	rsc->cached_vreg_cfg = MDSS_PLL_REG_R(rsc->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG);
-
-	rsc->vco_cached_rate = clk_hw_get_rate(hw);
-
-	dsi_pll_disable(vco);
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h
deleted file mode 100644
index fc03f36..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DSI_PLL_28NM_H
-#define __MDSS_DSI_PLL_28NM_H
-
-#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG          (0x0020)
-#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2       (0x0064)
-#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG         (0x0068)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1         (0x0070)
-
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
-#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG		(0x0010)
-
-struct ssc_params {
-	s32 kdiv;
-	s64 triang_inc_7_0;
-	s64 triang_inc_9_8;
-	s64 triang_steps;
-	s64 dc_offset;
-	s64 freq_seed_7_0;
-	s64 freq_seed_15_8;
-};
-
-struct mdss_dsi_vco_calc {
-	s64 sdm_cfg0;
-	s64 sdm_cfg1;
-	s64 sdm_cfg2;
-	s64 sdm_cfg3;
-	s64 cal_cfg10;
-	s64 cal_cfg11;
-	s64 refclk_cfg;
-	s64 gen_vco_clk;
-	u32 lpfr_lut_res;
-	struct ssc_params ssc;
-};
-
-unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
-				unsigned long parent_rate);
-int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate);
-int vco_28nm_prepare(struct clk_hw *hw);
-void vco_28nm_unprepare(struct clk_hw *hw);
-
-int analog_postdiv_reg_write(void *context,
-				unsigned int reg, unsigned int div);
-int analog_postdiv_reg_read(void *context,
-				unsigned int reg, unsigned int *div);
-int byteclk_mux_write_sel(void *context,
-				unsigned int reg, unsigned int val);
-int byteclk_mux_read_sel(void *context,
-				unsigned int reg, unsigned int *val);
-int pixel_clk_set_div(void *context,
-				unsigned int reg, unsigned int div);
-int pixel_clk_get_div(void *context,
-				unsigned int reg, unsigned int *div);
-
-int dsi_pll_lock_status(struct mdss_pll_resources *rsc);
-#endif /* __MDSS_DSI_PLL_28NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
deleted file mode 100644
index e1e7abc..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
+++ /dev/null
@@ -1,1942 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include "mdss-dsi-pll.h"
-#include "mdss-pll.h"
-#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
-
-#define VCO_DELAY_USEC 1
-
-#define MHZ_250		250000000UL
-#define MHZ_500		500000000UL
-#define MHZ_1000	1000000000UL
-#define MHZ_1100	1100000000UL
-#define MHZ_1900	1900000000UL
-#define MHZ_3000	3000000000UL
-
-/* Register Offsets from PLL base address */
-#define PLL_ANALOG_CONTROLS_ONE			0x0000
-#define PLL_ANALOG_CONTROLS_TWO			0x0004
-#define PLL_INT_LOOP_SETTINGS			0x0008
-#define PLL_INT_LOOP_SETTINGS_TWO		0x000C
-#define PLL_ANALOG_CONTROLS_THREE		0x0010
-#define PLL_ANALOG_CONTROLS_FOUR		0x0014
-#define PLL_ANALOG_CONTROLS_FIVE		0x0018
-#define PLL_INT_LOOP_CONTROLS			0x001C
-#define PLL_DSM_DIVIDER				0x0020
-#define PLL_FEEDBACK_DIVIDER			0x0024
-#define PLL_SYSTEM_MUXES			0x0028
-#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES	0x002C
-#define PLL_CMODE				0x0030
-#define PLL_PSM_CTRL				0x0034
-#define PLL_RSM_CTRL				0x0038
-#define PLL_VCO_TUNE_MAP			0x003C
-#define PLL_PLL_CNTRL				0x0040
-#define PLL_CALIBRATION_SETTINGS		0x0044
-#define PLL_BAND_SEL_CAL_TIMER_LOW		0x0048
-#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x004C
-#define PLL_BAND_SEL_CAL_SETTINGS		0x0050
-#define PLL_BAND_SEL_MIN			0x0054
-#define PLL_BAND_SEL_MAX			0x0058
-#define PLL_BAND_SEL_PFILT			0x005C
-#define PLL_BAND_SEL_IFILT			0x0060
-#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x0064
-#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x0068
-#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x006C
-#define PLL_BAND_SEL_ICODE_HIGH			0x0070
-#define PLL_BAND_SEL_ICODE_LOW			0x0074
-#define PLL_FREQ_DETECT_SETTINGS_ONE		0x0078
-#define PLL_FREQ_DETECT_THRESH			0x007C
-#define PLL_FREQ_DET_REFCLK_HIGH		0x0080
-#define PLL_FREQ_DET_REFCLK_LOW			0x0084
-#define PLL_FREQ_DET_PLLCLK_HIGH		0x0088
-#define PLL_FREQ_DET_PLLCLK_LOW			0x008C
-#define PLL_PFILT				0x0090
-#define PLL_IFILT				0x0094
-#define PLL_PLL_GAIN				0x0098
-#define PLL_ICODE_LOW				0x009C
-#define PLL_ICODE_HIGH				0x00A0
-#define PLL_LOCKDET				0x00A4
-#define PLL_OUTDIV				0x00A8
-#define PLL_FASTLOCK_CONTROL			0x00AC
-#define PLL_PASS_OUT_OVERRIDE_ONE		0x00B0
-#define PLL_PASS_OUT_OVERRIDE_TWO		0x00B4
-#define PLL_CORE_OVERRIDE			0x00B8
-#define PLL_CORE_INPUT_OVERRIDE			0x00BC
-#define PLL_RATE_CHANGE				0x00C0
-#define PLL_PLL_DIGITAL_TIMERS			0x00C4
-#define PLL_PLL_DIGITAL_TIMERS_TWO		0x00C8
-#define PLL_DECIMAL_DIV_START			0x00CC
-#define PLL_FRAC_DIV_START_LOW			0x00D0
-#define PLL_FRAC_DIV_START_MID			0x00D4
-#define PLL_FRAC_DIV_START_HIGH			0x00D8
-#define PLL_DEC_FRAC_MUXES			0x00DC
-#define PLL_DECIMAL_DIV_START_1			0x00E0
-#define PLL_FRAC_DIV_START_LOW_1		0x00E4
-#define PLL_FRAC_DIV_START_MID_1		0x00E8
-#define PLL_FRAC_DIV_START_HIGH_1		0x00EC
-#define PLL_DECIMAL_DIV_START_2			0x00F0
-#define PLL_FRAC_DIV_START_LOW_2		0x00F4
-#define PLL_FRAC_DIV_START_MID_2		0x00F8
-#define PLL_FRAC_DIV_START_HIGH_2		0x00FC
-#define PLL_MASH_CONTROL			0x0100
-#define PLL_SSC_STEPSIZE_LOW			0x0104
-#define PLL_SSC_STEPSIZE_HIGH			0x0108
-#define PLL_SSC_DIV_PER_LOW			0x010C
-#define PLL_SSC_DIV_PER_HIGH			0x0110
-#define PLL_SSC_ADJPER_LOW			0x0114
-#define PLL_SSC_ADJPER_HIGH			0x0118
-#define PLL_SSC_MUX_CONTROL			0x011C
-#define PLL_SSC_STEPSIZE_LOW_1			0x0120
-#define PLL_SSC_STEPSIZE_HIGH_1			0x0124
-#define PLL_SSC_DIV_PER_LOW_1			0x0128
-#define PLL_SSC_DIV_PER_HIGH_1			0x012C
-#define PLL_SSC_ADJPER_LOW_1			0x0130
-#define PLL_SSC_ADJPER_HIGH_1			0x0134
-#define PLL_SSC_STEPSIZE_LOW_2			0x0138
-#define PLL_SSC_STEPSIZE_HIGH_2			0x013C
-#define PLL_SSC_DIV_PER_LOW_2			0x0140
-#define PLL_SSC_DIV_PER_HIGH_2			0x0144
-#define PLL_SSC_ADJPER_LOW_2			0x0148
-#define PLL_SSC_ADJPER_HIGH_2			0x014C
-#define PLL_SSC_CONTROL				0x0150
-#define PLL_PLL_OUTDIV_RATE			0x0154
-#define PLL_PLL_LOCKDET_RATE_1			0x0158
-#define PLL_PLL_LOCKDET_RATE_2			0x015C
-#define PLL_PLL_PROP_GAIN_RATE_1		0x0160
-#define PLL_PLL_PROP_GAIN_RATE_2		0x0164
-#define PLL_PLL_BAND_SEL_RATE_1			0x0168
-#define PLL_PLL_BAND_SEL_RATE_2			0x016C
-#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x0170
-#define PLL_PLL_INT_GAIN_IFILT_BAND_2		0x0174
-#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x0178
-#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_2	0x017C
-#define PLL_PLL_FASTLOCK_EN_BAND		0x0180
-#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x0184
-#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x0188
-#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x018C
-#define PLL_PLL_LOCK_OVERRIDE			0x0190
-#define PLL_PLL_LOCK_DELAY			0x0194
-#define PLL_PLL_LOCK_MIN_DELAY			0x0198
-#define PLL_CLOCK_INVERTERS			0x019C
-#define PLL_SPARE_AND_JPC_OVERRIDES		0x01A0
-#define PLL_BIAS_CONTROL_1			0x01A4
-#define PLL_BIAS_CONTROL_2			0x01A8
-#define PLL_ALOG_OBSV_BUS_CTRL_1		0x01AC
-#define PLL_COMMON_STATUS_ONE			0x01B0
-#define PLL_COMMON_STATUS_TWO			0x01B4
-#define PLL_BAND_SEL_CAL			0x01B8
-#define PLL_ICODE_ACCUM_STATUS_LOW		0x01BC
-#define PLL_ICODE_ACCUM_STATUS_HIGH		0x01C0
-#define PLL_FD_OUT_LOW				0x01C4
-#define PLL_FD_OUT_HIGH				0x01C8
-#define PLL_ALOG_OBSV_BUS_STATUS_1		0x01CC
-#define PLL_PLL_MISC_CONFIG			0x01D0
-#define PLL_FLL_CONFIG				0x01D4
-#define PLL_FLL_FREQ_ACQ_TIME			0x01D8
-#define PLL_FLL_CODE0				0x01DC
-#define PLL_FLL_CODE1				0x01E0
-#define PLL_FLL_GAIN0				0x01E4
-#define PLL_FLL_GAIN1				0x01E8
-#define PLL_SW_RESET				0x01EC
-#define PLL_FAST_PWRUP				0x01F0
-#define PLL_LOCKTIME0				0x01F4
-#define PLL_LOCKTIME1				0x01F8
-#define PLL_DEBUG_BUS_SEL			0x01FC
-#define PLL_DEBUG_BUS0				0x0200
-#define PLL_DEBUG_BUS1				0x0204
-#define PLL_DEBUG_BUS2				0x0208
-#define PLL_DEBUG_BUS3				0x020C
-#define PLL_ANALOG_FLL_CONTROL_OVERRIDES	0x0210
-#define PLL_VCO_CONFIG				0x0214
-#define PLL_VCO_CAL_CODE1_MODE0_STATUS		0x0218
-#define PLL_VCO_CAL_CODE1_MODE1_STATUS		0x021C
-#define PLL_RESET_SM_STATUS			0x0220
-#define PLL_TDC_OFFSET				0x0224
-#define PLL_PS3_PWRDOWN_CONTROLS		0x0228
-#define PLL_PS4_PWRDOWN_CONTROLS		0x022C
-#define PLL_PLL_RST_CONTROLS			0x0230
-#define PLL_GEAR_BAND_SELECT_CONTROLS		0x0234
-#define PLL_PSM_CLK_CONTROLS			0x0238
-#define PLL_SYSTEM_MUXES_2			0x023C
-#define PLL_VCO_CONFIG_1			0x0240
-#define PLL_VCO_CONFIG_2			0x0244
-#define PLL_CLOCK_INVERTERS_1			0x0248
-#define PLL_CLOCK_INVERTERS_2			0x024C
-#define PLL_CMODE_1				0x0250
-#define PLL_CMODE_2				0x0254
-#define PLL_ANALOG_CONTROLS_FIVE_1		0x0258
-#define PLL_ANALOG_CONTROLS_FIVE_2		0x025C
-#define PLL_PERF_OPTIMIZE			0x0260
-
-/* Register Offsets from PHY base address */
-#define PHY_CMN_CLK_CFG0	0x010
-#define PHY_CMN_CLK_CFG1	0x014
-#define PHY_CMN_RBUF_CTRL	0x01C
-#define PHY_CMN_CTRL_0		0x024
-#define PHY_CMN_CTRL_3		0x030
-#define PHY_CMN_PLL_CNTRL	0x03C
-#define PHY_CMN_GLBL_DIGTOP_SPARE4 0x128
-
-/* Bit definition of SSC control registers */
-#define SSC_CENTER		BIT(0)
-#define SSC_EN			BIT(1)
-#define SSC_FREQ_UPDATE		BIT(2)
-#define SSC_FREQ_UPDATE_MUX	BIT(3)
-#define SSC_UPDATE_SSC		BIT(4)
-#define SSC_UPDATE_SSC_MUX	BIT(5)
-#define SSC_START		BIT(6)
-#define SSC_START_MUX		BIT(7)
-
-enum {
-	DSI_PLL_0,
-	DSI_PLL_1,
-	DSI_PLL_MAX
-};
-
-struct dsi_pll_regs {
-	u32 pll_prop_gain_rate;
-	u32 pll_lockdet_rate;
-	u32 decimal_div_start;
-	u32 frac_div_start_low;
-	u32 frac_div_start_mid;
-	u32 frac_div_start_high;
-	u32 pll_clock_inverters;
-	u32 ssc_stepsize_low;
-	u32 ssc_stepsize_high;
-	u32 ssc_div_per_low;
-	u32 ssc_div_per_high;
-	u32 ssc_adjper_low;
-	u32 ssc_adjper_high;
-	u32 ssc_control;
-};
-
-struct dsi_pll_config {
-	u32 ref_freq;
-	bool div_override;
-	u32 output_div;
-	bool ignore_frac;
-	bool disable_prescaler;
-	bool enable_ssc;
-	bool ssc_center;
-	u32 dec_bits;
-	u32 frac_bits;
-	u32 lock_timer;
-	u32 ssc_freq;
-	u32 ssc_offset;
-	u32 ssc_adj_per;
-	u32 thresh_cycles;
-	u32 refclk_cycles;
-};
-
-struct dsi_pll_7nm {
-	struct mdss_pll_resources *rsc;
-	struct dsi_pll_config pll_configuration;
-	struct dsi_pll_regs reg_setup;
-};
-
-static inline bool dsi_pll_7nm_is_hw_revision_v1(
-		struct mdss_pll_resources *rsc)
-{
-	return (rsc->pll_interface_type == MDSS_DSI_PLL_7NM) ? true : false;
-}
-
-static inline bool dsi_pll_7nm_is_hw_revision_v2(
-		struct mdss_pll_resources *rsc)
-{
-	return (rsc->pll_interface_type == MDSS_DSI_PLL_7NM_V2) ? true : false;
-}
-
-static inline bool dsi_pll_7nm_is_hw_revision_v4_1(
-		struct mdss_pll_resources *rsc)
-{
-	return (rsc->pll_interface_type == MDSS_DSI_PLL_7NM_V4_1) ?
-		true : false;
-}
-
-static inline int pll_reg_read(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	u32 data;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	/*
-	 * DSI PHY/PLL should be both powered on when reading PLL
-	 * registers. Since PHY power has been enabled in DSI PHY
-	 * driver, only PLL power is needed to enable here.
-	 */
-	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
-	ndelay(250);
-
-	*val = MDSS_PLL_REG_R(rsc->pll_base, reg);
-
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int pll_reg_write(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(rsc->pll_base, reg, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_read(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	*val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_write(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(rsc->phy_base, reg, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
-		unsigned int reg, unsigned int mask, unsigned int val)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	reg_val &= ~mask;
-	reg_val |= (val & mask);
-	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
-
-	return 0;
-}
-
-static inline int phy_reg_update_bits(void *context, unsigned int reg,
-				unsigned int mask, unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
-	if (!rc && rsc->slave)
-		rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static inline int pclk_mux_read_sel(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc)
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-	else
-		*val = (MDSS_PLL_REG_R(rsc->phy_base, reg) & 0x3);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-	return rc;
-}
-
-
-static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
-				unsigned int reg, unsigned int val)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
-	reg_val &= ~0x03;
-	reg_val |= val;
-
-	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
-
-	return 0;
-}
-
-static inline int pclk_mux_write_sel(void *context, unsigned int reg,
-					unsigned int val)
-{
-	int rc = 0;
-	struct mdss_pll_resources *rsc = context;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = pclk_mux_write_sel_sub(rsc, reg, val);
-	if (!rc && rsc->slave)
-		rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	/*
-	 * cache the current parent index for cases where parent
-	 * is not changing but rate is changing. In that case
-	 * clock framework won't call parent_set and hence dsiclk_sel
-	 * bit won't be programmed. e.g. dfps update use case.
-	 */
-	rsc->cached_cfg1 = val;
-
-	return rc;
-}
-
-static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_7nm plls[DSI_PLL_MAX];
-
-static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
-{
-	u32 reg;
-	struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
-
-	if (!rsc)
-		return;
-
-	/* Only DSI PLL0 can act as a master */
-	if (rsc->index != DSI_PLL_0)
-		return;
-
-	/* default configuration: source is either internal or ref clock */
-	rsc->slave = NULL;
-
-	if (!orsc) {
-		pr_warn("slave PLL unavilable, assuming standalone config\n");
-		return;
-	}
-
-	/* check to see if the source of DSI1 PLL bitclk is set to external */
-	reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
-	reg &= (BIT(2) | BIT(3));
-	if (reg == 0x04)
-		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
-
-	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
-}
-
-static void dsi_pll_setup_config(struct dsi_pll_7nm *pll,
-				 struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-
-	config->ref_freq = 19200000;
-	config->output_div = 1;
-	config->dec_bits = 8;
-	config->frac_bits = 18;
-	config->lock_timer = 64;
-	config->ssc_freq = 31500;
-	config->ssc_offset = 5000;
-	config->ssc_adj_per = 2;
-	config->thresh_cycles = 32;
-	config->refclk_cycles = 256;
-
-	config->div_override = false;
-	config->ignore_frac = false;
-	config->disable_prescaler = false;
-	config->enable_ssc = rsc->ssc_en;
-	config->ssc_center = rsc->ssc_center;
-
-	if (config->enable_ssc) {
-		if (rsc->ssc_freq)
-			config->ssc_freq = rsc->ssc_freq;
-		if (rsc->ssc_ppm)
-			config->ssc_offset = rsc->ssc_ppm;
-	}
-
-	dsi_pll_config_slave(rsc);
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll,
-				  struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u64 fref = rsc->vco_ref_clk_rate;
-	u64 pll_freq;
-	u64 divider;
-	u64 dec, dec_multiple;
-	u32 frac;
-	u64 multiplier;
-
-	pll_freq = rsc->vco_current_rate;
-
-	if (config->disable_prescaler)
-		divider = fref;
-	else
-		divider = fref * 2;
-
-	multiplier = 1 << config->frac_bits;
-	dec_multiple = div_u64(pll_freq * multiplier, divider);
-	div_u64_rem(dec_multiple, multiplier, &frac);
-
-	dec = div_u64(dec_multiple, multiplier);
-
-	switch (rsc->pll_interface_type) {
-	case MDSS_DSI_PLL_7NM:
-		regs->pll_clock_inverters = 0x0;
-		break;
-	case MDSS_DSI_PLL_7NM_V2:
-		regs->pll_clock_inverters = 0x28;
-		break;
-	case MDSS_DSI_PLL_7NM_V4_1:
-	default:
-		if (pll_freq <= 1000000000)
-			regs->pll_clock_inverters = 0xA0;
-		else if (pll_freq <= 2500000000)
-			regs->pll_clock_inverters = 0x20;
-		else if (pll_freq <= 3020000000)
-			regs->pll_clock_inverters = 0x00;
-		else
-			regs->pll_clock_inverters = 0x40;
-		break;
-	}
-
-	regs->pll_lockdet_rate = config->lock_timer;
-	regs->decimal_div_start = dec;
-	regs->frac_div_start_low = (frac & 0xff);
-	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-	regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
-
-static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll,
-		  struct mdss_pll_resources *rsc)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u32 ssc_per;
-	u32 ssc_mod;
-	u64 ssc_step_size;
-	u64 frac;
-
-	if (!config->enable_ssc) {
-		pr_debug("SSC not enabled\n");
-		return;
-	}
-
-	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-	ssc_per -= ssc_mod;
-
-	frac = regs->frac_div_start_low |
-			(regs->frac_div_start_mid << 8) |
-			(regs->frac_div_start_high << 16);
-	ssc_step_size = regs->decimal_div_start;
-	ssc_step_size *= (1 << config->frac_bits);
-	ssc_step_size += frac;
-	ssc_step_size *= config->ssc_offset;
-	ssc_step_size *= (config->ssc_adj_per + 1);
-	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-	regs->ssc_div_per_low = ssc_per & 0xFF;
-	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
-			regs->decimal_div_start, frac, config->frac_bits);
-	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
-
-static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll,
-		struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-
-	if (pll->pll_configuration.enable_ssc) {
-		pr_debug("SSC is enabled\n");
-
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
-				regs->ssc_stepsize_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
-				regs->ssc_stepsize_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
-				regs->ssc_div_per_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
-				regs->ssc_div_per_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1,
-				regs->ssc_adjper_low);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1,
-				regs->ssc_adjper_high);
-		MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
-				SSC_EN | regs->ssc_control);
-	}
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll,
-				  struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-	u64 vco_rate = rsc->vco_current_rate;
-
-	switch (rsc->pll_interface_type) {
-	case MDSS_DSI_PLL_7NM:
-	case MDSS_DSI_PLL_7NM_V2:
-		MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
-		MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
-		break;
-	case MDSS_DSI_PLL_7NM_V4_1:
-	default:
-		if (vco_rate < 3100000000)
-			MDSS_PLL_REG_W(pll_base,
-					PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
-		else
-			MDSS_PLL_REG_W(pll_base,
-					PLL_ANALOG_CONTROLS_FIVE_1, 0x03);
-
-		if (vco_rate < 1520000000)
-			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x08);
-		else if (vco_rate < 2990000000)
-			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x01);
-		else
-			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
-
-		break;
-	}
-
-	if (dsi_pll_7nm_is_hw_revision_v1(rsc))
-		MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x21);
-
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE, 0x01);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
-	MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-	MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0xc0);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
-	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
-	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x2f);
-	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x2a);
-
-	switch (rsc->pll_interface_type) {
-	case MDSS_DSI_PLL_7NM:
-		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x30);
-		break;
-	case MDSS_DSI_PLL_7NM_V2:
-		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x22);
-		break;
-	case MDSS_DSI_PLL_7NM_V4_1:
-	default:
-		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3F);
-		break;
-	}
-
-	if (dsi_pll_7nm_is_hw_revision_v4_1(rsc))
-		MDSS_PLL_REG_W(pll_base, PLL_PERF_OPTIMIZE, 0x22);
-}
-
-static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x0000003F);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x00000080);
-	MDSS_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_PSM_CTRL, 0x00000020);
-	MDSS_PLL_REG_W(pll_base, PLL_RSM_CTRL, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_TUNE_MAP, 0x00000002);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_CNTRL, 0x0000001C);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x00000002);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x00000020);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0x000000FF);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x0000000A);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x00000025);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0x000000BA);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x0000004F);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0000000A);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0000000C);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_THRESH, 0x00000020);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_LOW, 0x000000FF);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_HIGH, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_LOW, 0x00000046);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_GAIN, 0x00000054);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x00000040);
-	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x00000004);
-	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x00000008);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x00000008);
-	MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x00000003);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x00000040);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_2, 0x00000040);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0000000C);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_2, 0x0000000A);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0x000000C0);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x00000054);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_2, 0x00000054);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x0000004C);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_2, 0x0000004C);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_FASTLOCK_EN_BAND, 0x00000003);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MID, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x00000080);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x00000006);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x00000019);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x00000000);
-
-	if (dsi_pll_7nm_is_hw_revision_v1(rsc))
-		MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x00000066);
-	else
-		MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x00000040);
-
-	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x00000020);
-	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_COMMON_STATUS_ONE, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_COMMON_STATUS_TWO, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FD_OUT_LOW, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FD_OUT_HIGH, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_STATUS_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_MISC_CONFIG, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_CONFIG, 0x00000002);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_FREQ_ACQ_TIME, 0x00000011);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_CODE0, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_CODE1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_GAIN0, 0x00000080);
-	MDSS_PLL_REG_W(pll_base, PLL_FLL_GAIN1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_SW_RESET, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_FAST_PWRUP, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_LOCKTIME0, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_LOCKTIME1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS_SEL, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS0, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS3, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_FLL_CONTROL_OVERRIDES, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE0_STATUS, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE1_STATUS, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_RESET_SM_STATUS, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_TDC_OFFSET, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_PS3_PWRDOWN_CONTROLS, 0x0000001D);
-	MDSS_PLL_REG_W(pll_base, PLL_PS4_PWRDOWN_CONTROLS, 0x0000001C);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_RST_CONTROLS, 0x000000FF);
-	MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x00000022);
-	MDSS_PLL_REG_W(pll_base, PLL_PSM_CLK_CONTROLS, 0x00000009);
-	MDSS_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1, 0x00000040);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_2, 0x00000000);
-	MDSS_PLL_REG_W(pll_base, PLL_CMODE_1, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_CMODE_2, 0x00000010);
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_2, 0x00000003);
-
-}
-
-static void dsi_pll_commit(struct dsi_pll_7nm *pll,
-			   struct mdss_pll_resources *rsc)
-{
-	void __iomem *pll_base = rsc->pll_base;
-	struct dsi_pll_regs *reg = &pll->reg_setup;
-
-	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
-	MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
-		       reg->decimal_div_start);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
-		       reg->frac_div_start_low);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
-		       reg->frac_div_start_mid);
-	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
-		       reg->frac_div_start_high);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
-	MDSS_PLL_REG_W(pll_base, PLL_CMODE_1, 0x10);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1,
-			reg->pll_clock_inverters);
-}
-
-static int vco_7nm_set_rate(struct clk_hw *hw, unsigned long rate,
-			unsigned long parent_rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *rsc = vco->priv;
-	struct dsi_pll_7nm *pll;
-
-	if (!rsc) {
-		pr_err("pll resource not found\n");
-		return -EINVAL;
-	}
-
-	if (rsc->pll_on)
-		return 0;
-
-	pll = rsc->priv;
-	if (!pll) {
-		pr_err("pll configuration not found\n");
-		return -EINVAL;
-	}
-
-	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
-
-	rsc->vco_current_rate = rate;
-	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
-		       rsc->index, rc);
-		return rc;
-	}
-
-	dsi_pll_init_val(rsc);
-
-	dsi_pll_setup_config(pll, rsc);
-
-	dsi_pll_calc_dec_frac(pll, rsc);
-
-	dsi_pll_calc_ssc(pll, rsc);
-
-	dsi_pll_commit(pll, rsc);
-
-	dsi_pll_config_hzindep_reg(pll, rsc);
-
-	dsi_pll_ssc_commit(pll, rsc);
-
-	/* flush, ensure all register writes are done*/
-	wmb();
-
-	mdss_pll_resource_enable(rsc, false);
-
-	return 0;
-}
-
-static int dsi_pll_7nm_lock_status(struct mdss_pll_resources *pll)
-{
-	int rc;
-	u32 status;
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-
-	rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
-				       status,
-				       ((status & BIT(0)) > 0),
-				       delay_us,
-				       timeout_us);
-	if (rc && !pll->handoff_resources)
-		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
-			pll->index, status);
-
-	return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
-{
-	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
-	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
-	ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
-{
-	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
-	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
-	ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
-{
-	u32 data;
-
-	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
-}
-
-static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
-{
-	u32 data;
-
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_3, 0x04);
-
-	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
-
-	/* Turn on clk_en_sel bit prior to resync toggle fifo */
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5) |
-								BIT(4)));
-}
-
-static void dsi_pll_phy_dig_reset(struct mdss_pll_resources *rsc)
-{
-	/*
-	 * Reset the PHY digital domain. This would be needed when
-	 * coming out of a CX or analog rail power collapse while
-	 * ensuring that the pads maintain LP00 or LP11 state
-	 */
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
-	wmb(); /* Ensure that the reset is asserted */
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
-	wmb(); /* Ensure that the reset is deasserted */
-}
-
-static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
-{
-	int rc;
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	dsi_pll_enable_pll_bias(rsc);
-	if (rsc->slave)
-		dsi_pll_enable_pll_bias(rsc->slave);
-
-	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
-	if (rsc->slave)
-		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
-	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
-
-	/* Start PLL */
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
-
-	/*
-	 * ensure all PLL configurations are written prior to checking
-	 * for PLL lock.
-	 */
-	wmb();
-
-	/* Check for PLL lock */
-	rc = dsi_pll_7nm_lock_status(rsc);
-	if (rc) {
-		pr_err("PLL(%d) lock failed\n", rsc->index);
-		goto error;
-	}
-
-	rsc->pll_on = true;
-
-	/*
-	 * assert power on reset for PHY digital in case the PLL is
-	 * enabled after CX of analog domain power collapse. This needs
-	 * to be done before enabling the global clk.
-	 */
-	dsi_pll_phy_dig_reset(rsc);
-	if (rsc->slave)
-		dsi_pll_phy_dig_reset(rsc->slave);
-
-	dsi_pll_enable_global_clk(rsc);
-	if (rsc->slave)
-		dsi_pll_enable_global_clk(rsc->slave);
-
-error:
-	return rc;
-}
-
-static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
-{
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
-	dsi_pll_disable_pll_bias(rsc);
-}
-
-static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
-{
-	struct mdss_pll_resources *rsc = vco->priv;
-
-	if (!rsc->pll_on &&
-	    mdss_pll_resource_enable(rsc, true)) {
-		pr_err("failed to enable pll (%d) resources\n", rsc->index);
-		return;
-	}
-
-	rsc->handoff_resources = false;
-
-	pr_debug("stop PLL (%d)\n", rsc->index);
-
-	/*
-	 * To avoid any stray glitches while
-	 * abruptly powering down the PLL
-	 * make sure to gate the clock using
-	 * the clock enable bit before powering
-	 * down the PLL
-	 */
-	dsi_pll_disable_global_clk(rsc);
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
-	dsi_pll_disable_sub(rsc);
-	if (rsc->slave) {
-		dsi_pll_disable_global_clk(rsc->slave);
-		dsi_pll_disable_sub(rsc->slave);
-	}
-	/* flush, ensure all register writes are done*/
-	wmb();
-	rsc->pll_on = false;
-}
-
-long vco_7nm_round_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	*parent_rate = rrate;
-
-	return rrate;
-}
-
-static void vco_7nm_unprepare(struct clk_hw *hw)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("dsi pll resources not available\n");
-		return;
-	}
-
-	/*
-	 * During unprepare in continuous splash use case we want driver
-	 * to pick all dividers instead of retaining bootloader configurations.
-	 */
-	if (!pll->handoff_resources) {
-		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
-		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
-		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
-
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
-	}
-
-	/*
-	 * When continuous splash screen feature is enabled, we need to cache
-	 * the mux configuration for the pixel_clk_src mux clock. The clock
-	 * framework does not call back to re-configure the mux value if it is
-	 * does not change.For such usecases, we need to ensure that the cached
-	 * value is programmed prior to PLL being locked
-	 */
-	if (pll->handoff_resources)
-		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
-
-	dsi_pll_disable(vco);
-	mdss_pll_resource_enable(pll, false);
-}
-
-static int vco_7nm_prepare(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	/* Skip vco recalculation for continuous splash use case */
-	if (pll->handoff_resources)
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll (%d) resource, rc=%d\n",
-		       pll->index, rc);
-		return rc;
-	}
-
-	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
-		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
-				pll->vco_cached_rate);
-		if (rc) {
-			pr_err("pll(%d) set_rate failed, rc=%d\n",
-			       pll->index, rc);
-			mdss_pll_resource_enable(pll, false);
-			return rc;
-		}
-		pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
-			pll->cached_cfg1);
-		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
-					pll->cached_cfg0);
-		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
-					pll->cached_outdiv);
-	}
-
-	rc = dsi_pll_enable(vco);
-	if (rc) {
-		mdss_pll_resource_enable(pll, false);
-		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
-		return rc;
-	}
-
-	return rc;
-}
-
-static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw,
-						unsigned long parent_rate)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
-	struct mdss_pll_resources *pll = vco->priv;
-	int rc;
-	u64 ref_clk = vco->ref_clk_rate;
-	u64 vco_rate = 0;
-	u64 multiplier;
-	u32 frac;
-	u32 dec;
-	u32 outdiv;
-	u64 pll_freq, tmp64;
-
-	if (!vco->priv) {
-		pr_err("vco priv is null\n");
-		return 0;
-	}
-
-	/*
-	 * Calculate the vco rate from HW registers only for handoff cases.
-	 * For other cases where a vco_10nm_set_rate() has already been
-	 * called, just return the rate that was set earlier. This is due
-	 * to the fact that recalculating VCO rate requires us to read the
-	 * correct value of the pll_out_div divider clock, which is only set
-	 * afterwards.
-	 */
-	if (pll->vco_current_rate != 0) {
-		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
-		return pll->vco_current_rate;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll(%d) resource, rc=%d\n",
-		       pll->index, rc);
-		return 0;
-	}
-
-	pll->handoff_resources = true;
-	if (dsi_pll_7nm_lock_status(pll)) {
-		pr_debug("PLL not enabled\n");
-		pll->handoff_resources = false;
-		goto end;
-	}
-
-	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
-	dec &= 0xFF;
-
-	frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
-		  0xFF) <<
-		8);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
-		  0x3) <<
-		16);
-
-	/* OUTDIV_1:0 field is (log(outdiv, 2)) */
-	outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
-	outdiv &= 0x3;
-	outdiv = 1 << outdiv;
-
-	/*
-	 * TODO:
-	 *	1. Assumes prescaler is disabled
-	 *	2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
-	 **/
-	multiplier = 1 << 18;
-	pll_freq = dec * (ref_clk * 2);
-	tmp64 = (ref_clk * 2 * frac);
-	pll_freq += div_u64(tmp64, multiplier);
-
-	vco_rate = div_u64(pll_freq, outdiv);
-
-	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
-		 dec, frac, outdiv, vco_rate);
-
-end:
-	(void)mdss_pll_resource_enable(pll, false);
-	return (unsigned long)vco_rate;
-}
-
-static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	*div = (reg_val & 0xF0) >> 4;
-
-	/**
-	 * Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	reg_val &= ~0xF0;
-	reg_val |= (div << 4);
-	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-	pixel_clk_set_div_sub(pll, div);
-	if (pll->slave)
-		pixel_clk_set_div_sub(pll->slave, div);
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return 0;
-}
-
-static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	*div = (reg_val & 0x0F);
-
-	/**
-	 *Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
-{
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
-	reg_val &= ~0x0F;
-	reg_val |= div;
-	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
-{
-	int rc;
-	struct mdss_pll_resources *rsc = context;
-	struct dsi_pll_8998 *pll;
-
-	if (!rsc) {
-		pr_err("pll resource not found\n");
-		return -EINVAL;
-	}
-
-	pll = rsc->priv;
-	if (!pll) {
-		pr_err("pll configuration not found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(rsc, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-
-	bit_clk_set_div_sub(rsc, div);
-	/* For slave PLL, this divider always should be set to 1 */
-	if (rsc->slave)
-		bit_clk_set_div_sub(rsc->slave, 1);
-
-	(void)mdss_pll_resource_enable(rsc, false);
-
-	return rc;
-}
-
-static struct regmap_config dsi_pll_7nm_config = {
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-	.max_register = 0x7c0,
-};
-
-static struct regmap_bus pll_regmap_bus = {
-	.reg_write = pll_reg_write,
-	.reg_read = pll_reg_read,
-};
-
-static struct regmap_bus pclk_src_mux_regmap_bus = {
-	.reg_read = pclk_mux_read_sel,
-	.reg_write = pclk_mux_write_sel,
-};
-
-static struct regmap_bus pclk_src_regmap_bus = {
-	.reg_write = pixel_clk_set_div,
-	.reg_read = pixel_clk_get_div,
-};
-
-static struct regmap_bus bitclk_src_regmap_bus = {
-	.reg_write = bit_clk_set_div,
-	.reg_read = bit_clk_get_div,
-};
-
-static const struct clk_ops clk_ops_vco_7nm = {
-	.recalc_rate = vco_7nm_recalc_rate,
-	.set_rate = vco_7nm_set_rate,
-	.round_rate = vco_7nm_round_rate,
-	.prepare = vco_7nm_prepare,
-	.unprepare = vco_7nm_unprepare,
-};
-
-static struct regmap_bus mdss_mux_regmap_bus = {
-	.reg_write = mdss_set_mux_sel,
-	.reg_read = mdss_get_mux_sel,
-};
-
-/*
- * Clock tree for generating DSI byte and pclk.
- *
- *
- *                  +---------------+
- *                  |    vco_clk    |
- *                  +-------+-------+
- *                          |
- *                          |
- *                  +---------------+
- *                  |  pll_out_div  |
- *                  |  DIV(1,2,4,8) |
- *                  +-------+-------+
- *                          |
- *                          +-----------------------------+--------+
- *                          |                             |        |
- *                  +-------v-------+                     |        |
- *                  |  bitclk_src   |                     |        |
- *                  |  DIV(1..15)   |                     |        |
- *                  +-------+-------+                     |        |
- *                          |                             |        |
- *                          +----------+---------+        |        |
- *   Shadow Path            |          |         |        |        |
- *       +          +-------v-------+  |  +------v------+ | +------v-------+
- *       |          |  byteclk_src  |  |  |post_bit_div | | |post_vco_div  |
- *       |          |  DIV(8)       |  |  |DIV (2)      | | |DIV(4)        |
- *       |          +-------+-------+  |  +------+------+ | +------+-------+
- *       |                  |          |         |      | |        |
- *       |                  |          |         +------+ |        |
- *       |                  |          +-------------+  | |   +----+
- *       |         +--------+                        |  | |   |
- *       |         |                               +-v--v-v---v------+
- *     +-v---------v----+                           \  pclk_src_mux /
- *     \  byteclk_mux /                              \             /
- *      \            /                                +-----+-----+
- *       +----+-----+                                       |        Shadow Path
- *            |                                             |             +
- *            v                                       +-----v------+      |
- *       dsi_byte_clk                                 |  pclk_src  |      |
- *                                                    | DIV(1..15) |      |
- *                                                    +-----+------+      |
- *                                                          |             |
- *                                                          |             |
- *                                                          +--------+    |
- *                                                                   |    |
- *                                                               +---v----v----+
- *                                                                \  pclk_mux /
- *                                                                 \         /
- *                                                                  +---+---+
- *                                                                      |
- *                                                                      |
- *                                                                      v
- *                                                                   dsi_pclk
- *
- */
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1000000000UL,
-	.max_rate = 3500000000UL,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_vco_clk",
-			.parent_names = (const char *[]){"bi_tcxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_7nm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1000000000UL,
-	.max_rate = 3500000000UL,
-	.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_vco_clk",
-			.parent_names = (const char *[]){"bi_tcxo"},
-			.num_parents = 1,
-			.ops = &clk_ops_vco_7nm,
-			.flags = CLK_GET_RATE_NOCACHE,
-	},
-};
-
-static struct clk_regmap_div dsi0pll_pll_out_div = {
-	.reg = PLL_PLL_OUTDIV_RATE,
-	.shift = 0,
-	.width = 2,
-	.flags = CLK_DIVIDER_POWER_OF_TWO,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pll_out_div",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_pll_out_div = {
-	.reg = PLL_PLL_OUTDIV_RATE,
-	.shift = 0,
-	.width = 2,
-	.flags = CLK_DIVIDER_POWER_OF_TWO,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pll_out_div",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_bitclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_bitclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_post_vco_div = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_post_vco_div",
-		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_post_vco_div = {
-	.div = 4,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_post_vco_div",
-		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_byteclk_src = {
-	.div = 8,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_byteclk_src",
-		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_byteclk_src = {
-	.div = 8,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_byteclk_src",
-		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi0pll_post_bit_div = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi0pll_post_bit_div",
-		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor dsi1pll_post_bit_div = {
-	.div = 2,
-	.mult = 1,
-	.hw.init = &(struct clk_init_data){
-		.name = "dsi1pll_post_bit_div",
-		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
-		.num_parents = 1,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_byteclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_byteclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
-	.reg = PHY_CMN_CLK_CFG1,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi0pll_bitclk_src",
-					"dsi0pll_post_bit_div",
-					"dsi0pll_pll_out_div",
-					"dsi0pll_post_vco_div"},
-			.num_parents = 4,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
-	.reg = PHY_CMN_CLK_CFG1,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi1pll_bitclk_src",
-					"dsi1pll_post_bit_div",
-					"dsi1pll_pll_out_div",
-					"dsi1pll_post_vco_div"},
-			.num_parents = 4,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi0pll_pclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_pclk_src",
-			.parent_names = (const char *[]){
-					"dsi0pll_pclk_src_mux"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_div dsi1pll_pclk_src = {
-	.shift = 0,
-	.width = 4,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_pclk_src",
-			.parent_names = (const char *[]){
-					"dsi1pll_pclk_src_mux"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi0pll_pclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_regmap_mux dsi1pll_pclk_mux = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
-			.ops = &clk_regmap_mux_closest_ops,
-		},
-	},
-};
-
-static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
-	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
-	[PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
-	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
-	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
-	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
-	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
-	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
-	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
-	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
-	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
-	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
-	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
-	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
-	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
-	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
-	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
-	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
-	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
-	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
-	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
-};
-
-int dsi_pll_clock_register_7nm(struct platform_device *pdev,
-				  struct mdss_pll_resources *pll_res)
-{
-	int rc = 0, ndx, i;
-	struct clk *clk;
-	struct clk_onecell_data *clk_data;
-	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_7nm);
-	struct regmap *rmap;
-
-	ndx = pll_res->index;
-
-	if (ndx >= DSI_PLL_MAX) {
-		pr_err("pll index(%d) NOT supported\n", ndx);
-		return -EINVAL;
-	}
-
-	pll_rsc_db[ndx] = pll_res;
-	plls[ndx].rsc = pll_res;
-	pll_res->priv = &plls[ndx];
-	pll_res->vco_delay = VCO_DELAY_USEC;
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks)
-		return -ENOMEM;
-
-	clk_data->clk_num = num_clks;
-
-	/* Establish client data */
-	if (ndx == 0) {
-
-		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_pll_out_div.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_bitclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_pclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_pclk_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi0pll_byteclk_mux.clkr.regmap = rmap;
-
-		dsi0pll_vco_clk.priv = pll_res;
-
-		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
-			dsi0pll_vco_clk.min_rate = 600000000;
-			dsi0pll_vco_clk.max_rate = 5000000000;
-		}
-
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_7nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-							pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-
-
-	} else {
-		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_pll_out_div.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_bitclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_pclk_src.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_pclk_mux.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
-				pll_res, &dsi_pll_7nm_config);
-		dsi1pll_byteclk_mux.clkr.regmap = rmap;
-		dsi1pll_vco_clk.priv = pll_res;
-
-		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
-			dsi1pll_vco_clk.min_rate = 600000000;
-			dsi1pll_vco_clk.max_rate = 5000000000;
-		}
-
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
-			clk = devm_clk_register(&pdev->dev,
-						mdss_dsi_pllcc_7nm[i]);
-			if (IS_ERR(clk)) {
-				pr_err("clk registration failed for DSI clock:%d\n",
-						pll_res->index);
-				rc = -EINVAL;
-				goto clk_register_fail;
-			}
-			clk_data->clks[i] = clk;
-
-		}
-
-		rc = of_clk_add_provider(pdev->dev.of_node,
-				of_clk_src_onecell_get, clk_data);
-	}
-	if (!rc) {
-		pr_info("Registered DSI PLL ndx=%d, clocks successfully\n",
-				ndx);
-
-		return rc;
-	}
-clk_register_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c
deleted file mode 100644
index c8d5f1e..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-8996.h"
-
-#define DSI_PLL_POLL_MAX_READS                  15
-#define DSI_PLL_POLL_TIMEOUT_US                 1000
-#define MSM8996_DSI_PLL_REVISION_2		2
-
-#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
-
-int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
-{
-	return 0;
-}
-
-int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
-{
-	return 0;
-}
-
-int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
-{
-	return 0;
-}
-
-int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
-{
-	return 0;
-}
-
-static int mdss_pll_read_stored_trim_codes(
-		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
-{
-	int i;
-	int rc = 0;
-	bool found = false;
-
-	if (!dsi_pll_res->dfps) {
-		rc = -EINVAL;
-		goto end_read;
-	}
-
-	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
-		struct dfps_codes_info *codes_info =
-			&dsi_pll_res->dfps->codes_dfps[i];
-
-		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
-			codes_info->is_valid, codes_info->frame_rate,
-			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
-			codes_info->pll_codes.pll_codes_2);
-
-		if (vco_clk_rate != codes_info->clk_rate &&
-				codes_info->is_valid)
-			continue;
-
-		dsi_pll_res->cache_pll_trim_codes[0] =
-			codes_info->pll_codes.pll_codes_1;
-		dsi_pll_res->cache_pll_trim_codes[1] =
-			codes_info->pll_codes.pll_codes_2;
-		found = true;
-		break;
-	}
-
-	if (!found) {
-		rc = -EINVAL;
-		goto end_read;
-	}
-
-	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
-			dsi_pll_res->cache_pll_trim_codes[0],
-			dsi_pll_res->cache_pll_trim_codes[1]);
-
-end_read:
-	return rc;
-}
-
-int post_n1_div_set_div(struct div_clk *clk, int div)
-{
-	struct mdss_pll_resources *pll = clk->priv;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	int rc;
-	u32 n1div = 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	/*
-	 * vco rate = bit_clk * postdiv * n1div
-	 * vco range from 1300 to 2600 Mhz
-	 * postdiv = 1
-	 * n1div = 1 to 15
-	 * n1div = roundup(1300Mhz / bit_clk)
-	 * support bit_clk above 86.67Mhz
-	 */
-
-	/* this is for vco/bit clock */
-	pout->pll_postdiv = 1;	/* fixed, divided by 1 */
-	pout->pll_n1div  = div;
-
-	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n1div &= ~0xf;
-	n1div |= (div & 0xf);
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
-	/* ensure n1 divider is programed */
-	wmb();
-	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
-			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return 0;
-}
-
-int post_n1_div_get_div(struct div_clk *clk)
-{
-	u32  div;
-	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/*
-	 * postdiv = 1/2/4/8
-	 * n1div = 1 - 15
-	 * fot the time being, assume postdiv = 1
-	 */
-
-	div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	div &= 0xF;
-	pr_debug("n1 div = %d\n", div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return div;
-}
-
-int n2_div_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	u32 n2div;
-	struct mdss_pll_resources *pll = clk->priv;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	struct mdss_pll_resources *slave;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	pout = &pdb->out;
-
-	/* this is for pixel clock */
-	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n2div &= ~0xf0;	/* bits 4 to 7 */
-	n2div |= (div << 4);
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
-
-	/* commit slave if split display is enabled */
-	slave = pll->slave;
-	if (slave)
-		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
-
-	pout->pll_n2div = div;
-
-	/* set dsiclk_sel=1 so that n2div *= 2 */
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
-	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-int shadow_n2_div_set_div(struct div_clk *clk, int div)
-{
-	struct mdss_pll_resources *pll = clk->priv;
-	struct dsi_pll_db *pdb;
-	struct dsi_pll_output *pout;
-	u32 data;
-
-	pdb = pll->priv;
-	pout = &pdb->out;
-
-	pout->pll_n2div = div;
-
-	data = (pout->pll_n1div | (pout->pll_n2div << 4));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
-			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
-			data, 1);
-	return 0;
-}
-
-int n2_div_get_div(struct div_clk *clk)
-{
-	int rc;
-	u32 n2div;
-	struct mdss_pll_resources *pll = clk->priv;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll=%d resources\n",
-						pll->index);
-		return rc;
-	}
-
-	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
-	n2div >>= 4;
-	n2div &= 0x0f;
-
-	mdss_pll_resource_enable(pll, false);
-
-	pr_debug("ndx=%d div=%d\n", pll->index, n2div);
-
-	return n2div;
-}
-
-static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((pll->pll_base +
-			DSIPHY_PLL_RESET_SM_READY_STATUS),
-			status,
-			((status & BIT(5)) > 0),
-			DSI_PLL_POLL_MAX_READS,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
-				pll->index, status);
-		pll_locked = false;
-	} else if (readl_poll_timeout_atomic((pll->pll_base +
-				DSIPHY_PLL_RESET_SM_READY_STATUS),
-				status,
-				((status & BIT(0)) > 0),
-				DSI_PLL_POLL_MAX_READS,
-				DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
-				pll->index, status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-static void dsi_pll_start_8996(void __iomem *pll_base)
-{
-	pr_debug("start PLL at base=%p\n", pll_base);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
-}
-
-static void dsi_pll_stop_8996(void __iomem *pll_base)
-{
-	pr_debug("stop PLL at base=%p\n", pll_base);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
-}
-
-int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
-{
-	int rc = 0;
-
-	if (!pll) {
-		pr_err("Invalid PLL resources\n");
-		return -EINVAL;
-	}
-
-	dsi_pll_start_8996(pll->pll_base);
-
-	/*
-	 * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
-	 * enabled at mdss_dsi_8996_phy_config()
-	 */
-
-	if (!pll_is_pll_locked_8996(pll)) {
-		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
-		rc = -EINVAL;
-		goto init_lock_err;
-	}
-
-	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
-
-init_lock_err:
-	return rc;
-}
-
-static int dsi_pll_enable(struct clk *c)
-{
-	int i, rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	/* Try all enable sequences until one succeeds */
-	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
-		rc = vco->pll_enable_seqs[i](pll);
-		pr_debug("DSI PLL %s after sequence #%d\n",
-			rc ? "unlocked" : "locked", i + 1);
-		if (!rc)
-			break;
-	}
-
-	if (rc)
-		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
-	else
-		pll->pll_on = true;
-
-	return rc;
-}
-
-static void dsi_pll_disable(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct mdss_pll_resources *slave;
-
-	if (!pll->pll_on &&
-		mdss_pll_resource_enable(pll, true)) {
-		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
-		return;
-	}
-
-	pll->handoff_resources = false;
-	slave = pll->slave;
-
-	dsi_pll_stop_8996(pll->pll_base);
-
-	mdss_pll_resource_enable(pll, false);
-
-	pll->pll_on = false;
-
-	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
-}
-
-static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	pdb->in.fref = 19200000;	/* 19.2 Mhz*/
-	pdb->in.fdata = 0;		/* bit clock rate */
-	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
-	pdb->in.ssc_en = pll->ssc_en;		/* 1, reg: 0x0494, bit 0 */
-	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
-
-	/* fixed  input */
-	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
-	pdb->in.vco_measure_time = 5;	/* 5, unknown */
-	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
-	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
-	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
-	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
-	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
-	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
-	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
-	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
-	pdb->in.ssc_freq = pll->ssc_freq;
-
-	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
-	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
-	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
-	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
-	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
-	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
-	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
-	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
-	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
-	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
-	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
-	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
-	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
-	pdb->in.pll_iptat_trim = 7;
-	pdb->in.pll_c3ctrl = 2;		/* 2 */
-	pdb->in.pll_r3ctrl = 1;		/* 1 */
-}
-
-static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
-				struct dsi_pll_db *pdb)
-{
-	u32 period, ssc_period;
-	u32 ref, rem;
-	s64 step_size;
-
-	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
-		pll->vco_current_rate, pll->vco_ref_clk_rate);
-
-	ssc_period = pdb->in.ssc_freq / 500;
-	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
-	ssc_period  = CEIL(period, ssc_period);
-	ssc_period -= 1;
-	pdb->out.ssc_period = ssc_period;
-
-	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
-	pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
-
-	step_size = (u32)pll->vco_current_rate;
-	ref = pll->vco_ref_clk_rate;
-	ref /= 1000;
-	step_size = div_s64(step_size, ref);
-	step_size <<= 20;
-	step_size = div_s64(step_size, 1000);
-	step_size *= pdb->in.ssc_spread;
-	step_size = div_s64(step_size, 1000);
-	step_size *= (pdb->in.ssc_adj_period + 1);
-
-	rem = 0;
-	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
-	if (rem)
-		step_size++;
-
-	pr_debug("%s: step_size=%lld\n", __func__, step_size);
-
-	step_size &= 0x0ffff;	/* take lower 16 bits */
-
-	pdb->out.ssc_step_size = step_size;
-}
-
-static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
-				struct dsi_pll_db *pdb)
-{
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	s64 multiplier = BIT(20);
-	s64 dec_start_multiple, dec_start, pll_comp_val;
-	s32 duration, div_frac_start;
-	s64 vco_clk_rate = pll->vco_current_rate;
-	s64 fref = pll->vco_ref_clk_rate;
-
-	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
-				vco_clk_rate, fref);
-
-	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
-	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
-	dec_start = div_s64(dec_start_multiple, multiplier);
-
-	pout->dec_start = (u32)dec_start;
-	pout->div_frac_start = div_frac_start;
-
-	if (pin->plllock_cnt == 0)
-		duration = 1024;
-	else if (pin->plllock_cnt == 1)
-		duration = 256;
-	else if (pin->plllock_cnt == 2)
-		duration = 128;
-	else
-		duration = 32;
-
-	pll_comp_val =  duration * dec_start_multiple;
-	pll_comp_val =  div_s64(pll_comp_val, multiplier);
-	do_div(pll_comp_val, 10);
-
-	pout->plllock_cmp = (u32)pll_comp_val;
-
-	pout->pll_txclk_en = 1;
-	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
-		pout->cmn_ldo_cntrl = 0x3c;
-	else
-		pout->cmn_ldo_cntrl = 0x1c;
-}
-
-static u32 pll_8996_kvco_slop(u32 vrate)
-{
-	u32 slop = 0;
-
-	if (vrate > 1300000000UL && vrate <= 1800000000UL)
-		slop =  600;
-	else if (vrate > 1800000000UL && vrate < 2300000000UL)
-		slop = 400;
-	else if (vrate > 2300000000UL && vrate < 2600000000UL)
-		slop = 280;
-
-	return slop;
-}
-
-static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
-			 s64 vco_clk_rate, s64 fref)
-{
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	s64 data;
-	u32 cnt;
-
-	data = fref * pin->vco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 2;
-	pout->pll_vco_div_ref = data;
-
-	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is Mhz */
-	data *= pin->vco_measure_time;
-	do_div(data, 10);
-	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
-
-	data = fref * pin->kvco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 1;
-	pout->pll_kvco_div_ref = data;
-
-	cnt = pll_8996_kvco_slop(vco_clk_rate);
-	cnt *= 2;
-	do_div(cnt, 100);
-	cnt *= pin->kvco_measure_time;
-	pout->pll_kvco_count = cnt;
-
-	pout->pll_misc1 = 16;
-	pout->pll_resetsm_cntrl = 48;
-	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
-	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
-	pout->pll_kvco_code = 0;
-}
-
-static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	data = pin->ssc_adj_period;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
-	data = (pin->ssc_adj_period >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
-
-	data = pout->ssc_period;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
-	data = (pout->ssc_period >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
-
-	data = pout->ssc_step_size;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
-	data = (pout->ssc_step_size >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
-
-	data = (pin->ssc_center & 0x01);
-	data <<= 1;
-	data |= 0x01; /* enable */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
-
-	wmb();	/* make sure register committed */
-}
-
-static void pll_db_commit_common(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	/* confgiure the non frequency dependent pll registers */
-	data = 0;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
-
-	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
-
-	data = pout->pll_txclk_en;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
-
-	data = pout->pll_resetsm_cntrl;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
-	data = pout->pll_resetsm_cntrl2;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
-	data = pout->pll_resetsm_cntrl5;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
-
-	data = pout->pll_vco_div_ref;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
-	data = (pout->pll_vco_div_ref >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
-
-	data = pout->pll_kvco_div_ref;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
-	data = (pout->pll_kvco_div_ref >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
-
-	data = pout->pll_misc1;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
-
-	data = pin->pll_ie_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
-
-	data = pin->pll_ip_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
-
-	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
-
-	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
-
-	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpcset_m);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
-
-	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
-
-	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
-
-	data = pin->pll_iptat_trim;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
-
-	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
-}
-
-static void pll_db_commit_8996(struct mdss_pll_resources *pll,
-					struct dsi_pll_db *pdb)
-{
-	void __iomem *pll_base = pll->pll_base;
-	struct dsi_pll_input *pin = &pdb->in;
-	struct dsi_pll_output *pout = &pdb->out;
-	char data;
-
-	data = pout->cmn_ldo_cntrl;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
-
-	pll_db_commit_common(pll, pdb);
-
-	/* de assert pll start and apply pll sw reset */
-	/* stop pll */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
-
-	/* pll sw reset */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
-	wmb();	/* make sure register committed */
-	udelay(10);
-
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
-	wmb();	/* make sure register committed */
-
-	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
-
-	data = 0xff; /* data, clk, pll normal operation */
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
-
-	/* confgiure the frequency dependent pll registers */
-	data = pout->dec_start;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
-
-	data = pout->div_frac_start;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
-	data = (pout->div_frac_start >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
-	data = (pout->div_frac_start >> 16);
-	data &= 0x0f;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
-
-	data = pout->plllock_cmp;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
-	data = (pout->plllock_cmp >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
-	data = (pout->plllock_cmp >> 16);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
-
-	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
-
-	data = pout->pll_vco_count;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
-	data = (pout->pll_vco_count >> 8);
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
-
-	data = pout->pll_kvco_count;
-	data &= 0x0ff;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
-	data = (pout->pll_kvco_count >> 8);
-	data &= 0x03;
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
-
-	/*
-	 * tx_band = pll_postdiv
-	 * 0: divided by 1 <== for now
-	 * 1: divided by 2
-	 * 2: divided by 4
-	 * 3: divided by 8
-	 */
-	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
-	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
-
-	data = (pout->pll_n1div | (pout->pll_n2div << 4));
-	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
-
-	if (pll->ssc_en)
-		pll_db_commit_ssc(pll, pdb);
-
-	wmb();	/* make sure register committed */
-}
-
-/*
- * pll_source_finding:
- * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
- * at mdss_dsi_8996_phy_config()
- */
-static int pll_source_finding(struct mdss_pll_resources *pll)
-{
-	u32 clk_buf_en;
-	u32 glbl_test_ctrl;
-
-	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
-				DSIPHY_CMN_GLBL_TEST_CTRL);
-	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
-				DSIPHY_PLL_CLKBUFLR_EN);
-
-	glbl_test_ctrl &= BIT(2);
-	glbl_test_ctrl >>= 2;
-
-	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
-		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
-
-	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
-			(clk_buf_en == PLL_OUTPUT_BOTH))
-		return PLL_MASTER;
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
-			(clk_buf_en == PLL_OUTPUT_NONE))
-		return PLL_SLAVE;
-
-	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
-			(clk_buf_en == PLL_OUTPUT_RIGHT))
-		return PLL_STANDALONE;
-
-	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
-			__func__, clk_buf_en, glbl_test_ctrl);
-
-	return PLL_UNKNOWN;
-}
-
-static void pll_source_setup(struct mdss_pll_resources *pll)
-{
-	int status;
-	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
-	struct mdss_pll_resources *other;
-
-	if (pdb->source_setup_done)
-		return;
-
-	pdb->source_setup_done++;
-
-	status = pll_source_finding(pll);
-
-	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
-		return;
-
-	other = pdb->next->pll;
-	if (!other)
-		return;
-
-	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
-			status, pll->index, other->index);
-
-	if (status == PLL_MASTER)
-		pll->slave = other;
-	else
-		other->slave = pll;
-}
-
-int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct mdss_pll_resources *slave;
-	struct dsi_pll_db *pdb;
-
-	pdb = (struct dsi_pll_db *)pll->priv;
-	if (!pdb) {
-		pr_err("No prov found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	pll_source_setup(pll);
-
-	pr_debug("%s: ndx=%d base=%p rate=%lu slave=%p\n", __func__,
-				pll->index, pll->pll_base, rate, pll->slave);
-
-	pll->vco_current_rate = rate;
-	pll->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	mdss_dsi_pll_8996_input_init(pll, pdb);
-
-	pll_8996_dec_frac_calc(pll, pdb);
-
-	if (pll->ssc_en)
-		pll_8996_ssc_calc(pll, pdb);
-
-	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
-					pll->vco_ref_clk_rate);
-
-	/* commit slave if split display is enabled */
-	slave = pll->slave;
-	if (slave)
-		pll_db_commit_8996(slave, pdb);
-
-	/* commit master itself */
-	pll_db_commit_8996(pll, pdb);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
-							struct dsi_pll_db *pdb)
-{
-	struct dsi_pll_output *pout = &pdb->out;
-
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
-		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
-		0xFF, 0x0);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
-		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
-		pout->dec_start, (pout->div_frac_start & 0x0FF));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
-		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
-		((pout->div_frac_start >> 8) & 0x0FF),
-		((pout->div_frac_start >> 16) & 0x0F));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
-		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
-		(pout->plllock_cmp & 0x0FF),
-		((pout->plllock_cmp >> 8) & 0x0FF));
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
-		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
-		((pout->plllock_cmp >> 16) & 0x03),
-		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune*/
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
-		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
-		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
-		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
-		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
-		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
-		0x01, 0x01);
-	MDSS_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
-	MDSS_PLL_REG_W(pll->dyn_pll_base,
-		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
-
-	/*
-	 * Ensure all the dynamic refresh registers are written before
-	 * dynamic refresh to change the fps is triggered
-	 */
-	wmb();
-}
-
-int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
-{
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	struct dsi_pll_db *pdb;
-	s64 vco_clk_rate = (s64)rate;
-
-	if (!pll) {
-		pr_err("PLL data not found\n");
-		return -EINVAL;
-	}
-
-	pdb = pll->priv;
-	if (!pdb) {
-		pr_err("No priv data found\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
-	if (rc) {
-		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	pr_debug("%s: ndx=%d base=%p rate=%lu\n", __func__,
-			pll->index, pll->pll_base, rate);
-
-	pll->vco_current_rate = rate;
-	pll->vco_ref_clk_rate = vco->ref_clk_rate;
-
-	mdss_dsi_pll_8996_input_init(pll, pdb);
-
-	pll_8996_dec_frac_calc(pll, pdb);
-
-	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
-			pll->vco_ref_clk_rate);
-
-	shadow_pll_dynamic_refresh_8996(pll, pdb);
-
-	rc = mdss_pll_resource_enable(pll, false);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
-		return rc;
-	}
-
-	return rc;
-}
-
-unsigned long pll_vco_get_rate_8996(struct clk *c)
-{
-	u64 vco_rate, multiplier = BIT(20);
-	s32 div_frac_start;
-	u32 dec_start;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	u64 ref_clk = vco->ref_clk_rate;
-	int rc;
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (is_gdsc_disabled(pll))
-		return 0;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
-		return rc;
-	}
-
-	dec_start = MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DEC_START);
-	dec_start &= 0x0ff;
-	pr_debug("dec_start = 0x%x\n", dec_start);
-
-	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
-	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
-	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
-			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
-	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
-
-	vco_rate = ref_clk * dec_start;
-	vco_rate += ((ref_clk * div_frac_start) / multiplier);
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return (unsigned long)vco_rate;
-}
-
-long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	u32 div;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	div = vco->min_rate / rate;
-	if (div > 15) {
-		/* rate < 86.67 Mhz */
-		pr_err("rate=%lu NOT supportted\n", rate);
-		return -EINVAL;
-	}
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-enum handoff pll_vco_handoff_8996(struct clk *c)
-{
-	int rc;
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (is_gdsc_disabled(pll))
-		return HANDOFF_DISABLED_CLK;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
-		return ret;
-	}
-
-	if (pll_is_pll_locked_8996(pll)) {
-		pll->handoff_resources = true;
-		pll->pll_on = true;
-		c->rate = pll_vco_get_rate_8996(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		mdss_pll_resource_enable(pll, false);
-	}
-
-	return ret;
-}
-
-enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
-{
-	return HANDOFF_DISABLED_CLK;
-}
-
-int pll_vco_prepare_8996(struct clk *c)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("Dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
-							pll->index);
-		return rc;
-	}
-
-	if ((pll->vco_cached_rate != 0)
-	    && (pll->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, pll->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-					rc, pll->index);
-			mdss_pll_resource_enable(pll, false);
-			goto error;
-		}
-	}
-
-	rc = dsi_pll_enable(c);
-
-	if (rc) {
-		mdss_pll_resource_enable(pll, false);
-		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
-	}
-
-error:
-	return rc;
-}
-
-void pll_vco_unprepare_8996(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-
-	if (!pll) {
-		pr_err("Dsi pll resources are not available\n");
-		return;
-	}
-
-	pll->vco_cached_rate = c->rate;
-	dsi_pll_disable(c);
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
deleted file mode 100644
index e975fd3..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
+++ /dev/null
@@ -1,548 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/workqueue.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8996.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-8996.h"
-
-#define VCO_DELAY_USEC		1
-
-static struct dsi_pll_db pll_db[DSI_PLL_NUM];
-
-static const struct clk_ops n2_clk_src_ops;
-static const struct clk_ops shadow_n2_clk_src_ops;
-static const struct clk_ops byte_clk_src_ops;
-static const struct clk_ops post_n1_div_clk_src_ops;
-static const struct clk_ops shadow_post_n1_div_clk_src_ops;
-
-static const struct clk_ops clk_ops_gen_mux_dsi;
-
-/* Op structures */
-static const struct clk_ops clk_ops_dsi_vco = {
-	.set_rate = pll_vco_set_rate_8996,
-	.round_rate = pll_vco_round_rate_8996,
-	.handoff = pll_vco_handoff_8996,
-	.prepare = pll_vco_prepare_8996,
-	.unprepare = pll_vco_unprepare_8996,
-};
-
-static struct clk_div_ops post_n1_div_ops = {
-	.set_div = post_n1_div_set_div,
-	.get_div = post_n1_div_get_div,
-};
-
-static struct clk_div_ops n2_div_ops = {	/* hr_oclk3 */
-	.set_div = n2_div_set_div,
-	.get_div = n2_div_get_div,
-};
-
-static struct clk_mux_ops mdss_byte_mux_ops = {
-	.set_mux_sel = set_mdss_byte_mux_sel_8996,
-	.get_mux_sel = get_mdss_byte_mux_sel_8996,
-};
-
-static struct clk_mux_ops mdss_pixel_mux_ops = {
-	.set_mux_sel = set_mdss_pixel_mux_sel_8996,
-	.get_mux_sel = get_mdss_pixel_mux_sel_8996,
-};
-
-/* Shadow ops for dynamic refresh */
-static const struct clk_ops clk_ops_shadow_dsi_vco = {
-	.set_rate = shadow_pll_vco_set_rate_8996,
-	.round_rate = pll_vco_round_rate_8996,
-	.handoff = shadow_pll_vco_handoff_8996,
-};
-
-static struct clk_div_ops shadow_post_n1_div_ops = {
-	.set_div = post_n1_div_set_div,
-};
-
-static struct clk_div_ops shadow_n2_div_ops = {
-	.set_div = shadow_n2_div_set_div,
-};
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1300000000UL,
-	.max_rate = 2600000000UL,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
-	.c = {
-		.dbg_name = "dsi0pll_vco_clk_8996",
-		.ops = &clk_ops_dsi_vco,
-		CLK_INIT(dsi0pll_vco_clk.c),
-	},
-};
-
-static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
-	.ref_clk_rate = 19200000u,
-	.min_rate = 1300000000u,
-	.max_rate = 2600000000u,
-	.c = {
-		.dbg_name = "dsi0pll_shadow_vco_clk",
-		.ops = &clk_ops_shadow_dsi_vco,
-		CLK_INIT(dsi0pll_shadow_vco_clk.c),
-	},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
-	.ref_clk_rate = 19200000UL,
-	.min_rate = 1300000000UL,
-	.max_rate = 2600000000UL,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
-	.c = {
-		.dbg_name = "dsi1pll_vco_clk_8996",
-		.ops = &clk_ops_dsi_vco,
-		CLK_INIT(dsi1pll_vco_clk.c),
-	},
-};
-
-static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
-	.ref_clk_rate = 19200000u,
-	.min_rate = 1300000000u,
-	.max_rate = 2600000000u,
-	.pll_en_seq_cnt = 1,
-	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
-	.c = {
-		.dbg_name = "dsi1pll_shadow_vco_clk",
-		.ops = &clk_ops_shadow_dsi_vco,
-		CLK_INIT(dsi1pll_shadow_vco_clk.c),
-	},
-};
-
-static struct div_clk dsi0pll_post_n1_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &post_n1_div_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_post_n1_div_clk",
-		.ops = &post_n1_div_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_n1_div_clk.c),
-	},
-};
-
-static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &shadow_post_n1_div_ops,
-	.c = {
-		.parent = &dsi0pll_shadow_vco_clk.c,
-		.dbg_name = "dsi0pll_shadow_post_n1_div_clk",
-		.ops = &shadow_post_n1_div_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
-	},
-};
-
-static struct div_clk dsi1pll_post_n1_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &post_n1_div_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_post_n1_div_clk",
-		.ops = &post_n1_div_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_n1_div_clk.c),
-	},
-};
-
-static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &shadow_post_n1_div_ops,
-	.c = {
-		.parent = &dsi1pll_shadow_vco_clk.c,
-		.dbg_name = "dsi1pll_shadow_post_n1_div_clk",
-		.ops = &shadow_post_n1_div_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
-	},
-};
-
-static struct div_clk dsi0pll_n2_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &n2_div_ops,
-	.c = {
-		.parent = &dsi0pll_post_n1_div_clk.c,
-		.dbg_name = "dsi0pll_n2_div_clk",
-		.ops = &n2_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_n2_div_clk.c),
-	},
-};
-
-static struct div_clk dsi0pll_shadow_n2_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &shadow_n2_div_ops,
-	.c = {
-		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
-		.dbg_name = "dsi0pll_shadow_n2_div_clk",
-		.ops = &shadow_n2_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
-	},
-};
-
-static struct div_clk dsi1pll_n2_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &n2_div_ops,
-	.c = {
-		.parent = &dsi1pll_post_n1_div_clk.c,
-		.dbg_name = "dsi1pll_n2_div_clk",
-		.ops = &n2_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_n2_div_clk.c),
-	},
-};
-
-static struct div_clk dsi1pll_shadow_n2_div_clk = {
-	.data = {
-		.max_div = 15,
-		.min_div = 1,
-	},
-	.ops = &shadow_n2_div_ops,
-	.c = {
-		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
-		.dbg_name = "dsi1pll_shadow_n2_div_clk",
-		.ops = &shadow_n2_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
-	},
-};
-
-static struct div_clk dsi0pll_pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &dsi0pll_n2_div_clk.c,
-		.dbg_name = "dsi0pll_pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pixel_clk_src.c),
-	},
-};
-
-static struct div_clk dsi0pll_shadow_pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &dsi0pll_shadow_n2_div_clk.c,
-		.dbg_name = "dsi0pll_shadow_pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
-	},
-};
-
-static struct div_clk dsi1pll_pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &dsi1pll_n2_div_clk.c,
-		.dbg_name = "dsi1pll_pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pixel_clk_src.c),
-	},
-};
-
-static struct div_clk dsi1pll_shadow_pixel_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &dsi1pll_shadow_n2_div_clk.c,
-		.dbg_name = "dsi1pll_shadow_pixel_clk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_pixel_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_pixel_clk_src.c, 0},
-		{&dsi0pll_shadow_pixel_clk_src.c, 1},
-	},
-	.ops = &mdss_pixel_mux_ops,
-	.c = {
-		.parent = &dsi0pll_pixel_clk_src.c,
-		.dbg_name = "dsi0pll_pixel_clk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pixel_clk_mux.c),
-	}
-};
-
-static struct mux_clk dsi1pll_pixel_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_pixel_clk_src.c, 0},
-		{&dsi1pll_shadow_pixel_clk_src.c, 1},
-	},
-	.ops = &mdss_pixel_mux_ops,
-	.c = {
-		.parent = &dsi1pll_pixel_clk_src.c,
-		.dbg_name = "dsi1pll_pixel_clk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pixel_clk_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_byte_clk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi0pll_post_n1_div_clk.c,
-		.dbg_name = "dsi0pll_byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(dsi0pll_byte_clk_src.c),
-	},
-};
-
-static struct div_clk dsi0pll_shadow_byte_clk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
-		.dbg_name = "dsi0pll_shadow_byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
-	},
-};
-
-static struct div_clk dsi1pll_byte_clk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi1pll_post_n1_div_clk.c,
-		.dbg_name = "dsi1pll_byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(dsi1pll_byte_clk_src.c),
-	},
-};
-
-static struct div_clk dsi1pll_shadow_byte_clk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
-		.dbg_name = "dsi1pll_shadow_byte_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_byte_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_byte_clk_src.c, 0},
-		{&dsi0pll_shadow_byte_clk_src.c, 1},
-	},
-	.ops = &mdss_byte_mux_ops,
-	.c = {
-		.parent = &dsi0pll_byte_clk_src.c,
-		.dbg_name = "dsi0pll_byte_clk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byte_clk_mux.c),
-	}
-};
-static struct mux_clk dsi1pll_byte_clk_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_byte_clk_src.c, 0},
-		{&dsi1pll_shadow_byte_clk_src.c, 1},
-	},
-	.ops = &mdss_byte_mux_ops,
-	.c = {
-		.parent = &dsi1pll_byte_clk_src.c,
-		.dbg_name = "dsi1pll_byte_clk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byte_clk_mux.c),
-	}
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8996[] = {
-	CLK_LIST(dsi0pll_byte_clk_mux),
-	CLK_LIST(dsi0pll_byte_clk_src),
-	CLK_LIST(dsi0pll_pixel_clk_mux),
-	CLK_LIST(dsi0pll_pixel_clk_src),
-	CLK_LIST(dsi0pll_n2_div_clk),
-	CLK_LIST(dsi0pll_post_n1_div_clk),
-	CLK_LIST(dsi0pll_vco_clk),
-	CLK_LIST(dsi0pll_shadow_byte_clk_src),
-	CLK_LIST(dsi0pll_shadow_pixel_clk_src),
-	CLK_LIST(dsi0pll_shadow_n2_div_clk),
-	CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
-	CLK_LIST(dsi0pll_shadow_vco_clk),
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
-	CLK_LIST(dsi1pll_byte_clk_mux),
-	CLK_LIST(dsi1pll_byte_clk_src),
-	CLK_LIST(dsi1pll_pixel_clk_mux),
-	CLK_LIST(dsi1pll_pixel_clk_src),
-	CLK_LIST(dsi1pll_n2_div_clk),
-	CLK_LIST(dsi1pll_post_n1_div_clk),
-	CLK_LIST(dsi1pll_vco_clk),
-	CLK_LIST(dsi1pll_shadow_byte_clk_src),
-	CLK_LIST(dsi1pll_shadow_pixel_clk_src),
-	CLK_LIST(dsi1pll_shadow_n2_div_clk),
-	CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
-	CLK_LIST(dsi1pll_shadow_vco_clk),
-};
-
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0, ndx;
-	int const ssc_freq_default = 31500; /* default h/w recommended value */
-	int const ssc_ppm_default = 5000; /* default h/w recommended value */
-	struct dsi_pll_db *pdb;
-
-	if (pll_res->index >= DSI_PLL_NUM) {
-		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
-		return -EINVAL;
-	}
-
-	ndx = pll_res->index;
-	pdb = &pll_db[ndx];
-	pll_res->priv = pdb;
-	pdb->pll = pll_res;
-	ndx++;
-	ndx %= DSI_PLL_NUM;
-	pdb->next = &pll_db[ndx];
-
-	/* Set clock source operations */
-
-	/* hr_oclk3, pixel */
-	n2_clk_src_ops = clk_ops_slave_div;
-	n2_clk_src_ops.prepare = mdss_pll_div_prepare;
-
-	shadow_n2_clk_src_ops = clk_ops_slave_div;
-
-	/* hr_ockl2, byte, vco pll */
-	post_n1_div_clk_src_ops = clk_ops_div;
-	post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
-
-	shadow_post_n1_div_clk_src_ops = clk_ops_div;
-
-	byte_clk_src_ops = clk_ops_div;
-	byte_clk_src_ops.prepare = mdss_pll_div_prepare;
-
-	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
-	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
-	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
-	if (pll_res->ssc_en) {
-		if (!pll_res->ssc_freq)
-			pll_res->ssc_freq = ssc_freq_default;
-		if (!pll_res->ssc_ppm)
-			pll_res->ssc_ppm = ssc_ppm_default;
-	}
-
-	/* Set client data to mux, div and vco clocks.  */
-	if (pll_res->index == DSI_PLL_1) {
-		dsi1pll_byte_clk_src.priv = pll_res;
-		dsi1pll_pixel_clk_src.priv = pll_res;
-		dsi1pll_post_n1_div_clk.priv = pll_res;
-		dsi1pll_n2_div_clk.priv = pll_res;
-		dsi1pll_vco_clk.priv = pll_res;
-
-		dsi1pll_shadow_byte_clk_src.priv = pll_res;
-		dsi1pll_shadow_pixel_clk_src.priv = pll_res;
-		dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
-		dsi1pll_shadow_n2_div_clk.priv = pll_res;
-		dsi1pll_shadow_vco_clk.priv = pll_res;
-
-		pll_res->vco_delay = VCO_DELAY_USEC;
-		rc = of_msm_clock_register(pdev->dev.of_node,
-				mdss_dsi_pllcc_8996_1,
-				ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
-	} else {
-		dsi0pll_byte_clk_src.priv = pll_res;
-		dsi0pll_pixel_clk_src.priv = pll_res;
-		dsi0pll_post_n1_div_clk.priv = pll_res;
-		dsi0pll_n2_div_clk.priv = pll_res;
-		dsi0pll_vco_clk.priv = pll_res;
-
-		dsi0pll_shadow_byte_clk_src.priv = pll_res;
-		dsi0pll_shadow_pixel_clk_src.priv = pll_res;
-		dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
-		dsi0pll_shadow_n2_div_clk.priv = pll_res;
-		dsi0pll_shadow_vco_clk.priv = pll_res;
-
-		pll_res->vco_delay = VCO_DELAY_USEC;
-		rc = of_msm_clock_register(pdev->dev.of_node,
-				mdss_dsi_pllcc_8996,
-				ARRAY_SIZE(mdss_dsi_pllcc_8996));
-	}
-
-	if (!rc) {
-		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
-						pll_res->index);
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h
deleted file mode 100644
index 023a481..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef MDSS_DSI_PLL_8996_H
-#define MDSS_DSI_PLL_8996_H
-
-#define DSIPHY_CMN_CLK_CFG0		0x0010
-#define DSIPHY_CMN_CLK_CFG1		0x0014
-#define DSIPHY_CMN_GLBL_TEST_CTRL	0x0018
-
-#define DSIPHY_CMN_PLL_CNTRL		0x0048
-#define DSIPHY_CMN_CTRL_0		0x001c
-#define DSIPHY_CMN_CTRL_1		0x0020
-
-#define DSIPHY_CMN_LDO_CNTRL		0x004c
-
-#define DSIPHY_PLL_IE_TRIM		0x0400
-#define DSIPHY_PLL_IP_TRIM		0x0404
-
-#define DSIPHY_PLL_IPTAT_TRIM		0x0410
-
-#define DSIPHY_PLL_CLKBUFLR_EN		0x041c
-
-#define DSIPHY_PLL_SYSCLK_EN_RESET	0x0428
-#define DSIPHY_PLL_RESETSM_CNTRL	0x042c
-#define DSIPHY_PLL_RESETSM_CNTRL2	0x0430
-#define DSIPHY_PLL_RESETSM_CNTRL3	0x0434
-#define DSIPHY_PLL_RESETSM_CNTRL4	0x0438
-#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
-#define DSIPHY_PLL_KVCO_DIV_REF1	0x0440
-#define DSIPHY_PLL_KVCO_DIV_REF2	0x0444
-#define DSIPHY_PLL_KVCO_COUNT1		0x0448
-#define DSIPHY_PLL_KVCO_COUNT2		0x044c
-#define DSIPHY_PLL_VREF_CFG1		0x045c
-
-#define DSIPHY_PLL_KVCO_CODE		0x0458
-
-#define DSIPHY_PLL_VCO_DIV_REF1		0x046c
-#define DSIPHY_PLL_VCO_DIV_REF2		0x0470
-#define DSIPHY_PLL_VCO_COUNT1		0x0474
-#define DSIPHY_PLL_VCO_COUNT2		0x0478
-#define DSIPHY_PLL_PLLLOCK_CMP1		0x047c
-#define DSIPHY_PLL_PLLLOCK_CMP2		0x0480
-#define DSIPHY_PLL_PLLLOCK_CMP3		0x0484
-#define DSIPHY_PLL_PLLLOCK_CMP_EN	0x0488
-#define DSIPHY_PLL_PLL_VCO_TUNE		0x048C
-#define DSIPHY_PLL_DEC_START		0x0490
-#define DSIPHY_PLL_SSC_EN_CENTER	0x0494
-#define DSIPHY_PLL_SSC_ADJ_PER1		0x0498
-#define DSIPHY_PLL_SSC_ADJ_PER2		0x049c
-#define DSIPHY_PLL_SSC_PER1		0x04a0
-#define DSIPHY_PLL_SSC_PER2		0x04a4
-#define DSIPHY_PLL_SSC_STEP_SIZE1	0x04a8
-#define DSIPHY_PLL_SSC_STEP_SIZE2	0x04ac
-#define DSIPHY_PLL_DIV_FRAC_START1	0x04b4
-#define DSIPHY_PLL_DIV_FRAC_START2	0x04b8
-#define DSIPHY_PLL_DIV_FRAC_START3	0x04bc
-#define DSIPHY_PLL_TXCLK_EN		0x04c0
-#define DSIPHY_PLL_PLL_CRCTRL		0x04c4
-
-#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
-
-#define DSIPHY_PLL_PLL_MISC1		0x04e8
-
-#define DSIPHY_PLL_CP_SET_CUR		0x04f0
-#define DSIPHY_PLL_PLL_ICPMSET		0x04f4
-#define DSIPHY_PLL_PLL_ICPCSET		0x04f8
-#define DSIPHY_PLL_PLL_ICP_SET		0x04fc
-#define DSIPHY_PLL_PLL_LPF1		0x0500
-#define DSIPHY_PLL_PLL_LPF2_POSTDIV	0x0504
-#define DSIPHY_PLL_PLL_BANDGAP	0x0508
-
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x060
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x064
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x068
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x06C
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x070
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x074
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x078
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x07C
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x080
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x084
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x088
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x094
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x098
-
-struct dsi_pll_input {
-	u32 fref;	/* 19.2 Mhz, reference clk */
-	u32 fdata;	/* bit clock rate */
-	u32 dsiclk_sel; /* 1, reg: 0x0014 */
-	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
-	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
-	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
-
-	/* fixed  */
-	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
-	u32 vco_measure_time;	/* 5, unknown */
-	u32 kvco_measure_time;	/* 5, unknown */
-	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
-	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
-	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
-	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
-	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
-	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
-	u32 ssc_spread;		/* 0.005  */
-	u32 ssc_freq;		/* unknown */
-	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
-	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
-	u32 pll_iptat_trim;	/* reg: 0x0410 */
-	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
-	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
-
-	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
-	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
-
-	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
-	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
-
-	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
-	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
-
-	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
-	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
-	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
-	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
-	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
-};
-
-struct dsi_pll_output {
-	u32 pll_txclk_en;	/* reg: 0x04c0 */
-	u32 dec_start;		/* reg: 0x0490 */
-	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
-	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
-	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
-	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
-	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
-	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
-	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
-	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
-	u32 pll_misc1;		/* reg: 0x04e8 */
-	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
-	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
-	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
-	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
-	u32 pll_kvco_code;		/* reg: 0x0458 */
-
-	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
-	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
-	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
-
-	u32 pll_postdiv;	/* vco */
-	u32 pll_n1div;		/* vco */
-	u32 pll_n2div;		/* hr_oclk3, pixel */
-	u32 fcvo;
-};
-
-enum {
-	DSI_PLL_0,
-	DSI_PLL_1,
-	DSI_PLL_NUM
-};
-
-struct dsi_pll_db {
-	struct dsi_pll_db *next;
-	struct mdss_pll_resources *pll;
-	struct dsi_pll_input in;
-	struct dsi_pll_output out;
-	int source_setup_done;
-};
-
-enum {
-	PLL_OUTPUT_NONE,
-	PLL_OUTPUT_RIGHT,
-	PLL_OUTPUT_LEFT,
-	PLL_OUTPUT_BOTH
-};
-
-enum {
-	PLL_SOURCE_FROM_LEFT,
-	PLL_SOURCE_FROM_RIGHT
-};
-
-enum {
-	PLL_UNKNOWN,
-	PLL_STANDALONE,
-	PLL_SLAVE,
-	PLL_MASTER
-};
-
-int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
-long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
-enum handoff pll_vco_handoff_8996(struct clk *c);
-enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
-int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
-int shadow_post_n1_div_get_div(struct div_clk *clk);
-int shadow_n2_div_set_div(struct div_clk *clk, int div);
-int shadow_n2_div_get_div(struct div_clk *clk);
-int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
-int pll_vco_prepare_8996(struct clk *c);
-void pll_vco_unprepare_8996(struct clk *c);
-int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
-int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
-int post_n1_div_set_div(struct div_clk *clk, int div);
-int post_n1_div_get_div(struct div_clk *clk);
-int n2_div_set_div(struct div_clk *clk, int div);
-int n2_div_get_div(struct div_clk *clk);
-int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
-
-#endif  /* MDSS_DSI_PLL_8996_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c
deleted file mode 100644
index f8b8337..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c
+++ /dev/null
@@ -1,580 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-
-#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
-#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG	(0x0008)
-#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
-#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG		(0x0010)
-#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG	(0x0014)
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
-#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG		(0x002C)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG	(0x0030)
-#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG	(0x0034)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
-#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3		(0x0078)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4		(0x007C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5		(0x0080)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
-#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
-#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG	(0x009C)
-#define DSI_PHY_PLL_UNIPHY_PLL_STATUS		(0x00C0)
-
-#define DSI_PLL_POLL_DELAY_US			50
-#define DSI_PLL_POLL_TIMEOUT_US			500
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel)
-{
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	pr_debug("byte mux set to %s mode\n", sel ? "indirect" : "direct");
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, (sel << 1));
-
-	return 0;
-}
-
-int get_byte_mux_sel(struct mux_clk *clk)
-{
-	int mux_mode, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG) & BIT(1);
-
-	pr_debug("byte mux mode = %s\n", mux_mode ? "indirect" : "direct");
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return !!mux_mode;
-}
-
-int dsi_pll_div_prepare(struct clk *c)
-{
-	struct div_clk *div = to_div_clk(c);
-	/* Restore the divider's value */
-	return div->ops->set_div(div, div->data.div);
-}
-
-int dsi_pll_mux_prepare(struct clk *c)
-{
-	struct mux_clk *mux = to_mux_clk(c);
-	int i, rc, sel = 0;
-	struct mdss_pll_resources *dsi_pll_res = mux->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	for (i = 0; i < mux->num_parents; i++)
-		if (mux->parents[i].src == c->parent) {
-			sel = mux->parents[i].sel;
-			break;
-		}
-
-	if (i == mux->num_parents) {
-		pr_err("Failed to select the parent clock\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	/* Restore the mux source select value */
-	rc = mux->ops->set_mux_sel(mux, sel);
-
-error:
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int fixed_4div_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1));
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int fixed_4div_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return div + 1;
-}
-
-int digital_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, (div - 1));
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int digital_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return div + 1;
-}
-
-int analog_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, div - 1);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	return rc;
-}
-
-int analog_get_div(struct div_clk *clk)
-{
-	int div = 0, rc;
-	struct mdss_pll_resources *dsi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(clk->priv, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG) + 1;
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return div;
-}
-
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res)
-{
-	u32 status;
-	int pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
-			DSI_PHY_PLL_UNIPHY_PLL_STATUS),
-			status,
-			((status & BIT(0)) == 1),
-			DSI_PLL_POLL_DELAY_US,
-			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("DSI PLL status=%x failed to Lock\n", status);
-		pll_locked = 0;
-	} else {
-		pll_locked = 1;
-	}
-
-	return pll_locked;
-}
-
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
-{
-	s64 vco_clk_rate = rate;
-	s32 rem;
-	s64 refclk_cfg, frac_n_mode, ref_doubler_en_b;
-	s64 ref_clk_to_pll, div_fbx1000, frac_n_value;
-	s64 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
-	s64 gen_vco_clk, cal_cfg10, cal_cfg11;
-	u32 res;
-	int i;
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	/* Configure the Loop filter resistance */
-	for (i = 0; i < vco->lpfr_lut_size; i++)
-		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
-			break;
-	if (i == vco->lpfr_lut_size) {
-		pr_err("unable to get loop filter resistance. vco=%ld\n", rate);
-		return -EINVAL;
-	}
-	res = vco->lpfr_lut[i].r;
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG, res);
-
-	/* Loop filter capacitance values : c1 and c2 */
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
-
-	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
-	if (rem) {
-		refclk_cfg = 0x1;
-		frac_n_mode = 1;
-		ref_doubler_en_b = 0;
-	} else {
-		refclk_cfg = 0x0;
-		frac_n_mode = 0;
-		ref_doubler_en_b = 1;
-	}
-
-	pr_debug("refclk_cfg = %lld\n", refclk_cfg);
-
-	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (refclk_cfg))
-			  + (ref_doubler_en_b * vco->ref_clk_rate));
-	div_fbx1000 = div_s64((vco_clk_rate * 1000), ref_clk_to_pll);
-
-	div_s64_rem(div_fbx1000, 1000, &rem);
-	frac_n_value = div_s64((rem * (1 << 16)), 1000);
-	gen_vco_clk = div_s64(div_fbx1000 * ref_clk_to_pll, 1000);
-
-	pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
-	pr_debug("div_fb = %lld\n", div_fbx1000);
-	pr_debug("frac_n_value = %lld\n", frac_n_value);
-
-	pr_debug("Generated VCO Clock: %lld\n", gen_vco_clk);
-	rem = 0;
-	if (frac_n_mode) {
-		sdm_cfg0 = (0x0 << 5);
-		sdm_cfg0 |= (0x0 & 0x3f);
-		sdm_cfg1 = (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
-		sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
-		sdm_cfg2 = rem;
-	} else {
-		sdm_cfg0 = (0x1 << 5);
-		sdm_cfg0 |= (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
-		sdm_cfg1 = (0x0 & 0x3f);
-		sdm_cfg2 = 0;
-		sdm_cfg3 = 0;
-	}
-
-	pr_debug("sdm_cfg0=%lld\n", sdm_cfg0);
-	pr_debug("sdm_cfg1=%lld\n", sdm_cfg1);
-	pr_debug("sdm_cfg2=%lld\n", sdm_cfg2);
-	pr_debug("sdm_cfg3=%lld\n", sdm_cfg3);
-
-	cal_cfg11 = div_s64_rem(gen_vco_clk, 256 * 1000000, &rem);
-	cal_cfg10 = rem / 1000000;
-	pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n", cal_cfg10, cal_cfg11);
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1, (u32)(sdm_cfg1 & 0xff));
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2, (u32)(sdm_cfg2 & 0xff));
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3, (u32)(sdm_cfg3 & 0xff));
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
-
-	/* Add hardware recommended delay for correct PLL configuration */
-	if (dsi_pll_res->vco_delay)
-		udelay(dsi_pll_res->vco_delay);
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, (u32)refclk_cfg);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0, (u32)sdm_cfg0);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10, (u32)(cal_cfg10 & 0xff));
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-		DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11, (u32)(cal_cfg11 & 0xff));
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
-
-	return 0;
-}
-
-unsigned long vco_get_rate(struct clk *c)
-{
-	u32 sdm0, doubler, sdm_byp_div;
-	u64 vco_rate;
-	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	u64 ref_clk = vco->ref_clk_rate;
-	int rc;
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/* Check to see if the ref clk doubler is enabled */
-	doubler = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-				 DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
-	ref_clk += (doubler * vco->ref_clk_rate);
-
-	/* see if it is integer mode or sdm mode */
-	sdm0 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-					DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
-	if (sdm0 & BIT(6)) {
-		/* integer mode */
-		sdm_byp_div = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
-		vco_rate = ref_clk * sdm_byp_div;
-	} else {
-		/* sdm mode */
-		sdm_dc_off = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
-		pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
-		sdm2 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
-		sdm3 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
-			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
-		sdm_freq_seed = (sdm3 << 8) | sdm2;
-		pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
-
-		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
-			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
-		pr_debug("vco rate = %lld\n", vco_rate);
-	}
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-
-	return (unsigned long)vco_rate;
-}
-
-static int dsi_pll_enable(struct clk *c)
-{
-	int i, rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return rc;
-	}
-
-	/* Try all enable sequences until one succeeds */
-	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
-		rc = vco->pll_enable_seqs[i](dsi_pll_res);
-		pr_debug("DSI PLL %s after sequence #%d\n",
-			rc ? "unlocked" : "locked", i + 1);
-		if (!rc)
-			break;
-	}
-
-	if (rc) {
-		mdss_pll_resource_enable(dsi_pll_res, false);
-		pr_err("DSI PLL failed to lock\n");
-	}
-	dsi_pll_res->pll_on = true;
-
-	return rc;
-}
-
-static void dsi_pll_disable(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res->pll_on &&
-		mdss_pll_resource_enable(dsi_pll_res, true)) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return;
-	}
-
-	dsi_pll_res->handoff_resources = false;
-
-	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
-				DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
-
-	mdss_pll_resource_enable(dsi_pll_res, false);
-	dsi_pll_res->pll_on = false;
-
-	pr_debug("DSI PLL Disabled\n");
-}
-
-long vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-enum handoff vco_handoff(struct clk *c)
-{
-	int rc;
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(dsi_pll_res))
-		return HANDOFF_DISABLED_CLK;
-
-	rc = mdss_pll_resource_enable(dsi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss dsi pll resources\n");
-		return ret;
-	}
-
-	if (dsi_pll_lock_status(dsi_pll_res)) {
-		dsi_pll_res->handoff_resources = true;
-		dsi_pll_res->pll_on = true;
-		c->rate = vco_get_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		mdss_pll_resource_enable(dsi_pll_res, false);
-	}
-
-	return ret;
-}
-
-int vco_prepare(struct clk *c)
-{
-	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res) {
-		pr_err("Dsi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	if ((dsi_pll_res->vco_cached_rate != 0)
-	    && (dsi_pll_res->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate);
-		if (rc) {
-			pr_err("vco_set_rate failed. rc=%d\n", rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_pll_enable(c);
-
-error:
-	return rc;
-}
-
-void vco_unprepare(struct clk *c)
-{
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *dsi_pll_res = vco->priv;
-
-	if (!dsi_pll_res) {
-		pr_err("Dsi pll resources are not available\n");
-		return;
-	}
-
-	dsi_pll_res->vco_cached_rate = c->rate;
-	dsi_pll_disable(c);
-}
-
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
deleted file mode 100644
index 398048ef..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_DSI_PLL_H
-#define __MDSS_DSI_PLL_H
-
-#include <linux/clk-provider.h>
-#include "mdss-pll.h"
-#define MAX_DSI_PLL_EN_SEQS	10
-
-/* Register offsets for 20nm PHY PLL */
-#define MMSS_DSI_PHY_PLL_PLL_CNTRL		(0x0014)
-#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN	(0x002C)
-#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN		(0x009C)
-
-struct lpfr_cfg {
-	unsigned long vco_rate;
-	u32 r;
-};
-
-struct dsi_pll_vco_clk {
-	struct clk_hw	hw;
-	unsigned long	ref_clk_rate;
-	unsigned long	min_rate;
-	unsigned long	max_rate;
-	u32		pll_en_seq_cnt;
-	struct lpfr_cfg *lpfr_lut;
-	u32		lpfr_lut_size;
-	void		*priv;
-
-	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
-			(struct mdss_pll_resources *dsi_pll_Res);
-};
-
-int dsi_pll_clock_register_10nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int dsi_pll_clock_register_7nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
-{
-	return container_of(hw, struct dsi_pll_vco_clk, hw);
-}
-
-int dsi_pll_clock_register_14nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-#endif
diff --git a/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c
deleted file mode 100644
index cc148ed..0000000
--- a/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c
+++ /dev/null
@@ -1,580 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include <dt-bindings/clock/msm-clocks-8974.h>
-
-#include "mdss-pll.h"
-#include "mdss-edp-pll.h"
-
-#define EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
-#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
-#define EDP_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
-#define EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
-#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
-#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
-#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
-#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
-#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
-#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
-#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
-#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0		(0x004C)
-#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1		(0x0050)
-#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2		(0x0054)
-#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3		(0x0058)
-#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2	(0x0064)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
-#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
-#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0	(0x005C)
-#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1	(0x0060)
-
-#define EDP_PLL_POLL_DELAY_US			50
-#define EDP_PLL_POLL_TIMEOUT_US			500
-
-static const struct clk_ops edp_mainlink_clk_src_ops;
-static struct clk_div_ops fixed_5div_ops; /* null ops */
-static const struct clk_ops edp_pixel_clk_ops;
-
-static inline struct edp_pll_vco_clk *to_edp_vco_clk(struct clk *clk)
-{
-	return container_of(clk, struct edp_pll_vco_clk, c);
-}
-
-int edp_div_prepare(struct clk *c)
-{
-	struct div_clk *div = to_div_clk(c);
-	/* Restore the divider's value */
-	return div->ops->set_div(div, div->data.div);
-}
-
-static int edp_vco_set_rate(struct clk *c, unsigned long vco_rate)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	struct mdss_pll_resources *edp_pll_res = vco->priv;
-	int rc;
-
-	pr_debug("vco_rate=%d\n", (int)vco_rate);
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("failed to enable edp pll res rc=%d\n", rc);
-		rc =  -EINVAL;
-	}
-
-	if (vco_rate == 810000000) {
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x18);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0, 0x36);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1, 0x69);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2, 0xff);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3, 0x2f);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0, 0x80);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x5a);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x0);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x0);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10, 0x2a);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11, 0x3);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1, 0x1a);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, 0x00);
-	} else if (vco_rate == 1350000000) {
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0, 0x36);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1, 0x62);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3, 0x28);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0, 0x80);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x5a);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x0);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x0);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10, 0x46);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11, 0x5);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1, 0x1a);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, 0x00);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, 0x00);
-	} else {
-		pr_err("rate=%d is NOT supported\n", (int)vco_rate);
-		vco_rate = 0;
-		rc =  -EINVAL;
-	}
-
-	MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-	udelay(100);
-	MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-	udelay(100);
-	MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
-	udelay(100);
-	MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-	udelay(100);
-	mdss_pll_resource_enable(edp_pll_res, false);
-
-	vco->rate = vco_rate;
-
-	return rc;
-}
-
-static int edp_pll_ready_poll(struct mdss_pll_resources *edp_pll_res)
-{
-	int cnt;
-	u32 status;
-
-	cnt = 100;
-	while (cnt--) {
-		udelay(100);
-		status = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0xc0);
-		status &= 0x01;
-		if (status)
-			break;
-	}
-	pr_debug("cnt=%d status=%d\n", cnt, (int)status);
-
-	if (status)
-		return 1;
-
-	return 0;
-}
-
-static int edp_vco_enable(struct clk *c)
-{
-	int i, ready;
-	int rc;
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	struct mdss_pll_resources *edp_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("edp pll resources not available\n");
-		return rc;
-	}
-
-	for (i = 0; i < 3; i++) {
-		ready = edp_pll_ready_poll(edp_pll_res);
-		if (ready)
-			break;
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
-		udelay(100);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
-		udelay(100);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
-		udelay(100);
-		MDSS_PLL_REG_W(edp_pll_res->pll_base,
-					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
-		udelay(100);
-	}
-
-	if (ready) {
-		pr_debug("EDP PLL lock success\n");
-		edp_pll_res->pll_on = true;
-		rc = 0;
-	} else {
-		pr_err("EDP PLL failed to lock\n");
-		mdss_pll_resource_enable(edp_pll_res, false);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static void edp_vco_disable(struct clk *c)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	struct mdss_pll_resources *edp_pll_res = vco->priv;
-
-	if (!edp_pll_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!edp_pll_res->pll_on &&
-		mdss_pll_resource_enable(edp_pll_res, true)) {
-		pr_err("edp pll resources not available\n");
-		return;
-	}
-
-	MDSS_PLL_REG_W(edp_pll_res->pll_base, 0x20, 0x00);
-
-	edp_pll_res->handoff_resources = false;
-	edp_pll_res->pll_on = false;
-
-	mdss_pll_resource_enable(edp_pll_res, false);
-
-	pr_debug("EDP PLL Disabled\n");
-}
-
-static unsigned long edp_vco_get_rate(struct clk *c)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	struct mdss_pll_resources *edp_pll_res = vco->priv;
-	u32 pll_status, div2;
-	int rc;
-
-	if (is_gdsc_disabled(edp_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("edp pll resources not available\n");
-		return rc;
-	}
-
-	if (vco->rate == 0) {
-		pll_status = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0xc0);
-		if (pll_status & 0x01) {
-			div2 = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0x24);
-			if (div2 & 0x01)
-				vco->rate = 1350000000;
-			else
-				vco->rate = 810000000;
-		}
-	}
-	mdss_pll_resource_enable(edp_pll_res, false);
-
-	pr_debug("rate=%d\n", (int)vco->rate);
-
-	return vco->rate;
-}
-
-static long edp_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	unsigned long rrate = -ENOENT;
-	unsigned long *lp;
-
-	lp = vco->rate_list;
-	while (*lp) {
-		rrate = *lp;
-		if (rate <= rrate)
-			break;
-		lp++;
-	}
-
-	pr_debug("rrate=%d\n", (int)rrate);
-
-	return rrate;
-}
-
-static int edp_vco_prepare(struct clk *c)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-
-	pr_debug("rate=%d\n", (int)vco->rate);
-
-	return edp_vco_set_rate(c, vco->rate);
-}
-
-static void edp_vco_unprepare(struct clk *c)
-{
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-
-	pr_debug("rate=%d\n", (int)vco->rate);
-
-	edp_vco_disable(c);
-}
-
-static int edp_pll_lock_status(struct mdss_pll_resources *edp_pll_res)
-{
-	u32 status;
-	int pll_locked = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("edp pll resources not available\n");
-		return rc;
-	}
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((edp_pll_res->pll_base + 0xc0),
-			status, ((status & BIT(0)) == 1),
-			EDP_PLL_POLL_DELAY_US,
-			EDP_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("EDP PLL status=%x failed to Lock\n", status);
-		pll_locked = 0;
-	} else {
-		pll_locked = 1;
-	}
-	mdss_pll_resource_enable(edp_pll_res, false);
-
-	return pll_locked;
-}
-
-static enum handoff edp_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
-	struct mdss_pll_resources *edp_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(edp_pll_res))
-		return HANDOFF_DISABLED_CLK;
-
-	if (mdss_pll_resource_enable(edp_pll_res, true)) {
-		pr_err("edp pll resources not available\n");
-		return ret;
-	}
-
-	edp_pll_res->handoff_resources = true;
-
-	if (edp_pll_lock_status(edp_pll_res)) {
-		c->rate = edp_vco_get_rate(c);
-		edp_pll_res->pll_on = true;
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		edp_pll_res->handoff_resources = false;
-		mdss_pll_resource_enable(edp_pll_res, false);
-	}
-
-	pr_debug("done, ret=%d\n", ret);
-	return ret;
-}
-
-static unsigned long edp_vco_rate_list[] = {
-		810000000, 1350000000, 0};
-
-struct const clk_ops edp_vco_clk_ops = {
-	.enable = edp_vco_enable,
-	.set_rate = edp_vco_set_rate,
-	.get_rate = edp_vco_get_rate,
-	.round_rate = edp_vco_round_rate,
-	.prepare = edp_vco_prepare,
-	.unprepare = edp_vco_unprepare,
-	.handoff = edp_vco_handoff,
-};
-
-struct edp_pll_vco_clk edp_vco_clk = {
-	.ref_clk_rate = 19200000,
-	.rate = 0,
-	.rate_list = edp_vco_rate_list,
-	.c = {
-		.dbg_name = "edp_vco_clk",
-		.ops = &edp_vco_clk_ops,
-		CLK_INIT(edp_vco_clk.c),
-	},
-};
-
-static unsigned long edp_mainlink_get_rate(struct clk *c)
-{
-	struct div_clk *mclk = to_div_clk(c);
-	struct clk *pclk;
-	unsigned long rate = 0;
-
-	pclk = clk_get_parent(c);
-
-	if (pclk && pclk->ops->get_rate) {
-		rate = pclk->ops->get_rate(pclk);
-		rate /= mclk->data.div;
-	}
-
-	pr_debug("rate=%d div=%d\n", (int)rate, mclk->data.div);
-
-	return rate;
-}
-
-
-struct div_clk edp_mainlink_clk_src = {
-	.ops = &fixed_5div_ops,
-	.data = {
-		.div = 5,
-		.min_div = 5,
-		.max_div = 5,
-	},
-	.c = {
-		.parent = &edp_vco_clk.c,
-		.dbg_name = "edp_mainlink_clk_src",
-		.ops = &edp_mainlink_clk_src_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(edp_mainlink_clk_src.c),
-	}
-};
-
-/*
- * this rate is from pll to clock controller
- * output from pll to CC has two possibilities
- * 1: if mainlink rate is 270M, then 675M
- * 2: if mainlink rate is 162M, then 810M
- */
-static int edp_pixel_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	struct mdss_pll_resources *edp_pll_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("edp pll resources not available\n");
-		return rc;
-	}
-
-	pr_debug("div=%d\n", div);
-	MDSS_PLL_REG_W(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1));
-	mdss_pll_resource_enable(edp_pll_res, false);
-
-	return 0;
-}
-
-static int edp_pixel_get_div(struct div_clk *clk)
-{
-	int div = 0;
-	int rc;
-	struct mdss_pll_resources *edp_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(edp_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(edp_pll_res, true);
-	if (rc) {
-		pr_err("edp pll resources not available\n");
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(edp_pll_res->pll_base,
-				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG);
-	mdss_pll_resource_enable(edp_pll_res, false);
-	div &= 0x01;
-	pr_debug("div=%d\n", div);
-	return div + 1;
-}
-
-static struct clk_div_ops edp_pixel_ops = {
-	.set_div = edp_pixel_set_div,
-	.get_div = edp_pixel_get_div,
-};
-
-struct div_clk edp_pixel_clk_src = {
-	.data = {
-		.max_div = 2,
-		.min_div = 1,
-	},
-	.ops = &edp_pixel_ops,
-	.c = {
-		.parent = &edp_vco_clk.c,
-		.dbg_name = "edp_pixel_clk_src",
-		.ops = &edp_pixel_clk_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(edp_pixel_clk_src.c),
-	},
-};
-
-static struct clk_lookup mdss_edp_pllcc_8974[] = {
-	CLK_LOOKUP("edp_pixel_src", edp_pixel_clk_src.c,
-						"fd8c0000.qcom,mmsscc-mdss"),
-	CLK_LOOKUP("edp_mainlink_src", edp_mainlink_clk_src.c,
-						"fd8c0000.qcom,mmsscc-mdss"),
-};
-
-int edp_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP;
-
-	/* Set client data to div and vco clocks */
-	edp_pixel_clk_src.priv = pll_res;
-	edp_mainlink_clk_src.priv = pll_res;
-	edp_vco_clk.priv = pll_res;
-
-	/* Set clock operation for mainlink and pixel clock */
-	edp_mainlink_clk_src_ops = clk_ops_div;
-	edp_mainlink_clk_src_ops.get_parent = clk_get_parent;
-	edp_mainlink_clk_src_ops.get_rate = edp_mainlink_get_rate;
-
-	edp_pixel_clk_ops = clk_ops_slave_div;
-	edp_pixel_clk_ops.prepare = edp_div_prepare;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, mdss_edp_pllcc_8974,
-					 ARRAY_SIZE(mdss_edp_pllcc_8974));
-	if (rc) {
-		pr_err("Clock register failed rc=%d\n", rc);
-		rc = -EPROBE_DEFER;
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c
deleted file mode 100644
index ff21be6..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c
+++ /dev/null
@@ -1,970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8994.h>
-
-#include "mdss-pll.h"
-#include "mdss-hdmi-pll.h"
-
-/* hdmi phy registers */
-
-#define HDMI_PHY_CMD_SIZE  68
-#define HDMI_PHY_CLK_SIZE  97
-
-/* Set to 1 for auto KVCO cal; set to 0 for fixed value */
-#define HDMI_PHY_AUTO_KVCO_CAL    1
-
-/* PLL REGISTERS */
-#define QSERDES_COM_SYS_CLK_CTRL			(0x000)
-#define QSERDES_COM_PLL_VCOTAIL_EN			(0x004)
-#define QSERDES_COM_CMN_MODE				(0x008)
-#define QSERDES_COM_IE_TRIM				(0x00C)
-#define QSERDES_COM_IP_TRIM				(0x010)
-#define QSERDES_COM_PLL_CNTRL				(0x014)
-#define QSERDES_COM_PLL_PHSEL_CONTROL			(0x018)
-#define QSERDES_COM_IPTAT_TRIM_VCCA_TX_SEL		(0x01C)
-#define QSERDES_COM_PLL_PHSEL_DC			(0x020)
-#define QSERDES_COM_PLL_IP_SETI				(0x024)
-#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL		(0x028)
-#define QSERDES_COM_PLL_BKG_KVCO_CAL_EN			(0x02C)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN			(0x030)
-#define QSERDES_COM_PLL_CP_SETI				(0x034)
-#define QSERDES_COM_PLL_IP_SETP				(0x038)
-#define QSERDES_COM_PLL_CP_SETP				(0x03C)
-#define QSERDES_COM_ATB_SEL1				(0x040)
-#define QSERDES_COM_ATB_SEL2				(0x044)
-#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND		(0x048)
-#define QSERDES_COM_RESETSM_CNTRL			(0x04C)
-#define QSERDES_COM_RESETSM_CNTRL2			(0x050)
-#define QSERDES_COM_RESETSM_CNTRL3			(0x054)
-#define QSERDES_COM_RESETSM_PLL_CAL_COUNT1		(0x058)
-#define QSERDES_COM_RESETSM_PLL_CAL_COUNT2		(0x05C)
-#define QSERDES_COM_DIV_REF1				(0x060)
-#define QSERDES_COM_DIV_REF2				(0x064)
-#define QSERDES_COM_KVCO_COUNT1				(0x068)
-#define QSERDES_COM_KVCO_COUNT2				(0x06C)
-#define QSERDES_COM_KVCO_CAL_CNTRL			(0x070)
-#define QSERDES_COM_KVCO_CODE				(0x074)
-#define QSERDES_COM_VREF_CFG1				(0x078)
-#define QSERDES_COM_VREF_CFG2				(0x07C)
-#define QSERDES_COM_VREF_CFG3				(0x080)
-#define QSERDES_COM_VREF_CFG4				(0x084)
-#define QSERDES_COM_VREF_CFG5				(0x088)
-#define QSERDES_COM_VREF_CFG6				(0x08C)
-#define QSERDES_COM_PLLLOCK_CMP1			(0x090)
-#define QSERDES_COM_PLLLOCK_CMP2			(0x094)
-#define QSERDES_COM_PLLLOCK_CMP3			(0x098)
-#define QSERDES_COM_PLLLOCK_CMP_EN			(0x09C)
-#define QSERDES_COM_BGTC				(0x0A0)
-#define QSERDES_COM_PLL_TEST_UPDN			(0x0A4)
-#define QSERDES_COM_PLL_VCO_TUNE			(0x0A8)
-#define QSERDES_COM_DEC_START1				(0x0AC)
-#define QSERDES_COM_PLL_AMP_OS				(0x0B0)
-#define QSERDES_COM_SSC_EN_CENTER			(0x0B4)
-#define QSERDES_COM_SSC_ADJ_PER1			(0x0B8)
-#define QSERDES_COM_SSC_ADJ_PER2			(0x0BC)
-#define QSERDES_COM_SSC_PER1				(0x0C0)
-#define QSERDES_COM_SSC_PER2				(0x0C4)
-#define QSERDES_COM_SSC_STEP_SIZE1			(0x0C8)
-#define QSERDES_COM_SSC_STEP_SIZE2			(0x0CC)
-#define QSERDES_COM_RES_CODE_UP				(0x0D0)
-#define QSERDES_COM_RES_CODE_DN				(0x0D4)
-#define QSERDES_COM_RES_CODE_UP_OFFSET			(0x0D8)
-#define QSERDES_COM_RES_CODE_DN_OFFSET			(0x0DC)
-#define QSERDES_COM_RES_CODE_START_SEG1			(0x0E0)
-#define QSERDES_COM_RES_CODE_START_SEG2			(0x0E4)
-#define QSERDES_COM_RES_CODE_CAL_CSR			(0x0E8)
-#define QSERDES_COM_RES_CODE				(0x0EC)
-#define QSERDES_COM_RES_TRIM_CONTROL			(0x0F0)
-#define QSERDES_COM_RES_TRIM_CONTROL2			(0x0F4)
-#define QSERDES_COM_RES_TRIM_EN_VCOCALDONE		(0x0F8)
-#define QSERDES_COM_FAUX_EN				(0x0FC)
-#define QSERDES_COM_DIV_FRAC_START1			(0x100)
-#define QSERDES_COM_DIV_FRAC_START2			(0x104)
-#define QSERDES_COM_DIV_FRAC_START3			(0x108)
-#define QSERDES_COM_DEC_START2				(0x10C)
-#define QSERDES_COM_PLL_RXTXEPCLK_EN			(0x110)
-#define QSERDES_COM_PLL_CRCTRL				(0x114)
-#define QSERDES_COM_PLL_CLKEPDIV			(0x118)
-#define QSERDES_COM_PLL_FREQUPDATE			(0x11C)
-#define QSERDES_COM_PLL_BKGCAL_TRIM_UP			(0x120)
-#define QSERDES_COM_PLL_BKGCAL_TRIM_DN			(0x124)
-#define QSERDES_COM_PLL_BKGCAL_TRIM_MUX			(0x128)
-#define QSERDES_COM_PLL_BKGCAL_VREF_CFG			(0x12C)
-#define QSERDES_COM_PLL_BKGCAL_DIV_REF1			(0x130)
-#define QSERDES_COM_PLL_BKGCAL_DIV_REF2			(0x134)
-#define QSERDES_COM_MUXADDR				(0x138)
-#define QSERDES_COM_LOW_POWER_RO_CONTROL		(0x13C)
-#define QSERDES_COM_POST_DIVIDER_CONTROL		(0x140)
-#define QSERDES_COM_HR_OCLK2_DIVIDER			(0x144)
-#define QSERDES_COM_HR_OCLK3_DIVIDER			(0x148)
-#define QSERDES_COM_PLL_VCO_HIGH			(0x14C)
-#define QSERDES_COM_RESET_SM				(0x150)
-#define QSERDES_COM_MUXVAL				(0x154)
-#define QSERDES_COM_CORE_RES_CODE_DN			(0x158)
-#define QSERDES_COM_CORE_RES_CODE_UP			(0x15C)
-#define QSERDES_COM_CORE_VCO_TUNE			(0x160)
-#define QSERDES_COM_CORE_VCO_TAIL			(0x164)
-#define QSERDES_COM_CORE_KVCO_CODE			(0x168)
-
-/* Tx Channel 0 REGISTERS */
-#define QSERDES_TX_L0_BIST_MODE_LANENO			(0x00)
-#define QSERDES_TX_L0_CLKBUF_ENABLE			(0x04)
-#define QSERDES_TX_L0_TX_EMP_POST1_LVL			(0x08)
-#define QSERDES_TX_L0_TX_DRV_LVL			(0x0C)
-#define QSERDES_TX_L0_RESET_TSYNC_EN			(0x10)
-#define QSERDES_TX_L0_LPB_EN				(0x14)
-#define QSERDES_TX_L0_RES_CODE_UP			(0x18)
-#define QSERDES_TX_L0_RES_CODE_DN			(0x1C)
-#define QSERDES_TX_L0_PERL_LENGTH1			(0x20)
-#define QSERDES_TX_L0_PERL_LENGTH2			(0x24)
-#define QSERDES_TX_L0_SERDES_BYP_EN_OUT			(0x28)
-#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
-#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN	(0x30)
-#define QSERDES_TX_L0_BIST_PATTERN1			(0x34)
-#define QSERDES_TX_L0_BIST_PATTERN2			(0x38)
-#define QSERDES_TX_L0_BIST_PATTERN3			(0x3C)
-#define QSERDES_TX_L0_BIST_PATTERN4			(0x40)
-#define QSERDES_TX_L0_BIST_PATTERN5			(0x44)
-#define QSERDES_TX_L0_BIST_PATTERN6			(0x48)
-#define QSERDES_TX_L0_BIST_PATTERN7			(0x4C)
-#define QSERDES_TX_L0_BIST_PATTERN8			(0x50)
-#define QSERDES_TX_L0_LANE_MODE				(0x54)
-#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE		(0x58)
-#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
-#define QSERDES_TX_L0_ATB_SEL1				(0x60)
-#define QSERDES_TX_L0_ATB_SEL2				(0x64)
-#define QSERDES_TX_L0_RCV_DETECT_LVL			(0x68)
-#define QSERDES_TX_L0_PRBS_SEED1			(0x6C)
-#define QSERDES_TX_L0_PRBS_SEED2			(0x70)
-#define QSERDES_TX_L0_PRBS_SEED3			(0x74)
-#define QSERDES_TX_L0_PRBS_SEED4			(0x78)
-#define QSERDES_TX_L0_RESET_GEN				(0x7C)
-#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN			(0x80)
-#define QSERDES_TX_L0_TX_INTERFACE_MODE			(0x84)
-#define QSERDES_TX_L0_PWM_CTRL				(0x88)
-#define QSERDES_TX_L0_PWM_DATA				(0x8C)
-#define QSERDES_TX_L0_PWM_ENC_DIV_CTRL			(0x90)
-#define QSERDES_TX_L0_VMODE_CTRL1			(0x94)
-#define QSERDES_TX_L0_VMODE_CTRL2			(0x98)
-#define QSERDES_TX_L0_VMODE_CTRL3			(0x9C)
-#define QSERDES_TX_L0_VMODE_CTRL4			(0xA0)
-#define QSERDES_TX_L0_VMODE_CTRL5			(0xA4)
-#define QSERDES_TX_L0_VMODE_CTRL6			(0xA8)
-#define QSERDES_TX_L0_VMODE_CTRL7			(0xAC)
-#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
-#define QSERDES_TX_L0_BIST_STATUS			(0xB4)
-#define QSERDES_TX_L0_BIST_ERROR_COUNT1			(0xB8)
-#define QSERDES_TX_L0_BIST_ERROR_COUNT2			(0xBC)
-#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV			(0xC0)
-#define QSERDES_TX_L0_PWM_DEC_STATUS			(0xC4)
-
-/* Tx Channel 1 REGISTERS */
-#define QSERDES_TX_L1_BIST_MODE_LANENO			(0x00)
-#define QSERDES_TX_L1_CLKBUF_ENABLE			(0x04)
-#define QSERDES_TX_L1_TX_EMP_POST1_LVL			(0x08)
-#define QSERDES_TX_L1_TX_DRV_LVL			(0x0C)
-#define QSERDES_TX_L1_RESET_TSYNC_EN			(0x10)
-#define QSERDES_TX_L1_LPB_EN				(0x14)
-#define QSERDES_TX_L1_RES_CODE_UP			(0x18)
-#define QSERDES_TX_L1_RES_CODE_DN			(0x1C)
-#define QSERDES_TX_L1_PERL_LENGTH1			(0x20)
-#define QSERDES_TX_L1_PERL_LENGTH2			(0x24)
-#define QSERDES_TX_L1_SERDES_BYP_EN_OUT			(0x28)
-#define QSERDES_TX_L1_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
-#define QSERDES_TX_L1_PARRATE_REC_DETECT_IDLE_EN	(0x30)
-#define QSERDES_TX_L1_BIST_PATTERN1			(0x34)
-#define QSERDES_TX_L1_BIST_PATTERN2			(0x38)
-#define QSERDES_TX_L1_BIST_PATTERN3			(0x3C)
-#define QSERDES_TX_L1_BIST_PATTERN4			(0x40)
-#define QSERDES_TX_L1_BIST_PATTERN5			(0x44)
-#define QSERDES_TX_L1_BIST_PATTERN6			(0x48)
-#define QSERDES_TX_L1_BIST_PATTERN7			(0x4C)
-#define QSERDES_TX_L1_BIST_PATTERN8			(0x50)
-#define QSERDES_TX_L1_LANE_MODE				(0x54)
-#define QSERDES_TX_L1_IDAC_CAL_LANE_MODE		(0x58)
-#define QSERDES_TX_L1_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
-#define QSERDES_TX_L1_ATB_SEL1				(0x60)
-#define QSERDES_TX_L1_ATB_SEL2				(0x64)
-#define QSERDES_TX_L1_RCV_DETECT_LVL			(0x68)
-#define QSERDES_TX_L1_PRBS_SEED1			(0x6C)
-#define QSERDES_TX_L1_PRBS_SEED2			(0x70)
-#define QSERDES_TX_L1_PRBS_SEED3			(0x74)
-#define QSERDES_TX_L1_PRBS_SEED4			(0x78)
-#define QSERDES_TX_L1_RESET_GEN				(0x7C)
-#define QSERDES_TX_L1_TRAN_DRVR_EMP_EN			(0x80)
-#define QSERDES_TX_L1_TX_INTERFACE_MODE			(0x84)
-#define QSERDES_TX_L1_PWM_CTRL				(0x88)
-#define QSERDES_TX_L1_PWM_DATA				(0x8C)
-#define QSERDES_TX_L1_PWM_ENC_DIV_CTRL			(0x90)
-#define QSERDES_TX_L1_VMODE_CTRL1			(0x94)
-#define QSERDES_TX_L1_VMODE_CTRL2			(0x98)
-#define QSERDES_TX_L1_VMODE_CTRL3			(0x9C)
-#define QSERDES_TX_L1_VMODE_CTRL4			(0xA0)
-#define QSERDES_TX_L1_VMODE_CTRL5			(0xA4)
-#define QSERDES_TX_L1_VMODE_CTRL6			(0xA8)
-#define QSERDES_TX_L1_VMODE_CTRL7			(0xAC)
-#define QSERDES_TX_L1_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
-#define QSERDES_TX_L1_BIST_STATUS			(0xB4)
-#define QSERDES_TX_L1_BIST_ERROR_COUNT1			(0xB8)
-#define QSERDES_TX_L1_BIST_ERROR_COUNT2			(0xBC)
-#define QSERDES_TX_L1_TX_ALOG_INTF_OBSV			(0xC0)
-#define QSERDES_TX_L1_PWM_DEC_STATUS			(0xC4)
-
-/* Tx Channel 2 REGISERS */
-#define QSERDES_TX_L2_BIST_MODE_LANENO			(0x00)
-#define QSERDES_TX_L2_CLKBUF_ENABLE			(0x04)
-#define QSERDES_TX_L2_TX_EMP_POST1_LVL			(0x08)
-#define QSERDES_TX_L2_TX_DRV_LVL			(0x0C)
-#define QSERDES_TX_L2_RESET_TSYNC_EN			(0x10)
-#define QSERDES_TX_L2_LPB_EN				(0x14)
-#define QSERDES_TX_L2_RES_CODE_UP			(0x18)
-#define QSERDES_TX_L2_RES_CODE_DN			(0x1C)
-#define QSERDES_TX_L2_PERL_LENGTH1			(0x20)
-#define QSERDES_TX_L2_PERL_LENGTH2			(0x24)
-#define QSERDES_TX_L2_SERDES_BYP_EN_OUT			(0x28)
-#define QSERDES_TX_L2_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
-#define QSERDES_TX_L2_PARRATE_REC_DETECT_IDLE_EN	(0x30)
-#define QSERDES_TX_L2_BIST_PATTERN1			(0x34)
-#define QSERDES_TX_L2_BIST_PATTERN2			(0x38)
-#define QSERDES_TX_L2_BIST_PATTERN3			(0x3C)
-#define QSERDES_TX_L2_BIST_PATTERN4			(0x40)
-#define QSERDES_TX_L2_BIST_PATTERN5			(0x44)
-#define QSERDES_TX_L2_BIST_PATTERN6			(0x48)
-#define QSERDES_TX_L2_BIST_PATTERN7			(0x4C)
-#define QSERDES_TX_L2_BIST_PATTERN8			(0x50)
-#define QSERDES_TX_L2_LANE_MODE				(0x54)
-#define QSERDES_TX_L2_IDAC_CAL_LANE_MODE		(0x58)
-#define QSERDES_TX_L2_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
-#define QSERDES_TX_L2_ATB_SEL1				(0x60)
-#define QSERDES_TX_L2_ATB_SEL2				(0x64)
-#define QSERDES_TX_L2_RCV_DETECT_LVL			(0x68)
-#define QSERDES_TX_L2_PRBS_SEED1			(0x6C)
-#define QSERDES_TX_L2_PRBS_SEED2			(0x70)
-#define QSERDES_TX_L2_PRBS_SEED3			(0x74)
-#define QSERDES_TX_L2_PRBS_SEED4			(0x78)
-#define QSERDES_TX_L2_RESET_GEN				(0x7C)
-#define QSERDES_TX_L2_TRAN_DRVR_EMP_EN			(0x80)
-#define QSERDES_TX_L2_TX_INTERFACE_MODE			(0x84)
-#define QSERDES_TX_L2_PWM_CTRL				(0x88)
-#define QSERDES_TX_L2_PWM_DATA				(0x8C)
-#define QSERDES_TX_L2_PWM_ENC_DIV_CTRL			(0x90)
-#define QSERDES_TX_L2_VMODE_CTRL1			(0x94)
-#define QSERDES_TX_L2_VMODE_CTRL2			(0x98)
-#define QSERDES_TX_L2_VMODE_CTRL3			(0x9C)
-#define QSERDES_TX_L2_VMODE_CTRL4			(0xA0)
-#define QSERDES_TX_L2_VMODE_CTRL5			(0xA4)
-#define QSERDES_TX_L2_VMODE_CTRL6			(0xA8)
-#define QSERDES_TX_L2_VMODE_CTRL7			(0xAC)
-#define QSERDES_TX_L2_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
-#define QSERDES_TX_L2_BIST_STATUS			(0xB4)
-#define QSERDES_TX_L2_BIST_ERROR_COUNT1			(0xB8)
-#define QSERDES_TX_L2_BIST_ERROR_COUNT2			(0xBC)
-#define QSERDES_TX_L2_TX_ALOG_INTF_OBSV			(0xC0)
-#define QSERDES_TX_L2_PWM_DEC_STATUS			(0xC4)
-
-/* Tx Channel 3 REGISERS */
-#define QSERDES_TX_L3_BIST_MODE_LANENO			(0x00)
-#define QSERDES_TX_L3_CLKBUF_ENABLE			(0x04)
-#define QSERDES_TX_L3_TX_EMP_POST1_LVL			(0x08)
-#define QSERDES_TX_L3_TX_DRV_LVL			(0x0C)
-#define QSERDES_TX_L3_RESET_TSYNC_EN			(0x10)
-#define QSERDES_TX_L3_LPB_EN				(0x14)
-#define QSERDES_TX_L3_RES_CODE_UP			(0x18)
-#define QSERDES_TX_L3_RES_CODE_DN			(0x1C)
-#define QSERDES_TX_L3_PERL_LENGTH1			(0x20)
-#define QSERDES_TX_L3_PERL_LENGTH2			(0x24)
-#define QSERDES_TX_L3_SERDES_BYP_EN_OUT			(0x28)
-#define QSERDES_TX_L3_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
-#define QSERDES_TX_L3_PARRATE_REC_DETECT_IDLE_EN	(0x30)
-#define QSERDES_TX_L3_BIST_PATTERN1			(0x34)
-#define QSERDES_TX_L3_BIST_PATTERN2			(0x38)
-#define QSERDES_TX_L3_BIST_PATTERN3			(0x3C)
-#define QSERDES_TX_L3_BIST_PATTERN4			(0x40)
-#define QSERDES_TX_L3_BIST_PATTERN5			(0x44)
-#define QSERDES_TX_L3_BIST_PATTERN6			(0x48)
-#define QSERDES_TX_L3_BIST_PATTERN7			(0x4C)
-#define QSERDES_TX_L3_BIST_PATTERN8			(0x50)
-#define QSERDES_TX_L3_LANE_MODE				(0x54)
-#define QSERDES_TX_L3_IDAC_CAL_LANE_MODE		(0x58)
-#define QSERDES_TX_L3_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
-#define QSERDES_TX_L3_ATB_SEL1				(0x60)
-#define QSERDES_TX_L3_ATB_SEL2				(0x64)
-#define QSERDES_TX_L3_RCV_DETECT_LVL			(0x68)
-#define QSERDES_TX_L3_PRBS_SEED1			(0x6C)
-#define QSERDES_TX_L3_PRBS_SEED2			(0x70)
-#define QSERDES_TX_L3_PRBS_SEED3			(0x74)
-#define QSERDES_TX_L3_PRBS_SEED4			(0x78)
-#define QSERDES_TX_L3_RESET_GEN				(0x7C)
-#define QSERDES_TX_L3_TRAN_DRVR_EMP_EN			(0x80)
-#define QSERDES_TX_L3_TX_INTERFACE_MODE			(0x84)
-#define QSERDES_TX_L3_PWM_CTRL				(0x88)
-#define QSERDES_TX_L3_PWM_DATA				(0x8C)
-#define QSERDES_TX_L3_PWM_ENC_DIV_CTRL			(0x90)
-#define QSERDES_TX_L3_VMODE_CTRL1			(0x94)
-#define QSERDES_TX_L3_VMODE_CTRL2			(0x98)
-#define QSERDES_TX_L3_VMODE_CTRL3			(0x9C)
-#define QSERDES_TX_L3_VMODE_CTRL4			(0xA0)
-#define QSERDES_TX_L3_VMODE_CTRL5			(0xA4)
-#define QSERDES_TX_L3_VMODE_CTRL6			(0xA8)
-#define QSERDES_TX_L3_VMODE_CTRL7			(0xAC)
-#define QSERDES_TX_L3_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
-#define QSERDES_TX_L3_BIST_STATUS			(0xB4)
-#define QSERDES_TX_L3_BIST_ERROR_COUNT1			(0xB8)
-#define QSERDES_TX_L3_BIST_ERROR_COUNT2			(0xBC)
-#define QSERDES_TX_L3_TX_ALOG_INTF_OBSV			(0xC0)
-#define QSERDES_TX_L3_PWM_DEC_STATUS			(0xC4)
-
-/* HDMI PHY REGISTERS */
-#define HDMI_PHY_CFG					(0x00)
-#define HDMI_PHY_PD_CTL					(0x04)
-#define HDMI_PHY_MODE					(0x08)
-#define HDMI_PHY_MISR_CLEAR				(0x0C)
-#define HDMI_PHY_TX0_TX1_BIST_CFG0			(0x10)
-#define HDMI_PHY_TX0_TX1_BIST_CFG1			(0x14)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0		(0x18)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1		(0x1C)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE2		(0x20)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE3		(0x24)
-#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE0		(0x28)
-#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE1		(0x2C)
-#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE2		(0x30)
-#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE3		(0x34)
-#define HDMI_PHY_TX2_TX3_BIST_CFG0			(0x38)
-#define HDMI_PHY_TX2_TX3_BIST_CFG1			(0x3C)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0		(0x40)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1		(0x44)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE2		(0x48)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE3		(0x4C)
-#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE0		(0x50)
-#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE1		(0x54)
-#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE2		(0x58)
-#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE3		(0x5C)
-#define HDMI_PHY_DEBUG_BUS_SEL				(0x60)
-#define HDMI_PHY_TXCAL_CFG0				(0x64)
-#define HDMI_PHY_TXCAL_CFG1				(0x68)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS0			(0x6C)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS1			(0x70)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS2			(0x74)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS0			(0x78)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS1			(0x7C)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS2			(0x80)
-#define HDMI_PHY_PRE_MISR_STATUS0			(0x84)
-#define HDMI_PHY_PRE_MISR_STATUS1			(0x88)
-#define HDMI_PHY_PRE_MISR_STATUS2			(0x8C)
-#define HDMI_PHY_PRE_MISR_STATUS3			(0x90)
-#define HDMI_PHY_POST_MISR_STATUS0			(0x94)
-#define HDMI_PHY_POST_MISR_STATUS1			(0x98)
-#define HDMI_PHY_POST_MISR_STATUS2			(0x9C)
-#define HDMI_PHY_POST_MISR_STATUS3			(0xA0)
-#define HDMI_PHY_STATUS					(0xA4)
-#define HDMI_PHY_MISC3_STATUS				(0xA8)
-#define HDMI_PHY_DEBUG_BUS0				(0xAC)
-#define HDMI_PHY_DEBUG_BUS1				(0xB0)
-#define HDMI_PHY_DEBUG_BUS2				(0xB4)
-#define HDMI_PHY_DEBUG_BUS3				(0xB8)
-#define HDMI_PHY_REVISION_ID0				(0xBC)
-#define HDMI_PHY_REVISION_ID1				(0xC0)
-#define HDMI_PHY_REVISION_ID2				(0xC4)
-#define HDMI_PHY_REVISION_ID3				(0xC8)
-
-#define HDMI_PLL_POLL_DELAY_US			50
-#define HDMI_PLL_POLL_TIMEOUT_US		125000
-#define HDMI_PLL_REF_CLK_RATE			192ULL
-#define HDMI_PLL_DIVISOR			10000000000ULL
-#define HDMI_PLL_DIVISOR_32			100000U
-#define HDMI_PLL_MIN_VCO_CLK			160000000ULL
-#define HDMI_PLL_TMDS_MAX			800000000U
-
-
-static int hdmi_20nm_pll_lock_status(struct mdss_pll_resources *io)
-{
-	u32 status;
-	int pll_locked = 0;
-	int phy_ready = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	/* Poll for C_READY and PHY READY */
-	pr_debug("%s: Waiting for PHY Ready\n", __func__);
-
-	/* poll for PLL ready status */
-	if (!readl_poll_timeout_atomic(
-		(io->pll_base + QSERDES_COM_RESET_SM),
-		status, ((status & BIT(6)) == 1),
-		HDMI_PLL_POLL_DELAY_US,
-		HDMI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("%s: C READY\n", __func__);
-		pll_locked = 1;
-	} else {
-		pr_debug("%s: C READY TIMEOUT\n", __func__);
-		pll_locked = 0;
-	}
-
-	/* poll for PHY ready status */
-	if (pll_locked && !readl_poll_timeout_atomic(
-		(io->phy_base + HDMI_PHY_STATUS),
-		status, ((status & BIT(0)) == 1),
-		HDMI_PLL_POLL_DELAY_US,
-		HDMI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("%s: PHY READY\n", __func__);
-		phy_ready = 1;
-	} else {
-		pr_debug("%s: PHY READY TIMEOUT\n", __func__);
-		phy_ready = 0;
-	}
-	mdss_pll_resource_enable(io, false);
-
-	return phy_ready;
-}
-
-static inline struct hdmi_pll_vco_clk *to_hdmi_20nm_vco_clk(struct clk *clk)
-{
-	return container_of(clk, struct hdmi_pll_vco_clk, c);
-}
-
-static inline u32 hdmi_20nm_phy_pll_vco_reg_val(struct hdmi_pll_cfg *pll_cfg,
-								u32 tmds_clk)
-{
-	u32 index = 0;
-
-	while (pll_cfg[index].vco_rate < HDMI_PLL_TMDS_MAX &&
-					pll_cfg[index].vco_rate < tmds_clk)
-		index++;
-	return pll_cfg[index].reg;
-}
-
-static void hdmi_20nm_phy_pll_calc_settings(struct mdss_pll_resources *io,
-			struct hdmi_pll_vco_clk *vco, u32 vco_clk, u32 tmds_clk)
-{
-	u32 val = 0;
-	u64 dec_start_val, frac_start_val, pll_lock_cmp;
-
-	/* Calculate decimal and fractional values */
-	dec_start_val = 1000000UL * vco_clk;
-	do_div(dec_start_val, HDMI_PLL_REF_CLK_RATE);
-	do_div(dec_start_val, 2U);
-	frac_start_val = dec_start_val;
-	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
-	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
-	frac_start_val *= HDMI_PLL_DIVISOR;
-	frac_start_val = dec_start_val - frac_start_val;
-	frac_start_val *= (u64)(2 << 19);
-	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
-	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
-	pll_lock_cmp = dec_start_val;
-	do_div(pll_lock_cmp, 10U);
-	pll_lock_cmp *= 0x800;
-	do_div(pll_lock_cmp, HDMI_PLL_DIVISOR_32);
-	do_div(pll_lock_cmp, HDMI_PLL_DIVISOR_32);
-	pll_lock_cmp -= 1U;
-	do_div(dec_start_val, HDMI_PLL_DIVISOR_32);
-	do_div(dec_start_val, HDMI_PLL_DIVISOR_32);
-
-	/* PLL loop bandwidth */
-	val = hdmi_20nm_phy_pll_vco_reg_val(vco->ip_seti, tmds_clk);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IP_SETI, val);
-	val = hdmi_20nm_phy_pll_vco_reg_val(vco->cp_seti, tmds_clk);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CP_SETI, val);
-	val = hdmi_20nm_phy_pll_vco_reg_val(vco->cp_setp, tmds_clk);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CP_SETP, val);
-	val = hdmi_20nm_phy_pll_vco_reg_val(vco->ip_setp, tmds_clk);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IP_SETP, val);
-	val = hdmi_20nm_phy_pll_vco_reg_val(vco->crctrl, tmds_clk);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CRCTRL, val);
-
-	/* PLL calibration */
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1,
-		0x80 | (frac_start_val & 0x7F));
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2,
-		0x80 | ((frac_start_val >> 7) & 0x7F));
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3,
-		0x40 | ((frac_start_val >> 14) & 0x3F));
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START1,
-		0x80 | (dec_start_val & 0x7F));
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START2,
-		0x02 | (0x01 & (dec_start_val >> 7)));
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP1,
-		pll_lock_cmp & 0xFF);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP2,
-		(pll_lock_cmp >> 8) & 0xFF);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP3,
-		(pll_lock_cmp >> 16) & 0xFF);
-}
-
-static u32 hdmi_20nm_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk)
-{
-	u32 tx_band = 0;
-
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	u64 vco_clk = tmds_clk;
-
-	while (vco_clk > 0 && vco_clk < HDMI_PLL_MIN_VCO_CLK) {
-		tx_band++;
-		vco_clk *= 2;
-	}
-
-	/* Initially shut down PHY */
-	pr_debug("%s: Disabling PHY\n", __func__);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
-	udelay(1000);
-	/* memory barrier */
-	mb();
-
-	/* power-up and recommended common block settings */
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x01);
-	udelay(1000);
-	/* memory barrier */
-	mb();
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x07);
-	udelay(1000);
-	/* memory barrier */
-	mb();
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x05);
-	udelay(1000);
-	/* memory barrier */
-	mb();
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x42);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_VCOTAIL_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MODE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IE_TRIM, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IP_TRIM, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CNTRL, 0x07);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_PHSEL_CONTROL, 0x04);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IPTAT_TRIM_VCCA_TX_SEL, 0xA0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_PHSEL_DC, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_IN_SYNC_SEL, 0x00);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_BKG_KVCO_CAL_EN, 0x00);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0F);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL_TXBAND,
-		0x4A + (0x10 * tx_band));
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG1, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG2, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BGTC, 0xFF);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_TEST_UPDN, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_VCO_TUNE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_AMP_OS, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_UP, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_DN, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_CODE,
-		tmds_clk > 300000000 ? 0x3F : 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_COUNT1,
-		tmds_clk > 300000000 ? 0x00 : 0x8A);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_REF1, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_REF2,
-		tmds_clk > 300000000 ? 0x00 : 0x01);
-
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_CAL_CNTRL,
-		tmds_clk > 300000000 ? 0x00 : 0x1F);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG3,
-		tmds_clk > 300000000 ? 0x00 : 0x40);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG4, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG5, 0x10);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL,
-		tmds_clk > 300000000 ? 0x80 : 0x00);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_CAL_CSR, 0x77);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_TRIM_EN_VCOCALDONE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RXTXEPCLK_EN, 0x0C);
-
-	hdmi_20nm_phy_pll_calc_settings(io, vco, vco_clk, tmds_clk);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP_EN, 0x11);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CNTRL, 0x07);
-
-	/* Resistor calibration linear search */
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_START_SEG1, 0x60);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_START_SEG2, 0x60);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_TRIM_CONTROL, 0x01);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL2, 0x07);
-
-	udelay(1000);
-	/* memory barrier */
-	mb();
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, tx_band);
-
-	/* TX lanes (transceivers) power-up sequence */
-	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0x600, QSERDES_TX_L1_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0xA00, QSERDES_TX_L3_CLKBUF_ENABLE, 0x03);
-
-	MDSS_PLL_REG_W(io->pll_base + 0x400,
-		QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0x600,
-		QSERDES_TX_L1_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0x800,
-		QSERDES_TX_L2_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + 0xA00,
-		QSERDES_TX_L3_TRAN_DRVR_EMP_EN, 0x03);
-
-	MDSS_PLL_REG_W(io->pll_base + 0x400,
-		QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
-	MDSS_PLL_REG_W(io->pll_base + 0x600,
-		QSERDES_TX_L1_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
-	MDSS_PLL_REG_W(io->pll_base + 0x800,
-		QSERDES_TX_L2_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
-	MDSS_PLL_REG_W(io->pll_base + 0xA00,
-		QSERDES_TX_L3_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
-
-	MDSS_PLL_REG_W(io->pll_base + 0x400,
-		QSERDES_TX_L0_TX_EMP_POST1_LVL, 0x0000002F);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x000000AF);
-
-	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL1, 0x08);
-	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL1, 0x09);
-	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL5, 0xA0);
-	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL6, 0x01);
-	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL5, 0xA0);
-	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL6, 0x01);
-
-	MDSS_PLL_REG_W(io->pll_base + 0x400,
-		QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + 0x400,
-		QSERDES_TX_L0_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + 0x600,
-		QSERDES_TX_L1_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + 0x600,
-		QSERDES_TX_L1_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + 0x800,
-		QSERDES_TX_L2_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + 0x800,
-		QSERDES_TX_L2_TX_INTERFACE_MODE, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + 0xA00,
-		QSERDES_TX_L3_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + 0xA00,
-		QSERDES_TX_L3_TX_INTERFACE_MODE, 0x00);
-
-	return 0;
-}
-
-static int hdmi_20nm_vco_enable(struct clk *c)
-{
-	u32 ready_poll;
-	u32 time_out_loop;
-	/* Hardware recommended timeout iterator */
-	u32 time_out_max = 50000;
-
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000000);
-	udelay(100);
-	/* memory barrier */
-	mb();
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000003);
-	udelay(100);
-	/* memory barrier */
-	mb();
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000009);
-	udelay(100);
-	/* memory barrier */
-	mb();
-
-	/* Poll for C_READY and PHY READY */
-	pr_debug("%s: Waiting for PHY Ready\n", __func__);
-	time_out_loop = 0;
-	do {
-		ready_poll = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_RESET_SM);
-		time_out_loop++;
-		udelay(10);
-	} while (((ready_poll  & (1 << 6)) == 0) &&
-		(time_out_loop < time_out_max));
-	if (time_out_loop >= time_out_max)
-		pr_err("%s: ERROR: TIMED OUT BEFORE C READY\n", __func__);
-	else
-		pr_debug("%s: C READY\n", __func__);
-
-	/* Poll for PHY READY */
-	pr_debug("%s: Waiting for PHY Ready\n", __func__);
-	time_out_loop = 0;
-	do {
-		ready_poll = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
-		time_out_loop++;
-		udelay(1);
-	} while (((ready_poll & 0x1) == 0) && (time_out_loop < time_out_max));
-
-	if (time_out_loop >= time_out_max)
-		pr_err("%s: TIMED OUT BEFORE PHY READY\n", __func__);
-	else
-		pr_debug("%s: HDMI PHY READY\n", __func__);
-
-	io->pll_on = true;
-
-	return 0;
-}
-
-
-static int hdmi_20nm_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	void __iomem		*pll_base;
-	void __iomem		*phy_base;
-	unsigned int set_power_dwn = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	if (io->pll_on)
-		set_power_dwn = 1;
-
-	pll_base = io->pll_base;
-	phy_base = io->phy_base;
-
-	pr_debug("rate=%ld\n", rate);
-
-	hdmi_20nm_phy_pll_set_clk_rate(c, rate);
-
-	mdss_pll_resource_enable(io, false);
-
-	if (set_power_dwn)
-		hdmi_20nm_vco_enable(c);
-
-	vco->rate = rate;
-	vco->rate_set = true;
-
-	return 0;
-}
-
-static unsigned long hdmi_20nm_vco_get_rate(struct clk *c)
-{
-	unsigned long freq = 0;
-	int rc;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (is_gdsc_disabled(io))
-		return 0;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	mdss_pll_resource_enable(io, false);
-
-	return freq;
-}
-
-static long hdmi_20nm_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-
-	pr_debug("rrate=%ld\n", rrate);
-
-	return rrate;
-}
-
-static int hdmi_20nm_vco_prepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	int ret = 0;
-
-	pr_debug("rate=%ld\n", vco->rate);
-
-	if (!vco->rate_set && vco->rate)
-		ret = hdmi_20nm_vco_set_rate(c, vco->rate);
-
-	if (!ret) {
-		ret = mdss_pll_resource_enable(io, true);
-		if (ret)
-			pr_err("pll resource can't be enabled\n");
-	}
-
-	return ret;
-}
-
-static void hdmi_20nm_vco_unprepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	vco->rate_set = false;
-
-	if (!io) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!io->pll_on &&
-		mdss_pll_resource_enable(io, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-
-	io->handoff_resources = false;
-	mdss_pll_resource_enable(io, false);
-	io->pll_on = false;
-}
-
-static enum handoff hdmi_20nm_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (is_gdsc_disabled(io))
-		return HANDOFF_DISABLED_CLK;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	io->handoff_resources = true;
-
-	if (hdmi_20nm_pll_lock_status(io)) {
-		io->pll_on = true;
-		c->rate = hdmi_20nm_vco_get_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		io->handoff_resources = false;
-		mdss_pll_resource_enable(io, false);
-	}
-
-	pr_debug("done, ret=%d\n", ret);
-	return ret;
-}
-
-static const struct clk_ops hdmi_20nm_vco_clk_ops = {
-	.enable = hdmi_20nm_vco_enable,
-	.set_rate = hdmi_20nm_vco_set_rate,
-	.get_rate = hdmi_20nm_vco_get_rate,
-	.round_rate = hdmi_20nm_vco_round_rate,
-	.prepare = hdmi_20nm_vco_prepare,
-	.unprepare = hdmi_20nm_vco_unprepare,
-	.handoff = hdmi_20nm_vco_handoff,
-};
-
-static struct hdmi_pll_vco_clk hdmi_20nm_vco_clk = {
-	.ip_seti = (struct hdmi_pll_cfg[]){
-		{550890000, 0x03},
-		{589240000, 0x07},
-		{689290000, 0x03},
-		{727600000, 0x07},
-		{HDMI_PLL_TMDS_MAX, 0x03},
-	},
-	.cp_seti = (struct hdmi_pll_cfg[]){
-		{34440000, 0x3F},
-		{36830000, 0x2F},
-		{68870000, 0x3F},
-		{73660000, 0x2F},
-		{137730000, 0x3F},
-		{147310000, 0x2F},
-		{275450000, 0x3F},
-		{294620000, 0x2F},
-		{344650000, 0x3F},
-		{363800000, 0x2F},
-		{477960000, 0x3F},
-		{512530000, 0x2F},
-		{550890000, 0x1F},
-		{589240000, 0x2F},
-		{630900000, 0x3F},
-		{650590000, 0x2F},
-		{689290000, 0x1F},
-		{727600000, 0x2F},
-		{HDMI_PLL_TMDS_MAX, 0x3F},
-	},
-	.ip_setp = (struct hdmi_pll_cfg[]){
-		{497340000, 0x03},
-		{512530000, 0x07},
-		{535680000, 0x03},
-		{550890000, 0x07},
-		{574060000, 0x03},
-		{727600000, 0x07},
-		{HDMI_PLL_TMDS_MAX, 0x03},
-	},
-	.cp_setp = (struct hdmi_pll_cfg[]){
-		{36830000, 0x1F},
-		{40010000, 0x17},
-		{73660000, 0x1F},
-		{80000000, 0x17},
-		{147310000, 0x1F},
-		{160010000, 0x17},
-		{294620000, 0x1F},
-		{363800000, 0x17},
-		{497340000, 0x0F},
-		{512530000, 0x1F},
-		{535680000, 0x0F},
-		{550890000, 0x1F},
-		{574060000, 0x0F},
-		{589240000, 0x1F},
-		{727600000, 0x17},
-		{HDMI_PLL_TMDS_MAX, 0x07},
-	},
-	.crctrl = (struct hdmi_pll_cfg[]){
-		{40010000, 0xBB},
-		{40030000, 0x77},
-		{80000000, 0xBB},
-		{80060000, 0x77},
-		{160010000, 0xBB},
-		{160120000, 0x77},
-		{772930000, 0xBB},
-		{HDMI_PLL_TMDS_MAX, 0xFF},
-	},
-	.c = {
-		.dbg_name = "hdmi_20nm_vco_clk",
-		.ops = &hdmi_20nm_vco_clk_ops,
-		CLK_INIT(hdmi_20nm_vco_clk.c),
-	},
-};
-
-static struct clk_lookup hdmipllcc_8994[] = {
-	CLK_LIST(hdmi_20nm_vco_clk),
-};
-
-int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP;
-
-	/* Set client data for vco, mux and div clocks */
-	hdmi_20nm_vco_clk.priv = pll_res;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8994,
-						 ARRAY_SIZE(hdmipllcc_8994));
-	if (rc) {
-		pr_err("Clock register failed rc=%d\n", rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		pr_debug("%s: SUCCESS\n", __func__);
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c
deleted file mode 100644
index 5d5cd54..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c
+++ /dev/null
@@ -1,1091 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-hdmi-pll.h"
-
-/* hdmi phy registers */
-#define HDMI_PHY_ANA_CFG0			(0x0000)
-#define HDMI_PHY_ANA_CFG1			(0x0004)
-#define HDMI_PHY_ANA_CFG2			(0x0008)
-#define HDMI_PHY_ANA_CFG3			(0x000C)
-#define HDMI_PHY_PD_CTRL0			(0x0010)
-#define HDMI_PHY_PD_CTRL1			(0x0014)
-#define HDMI_PHY_GLB_CFG			(0x0018)
-#define HDMI_PHY_DCC_CFG0			(0x001C)
-#define HDMI_PHY_DCC_CFG1			(0x0020)
-#define HDMI_PHY_TXCAL_CFG0			(0x0024)
-#define HDMI_PHY_TXCAL_CFG1			(0x0028)
-#define HDMI_PHY_TXCAL_CFG2			(0x002C)
-#define HDMI_PHY_TXCAL_CFG3			(0x0030)
-#define HDMI_PHY_BIST_CFG0			(0x0034)
-#define HDMI_PHY_BIST_CFG1			(0x0038)
-#define HDMI_PHY_BIST_PATN0			(0x003C)
-#define HDMI_PHY_BIST_PATN1			(0x0040)
-#define HDMI_PHY_BIST_PATN2			(0x0044)
-#define HDMI_PHY_BIST_PATN3			(0x0048)
-#define HDMI_PHY_STATUS				(0x005C)
-
-/* hdmi phy unified pll registers */
-#define HDMI_UNI_PLL_REFCLK_CFG			(0x0000)
-#define HDMI_UNI_PLL_POSTDIV1_CFG		(0x0004)
-#define HDMI_UNI_PLL_CHFPUMP_CFG		(0x0008)
-#define HDMI_UNI_PLL_VCOLPF_CFG			(0x000C)
-#define HDMI_UNI_PLL_VREG_CFG			(0x0010)
-#define HDMI_UNI_PLL_PWRGEN_CFG			(0x0014)
-#define HDMI_UNI_PLL_GLB_CFG			(0x0020)
-#define HDMI_UNI_PLL_POSTDIV2_CFG		(0x0024)
-#define HDMI_UNI_PLL_POSTDIV3_CFG		(0x0028)
-#define HDMI_UNI_PLL_LPFR_CFG			(0x002C)
-#define HDMI_UNI_PLL_LPFC1_CFG			(0x0030)
-#define HDMI_UNI_PLL_LPFC2_CFG			(0x0034)
-#define HDMI_UNI_PLL_SDM_CFG0			(0x0038)
-#define HDMI_UNI_PLL_SDM_CFG1			(0x003C)
-#define HDMI_UNI_PLL_SDM_CFG2			(0x0040)
-#define HDMI_UNI_PLL_SDM_CFG3			(0x0044)
-#define HDMI_UNI_PLL_SDM_CFG4			(0x0048)
-#define HDMI_UNI_PLL_SSC_CFG0			(0x004C)
-#define HDMI_UNI_PLL_SSC_CFG1			(0x0050)
-#define HDMI_UNI_PLL_SSC_CFG2			(0x0054)
-#define HDMI_UNI_PLL_SSC_CFG3			(0x0058)
-#define HDMI_UNI_PLL_LKDET_CFG0			(0x005C)
-#define HDMI_UNI_PLL_LKDET_CFG1			(0x0060)
-#define HDMI_UNI_PLL_LKDET_CFG2			(0x0064)
-#define HDMI_UNI_PLL_CAL_CFG0			(0x006C)
-#define HDMI_UNI_PLL_CAL_CFG1			(0x0070)
-#define HDMI_UNI_PLL_CAL_CFG2			(0x0074)
-#define HDMI_UNI_PLL_CAL_CFG3			(0x0078)
-#define HDMI_UNI_PLL_CAL_CFG4			(0x007C)
-#define HDMI_UNI_PLL_CAL_CFG5			(0x0080)
-#define HDMI_UNI_PLL_CAL_CFG6			(0x0084)
-#define HDMI_UNI_PLL_CAL_CFG7			(0x0088)
-#define HDMI_UNI_PLL_CAL_CFG8			(0x008C)
-#define HDMI_UNI_PLL_CAL_CFG9			(0x0090)
-#define HDMI_UNI_PLL_CAL_CFG10			(0x0094)
-#define HDMI_UNI_PLL_CAL_CFG11			(0x0098)
-#define HDMI_UNI_PLL_STATUS			(0x00C0)
-
-#define HDMI_PLL_POLL_DELAY_US			50
-#define HDMI_PLL_POLL_TIMEOUT_US		500
-
-static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk)
-{
-	return container_of(clk, struct hdmi_pll_vco_clk, c);
-}
-
-static void hdmi_vco_disable(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-
-	if (!hdmi_pll_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!hdmi_pll_res->pll_on &&
-		mdss_pll_resource_enable(hdmi_pll_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0);
-	udelay(5);
-	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_GLB_CFG, 0x0);
-
-	hdmi_pll_res->handoff_resources = false;
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-	hdmi_pll_res->pll_on = false;
-} /* hdmi_vco_disable */
-
-static int hdmi_vco_enable(struct clk *c)
-{
-	u32 status;
-	u32 delay_us, timeout_us;
-	int rc;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	/* Global Enable */
-	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_GLB_CFG, 0x81);
-	/* Power up power gen */
-	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_PD_CTRL0, 0x00);
-	udelay(350);
-
-	/* PLL Power-Up */
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-	udelay(5);
-	/* Power up PLL LDO */
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x03);
-	udelay(350);
-
-	/* PLL Power-Up */
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-	udelay(350);
-
-	/* poll for PLL ready status */
-	delay_us = 100;
-	timeout_us = 2000;
-	if (readl_poll_timeout_atomic(
-		(hdmi_pll_res->pll_base + HDMI_UNI_PLL_STATUS),
-		status, ((status & BIT(0)) == 1), delay_us, timeout_us)) {
-		pr_err("hdmi phy pll status=%x failed to Lock\n", status);
-		hdmi_vco_disable(c);
-		mdss_pll_resource_enable(hdmi_pll_res, false);
-		return -EINVAL;
-	}
-	pr_debug("hdmi phy pll is locked\n");
-
-	udelay(350);
-	/* poll for PHY ready status */
-	delay_us = 100;
-	timeout_us = 2000;
-	if (readl_poll_timeout_atomic(
-		(hdmi_pll_res->phy_base + HDMI_PHY_STATUS),
-		status, ((status & BIT(0)) == 1), delay_us, timeout_us)) {
-		pr_err("hdmi phy status=%x failed to Lock\n", status);
-		hdmi_vco_disable(c);
-		mdss_pll_resource_enable(hdmi_pll_res, false);
-		return -EINVAL;
-	}
-	hdmi_pll_res->pll_on = true;
-	pr_debug("hdmi phy is locked\n");
-
-	return 0;
-} /* hdmi_vco_enable */
-
-
-static void hdmi_phy_pll_calculator(u32 vco_freq,
-				struct mdss_pll_resources *hdmi_pll_res)
-{
-	u32 ref_clk             = 19200000;
-	u32 sdm_mode            = 1;
-	u32 ref_clk_multiplier  = sdm_mode == 1 ? 2 : 1;
-	u32 int_ref_clk_freq    = ref_clk * ref_clk_multiplier;
-	u32 fbclk_pre_div       = 1;
-	u32 ssc_mode            = 0;
-	u32 kvco                = 270;
-	u32 vdd                 = 95;
-	u32 ten_power_six       = 1000000;
-	u32 ssc_ds_ppm          = ssc_mode ? 5000 : 0;
-	u32 sdm_res             = 16;
-	u32 ssc_tri_step        = 32;
-	u32 ssc_freq            = 2;
-	u64 ssc_ds              = vco_freq * ssc_ds_ppm;
-	u32 div_in_freq         = vco_freq / fbclk_pre_div;
-	u64 dc_offset           = (div_in_freq / int_ref_clk_freq - 1) *
-					ten_power_six * 10;
-	u32 ssc_kdiv            = (int_ref_clk_freq / ssc_freq) -
-					ten_power_six;
-	u64 sdm_freq_seed;
-	u32 ssc_tri_inc;
-	u64 fb_div_n;
-	void __iomem		*pll_base = hdmi_pll_res->pll_base;
-	u32 val;
-
-	pr_debug("vco_freq = %u\n", vco_freq);
-
-	do_div(ssc_ds, (u64)ten_power_six);
-
-	fb_div_n = (u64)div_in_freq * (u64)ten_power_six * 10;
-	do_div(fb_div_n, int_ref_clk_freq);
-
-	sdm_freq_seed = ((fb_div_n - dc_offset - ten_power_six * 10) *
-				(1 << sdm_res)  * 10) + 5;
-	do_div(sdm_freq_seed, ((u64)ten_power_six * 100));
-
-	ssc_tri_inc = (u32)ssc_ds;
-	ssc_tri_inc = (ssc_tri_inc / int_ref_clk_freq) * (1 << 16) /
-			ssc_tri_step;
-
-	val = (ref_clk_multiplier == 2 ? 1 : 0) +
-		((fbclk_pre_div == 2 ? 1 : 0) * 16);
-	pr_debug("HDMI_UNI_PLL_REFCLK_CFG = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, val);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CHFPUMP_CFG, 0x02);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_PWRGEN_CFG, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-
-	do_div(dc_offset, (u64)ten_power_six * 10);
-	val = sdm_mode == 0 ? 64 + dc_offset : 0;
-	pr_debug("HDMI_UNI_PLL_SDM_CFG0 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, val);
-
-	val = 64 + dc_offset;
-	pr_debug("HDMI_UNI_PLL_SDM_CFG1 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, val);
-
-	val = sdm_freq_seed & 0xFF;
-	pr_debug("HDMI_UNI_PLL_SDM_CFG2 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, val);
-
-	val = (sdm_freq_seed >> 8) & 0xFF;
-	pr_debug("HDMI_UNI_PLL_SDM_CFG3 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, val);
-
-	val = (sdm_freq_seed >> 16) & 0xFF;
-	pr_debug("HDMI_UNI_PLL_SDM_CFG4 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, val);
-
-	val = (ssc_mode == 0 ? 128 : 0) + (ssc_kdiv / ten_power_six);
-	pr_debug("HDMI_UNI_PLL_SSC_CFG0 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG0, val);
-
-	val = ssc_tri_inc & 0xFF;
-	pr_debug("HDMI_UNI_PLL_SSC_CFG1 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG1, val);
-
-	val = (ssc_tri_inc >> 8) & 0xFF;
-	pr_debug("HDMI_UNI_PLL_SSC_CFG2 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG2, val);
-
-	pr_debug("HDMI_UNI_PLL_SSC_CFG3 = 0x%x\n", ssc_tri_step);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG3, ssc_tri_step);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG0, 0x0A);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG1, 0x04);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG3, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG4, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG5, 0x00);
-
-	val = (kvco * vdd * 10000) / 6;
-	val += 500000;
-	val /= ten_power_six;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG6 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG6, val & 0xFF);
-
-	val = (kvco * vdd * 10000) / 6;
-	val -= ten_power_six;
-	val /= ten_power_six;
-	val = (val >> 8) & 0xFF;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG7 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG7, val);
-
-	val = (ref_clk * 5) / ten_power_six;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG8 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, val);
-
-	val = ((ref_clk * 5) / ten_power_six) >> 8;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG9 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, val);
-
-	vco_freq /= ten_power_six;
-	val = vco_freq & 0xFF;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG10 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, val);
-
-	val = vco_freq >> 8;
-	pr_debug("HDMI_UNI_PLL_CAL_CFG11 = 0x%x\n", val);
-	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, val);
-} /* hdmi_phy_pll_calculator */
-
-static int hdmi_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-	void __iomem		*pll_base;
-	void __iomem		*phy_base;
-	unsigned int set_power_dwn = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	if (hdmi_pll_res->pll_on) {
-		hdmi_vco_disable(c);
-		set_power_dwn = 1;
-	}
-
-	pll_base = hdmi_pll_res->pll_base;
-	phy_base = hdmi_pll_res->phy_base;
-
-	pr_debug("rate=%ld\n", rate);
-
-	switch (rate) {
-	case 0:
-		break;
-
-	case 756000000:
-		/* 640x480p60 */
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x52);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xB0);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xF4);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 810000000:
-		/* 576p50/576i50 case */
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x54);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x18);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x2A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x03);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 810900000:
-		/* 480p60/480i60 case */
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x54);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x66);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x1D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x2A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x03);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 650000000:
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x4F);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x55);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xED);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x8A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 742500000:
-		/*
-		 * 720p60/720p50/1080i60/1080i50
-		 * 1080p24/1080p30/1080p25 case
-		 */
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x52);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x56);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xE6);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 1080000000:
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x5B);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x38);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 1342500000:
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x36);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x61);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xF6);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x3E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x05);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x05);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x11);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	case 1485000000:
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x65);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xAC);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xCD);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x05);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x06);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		udelay(200);
-		break;
-
-	default:
-		pr_debug("Use pll settings calculator for rate=%ld\n", rate);
-
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
-		hdmi_phy_pll_calculator(rate, hdmi_pll_res);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
-		udelay(50);
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
-
-		if (rate < 825000000) {
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x01);
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
-		} else if (rate >= 825000000 && rate < 1342500000) {
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x05);
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
-		} else {
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x06);
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
-		}
-
-		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
-
-		if (rate < 825000000)
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x01);
-		else
-			MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x00);
-
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN0, 0x62);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN1, 0x03);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN2, 0x69);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN3, 0x02);
-
-		udelay(200);
-
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_CFG1, 0x00);
-		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_CFG0, 0x00);
-	}
-
-	/* Make sure writes complete before disabling iface clock */
-	mb();
-
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	if (set_power_dwn)
-		hdmi_vco_enable(c);
-
-	vco->rate = rate;
-	vco->rate_set = true;
-
-	return 0;
-} /* hdmi_pll_set_rate */
-
-/* HDMI PLL DIV CLK */
-
-static unsigned long hdmi_vco_get_rate(struct clk *c)
-{
-	unsigned long freq = 0;
-	int rc;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(hdmi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	freq = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-			HDMI_UNI_PLL_CAL_CFG11) << 8 |
-		MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_UNI_PLL_CAL_CFG10);
-
-	switch (freq) {
-	case 742:
-		freq = 742500000;
-		break;
-	case 810:
-		if (MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-					HDMI_UNI_PLL_SDM_CFG3) == 0x18)
-			freq = 810000000;
-		else
-			freq = 810900000;
-		break;
-	case 1342:
-		freq = 1342500000;
-		break;
-	default:
-		freq *= 1000000;
-	}
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return freq;
-}
-
-static long hdmi_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	pr_debug("rrate=%ld\n", rrate);
-
-	return rrate;
-}
-
-static int hdmi_vco_prepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	int ret = 0;
-
-	pr_debug("rate=%ld\n", vco->rate);
-
-	if (!vco->rate_set && vco->rate)
-		ret = hdmi_vco_set_rate(c, vco->rate);
-
-	return ret;
-}
-
-static void hdmi_vco_unprepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-
-	vco->rate_set = false;
-}
-
-static int hdmi_pll_lock_status(struct mdss_pll_resources *hdmi_pll_res)
-{
-	u32 status;
-	int pll_locked = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic(
-			(hdmi_pll_res->phy_base + HDMI_PHY_STATUS),
-			status, ((status & BIT(0)) == 1),
-			HDMI_PLL_POLL_DELAY_US,
-			HDMI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("HDMI PLL status=%x failed to Lock\n", status);
-		pll_locked = 0;
-	} else {
-		pll_locked = 1;
-	}
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return pll_locked;
-}
-
-static enum handoff hdmi_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-
-	if (is_gdsc_disabled(hdmi_pll_res))
-		return HANDOFF_DISABLED_CLK;
-
-	if (mdss_pll_resource_enable(hdmi_pll_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	hdmi_pll_res->handoff_resources = true;
-
-	if (hdmi_pll_lock_status(hdmi_pll_res)) {
-		hdmi_pll_res->pll_on = true;
-		c->rate = hdmi_vco_get_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		hdmi_pll_res->handoff_resources = false;
-		mdss_pll_resource_enable(hdmi_pll_res, false);
-	}
-
-	pr_debug("done, ret=%d\n", ret);
-	return ret;
-}
-
-static const struct clk_ops hdmi_vco_clk_ops = {
-	.enable = hdmi_vco_enable,
-	.set_rate = hdmi_vco_set_rate,
-	.get_rate = hdmi_vco_get_rate,
-	.round_rate = hdmi_vco_round_rate,
-	.prepare = hdmi_vco_prepare,
-	.unprepare = hdmi_vco_unprepare,
-	.disable = hdmi_vco_disable,
-	.handoff = hdmi_vco_handoff,
-};
-
-static struct hdmi_pll_vco_clk hdmi_vco_clk = {
-	.min_rate = 600000000,
-	.max_rate = 1800000000,
-	.c = {
-		.dbg_name = "hdmi_vco_clk",
-		.ops = &hdmi_vco_clk_ops,
-		CLK_INIT(hdmi_vco_clk.c),
-	},
-};
-
-struct div_clk hdmipll_div1_clk = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 1,
-	},
-	.c = {
-		.parent = &hdmi_vco_clk.c,
-		.dbg_name = "hdmipll_div1_clk",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(hdmipll_div1_clk.c),
-	},
-};
-
-struct div_clk hdmipll_div2_clk = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.c = {
-		.parent = &hdmi_vco_clk.c,
-		.dbg_name = "hdmipll_div2_clk",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(hdmipll_div2_clk.c),
-	},
-};
-
-struct div_clk hdmipll_div4_clk = {
-	.data = {
-		.div = 4,
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.c = {
-		.parent = &hdmi_vco_clk.c,
-		.dbg_name = "hdmipll_div4_clk",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(hdmipll_div4_clk.c),
-	},
-};
-
-struct div_clk hdmipll_div6_clk = {
-	.data = {
-		.div = 6,
-		.min_div = 6,
-		.max_div = 6,
-	},
-	.c = {
-		.parent = &hdmi_vco_clk.c,
-		.dbg_name = "hdmipll_div6_clk",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(hdmipll_div6_clk.c),
-	},
-};
-
-static int hdmipll_set_mux_sel(struct mux_clk *clk, int mux_sel)
-{
-	struct mdss_pll_resources *hdmi_pll_res = clk->priv;
-	int rc;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	pr_debug("mux_sel=%d\n", mux_sel);
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base,
-				HDMI_UNI_PLL_POSTDIV1_CFG, mux_sel);
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return 0;
-}
-
-static int hdmipll_get_mux_sel(struct mux_clk *clk)
-{
-	int rc;
-	int mux_sel = 0;
-	struct mdss_pll_resources *hdmi_pll_res = clk->priv;
-
-	if (is_gdsc_disabled(hdmi_pll_res))
-		return 0;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	mux_sel = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-				HDMI_UNI_PLL_POSTDIV1_CFG);
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-	mux_sel &= 0x03;
-	pr_debug("mux_sel=%d\n", mux_sel);
-
-	return mux_sel;
-}
-
-static struct clk_mux_ops hdmipll_mux_ops = {
-	.set_mux_sel = hdmipll_set_mux_sel,
-	.get_mux_sel = hdmipll_get_mux_sel,
-};
-
-static const struct clk_ops hdmi_mux_ops;
-
-static int hdmi_mux_prepare(struct clk *c)
-{
-	int ret = 0;
-
-	if (c && c->ops && c->ops->set_rate)
-		ret = c->ops->set_rate(c, c->rate);
-
-	return ret;
-}
-
-struct mux_clk hdmipll_mux_clk = {
-	MUX_SRC_LIST(
-		{ &hdmipll_div1_clk.c, 0 },
-		{ &hdmipll_div2_clk.c, 1 },
-		{ &hdmipll_div4_clk.c, 2 },
-		{ &hdmipll_div6_clk.c, 3 },
-	),
-	.ops = &hdmipll_mux_ops,
-	.c = {
-		.parent = &hdmipll_div1_clk.c,
-		.dbg_name = "hdmipll_mux_clk",
-		.ops = &hdmi_mux_ops,
-		CLK_INIT(hdmipll_mux_clk.c),
-	},
-};
-
-struct div_clk hdmipll_clk_src = {
-	.data = {
-		.div = 5,
-		.min_div = 5,
-		.max_div = 5,
-	},
-	.c = {
-		.parent = &hdmipll_mux_clk.c,
-		.dbg_name = "hdmipll_clk_src",
-		.ops = &clk_ops_div,
-		CLK_INIT(hdmipll_clk_src.c),
-	},
-};
-
-static struct clk_lookup hdmipllcc_8974[] = {
-	CLK_LOOKUP("extp_clk_src", hdmipll_clk_src.c,
-						"fd8c0000.qcom,mmsscc-mdss"),
-};
-
-int hdmi_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP;
-
-	/* Set client data for vco, mux and div clocks */
-	hdmipll_clk_src.priv = pll_res;
-	hdmipll_mux_clk.priv = pll_res;
-	hdmipll_div1_clk.priv = pll_res;
-	hdmipll_div2_clk.priv = pll_res;
-	hdmipll_div4_clk.priv = pll_res;
-	hdmipll_div6_clk.priv = pll_res;
-	hdmi_vco_clk.priv = pll_res;
-
-	/* Set hdmi mux clock operation */
-	hdmi_mux_ops = clk_ops_gen_mux;
-	hdmi_mux_ops.prepare = hdmi_mux_prepare;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8974,
-						 ARRAY_SIZE(hdmipllcc_8974));
-	if (rc) {
-		pr_err("Clock register failed rc=%d\n", rc);
-		rc = -EPROBE_DEFER;
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c
deleted file mode 100644
index 530ad83..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c
+++ /dev/null
@@ -1,790 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <dt-bindings/clock/mdss-28nm-pll-clk.h>
-#include "mdss-pll.h"
-#include "mdss-hdmi-pll.h"
-
-/* HDMI PLL macros */
-#define HDMI_PHY_PLL_REFCLK_CFG				(0x0400)
-#define HDMI_PHY_PLL_CHRG_PUMP_CFG			(0x0404)
-#define HDMI_PHY_PLL_LOOP_FLT_CFG0			(0x0408)
-#define HDMI_PHY_PLL_LOOP_FLT_CFG1			(0x040c)
-#define HDMI_PHY_PLL_IDAC_ADJ_CFG			(0x0410)
-#define HDMI_PHY_PLL_I_VI_KVCO_CFG			(0x0414)
-#define HDMI_PHY_PLL_PWRDN_B				(0x0418)
-#define HDMI_PHY_PLL_SDM_CFG0				(0x041c)
-#define HDMI_PHY_PLL_SDM_CFG1				(0x0420)
-#define HDMI_PHY_PLL_SDM_CFG2				(0x0424)
-#define HDMI_PHY_PLL_SDM_CFG3				(0x0428)
-#define HDMI_PHY_PLL_SDM_CFG4				(0x042c)
-#define HDMI_PHY_PLL_SSC_CFG0				(0x0430)
-#define HDMI_PHY_PLL_SSC_CFG1				(0x0434)
-#define HDMI_PHY_PLL_SSC_CFG2				(0x0438)
-#define HDMI_PHY_PLL_SSC_CFG3				(0x043c)
-#define HDMI_PHY_PLL_LOCKDET_CFG0			(0x0440)
-#define HDMI_PHY_PLL_LOCKDET_CFG1			(0x0444)
-#define HDMI_PHY_PLL_LOCKDET_CFG2			(0x0448)
-#define HDMI_PHY_PLL_VCOCAL_CFG0			(0x044c)
-#define HDMI_PHY_PLL_VCOCAL_CFG1			(0x0450)
-#define HDMI_PHY_PLL_VCOCAL_CFG2			(0x0454)
-#define HDMI_PHY_PLL_VCOCAL_CFG3			(0x0458)
-#define HDMI_PHY_PLL_VCOCAL_CFG4			(0x045c)
-#define HDMI_PHY_PLL_VCOCAL_CFG5			(0x0460)
-#define HDMI_PHY_PLL_VCOCAL_CFG6			(0x0464)
-#define HDMI_PHY_PLL_VCOCAL_CFG7			(0x0468)
-#define HDMI_PHY_PLL_DEBUG_SEL				(0x046c)
-#define HDMI_PHY_PLL_MISC0				(0x0470)
-#define HDMI_PHY_PLL_MISC1				(0x0474)
-#define HDMI_PHY_PLL_MISC2				(0x0478)
-#define HDMI_PHY_PLL_MISC3				(0x047c)
-#define HDMI_PHY_PLL_MISC4				(0x0480)
-#define HDMI_PHY_PLL_MISC5				(0x0484)
-#define HDMI_PHY_PLL_MISC6				(0x0488)
-#define HDMI_PHY_PLL_DEBUG_BUS0				(0x048c)
-#define HDMI_PHY_PLL_DEBUG_BUS1				(0x0490)
-#define HDMI_PHY_PLL_DEBUG_BUS2				(0x0494)
-#define HDMI_PHY_PLL_STATUS0				(0x0498)
-#define HDMI_PHY_PLL_STATUS1				(0x049c)
-
-#define HDMI_PHY_REG_0					(0x0000)
-#define HDMI_PHY_REG_1					(0x0004)
-#define HDMI_PHY_REG_2					(0x0008)
-#define HDMI_PHY_REG_3					(0x000c)
-#define HDMI_PHY_REG_4					(0x0010)
-#define HDMI_PHY_REG_5					(0x0014)
-#define HDMI_PHY_REG_6					(0x0018)
-#define HDMI_PHY_REG_7					(0x001c)
-#define HDMI_PHY_REG_8					(0x0020)
-#define HDMI_PHY_REG_9					(0x0024)
-#define HDMI_PHY_REG_10					(0x0028)
-#define HDMI_PHY_REG_11					(0x002c)
-#define HDMI_PHY_REG_12					(0x0030)
-#define HDMI_PHY_REG_BIST_CFG				(0x0034)
-#define HDMI_PHY_DEBUG_BUS_SEL				(0x0038)
-#define HDMI_PHY_REG_MISC0				(0x003c)
-#define HDMI_PHY_REG_13					(0x0040)
-#define HDMI_PHY_REG_14					(0x0044)
-#define HDMI_PHY_REG_15					(0x0048)
-
-/* HDMI PHY/PLL bit field macros */
-#define SW_RESET BIT(2)
-#define SW_RESET_PLL BIT(0)
-#define PWRDN_B BIT(7)
-
-#define PLL_PWRDN_B BIT(3)
-#define REG_VTEST_EN BIT(2)
-#define PD_PLL BIT(1)
-#define PD_PLL_REG BIT(0)
-
-
-#define HDMI_PLL_POLL_DELAY_US			50
-#define HDMI_PLL_POLL_TIMEOUT_US		500
-
-static int hdmi_pll_lock_status(struct mdss_pll_resources *hdmi_pll_res)
-{
-	u32 status;
-	int pll_locked = 0;
-	int rc;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic(
-			(hdmi_pll_res->pll_base + HDMI_PHY_PLL_STATUS0),
-			status, ((status & BIT(0)) == 1),
-			HDMI_PLL_POLL_DELAY_US,
-			HDMI_PLL_POLL_TIMEOUT_US)) {
-		pr_debug("HDMI PLL status=%x failed to Lock\n", status);
-		pll_locked = 0;
-	} else {
-		pr_debug("HDMI PLL locked\n");
-		pll_locked = 1;
-	}
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return pll_locked;
-}
-
-static void hdmi_pll_disable_28lpm(struct clk_hw *hw)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-	u32 val;
-
-	if (!hdmi_pll_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	val = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_PHY_REG_12);
-	val &= (~PWRDN_B);
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_PHY_REG_12, val);
-
-	val = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_PHY_PLL_PWRDN_B);
-	val |= PD_PLL;
-	val &= (~PLL_PWRDN_B);
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_PHY_PLL_PWRDN_B, val);
-
-	/* Make sure HDMI PHY/PLL are powered down */
-	wmb();
-
-} /* hdmi_pll_disable_28lpm */
-
-static int hdmi_pll_enable_28lpm(struct clk_hw *hw)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-	void __iomem            *pll_base;
-	u32 val;
-	int pll_lock_retry = 10;
-
-	pll_base = hdmi_pll_res->pll_base;
-
-	/* Assert PLL S/W reset */
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x8d);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG0, 0x10);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG1, 0x1a);
-	udelay(10);
-	/* De-assert PLL S/W reset */
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_1, 0xf2);
-
-	udelay(10);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_2, 0x1f);
-
-	val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_REG_12);
-	val |= BIT(5);
-	/* Assert PHY S/W reset */
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val);
-	val &= ~BIT(5);
-	udelay(10);
-	/* De-assert PHY S/W reset */
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val);
-
-	val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_REG_12);
-	val |= PWRDN_B;
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val);
-
-	/* Wait 10 us for enabling global power for PHY */
-	wmb();
-	udelay(10);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_3, 0x20);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_4, 0x10);
-
-	val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_PLL_PWRDN_B);
-	val |= PLL_PWRDN_B;
-	val |= REG_VTEST_EN;
-	val &= ~PD_PLL;
-	val |= PD_PLL_REG;
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_PWRDN_B, val);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_2, 0x81);
-
-	do {
-		if (!hdmi_pll_lock_status(hdmi_pll_res)) {
-			/* PLL has still not locked.
-			 * Do a software reset and try again
-			 * Assert PLL S/W reset first
-			 */
-			MDSS_PLL_REG_W(pll_base,
-					HDMI_PHY_PLL_LOCKDET_CFG2, 0x8d);
-
-			/* Wait for a short time before de-asserting
-			 * to allow the hardware to complete its job.
-			 * This much of delay should be fine for hardware
-			 * to assert and de-assert.
-			 */
-			udelay(10);
-			MDSS_PLL_REG_W(pll_base,
-					HDMI_PHY_PLL_LOCKDET_CFG2, 0xd);
-
-			/* Wait for a short duration for the PLL calibration
-			 * before checking if the PLL gets locked
-			 */
-			udelay(350);
-		} else {
-			pr_debug("HDMI PLL locked\n");
-			break;
-		}
-
-	} while (--pll_lock_retry);
-
-	if (!pll_lock_retry) {
-		pr_err("HDMI PLL not locked\n");
-		hdmi_pll_disable_28lpm(hw);
-		return -EAGAIN;
-	}
-
-	return 0;
-} /* hdmi_pll_enable_28lpm */
-
-static void hdmi_phy_pll_calculator_28lpm(unsigned long vco_rate,
-			struct mdss_pll_resources *hdmi_pll_res)
-{
-	u32 ref_clk		= 19200000;
-	u32 integer_mode	= 0;
-	u32 ref_clk_multiplier	= integer_mode == 0 ? 2 : 1;
-	u32 int_ref_clk_freq    = ref_clk * ref_clk_multiplier;
-	u32 refclk_cfg		= 0;
-	u32 ten_power_six	= 1000000;
-	u64 multiplier_q	= 0;
-	u64 multiplier_r	= 0;
-	u32 lf_cfg0		= 0;
-	u32 lf_cfg1		= 0;
-	u64 vco_cfg0		= 0;
-	u64 vco_cfg4		= 0;
-	u64 sdm_cfg0		= 0;
-	u64 sdm_cfg1		= 0;
-	u64 sdm_cfg2		= 0;
-	u32 val1		= 0;
-	u32 val2		= 0;
-	u32 val3		= 0;
-	void __iomem *pll_base	= hdmi_pll_res->pll_base;
-
-	multiplier_q = vco_rate;
-	multiplier_r = do_div(multiplier_q, int_ref_clk_freq);
-
-	lf_cfg0 = multiplier_q > 30 ? 0 : (multiplier_q > 16 ? 16 : 32);
-	lf_cfg0 += integer_mode;
-
-	lf_cfg1 = multiplier_q > 30 ? 0xc3 : (multiplier_q > 16 ? 0xbb : 0xf9);
-
-	vco_cfg0 = vco_rate / ten_power_six;
-	vco_cfg4 = ((ref_clk * 5) / ten_power_six) - 1;
-
-	sdm_cfg0 = (integer_mode * 64) + multiplier_q - 1;
-	sdm_cfg1 = 64 + multiplier_q - 1;
-
-	sdm_cfg2 = (multiplier_r) * 65536;
-	do_div(sdm_cfg2, int_ref_clk_freq);
-
-	pr_debug("lf_cfg0 = 0x%x    lf_cfg1 = 0x%x\n", lf_cfg0, lf_cfg1);
-	pr_debug("vco_cfg0 = 0x%llx   vco_cfg4 = 0x%llx\n", vco_cfg0, vco_cfg4);
-	pr_debug("sdm_cfg0 = 0x%llx   sdm_cfg1 = 0x%llx   sdm_cfg2 = 0x%llx\n",
-				sdm_cfg0, sdm_cfg1, sdm_cfg2);
-
-	refclk_cfg = MDSS_PLL_REG_R(pll_base, HDMI_PHY_PLL_REFCLK_CFG);
-	refclk_cfg &= ~0xf;
-	refclk_cfg |= (ref_clk_multiplier == 2) ? 0x8
-			: (ref_clk_multiplier == 1) ? 0 : 0x2;
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_REFCLK_CFG, refclk_cfg);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_CHRG_PUMP_CFG, 0x02);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOOP_FLT_CFG0, lf_cfg0);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOOP_FLT_CFG1, lf_cfg1);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_IDAC_ADJ_CFG, 0x2c);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_I_VI_KVCO_CFG, 0x06);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_PWRDN_B, 0x0a);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG0, sdm_cfg0);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG1, sdm_cfg1);
-
-	val1 = sdm_cfg2 & 0xff;
-	val2 = (sdm_cfg2 >> 8) & 0xff;
-	val3 = (sdm_cfg2 >> 16) & 0xff;
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG2, val1);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG3, val2);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG4, val3);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG0, 0x9a);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG1, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG2, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG3, 0x00);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG0, 0x10);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG1, 0x1a);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
-	val1 = vco_cfg0 & 0xff;
-	val2 = (vco_cfg0 >> 8) & 0xff;
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG0, val1);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG1, val2);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG2, 0x3b);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG3, 0x00);
-
-	val1 = vco_cfg4 & 0xff;
-	val2 = (vco_cfg4 >> 8) & 0xff;
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG4, val1);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG5, val2);
-
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG6, 0x33);
-	MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG7, 0x03);
-
-}
-
-int hdmi_vco_set_rate_28lpm(struct clk_hw *hw, unsigned long rate,
-		unsigned long parent_rate)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-	void __iomem		*pll_base;
-	int rc;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	if (hdmi_pll_res->pll_on)
-		return 0;
-
-	pll_base = hdmi_pll_res->pll_base;
-
-	pr_debug("rate=%ld\n", rate);
-
-	hdmi_phy_pll_calculator_28lpm(rate, hdmi_pll_res);
-
-	/* Make sure writes complete before disabling iface clock */
-	wmb();
-
-	vco->rate = rate;
-	hdmi_pll_res->vco_current_rate = rate;
-
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-
-	return 0;
-} /* hdmi_pll_set_rate */
-
-static unsigned long hdmi_vco_get_rate(struct hdmi_pll_vco_clk *vco)
-{
-	unsigned long freq = 0;
-	int rc = 0;
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable hdmi pll resources\n");
-		return 0;
-	}
-
-	freq = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-			HDMI_PHY_PLL_VCOCAL_CFG1) << 8 |
-		MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-				HDMI_PHY_PLL_VCOCAL_CFG0);
-
-	switch (freq) {
-	case 742:
-		freq = 742500000;
-		break;
-	case 810:
-		if (MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-					HDMI_PHY_PLL_SDM_CFG3) == 0x18)
-			freq = 810000000;
-		else
-			freq = 810900000;
-		break;
-	case 1342:
-		freq = 1342500000;
-		break;
-	default:
-		freq *= 1000000;
-	}
-	mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return freq;
-}
-
-long hdmi_vco_round_rate_28lpm(struct clk_hw *hw, unsigned long rate,
-		unsigned long *parent_rate)
-{
-	unsigned long rrate = rate;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	*parent_rate = rrate;
-	pr_debug("rrate=%ld\n", rrate);
-
-	return rrate;
-}
-
-int hdmi_vco_prepare_28lpm(struct clk_hw *hw)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_res = vco->priv;
-
-	pr_debug("rate=%ld\n", clk_hw_get_rate(hw));
-	rc = mdss_pll_resource_enable(hdmi_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss HDMI pll resources\n");
-		goto error;
-	}
-
-	if ((hdmi_res->vco_cached_rate != 0)
-		&& (hdmi_res->vco_cached_rate == clk_hw_get_rate(hw))) {
-		rc = vco->hw.init->ops->set_rate(hw,
-			hdmi_res->vco_cached_rate, hdmi_res->vco_cached_rate);
-		if (rc) {
-			pr_err("index=%d vco_set_rate failed. rc=%d\n",
-				rc, hdmi_res->index);
-			mdss_pll_resource_enable(hdmi_res, false);
-			goto error;
-		}
-	}
-
-	rc = hdmi_pll_enable_28lpm(hw);
-	if (rc) {
-		mdss_pll_resource_enable(hdmi_res, false);
-		pr_err("ndx=%d failed to enable hdmi pll\n",
-					hdmi_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(hdmi_res, false);
-	pr_debug("HDMI PLL enabled\n");
-error:
-	return rc;
-}
-
-void hdmi_vco_unprepare_28lpm(struct clk_hw *hw)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_res = vco->priv;
-
-	if (!hdmi_res) {
-		pr_err("Invalid input parameter\n");
-		return;
-	}
-
-	if (!hdmi_res->pll_on &&
-		mdss_pll_resource_enable(hdmi_res, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return;
-	}
-
-	hdmi_res->vco_cached_rate = clk_hw_get_rate(hw);
-	hdmi_pll_disable_28lpm(hw);
-
-	hdmi_res->handoff_resources = false;
-	mdss_pll_resource_enable(hdmi_res, false);
-	hdmi_res->pll_on = false;
-
-	pr_debug("HDMI PLL disabled\n");
-}
-
-
-unsigned long hdmi_vco_recalc_rate_28lpm(struct clk_hw *hw,
-				unsigned long parent_rate)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw);
-	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
-	u64 vco_rate = 0;
-
-	if (!hdmi_pll_res) {
-		pr_err("dsi pll resources not available\n");
-		return 0;
-	}
-
-	if (hdmi_pll_res->vco_current_rate) {
-		vco_rate = (unsigned long)hdmi_pll_res->vco_current_rate;
-		pr_debug("vco_rate=%lld\n", vco_rate);
-		return vco_rate;
-	}
-
-	if (is_gdsc_disabled(hdmi_pll_res))
-		return 0;
-
-	if (mdss_pll_resource_enable(hdmi_pll_res, true)) {
-		pr_err("Failed to enable hdmi pll resources\n");
-		return 0;
-	}
-
-	if (hdmi_pll_lock_status(hdmi_pll_res)) {
-		hdmi_pll_res->handoff_resources = true;
-		hdmi_pll_res->pll_on = true;
-		vco_rate = hdmi_vco_get_rate(vco);
-	} else {
-		hdmi_pll_res->handoff_resources = false;
-		mdss_pll_resource_enable(hdmi_pll_res, false);
-	}
-
-	pr_debug("vco_rate = %lld\n", vco_rate);
-
-	return (unsigned long)vco_rate;
-}
-
-static int hdmi_mux_set_parent(void *context, unsigned int reg,
-				unsigned int mux_sel)
-{
-	struct mdss_pll_resources *hdmi_pll_res = context;
-	int rc = 0;
-	u32 reg_val = 0;
-	const u32 div_4 = 0x20;
-	const u32 div_6 = 0x30;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable hdmi pll resources\n");
-		return rc;
-	}
-
-	/*
-	 * divsel_six is preferred over divsel_four to keep
-	 * vco range within goal limits to maintain margin.
-	 * To achieve this, its precedence order is toggled
-	 * at mux level. So reverse toggle the mux_sel value
-	 * here.
-	 */
-	switch (mux_sel) {
-	case 0x20: /* intended divider is divsel_six */
-		mux_sel = div_6;
-		break;
-	case 0x30: /* intended divider is divsel_four */
-		mux_sel = div_4;
-		break;
-	}
-	pr_debug("mux_sel = %d\n", mux_sel);
-
-	reg_val = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-				HDMI_PHY_PLL_REFCLK_CFG);
-	reg_val &= ~0x70;
-	reg_val |= (mux_sel & 0x70);
-	pr_debug("pll_refclk_cfg = 0x%x\n", reg_val);
-	MDSS_PLL_REG_W(hdmi_pll_res->pll_base,
-				HDMI_PHY_PLL_REFCLK_CFG, reg_val);
-
-	(void)mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return 0;
-}
-
-static int hdmi_mux_get_parent(void *context, unsigned int reg,
-				unsigned int *val)
-{
-	int rc = 0;
-	int mux_sel = 0;
-	struct mdss_pll_resources *hdmi_pll_res = context;
-
-	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
-	if (rc) {
-		*val = 0;
-		pr_err("Failed to enable hdmi pll resources\n");
-	} else {
-		mux_sel = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
-				HDMI_PHY_PLL_REFCLK_CFG);
-		mux_sel &= 0x70;
-		*val = mux_sel;
-		pr_debug("mux_sel = %d\n", *val);
-	}
-
-	(void)mdss_pll_resource_enable(hdmi_pll_res, false);
-
-	return rc;
-}
-
-static struct regmap_config hdmi_pll_28lpm_cfg = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register = 0x49c,
-};
-
-static struct regmap_bus hdmi_pclk_src_mux_regmap_ops = {
-	.reg_write = hdmi_mux_set_parent,
-	.reg_read = hdmi_mux_get_parent,
-};
-
-/* Op structures */
-static const struct clk_ops hdmi_28lpm_vco_clk_ops = {
-	.recalc_rate = hdmi_vco_recalc_rate_28lpm,
-	.set_rate = hdmi_vco_set_rate_28lpm,
-	.round_rate = hdmi_vco_round_rate_28lpm,
-	.prepare = hdmi_vco_prepare_28lpm,
-	.unprepare = hdmi_vco_unprepare_28lpm,
-};
-
-static struct hdmi_pll_vco_clk hdmi_vco_clk = {
-	.min_rate = 540000000,
-	.max_rate = 1125000000,
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_vco_clk",
-		.parent_names = (const char *[]){ "cxo" },
-		.num_parents = 1,
-		.ops = &hdmi_28lpm_vco_clk_ops,
-	},
-};
-
-static struct clk_fixed_factor hdmi_vco_divsel_one_clk_src = {
-	.div = 1,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_vco_divsel_one_clk_src",
-		.parent_names =
-			(const char *[]){ "hdmi_vco_clk" },
-		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor hdmi_vco_divsel_two_clk_src = {
-	.div = 2,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_vco_divsel_two_clk_src",
-		.parent_names =
-			(const char *[]){ "hdmi_vco_clk" },
-		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor hdmi_vco_divsel_four_clk_src = {
-	.div = 4,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_vco_divsel_four_clk_src",
-		.parent_names =
-			(const char *[]){ "hdmi_vco_clk" },
-		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_fixed_factor hdmi_vco_divsel_six_clk_src = {
-	.div = 6,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_vco_divsel_six_clk_src",
-		.parent_names =
-			(const char *[]){ "hdmi_vco_clk" },
-		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_regmap_mux hdmi_pclk_src_mux = {
-	.reg = HDMI_PHY_PLL_REFCLK_CFG,
-	.shift = 4,
-	.width = 2,
-
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "hdmi_pclk_src_mux",
-			.parent_names =
-				(const char *[]){"hdmi_vco_divsel_one_clk_src",
-					"hdmi_vco_divsel_two_clk_src",
-					"hdmi_vco_divsel_six_clk_src",
-					"hdmi_vco_divsel_four_clk_src"},
-			.num_parents = 4,
-			.ops = &clk_regmap_mux_closest_ops,
-			.flags = CLK_SET_RATE_PARENT,
-		},
-	},
-};
-
-static struct clk_fixed_factor hdmi_pclk_src = {
-	.div = 5,
-	.mult = 1,
-
-	.hw.init = &(struct clk_init_data){
-		.name = "hdmi_phy_pll_clk",
-		.parent_names =
-			(const char *[]){ "hdmi_pclk_src_mux" },
-		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_fixed_factor_ops,
-	},
-};
-
-static struct clk_hw *mdss_hdmi_pllcc_28lpm[] = {
-	[HDMI_VCO_CLK] = &hdmi_vco_clk.hw,
-	[HDMI_VCO_DIVIDED_1_CLK_SRC] = &hdmi_vco_divsel_one_clk_src.hw,
-	[HDMI_VCO_DIVIDED_TWO_CLK_SRC] = &hdmi_vco_divsel_two_clk_src.hw,
-	[HDMI_VCO_DIVIDED_FOUR_CLK_SRC] = &hdmi_vco_divsel_four_clk_src.hw,
-	[HDMI_VCO_DIVIDED_SIX_CLK_SRC] = &hdmi_vco_divsel_six_clk_src.hw,
-	[HDMI_PCLK_SRC_MUX] = &hdmi_pclk_src_mux.clkr.hw,
-	[HDMI_PCLK_SRC] = &hdmi_pclk_src.hw,
-};
-
-int hdmi_pll_clock_register_28lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP, i;
-	struct clk *clk;
-	struct clk_onecell_data *clk_data;
-	int num_clks = ARRAY_SIZE(mdss_hdmi_pllcc_28lpm);
-	struct regmap *regmap;
-
-	if (!pdev || !pdev->dev.of_node ||
-		!pll_res || !pll_res->pll_base) {
-		pr_err("Invalid input parameters\n");
-		return -EPROBE_DEFER;
-	}
-
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
-			GFP_KERNEL);
-	if (!clk_data)
-		return -ENOMEM;
-
-	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
-				sizeof(struct clk *)), GFP_KERNEL);
-	if (!clk_data->clks) {
-		devm_kfree(&pdev->dev, clk_data);
-		return -ENOMEM;
-	}
-	clk_data->clk_num = num_clks;
-
-	/* Set client data for vco, mux and div clocks */
-	regmap = devm_regmap_init(&pdev->dev, &hdmi_pclk_src_mux_regmap_ops,
-			pll_res, &hdmi_pll_28lpm_cfg);
-	hdmi_pclk_src_mux.clkr.regmap = regmap;
-
-	hdmi_vco_clk.priv = pll_res;
-
-	for (i = HDMI_VCO_CLK; i <= HDMI_PCLK_SRC; i++) {
-		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
-		clk = devm_clk_register(&pdev->dev,
-				mdss_hdmi_pllcc_28lpm[i]);
-		if (IS_ERR(clk)) {
-			pr_err("clk registration failed for HDMI: %d\n",
-					pll_res->index);
-			rc = -EINVAL;
-			goto clk_reg_fail;
-		}
-		clk_data->clks[i] = clk;
-	}
-
-	rc = of_clk_add_provider(pdev->dev.of_node,
-			of_clk_src_onecell_get, clk_data);
-	if (rc) {
-		pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		pr_debug("%s SUCCESS\n", __func__);
-		rc = 0;
-	}
-	return rc;
-clk_reg_fail:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c
deleted file mode 100644
index 27fd315..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c
+++ /dev/null
@@ -1,2675 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8996.h>
-
-#include "mdss-pll.h"
-#include "mdss-hdmi-pll.h"
-
-/* CONSTANTS */
-#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO            10
-#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD         3400000000UL
-#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD          1500000000UL
-#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD          750000000
-#define HDMI_CLKS_PLL_DIVSEL                     0
-#define HDMI_CORECLK_DIV                         5
-#define HDMI_REF_CLOCK                           19200000
-#define HDMI_64B_ERR_VAL                         0xFFFFFFFFFFFFFFFF
-#define HDMI_VERSION_8996_V1                     1
-#define HDMI_VERSION_8996_V2                     2
-#define HDMI_VERSION_8996_V3                     3
-#define HDMI_VERSION_8996_V3_1_8                 4
-
-#define HDMI_VCO_MAX_FREQ                        12000000000
-#define HDMI_VCO_MIN_FREQ                        8000000000
-#define HDMI_2400MHZ_BIT_CLK_HZ                  2400000000UL
-#define HDMI_2250MHZ_BIT_CLK_HZ                  2250000000UL
-#define HDMI_2000MHZ_BIT_CLK_HZ                  2000000000UL
-#define HDMI_1700MHZ_BIT_CLK_HZ                  1700000000UL
-#define HDMI_1200MHZ_BIT_CLK_HZ                  1200000000UL
-#define HDMI_1334MHZ_BIT_CLK_HZ                  1334000000UL
-#define HDMI_1000MHZ_BIT_CLK_HZ                  1000000000UL
-#define HDMI_850MHZ_BIT_CLK_HZ                   850000000
-#define HDMI_667MHZ_BIT_CLK_HZ                   667000000
-#define HDMI_600MHZ_BIT_CLK_HZ                   600000000
-#define HDMI_500MHZ_BIT_CLK_HZ                   500000000
-#define HDMI_450MHZ_BIT_CLK_HZ                   450000000
-#define HDMI_334MHZ_BIT_CLK_HZ                   334000000
-#define HDMI_300MHZ_BIT_CLK_HZ                   300000000
-#define HDMI_282MHZ_BIT_CLK_HZ                   282000000
-#define HDMI_250MHZ_BIT_CLK_HZ                   250000000
-#define HDMI_KHZ_TO_HZ                           1000
-
-/* PLL REGISTERS */
-#define QSERDES_COM_ATB_SEL1                     (0x000)
-#define QSERDES_COM_ATB_SEL2                     (0x004)
-#define QSERDES_COM_FREQ_UPDATE                  (0x008)
-#define QSERDES_COM_BG_TIMER                     (0x00C)
-#define QSERDES_COM_SSC_EN_CENTER                (0x010)
-#define QSERDES_COM_SSC_ADJ_PER1                 (0x014)
-#define QSERDES_COM_SSC_ADJ_PER2                 (0x018)
-#define QSERDES_COM_SSC_PER1                     (0x01C)
-#define QSERDES_COM_SSC_PER2                     (0x020)
-#define QSERDES_COM_SSC_STEP_SIZE1               (0x024)
-#define QSERDES_COM_SSC_STEP_SIZE2               (0x028)
-#define QSERDES_COM_POST_DIV                     (0x02C)
-#define QSERDES_COM_POST_DIV_MUX                 (0x030)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN          (0x034)
-#define QSERDES_COM_CLK_ENABLE1                  (0x038)
-#define QSERDES_COM_SYS_CLK_CTRL                 (0x03C)
-#define QSERDES_COM_SYSCLK_BUF_ENABLE            (0x040)
-#define QSERDES_COM_PLL_EN                       (0x044)
-#define QSERDES_COM_PLL_IVCO                     (0x048)
-#define QSERDES_COM_LOCK_CMP1_MODE0              (0x04C)
-#define QSERDES_COM_LOCK_CMP2_MODE0              (0x050)
-#define QSERDES_COM_LOCK_CMP3_MODE0              (0x054)
-#define QSERDES_COM_LOCK_CMP1_MODE1              (0x058)
-#define QSERDES_COM_LOCK_CMP2_MODE1              (0x05C)
-#define QSERDES_COM_LOCK_CMP3_MODE1              (0x060)
-#define QSERDES_COM_LOCK_CMP1_MODE2              (0x064)
-#define QSERDES_COM_CMN_RSVD0                    (0x064)
-#define QSERDES_COM_LOCK_CMP2_MODE2              (0x068)
-#define QSERDES_COM_EP_CLOCK_DETECT_CTRL         (0x068)
-#define QSERDES_COM_LOCK_CMP3_MODE2              (0x06C)
-#define QSERDES_COM_SYSCLK_DET_COMP_STATUS       (0x06C)
-#define QSERDES_COM_BG_TRIM                      (0x070)
-#define QSERDES_COM_CLK_EP_DIV                   (0x074)
-#define QSERDES_COM_CP_CTRL_MODE0                (0x078)
-#define QSERDES_COM_CP_CTRL_MODE1                (0x07C)
-#define QSERDES_COM_CP_CTRL_MODE2                (0x080)
-#define QSERDES_COM_CMN_RSVD1                    (0x080)
-#define QSERDES_COM_PLL_RCTRL_MODE0              (0x084)
-#define QSERDES_COM_PLL_RCTRL_MODE1              (0x088)
-#define QSERDES_COM_PLL_RCTRL_MODE2              (0x08C)
-#define QSERDES_COM_CMN_RSVD2                    (0x08C)
-#define QSERDES_COM_PLL_CCTRL_MODE0              (0x090)
-#define QSERDES_COM_PLL_CCTRL_MODE1              (0x094)
-#define QSERDES_COM_PLL_CCTRL_MODE2              (0x098)
-#define QSERDES_COM_CMN_RSVD3                    (0x098)
-#define QSERDES_COM_PLL_CNTRL                    (0x09C)
-#define QSERDES_COM_PHASE_SEL_CTRL               (0x0A0)
-#define QSERDES_COM_PHASE_SEL_DC                 (0x0A4)
-#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL         (0x0A8)
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM          (0x0A8)
-#define QSERDES_COM_SYSCLK_EN_SEL                (0x0AC)
-#define QSERDES_COM_CML_SYSCLK_SEL               (0x0B0)
-#define QSERDES_COM_RESETSM_CNTRL                (0x0B4)
-#define QSERDES_COM_RESETSM_CNTRL2               (0x0B8)
-#define QSERDES_COM_RESTRIM_CTRL                 (0x0BC)
-#define QSERDES_COM_RESTRIM_CTRL2                (0x0C0)
-#define QSERDES_COM_RESCODE_DIV_NUM              (0x0C4)
-#define QSERDES_COM_LOCK_CMP_EN                  (0x0C8)
-#define QSERDES_COM_LOCK_CMP_CFG                 (0x0CC)
-#define QSERDES_COM_DEC_START_MODE0              (0x0D0)
-#define QSERDES_COM_DEC_START_MODE1              (0x0D4)
-#define QSERDES_COM_DEC_START_MODE2              (0x0D8)
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL          (0x0D8)
-#define QSERDES_COM_DIV_FRAC_START1_MODE0        (0x0DC)
-#define QSERDES_COM_DIV_FRAC_START2_MODE0        (0x0E0)
-#define QSERDES_COM_DIV_FRAC_START3_MODE0        (0x0E4)
-#define QSERDES_COM_DIV_FRAC_START1_MODE1        (0x0E8)
-#define QSERDES_COM_DIV_FRAC_START2_MODE1        (0x0EC)
-#define QSERDES_COM_DIV_FRAC_START3_MODE1        (0x0F0)
-#define QSERDES_COM_DIV_FRAC_START1_MODE2        (0x0F4)
-#define QSERDES_COM_VCO_TUNE_MINVAL1             (0x0F4)
-#define QSERDES_COM_DIV_FRAC_START2_MODE2        (0x0F8)
-#define QSERDES_COM_VCO_TUNE_MINVAL2             (0x0F8)
-#define QSERDES_COM_DIV_FRAC_START3_MODE2        (0x0FC)
-#define QSERDES_COM_CMN_RSVD4                    (0x0FC)
-#define QSERDES_COM_INTEGLOOP_INITVAL            (0x100)
-#define QSERDES_COM_INTEGLOOP_EN                 (0x104)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0        (0x108)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0        (0x10C)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1        (0x110)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1        (0x114)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2        (0x118)
-#define QSERDES_COM_VCO_TUNE_MAXVAL1             (0x118)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2        (0x11C)
-#define QSERDES_COM_VCO_TUNE_MAXVAL2             (0x11C)
-#define QSERDES_COM_RES_TRIM_CONTROL2            (0x120)
-#define QSERDES_COM_VCO_TUNE_CTRL                (0x124)
-#define QSERDES_COM_VCO_TUNE_MAP                 (0x128)
-#define QSERDES_COM_VCO_TUNE1_MODE0              (0x12C)
-#define QSERDES_COM_VCO_TUNE2_MODE0              (0x130)
-#define QSERDES_COM_VCO_TUNE1_MODE1              (0x134)
-#define QSERDES_COM_VCO_TUNE2_MODE1              (0x138)
-#define QSERDES_COM_VCO_TUNE1_MODE2              (0x13C)
-#define QSERDES_COM_VCO_TUNE_INITVAL1            (0x13C)
-#define QSERDES_COM_VCO_TUNE2_MODE2              (0x140)
-#define QSERDES_COM_VCO_TUNE_INITVAL2            (0x140)
-#define QSERDES_COM_VCO_TUNE_TIMER1              (0x144)
-#define QSERDES_COM_VCO_TUNE_TIMER2              (0x148)
-#define QSERDES_COM_SAR                          (0x14C)
-#define QSERDES_COM_SAR_CLK                      (0x150)
-#define QSERDES_COM_SAR_CODE_OUT_STATUS          (0x154)
-#define QSERDES_COM_SAR_CODE_READY_STATUS        (0x158)
-#define QSERDES_COM_CMN_STATUS                   (0x15C)
-#define QSERDES_COM_RESET_SM_STATUS              (0x160)
-#define QSERDES_COM_RESTRIM_CODE_STATUS          (0x164)
-#define QSERDES_COM_PLLCAL_CODE1_STATUS          (0x168)
-#define QSERDES_COM_PLLCAL_CODE2_STATUS          (0x16C)
-#define QSERDES_COM_BG_CTRL                      (0x170)
-#define QSERDES_COM_CLK_SELECT                   (0x174)
-#define QSERDES_COM_HSCLK_SEL                    (0x178)
-#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS     (0x17C)
-#define QSERDES_COM_PLL_ANALOG                   (0x180)
-#define QSERDES_COM_CORECLK_DIV                  (0x184)
-#define QSERDES_COM_SW_RESET                     (0x188)
-#define QSERDES_COM_CORE_CLK_EN                  (0x18C)
-#define QSERDES_COM_C_READY_STATUS               (0x190)
-#define QSERDES_COM_CMN_CONFIG                   (0x194)
-#define QSERDES_COM_CMN_RATE_OVERRIDE            (0x198)
-#define QSERDES_COM_SVS_MODE_CLK_SEL             (0x19C)
-#define QSERDES_COM_DEBUG_BUS0                   (0x1A0)
-#define QSERDES_COM_DEBUG_BUS1                   (0x1A4)
-#define QSERDES_COM_DEBUG_BUS2                   (0x1A8)
-#define QSERDES_COM_DEBUG_BUS3                   (0x1AC)
-#define QSERDES_COM_DEBUG_BUS_SEL                (0x1B0)
-#define QSERDES_COM_CMN_MISC1                    (0x1B4)
-#define QSERDES_COM_CMN_MISC2                    (0x1B8)
-#define QSERDES_COM_CORECLK_DIV_MODE1            (0x1BC)
-#define QSERDES_COM_CORECLK_DIV_MODE2            (0x1C0)
-#define QSERDES_COM_CMN_RSVD5                    (0x1C0)
-
-/* Tx Channel base addresses */
-#define HDMI_TX_L0_BASE_OFFSET                   (0x400)
-#define HDMI_TX_L1_BASE_OFFSET                   (0x600)
-#define HDMI_TX_L2_BASE_OFFSET                   (0x800)
-#define HDMI_TX_L3_BASE_OFFSET                   (0xA00)
-
-/* Tx Channel PHY registers */
-#define QSERDES_TX_L0_BIST_MODE_LANENO                    (0x000)
-#define QSERDES_TX_L0_BIST_INVERT                         (0x004)
-#define QSERDES_TX_L0_CLKBUF_ENABLE                       (0x008)
-#define QSERDES_TX_L0_CMN_CONTROL_ONE                     (0x00C)
-#define QSERDES_TX_L0_CMN_CONTROL_TWO                     (0x010)
-#define QSERDES_TX_L0_CMN_CONTROL_THREE                   (0x014)
-#define QSERDES_TX_L0_TX_EMP_POST1_LVL                    (0x018)
-#define QSERDES_TX_L0_TX_POST2_EMPH                       (0x01C)
-#define QSERDES_TX_L0_TX_BOOST_LVL_UP_DN                  (0x020)
-#define QSERDES_TX_L0_HP_PD_ENABLES                       (0x024)
-#define QSERDES_TX_L0_TX_IDLE_LVL_LARGE_AMP               (0x028)
-#define QSERDES_TX_L0_TX_DRV_LVL                          (0x02C)
-#define QSERDES_TX_L0_TX_DRV_LVL_OFFSET                   (0x030)
-#define QSERDES_TX_L0_RESET_TSYNC_EN                      (0x034)
-#define QSERDES_TX_L0_PRE_STALL_LDO_BOOST_EN              (0x038)
-#define QSERDES_TX_L0_TX_BAND                             (0x03C)
-#define QSERDES_TX_L0_SLEW_CNTL                           (0x040)
-#define QSERDES_TX_L0_INTERFACE_SELECT                    (0x044)
-#define QSERDES_TX_L0_LPB_EN                              (0x048)
-#define QSERDES_TX_L0_RES_CODE_LANE_TX                    (0x04C)
-#define QSERDES_TX_L0_RES_CODE_LANE_RX                    (0x050)
-#define QSERDES_TX_L0_RES_CODE_LANE_OFFSET                (0x054)
-#define QSERDES_TX_L0_PERL_LENGTH1                        (0x058)
-#define QSERDES_TX_L0_PERL_LENGTH2                        (0x05C)
-#define QSERDES_TX_L0_SERDES_BYP_EN_OUT                   (0x060)
-#define QSERDES_TX_L0_DEBUG_BUS_SEL                       (0x064)
-#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN    (0x068)
-#define QSERDES_TX_L0_TX_POL_INV                          (0x06C)
-#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN          (0x070)
-#define QSERDES_TX_L0_BIST_PATTERN1                       (0x074)
-#define QSERDES_TX_L0_BIST_PATTERN2                       (0x078)
-#define QSERDES_TX_L0_BIST_PATTERN3                       (0x07C)
-#define QSERDES_TX_L0_BIST_PATTERN4                       (0x080)
-#define QSERDES_TX_L0_BIST_PATTERN5                       (0x084)
-#define QSERDES_TX_L0_BIST_PATTERN6                       (0x088)
-#define QSERDES_TX_L0_BIST_PATTERN7                       (0x08C)
-#define QSERDES_TX_L0_BIST_PATTERN8                       (0x090)
-#define QSERDES_TX_L0_LANE_MODE                           (0x094)
-#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE                  (0x098)
-#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION    (0x09C)
-#define QSERDES_TX_L0_ATB_SEL1                            (0x0A0)
-#define QSERDES_TX_L0_ATB_SEL2                            (0x0A4)
-#define QSERDES_TX_L0_RCV_DETECT_LVL                      (0x0A8)
-#define QSERDES_TX_L0_RCV_DETECT_LVL_2                    (0x0AC)
-#define QSERDES_TX_L0_PRBS_SEED1                          (0x0B0)
-#define QSERDES_TX_L0_PRBS_SEED2                          (0x0B4)
-#define QSERDES_TX_L0_PRBS_SEED3                          (0x0B8)
-#define QSERDES_TX_L0_PRBS_SEED4                          (0x0BC)
-#define QSERDES_TX_L0_RESET_GEN                           (0x0C0)
-#define QSERDES_TX_L0_RESET_GEN_MUXES                     (0x0C4)
-#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN                    (0x0C8)
-#define QSERDES_TX_L0_TX_INTERFACE_MODE                   (0x0CC)
-#define QSERDES_TX_L0_PWM_CTRL                            (0x0D0)
-#define QSERDES_TX_L0_PWM_ENCODED_OR_DATA                 (0x0D4)
-#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND2            (0x0D8)
-#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND2            (0x0DC)
-#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND2            (0x0E0)
-#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND2            (0x0E4)
-#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND0_1          (0x0E8)
-#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND0_1          (0x0EC)
-#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND0_1          (0x0F0)
-#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND0_1          (0x0F4)
-#define QSERDES_TX_L0_VMODE_CTRL1                         (0x0F8)
-#define QSERDES_TX_L0_VMODE_CTRL2                         (0x0FC)
-#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL              (0x100)
-#define QSERDES_TX_L0_BIST_STATUS                         (0x104)
-#define QSERDES_TX_L0_BIST_ERROR_COUNT1                   (0x108)
-#define QSERDES_TX_L0_BIST_ERROR_COUNT2                   (0x10C)
-#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV                   (0x110)
-
-/* HDMI PHY REGISTERS */
-#define HDMI_PHY_BASE_OFFSET                  (0xC00)
-
-#define HDMI_PHY_CFG                          (0x00)
-#define HDMI_PHY_PD_CTL                       (0x04)
-#define HDMI_PHY_MODE                         (0x08)
-#define HDMI_PHY_MISR_CLEAR                   (0x0C)
-#define HDMI_PHY_TX0_TX1_BIST_CFG0            (0x10)
-#define HDMI_PHY_TX0_TX1_BIST_CFG1            (0x14)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0      (0x18)
-#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1      (0x1C)
-#define HDMI_PHY_TX0_TX1_BIST_PATTERN0        (0x20)
-#define HDMI_PHY_TX0_TX1_BIST_PATTERN1        (0x24)
-#define HDMI_PHY_TX2_TX3_BIST_CFG0            (0x28)
-#define HDMI_PHY_TX2_TX3_BIST_CFG1            (0x2C)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0      (0x30)
-#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1      (0x34)
-#define HDMI_PHY_TX2_TX3_BIST_PATTERN0        (0x38)
-#define HDMI_PHY_TX2_TX3_BIST_PATTERN1        (0x3C)
-#define HDMI_PHY_DEBUG_BUS_SEL                (0x40)
-#define HDMI_PHY_TXCAL_CFG0                   (0x44)
-#define HDMI_PHY_TXCAL_CFG1                   (0x48)
-#define HDMI_PHY_TX0_TX1_LANE_CTL             (0x4C)
-#define HDMI_PHY_TX2_TX3_LANE_CTL             (0x50)
-#define HDMI_PHY_LANE_BIST_CONFIG             (0x54)
-#define HDMI_PHY_CLOCK                        (0x58)
-#define HDMI_PHY_MISC1                        (0x5C)
-#define HDMI_PHY_MISC2                        (0x60)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS0         (0x64)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS1         (0x68)
-#define HDMI_PHY_TX0_TX1_BIST_STATUS2         (0x6C)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS0         (0x70)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS1         (0x74)
-#define HDMI_PHY_TX2_TX3_BIST_STATUS2         (0x78)
-#define HDMI_PHY_PRE_MISR_STATUS0             (0x7C)
-#define HDMI_PHY_PRE_MISR_STATUS1             (0x80)
-#define HDMI_PHY_PRE_MISR_STATUS2             (0x84)
-#define HDMI_PHY_PRE_MISR_STATUS3             (0x88)
-#define HDMI_PHY_POST_MISR_STATUS0            (0x8C)
-#define HDMI_PHY_POST_MISR_STATUS1            (0x90)
-#define HDMI_PHY_POST_MISR_STATUS2            (0x94)
-#define HDMI_PHY_POST_MISR_STATUS3            (0x98)
-#define HDMI_PHY_STATUS                       (0x9C)
-#define HDMI_PHY_MISC3_STATUS                 (0xA0)
-#define HDMI_PHY_MISC4_STATUS                 (0xA4)
-#define HDMI_PHY_DEBUG_BUS0                   (0xA8)
-#define HDMI_PHY_DEBUG_BUS1                   (0xAC)
-#define HDMI_PHY_DEBUG_BUS2                   (0xB0)
-#define HDMI_PHY_DEBUG_BUS3                   (0xB4)
-#define HDMI_PHY_PHY_REVISION_ID0             (0xB8)
-#define HDMI_PHY_PHY_REVISION_ID1             (0xBC)
-#define HDMI_PHY_PHY_REVISION_ID2             (0xC0)
-#define HDMI_PHY_PHY_REVISION_ID3             (0xC4)
-
-#define HDMI_PLL_POLL_MAX_READS                100
-#define HDMI_PLL_POLL_TIMEOUT_US               1500
-
-enum hdmi_pll_freqs {
-	HDMI_PCLK_25200_KHZ,
-	HDMI_PCLK_27027_KHZ,
-	HDMI_PCLK_27000_KHZ,
-	HDMI_PCLK_74250_KHZ,
-	HDMI_PCLK_148500_KHZ,
-	HDMI_PCLK_154000_KHZ,
-	HDMI_PCLK_268500_KHZ,
-	HDMI_PCLK_297000_KHZ,
-	HDMI_PCLK_594000_KHZ,
-	HDMI_PCLK_MAX
-};
-
-struct hdmi_8996_phy_pll_reg_cfg {
-	u32 tx_l0_lane_mode;
-	u32 tx_l2_lane_mode;
-	u32 tx_l0_tx_band;
-	u32 tx_l1_tx_band;
-	u32 tx_l2_tx_band;
-	u32 tx_l3_tx_band;
-	u32 com_svs_mode_clk_sel;
-	u32 com_hsclk_sel;
-	u32 com_pll_cctrl_mode0;
-	u32 com_pll_rctrl_mode0;
-	u32 com_cp_ctrl_mode0;
-	u32 com_dec_start_mode0;
-	u32 com_div_frac_start1_mode0;
-	u32 com_div_frac_start2_mode0;
-	u32 com_div_frac_start3_mode0;
-	u32 com_integloop_gain0_mode0;
-	u32 com_integloop_gain1_mode0;
-	u32 com_lock_cmp_en;
-	u32 com_lock_cmp1_mode0;
-	u32 com_lock_cmp2_mode0;
-	u32 com_lock_cmp3_mode0;
-	u32 com_core_clk_en;
-	u32 com_coreclk_div;
-	u32 com_restrim_ctrl;
-	u32 com_vco_tune_ctrl;
-
-	u32 tx_l0_tx_drv_lvl;
-	u32 tx_l0_tx_emp_post1_lvl;
-	u32 tx_l1_tx_drv_lvl;
-	u32 tx_l1_tx_emp_post1_lvl;
-	u32 tx_l2_tx_drv_lvl;
-	u32 tx_l2_tx_emp_post1_lvl;
-	u32 tx_l3_tx_drv_lvl;
-	u32 tx_l3_tx_emp_post1_lvl;
-	u32 tx_l0_vmode_ctrl1;
-	u32 tx_l0_vmode_ctrl2;
-	u32 tx_l1_vmode_ctrl1;
-	u32 tx_l1_vmode_ctrl2;
-	u32 tx_l2_vmode_ctrl1;
-	u32 tx_l2_vmode_ctrl2;
-	u32 tx_l3_vmode_ctrl1;
-	u32 tx_l3_vmode_ctrl2;
-	u32 tx_l0_res_code_lane_tx;
-	u32 tx_l1_res_code_lane_tx;
-	u32 tx_l2_res_code_lane_tx;
-	u32 tx_l3_res_code_lane_tx;
-
-	u32 phy_mode;
-};
-
-struct hdmi_8996_v3_post_divider {
-	u64 vco_freq;
-	u64 hsclk_divsel;
-	u64 vco_ratio;
-	u64 tx_band_sel;
-	u64 half_rate_mode;
-};
-
-static inline struct hdmi_pll_vco_clk *to_hdmi_8996_vco_clk(struct clk *clk)
-{
-	return container_of(clk, struct hdmi_pll_vco_clk, c);
-}
-
-static inline u64 hdmi_8996_v1_get_post_div_lt_2g(u64 bclk)
-{
-	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
-		return 2;
-	else if (bclk >= HDMI_1700MHZ_BIT_CLK_HZ)
-		return 3;
-	else if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
-		return 4;
-	else if (bclk >= HDMI_850MHZ_BIT_CLK_HZ)
-		return 3;
-	else if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
-		return 4;
-	else if (bclk >= HDMI_450MHZ_BIT_CLK_HZ)
-		return 3;
-	else if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
-		return 4;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v2_get_post_div_lt_2g(u64 bclk, u64 vco_range)
-{
-	u64 hdmi_8ghz = vco_range;
-	u64 tmp_calc;
-
-	hdmi_8ghz <<= 2;
-	tmp_calc = hdmi_8ghz;
-	do_div(tmp_calc, 6U);
-
-	if (bclk >= vco_range)
-		return 2;
-	else if (bclk >= tmp_calc)
-		return 3;
-	else if (bclk >= vco_range >> 1)
-		return 4;
-
-	tmp_calc = hdmi_8ghz;
-	do_div(tmp_calc, 12U);
-	if (bclk >= tmp_calc)
-		return 3;
-	else if (bclk >= vco_range >> 2)
-		return 4;
-
-	tmp_calc = hdmi_8ghz;
-	do_div(tmp_calc, 24U);
-	if (bclk >= tmp_calc)
-		return 3;
-	else if (bclk >= vco_range >> 3)
-		return 4;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v2_get_post_div_gt_2g(u64 hsclk)
-{
-	if (hsclk >= 0 && hsclk <= 3)
-		return hsclk + 1;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_get_coreclk_div_lt_2g(u64 bclk)
-{
-	if (bclk >= HDMI_1334MHZ_BIT_CLK_HZ)
-		return 1;
-	else if (bclk >= HDMI_1000MHZ_BIT_CLK_HZ)
-		return 1;
-	else if (bclk >= HDMI_667MHZ_BIT_CLK_HZ)
-		return 2;
-	else if (bclk >= HDMI_500MHZ_BIT_CLK_HZ)
-		return 2;
-	else if (bclk >= HDMI_334MHZ_BIT_CLK_HZ)
-		return 3;
-	else if (bclk >= HDMI_250MHZ_BIT_CLK_HZ)
-		return 3;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_get_coreclk_div_ratio(u64 clks_pll_divsel,
-						  u64 coreclk_div)
-{
-	if (clks_pll_divsel == 0)
-		return coreclk_div*2;
-	else if (clks_pll_divsel == 1)
-		return coreclk_div*4;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v1_get_tx_band(u64 bclk)
-{
-	if (bclk >= 2400000000UL)
-		return 0;
-	if (bclk >= 1200000000UL)
-		return 1;
-	if (bclk >= 600000000UL)
-		return 2;
-	if (bclk >= 300000000UL)
-		return 3;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v2_get_tx_band(u64 bclk, u64 vco_range)
-{
-	if (bclk >= vco_range)
-		return 0;
-	else if (bclk >= vco_range >> 1)
-		return 1;
-	else if (bclk >= vco_range >> 2)
-		return 2;
-	else if (bclk >= vco_range >> 3)
-		return 3;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v1_get_hsclk(u64 fdata)
-{
-	if (fdata >= 9600000000UL)
-		return 0;
-	else if (fdata >= 4800000000UL)
-		return 1;
-	else if (fdata >= 3200000000UL)
-		return 2;
-	else if (fdata >= 2400000000UL)
-		return 3;
-
-	return HDMI_64B_ERR_VAL;
-}
-
-static inline u64 hdmi_8996_v2_get_hsclk(u64 fdata, u64 vco_range)
-{
-	u64 tmp_calc = vco_range;
-
-	tmp_calc <<= 2;
-	do_div(tmp_calc, 3U);
-	if (fdata >= (vco_range << 2))
-		return 0;
-	else if (fdata >= (vco_range << 1))
-		return 1;
-	else if (fdata >= tmp_calc)
-		return 2;
-	else if (fdata >= vco_range)
-		return 3;
-
-	return HDMI_64B_ERR_VAL;
-
-}
-
-static inline u64 hdmi_8996_v2_get_vco_freq(u64 bclk, u64 vco_range)
-{
-	u64 tx_band_div_ratio = 1U << hdmi_8996_v2_get_tx_band(bclk, vco_range);
-	u64 pll_post_div_ratio;
-
-	if (bclk >= vco_range) {
-		u64 hsclk = hdmi_8996_v2_get_hsclk(bclk, vco_range);
-
-		pll_post_div_ratio = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
-	} else {
-		pll_post_div_ratio = hdmi_8996_v2_get_post_div_lt_2g(bclk,
-								vco_range);
-	}
-
-	return bclk * (pll_post_div_ratio * tx_band_div_ratio);
-}
-
-static inline u64 hdmi_8996_v2_get_fdata(u64 bclk, u64 vco_range)
-{
-	if (bclk >= vco_range)
-		return bclk;
-	u64 tmp_calc = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
-	u64 pll_post_div_ratio_lt_2g = hdmi_8996_v2_get_post_div_lt_2g(
-						bclk, vco_range);
-	if (pll_post_div_ratio_lt_2g == HDMI_64B_ERR_VAL)
-		return HDMI_64B_ERR_VAL;
-
-	do_div(tmp_calc, pll_post_div_ratio_lt_2g);
-		return tmp_calc;
-}
-
-static inline u64 hdmi_8996_get_cpctrl(u64 frac_start, bool gen_ssc)
-{
-	if ((frac_start != 0) || gen_ssc)
-		/*
-		 * This should be ROUND(11/(19.2/20))).
-		 * Since ref clock does not change, hardcoding to 11
-		 */
-		return 0xB;
-
-	return 0x23;
-}
-
-static inline u64 hdmi_8996_get_rctrl(u64 frac_start, bool gen_ssc)
-{
-	if ((frac_start != 0) || gen_ssc)
-		return 0x16;
-
-	return 0x10;
-}
-
-static inline u64 hdmi_8996_get_cctrl(u64 frac_start, bool gen_ssc)
-{
-	if ((frac_start != 0) || (gen_ssc))
-		return 0x28;
-
-	return 0x1;
-}
-
-static inline u64 hdmi_8996_get_integloop_gain(u64 frac_start, bool gen_ssc)
-{
-	if ((frac_start != 0) || gen_ssc)
-		return 0x80;
-
-	return 0xC4;
-}
-
-static inline u64 hdmi_8996_v3_get_integloop_gain(u64 frac_start, u64 bclk,
-							bool gen_ssc)
-{
-	u64 digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
-	u64 base = ((frac_start != 0) || gen_ssc) ? 0x40 : 0xC4;
-
-	base <<= digclk_divsel;
-
-	return (base <= 2046 ? base : 0x7FE);
-}
-
-static inline u64 hdmi_8996_get_vco_tune(u64 fdata, u64 div)
-{
-	u64 vco_tune;
-
-	vco_tune = fdata * div;
-	do_div(vco_tune, 1000000);
-	vco_tune = 13000 - vco_tune - 256;
-	do_div(vco_tune, 5);
-
-	return vco_tune;
-}
-
-static inline u64 hdmi_8996_get_pll_cmp(u64 pll_cmp_cnt, u64 core_clk)
-{
-	u64 pll_cmp;
-	u64 rem;
-
-	pll_cmp = pll_cmp_cnt * core_clk;
-	rem = do_div(pll_cmp, HDMI_REF_CLOCK);
-	if (rem > (HDMI_REF_CLOCK >> 1))
-		pll_cmp++;
-	pll_cmp -= 1;
-
-	return pll_cmp;
-}
-
-static inline u64 hdmi_8996_v3_get_pll_cmp(u64 pll_cmp_cnt, u64 fdata)
-{
-	u64 dividend = pll_cmp_cnt * fdata;
-	u64 divisor = HDMI_REF_CLOCK * 10;
-	u64 rem;
-
-	rem = do_div(dividend, divisor);
-	if (rem > (divisor >> 1))
-		dividend++;
-
-	return dividend - 1;
-}
-
-static int hdmi_8996_v3_get_post_div(struct hdmi_8996_v3_post_divider *pd,
-						u64 bclk)
-{
-	u32 ratio[] = {2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35};
-	u32 tx_band_sel[] = {0, 1, 2, 3};
-	u64 vco_freq[60];
-	u64 vco, vco_optimal, half_rate_mode = 0;
-	int vco_optimal_index, vco_freq_index;
-	int i, j, k, x;
-
-	for (i = 0; i <= 1; i++) {
-		vco_optimal = HDMI_VCO_MAX_FREQ;
-		vco_optimal_index = -1;
-		vco_freq_index = 0;
-		for (j = 0; j < 15; j++) {
-			for (k = 0; k < 4; k++) {
-				u64 ratio_mult = ratio[j] << tx_band_sel[k];
-
-				vco = bclk >> half_rate_mode;
-				vco *= ratio_mult;
-				vco_freq[vco_freq_index++] = vco;
-			}
-		}
-
-		for (x = 0; x < 60; x++) {
-			u64 vco_tmp = vco_freq[x];
-
-			if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
-					(vco_tmp <= vco_optimal)) {
-				vco_optimal = vco_tmp;
-				vco_optimal_index = x;
-			}
-		}
-
-		if (vco_optimal_index == -1) {
-			if (!half_rate_mode)
-				half_rate_mode++;
-			else
-				return -EINVAL;
-		} else {
-			pd->vco_freq = vco_optimal;
-			pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
-			pd->vco_ratio = ratio[vco_optimal_index / 4];
-			break;
-		}
-	}
-
-	switch (pd->vco_ratio) {
-	case 2:
-		pd->hsclk_divsel = 0;
-		break;
-	case 3:
-		pd->hsclk_divsel = 4;
-		break;
-	case 4:
-		pd->hsclk_divsel = 8;
-		break;
-	case 5:
-		pd->hsclk_divsel = 12;
-		break;
-	case 6:
-		pd->hsclk_divsel = 1;
-		break;
-	case 9:
-		pd->hsclk_divsel = 5;
-		break;
-	case 10:
-		pd->hsclk_divsel = 2;
-		break;
-	case 12:
-		pd->hsclk_divsel = 9;
-		break;
-	case 14:
-		pd->hsclk_divsel = 3;
-		break;
-	case 15:
-		pd->hsclk_divsel = 13;
-		break;
-	case 20:
-		pd->hsclk_divsel = 10;
-		break;
-	case 21:
-		pd->hsclk_divsel = 7;
-		break;
-	case 25:
-		pd->hsclk_divsel = 14;
-		break;
-	case 28:
-		pd->hsclk_divsel = 11;
-		break;
-	case 35:
-		pd->hsclk_divsel = 15;
-		break;
-	}
-
-	return 0;
-}
-
-static int hdmi_8996_v1_calculate(u32 pix_clk,
-			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
-{
-	int rc = -EINVAL;
-	u64 fdata, clk_divtx, tmds_clk;
-	u64 bclk;
-	u64 post_div_gt_2g;
-	u64 post_div_lt_2g;
-	u64 coreclk_div1_lt_2g;
-	u64 core_clk_div_ratio;
-	u64 core_clk;
-	u64 pll_cmp;
-	u64 tx_band;
-	u64 tx_band_div_ratio;
-	u64 hsclk;
-	u64 dec_start;
-	u64 frac_start;
-	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
-	u64 cpctrl;
-	u64 rctrl;
-	u64 cctrl;
-	u64 integloop_gain;
-	u64 vco_tune;
-	u64 vco_freq;
-	u64 rem;
-
-	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
-	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
-		tmds_clk = bclk/4;
-	else
-		tmds_clk = bclk;
-
-	post_div_lt_2g = hdmi_8996_v1_get_post_div_lt_2g(bclk);
-	if (post_div_lt_2g == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	coreclk_div1_lt_2g = hdmi_8996_get_coreclk_div_lt_2g(bclk);
-
-	core_clk_div_ratio = hdmi_8996_get_coreclk_div_ratio(
-				HDMI_CLKS_PLL_DIVSEL, HDMI_CORECLK_DIV);
-
-	tx_band = hdmi_8996_v1_get_tx_band(bclk);
-	if (tx_band == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	tx_band_div_ratio = 1 << tx_band;
-
-	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ) {
-		fdata = bclk;
-		hsclk = hdmi_8996_v1_get_hsclk(fdata);
-		if (hsclk == HDMI_64B_ERR_VAL)
-			goto fail;
-
-		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
-		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
-			goto fail;
-
-		vco_freq = bclk * (post_div_gt_2g * tx_band_div_ratio);
-		clk_divtx = vco_freq;
-		do_div(clk_divtx, post_div_gt_2g);
-	} else {
-		vco_freq = bclk * (post_div_lt_2g * tx_band_div_ratio);
-		fdata = vco_freq;
-		do_div(fdata, post_div_lt_2g);
-		hsclk = hdmi_8996_v1_get_hsclk(fdata);
-		if (hsclk == HDMI_64B_ERR_VAL)
-			goto fail;
-
-		clk_divtx = vco_freq;
-		do_div(clk_divtx, post_div_lt_2g);
-		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
-		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
-			goto fail;
-	}
-
-	/* Decimal and fraction values */
-	dec_start = fdata * post_div_gt_2g;
-	do_div(dec_start, pll_divisor);
-	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
-			(fdata * post_div_gt_2g))) * (1 << 20));
-	rem = do_div(frac_start, pll_divisor);
-	/* Round off frac_start to closest integer */
-	if (rem >= (pll_divisor >> 1))
-		frac_start++;
-
-	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
-	rctrl = hdmi_8996_get_rctrl(frac_start, false);
-	cctrl = hdmi_8996_get_cctrl(frac_start, false);
-	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
-	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div_gt_2g);
-
-	core_clk = clk_divtx;
-	do_div(core_clk, core_clk_div_ratio);
-	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
-
-	/* Debug dump */
-	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
-	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
-	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
-	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
-	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
-	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
-	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
-	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
-	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
-	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
-	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
-	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
-	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
-	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
-	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
-
-	/* Convert these values to register specific values */
-	cfg->tx_l0_lane_mode = 0x3;
-	cfg->tx_l2_lane_mode = 0x3;
-	cfg->tx_l0_tx_band = tx_band + 4;
-	cfg->tx_l1_tx_band = tx_band + 4;
-	cfg->tx_l2_tx_band = tx_band + 4;
-	cfg->tx_l3_tx_band = tx_band + 4;
-	cfg->tx_l0_res_code_lane_tx = 0x33;
-	cfg->tx_l1_res_code_lane_tx = 0x33;
-	cfg->tx_l2_res_code_lane_tx = 0x33;
-	cfg->tx_l3_res_code_lane_tx = 0x33;
-	cfg->com_restrim_ctrl = 0x0;
-	cfg->com_vco_tune_ctrl = 0x1C;
-
-	cfg->com_svs_mode_clk_sel =
-			(bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2);
-	cfg->com_hsclk_sel = (0x28 | hsclk);
-	cfg->com_pll_cctrl_mode0 = cctrl;
-	cfg->com_pll_rctrl_mode0 = rctrl;
-	cfg->com_cp_ctrl_mode0 = cpctrl;
-	cfg->com_dec_start_mode0 = dec_start;
-	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
-	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
-	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
-	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
-	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
-	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
-	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
-	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
-	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
-	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x22;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-		cfg->com_restrim_ctrl = 0x0;
-	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x25;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-		cfg->com_restrim_ctrl = 0x0;
-	} else {
-		cfg->tx_l0_tx_drv_lvl = 0x20;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l1_tx_drv_lvl = 0x20;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l2_tx_drv_lvl = 0x20;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l3_tx_drv_lvl = 0x20;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0E;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0E;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0E;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x0E;
-		cfg->com_restrim_ctrl = 0xD8;
-	}
-
-	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
-	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
-	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
-	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
-	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
-	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
-	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
-	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
-	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
-						cfg->com_svs_mode_clk_sel);
-	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
-	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
-						cfg->com_pll_cctrl_mode0);
-	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
-						cfg->com_pll_rctrl_mode0);
-	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
-						cfg->com_cp_ctrl_mode0);
-	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
-						cfg->com_dec_start_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
-						cfg->com_div_frac_start1_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
-						cfg->com_div_frac_start2_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
-						cfg->com_div_frac_start3_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
-						cfg->com_integloop_gain0_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
-						cfg->com_integloop_gain1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
-						cfg->com_lock_cmp1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
-						cfg->com_lock_cmp2_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
-						cfg->com_lock_cmp3_mode0);
-	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
-	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
-	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
-
-	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l0_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l1_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l2_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l3_tx_emp_post1_lvl);
-
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l0_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l1_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l2_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l3_res_code_lane_tx);
-
-	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
-	rc = 0;
-fail:
-	return rc;
-}
-
-static int hdmi_8996_v2_calculate(u32 pix_clk,
-			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
-{
-	int rc = -EINVAL;
-	u64 fdata, clk_divtx, tmds_clk;
-	u64 bclk;
-	u64 post_div;
-	u64 core_clk_div;
-	u64 core_clk_div_ratio;
-	u64 core_clk;
-	u64 pll_cmp;
-	u64 tx_band;
-	u64 tx_band_div_ratio;
-	u64 hsclk;
-	u64 dec_start;
-	u64 frac_start;
-	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
-	u64 cpctrl;
-	u64 rctrl;
-	u64 cctrl;
-	u64 integloop_gain;
-	u64 vco_tune;
-	u64 vco_freq;
-	u64 vco_range;
-	u64 rem;
-
-	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
-	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
-		tmds_clk = pix_clk >> 2;
-	else
-		tmds_clk = pix_clk;
-
-	vco_range = bclk < HDMI_282MHZ_BIT_CLK_HZ ? HDMI_2000MHZ_BIT_CLK_HZ :
-				HDMI_2250MHZ_BIT_CLK_HZ;
-
-	fdata = hdmi_8996_v2_get_fdata(bclk, vco_range);
-	if (fdata == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	hsclk = hdmi_8996_v2_get_hsclk(fdata, vco_range);
-	if (hsclk == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	if (bclk >= vco_range)
-		post_div = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
-	else
-		post_div = hdmi_8996_v2_get_post_div_lt_2g(bclk, vco_range);
-
-	if (post_div == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	core_clk_div = 5;
-	core_clk_div_ratio = core_clk_div * 2;
-
-	tx_band = hdmi_8996_v2_get_tx_band(bclk, vco_range);
-	if (tx_band == HDMI_64B_ERR_VAL)
-		goto fail;
-
-	tx_band_div_ratio = 1 << tx_band;
-
-	vco_freq = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
-	clk_divtx = vco_freq;
-	do_div(clk_divtx, post_div);
-
-	/* Decimal and fraction values */
-	dec_start = fdata * post_div;
-	do_div(dec_start, pll_divisor);
-	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
-			(fdata * post_div))) * (1 << 20));
-	rem = do_div(frac_start, pll_divisor);
-	/* Round off frac_start to closest integer */
-	if (rem >= (pll_divisor >> 1))
-		frac_start++;
-
-	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
-	rctrl = hdmi_8996_get_rctrl(frac_start, false);
-	cctrl = hdmi_8996_get_cctrl(frac_start, false);
-	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
-	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div);
-
-	core_clk = clk_divtx;
-	do_div(core_clk, core_clk_div_ratio);
-	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
-
-	/* Debug dump */
-	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
-	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
-	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
-	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
-	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
-	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
-	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
-	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
-	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
-	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
-	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
-	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
-	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
-	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
-	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
-
-	/* Convert these values to register specific values */
-	cfg->tx_l0_lane_mode = 0x3;
-	cfg->tx_l2_lane_mode = 0x3;
-	cfg->tx_l0_tx_band = tx_band + 4;
-	cfg->tx_l1_tx_band = tx_band + 4;
-	cfg->tx_l2_tx_band = tx_band + 4;
-	cfg->tx_l3_tx_band = tx_band + 4;
-
-	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
-		cfg->com_svs_mode_clk_sel = 1;
-	else
-		cfg->com_svs_mode_clk_sel = 2;
-
-	cfg->com_hsclk_sel = (0x28 | hsclk);
-	cfg->com_pll_cctrl_mode0 = cctrl;
-	cfg->com_pll_rctrl_mode0 = rctrl;
-	cfg->com_cp_ctrl_mode0 = cpctrl;
-	cfg->com_dec_start_mode0 = dec_start;
-	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
-	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
-	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
-	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
-	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
-	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
-	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
-	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
-	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
-	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
-	cfg->com_vco_tune_ctrl = 0x0;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x22;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-		cfg->tx_l0_res_code_lane_tx = 0x3F;
-		cfg->tx_l1_res_code_lane_tx = 0x3F;
-		cfg->tx_l2_res_code_lane_tx = 0x3F;
-		cfg->tx_l3_res_code_lane_tx = 0x3F;
-		cfg->com_restrim_ctrl = 0x0;
-	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x25;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-		cfg->tx_l0_res_code_lane_tx = 0x39;
-		cfg->tx_l1_res_code_lane_tx = 0x39;
-		cfg->tx_l2_res_code_lane_tx = 0x39;
-		cfg->tx_l3_res_code_lane_tx = 0x39;
-		cfg->com_restrim_ctrl = 0x0;
-	} else {
-		cfg->tx_l0_tx_drv_lvl = 0x20;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l1_tx_drv_lvl = 0x20;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l2_tx_drv_lvl = 0x20;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l3_tx_drv_lvl = 0x20;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0E;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0E;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0E;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x0E;
-		cfg->tx_l0_res_code_lane_tx = 0x3F;
-		cfg->tx_l1_res_code_lane_tx = 0x3F;
-		cfg->tx_l2_res_code_lane_tx = 0x3F;
-		cfg->tx_l3_res_code_lane_tx = 0x3F;
-		cfg->com_restrim_ctrl = 0xD8;
-	}
-
-	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
-	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
-	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
-	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
-	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
-	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
-	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
-	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
-	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
-						cfg->com_svs_mode_clk_sel);
-	DEV_DBG("PLL PARAM: com_vco_tune_ctrl = 0x%x\n",
-						cfg->com_vco_tune_ctrl);
-	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
-	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
-	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
-						cfg->com_pll_cctrl_mode0);
-	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
-						cfg->com_pll_rctrl_mode0);
-	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
-						cfg->com_cp_ctrl_mode0);
-	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
-						cfg->com_dec_start_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
-						cfg->com_div_frac_start1_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
-						cfg->com_div_frac_start2_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
-						cfg->com_div_frac_start3_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
-						cfg->com_integloop_gain0_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
-						cfg->com_integloop_gain1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
-						cfg->com_lock_cmp1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
-						cfg->com_lock_cmp2_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
-						cfg->com_lock_cmp3_mode0);
-	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
-	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
-
-	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l0_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l1_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l2_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l3_tx_emp_post1_lvl);
-
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l0_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l1_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l2_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
-					cfg->tx_l3_res_code_lane_tx);
-	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
-
-	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
-	rc = 0;
-fail:
-	return rc;
-}
-
-static int hdmi_8996_v3_calculate(u32 pix_clk,
-			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
-{
-	int rc = -EINVAL;
-	struct hdmi_8996_v3_post_divider pd;
-	u64 fdata, tmds_clk;
-	u64 bclk;
-	u64 pll_cmp;
-	u64 tx_band;
-	u64 hsclk;
-	u64 dec_start;
-	u64 frac_start;
-	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
-	u64 cpctrl;
-	u64 rctrl;
-	u64 cctrl;
-	u64 integloop_gain;
-	u64 vco_freq;
-	u64 rem;
-
-	/* FDATA, HSCLK, PIXEL_CLK, TMDS_CLK */
-	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
-		tmds_clk = pix_clk >> 2;
-	else
-		tmds_clk = pix_clk;
-
-	if (hdmi_8996_v3_get_post_div(&pd, bclk) || pd.vco_ratio <= 0 ||
-			pd.vco_freq <= 0)
-		goto fail;
-
-	vco_freq = pd.vco_freq;
-	fdata = pd.vco_freq;
-	do_div(fdata, pd.vco_ratio);
-
-	hsclk = pd.hsclk_divsel;
-	dec_start = vco_freq;
-	do_div(dec_start, pll_divisor);
-
-	frac_start = vco_freq * (1 << 20);
-	rem = do_div(frac_start, pll_divisor);
-	frac_start -= dec_start * (1 << 20);
-	if (rem > (pll_divisor >> 1))
-		frac_start++;
-
-	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
-	rctrl = hdmi_8996_get_rctrl(frac_start, false);
-	cctrl = hdmi_8996_get_cctrl(frac_start, false);
-	integloop_gain = hdmi_8996_v3_get_integloop_gain(frac_start, bclk,
-									false);
-	pll_cmp = hdmi_8996_v3_get_pll_cmp(1024, fdata);
-	tx_band = pd.tx_band_sel;
-
-	/* Debug dump */
-	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
-	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
-	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
-	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
-	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
-	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
-	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
-	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
-	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
-	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
-	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
-	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
-	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
-
-	/* Convert these values to register specific values */
-	cfg->tx_l0_tx_band = tx_band + 4;
-	cfg->tx_l1_tx_band = tx_band + 4;
-	cfg->tx_l2_tx_band = tx_band + 4;
-	cfg->tx_l3_tx_band = tx_band + 4;
-
-	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
-		cfg->com_svs_mode_clk_sel = 1;
-	else
-		cfg->com_svs_mode_clk_sel = 2;
-
-	cfg->com_hsclk_sel = (0x20 | hsclk);
-	cfg->com_pll_cctrl_mode0 = cctrl;
-	cfg->com_pll_rctrl_mode0 = rctrl;
-	cfg->com_cp_ctrl_mode0 = cpctrl;
-	cfg->com_dec_start_mode0 = dec_start;
-	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
-	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
-	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
-	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
-	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
-	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
-	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
-	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
-	cfg->com_lock_cmp_en = 0x04;
-	cfg->com_core_clk_en = 0x2C;
-	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
-	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
-	cfg->com_vco_tune_ctrl = 0x0;
-
-	cfg->tx_l0_lane_mode = 0x43;
-	cfg->tx_l2_lane_mode = 0x43;
-
-	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x22;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
-		cfg->tx_l0_tx_drv_lvl = 0x25;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l1_tx_drv_lvl = 0x25;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l2_tx_drv_lvl = 0x25;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l3_tx_drv_lvl = 0x25;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0D;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0D;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0D;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x00;
-	} else {
-		cfg->tx_l0_tx_drv_lvl = 0x20;
-		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l1_tx_drv_lvl = 0x20;
-		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l2_tx_drv_lvl = 0x20;
-		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l3_tx_drv_lvl = 0x20;
-		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
-		cfg->tx_l0_vmode_ctrl1 = 0x00;
-		cfg->tx_l0_vmode_ctrl2 = 0x0E;
-		cfg->tx_l1_vmode_ctrl1 = 0x00;
-		cfg->tx_l1_vmode_ctrl2 = 0x0E;
-		cfg->tx_l2_vmode_ctrl1 = 0x00;
-		cfg->tx_l2_vmode_ctrl2 = 0x0E;
-		cfg->tx_l3_vmode_ctrl1 = 0x00;
-		cfg->tx_l3_vmode_ctrl2 = 0x0E;
-	}
-
-	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
-	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
-	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
-	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
-	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
-	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
-						cfg->com_svs_mode_clk_sel);
-	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
-	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
-	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
-						cfg->com_pll_cctrl_mode0);
-	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
-						cfg->com_pll_rctrl_mode0);
-	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
-						cfg->com_cp_ctrl_mode0);
-	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
-						cfg->com_dec_start_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
-						cfg->com_div_frac_start1_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
-						cfg->com_div_frac_start2_mode0);
-	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
-						cfg->com_div_frac_start3_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
-						cfg->com_integloop_gain0_mode0);
-	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
-						cfg->com_integloop_gain1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
-						cfg->com_lock_cmp1_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
-						cfg->com_lock_cmp2_mode0);
-	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
-						cfg->com_lock_cmp3_mode0);
-	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
-	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
-	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
-
-	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
-	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
-	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l0_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l1_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l2_tx_emp_post1_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
-	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
-						cfg->tx_l3_tx_emp_post1_lvl);
-
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
-	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
-	rc = 0;
-fail:
-	return rc;
-}
-
-static int hdmi_8996_calculate(u32 pix_clk,
-			       struct hdmi_8996_phy_pll_reg_cfg *cfg, u32 ver)
-{
-	switch (ver) {
-	case HDMI_VERSION_8996_V3:
-	case HDMI_VERSION_8996_V3_1_8:
-		return hdmi_8996_v3_calculate(pix_clk, cfg);
-	case HDMI_VERSION_8996_V2:
-		return hdmi_8996_v2_calculate(pix_clk, cfg);
-	default:
-		return hdmi_8996_v1_calculate(pix_clk, cfg);
-	}
-}
-
-static int hdmi_8996_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk, u32 ver)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
-
-	rc = hdmi_8996_calculate(tmds_clk, &cfg, ver);
-	if (rc) {
-		DEV_ERR("%s: PLL calculation failed\n", __func__);
-		return rc;
-	}
-
-	/* Initially shut down PHY */
-	DEV_DBG("%s: Disabling PHY\n", __func__);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
-	udelay(500);
-
-	/* Power up sequence */
-	switch (ver) {
-	case HDMI_VERSION_8996_V2:
-	case HDMI_VERSION_8996_V3:
-	case HDMI_VERSION_8996_V3_1_8:
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x04);
-		break;
-	}
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX0_TX1_LANE_CTL, 0x0F);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX2_TX3_LANE_CTL, 0x0F);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l0_lane_mode);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l2_lane_mode);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		     QSERDES_TX_L0_TX_BAND, cfg.tx_l0_tx_band);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		     QSERDES_TX_L0_TX_BAND, cfg.tx_l1_tx_band);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		     QSERDES_TX_L0_TX_BAND, cfg.tx_l2_tx_band);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		     QSERDES_TX_L0_TX_BAND, cfg.tx_l3_tx_band);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0E);
-	if (ver == HDMI_VERSION_8996_V1)
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
-
-	/* Bypass VCO calibration */
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
-					cfg.com_svs_mode_clk_sel);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_TRIM, 0x0F);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IVCO, 0x0F);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_CTRL,
-			cfg.com_vco_tune_ctrl);
-
-	switch (ver) {
-	case HDMI_VERSION_8996_V1:
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
-					cfg.com_svs_mode_clk_sel);
-		break;
-	default:
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
-	}
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_SELECT, 0x30);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_HSCLK_SEL,
-		       cfg.com_hsclk_sel);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_EN,
-		       cfg.com_lock_cmp_en);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CCTRL_MODE0,
-		       cfg.com_pll_cctrl_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RCTRL_MODE0,
-		       cfg.com_pll_rctrl_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CP_CTRL_MODE0,
-		       cfg.com_cp_ctrl_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START_MODE0,
-		       cfg.com_dec_start_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1_MODE0,
-		       cfg.com_div_frac_start1_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2_MODE0,
-		       cfg.com_div_frac_start2_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3_MODE0,
-		       cfg.com_div_frac_start3_mode0);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
-			cfg.com_integloop_gain0_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
-			cfg.com_integloop_gain1_mode0);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0,
-			cfg.com_lock_cmp1_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0,
-			cfg.com_lock_cmp2_mode0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0,
-			cfg.com_lock_cmp3_mode0);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_MAP, 0x00);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_EN,
-		       cfg.com_core_clk_en);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORECLK_DIV,
-		       cfg.com_coreclk_div);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
-
-	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
-
-	/* TX lanes setup (TX 0/1/2/3) */
-	if (ver == HDMI_VERSION_8996_V3_1_8) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				0x00000023);
-	} else {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				cfg.tx_l0_tx_drv_lvl);
-	}
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
-		       cfg.tx_l0_tx_emp_post1_lvl);
-
-	if (ver == HDMI_VERSION_8996_V3_1_8) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				0x00000023);
-	} else {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				cfg.tx_l1_tx_drv_lvl);
-	}
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
-		       cfg.tx_l1_tx_emp_post1_lvl);
-
-	if (ver == HDMI_VERSION_8996_V3_1_8) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				0x00000023);
-	} else {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				cfg.tx_l2_tx_drv_lvl);
-	}
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
-		       cfg.tx_l2_tx_emp_post1_lvl);
-
-	if (ver == HDMI_VERSION_8996_V3_1_8) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				0x00000020);
-	} else {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-				QSERDES_TX_L0_TX_DRV_LVL,
-				cfg.tx_l3_tx_drv_lvl);
-	}
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
-		       cfg.tx_l3_tx_emp_post1_lvl);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL1,
-		       cfg.tx_l0_vmode_ctrl1);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL2,
-		       cfg.tx_l0_vmode_ctrl2);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL1,
-		       cfg.tx_l1_vmode_ctrl1);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL2,
-		       cfg.tx_l1_vmode_ctrl2);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL1,
-		       cfg.tx_l2_vmode_ctrl1);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-			QSERDES_TX_L0_VMODE_CTRL2,
-			cfg.tx_l2_vmode_ctrl2);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_VMODE_CTRL1,
-		       cfg.tx_l3_vmode_ctrl1);
-	if (ver == HDMI_VERSION_8996_V3_1_8) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-				QSERDES_TX_L0_VMODE_CTRL2,
-				0x0000000D);
-	} else {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-				QSERDES_TX_L0_VMODE_CTRL2,
-				cfg.tx_l3_vmode_ctrl2);
-	}
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
-
-	if (ver < HDMI_VERSION_8996_V3) {
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-			       QSERDES_TX_L0_RES_CODE_LANE_TX,
-			       cfg.tx_l0_res_code_lane_tx);
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-			       QSERDES_TX_L0_RES_CODE_LANE_TX,
-			       cfg.tx_l1_res_code_lane_tx);
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-			       QSERDES_TX_L0_RES_CODE_LANE_TX,
-			       cfg.tx_l2_res_code_lane_tx);
-		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-			       QSERDES_TX_L0_RES_CODE_LANE_TX,
-			       cfg.tx_l3_res_code_lane_tx);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESTRIM_CTRL,
-			       cfg.com_restrim_ctrl);
-
-		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
-		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG1, 0x05);
-	}
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, cfg.phy_mode);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_HP_PD_ENABLES, 0x03);
-
-	if (ver == HDMI_VERSION_8996_V2) {
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
-	}
-	/*
-	 * Ensure that vco configuration gets flushed to hardware before
-	 * enabling the PLL
-	 */
-	wmb();
-	return 0;
-}
-
-static int hdmi_8996_phy_ready_status(struct mdss_pll_resources *io)
-{
-	u32 status = 0;
-	int phy_ready = 0;
-	int rc;
-	u32 read_count = 0;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
-		return rc;
-	}
-
-	DEV_DBG("%s: Waiting for PHY Ready\n", __func__);
-
-	/* Poll for PHY read status */
-	while (read_count < HDMI_PLL_POLL_MAX_READS) {
-		status = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
-		if ((status & BIT(0)) == 1) {
-			phy_ready = 1;
-			DEV_DBG("%s: PHY READY\n", __func__);
-			break;
-		}
-		udelay(HDMI_PLL_POLL_TIMEOUT_US);
-		read_count++;
-	}
-
-	if (read_count == HDMI_PLL_POLL_MAX_READS) {
-		phy_ready = 0;
-		DEV_DBG("%s: PHY READY TIMEOUT\n", __func__);
-	}
-
-	mdss_pll_resource_enable(io, false);
-
-	return phy_ready;
-}
-
-static int hdmi_8996_pll_lock_status(struct mdss_pll_resources *io)
-{
-	u32 status;
-	int pll_locked = 0;
-	int rc;
-	u32 read_count = 0;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
-		return rc;
-	}
-
-	DEV_DBG("%s: Waiting for PLL lock\n", __func__);
-
-	while (read_count < HDMI_PLL_POLL_MAX_READS) {
-		status = MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_C_READY_STATUS);
-		if ((status & BIT(0)) == 1) {
-			pll_locked = 1;
-			DEV_DBG("%s: C READY\n", __func__);
-			break;
-		}
-		udelay(HDMI_PLL_POLL_TIMEOUT_US);
-		read_count++;
-	}
-
-	if (read_count == HDMI_PLL_POLL_MAX_READS) {
-		pll_locked = 0;
-		DEV_DBG("%s: C READY TIMEOUT\n", __func__);
-	}
-
-	mdss_pll_resource_enable(io, false);
-
-	return pll_locked;
-}
-
-static int hdmi_8996_v1_perform_sw_calibration(struct clk *c)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	u32 max_code = 0x190;
-	u32 min_code = 0x0;
-	u32 max_cnt = 0;
-	u32 min_cnt = 0;
-	u32 expected_counter_value = 0;
-	u32 step = 0;
-	u32 dbus_all = 0;
-	u32 dbus_sel = 0;
-	u32 vco_code = 0;
-	u32 val = 0;
-
-	vco_code = 0xC8;
-
-	DEV_DBG("%s: Starting SW calibration with vco_code = %d\n", __func__,
-		 vco_code);
-
-	expected_counter_value =
-	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0) << 16) |
-	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0) << 8) |
-	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0));
-
-	DEV_DBG("%s: expected_counter_value = %d\n", __func__,
-		 expected_counter_value);
-
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
-	val |= BIT(4);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
-
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
-	val |= BIT(3);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
-
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x4);
-
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
-	val |= BIT(1);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
-
-	udelay(60);
-
-	while (1) {
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
-			       vco_code & 0xFF);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
-			       (vco_code >> 8) & 0x3);
-
-		udelay(20);
-
-		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
-		val &= ~BIT(1);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
-
-		udelay(60);
-
-		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
-		val |= BIT(1);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
-
-		udelay(60);
-
-		dbus_all =
-		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS3) << 24) |
-		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS2) << 16) |
-		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS1) << 8) |
-		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS0));
-
-		dbus_sel = (dbus_all >> 9) & 0x3FFFF;
-		DEV_DBG("%s: loop[%d], dbus_all = 0x%x, dbus_sel = 0x%x\n",
-			__func__, step, dbus_all, dbus_sel);
-		if (dbus_sel == 0)
-			DEV_ERR("%s: CHECK HDMI REF CLK\n", __func__);
-
-		if (dbus_sel == expected_counter_value) {
-			max_code = vco_code;
-			max_cnt = dbus_sel;
-			min_code = vco_code;
-			min_cnt = dbus_sel;
-		} else if (dbus_sel == 0) {
-			max_code = vco_code;
-			max_cnt = dbus_sel;
-			vco_code = (max_code + min_code)/2;
-		} else if (dbus_sel > expected_counter_value) {
-			min_code = vco_code;
-			min_cnt = dbus_sel;
-			vco_code = (max_code + min_code)/2;
-		} else if (dbus_sel < expected_counter_value) {
-			max_code = vco_code;
-			max_cnt = dbus_sel;
-			vco_code = (max_code + min_code)/2;
-		}
-
-		step++;
-
-		if ((vco_code == 0) || (vco_code == 0x3FF) || (step > 0x3FF)) {
-			DEV_ERR("%s: VCO tune code search failed\n", __func__);
-			rc = -ENOTSUPP;
-			break;
-		}
-		if ((max_code - min_code) <= 1) {
-			if ((max_code - min_code) == 1) {
-				if (abs((int)(max_cnt - expected_counter_value))
-				    < abs((int)(min_cnt - expected_counter_value
-					))) {
-					vco_code = max_code;
-				} else {
-					vco_code = min_code;
-				}
-			}
-			break;
-		}
-		DEV_DBG("%s: loop[%d], new vco_code = %d\n", __func__, step,
-			 vco_code);
-	}
-
-	DEV_DBG("%s: CALIB done. vco_code = %d\n", __func__, vco_code);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
-		       vco_code & 0xFF);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
-		       (vco_code >> 8) & 0x3);
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
-	val &= ~BIT(1);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
-
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
-	val |= BIT(4);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
-
-	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
-	val &= ~BIT(3);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
-
-	return rc;
-}
-
-static int hdmi_8996_v2_perform_sw_calibration(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	u32 vco_code1, vco_code2, integral_loop, ready_poll;
-	u32 read_count = 0;
-
-	while (read_count < (HDMI_PLL_POLL_MAX_READS << 1)) {
-		ready_poll = MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_C_READY_STATUS);
-		if ((ready_poll & BIT(0)) == 1) {
-			ready_poll = 1;
-			DEV_DBG("%s: C READY\n", __func__);
-			break;
-		}
-		udelay(HDMI_PLL_POLL_TIMEOUT_US);
-		read_count++;
-	}
-
-	if (read_count == (HDMI_PLL_POLL_MAX_READS << 1)) {
-		ready_poll = 0;
-		DEV_DBG("%s: C READY TIMEOUT, TRYING SW CALIBRATION\n",
-								__func__);
-	}
-
-	vco_code1 = MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_PLLCAL_CODE1_STATUS);
-	vco_code2 = MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_PLLCAL_CODE2_STATUS);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x5);
-	integral_loop = MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_DEBUG_BUS0);
-
-	if (((ready_poll & 0x1) == 0) || (((ready_poll & 1) == 1) &&
-			(vco_code1 == 0xFF) && ((vco_code2 & 0x3) == 0x1) &&
-			(integral_loop > 0xC0))) {
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x04);
-		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x00);
-		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x17);
-		udelay(100);
-
-		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x11);
-		udelay(100);
-
-		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
-	}
-	return 0;
-}
-
-static int hdmi_8996_perform_sw_calibration(struct clk *c, u32 ver)
-{
-	switch (ver) {
-	case HDMI_VERSION_8996_V1:
-		return hdmi_8996_v1_perform_sw_calibration(c);
-	case HDMI_VERSION_8996_V2:
-		return hdmi_8996_v2_perform_sw_calibration(c);
-	}
-	return 0;
-}
-
-static int hdmi_8996_vco_enable(struct clk *c, u32 ver)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x1);
-	udelay(100);
-
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
-	udelay(100);
-
-	rc = hdmi_8996_perform_sw_calibration(c, ver);
-	if (rc) {
-		DEV_ERR("%s: software calibration failed\n", __func__);
-		return rc;
-	}
-
-	rc = hdmi_8996_pll_lock_status(io);
-	if (!rc) {
-		DEV_ERR("%s: PLL not locked\n", __func__);
-		return rc;
-	}
-
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
-		       0x6F);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
-		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
-		       0x6F);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
-		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
-		       0x6F);
-	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
-		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
-		       0x6F);
-
-	/* Disable SSC */
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER1, 0x0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER2, 0x0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE1, 0x0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE2, 0x0);
-	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x2);
-
-	rc = hdmi_8996_phy_ready_status(io);
-	if (!rc) {
-		DEV_ERR("%s: PHY not READY\n", __func__);
-		return rc;
-	}
-
-	/* Restart the retiming buffer */
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x18);
-	udelay(1);
-	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
-
-	io->pll_on = true;
-	return 0;
-}
-
-static int hdmi_8996_v1_vco_enable(struct clk *c)
-{
-	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V1);
-}
-
-static int hdmi_8996_v2_vco_enable(struct clk *c)
-{
-	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V2);
-}
-
-static int hdmi_8996_v3_vco_enable(struct clk *c)
-{
-	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3);
-}
-
-static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
-{
-	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
-}
-
-static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
-{
-	u32 rng = 64, cmp_cnt = 1024;
-	u32 coreclk_div = 5, clks_pll_divsel = 2;
-	u32 vco_freq, vco_ratio, ppm_range;
-	u64 bclk;
-	struct hdmi_8996_v3_post_divider pd;
-
-	bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-
-	DEV_DBG("%s: rate=%ld\n", __func__, pixel_clk);
-
-	if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
-		pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
-		DEV_ERR("%s: couldn't get post div\n", __func__);
-		return -EINVAL;
-	}
-
-	do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
-
-	vco_freq  = (u32) pd.vco_freq;
-	vco_ratio = (u32) pd.vco_ratio;
-
-	DEV_DBG("%s: freq %d, ratio %d\n", __func__,
-		vco_freq, vco_ratio);
-
-	ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
-	ppm_range /= vco_freq / vco_ratio;
-	ppm_range *= coreclk_div * clks_pll_divsel;
-
-	DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
-
-	return ppm_range;
-}
-
-static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
-	unsigned long rate, u32 ver)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	void __iomem *pll;
-	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
-	int rc = 0;
-
-	rc = hdmi_8996_calculate(rate, &cfg, ver);
-	if (rc) {
-		DEV_ERR("%s: PLL calculation failed\n", __func__);
-		goto end;
-	}
-
-	pll = io->pll_base;
-
-	MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
-		       cfg.com_dec_start_mode0);
-	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
-		       cfg.com_div_frac_start1_mode0);
-	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
-		       cfg.com_div_frac_start2_mode0);
-	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
-		       cfg.com_div_frac_start3_mode0);
-
-	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
-	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
-
-	DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
-end:
-	return rc;
-}
-
-static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	unsigned int set_power_dwn = 0;
-	bool atomic_update = false;
-	int rc, pll_lock_range;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	DEV_DBG("%s: rate %ld\n", __func__, rate);
-
-	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
-		MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
-		pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
-
-		if (pll_lock_range > 0 && vco->rate) {
-			u32 range_limit;
-
-			range_limit  = vco->rate *
-				(pll_lock_range / HDMI_KHZ_TO_HZ);
-			range_limit /= HDMI_KHZ_TO_HZ;
-
-			DEV_DBG("%s: range limit %d\n", __func__, range_limit);
-
-			if (abs(rate - vco->rate) < range_limit)
-				atomic_update = true;
-		}
-	}
-
-	if (io->pll_on && !atomic_update)
-		set_power_dwn = 1;
-
-	if (atomic_update) {
-		hdmi_8996_vco_rate_atomic_update(c, rate, ver);
-	} else {
-		rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
-		if (rc)
-			DEV_ERR("%s: Failed to set clk rate\n", __func__);
-	}
-
-	mdss_pll_resource_enable(io, false);
-
-	if (set_power_dwn)
-		hdmi_8996_vco_enable(c, ver);
-
-	vco->rate = rate;
-	vco->rate_set = true;
-
-	return 0;
-}
-
-static int hdmi_8996_v1_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V1);
-}
-
-static int hdmi_8996_v2_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V2);
-}
-
-static int hdmi_8996_v3_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3);
-}
-
-static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
-}
-
-static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
-{
-	unsigned long divisor;
-
-	switch (hsclk_sel) {
-	case 0:
-		divisor = 2;
-		break;
-	case 1:
-		divisor = 6;
-		break;
-	case 2:
-		divisor = 10;
-		break;
-	case 3:
-		divisor = 14;
-		break;
-	case 4:
-		divisor = 3;
-		break;
-	case 5:
-		divisor = 9;
-		break;
-	case 6:
-	case 13:
-		divisor = 15;
-		break;
-	case 7:
-		divisor = 21;
-		break;
-	case 8:
-		divisor = 4;
-		break;
-	case 9:
-		divisor = 12;
-		break;
-	case 10:
-		divisor = 20;
-		break;
-	case 11:
-		divisor = 28;
-		break;
-	case 12:
-		divisor = 5;
-		break;
-	case 14:
-		divisor = 25;
-		break;
-	case 15:
-		divisor = 35;
-		break;
-	default:
-		divisor = 1;
-		DEV_ERR("%s: invalid hsclk_sel value = %lu",
-				__func__, hsclk_sel);
-		break;
-	}
-
-	return divisor;
-}
-
-static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
-{
-	unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
-		      div_frac_start = 0, vco_clock_freq = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
-		return freq;
-	}
-
-	dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
-
-	div_frac_start =
-		MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_DIV_FRAC_START1_MODE0) |
-		MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
-		MDSS_PLL_REG_R(io->pll_base,
-				QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
-
-	vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
-		* 4 * (HDMI_REF_CLOCK);
-
-	hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0x15;
-	hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
-	tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
-			QSERDES_TX_L0_TX_BAND) & 0x3;
-
-	freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
-
-	mdss_pll_resource_enable(io, false);
-
-	DEV_DBG("%s: freq = %lu\n", __func__, freq);
-
-	return freq;
-}
-
-static long hdmi_8996_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-
-	DEV_DBG("rrate=%ld\n", rrate);
-
-	return rrate;
-}
-
-static int hdmi_8996_vco_prepare(struct clk *c, u32 ver)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	int ret = 0;
-
-	DEV_DBG("rate=%ld\n", vco->rate);
-
-	if (!vco->rate_set && vco->rate)
-		ret = hdmi_8996_vco_set_rate(c, vco->rate, ver);
-
-	if (!ret) {
-		ret = mdss_pll_resource_enable(io, true);
-		if (ret)
-			DEV_ERR("pll resource can't be enabled\n");
-	}
-
-	return ret;
-}
-
-static int hdmi_8996_v1_vco_prepare(struct clk *c)
-{
-	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V1);
-}
-
-static int hdmi_8996_v2_vco_prepare(struct clk *c)
-{
-	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V2);
-}
-
-static int hdmi_8996_v3_vco_prepare(struct clk *c)
-{
-	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3);
-}
-
-static int hdmi_8996_v3_1p8_vco_prepare(struct clk *c)
-{
-	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3_1_8);
-}
-
-static void hdmi_8996_vco_unprepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	vco->rate_set = false;
-
-	if (!io) {
-		DEV_ERR("Invalid input parameter\n");
-		return;
-	}
-
-	if (!io->pll_on &&
-		mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return;
-	}
-
-	io->handoff_resources = false;
-	mdss_pll_resource_enable(io, false);
-	io->pll_on = false;
-}
-
-static enum handoff hdmi_8996_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (is_gdsc_disabled(io))
-		return HANDOFF_DISABLED_CLK;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	io->handoff_resources = true;
-
-	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0)) {
-		if (MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
-			io->pll_on = true;
-			c->rate = hdmi_8996_vco_get_rate(c);
-			vco->rate = c->rate;
-			ret = HANDOFF_ENABLED_CLK;
-		} else {
-			io->handoff_resources = false;
-			mdss_pll_resource_enable(io, false);
-			DEV_DBG("%s: PHY not ready\n", __func__);
-		}
-	} else {
-		io->handoff_resources = false;
-		mdss_pll_resource_enable(io, false);
-		DEV_DBG("%s: PLL not locked\n", __func__);
-	}
-
-	DEV_DBG("done, ret=%d\n", ret);
-	return ret;
-}
-
-static const struct clk_ops hdmi_8996_v1_vco_clk_ops = {
-	.enable = hdmi_8996_v1_vco_enable,
-	.set_rate = hdmi_8996_v1_vco_set_rate,
-	.get_rate = hdmi_8996_vco_get_rate,
-	.round_rate = hdmi_8996_vco_round_rate,
-	.prepare = hdmi_8996_v1_vco_prepare,
-	.unprepare = hdmi_8996_vco_unprepare,
-	.handoff = hdmi_8996_vco_handoff,
-};
-
-static const struct clk_ops hdmi_8996_v2_vco_clk_ops = {
-	.enable = hdmi_8996_v2_vco_enable,
-	.set_rate = hdmi_8996_v2_vco_set_rate,
-	.get_rate = hdmi_8996_vco_get_rate,
-	.round_rate = hdmi_8996_vco_round_rate,
-	.prepare = hdmi_8996_v2_vco_prepare,
-	.unprepare = hdmi_8996_vco_unprepare,
-	.handoff = hdmi_8996_vco_handoff,
-};
-
-static const struct clk_ops hdmi_8996_v3_vco_clk_ops = {
-	.enable = hdmi_8996_v3_vco_enable,
-	.set_rate = hdmi_8996_v3_vco_set_rate,
-	.get_rate = hdmi_8996_vco_get_rate,
-	.round_rate = hdmi_8996_vco_round_rate,
-	.prepare = hdmi_8996_v3_vco_prepare,
-	.unprepare = hdmi_8996_vco_unprepare,
-	.handoff = hdmi_8996_vco_handoff,
-};
-
-static const struct clk_ops hdmi_8996_v3_1p8_vco_clk_ops = {
-	.enable = hdmi_8996_v3_1p8_vco_enable,
-	.set_rate = hdmi_8996_v3_1p8_vco_set_rate,
-	.get_rate = hdmi_8996_vco_get_rate,
-	.round_rate = hdmi_8996_vco_round_rate,
-	.prepare = hdmi_8996_v3_1p8_vco_prepare,
-	.unprepare = hdmi_8996_vco_unprepare,
-	.handoff = hdmi_8996_vco_handoff,
-};
-
-
-static struct hdmi_pll_vco_clk hdmi_vco_clk = {
-	.c = {
-		.dbg_name = "hdmi_8996_vco_clk",
-		.ops = &hdmi_8996_v1_vco_clk_ops,
-		CLK_INIT(hdmi_vco_clk.c),
-	},
-};
-
-static struct clk_lookup hdmipllcc_8996[] = {
-	CLK_LIST(hdmi_vco_clk),
-};
-
-int hdmi_8996_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res, u32 ver)
-{
-	int rc = -ENOTSUPP;
-
-	if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) {
-		DEV_ERR("%s: Invalid input parameters\n", __func__);
-		return -EPROBE_DEFER;
-	}
-
-	/* Set client data for vco, mux and div clocks */
-	hdmi_vco_clk.priv = pll_res;
-
-	switch (ver) {
-	case HDMI_VERSION_8996_V2:
-		hdmi_vco_clk.c.ops = &hdmi_8996_v2_vco_clk_ops;
-		break;
-	case HDMI_VERSION_8996_V3:
-		hdmi_vco_clk.c.ops = &hdmi_8996_v3_vco_clk_ops;
-		break;
-	case HDMI_VERSION_8996_V3_1_8:
-		hdmi_vco_clk.c.ops = &hdmi_8996_v3_1p8_vco_clk_ops;
-		break;
-	default:
-		hdmi_vco_clk.c.ops = &hdmi_8996_v1_vco_clk_ops;
-		break;
-	}
-
-	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8996,
-					ARRAY_SIZE(hdmipllcc_8996));
-	if (rc) {
-		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		DEV_DBG("%s SUCCESS\n", __func__);
-	}
-
-	return rc;
-}
-
-int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	return hdmi_8996_pll_clock_register(pdev, pll_res,
-						HDMI_VERSION_8996_V1);
-}
-
-int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	return hdmi_8996_pll_clock_register(pdev, pll_res,
-						HDMI_VERSION_8996_V2);
-}
-
-int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	return hdmi_8996_pll_clock_register(pdev, pll_res,
-						HDMI_VERSION_8996_V3);
-}
-
-int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	return hdmi_8996_pll_clock_register(pdev, pll_res,
-						HDMI_VERSION_8996_V3_1_8);
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c
deleted file mode 100644
index 488ec9f6..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c
+++ /dev/null
@@ -1,827 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-hdmi-pll.h"
-
-#define _W(x, y, z) MDSS_PLL_REG_W(x, y, z)
-#define _R(x, y)    MDSS_PLL_REG_R(x, y)
-
-/* PLL REGISTERS */
-#define BIAS_EN_CLKBUFLR_EN          (0x034)
-#define CLK_ENABLE1                  (0x038)
-#define SYS_CLK_CTRL                 (0x03C)
-#define SYSCLK_BUF_ENABLE            (0x040)
-#define PLL_IVCO                     (0x048)
-#define CP_CTRL_MODE0                (0x060)
-#define PLL_RCTRL_MODE0              (0x068)
-#define PLL_CCTRL_MODE0              (0x070)
-#define SYSCLK_EN_SEL                (0x080)
-#define RESETSM_CNTRL                (0x088)
-#define LOCK_CMP_EN                  (0x090)
-#define LOCK_CMP1_MODE0              (0x098)
-#define LOCK_CMP2_MODE0              (0x09C)
-#define LOCK_CMP3_MODE0              (0x0A0)
-#define DEC_START_MODE0              (0x0B0)
-#define DIV_FRAC_START1_MODE0        (0x0B8)
-#define DIV_FRAC_START2_MODE0        (0x0BC)
-#define DIV_FRAC_START3_MODE0        (0x0C0)
-#define INTEGLOOP_GAIN0_MODE0        (0x0D8)
-#define INTEGLOOP_GAIN1_MODE0        (0x0DC)
-#define VCO_TUNE_CTRL                (0x0EC)
-#define VCO_TUNE_MAP                 (0x0F0)
-#define CLK_SELECT                   (0x138)
-#define HSCLK_SEL                    (0x13C)
-#define CORECLK_DIV_MODE0            (0x148)
-#define CORE_CLK_EN                  (0x154)
-#define C_READY_STATUS               (0x158)
-#define SVS_MODE_CLK_SEL             (0x164)
-
-/* Tx Channel PHY registers */
-#define PHY_TX_EMP_POST1_LVL(n)              ((((n) * 0x200) + 0x400) + 0x000)
-#define PHY_TX_INTERFACE_SELECT_TX_BAND(n)   ((((n) * 0x200) + 0x400) + 0x008)
-#define PHY_TX_CLKBUF_TERM_ENABLE(n)         ((((n) * 0x200) + 0x400) + 0x00C)
-#define PHY_TX_DRV_LVL_RES_CODE_OFFSET(n)    ((((n) * 0x200) + 0x400) + 0x014)
-#define PHY_TX_DRV_LVL(n)                    ((((n) * 0x200) + 0x400) + 0x018)
-#define PHY_TX_LANE_CONFIG(n)                ((((n) * 0x200) + 0x400) + 0x01C)
-#define PHY_TX_PRE_DRIVER_1(n)               ((((n) * 0x200) + 0x400) + 0x024)
-#define PHY_TX_PRE_DRIVER_2(n)               ((((n) * 0x200) + 0x400) + 0x028)
-#define PHY_TX_LANE_MODE(n)                  ((((n) * 0x200) + 0x400) + 0x02C)
-
-/* HDMI PHY registers */
-#define PHY_CFG                      (0x00)
-#define PHY_PD_CTL                   (0x04)
-#define PHY_MODE                     (0x10)
-#define PHY_CLOCK                    (0x5C)
-#define PHY_CMN_CTRL                 (0x68)
-#define PHY_STATUS                   (0xB4)
-
-#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO		10
-#define HDMI_MHZ_TO_HZ				1000000
-#define HDMI_HZ_TO_MHZ				1000000
-#define HDMI_REF_CLOCK_MHZ			19.2
-#define HDMI_REF_CLOCK_HZ			(HDMI_REF_CLOCK_MHZ * 1000000)
-#define HDMI_VCO_MIN_RATE_HZ			25000000
-#define HDMI_VCO_MAX_RATE_HZ			600000000
-
-struct 8998_reg_cfg {
-	u32 tx_band;
-	u32 svs_mode_clk_sel;
-	u32 hsclk_sel;
-	u32 lock_cmp_en;
-	u32 cctrl_mode0;
-	u32 rctrl_mode0;
-	u32 cpctrl_mode0;
-	u32 dec_start_mode0;
-	u32 div_frac_start1_mode0;
-	u32 div_frac_start2_mode0;
-	u32 div_frac_start3_mode0;
-	u32 integloop_gain0_mode0;
-	u32 integloop_gain1_mode0;
-	u32 lock_cmp1_mode0;
-	u32 lock_cmp2_mode0;
-	u32 lock_cmp3_mode0;
-	u32 ssc_per1;
-	u32 ssc_per2;
-	u32 ssc_step_size1;
-	u32 ssc_step_size2;
-	u32 core_clk_en;
-	u32 coreclk_div_mode0;
-	u32 phy_mode;
-	u32 vco_freq;
-	u32 hsclk_divsel;
-	u32 vco_ratio;
-	u32 ssc_en_center;
-
-	u32 l0_tx_drv_lvl;
-	u32 l0_tx_emp_post1_lvl;
-	u32 l1_tx_drv_lvl;
-	u32 l1_tx_emp_post1_lvl;
-	u32 l2_tx_drv_lvl;
-	u32 l2_tx_emp_post1_lvl;
-	u32 l3_tx_drv_lvl;
-	u32 l3_tx_emp_post1_lvl;
-
-	u32 l0_pre_driver_1;
-	u32 l0_pre_driver_2;
-	u32 l1_pre_driver_1;
-	u32 l1_pre_driver_2;
-	u32 l2_pre_driver_1;
-	u32 l2_pre_driver_2;
-	u32 l3_pre_driver_1;
-	u32 l3_pre_driver_2;
-
-	bool debug;
-};
-
-static void hdmi_8998_get_div(struct 8998_reg_cfg * cfg, unsigned long pclk)
-{
-	u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
-				     9, 10, 12, 15, 25};
-	u32 const band_list[] = {0, 1, 2, 3};
-	u32 const sz_ratio = ARRAY_SIZE(ratio_list);
-	u32 const sz_band = ARRAY_SIZE(band_list);
-	u32 const min_freq = 8000, max_freq = 12000;
-	u32 const cmp_cnt = 1024;
-	u32 const th_min = 500, th_max = 1000;
-	u64 bit_clk = pclk * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-	u32 half_rate_mode = 0;
-	u32 freq_optimal, list_elements;
-	int optimal_index;
-	u32 i, j, k;
-	u32 freq_list[sz_ratio * sz_band];
-	u32 found_hsclk_divsel = 0, found_vco_ratio;
-	u32 found_tx_band_sel, found_vco_freq;
-
-find_optimal_index:
-	freq_optimal = max_freq;
-	optimal_index = -1;
-	list_elements = 0;
-
-	for (i = 0; i < sz_ratio; i++) {
-		for (j = 0; j < sz_band; j++) {
-			u64 freq = (bit_clk / (1 << half_rate_mode));
-
-			freq *= (ratio_list[i] * (1 << band_list[j]));
-			do_div(freq, (u64) HDMI_MHZ_TO_HZ);
-			freq_list[list_elements++] = freq;
-		}
-	}
-
-	for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
-		u32 const clks_pll_div = 2, core_clk_div = 5;
-		u32 const rng1 = 16, rng2 = 8;
-		u32 core_clk, rvar1;
-		u32 th1, th2;
-
-		core_clk = (((freq_list[k] /
-			      ratio_list[k / sz_band]) /
-			      clks_pll_div) / core_clk_div);
-
-		rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
-		rvar1 *= rng1;
-		rvar1 /= core_clk;
-
-		th1 = rvar1;
-
-		rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
-		rvar1 *= rng2;
-		rvar1 /= core_clk;
-
-		th2 = rvar1;
-
-		if (freq_list[k] >= min_freq &&
-				freq_list[k] <= max_freq) {
-			if ((th1 >= th_min && th1 <= th_max) ||
-					(th2 >= th_min && th2 <= th_max)) {
-				if (freq_list[k] <= freq_optimal) {
-					freq_optimal = freq_list[k];
-					optimal_index = k;
-				}
-			}
-		}
-	}
-
-	if (optimal_index == -1) {
-		if (!half_rate_mode) {
-			half_rate_mode = 1;
-			goto find_optimal_index;
-		} else {
-			/* set to default values */
-			found_vco_freq = max_freq;
-			found_hsclk_divsel = 0;
-			found_vco_ratio = 2;
-			found_tx_band_sel = 0;
-			pr_err("Config error for pclk %ld\n", pclk);
-		}
-	} else {
-		found_vco_ratio = ratio_list[optimal_index / sz_band];
-		found_tx_band_sel = band_list[optimal_index % sz_band];
-		found_vco_freq = freq_optimal;
-	}
-
-	switch (found_vco_ratio) {
-	case 1:
-		found_hsclk_divsel = 15;
-		break;
-	case 2:
-		found_hsclk_divsel = 0;
-		break;
-	case 3:
-		found_hsclk_divsel = 4;
-		break;
-	case 4:
-		found_hsclk_divsel = 8;
-		break;
-	case 5:
-		found_hsclk_divsel = 12;
-		break;
-	case 6:
-		found_hsclk_divsel = 1;
-		break;
-	case 9:
-		found_hsclk_divsel = 5;
-		break;
-	case 10:
-		found_hsclk_divsel = 2;
-		break;
-	case 12:
-		found_hsclk_divsel = 9;
-		break;
-	case 15:
-		found_hsclk_divsel = 13;
-		break;
-	case 25:
-		found_hsclk_divsel = 14;
-		break;
-	}
-
-	pr_debug("found_vco_freq=%d\n", found_vco_freq);
-	pr_debug("found_hsclk_divsel=%d\n", found_hsclk_divsel);
-	pr_debug("found_vco_ratio=%d\n", found_vco_ratio);
-	pr_debug("found_tx_band_sel=%d\n", found_tx_band_sel);
-	pr_debug("half_rate_mode=%d\n", half_rate_mode);
-	pr_debug("optimal_index=%d\n", optimal_index);
-
-	cfg->vco_freq = found_vco_freq;
-	cfg->hsclk_divsel = found_hsclk_divsel;
-	cfg->vco_ratio = found_vco_ratio;
-	cfg->tx_band = found_tx_band_sel;
-}
-
-static int hdmi_8998_config_phy(unsigned long rate,
-		struct 8998_reg_cfg * cfg)
-{
-	u64 const high_freq_bit_clk_threshold = 3400000000UL;
-	u64 const dig_freq_bit_clk_threshold = 1500000000UL;
-	u64 const mid_freq_bit_clk_threshold = 750000000;
-	u64 fdata, tmds_clk;
-	u64 pll_div = 4 * HDMI_REF_CLOCK_HZ;
-	u64 bclk;
-	u64 vco_freq_mhz;
-	u64 hsclk_sel, dec_start, div_frac_start;
-	u64 rem;
-	u64 cpctrl, rctrl, cctrl;
-	u64 integloop_gain;
-	u32 digclk_divsel;
-	u32 tmds_bclk_ratio;
-	u64 cmp_rng, cmp_cnt = 1024, pll_cmp;
-	bool gen_ssc = false;
-
-	bclk = rate * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
-
-	if (bclk > high_freq_bit_clk_threshold) {
-		tmds_clk = rate / 4;
-		tmds_bclk_ratio = 1;
-	} else {
-		tmds_clk = rate;
-		tmds_bclk_ratio = 0;
-	}
-
-	hdmi_8998_get_div(cfg, rate);
-
-	vco_freq_mhz = cfg->vco_freq * (u64) HDMI_HZ_TO_MHZ;
-	fdata = cfg->vco_freq;
-	do_div(fdata, cfg->vco_ratio);
-
-	hsclk_sel = cfg->hsclk_divsel;
-	dec_start = vco_freq_mhz;
-	do_div(dec_start, pll_div);
-
-	div_frac_start = vco_freq_mhz * (1 << 20);
-	rem = do_div(div_frac_start, pll_div);
-	div_frac_start -= (dec_start * (1 << 20));
-	if (rem > (pll_div >> 1))
-		div_frac_start++;
-
-	if ((div_frac_start != 0) || gen_ssc) {
-		cpctrl = 0x8;
-		rctrl = 0x16;
-		cctrl = 0x34;
-	} else {
-		cpctrl = 0x30;
-		rctrl = 0x18;
-		cctrl = 0x2;
-	}
-
-	digclk_divsel = (bclk > dig_freq_bit_clk_threshold) ? 0x1 : 0x2;
-
-	integloop_gain = ((div_frac_start != 0) ||
-			gen_ssc) ? 0x3F : 0xC4;
-	integloop_gain <<= digclk_divsel;
-	integloop_gain = (integloop_gain <= 2046 ? integloop_gain : 0x7FE);
-
-	cmp_rng = gen_ssc ? 0x40 : 0x10;
-
-	pll_cmp = cmp_cnt * fdata;
-	rem = do_div(pll_cmp, (u64)(HDMI_REF_CLOCK_MHZ * 10));
-	if (rem > ((u64)(HDMI_REF_CLOCK_MHZ * 10) >> 1))
-		pll_cmp++;
-
-	pll_cmp =  pll_cmp - 1;
-
-	pr_debug("VCO_FREQ = %u\n", cfg->vco_freq);
-	pr_debug("FDATA = %llu\n", fdata);
-	pr_debug("DEC_START = %llu\n", dec_start);
-	pr_debug("DIV_FRAC_START = %llu\n", div_frac_start);
-	pr_debug("CPCTRL = %llu\n", cpctrl);
-	pr_debug("RCTRL = %llu\n", rctrl);
-	pr_debug("CCTRL = %llu\n", cctrl);
-	pr_debug("DIGCLK_DIVSEL = %u\n", digclk_divsel);
-	pr_debug("INTEGLOOP_GAIN = %llu\n", integloop_gain);
-	pr_debug("CMP_RNG = %llu\n", cmp_rng);
-	pr_debug("PLL_CMP = %llu\n", pll_cmp);
-
-	cfg->svs_mode_clk_sel = (digclk_divsel & 0xFF);
-	cfg->hsclk_sel = (0x20 | hsclk_sel);
-	cfg->lock_cmp_en = (gen_ssc ? 0x4 : 0x0);
-	cfg->cctrl_mode0 = (cctrl & 0xFF);
-	cfg->rctrl_mode0 = (rctrl & 0xFF);
-	cfg->cpctrl_mode0 = (cpctrl & 0xFF);
-	cfg->dec_start_mode0 = (dec_start & 0xFF);
-	cfg->div_frac_start1_mode0 = (div_frac_start & 0xFF);
-	cfg->div_frac_start2_mode0 = ((div_frac_start & 0xFF00) >> 8);
-	cfg->div_frac_start3_mode0 = ((div_frac_start & 0xF0000) >> 16);
-	cfg->integloop_gain0_mode0 = (integloop_gain & 0xFF);
-	cfg->integloop_gain1_mode0 = (integloop_gain & 0xF00) >> 8;
-	cfg->lock_cmp1_mode0 = (pll_cmp & 0xFF);
-	cfg->lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
-	cfg->lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
-	cfg->ssc_per1 = 0;
-	cfg->ssc_per2 = 0;
-	cfg->ssc_step_size1 = 0;
-	cfg->ssc_step_size2 = 0;
-	cfg->core_clk_en = 0x2C;
-	cfg->coreclk_div_mode0 = 0x5;
-	cfg->phy_mode = (tmds_bclk_ratio ? 0x5 : 0x4);
-	cfg->ssc_en_center = 0x0;
-
-	if (bclk > high_freq_bit_clk_threshold) {
-		cfg->l0_tx_drv_lvl = 0xA;
-		cfg->l0_tx_emp_post1_lvl = 0x3;
-		cfg->l1_tx_drv_lvl = 0xA;
-		cfg->l1_tx_emp_post1_lvl = 0x3;
-		cfg->l2_tx_drv_lvl = 0xA;
-		cfg->l2_tx_emp_post1_lvl = 0x3;
-		cfg->l3_tx_drv_lvl = 0x8;
-		cfg->l3_tx_emp_post1_lvl = 0x3;
-		cfg->l0_pre_driver_1 = 0x0;
-		cfg->l0_pre_driver_2 = 0x1C;
-		cfg->l1_pre_driver_1 = 0x0;
-		cfg->l1_pre_driver_2 = 0x1C;
-		cfg->l2_pre_driver_1 = 0x0;
-		cfg->l2_pre_driver_2 = 0x1C;
-		cfg->l3_pre_driver_1 = 0x0;
-		cfg->l3_pre_driver_2 = 0x0;
-	} else if (bclk > dig_freq_bit_clk_threshold) {
-		cfg->l0_tx_drv_lvl = 0x9;
-		cfg->l0_tx_emp_post1_lvl = 0x3;
-		cfg->l1_tx_drv_lvl = 0x9;
-		cfg->l1_tx_emp_post1_lvl = 0x3;
-		cfg->l2_tx_drv_lvl = 0x9;
-		cfg->l2_tx_emp_post1_lvl = 0x3;
-		cfg->l3_tx_drv_lvl = 0x8;
-		cfg->l3_tx_emp_post1_lvl = 0x3;
-		cfg->l0_pre_driver_1 = 0x0;
-		cfg->l0_pre_driver_2 = 0x16;
-		cfg->l1_pre_driver_1 = 0x0;
-		cfg->l1_pre_driver_2 = 0x16;
-		cfg->l2_pre_driver_1 = 0x0;
-		cfg->l2_pre_driver_2 = 0x16;
-		cfg->l3_pre_driver_1 = 0x0;
-		cfg->l3_pre_driver_2 = 0x0;
-	} else if (bclk > mid_freq_bit_clk_threshold) {
-		cfg->l0_tx_drv_lvl = 0x9;
-		cfg->l0_tx_emp_post1_lvl = 0x3;
-		cfg->l1_tx_drv_lvl = 0x9;
-		cfg->l1_tx_emp_post1_lvl = 0x3;
-		cfg->l2_tx_drv_lvl = 0x9;
-		cfg->l2_tx_emp_post1_lvl = 0x3;
-		cfg->l3_tx_drv_lvl = 0x8;
-		cfg->l3_tx_emp_post1_lvl = 0x3;
-		cfg->l0_pre_driver_1 = 0x0;
-		cfg->l0_pre_driver_2 = 0x0E;
-		cfg->l1_pre_driver_1 = 0x0;
-		cfg->l1_pre_driver_2 = 0x0E;
-		cfg->l2_pre_driver_1 = 0x0;
-		cfg->l2_pre_driver_2 = 0x0E;
-		cfg->l3_pre_driver_1 = 0x0;
-		cfg->l3_pre_driver_2 = 0x0;
-	} else {
-		cfg->l0_tx_drv_lvl = 0x0;
-		cfg->l0_tx_emp_post1_lvl = 0x0;
-		cfg->l1_tx_drv_lvl = 0x0;
-		cfg->l1_tx_emp_post1_lvl = 0x0;
-		cfg->l2_tx_drv_lvl = 0x0;
-		cfg->l2_tx_emp_post1_lvl = 0x0;
-		cfg->l3_tx_drv_lvl = 0x0;
-		cfg->l3_tx_emp_post1_lvl = 0x0;
-		cfg->l0_pre_driver_1 = 0x0;
-		cfg->l0_pre_driver_2 = 0x01;
-		cfg->l1_pre_driver_1 = 0x0;
-		cfg->l1_pre_driver_2 = 0x01;
-		cfg->l2_pre_driver_1 = 0x0;
-		cfg->l2_pre_driver_2 = 0x01;
-		cfg->l3_pre_driver_1 = 0x0;
-		cfg->l3_pre_driver_2 = 0x0;
-	}
-
-	return 0;
-}
-
-static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	struct 8998_reg_cfg cfg = {0};
-	void __iomem *phy = io->phy_base, *pll = io->pll_base;
-
-	rc = hdmi_8998_config_phy(rate, &cfg);
-	if (rc) {
-		pr_err("rate calculation failed\n, rc=%d\n", rc);
-		return rc;
-	}
-
-	_W(phy, PHY_PD_CTL, 0x0);
-	udelay(500);
-
-	_W(phy, PHY_PD_CTL, 0x1);
-	_W(pll, RESETSM_CNTRL, 0x20);
-	_W(phy, PHY_CMN_CTRL, 0x6);
-	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(0), cfg.tx_band);
-	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(1), cfg.tx_band);
-	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(2), cfg.tx_band);
-	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(3), cfg.tx_band);
-	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(0), 0x1);
-	_W(pll, PHY_TX_LANE_MODE(0), 0x20);
-	_W(pll, PHY_TX_LANE_MODE(1), 0x20);
-	_W(pll, PHY_TX_LANE_MODE(2), 0x20);
-	_W(pll, PHY_TX_LANE_MODE(3), 0x20);
-	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(1), 0x1);
-	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(2), 0x1);
-	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(3), 0x1);
-	_W(pll, SYSCLK_BUF_ENABLE, 0x2);
-	_W(pll, BIAS_EN_CLKBUFLR_EN, 0xB);
-	_W(pll, SYSCLK_EN_SEL, 0x37);
-	_W(pll, SYS_CLK_CTRL, 0x2);
-	_W(pll, CLK_ENABLE1, 0xE);
-	_W(pll, PLL_IVCO, 0xF);
-	_W(pll, VCO_TUNE_CTRL, 0x0);
-	_W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
-	_W(pll, CLK_SELECT, 0x30);
-	_W(pll, HSCLK_SEL, cfg.hsclk_sel);
-	_W(pll, LOCK_CMP_EN, cfg.lock_cmp_en);
-	_W(pll, PLL_CCTRL_MODE0, cfg.cctrl_mode0);
-	_W(pll, PLL_RCTRL_MODE0, cfg.rctrl_mode0);
-	_W(pll, CP_CTRL_MODE0, cfg.cpctrl_mode0);
-	_W(pll, DEC_START_MODE0, cfg.dec_start_mode0);
-	_W(pll, DIV_FRAC_START1_MODE0, cfg.div_frac_start1_mode0);
-	_W(pll, DIV_FRAC_START2_MODE0, cfg.div_frac_start2_mode0);
-	_W(pll, DIV_FRAC_START3_MODE0, cfg.div_frac_start3_mode0);
-	_W(pll, INTEGLOOP_GAIN0_MODE0, cfg.integloop_gain0_mode0);
-	_W(pll, INTEGLOOP_GAIN1_MODE0, cfg.integloop_gain1_mode0);
-	_W(pll, LOCK_CMP1_MODE0, cfg.lock_cmp1_mode0);
-	_W(pll, LOCK_CMP2_MODE0, cfg.lock_cmp2_mode0);
-	_W(pll, LOCK_CMP3_MODE0, cfg.lock_cmp3_mode0);
-	_W(pll, VCO_TUNE_MAP, 0x0);
-	_W(pll, CORE_CLK_EN, cfg.core_clk_en);
-	_W(pll, CORECLK_DIV_MODE0, cfg.coreclk_div_mode0);
-
-	_W(pll, PHY_TX_DRV_LVL(0), cfg.l0_tx_drv_lvl);
-	_W(pll, PHY_TX_DRV_LVL(1), cfg.l1_tx_drv_lvl);
-	_W(pll, PHY_TX_DRV_LVL(2), cfg.l2_tx_drv_lvl);
-	_W(pll, PHY_TX_DRV_LVL(3), cfg.l3_tx_drv_lvl);
-
-	_W(pll, PHY_TX_EMP_POST1_LVL(0), cfg.l0_tx_emp_post1_lvl);
-	_W(pll, PHY_TX_EMP_POST1_LVL(1), cfg.l1_tx_emp_post1_lvl);
-	_W(pll, PHY_TX_EMP_POST1_LVL(2), cfg.l2_tx_emp_post1_lvl);
-	_W(pll, PHY_TX_EMP_POST1_LVL(3), cfg.l3_tx_emp_post1_lvl);
-
-	_W(pll, PHY_TX_PRE_DRIVER_1(0), cfg.l0_pre_driver_1);
-	_W(pll, PHY_TX_PRE_DRIVER_1(1), cfg.l1_pre_driver_1);
-	_W(pll, PHY_TX_PRE_DRIVER_1(2), cfg.l2_pre_driver_1);
-	_W(pll, PHY_TX_PRE_DRIVER_1(3), cfg.l3_pre_driver_1);
-
-	_W(pll, PHY_TX_PRE_DRIVER_2(0), cfg.l0_pre_driver_2);
-	_W(pll, PHY_TX_PRE_DRIVER_2(1), cfg.l1_pre_driver_2);
-	_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
-	_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
-
-	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x0);
-	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
-	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
-	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x0);
-
-	_W(phy, PHY_MODE, cfg.phy_mode);
-
-	_W(pll, PHY_TX_LANE_CONFIG(0), 0x10);
-	_W(pll, PHY_TX_LANE_CONFIG(1), 0x10);
-	_W(pll, PHY_TX_LANE_CONFIG(2), 0x10);
-	_W(pll, PHY_TX_LANE_CONFIG(3), 0x10);
-
-	/* Ensure all registers are flushed to hardware */
-	wmb();
-
-	return 0;
-}
-
-static int hdmi_8998_pll_lock_status(struct mdss_pll_resources *io)
-{
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-	u32 status;
-	int rc = 0;
-	void __iomem *pll = io->pll_base;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-	rc = readl_poll_timeout_atomic(pll + C_READY_STATUS,
-			status,
-			((status & BIT(0)) > 0),
-			delay_us,
-			timeout_us);
-	if (rc)
-		pr_err("HDMI PLL(%d) lock failed, status=0x%08x\n",
-				io->index, status);
-	else
-		pr_debug("HDMI PLL(%d) lock passed, status=0x%08x\n",
-				io->index, status);
-
-	mdss_pll_resource_enable(io, false);
-
-	return rc;
-}
-
-static int hdmi_8998_phy_ready_status(struct mdss_pll_resources *io)
-{
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-	u32 status;
-	int rc = 0;
-	void __iomem *phy = io->phy_base;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	rc = readl_poll_timeout_atomic(phy + PHY_STATUS,
-			status,
-			((status & BIT(0)) > 0),
-			delay_us,
-			timeout_us);
-	if (rc)
-		pr_err("HDMI PHY(%d) not ready, status=0x%08x\n",
-				io->index, status);
-	else
-		pr_debug("HDMI PHY(%d) ready, status=0x%08x\n",
-				io->index, status);
-
-	mdss_pll_resource_enable(io, false);
-
-	return rc;
-}
-
-static int hdmi_8998_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource enable failed, rc=%d\n", rc);
-		return rc;
-	}
-
-	if (io->pll_on)
-		goto error;
-
-	rc = hdmi_8998_pll_set_clk_rate(c, rate);
-	if (rc) {
-		pr_err("failed to set clk rate, rc=%d\n", rc);
-		goto error;
-	}
-
-	vco->rate = rate;
-	vco->rate_set = true;
-
-error:
-	(void)mdss_pll_resource_enable(io, false);
-
-	return rc;
-}
-
-static long hdmi_8998_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-static int hdmi_8998_pll_enable(struct clk *c)
-{
-	int rc = 0;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	void __iomem *phy = io->phy_base, *pll = io->pll_base;
-
-	_W(phy, PHY_CFG, 0x1);
-	udelay(100);
-	_W(phy, PHY_CFG, 0x59);
-	udelay(100);
-
-	_W(phy, PHY_CLOCK, 0x6);
-
-	/* Ensure all registers are flushed to hardware */
-	wmb();
-
-	rc = hdmi_8998_pll_lock_status(io);
-	if (rc) {
-		pr_err("PLL not locked, rc=%d\n", rc);
-		return rc;
-	}
-
-	_W(pll, PHY_TX_LANE_CONFIG(0), 0x1F);
-	_W(pll, PHY_TX_LANE_CONFIG(1), 0x1F);
-	_W(pll, PHY_TX_LANE_CONFIG(2), 0x1F);
-	_W(pll, PHY_TX_LANE_CONFIG(3), 0x1F);
-
-	/* Ensure all registers are flushed to hardware */
-	wmb();
-
-	rc = hdmi_8998_phy_ready_status(io);
-	if (rc) {
-		pr_err("PHY NOT READY, rc=%d\n", rc);
-		return rc;
-	}
-
-	_W(phy, PHY_CFG, 0x58);
-	udelay(1);
-	_W(phy, PHY_CFG, 0x59);
-
-	/* Ensure all registers are flushed to hardware */
-	wmb();
-
-	io->pll_on = true;
-	return rc;
-}
-
-static int hdmi_8998_vco_prepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	int rc = 0;
-
-	if (!io) {
-		pr_err("hdmi pll resources are not available\n");
-		return -EINVAL;
-	}
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		pr_err("pll resource enable failed, rc=%d\n", rc);
-		return rc;
-	}
-
-	if (!vco->rate_set && vco->rate) {
-		rc = hdmi_8998_pll_set_clk_rate(c, vco->rate);
-		if (rc) {
-			pr_err("set rate failed, rc=%d\n", rc);
-			goto error;
-		}
-	}
-
-	rc = hdmi_8998_pll_enable(c);
-	if (rc)
-		pr_err("pll enabled failed, rc=%d\n", rc);
-
-error:
-	if (rc)
-		mdss_pll_resource_enable(io, false);
-
-	return rc;
-}
-
-static void hdmi_8998_pll_disable(struct hdmi_pll_vco_clk *vco)
-{
-	struct mdss_pll_resources *io = vco->priv;
-	void __iomem *phy = io->phy_base;
-
-	if (!io->pll_on)
-		return;
-
-	_W(phy, PHY_PD_CTL, 0x0);
-
-	/* Ensure all registers are flushed to hardware */
-	wmb();
-
-	vco->rate_set = false;
-	io->handoff_resources = false;
-	io->pll_on = false;
-}
-
-static void hdmi_8998_vco_unprepare(struct clk *c)
-{
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (!io) {
-		pr_err("HDMI pll resources not available\n");
-		return;
-	}
-
-	hdmi_8998_pll_disable(vco);
-	mdss_pll_resource_enable(io, false);
-}
-
-static enum handoff hdmi_8998_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		pr_err("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	io->handoff_resources = true;
-
-	if (_R(io->pll_base, C_READY_STATUS) & BIT(0) &&
-			_R(io->phy_base, PHY_STATUS) & BIT(0)) {
-		io->pll_on = true;
-		/* TODO: calculate rate based on the phy/pll register values. */
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		io->handoff_resources = false;
-		mdss_pll_resource_enable(io, false);
-		pr_debug("%s: PHY/PLL not ready\n", __func__);
-	}
-
-	pr_debug("done, ret=%d\n", ret);
-	return ret;
-}
-
-static const struct clk_ops hdmi_8998_vco_clk_ops = {
-	.set_rate = hdmi_8998_vco_set_rate,
-	.round_rate = hdmi_8998_vco_round_rate,
-	.prepare = hdmi_8998_vco_prepare,
-	.unprepare = hdmi_8998_vco_unprepare,
-	.handoff = hdmi_8998_vco_handoff,
-};
-
-static struct hdmi_pll_vco_clk hdmi_vco_clk = {
-	.min_rate = HDMI_VCO_MIN_RATE_HZ,
-	.max_rate = HDMI_VCO_MAX_RATE_HZ,
-	.c = {
-		.dbg_name = "hdmi_8998_vco_clk",
-		.ops = &hdmi_8998_vco_clk_ops,
-		CLK_INIT(hdmi_vco_clk.c),
-	},
-};
-
-static struct clk_lookup hdmipllcc_8998[] = {
-	CLK_LIST(hdmi_vco_clk),
-};
-
-int hdmi_8998_pll_clock_register(struct platform_device *pdev,
-				   struct mdss_pll_resources *pll_res)
-{
-	int rc = 0;
-
-	hdmi_vco_clk.priv = pll_res;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8998,
-				   ARRAY_SIZE(hdmipllcc_8998));
-	if (rc) {
-		pr_err("clock register failed, rc=%d\n", rc);
-		return rc;
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll.h b/drivers/clk/qcom/mdss/mdss-hdmi-pll.h
deleted file mode 100644
index 03f6fa2..0000000
--- a/drivers/clk/qcom/mdss/mdss-hdmi-pll.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_HDMI_PLL_H
-#define __MDSS_HDMI_PLL_H
-
-struct hdmi_pll_cfg {
-	unsigned long vco_rate;
-	u32 reg;
-};
-
-struct hdmi_pll_vco_clk {
-	struct clk_hw	hw;
-	unsigned long	rate;	/* current vco rate */
-	unsigned long	min_rate;	/* min vco rate */
-	unsigned long	max_rate;	/* max vco rate */
-	bool		rate_set;
-	struct hdmi_pll_cfg *ip_seti;
-	struct hdmi_pll_cfg *cp_seti;
-	struct hdmi_pll_cfg *ip_setp;
-	struct hdmi_pll_cfg *cp_setp;
-	struct hdmi_pll_cfg *crctrl;
-	void		*priv;
-
-};
-
-static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk_hw(struct clk_hw *hw)
-{
-	return container_of(hw, struct hdmi_pll_vco_clk, hw);
-}
-
-int hdmi_pll_clock_register_28lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int hdmi_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-
-int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res);
-
-int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res);
-
-int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res);
-
-int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res);
-
-int hdmi_8998_pll_clock_register(struct platform_device *pdev,
-				   struct mdss_pll_resources *pll_res);
-#endif
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
deleted file mode 100644
index 4a543d7..0000000
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ /dev/null
@@ -1,381 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/memblock.h>
-
-#include "mdss-pll.h"
-
-/**
- * mdss_pll_get_mp_by_reg_name() -- Find power module by regulator name
- *@pll_res: Pointer to the PLL resource
- *@name: Regulator name as specified in the pll dtsi
- *
- * This is a helper function to retrieve the regulator information
- * for each pll resource.
- */
-struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
-		, char *name)
-{
-
-	struct dss_vreg *regulator = NULL;
-	int i;
-
-	if ((pll_res == NULL) || (pll_res->mp.vreg_config == NULL)) {
-		pr_err("%s Invalid PLL resource\n", __func__);
-		goto error;
-	}
-
-	regulator = pll_res->mp.vreg_config;
-
-	for (i = 0; i < pll_res->mp.num_vreg; i++) {
-		if (!strcmp(name, regulator->vreg_name)) {
-			pr_debug("Found regulator match for %s\n", name);
-			break;
-		}
-		regulator++;
-	}
-
-error:
-	return regulator;
-}
-
-int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
-								bool enable)
-{
-	int rc = 0;
-	struct dss_module_power *mp = &pll_res->mp;
-
-	if (enable) {
-		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
-		if (rc) {
-			pr_err("Failed to enable vregs rc=%d\n", rc);
-			goto vreg_err;
-		}
-
-		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
-		if (rc) {
-			pr_err("Failed to set clock rate rc=%d\n", rc);
-			goto clk_err;
-		}
-
-		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-		if (rc) {
-			pr_err("clock enable failed rc:%d\n", rc);
-			goto clk_err;
-		}
-	} else {
-		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-
-		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
-	}
-
-	return rc;
-
-clk_err:
-	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
-vreg_err:
-	return rc;
-}
-
-static int mdss_pll_util_parse_dt_supply(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int i = 0, rc = 0;
-	u32 tmp = 0;
-	struct device_node *of_node = NULL, *supply_root_node = NULL;
-	struct device_node *supply_node = NULL;
-	struct dss_module_power *mp = &pll_res->mp;
-
-	of_node = pdev->dev.of_node;
-
-	mp->num_vreg = 0;
-	supply_root_node = of_get_child_by_name(of_node,
-						"qcom,platform-supply-entries");
-	if (!supply_root_node) {
-		pr_err("no supply entry present\n");
-		return rc;
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node) {
-		mp->num_vreg++;
-	}
-
-	if (mp->num_vreg == 0) {
-		pr_debug("no vreg\n");
-		return rc;
-	}
-	pr_debug("vreg found. count=%d\n", mp->num_vreg);
-
-	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
-						mp->num_vreg, GFP_KERNEL);
-	if (!mp->vreg_config) {
-		rc = -ENOMEM;
-		return rc;
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node) {
-
-		const char *st = NULL;
-
-		rc = of_property_read_string(supply_node,
-						"qcom,supply-name", &st);
-		if (rc) {
-			pr_err(":error reading name. rc=%d\n", rc);
-			goto error;
-		}
-
-		strlcpy(mp->vreg_config[i].vreg_name, st,
-					sizeof(mp->vreg_config[i].vreg_name));
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-min-voltage", &tmp);
-		if (rc) {
-			pr_err(": error reading min volt. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].min_voltage = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-max-voltage", &tmp);
-		if (rc) {
-			pr_err(": error reading max volt. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].max_voltage = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-enable-load", &tmp);
-		if (rc) {
-			pr_err(": error reading enable load. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].enable_load = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-disable-load", &tmp);
-		if (rc) {
-			pr_err(": error reading disable load. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].disable_load = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-pre-on-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply pre sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-pre-off-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply pre sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-post-on-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply post sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-post-off-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply post sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
-
-		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
-					mp->vreg_config[i].vreg_name,
-					mp->vreg_config[i].min_voltage,
-					mp->vreg_config[i].max_voltage,
-					mp->vreg_config[i].enable_load,
-					mp->vreg_config[i].disable_load,
-					mp->vreg_config[i].pre_on_sleep,
-					mp->vreg_config[i].post_on_sleep,
-					mp->vreg_config[i].pre_off_sleep,
-					mp->vreg_config[i].post_off_sleep);
-		++i;
-
-		rc = 0;
-	}
-
-	return rc;
-
-error:
-	if (mp->vreg_config) {
-		devm_kfree(&pdev->dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-		mp->num_vreg = 0;
-	}
-
-	return rc;
-}
-
-static int mdss_pll_util_parse_dt_clock(struct platform_device *pdev,
-					struct mdss_pll_resources *pll_res)
-{
-	u32 i = 0, rc = 0;
-	struct dss_module_power *mp = &pll_res->mp;
-	const char *clock_name;
-	u32 clock_rate;
-
-	mp->num_clk = of_property_count_strings(pdev->dev.of_node,
-							"clock-names");
-	if (mp->num_clk <= 0) {
-		pr_err("clocks are not defined\n");
-		goto clk_err;
-	}
-
-	mp->clk_config = devm_kzalloc(&pdev->dev,
-			sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
-	if (!mp->clk_config) {
-		rc = -ENOMEM;
-		mp->num_clk = 0;
-		goto clk_err;
-	}
-
-	for (i = 0; i < mp->num_clk; i++) {
-		of_property_read_string_index(pdev->dev.of_node, "clock-names",
-							i, &clock_name);
-		strlcpy(mp->clk_config[i].clk_name, clock_name,
-				sizeof(mp->clk_config[i].clk_name));
-
-		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
-							i, &clock_rate);
-		mp->clk_config[i].rate = clock_rate;
-
-		if (!clock_rate)
-			mp->clk_config[i].type = DSS_CLK_AHB;
-		else
-			mp->clk_config[i].type = DSS_CLK_PCLK;
-	}
-
-clk_err:
-	return rc;
-}
-
-static void mdss_pll_free_bootmem(u32 mem_addr, u32 size)
-{
-	unsigned long pfn_start, pfn_end, pfn_idx;
-
-	pfn_start = mem_addr >> PAGE_SHIFT;
-	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
-	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
-		free_reserved_page(pfn_to_page(pfn_idx));
-}
-
-static int mdss_pll_util_parse_dt_dfps(struct platform_device *pdev,
-					struct mdss_pll_resources *pll_res)
-{
-	int rc = 0;
-	struct device_node *pnode;
-	const u32 *addr;
-	struct vm_struct *area;
-	u64 size;
-	u32 offsets[2];
-	unsigned long virt_add;
-
-	pnode = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
-	if (IS_ERR_OR_NULL(pnode)) {
-		rc = PTR_ERR(pnode);
-		goto pnode_err;
-	}
-
-	addr = of_get_address(pnode, 0, &size, NULL);
-	if (!addr) {
-		pr_err("failed to parse the dfps memory address\n");
-		rc = -EINVAL;
-		goto pnode_err;
-	}
-	/* maintain compatibility for 32/64 bit */
-	offsets[0] = (u32) of_read_ulong(addr, 2);
-	offsets[1] = (u32) size;
-
-	area = get_vm_area(offsets[1], VM_IOREMAP);
-	if (!area) {
-		rc = -ENOMEM;
-		goto dfps_mem_err;
-	}
-
-	virt_add = (unsigned long)area->addr;
-	rc = ioremap_page_range(virt_add, (virt_add + offsets[1]),
-			offsets[0], PAGE_KERNEL);
-	if (rc) {
-		rc = -ENOMEM;
-		goto ioremap_err;
-	}
-
-	pll_res->dfps = kzalloc(sizeof(struct dfps_info), GFP_KERNEL);
-	if (IS_ERR_OR_NULL(pll_res->dfps)) {
-		rc = PTR_ERR(pll_res->dfps);
-		pr_err("couldn't allocate dfps kernel memory\n");
-		goto addr_err;
-	}
-
-	/* memcopy complete dfps structure from kernel virtual memory */
-	memcpy_fromio(pll_res->dfps, area->addr, sizeof(struct dfps_info));
-
-addr_err:
-	if (virt_add)
-		unmap_kernel_range(virt_add, (unsigned long) size);
-ioremap_err:
-	if (area)
-		vfree(area->addr);
-dfps_mem_err:
-	/* free the dfps memory here */
-	memblock_free(offsets[0], offsets[1]);
-	mdss_pll_free_bootmem(offsets[0], offsets[1]);
-pnode_err:
-	if (pnode)
-		of_node_put(pnode);
-
-	dma_release_declared_memory(&pdev->dev);
-	return rc;
-}
-
-int mdss_pll_util_resource_parse(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0;
-	struct dss_module_power *mp = &pll_res->mp;
-
-	rc = mdss_pll_util_parse_dt_supply(pdev, pll_res);
-	if (rc) {
-		pr_err("vreg parsing failed rc=%d\n", rc);
-		goto end;
-	}
-
-	rc = mdss_pll_util_parse_dt_clock(pdev, pll_res);
-	if (rc) {
-		pr_err("clock name parsing failed rc=%d\n", rc);
-		goto clk_err;
-	}
-
-	if (mdss_pll_util_parse_dt_dfps(pdev, pll_res))
-		pr_err("dfps not enabled!\n");
-
-	return rc;
-
-clk_err:
-	devm_kfree(&pdev->dev, mp->vreg_config);
-	mp->num_vreg = 0;
-end:
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
deleted file mode 100644
index 7b95b107..0000000
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ /dev/null
@@ -1,393 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-hdmi-pll.h"
-
-int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
-{
-	int rc = 0;
-	int changed = 0;
-
-	if (!pll_res) {
-		pr_err("Invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Don't turn off resources during handoff or add more than
-	 * 1 refcount.
-	 */
-	if (pll_res->handoff_resources &&
-		(!enable || (enable & pll_res->resource_enable))) {
-		pr_debug("Do not turn on/off pll resources during handoff case\n");
-		return rc;
-	}
-
-	if (enable) {
-		if (pll_res->resource_ref_cnt == 0)
-			changed++;
-		pll_res->resource_ref_cnt++;
-	} else {
-		if (pll_res->resource_ref_cnt) {
-			pll_res->resource_ref_cnt--;
-			if (pll_res->resource_ref_cnt == 0)
-				changed++;
-		} else {
-			pr_err("PLL Resources already OFF\n");
-		}
-	}
-
-	if (changed) {
-		rc = mdss_pll_util_resource_enable(pll_res, enable);
-		if (rc)
-			pr_err("Resource update failed rc=%d\n", rc);
-		else
-			pll_res->resource_enable = enable;
-	}
-
-	return rc;
-}
-
-static int mdss_pll_resource_init(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0;
-	struct dss_module_power *mp = &pll_res->mp;
-
-	rc = msm_dss_config_vreg(&pdev->dev,
-				mp->vreg_config, mp->num_vreg, 1);
-	if (rc) {
-		pr_err("Vreg config failed rc=%d\n", rc);
-		goto vreg_err;
-	}
-
-	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
-	if (rc) {
-		pr_err("Clock get failed rc=%d\n", rc);
-		goto clk_err;
-	}
-
-	return rc;
-
-clk_err:
-	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
-vreg_err:
-	return rc;
-}
-
-static void mdss_pll_resource_deinit(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	struct dss_module_power *mp = &pll_res->mp;
-
-	msm_dss_put_clk(mp->clk_config, mp->num_clk);
-
-	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
-}
-
-static void mdss_pll_resource_release(struct platform_device *pdev,
-					struct mdss_pll_resources *pll_res)
-{
-	struct dss_module_power *mp = &pll_res->mp;
-
-	mp->num_vreg = 0;
-	mp->num_clk = 0;
-}
-
-static int mdss_pll_resource_parse(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc = 0;
-	const char *compatible_stream;
-
-	rc = mdss_pll_util_resource_parse(pdev, pll_res);
-	if (rc) {
-		pr_err("Failed to parse the resources rc=%d\n", rc);
-		goto end;
-	}
-
-	compatible_stream = of_get_property(pdev->dev.of_node,
-				"compatible", NULL);
-	if (!compatible_stream) {
-		pr_err("Failed to parse the compatible stream\n");
-		goto err;
-	}
-
-	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
-	if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
-		pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_7nm"))
-		pll_res->pll_interface_type = MDSS_DP_PLL_7NM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm_v2"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM_V2;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm_v4_1"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM_V4_1;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_14nm"))
-		pll_res->pll_interface_type = MDSS_DSI_PLL_14NM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_14nm"))
-		pll_res->pll_interface_type = MDSS_DP_PLL_14NM;
-	else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_28lpm"))
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_28LPM;
-	else
-		goto err;
-
-	return rc;
-
-err:
-	mdss_pll_resource_release(pdev, pll_res);
-end:
-	return rc;
-}
-static int mdss_pll_clock_register(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res)
-{
-	int rc;
-
-	switch (pll_res->pll_interface_type) {
-	case MDSS_DSI_PLL_10NM:
-		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
-		break;
-	case MDSS_DP_PLL_10NM:
-		rc = dp_pll_clock_register_10nm(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_7NM:
-	case MDSS_DSI_PLL_7NM_V2:
-	case MDSS_DSI_PLL_7NM_V4_1:
-		rc = dsi_pll_clock_register_7nm(pdev, pll_res);
-		break;
-	case MDSS_DP_PLL_7NM:
-		rc = dp_pll_clock_register_7nm(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_28LPM:
-		rc = dsi_pll_clock_register_28lpm(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_14NM:
-		rc = dsi_pll_clock_register_14nm(pdev, pll_res);
-		break;
-	case MDSS_DP_PLL_14NM:
-		rc = dp_pll_clock_register_14nm(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_28LPM:
-		rc = hdmi_pll_clock_register_28lpm(pdev, pll_res);
-		break;
-	case MDSS_UNKNOWN_PLL:
-	default:
-		rc = -EINVAL;
-		break;
-	}
-
-	if (rc)
-		pr_err("Pll ndx=%d clock register failed rc=%d\n",
-				pll_res->index, rc);
-
-	return rc;
-}
-
-static inline int mdss_pll_get_ioresurces(struct platform_device *pdev,
-				void __iomem **regmap, char *resource_name)
-{
-	int rc = 0;
-	struct resource *rsc = platform_get_resource_byname(pdev,
-						IORESOURCE_MEM, resource_name);
-	if (rsc) {
-		*regmap = devm_ioremap(&pdev->dev,
-					rsc->start, resource_size(rsc));
-		if (!regmap)
-			return -ENOMEM;
-	}
-	return rc;
-}
-
-static int mdss_pll_probe(struct platform_device *pdev)
-{
-	int rc = 0;
-	const char *label;
-	struct mdss_pll_resources *pll_res;
-
-	if (!pdev->dev.of_node) {
-		pr_err("MDSS pll driver only supports device tree probe\n");
-		return -ENOTSUPP;
-	}
-
-	label = of_get_property(pdev->dev.of_node, "label", NULL);
-	if (!label)
-		pr_info("MDSS pll label not specified\n");
-	else
-		pr_info("MDSS pll label = %s\n", label);
-
-	pll_res = devm_kzalloc(&pdev->dev, sizeof(struct mdss_pll_resources),
-								GFP_KERNEL);
-	if (!pll_res)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, pll_res);
-
-	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
-			&pll_res->index);
-	if (rc) {
-		pr_err("Unable to get the cell-index rc=%d\n", rc);
-		pll_res->index = 0;
-	}
-
-	pll_res->ssc_en = of_property_read_bool(pdev->dev.of_node,
-						"qcom,dsi-pll-ssc-en");
-
-	if (pll_res->ssc_en) {
-		pr_info("%s: label=%s PLL SSC enabled\n", __func__, label);
-
-		rc = of_property_read_u32(pdev->dev.of_node,
-			"qcom,ssc-frequency-hz", &pll_res->ssc_freq);
-
-		rc = of_property_read_u32(pdev->dev.of_node,
-			"qcom,ssc-ppm", &pll_res->ssc_ppm);
-
-		pll_res->ssc_center = false;
-
-		label = of_get_property(pdev->dev.of_node,
-			"qcom,dsi-pll-ssc-mode", NULL);
-
-		if (label && !strcmp(label, "center-spread"))
-			pll_res->ssc_center = true;
-	}
-
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->pll_base, "pll_base")) {
-		pr_err("Unable to remap pll base resources\n");
-		return -ENOMEM;
-	}
-
-	pr_debug("%s: ndx=%d base=%p\n", __func__,
-			pll_res->index, pll_res->pll_base);
-
-	rc = mdss_pll_resource_parse(pdev, pll_res);
-	if (rc) {
-		pr_err("Pll resource parsing from dt failed rc=%d\n", rc);
-		return rc;
-	}
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->phy_base, "phy_base")) {
-		pr_err("Unable to remap pll phy base resources\n");
-		return -ENOMEM;
-	}
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->dyn_pll_base,
-							"dynamic_pll_base")) {
-		pr_err("Unable to remap dynamic pll base resources\n");
-		return -ENOMEM;
-	}
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->ln_tx0_base,
-							"ln_tx0_base")) {
-		pr_err("Unable to remap Lane TX0 base resources\n");
-		return -ENOMEM;
-	}
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->ln_tx1_base,
-							"ln_tx1_base")) {
-		pr_err("Unable to remap Lane TX1 base resources\n");
-		return -ENOMEM;
-	}
-
-	if (mdss_pll_get_ioresurces(pdev, &pll_res->gdsc_base, "gdsc_base")) {
-		pr_err("Unable to remap gdsc base resources\n");
-		return -ENOMEM;
-	}
-
-	rc = mdss_pll_resource_init(pdev, pll_res);
-	if (rc) {
-		pr_err("Pll ndx=%d resource init failed rc=%d\n",
-				pll_res->index, rc);
-		return rc;
-	}
-
-	rc = mdss_pll_clock_register(pdev, pll_res);
-	if (rc) {
-		pr_err("Pll ndx=%d clock register failed rc=%d\n",
-			pll_res->index, rc);
-		goto clock_register_error;
-	}
-
-	return rc;
-
-clock_register_error:
-	mdss_pll_resource_deinit(pdev, pll_res);
-	return rc;
-}
-
-static int mdss_pll_remove(struct platform_device *pdev)
-{
-	struct mdss_pll_resources *pll_res;
-
-	pll_res = platform_get_drvdata(pdev);
-	if (!pll_res) {
-		pr_err("Invalid PLL resource data\n");
-		return 0;
-	}
-
-	mdss_pll_resource_deinit(pdev, pll_res);
-	mdss_pll_resource_release(pdev, pll_res);
-	return 0;
-}
-
-static const struct of_device_id mdss_pll_dt_match[] = {
-	{.compatible = "qcom,mdss_dsi_pll_10nm"},
-	{.compatible = "qcom,mdss_dp_pll_10nm"},
-	{.compatible = "qcom,mdss_dsi_pll_7nm"},
-	{.compatible = "qcom,mdss_dsi_pll_7nm_v2"},
-	{.compatible = "qcom,mdss_dsi_pll_7nm_v4_1"},
-	{.compatible = "qcom,mdss_dp_pll_7nm"},
-	{.compatible = "qcom,mdss_dsi_pll_28lpm"},
-	{.compatible = "qcom,mdss_dsi_pll_14nm"},
-	{.compatible = "qcom,mdss_dp_pll_14nm"},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, mdss_clock_dt_match);
-
-static struct platform_driver mdss_pll_driver = {
-	.probe = mdss_pll_probe,
-	.remove = mdss_pll_remove,
-	.driver = {
-		.name = "mdss_pll",
-		.of_match_table = mdss_pll_dt_match,
-	},
-};
-
-static int __init mdss_pll_driver_init(void)
-{
-	int rc;
-
-	rc = platform_driver_register(&mdss_pll_driver);
-	if (rc)
-		pr_err("mdss_register_pll_driver() failed!\n");
-
-	return rc;
-}
-fs_initcall(mdss_pll_driver_init);
-
-static void __exit mdss_pll_driver_deinit(void)
-{
-	platform_driver_unregister(&mdss_pll_driver);
-}
-module_exit(mdss_pll_driver_deinit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("mdss pll driver");
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
deleted file mode 100644
index 17edd24..0000000
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __MDSS_PLL_H
-#define __MDSS_PLL_H
-
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/regmap.h>
-#include "../clk-regmap.h"
-#include "../clk-regmap-divider.h"
-#include "../clk-regmap-mux.h"
-
-#if defined(CONFIG_DRM)
-#include <linux/sde_io_util.h>
-#else
-#include <linux/mdss_io_util.h>
-#endif
-
-#define MDSS_PLL_REG_W(base, offset, data)	\
-				writel_relaxed((data), (base) + (offset))
-#define MDSS_PLL_REG_R(base, offset)	readl_relaxed((base) + (offset))
-
-#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
-	(((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
-	 ((data0) << 8) | (((addr0) / 4) & 0xFF))
-
-#define MDSS_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1)   \
-		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
-			(base) + (offset))
-
-enum {
-	MDSS_DSI_PLL_10NM,
-	MDSS_DP_PLL_10NM,
-	MDSS_DSI_PLL_7NM,
-	MDSS_DSI_PLL_7NM_V2,
-	MDSS_DSI_PLL_7NM_V4_1,
-	MDSS_DP_PLL_7NM,
-	MDSS_DSI_PLL_28LPM,
-	MDSS_DSI_PLL_14NM,
-	MDSS_DP_PLL_14NM,
-	MDSS_HDMI_PLL_28LPM,
-	MDSS_UNKNOWN_PLL,
-};
-
-enum {
-	MDSS_PLL_TARGET_8996,
-};
-
-#define DFPS_MAX_NUM_OF_FRAME_RATES 20
-
-struct dfps_panel_info {
-	uint32_t enabled;
-	uint32_t frame_rate_cnt;
-	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
-};
-
-struct dfps_pll_codes {
-	uint32_t pll_codes_1;
-	uint32_t pll_codes_2;
-};
-
-struct dfps_codes_info {
-	uint32_t is_valid;
-	uint32_t frame_rate;	/* hz */
-	uint32_t clk_rate;	/* hz */
-	struct dfps_pll_codes pll_codes;
-};
-
-struct dfps_info {
-	struct dfps_panel_info panel_dfps;
-	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
-	void *dfps_fb_base;
-};
-
-struct mdss_pll_resources {
-
-	/* Pll specific resources like GPIO, power supply, clocks, etc*/
-	struct dss_module_power mp;
-
-	/*
-	 * dsi/edp/hmdi plls' base register, phy, gdsc and dynamic refresh
-	 * register mapping
-	 */
-	void __iomem	*pll_base;
-	void __iomem	*phy_base;
-	void __iomem	*ln_tx0_base;
-	void __iomem	*ln_tx1_base;
-	void __iomem	*gdsc_base;
-	void __iomem	*dyn_pll_base;
-
-	bool	is_init_locked;
-	s64	vco_current_rate;
-	s64	vco_locking_rate;
-	s64	vco_ref_clk_rate;
-
-	/*
-	 * Certain pll's needs to update the same vco rate after resume in
-	 * suspend/resume scenario. Cached the vco rate for such plls.
-	 */
-	unsigned long	vco_cached_rate;
-	u32		cached_cfg0;
-	u32		cached_cfg1;
-	u32		cached_outdiv;
-
-	u32		cached_postdiv1;
-	u32		cached_postdiv3;
-	u32		cached_vreg_cfg;
-
-	/* dsi/edp/hmdi pll interface type */
-	u32		pll_interface_type;
-
-	/*
-	 * Target ID. Used in pll_register API for valid target check before
-	 * registering the PLL clocks.
-	 */
-	u32		target_id;
-
-	/* HW recommended delay during configuration of vco clock rate */
-	u32		vco_delay;
-
-	/* Ref-count of the PLL resources */
-	u32		resource_ref_cnt;
-
-	/*
-	 * Keep track to resource status to avoid updating same status for the
-	 * pll from different paths
-	 */
-	bool		resource_enable;
-
-	/*
-	 * Certain plls' do not allow vco rate update if it is on. Keep track of
-	 * status for them to turn on/off after set rate success.
-	 */
-	bool		pll_on;
-
-	/*
-	 * handoff_status is true of pll is already enabled by bootloader with
-	 * continuous splash enable case. Clock API will call the handoff API
-	 * to enable the status. It is disabled if continuous splash
-	 * feature is disabled.
-	 */
-	bool		handoff_resources;
-
-	/*
-	 * caching the pll trim codes in the case of dynamic refresh
-	 */
-	int		cache_pll_trim_codes[2];
-
-	/*
-	 * for maintaining the status of saving trim codes
-	 */
-	bool		reg_upd;
-
-	/*
-	 * Notifier callback for MDSS gdsc regulator events
-	 */
-	struct notifier_block gdsc_cb;
-
-	/*
-	 * Worker function to call PLL off event
-	 */
-	struct work_struct pll_off;
-
-	/*
-	 * PLL index if multiple index are available. Eg. in case of
-	 * DSI we have 2 plls.
-	 */
-	uint32_t index;
-
-	bool ssc_en;	/* share pll with master */
-	bool ssc_center;	/* default is down spread */
-	u32 ssc_freq;
-	u32 ssc_ppm;
-
-	struct mdss_pll_resources *slave;
-
-	/*
-	 * target pll revision information
-	 */
-	int		revision;
-
-	void *priv;
-
-	/*
-	 * dynamic refresh pll codes stored in this structure
-	 */
-	struct dfps_info *dfps;
-
-};
-
-struct mdss_pll_vco_calc {
-	s32 div_frac_start1;
-	s32 div_frac_start2;
-	s32 div_frac_start3;
-	s64 dec_start1;
-	s64 dec_start2;
-	s64 pll_plllock_cmp1;
-	s64 pll_plllock_cmp2;
-	s64 pll_plllock_cmp3;
-};
-
-static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
-{
-	if (!pll_res->gdsc_base) {
-		WARN(1, "gdsc_base register is not defined\n");
-		return true;
-	}
-	return readl_relaxed(pll_res->gdsc_base) & BIT(31) ? false : true;
-}
-
-static inline int mdss_pll_div_prepare(struct clk_hw *hw)
-{
-	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
-	/* Restore the divider's value */
-	return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
-				clk_hw_get_rate(parent_hw));
-}
-
-static inline int mdss_set_mux_sel(void *context, unsigned int reg,
-					unsigned int val)
-{
-	return 0;
-}
-
-static inline int mdss_get_mux_sel(void *context, unsigned int reg,
-					unsigned int *val)
-{
-	*val = 0;
-	return 0;
-}
-
-int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
-int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
-								bool enable);
-int mdss_pll_util_resource_parse(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
-		, char *name);
-#endif
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
deleted file mode 100644
index cf46c7f..0000000
--- a/drivers/clk/qcom/mdss/mdss_pll_trace.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- */
-
-#if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
-#define _MDSS_PLL_TRACE_H_
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mdss_pll
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE ../../drivers/clk/qcom/mdss/mdss_pll_trace
-
-
-TRACE_EVENT(mdss_pll_lock_start,
-	TP_PROTO(
-			u64 vco_cached_rate,
-			s64 vco_current_rate,
-			u32 cached_cfg0,
-			u32 cached_cfg1,
-			u32 cached_outdiv,
-			u32 resource_ref_cnt),
-	TP_ARGS(
-			vco_cached_rate,
-			vco_current_rate,
-			cached_cfg0,
-			cached_cfg1,
-			cached_outdiv,
-			resource_ref_cnt),
-	TP_STRUCT__entry(
-			__field(u64, vco_cached_rate)
-			__field(s64, vco_current_rate)
-			__field(u32, cached_cfg0)
-			__field(u32, cached_cfg1)
-			__field(u32, cached_outdiv)
-			__field(u32, resource_ref_cnt)
-
-	),
-	TP_fast_assign(
-			__entry->vco_cached_rate = vco_cached_rate;
-			__entry->vco_current_rate = vco_current_rate;
-			__entry->cached_cfg0 = cached_cfg0;
-			__entry->cached_cfg1 = cached_cfg1;
-			__entry->cached_outdiv = cached_outdiv;
-			__entry->resource_ref_cnt = resource_ref_cnt;
-	),
-	 TP_printk(
-		"vco_cached_rate=%llu vco_current_rate=%lld cached_cfg0=%d cached_cfg1=%d cached_outdiv=%d resource_ref_cnt=%d",
-			__entry->vco_cached_rate,
-			__entry->vco_current_rate,
-			__entry->cached_cfg0,
-			__entry->cached_cfg1,
-			__entry->cached_outdiv,
-			__entry->resource_ref_cnt)
-);
-
-TRACE_EVENT(pll_tracing_mark_write,
-	TP_PROTO(int pid, const char *name, bool trace_begin),
-	TP_ARGS(pid, name, trace_begin),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(trace_name, name)
-			__field(bool, trace_begin)
-	),
-	TP_fast_assign(
-			__entry->pid = pid;
-			__assign_str(trace_name, name);
-			__entry->trace_begin = trace_begin;
-	),
-	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
-		__entry->pid, __get_str(trace_name))
-)
-
-TRACE_EVENT(mdss_pll_trace_counter,
-	TP_PROTO(int pid, char *name, int value),
-	TP_ARGS(pid, name, value),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(counter_name, name)
-			__field(int, value)
-	),
-	TP_fast_assign(
-			__entry->pid = current->tgid;
-			__assign_str(counter_name, name);
-			__entry->value = value;
-	),
-	TP_printk("%d|%s|%d", __entry->pid,
-			__get_str(counter_name), __entry->value)
-)
-
-#define MDSS_PLL_ATRACE_END(name) trace_pll_tracing_mark_write(current->tgid,\
-		name, 0)
-#define MDSS_PLL_ATRACE_BEGIN(name) trace_pll_tracing_mark_write(current->tgid,\
-		name, 1)
-#define MDSS_PLL_ATRACE_FUNC() MDSS_PLL_ATRACE_BEGIN(__func__)
-#define MDSS_PLL_ATRACE_INT(name, value) \
-	trace_mdss_pll_trace_counter(current->tgid, name, value)
-
-
-#endif /* _MDSS_PLL_TRACE_H_ */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
index 330266d..e2604f5 100644
--- a/drivers/clk/qcom/npucc-lito.c
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -118,14 +118,15 @@ static const u32 crc_reg_val[] = {
 
 static struct alpha_pll_config npu_cc_pll0_config = {
 	.l = 0x14,
-	.cal_l = 0x44,
+	.cal_l = 0x49,
 	.alpha = 0xD555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x2A9A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 	.custom_reg_offset = crc_reg_offset,
 	.custom_reg_val = crc_reg_val,
 	.num_custom_reg = ARRAY_SIZE(crc_reg_offset),
@@ -181,10 +182,11 @@ static struct alpha_pll_config npu_cc_pll1_config = {
 	.alpha = 0xA000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll npu_cc_pll1 = {
@@ -231,16 +233,17 @@ static struct clk_alpha_pll_postdiv npu_cc_pll1_out_even = {
 	},
 };
 
-static const struct alpha_pll_config npu_q6ss_pll_config = {
+static struct alpha_pll_config npu_q6ss_pll_config = {
 	.l = 0xD,
 	.cal_l = 0x44,
 	.alpha = 0x555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll npu_q6ss_pll = {
@@ -248,6 +251,7 @@ static struct clk_alpha_pll npu_q6ss_pll = {
 	.vco_table = lucid_vco,
 	.num_vco = ARRAY_SIZE(lucid_vco),
 	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.config = &npu_q6ss_pll_config,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "npu_q6ss_pll",
@@ -362,6 +366,7 @@ static struct clk_rcg2 npu_cc_lmh_clk_src = {
 	.hid_width = 5,
 	.parent_map = npu_cc_parent_map_0,
 	.freq_tbl = ftbl_npu_cc_lmh_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "npu_cc_lmh_clk_src",
 		.parent_names = npu_cc_parent_names_0,
@@ -414,6 +419,7 @@ static struct clk_rcg2 npu_dsp_core_clk_src = {
 	.hid_width = 5,
 	.parent_map = npu_cc_parent_map_2,
 	.freq_tbl = ftbl_npu_dsp_core_clk_src,
+	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "npu_dsp_core_clk_src",
 		.parent_names = npu_cc_parent_names_2,
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index dfb2e3b..abd28e7 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -166,6 +166,18 @@ static const struct alpha_pll_config video_pll1_config = {
 	.user_ctl_hi1_val = 0x00000000,
 };
 
+static const struct alpha_pll_config video_pll1_config_sm8250_v2 = {
+	.l = 0x2B,
+	.cal_l = 0x44,
+	.alpha = 0xC000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x329A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
 static struct clk_alpha_pll video_pll1 = {
 	.offset = 0x7d0,
 	.vco_table = lucid_vco,
@@ -299,6 +311,13 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_kona_v2[] = {
+	F(840000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+	F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+	F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 video_cc_mvs1_clk_src = {
 	.cmd_rcgr = 0xbb4,
 	.mnd_width = 0,
@@ -557,10 +576,20 @@ static const struct qcom_cc_desc video_cc_kona_desc = {
 
 static const struct of_device_id video_cc_kona_match_table[] = {
 	{ .compatible = "qcom,videocc-kona" },
+	{ .compatible = "qcom,videocc-kona-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, video_cc_kona_match_table);
 
+static void video_cc_kona_fixup_konav2(struct regmap *regmap)
+{
+	clk_lucid_pll_configure(&video_pll1, regmap,
+		&video_pll1_config_sm8250_v2);
+	video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_kona_v2;
+	video_cc_mvs1_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 840000000;
+	video_cc_mvs1_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 1098000000;
+}
+
 static int video_cc_kona_probe(struct platform_device *pdev)
 {
 	unsigned int videocc_bus_id;
@@ -612,6 +641,10 @@ static int video_cc_kona_probe(struct platform_device *pdev)
 	clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
 	clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				"qcom,videocc-kona-v2"))
+		video_cc_kona_fixup_konav2(regmap);
+
 	ret = qcom_cc_really_probe(pdev, &video_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Video CC clocks\n");
diff --git a/drivers/clk/qcom/videocc-lito.c b/drivers/clk/qcom/videocc-lito.c
index 3fa5e8f7..6619bb4 100644
--- a/drivers/clk/qcom/videocc-lito.c
+++ b/drivers/clk/qcom/videocc-lito.c
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -89,10 +90,11 @@ static struct alpha_pll_config video_pll0_config = {
 	.alpha = 0x0,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi1_val = 0x01800000,
 };
 
 static struct clk_alpha_pll video_pll0 = {
@@ -406,17 +408,13 @@ MODULE_DEVICE_TABLE(of, video_cc_lito_match_table);
 static int video_multipipe_fixup(struct platform_device *pdev,
 				struct regmap *regmap)
 {
-	void __iomem *base;
-	struct resource *res;
-	struct device *dev = &pdev->dev;
 	u32 val, val_fmax;
+	int ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
+	ret = nvmem_cell_read_u32(&pdev->dev, "iris-bin", &val);
+	if (ret)
+		return ret;
 
-	val = readl_relaxed(base);
 	val_fmax = (val >> IRIS_DISABLE_VP_FMAX) & 0x1;
 	val = (val >> IRIS_DISABLE_MULTIPIPE) & 0x1;
 
@@ -434,7 +432,7 @@ static int video_multipipe_fixup(struct platform_device *pdev,
 				200000000;
 		video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_HIGH] =
 				200000000;
-		goto done;
+		return 0;
 	}
 
 	if (val_fmax) {
@@ -446,8 +444,7 @@ static int video_multipipe_fixup(struct platform_device *pdev,
 		video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_HIGH] =
 				365000000;
 	}
-done:
-	devm_iounmap(dev, base);
+
 	return 0;
 }
 
@@ -482,7 +479,7 @@ static int video_cc_lito_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+	clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
 
 	ret = qcom_cc_really_probe(pdev, &video_cc_lito_desc, regmap);
 	if (ret) {
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 450de24..6419169 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p)	= { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)	= { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)	= { "cpll", "gpll", "npll", "xin27m" };
 
-PNAME(mux_aclk_vcodec_pre_p)	= { "aclk_vepu", "aclk_vdpu" };
+PNAME(mux_aclk_vcodec_pre_p)	= { "aclk_vdpu", "aclk_vepu" };
 PNAME(mux_usbphy480m_p)		= { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
 				    "sclk_otgphy0_480m" };
 PNAME(mux_hsicphy480m_p)	= { "cpll", "gpll", "usbphy480m_src" };
@@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 6, GFLAGS),
-	COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
+	COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
 			RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 7, GFLAGS),
 	COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
 			RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
 			RK3288_CLKGATE_CON(12), 8, GFLAGS),
-	GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+	GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
 			RK3288_CLKGATE_CON(12), 9, GFLAGS),
 	GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
 			RK3288_CLKGATE_CON(12), 10, GFLAGS),
@@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
 			RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
 			RK3288_CLKGATE_CON(3), 11, GFLAGS),
-	MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
+	MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
 			RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
 	GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
 		RK3288_CLKGATE_CON(9), 0, GFLAGS),
@@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
 			RK3288_CLKSEL_CON(22), 7, IFLAGS),
 
-	GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
+	GATE(0, "jtag", "ext_jtag", 0,
 			RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
 	COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
@@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
 			RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
 			RK3288_CLKGATE_CON(3), 6, GFLAGS),
-	GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
+	GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
 			RK3288_CLKGATE_CON(13), 9, GFLAGS),
 	DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
 			RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
@@ -676,7 +676,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
 	GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
 	GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
-	GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+	GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
 
 	/* ddrctrl [DDR Controller PHY clock] gates */
 	GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
@@ -816,12 +816,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
 	"pclk_alive_niu",
 	"pclk_pd_pmu",
 	"pclk_pmu_niu",
-	"pclk_core_niu",
-	"pclk_ddrupctl0",
-	"pclk_publ0",
-	"pclk_ddrupctl1",
-	"pclk_publ1",
 	"pmu_hclk_otg0",
+	/* pwm-regulators on some boards, so handoff-critical later */
+	"pclk_rkpwm",
 };
 
 static void __iomem *rk3288_cru_base;
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index e431661..ecbae8a 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
 			RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
 			RK3328_CLKGATE_CON(2), 12, GFLAGS),
 	COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
-			RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
 			RK3328_CLKGATE_CON(2), 4, GFLAGS),
 	COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
 			RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
 	GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
 			RK3328_CLKGATE_CON(25), 1, GFLAGS),
 	GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
-			RK3328_CLKGATE_CON(25), 0, GFLAGS),
+			RK3328_CLKGATE_CON(25), 2, GFLAGS),
 	GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
-			RK3328_CLKGATE_CON(25), 1, GFLAGS),
+			RK3328_CLKGATE_CON(25), 3, GFLAGS),
 	GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
-			RK3328_CLKGATE_CON(25), 0, GFLAGS),
+			RK3328_CLKGATE_CON(25), 4, GFLAGS),
 	GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
-			RK3328_CLKGATE_CON(25), 1, GFLAGS),
+			RK3328_CLKGATE_CON(25), 5, GFLAGS),
 	GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
-			RK3328_CLKGATE_CON(25), 0, GFLAGS),
+			RK3328_CLKGATE_CON(25), 6, GFLAGS),
 
 	COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
 			RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
 
 	/* PD_GMAC */
 	COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
-			RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
 			RK3328_CLKGATE_CON(3), 2, GFLAGS),
 	COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
 			RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
 
 	/* PD_PERI */
 	GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
-	GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
+	GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
 
 	GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
 	GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
@@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
 				     &rk3328_cpuclk_data, rk3328_cpuclk_rates,
 				     ARRAY_SIZE(rk3328_cpuclk_rates));
 
-	rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
+	rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
 				  ROCKCHIP_SOFTRST_HIWORD_MASK);
 
 	rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index ebd9436..1ad53d10 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -160,7 +160,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
 			   unsigned long parent_rate)
 {
 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
-	u32 n_mask, k_mask, m_mask, p_mask;
+	u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
 	struct _ccu_nkmp _nkmp;
 	unsigned long flags;
 	u32 reg;
@@ -179,10 +179,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
 
 	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
 
-	n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
-	k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
-	m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
-	p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+	if (nkmp->n.width)
+		n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
+				 nkmp->n.shift);
+	if (nkmp->k.width)
+		k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
+				 nkmp->k.shift);
+	if (nkmp->m.width)
+		m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
+				 nkmp->m.shift);
+	if (nkmp->p.width)
+		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
+				 nkmp->p.shift);
 
 	spin_lock_irqsave(nkmp->common.lock, flags);
 
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 830d1c8..dc87866 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -662,8 +662,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
 		pll_override_writel(val, params->pmc_divp_reg, pll);
 
 		val = pll_override_readl(params->pmc_divnm_reg, pll);
-		val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
-			~(divn_mask(pll) << div_nmp->override_divn_shift);
+		val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
+			(divn_mask(pll) << div_nmp->override_divn_shift));
 		val |= (cfg->m << div_nmp->override_divm_shift) |
 			(cfg->n << div_nmp->override_divn_shift);
 		pll_override_writel(val, params->pmc_divnm_reg, pll);
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index d977193..1917483 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
 };
 
 static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
-					void __iomem *base,
+					const struct pmc_clk_data *pmc_data,
 					const char **parent_names,
 					int num_parents)
 {
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
 	init.num_parents = num_parents;
 
 	pclk->hw.init = &init;
-	pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+	pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
 	spin_lock_init(&pclk->lock);
 
+	/*
+	 * On some systems, the pmc_plt_clocks already enabled by the
+	 * firmware are being marked as critical to avoid them being
+	 * gated by the clock framework.
+	 */
+	if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+		init.flags |= CLK_IS_CRITICAL;
+
 	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
 	if (ret) {
 		pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
 		return PTR_ERR(parent_names);
 
 	for (i = 0; i < PMC_CLK_NUM; i++) {
-		data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+		data->clks[i] = plt_clk_register(pdev, i, pmc_data,
 						 parent_names, data->nparents);
 		if (IS_ERR(data->clks[i])) {
 			err = PTR_ERR(data->clks[i]);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c1ddafa4..4d37f01 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -136,6 +136,7 @@
 config NPCM7XX_TIMER
 	bool "NPCM7xx timer driver" if COMPILE_TEST
 	depends on HAS_IOMEM
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index eed6fef..30c6f4c 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -296,4 +296,4 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
 TIMER_OF_DECLARE(ox810se_rps,
 		       "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
 TIMER_OF_DECLARE(ox820_rps,
-		       "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+		       "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 75491fc..0df16eb 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
 	struct armada_37xx_dvfs *dvfs;
 	struct platform_device *pdev;
 	unsigned long freq;
-	unsigned int cur_frequency;
+	unsigned int cur_frequency, base_frequency;
 	struct regmap *nb_pm_base, *avs_base;
 	struct device *cpu_dev;
 	int load_lvl, ret;
-	struct clk *clk;
+	struct clk *clk, *parent;
 
 	nb_pm_base =
 		syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
 		return PTR_ERR(clk);
 	}
 
+	parent = clk_get_parent(clk);
+	if (IS_ERR(parent)) {
+		dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+		clk_put(clk);
+		return PTR_ERR(parent);
+	}
+
+	/* Get parent CPU frequency */
+	base_frequency =  clk_get_rate(parent);
+
+	if (!base_frequency) {
+		dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+		clk_put(clk);
+		return -EINVAL;
+	}
+
 	/* Get nominal (current) CPU frequency */
 	cur_frequency = clk_get_rate(clk);
 	if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
 	for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
 	     load_lvl++) {
 		unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
-		freq = cur_frequency / dvfs->divider[load_lvl];
+		freq = base_frequency / dvfs->divider[load_lvl];
 		ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
 		if (ret)
 			goto remove_opp;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4e13361..995685f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1139,6 +1139,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 				   cpufreq_global_kobject, "policy%u", cpu);
 	if (ret) {
 		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+		kobject_put(&policy->kobj);
 		goto err_free_real_cpus;
 	}
 
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6d53f7d..69fc5cf 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
 	/* Failure, so roll back. */
 	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
 
+	kobject_put(&dbs_data->attr_set.kobj);
+
 	policy->governor_data = NULL;
 
 	if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index a005711..29f25d5 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1779,7 +1779,7 @@ static const struct pstate_funcs knl_funcs = {
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(INTEL_FAM6_SANDYBRIDGE, 		core_funcs),
 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_funcs),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_funcs),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	silvermont_funcs),
 	ICPU(INTEL_FAM6_IVYBRIDGE,		core_funcs),
 	ICPU(INTEL_FAM6_HASWELL_CORE,		core_funcs),
 	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_funcs),
@@ -1796,7 +1796,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_funcs),
 	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_funcs),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		core_funcs),
-	ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,       core_funcs),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,     core_funcs),
 	ICPU(INTEL_FAM6_SKYLAKE_X,		core_funcs),
 	{}
 };
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f..8d63a6dc 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 	priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
 	if (IS_ERR(priv.cpu_clk)) {
 		dev_err(priv.dev, "Unable to get cpuclk\n");
-		return PTR_ERR(priv.cpu_clk);
+		err = PTR_ERR(priv.cpu_clk);
+		goto out_node;
 	}
 
 	err = clk_prepare_enable(priv.cpu_clk);
 	if (err) {
 		dev_err(priv.dev, "Unable to prepare cpuclk\n");
-		return err;
+		goto out_node;
 	}
 
 	kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 		goto out_ddr;
 	}
 
-	of_node_put(np);
-	np = NULL;
-
 	err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
-	if (!err)
-		return 0;
+	if (err) {
+		dev_err(priv.dev, "Failed to register cpufreq driver\n");
+		goto out_powersave;
+	}
 
-	dev_err(priv.dev, "Failed to register cpufreq driver\n");
+	of_node_put(np);
+	return 0;
 
+out_powersave:
 	clk_disable_unprepare(priv.powersave_clk);
 out_ddr:
 	clk_disable_unprepare(priv.ddr_clk);
 out_cpu:
 	clk_disable_unprepare(priv.cpu_clk);
+out_node:
 	of_node_put(np);
 
 	return err;
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 75dfbd2..c7710c1 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = of_get_cpu_node(policy->cpu, NULL);
 
+	of_node_put(cpu);
 	if (!cpu)
 		goto out;
 
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 61ae06c..e225edb 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
 	volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
 	if (volt_gpio_np)
 		voltage_gpio = read_gpio(volt_gpio_np);
+	of_node_put(volt_gpio_np);
 	if (!voltage_gpio){
 		pr_err("missing cpu-vcore-select gpio\n");
 		return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
 	if (volt_gpio_np)
 		voltage_gpio = read_gpio(volt_gpio_np);
 
+	of_node_put(volt_gpio_np);
 	pvr = mfspr(SPRN_PVR);
 	has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
 
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 41a0f0b..8414c3a 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
 	    !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
 		pr_info("invalid CBE regs pointers for cpufreq\n");
+		of_node_put(cpu);
 		return -EINVAL;
 	}
 
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 3216e86..032efe8 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -18,8 +18,9 @@
 #define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
 #define LUT_ROW_SIZE			32
 #define CLK_HW_DIV			2
-#define CYCLE_CNTR_OFFSET(c, m)		((c - cpumask_first(m) + 1) * 4)
 
+#define CYCLE_CNTR_OFFSET(c, m, acc_count)				\
+			(acc_count ? ((c - cpumask_first(m) + 1) * 4) : 0)
 enum {
 	REG_ENABLE,
 	REG_FREQ_LUT_TABLE,
@@ -31,6 +32,7 @@ enum {
 };
 
 static unsigned int lut_row_size = LUT_ROW_SIZE;
+static bool accumulative_counter;
 
 struct cpufreq_qcom {
 	struct cpufreq_frequency_table *table;
@@ -79,7 +81,8 @@ static u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
 	cpu_counter = &qcom_cpufreq_counter[cpu];
 	spin_lock_irqsave(&cpu_counter->lock, flags);
 
-	offset = CYCLE_CNTR_OFFSET(cpu, &cpu_domain->related_cpus);
+	offset = CYCLE_CNTR_OFFSET(cpu, &cpu_domain->related_cpus,
+					accumulative_counter);
 	val = readl_relaxed_no_log(cpu_domain->reg_bases[REG_CYCLE_CNTR] +
 				   offset);
 
@@ -360,6 +363,9 @@ static int qcom_cpu_resources_init(struct platform_device *pdev,
 		}
 	}
 
+	accumulative_counter = !of_property_read_bool(dev->of_node,
+					"qcom,no-accumulative-counter");
+
 	ret = qcom_get_related_cpus(index, &c->related_cpus);
 	if (ret) {
 		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6826912..1a576a5 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -660,13 +660,13 @@ static void wake_up_idle_cpus(void *v)
 	int cpu;
 	struct cpumask cpus;
 
+	preempt_disable();
 	if (v) {
 		cpumask_andnot(&cpus, v, cpu_isolated_mask);
 		cpumask_and(&cpus, &cpus, cpu_online_mask);
 	} else
 		cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
 
-	preempt_disable();
 	for_each_cpu(cpu, &cpus) {
 		if (cpu == smp_processor_id())
 			continue;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 7bc3f18..3a01799 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -102,6 +102,7 @@ static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
 static bool suspend_in_progress;
 static struct hrtimer lpm_hrtimer;
 static DEFINE_PER_CPU(struct hrtimer, histtimer);
+static DEFINE_PER_CPU(struct hrtimer, biastimer);
 static struct lpm_debug *lpm_debug;
 static phys_addr_t lpm_debug_phys;
 static const int num_dbg_elements = 0x100;
@@ -333,6 +334,11 @@ static void histtimer_cancel(void)
 {
 	unsigned int cpu = raw_smp_processor_id();
 	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+	ktime_t time_rem;
+
+	time_rem = hrtimer_get_remaining(cpu_histtimer);
+	if (ktime_to_us(time_rem) <= 0)
+		return;
 
 	hrtimer_try_to_cancel(cpu_histtimer);
 }
@@ -378,11 +384,21 @@ static void clusttimer_cancel(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+	ktime_t time_rem;
 
-	hrtimer_try_to_cancel(&cluster->histtimer);
+	time_rem = hrtimer_get_remaining(&cluster->histtimer);
+	if (ktime_to_us(time_rem) > 0)
+		hrtimer_try_to_cancel(&cluster->histtimer);
 
-	if (cluster->parent)
+	if (cluster->parent) {
+		time_rem = hrtimer_get_remaining(
+			&cluster->parent->histtimer);
+
+		if (ktime_to_us(time_rem) <= 0)
+			return;
+
 		hrtimer_try_to_cancel(&cluster->parent->histtimer);
+	}
 }
 
 static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
@@ -413,6 +429,34 @@ static void msm_pm_set_timer(uint32_t modified_time_us)
 	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
 }
 
+static void biastimer_cancel(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+	struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);
+	ktime_t time_rem;
+
+	time_rem = hrtimer_get_remaining(cpu_biastimer);
+	if (ktime_to_us(time_rem) <= 0)
+		return;
+
+	hrtimer_try_to_cancel(cpu_biastimer);
+}
+
+static enum hrtimer_restart biastimer_fn(struct hrtimer *h)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void biastimer_start(uint32_t time_ns)
+{
+	ktime_t bias_ktime = ns_to_ktime(time_ns);
+	unsigned int cpu = raw_smp_processor_id();
+	struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);
+
+	cpu_biastimer->function = biastimer_fn;
+	hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
 static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 		struct lpm_cpu *cpu, int *idx_restrict,
 		uint32_t *idx_restrict_time)
@@ -573,22 +617,36 @@ static void clear_predict_history(void)
 
 static void update_history(struct cpuidle_device *dev, int idx);
 
-static inline bool is_cpu_biased(int cpu)
+static inline bool is_cpu_biased(int cpu, uint64_t *bias_time)
 {
 	u64 now = sched_clock();
 	u64 last = sched_get_cpu_last_busy_time(cpu);
+	u64 diff = 0;
 
 	if (!last)
 		return false;
 
-	return (now - last) < BIAS_HYST;
+	diff = now - last;
+	if (diff < BIAS_HYST) {
+		*bias_time = BIAS_HYST - diff;
+		return true;
+	}
+
+	return false;
 }
 
-static inline bool lpm_disallowed(s64 sleep_us, int cpu)
+static inline bool lpm_disallowed(s64 sleep_us, int cpu, struct lpm_cpu *pm_cpu)
 {
-	if ((sleep_disabled && !cpu_isolated(cpu)) || is_cpu_biased(cpu))
+	uint64_t bias_time = 0;
+
+	if (sleep_disabled && !cpu_isolated(cpu))
 		return true;
 
+	if (is_cpu_biased(cpu, &bias_time) && (!cpu_isolated(cpu))) {
+		pm_cpu->bias = bias_time;
+		return true;
+	}
+
 	if (sleep_us < 0)
 		return true;
 
@@ -613,7 +671,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
 	uint32_t min_residency, max_residency;
 	struct power_params *pwr_params;
 
-	if (lpm_disallowed(sleep_us, dev->cpu))
+	if (lpm_disallowed(sleep_us, dev->cpu, cpu))
 		goto done_select;
 
 	idx_restrict = cpu->nlevels + 1;
@@ -1279,6 +1337,8 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
 	 */
 
 	if (!idx) {
+		if (cpu->bias)
+			biastimer_start(cpu->bias);
 		stop_critical_timings();
 		wfi();
 		start_critical_timings();
@@ -1385,11 +1445,15 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
 	dev->last_residency = ktime_us_delta(ktime_get(), start);
 	update_history(dev, idx);
 	trace_cpu_idle_exit(idx, success);
-	local_irq_enable();
 	if (lpm_prediction && cpu->lpm_prediction) {
 		histtimer_cancel();
 		clusttimer_cancel();
 	}
+	if (cpu->bias) {
+		biastimer_cancel();
+		cpu->bias = 0;
+	}
+	local_irq_enable();
 	return idx;
 }
 
@@ -1690,6 +1754,8 @@ static int lpm_probe(struct platform_device *pdev)
 	for_each_possible_cpu(cpu) {
 		cpu_histtimer = &per_cpu(histtimer, cpu);
 		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cpu_histtimer = &per_cpu(biastimer, cpu);
+		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	}
 
 	cluster_timer_init(lpm_root_node);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 1f88e6d..7f3d6f0 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -46,6 +46,7 @@ struct lpm_cpu {
 	uint32_t ref_premature_cnt;
 	uint32_t tmr_add;
 	bool lpm_prediction;
+	uint64_t bias;
 	struct cpuidle_driver *drv;
 	struct lpm_cluster *parent;
 };
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index f5c07498..0c85a51 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	/* Setup SA */
 	sa = ctx->sa_in;
 
-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
-				 SA_SAVE_IV : SA_NOT_SAVE_IV),
-				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_NOT_SAVE_IV : SA_SAVE_IV),
+				 SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
 				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
 				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
 				 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
 	sa = ctx->sa_out;
 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	/*
+	 * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+	 * It's the DIR_(IN|OUT)BOUND that matters.
+	 */
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
 
 	return 0;
 }
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6eaec9b..d2ec9fd 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	size_t offset_to_sr_ptr;
 	u32 gd_idx = 0;
 	int tmp;
-	bool is_busy;
+	bool is_busy, force_sd;
+
+	/*
+	 * There's a very subtle/disguised "bug" in the hardware that
+	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+	 * of the hardware spec:
+	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+	 * operation modes for >>> "Block ciphers" <<<.
+	 *
+	 * To workaround this issue and stop the hardware from causing
+	 * "overran dst buffer" on ciphertexts that are not a multiple
+	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
+	 * scatter buffers.
+	 */
+	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+		&& (datalen % AES_BLOCK_SIZE);
 
 	/* figure how many gd are needed */
 	tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	}
 
 	/* figure how many sd are needed */
-	if (sg_is_last(dst)) {
+	if (sg_is_last(dst) && force_sd == false) {
 		num_sd = 0;
 	} else {
 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	pd->sa_len = sa_len;
 
 	pd_uinfo = &dev->pdr_uinfo[pd_entry];
-	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;
+	pd_uinfo->dest_va = dst;
+	pd_uinfo->async_req = req;
 
 	if (iv_len)
 		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		/* get first gd we are going to use */
 		gd_idx = fst_gd;
 		pd_uinfo->first_gd = fst_gd;
-		pd_uinfo->num_gd = num_gd;
 		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 		pd->src = gd_dma;
 		/* enable gather */
@@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		 * Indicate gather array is not used
 		 */
 		pd_uinfo->first_gd = 0xffffffff;
-		pd_uinfo->num_gd = 0;
 	}
-	if (sg_is_last(dst)) {
+	if (!num_sd) {
 		/*
 		 * we know application give us dst a whole piece of memory
 		 * no need to use scatter ring.
 		 */
 		pd_uinfo->using_sd = 0;
 		pd_uinfo->first_sd = 0xffffffff;
-		pd_uinfo->num_sd = 0;
-		pd_uinfo->dest_va = dst;
 		sa->sa_command_0.bf.scatter = 0;
 		pd->dest = (u32)dma_map_page(dev->core_dev->device,
 					     sg_page(dst), dst->offset,
@@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		nbytes = datalen;
 		sa->sa_command_0.bf.scatter = 1;
 		pd_uinfo->using_sd = 1;
-		pd_uinfo->dest_va = dst;
 		pd_uinfo->first_sd = fst_sd;
-		pd_uinfo->num_sd = num_sd;
 		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 		pd->dest = sd_dma;
 		/* setup scatter descriptor */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 72790d8..1603dc8 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -935,7 +935,7 @@ void psp_pci_init(void)
 	rc = sev_platform_init(&error);
 	if (rc) {
 		dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
-		goto err;
+		return;
 	}
 
 	dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 5852d29..0669033 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 /* This function prepers the user key so it can pass to the hmac processing
  * (copy to intenral buffer or hash in case of key longer than block
  */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 				 unsigned int keylen)
 {
 	dma_addr_t key_dma_addr = 0;
@@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	unsigned int hashmode;
 	unsigned int idx = 0;
 	int rc = 0;
+	u8 *key = NULL;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	dma_addr_t padded_authkey_dma_addr =
 		ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	}
 
 	if (keylen != 0) {
+
+		key = kmemdup(authkey, keylen, GFP_KERNEL);
+		if (!key)
+			return -ENOMEM;
+
 		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 					      DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 				key, keylen);
+			kzfree(key);
 			return -ENOMEM;
 		}
 		if (keylen > blocksize) {
@@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
+	kzfree(key);
+
 	return rc;
 }
 
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 3bcb6bc..90b4870 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
-				     unsigned int nbytes, u32 *lbytes,
-				     bool *is_chained)
+				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
 
 	while (nbytes && sg_list) {
-		if (sg_list->length) {
-			nents++;
-			/* get the number of bytes in the last entry */
-			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ?
-					nbytes : sg_list->length;
-			sg_list = sg_next(sg_list);
-		} else {
-			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained)
-				*is_chained = true;
-		}
+		nents++;
+		/* get the number of bytes in the last entry */
+		*lbytes = nbytes;
+		nbytes -= (sg_list->length > nbytes) ?
+				nbytes : sg_list->length;
+		sg_list = sg_next(sg_list);
 	}
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 {
 	u32 nents, lbytes;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
@@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 	sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			 enum dma_data_direction direction)
-{
-	u32 i, j;
-	struct scatterlist *l_sg = sg;
-
-	for (i = 0; i < nents; i++) {
-		if (!l_sg)
-			break;
-		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_page() sg buffer failed\n");
-			goto err;
-		}
-		l_sg = sg_next(l_sg);
-	}
-	return nents;
-
-err:
-	/* Restore mapped parts */
-	for (j = 0; j < i; j++) {
-		if (!sg)
-			break;
-		dma_unmap_sg(dev, sg, 1, direction);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	bool is_chained = false;
-
 	if (sg_is_last(sg)) {
 		/* One entry only case -set to DLLI */
 		if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		*nents = 1;
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-					  &is_chained);
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			dev_err(dev, "Too many fragments. current %d max %d\n",
 				*nents, max_sg_nents);
 			return -ENOMEM;
 		}
-		if (!is_chained) {
-			/* In case of mmu the number of mapped nents might
-			 * be changed from the original sgl nents
-			 */
-			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (*mapped_nents == 0) {
-				*nents = 0;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		} else {
-			/*In this case the driver maps entry by entry so it
-			 * must have the same nents before and after map
-			 */
-			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-						      direction);
-			if (*mapped_nents != *nents) {
-				*nents = *mapped_nents;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
+		/* In case of mmu the number of mapped nents might
+		 * be changed from the original sgl nents
+		 */
+		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+		if (*mapped_nents == 0) {
+			*nents = 0;
+			dev_err(dev, "dma_map_sg() sg buffer failed\n");
+			return -ENOMEM;
 		}
 	}
 
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 	u32 dummy;
-	bool chained;
 	u32 size_to_unmap = 0;
 
 	if (areq_ctx->mac_buf_dma_addr) {
@@ -612,6 +560,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
+		kzfree(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
@@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		size_to_unmap += crypto_aead_ivsize(tfm);
 
 	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-				      &dummy, &chained),
+		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
 		     DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
 		dma_unmap_sg(dev, req->dst,
 			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy, &chained),
+					      &dummy),
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct device *dev = drvdata_to_dev(drvdata);
+	gfp_t flags = cc_gfp_flags(&req->base);
 	int rc = 0;
 
 	if (!req->iv) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
+		areq_ctx->gen_ctx.iv = NULL;
 		goto chain_iv_exit;
 	}
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-						       hw_iv_size,
-						       DMA_BIDIRECTIONAL);
+	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+	if (!areq_ctx->gen_ctx.iv)
+		return -ENOMEM;
+
+	areq_ctx->gen_ctx.iv_dma_addr =
+		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
+		kzfree(areq_ctx->gen_ctx.iv);
+		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
 	}
@@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	u32 sg_index = 0;
-	bool chained = false;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	u32 size_to_skip = req->assoclen;
 
@@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-					    &src_last_bytes, &chained);
+					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
@@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-					    &dst_last_bytes, &chained);
+					    &dst_last_bytes);
 	sg_index = areq_ctx->dst_sgl->length;
 	offset = size_to_skip;
 
@@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
 		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+			cc_get_sgl_nents(dev, src, nbytes, &dummy);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d608a4f..be7f9bd 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -162,6 +162,7 @@ struct cc_alg_template {
 
 struct async_gen_req_ctx {
 	dma_addr_t iv_dma_addr;
+	u8 *iv;
 	enum drv_crypto_direction op_type;
 };
 
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d..09f708f 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -72,20 +72,28 @@ static inline void tee_fips_error(struct device *dev)
 		dev_err(dev, "TEE reported error!\n");
 }
 
+/*
+ * This function checks if a cryptocell tee fips error occurred
+ * and in such a case triggers a system error
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+	struct device *dev = drvdata_to_dev(p_drvdata);
+
+	if (!cc_get_tee_fips_status(p_drvdata))
+		tee_fips_error(dev);
+}
+
 /* Deferred service handler, run as interrupt-fired tasklet */
 static void fips_dsr(unsigned long devarg)
 {
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
-	struct device *dev = drvdata_to_dev(drvdata);
-	u32 irq, state, val;
+	u32 irq, val;
 
 	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
 
 	if (irq) {
-		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
-		if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
-			tee_fips_error(dev);
+		cc_tee_handle_fips_error(drvdata);
 	}
 
 	/* after verifing that there is nothing to do,
@@ -113,8 +121,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
 	dev_dbg(dev, "Initializing fips tasklet\n");
 	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
 
-	if (!cc_get_tee_fips_status(p_drvdata))
-		tee_fips_error(dev);
+	cc_tee_handle_fips_error(p_drvdata);
 
 	return 0;
 }
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096..67d5fbf 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
 void cc_fips_fini(struct cc_drvdata *drvdata);
 void fips_handler(struct cc_drvdata *drvdata);
 void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
 
 #else  /* CONFIG_CRYPTO_FIPS */
 
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
 static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
 					  bool ok) {}
 static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
 
 #endif /* CONFIG_CRYPTO_FIPS */
 
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index b931330..2cadd7a 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -64,6 +64,7 @@ struct cc_hash_alg {
 struct hash_key_req_ctx {
 	u32 keylen;
 	dma_addr_t key_dma_addr;
+	u8 *key;
 };
 
 /* hash per-session context */
@@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	ctx->key_params.keylen = keylen;
 	ctx->key_params.key_dma_addr = 0;
 	ctx->is_hmac = true;
+	ctx->key_params.key = NULL;
 
 	if (keylen) {
+		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+		if (!ctx->key_params.key)
+			return -ENOMEM;
+
 		ctx->key_params.key_dma_addr =
-			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+				       DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
-				key, keylen);
+				ctx->key_params.key, keylen);
+			kzfree(ctx->key_params.key);
 			return -ENOMEM;
 		}
 		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -881,6 +889,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 	}
+
+	kzfree(ctx->key_params.key);
+
 	return rc;
 }
 
@@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 
 	ctx->key_params.keylen = keylen;
 
+	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+	if (!ctx->key_params.key)
+		return -ENOMEM;
+
 	ctx->key_params.key_dma_addr =
-		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 			key, keylen);
+		kzfree(ctx->key_params.key);
 		return -ENOMEM;
 	}
 	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 
+	kzfree(ctx->key_params.key);
+
 	return rc;
 }
 
@@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = {
 			.setkey = cc_hash_setkey,
 			.halg = {
 				.digestsize = SHA224_DIGEST_SIZE,
-				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
 			},
 		},
 		.hash_mode = DRV_HASH_SHA224,
@@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = {
 			.setkey = cc_hash_setkey,
 			.halg = {
 				.digestsize = SHA384_DIGEST_SIZE,
-				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
 			},
 		},
 		.hash_mode = DRV_HASH_SHA384,
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 7694583..1abec38 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
 	}
 
 	ivgen_ctx->pool = NULL_SRAM_ADDR;
-
-	/* release "this" context */
-	kfree(ivgen_ctx);
 }
 
 /*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 	int rc;
 
 	/* Allocate "this" context */
-	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+	ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
 	if (!ivgen_ctx)
 		return -ENOMEM;
 
+	drvdata->ivgen_handle = ivgen_ctx;
+
 	/* Allocate pool's header for initial enc. key/IV */
 	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
 						  &ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 		goto out;
 	}
 
-	drvdata->ivgen_handle = ivgen_ctx;
-
 	return cc_init_iv_sram(drvdata);
 
 out:
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f47..79fc0a3 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -11,6 +11,7 @@
 #include "cc_ivgen.h"
 #include "cc_hash.h"
 #include "cc_pm.h"
+#include "cc_fips.h"
 
 #define POWER_DOWN_ENABLE 0x01
 #define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
 	int rc;
 
 	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	rc = cc_suspend_req_queue(drvdata);
 	if (rc) {
 		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
 		return rc;
 	}
 	fini_cc_regs(drvdata);
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	cc_clk_off(drvdata);
 	return 0;
 }
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+	/* Enables the device source clk */
 	rc = cc_clk_on(drvdata);
 	if (rc) {
 		dev_err(dev, "failed getting clock back on. We're toast.\n");
 		return rc;
 	}
 
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
 	rc = init_cc_regs(drvdata, false);
 	if (rc) {
 		dev_err(dev, "init_cc_regs (%x)\n", rc);
 		return rc;
 	}
+	/* check if tee fips error occurred during power down */
+	cc_tee_handle_fips_error(drvdata);
 
 	rc = cc_resume_req_queue(drvdata);
 	if (rc) {
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 23fffde..020c2bf 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -13,7 +13,6 @@
 #include <linux/of.h>
 #include <linux/device-mapper.h>
 #include <linux/clk.h>
-#include <linux/cdev.h>
 #include <linux/regulator/consumer.h>
 #include <linux/msm-bus.h>
 #include <crypto/ice.h>
@@ -46,11 +45,9 @@
 
 #define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
 #define QCOM_UFS_ICE_DEV	"iceufs"
+#define QCOM_UFS_CARD_ICE_DEV	"iceufscard"
 #define QCOM_SDCC_ICE_DEV	"icesdcc"
-#define QCOM_ICE_TYPE_NAME_LEN 8
 #define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
-#define QCOM_ICE_UFS		10
-#define QCOM_ICE_SDCC		20
 
 #define QCOM_ICE_ENCRYPT	0x1
 #define QCOM_ICE_DECRYPT	0x2
@@ -73,57 +70,13 @@ struct ice_clk_info {
 	bool enabled;
 };
 
-struct qcom_ice_bus_vote {
-	uint32_t client_handle;
-	uint32_t curr_vote;
-	int min_bw_vote;
-	int max_bw_vote;
-	int saved_vote;
-	bool is_max_bw_needed;
-	struct device_attribute max_bus_bw;
-};
-
 static LIST_HEAD(ice_devices);
-/*
- * ICE HW device structure.
- */
-struct ice_device {
-	struct list_head	list;
-	struct device		*pdev;
-	struct cdev		cdev;
-	dev_t			device_no;
-	struct class		*driver_class;
-	void __iomem		*mmio;
-	struct resource		*res;
-	int			irq;
-	bool			is_ice_enabled;
-	bool			is_ice_disable_fuse_blown;
-	ice_error_cb		error_cb;
-	void			*host_controller_data; /* UFS/EMMC/other? */
-	struct list_head	clk_list_head;
-	u32			ice_hw_version;
-	bool			is_ice_clk_available;
-	char			ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
-	struct regulator	*reg;
-	bool			is_regulator_available;
-	struct qcom_ice_bus_vote bus_vote;
-	ktime_t			ice_reset_start_time;
-	ktime_t			ice_reset_complete_time;
-};
 
 static int qti_ice_setting_config(struct request *req,
-		struct platform_device *pdev,
+		struct ice_device *ice_dev,
 		struct ice_crypto_setting *crypto_data,
 		struct ice_data_setting *setting, uint32_t cxt)
 {
-	struct ice_device *ice_dev = platform_get_drvdata(pdev);
-
-	if (!ice_dev) {
-		pr_debug("%s no ICE device\n", __func__);
-		/* make the caller finish peacefully */
-		return 0;
-	}
-
 	if (ice_dev->is_ice_disable_fuse_blown) {
 		pr_err("%s ICE disabled fuse is blown\n", __func__);
 		return -EPERM;
@@ -702,29 +655,36 @@ static int register_ice_device(struct ice_device *ice_dev)
 	unsigned int count = 1;
 	struct device *class_dev;
 	int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
+	int is_ufscard_ice = !strcmp(ice_dev->ice_instance_type, "ufscard");
 
 	rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 	if (rc < 0) {
 		pr_err("alloc_chrdev_region failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 		return rc;
 	}
 	ice_dev->driver_class = class_create(THIS_MODULE,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 	if (IS_ERR(ice_dev->driver_class)) {
 		rc = -ENOMEM;
 		pr_err("class_create failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 		goto exit_unreg_chrdev_region;
 	}
 	class_dev = device_create(ice_dev->driver_class, NULL,
 					ice_dev->device_no, NULL,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 
 	if (!class_dev) {
 		pr_err("class_device_create failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 		rc = -ENOMEM;
 		goto exit_destroy_class;
 	}
@@ -735,7 +695,8 @@ static int register_ice_device(struct ice_device *ice_dev)
 	rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
 	if (rc < 0) {
 		pr_err("cdev_add failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
+				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
 		goto exit_destroy_device;
 	}
 	return  0;
@@ -803,6 +764,12 @@ static int qcom_ice_probe(struct platform_device *pdev)
 	 */
 	ice_dev->is_ice_enabled = false;
 
+	rc = pfk_initialize_key_table(ice_dev);
+	if (rc) {
+		pr_err("Failed to initialize key table\n");
+		goto err_ice_dev;
+	}
+
 	platform_set_drvdata(pdev, ice_dev);
 	list_add_tail(&ice_dev->list, &ice_devices);
 
@@ -823,6 +790,7 @@ static int qcom_ice_remove(struct platform_device *pdev)
 	if (!ice_dev)
 		return 0;
 
+	pfk_remove(ice_dev);
 	qcom_ice_disable_intr(ice_dev);
 
 	device_init_wakeup(&pdev->dev, false);
@@ -862,29 +830,6 @@ static int qcom_ice_restore_config(void)
 	return ret;
 }
 
-static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
-{
-	struct scm_desc desc = {0};
-	int ret = -1;
-
-	/* For ice 3, key configuration needs to be restored in case of reset */
-
-	desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
-
-	if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
-		desc.args[0] = QCOM_ICE_SDCC;
-
-	if (!strcmp(ice_dev->ice_instance_type, "ufs"))
-		desc.args[0] = QCOM_ICE_UFS;
-
-	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
-
-	if (ret)
-		pr_err("%s: Error:  0x%x\n", __func__, ret);
-
-	return ret;
-}
-
 static int qcom_ice_init_clocks(struct ice_device *ice)
 {
 	int ret = -EINVAL;
@@ -1164,16 +1109,12 @@ static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
 		 * restore it
 		 */
 		} else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
-			err = qcom_ice_restore_key_config(ice_dev);
-			if (err)
-				goto out;
-
 			/*
 			 * for PFE case, clear the cached ICE key table,
 			 * this will force keys to be reconfigured
 			 * per each next transaction
 			 */
-			pfk_clear_on_reset();
+			pfk_clear_on_reset(ice_dev);
 		}
 	}
 
@@ -1451,11 +1392,18 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 	bool is_pfe = false;
 	unsigned long sec_end = 0;
 	sector_t data_size;
+	struct ice_device *ice_dev;
 
 	if (!pdev || !req) {
 		pr_err("%s: Invalid params passed\n", __func__);
 		return -EINVAL;
 	}
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_debug("%s no ICE device\n", __func__);
+		/* make the caller finish peacefully */
+		return 0;
+	}
 
 	/*
 	 * It is not an error to have a request with no  bio
@@ -1472,7 +1420,8 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 		return 0;
 	}
 
-	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
+	ret = pfk_load_key_start(req->bio, ice_dev, &pfk_crypto_data,
+			&is_pfe, async);
 	if (is_pfe) {
 		if (ret) {
 			if (ret != -EBUSY && ret != -EAGAIN)
@@ -1481,7 +1430,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 			return ret;
 		}
 
-		return qti_ice_setting_config(req, pdev,
+		return qti_ice_setting_config(req, ice_dev,
 				&pfk_crypto_data, setting, ICE_CRYPTO_CXT_FBE);
 	}
 
@@ -1509,8 +1458,8 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 				if ((req->__sector + data_size) > sec_end)
 					return 0;
 				else
-					return qti_ice_setting_config(req, pdev,
-						&ice_data, setting,
+					return qti_ice_setting_config(req,
+						ice_dev, &ice_data, setting,
 						ICE_CRYPTO_CXT_FDE);
 			}
 		}
@@ -1525,12 +1474,14 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 }
 EXPORT_SYMBOL(qcom_ice_config_start);
 
-static int qcom_ice_config_end(struct request *req)
+static int qcom_ice_config_end(struct platform_device *pdev,
+		struct request *req)
 {
 	int ret = 0;
 	bool is_pfe = false;
+	struct ice_device *ice_dev;
 
-	if (!req) {
+	if (!req || !pdev) {
 		pr_err("%s: Invalid params passed\n", __func__);
 		return -EINVAL;
 	}
@@ -1540,7 +1491,14 @@ static int qcom_ice_config_end(struct request *req)
 		return 0;
 	}
 
-	ret = pfk_load_key_end(req->bio, &is_pfe);
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_debug("%s no ICE device\n", __func__);
+		/* make the caller finish peacefully */
+		return 0;
+	}
+
+	ret = pfk_load_key_end(req->bio, ice_dev, &is_pfe);
 	if (is_pfe) {
 		if (ret != 0)
 			pr_err("%s error %d while end configuring ice key for PFE\n",
@@ -1649,7 +1607,7 @@ static struct ice_device *get_ice_device_from_storage_type
 	return NULL;
 }
 
-static int enable_ice_setup(struct ice_device *ice_dev)
+int enable_ice_setup(struct ice_device *ice_dev)
 {
 	int ret = -1, vote;
 
@@ -1706,7 +1664,7 @@ static int enable_ice_setup(struct ice_device *ice_dev)
 	return ret;
 }
 
-static int disable_ice_setup(struct ice_device *ice_dev)
+int disable_ice_setup(struct ice_device *ice_dev)
 {
 	int ret = -1, vote;
 
@@ -1763,6 +1721,11 @@ int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
 		return disable_ice_setup(ice_dev);
 }
 
+struct list_head *get_ice_dev_list(void)
+{
+	return &ice_devices;
+}
+
 struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
 {
 	return &qcom_ice_ops;
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 9443d50..f8a29ae3 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -278,6 +278,10 @@ static int qcedev_release(struct inode *inode, struct file *file)
 		pr_err("%s: invalid handle %pK\n",
 					__func__, podev);
 	}
+
+	if (qcedev_unmap_all_buffers(handle))
+		pr_err("%s: failed to unmap all ion buffers\n", __func__);
+
 	kzfree(handle);
 	file->private_data = NULL;
 	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
diff --git a/drivers/crypto/msm/qcedev_smmu.c b/drivers/crypto/msm/qcedev_smmu.c
index a56ffbe..823831a 100644
--- a/drivers/crypto/msm/qcedev_smmu.c
+++ b/drivers/crypto/msm/qcedev_smmu.c
@@ -407,3 +407,39 @@ int qcedev_check_and_unmap_buffer(void *handle, int fd)
 
 	return 0;
 }
+
+int qcedev_unmap_all_buffers(void *handle)
+{
+	struct qcedev_reg_buf_info *binfo = NULL;
+	struct qcedev_mem_client *mem_client = NULL;
+	struct qcedev_handle *qce_hndl = handle;
+	struct list_head *pos;
+
+	if (!handle) {
+		pr_err("%s: err: invalid input arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
+		pr_err("%s: err: invalid qcedev handle\n", __func__);
+		return -EINVAL;
+	}
+	mem_client = qce_hndl->cntl->mem_client;
+
+	if (mem_client->mtype != MEM_ION)
+		return -EPERM;
+
+	mutex_lock(&qce_hndl->registeredbufs.lock);
+	while (!list_empty(&qce_hndl->registeredbufs.list)) {
+		pos = qce_hndl->registeredbufs.list.next;
+		binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
+		if (binfo)
+			qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
+		list_del(pos);
+		kfree(binfo);
+	}
+	mutex_unlock(&qce_hndl->registeredbufs.lock);
+
+	return 0;
+}
+
diff --git a/drivers/crypto/msm/qcedev_smmu.h b/drivers/crypto/msm/qcedev_smmu.h
index 2f75772..48cf660 100644
--- a/drivers/crypto/msm/qcedev_smmu.h
+++ b/drivers/crypto/msm/qcedev_smmu.h
@@ -73,6 +73,7 @@ int qcedev_check_and_map_buffer(void *qce_hndl,
 		int fd, unsigned int offset, unsigned int fd_size,
 		unsigned long long *vaddr);
 int qcedev_check_and_unmap_buffer(void *handle, int fd);
+int qcedev_unmap_all_buffers(void *handle);
 
 extern struct qcedev_reg_buf_info *global_binfo_in;
 extern struct qcedev_reg_buf_info *global_binfo_out;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 23305f2..204e4ad 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
 	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
 		dev->sg_src->offset + dev->sg_src->length - ivsize;
 
-	/* store the iv that need to be updated in chain mode */
-	if (ctx->mode & RK_CRYPTO_DEC)
+	/* Store the IV that needs to be updated in chain mode,
+	 * and update the IV buffer to contain the next IV for decryption mode.
+	 */
+	if (ctx->mode & RK_CRYPTO_DEC) {
 		memcpy(ctx->iv, src_last_blk, ivsize);
+		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
+				   ivsize, dev->total - ivsize);
+	}
 
 	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
 	if (!err)
@@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
 	struct ablkcipher_request *req =
 		ablkcipher_request_cast(dev->async_req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
 
-	if (ivsize == DES_BLOCK_SIZE)
-		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
-			      ivsize);
-	else if (ivsize == AES_BLOCK_SIZE)
-		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+	/* Update the IV buffer to contain the next IV for encryption mode. */
+	if (!(ctx->mode & RK_CRYPTO_DEC)) {
+		if (dev->aligned) {
+			memcpy(req->info, sg_virt(dev->sg_dst) +
+				dev->sg_dst->length - ivsize, ivsize);
+		} else {
+			memcpy(req->info, dev->addr_vir +
+				dev->count - ivsize, ivsize);
+		}
+	}
 }
 
 static void rk_update_iv(struct rk_crypto_info *dev)
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2..f6936bb 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
 		}
 	} else {
 		/* Since we have the flag final, we can go up to modulo 4 */
-		end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+		if (areq->nbytes < 4)
+			end = 0;
+		else
+			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
 	}
 
 	/* TODO if SGlen % 4 and !op->len then DMA */
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63..9c6b5c1d 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1357,7 +1357,7 @@
 	addi		$idx,$idx,16
 	bdnz		Loop_ctr32_enc
 
-	vadduwm		$ivec,$ivec,$one
+	vadduqm		$ivec,$ivec,$one
 	 vmr		$dat,$inptail
 	 lvx		$inptail,0,$inp
 	 addi		$inp,$inp,16
@@ -1854,7 +1854,7 @@
 	stvx_u		$out1,$x10,$out
 	stvx_u		$out2,$x20,$out
 	addi		$out,$out,0x30
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@
 	stvx_u		$out0,$x00,$out
 	stvx_u		$out1,$x10,$out
 	addi		$out,$out,0x20
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_one:
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 948806e..a89ebd9 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -376,8 +375,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 1799e5c..43fd8aa 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -225,6 +225,19 @@
 	  agnostic interface to so that some of the devfreq governors can be
 	  shared across SoCs.
 
+config ARM_QCOM_DEVFREQ_QOSLAT
+	bool "Qualcomm Technologies Inc. DEVFREQ QOSLAT device driver"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	default n
+	help
+	  Some Qualcomm Technologies, Inc. (QTI) chipsets have an
+	  interface to vote for a memory latency QoS level. This
+	  driver votes on this interface to request a particular
+	  memory latency QoS level.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index c87871e..e106e9c 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_DEVFREQ_GOV_MEMLAT)       += governor_memlat.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_gpubw_mon.o
 obj-$(CONFIG_DEVFREQ_GOV_CDSPL3)	+= governor_cdsp_l3.o
 
 # DEVFREQ Drivers
@@ -22,6 +23,7 @@
 obj-$(CONFIG_ARM_QCOM_DEVFREQ_FW)	+= devfreq_qcom_fw.o
 obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
 obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
+obj-$(CONFIG_ARM_QCOM_DEVFREQ_QOSLAT)	+= devfreq_qcom_qoslat.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 4bec02c..5065af69 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -20,141 +20,216 @@
 #include <linux/irq.h>
 #include <linux/cpu_pm.h>
 #include <linux/cpu.h>
+#include <linux/of_fdt.h>
 #include "governor.h"
 #include "governor_memlat.h"
 #include <linux/perf_event.h>
 #include <linux/of_device.h>
+#include <linux/mutex.h>
 
-enum ev_index {
+enum common_ev_idx {
 	INST_IDX,
-	CM_IDX,
 	CYC_IDX,
-	STALL_CYC_IDX,
-	NUM_EVENTS
+	STALL_IDX,
+	NUM_COMMON_EVS
 };
 #define INST_EV		0x08
-#define L2DM_EV		0x17
 #define CYC_EV		0x11
 
+enum mon_type {
+	MEMLAT_CPU_GRP,
+	MEMLAT_MON,
+	COMPUTE_MON,
+	NUM_MON_TYPES
+};
+
 struct event_data {
 	struct perf_event *pevent;
 	unsigned long prev_count;
+	unsigned long last_delta;
 };
 
-struct cpu_pmu_stats {
-	struct event_data events[NUM_EVENTS];
-	ktime_t prev_ts;
+struct cpu_data {
+	struct event_data common_evs[NUM_COMMON_EVS];
+	unsigned long freq;
+	unsigned long stall_pct;
 };
 
-struct cpu_grp_info {
-	cpumask_t cpus;
-	unsigned int event_ids[NUM_EVENTS];
-	struct cpu_pmu_stats *cpustats;
-	struct memlat_hwmon hw;
+/**
+ * struct memlat_mon - A specific consumer of cpu_grp generic counters.
+ *
+ * @is_active:			Whether or not this mon is currently running
+ *				memlat.
+ * @cpus:			CPUs this mon votes on behalf of. Must be a
+ *				subset of @cpu_grp's CPUs. If no CPUs provided,
+ *				defaults to using all of @cpu_grp's CPUs.
+ * @miss_ev_id:			The event code corresponding to the @miss_ev
+ *				perf event. Will be 0 for compute.
+ * @miss_ev:			The cache miss perf event exclusive to this
+ *				mon. Will be NULL for compute.
+ * @requested_update_ms:	The mon's desired polling rate. The lowest
+ *				@requested_update_ms of all mons determines
+ *				@cpu_grp's update_ms.
+ * @hw:				The memlat_hwmon struct corresponding to this
+ *				mon's specific memlat instance.
+ * @cpu_grp:			The cpu_grp who owns this mon.
+ */
+struct memlat_mon {
+	bool			is_active;
+	cpumask_t		cpus;
+	unsigned int		miss_ev_id;
+	unsigned int		requested_update_ms;
+	struct event_data	*miss_ev;
+	struct memlat_hwmon	hw;
+
+	struct memlat_cpu_grp	*cpu_grp;
+};
+
+/**
+ * struct memlat_cpu_grp - A coordinator of both HW reads and devfreq updates
+ * for one or more memlat_mons.
+ *
+ * @cpus:			The CPUs this cpu_grp will read events from.
+ * @common_ev_ids:		The event codes of the events all mons need.
+ * @cpus_data:			The cpus data array of length #cpus. Includes
+ *				event_data of all the events all mons need as
+ *				well as common computed cpu data like freq.
+ * @last_update_ts:		Used to avoid redundant reads.
+ * @last_ts_delta_us:		The time difference between the most recent
+ *				update and the one before that. Used to compute
+ *				effective frequency.
+ * @work:			The delayed_work used for handling updates.
+ * @update_ms:			The frequency with which @work triggers.
+ * @num_mons:		The number of @mons for this cpu_grp.
+ * @num_inited_mons:	The number of @mons who have probed.
+ * @num_active_mons:	The number of @mons currently running
+ *				memlat.
+ * @mons:			All of the memlat_mon structs representing
+ *				the different voters who share this cpu_grp.
+ * @mons_lock:		A lock used to protect the @mons.
+ */
+struct memlat_cpu_grp {
+	cpumask_t		cpus;
+	unsigned int		common_ev_ids[NUM_COMMON_EVS];
+	struct cpu_data		*cpus_data;
+	ktime_t			last_update_ts;
+	unsigned long		last_ts_delta_us;
+
+	struct delayed_work	work;
+	unsigned int		update_ms;
+
+	unsigned int		num_mons;
+	unsigned int		num_inited_mons;
+	unsigned int		num_active_mons;
+	struct memlat_mon	*mons;
+	struct mutex		mons_lock;
 };
 
 struct memlat_mon_spec {
-	bool is_compute;
+	enum mon_type type;
 };
 
-#define to_cpustats(cpu_grp, cpu) \
-	(&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
-#define to_devstats(cpu_grp, cpu) \
-	(&cpu_grp->hw.core_stats[cpu - cpumask_first(&cpu_grp->cpus)])
-#define to_cpu_grp(hwmon) container_of(hwmon, struct cpu_grp_info, hw)
+#define to_cpu_data(cpu_grp, cpu) \
+	(&cpu_grp->cpus_data[cpu - cpumask_first(&cpu_grp->cpus)])
+#define to_common_evs(cpu_grp, cpu) \
+	(cpu_grp->cpus_data[cpu - cpumask_first(&cpu_grp->cpus)].common_evs)
+#define to_devstats(mon, cpu) \
+	(&mon->hw.core_stats[cpu - cpumask_first(&mon->cpus)])
+#define to_mon(hwmon) container_of(hwmon, struct memlat_mon, hw)
 
-
-static unsigned long compute_freq(struct cpu_pmu_stats *cpustats,
-						unsigned long cyc_cnt)
-{
-	ktime_t ts;
-	unsigned int diff;
-	uint64_t freq = 0;
-
-	ts = ktime_get();
-	diff = ktime_to_us(ktime_sub(ts, cpustats->prev_ts));
-	if (!diff)
-		diff = 1;
-	cpustats->prev_ts = ts;
-	freq = cyc_cnt;
-	do_div(freq, diff);
-
-	return freq;
-}
+static struct workqueue_struct *memlat_wq;
 
 #define MAX_COUNT_LIM 0xFFFFFFFFFFFFFFFF
-static inline unsigned long read_event(struct event_data *event)
+static inline void read_event(struct event_data *event)
 {
-	unsigned long ev_count;
+	unsigned long ev_count = 0;
 	u64 total, enabled, running;
 
 	if (!event->pevent)
-		return 0;
+		return;
 
 	total = perf_event_read_value(event->pevent, &enabled, &running);
 	ev_count = total - event->prev_count;
 	event->prev_count = total;
-	return ev_count;
+	event->last_delta = ev_count;
 }
 
-static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
+static void update_counts(struct memlat_cpu_grp *cpu_grp)
 {
-	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
-	struct dev_stats *devstats = to_devstats(cpu_grp, cpu);
-	unsigned long cyc_cnt, stall_cnt;
+	unsigned int cpu, i;
+	struct memlat_mon *mon;
+	ktime_t now = ktime_get();
+	unsigned long delta = ktime_us_delta(now, cpu_grp->last_update_ts);
 
-	devstats->inst_count = read_event(&cpustats->events[INST_IDX]);
-	devstats->mem_count = read_event(&cpustats->events[CM_IDX]);
-	cyc_cnt = read_event(&cpustats->events[CYC_IDX]);
-	devstats->freq = compute_freq(cpustats, cyc_cnt);
-	if (cpustats->events[STALL_CYC_IDX].pevent) {
-		stall_cnt = read_event(&cpustats->events[STALL_CYC_IDX]);
-		stall_cnt = min(stall_cnt, cyc_cnt);
-		devstats->stall_pct = mult_frac(100, stall_cnt, cyc_cnt);
-	} else {
-		devstats->stall_pct = 100;
+	cpu_grp->last_ts_delta_us = delta;
+	cpu_grp->last_update_ts = now;
+
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		struct cpu_data *cpu_data = to_cpu_data(cpu_grp, cpu);
+		struct event_data *common_evs = cpu_data->common_evs;
+
+		for (i = 0; i < NUM_COMMON_EVS; i++)
+			read_event(&common_evs[i]);
+
+		if (!common_evs[STALL_IDX].pevent)
+			common_evs[STALL_IDX].last_delta =
+				common_evs[CYC_IDX].last_delta;
+
+		cpu_data->freq = common_evs[CYC_IDX].last_delta / delta;
+		cpu_data->stall_pct = mult_frac(100,
+				common_evs[STALL_IDX].last_delta,
+				common_evs[CYC_IDX].last_delta);
+	}
+
+	for (i = 0; i < cpu_grp->num_mons; i++) {
+		mon = &cpu_grp->mons[i];
+
+		if (!mon->is_active || !mon->miss_ev)
+			continue;
+
+		for_each_cpu(cpu, &mon->cpus) {
+			unsigned int mon_idx =
+				cpu - cpumask_first(&mon->cpus);
+			read_event(&mon->miss_ev[mon_idx]);
+		}
 	}
 }
 
 static unsigned long get_cnt(struct memlat_hwmon *hw)
 {
-	int cpu;
-	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
+	struct memlat_mon *mon = to_mon(hw);
+	struct memlat_cpu_grp *cpu_grp = mon->cpu_grp;
+	unsigned int cpu;
 
-	for_each_cpu(cpu, &cpu_grp->cpus)
-		read_perf_counters(cpu, cpu_grp);
+	for_each_cpu(cpu, &mon->cpus) {
+		struct cpu_data *cpu_data = to_cpu_data(cpu_grp, cpu);
+		struct event_data *common_evs = cpu_data->common_evs;
+		unsigned int mon_idx =
+			cpu - cpumask_first(&mon->cpus);
+		struct dev_stats *devstats = to_devstats(mon, cpu);
+
+		devstats->freq = cpu_data->freq;
+		devstats->stall_pct = cpu_data->stall_pct;
+		devstats->inst_count = common_evs[INST_IDX].last_delta;
+
+		if (mon->miss_ev)
+			devstats->mem_count =
+				mon->miss_ev[mon_idx].last_delta;
+		else {
+			devstats->inst_count = 0;
+			devstats->mem_count = 1;
+		}
+	}
 
 	return 0;
 }
 
-static void delete_events(struct cpu_pmu_stats *cpustats)
+static void delete_event(struct event_data *event)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
-		cpustats->events[i].prev_count = 0;
-		if (cpustats->events[i].pevent) {
-			perf_event_release_kernel(cpustats->events[i].pevent);
-			cpustats->events[i].pevent = NULL;
-		}
-	}
-}
-
-static void stop_hwmon(struct memlat_hwmon *hw)
-{
-	int cpu;
-	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
-	struct dev_stats *devstats;
-
-	for_each_cpu(cpu, &cpu_grp->cpus) {
-		delete_events(to_cpustats(cpu_grp, cpu));
-
-		/* Clear governor data */
-		devstats = to_devstats(cpu_grp, cpu);
-		devstats->inst_count = 0;
-		devstats->mem_count = 0;
-		devstats->freq = 0;
-		devstats->stall_pct = 0;
+	event->prev_count = event->last_delta = 0;
+	if (event->pevent) {
+		perf_event_release_kernel(event->pevent);
+		event->pevent = NULL;
 	}
 }
 
@@ -174,58 +249,214 @@ static struct perf_event_attr *alloc_attr(void)
 	return attr;
 }
 
-static int set_events(struct cpu_grp_info *cpu_grp, int cpu)
+static int set_event(struct event_data *ev, int cpu, unsigned int event_id,
+		     struct perf_event_attr *attr)
 {
 	struct perf_event *pevent;
-	struct perf_event_attr *attr;
-	int err, i;
-	unsigned int event_id;
-	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
 
-	/* Allocate an attribute for event initialization */
-	attr = alloc_attr();
-	if (!attr)
-		return -ENOMEM;
+	if (!event_id)
+		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
-		event_id = cpu_grp->event_ids[i];
-		if (!event_id)
-			continue;
+	attr->config = event_id;
+	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+	if (IS_ERR(pevent))
+		return PTR_ERR(pevent);
 
-		attr->config = event_id;
-		pevent = perf_event_create_kernel_counter(attr, cpu, NULL,
-							  NULL, NULL);
-		if (IS_ERR(pevent))
-			goto err_out;
-		cpustats->events[i].pevent = pevent;
-		perf_event_enable(pevent);
-	}
+	ev->pevent = pevent;
+	perf_event_enable(pevent);
 
-	kfree(attr);
 	return 0;
-
-err_out:
-	err = PTR_ERR(pevent);
-	kfree(attr);
-	return err;
 }
 
-static int start_hwmon(struct memlat_hwmon *hw)
+static int init_common_evs(struct memlat_cpu_grp *cpu_grp,
+			   struct perf_event_attr *attr)
 {
-	int cpu, ret = 0;
-	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
+	unsigned int cpu, i;
+	int ret = 0;
 
 	for_each_cpu(cpu, &cpu_grp->cpus) {
-		ret = set_events(cpu_grp, cpu);
-		if (ret) {
-			pr_warn("Perf event init failed on CPU%d\n", cpu);
-			break;
+		struct event_data *common_evs = to_common_evs(cpu_grp, cpu);
+
+		for (i = 0; i < NUM_COMMON_EVS; i++) {
+			ret = set_event(&common_evs[i], cpu,
+					cpu_grp->common_ev_ids[i], attr);
+			if (ret)
+				break;
 		}
 	}
 
 	return ret;
 }
 
+static void free_common_evs(struct memlat_cpu_grp *cpu_grp)
+{
+	unsigned int cpu, i;
+
+	for_each_cpu(cpu, &cpu_grp->cpus) {
+		struct event_data *common_evs = to_common_evs(cpu_grp, cpu);
+
+		for (i = 0; i < NUM_COMMON_EVS; i++)
+			delete_event(&common_evs[i]);
+	}
+}
+
+static void memlat_monitor_work(struct work_struct *work)
+{
+	int err;
+	struct memlat_cpu_grp *cpu_grp =
+		container_of(work, struct memlat_cpu_grp, work.work);
+	struct memlat_mon *mon;
+	unsigned int i;
+
+	mutex_lock(&cpu_grp->mons_lock);
+	if (!cpu_grp->num_active_mons)
+		goto unlock_out;
+	update_counts(cpu_grp);
+	for (i = 0; i < cpu_grp->num_mons; i++) {
+		struct devfreq *df;
+
+		mon = &cpu_grp->mons[i];
+
+		if (!mon->is_active)
+			continue;
+
+		df = mon->hw.df;
+		mutex_lock(&df->lock);
+		err = update_devfreq(df);
+		if (err)
+			dev_err(mon->hw.dev, "Memlat update failed: %d\n", err);
+		mutex_unlock(&df->lock);
+	}
+
+	queue_delayed_work(memlat_wq, &cpu_grp->work,
+			   msecs_to_jiffies(cpu_grp->update_ms));
+
+unlock_out:
+	mutex_unlock(&cpu_grp->mons_lock);
+}
+
+static int start_hwmon(struct memlat_hwmon *hw)
+{
+	int ret = 0;
+	unsigned int cpu;
+	struct memlat_mon *mon = to_mon(hw);
+	struct memlat_cpu_grp *cpu_grp = mon->cpu_grp;
+	bool should_init_cpu_grp;
+	struct perf_event_attr *attr = alloc_attr();
+
+	if (!attr)
+		return -ENOMEM;
+
+	mutex_lock(&cpu_grp->mons_lock);
+	should_init_cpu_grp = !(cpu_grp->num_active_mons++);
+	if (should_init_cpu_grp) {
+		ret = init_common_evs(cpu_grp, attr);
+		if (ret)
+			goto unlock_out;
+
+		INIT_DEFERRABLE_WORK(&cpu_grp->work, &memlat_monitor_work);
+	}
+
+	if (mon->miss_ev) {
+		for_each_cpu(cpu, &mon->cpus) {
+			unsigned int idx = cpu - cpumask_first(&mon->cpus);
+
+			ret = set_event(&mon->miss_ev[idx], cpu,
+					mon->miss_ev_id, attr);
+			if (ret)
+				goto unlock_out;
+		}
+	}
+
+	mon->is_active = true;
+
+	if (should_init_cpu_grp)
+		queue_delayed_work(memlat_wq, &cpu_grp->work,
+				   msecs_to_jiffies(cpu_grp->update_ms));
+
+unlock_out:
+	mutex_unlock(&cpu_grp->mons_lock);
+	kfree(attr);
+
+	return ret;
+}
+
+static void stop_hwmon(struct memlat_hwmon *hw)
+{
+	unsigned int cpu;
+	struct memlat_mon *mon = to_mon(hw);
+	struct memlat_cpu_grp *cpu_grp = mon->cpu_grp;
+
+	mutex_lock(&cpu_grp->mons_lock);
+	mon->is_active = false;
+	cpu_grp->num_active_mons--;
+
+	for_each_cpu(cpu, &mon->cpus) {
+		unsigned int idx = cpu - cpumask_first(&mon->cpus);
+		struct dev_stats *devstats = to_devstats(mon, cpu);
+
+		if (mon->miss_ev)
+			delete_event(&mon->miss_ev[idx]);
+		devstats->inst_count = 0;
+		devstats->mem_count = 0;
+		devstats->freq = 0;
+		devstats->stall_pct = 0;
+	}
+
+	if (!cpu_grp->num_active_mons) {
+		cancel_delayed_work(&cpu_grp->work);
+		free_common_evs(cpu_grp);
+	}
+	mutex_unlock(&cpu_grp->mons_lock);
+}
+
+/**
+ * We should set update_ms to the lowest requested_update_ms of all of the
+ * active mons, or 0 (i.e. stop polling) if ALL active mons have 0.
+ * This is expected to be called with cpu_grp->mons_lock taken.
+ */
+static void set_update_ms(struct memlat_cpu_grp *cpu_grp)
+{
+	struct memlat_mon *mon;
+	unsigned int i, new_update_ms = UINT_MAX;
+
+	for (i = 0; i < cpu_grp->num_mons; i++) {
+		mon = &cpu_grp->mons[i];
+		if (mon->is_active && mon->requested_update_ms)
+			new_update_ms =
+				min(new_update_ms, mon->requested_update_ms);
+	}
+
+	if (new_update_ms == UINT_MAX) {
+		cancel_delayed_work(&cpu_grp->work);
+	} else if (cpu_grp->update_ms == UINT_MAX) {
+		queue_delayed_work(memlat_wq, &cpu_grp->work,
+				   msecs_to_jiffies(new_update_ms));
+	} else if (new_update_ms > cpu_grp->update_ms) {
+		cancel_delayed_work(&cpu_grp->work);
+		queue_delayed_work(memlat_wq, &cpu_grp->work,
+				   msecs_to_jiffies(new_update_ms));
+	}
+
+	cpu_grp->update_ms = new_update_ms;
+}
+
+static void request_update_ms(struct memlat_hwmon *hw, unsigned int update_ms)
+{
+	struct devfreq *df = hw->df;
+	struct memlat_mon *mon = to_mon(hw);
+	struct memlat_cpu_grp *cpu_grp = mon->cpu_grp;
+
+	mutex_lock(&df->lock);
+	df->profile->polling_ms = update_ms;
+	mutex_unlock(&df->lock);
+
+	mutex_lock(&cpu_grp->mons_lock);
+	mon->requested_update_ms = update_ms;
+	set_update_ms(cpu_grp);
+	mutex_unlock(&cpu_grp->mons_lock);
+}
+
 static int get_mask_from_dev_handle(struct platform_device *pdev,
 					cpumask_t *mask)
 {
@@ -252,69 +483,58 @@ static int get_mask_from_dev_handle(struct platform_device *pdev,
 	return ret;
 }
 
-static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
+static struct device_node *parse_child_nodes(struct device *dev)
+{
+	struct device_node *of_child;
+	int ddr_type_of = -1;
+	int ddr_type = of_fdt_get_ddrtype();
+	int ret;
+
+	for_each_child_of_node(dev->of_node, of_child) {
+		ret = of_property_read_u32(of_child, "qcom,ddr-type",
+							&ddr_type_of);
+		if (!ret && (ddr_type == ddr_type_of)) {
+			dev_dbg(dev,
+				"ddr-type = %d, is matching DT entry\n",
+				ddr_type_of);
+			return of_child;
+		}
+	}
+	return NULL;
+}
+
+#define DEFAULT_UPDATE_MS 100
+static int memlat_cpu_grp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct memlat_hwmon *hw;
-	struct cpu_grp_info *cpu_grp;
-	const struct memlat_mon_spec *spec;
-	int cpu, ret;
-	u32 event_id;
+	struct memlat_cpu_grp *cpu_grp;
+	int ret = 0;
+	unsigned int event_id, num_cpus, num_mons;
 
 	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
 	if (!cpu_grp)
 		return -ENOMEM;
-	hw = &cpu_grp->hw;
-
-	hw->dev = dev;
-	hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
-	if (!hw->of_node) {
-		dev_err(dev, "Couldn't find a target device\n");
-		return -ENODEV;
-	}
 
 	if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
-		dev_err(dev, "CPU list is empty\n");
+		dev_err(dev, "No CPUs specified.\n");
 		return -ENODEV;
 	}
 
-	hw->num_cores = cpumask_weight(&cpu_grp->cpus);
-	hw->core_stats = devm_kzalloc(dev, hw->num_cores *
-				sizeof(*(hw->core_stats)), GFP_KERNEL);
-	if (!hw->core_stats)
-		return -ENOMEM;
+	num_mons = of_get_available_child_count(dev->of_node);
 
-	cpu_grp->cpustats = devm_kzalloc(dev, hw->num_cores *
-			sizeof(*(cpu_grp->cpustats)), GFP_KERNEL);
-	if (!cpu_grp->cpustats)
-		return -ENOMEM;
-
-	cpu_grp->event_ids[CYC_IDX] = CYC_EV;
-
-	for_each_cpu(cpu, &cpu_grp->cpus)
-		to_devstats(cpu_grp, cpu)->id = cpu;
-
-	hw->start_hwmon = &start_hwmon;
-	hw->stop_hwmon = &stop_hwmon;
-	hw->get_cnt = &get_cnt;
-
-	spec = of_device_get_match_data(dev);
-	if (spec && spec->is_compute) {
-		ret = register_compute(dev, hw);
-		if (ret)
-			pr_err("Compute Gov registration failed\n");
-
-		return ret;
+	if (!num_mons) {
+		dev_err(dev, "No mons provided.\n");
+		return -ENODEV;
 	}
 
-	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
-				   &event_id);
-	if (ret) {
-		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
-			L2DM_EV);
-		event_id = L2DM_EV;
-	}
-	cpu_grp->event_ids[CM_IDX] = event_id;
+	cpu_grp->num_mons = num_mons;
+	cpu_grp->num_inited_mons = 0;
+
+	cpu_grp->mons =
+		devm_kzalloc(dev, num_mons * sizeof(*cpu_grp->mons),
+			     GFP_KERNEL);
+	if (!cpu_grp->mons)
+		return -ENOMEM;
 
 	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &event_id);
 	if (ret) {
@@ -322,30 +542,191 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
 			INST_EV);
 		event_id = INST_EV;
 	}
-	cpu_grp->event_ids[INST_IDX] = event_id;
+	cpu_grp->common_ev_ids[INST_IDX] = event_id;
 
-	ret = of_property_read_u32(dev->of_node, "qcom,stall-cycle-ev",
-				   &event_id);
+	ret = of_property_read_u32(dev->of_node, "qcom,cyc-ev", &event_id);
+	if (ret) {
+		dev_dbg(dev, "Cyc event not specified. Using def:0x%x\n",
+			CYC_EV);
+		event_id = CYC_EV;
+	}
+	cpu_grp->common_ev_ids[CYC_IDX] = event_id;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,stall-ev", &event_id);
 	if (ret)
-		dev_dbg(dev, "Stall cycle event not specified. Event ignored.\n");
+		dev_dbg(dev, "Stall event not specified. Skipping.\n");
 	else
-		cpu_grp->event_ids[STALL_CYC_IDX] = event_id;
+		cpu_grp->common_ev_ids[STALL_IDX] = event_id;
 
-	ret = register_memlat(dev, hw);
-	if (ret)
-		pr_err("Mem Latency Gov registration failed\n");
+	num_cpus = cpumask_weight(&cpu_grp->cpus);
+	cpu_grp->cpus_data =
+		devm_kzalloc(dev, num_cpus * sizeof(*cpu_grp->cpus_data),
+			     GFP_KERNEL);
+	if (!cpu_grp->cpus_data)
+		return -ENOMEM;
 
+	mutex_init(&cpu_grp->mons_lock);
+	cpu_grp->update_ms = DEFAULT_UPDATE_MS;
+
+	dev_set_drvdata(dev, cpu_grp);
+
+	return 0;
+}
+
+static int memlat_mon_probe(struct platform_device *pdev, bool is_compute)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+	struct memlat_cpu_grp *cpu_grp;
+	struct memlat_mon *mon;
+	struct memlat_hwmon *hw;
+	unsigned int event_id, num_cpus, cpu;
+
+	if (!memlat_wq)
+		memlat_wq = create_freezable_workqueue("memlat_wq");
+
+	if (!memlat_wq) {
+		dev_err(dev, "Couldn't create memlat workqueue.\n");
+		return -ENOMEM;
+	}
+
+	cpu_grp = dev_get_drvdata(dev->parent);
+	if (!cpu_grp) {
+		dev_err(dev, "Mon initialized without cpu_grp.\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpu_grp->mons_lock);
+	mon = &cpu_grp->mons[cpu_grp->num_inited_mons];
+	mon->is_active = false;
+	mon->requested_update_ms = 0;
+	mon->cpu_grp = cpu_grp;
+
+	if (get_mask_from_dev_handle(pdev, &mon->cpus)) {
+		cpumask_copy(&mon->cpus, &cpu_grp->cpus);
+	} else {
+		if (!cpumask_subset(&mon->cpus, &cpu_grp->cpus)) {
+			dev_err(dev,
+				"Mon CPUs must be a subset of cpu_grp CPUs. mon=%*pbl cpu_grp=%*pbl\n",
+				mon->cpus, cpu_grp->cpus);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+	}
+
+	num_cpus = cpumask_weight(&mon->cpus);
+
+	hw = &mon->hw;
+	hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!hw->of_node) {
+		dev_err(dev, "Couldn't find a target device.\n");
+		ret = -ENODEV;
+		goto unlock_out;
+	}
+	hw->dev = dev;
+	hw->num_cores = num_cpus;
+	hw->should_ignore_df_monitor = true;
+	hw->core_stats = devm_kzalloc(dev, num_cpus * sizeof(*(hw->core_stats)),
+				      GFP_KERNEL);
+	if (!hw->core_stats) {
+		ret = -ENOMEM;
+		goto unlock_out;
+	}
+
+	for_each_cpu(cpu, &mon->cpus)
+		to_devstats(mon, cpu)->id = cpu;
+
+	hw->start_hwmon = &start_hwmon;
+	hw->stop_hwmon = &stop_hwmon;
+	hw->get_cnt = &get_cnt;
+	if (of_get_child_count(dev->of_node))
+		hw->get_child_of_node = &parse_child_nodes;
+	hw->request_update_ms = &request_update_ms;
+
+	/*
+	 * Compute mons rely solely on common events.
+	 */
+	if (is_compute) {
+		mon->miss_ev_id = 0;
+		ret = register_compute(dev, hw);
+	} else {
+		mon->miss_ev =
+			devm_kzalloc(dev, num_cpus * sizeof(*mon->miss_ev),
+				     GFP_KERNEL);
+		if (!mon->miss_ev) {
+			ret = -ENOMEM;
+			goto unlock_out;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
+						&event_id);
+		if (ret) {
+			dev_err(dev, "Cache miss event missing for mon: %d\n",
+					ret);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+		mon->miss_ev_id = event_id;
+
+		ret = register_memlat(dev, hw);
+	}
+
+	if (!ret)
+		cpu_grp->num_inited_mons++;
+
+unlock_out:
+	mutex_unlock(&cpu_grp->mons_lock);
 	return ret;
 }
 
+static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+	const struct memlat_mon_spec *spec = of_device_get_match_data(dev);
+	enum mon_type type = NUM_MON_TYPES;
+
+	if (spec)
+		type = spec->type;
+
+	switch (type) {
+	case MEMLAT_CPU_GRP:
+		ret = memlat_cpu_grp_probe(pdev);
+		if (of_get_available_child_count(dev->of_node))
+			of_platform_populate(dev->of_node, NULL, NULL, dev);
+		break;
+	case MEMLAT_MON:
+		ret = memlat_mon_probe(pdev, false);
+		break;
+	case COMPUTE_MON:
+		ret = memlat_mon_probe(pdev, true);
+		break;
+	default:
+		/*
+		 * This should never happen.
+		 */
+		dev_err(dev, "Invalid memlat mon type specified: %u\n", type);
+		return -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "Failure to probe memlat device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static const struct memlat_mon_spec spec[] = {
-	[0] = { false },
-	[1] = { true },
+	[0] = { MEMLAT_CPU_GRP },
+	[1] = { MEMLAT_MON },
+	[2] = { COMPUTE_MON },
 };
 
 static const struct of_device_id memlat_match_table[] = {
-	{ .compatible = "qcom,arm-memlat-mon", .data = &spec[0] },
-	{ .compatible = "qcom,arm-cpu-mon", .data = &spec[1] },
+	{ .compatible = "qcom,arm-memlat-cpugrp", .data = &spec[0] },
+	{ .compatible = "qcom,arm-memlat-mon", .data = &spec[1] },
+	{ .compatible = "qcom,arm-compute-mon", .data = &spec[2] },
 	{}
 };
 
diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c
index 22c3a13..af8440b 100644
--- a/drivers/devfreq/devfreq_devbw.c
+++ b/drivers/devfreq/devfreq_devbw.c
@@ -17,7 +17,9 @@
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/devfreq.h>
+#include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <trace/events/power.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
@@ -101,6 +103,8 @@ int devfreq_add_devbw(struct device *dev)
 	u32 ports[MAX_PATHS * 2];
 	const char *gov_name;
 	int ret, len, i, num_paths;
+	struct opp_table *opp_table;
+	u32 version;
 
 	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
 	if (!d)
@@ -147,6 +151,15 @@ int devfreq_add_devbw(struct device *dev)
 	p->target = devbw_target;
 	p->get_dev_status = devbw_get_dev_status;
 
+	if (of_device_is_compatible(dev->of_node, "qcom,devbw-ddr")) {
+		version = (1 << of_fdt_get_ddrtype());
+		opp_table = dev_pm_opp_set_supported_hw(dev, &version, 1);
+		if (IS_ERR(opp_table)) {
+			dev_err(dev, "Failed to set supported hardware\n");
+			return PTR_ERR(opp_table);
+		}
+	}
+
 	ret = dev_pm_opp_of_add_table(dev);
 	if (ret)
 		dev_err(dev, "Couldn't parse OPP table:%d\n", ret);
@@ -203,6 +216,8 @@ static int devfreq_devbw_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id devbw_match_table[] = {
+	{ .compatible = "qcom,devbw-llcc" },
+	{ .compatible = "qcom,devbw-ddr" },
 	{ .compatible = "qcom,devbw" },
 	{}
 };
diff --git a/drivers/devfreq/devfreq_qcom_qoslat.c b/drivers/devfreq/devfreq_qcom_qoslat.c
new file mode 100644
index 0000000..3c73985
--- /dev/null
+++ b/drivers/devfreq/devfreq_qcom_qoslat.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "devfreq-qcom-qoslat: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/of.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/qmp.h>
+
+struct qoslat_data {
+	struct mbox_client		mbox_cl;
+	struct mbox_chan		*mbox;
+	struct devfreq			*df;
+	struct devfreq_dev_profile	profile;
+	unsigned int			qos_level;
+};
+
+#define MAX_MSG_LEN	96
+static int update_qos_level(struct device *dev, struct qoslat_data *d)
+{
+	struct qmp_pkt pkt;
+	char mbox_msg[MAX_MSG_LEN + 1] = {0};
+	char *qos_msg = "off";
+	int ret;
+
+	if (d->qos_level)
+		qos_msg = "on";
+
+	snprintf(mbox_msg, MAX_MSG_LEN, "{class: ddr, perfmode: %s}", qos_msg);
+	pkt.size = MAX_MSG_LEN;
+	pkt.data = mbox_msg;
+
+	ret = mbox_send_message(d->mbox, &pkt);
+	if (ret < 0) {
+		dev_err(dev, "Failed to send mbox message: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dev_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct qoslat_data *d = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+
+	opp = devfreq_recommended_opp(dev, freq, flags);
+	if (!IS_ERR(opp))
+		dev_pm_opp_put(opp);
+	else
+		return PTR_ERR(opp);
+
+	if (*freq == d->qos_level)
+		return 0;
+
+	d->qos_level = *freq;
+
+	return update_qos_level(dev, d);
+}
+
+static int dev_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct qoslat_data *d = dev_get_drvdata(dev);
+
+	*freq = d->qos_level;
+
+	return 0;
+}
+
+static int devfreq_qcom_qoslat_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qoslat_data *d;
+	struct devfreq_dev_profile *p;
+	const char *gov_name;
+	int ret = 0;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	dev_set_drvdata(dev, d);
+
+	if (!of_find_property(dev->of_node, "mboxes", NULL)) {
+		dev_err(dev, "Couldn't find AOP mbox\n");
+		return -EINVAL;
+	}
+	d->mbox_cl.dev = dev;
+	d->mbox_cl.tx_block = true;
+	d->mbox_cl.tx_tout = 1000;
+	d->mbox_cl.knows_txdone = false;
+	d->mbox = mbox_request_channel(&d->mbox_cl, 0);
+	if (IS_ERR(d->mbox)) {
+		ret = PTR_ERR(d->mbox);
+		dev_err(dev, "Failed to get mailbox channel: %d\n", ret);
+		return ret;
+	}
+	d->qos_level = 0;
+
+	p = &d->profile;
+	p->target = dev_target;
+	p->get_cur_freq = dev_get_cur_freq;
+	p->polling_ms = 10;
+
+	ret = dev_pm_opp_of_add_table(dev);
+	if (ret < 0)
+		dev_err(dev, "Couldn't parse OPP table: %d\n", ret);
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "powersave";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		ret = PTR_ERR(d->df);
+		dev_err(dev, "Failed to add devfreq device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id devfreq_qoslat_match_table[] = {
+	{ .compatible = "qcom,devfreq-qoslat" },
+	{}
+};
+
+static struct platform_driver devfreq_qcom_qoslat_driver = {
+	.probe = devfreq_qcom_qoslat_probe,
+	.driver = {
+		.name		= "devfreq-qcom-qoslat",
+		.of_match_table = devfreq_qoslat_match_table,
+	},
+};
+module_platform_driver(devfreq_qcom_qoslat_driver);
+MODULE_DESCRIPTION("Device driver for setting memory latency qos level");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
index 98bef4d..7113c74 100644
--- a/drivers/devfreq/governor_bw_vbif.c
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -5,9 +5,12 @@
 
 #include <linux/devfreq.h>
 #include <linux/module.h>
+#include <linux/msm_adreno_devfreq.h>
+
 #include "governor.h"
 
-unsigned long (*extern_get_bw)(void) = NULL;
+static getbw_func extern_get_bw;
+static void *extern_get_bw_data;
 unsigned long *dev_ab;
 static unsigned long dev_ib;
 
@@ -22,12 +25,12 @@ static struct devfreq *df;
 static int devfreq_vbif_get_freq(struct devfreq *df,
 				unsigned long *freq)
 {
-	/* If the IB isn't set yet, check if it should be non-zero. */
-	if (!dev_ib && extern_get_bw) {
-		dev_ib = extern_get_bw();
-		if (dev_ab)
-			*dev_ab = dev_ib / 4;
-	}
+	unsigned long ab, ib;
+
+	extern_get_bw(&ib, &ab, extern_get_bw_data);
+
+	dev_ib = ib;
+	*dev_ab = ab;
 
 	*freq = dev_ib;
 	return 0;
@@ -38,20 +41,19 @@ static int devfreq_vbif_get_freq(struct devfreq *df,
  * value from legacy vbif based bus bandwidth governor.
  * This function is called by KGSL driver.
  */
-void devfreq_vbif_register_callback(void *p)
+void devfreq_vbif_register_callback(getbw_func func, void *data)
 {
-	extern_get_bw = p;
+	extern_get_bw = func;
+	extern_get_bw_data = data;
 }
 
-int devfreq_vbif_update_bw(unsigned long ib, unsigned long ab)
+int devfreq_vbif_update_bw(void)
 {
 	int ret = 0;
 
 	mutex_lock(&df_lock);
 	if (df) {
 		mutex_lock(&df->lock);
-		dev_ib = ib;
-		*dev_ab = ab;
 		ret = update_devfreq(df);
 		mutex_unlock(&df->lock);
 	}
@@ -78,7 +80,10 @@ static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
 
 		mutex_unlock(&df_lock);
 
-		ret = devfreq_vbif_update_bw(0, 0);
+		dev_ib = 0;
+		*dev_ab = 0;
+
+		ret = devfreq_vbif_update_bw();
 		if (ret) {
 			pr_err("Unable to update BW! Gov start failed!\n");
 			return ret;
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index e3087a7..c07f6a1 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -53,7 +53,7 @@ static int devfreq_gpubw_get_target(struct devfreq *df,
 					(df->profile),
 					struct msm_busmon_extended_profile,
 					profile);
-	struct devfreq_dev_status stats;
+	struct devfreq_dev_status *stats = &df->last_status;
 	struct xstats b;
 	int result;
 	int level = 0;
@@ -73,18 +73,18 @@ static int devfreq_gpubw_get_target(struct devfreq *df,
 	if (priv == NULL)
 		return 0;
 
-	stats.private_data = &b;
+	stats->private_data = &b;
 
-	result = df->profile->get_dev_status(df->dev.parent, &stats);
+	result = devfreq_update_stats(df);
 
-	*freq = stats.current_frequency;
+	*freq = stats->current_frequency;
 
-	priv->bus.total_time += stats.total_time;
-	priv->bus.gpu_time += stats.busy_time;
+	priv->bus.total_time += stats->total_time;
+	priv->bus.gpu_time += stats->busy_time;
 	priv->bus.ram_time += b.ram_time;
 	priv->bus.ram_wait += b.ram_wait;
 
-	level = devfreq_get_freq_level(df, stats.current_frequency);
+	level = devfreq_get_freq_level(df, stats->current_frequency);
 
 	if (priv->bus.total_time < LONG_FLOOR)
 		return result;
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index c279ec8..ada844b 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/device.h>
 #include <linux/of.h>
 #include <linux/devfreq.h>
 #include "governor.h"
@@ -151,7 +152,8 @@ static int start_monitor(struct devfreq *df)
 		return ret;
 	}
 
-	devfreq_monitor_start(df);
+	if (!hw->should_ignore_df_monitor)
+		devfreq_monitor_start(df);
 
 	node->mon_started = true;
 
@@ -165,7 +167,9 @@ static void stop_monitor(struct devfreq *df)
 
 	node->mon_started = false;
 
-	devfreq_monitor_stop(df);
+	if (!hw->should_ignore_df_monitor)
+		devfreq_monitor_stop(df);
+
 	hw->stop_hwmon(hw);
 }
 
@@ -341,13 +345,15 @@ static struct attribute_group compute_dev_attr_group = {
 	.attrs = compute_dev_attr,
 };
 
-#define MIN_MS	10U
+#define MIN_MS	0U
 #define MAX_MS	500U
 static int devfreq_memlat_ev_handler(struct devfreq *df,
 					unsigned int event, void *data)
 {
 	int ret;
 	unsigned int sample_ms;
+	struct memlat_node *node;
+	struct memlat_hwmon *hw;
 
 	switch (event) {
 	case DEVFREQ_GOV_START:
@@ -395,10 +401,15 @@ static int devfreq_memlat_ev_handler(struct devfreq *df,
 		break;
 
 	case DEVFREQ_GOV_INTERVAL:
+		node = df->data;
+		hw = node->hw;
 		sample_ms = *(unsigned int *)data;
 		sample_ms = max(MIN_MS, sample_ms);
 		sample_ms = min(MAX_MS, sample_ms);
-		devfreq_interval_update(df, &sample_ms);
+		if (hw->request_update_ms)
+			hw->request_update_ms(hw, sample_ms);
+		if (!hw->should_ignore_df_monitor)
+			devfreq_interval_update(df, &sample_ms);
 		break;
 	}
 
@@ -419,14 +430,18 @@ static struct devfreq_governor devfreq_gov_compute = {
 
 #define NUM_COLS	2
 static struct core_dev_map *init_core_dev_map(struct device *dev,
-		char *prop_name)
+					struct device_node *of_node,
+					char *prop_name)
 {
 	int len, nf, i, j;
 	u32 data;
 	struct core_dev_map *tbl;
 	int ret;
 
-	if (!of_find_property(dev->of_node, prop_name, &len))
+	if (!of_node)
+		of_node = dev->of_node;
+
+	if (!of_find_property(of_node, prop_name, &len))
 		return NULL;
 	len /= sizeof(data);
 
@@ -440,13 +455,13 @@ static struct core_dev_map *init_core_dev_map(struct device *dev,
 		return NULL;
 
 	for (i = 0, j = 0; i < nf; i++, j += 2) {
-		ret = of_property_read_u32_index(dev->of_node, prop_name, j,
+		ret = of_property_read_u32_index(of_node, prop_name, j,
 				&data);
 		if (ret)
 			return NULL;
 		tbl[i].core_mhz = data / 1000;
 
-		ret = of_property_read_u32_index(dev->of_node, prop_name, j + 1,
+		ret = of_property_read_u32_index(of_node, prop_name, j + 1,
 				&data);
 		if (ret)
 			return NULL;
@@ -463,6 +478,7 @@ static struct memlat_node *register_common(struct device *dev,
 					   struct memlat_hwmon *hw)
 {
 	struct memlat_node *node;
+	struct device_node *of_child;
 
 	if (!hw->dev && !hw->of_node)
 		return ERR_PTR(-EINVAL);
@@ -474,7 +490,14 @@ static struct memlat_node *register_common(struct device *dev,
 	node->ratio_ceil = 10;
 	node->hw = hw;
 
-	hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
+	if (hw->get_child_of_node) {
+		of_child = hw->get_child_of_node(dev);
+		hw->freq_map = init_core_dev_map(dev, of_child,
+					"qcom,core-dev-table");
+	} else {
+		hw->freq_map = init_core_dev_map(dev, NULL,
+					"qcom,core-dev-table");
+	}
 	if (!hw->freq_map) {
 		dev_err(dev, "Couldn't find the core-dev freq table!\n");
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index c3c71a3..0298902 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -54,6 +54,9 @@ struct memlat_hwmon {
 	int (*start_hwmon)(struct memlat_hwmon *hw);
 	void (*stop_hwmon)(struct memlat_hwmon *hw);
 	unsigned long (*get_cnt)(struct memlat_hwmon *hw);
+	struct device_node *(*get_child_of_node)(struct device *dev);
+	void (*request_update_ms)(struct memlat_hwmon *hw,
+				  unsigned int update_ms);
 	struct device *dev;
 	struct device_node *of_node;
 
@@ -62,6 +65,7 @@ struct memlat_hwmon {
 
 	struct devfreq *df;
 	struct core_dev_map *freq_map;
+	bool should_ignore_df_monitor;
 };
 
 #ifdef CONFIG_DEVFREQ_GOV_MEMLAT
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index f6cab36..ae4bfc2 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -14,6 +14,7 @@
 #include <linux/msm_adreno_devfreq.h>
 #include <asm/cacheflush.h>
 #include <soc/qcom/scm.h>
+#include <soc/qcom/qtee_shmbridge.h>
 #include "governor.h"
 
 static DEFINE_SPINLOCK(tz_lock);
@@ -229,14 +230,25 @@ static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv)
 	struct scm_desc desc = {0};
 	u8 *tz_buf;
 	int ret;
+	struct qtee_shm shm;
 
 	/* Set data for TZ */
 	tz_ca_data[0] = priv->bin.ctxt_aware_target_pwrlevel;
 	tz_ca_data[1] = priv->bin.ctxt_aware_busy_penalty;
 
-	tz_buf = kzalloc(PAGE_ALIGN(sizeof(tz_ca_data)), GFP_KERNEL);
-	if (!tz_buf)
-		return -ENOMEM;
+	if (!qtee_shmbridge_is_enabled()) {
+		tz_buf = kzalloc(PAGE_ALIGN(sizeof(tz_ca_data)), GFP_KERNEL);
+		if (!tz_buf)
+			return -ENOMEM;
+		desc.args[0] = virt_to_phys(tz_buf);
+	} else {
+		ret = qtee_shmbridge_allocate_shm(
+				PAGE_ALIGN(sizeof(tz_ca_data)), &shm);
+		if (ret)
+			return -ENOMEM;
+		tz_buf = shm.vaddr;
+		desc.args[0] = shm.paddr;
+	}
 
 	memcpy(tz_buf, tz_ca_data, sizeof(tz_ca_data));
 	/* Ensure memcpy completes execution */
@@ -244,15 +256,16 @@ static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv)
 	dmac_flush_range(tz_buf,
 		tz_buf + PAGE_ALIGN(sizeof(tz_ca_data)));
 
-	desc.args[0] = virt_to_phys(tz_buf);
 	desc.args[1] = sizeof(tz_ca_data);
 	desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
 
 	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
 			TZ_V2_INIT_CA_ID_64),
 			&desc);
-
-	kzfree(tz_buf);
+	if (!qtee_shmbridge_is_enabled())
+		kzfree(tz_buf);
+	else
+		qtee_shmbridge_free_shm(&shm);
 
 	return ret;
 }
@@ -268,16 +281,28 @@ static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
 			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
 		struct scm_desc desc = {0};
 		u8 *tz_buf;
+		struct qtee_shm shm;
 
-		tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
-		if (!tz_buf)
-			return -ENOMEM;
+		if (!qtee_shmbridge_is_enabled()) {
+			tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels),
+						GFP_KERNEL);
+			if (!tz_buf)
+				return -ENOMEM;
+			desc.args[0] = virt_to_phys(tz_buf);
+		} else {
+			ret = qtee_shmbridge_allocate_shm(
+					PAGE_ALIGN(size_pwrlevels), &shm);
+			if (ret)
+				return -ENOMEM;
+			tz_buf = shm.vaddr;
+			desc.args[0] = shm.paddr;
+		}
+
 		memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
 		/* Ensure memcpy completes execution */
 		mb();
 		dmac_flush_range(tz_buf, tz_buf + PAGE_ALIGN(size_pwrlevels));
 
-		desc.args[0] = virt_to_phys(tz_buf);
 		desc.args[1] = size_pwrlevels;
 		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
 
@@ -286,7 +311,10 @@ static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
 		*version = desc.ret[0];
 		if (!ret)
 			priv->is_64 = true;
-		kzfree(tz_buf);
+		if (!qtee_shmbridge_is_enabled())
+			kzfree(tz_buf);
+		else
+			qtee_shmbridge_free_shm(&shm);
 	} else
 		ret = -EINVAL;
 
@@ -333,42 +361,42 @@ static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
 {
 	int result = 0;
 	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-	struct devfreq_dev_status stats;
+	struct devfreq_dev_status *stats = &devfreq->last_status;
 	int val, level = 0;
 	unsigned int scm_data[4];
 	int context_count = 0;
 
 	/* keeps stats.private_data == NULL   */
-	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
+	result = devfreq_update_stats(devfreq);
 	if (result) {
 		pr_err(TAG "get_status failed %d\n", result);
 		return result;
 	}
 
-	*freq = stats.current_frequency;
-	priv->bin.total_time += stats.total_time;
-	priv->bin.busy_time += stats.busy_time;
+	*freq = stats->current_frequency;
+	priv->bin.total_time += stats->total_time;
+	priv->bin.busy_time += stats->busy_time;
 
-	if (stats.private_data)
-		context_count =  *((int *)stats.private_data);
+	if (stats->private_data)
+		context_count =  *((int *)stats->private_data);
 
 	/* Update the GPU load statistics */
-	compute_work_load(&stats, priv, devfreq);
+	compute_work_load(stats, priv, devfreq);
 	/*
 	 * Do not waste CPU cycles running this algorithm if
 	 * the GPU just started, or if less than FLOOR time
 	 * has passed since the last run or the gpu hasn't been
 	 * busier than MIN_BUSY.
 	 */
-	if ((stats.total_time == 0) ||
+	if ((stats->total_time == 0) ||
 		(priv->bin.total_time < FLOOR) ||
 		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
 		return 0;
 	}
 
-	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
+	level = devfreq_get_freq_level(devfreq, stats->current_frequency);
 	if (level < 0) {
-		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
+		pr_err(TAG "bad freq %ld\n", stats->current_frequency);
 		return level;
 	}
 
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a75b95f..db5b8fe 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data)
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		BUG_ON(!desc->active_xfer);
+		if (!desc->active_xfer) {
+			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+			spin_unlock_bh(&atchan->lock);
+			return;
+		}
 
 		txd = &desc->tx_dma_desc;
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 88750a34..bc8050c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -961,6 +961,7 @@ static void _stop(struct pl330_thread *thrd)
 {
 	void __iomem *regs = thrd->dmac->base;
 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
+	u32 inten = readl(regs + INTEN);
 
 	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
 		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -973,10 +974,13 @@ static void _stop(struct pl330_thread *thrd)
 
 	_emit_KILL(0, insn);
 
-	/* Stop generating interrupts for SEV */
-	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
 	_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+	/* clear the event */
+	if (inten & (1 << thrd->ev))
+		writel(1 << thrd->ev, regs + INTCLR);
+	/* Stop generating interrupts for SEV */
+	writel(inten & ~(1 << thrd->ev), regs + INTEN);
 }
 
 /* Start doing req 'idx' of thread 'thrd' */
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 6e795a0..c6cbe61 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -434,6 +434,7 @@ struct gpi_dev {
 	struct device *dev;
 	struct resource *res;
 	void __iomem *regs;
+	void *ee_base; /* EE register base address */
 	u32 max_gpii; /* maximum # of gpii instances available per gpi block */
 	u32 gpii_mask; /* gpii instances available for apps */
 	u32 ev_factor; /* ev ring length factor */
@@ -524,8 +525,6 @@ static irqreturn_t gpi_handle_irq(int irq, void *data);
 static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
 static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
 static void gpi_process_events(struct gpii *gpii);
-static u64 get_gpi_ee_base(u64 base, int num_gpii);
-
 
 static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
 {
@@ -2640,24 +2639,12 @@ static int gpi_smmu_init(struct gpi_dev *gpi_dev)
 	return ret;
 }
 
-/* Variabe EE register offset Kona */
-static u64 get_gpi_ee_base(u64 base, int num_gpii)
-{
-	if (num_gpii == QUP0_NUM_GPII_KONA)
-		base -= QUP0_VAR_OFFSET_KONA;
-	else
-		base -= QUP1_VAR_OFFSET_KONA;
-	return base;
-}
-
 static int gpi_probe(struct platform_device *pdev)
 {
 	struct gpi_dev *gpi_dev;
 	int ret, i;
 	const char *mode = NULL;
-	u64 ee_base;
-	u64 base;
-	bool gpii_offset;
+	u32 gpi_ee_offset;
 
 	gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
 	if (!gpi_dev)
@@ -2678,6 +2665,8 @@ static int gpi_probe(struct platform_device *pdev)
 		return -EFAULT;
 	}
 
+	gpi_dev->ee_base = gpi_dev->regs;
+
 	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
 				   &gpi_dev->max_gpii);
 	if (ret) {
@@ -2692,8 +2681,13 @@ static int gpi_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	gpii_offset = of_property_read_bool(gpi_dev->dev->of_node,
-						"qcom,gpii_offset");
+	ret = of_property_read_u32(gpi_dev->dev->of_node,
+					"qcom,gpi-ee-offset", &gpi_ee_offset);
+	if (ret)
+		GPI_LOG(gpi_dev, "No variable ee offset present\n");
+	else
+		gpi_dev->ee_base =
+		(void *)((u64)gpi_dev->ee_base - gpi_ee_offset);
 
 	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
 				   &gpi_dev->ev_factor);
@@ -2760,12 +2754,6 @@ static int gpi_probe(struct platform_device *pdev)
 	if (!gpi_dev->gpiis)
 		return -ENOMEM;
 
-	if (gpii_offset) {
-		base = (u64)gpi_dev->regs;
-		ee_base = get_gpi_ee_base(base, gpi_dev->max_gpii);
-	} else
-		ee_base = (u64)gpi_dev->regs;
-
 	/* setup all the supported gpii */
 	INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
 	for (i = 0; i < gpi_dev->max_gpii; i++) {
@@ -2776,9 +2764,9 @@ static int gpi_probe(struct platform_device *pdev)
 			continue;
 
 		/* set up ev cntxt register map */
-		gpii->ev_cntxt_base_reg = (void *)ee_base +
+		gpii->ev_cntxt_base_reg = gpi_dev->ee_base +
 			GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
-		gpii->ev_cntxt_db_reg = (void *)ee_base +
+		gpii->ev_cntxt_db_reg = gpi_dev->ee_base +
 			GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
 		gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
 			CNTXT_2_RING_BASE_LSB;
@@ -2786,11 +2774,11 @@ static int gpi_probe(struct platform_device *pdev)
 			CNTXT_4_RING_RP_LSB;
 		gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
 			CNTXT_6_RING_WP_LSB;
-		gpii->ev_cmd_reg = (void *)ee_base +
+		gpii->ev_cmd_reg = gpi_dev->ee_base +
 			GPI_GPII_n_EV_CH_CMD_OFFS(i);
-		gpii->ieob_src_reg = (void *)ee_base +
+		gpii->ieob_src_reg = gpi_dev->ee_base +
 			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
-		gpii->ieob_clr_reg = (void *)ee_base +
+		gpii->ieob_clr_reg = gpi_dev->ee_base +
 			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
 
 		/* set up irq */
@@ -2807,9 +2795,9 @@ static int gpi_probe(struct platform_device *pdev)
 			struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
 
 			/* set up ch cntxt register map */
-			gpii_chan->ch_cntxt_base_reg = (void *)ee_base +
+			gpii_chan->ch_cntxt_base_reg = gpi_dev->ee_base +
 				GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
-			gpii_chan->ch_cntxt_db_reg = (void *)ee_base +
+			gpii_chan->ch_cntxt_db_reg = gpi_dev->ee_base +
 				GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
 			gpii_chan->ch_ring_base_lsb_reg =
 				gpii_chan->ch_cntxt_base_reg +
@@ -2820,7 +2808,7 @@ static int gpi_probe(struct platform_device *pdev)
 			gpii_chan->ch_ring_wp_lsb_reg =
 				gpii_chan->ch_cntxt_base_reg +
 				CNTXT_6_RING_WP_LSB;
-			gpii_chan->ch_cmd_reg = (void *)ee_base +
+			gpii_chan->ch_cmd_reg = gpi_dev->ee_base +
 				GPI_GPII_n_CH_CMD_OFFS(i);
 
 			/* vchan setup */
@@ -2836,7 +2824,7 @@ static int gpi_probe(struct platform_device *pdev)
 			     (unsigned long)gpii);
 		init_completion(&gpii->cmd_completion);
 		gpii->gpii_id = i;
-		gpii->regs = (void *)ee_base;
+		gpii->regs = gpi_dev->ee_base;
 		gpii->gpi_dev = gpi_dev;
 		atomic_set(&gpii->dbg_index, 0);
 	}
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
index 67221f76..acd74df 100644
--- a/drivers/dma/qcom/msm_gpi_mmio.h
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -216,7 +216,3 @@ enum CNTXT_OFFS {
 #define GPI_GPII_n_CH_k_SCRATCH_3_OFFS(n, k) \
 	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
 
-/* Variabe EE register offset */
-#define QUP0_NUM_GPII_KONA 15
-#define QUP0_VAR_OFFSET_KONA (0x1000)
-#define QUP1_VAR_OFFSET_KONA (0x6000)
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48ee35e..0b05a1e 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1281,6 +1281,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	enum dma_status status;
 	unsigned int residue = 0;
 	unsigned int dptr = 0;
+	unsigned int chcrb;
+	unsigned int tcrb;
+	unsigned int i;
 
 	if (!desc)
 		return 0;
@@ -1329,14 +1332,31 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/*
+	 * We need to read two registers.
+	 * Make sure the control register does not skip to next chunk
+	 * while reading the counter.
+	 * Trying it 3 times should be enough: Initial read, retry, retry
+	 * for the paranoid.
+	 */
+	for (i = 0; i < 3; i++) {
+		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+					    RCAR_DMACHCRB_DPTR_MASK;
+		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+		/* Still the same? */
+		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			      RCAR_DMACHCRB_DPTR_MASK))
+			break;
+	}
+	WARN_ONCE(i >= 3, "residue might be not continuous!");
+
+	/*
 	 * In descriptor mode the descriptor running pointer is not maintained
 	 * by the interrupt handler, find the running descriptor from the
 	 * descriptor pointer field in the CHCRB register. In non-descriptor
 	 * mode just use the running descriptor pointer.
 	 */
 	if (desc->hwdescs.use) {
-		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
 		if (dptr == 0)
 			dptr = desc->nchunks;
 		dptr--;
@@ -1354,7 +1374,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/* Add the residue for the current chunk. */
-	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+	residue += tcrb << desc->xfer_shift;
 
 	return residue;
 }
@@ -1367,6 +1387,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 	enum dma_status status;
 	unsigned long flags;
 	unsigned int residue;
+	bool cyclic;
 
 	status = dma_cookie_status(chan, cookie, txstate);
 	if (status == DMA_COMPLETE || !txstate)
@@ -1374,10 +1395,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&rchan->lock, flags);
 	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
 	spin_unlock_irqrestore(&rchan->lock, flags);
 
 	/* if there's no residue, the cookie is complete */
-	if (!residue)
+	if (!residue && !cyclic)
 		return DMA_COMPLETE;
 
 	dma_set_residue(txstate, residue);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b26256f..09b6756 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -22,7 +22,6 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_irq.h>
-#include <linux/pm_clock.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
@@ -141,6 +140,7 @@ struct tegra_adma {
 	struct dma_device		dma_dev;
 	struct device			*dev;
 	void __iomem			*base_addr;
+	struct clk			*ahub_clk;
 	unsigned int			nr_channels;
 	unsigned long			rx_requests_reserved;
 	unsigned long			tx_requests_reserved;
@@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 
 	tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+	clk_disable_unprepare(tdma->ahub_clk);
 
-	return pm_clk_suspend(dev);
+	return 0;
 }
 
 static int tegra_adma_runtime_resume(struct device *dev)
@@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev)
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	int ret;
 
-	ret = pm_clk_resume(dev);
-	if (ret)
+	ret = clk_prepare_enable(tdma->ahub_clk);
+	if (ret) {
+		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
 		return ret;
-
+	}
 	tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
 
 	return 0;
@@ -692,13 +694,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	if (IS_ERR(tdma->base_addr))
 		return PTR_ERR(tdma->base_addr);
 
-	ret = pm_clk_create(&pdev->dev);
-	if (ret)
-		return ret;
-
-	ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
-	if (ret)
-		goto clk_destroy;
+	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
+	if (IS_ERR(tdma->ahub_clk)) {
+		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
+		return PTR_ERR(tdma->ahub_clk);
+	}
 
 	pm_runtime_enable(&pdev->dev);
 
@@ -775,8 +775,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	pm_runtime_put_sync(&pdev->dev);
 rpm_disable:
 	pm_runtime_disable(&pdev->dev);
-clk_destroy:
-	pm_clk_destroy(&pdev->dev);
 
 	return ret;
 }
@@ -786,6 +784,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
 	struct tegra_adma *tdma = platform_get_drvdata(pdev);
 	int i;
 
+	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&tdma->dma_dev);
 
 	for (i = 0; i < tdma->nr_channels; ++i)
@@ -793,7 +792,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	pm_clk_destroy(&pdev->dev);
 
 	return 0;
 }
diff --git a/drivers/edac/kryo_arm64_edac.c b/drivers/edac/kryo_arm64_edac.c
index 1417e47..eaa333f 100644
--- a/drivers/edac/kryo_arm64_edac.c
+++ b/drivers/edac/kryo_arm64_edac.c
@@ -40,6 +40,7 @@ module_param(poll_msec, int, 0444);
 #define L3_BIT 0x2
 
 #define QCOM_CPU_PART_KRYO4XX_GOLD 0x804
+#define QCOM_CPU_PART_KRYO5XX_GOLD 0xD0D
 #define QCOM_CPU_PART_KRYO4XX_SILVER_V1 0x803
 #define QCOM_CPU_PART_KRYO4XX_SILVER_V2 0x805
 
@@ -275,6 +276,7 @@ static void kryo_parse_l1_l2_cache_error(u64 errxstatus, u64 errxmisc,
 		}
 		break;
 	case QCOM_CPU_PART_KRYO4XX_GOLD:
+	case QCOM_CPU_PART_KRYO5XX_GOLD:
 		switch (KRYO_ERRXMISC_LVL_GOLD(errxmisc)) {
 		case L1_GOLD_DC_BIT:
 		case L1_GOLD_IC_BIT:
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index df28b65..903a4f1 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1541,7 +1541,7 @@ static struct dunit_ops dnv_ops = {
 
 static const struct x86_cpu_id pnd2_cpuids[] = {
 	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
 	{ }
 };
 MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 4f0fe68..afcf2ce 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/coresight.h>
@@ -262,7 +262,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
 		esoc_mdm_log(
 		"ESOC_FORCE_PWR_OFF: Queueing request: ESOC_REQ_SHUTDOWN\n");
 		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
-		mdm_power_down(mdm);
+		mdm_toggle_soft_reset(mdm, false);
 		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
 		break;
 	case ESOC_RESET:
@@ -484,7 +484,7 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
 		mdm->ready = false;
 		esoc_mdm_log(
 		"ESOC_PRIMARY_REBOOT: Powering down the modem\n");
-		mdm_power_down(mdm);
+		mdm_toggle_soft_reset(mdm, false);
 		break;
 	};
 }
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 64a23f9..c5fb9c4 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -13,24 +13,11 @@
 #include "mdm-dbg.h"
 
 /* Default number of powerup trial requests per session */
-#define ESOC_DEF_PON_REQ	2
-static unsigned int n_pon_tries = ESOC_DEF_PON_REQ;
-module_param(n_pon_tries, uint, 0644);
-MODULE_PARM_DESC(n_pon_tries,
-"Number of power-on retrials allowed upon boot failure");
+#define ESOC_DEF_PON_REQ	3
 
-enum esoc_boot_fail_action {
-	BOOT_FAIL_ACTION_RETRY,
-	BOOT_FAIL_ACTION_COLD_RESET,
-	BOOT_FAIL_ACTION_SHUTDOWN,
-	BOOT_FAIL_ACTION_PANIC,
-	BOOT_FAIL_ACTION_NOP,
-};
+#define ESOC_MAX_PON_TRIES	5
 
-static unsigned int boot_fail_action = BOOT_FAIL_ACTION_NOP;
-module_param(boot_fail_action, uint, 0644);
-MODULE_PARM_DESC(boot_fail_action,
-"Actions: 0:Retry PON; 1:Cold reset; 2:Power-down; 3:APQ Panic; 4:No action");
+#define BOOT_FAIL_ACTION_DEF BOOT_FAIL_ACTION_PANIC
 
 enum esoc_pon_state {
 	PON_INIT,
@@ -61,9 +48,60 @@ struct mdm_drv {
 	struct workqueue_struct *mdm_queue;
 	struct work_struct ssr_work;
 	struct notifier_block esoc_restart;
+	struct mutex poff_lock;
+	atomic_t boot_fail_action;
+	atomic_t n_pon_tries;
 };
 #define to_mdm_drv(d)	container_of(d, struct mdm_drv, cmd_eng)
 
+#define S3_RESET_DELAY_MS	1000
+
+static void esoc_client_link_power_off(struct esoc_clink *esoc_clink,
+							unsigned int flags);
+static void esoc_client_link_mdm_crash(struct esoc_clink *esoc_clink);
+
+int esoc_set_boot_fail_action(struct esoc_clink *esoc_clink, u32 action)
+{
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+
+	if (action >= BOOT_FAIL_ACTION_LAST) {
+		esoc_mdm_log("Unknown boot fail action requested: %u\n",
+				action);
+		return -EINVAL;
+	}
+
+	if (!mdm_drv) {
+		esoc_mdm_log("esoc-mdm driver not present\n");
+		return -EAGAIN;
+	}
+
+	atomic_set(&mdm_drv->boot_fail_action, action);
+	esoc_mdm_log("Boot fail action configured to %u\n", action);
+
+	return 0;
+}
+
+int esoc_set_n_pon_tries(struct esoc_clink *esoc_clink, u32 n_tries)
+{
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+
+	if (n_tries > ESOC_MAX_PON_TRIES) {
+		esoc_mdm_log(
+			"Num PON tries requested (%u) is over the limit: %u\n",
+			n_tries, ESOC_MAX_PON_TRIES);
+	}
+
+	if (!mdm_drv) {
+		esoc_mdm_log("esoc-mdm driver not present\n");
+		return -EAGAIN;
+	}
+
+	atomic_set(&mdm_drv->n_pon_tries, n_tries);
+	esoc_mdm_log("Num PON tries configured to %u\n", n_tries);
+
+	return 0;
+}
+
 static int esoc_msm_restart_handler(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -75,10 +113,20 @@ static int esoc_msm_restart_handler(struct notifier_block *nb,
 	if (action == SYS_RESTART) {
 		if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT))
 			return NOTIFY_OK;
+		mutex_lock(&mdm_drv->poff_lock);
+		if (mdm_drv->mode == PWR_OFF) {
+			esoc_mdm_log(
+			"Reboot notifier: mdm already powered-off\n");
+			mutex_unlock(&mdm_drv->poff_lock);
+			return NOTIFY_OK;
+		}
+		esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_DOWN);
 		esoc_mdm_log(
 			"Reboot notifier: Notifying esoc of cold reboot\n");
 		dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
 		clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
+		mdm_drv->mode = PWR_OFF;
+		mutex_unlock(&mdm_drv->poff_lock);
 	}
 	return NOTIFY_OK;
 }
@@ -147,10 +195,11 @@ static void mdm_ssr_fn(struct work_struct *work)
 	struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
 	struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
 
+	esoc_client_link_mdm_crash(mdm_drv->esoc_clink);
+
 	mdm_wait_for_status_low(mdm, false);
 
 	esoc_mdm_log("Starting SSR work\n");
-
 	/*
 	 * If restarting esoc fails, the SSR framework triggers a kernel panic
 	 */
@@ -158,42 +207,57 @@ static void mdm_ssr_fn(struct work_struct *work)
 }
 
 static void esoc_client_link_power_on(struct esoc_clink *esoc_clink,
-							bool mdm_crashed)
+						unsigned int flags)
 {
 	int i;
 	struct esoc_client_hook *client_hook;
 
 	dev_dbg(&esoc_clink->dev, "Calling power_on hooks\n");
 	esoc_mdm_log(
-	"Calling power_on hooks with crash state: %d\n", mdm_crashed);
+	"Calling power_on hooks with flags: 0x%x\n", flags);
 
 	for (i = 0; i < ESOC_MAX_HOOKS; i++) {
 		client_hook = esoc_clink->client_hook[i];
 		if (client_hook && client_hook->esoc_link_power_on)
 			client_hook->esoc_link_power_on(client_hook->priv,
-							mdm_crashed);
+							flags);
 	}
 }
 
 static void esoc_client_link_power_off(struct esoc_clink *esoc_clink,
-							bool mdm_crashed)
+						unsigned int flags)
 {
 	int i;
 	struct esoc_client_hook *client_hook;
 
 	dev_dbg(&esoc_clink->dev, "Calling power_off hooks\n");
 	esoc_mdm_log(
-	"Calling power_off hooks with crash state: %d\n", mdm_crashed);
+	"Calling power_off hooks with flags: 0x%x\n", flags);
 
 	for (i = 0; i < ESOC_MAX_HOOKS; i++) {
 		client_hook = esoc_clink->client_hook[i];
 		if (client_hook && client_hook->esoc_link_power_off) {
 			client_hook->esoc_link_power_off(client_hook->priv,
-							mdm_crashed);
+							flags);
 		}
 	}
 }
 
+static void esoc_client_link_mdm_crash(struct esoc_clink *esoc_clink)
+{
+	int i;
+	struct esoc_client_hook *client_hook;
+
+	dev_dbg(&esoc_clink->dev, "Calling mdm_crash hooks\n");
+	esoc_mdm_log("Calling mdm_crash hooks\n");
+
+	for (i = 0; i < ESOC_MAX_HOOKS; i++) {
+		client_hook = esoc_clink->client_hook[i];
+		if (client_hook && client_hook->esoc_link_mdm_crash)
+			client_hook->esoc_link_mdm_crash(client_hook->priv);
+	}
+}
+
 static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
 {
 	struct esoc_clink *esoc_clink =
@@ -230,7 +294,7 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
 			return 0;
 
 		esoc_clink_queue_request(ESOC_REQ_CRASH_SHUTDOWN, esoc_clink);
-		esoc_client_link_power_off(esoc_clink, true);
+		esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_CRASH);
 
 		esoc_mdm_log("Executing the ESOC_PREPARE_DEBUG command\n");
 		ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG,
@@ -243,18 +307,26 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
 		mdm_drv->mode = IN_DEBUG;
 	} else if (!force_stop) {
 		esoc_mdm_log("Graceful shutdown mode\n");
+		mutex_lock(&mdm_drv->poff_lock);
+		if (mdm_drv->mode == PWR_OFF) {
+			mutex_unlock(&mdm_drv->poff_lock);
+			esoc_mdm_log("mdm already powered-off\n");
+			return 0;
+		}
 		if (esoc_clink->subsys.sysmon_shutdown_ret) {
 			esoc_mdm_log(
 			"Executing the ESOC_FORCE_PWR_OFF command\n");
 			ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
 							esoc_clink);
 		} else {
-			if (mdm_dbg_stall_cmd(ESOC_PWR_OFF))
+			if (mdm_dbg_stall_cmd(ESOC_PWR_OFF)) {
 				/* Since power off command is masked
 				 * we return success, and leave the state
 				 * of the command engine as is.
 				 */
+				mutex_unlock(&mdm_drv->poff_lock);
 				return 0;
+			}
 			dev_dbg(&esoc_clink->dev, "Sending sysmon-shutdown\n");
 			esoc_mdm_log("Executing the ESOC_PWR_OFF command\n");
 			ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
@@ -263,25 +335,28 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
 			esoc_mdm_log(
 			"Executing the ESOC_PWR_OFF command failed\n");
 			dev_err(&esoc_clink->dev, "failed to exe power off\n");
+			mutex_unlock(&mdm_drv->poff_lock);
 			return ret;
 		}
-		esoc_client_link_power_off(esoc_clink, false);
+		esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_DOWN);
 		/* Pull the reset line low to turn off the device */
 		clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF, esoc_clink);
 		mdm_drv->mode = PWR_OFF;
+		mutex_unlock(&mdm_drv->poff_lock);
 	}
 	esoc_mdm_log("Shutdown completed\n");
 	return 0;
 }
 
-static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink)
+static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink,
+							unsigned int poff_flags)
 {
 	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
 	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 
 	esoc_mdm_log("Doing cleanup\n");
 
-	esoc_client_link_power_off(esoc_clink, false);
+	esoc_client_link_power_off(esoc_clink, poff_flags);
 	mdm_disable_irqs(mdm);
 	mdm_drv->pon_state = PON_INIT;
 	reinit_completion(&mdm_drv->pon_done);
@@ -292,10 +367,17 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink)
 static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
 {
 	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 
-	switch (boot_fail_action) {
+	if (*pon_trial == atomic_read(&mdm_drv->n_pon_tries)) {
+		esoc_mdm_log("Reached max. number of boot trials\n");
+		atomic_set(&mdm_drv->boot_fail_action,
+					BOOT_FAIL_ACTION_PANIC);
+	}
+
+	switch (atomic_read(&mdm_drv->boot_fail_action)) {
 	case BOOT_FAIL_ACTION_RETRY:
-		mdm_subsys_retry_powerup_cleanup(esoc_clink);
+		mdm_subsys_retry_powerup_cleanup(esoc_clink, 0);
 		esoc_mdm_log("Request to retry a warm reset\n");
 		(*pon_trial)++;
 		break;
@@ -305,11 +387,20 @@ static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
 	 * issuing a cold reset & a warm reset back to back.
 	 */
 	case BOOT_FAIL_ACTION_COLD_RESET:
-		mdm_subsys_retry_powerup_cleanup(esoc_clink);
+		mdm_subsys_retry_powerup_cleanup(esoc_clink,
+							ESOC_HOOK_MDM_DOWN);
 		esoc_mdm_log("Doing cold reset by power-down and warm reset\n");
 		(*pon_trial)++;
 		mdm_power_down(mdm);
 		break;
+	case BOOT_FAIL_ACTION_S3_RESET:
+		mdm_subsys_retry_powerup_cleanup(esoc_clink,
+							ESOC_HOOK_MDM_DOWN);
+		esoc_mdm_log("Doing an S3 reset\n");
+		(*pon_trial)++;
+		mdm_power_down(mdm);
+		msleep(S3_RESET_DELAY_MS);
+		break;
 	case BOOT_FAIL_ACTION_PANIC:
 		esoc_mdm_log("Calling panic!!\n");
 		panic("Panic requested on external modem boot failure\n");
@@ -319,7 +410,8 @@ static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
 		return -EIO;
 	case BOOT_FAIL_ACTION_SHUTDOWN:
 	default:
-		mdm_subsys_retry_powerup_cleanup(esoc_clink);
+		mdm_subsys_retry_powerup_cleanup(esoc_clink,
+							ESOC_HOOK_MDM_DOWN);
 		esoc_mdm_log("Shutdown the modem and quit\n");
 		mdm_power_down(mdm);
 		return -EIO;
@@ -337,7 +429,7 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
 	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
 	int timeout = INT_MAX;
-	u8 pon_trial = 1;
+	u8 pon_trial = 0;
 
 	esoc_mdm_log("Powerup request from SSR\n");
 
@@ -362,7 +454,7 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
 				dev_err(&esoc_clink->dev, "pwr on fail\n");
 				return ret;
 			}
-			esoc_client_link_power_on(esoc_clink, false);
+			esoc_client_link_power_on(esoc_clink, 0);
 		} else if (mdm_drv->mode == IN_DEBUG) {
 			esoc_mdm_log("In SSR power-on mode\n");
 			esoc_mdm_log("Executing the ESOC_EXIT_DEBUG command\n");
@@ -381,7 +473,8 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
 				dev_err(&esoc_clink->dev, "pwr on fail\n");
 				return ret;
 			}
-			esoc_client_link_power_on(esoc_clink, true);
+			esoc_client_link_power_on(esoc_clink,
+							ESOC_HOOK_MDM_CRASH);
 		}
 
 		/*
@@ -406,11 +499,11 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
 			esoc_mdm_log(
 			"Boot failed. Doing cleanup and attempting to retry\n");
 			pon_trial++;
-			mdm_subsys_retry_powerup_cleanup(esoc_clink);
+			mdm_subsys_retry_powerup_cleanup(esoc_clink, 0);
 		} else if (mdm_drv->pon_state == PON_SUCCESS) {
 			break;
 		}
-	} while (pon_trial <= n_pon_tries);
+	} while (pon_trial <= atomic_read(&mdm_drv->n_pon_tries));
 
 	return 0;
 }
@@ -466,6 +559,7 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
 		dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
 		return ret;
 	}
+	mutex_init(&mdm_drv->poff_lock);
 	ret = mdm_register_ssr(esoc_clink);
 	if (ret)
 		goto ssr_err;
@@ -481,6 +575,8 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
 	mdm_drv->esoc_clink = esoc_clink;
 	mdm_drv->mode = PWR_OFF;
 	mdm_drv->pon_state = PON_INIT;
+	atomic_set(&mdm_drv->boot_fail_action, BOOT_FAIL_ACTION_DEF);
+	atomic_set(&mdm_drv->n_pon_tries, ESOC_DEF_PON_REQ);
 	mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
 	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
 	if (ret)
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
index cc6bc8a..65b0f63 100644
--- a/drivers/esoc/esoc.h
+++ b/drivers/esoc/esoc.h
@@ -186,3 +186,7 @@ static inline void notify_esoc_clients(struct esoc_clink *esoc_clink,
 bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
 bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
 #endif
+
+/* Modem boot fail actions */
+int esoc_set_boot_fail_action(struct esoc_clink *esoc_clink, u32 action);
+int esoc_set_n_pon_tries(struct esoc_clink *esoc_clink, u32 n_tries);
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 2c496f4..04e7a64 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -323,6 +323,12 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
 		return err;
 	case ESOC_GET_LINK_ID:
 		return esoc_get_link_id(esoc_clink, arg);
+	case ESOC_SET_BOOT_FAIL_ACT:
+		get_user(esoc_cmd, (u32 __user *) arg);
+		return esoc_set_boot_fail_action(esoc_clink, esoc_cmd);
+	case ESOC_SET_N_PON_TRIES:
+		get_user(esoc_cmd, (u32 __user *) arg);
+		return esoc_set_n_pon_tries(esoc_clink, esoc_cmd);
 	default:
 		return -EINVAL;
 	};
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index da0e9bc..9327479 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
 	struct arizona_extcon_info *info = platform_get_drvdata(pdev);
 	struct arizona *arizona = info->arizona;
 	int jack_irq_rise, jack_irq_fall;
+	bool change;
+
+	regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				 ARIZONA_MICD_ENA, 0,
+				 &change);
+
+	if (change) {
+		regulator_disable(info->micvdd);
+		pm_runtime_put(info->dev);
+	}
 
 	gpiod_put(info->micd_pol_gpio);
 
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index d93e512..6183074 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -178,6 +178,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
 	if (data->irq <= 0)
 		return data->irq;
 
+	data->check_on_resume = true;
+
 	data->supported_cable = devm_kzalloc(dev,
 					     sizeof(*data->supported_cable) * 2,
 					     GFP_KERNEL);
@@ -229,17 +231,29 @@ static int gpio_extcon_remove(struct platform_device *pdev)
 static int gpio_extcon_resume(struct device *dev)
 {
 	struct gpio_extcon_data *data;
+	int state, ret = 0;
 
 	data = dev_get_drvdata(dev);
-	if (data->check_on_resume)
-		queue_delayed_work(system_power_efficient_wq,
-			&data->work, data->debounce_jiffies);
+	if (data->check_on_resume) {
+		state = gpiod_get_value_cansleep(data->gpiod);
+		ret = extcon_set_state_sync(data->edev, data->extcon_id, state);
+		if (ret)
+			dev_err(dev, "%s: Failed to set extcon gpio state\n",
+					__func__);
+	}
 
-	return 0;
+	return ret;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
+static const struct dev_pm_ops gpio_extcon_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
+};
+
+#define EXTCON_GPIO_PMOPS (&gpio_extcon_pm_ops)
+
+#else
+#define EXTCON_GPIO_PMOPS NULL
+#endif
 
 static const struct of_device_id extcon_gpio_of_match[] = {
 	{ .compatible = "extcon-gpio"},
@@ -251,7 +265,7 @@ static struct platform_driver gpio_extcon_driver = {
 	.remove		= gpio_extcon_remove,
 	.driver		= {
 		.name	= "extcon-gpio",
-		.pm	= &gpio_extcon_pm_ops,
+		.pm	= EXTCON_GPIO_PMOPS,
 		.of_match_table = of_match_ptr(extcon_gpio_of_match),
 	},
 };
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d984509..818d399 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,8 @@
 				   -D__NO_FORTIFY \
 				   $(call cc-option,-ffreestanding) \
 				   $(call cc-option,-fno-stack-protector) \
-				   -D__DISABLE_EXPORTS
+				   -D__DISABLE_EXPORTS \
+				   $(DISABLE_LTO)
 
 GCOV_PROFILE			:= n
 KASAN_SANITIZE			:= n
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index b0aeffd..1606abe 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -95,7 +95,7 @@ struct efi_runtime_work {
 	efi_rts_work.status = EFI_ABORTED;				\
 									\
 	init_completion(&efi_rts_work.efi_rts_comp);			\
-	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	INIT_WORK(&efi_rts_work.work, efi_call_rts);			\
 	efi_rts_work.arg1 = _arg1;					\
 	efi_rts_work.arg2 = _arg2;					\
 	efi_rts_work.arg3 = _arg3;					\
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 0863996..77d2390 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
 				PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
 
 static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
 
 static inline bool psci_has_ext_power_state(void)
 {
@@ -253,7 +254,17 @@ static int get_set_conduit_method(struct device_node *np)
 
 static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
 {
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+	if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+	    psci_system_reset2_supported) {
+		/*
+		 * reset_type[31] = 0 (architectural)
+		 * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+		 * cookie = 0 (ignored by the implementation)
+		 */
+		invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+	} else {
+		invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+	}
 }
 
 static void psci_sys_poweroff(void)
@@ -448,6 +459,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
 	.enter          = psci_system_suspend_enter,
 };
 
+static void __init psci_init_system_reset2(void)
+{
+	int ret;
+
+	ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+	if (ret != PSCI_RET_NOT_SUPPORTED)
+		psci_system_reset2_supported = true;
+}
+
 static void __init psci_init_system_suspend(void)
 {
 	int ret;
@@ -585,6 +606,7 @@ static int __init psci_probe(void)
 		psci_init_smccc();
 		psci_init_cpu_suspend();
 		psci_init_system_suspend();
+		psci_init_system_reset2();
 	}
 
 	return 0;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2342e15..b696ec3 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1225,6 +1225,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
 	gpio->offset_timer =
 		devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+	if (!gpio->offset_timer)
+		return -ENOMEM;
 
 	return aspeed_gpio_setup_irqs(gpio, pdev);
 }
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e41223c..6cf2e2c 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
 		case IRQ_TYPE_EDGE_BOTH:
+			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 2d1dfa1..e86e61d 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -438,8 +438,11 @@ static int mxc_gpio_probe(struct platform_device *pdev)
 
 	/* the controller clock is optional */
 	port->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(port->clk))
+	if (IS_ERR(port->clk)) {
+		if (PTR_ERR(port->clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
 		port->clk = NULL;
+	}
 
 	err = clk_prepare_enable(port->clk);
 	if (err) {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d4e7a09..e0f149b 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -646,7 +646,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
 	of_node_get(chip->of_node);
 
-	return of_gpiochip_scan_gpios(chip);
+	status = of_gpiochip_scan_gpios(chip);
+	if (status) {
+		of_node_put(chip->of_node);
+		gpiochip_remove_pin_ranges(chip);
+	}
+
+	return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7056925..869ff62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct dma_fence *old, **ptr;
+	struct dma_fence __rcu **ptr;
 	uint32_t seq;
+	int r;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+		struct dma_fence *old;
+
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(ptr);
+		rcu_read_unlock();
+
+		if (old) {
+			r = dma_fence_wait(old, false);
+			dma_fence_put(old);
+			if (r)
+				return r;
+		}
+	}
+
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !dma_fence_is_signaled(old)) {
-		DRM_INFO("rcu slot is busy\n");
-		dma_fence_wait(old, false);
-	}
-
 	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e70a0d4..c963eec 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -164,6 +164,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
 				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
 	}
+	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 
 	tmp = mmVM_L2_CNTL4_DEFAULT;
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2b8b892..dac7978 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4028,6 +4028,7 @@ static void handle_cursor_update(struct drm_plane *plane,
 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
 
+	memset(&attributes, 0, sizeof(attributes));
 	attributes.address.high_part = upper_32_bits(address);
 	attributes.address.low_part  = lower_32_bits(address);
 	attributes.width             = plane->state->crtc_w;
@@ -4368,8 +4369,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
 						struct dc_stream_state *stream_state)
 {
-	stream_state->mode_changed =
-		crtc_state->mode_changed || crtc_state->active_changed;
+	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 }
 
 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332e..e4a8b33 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -32,6 +32,12 @@
 
 calcs_ccflags := -mhard-float -msse $(cc_stack_align)
 
+# Use -msse2 only with clang:
+#   https://bugs.freedesktop.org/show_bug.cgi?id=109487
+ifdef CONFIG_CC_IS_CLANG
+calcs_cc_flags += -msse2
+endif
+
 CFLAGS_dcn_calcs.o := $(calcs_ccflags)
 CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
 CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index bb0cda7..e3f5e5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1213,6 +1213,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 		return UPDATE_TYPE_FULL;
 	}
 
+	if (u->surface->force_full_update) {
+		update_flags->bits.full_update = 1;
+		return UPDATE_TYPE_FULL;
+	}
+
 	type = get_plane_info_update_type(u);
 	elevate_update_type(&overall_type, type);
 
@@ -1467,6 +1472,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		}
 
 		dc_resource_state_copy_construct(state, context);
+
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+				new_pipe->plane_state->force_full_update = true;
+		}
 	}
 
 
@@ -1510,6 +1523,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		dc->current_state = context;
 		dc_release_state(old);
 
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+				pipe_ctx->plane_state->force_full_update = false;
+		}
 	}
 	/*let's use current_state to update watermark etc*/
 	if (update_type >= UPDATE_TYPE_FULL)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 87bf422..e0a96ab 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1401,10 +1401,12 @@ bool dc_remove_plane_from_context(
 			 * For head pipe detach surfaces from pipe for tail
 			 * pipe just zero it out
 			 */
-			if (!pipe_ctx->top_pipe) {
+			if (!pipe_ctx->top_pipe ||
+				(!pipe_ctx->top_pipe->top_pipe &&
+					pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
 				pipe_ctx->plane_state = NULL;
 				pipe_ctx->bottom_pipe = NULL;
-			} else  {
+			} else {
 				memset(pipe_ctx, 0, sizeof(*pipe_ctx));
 			}
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 6c9990b..4094b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -505,6 +505,9 @@ struct dc_plane_state {
 	struct dc_plane_status status;
 	struct dc_context *ctx;
 
+	/* HACK: Workaround for forcing full reprogramming under some conditions */
+	bool force_full_update;
+
 	/* private to dc_surface.c */
 	enum dc_irq_source irq_source;
 	struct kref refcount;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 3f5b2e6..df936ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -189,6 +189,12 @@ static void submit_channel_request(
 				1,
 				0);
 	}
+
+	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+	REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+				10, aux110->timeout_period/10);
+
 	/* set the delay and the number of bytes to write */
 
 	/* The length include
@@ -241,9 +247,6 @@ static void submit_channel_request(
 		}
 	}
 
-	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
-	REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
-				10, aux110->timeout_period/10);
 	REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index f7caab8..2c6f50b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -69,11 +69,11 @@ enum {	/* This is the timeout as defined in DP 1.2a,
 	 * at most within ~240usec. That means,
 	 * increasing this timeout will not affect normal operation,
 	 * and we'll timeout after
-	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
 	 * This timeout is especially important for
-	 * resume from S3 and CTS.
+	 * converters, resume from S3, and CTS.
 	 */
-	SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+	SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
 };
 struct aux_engine_dce110 {
 	struct aux_engine base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 4a863a5d..321af9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
 		int *num_part_y,
 		int *num_part_c)
 {
+	int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
+	lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
+
 	int line_size = scl_data->viewport.width < scl_data->recout.width ?
 			scl_data->viewport.width : scl_data->recout.width;
 	int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
 			scl_data->viewport_c.width : scl_data->recout.width;
-	int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
-	int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
-	int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
-	int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
-	int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+	if (line_size == 0)
+		line_size = 1;
+
+	if (line_size_c == 0)
+		line_size_c = 1;
+
+
+	lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+	memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+	memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+	memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
 
 	if (lb_config == LB_MEMORY_CONFIG_1) {
 		lb_memory_size = 816;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index d97ca65..648c80c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -32,6 +32,12 @@
 
 dml_ccflags := -mhard-float -msse $(cc_stack_align)
 
+# Use -msse2 only with clang:
+#   https://bugs.freedesktop.org/show_bug.cgi?id=109487
+ifdef CONFIG_CC_IS_CLANG
+dml_ccflags += -msse2
+endif
+
 CFLAGS_display_mode_lib.o := $(dml_ccflags)
 CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
 CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 281cf9c..86026a5 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1410,6 +1410,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
 			return -EINVAL;
 		}
 		state->content_protection = val;
+	} else if (property == connector->colorspace_property) {
+		state->colorspace = val;
 	} else if (property == config->writeback_fb_id_property) {
 		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
 		int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
@@ -1507,6 +1509,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
 		*val = state->picture_aspect_ratio;
 	} else if (property == config->content_type_property) {
 		*val = state->content_type;
+	} else if (property == connector->colorspace_property) {
+		*val = state->colorspace;
 	} else if (property == connector->scaling_mode_property) {
 		*val = state->scaling_mode;
 	} else if (property == connector->content_protection_property) {
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 6011d76..7300adb 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -805,6 +805,55 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
 };
 DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
 
+static const struct drm_prop_enum_list hdmi_colorspaces[] = {
+	/* For Default case, driver will set the colorspace */
+	{ DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+	/* Standard Definition Colorimetry based on CEA 861 */
+	{ DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
+	{ DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+	/* Standard Definition Colorimetry based on IEC 61966-2-4 */
+	{ DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+	/* High Definition Colorimetry based on IEC 61966-2-4 */
+	{ DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+	/* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+	{ DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+	/* Colorimetry based on IEC 61966-2-5 [33] */
+	{ DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+	/* Colorimetry based on IEC 61966-2-5 */
+	{ DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+	/* Colorimetry based on ITU-R BT.2020 */
+	{ DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+	/* Colorimetry based on ITU-R BT.2020 */
+	{ DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+	/* Colorimetry based on ITU-R BT.2020 */
+	{ DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+	/* Added as part of Additional Colorimetry Extension in 861.G */
+	{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+	{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
+};
+
+static const struct drm_prop_enum_list dp_colorspaces[] = {
+	/* For Default case, driver will set the colorspace */
+	{ DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+	/* Standard Definition Colorimetry based on IEC 61966-2-4 */
+	{ DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+	/* High Definition Colorimetry based on IEC 61966-2-4 */
+	{ DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+	/* Colorimetry based on IEC 61966-2-5 */
+	{ DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+	{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+	/* DP MSA Colorimetry */
+	{ DRM_MODE_DP_COLORIMETRY_BT601_YCC, "BT601_YCC" },
+	{ DRM_MODE_DP_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+	{ DRM_MODE_DP_COLORIMETRY_SRGB, "sRGB" },
+	{ DRM_MODE_DP_COLORIMETRY_RGB_WIDE_GAMUT, "RGB Wide Gamut" },
+	{ DRM_MODE_DP_COLORIMETRY_SCRGB, "scRGB" },
+	/* Colorimetry based on ITU-R BT.2020 */
+	{ DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+	/* Colorimetry based on ITU-R BT.2020 */
+	{ DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+};
+
 /**
  * DOC: standard connector properties
  *
@@ -1374,6 +1423,65 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
 
 /**
+ * DOC: standard connector properties
+ *
+ * Colorspace:
+ *     drm_mode_create_colorspace_property - create colorspace property
+ *     This property helps select a suitable colorspace based on the sink
+ *     capability. Modern sink devices support wider gamut like BT2020.
+ *     This helps switch to BT2020 mode if the BT2020 encoded video stream
+ *     is being played by the user, same for any other colorspace. Thereby
+ *     giving a good visual experience to users.
+ *
+ *     The expectation from userspace is that it should parse the EDID
+ *     and get supported colorspaces. Use this property and switch to the
+ *     one supported. Sink supported colorspaces should be retrieved by
+ *     userspace from EDID and driver will not explicitly expose them.
+ *
+ *     Basically the expectation from userspace is:
+ *      - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
+ *        colorspace
+ *      - Set this new property to let the sink know what it
+ *        converted the CRTC output to.
+ *      - This property is just to inform sink what colorspace
+ *        source is trying to drive.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_colorspace_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+						"Colorspace",
+						hdmi_colorspaces,
+						ARRAY_SIZE(hdmi_colorspaces));
+		if (!prop)
+			return -ENOMEM;
+	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+						"Colorspace", dp_colorspaces,
+						ARRAY_SIZE(dp_colorspaces));
+
+		if (!prop)
+			return -ENOMEM;
+	} else {
+		DRM_DEBUG_KMS("Colorspace property not supported\n");
+		return 0;
+	}
+
+	connector->colorspace_property = prop;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+
+/**
  * drm_mode_create_content_type_property - create content type property
  * @dev: DRM device
  *
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 06d051b..751e358 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -486,6 +486,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
 {
 	int idx = 1;
 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+	repmsg->u.path_resources.fec_capability = (raw->msg[idx]) & 0x1;
 	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
@@ -1693,9 +1694,14 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 		else {
 			if (port->port_num != txmsg->reply.u.path_resources.port_number)
 				DRM_ERROR("got incorrect port in response\n");
-			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
-			       txmsg->reply.u.path_resources.avail_payload_bw_number);
+			DRM_DEBUG_KMS("enum path resources %d: %d %d %d\n",
+			txmsg->reply.u.path_resources.port_number,
+			txmsg->reply.u.path_resources.fec_capability,
+			txmsg->reply.u.path_resources.full_payload_bw_number,
+			txmsg->reply.u.path_resources.avail_payload_bw_number);
 			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+			port->fec_capability =
+				txmsg->reply.u.path_resources.fec_capability;
 		}
 	}
 
@@ -1850,6 +1856,42 @@ static int drm_dp_send_clear_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 	return ret;
 }
 
+int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port,
+		struct drm_dp_mst_dsc_info *dsc_info)
+{
+	if (!dsc_info)
+		return -EINVAL;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
+	memcpy(dsc_info, &port->dsc_info, sizeof(struct drm_dp_mst_dsc_info));
+	drm_dp_put_port(port);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_dsc_info);
+
+int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port,
+		struct drm_dp_mst_dsc_info *dsc_info)
+{
+	if (!dsc_info)
+		return -EINVAL;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
+	memcpy(&port->dsc_info, dsc_info, sizeof(struct drm_dp_mst_dsc_info));
+	drm_dp_put_port(port);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_update_dsc_info);
+
 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
 				       int id,
 				       struct drm_dp_payload *payload)
@@ -2122,6 +2164,21 @@ int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 }
 EXPORT_SYMBOL(drm_dp_send_dpcd_write);
 
+int drm_dp_mst_get_max_sdp_streams_supported(
+		struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port)
+{
+	int ret = -1;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return ret;
+	ret = port->num_sdp_streams;
+	drm_dp_put_port(port);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_max_sdp_streams_supported);
+
 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
 {
 	struct drm_dp_sideband_msg_reply_body reply;
@@ -2194,6 +2251,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 {
 	int ret = 0;
 	struct drm_dp_mst_branch *mstb = NULL;
+	u8 buf;
+	u32 offset = DP_DPCD_REV;
 
 	mutex_lock(&mgr->lock);
 	if (mst_state == mgr->mst_state)
@@ -2204,8 +2263,21 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 	if (mst_state) {
 		WARN_ON(mgr->mst_primary);
 
+		ret = drm_dp_dpcd_read(mgr->aux, DP_TRAINING_AUX_RD_INTERVAL,
+				&buf, 1);
+		if (ret != 1) {
+			DRM_DEBUG_KMS("failed to read aux rd interval\n");
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+		if (buf & BIT(7))
+			offset = DP_DP13_DPCD_REV;
+
 		/* get dpcd info */
-		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+		ret = drm_dp_dpcd_read(mgr->aux, offset, mgr->dpcd,
+				DP_RECEIVER_CAP_SIZE);
 		if (ret != DP_RECEIVER_CAP_SIZE) {
 			DRM_DEBUG_KMS("failed to read DPCD\n");
 			goto out_unlock;
@@ -2624,6 +2696,20 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
 }
 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
 
+bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port)
+{
+	bool ret = false;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return ret;
+	ret = port->fec_capability;
+	drm_dp_put_port(port);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_has_fec);
+
 /**
  * drm_dp_mst_get_edid() - get EDID for an MST port
  * @connector: toplevel connector to get EDID for
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ea4941d..d8ae4ca 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -381,11 +381,7 @@ void drm_dev_unplug(struct drm_device *dev)
 	synchronize_srcu(&drm_unplug_srcu);
 
 	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
+	drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
@@ -503,7 +499,7 @@ int drm_dev_init(struct drm_device *dev,
 	}
 
 	kref_init(&dev->ref);
-	dev->dev = parent;
+	dev->dev = get_device(parent);
 	dev->driver = driver;
 
 	INIT_LIST_HEAD(&dev->filelist);
@@ -572,6 +568,7 @@ int drm_dev_init(struct drm_device *dev,
 	drm_minor_free(dev, DRM_MINOR_RENDER);
 	drm_fs_inode_free(dev->anon_inode);
 err_free:
+	put_device(dev->dev);
 	mutex_destroy(&dev->master_mutex);
 	mutex_destroy(&dev->ctxlist_mutex);
 	mutex_destroy(&dev->clientlist_mutex);
@@ -607,6 +604,8 @@ void drm_dev_fini(struct drm_device *dev)
 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
 
+	put_device(dev->dev);
+
 	mutex_destroy(&dev->master_mutex);
 	mutex_destroy(&dev->ctxlist_mutex);
 	mutex_destroy(&dev->clientlist_mutex);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5de78bc..5c5a463 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2848,6 +2848,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VSVDB_HDR10_PLUS_IEEE_CODE 0x90848b
 #define VSVDB_HDR10_PLUS_APP_VER_MASK 0x3
 #define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05
 #define USE_EXTENDED_TAG 0x07
 #define EXT_VIDEO_CAPABILITY_BLOCK 0x00
 #define EXT_VIDEO_DATA_BLOCK_420	0x0E
@@ -3907,6 +3908,53 @@ u32 block_length, enum luminance_value value)
 }
 
 /*
+ * drm_extract_clrmetry_db - Parse the HDMI colorimetry extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI colorimetry extended block
+ *
+ * Parses the HDMI colorimetry block to extract sink info for @connector.
+ */
+static void
+drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db)
+{
+
+	if (!db) {
+		DRM_ERROR("invalid db\n");
+		return;
+	}
+
+	/* Byte 3 Bit 0: xvYCC_601 */
+	if (db[2] & BIT(0))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_xvYCC_601;
+	/* Byte 3 Bit 1: xvYCC_709 */
+	if (db[2] & BIT(1))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_xvYCC_709;
+	/* Byte 3 Bit 2: sYCC_601 */
+	if (db[2] & BIT(2))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_sYCC_601;
+	/* Byte 3 Bit 3: ADBYCC_601 */
+	if (db[2] & BIT(3))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_ADBYCC_601;
+	/* Byte 3 Bit 4: ADB_RGB */
+	if (db[2] & BIT(4))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_ADB_RGB;
+	/* Byte 3 Bit 5: BT2020_CYCC */
+	if (db[2] & BIT(5))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_CYCC;
+	/* Byte 3 Bit 6: BT2020_YCC */
+	if (db[2] & BIT(6))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_YCC;
+	/* Byte 3 Bit 7: BT2020_RGB */
+	if (db[2] & BIT(7))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_RGB;
+	/* Byte 4 Bit 7: DCI-P3 */
+	if (db[3] & BIT(7))
+		connector->color_enc_fmt |= DRM_EDID_CLRMETRY_DCI_P3;
+
+	DRM_DEBUG_KMS("colorimetry fmts = 0x%x\n", connector->color_enc_fmt);
+}
+
+/*
  * drm_extract_hdr_db - Parse the HDMI HDR extended block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the HDMI HDR extended block
@@ -3987,6 +4035,9 @@ drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
 				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
 					drm_extract_hdr_db(connector, db);
 					break;
+				case COLORIMETRY_EXTENDED_DATA_BLOCK:
+					drm_extract_clrmetry_db(connector, db);
+					break;
 				default:
 					break;
 				}
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index d4147cd..5f278c6 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -486,11 +486,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	drm_file_free(file_priv);
 
-	if (!--dev->open_count) {
+	if (!--dev->open_count)
 		drm_lastclose(dev);
-		if (drm_dev_is_unplugged(dev))
-			drm_put_dev(dev);
-	}
+
 	mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
@@ -576,6 +574,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
 				file_priv->event_space -= length;
 				list_add(&e->link, &file_priv->event_list);
 				spin_unlock_irq(&dev->event_lock);
+				wake_up_interruptible(&file_priv->event_wait);
 				break;
 			}
 
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 1d9a9d2..a561ad1 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -48,6 +48,7 @@ static LIST_HEAD(panel_list);
 void drm_panel_init(struct drm_panel *panel)
 {
 	INIT_LIST_HEAD(&panel->list);
+	BLOCKING_INIT_NOTIFIER_HEAD(&panel->nh);
 }
 EXPORT_SYMBOL(drm_panel_init);
 
@@ -169,6 +170,27 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
 EXPORT_SYMBOL(of_drm_find_panel);
 #endif
 
+int drm_panel_notifier_register(struct drm_panel *panel,
+	struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&panel->nh, nb);
+}
+EXPORT_SYMBOL(drm_panel_notifier_register);
+
+int drm_panel_notifier_unregister(struct drm_panel *panel,
+	struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&panel->nh, nb);
+}
+EXPORT_SYMBOL(drm_panel_notifier_unregister);
+
+int drm_panel_notifier_call_chain(struct drm_panel *panel,
+	unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(&panel->nh, val, v);
+}
+EXPORT_SYMBOL(drm_panel_notifier_call_chain);
+
 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
 MODULE_DESCRIPTION("DRM panel infrastructure");
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 83c1f46..00675fc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -527,6 +527,9 @@ static int etnaviv_bind(struct device *dev)
 	}
 	drm->dev_private = priv;
 
+	dev->dma_parms = &priv->dma_parms;
+	dma_set_max_seg_size(dev, SZ_2G);
+
 	mutex_init(&priv->gem_lock);
 	INIT_LIST_HEAD(&priv->gem_list);
 	priv->num_gpus = 0;
@@ -564,6 +567,8 @@ static void etnaviv_unbind(struct device *dev)
 
 	component_unbind_all(dev, drm);
 
+	dev->dma_parms = NULL;
+
 	drm->dev_private = NULL;
 	kfree(priv);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8d02d1b..b2930d1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -43,6 +43,7 @@ struct etnaviv_file_private {
 
 struct etnaviv_drm_private {
 	int num_gpus;
+	struct device_dma_parameters dma_parms;
 	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
 
 	/* list of GEM objects: */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd25..8469be2 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -120,12 +120,6 @@ static inline u64 ptr_to_u64(const void *ptr)
 
 #include <linux/list.h>
 
-static inline int list_is_first(const struct list_head *list,
-				const struct list_head *head)
-{
-	return head->next == list;
-}
-
 static inline void __list_del_many(struct list_head *head,
 				   struct list_head *first)
 {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f91634..2d6506c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 				    bool *enabled, int width, int height)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+	unsigned long conn_configured, conn_seq, mask;
 	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
-	unsigned long conn_configured, conn_seq;
 	int i, j;
 	bool *save_enabled;
 	bool fallback = true, ret = true;
@@ -353,9 +353,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 		drm_modeset_backoff(&ctx);
 
 	memcpy(save_enabled, enabled, count);
-	conn_seq = GENMASK(count - 1, 0);
+	mask = GENMASK(count - 1, 0);
 	conn_configured = 0;
 retry:
+	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
 		struct drm_fb_helper_connector *fb_conn;
 		struct drm_connector *connector;
@@ -368,8 +369,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 		if (conn_configured & BIT(i))
 			continue;
 
-		/* First pass, only consider tiled connectors */
-		if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
+		if (conn_seq == 0 && !connector->has_tile)
 			continue;
 
 		if (connector->status == connector_status_connected)
@@ -473,10 +473,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 		conn_configured |= BIT(i);
 	}
 
-	if (conn_configured != conn_seq) { /* repeat until no more are found */
-		conn_seq = conn_configured;
+	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
 		goto retry;
-	}
 
 	/*
 	 * If the BIOS didn't enable everything it could, fall back to have the
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710..11e2dcd 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -78,7 +78,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
 	if (disable_partial)
 		ipu_plane_disable(ipu_crtc->plane[1], true);
 	if (disable_full)
-		ipu_plane_disable(ipu_crtc->plane[0], false);
+		ipu_plane_disable(ipu_crtc->plane[0], true);
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 643f5ed..62444a3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1473,7 +1473,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 	if (IS_ERR(regmap))
 		ret = PTR_ERR(regmap);
 	if (ret) {
-		ret = PTR_ERR(regmap);
 		dev_err(dev,
 			"Failed to get system configuration registers: %d\n",
 			ret);
@@ -1509,6 +1508,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 	of_node_put(remote);
 
 	hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+	of_node_put(i2c_np);
 	if (!hdmi->ddc_adpt) {
 		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 611ac34..588b3b0c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -300,10 +300,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 	ret = drm_dev_register(drm, 0);
 	if (ret)
-		goto free_drm;
+		goto uninstall_irq;
 
 	return 0;
 
+uninstall_irq:
+	drm_irq_uninstall(drm);
 free_drm:
 	drm_dev_put(drm);
 
@@ -317,10 +319,11 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct meson_drm *priv = drm->dev_private;
+	struct meson_drm *priv = dev_get_drvdata(dev);
+	struct drm_device *drm = priv->drm;
 
 	drm_dev_unregister(drm);
+	drm_irq_uninstall(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_fbdev_cma_fini(priv->fbdev);
 	drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 0570030..843a9d4 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,39 +5,21 @@
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
+	select QCOM_MDT_LOADER if ARCH_QCOM
+	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select SHMEM
+	select TMPFS
+	select QCOM_SCM
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
-	select MSM_EXT_DISPLAY
 	select PM_OPP
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
 
-config DRM_MSM_SDE
-	bool "SDE in Display Driver"
-	depends on DRM_MSM
-	default y
-	help
-	  Enable this option to support display driver
-	  compilation. Choosing this option will compile
-	  SDE folder specifically. Display Port and
-	  DSI-Staging compilation are also dependent on
-	  this option.
-
-config DRM_MSM_DP
-	bool "Enable Display Port"
-	depends on DRM_MSM_SDE && USB_PD
-	default n
-	help
-	  This option enables compilation of Display Port
-	  (DP) folders. In order to compile Display Port
-	  files, DRM_MSM_SDE option also needs to be
-	  enabled.
-
 config DRM_MSM_REGISTER_LOGGING
 	bool "MSM DRM register logging"
 	depends on DRM_MSM
@@ -63,55 +45,20 @@
 config DRM_MSM_HDMI_HDCP
 	bool "Enable HDMI HDCP support in MSM DRM driver"
 	depends on DRM_MSM && QCOM_SCM
-	default n
+	default y
 	help
-	  Compile in support for logging register reads/writes ina  format
-	  that can be parsed by envy tools demsm tool.  If enabled, register
-	  logging can be switched on via msm.reglog=y module param.
-
-config DRM_MSM_HDMI
-	bool "Enable HDMI support in MSM DRM driver"
-	depends on DRM_MSM
-	depends on COMMON_CLK
-	default n
-	help
-	  Compile in support for HDMI driver in msm drm
-	  driver. HDMI external display support is enabled
-	  through this config option. It can be primary or
-	  secondary display on device.
+	  Choose this option to enable HDCP state machine
 
 config DRM_MSM_DSI
 	bool "Enable DSI support in MSM DRM driver"
 	depends on DRM_MSM
 	select DRM_PANEL
 	select DRM_MIPI_DSI
-	default n
+	default y
 	help
 	  Choose this option if you have a need for MIPI DSI connector
 	  support.
 
-config DRM_MSM_DSI_STAGING
-	bool "Enable new DSI driver support in MSM DRM driver"
-	depends on DRM_MSM && DRM_MSM_SDE
-	select DRM_PANEL
-	select DRM_MIPI_DSI
-	default y
-	help
-	  Choose this option if you need MIPI DSI connector support on MSM
-	  which conforms to DRM. MIPI stands for Mobile Industry Processor
-	  Interface and DSI stands for Display Serial Interface which powers
-	  the primary display of your mobile device.
-
-config DSI_PARSER
-	bool "Enable DSI panel configuration parser"
-	depends on DYNAMIC_DEBUG && DRM_MSM_DSI_STAGING
-	default y
-	help
-	  Choose this option if you need text parser for a DSI panel
-	  configurations which can parse a given text file and get the
-	  panel configurations. Also, this module provides a set of APIs
-	  which can be used to get the parsed data.
-
 config DRM_MSM_DSI_PLL
 	bool "Enable DSI PLL driver in MSM DRM"
 	depends on DRM_MSM_DSI && COMMON_CLK
@@ -120,16 +67,6 @@
 	  Choose this option to enable DSI PLL driver which provides DSI
 	  source clocks under common clock framework.
 
-config DRM_SDE_WB
-	bool "Enable Writeback support in SDE DRM"
-	depends on DRM_MSM && DRM_MSM_SDE
-	default y
-	help
-	  Choose this option for writeback connector support.
-	  This option enables a virtual writeback connector where
-	  the output image is written back to memory in the format
-	  selected by the connector's mode and property settings.
-
 config DRM_MSM_DSI_28NM_PHY
 	bool "Enable DSI 28nm PHY driver in MSM DRM"
 	depends on DRM_MSM_DSI
@@ -165,54 +102,3 @@
 	default y
 	help
 	  Choose this option if DSI PHY on SDM845 is used on the platform.
-
-config DRM_MSM_MDP5
-	tristate "MSM MDP5 DRM driver"
-	depends on DRM_MSM
-	default n
-	help
-	  Choose this option if MSM MDP5 revision support is
-	  needed in DRM/KMS. This is not required if sde/mdp4
-	  only target enabled. MDP5 supports DSI and HDMI
-	  displays.
-
-config DRM_MSM_MDP4
-	tristate "MSM MDP4 DRM driver"
-	depends on DRM_MSM
-	default n
-	help
-	  Choose this option if MSM MDP4 revision support is needed in DRM/KMS.
-	  MSM MDP4 DRM driver should be disabled for other MDP revisions to
-	  avoid possible conflicts.  Only select this option if the target
-	  MSM platform is MDP4 based.
-
-config DRM_MSM_HDCP
-	tristate "HDCP for MSM DRM"
-	depends on DRM_MSM
-	default n
-	help
-	  Choose this option if High-bandwidth Digital Content Protection (HDCP)
-	  support is needed in the MSM DRM/KMS driver.  Enabling this config
-	  will allow HDCP encrypted (copy protected) media to be displayed.
-	  This should only be enabled if required for platforms supporting HDCP
-	  over the desired interface.
-
-config DRM_SDE_WB
-	bool "Enable Writeback support in SDE DRM"
-	depends on DRM_MSM && DRM_MSM_SDE
-	default y
-	help
-	  Choose this option for writeback connector support.
-	  This option enables a virtual writeback connector where
-	  the output image is written back to memory in the format
-	  selected by the connector's mode and property settings.
-
-config DRM_SDE_RSC
-	bool "Enable sde resource state coordinator(rsc) driver"
-	depends on DRM_MSM && DRM_MSM_SDE
-	help
-	  The SDE DRM RSC provides display Resource State Coordinator support
-	  to vote the ab/ib bandwidth for primary display. Each rsc client
-	  can vote their active state. Any active request from any client
-	  avoids the display core power collapse. A client can also register
-	  for display core power collapse events on rsc.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index a76d904..9688c38 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,58 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
-ccflags-y += -Idrivers/gpu/drm/msm/display-manager
+ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
-ccflags-y += -Idrivers/gpu/drm/msm/sde
-ccflags-y += -Idrivers/media/platform/msm/sde/rotator
-ccflags-y += -Idrivers/gpu/drm/msm/hdmi
 
-msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_usbpd.o \
-	dp/dp_parser.o \
-	dp/dp_power.o \
-	dp/dp_catalog.o \
-	dp/dp_catalog_v420.o \
-	dp/dp_catalog_v200.o \
-	dp/dp_aux.o \
-	dp/dp_panel.o \
-	dp/dp_link.o \
-	dp/dp_ctrl.o \
-	dp/dp_audio.o \
-	dp/dp_debug.o \
-	dp/dp_hpd.o \
-	dp/dp_gpio_hpd.o \
-	dp/dp_lphw_hpd.o \
-	dp/dp_display.o \
-	dp/dp_drm.o \
-	dp/dp_hdcp2p2.o \
-	dp/dp_mst_drm.o
-
-msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \
-	sde/sde_encoder.o \
-	sde/sde_encoder_phys_vid.o \
-	sde/sde_encoder_phys_cmd.o \
-	sde/sde_irq.o \
-	sde/sde_core_irq.o \
-	sde/sde_core_perf.o \
-	sde/sde_rm.o \
-	sde/sde_kms_utils.o \
-	sde/sde_kms.o \
-	sde/sde_plane.o \
-	sde/sde_connector.o \
-	sde/sde_color_processing.o \
-	sde/sde_vbif.o \
-	sde_dbg.o \
-	sde_dbg_evtlog.o \
-	sde_io_util.o \
-	sde/sde_hw_reg_dma_v1_color_proc.o \
-	sde/sde_hw_color_proc_v4.o \
-	sde/sde_hw_ad4.o \
-	sde/sde_hw_uidle.o \
-	sde_edid_parser.o \
-	sde_hdcp_1x.o \
-	sde_hdcp_2x.o
-
-msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi.o \
+msm-y := \
+	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
@@ -61,16 +13,21 @@
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
 	hdmi/hdmi_phy_8x74.o \
-
-msm_drm-$(CONFIG_DRM_MSM_EDP) += edp/edp.o \
+	edp/edp.o \
 	edp/edp_aux.o \
 	edp/edp_bridge.o \
 	edp/edp_connector.o \
 	edp/edp_ctrl.o \
 	edp/edp_phy.o \
-
-msm_drm-$(CONFIG_DRM_MSM_MDP5) += disp/mdp_format.o \
+	disp/mdp_format.o \
 	disp/mdp_kms.o \
+	disp/mdp4/mdp4_crtc.o \
+	disp/mdp4/mdp4_dtv_encoder.o \
+	disp/mdp4/mdp4_lcdc_encoder.o \
+	disp/mdp4/mdp4_lvds_connector.o \
+	disp/mdp4/mdp4_irq.o \
+	disp/mdp4/mdp4_kms.o \
+	disp/mdp4/mdp4_plane.o \
 	disp/mdp5/mdp5_cfg.o \
 	disp/mdp5/mdp5_ctl.o \
 	disp/mdp5/mdp5_crtc.o \
@@ -82,118 +39,33 @@
 	disp/mdp5/mdp5_mixer.o \
 	disp/mdp5/mdp5_plane.o \
 	disp/mdp5/mdp5_smp.o \
-
-msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
-	sde_rsc_hw.o \
-	sde_rsc_hw_v3.o \
-
-# use drm gpu driver only if qcom_kgsl driver not available
-ifneq ($(CONFIG_QCOM_KGSL),y)
-msm_drm-y += adreno/adreno_device.o \
-	adreno/adreno_gpu.o \
-	adreno/a3xx_gpu.o \
-	adreno/a4xx_gpu.o \
-	adreno/a5xx_gpu.o \
-	adreno/a5xx_power.o \
-	adreno/a5xx_preempt.o \
-	adreno/a6xx_gpu.o \
-	adreno/a6xx_gmu.o \
-	adreno/a6xx_hfi.o
-endif
-
-msm_drm-$(CONFIG_DRM_MSM_MDP4) += disp/mdp4/mdp4_crtc.o \
-	disp/mdp4/mdp4_dtv_encoder.o \
-	disp/mdp4/mdp4_lcdc_encoder.o \
-	disp/mdp4/mdp4_lvds_connector.o \
-	disp/mdp4/mdp4_irq.o \
-	disp/mdp4/mdp4_kms.o \
-	disp/mdp4/mdp4_plane.o
-
-msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-ifeq ($(CONFIG_DRM_MSM_SDE),y)
-msm_drm-$(CONFIG_SYNC_FILE) += sde/sde_fence.o
-endif
-msm_drm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
-
-msm_drm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
-
-msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
-			disp/mdp4/mdp4_dsi_encoder.o \
-			dsi/dsi_cfg.o \
-			dsi/dsi_host.o \
-			dsi/dsi_manager.o \
-			dsi/phy/dsi_phy.o \
-			disp/mdp5/mdp5_cmd_encoder.o
-
-msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
-
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm_drm-y += dsi/pll/dsi_pll.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
-msm_drm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
-endif
-msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
-				dsi-staging/dsi_pwr.o \
-				dsi-staging/dsi_phy.o \
-				dsi-staging/dsi_phy_hw_v2_0.o \
-				dsi-staging/dsi_phy_hw_v3_0.o \
-				dsi-staging/dsi_phy_hw_v4_0.o \
-				dsi-staging/dsi_phy_timing_calc.o \
-				dsi-staging/dsi_phy_timing_v2_0.o \
-				dsi-staging/dsi_phy_timing_v3_0.o \
-				dsi-staging/dsi_phy_timing_v4_0.o \
-				dsi-staging/dsi_ctrl_hw_cmn.o \
-				dsi-staging/dsi_ctrl_hw_1_4.o \
-				dsi-staging/dsi_ctrl_hw_2_0.o \
-				dsi-staging/dsi_ctrl_hw_2_2.o \
-				dsi-staging/dsi_ctrl.o \
-				dsi-staging/dsi_catalog.o \
-				dsi-staging/dsi_drm.o \
-				dsi-staging/dsi_display.o \
-				dsi-staging/dsi_panel.o \
-				dsi-staging/dsi_clk_manager.o \
-				dsi-staging/dsi_display_test.o
-
-msm_drm-$(CONFIG_DSI_PARSER) += dsi-staging/dsi_parser.o
-
-msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
-				dsi/pll/dsi_pll_28nm.o
-
-msm_drm-$(CONFIG_DRM_MSM_SDE) += \
-	sde/sde_hw_catalog.o \
-	sde/sde_hw_cdm.o \
-	sde/sde_hw_dspp.o \
-	sde/sde_hw_intf.o \
-	sde/sde_hw_lm.o \
-	sde/sde_hw_ctl.o \
-	sde/sde_hw_util.o \
-	sde/sde_hw_sspp.o \
-	sde/sde_hw_wb.o \
-	sde/sde_hw_pingpong.o \
-	sde/sde_hw_top.o \
-	sde/sde_hw_interrupts.o \
-	sde/sde_hw_vbif.o \
-	sde/sde_hw_blk.o \
-	sde/sde_formats.o \
-	sde_power_handle.o \
-	sde/sde_hw_color_processing_v1_7.o \
-	sde/sde_reg_dma.o \
-	sde/sde_hw_reg_dma_v1.o \
-	sde/sde_hw_dsc.o \
-	sde/sde_hw_ds.o
-
-msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
-	sde/sde_encoder_phys_wb.o
-
-msm_drm-$(CONFIG_DRM_MSM) += \
+	disp/dpu1/dpu_core_irq.o \
+	disp/dpu1/dpu_core_perf.o \
+	disp/dpu1/dpu_crtc.o \
+	disp/dpu1/dpu_encoder.o \
+	disp/dpu1/dpu_encoder_phys_cmd.o \
+	disp/dpu1/dpu_encoder_phys_vid.o \
+	disp/dpu1/dpu_formats.o \
+	disp/dpu1/dpu_hw_blk.o \
+	disp/dpu1/dpu_hw_catalog.o \
+	disp/dpu1/dpu_hw_cdm.o \
+	disp/dpu1/dpu_hw_ctl.o \
+	disp/dpu1/dpu_hw_interrupts.o \
+	disp/dpu1/dpu_hw_intf.o \
+	disp/dpu1/dpu_hw_lm.o \
+	disp/dpu1/dpu_hw_pingpong.o \
+	disp/dpu1/dpu_hw_sspp.o \
+	disp/dpu1/dpu_hw_top.o \
+	disp/dpu1/dpu_hw_util.o \
+	disp/dpu1/dpu_hw_vbif.o \
+	disp/dpu1/dpu_io_util.o \
+	disp/dpu1/dpu_irq.o \
+	disp/dpu1/dpu_kms.o \
+	disp/dpu1/dpu_mdss.o \
+	disp/dpu1/dpu_plane.o \
+	disp/dpu1/dpu_power_handle.o \
+	disp/dpu1/dpu_rm.o \
+	disp/dpu1/dpu_vbif.o \
 	msm_atomic.o \
 	msm_debugfs.o \
 	msm_drv.o \
@@ -206,14 +78,55 @@
 	msm_gem_vma.o \
 	msm_gpu.o \
 	msm_iommu.o \
-	msm_smmu.o \
 	msm_perf.o \
-	msm_prop.o \
 	msm_rd.o \
 	msm_ringbuffer.o \
 	msm_submitqueue.o
 
-msm_drm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+# use drm gpu driver only if qcom_kgsl driver not available
+ifneq ($(CONFIG_QCOM_KGSL),y)
+msm-y += adreno/adreno_device.o \
+	adreno/adreno_gpu.o \
+	adreno/a3xx_gpu.o \
+	adreno/a4xx_gpu.o \
+	adreno/a5xx_gpu.o \
+	adreno/a5xx_power.o \
+	adreno/a5xx_preempt.o \
+	adreno/a6xx_gpu.o \
+	adreno/a6xx_gmu.o \
+	adreno/a6xx_hfi.o
+endif
 
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+			  disp/dpu1/dpu_dbg.o
 
-obj-$(CONFIG_DRM_MSM)	+= msm_drm.o
+msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			disp/mdp4/mdp4_dsi_encoder.o \
+			dsi/dsi_cfg.o \
+			dsi/dsi_host.o \
+			dsi/dsi_manager.o \
+			dsi/phy/dsi_phy.o \
+			disp/mdp5/mdp5_cmd_encoder.o
+
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
+endif
+
+obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ab1d930..ba6f3c1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -35,7 +35,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
 {
 	struct device *dev = &gpu->pdev->dev;
 	const struct firmware *fw;
-	struct device_node *np;
+	struct device_node *np, *mem_np;
 	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
@@ -49,11 +49,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
 	if (!np)
 		return -ENODEV;
 
-	np = of_parse_phandle(np, "memory-region", 0);
-	if (!np)
+	mem_np = of_parse_phandle(np, "memory-region", 0);
+	of_node_put(np);
+	if (!mem_np)
 		return -EINVAL;
 
-	ret = of_address_to_resource(np, 0, &r);
+	ret = of_address_to_resource(mem_np, 0, &r);
+	of_node_put(mem_np);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4b646bf..44d1cda 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 
+	drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
 	/* see 119ecb7fd */
 	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_put(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index fd063fa..5df2034 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
 	struct device *dev = &mdp5_kms->pdev->dev;
 	struct mdp5_global_state *global_state;
 
+	drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
 	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	if (mdp5_kms->smp)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 5a828d3..ccaa324 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -20,12 +20,10 @@
 #include "msm_drv.h"
 #include "mdp5_kms.h"
 
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
-	struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+	struct msm_mdss base;
 
 	void __iomem *mmio, *vbif;
 
@@ -41,22 +39,22 @@ struct msm_mdss {
 	} irqcontroller;
 };
 
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
 {
-	msm_writel(data, mdss->mmio + reg);
+	msm_writel(data, mdp5_mdss->mmio + reg);
 }
 
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
 {
-	return msm_readl(mdss->mmio + reg);
+	return msm_readl(mdp5_mdss->mmio + reg);
 }
 
 static irqreturn_t mdss_irq(int irq, void *arg)
 {
-	struct msm_mdss *mdss = arg;
+	struct mdp5_mdss *mdp5_mdss = arg;
 	u32 intr;
 
-	intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
 
 	VERB("intr=%08x", intr);
 
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
 		irq_hw_number_t hwirq = fls(intr) - 1;
 
 		generic_handle_irq(irq_find_mapping(
-				mdss->irqcontroller.domain, hwirq));
+				mdp5_mdss->irqcontroller.domain, hwirq));
 		intr &= ~(1 << hwirq);
 	}
 
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
 
 static void mdss_hw_mask_irq(struct irq_data *irqd)
 {
-	struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
 
 	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
 	smp_mb__after_atomic();
 }
 
 static void mdss_hw_unmask_irq(struct irq_data *irqd)
 {
-	struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
 
 	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
 	smp_mb__after_atomic();
 }
 
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
 static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
 				 irq_hw_number_t hwirq)
 {
-	struct msm_mdss *mdss = d->host_data;
+	struct mdp5_mdss *mdp5_mdss = d->host_data;
 
 	if (!(VALID_IRQS & (1 << hwirq)))
 		return -EPERM;
 
 	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
-	irq_set_chip_data(irq, mdss);
+	irq_set_chip_data(irq, mdp5_mdss);
 
 	return 0;
 }
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
 };
 
 
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
 {
-	struct device *dev = mdss->dev->dev;
+	struct device *dev = mdp5_mdss->base.dev->dev;
 	struct irq_domain *d;
 
 	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
-				  mdss);
+				  mdp5_mdss);
 	if (!d) {
 		dev_err(dev, "mdss irq domain add failed\n");
 		return -ENXIO;
 	}
 
-	mdss->irqcontroller.enabled_mask = 0;
-	mdss->irqcontroller.domain = d;
+	mdp5_mdss->irqcontroller.enabled_mask = 0;
+	mdp5_mdss->irqcontroller.domain = d;
 
 	return 0;
 }
 
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
 {
+	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
 	DBG("");
 
-	clk_prepare_enable(mdss->ahb_clk);
-	if (mdss->axi_clk)
-		clk_prepare_enable(mdss->axi_clk);
-	if (mdss->vsync_clk)
-		clk_prepare_enable(mdss->vsync_clk);
+	clk_prepare_enable(mdp5_mdss->ahb_clk);
+	if (mdp5_mdss->axi_clk)
+		clk_prepare_enable(mdp5_mdss->axi_clk);
+	if (mdp5_mdss->vsync_clk)
+		clk_prepare_enable(mdp5_mdss->vsync_clk);
 
 	return 0;
 }
 
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
 {
+	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
 	DBG("");
 
-	if (mdss->vsync_clk)
-		clk_disable_unprepare(mdss->vsync_clk);
-	if (mdss->axi_clk)
-		clk_disable_unprepare(mdss->axi_clk);
-	clk_disable_unprepare(mdss->ahb_clk);
+	if (mdp5_mdss->vsync_clk)
+		clk_disable_unprepare(mdp5_mdss->vsync_clk);
+	if (mdp5_mdss->axi_clk)
+		clk_disable_unprepare(mdp5_mdss->axi_clk);
+	clk_disable_unprepare(mdp5_mdss->ahb_clk);
 
 	return 0;
 }
 
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
 {
-	struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+	struct platform_device *pdev =
+			to_platform_device(mdp5_mdss->base.dev->dev);
 
-	mdss->ahb_clk = msm_clk_get(pdev, "iface");
-	if (IS_ERR(mdss->ahb_clk))
-		mdss->ahb_clk = NULL;
+	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+	if (IS_ERR(mdp5_mdss->ahb_clk))
+		mdp5_mdss->ahb_clk = NULL;
 
-	mdss->axi_clk = msm_clk_get(pdev, "bus");
-	if (IS_ERR(mdss->axi_clk))
-		mdss->axi_clk = NULL;
+	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+	if (IS_ERR(mdp5_mdss->axi_clk))
+		mdp5_mdss->axi_clk = NULL;
 
-	mdss->vsync_clk = msm_clk_get(pdev, "vsync");
-	if (IS_ERR(mdss->vsync_clk))
-		mdss->vsync_clk = NULL;
+	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+	if (IS_ERR(mdp5_mdss->vsync_clk))
+		mdp5_mdss->vsync_clk = NULL;
 
 	return 0;
 }
 
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_mdss *mdss = priv->mdss;
+	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
 
-	if (!mdss)
+	if (!mdp5_mdss)
 		return;
 
-	irq_domain_remove(mdss->irqcontroller.domain);
-	mdss->irqcontroller.domain = NULL;
+	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+	mdp5_mdss->irqcontroller.domain = NULL;
 
-	regulator_disable(mdss->vdd);
+	regulator_disable(mdp5_mdss->vdd);
 
 	pm_runtime_disable(dev->dev);
 }
 
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+	.enable	= mdp5_mdss_enable,
+	.disable = mdp5_mdss_disable,
+	.destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev->dev);
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_mdss *mdss;
+	struct mdp5_mdss *mdp5_mdss;
 	int ret;
 
 	DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
 	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
 		return 0;
 
-	mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
-	if (!mdss) {
+	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+	if (!mdp5_mdss) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	mdss->dev = dev;
+	mdp5_mdss->base.dev = dev;
 
-	mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
-	if (IS_ERR(mdss->mmio)) {
-		ret = PTR_ERR(mdss->mmio);
+	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+	if (IS_ERR(mdp5_mdss->mmio)) {
+		ret = PTR_ERR(mdp5_mdss->mmio);
 		goto fail;
 	}
 
-	mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
-	if (IS_ERR(mdss->vbif)) {
-		ret = PTR_ERR(mdss->vbif);
+	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+	if (IS_ERR(mdp5_mdss->vbif)) {
+		ret = PTR_ERR(mdp5_mdss->vbif);
 		goto fail;
 	}
 
-	ret = msm_mdss_get_clocks(mdss);
+	ret = msm_mdss_get_clocks(mdp5_mdss);
 	if (ret) {
 		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
 		goto fail;
 	}
 
 	/* Regulator to enable GDSCs in downstream kernels */
-	mdss->vdd = devm_regulator_get(dev->dev, "vdd");
-	if (IS_ERR(mdss->vdd)) {
-		ret = PTR_ERR(mdss->vdd);
+	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+	if (IS_ERR(mdp5_mdss->vdd)) {
+		ret = PTR_ERR(mdp5_mdss->vdd);
 		goto fail;
 	}
 
-	ret = regulator_enable(mdss->vdd);
+	ret = regulator_enable(mdp5_mdss->vdd);
 	if (ret) {
 		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
 			ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
 	}
 
 	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
-			       mdss_irq, 0, "mdss_isr", mdss);
+			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
 	if (ret) {
 		dev_err(dev->dev, "failed to init irq: %d\n", ret);
 		goto fail_irq;
 	}
 
-	ret = mdss_irq_domain_init(mdss);
+	ret = mdss_irq_domain_init(mdp5_mdss);
 	if (ret) {
 		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
 		goto fail_irq;
 	}
 
-	priv->mdss = mdss;
+	mdp5_mdss->base.funcs = &mdss_funcs;
+	priv->mdss = &mdp5_mdss->base;
 
 	pm_runtime_enable(dev->dev);
 
 	return 0;
 fail_irq:
-	regulator_disable(mdss->vdd);
+	regulator_disable(mdp5_mdss->vdd);
 fail:
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7d306c5..273cbbe 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
 	msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
-#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
 					      struct drm_plane_state *state)
 {
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
deleted file mode 100644
index d5982d9..0000000
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ /dev/null
@@ -1,854 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/of_platform.h>
-#include <linux/msm_ext_display.h>
-
-#include <drm/drm_dp_helper.h>
-
-#include "dp_catalog.h"
-#include "dp_audio.h"
-#include "dp_panel.h"
-
-struct dp_audio_private {
-	struct platform_device *ext_pdev;
-	struct platform_device *pdev;
-	struct dp_catalog_audio *catalog;
-	struct msm_ext_disp_init_data ext_audio_data;
-	struct dp_panel *panel;
-
-	bool ack_enabled;
-	bool session_on;
-	bool engine_on;
-
-	u32 channels;
-
-	struct completion hpd_comp;
-	struct workqueue_struct *notify_workqueue;
-	struct delayed_work notify_delayed_work;
-	struct mutex ops_lock;
-
-	struct dp_audio dp_audio;
-
-	atomic_t acked;
-};
-
-static u32 dp_audio_get_header(struct dp_catalog_audio *catalog,
-		enum dp_catalog_audio_sdp_type sdp,
-		enum dp_catalog_audio_header_type header)
-{
-	catalog->sdp_type = sdp;
-	catalog->sdp_header = header;
-	catalog->get_header(catalog);
-
-	return catalog->data;
-}
-
-static void dp_audio_set_header(struct dp_catalog_audio *catalog,
-		u32 data,
-		enum dp_catalog_audio_sdp_type sdp,
-		enum dp_catalog_audio_header_type header)
-{
-	catalog->sdp_type = sdp;
-	catalog->sdp_header = header;
-	catalog->data = data;
-	catalog->set_header(catalog);
-}
-
-static void dp_audio_stream_sdp(struct dp_audio_private *audio)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-	u32 value, new_value;
-	u8 parity_byte;
-
-	/* Config header and parity byte 1 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-	value &= 0x0000ffff;
-
-	new_value = 0x02;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
-	/* Config header and parity byte 2 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
-	value &= 0xffff0000;
-	new_value = 0x0;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
-
-	/* Config header and parity byte 3 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
-	value &= 0x0000ffff;
-
-	new_value = audio->channels - 1;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_3_BIT)
-			| (parity_byte << PARITY_BYTE_3_BIT));
-	pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
-		value, parity_byte);
-
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
-}
-
-static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-	u32 value, new_value;
-	u8 parity_byte;
-
-	/* Config header and parity byte 1 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-	value &= 0x0000ffff;
-
-	new_value = 0x1;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-		value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
-	/* Config header and parity byte 2 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-	value &= 0xffff0000;
-
-	new_value = 0x17;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
-	/* Config header and parity byte 3 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
-	value &= 0x0000ffff;
-
-	new_value = (0x0 | (0x11 << 2));
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_3_BIT)
-			| (parity_byte << PARITY_BYTE_3_BIT));
-	pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
-}
-
-static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-	u32 value, new_value;
-	u8 parity_byte;
-
-	/* Config header and parity byte 1 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-	value &= 0x0000ffff;
-
-	new_value = 0x84;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
-	/* Config header and parity byte 2 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-	value &= 0xffff0000;
-
-	new_value = 0x1b;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
-	/* Config header and parity byte 3 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
-	value &= 0x0000ffff;
-
-	new_value = (0x0 | (0x11 << 2));
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_3_BIT)
-			| (parity_byte << PARITY_BYTE_3_BIT));
-	pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
-			new_value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
-}
-
-static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-	u32 value, new_value;
-	u8 parity_byte;
-
-	/* Config header and parity byte 1 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-	value &= 0x0000ffff;
-
-	new_value = 0x05;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
-	/* Config header and parity byte 2 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-	value &= 0xffff0000;
-
-	new_value = 0x0F;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
-	/* Config header and parity byte 3 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-	value &= 0x0000ffff;
-
-	new_value = 0x0;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_3_BIT)
-			| (parity_byte << PARITY_BYTE_3_BIT));
-	pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-}
-
-static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-	u32 value, new_value;
-	u8 parity_byte;
-
-	/* Config header and parity byte 1 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-	value &= 0x0000ffff;
-
-	new_value = 0x06;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
-	/* Config header and parity byte 2 */
-	value = dp_audio_get_header(catalog,
-			DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
-	value &= 0xffff0000;
-
-	new_value = 0x0F;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_audio_set_header(catalog, value,
-		DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
-}
-
-static void dp_audio_setup_sdp(struct dp_audio_private *audio)
-{
-	/* always program stream 0 first before actual stream cfg */
-	audio->catalog->stream_id = DP_STREAM_0;
-	audio->catalog->config_sdp(audio->catalog);
-
-	if (audio->panel->stream_id == DP_STREAM_1) {
-		audio->catalog->stream_id = DP_STREAM_1;
-		audio->catalog->config_sdp(audio->catalog);
-	}
-
-	dp_audio_stream_sdp(audio);
-	dp_audio_timestamp_sdp(audio);
-	dp_audio_infoframe_sdp(audio);
-	dp_audio_copy_management_sdp(audio);
-	dp_audio_isrc_sdp(audio);
-}
-
-static void dp_audio_setup_acr(struct dp_audio_private *audio)
-{
-	u32 select = 0;
-	struct dp_catalog_audio *catalog = audio->catalog;
-
-	switch (audio->dp_audio.bw_code) {
-	case DP_LINK_BW_1_62:
-		select = 0;
-		break;
-	case DP_LINK_BW_2_7:
-		select = 1;
-		break;
-	case DP_LINK_BW_5_4:
-		select = 2;
-		break;
-	case DP_LINK_BW_8_1:
-		select = 3;
-		break;
-	default:
-		pr_debug("Unknown link rate\n");
-		select = 0;
-		break;
-	}
-
-	catalog->data = select;
-	catalog->config_acr(catalog);
-}
-
-static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
-{
-	struct dp_catalog_audio *catalog = audio->catalog;
-
-	catalog->data = enable;
-	catalog->enable(catalog);
-
-	audio->engine_on = enable;
-}
-
-static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
-{
-	struct msm_ext_disp_data *ext_data;
-	struct dp_audio *dp_audio;
-
-	if (!pdev) {
-		pr_err("invalid input\n");
-		return ERR_PTR(-ENODEV);
-	}
-
-	ext_data = platform_get_drvdata(pdev);
-	if (!ext_data) {
-		pr_err("invalid ext disp data\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	dp_audio = ext_data->intf_data;
-	if (!ext_data) {
-		pr_err("invalid intf data\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	return container_of(dp_audio, struct dp_audio_private, dp_audio);
-}
-
-static int dp_audio_info_setup(struct platform_device *pdev,
-	struct msm_ext_disp_audio_setup_params *params)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		rc = PTR_ERR(audio);
-		return rc;
-	}
-
-	mutex_lock(&audio->ops_lock);
-
-	audio->channels = params->num_of_channels;
-
-	if (audio->panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id: %d\n", audio->panel->stream_id);
-		rc = -EINVAL;
-		mutex_unlock(&audio->ops_lock);
-		return rc;
-	}
-
-	dp_audio_setup_sdp(audio);
-	dp_audio_setup_acr(audio);
-	dp_audio_enable(audio, true);
-
-	mutex_unlock(&audio->ops_lock);
-	return rc;
-}
-
-static int dp_audio_get_edid_blk(struct platform_device *pdev,
-		struct msm_ext_disp_audio_edid_blk *blk)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-	struct sde_edid_ctrl *edid;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		rc = PTR_ERR(audio);
-		goto end;
-	}
-
-	if (!audio->panel || !audio->panel->edid_ctrl) {
-		pr_err("invalid panel data\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	edid = audio->panel->edid_ctrl;
-
-	blk->audio_data_blk = edid->audio_data_block;
-	blk->audio_data_blk_size = edid->adb_size;
-
-	blk->spk_alloc_data_blk = edid->spkr_alloc_data_block;
-	blk->spk_alloc_data_blk_size = edid->sadb_size;
-end:
-	return rc;
-}
-
-static int dp_audio_get_cable_status(struct platform_device *pdev, u32 vote)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		rc = PTR_ERR(audio);
-		goto end;
-	}
-
-	return audio->session_on;
-end:
-	return rc;
-}
-
-static int dp_audio_get_intf_id(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		rc = PTR_ERR(audio);
-		goto end;
-	}
-
-	return EXT_DISPLAY_TYPE_DP;
-end:
-	return rc;
-}
-
-static void dp_audio_teardown_done(struct platform_device *pdev)
-{
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio))
-		return;
-
-	mutex_lock(&audio->ops_lock);
-	dp_audio_enable(audio, false);
-	mutex_unlock(&audio->ops_lock);
-
-	atomic_set(&audio->acked, 1);
-	complete_all(&audio->hpd_comp);
-
-	pr_debug("audio engine disabled\n");
-}
-
-static int dp_audio_ack_done(struct platform_device *pdev, u32 ack)
-{
-	int rc = 0, ack_hpd;
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		rc = PTR_ERR(audio);
-		goto end;
-	}
-
-	if (ack & AUDIO_ACK_SET_ENABLE) {
-		audio->ack_enabled = ack & AUDIO_ACK_ENABLE ?
-			true : false;
-
-		pr_debug("audio ack feature %s\n",
-			audio->ack_enabled ? "enabled" : "disabled");
-		goto end;
-	}
-
-	if (!audio->ack_enabled)
-		goto end;
-
-	ack_hpd = ack & AUDIO_ACK_CONNECT;
-
-	pr_debug("acknowledging audio (%d)\n", ack_hpd);
-
-	if (!audio->engine_on) {
-		atomic_set(&audio->acked, 1);
-		complete_all(&audio->hpd_comp);
-	}
-end:
-	return rc;
-}
-
-static int dp_audio_codec_ready(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-
-	audio = dp_audio_get_data(pdev);
-	if (IS_ERR(audio)) {
-		pr_err("invalid input\n");
-		rc = PTR_ERR(audio);
-		goto end;
-	}
-
-	queue_delayed_work(audio->notify_workqueue,
-			&audio->notify_delayed_work, HZ/4);
-end:
-	return rc;
-}
-
-static int dp_audio_register_ext_disp(struct dp_audio_private *audio)
-{
-	int rc = 0;
-	struct device_node *pd = NULL;
-	const char *phandle = "qcom,ext-disp";
-	struct msm_ext_disp_init_data *ext;
-	struct msm_ext_disp_audio_codec_ops *ops;
-
-	ext = &audio->ext_audio_data;
-	ops = &ext->codec_ops;
-
-	ext->codec.type = EXT_DISPLAY_TYPE_DP;
-	ext->codec.ctrl_id = 0;
-	ext->codec.stream_id = audio->panel->stream_id;
-	ext->pdev = audio->pdev;
-	ext->intf_data = &audio->dp_audio;
-
-	ops->audio_info_setup   = dp_audio_info_setup;
-	ops->get_audio_edid_blk = dp_audio_get_edid_blk;
-	ops->cable_status       = dp_audio_get_cable_status;
-	ops->get_intf_id        = dp_audio_get_intf_id;
-	ops->teardown_done      = dp_audio_teardown_done;
-	ops->acknowledge        = dp_audio_ack_done;
-	ops->ready              = dp_audio_codec_ready;
-
-	if (!audio->pdev->dev.of_node) {
-		pr_err("cannot find audio dev.of_node\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0);
-	if (!pd) {
-		pr_err("cannot parse %s handle\n", phandle);
-		rc = -ENODEV;
-		goto end;
-	}
-
-	audio->ext_pdev = of_find_device_by_node(pd);
-	if (!audio->ext_pdev) {
-		pr_err("cannot find %s pdev\n", phandle);
-		rc = -ENODEV;
-		goto end;
-	}
-#if defined(CONFIG_MSM_EXT_DISPLAY)
-	rc = msm_ext_disp_register_intf(audio->ext_pdev, ext);
-	if (rc)
-		pr_err("failed to register disp\n");
-#endif
-end:
-	if (pd)
-		of_node_put(pd);
-
-	return rc;
-}
-
-static int dp_audio_deregister_ext_disp(struct dp_audio_private *audio)
-{
-	int rc = 0;
-	struct device_node *pd = NULL;
-	const char *phandle = "qcom,ext-disp";
-	struct msm_ext_disp_init_data *ext;
-
-	ext = &audio->ext_audio_data;
-
-	if (!audio->pdev->dev.of_node) {
-		pr_err("cannot find audio dev.of_node\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0);
-	if (!pd) {
-		pr_err("cannot parse %s handle\n", phandle);
-		rc = -ENODEV;
-		goto end;
-	}
-
-	audio->ext_pdev = of_find_device_by_node(pd);
-	if (!audio->ext_pdev) {
-		pr_err("cannot find %s pdev\n", phandle);
-		rc = -ENODEV;
-		goto end;
-	}
-
-#if defined(CONFIG_MSM_EXT_DISPLAY)
-	rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext);
-	if (rc)
-		pr_err("failed to deregister disp\n");
-#endif
-
-end:
-	return rc;
-}
-
-static int dp_audio_notify(struct dp_audio_private *audio, u32 state)
-{
-	int rc = 0;
-	struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
-
-	atomic_set(&audio->acked, 0);
-
-	if (!ext->intf_ops.audio_notify) {
-		pr_err("audio notify not defined\n");
-		goto end;
-	}
-
-	reinit_completion(&audio->hpd_comp);
-	rc = ext->intf_ops.audio_notify(audio->ext_pdev,
-			&ext->codec, state);
-	if (rc)
-		goto end;
-
-	if (atomic_read(&audio->acked))
-		goto end;
-
-	rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 4);
-	if (!rc) {
-		pr_err("timeout. state=%d err=%d\n", state, rc);
-		rc = -ETIMEDOUT;
-		goto end;
-	}
-
-	pr_debug("success\n");
-end:
-	return rc;
-}
-
-static int dp_audio_config(struct dp_audio_private *audio, u32 state)
-{
-	int rc = 0;
-	struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
-
-	if (!ext || !ext->intf_ops.audio_config) {
-		pr_err("audio_config not defined\n");
-		goto end;
-	}
-
-	/*
-	 * DP Audio sets default STREAM_0 only, other streams are
-	 * set by audio driver based on the hardware/software support.
-	 */
-	if (audio->panel->stream_id == DP_STREAM_0) {
-		rc = ext->intf_ops.audio_config(audio->ext_pdev,
-				&ext->codec, state);
-		if (rc)
-			pr_err("failed to config audio, err=%d\n", rc);
-	}
-end:
-	return rc;
-}
-
-static int dp_audio_on(struct dp_audio *dp_audio)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-	struct msm_ext_disp_init_data *ext;
-
-	if (!dp_audio) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
-	if (IS_ERR(audio)) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_audio_register_ext_disp(audio);
-
-	ext = &audio->ext_audio_data;
-
-	audio->session_on = true;
-
-	rc = dp_audio_config(audio, EXT_DISPLAY_CABLE_CONNECT);
-	if (rc)
-		goto end;
-
-	rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
-	if (rc)
-		goto end;
-
-	pr_debug("success\n");
-end:
-	return rc;
-}
-
-static int dp_audio_off(struct dp_audio *dp_audio)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-	struct msm_ext_disp_init_data *ext;
-	bool work_pending = false;
-
-	if (!dp_audio) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
-	ext = &audio->ext_audio_data;
-
-	work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work);
-	if (work_pending)
-		pr_debug("pending notification work completed\n");
-
-	rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
-	if (rc)
-		goto end;
-
-	pr_debug("success\n");
-end:
-	dp_audio_config(audio, EXT_DISPLAY_CABLE_DISCONNECT);
-
-	audio->session_on = false;
-	audio->engine_on  = false;
-
-	dp_audio_deregister_ext_disp(audio);
-
-	return rc;
-}
-
-static void dp_audio_notify_work_fn(struct work_struct *work)
-{
-	struct dp_audio_private *audio;
-	struct delayed_work *dw = to_delayed_work(work);
-
-	audio = container_of(dw, struct dp_audio_private, notify_delayed_work);
-
-	dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
-}
-
-static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio)
-{
-	audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify");
-	if (IS_ERR_OR_NULL(audio->notify_workqueue)) {
-		pr_err("Error creating notify_workqueue\n");
-		return -EPERM;
-	}
-
-	INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn);
-
-	return 0;
-}
-
-static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio)
-{
-	if (audio->notify_workqueue)
-		destroy_workqueue(audio->notify_workqueue);
-}
-
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
-			struct dp_panel *panel,
-			struct dp_catalog_audio *catalog)
-{
-	int rc = 0;
-	struct dp_audio_private *audio;
-	struct dp_audio *dp_audio;
-
-	if (!pdev || !panel || !catalog) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
-	if (!audio) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	rc = dp_audio_create_notify_workqueue(audio);
-	if (rc)
-		goto error_notify_workqueue;
-
-	init_completion(&audio->hpd_comp);
-
-	audio->pdev = pdev;
-	audio->panel = panel;
-	audio->catalog = catalog;
-
-	atomic_set(&audio->acked, 0);
-
-	dp_audio = &audio->dp_audio;
-
-	mutex_init(&audio->ops_lock);
-
-	dp_audio->on  = dp_audio_on;
-	dp_audio->off = dp_audio_off;
-
-	catalog->init(catalog);
-
-	return dp_audio;
-
-error_notify_workqueue:
-	devm_kfree(&pdev->dev, audio);
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_audio_put(struct dp_audio *dp_audio)
-{
-	struct dp_audio_private *audio;
-
-	if (!dp_audio)
-		return;
-
-	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
-
-	mutex_destroy(&audio->ops_lock);
-
-	dp_audio_destroy_notify_workqueue(audio);
-
-	devm_kfree(&audio->pdev->dev, audio);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
deleted file mode 100644
index ce02257..0000000
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_AUDIO_H_
-#define _DP_AUDIO_H_
-
-#include <linux/platform_device.h>
-
-#include "dp_panel.h"
-#include "dp_catalog.h"
-
-/**
- * struct dp_audio
- * @lane_count: number of lanes configured in current session
- * @bw_code: link rate's bandwidth code for current session
- */
-struct dp_audio {
-	u32 lane_count;
-	u32 bw_code;
-
-	/**
-	 * on()
-	 *
-	 * Enables the audio by notifying the user module.
-	 *
-	 * @dp_audio: an instance of struct dp_audio.
-	 *
-	 * Returns the error code in case of failure, 0 in success case.
-	 */
-	int (*on)(struct dp_audio *dp_audio);
-
-	/**
-	 * off()
-	 *
-	 * Disables the audio by notifying the user module.
-	 *
-	 * @dp_audio: an instance of struct dp_audio.
-	 *
-	 * Returns the error code in case of failure, 0 in success case.
-	 */
-	int (*off)(struct dp_audio *dp_audio);
-};
-
-/**
- * dp_audio_get()
- *
- * Creates and instance of dp audio.
- *
- * @pdev: caller's platform device instance.
- * @panel: an instance of dp_panel module.
- * @catalog: an instance of dp_catalog_audio module.
- *
- * Returns the error code in case of failure, otherwize
- * an instance of newly created dp_module.
- */
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
-			struct dp_panel *panel,
-			struct dp_catalog_audio *catalog);
-
-/**
- * dp_audio_put()
- *
- * Cleans the dp_audio instance.
- *
- * @dp_audio: an instance of dp_audio.
- */
-void dp_audio_put(struct dp_audio *dp_audio);
-#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
deleted file mode 100644
index 7985c83..0000000
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ /dev/null
@@ -1,865 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/soc/qcom/fsa4480-i2c.h>
-#include <linux/usb/usbpd.h>
-#include <linux/delay.h>
-
-#include "dp_aux.h"
-
-#define DP_AUX_ENUM_STR(x)		#x
-
-enum {
-	DP_AUX_DATA_INDEX_WRITE = BIT(31),
-};
-
-struct dp_aux_private {
-	struct device *dev;
-	struct dp_aux dp_aux;
-	struct dp_catalog_aux *catalog;
-	struct dp_aux_cfg *cfg;
-	struct device_node *aux_switch_node;
-	struct mutex mutex;
-	struct completion comp;
-	struct drm_dp_aux drm_aux;
-
-	bool cmd_busy;
-	bool native;
-	bool read;
-	bool no_send_addr;
-	bool no_send_stop;
-	bool enabled;
-
-	u32 offset;
-	u32 segment;
-	u32 aux_error_num;
-	u32 retry_cnt;
-
-	atomic_t aborted;
-
-	u8 *dpcd;
-	u8 *edid;
-};
-
-#ifdef CONFIG_DYNAMIC_DEBUG
-static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
-		struct drm_dp_aux_msg *msg)
-{
-	char prefix[64];
-	int i, linelen, remaining = msg->size;
-	const int rowsize = 16;
-	u8 linebuf[64];
-	struct dp_aux_private *aux = container_of(drm_aux,
-		struct dp_aux_private, drm_aux);
-
-	snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ",
-		aux->native ? "NAT" : "I2C",
-		aux->read ? "RD" : "WR",
-		msg->address, msg->size);
-
-	for (i = 0; i < msg->size; i += rowsize) {
-		linelen = min(remaining, rowsize);
-		remaining -= rowsize;
-
-		hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1,
-			linebuf, sizeof(linebuf), false);
-
-		pr_debug("%s%s\n", prefix, linebuf);
-	}
-}
-#else
-static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
-		struct drm_dp_aux_msg *msg)
-{
-}
-#endif
-
-static char *dp_aux_get_error(u32 aux_error)
-{
-	switch (aux_error) {
-	case DP_AUX_ERR_NONE:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
-	case DP_AUX_ERR_ADDR:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
-	case DP_AUX_ERR_TOUT:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
-	case DP_AUX_ERR_NACK:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
-	case DP_AUX_ERR_DEFER:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
-	case DP_AUX_ERR_NACK_DEFER:
-		return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
-	default:
-		return "unknown";
-	}
-}
-
-static u32 dp_aux_write(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *msg)
-{
-	u32 data[4], reg, len;
-	u8 *msgdata = msg->buffer;
-	int const aux_cmd_fifo_len = 128;
-	int i = 0;
-
-	if (aux->read)
-		len = 4;
-	else
-		len = msg->size + 4;
-
-	/*
-	 * cmd fifo only has depth of 144 bytes
-	 * limit buf length to 128 bytes here
-	 */
-	if (len > aux_cmd_fifo_len) {
-		pr_err("buf len error\n");
-		return 0;
-	}
-
-	/* Pack cmd and write to HW */
-	data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
-	if (aux->read)
-		data[0] |=  BIT(4); /* R/W */
-
-	data[1] = (msg->address >> 8) & 0xff;	/* addr[15:8] */
-	data[2] = msg->address & 0xff;		/* addr[7:0] */
-	data[3] = (msg->size - 1) & 0xff;	/* len[7:0] */
-
-	for (i = 0; i < len; i++) {
-		reg = (i < 4) ? data[i] : msgdata[i - 4];
-		reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
-		if (i == 0)
-			reg |= DP_AUX_DATA_INDEX_WRITE;
-		aux->catalog->data = reg;
-		aux->catalog->write_data(aux->catalog);
-	}
-
-	aux->catalog->clear_trans(aux->catalog, false);
-	aux->catalog->clear_hw_interrupts(aux->catalog);
-
-	reg = 0; /* Transaction number == 1 */
-	if (!aux->native) { /* i2c */
-		reg |= BIT(8);
-
-		if (aux->no_send_addr)
-			reg |= BIT(10);
-
-		if (aux->no_send_stop)
-			reg |= BIT(11);
-	}
-
-	reg |= BIT(9);
-	aux->catalog->data = reg;
-	aux->catalog->write_trans(aux->catalog);
-
-	return len;
-}
-
-static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *msg)
-{
-	u32 ret = 0, len = 0, timeout;
-	int const aux_timeout_ms = HZ/4;
-
-	reinit_completion(&aux->comp);
-
-	len = dp_aux_write(aux, msg);
-	if (len == 0) {
-		pr_err("DP AUX write failed\n");
-		return -EINVAL;
-	}
-
-	timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
-	if (!timeout) {
-		pr_err("aux %s timeout\n", (aux->read ? "read" : "write"));
-		return -ETIMEDOUT;
-	}
-
-	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
-		ret = len;
-	} else {
-		pr_err_ratelimited("aux err: %s\n",
-			dp_aux_get_error(aux->aux_error_num));
-
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *msg)
-{
-	u32 data;
-	u8 *dp;
-	u32 i, actual_i;
-	u32 len = msg->size;
-
-	aux->catalog->clear_trans(aux->catalog, true);
-
-	data = 0;
-	data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
-	data |= BIT(0);  /* read */
-
-	aux->catalog->data = data;
-	aux->catalog->write_data(aux->catalog);
-
-	dp = msg->buffer;
-
-	/* discard first byte */
-	data = aux->catalog->read_data(aux->catalog);
-
-	for (i = 0; i < len; i++) {
-		data = aux->catalog->read_data(aux->catalog);
-		*dp++ = (u8)((data >> 8) & 0xff);
-
-		actual_i = (data >> 16) & 0xFF;
-		if (i != actual_i)
-			pr_warn("Index mismatch: expected %d, found %d\n",
-				i, actual_i);
-	}
-}
-
-static void dp_aux_native_handler(struct dp_aux_private *aux)
-{
-	u32 isr = aux->catalog->isr;
-
-	if (isr & DP_INTR_AUX_I2C_DONE)
-		aux->aux_error_num = DP_AUX_ERR_NONE;
-	else if (isr & DP_INTR_WRONG_ADDR)
-		aux->aux_error_num = DP_AUX_ERR_ADDR;
-	else if (isr & DP_INTR_TIMEOUT)
-		aux->aux_error_num = DP_AUX_ERR_TOUT;
-	if (isr & DP_INTR_NACK_DEFER)
-		aux->aux_error_num = DP_AUX_ERR_NACK;
-	if (isr & DP_INTR_AUX_ERROR) {
-		aux->aux_error_num = DP_AUX_ERR_PHY;
-		aux->catalog->clear_hw_interrupts(aux->catalog);
-	}
-
-	complete(&aux->comp);
-}
-
-static void dp_aux_i2c_handler(struct dp_aux_private *aux)
-{
-	u32 isr = aux->catalog->isr;
-
-	if (isr & DP_INTR_AUX_I2C_DONE) {
-		if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
-			aux->aux_error_num = DP_AUX_ERR_NACK;
-		else
-			aux->aux_error_num = DP_AUX_ERR_NONE;
-	} else {
-		if (isr & DP_INTR_WRONG_ADDR)
-			aux->aux_error_num = DP_AUX_ERR_ADDR;
-		else if (isr & DP_INTR_TIMEOUT)
-			aux->aux_error_num = DP_AUX_ERR_TOUT;
-		if (isr & DP_INTR_NACK_DEFER)
-			aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
-		if (isr & DP_INTR_I2C_NACK)
-			aux->aux_error_num = DP_AUX_ERR_NACK;
-		if (isr & DP_INTR_I2C_DEFER)
-			aux->aux_error_num = DP_AUX_ERR_DEFER;
-		if (isr & DP_INTR_AUX_ERROR) {
-			aux->aux_error_num = DP_AUX_ERR_PHY;
-			aux->catalog->clear_hw_interrupts(aux->catalog);
-		}
-	}
-
-	complete(&aux->comp);
-}
-
-static void dp_aux_isr(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	aux->catalog->get_irq(aux->catalog, aux->cmd_busy);
-
-	if (!aux->cmd_busy)
-		return;
-
-	if (aux->native)
-		dp_aux_native_handler(aux);
-	else
-		dp_aux_i2c_handler(aux);
-}
-
-static void dp_aux_reconfig(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	aux->catalog->update_aux_cfg(aux->catalog,
-			aux->cfg, PHY_AUX_CFG1);
-	aux->catalog->reset(aux->catalog);
-}
-
-static void dp_aux_abort_transaction(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	atomic_set(&aux->aborted, 1);
-}
-
-static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *input_msg)
-{
-	u32 const edid_address = 0x50;
-	u32 const segment_address = 0x30;
-	bool i2c_read = input_msg->request &
-		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-	u8 *data = NULL;
-
-	if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
-		(input_msg->address != segment_address)))
-		return;
-
-
-	data = input_msg->buffer;
-	if (input_msg->address == segment_address)
-		aux->segment = *data;
-	else
-		aux->offset = *data;
-}
-
-/**
- * dp_aux_transfer_helper() - helper function for EDID read transactions
- *
- * @aux: DP AUX private structure
- * @input_msg: input message from DRM upstream APIs
- * @send_seg: send the seg to sink
- *
- * return: void
- *
- * This helper function is used to fix EDID reads for non-compliant
- * sinks that do not handle the i2c middle-of-transaction flag correctly.
- */
-static void dp_aux_transfer_helper(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *input_msg, bool send_seg)
-{
-	struct drm_dp_aux_msg helper_msg;
-	u32 const message_size = 0x10;
-	u32 const segment_address = 0x30;
-	u32 const edid_block_length = 0x80;
-	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
-	bool i2c_read = input_msg->request &
-		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-
-	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
-		return;
-
-	/*
-	 * Sending the segment value and EDID offset will be performed
-	 * from the DRM upstream EDID driver for each block. Avoid
-	 * duplicate AUX transactions related to this while reading the
-	 * first 16 bytes of each block.
-	 */
-	if (!(aux->offset % edid_block_length) || !send_seg)
-		goto end;
-
-	aux->read = false;
-	aux->cmd_busy = true;
-	aux->no_send_addr = true;
-	aux->no_send_stop = true;
-
-	/*
-	 * Send the segment address for i2c reads for segment > 0 and for which
-	 * the middle-of-transaction flag is set. This is required to support
-	 * EDID reads of more than 2 blocks as the segment address is reset to 0
-	 * since we are overriding the middle-of-transaction flag for read
-	 * transactions.
-	 */
-	if (aux->segment) {
-		memset(&helper_msg, 0, sizeof(helper_msg));
-		helper_msg.address = segment_address;
-		helper_msg.buffer = &aux->segment;
-		helper_msg.size = 1;
-		dp_aux_cmd_fifo_tx(aux, &helper_msg);
-	}
-
-	/*
-	 * Send the offset address for every i2c read in which the
-	 * middle-of-transaction flag is set. This will ensure that the sink
-	 * will update its read pointer and return the correct portion of the
-	 * EDID buffer in the subsequent i2c read trasntion triggered in the
-	 * native AUX transfer function.
-	 */
-	memset(&helper_msg, 0, sizeof(helper_msg));
-	helper_msg.address = input_msg->address;
-	helper_msg.buffer = &aux->offset;
-	helper_msg.size = 1;
-	dp_aux_cmd_fifo_tx(aux, &helper_msg);
-end:
-	aux->offset += message_size;
-	if (aux->offset == 0x80 || aux->offset == 0x100)
-		aux->segment = 0x0; /* reset segment at end of block */
-}
-
-static int dp_aux_transfer_ready(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *msg, bool send_seg)
-{
-	int ret = 0;
-	int const aux_cmd_native_max = 16;
-	int const aux_cmd_i2c_max = 128;
-
-	if (atomic_read(&aux->aborted)) {
-		ret = -ETIMEDOUT;
-		goto error;
-	}
-
-	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
-
-	/* Ignore address only message */
-	if ((msg->size == 0) || (msg->buffer == NULL)) {
-		msg->reply = aux->native ?
-			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
-		goto error;
-	}
-
-	/* msg sanity check */
-	if ((aux->native && (msg->size > aux_cmd_native_max)) ||
-		(msg->size > aux_cmd_i2c_max)) {
-		pr_err("%s: invalid msg: size(%zu), request(%x)\n",
-			__func__, msg->size, msg->request);
-		ret = -EINVAL;
-		goto error;
-	}
-
-	dp_aux_update_offset_and_segment(aux, msg);
-
-	dp_aux_transfer_helper(aux, msg, send_seg);
-
-	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-
-	if (aux->read) {
-		aux->no_send_addr = true;
-		aux->no_send_stop = false;
-	} else {
-		aux->no_send_addr = true;
-		aux->no_send_stop = true;
-	}
-
-	aux->cmd_busy = true;
-error:
-	return ret;
-}
-
-static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux,
-		struct drm_dp_aux_msg *msg)
-{
-	u32 timeout;
-	ssize_t ret;
-	struct dp_aux_private *aux = container_of(drm_aux,
-		struct dp_aux_private, drm_aux);
-
-	mutex_lock(&aux->mutex);
-
-	ret = dp_aux_transfer_ready(aux, msg, false);
-	if (ret)
-		goto end;
-
-	aux->aux_error_num = DP_AUX_ERR_NONE;
-
-	if (!aux->dpcd || !aux->edid) {
-		pr_err("invalid aux/dpcd structure\n");
-		goto end;
-	}
-
-	if ((msg->address + msg->size) > SZ_4K) {
-		pr_debug("invalid dpcd access: addr=0x%x, size=0x%lx\n",
-				msg->address, msg->size);
-		goto address_error;
-	}
-
-	if (aux->native) {
-		aux->dp_aux.reg = msg->address;
-		aux->dp_aux.read = aux->read;
-		aux->dp_aux.size = msg->size;
-
-		reinit_completion(&aux->comp);
-
-		if (aux->read) {
-			timeout = wait_for_completion_timeout(&aux->comp, HZ);
-			if (!timeout) {
-				pr_err("aux timeout for 0x%x\n", msg->address);
-				atomic_set(&aux->aborted, 1);
-				ret = -ETIMEDOUT;
-				goto end;
-			}
-
-			memcpy(msg->buffer, aux->dpcd + msg->address,
-				msg->size);
-		} else {
-			memcpy(aux->dpcd + msg->address, msg->buffer,
-				msg->size);
-
-			timeout = wait_for_completion_timeout(&aux->comp, HZ);
-			if (!timeout) {
-				pr_err("aux timeout for 0x%x\n", msg->address);
-				atomic_set(&aux->aborted, 1);
-				ret = -ETIMEDOUT;
-				goto end;
-			}
-		}
-
-		aux->aux_error_num = DP_AUX_ERR_NONE;
-	} else {
-		if (aux->read && msg->address == 0x50) {
-			memcpy(msg->buffer,
-				aux->edid + aux->offset - 16,
-				msg->size);
-		}
-	}
-
-	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
-		dp_aux_hex_dump(drm_aux, msg);
-
-		if (!aux->read)
-			memset(msg->buffer, 0, msg->size);
-
-		msg->reply = aux->native ?
-			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
-	} else {
-		/* Reply defer to retry */
-		msg->reply = aux->native ?
-			DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
-	}
-
-	ret = msg->size;
-	goto end;
-
-address_error:
-	memset(msg->buffer, 0, msg->size);
-	ret = msg->size;
-end:
-	aux->dp_aux.reg = 0xFFFF;
-	aux->dp_aux.read = true;
-	aux->dp_aux.size = 0;
-
-	mutex_unlock(&aux->mutex);
-	return ret;
-}
-
-/*
- * This function does the real job to process an AUX transaction.
- * It will call aux_reset() function to reset the AUX channel,
- * if the waiting is timeout.
- */
-static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
-		struct drm_dp_aux_msg *msg)
-{
-	ssize_t ret;
-	int const retry_count = 5;
-	struct dp_aux_private *aux = container_of(drm_aux,
-		struct dp_aux_private, drm_aux);
-
-	mutex_lock(&aux->mutex);
-
-	ret = dp_aux_transfer_ready(aux, msg, true);
-	if (ret)
-		goto unlock_exit;
-
-	if (!aux->cmd_busy) {
-		ret = msg->size;
-		goto unlock_exit;
-	}
-
-	ret = dp_aux_cmd_fifo_tx(aux, msg);
-	if ((ret < 0) && !atomic_read(&aux->aborted)) {
-		aux->retry_cnt++;
-		if (!(aux->retry_cnt % retry_count))
-			aux->catalog->update_aux_cfg(aux->catalog,
-				aux->cfg, PHY_AUX_CFG1);
-		aux->catalog->reset(aux->catalog);
-		goto unlock_exit;
-	} else if (ret < 0) {
-		goto unlock_exit;
-	}
-
-	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
-		if (aux->read)
-			dp_aux_cmd_fifo_rx(aux, msg);
-
-		dp_aux_hex_dump(drm_aux, msg);
-
-		msg->reply = aux->native ?
-			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
-	} else {
-		/* Reply defer to retry */
-		msg->reply = aux->native ?
-			DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
-	}
-
-	/* Return requested size for success or retry */
-	ret = msg->size;
-	aux->retry_cnt = 0;
-
-unlock_exit:
-	aux->cmd_busy = false;
-	mutex_unlock(&aux->mutex);
-	return ret;
-}
-
-static void dp_aux_reset_phy_config_indices(struct dp_aux_cfg *aux_cfg)
-{
-	int i = 0;
-
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++)
-		aux_cfg[i].current_index = 0;
-}
-
-static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !aux_cfg) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	if (aux->enabled)
-		return;
-
-	dp_aux_reset_phy_config_indices(aux_cfg);
-	aux->catalog->setup(aux->catalog, aux_cfg);
-	aux->catalog->reset(aux->catalog);
-	aux->catalog->enable(aux->catalog, true);
-	atomic_set(&aux->aborted, 0);
-	aux->retry_cnt = 0;
-	aux->enabled = true;
-}
-
-static void dp_aux_deinit(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	if (!aux->enabled)
-		return;
-
-	atomic_set(&aux->aborted, 1);
-	aux->catalog->enable(aux->catalog, false);
-	aux->enabled = false;
-}
-
-static int dp_aux_register(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-	int ret = 0;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	aux->drm_aux.name = "sde_dp_aux";
-	aux->drm_aux.dev = aux->dev;
-	aux->drm_aux.transfer = dp_aux_transfer;
-	ret = drm_dp_aux_register(&aux->drm_aux);
-	if (ret) {
-		pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
-		goto exit;
-	}
-	dp_aux->drm_aux = &aux->drm_aux;
-exit:
-	return ret;
-}
-
-static void dp_aux_deregister(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-	drm_dp_aux_unregister(&aux->drm_aux);
-}
-
-static void dp_aux_dpcd_updated(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	complete(&aux->comp);
-}
-
-static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, bool en,
-		u8 *edid, u8 *dpcd)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	mutex_lock(&aux->mutex);
-
-	aux->edid = edid;
-	aux->dpcd = dpcd;
-
-	if (en) {
-		atomic_set(&aux->aborted, 0);
-		aux->drm_aux.transfer = dp_aux_transfer_debug;
-	} else {
-		aux->drm_aux.transfer = dp_aux_transfer;
-	}
-
-	mutex_unlock(&aux->mutex);
-}
-
-static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux,
-		bool enable, int orientation)
-{
-	struct dp_aux_private *aux;
-	int rc = 0;
-	enum fsa_function event = FSA_USBC_DISPLAYPORT_DISCONNECTED;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	if (!aux->aux_switch_node) {
-		pr_debug("undefined fsa4480 handle\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (enable) {
-		switch (orientation) {
-		case ORIENTATION_CC1:
-			event = FSA_USBC_ORIENTATION_CC1;
-			break;
-		case ORIENTATION_CC2:
-			event = FSA_USBC_ORIENTATION_CC2;
-			break;
-		default:
-			pr_err("invalid orientation\n");
-			rc = -EINVAL;
-			goto end;
-		}
-	}
-
-	pr_debug("enable=%d, orientation=%d, event=%d\n",
-			enable, orientation, event);
-
-	rc = fsa4480_switch_event(aux->aux_switch_node, event);
-	if (rc)
-		pr_err("failed to configure fsa4480 i2c device (%d)\n", rc);
-end:
-	return rc;
-}
-
-struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
-		struct dp_parser *parser, struct device_node *aux_switch)
-{
-	int rc = 0;
-	struct dp_aux_private *aux;
-	struct dp_aux *dp_aux;
-
-	if (!catalog || !parser ||
-			(!parser->no_aux_switch &&
-				!aux_switch &&
-				!parser->gpio_aux_switch)) {
-		pr_err("invalid input\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
-	if (!aux) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	init_completion(&aux->comp);
-	aux->cmd_busy = false;
-	mutex_init(&aux->mutex);
-
-	aux->dev = dev;
-	aux->catalog = catalog;
-	aux->cfg = parser->aux_cfg;
-	aux->aux_switch_node = aux_switch;
-	dp_aux = &aux->dp_aux;
-	aux->retry_cnt = 0;
-	aux->dp_aux.reg = 0xFFFF;
-
-	dp_aux->isr     = dp_aux_isr;
-	dp_aux->init    = dp_aux_init;
-	dp_aux->deinit  = dp_aux_deinit;
-	dp_aux->drm_aux_register = dp_aux_register;
-	dp_aux->drm_aux_deregister = dp_aux_deregister;
-	dp_aux->reconfig = dp_aux_reconfig;
-	dp_aux->abort = dp_aux_abort_transaction;
-	dp_aux->dpcd_updated = dp_aux_dpcd_updated;
-	dp_aux->set_sim_mode = dp_aux_set_sim_mode;
-	dp_aux->aux_switch = dp_aux_configure_aux_switch;
-
-	return dp_aux;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_aux_put(struct dp_aux *dp_aux)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux)
-		return;
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	mutex_destroy(&aux->mutex);
-
-	devm_kfree(aux->dev, aux);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
deleted file mode 100644
index 888c8cc..0000000
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_AUX_H_
-#define _DP_AUX_H_
-
-#include "dp_catalog.h"
-#include "drm_dp_helper.h"
-
-#define DP_STATE_NOTIFICATION_SENT          BIT(0)
-#define DP_STATE_TRAIN_1_STARTED            BIT(1)
-#define DP_STATE_TRAIN_1_SUCCEEDED          BIT(2)
-#define DP_STATE_TRAIN_1_FAILED             BIT(3)
-#define DP_STATE_TRAIN_2_STARTED            BIT(4)
-#define DP_STATE_TRAIN_2_SUCCEEDED          BIT(5)
-#define DP_STATE_TRAIN_2_FAILED             BIT(6)
-#define DP_STATE_CTRL_POWERED_ON            BIT(7)
-#define DP_STATE_CTRL_POWERED_OFF           BIT(8)
-#define DP_STATE_LINK_MAINTENANCE_STARTED   BIT(9)
-#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10)
-#define DP_STATE_LINK_MAINTENANCE_FAILED    BIT(11)
-
-enum dp_aux_error {
-	DP_AUX_ERR_NONE	= 0,
-	DP_AUX_ERR_ADDR	= -1,
-	DP_AUX_ERR_TOUT	= -2,
-	DP_AUX_ERR_NACK	= -3,
-	DP_AUX_ERR_DEFER	= -4,
-	DP_AUX_ERR_NACK_DEFER	= -5,
-	DP_AUX_ERR_PHY	= -6,
-};
-
-struct dp_aux {
-	u32 reg;
-	u32 size;
-	u32 state;
-
-	bool read;
-
-	struct drm_dp_aux *drm_aux;
-	int (*drm_aux_register)(struct dp_aux *aux);
-	void (*drm_aux_deregister)(struct dp_aux *aux);
-	void (*isr)(struct dp_aux *aux);
-	void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);
-	void (*deinit)(struct dp_aux *aux);
-	void (*reconfig)(struct dp_aux *aux);
-	void (*abort)(struct dp_aux *aux);
-	void (*dpcd_updated)(struct dp_aux *aux);
-	void (*set_sim_mode)(struct dp_aux *aux, bool en, u8 *edid, u8 *dpcd);
-	int (*aux_switch)(struct dp_aux *aux, bool enable, int orientation);
-};
-
-struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
-		struct dp_parser *parser, struct device_node *aux_switch);
-void dp_aux_put(struct dp_aux *aux);
-
-#endif /*__DP_AUX_H_*/
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
deleted file mode 100644
index 0d22a6e..0000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ /dev/null
@@ -1,2680 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <drm/drm_dp_helper.h>
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define DP_GET_MSB(x)	(x >> 8)
-#define DP_GET_LSB(x)	(x & 0xff)
-
-#define dp_catalog_get_priv(x) ({ \
-	struct dp_catalog *dp_catalog; \
-	dp_catalog = container_of(x, struct dp_catalog, x); \
-	container_of(dp_catalog, struct dp_catalog_private, \
-				dp_catalog); \
-})
-
-#define DP_INTERRUPT_STATUS1 \
-	(DP_INTR_AUX_I2C_DONE| \
-	DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
-	DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
-	DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
-	DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
-
-#define DP_INTR_MASK1		(DP_INTERRUPT_STATUS1 << 2)
-
-#define DP_INTERRUPT_STATUS2 \
-	(DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
-	DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
-
-#define DP_INTR_MASK2		(DP_INTERRUPT_STATUS2 << 2)
-
-#define DP_INTERRUPT_STATUS5 \
-	(DP_INTR_MST_DP0_VCPF_SENT | DP_INTR_MST_DP1_VCPF_SENT)
-
-#define DP_INTR_MASK5		(DP_INTERRUPT_STATUS5 << 2)
-
-#define dp_catalog_fill_io(x) { \
-	catalog->io.x = parser->get_io(parser, #x); \
-}
-
-#define dp_catalog_fill_io_buf(x) { \
-	parser->get_io_buf(parser, #x); \
-}
-
-static u8 const vm_pre_emphasis[4][4] = {
-	{0x00, 0x0B, 0x12, 0xFF},       /* pe0, 0 db */
-	{0x00, 0x0A, 0x12, 0xFF},       /* pe1, 3.5 db */
-	{0x00, 0x0C, 0xFF, 0xFF},       /* pe2, 6.0 db */
-	{0xFF, 0xFF, 0xFF, 0xFF}        /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not support */
-static u8 const vm_voltage_swing[4][4] = {
-	{0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4v  */
-	{0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
-	{0x18, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */
-	{0xFF, 0xFF, 0xFF, 0xFF}  /* sw1, 1.2 v, optional */
-};
-
-enum dp_flush_bit {
-	DP_PPS_FLUSH,
-	DP_DHDR_FLUSH,
-};
-
-struct dp_catalog_io {
-	struct dp_io_data *dp_ahb;
-	struct dp_io_data *dp_aux;
-	struct dp_io_data *dp_link;
-	struct dp_io_data *dp_p0;
-	struct dp_io_data *dp_phy;
-	struct dp_io_data *dp_ln_tx0;
-	struct dp_io_data *dp_ln_tx1;
-	struct dp_io_data *dp_mmss_cc;
-	struct dp_io_data *dp_pll;
-	struct dp_io_data *usb3_dp_com;
-	struct dp_io_data *hdcp_physical;
-	struct dp_io_data *dp_p1;
-	struct dp_io_data *dp_tcsr;
-};
-
-/* audio related catalog functions */
-struct dp_catalog_private {
-	struct device *dev;
-	struct dp_catalog_io io;
-	struct dp_parser *parser;
-
-	u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
-	struct dp_catalog dp_catalog;
-
-	char exe_mode[SZ_4];
-};
-
-/* aux related catalog functions */
-static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	return dp_read(catalog->exe_mode, io_data, DP_AUX_DATA);
-end:
-	return 0;
-}
-
-static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux)
-{
-	int rc = 0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	dp_write(catalog->exe_mode, io_data, DP_AUX_DATA, aux->data);
-end:
-	return rc;
-}
-
-static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux)
-{
-	int rc = 0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, aux->data);
-end:
-	return rc;
-}
-
-static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read)
-{
-	int rc = 0;
-	u32 data = 0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	if (read) {
-		data = dp_read(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL);
-		data &= ~BIT(9);
-		dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, data);
-	} else {
-		dp_write(catalog->exe_mode, io_data, DP_AUX_TRANS_CTRL, 0);
-	}
-end:
-	return rc;
-}
-
-static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 data = 0;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_phy;
-
-	data = dp_read(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_STATUS);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
-	wmb(); /* make sure 0x1f is written before next write */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
-	wmb(); /* make sure 0x9f is written before next write */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0);
-	wmb(); /* make sure register is cleared */
-}
-
-static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
-{
-	u32 aux_ctrl;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	aux_ctrl = dp_read(catalog->exe_mode, io_data, DP_AUX_CTRL);
-
-	aux_ctrl |= BIT(1);
-	dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl);
-	usleep_range(1000, 1010); /* h/w recommended delay */
-
-	aux_ctrl &= ~BIT(1);
-
-	dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl);
-	wmb(); /* make sure AUX reset is done here */
-}
-
-static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
-{
-	u32 aux_ctrl;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_aux;
-
-	aux_ctrl = dp_read(catalog->exe_mode, io_data, DP_AUX_CTRL);
-
-	if (enable) {
-		aux_ctrl |= BIT(0);
-		dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl);
-		wmb(); /* make sure AUX module is enabled */
-
-		dp_write(catalog->exe_mode, io_data, DP_TIMEOUT_COUNT, 0xffff);
-		dp_write(catalog->exe_mode, io_data, DP_AUX_LIMITS, 0xffff);
-	} else {
-		aux_ctrl &= ~BIT(0);
-		dp_write(catalog->exe_mode, io_data, DP_AUX_CTRL, aux_ctrl);
-	}
-}
-
-static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux,
-		struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type)
-{
-	struct dp_catalog_private *catalog;
-	u32 new_index = 0, current_index = 0;
-	struct dp_io_data *io_data;
-
-	if (!aux || !cfg || (type >= PHY_AUX_CFG_MAX)) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-
-	io_data = catalog->io.dp_phy;
-
-	current_index = cfg[type].current_index;
-	new_index = (current_index + 1) % cfg[type].cfg_cnt;
-	pr_debug("Updating %s from 0x%08x to 0x%08x\n",
-		dp_phy_aux_config_type_to_string(type),
-	cfg[type].lut[current_index], cfg[type].lut[new_index]);
-
-	dp_write(catalog->exe_mode, io_data, cfg[type].offset,
-			cfg[type].lut[new_index]);
-	cfg[type].current_index = new_index;
-}
-
-static void dp_catalog_aux_setup(struct dp_catalog_aux *aux,
-		struct dp_aux_cfg *cfg)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	int i = 0;
-
-	if (!aux || !cfg) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-
-	io_data = catalog->io.dp_phy;
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x65);
-	wmb(); /* make sure PD programming happened */
-
-	/* Turn on BIAS current for PHY/PLL */
-	io_data = catalog->io.dp_pll;
-	dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN,
-			0x1b);
-
-	io_data = catalog->io.dp_phy;
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x02);
-	wmb(); /* make sure PD programming happened */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x7d);
-
-	/* Turn on BIAS current for PHY/PLL */
-	io_data = catalog->io.dp_pll;
-	dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN,
-			0x3f);
-
-	/* DP AUX CFG register programming */
-	io_data = catalog->io.dp_phy;
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++)
-		dp_write(catalog->exe_mode, io_data, cfg[i].offset,
-				cfg[i].lut[cfg[i].current_index]);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
-	wmb(); /* make sure AUX configuration is done before enabling it */
-}
-
-static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
-{
-	u32 ack;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(aux);
-	io_data = catalog->io.dp_ahb;
-
-	aux->isr = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS);
-	aux->isr &= ~DP_INTR_MASK1;
-	ack = aux->isr & DP_INTERRUPT_STATUS1;
-	ack <<= 1;
-	ack |= DP_INTR_MASK1;
-	dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS, ack);
-}
-
-/* controller related catalog functions */
-static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_ahb;
-
-	return dp_read(catalog->exe_mode, io_data, DP_HDCP_STATUS);
-}
-
-static void dp_catalog_panel_setup_vsif_infoframe_sdp(
-		struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct drm_msm_ext_hdr_metadata *hdr;
-	struct dp_io_data *io_data;
-	u32 header, parity, data, mst_offset = 0;
-	u8 buf[SZ_64], off = 0;
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	if (panel->stream_id == DP_STREAM_1)
-		mst_offset = MMSS_DP1_VSCEXT_0 - MMSS_DP_VSCEXT_0;
-
-	catalog = dp_catalog_get_priv(panel);
-	hdr = &panel->hdr_data.hdr_meta;
-	io_data = catalog->io.dp_link;
-
-	/* HEADER BYTE 1 */
-	header = panel->hdr_data.vscext_header_byte1;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_1_BIT)
-			| (parity << PARITY_BYTE_1_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_0 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	/* HEADER BYTE 2 */
-	header = panel->hdr_data.vscext_header_byte2;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_2_BIT)
-			| (parity << PARITY_BYTE_2_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_1 + mst_offset,
-			data);
-
-	/* HEADER BYTE 3 */
-	header = panel->hdr_data.vscext_header_byte3;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_3_BIT)
-			| (parity << PARITY_BYTE_3_BIT));
-	data |= dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_VSCEXT_1 + mst_offset);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_VSCEXT_1 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	print_hex_dump(KERN_DEBUG, "[drm-dp] VSCEXT: ",
-			DUMP_PREFIX_NONE, 16, 4, buf, off, false);
-}
-
-static void dp_catalog_panel_setup_hdr_infoframe_sdp(
-		struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct drm_msm_ext_hdr_metadata *hdr;
-	struct dp_io_data *io_data;
-	u32 header, parity, data, mst_offset = 0;
-	u8 buf[SZ_64], off = 0;
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	if (panel->stream_id == DP_STREAM_1)
-		mst_offset = MMSS_DP1_GENERIC2_0 - MMSS_DP_GENERIC2_0;
-
-	catalog = dp_catalog_get_priv(panel);
-	hdr = &panel->hdr_data.hdr_meta;
-	io_data = catalog->io.dp_link;
-
-	/* HEADER BYTE 1 */
-	header = panel->hdr_data.shdr_header_byte1;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_1_BIT)
-			| (parity << PARITY_BYTE_1_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_0 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	/* HEADER BYTE 2 */
-	header = panel->hdr_data.shdr_header_byte2;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_2_BIT)
-			| (parity << PARITY_BYTE_2_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_1 + mst_offset,
-			data);
-
-	/* HEADER BYTE 3 */
-	header = panel->hdr_data.shdr_header_byte3;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_3_BIT)
-			| (parity << PARITY_BYTE_3_BIT));
-	data |= dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_VSCEXT_1 + mst_offset);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_1 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = panel->hdr_data.version;
-	data |= panel->hdr_data.length << 8;
-	data |= hdr->eotf << 16;
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_2 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->display_primaries_x[0]) |
-		(DP_GET_MSB(hdr->display_primaries_x[0]) << 8) |
-		(DP_GET_LSB(hdr->display_primaries_y[0]) << 16) |
-		(DP_GET_MSB(hdr->display_primaries_y[0]) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_3 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->display_primaries_x[1]) |
-		(DP_GET_MSB(hdr->display_primaries_x[1]) << 8) |
-		(DP_GET_LSB(hdr->display_primaries_y[1]) << 16) |
-		(DP_GET_MSB(hdr->display_primaries_y[1]) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_4 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->display_primaries_x[2]) |
-		(DP_GET_MSB(hdr->display_primaries_x[2]) << 8) |
-		(DP_GET_LSB(hdr->display_primaries_y[2]) << 16) |
-		(DP_GET_MSB(hdr->display_primaries_y[2]) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_5 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->white_point_x) |
-		(DP_GET_MSB(hdr->white_point_x) << 8) |
-		(DP_GET_LSB(hdr->white_point_y) << 16) |
-		(DP_GET_MSB(hdr->white_point_y) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_6 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->max_luminance) |
-		(DP_GET_MSB(hdr->max_luminance) << 8) |
-		(DP_GET_LSB(hdr->min_luminance) << 16) |
-		(DP_GET_MSB(hdr->min_luminance) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_7 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = (DP_GET_LSB(hdr->max_content_light_level) |
-		(DP_GET_MSB(hdr->max_content_light_level) << 8) |
-		(DP_GET_LSB(hdr->max_average_light_level) << 16) |
-		(DP_GET_MSB(hdr->max_average_light_level) << 24));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_8 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = 0;
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC2_9 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	print_hex_dump(KERN_DEBUG, "[drm-dp] HDR: ",
-			DUMP_PREFIX_NONE, 16, 4, buf, off, false);
-}
-
-static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 header, parity, data, mst_offset = 0;
-	u8 bpc, off = 0;
-	u8 buf[SZ_128];
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	if (panel->stream_id == DP_STREAM_1)
-		mst_offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0;
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	/* HEADER BYTE 1 */
-	header = panel->hdr_data.vsc_header_byte1;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_1_BIT)
-			| (parity << PARITY_BYTE_1_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_0 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	/* HEADER BYTE 2 */
-	header = panel->hdr_data.vsc_header_byte2;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_2_BIT)
-			| (parity << PARITY_BYTE_2_BIT));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_1 + mst_offset,
-			data);
-
-	/* HEADER BYTE 3 */
-	header = panel->hdr_data.vsc_header_byte3;
-	parity = dp_header_get_parity(header);
-	data   = ((header << HEADER_BYTE_3_BIT)
-			| (parity << PARITY_BYTE_3_BIT));
-	data |= dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_GENERIC0_1 + mst_offset);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_1 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = 0;
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_2 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_3 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_4 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_5 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	switch (panel->hdr_data.bpc) {
-	default:
-	case 10:
-		bpc = BIT(1);
-		break;
-	case 8:
-		bpc = BIT(0);
-		break;
-	case 6:
-		bpc = 0;
-		break;
-	}
-
-	data = (panel->hdr_data.colorimetry & 0xF) |
-		((panel->hdr_data.pixel_encoding & 0xF) << 4) |
-		(bpc << 8) |
-		((panel->hdr_data.dynamic_range & 0x1) << 15) |
-		((panel->hdr_data.content_type & 0x7) << 16);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_6 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	data = 0;
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_7 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_8 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC0_9 + mst_offset,
-			data);
-	memcpy(buf + off, &data, sizeof(data));
-	off += sizeof(data);
-
-	print_hex_dump(KERN_DEBUG, "[drm-dp] VSC: ",
-			DUMP_PREFIX_NONE, 16, 4, buf, off, false);
-}
-
-static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en,
-		u32 dhdr_max_pkts)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 cfg, cfg2, cfg4, misc;
-	u32 sdp_cfg_off = 0;
-	u32 sdp_cfg2_off = 0;
-	u32 sdp_cfg3_off = 0;
-	u32 sdp_cfg4_off = 0;
-	u32 misc1_misc0_off = 0;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1) {
-		sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG;
-		sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2;
-		sdp_cfg3_off = MMSS_DP1_SDP_CFG3 - MMSS_DP_SDP_CFG3;
-		sdp_cfg4_off = MMSS_DP1_SDP_CFG4 - MMSS_DP_SDP_CFG4;
-		misc1_misc0_off = DP1_MISC1_MISC0 - DP_MISC1_MISC0;
-	}
-
-	cfg = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG + sdp_cfg_off);
-	cfg2 = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG2 + sdp_cfg2_off);
-	misc = dp_read(catalog->exe_mode, io_data,
-				DP_MISC1_MISC0 + misc1_misc0_off);
-
-	if (en) {
-		if (dhdr_max_pkts) {
-			/* VSCEXT_SDP_EN */
-			cfg |= BIT(16);
-			/* DHDR_EN, DHDR_PACKET_LIMIT */
-			cfg4 = (dhdr_max_pkts << 1) | BIT(0);
-			dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG4
-					+ sdp_cfg4_off, cfg4);
-			dp_catalog_panel_setup_vsif_infoframe_sdp(panel);
-		}
-
-		/* GEN0_SDP_EN, GEN2_SDP_EN */
-		cfg |= BIT(17) | BIT(19);
-		dp_write(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG + sdp_cfg_off, cfg);
-
-		/* GENERIC0_SDPSIZE GENERIC2_SDPSIZE */
-		cfg2 |= BIT(16) | BIT(20);
-		dp_write(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2);
-
-		dp_catalog_panel_setup_vsc_sdp(panel);
-		dp_catalog_panel_setup_hdr_infoframe_sdp(panel);
-
-		/* indicates presence of VSC (BIT(6) of MISC1) */
-		misc |= BIT(14);
-
-		if (panel->hdr_data.hdr_meta.eotf)
-			pr_debug("Enabled\n");
-		else
-			pr_debug("Reset\n");
-	} else {
-		/* VSCEXT_SDP_EN, GEN0_SDP_EN */
-		cfg &= ~BIT(16) & ~BIT(17) & ~BIT(19);
-		dp_write(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG + sdp_cfg_off, cfg);
-
-		/* GENERIC0_SDPSIZE GENERIC2_SDPSIZE */
-		cfg2 &= ~BIT(16) & ~BIT(20);
-		dp_write(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2);
-
-		/* DHDR_EN, DHDR_PACKET_LIMIT */
-		cfg4 = 0;
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG4
-				+ sdp_cfg4_off, cfg4);
-
-		/* switch back to MSA */
-		misc &= ~BIT(14);
-
-		pr_debug("Disabled\n");
-	}
-
-	dp_write(catalog->exe_mode, io_data, DP_MISC1_MISC0 + misc1_misc0_off,
-			misc);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off,
-			0x01);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off,
-			0x00);
-}
-
-static void dp_catalog_panel_update_transfer_unit(
-		struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!panel || panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	dp_write(catalog->exe_mode, io_data, DP_VALID_BOUNDARY,
-			panel->valid_boundary);
-	dp_write(catalog->exe_mode, io_data, DP_TU, panel->dp_tu);
-	dp_write(catalog->exe_mode, io_data, DP_VALID_BOUNDARY_2,
-			panel->valid_boundary2);
-}
-
-static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, state);
-	/* make sure to change the hw state */
-	wmb();
-}
-
-static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u8 ln_cnt)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 cfg;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	cfg = dp_read(catalog->exe_mode, io_data, DP_CONFIGURATION_CTRL);
-	cfg &= ~(BIT(4) | BIT(5));
-	cfg |= (ln_cnt - 1) << 4;
-	dp_write(catalog->exe_mode, io_data, DP_CONFIGURATION_CTRL, cfg);
-
-	cfg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-	cfg |= 0x02000000;
-	dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, cfg);
-
-	pr_debug("DP_MAINLINK_CTRL=0x%x\n", cfg);
-}
-
-static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel,
-		u32 cfg)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 strm_reg_off = 0, mainlink_ctrl;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1)
-		strm_reg_off = DP1_CONFIGURATION_CTRL - DP_CONFIGURATION_CTRL;
-
-	pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
-
-	dp_write(catalog->exe_mode, io_data,
-			DP_CONFIGURATION_CTRL + strm_reg_off, cfg);
-
-	mainlink_ctrl = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-
-	if (panel->stream_id == DP_STREAM_0)
-		io_data = catalog->io.dp_p0;
-	else if (panel->stream_id == DP_STREAM_1)
-		io_data = catalog->io.dp_p1;
-
-	if (mainlink_ctrl & BIT(8))
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_ASYNC_FIFO_CONFIG,
-				0x01);
-	else
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_ASYNC_FIFO_CONFIG,
-				0x00);
-}
-
-static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel,
-					bool ack)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 dsc_dto;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	switch (panel->stream_id) {
-	case DP_STREAM_0:
-		io_data = catalog->io.dp_p0;
-		break;
-	case DP_STREAM_1:
-		io_data = catalog->io.dp_p1;
-		break;
-	default:
-		pr_err("invalid stream id\n");
-		return;
-	}
-
-	dsc_dto = dp_read(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO);
-	if (ack)
-		dsc_dto = BIT(1);
-	else
-		dsc_dto &= ~BIT(1);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO, dsc_dto);
-}
-
-static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl,
-						bool flipped, char *lane_map)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	dp_write(catalog->exe_mode, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING,
-			0xe4);
-}
-
-static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
-						u8 ln_pnswap)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 cfg0, cfg1;
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	cfg0 = 0x0a;
-	cfg1 = 0x0a;
-
-	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
-	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
-	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
-	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
-
-	io_data = catalog->io.dp_ln_tx0;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg0);
-
-	io_data = catalog->io.dp_ln_tx1;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg1);
-}
-
-static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
-						bool enable)
-{
-	u32 mainlink_ctrl, reg;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	if (enable) {
-		reg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-		mainlink_ctrl = reg & ~(0x03);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL,
-				mainlink_ctrl);
-		wmb(); /* make sure mainlink is turned off before reset */
-		mainlink_ctrl = reg | 0x02;
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL,
-				mainlink_ctrl);
-		wmb(); /* make sure mainlink entered reset */
-		mainlink_ctrl = reg & ~(0x03);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL,
-				mainlink_ctrl);
-		wmb(); /* make sure mainlink reset done */
-		mainlink_ctrl = reg | 0x01;
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL,
-				mainlink_ctrl);
-		wmb(); /* make sure mainlink turned on */
-	} else {
-		mainlink_ctrl = dp_read(catalog->exe_mode, io_data,
-						DP_MAINLINK_CTRL);
-		mainlink_ctrl &= ~BIT(0);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL,
-				mainlink_ctrl);
-	}
-}
-
-static void dp_catalog_panel_config_misc(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 reg_offset = 0;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1)
-		reg_offset = DP1_MISC1_MISC0 - DP_MISC1_MISC0;
-
-	pr_debug("misc settings = 0x%x\n", panel->misc_val);
-	dp_write(catalog->exe_mode, io_data, DP_MISC1_MISC0 + reg_offset,
-			panel->misc_val);
-}
-
-static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz)
-{
-	u32 pixel_m, pixel_n;
-	u32 mvid, nvid;
-	u32 const nvid_fixed = 0x8000;
-	u32 const link_rate_hbr2 = 540000;
-	u32 const link_rate_hbr3 = 810000;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 strm_reg_off = 0;
-	u32 mvid_reg_off = 0, nvid_reg_off = 0;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_mmss_cc;
-
-	if (panel->stream_id == DP_STREAM_1)
-		strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M;
-
-	pixel_m = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_M + strm_reg_off);
-	pixel_n = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_N + strm_reg_off);
-	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-	mvid = (pixel_m & 0xFFFF) * 5;
-	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-	if (nvid < nvid_fixed) {
-		u32 temp;
-
-		temp = (nvid_fixed / nvid) * nvid;
-		mvid = (nvid_fixed / nvid) * mvid;
-		nvid = temp;
-	}
-
-	pr_debug("rate = %d\n", rate);
-
-	if (panel->widebus_en)
-		mvid <<= 1;
-
-	if (link_rate_hbr2 == rate)
-		nvid *= 2;
-
-	if (link_rate_hbr3 == rate)
-		nvid *= 3;
-
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1) {
-		mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
-		nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
-	}
-
-	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_reg_off,
-			mvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_reg_off,
-			nvid);
-}
-
-static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
-					u32 pattern)
-{
-	int bit, cnt = 10;
-	u32 data;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	bit = 1;
-	bit <<= (pattern - 1);
-	pr_debug("hw: bit=%d train=%d\n", bit, pattern);
-	dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, bit);
-
-	bit = 8;
-	bit <<= (pattern - 1);
-
-	while (cnt--) {
-		data = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY);
-		if (data & bit)
-			break;
-	}
-
-	if (cnt == 0)
-		pr_err("set link_train=%d failed\n", pattern);
-}
-
-static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.usb3_dp_com;
-
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x0a);
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_PHY_MODE_CTRL, 0x02);
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SW_RESET, 0x01);
-	/* make sure usb3 com phy software reset is done */
-	wmb();
-
-	if (!flip) { /* CC1 */
-		dp_write(catalog->exe_mode, io_data, USB3_DP_COM_TYPEC_CTRL,
-				0x02);
-	} else { /* CC2 */
-		dp_write(catalog->exe_mode, io_data, USB3_DP_COM_TYPEC_CTRL,
-				0x03);
-	}
-
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SWI_CTRL, 0x00);
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_SW_RESET, 0x00);
-	/* make sure the software reset is done */
-	wmb();
-
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_POWER_DOWN_CTRL, 0x01);
-	dp_write(catalog->exe_mode, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x00);
-	/* make sure phy is brought out of reset */
-	wmb();
-}
-
-static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
-	bool enable)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-
-	if (panel->stream_id == DP_STREAM_0)
-		io_data = catalog->io.dp_p0;
-	else if (panel->stream_id == DP_STREAM_1)
-		io_data = catalog->io.dp_p1;
-
-	if (!enable) {
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_MAIN_CONTROL,
-				0x0);
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_BIST_ENABLE, 0x0);
-		dp_write(catalog->exe_mode, io_data, MMSS_DP_TIMING_ENGINE_EN,
-				0x0);
-		wmb(); /* ensure Timing generator is turned off */
-		return;
-	}
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_CONFIG, 0x0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_HSYNC_CTL,
-			panel->hsync_ctl);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F0,
-			panel->vsync_period * panel->hsync_period);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0,
-			panel->v_sync_width * panel->hsync_period);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1,
-			0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_HCTL,
-			panel->display_hctl);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_HCTL, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_INTF_DISPLAY_V_START_F0,
-			panel->display_v_start);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_V_END_F0,
-			panel->display_v_end);
-	dp_write(catalog->exe_mode, io_data, MMSS_INTF_DISPLAY_V_START_F1, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_POLARITY_CTL, 0);
-	wmb(); /* ensure TPG registers are programmed */
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x100);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
-	wmb(); /* ensure TPG config is programmed */
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_BIST_ENABLE, 0x1);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x1);
-	wmb(); /* ensure Timing generator is turned on */
-}
-
-static void dp_catalog_panel_dsc_cfg(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 reg, offset;
-	int i;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-
-	if (panel->stream_id == DP_STREAM_0)
-		io_data = catalog->io.dp_p0;
-	else
-		io_data = catalog->io.dp_p1;
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO_COUNT,
-			panel->dsc.dto_count);
-
-	reg = dp_read(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO);
-	if (panel->dsc.dto_en) {
-		reg |= BIT(0);
-		reg |= (panel->dsc.dto_n << 8);
-		reg |= (panel->dsc.dto_d << 16);
-	}
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_DSC_DTO, reg);
-
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_0)
-		offset = 0;
-	else
-		offset = DP1_COMPRESSION_MODE_CTRL - DP_COMPRESSION_MODE_CTRL;
-
-	dp_write(catalog->exe_mode, io_data, DP_PPS_HB_0_3 + offset, 0x7F1000);
-	dp_write(catalog->exe_mode, io_data, DP_PPS_PB_0_3 + offset, 0xA22300);
-
-	for (i = 0; i < panel->dsc.parity_word_len; i++)
-		dp_write(catalog->exe_mode, io_data,
-				DP_PPS_PB_4_7 + (i << 2) + offset,
-				panel->dsc.parity_word[i]);
-
-	for (i = 0; i < panel->dsc.pps_word_len; i++)
-		dp_write(catalog->exe_mode, io_data,
-				DP_PPS_PPS_0_3 + (i << 2) + offset,
-				panel->dsc.pps_word[i]);
-
-	reg = 0;
-	if (panel->dsc.dsc_en) {
-		reg = BIT(0);
-		reg |= (panel->dsc.eol_byte_num << 3);
-		reg |= (panel->dsc.slice_per_pkt << 5);
-		reg |= (panel->dsc.bytes_per_pkt << 16);
-		reg |= (panel->dsc.be_in_lane << 10);
-	}
-	dp_write(catalog->exe_mode, io_data,
-			DP_COMPRESSION_MODE_CTRL + offset, reg);
-
-	pr_debug("compression:0x%x for stream:%d\n",
-			reg, panel->stream_id);
-}
-
-static void dp_catalog_panel_dp_flush(struct dp_catalog_panel *panel,
-		enum dp_flush_bit flush_bit)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 dp_flush, offset;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_0)
-		offset = 0;
-	else
-		offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH;
-
-	dp_flush = dp_read(catalog->exe_mode, io_data, MMSS_DP_FLUSH + offset);
-	dp_flush |= BIT(flush_bit);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_FLUSH + offset, dp_flush);
-}
-
-static void dp_catalog_panel_pps_flush(struct dp_catalog_panel *panel)
-{
-	dp_catalog_panel_dp_flush(panel, DP_PPS_FLUSH);
-	pr_debug("pps flush for stream:%d\n", panel->stream_id);
-}
-
-static void dp_catalog_panel_dhdr_flush(struct dp_catalog_panel *panel)
-{
-	dp_catalog_panel_dp_flush(panel, DP_DHDR_FLUSH);
-	pr_debug("dhdr flush for stream:%d\n", panel->stream_id);
-}
-
-
-static bool dp_catalog_panel_dhdr_busy(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 dp_flush, offset;
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return false;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_0)
-		offset = 0;
-	else
-		offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH;
-
-	dp_flush = dp_read(catalog->exe_mode, io_data, MMSS_DP_FLUSH + offset);
-
-	return dp_flush & BIT(DP_DHDR_FLUSH) ? true : false;
-}
-
-static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
-{
-	u32 sw_reset;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_ahb;
-
-	sw_reset = dp_read(catalog->exe_mode, io_data, DP_SW_RESET);
-
-	sw_reset |= BIT(0);
-	dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
-	usleep_range(1000, 1010); /* h/w recommended delay */
-
-	sw_reset &= ~BIT(0);
-	dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
-}
-
-static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
-{
-	u32 data;
-	int cnt = 10;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	while (--cnt) {
-		/* DP_MAINLINK_READY */
-		data = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY);
-		if (data & BIT(0))
-			return true;
-
-		usleep_range(1000, 1010); /* 1ms wait before next reg read */
-	}
-	pr_err("mainlink not ready\n");
-end:
-	return false;
-}
-
-static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
-						bool enable)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_ahb;
-
-	if (enable) {
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS,
-				DP_INTR_MASK1);
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2,
-				DP_INTR_MASK2);
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS5,
-				DP_INTR_MASK5);
-	} else {
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS, 0x00);
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2, 0x00);
-		dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS5, 0x00);
-	}
-}
-
-static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl)
-{
-	u32 ack = 0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_ahb;
-
-	ctrl->isr = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS2);
-	ctrl->isr &= ~DP_INTR_MASK2;
-	ack = ctrl->isr & DP_INTERRUPT_STATUS2;
-	ack <<= 1;
-	ack |= DP_INTR_MASK2;
-	dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS2, ack);
-
-	ctrl->isr5 = dp_read(catalog->exe_mode, io_data, DP_INTR_STATUS5);
-	ctrl->isr5 &= ~DP_INTR_MASK5;
-	ack = ctrl->isr5 & DP_INTERRUPT_STATUS5;
-	ack <<= 1;
-	ack |= DP_INTR_MASK5;
-	dp_write(catalog->exe_mode, io_data, DP_INTR_STATUS5, ack);
-}
-
-static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_ahb;
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
-	usleep_range(1000, 1010); /* h/w recommended delay */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x0);
-	wmb(); /* make sure PHY reset done */
-}
-
-static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl,
-		bool flipped, u8 ln_cnt)
-{
-	u32 info = 0x0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u8 orientation = BIT(!!flipped);
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_phy;
-
-	info |= (ln_cnt & 0x0F);
-	info |= ((orientation & 0x0F) << 4);
-	pr_debug("Shared Info = 0x%x\n", info);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_SPARE0, info);
-}
-
-static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl,
-		u8 v_level, u8 p_level)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u8 value0, value1;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	pr_debug("hw: v=%d p=%d\n", v_level, p_level);
-
-	value0 = vm_voltage_swing[v_level][p_level];
-	value1 = vm_pre_emphasis[v_level][p_level];
-
-	/* program default setting first */
-
-	io_data = catalog->io.dp_ln_tx0;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, 0x2A);
-	dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
-
-	io_data = catalog->io.dp_ln_tx1;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, 0x2A);
-	dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
-
-	/* Enable MUX to use Cursor values from these registers */
-	value0 |= BIT(5);
-	value1 |= BIT(5);
-
-	/* Configure host and panel only if both values are allowed */
-	if (value0 != 0xFF && value1 != 0xFF) {
-		io_data = catalog->io.dp_ln_tx0;
-		dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, value0);
-		dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
-				value1);
-
-		io_data = catalog->io.dp_ln_tx1;
-		dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL, value0);
-		dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
-				value1);
-
-		pr_debug("hw: vx_value=0x%x px_value=0x%x\n",
-			value0, value1);
-	} else {
-		pr_err("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n",
-			v_level, value0, p_level, value1);
-	}
-}
-
-static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl,
-			u32 pattern)
-{
-	struct dp_catalog_private *catalog;
-	u32 value = 0x0;
-	struct dp_io_data *io_data = NULL;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x0);
-
-	switch (pattern) {
-	case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING:
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x1);
-		break;
-	case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
-		value &= ~(1 << 16);
-		dp_write(catalog->exe_mode, io_data,
-				DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
-		value |= 0xFC;
-		dp_write(catalog->exe_mode, io_data,
-				DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS, 0x2);
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x10);
-		break;
-	case DP_TEST_PHY_PATTERN_PRBS7:
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x20);
-		break;
-	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x40);
-		/* 00111110000011111000001111100000 */
-		dp_write(catalog->exe_mode, io_data,
-				DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0);
-		/* 00001111100000111110000011111000 */
-		dp_write(catalog->exe_mode, io_data,
-				DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8);
-		/* 1111100000111110 */
-		dp_write(catalog->exe_mode, io_data,
-				DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E);
-		break;
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
-		value = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-		value &= ~BIT(4);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, value);
-
-		value = BIT(16);
-		dp_write(catalog->exe_mode, io_data,
-				DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
-		value |= 0xFC;
-		dp_write(catalog->exe_mode, io_data,
-				DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS, 0x2);
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x10);
-
-		value = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-		value |= BIT(0);
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, value);
-		break;
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
-		dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, 0x11);
-		dp_write(catalog->exe_mode, io_data, DP_STATE_CTRL, 0x8);
-		break;
-	default:
-		pr_debug("No valid test pattern requested: 0x%x\n", pattern);
-		return;
-	}
-
-	/* Make sure the test pattern is programmed in the hardware */
-	wmb();
-}
-
-static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return 0;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	return dp_read(catalog->exe_mode, io_data, DP_MAINLINK_READY);
-}
-
-static void dp_catalog_ctrl_fec_config(struct dp_catalog_ctrl *ctrl,
-		bool enable)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 reg;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-	io_data = catalog->io.dp_link;
-
-	reg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-
-	/*
-	 * fec_en = BIT(12)
-	 * fec_seq_mode = BIT(22)
-	 * sde_flush = BIT(23) | BIT(24)
-	 * fb_boundary_sel = BIT(25)
-	 */
-	if (enable)
-		reg |= BIT(12) | BIT(22) | BIT(23) | BIT(24) | BIT(25);
-	else
-		reg &= ~BIT(12);
-
-	dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, reg);
-	/* make sure mainlink configuration is updated with fec sequence */
-	wmb();
-}
-
-static int dp_catalog_reg_dump(struct dp_catalog *dp_catalog,
-		char *name, u8 **out_buf, u32 *out_buf_len)
-{
-	int ret = 0;
-	u8 *buf;
-	u32 len;
-	struct dp_io_data *io_data;
-	struct dp_catalog_private *catalog;
-	struct dp_parser *parser;
-
-	if (!dp_catalog) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	catalog = container_of(dp_catalog, struct dp_catalog_private,
-		dp_catalog);
-
-	parser = catalog->parser;
-	parser->get_io_buf(parser, name);
-	io_data = parser->get_io(parser, name);
-	if (!io_data) {
-		pr_err("IO %s not found\n", name);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	buf = io_data->buf;
-	len = io_data->io.len;
-
-	if (!buf || !len) {
-		pr_err("no buffer available\n");
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	if (!strcmp(catalog->exe_mode, "hw") ||
-	    !strcmp(catalog->exe_mode, "all")) {
-		u32 i, data;
-		u32 const rowsize = 4;
-		void __iomem *addr = io_data->io.base;
-
-		memset(buf, 0, len);
-
-		for (i = 0; i < len / rowsize; i++) {
-			data = readl_relaxed(addr);
-			memcpy(buf + (rowsize * i), &data, sizeof(u32));
-
-			addr += rowsize;
-		}
-	}
-
-	*out_buf = buf;
-	*out_buf_len = len;
-end:
-	if (ret)
-		parser->clear_io_buf(parser);
-
-	return ret;
-}
-
-static void dp_catalog_ctrl_mst_config(struct dp_catalog_ctrl *ctrl,
-		bool enable)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 reg;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	reg = dp_read(catalog->exe_mode, io_data, DP_MAINLINK_CTRL);
-	if (enable)
-		reg |= (0x04000100);
-	else
-		reg &= ~(0x04000100);
-
-	dp_write(catalog->exe_mode, io_data, DP_MAINLINK_CTRL, reg);
-	/* make sure mainlink MST configuration is updated */
-	wmb();
-}
-
-static void dp_catalog_ctrl_trigger_act(struct dp_catalog_ctrl *ctrl)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	dp_write(catalog->exe_mode, io_data, DP_MST_ACT, 0x1);
-	/* make sure ACT signal is performed */
-	wmb();
-}
-
-static void dp_catalog_ctrl_read_act_complete_sts(struct dp_catalog_ctrl *ctrl,
-		bool *sts)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 reg;
-
-	if (!ctrl || !sts) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	*sts = false;
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	reg = dp_read(catalog->exe_mode, io_data, DP_MST_ACT);
-
-	if (!reg)
-		*sts = true;
-}
-
-static void dp_catalog_ctrl_channel_alloc(struct dp_catalog_ctrl *ctrl,
-			u32 ch, u32 ch_start_slot, u32 tot_slot_cnt)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 i, slot_reg_1, slot_reg_2, slot;
-	u32 reg_off = 0;
-	int const num_slots_per_reg = 32;
-
-	if (!ctrl || ch >= DP_STREAM_MAX) {
-		pr_err("invalid input. ch %d\n", ch);
-		return;
-	}
-
-	if (ch_start_slot > DP_MAX_TIME_SLOTS ||
-			(ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) {
-		pr_err("invalid slots start %d, tot %d\n",
-			ch_start_slot, tot_slot_cnt);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	pr_debug("ch %d, start_slot %d, tot_slot %d\n",
-			ch, ch_start_slot, tot_slot_cnt);
-
-	if (ch == DP_STREAM_1)
-		reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32;
-
-	slot_reg_1 = 0;
-	slot_reg_2 = 0;
-
-	if (ch_start_slot && tot_slot_cnt) {
-		ch_start_slot--;
-		for (i = 0; i < tot_slot_cnt; i++) {
-			if (ch_start_slot < num_slots_per_reg) {
-				slot_reg_1 |= BIT(ch_start_slot);
-			} else {
-				slot = ch_start_slot - num_slots_per_reg;
-				slot_reg_2 |= BIT(slot);
-			}
-			ch_start_slot++;
-		}
-	}
-
-	pr_debug("ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch,
-			slot_reg_1, slot_reg_2);
-
-	dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_1_32 + reg_off,
-			slot_reg_1);
-	dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_33_63 + reg_off,
-			slot_reg_2);
-}
-
-static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl,
-			u32 ch, u32 ch_start_slot, u32 tot_slot_cnt)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 i, slot_reg_1, slot_reg_2, slot;
-	u32 reg_off = 0;
-
-	if (!ctrl || ch >= DP_STREAM_MAX) {
-		pr_err("invalid input. ch %d\n", ch);
-		return;
-	}
-
-	if (ch_start_slot > DP_MAX_TIME_SLOTS ||
-			(ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) {
-		pr_err("invalid slots start %d, tot %d\n",
-			ch_start_slot, tot_slot_cnt);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	pr_debug("dealloc ch %d, start_slot %d, tot_slot %d\n",
-			ch, ch_start_slot, tot_slot_cnt);
-
-	if (ch == DP_STREAM_1)
-		reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32;
-
-	slot_reg_1 = dp_read(catalog->exe_mode, io_data,
-				DP_DP0_TIMESLOT_1_32 + reg_off);
-	slot_reg_2 = dp_read(catalog->exe_mode, io_data,
-				DP_DP0_TIMESLOT_33_63 + reg_off);
-
-	ch_start_slot = ch_start_slot - 1;
-	for (i = 0; i < tot_slot_cnt; i++) {
-		if (ch_start_slot < 33) {
-			slot_reg_1 &= ~BIT(ch_start_slot);
-		} else {
-			slot = ch_start_slot - 33;
-			slot_reg_2 &= ~BIT(slot);
-		}
-		ch_start_slot++;
-	}
-
-	pr_debug("dealloc ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch,
-			slot_reg_1, slot_reg_2);
-
-	dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_1_32 + reg_off,
-			slot_reg_1);
-	dp_write(catalog->exe_mode, io_data, DP_DP0_TIMESLOT_33_63 + reg_off,
-			slot_reg_2);
-}
-
-static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch,
-		u32 x_int, u32 y_frac_enum)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data = NULL;
-	u32 rg, reg_off = 0;
-
-	if (!ctrl || ch >= DP_STREAM_MAX) {
-		pr_err("invalid input. ch %d\n", ch);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data = catalog->io.dp_link;
-
-	rg = y_frac_enum;
-	rg |= (x_int << 16);
-
-	pr_debug("ch: %d x_int:%d y_frac_enum:%d rg:%d\n", ch, x_int,
-			y_frac_enum, rg);
-
-	if (ch == DP_STREAM_1)
-		reg_off = DP_DP1_RG - DP_DP0_RG;
-
-	dp_write(catalog->exe_mode, io_data, DP_DP0_RG + reg_off, rg);
-}
-
-static void dp_catalog_ctrl_mainlink_levels(struct dp_catalog_ctrl *ctrl,
-		u8 lane_cnt)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 mainlink_levels, safe_to_exit_level = 14;
-
-	catalog = dp_catalog_get_priv(ctrl);
-
-	io_data   = catalog->io.dp_link;
-
-	switch (lane_cnt) {
-	case 1:
-		safe_to_exit_level = 14;
-		break;
-	case 2:
-		safe_to_exit_level = 8;
-		break;
-	case 4:
-		safe_to_exit_level = 5;
-		break;
-	default:
-		pr_debug("setting the default safe_to_exit_level = %u\n",
-				safe_to_exit_level);
-		break;
-	}
-
-	mainlink_levels = dp_read(catalog->exe_mode, io_data,
-					DP_MAINLINK_LEVELS);
-	mainlink_levels &= 0xFE0;
-	mainlink_levels |= safe_to_exit_level;
-
-	pr_debug("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
-			mainlink_levels, safe_to_exit_level);
-
-	dp_write(catalog->exe_mode, io_data, DP_MAINLINK_LEVELS,
-			mainlink_levels);
-}
-
-
-/* panel related catalog functions */
-static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 offset = 0, reg;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		goto end;
-	}
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1)
-		offset = DP1_TOTAL_HOR_VER - DP_TOTAL_HOR_VER;
-
-	dp_write(catalog->exe_mode, io_data, DP_TOTAL_HOR_VER + offset,
-			panel->total);
-	dp_write(catalog->exe_mode, io_data,
-			DP_START_HOR_VER_FROM_SYNC + offset, panel->sync_start);
-	dp_write(catalog->exe_mode, io_data,
-		DP_HSYNC_VSYNC_WIDTH_POLARITY + offset, panel->width_blanking);
-	dp_write(catalog->exe_mode, io_data, DP_ACTIVE_HOR_VER + offset,
-			panel->dp_active);
-
-	if (panel->stream_id == DP_STREAM_0)
-		io_data = catalog->io.dp_p0;
-	else
-		io_data = catalog->io.dp_p1;
-
-	reg = dp_read(catalog->exe_mode, io_data, MMSS_DP_INTF_CONFIG);
-
-	if (panel->widebus_en)
-		reg |= BIT(4);
-	else
-		reg &= ~BIT(4);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_INTF_CONFIG, reg);
-end:
-	return 0;
-}
-
-static void dp_catalog_hpd_config_hpd(struct dp_catalog_hpd *hpd, bool en)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv(hpd);
-	io_data = catalog->io.dp_aux;
-
-	if (en) {
-		u32 reftimer = dp_read(catalog->exe_mode, io_data,
-						DP_DP_HPD_REFTIMER);
-
-		/* Arm only the UNPLUG and HPD_IRQ interrupts */
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_INT_ACK, 0xF);
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_INT_MASK, 0xA);
-
-		/* Enable REFTIMER to count 1ms */
-		reftimer |= BIT(16);
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_REFTIMER,
-				reftimer);
-
-		 /* Connect_time is 250us & disconnect_time is 2ms */
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_EVENT_TIME_0,
-				0x3E800FA);
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_EVENT_TIME_1,
-				0x1F407D0);
-
-		/* Enable HPD */
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_CTRL, 0x1);
-
-	} else {
-		/* Disable HPD */
-		dp_write(catalog->exe_mode, io_data, DP_DP_HPD_CTRL, 0x0);
-	}
-}
-
-static u32 dp_catalog_hpd_get_interrupt(struct dp_catalog_hpd *hpd)
-{
-	u32 isr = 0;
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-
-	if (!hpd) {
-		pr_err("invalid input\n");
-		return isr;
-	}
-
-	catalog = dp_catalog_get_priv(hpd);
-
-	io_data = catalog->io.dp_aux;
-	isr = dp_read(catalog->exe_mode, io_data, DP_DP_HPD_INT_STATUS);
-	dp_write(catalog->exe_mode, io_data, DP_DP_HPD_INT_ACK, (isr & 0xf));
-
-	return isr;
-}
-
-static void dp_catalog_audio_init(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
-		{
-			MMSS_DP_AUDIO_STREAM_0,
-			MMSS_DP_AUDIO_STREAM_1,
-			MMSS_DP_AUDIO_STREAM_1,
-		},
-		{
-			MMSS_DP_AUDIO_TIMESTAMP_0,
-			MMSS_DP_AUDIO_TIMESTAMP_1,
-			MMSS_DP_AUDIO_TIMESTAMP_1,
-		},
-		{
-			MMSS_DP_AUDIO_INFOFRAME_0,
-			MMSS_DP_AUDIO_INFOFRAME_1,
-			MMSS_DP_AUDIO_INFOFRAME_1,
-		},
-		{
-			MMSS_DP_AUDIO_COPYMANAGEMENT_0,
-			MMSS_DP_AUDIO_COPYMANAGEMENT_1,
-			MMSS_DP_AUDIO_COPYMANAGEMENT_1,
-		},
-		{
-			MMSS_DP_AUDIO_ISRC_0,
-			MMSS_DP_AUDIO_ISRC_1,
-			MMSS_DP_AUDIO_ISRC_1,
-		},
-	};
-
-	if (!audio)
-		return;
-
-	catalog = dp_catalog_get_priv(audio);
-
-	catalog->audio_map = sdp_map;
-}
-
-static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 sdp_cfg = 0, sdp_cfg_off = 0;
-	u32 sdp_cfg2 = 0, sdp_cfg2_off = 0;
-
-	if (!audio)
-		return;
-
-	if (audio->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", audio->stream_id);
-		return;
-	}
-
-	if (audio->stream_id == DP_STREAM_1) {
-		sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG;
-		sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2;
-	}
-
-	catalog = dp_catalog_get_priv(audio);
-	io_data = catalog->io.dp_link;
-
-	sdp_cfg = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG + sdp_cfg_off);
-
-	/* AUDIO_TIMESTAMP_SDP_EN */
-	sdp_cfg |= BIT(1);
-	/* AUDIO_STREAM_SDP_EN */
-	sdp_cfg |= BIT(2);
-	/* AUDIO_COPY_MANAGEMENT_SDP_EN */
-	sdp_cfg |= BIT(5);
-	/* AUDIO_ISRC_SDP_EN  */
-	sdp_cfg |= BIT(6);
-	/* AUDIO_INFOFRAME_SDP_EN  */
-	sdp_cfg |= BIT(20);
-
-	pr_debug("sdp_cfg = 0x%x\n", sdp_cfg);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off,
-			sdp_cfg);
-
-	sdp_cfg2 = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG2 + sdp_cfg_off);
-	/* IFRM_REGSRC -> Do not use reg values */
-	sdp_cfg2 &= ~BIT(0);
-	/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
-	sdp_cfg2 &= ~BIT(1);
-
-	pr_debug("sdp_cfg2 = 0x%x\n", sdp_cfg2);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg_off,
-			sdp_cfg2);
-}
-
-static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
-	struct dp_io_data *io_data;
-	enum dp_catalog_audio_sdp_type sdp;
-	enum dp_catalog_audio_header_type header;
-
-	if (!audio)
-		return;
-
-	catalog = dp_catalog_get_priv(audio);
-
-	io_data    = catalog->io.dp_link;
-	sdp_map = catalog->audio_map;
-	sdp     = audio->sdp_type;
-	header  = audio->sdp_header;
-
-	audio->data = dp_read(catalog->exe_mode, io_data, sdp_map[sdp][header]);
-}
-
-static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
-	struct dp_io_data *io_data;
-	enum dp_catalog_audio_sdp_type sdp;
-	enum dp_catalog_audio_header_type header;
-	u32 data;
-
-	if (!audio)
-		return;
-
-	catalog = dp_catalog_get_priv(audio);
-
-	io_data    = catalog->io.dp_link;
-	sdp_map = catalog->audio_map;
-	sdp     = audio->sdp_type;
-	header  = audio->sdp_header;
-	data    = audio->data;
-
-	dp_write(catalog->exe_mode, io_data, sdp_map[sdp][header], data);
-}
-
-static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 acr_ctrl, select;
-
-	catalog = dp_catalog_get_priv(audio);
-
-	select = audio->data;
-	io_data   = catalog->io.dp_link;
-
-	acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
-
-	pr_debug("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
-}
-
-static void dp_catalog_audio_enable(struct dp_catalog_audio *audio)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	bool enable;
-	u32 audio_ctrl;
-
-	catalog = dp_catalog_get_priv(audio);
-
-	io_data = catalog->io.dp_link;
-	enable = !!audio->data;
-
-	audio_ctrl = dp_read(catalog->exe_mode, io_data, MMSS_DP_AUDIO_CFG);
-
-	if (enable)
-		audio_ctrl |= BIT(0);
-	else
-		audio_ctrl &= ~BIT(0);
-
-	pr_debug("dp_audio_cfg = 0x%x\n", audio_ctrl);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_AUDIO_CFG, audio_ctrl);
-
-	/* make sure audio engine is disabled */
-	wmb();
-}
-
-static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 value, new_value, offset = 0;
-	u8 parity_byte;
-
-	if (!panel || panel->stream_id >= DP_STREAM_MAX)
-		return;
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1)
-		offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0;
-
-	/* Config header and parity byte 1 */
-	value = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_GENERIC1_0 + offset);
-
-	new_value = 0x83;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_1_BIT)
-			| (parity_byte << PARITY_BYTE_1_BIT));
-	pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_0 + offset,
-			value);
-
-	/* Config header and parity byte 2 */
-	value = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_GENERIC1_1 + offset);
-
-	new_value = 0x1b;
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_2_BIT)
-			| (parity_byte << PARITY_BYTE_2_BIT));
-	pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
-			value, parity_byte);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_1 + offset,
-			value);
-
-	/* Config header and parity byte 3 */
-	value = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_GENERIC1_1 + offset);
-
-	new_value = (0x0 | (0x12 << 2));
-	parity_byte = dp_header_get_parity(new_value);
-	value |= ((new_value << HEADER_BYTE_3_BIT)
-			| (parity_byte << PARITY_BYTE_3_BIT));
-	pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
-			new_value, parity_byte);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_1 + offset,
-			value);
-}
-
-static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel)
-{
-	struct dp_catalog_private *catalog;
-	struct dp_io_data *io_data;
-	u32 spd_cfg = 0, spd_cfg2 = 0;
-	u8 *vendor = NULL, *product = NULL;
-	u32 offset = 0;
-	u32 sdp_cfg_off = 0;
-	u32 sdp_cfg2_off = 0;
-	u32 sdp_cfg3_off = 0;
-
-	/*
-	 * Source Device Information
-	 * 00h unknown
-	 * 01h Digital STB
-	 * 02h DVD
-	 * 03h D-VHS
-	 * 04h HDD Video
-	 * 05h DVC
-	 * 06h DSC
-	 * 07h Video CD
-	 * 08h Game
-	 * 09h PC general
-	 * 0ah Bluray-Disc
-	 * 0bh Super Audio CD
-	 * 0ch HD DVD
-	 * 0dh PMP
-	 * 0eh-ffh reserved
-	 */
-	u32 device_type = 0;
-
-	if (!panel || panel->stream_id >= DP_STREAM_MAX)
-		return;
-
-	catalog = dp_catalog_get_priv(panel);
-	io_data = catalog->io.dp_link;
-
-	if (panel->stream_id == DP_STREAM_1)
-		offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0;
-
-	dp_catalog_config_spd_header(panel);
-
-	vendor = panel->spd_vendor_name;
-	product = panel->spd_product_description;
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_2 + offset,
-			((vendor[0] & 0x7f) |
-			((vendor[1] & 0x7f) << 8) |
-			((vendor[2] & 0x7f) << 16) |
-			((vendor[3] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_3 + offset,
-			((vendor[4] & 0x7f) |
-			((vendor[5] & 0x7f) << 8) |
-			((vendor[6] & 0x7f) << 16) |
-			((vendor[7] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_4 + offset,
-			((product[0] & 0x7f) |
-			((product[1] & 0x7f) << 8) |
-			((product[2] & 0x7f) << 16) |
-			((product[3] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_5 + offset,
-			((product[4] & 0x7f) |
-			((product[5] & 0x7f) << 8) |
-			((product[6] & 0x7f) << 16) |
-			((product[7] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_6 + offset,
-			((product[8] & 0x7f) |
-			((product[9] & 0x7f) << 8) |
-			((product[10] & 0x7f) << 16) |
-			((product[11] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_7 + offset,
-			((product[12] & 0x7f) |
-			((product[13] & 0x7f) << 8) |
-			((product[14] & 0x7f) << 16) |
-			((product[15] & 0x7f) << 24)));
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_8 + offset,
-			device_type);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_GENERIC1_9 + offset, 0x00);
-
-	if (panel->stream_id == DP_STREAM_1) {
-		sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG;
-		sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2;
-		sdp_cfg3_off = MMSS_DP1_SDP_CFG3 - MMSS_DP_SDP_CFG3;
-	}
-
-	spd_cfg = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG + sdp_cfg_off);
-	/* GENERIC1_SDP for SPD Infoframe */
-	spd_cfg |= BIT(18);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG + sdp_cfg_off,
-			spd_cfg);
-
-	spd_cfg2 = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_SDP_CFG2 + sdp_cfg2_off);
-	/* 28 data bytes for SPD Infoframe with GENERIC1 set */
-	spd_cfg2 |= BIT(17);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG2 + sdp_cfg2_off,
-			spd_cfg2);
-
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off,
-				0x1);
-	dp_write(catalog->exe_mode, io_data, MMSS_DP_SDP_CFG3 + sdp_cfg3_off,
-				0x0);
-}
-
-static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog)
-{
-	struct dp_parser *parser = catalog->parser;
-
-	dp_catalog_fill_io_buf(dp_ahb);
-	dp_catalog_fill_io_buf(dp_aux);
-	dp_catalog_fill_io_buf(dp_link);
-	dp_catalog_fill_io_buf(dp_p0);
-	dp_catalog_fill_io_buf(dp_phy);
-	dp_catalog_fill_io_buf(dp_ln_tx0);
-	dp_catalog_fill_io_buf(dp_ln_tx1);
-	dp_catalog_fill_io_buf(dp_pll);
-	dp_catalog_fill_io_buf(usb3_dp_com);
-	dp_catalog_fill_io_buf(dp_mmss_cc);
-	dp_catalog_fill_io_buf(hdcp_physical);
-	dp_catalog_fill_io_buf(dp_p1);
-	dp_catalog_fill_io_buf(dp_tcsr);
-}
-
-static void dp_catalog_get_io(struct dp_catalog_private *catalog)
-{
-	struct dp_parser *parser = catalog->parser;
-
-	dp_catalog_fill_io(dp_ahb);
-	dp_catalog_fill_io(dp_aux);
-	dp_catalog_fill_io(dp_link);
-	dp_catalog_fill_io(dp_p0);
-	dp_catalog_fill_io(dp_phy);
-	dp_catalog_fill_io(dp_ln_tx0);
-	dp_catalog_fill_io(dp_ln_tx1);
-	dp_catalog_fill_io(dp_pll);
-	dp_catalog_fill_io(usb3_dp_com);
-	dp_catalog_fill_io(dp_mmss_cc);
-	dp_catalog_fill_io(hdcp_physical);
-	dp_catalog_fill_io(dp_p1);
-	dp_catalog_fill_io(dp_tcsr);
-}
-
-static void dp_catalog_set_exe_mode(struct dp_catalog *dp_catalog, char *mode)
-{
-	struct dp_catalog_private *catalog;
-
-	if (!dp_catalog) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = container_of(dp_catalog, struct dp_catalog_private,
-		dp_catalog);
-
-	strlcpy(catalog->exe_mode, mode, sizeof(catalog->exe_mode));
-
-	if (!strcmp(catalog->exe_mode, "hw"))
-		catalog->parser->clear_io_buf(catalog->parser);
-	else
-		dp_catalog_get_io_buf(catalog);
-
-	if (dp_catalog->priv.data && dp_catalog->priv.put)
-		dp_catalog->priv.set_exe_mode(dp_catalog, mode);
-}
-
-static int dp_catalog_init(struct device *dev, struct dp_catalog *catalog,
-			struct dp_parser *parser)
-{
-	int rc = 0;
-	struct dp_catalog_private *catalog_priv;
-
-	catalog_priv = container_of(catalog, struct dp_catalog_private,
-				dp_catalog);
-
-	if (parser->hw_cfg.phy_version == DP_PHY_VERSION_4_2_0)
-		rc = dp_catalog_get_v420(dev, catalog, &catalog_priv->io);
-	else if (parser->hw_cfg.phy_version == DP_PHY_VERSION_2_0_0)
-		rc = dp_catalog_get_v200(dev, catalog, &catalog_priv->io);
-
-	return rc;
-}
-
-void dp_catalog_put(struct dp_catalog *dp_catalog)
-{
-	struct dp_catalog_private *catalog;
-
-	if (!dp_catalog)
-		return;
-
-	catalog = container_of(dp_catalog, struct dp_catalog_private,
-				dp_catalog);
-
-	if (dp_catalog->priv.data && dp_catalog->priv.put)
-		dp_catalog->priv.put(dp_catalog);
-
-	catalog->parser->clear_io_buf(catalog->parser);
-	devm_kfree(catalog->dev, catalog);
-}
-
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser)
-{
-	int rc = 0;
-	struct dp_catalog *dp_catalog;
-	struct dp_catalog_private *catalog;
-	struct dp_catalog_aux aux = {
-		.read_data     = dp_catalog_aux_read_data,
-		.write_data    = dp_catalog_aux_write_data,
-		.write_trans   = dp_catalog_aux_write_trans,
-		.clear_trans   = dp_catalog_aux_clear_trans,
-		.reset         = dp_catalog_aux_reset,
-		.update_aux_cfg = dp_catalog_aux_update_cfg,
-		.enable        = dp_catalog_aux_enable,
-		.setup         = dp_catalog_aux_setup,
-		.get_irq       = dp_catalog_aux_get_irq,
-		.clear_hw_interrupts = dp_catalog_aux_clear_hw_interrupts,
-	};
-	struct dp_catalog_ctrl ctrl = {
-		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
-		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
-		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
-		.lane_pnswap    = dp_catalog_ctrl_lane_pnswap,
-		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
-		.set_pattern    = dp_catalog_ctrl_set_pattern,
-		.reset          = dp_catalog_ctrl_reset,
-		.usb_reset      = dp_catalog_ctrl_usb_reset,
-		.mainlink_ready = dp_catalog_ctrl_mainlink_ready,
-		.enable_irq     = dp_catalog_ctrl_enable_irq,
-		.phy_reset      = dp_catalog_ctrl_phy_reset,
-		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
-		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
-		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
-		.read_hdcp_status     = dp_catalog_ctrl_read_hdcp_status,
-		.send_phy_pattern    = dp_catalog_ctrl_send_phy_pattern,
-		.read_phy_pattern = dp_catalog_ctrl_read_phy_pattern,
-		.mst_config = dp_catalog_ctrl_mst_config,
-		.trigger_act = dp_catalog_ctrl_trigger_act,
-		.read_act_complete_sts = dp_catalog_ctrl_read_act_complete_sts,
-		.channel_alloc = dp_catalog_ctrl_channel_alloc,
-		.update_rg = dp_catalog_ctrl_update_rg,
-		.channel_dealloc = dp_catalog_ctrl_channel_dealloc,
-		.fec_config = dp_catalog_ctrl_fec_config,
-		.mainlink_levels = dp_catalog_ctrl_mainlink_levels,
-	};
-	struct dp_catalog_hpd hpd = {
-		.config_hpd	= dp_catalog_hpd_config_hpd,
-		.get_interrupt	= dp_catalog_hpd_get_interrupt,
-	};
-	struct dp_catalog_audio audio = {
-		.init       = dp_catalog_audio_init,
-		.config_acr = dp_catalog_audio_config_acr,
-		.enable     = dp_catalog_audio_enable,
-		.config_sdp = dp_catalog_audio_config_sdp,
-		.set_header = dp_catalog_audio_set_header,
-		.get_header = dp_catalog_audio_get_header,
-	};
-	struct dp_catalog_panel panel = {
-		.timing_cfg = dp_catalog_panel_timing_cfg,
-		.config_hdr = dp_catalog_panel_config_hdr,
-		.tpg_config = dp_catalog_panel_tpg_cfg,
-		.config_spd = dp_catalog_panel_config_spd,
-		.config_misc = dp_catalog_panel_config_misc,
-		.config_msa = dp_catalog_panel_config_msa,
-		.update_transfer_unit = dp_catalog_panel_update_transfer_unit,
-		.config_ctrl = dp_catalog_panel_config_ctrl,
-		.config_dto = dp_catalog_panel_config_dto,
-		.dsc_cfg = dp_catalog_panel_dsc_cfg,
-		.pps_flush = dp_catalog_panel_pps_flush,
-		.dhdr_flush = dp_catalog_panel_dhdr_flush,
-		.dhdr_busy = dp_catalog_panel_dhdr_busy,
-	};
-
-	if (!dev || !parser) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	catalog  = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
-	if (!catalog) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	catalog->dev = dev;
-	catalog->parser = parser;
-
-	dp_catalog_get_io(catalog);
-
-	strlcpy(catalog->exe_mode, "hw", sizeof(catalog->exe_mode));
-
-	dp_catalog = &catalog->dp_catalog;
-
-	dp_catalog->aux   = aux;
-	dp_catalog->ctrl  = ctrl;
-	dp_catalog->hpd   = hpd;
-	dp_catalog->audio = audio;
-	dp_catalog->panel = panel;
-
-	rc = dp_catalog_init(dev, dp_catalog, parser);
-	if (rc) {
-		dp_catalog_put(dp_catalog);
-		goto error;
-	}
-
-	dp_catalog->set_exe_mode = dp_catalog_set_exe_mode;
-	dp_catalog->get_reg_dump = dp_catalog_reg_dump;
-
-	return dp_catalog;
-error:
-	return ERR_PTR(rc);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
deleted file mode 100644
index 4c6b959..0000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
-<<<<<<< HEAD
-=======
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
->>>>>>> aacf58a... drm/msm/dp: Add P/N swap support for dp phy
- */
-
-#ifndef _DP_CATALOG_H_
-#define _DP_CATALOG_H_
-
-#include <drm/msm_drm.h>
-
-#include "dp_parser.h"
-
-/* interrupts */
-#define DP_INTR_HPD		BIT(0)
-#define DP_INTR_AUX_I2C_DONE	BIT(3)
-#define DP_INTR_WRONG_ADDR	BIT(6)
-#define DP_INTR_TIMEOUT		BIT(9)
-#define DP_INTR_NACK_DEFER	BIT(12)
-#define DP_INTR_WRONG_DATA_CNT	BIT(15)
-#define DP_INTR_I2C_NACK	BIT(18)
-#define DP_INTR_I2C_DEFER	BIT(21)
-#define DP_INTR_PLL_UNLOCKED	BIT(24)
-#define DP_INTR_AUX_ERROR	BIT(27)
-
-#define DP_INTR_READY_FOR_VIDEO		BIT(0)
-#define DP_INTR_IDLE_PATTERN_SENT	BIT(3)
-#define DP_INTR_FRAME_END		BIT(6)
-#define DP_INTR_CRC_UPDATED		BIT(9)
-
-#define DP_INTR_MST_DP0_VCPF_SENT	BIT(0)
-#define DP_INTR_MST_DP1_VCPF_SENT	BIT(3)
-
-#define DP_MAX_TIME_SLOTS	64
-
-/* stream id */
-enum dp_stream_id {
-	DP_STREAM_0,
-	DP_STREAM_1,
-	DP_STREAM_MAX,
-};
-
-struct dp_catalog_hdr_data {
-	u32 vsc_header_byte0;
-	u32 vsc_header_byte1;
-	u32 vsc_header_byte2;
-	u32 vsc_header_byte3;
-
-	u32 vscext_header_byte0;
-	u32 vscext_header_byte1;
-	u32 vscext_header_byte2;
-	u32 vscext_header_byte3;
-
-	u32 shdr_header_byte0;
-	u32 shdr_header_byte1;
-	u32 shdr_header_byte2;
-	u32 shdr_header_byte3;
-
-	u32 bpc;
-
-	u32 version;
-	u32 length;
-	u32 pixel_encoding;
-	u32 colorimetry;
-	u32 dynamic_range;
-	u32 content_type;
-
-	struct drm_msm_ext_hdr_metadata hdr_meta;
-};
-
-struct dp_catalog_aux {
-	u32 data;
-	u32 isr;
-
-	u32 (*read_data)(struct dp_catalog_aux *aux);
-	int (*write_data)(struct dp_catalog_aux *aux);
-	int (*write_trans)(struct dp_catalog_aux *aux);
-	int (*clear_trans)(struct dp_catalog_aux *aux, bool read);
-	void (*reset)(struct dp_catalog_aux *aux);
-	void (*enable)(struct dp_catalog_aux *aux, bool enable);
-	void (*update_aux_cfg)(struct dp_catalog_aux *aux,
-		struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type);
-	void (*setup)(struct dp_catalog_aux *aux,
-			struct dp_aux_cfg *aux_cfg);
-	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
-	void (*clear_hw_interrupts)(struct dp_catalog_aux *aux);
-};
-
-struct dp_catalog_ctrl {
-	u32 isr;
-	u32 isr5;
-
-	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
-	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
-	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
-				char *lane_map);
-	void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
-	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
-	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
-	void (*reset)(struct dp_catalog_ctrl *ctrl);
-	void (*usb_reset)(struct dp_catalog_ctrl *ctrl, bool flip);
-	bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
-	void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable);
-	void (*phy_reset)(struct dp_catalog_ctrl *ctrl);
-	void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped,
-				u8 lane_cnt);
-	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
-				u8 p_level);
-	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
-	u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
-	void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl,
-			u32 pattern);
-	u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl);
-	void (*mst_config)(struct dp_catalog_ctrl *ctrl, bool enable);
-	void (*trigger_act)(struct dp_catalog_ctrl *ctrl);
-	void (*read_act_complete_sts)(struct dp_catalog_ctrl *ctrl, bool *sts);
-	void (*channel_alloc)(struct dp_catalog_ctrl *ctrl,
-			u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt);
-	void (*update_rg)(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int,
-			u32 y_frac_enum);
-	void (*channel_dealloc)(struct dp_catalog_ctrl *ctrl,
-			u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt);
-	void (*fec_config)(struct dp_catalog_ctrl *ctrl, bool enable);
-	void (*mainlink_levels)(struct dp_catalog_ctrl *ctrl, u8 lane_cnt);
-};
-
-struct dp_catalog_hpd {
-	void (*config_hpd)(struct dp_catalog_hpd *hpd, bool en);
-	u32 (*get_interrupt)(struct dp_catalog_hpd *hpd);
-};
-
-#define HEADER_BYTE_2_BIT	 0
-#define PARITY_BYTE_2_BIT	 8
-#define HEADER_BYTE_1_BIT	16
-#define PARITY_BYTE_1_BIT	24
-#define HEADER_BYTE_3_BIT	16
-#define PARITY_BYTE_3_BIT	24
-
-enum dp_catalog_audio_sdp_type {
-	DP_AUDIO_SDP_STREAM,
-	DP_AUDIO_SDP_TIMESTAMP,
-	DP_AUDIO_SDP_INFOFRAME,
-	DP_AUDIO_SDP_COPYMANAGEMENT,
-	DP_AUDIO_SDP_ISRC,
-	DP_AUDIO_SDP_MAX,
-};
-
-enum dp_catalog_audio_header_type {
-	DP_AUDIO_SDP_HEADER_1,
-	DP_AUDIO_SDP_HEADER_2,
-	DP_AUDIO_SDP_HEADER_3,
-	DP_AUDIO_SDP_HEADER_MAX,
-};
-
-struct dp_catalog_audio {
-	enum dp_catalog_audio_sdp_type sdp_type;
-	enum dp_catalog_audio_header_type sdp_header;
-	u32 data;
-
-	enum dp_stream_id stream_id;
-
-	void (*init)(struct dp_catalog_audio *audio);
-	void (*enable)(struct dp_catalog_audio *audio);
-	void (*config_acr)(struct dp_catalog_audio *audio);
-	void (*config_sdp)(struct dp_catalog_audio *audio);
-	void (*set_header)(struct dp_catalog_audio *audio);
-	void (*get_header)(struct dp_catalog_audio *audio);
-};
-
-struct dp_dsc_cfg_data {
-	bool dsc_en;
-	char pps[128];
-	u32 pps_len;
-	u32 pps_word[32];
-	u32 pps_word_len;
-	u8 parity[32];
-	u8 parity_len;
-	u32 parity_word[8];
-	u32 parity_word_len;
-	u32 slice_per_pkt;
-	u32 bytes_per_pkt;
-	u32 eol_byte_num;
-	u32 be_in_lane;
-	u32 dto_en;
-	u32 dto_n;
-	u32 dto_d;
-	u32 dto_count;
-};
-
-struct dp_catalog_panel {
-	u32 total;
-	u32 sync_start;
-	u32 width_blanking;
-	u32 dp_active;
-	u8 *spd_vendor_name;
-	u8 *spd_product_description;
-
-	struct dp_catalog_hdr_data hdr_data;
-
-	/* TPG */
-	u32 hsync_period;
-	u32 vsync_period;
-	u32 display_v_start;
-	u32 display_v_end;
-	u32 v_sync_width;
-	u32 hsync_ctl;
-	u32 display_hctl;
-
-	/* TU */
-	u32 dp_tu;
-	u32 valid_boundary;
-	u32 valid_boundary2;
-
-	u32 misc_val;
-
-	enum dp_stream_id stream_id;
-
-	bool widebus_en;
-	struct dp_dsc_cfg_data dsc;
-
-	int (*timing_cfg)(struct dp_catalog_panel *panel);
-	void (*config_hdr)(struct dp_catalog_panel *panel, bool en,
-			u32 dhdr_max_pkts);
-	void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
-	void (*config_spd)(struct dp_catalog_panel *panel);
-	void (*config_misc)(struct dp_catalog_panel *panel);
-	void (*config_msa)(struct dp_catalog_panel *panel,
-			u32 rate, u32 stream_rate_khz);
-	void (*update_transfer_unit)(struct dp_catalog_panel *panel);
-	void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg);
-	void (*config_dto)(struct dp_catalog_panel *panel, bool ack);
-	void (*dsc_cfg)(struct dp_catalog_panel *panel);
-	void (*pps_flush)(struct dp_catalog_panel *panel);
-	void (*dhdr_flush)(struct dp_catalog_panel *panel);
-	bool (*dhdr_busy)(struct dp_catalog_panel *panel);
-};
-
-struct dp_catalog;
-struct dp_catalog_priv {
-	void *data;
-
-	void (*put)(struct dp_catalog *catalog);
-	void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode);
-};
-
-struct dp_catalog {
-	struct dp_catalog_aux aux;
-	struct dp_catalog_ctrl ctrl;
-	struct dp_catalog_audio audio;
-	struct dp_catalog_panel panel;
-	struct dp_catalog_priv priv;
-	struct dp_catalog_hpd hpd;
-
-	void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode);
-	int (*get_reg_dump)(struct dp_catalog *dp_catalog,
-		char *mode, u8 **out_buf, u32 *out_buf_len);
-};
-
-static inline u8 dp_ecc_get_g0_value(u8 data)
-{
-	u8 c[4];
-	u8 g[4];
-	u8 ret_data = 0;
-	u8 i;
-
-	for (i = 0; i < 4; i++)
-		c[i] = (data >> i) & 0x01;
-
-	g[0] = c[3];
-	g[1] = c[0] ^ c[3];
-	g[2] = c[1];
-	g[3] = c[2];
-
-	for (i = 0; i < 4; i++)
-		ret_data = ((g[i] & 0x01) << i) | ret_data;
-
-	return ret_data;
-}
-
-static inline u8 dp_ecc_get_g1_value(u8 data)
-{
-	u8 c[4];
-	u8 g[4];
-	u8 ret_data = 0;
-	u8 i;
-
-	for (i = 0; i < 4; i++)
-		c[i] = (data >> i) & 0x01;
-
-	g[0] = c[0] ^ c[3];
-	g[1] = c[0] ^ c[1] ^ c[3];
-	g[2] = c[1] ^ c[2];
-	g[3] = c[2] ^ c[3];
-
-	for (i = 0; i < 4; i++)
-		ret_data = ((g[i] & 0x01) << i) | ret_data;
-
-	return ret_data;
-}
-
-static inline u8 dp_header_get_parity(u32 data)
-{
-	u8 x0 = 0;
-	u8 x1 = 0;
-	u8 ci = 0;
-	u8 iData = 0;
-	u8 i = 0;
-	u8 parity_byte;
-	u8 num_byte = (data > 0xFF) ? 8 : 2;
-
-	for (i = 0; i < num_byte; i++) {
-		iData = (data >> i*4) & 0xF;
-
-		ci = iData ^ x1;
-		x1 = x0 ^ dp_ecc_get_g1_value(ci);
-		x0 = dp_ecc_get_g0_value(ci);
-	}
-
-	parity_byte = x1 | (x0 << 4);
-
-	return parity_byte;
-}
-
-static inline u32 dp_read(char *exe_mode, struct dp_io_data *io_data,
-				u32 offset)
-{
-	u32 data = 0;
-
-	if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all")) {
-		data = readl_relaxed(io_data->io.base + offset);
-	} else if (!strcmp(exe_mode, "sw")) {
-		if (io_data->buf)
-			memcpy(&data, io_data->buf + offset, sizeof(offset));
-	}
-
-	return data;
-}
-
-static inline void dp_write(char *exe_mode, struct dp_io_data *io_data,
-				u32 offset, u32 data)
-{
-	if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all"))
-		writel_relaxed(data, io_data->io.base + offset);
-
-	if (!strcmp(exe_mode, "sw") || !strcmp(exe_mode, "all")) {
-		if (io_data->buf)
-			memcpy(io_data->buf + offset, &data, sizeof(data));
-	}
-}
-
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser);
-void dp_catalog_put(struct dp_catalog *catalog);
-
-int dp_catalog_get_v420(struct device *dev, struct dp_catalog *catalog,
-		void *io);
-
-int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog,
-		void *io);
-
-#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
deleted file mode 100644
index 132e50e..0000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
+++ /dev/null
@@ -1,304 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/delay.h>
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define dp_catalog_get_priv_v200(x) ({ \
-	struct dp_catalog *dp_catalog; \
-	dp_catalog = container_of(x, struct dp_catalog, x); \
-	dp_catalog->priv.data; \
-})
-
-struct dp_catalog_io {
-	struct dp_io_data *dp_ahb;
-	struct dp_io_data *dp_aux;
-	struct dp_io_data *dp_link;
-	struct dp_io_data *dp_p0;
-	struct dp_io_data *dp_phy;
-	struct dp_io_data *dp_ln_tx0;
-	struct dp_io_data *dp_ln_tx1;
-	struct dp_io_data *dp_mmss_cc;
-	struct dp_io_data *dp_pll;
-	struct dp_io_data *usb3_dp_com;
-	struct dp_io_data *hdcp_physical;
-	struct dp_io_data *dp_p1;
-	struct dp_io_data *dp_tcsr;
-};
-
-struct dp_catalog_private_v200 {
-	struct device *dev;
-	struct dp_catalog_io *io;
-
-	char exe_mode[SZ_4];
-};
-
-static void dp_catalog_aux_clear_hw_interrupts_v200(struct dp_catalog_aux *aux)
-{
-	struct dp_catalog_private_v200 *catalog;
-	struct dp_io_data *io_data;
-	u32 data = 0;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v200(aux);
-	io_data = catalog->io->dp_phy;
-
-	data = dp_read(catalog->exe_mode, io_data,
-				DP_PHY_AUX_INTERRUPT_STATUS_V200);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
-			0x1f);
-	wmb(); /* make sure 0x1f is written before next write */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
-			0x9f);
-	wmb(); /* make sure 0x9f is written before next write */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
-			0);
-	wmb(); /* make sure register is cleared */
-}
-
-static void dp_catalog_aux_setup_v200(struct dp_catalog_aux *aux,
-		struct dp_aux_cfg *cfg)
-{
-	struct dp_catalog_private_v200 *catalog;
-	struct dp_io_data *io_data;
-	int i = 0, sw_reset = 0;
-
-	if (!aux || !cfg) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v200(aux);
-
-	io_data = catalog->io->dp_ahb;
-	sw_reset = dp_read(catalog->exe_mode, io_data, DP_SW_RESET);
-
-	sw_reset |= BIT(0);
-	dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
-	usleep_range(1000, 1010); /* h/w recommended delay */
-
-	sw_reset &= ~BIT(0);
-	dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x4); /* bit 2 */
-	udelay(1000);
-	dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x0); /* bit 2 */
-	wmb(); /* make sure programming happened */
-
-	io_data = catalog->io->dp_tcsr;
-	dp_write(catalog->exe_mode, io_data, 0x4c, 0x1); /* bit 0 & 2 */
-	wmb(); /* make sure programming happened */
-
-	io_data = catalog->io->dp_phy;
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3c);
-	wmb(); /* make sure PD programming happened */
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3d);
-	wmb(); /* make sure PD programming happened */
-
-	/* DP AUX CFG register programming */
-	io_data = catalog->io->dp_phy;
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++)
-		dp_write(catalog->exe_mode, io_data, cfg[i].offset,
-				cfg[i].lut[cfg[i].current_index]);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V200,
-			0x1F);
-	wmb(); /* make sure AUX configuration is done before enabling it */
-}
-
-static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz)
-{
-	u32 pixel_m, pixel_n;
-	u32 mvid, nvid;
-	u32 const nvid_fixed = 0x8000;
-	u32 const link_rate_hbr2 = 540000;
-	u32 const link_rate_hbr3 = 810000;
-	struct dp_catalog_private_v200 *catalog;
-	struct dp_io_data *io_data;
-	u32 strm_reg_off = 0;
-	u32 mvid_reg_off = 0, nvid_reg_off = 0;
-
-	if (!panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v200(panel);
-	io_data = catalog->io->dp_mmss_cc;
-
-	if (panel->stream_id == DP_STREAM_1)
-		strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
-					MMSS_DP_PIXEL_M_V200;
-
-	pixel_m = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_M_V200 + strm_reg_off);
-	pixel_n = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_N_V200 + strm_reg_off);
-	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-	mvid = (pixel_m & 0xFFFF) * 5;
-	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-	if (nvid < nvid_fixed) {
-		u32 temp;
-
-		temp = (nvid_fixed / nvid) * nvid;
-		mvid = (nvid_fixed / nvid) * mvid;
-		nvid = temp;
-	}
-
-	pr_debug("rate = %d\n", rate);
-
-	if (panel->widebus_en)
-		mvid <<= 1;
-
-	if (link_rate_hbr2 == rate)
-		nvid *= 2;
-
-	if (link_rate_hbr3 == rate)
-		nvid *= 3;
-
-	io_data = catalog->io->dp_link;
-
-	if (panel->stream_id == DP_STREAM_1) {
-		mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
-		nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
-	}
-
-	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_reg_off,
-			mvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_reg_off,
-			nvid);
-}
-
-static void dp_catalog_ctrl_lane_mapping_v200(struct dp_catalog_ctrl *ctrl,
-						bool flipped, char *lane_map)
-{
-	struct dp_catalog_private_v200 *catalog;
-	struct dp_io_data *io_data;
-	u8 l_map[4] = { 0 }, i = 0, j = 0;
-	u32 lane_map_reg = 0;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v200(ctrl);
-	io_data = catalog->io->dp_link;
-
-	/* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */
-	if (flipped) {
-		for (i = 0; i < DP_MAX_PHY_LN; i++) {
-			if (lane_map[i] == DP_ML0) {
-				for (j = 0; j < DP_MAX_PHY_LN; j++) {
-					if (lane_map[j] == DP_ML3) {
-						l_map[i] = DP_ML3;
-						l_map[j] = DP_ML0;
-						break;
-					}
-				}
-			} else if (lane_map[i] == DP_ML1) {
-				for (j = 0; j < DP_MAX_PHY_LN; j++) {
-					if (lane_map[j] == DP_ML2) {
-						l_map[i] = DP_ML2;
-						l_map[j] = DP_ML1;
-						break;
-					}
-				}
-			}
-		}
-	} else {
-		/* Normal orientation */
-		for (i = 0; i < DP_MAX_PHY_LN; i++)
-			l_map[i] = lane_map[i];
-	}
-
-	lane_map_reg = ((l_map[3]&3)<<6)|((l_map[2]&3)<<4)|((l_map[1]&3)<<2)
-			|(l_map[0]&3);
-
-	dp_write(catalog->exe_mode, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING,
-			lane_map_reg);
-}
-
-static void dp_catalog_ctrl_usb_reset_v200(struct dp_catalog_ctrl *ctrl,
-						bool flip)
-{
-}
-
-static void dp_catalog_put_v200(struct dp_catalog *catalog)
-{
-	struct dp_catalog_private_v200 *catalog_priv;
-
-	if (!catalog || !catalog->priv.data)
-		return;
-
-	catalog_priv = catalog->priv.data;
-	devm_kfree(catalog_priv->dev, catalog_priv);
-}
-
-static void dp_catalog_set_exe_mode_v200(struct dp_catalog *catalog, char *mode)
-{
-	struct dp_catalog_private_v200 *catalog_priv;
-
-	if (!catalog || !catalog->priv.data)
-		return;
-
-	catalog_priv = catalog->priv.data;
-
-	strlcpy(catalog_priv->exe_mode, mode, sizeof(catalog_priv->exe_mode));
-}
-
-int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog,
-				void *io)
-{
-	struct dp_catalog_private_v200 *catalog_priv;
-
-	if (!dev || !catalog) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL);
-	if (!catalog_priv)
-		return -ENOMEM;
-
-	catalog_priv->dev = dev;
-	catalog_priv->io = io;
-	catalog->priv.data = catalog_priv;
-
-	catalog->priv.put                = dp_catalog_put_v200;
-	catalog->priv.set_exe_mode       = dp_catalog_set_exe_mode_v200;
-
-	catalog->aux.clear_hw_interrupts =
-				dp_catalog_aux_clear_hw_interrupts_v200;
-	catalog->aux.setup               = dp_catalog_aux_setup_v200;
-
-	catalog->panel.config_msa        = dp_catalog_panel_config_msa_v200;
-
-	catalog->ctrl.lane_mapping       = dp_catalog_ctrl_lane_mapping_v200;
-	catalog->ctrl.usb_reset          = dp_catalog_ctrl_usb_reset_v200;
-
-	/* Set the default execution mode to hardware mode */
-	dp_catalog_set_exe_mode_v200(catalog, "hw");
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
deleted file mode 100644
index 51fa987..0000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ /dev/null
@@ -1,349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define dp_catalog_get_priv_v420(x) ({ \
-	struct dp_catalog *dp_catalog; \
-	dp_catalog = container_of(x, struct dp_catalog, x); \
-	dp_catalog->priv.data; \
-})
-
-#define MAX_VOLTAGE_LEVELS 4
-#define MAX_PRE_EMP_LEVELS 4
-
-static u8 const vm_pre_emphasis[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
-	{0x00, 0x0E, 0x16, 0xFF},       /* pe0, 0 db */
-	{0x00, 0x0E, 0x16, 0xFF},       /* pe1, 3.5 db */
-	{0x00, 0x0E, 0xFF, 0xFF},       /* pe2, 6.0 db */
-	{0xFF, 0xFF, 0xFF, 0xFF}        /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not support */
-static u8 const vm_voltage_swing[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
-	{0x07, 0x0F, 0x16, 0xFF}, /* sw0, 0.4v  */
-	{0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6 v */
-	{0x1A, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */
-	{0xFF, 0xFF, 0xFF, 0xFF}  /* sw1, 1.2 v, optional */
-};
-
-struct dp_catalog_io {
-	struct dp_io_data *dp_ahb;
-	struct dp_io_data *dp_aux;
-	struct dp_io_data *dp_link;
-	struct dp_io_data *dp_p0;
-	struct dp_io_data *dp_phy;
-	struct dp_io_data *dp_ln_tx0;
-	struct dp_io_data *dp_ln_tx1;
-	struct dp_io_data *dp_mmss_cc;
-	struct dp_io_data *dp_pll;
-	struct dp_io_data *usb3_dp_com;
-	struct dp_io_data *hdcp_physical;
-	struct dp_io_data *dp_p1;
-};
-
-struct dp_catalog_private_v420 {
-	struct device *dev;
-	struct dp_catalog_io *io;
-
-	char exe_mode[SZ_4];
-};
-
-static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
-		struct dp_aux_cfg *cfg)
-{
-	struct dp_catalog_private_v420 *catalog;
-	struct dp_io_data *io_data;
-	int i = 0;
-
-	if (!aux || !cfg) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v420(aux);
-
-	io_data = catalog->io->dp_phy;
-	dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x67);
-	wmb(); /* make sure PD programming happened */
-
-	/* Turn on BIAS current for PHY/PLL */
-	io_data = catalog->io->dp_pll;
-	dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN,
-			0x17);
-	wmb(); /* make sure BIAS programming happened */
-
-	io_data = catalog->io->dp_phy;
-	/* DP AUX CFG register programming */
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
-		pr_debug("%s: offset=0x%08x, value=0x%08x\n",
-			dp_phy_aux_config_type_to_string(i),
-			cfg[i].offset, cfg[i].lut[cfg[i].current_index]);
-		dp_write(catalog->exe_mode, io_data, cfg[i].offset,
-			cfg[i].lut[cfg[i].current_index]);
-	}
-	wmb(); /* make sure DP AUX CFG programming happened */
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V420,
-			0x1F);
-}
-
-static void dp_catalog_aux_clear_hw_interrupts_v420(struct dp_catalog_aux *aux)
-{
-	struct dp_catalog_private_v420 *catalog;
-	struct dp_io_data *io_data;
-	u32 data = 0;
-
-	if (!aux) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v420(aux);
-	io_data = catalog->io->dp_phy;
-
-	data = dp_read(catalog->exe_mode, io_data,
-		DP_PHY_AUX_INTERRUPT_STATUS_V420);
-
-	dp_write(catalog->exe_mode, io_data,
-		DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x1f);
-	wmb(); /* make sure 0x1f is written before next write */
-	dp_write(catalog->exe_mode, io_data,
-		DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x9f);
-	wmb(); /* make sure 0x9f is written before next write */
-	dp_write(catalog->exe_mode, io_data,
-		DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0);
-	wmb(); /* make sure register is cleared */
-}
-
-static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz)
-{
-	u32 pixel_m, pixel_n;
-	u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0;
-	u32 const nvid_fixed = 0x8000;
-	u32 const link_rate_hbr2 = 540000;
-	u32 const link_rate_hbr3 = 810000;
-	struct dp_catalog_private_v420 *catalog;
-	struct dp_io_data *io_data;
-
-	if (!panel || !rate) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", panel->stream_id);
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v420(panel);
-	io_data = catalog->io->dp_mmss_cc;
-
-	if (panel->stream_id == DP_STREAM_1)
-		reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
-
-	pixel_m = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_M_V420 + reg_off);
-	pixel_n = dp_read(catalog->exe_mode, io_data,
-			MMSS_DP_PIXEL_N_V420 + reg_off);
-	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-	mvid = (pixel_m & 0xFFFF) * 5;
-	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-	if (nvid < nvid_fixed) {
-		u32 temp;
-
-		temp = (nvid_fixed / nvid) * nvid;
-		mvid = (nvid_fixed / nvid) * mvid;
-		nvid = temp;
-	}
-
-	pr_debug("rate = %d\n", rate);
-
-	if (panel->widebus_en)
-		mvid <<= 1;
-
-	if (link_rate_hbr2 == rate)
-		nvid *= 2;
-
-	if (link_rate_hbr3 == rate)
-		nvid *= 3;
-
-	io_data = catalog->io->dp_link;
-
-	if (panel->stream_id == DP_STREAM_1) {
-		mvid_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
-		nvid_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
-	}
-
-	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_off, mvid);
-	dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_off, nvid);
-}
-
-static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl,
-		bool flipped, u8 ln_cnt)
-{
-	u32 info = 0x0;
-	struct dp_catalog_private_v420 *catalog;
-	u8 orientation = BIT(!!flipped);
-	struct dp_io_data *io_data;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v420(ctrl);
-	io_data = catalog->io->dp_phy;
-
-	info |= (ln_cnt & 0x0F);
-	info |= ((orientation & 0x0F) << 4);
-	pr_debug("Shared Info = 0x%x\n", info);
-
-	dp_write(catalog->exe_mode, io_data, DP_PHY_SPARE0_V420, info);
-}
-
-static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl,
-		u8 v_level, u8 p_level)
-{
-	struct dp_catalog_private_v420 *catalog;
-	struct dp_io_data *io_data;
-	u8 value0, value1;
-
-	if (!ctrl || !((v_level < MAX_VOLTAGE_LEVELS)
-		&& (p_level < MAX_PRE_EMP_LEVELS))) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	catalog = dp_catalog_get_priv_v420(ctrl);
-
-	pr_debug("hw: v=%d p=%d\n", v_level, p_level);
-
-	value0 = vm_voltage_swing[v_level][p_level];
-	value1 = vm_pre_emphasis[v_level][p_level];
-
-	/* program default setting first */
-	io_data = catalog->io->dp_ln_tx0;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A);
-	dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
-
-	io_data = catalog->io->dp_ln_tx1;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A);
-	dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
-
-	/* Enable MUX to use Cursor values from these registers */
-	value0 |= BIT(5);
-	value1 |= BIT(5);
-
-	/* Configure host and panel only if both values are allowed */
-	if (value0 != 0xFF && value1 != 0xFF) {
-		io_data = catalog->io->dp_ln_tx0;
-		dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420,
-				value0);
-		dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
-				value1);
-
-		io_data = catalog->io->dp_ln_tx1;
-		dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420,
-				value0);
-		dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
-				value1);
-
-		pr_debug("hw: vx_value=0x%x px_value=0x%x\n",
-			value0, value1);
-	} else {
-		pr_err("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n",
-			v_level, value0, p_level, value1);
-	}
-}
-
-static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
-						u8 ln_pnswap)
-{
-	struct dp_catalog_private_v420 *catalog;
-	struct dp_io_data *io_data;
-	u32 cfg0, cfg1;
-
-	catalog = dp_catalog_get_priv_v420(ctrl);
-
-	cfg0 = 0x0a;
-	cfg1 = 0x0a;
-
-	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
-	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
-	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
-	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
-
-	io_data = catalog->io->dp_ln_tx0;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
-
-	io_data = catalog->io->dp_ln_tx1;
-	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
-}
-
-static void dp_catalog_put_v420(struct dp_catalog *catalog)
-{
-	struct dp_catalog_private_v420 *catalog_priv;
-
-	if (!catalog || !catalog->priv.data)
-		return;
-
-	catalog_priv = catalog->priv.data;
-	devm_kfree(catalog_priv->dev, catalog_priv);
-}
-
-static void dp_catalog_set_exe_mode_v420(struct dp_catalog *catalog, char *mode)
-{
-	struct dp_catalog_private_v420 *catalog_priv;
-
-	if (!catalog || !catalog->priv.data)
-		return;
-
-	catalog_priv = catalog->priv.data;
-
-	strlcpy(catalog_priv->exe_mode, mode, sizeof(catalog_priv->exe_mode));
-}
-
-int dp_catalog_get_v420(struct device *dev, struct dp_catalog *catalog,
-		void *io)
-{
-	struct dp_catalog_private_v420 *catalog_priv;
-
-	if (!dev || !catalog) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL);
-	if (!catalog_priv)
-		return -ENOMEM;
-
-	catalog_priv->dev = dev;
-	catalog_priv->io = io;
-	catalog->priv.data = catalog_priv;
-
-	catalog->priv.put          = dp_catalog_put_v420;
-	catalog->priv.set_exe_mode = dp_catalog_set_exe_mode_v420;
-
-	catalog->aux.setup         = dp_catalog_aux_setup_v420;
-	catalog->aux.clear_hw_interrupts =
-				dp_catalog_aux_clear_hw_interrupts_v420;
-	catalog->panel.config_msa  = dp_catalog_panel_config_msa_v420;
-	catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
-	catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
-	catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
-
-	/* Set the default execution mode to hardware mode */
-	dp_catalog_set_exe_mode_v420(catalog, "hw");
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
deleted file mode 100644
index 0d1b061..0000000
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/types.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <drm/drm_fixed.h>
-
-#include "dp_ctrl.h"
-
-#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-
-#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
-#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
-
-#define DP_CTRL_INTR_MST_DP0_VCPF_SENT	BIT(0)
-#define DP_CTRL_INTR_MST_DP1_VCPF_SENT	BIT(3)
-
-/* dp state ctrl */
-#define ST_TRAIN_PATTERN_1		BIT(0)
-#define ST_TRAIN_PATTERN_2		BIT(1)
-#define ST_TRAIN_PATTERN_3		BIT(2)
-#define ST_TRAIN_PATTERN_4		BIT(3)
-#define ST_SYMBOL_ERR_RATE_MEASUREMENT	BIT(4)
-#define ST_PRBS7			BIT(5)
-#define ST_CUSTOM_80_BIT_PATTERN	BIT(6)
-#define ST_SEND_VIDEO			BIT(7)
-#define ST_PUSH_IDLE			BIT(8)
-#define MST_DP0_PUSH_VCPF		BIT(12)
-#define MST_DP0_FORCE_VCPF		BIT(13)
-#define MST_DP1_PUSH_VCPF		BIT(14)
-#define MST_DP1_FORCE_VCPF		BIT(15)
-
-#define MR_LINK_TRAINING1  0x8
-#define MR_LINK_SYMBOL_ERM 0x80
-#define MR_LINK_PRBS7 0x100
-#define MR_LINK_CUSTOM80 0x200
-#define MR_LINK_TRAINING4  0x40
-
-struct dp_mst_ch_slot_info {
-	u32 start_slot;
-	u32 tot_slots;
-};
-
-struct dp_mst_channel_info {
-	struct dp_mst_ch_slot_info slot_info[DP_STREAM_MAX];
-};
-
-struct dp_ctrl_private {
-	struct dp_ctrl dp_ctrl;
-
-	struct device *dev;
-	struct dp_aux *aux;
-	struct dp_panel *panel;
-	struct dp_link *link;
-	struct dp_power *power;
-	struct dp_parser *parser;
-	struct dp_catalog_ctrl *catalog;
-
-	struct completion idle_comp;
-	struct completion video_comp;
-
-	bool orientation;
-	bool power_on;
-	bool mst_mode;
-	bool fec_mode;
-
-	atomic_t aborted;
-
-	u32 vic;
-	u32 stream_count;
-	struct dp_mst_channel_info mst_ch_info;
-};
-
-enum notification_status {
-	NOTIFY_UNKNOWN,
-	NOTIFY_CONNECT,
-	NOTIFY_DISCONNECT,
-	NOTIFY_CONNECT_IRQ_HPD,
-	NOTIFY_DISCONNECT_IRQ_HPD,
-};
-
-static void dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl)
-{
-	pr_debug("idle_patterns_sent\n");
-	complete(&ctrl->idle_comp);
-}
-
-static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
-{
-	pr_debug("dp_video_ready\n");
-	complete(&ctrl->video_comp);
-}
-
-static void dp_ctrl_abort(struct dp_ctrl *dp_ctrl)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl) {
-		pr_err("Invalid input data\n");
-		return;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	atomic_set(&ctrl->aborted, 1);
-}
-
-static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
-{
-	ctrl->catalog->state_ctrl(ctrl->catalog, state);
-}
-
-static void dp_ctrl_push_idle(struct dp_ctrl_private *ctrl,
-				enum dp_stream_id strm)
-{
-	int const idle_pattern_completion_timeout_ms = HZ / 10;
-	u32 state = 0x0;
-
-	if (!ctrl->power_on)
-		return;
-
-	if (!ctrl->mst_mode) {
-		state = ST_PUSH_IDLE;
-		goto trigger_idle;
-	}
-
-	if (strm >= DP_STREAM_MAX) {
-		pr_err("mst push idle, invalid stream:%d\n", strm);
-		return;
-	}
-
-	state |= (strm == DP_STREAM_0) ? MST_DP0_PUSH_VCPF : MST_DP1_PUSH_VCPF;
-
-trigger_idle:
-	reinit_completion(&ctrl->idle_comp);
-	dp_ctrl_state_ctrl(ctrl, state);
-
-	if (!wait_for_completion_timeout(&ctrl->idle_comp,
-			idle_pattern_completion_timeout_ms))
-		pr_warn("time out\n");
-	else
-		pr_debug("mainlink off done\n");
-}
-
-/**
- * dp_ctrl_configure_source_link_params() - configures DP TX source params
- * @ctrl: Display Port Driver data
- * @enable: enable or disable DP transmitter
- *
- * Configures the DP transmitter source params including details such as lane
- * configuration, output format and sink/panel timing information.
- */
-static void dp_ctrl_configure_source_link_params(struct dp_ctrl_private *ctrl,
-		bool enable)
-{
-	if (enable) {
-		ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation,
-						ctrl->parser->l_map);
-		ctrl->catalog->lane_pnswap(ctrl->catalog,
-						ctrl->parser->l_pnswap);
-		ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode);
-		ctrl->catalog->config_ctrl(ctrl->catalog,
-				ctrl->link->link_params.lane_count);
-		ctrl->catalog->mainlink_levels(ctrl->catalog,
-				ctrl->link->link_params.lane_count);
-		ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
-	} else {
-		ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
-	}
-}
-
-static void dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
-{
-	if (!wait_for_completion_timeout(&ctrl->video_comp, HZ / 2))
-		pr_warn("SEND_VIDEO time out\n");
-}
-
-static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
-		u32 voltage_level, u32 pre_emphasis_level)
-{
-	int i;
-	u8 buf[4];
-	u32 max_level_reached = 0;
-
-	if (voltage_level == DP_LINK_VOLTAGE_MAX) {
-		pr_debug("max. voltage swing level reached %d\n",
-				voltage_level);
-		max_level_reached |= BIT(2);
-	}
-
-	if (pre_emphasis_level == DP_LINK_PRE_EMPHASIS_MAX) {
-		pr_debug("max. pre-emphasis level reached %d\n",
-				pre_emphasis_level);
-		max_level_reached  |= BIT(5);
-	}
-
-	pre_emphasis_level <<= 3;
-
-	for (i = 0; i < 4; i++)
-		buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
-
-	pr_debug("sink: p|v=0x%x\n", voltage_level | pre_emphasis_level);
-	return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
-}
-
-static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
-{
-	struct dp_link *link = ctrl->link;
-
-	ctrl->catalog->update_vx_px(ctrl->catalog,
-		link->phy_params.v_level, link->phy_params.p_level);
-
-	return dp_ctrl_update_sink_vx_px(ctrl, link->phy_params.v_level,
-		link->phy_params.p_level);
-}
-
-static int dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
-		u8 pattern)
-{
-	u8 buf[4];
-
-	pr_debug("sink: pattern=%x\n", pattern);
-
-	buf[0] = pattern;
-	return drm_dp_dpcd_write(ctrl->aux->drm_aux,
-		DP_TRAINING_PATTERN_SET, buf, 1);
-}
-
-static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
-					u8 *link_status)
-{
-	int ret = 0, len;
-	u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
-	u32 link_status_read_max_retries = 100;
-
-	while (--link_status_read_max_retries) {
-		len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
-			link_status);
-		if (len != DP_LINK_STATUS_SIZE) {
-			pr_err("DP link status read failed, err: %d\n", len);
-			ret = len;
-			break;
-		}
-
-		if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
-			break;
-	}
-
-	return ret;
-}
-
-static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
-{
-	int tries, old_v_level, ret = 0;
-	u8 link_status[DP_LINK_STATUS_SIZE];
-	int const maximum_retries = 5;
-
-	ctrl->aux->state &= ~DP_STATE_TRAIN_1_FAILED;
-	ctrl->aux->state &= ~DP_STATE_TRAIN_1_SUCCEEDED;
-	ctrl->aux->state |= DP_STATE_TRAIN_1_STARTED;
-
-	dp_ctrl_state_ctrl(ctrl, 0);
-	/* Make sure to clear the current pattern before starting a new one */
-	wmb();
-
-	ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
-	ret = dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
-		DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
-	if (ret <= 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	ret = dp_ctrl_update_vx_px(ctrl);
-	if (ret <= 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	tries = 0;
-	old_v_level = ctrl->link->phy_params.v_level;
-	while (1) {
-		if (atomic_read(&ctrl->aborted)) {
-			ret = -EINVAL;
-			break;
-		}
-
-		drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
-
-		ret = dp_ctrl_read_link_status(ctrl, link_status);
-		if (ret)
-			break;
-
-		if (drm_dp_clock_recovery_ok(link_status,
-			ctrl->link->link_params.lane_count)) {
-			break;
-		}
-
-		if (ctrl->link->phy_params.v_level == DP_LINK_VOLTAGE_MAX) {
-			pr_err_ratelimited("max v_level reached\n");
-			ret = -EAGAIN;
-			break;
-		}
-
-		if (old_v_level == ctrl->link->phy_params.v_level) {
-			tries++;
-			if (tries >= maximum_retries) {
-				pr_err("max tries reached\n");
-				ret = -ETIMEDOUT;
-				break;
-			}
-		} else {
-			tries = 0;
-			old_v_level = ctrl->link->phy_params.v_level;
-		}
-
-		pr_debug("clock recovery not done, adjusting vx px\n");
-
-		ctrl->link->adjust_levels(ctrl->link, link_status);
-		ret = dp_ctrl_update_vx_px(ctrl);
-		if (ret <= 0) {
-			ret = -EINVAL;
-			break;
-		}
-	}
-end:
-	ctrl->aux->state &= ~DP_STATE_TRAIN_1_STARTED;
-
-	if (ret)
-		ctrl->aux->state |= DP_STATE_TRAIN_1_FAILED;
-	else
-		ctrl->aux->state |= DP_STATE_TRAIN_1_SUCCEEDED;
-
-	return ret;
-}
-
-static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
-{
-	int ret = 0;
-
-	if (!ctrl)
-		return -EINVAL;
-
-	switch (ctrl->link->link_params.bw_code) {
-	case DP_LINK_BW_8_1:
-		ctrl->link->link_params.bw_code = DP_LINK_BW_5_4;
-		break;
-	case DP_LINK_BW_5_4:
-		ctrl->link->link_params.bw_code = DP_LINK_BW_2_7;
-		break;
-	case DP_LINK_BW_2_7:
-	case DP_LINK_BW_1_62:
-	default:
-		ctrl->link->link_params.bw_code = DP_LINK_BW_1_62;
-		break;
-	}
-
-	pr_debug("new bw code=0x%x\n", ctrl->link->link_params.bw_code);
-
-	return ret;
-}
-
-static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
-{
-	dp_ctrl_train_pattern_set(ctrl, 0);
-	drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
-}
-
-static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
-{
-	int tries = 0, ret = 0;
-	char pattern;
-	int const maximum_retries = 5;
-	u8 link_status[DP_LINK_STATUS_SIZE];
-
-	ctrl->aux->state &= ~DP_STATE_TRAIN_2_FAILED;
-	ctrl->aux->state &= ~DP_STATE_TRAIN_2_SUCCEEDED;
-	ctrl->aux->state |= DP_STATE_TRAIN_2_STARTED;
-
-	dp_ctrl_state_ctrl(ctrl, 0);
-	/* Make sure to clear the current pattern before starting a new one */
-	wmb();
-
-	if (drm_dp_tps3_supported(ctrl->panel->dpcd))
-		pattern = DP_TRAINING_PATTERN_3;
-	else
-		pattern = DP_TRAINING_PATTERN_2;
-
-	ret = dp_ctrl_update_vx_px(ctrl);
-	if (ret <= 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
-	ret = dp_ctrl_train_pattern_set(ctrl,
-		pattern | DP_RECOVERED_CLOCK_OUT_EN);
-	if (ret <= 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	do  {
-		if (atomic_read(&ctrl->aborted)) {
-			ret = -EINVAL;
-			break;
-		}
-
-		drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
-
-		ret = dp_ctrl_read_link_status(ctrl, link_status);
-		if (ret)
-			break;
-
-		if (drm_dp_channel_eq_ok(link_status,
-			ctrl->link->link_params.lane_count))
-			break;
-
-		if (tries > maximum_retries) {
-			ret = -ETIMEDOUT;
-			break;
-		}
-		tries++;
-
-		ctrl->link->adjust_levels(ctrl->link, link_status);
-		ret = dp_ctrl_update_vx_px(ctrl);
-		if (ret <= 0) {
-			ret = -EINVAL;
-			break;
-		}
-	} while (1);
-end:
-	ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED;
-
-	if (ret)
-		ctrl->aux->state |= DP_STATE_TRAIN_2_FAILED;
-	else
-		ctrl->aux->state |= DP_STATE_TRAIN_2_SUCCEEDED;
-	return ret;
-}
-
-static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
-{
-	int ret = 0;
-	u8 encoding = 0x1;
-	struct drm_dp_link link_info = {0};
-
-	ctrl->link->phy_params.p_level = 0;
-	ctrl->link->phy_params.v_level = 0;
-
-	link_info.num_lanes = ctrl->link->link_params.lane_count;
-	link_info.rate = drm_dp_bw_code_to_link_rate(
-		ctrl->link->link_params.bw_code);
-	link_info.capabilities = ctrl->panel->link_info.capabilities;
-
-	ret = drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
-	if (ret)
-		goto end;
-
-	ret = drm_dp_dpcd_write(ctrl->aux->drm_aux,
-		DP_MAIN_LINK_CHANNEL_CODING_SET, &encoding, 1);
-	if (ret <= 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	ret = dp_ctrl_link_train_1(ctrl);
-	if (ret) {
-		pr_err("link training #1 failed\n");
-		goto end;
-	}
-
-	/* print success info as this is a result of user initiated action */
-	pr_info("link training #1 successful\n");
-
-	ret = dp_ctrl_link_training_2(ctrl);
-	if (ret) {
-		pr_err("link training #2 failed\n");
-		goto end;
-	}
-
-	/* print success info as this is a result of user initiated action */
-	pr_info("link training #2 successful\n");
-
-end:
-	dp_ctrl_state_ctrl(ctrl, 0);
-	/* Make sure to clear the current pattern before starting a new one */
-	wmb();
-
-	dp_ctrl_clear_training_pattern(ctrl);
-	return ret;
-}
-
-static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl)
-{
-	int ret = 0;
-	const unsigned int fec_cfg_dpcd = 0x120;
-
-	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
-		goto end;
-
-	/*
-	 * As part of previous calls, DP controller state might have
-	 * transitioned to PUSH_IDLE. In order to start transmitting a link
-	 * training pattern, we have to first to a DP software reset.
-	 */
-	ctrl->catalog->reset(ctrl->catalog);
-
-	if (ctrl->fec_mode)
-		drm_dp_dpcd_writeb(ctrl->aux->drm_aux, fec_cfg_dpcd, 0x01);
-
-	ret = dp_ctrl_link_train(ctrl);
-
-end:
-	return ret;
-}
-
-static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
-		char *name, enum dp_pm_type clk_type, u32 rate)
-{
-	u32 num = ctrl->parser->mp[clk_type].num_clk;
-	struct dss_clk *cfg = ctrl->parser->mp[clk_type].clk_config;
-
-	while (num && strcmp(cfg->clk_name, name)) {
-		num--;
-		cfg++;
-	}
-
-	pr_debug("setting rate=%d on clk=%s\n", rate, name);
-
-	if (num)
-		cfg->rate = rate;
-	else
-		pr_err("%s clock could not be set with rate %d\n", name, rate);
-}
-
-static int dp_ctrl_enable_link_clock(struct dp_ctrl_private *ctrl)
-{
-	int ret = 0;
-	u32 rate = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code);
-	enum dp_pm_type type = DP_LINK_PM;
-
-	pr_debug("rate=%d\n", rate);
-
-	dp_ctrl_set_clock_rate(ctrl, "link_clk", type, rate);
-
-	ret = ctrl->power->clk_enable(ctrl->power, type, true);
-	if (ret) {
-		pr_err("Unabled to start link clocks\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static void dp_ctrl_disable_link_clock(struct dp_ctrl_private *ctrl)
-{
-	ctrl->power->clk_enable(ctrl->power, DP_LINK_PM, false);
-}
-
-static int dp_ctrl_link_setup(struct dp_ctrl_private *ctrl, bool shallow)
-{
-	int rc = -EINVAL;
-	u32 link_train_max_retries = 100;
-	struct dp_catalog_ctrl *catalog;
-	struct dp_link_params *link_params;
-
-	catalog = ctrl->catalog;
-	link_params = &ctrl->link->link_params;
-
-	catalog->phy_lane_cfg(catalog, ctrl->orientation,
-				link_params->lane_count);
-
-	while (1) {
-		pr_debug("bw_code=%d, lane_count=%d\n",
-			link_params->bw_code, link_params->lane_count);
-
-		rc = dp_ctrl_enable_link_clock(ctrl);
-		if (rc)
-			break;
-
-		dp_ctrl_configure_source_link_params(ctrl, true);
-
-		rc = dp_ctrl_setup_main_link(ctrl);
-		if (!rc)
-			break;
-
-		/*
-		 * Shallow means link training failure is not important.
-		 * If it fails, we still keep the link clocks on.
-		 * In this mode, the system expects DP to be up
-		 * even though the cable is removed. Disconnect interrupt
-		 * will eventually trigger and shutdown DP.
-		 */
-		if (shallow) {
-			rc = 0;
-			break;
-		}
-
-		if (!link_train_max_retries-- || atomic_read(&ctrl->aborted))
-			break;
-
-		dp_ctrl_link_rate_down_shift(ctrl);
-
-		dp_ctrl_configure_source_link_params(ctrl, false);
-		dp_ctrl_disable_link_clock(ctrl);
-
-		/* hw recommended delays before retrying link training */
-		msleep(20);
-	}
-
-	return rc;
-}
-
-static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl,
-		struct dp_panel *dp_panel)
-{
-	int ret = 0;
-	u32 pclk;
-	enum dp_pm_type clk_type;
-	char clk_name[32] = "";
-
-	ret = ctrl->power->set_pixel_clk_parent(ctrl->power,
-			dp_panel->stream_id);
-
-	if (ret)
-		return ret;
-
-	if (dp_panel->stream_id == DP_STREAM_0) {
-		clk_type = DP_STREAM0_PM;
-		strlcpy(clk_name, "strm0_pixel_clk", 32);
-	} else if (dp_panel->stream_id == DP_STREAM_1) {
-		clk_type = DP_STREAM1_PM;
-		strlcpy(clk_name, "strm1_pixel_clk", 32);
-	} else {
-		pr_err("Invalid stream:%d for clk enable\n",
-				dp_panel->stream_id);
-		return -EINVAL;
-	}
-
-	pclk = dp_panel->pinfo.widebus_en ?
-		(dp_panel->pinfo.pixel_clk_khz >> 1) :
-		(dp_panel->pinfo.pixel_clk_khz);
-
-	dp_ctrl_set_clock_rate(ctrl, clk_name, clk_type, pclk);
-
-	ret = ctrl->power->clk_enable(ctrl->power, clk_type, true);
-	if (ret) {
-		pr_err("Unabled to start stream:%d clocks\n",
-				dp_panel->stream_id);
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int dp_ctrl_disable_stream_clocks(struct dp_ctrl_private *ctrl,
-		struct dp_panel *dp_panel)
-{
-	int ret = 0;
-
-	if (dp_panel->stream_id == DP_STREAM_0) {
-		return ctrl->power->clk_enable(ctrl->power,
-				DP_STREAM0_PM, false);
-	} else if (dp_panel->stream_id == DP_STREAM_1) {
-		return ctrl->power->clk_enable(ctrl->power,
-				DP_STREAM1_PM, false);
-	} else {
-		pr_err("Invalid stream:%d for clk disable\n",
-				dp_panel->stream_id);
-		ret = -EINVAL;
-	}
-	return ret;
-}
-static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
-{
-	struct dp_ctrl_private *ctrl;
-	struct dp_catalog_ctrl *catalog;
-
-	if (!dp_ctrl) {
-		pr_err("Invalid input data\n");
-		return -EINVAL;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	ctrl->orientation = flip;
-	catalog = ctrl->catalog;
-
-	if (reset) {
-		catalog->usb_reset(ctrl->catalog, flip);
-		catalog->phy_reset(ctrl->catalog);
-	}
-	catalog->enable_irq(ctrl->catalog, true);
-	atomic_set(&ctrl->aborted, 0);
-
-	return 0;
-}
-
-/**
- * dp_ctrl_host_deinit() - Uninitialize DP controller
- * @ctrl: Display Port Driver data
- *
- * Perform required steps to uninitialize DP controller
- * and its resources.
- */
-static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl) {
-		pr_err("Invalid input data\n");
-		return;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	ctrl->catalog->enable_irq(ctrl->catalog, false);
-
-	pr_debug("Host deinitialized successfully\n");
-}
-
-static void dp_ctrl_send_video(struct dp_ctrl_private *ctrl)
-{
-	ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO);
-}
-
-static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl)
-{
-	int ret = 0;
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl) {
-		pr_err("Invalid input data\n");
-		return -EINVAL;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED;
-	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED;
-
-	if (!ctrl->power_on) {
-		pr_err("ctrl off\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	if (atomic_read(&ctrl->aborted))
-		goto end;
-
-	ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED;
-	ret = dp_ctrl_setup_main_link(ctrl);
-	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED;
-
-	if (ret) {
-		ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED;
-		goto end;
-	}
-
-	ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED;
-
-	if (ctrl->stream_count) {
-		dp_ctrl_send_video(ctrl);
-		dp_ctrl_wait4video_ready(ctrl);
-	}
-end:
-	return ret;
-}
-
-static void dp_ctrl_process_phy_test_request(struct dp_ctrl *dp_ctrl)
-{
-	int ret = 0;
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl) {
-		pr_err("Invalid input data\n");
-		return;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	if (!ctrl->link->phy_params.phy_test_pattern_sel) {
-		pr_debug("no test pattern selected by sink\n");
-		return;
-	}
-
-	pr_debug("start\n");
-
-	/*
-	 * The global reset will need DP link ralated clocks to be
-	 * running. Add the global reset just before disabling the
-	 * link clocks and core clocks.
-	 */
-	ctrl->catalog->reset(ctrl->catalog);
-	ctrl->dp_ctrl.stream_pre_off(&ctrl->dp_ctrl, ctrl->panel);
-	ctrl->dp_ctrl.stream_off(&ctrl->dp_ctrl, ctrl->panel);
-	ctrl->dp_ctrl.off(&ctrl->dp_ctrl);
-
-	ctrl->aux->init(ctrl->aux, ctrl->parser->aux_cfg);
-
-	ret = ctrl->dp_ctrl.on(&ctrl->dp_ctrl, ctrl->mst_mode,
-					ctrl->fec_mode, false);
-	if (ret)
-		pr_err("failed to enable DP controller\n");
-
-	ctrl->dp_ctrl.stream_on(&ctrl->dp_ctrl, ctrl->panel);
-	pr_debug("end\n");
-}
-
-static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
-{
-	bool success = false;
-	u32 pattern_sent = 0x0;
-	u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
-
-	dp_ctrl_update_vx_px(ctrl);
-	ctrl->catalog->send_phy_pattern(ctrl->catalog, pattern_requested);
-	ctrl->link->send_test_response(ctrl->link);
-
-	pattern_sent = ctrl->catalog->read_phy_pattern(ctrl->catalog);
-	pr_debug("pattern_request: %s. pattern_sent: 0x%x\n",
-			dp_link_get_phy_test_pattern(pattern_requested),
-			pattern_sent);
-
-	switch (pattern_sent) {
-	case MR_LINK_TRAINING1:
-		if (pattern_requested ==
-				DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING)
-			success = true;
-		break;
-	case MR_LINK_SYMBOL_ERM:
-		if ((pattern_requested ==
-				DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT)
-			|| (pattern_requested ==
-				DP_TEST_PHY_PATTERN_CP2520_PATTERN_1))
-			success = true;
-		break;
-	case MR_LINK_PRBS7:
-		if (pattern_requested == DP_TEST_PHY_PATTERN_PRBS7)
-			success = true;
-		break;
-	case MR_LINK_CUSTOM80:
-		if (pattern_requested ==
-				DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN)
-			success = true;
-		break;
-	case MR_LINK_TRAINING4:
-		if (pattern_requested ==
-				DP_TEST_PHY_PATTERN_CP2520_PATTERN_3)
-			success = true;
-		break;
-	default:
-		success = false;
-		break;
-	}
-
-	pr_debug("%s: %s\n", success ? "success" : "failed",
-			dp_link_get_phy_test_pattern(pattern_requested));
-}
-
-static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl,
-		struct dp_panel *panel, u32 *p_x_int, u32 *p_y_frac_enum)
-{
-	u64 min_slot_cnt, max_slot_cnt;
-	u64 raw_target_sc, target_sc_fixp;
-	u64 ts_denom, ts_enum, ts_int;
-	u64 pclk = panel->pinfo.pixel_clk_khz;
-	u64 lclk = panel->link_info.rate;
-	u64 lanes = panel->link_info.num_lanes;
-	u64 bpp = panel->pinfo.bpp;
-	u64 pbn = panel->pbn;
-	u64 numerator, denominator, temp, temp1, temp2;
-	u32 x_int = 0, y_frac_enum = 0;
-	u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp;
-
-	if (panel->pinfo.comp_info.comp_ratio)
-		bpp = panel->pinfo.comp_info.dsc_info.bpp;
-
-	/* min_slot_cnt */
-	numerator = pclk * bpp * 64 * 1000;
-	denominator = lclk * lanes * 8 * 1000;
-	min_slot_cnt = drm_fixp_from_fraction(numerator, denominator);
-
-	/* max_slot_cnt */
-	numerator = pbn * 54 * 1000;
-	denominator = lclk * lanes;
-	max_slot_cnt = drm_fixp_from_fraction(numerator, denominator);
-
-	/* raw_target_sc */
-	numerator = max_slot_cnt + min_slot_cnt;
-	denominator = drm_fixp_from_fraction(2, 1);
-	raw_target_sc = drm_fixp_div(numerator, denominator);
-
-	pr_debug("raw_target_sc before overhead:0x%llx\n", raw_target_sc);
-	pr_debug("dsc_overhead_fp:0x%llx\n", panel->pinfo.dsc_overhead_fp);
-
-	/* apply fec and dsc overhead factor */
-	if (panel->pinfo.dsc_overhead_fp)
-		raw_target_sc = drm_fixp_mul(raw_target_sc,
-					panel->pinfo.dsc_overhead_fp);
-
-	if (panel->fec_overhead_fp)
-		raw_target_sc = drm_fixp_mul(raw_target_sc,
-					panel->fec_overhead_fp);
-
-	pr_debug("raw_target_sc after overhead:0x%llx\n", raw_target_sc);
-
-	/* target_sc */
-	temp = drm_fixp_from_fraction(256 * lanes, 1);
-	numerator = drm_fixp_mul(raw_target_sc, temp);
-	denominator = drm_fixp_from_fraction(256 * lanes, 1);
-	target_sc_fixp = drm_fixp_div(numerator, denominator);
-
-	ts_enum = 256 * lanes;
-	ts_denom = drm_fixp_from_fraction(256 * lanes, 1);
-	ts_int = drm_fixp2int(target_sc_fixp);
-
-	temp = drm_fixp2int_ceil(raw_target_sc);
-	if (temp != ts_int) {
-		temp = drm_fixp_from_fraction(ts_int, 1);
-		temp1 = raw_target_sc - temp;
-		temp2 = drm_fixp_mul(temp1, ts_denom);
-		ts_enum = drm_fixp2int(temp2);
-	}
-
-	/* target_strm_sym */
-	ts_int_fixp = drm_fixp_from_fraction(ts_int, 1);
-	ts_frac_fixp = drm_fixp_from_fraction(ts_enum, drm_fixp2int(ts_denom));
-	temp = ts_int_fixp + ts_frac_fixp;
-	temp1 = drm_fixp_from_fraction(lanes, 1);
-	target_strm_sym = drm_fixp_mul(temp, temp1);
-
-	/* x_int */
-	x_int = drm_fixp2int(target_strm_sym);
-
-	/* y_enum_frac */
-	temp = drm_fixp_from_fraction(x_int, 1);
-	temp1 = target_strm_sym - temp;
-	temp2 = drm_fixp_from_fraction(256, 1);
-	y_frac_enum_fixp = drm_fixp_mul(temp1, temp2);
-
-	temp1 = drm_fixp2int(y_frac_enum_fixp);
-	temp2 = drm_fixp2int_ceil(y_frac_enum_fixp);
-
-	y_frac_enum = (u32)((temp1 == temp2) ? temp1 : temp1 + 1);
-
-	panel->mst_target_sc = raw_target_sc;
-	*p_x_int = x_int;
-	*p_y_frac_enum = y_frac_enum;
-
-	pr_debug("x_int: %d, y_frac_enum: %d\n", x_int, y_frac_enum);
-}
-
-static int dp_ctrl_mst_send_act(struct dp_ctrl_private *ctrl)
-{
-	bool act_complete;
-
-	if (!ctrl->mst_mode)
-		return 0;
-
-	ctrl->catalog->trigger_act(ctrl->catalog);
-	msleep(20); /* needs 1 frame time */
-
-	ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete);
-
-	if (!act_complete)
-		pr_err("mst act trigger complete failed\n");
-	else
-		DP_MST_DEBUG("mst ACT trigger complete SUCCESS\n");
-
-	return 0;
-}
-
-static void dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl,
-		struct dp_panel *panel)
-{
-	u32 x_int, y_frac_enum, lanes, bw_code;
-	int i;
-
-	if (!ctrl->mst_mode)
-		return;
-
-	DP_MST_DEBUG("mst stream channel allocation\n");
-
-	for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
-		ctrl->catalog->channel_alloc(ctrl->catalog,
-				i,
-				ctrl->mst_ch_info.slot_info[i].start_slot,
-				ctrl->mst_ch_info.slot_info[i].tot_slots);
-	}
-
-	lanes = ctrl->link->link_params.lane_count;
-	bw_code = ctrl->link->link_params.bw_code;
-
-	dp_ctrl_mst_calculate_rg(ctrl, panel, &x_int, &y_frac_enum);
-
-	ctrl->catalog->update_rg(ctrl->catalog, panel->stream_id,
-			x_int, y_frac_enum);
-
-	DP_MST_DEBUG("mst stream:%d, start_slot:%d, tot_slots:%d\n",
-			panel->stream_id,
-			panel->channel_start_slot, panel->channel_total_slots);
-
-	DP_MST_DEBUG("mst lane_cnt:%d, bw:%d, x_int:%d, y_frac:%d\n",
-			lanes, bw_code, x_int, y_frac_enum);
-}
-
-static void dp_ctrl_fec_dsc_setup(struct dp_ctrl_private *ctrl)
-{
-	u8 fec_sts = 0;
-	int rlen;
-	u32 dsc_enable;
-	const unsigned int fec_sts_dpcd = 0x280;
-
-	if (ctrl->stream_count || !ctrl->fec_mode)
-		return;
-
-	ctrl->catalog->fec_config(ctrl->catalog, ctrl->fec_mode);
-
-	/* wait for controller to start fec sequence */
-	usleep_range(900, 1000);
-	drm_dp_dpcd_readb(ctrl->aux->drm_aux, fec_sts_dpcd, &fec_sts);
-	pr_debug("sink fec status:%d\n", fec_sts);
-
-	dsc_enable = ctrl->fec_mode ? 1 : 0;
-	rlen = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_DSC_ENABLE,
-			dsc_enable);
-	if (rlen < 1)
-		pr_debug("failed to enable sink dsc\n");
-}
-
-static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
-{
-	int rc = 0;
-	bool link_ready = false;
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl || !panel)
-		return -EINVAL;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	rc = dp_ctrl_enable_stream_clocks(ctrl, panel);
-	if (rc) {
-		pr_err("failure on stream clock enable\n");
-		return rc;
-	}
-
-	rc = panel->hw_cfg(panel, true);
-	if (rc)
-		return rc;
-
-	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		dp_ctrl_send_phy_test_pattern(ctrl);
-		return 0;
-	}
-
-	dp_ctrl_mst_stream_setup(ctrl, panel);
-
-	dp_ctrl_send_video(ctrl);
-
-	dp_ctrl_mst_send_act(ctrl);
-
-	dp_ctrl_wait4video_ready(ctrl);
-
-	dp_ctrl_fec_dsc_setup(ctrl);
-
-	ctrl->stream_count++;
-
-	link_ready = ctrl->catalog->mainlink_ready(ctrl->catalog);
-	pr_debug("mainlink %s\n", link_ready ? "READY" : "NOT READY");
-
-	return rc;
-}
-
-static void dp_ctrl_mst_stream_pre_off(struct dp_ctrl *dp_ctrl,
-		struct dp_panel *panel)
-{
-	struct dp_ctrl_private *ctrl;
-	bool act_complete;
-	int i;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	if (!ctrl->mst_mode)
-		return;
-
-	for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
-		ctrl->catalog->channel_alloc(ctrl->catalog,
-				i,
-				ctrl->mst_ch_info.slot_info[i].start_slot,
-				ctrl->mst_ch_info.slot_info[i].tot_slots);
-	}
-
-	ctrl->catalog->trigger_act(ctrl->catalog);
-	msleep(20); /* needs 1 frame time */
-	ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete);
-
-	if (!act_complete)
-		pr_err("mst stream_off act trigger complete failed\n");
-	else
-		DP_MST_DEBUG("mst stream_off ACT trigger complete SUCCESS\n");
-}
-
-static void dp_ctrl_stream_pre_off(struct dp_ctrl *dp_ctrl,
-		struct dp_panel *panel)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl || !panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	dp_ctrl_push_idle(ctrl, panel->stream_id);
-
-	dp_ctrl_mst_stream_pre_off(dp_ctrl, panel);
-}
-
-static void dp_ctrl_stream_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl || !panel)
-		return;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	if (!ctrl->power_on)
-		return;
-
-	panel->hw_cfg(panel, false);
-
-	dp_ctrl_disable_stream_clocks(ctrl, panel);
-	ctrl->stream_count--;
-}
-
-static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode,
-				bool fec_mode, bool shallow)
-{
-	int rc = 0;
-	struct dp_ctrl_private *ctrl;
-	u32 rate = 0;
-
-	if (!dp_ctrl) {
-		rc = -EINVAL;
-		goto end;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	if (ctrl->power_on)
-		goto end;
-
-	ctrl->mst_mode = mst_mode;
-	ctrl->fec_mode = fec_mode;
-	rate = ctrl->panel->link_info.rate;
-
-	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		pr_debug("using phy test link parameters\n");
-	} else {
-		ctrl->link->link_params.bw_code =
-			drm_dp_link_rate_to_bw_code(rate);
-		ctrl->link->link_params.lane_count =
-			ctrl->panel->link_info.num_lanes;
-	}
-
-	pr_debug("bw_code=%d, lane_count=%d\n",
-		ctrl->link->link_params.bw_code,
-		ctrl->link->link_params.lane_count);
-
-	rc = dp_ctrl_link_setup(ctrl, shallow);
-	ctrl->power_on = true;
-end:
-	return rc;
-}
-
-static void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl)
-		return;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	if (!ctrl->power_on)
-		return;
-
-	dp_ctrl_configure_source_link_params(ctrl, false);
-	ctrl->catalog->reset(ctrl->catalog);
-
-	/* Make sure DP is disabled before clk disable */
-	wmb();
-
-	dp_ctrl_disable_link_clock(ctrl);
-
-	ctrl->mst_mode = false;
-	ctrl->fec_mode = false;
-	ctrl->power_on = false;
-	memset(&ctrl->mst_ch_info, 0, sizeof(ctrl->mst_ch_info));
-	pr_debug("DP off done\n");
-}
-
-static void dp_ctrl_set_mst_channel_info(struct dp_ctrl *dp_ctrl,
-		enum dp_stream_id strm,
-		u32 start_slot, u32 tot_slots)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl || strm >= DP_STREAM_MAX) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	ctrl->mst_ch_info.slot_info[strm].start_slot = start_slot;
-	ctrl->mst_ch_info.slot_info[strm].tot_slots = tot_slots;
-}
-
-static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl)
-		return;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	ctrl->catalog->get_interrupt(ctrl->catalog);
-
-	if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO)
-		dp_ctrl_video_ready(ctrl);
-
-	if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT)
-		dp_ctrl_idle_patterns_sent(ctrl);
-
-	if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP0_VCPF_SENT)
-		dp_ctrl_idle_patterns_sent(ctrl);
-
-	if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP1_VCPF_SENT)
-		dp_ctrl_idle_patterns_sent(ctrl);
-}
-
-struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
-{
-	int rc = 0;
-	struct dp_ctrl_private *ctrl;
-	struct dp_ctrl *dp_ctrl;
-
-	if (!in->dev || !in->panel || !in->aux ||
-	    !in->link || !in->catalog) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL);
-	if (!ctrl) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	init_completion(&ctrl->idle_comp);
-	init_completion(&ctrl->video_comp);
-
-	/* in parameters */
-	ctrl->parser   = in->parser;
-	ctrl->panel    = in->panel;
-	ctrl->power    = in->power;
-	ctrl->aux      = in->aux;
-	ctrl->link     = in->link;
-	ctrl->catalog  = in->catalog;
-	ctrl->dev  = in->dev;
-	ctrl->mst_mode = false;
-	ctrl->fec_mode = false;
-
-	dp_ctrl = &ctrl->dp_ctrl;
-
-	/* out parameters */
-	dp_ctrl->init      = dp_ctrl_host_init;
-	dp_ctrl->deinit    = dp_ctrl_host_deinit;
-	dp_ctrl->on        = dp_ctrl_on;
-	dp_ctrl->off       = dp_ctrl_off;
-	dp_ctrl->abort     = dp_ctrl_abort;
-	dp_ctrl->isr       = dp_ctrl_isr;
-	dp_ctrl->link_maintenance = dp_ctrl_link_maintenance;
-	dp_ctrl->process_phy_test_request = dp_ctrl_process_phy_test_request;
-	dp_ctrl->stream_on = dp_ctrl_stream_on;
-	dp_ctrl->stream_off = dp_ctrl_stream_off;
-	dp_ctrl->stream_pre_off = dp_ctrl_stream_pre_off;
-	dp_ctrl->set_mst_channel_info = dp_ctrl_set_mst_channel_info;
-
-	return dp_ctrl;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
-{
-	struct dp_ctrl_private *ctrl;
-
-	if (!dp_ctrl)
-		return;
-
-	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
-	devm_kfree(ctrl->dev, ctrl);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
deleted file mode 100644
index 0f855ef..0000000
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_CTRL_H_
-#define _DP_CTRL_H_
-
-#include "dp_aux.h"
-#include "dp_panel.h"
-#include "dp_link.h"
-#include "dp_parser.h"
-#include "dp_power.h"
-#include "dp_catalog.h"
-
-struct dp_ctrl {
-	int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
-	void (*deinit)(struct dp_ctrl *dp_ctrl);
-	int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode, bool fec_en,
-			bool shallow);
-	void (*off)(struct dp_ctrl *dp_ctrl);
-	void (*abort)(struct dp_ctrl *dp_ctrl);
-	void (*isr)(struct dp_ctrl *dp_ctrl);
-	bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
-	void (*process_phy_test_request)(struct dp_ctrl *dp_ctrl);
-	int (*link_maintenance)(struct dp_ctrl *dp_ctrl);
-	int (*stream_on)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
-	void (*stream_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
-	void (*stream_pre_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
-	void (*set_mst_channel_info)(struct dp_ctrl *dp_ctrl,
-			enum dp_stream_id strm,
-			u32 ch_start_slot, u32 ch_tot_slots);
-};
-
-struct dp_ctrl_in {
-	struct device *dev;
-	struct dp_panel *panel;
-	struct dp_aux *aux;
-	struct dp_link *link;
-	struct dp_parser *parser;
-	struct dp_power *power;
-	struct dp_catalog_ctrl *catalog;
-};
-
-struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in);
-void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
-
-#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
deleted file mode 100644
index e581303..0000000
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ /dev/null
@@ -1,2101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-
-#include "dp_power.h"
-#include "dp_catalog.h"
-#include "dp_aux.h"
-#include "dp_debug.h"
-#include "drm_connector.h"
-#include "sde_connector.h"
-#include "dp_display.h"
-
-#define DEBUG_NAME "drm_dp"
-
-struct dp_debug_private {
-	struct dentry *root;
-	u8 *edid;
-	u32 edid_size;
-
-	u8 *dpcd;
-	u32 dpcd_size;
-
-	u32 mst_con_id;
-
-	char exe_mode[SZ_32];
-	char reg_dump[SZ_32];
-
-	struct dp_hpd *hpd;
-	struct dp_link *link;
-	struct dp_panel *panel;
-	struct dp_aux *aux;
-	struct dp_catalog *catalog;
-	struct drm_connector **connector;
-	struct device *dev;
-	struct dp_debug dp_debug;
-	struct dp_parser *parser;
-	struct dp_ctrl *ctrl;
-	struct mutex lock;
-};
-
-static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
-{
-	int rc = 0;
-
-	if (!debug->edid) {
-		debug->edid = devm_kzalloc(debug->dev, SZ_256, GFP_KERNEL);
-		if (!debug->edid) {
-			rc = -ENOMEM;
-			goto end;
-		}
-
-		debug->edid_size = SZ_256;
-	}
-end:
-	return rc;
-}
-
-static int dp_debug_get_dpcd_buf(struct dp_debug_private *debug)
-{
-	int rc = 0;
-
-	if (!debug->dpcd) {
-		debug->dpcd = devm_kzalloc(debug->dev, SZ_4K, GFP_KERNEL);
-		if (!debug->dpcd) {
-			rc = -ENOMEM;
-			goto end;
-		}
-
-		debug->dpcd_size = SZ_4K;
-	}
-end:
-	return rc;
-}
-
-static ssize_t dp_debug_write_edid(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	u8 *buf = NULL, *buf_t = NULL, *edid = NULL;
-	const int char_to_nib = 2;
-	size_t edid_size = 0;
-	size_t size = 0, edid_buf_index = 0;
-	ssize_t rc = count;
-
-	if (!debug)
-		return -ENODEV;
-
-	mutex_lock(&debug->lock);
-
-	if (*ppos)
-		goto bail;
-
-	size = min_t(size_t, count, SZ_1K);
-
-	buf = kzalloc(size, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf)) {
-		rc = -ENOMEM;
-		goto bail;
-	}
-
-	if (copy_from_user(buf, user_buff, size))
-		goto bail;
-
-	edid_size = size / char_to_nib;
-	buf_t = buf;
-
-	if (dp_debug_get_edid_buf(debug))
-		goto bail;
-
-	if (edid_size != debug->edid_size) {
-		pr_debug("realloc debug edid\n");
-
-		if (debug->edid) {
-			devm_kfree(debug->dev, debug->edid);
-
-			debug->edid = devm_kzalloc(debug->dev,
-						edid_size, GFP_KERNEL);
-			if (!debug->edid) {
-				rc = -ENOMEM;
-				goto bail;
-			}
-
-			debug->edid_size = edid_size;
-
-			debug->aux->set_sim_mode(debug->aux,
-					debug->dp_debug.sim_mode,
-					debug->edid, debug->dpcd);
-		}
-	}
-
-	while (edid_size--) {
-		char t[3];
-		int d;
-
-		memcpy(t, buf_t, sizeof(char) * char_to_nib);
-		t[char_to_nib] = '\0';
-
-		if (kstrtoint(t, 16, &d)) {
-			pr_err("kstrtoint error\n");
-			goto bail;
-		}
-
-		if (debug->edid && (edid_buf_index < debug->edid_size))
-			debug->edid[edid_buf_index++] = d;
-
-		buf_t += char_to_nib;
-	}
-
-	edid = debug->edid;
-bail:
-	kfree(buf);
-	debug->panel->set_edid(debug->panel, edid);
-
-	/*
-	 * print edid status as this code is executed
-	 * only while running in debug mode which is manually
-	 * triggered by a tester or a script.
-	 */
-	pr_info("[%s]\n", edid ? "SET" : "CLEAR");
-
-	mutex_unlock(&debug->lock);
-	return rc;
-}
-
-static ssize_t dp_debug_write_dpcd(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	u8 *buf = NULL, *buf_t = NULL, *dpcd = NULL;
-	const int char_to_nib = 2;
-	size_t dpcd_size = 0;
-	size_t size = 0, dpcd_buf_index = 0;
-	ssize_t rc = count;
-	char offset_ch[5];
-	u32 offset, data_len;
-	const u32 dp_receiver_cap_size = 16;
-
-	if (!debug)
-		return -ENODEV;
-
-	mutex_lock(&debug->lock);
-
-	if (*ppos)
-		goto bail;
-
-	size = min_t(size_t, count, SZ_2K);
-
-	if (size <= char_to_nib)
-		goto bail;
-
-	buf = kzalloc(size, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf)) {
-		rc = -ENOMEM;
-		goto bail;
-	}
-
-	if (copy_from_user(buf, user_buff, size))
-		goto bail;
-
-	memcpy(offset_ch, buf, 4);
-	offset_ch[4] = '\0';
-
-	if (kstrtoint(offset_ch, 16, &offset)) {
-		pr_err("offset kstrtoint error\n");
-		goto bail;
-	}
-
-	if (dp_debug_get_dpcd_buf(debug))
-		goto bail;
-
-	if (offset == 0xFFFF) {
-		pr_err("clearing dpcd\n");
-		memset(debug->dpcd, 0, debug->dpcd_size);
-		goto bail;
-	}
-
-	size -= 4;
-
-	dpcd_size = size / char_to_nib;
-	data_len = dpcd_size;
-	buf_t = buf + 4;
-
-	dpcd_buf_index = offset;
-
-	while (dpcd_size--) {
-		char t[3];
-		int d;
-
-		memcpy(t, buf_t, sizeof(char) * char_to_nib);
-		t[char_to_nib] = '\0';
-
-		if (kstrtoint(t, 16, &d)) {
-			pr_err("kstrtoint error\n");
-			goto bail;
-		}
-
-		if (dpcd_buf_index < debug->dpcd_size)
-			debug->dpcd[dpcd_buf_index++] = d;
-
-		buf_t += char_to_nib;
-	}
-
-	dpcd = debug->dpcd;
-bail:
-	kfree(buf);
-
-	/*
-	 * Reset panel's dpcd in case of any failure. Also, set the
-	 * panel's dpcd only if a full dpcd is provided with offset as 0.
-	 */
-	if (!dpcd || (!offset && (data_len == dp_receiver_cap_size))) {
-		debug->panel->set_dpcd(debug->panel, dpcd);
-
-		/*
-		 * print dpcd status as this code is executed
-		 * only while running in debug mode which is manually
-		 * triggered by a tester or a script.
-		 */
-		pr_info("[%s]\n", dpcd ? "SET" : "CLEAR");
-	} else
-		debug->aux->dpcd_updated(debug->aux);
-
-	mutex_unlock(&debug->lock);
-	return rc;
-}
-
-static ssize_t dp_debug_read_dpcd(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	int const buf_size = SZ_4K;
-	u32 offset = 0;
-	u32 len = 0;
-
-	if (!debug || !debug->aux || !debug->dpcd)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	len += snprintf(buf, buf_size, "0x%x", debug->aux->reg);
-
-	if (!debug->aux->read) {
-		while (1) {
-			if (debug->aux->reg + offset >= buf_size ||
-			    offset >= debug->aux->size)
-				break;
-
-			len += snprintf(buf + len, buf_size - len, "0x%x",
-				debug->dpcd[debug->aux->reg + offset++]);
-		}
-
-		if (debug->dp_debug.sim_mode && debug->aux->dpcd_updated)
-			debug->aux->dpcd_updated(debug->aux);
-	}
-
-	if (!copy_to_user(user_buff, buf, len))
-		*ppos += len;
-
-	kfree(buf);
-	return len;
-}
-
-static ssize_t dp_debug_write_hpd(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	int const hpd_data_mask = 0x7;
-	int hpd = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &hpd) != 0)
-		goto end;
-
-	hpd &= hpd_data_mask;
-
-	debug->dp_debug.psm_enabled = !!(hpd & BIT(1));
-
-	debug->hpd->simulate_connect(debug->hpd, !!(hpd & BIT(0)));
-end:
-	return len;
-}
-
-static ssize_t dp_debug_write_edid_modes(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_32];
-	size_t len = 0;
-	int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		goto end;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_32 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto clear;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%d %d %d %d", &hdisplay, &vdisplay, &vrefresh,
-				&aspect_ratio) != 4)
-		goto clear;
-
-	if (!hdisplay || !vdisplay || !vrefresh)
-		goto clear;
-
-	debug->dp_debug.debug_en = true;
-	debug->dp_debug.hdisplay = hdisplay;
-	debug->dp_debug.vdisplay = vdisplay;
-	debug->dp_debug.vrefresh = vrefresh;
-	debug->dp_debug.aspect_ratio = aspect_ratio;
-	goto end;
-clear:
-	pr_debug("clearing debug modes\n");
-	debug->dp_debug.debug_en = false;
-end:
-	return len;
-}
-
-static ssize_t dp_debug_write_edid_modes_mst(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	struct dp_mst_connector *mst_connector;
-	char buf[SZ_512];
-	char *read_buf;
-	size_t len = 0;
-
-	int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio = 0;
-	int con_id = 0, offset = 0, debug_en = 0;
-	bool in_list = false;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		goto end;
-
-	len = min_t(size_t, count, SZ_512 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-	read_buf = buf;
-
-	mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock);
-	while (sscanf(read_buf, "%d %d %d %d %d %d%n", &debug_en, &con_id,
-			&hdisplay, &vdisplay, &vrefresh, &aspect_ratio,
-			&offset) == 6) {
-		list_for_each_entry(mst_connector,
-				&debug->dp_debug.dp_mst_connector_list.list,
-				list) {
-			if (mst_connector->con_id == con_id) {
-				in_list = true;
-				mst_connector->debug_en = (bool) debug_en;
-				mst_connector->hdisplay = hdisplay;
-				mst_connector->vdisplay = vdisplay;
-				mst_connector->vrefresh = vrefresh;
-				mst_connector->aspect_ratio = aspect_ratio;
-			}
-		}
-
-		if (!in_list)
-			pr_debug("dp connector id %d is invalid\n", con_id);
-
-		in_list = false;
-		read_buf += offset;
-	}
-	mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock);
-end:
-	return len;
-}
-
-static ssize_t dp_debug_write_mst_con_id(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	struct dp_mst_connector *mst_connector;
-	char buf[SZ_32];
-	size_t len = 0;
-	int con_id = 0, status;
-	bool in_list = false;
-	const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8);
-	int vdo = dp_en | hpd_high | hpd_irq;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		goto end;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_32 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto clear;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%d %d", &con_id, &status) != 2) {
-		len = 0;
-		goto end;
-	}
-
-	if (!con_id)
-		goto clear;
-
-	/* Verify that the connector id is for a valid mst connector. */
-	mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock);
-	list_for_each_entry(mst_connector,
-			&debug->dp_debug.dp_mst_connector_list.list, list) {
-		if (mst_connector->con_id == con_id) {
-			in_list = true;
-			debug->mst_con_id = con_id;
-			mst_connector->state = status;
-			break;
-		}
-	}
-	mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock);
-
-	if (!in_list)
-		pr_err("invalid connector id %u\n", con_id);
-	else if (status != connector_status_unknown) {
-		debug->dp_debug.mst_hpd_sim = true;
-		debug->hpd->simulate_attention(debug->hpd, vdo);
-	}
-
-	goto end;
-clear:
-	pr_debug("clearing mst_con_id\n");
-	debug->mst_con_id = 0;
-end:
-	return len;
-}
-
-static ssize_t dp_debug_bw_code_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	u32 max_bw_code = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		return 0;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &max_bw_code) != 0)
-		return 0;
-
-	if (!is_link_rate_valid(max_bw_code)) {
-		pr_err("Unsupported bw code %d\n", max_bw_code);
-		return len;
-	}
-	debug->panel->max_bw_code = max_bw_code;
-	pr_debug("max_bw_code: %d\n", max_bw_code);
-
-	return len;
-}
-
-static ssize_t dp_debug_mst_mode_read(struct file *file,
-	char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[64];
-	ssize_t len;
-
-	len = scnprintf(buf, sizeof(buf),
-			"mst_mode = %d, mst_state = %d\n",
-			debug->parser->has_mst,
-			debug->panel->mst_state);
-
-	return simple_read_from_buffer(user_buff, count, ppos, buf, len);
-}
-
-static ssize_t dp_debug_mst_mode_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	u32 mst_mode = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		return 0;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &mst_mode) != 0)
-		return 0;
-
-	debug->parser->has_mst = mst_mode ? true : false;
-	pr_debug("mst_enable: %d\n", mst_mode);
-
-	return len;
-}
-
-static ssize_t dp_debug_max_pclk_khz_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	u32 max_pclk = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		return 0;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &max_pclk) != 0)
-		return 0;
-
-	if (max_pclk > debug->parser->max_pclk_khz)
-		pr_err("requested: %d, max_pclk_khz:%d\n", max_pclk,
-				debug->parser->max_pclk_khz);
-	else
-		debug->dp_debug.max_pclk_khz = max_pclk;
-
-	pr_debug("max_pclk_khz: %d\n", max_pclk);
-
-	return len;
-}
-
-static ssize_t dp_debug_max_pclk_khz_read(struct file *file,
-	char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	len += snprintf(buf + len, (SZ_4K - len),
-			"max_pclk_khz = %d, org: %d\n",
-			debug->dp_debug.max_pclk_khz,
-			debug->parser->max_pclk_khz);
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-	kfree(buf);
-	return len;
-}
-
-static ssize_t dp_debug_mst_sideband_mode_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	int mst_sideband_mode = 0;
-	u32 mst_port_cnt = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%d %u", &mst_sideband_mode, &mst_port_cnt) != 2) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (mst_port_cnt > DP_MST_SIM_MAX_PORTS) {
-		pr_err("port cnt:%d exceeding max:%d\n", mst_port_cnt,
-				DP_MST_SIM_MAX_PORTS);
-		return -EINVAL;
-	}
-
-	debug->parser->has_mst_sideband = mst_sideband_mode ? true : false;
-	debug->dp_debug.mst_port_cnt = mst_port_cnt;
-	pr_debug("mst_sideband_mode: %d port_cnt:%d\n",
-			mst_sideband_mode, mst_port_cnt);
-	return count;
-}
-
-static ssize_t dp_debug_widebus_mode_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	u32 widebus_mode = 0;
-
-	if (!debug || !debug->parser)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &widebus_mode) != 0)
-		return -EINVAL;
-
-	debug->parser->has_widebus = widebus_mode ? true : false;
-	pr_debug("widebus_enable: %d\n", widebus_mode);
-
-	return len;
-}
-
-static ssize_t dp_debug_tpg_write(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	u32 tpg_state = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto bail;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &tpg_state) != 0)
-		goto bail;
-
-	tpg_state &= 0x1;
-	pr_debug("tpg_state: %d\n", tpg_state);
-
-	if (tpg_state == debug->dp_debug.tpg_state)
-		goto bail;
-
-	if (debug->panel)
-		debug->panel->tpg_config(debug->panel, tpg_state);
-
-	debug->dp_debug.tpg_state = tpg_state;
-bail:
-	return len;
-}
-
-static ssize_t dp_debug_write_exe_mode(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_32];
-	size_t len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len = min_t(size_t, count, SZ_32 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%3s", debug->exe_mode) != 1)
-		goto end;
-
-	if (strcmp(debug->exe_mode, "hw") &&
-	    strcmp(debug->exe_mode, "sw") &&
-	    strcmp(debug->exe_mode, "all"))
-		goto end;
-
-	debug->catalog->set_exe_mode(debug->catalog, debug->exe_mode);
-end:
-	return len;
-}
-
-static ssize_t dp_debug_read_connected(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	u32 len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len += snprintf(buf, SZ_8, "%d\n", debug->hpd->hpd_high);
-
-	if (copy_to_user(user_buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;
-	return len;
-}
-
-static ssize_t dp_debug_write_hdcp(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	int hdcp = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &hdcp) != 0)
-		goto end;
-
-	debug->dp_debug.hdcp_disabled = !hdcp;
-end:
-	return len;
-}
-
-static ssize_t dp_debug_read_hdcp(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	u32 len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len = sizeof(debug->dp_debug.hdcp_status);
-
-	if (copy_to_user(user_buff, debug->dp_debug.hdcp_status, len))
-		return -EFAULT;
-
-	*ppos += len;
-	return len;
-}
-
-static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
-{
-	if (rc >= *max_size) {
-		pr_err("buffer overflow\n");
-		return -EINVAL;
-	}
-	*len += rc;
-	*max_size = SZ_4K - *len;
-
-	return 0;
-}
-
-static ssize_t dp_debug_read_edid_modes(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0, ret = 0, max_size = SZ_4K;
-	int rc = 0;
-	struct drm_connector *connector;
-	struct drm_display_mode *mode;
-
-	if (!debug) {
-		pr_err("invalid data\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	connector = *debug->connector;
-
-	if (!connector) {
-		pr_err("connector is NULL\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (*ppos)
-		goto error;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf)) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	mutex_lock(&connector->dev->mode_config.mutex);
-	list_for_each_entry(mode, &connector->modes, head) {
-		ret = snprintf(buf + len, max_size,
-		"%s %d %d %d %d %d 0x%x\n",
-		mode->name, mode->vrefresh, mode->picture_aspect_ratio,
-		mode->htotal, mode->vtotal, mode->clock, mode->flags);
-		if (dp_debug_check_buffer_overflow(ret, &max_size, &len))
-			break;
-	}
-	mutex_unlock(&connector->dev->mode_config.mutex);
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-	kfree(buf);
-
-	return len;
-error:
-	return rc;
-}
-
-static ssize_t dp_debug_read_edid_modes_mst(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	struct dp_mst_connector *mst_connector;
-	char *buf;
-	u32 len = 0, ret = 0, max_size = SZ_4K;
-	int rc = 0;
-	struct drm_connector *connector;
-	struct drm_display_mode *mode;
-	bool in_list = false;
-
-	if (!debug) {
-		pr_err("invalid data\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock);
-	list_for_each_entry(mst_connector,
-			&debug->dp_debug.dp_mst_connector_list.list, list) {
-		if (mst_connector->con_id == debug->mst_con_id) {
-			connector = mst_connector->conn;
-			in_list = true;
-		}
-	}
-	mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock);
-
-	if (!in_list) {
-		pr_err("connector %u not in mst list\n", debug->mst_con_id);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (!connector) {
-		pr_err("connector is NULL\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (*ppos)
-		goto error;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	mutex_lock(&connector->dev->mode_config.mutex);
-	list_for_each_entry(mode, &connector->modes, head) {
-		ret = snprintf(buf + len, max_size,
-				"%s %d %d %d %d %d 0x%x\n",
-				mode->name, mode->vrefresh,
-				mode->picture_aspect_ratio, mode->htotal,
-				mode->vtotal, mode->clock, mode->flags);
-		if (dp_debug_check_buffer_overflow(ret, &max_size, &len))
-			break;
-	}
-	mutex_unlock(&connector->dev->mode_config.mutex);
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-	kfree(buf);
-
-	return len;
-error:
-	return rc;
-}
-
-static ssize_t dp_debug_read_mst_con_id(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0, ret = 0, max_size = SZ_4K;
-	int rc = 0;
-
-	if (!debug) {
-		pr_err("invalid data\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	if (*ppos)
-		goto error;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	ret = snprintf(buf, max_size, "%u\n", debug->mst_con_id);
-	len += ret;
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-	kfree(buf);
-
-	return len;
-error:
-	return rc;
-}
-
-static ssize_t dp_debug_read_mst_conn_info(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	struct dp_mst_connector *mst_connector;
-	char *buf;
-	u32 len = 0, ret = 0, max_size = SZ_4K;
-	int rc = 0;
-	struct drm_connector *connector;
-
-	if (!debug) {
-		pr_err("invalid data\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	if (*ppos)
-		goto error;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock);
-	list_for_each_entry(mst_connector,
-			&debug->dp_debug.dp_mst_connector_list.list, list) {
-		/* Do not print info for head node */
-		if (mst_connector->con_id == -1)
-			continue;
-
-		connector = mst_connector->conn;
-
-		if (!connector) {
-			pr_err("connector for id %d is NULL\n",
-					mst_connector->con_id);
-			continue;
-		}
-
-		ret = scnprintf(buf + len, max_size,
-				"conn name:%s, conn id:%d state:%d\n",
-				connector->name, connector->base.id,
-				connector->status);
-		if (dp_debug_check_buffer_overflow(ret, &max_size, &len))
-			break;
-	}
-	mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock);
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-	kfree(buf);
-
-	return len;
-error:
-	return rc;
-}
-
-static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
-		size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0, rc = 0;
-	u32 max_size = SZ_4K;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "\tlink_rate=%u\n",
-		debug->panel->link_info.rate);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "\tnum_lanes=%u\n",
-		debug->panel->link_info.num_lanes);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "\tresolution=%dx%d@%dHz\n",
-		debug->panel->pinfo.h_active,
-		debug->panel->pinfo.v_active,
-		debug->panel->pinfo.refresh_rate);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "\tpclock=%dKHz\n",
-		debug->panel->pinfo.pixel_clk_khz);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "\tbpp=%d\n",
-		debug->panel->pinfo.bpp);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	/* Link Information */
-	rc = snprintf(buf + len, max_size, "\ttest_req=%s\n",
-		dp_link_get_test_name(debug->link->sink_request));
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tlane_count=%d\n", debug->link->link_params.lane_count);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tbw_code=%d\n", debug->link->link_params.bw_code);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tv_level=%d\n", debug->link->phy_params.v_level);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tp_level=%d\n", debug->link->phy_params.p_level);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	if (copy_to_user(user_buff, buf, len))
-		goto error;
-
-	*ppos += len;
-
-	kfree(buf);
-	return len;
-error:
-	kfree(buf);
-	return -EINVAL;
-}
-
-static ssize_t dp_debug_bw_code_read(struct file *file,
-	char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	len += snprintf(buf + len, (SZ_4K - len),
-			"max_bw_code = %d\n", debug->panel->max_bw_code);
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-	kfree(buf);
-	return len;
-}
-
-static ssize_t dp_debug_tpg_read(struct file *file,
-	char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	u32 len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state);
-
-	if (copy_to_user(user_buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;
-	return len;
-}
-
-static ssize_t dp_debug_write_hdr(struct file *file,
-	const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct drm_connector *connector;
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_512];
-	size_t len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	connector = *debug->connector;
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(connector->state);
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_512 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
-			&c_state->hdr_meta.hdr_supported,
-			&c_state->hdr_meta.hdr_state,
-			&c_state->hdr_meta.eotf,
-			&c_state->hdr_meta.display_primaries_x[0],
-			&c_state->hdr_meta.display_primaries_x[1],
-			&c_state->hdr_meta.display_primaries_x[2],
-			&c_state->hdr_meta.display_primaries_y[0],
-			&c_state->hdr_meta.display_primaries_y[1],
-			&c_state->hdr_meta.display_primaries_y[2],
-			&c_state->hdr_meta.white_point_x,
-			&c_state->hdr_meta.white_point_y,
-			&c_state->hdr_meta.max_luminance,
-			&c_state->hdr_meta.min_luminance,
-			&c_state->hdr_meta.max_content_light_level,
-			&c_state->hdr_meta.max_average_light_level) != 15) {
-		pr_err("invalid input\n");
-		len = -EINVAL;
-	}
-
-	debug->panel->setup_hdr(debug->panel, &c_state->hdr_meta, false, 0);
-end:
-	return len;
-}
-
-static ssize_t dp_debug_read_hdr(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char *buf;
-	u32 len = 0, i;
-	u32 max_size = SZ_4K;
-	int rc = 0;
-	struct drm_connector *connector;
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	struct drm_msm_ext_hdr_metadata *hdr;
-
-	if (!debug) {
-		pr_err("invalid data\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	connector = *debug->connector;
-
-	if (!connector) {
-		pr_err("connector is NULL\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (*ppos)
-		goto error;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf)) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(connector->state);
-
-	hdr = &c_state->hdr_meta;
-
-	rc = snprintf(buf + len, max_size,
-		"============SINK HDR PARAMETERS===========\n");
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "eotf = %d\n",
-		connector->hdr_eotf);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "type_one = %d\n",
-		connector->hdr_metadata_type_one);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "max_luminance = %d\n",
-		connector->hdr_max_luminance);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "avg_luminance = %d\n",
-		connector->hdr_avg_luminance);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "min_luminance = %d\n",
-		connector->hdr_min_luminance);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"============VIDEO HDR PARAMETERS===========\n");
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "hdr_state = %d\n", hdr->hdr_state);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "hdr_supported = %d\n",
-			hdr->hdr_supported);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "eotf = %d\n", hdr->eotf);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "white_point_x = %d\n",
-		hdr->white_point_x);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "white_point_y = %d\n",
-		hdr->white_point_y);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "max_luminance = %d\n",
-		hdr->max_luminance);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "min_luminance = %d\n",
-		hdr->min_luminance);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "max_content_light_level = %d\n",
-		hdr->max_content_light_level);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size, "min_content_light_level = %d\n",
-		hdr->max_average_light_level);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
-		rc = snprintf(buf + len, max_size, "primaries_x[%d] = %d\n",
-			i, hdr->display_primaries_x[i]);
-		if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-			goto error;
-
-		rc = snprintf(buf + len, max_size, "primaries_y[%d] = %d\n",
-			i, hdr->display_primaries_y[i]);
-		if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-			goto error;
-	}
-
-	if (copy_to_user(user_buff, buf, len)) {
-		kfree(buf);
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-	kfree(buf);
-
-	return len;
-error:
-	return rc;
-}
-
-static void dp_debug_set_sim_mode(struct dp_debug_private *debug, bool sim)
-{
-	if (sim) {
-		if (dp_debug_get_edid_buf(debug))
-			return;
-
-		if (dp_debug_get_dpcd_buf(debug)) {
-			devm_kfree(debug->dev, debug->edid);
-			debug->edid = NULL;
-			return;
-		}
-
-		debug->dp_debug.sim_mode = true;
-		debug->aux->set_sim_mode(debug->aux, true,
-			debug->edid, debug->dpcd);
-	} else {
-		debug->aux->abort(debug->aux);
-		debug->ctrl->abort(debug->ctrl);
-
-		debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
-		debug->dp_debug.sim_mode = false;
-
-		debug->panel->set_edid(debug->panel, 0);
-		if (debug->edid) {
-			devm_kfree(debug->dev, debug->edid);
-			debug->edid = NULL;
-		}
-
-		debug->panel->set_dpcd(debug->panel, 0);
-		if (debug->dpcd) {
-			devm_kfree(debug->dev, debug->dpcd);
-			debug->dpcd = NULL;
-		}
-	}
-
-	/*
-	 * print simulation status as this code is executed
-	 * only while running in debug mode which is manually
-	 * triggered by a tester or a script.
-	 */
-	pr_info("%s\n", sim ? "[ON]" : "[OFF]");
-}
-
-static ssize_t dp_debug_write_sim(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	int sim;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	mutex_lock(&debug->lock);
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &sim) != 0)
-		goto end;
-
-	dp_debug_set_sim_mode(debug, sim);
-end:
-	mutex_unlock(&debug->lock);
-	return len;
-}
-
-static ssize_t dp_debug_write_attention(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_8];
-	size_t len = 0;
-	int vdo;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_8 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (kstrtoint(buf, 10, &vdo) != 0)
-		goto end;
-
-	debug->hpd->simulate_attention(debug->hpd, vdo);
-end:
-	return len;
-}
-
-static ssize_t dp_debug_write_dump(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dp_debug_private *debug = file->private_data;
-	char buf[SZ_32];
-	size_t len = 0;
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_32 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%31s", debug->reg_dump) != 1)
-		goto end;
-
-	/* qfprom register dump not supported */
-	if (!strcmp(debug->reg_dump, "qfprom_physical"))
-		strlcpy(debug->reg_dump, "clear", sizeof(debug->reg_dump));
-end:
-	return len;
-}
-
-static ssize_t dp_debug_read_dump(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	int rc = 0;
-	struct dp_debug_private *debug = file->private_data;
-	u8 *buf = NULL;
-	u32 len = 0;
-	char prefix[SZ_32];
-
-	if (!debug)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	if (!debug->hpd->hpd_high || !strlen(debug->reg_dump))
-		goto end;
-
-	rc = debug->catalog->get_reg_dump(debug->catalog,
-		debug->reg_dump, &buf, &len);
-	if (rc)
-		goto end;
-
-	snprintf(prefix, sizeof(prefix), "%s: ", debug->reg_dump);
-	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
-		16, 4, buf, len, false);
-
-	if (copy_to_user(user_buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;
-end:
-	return len;
-}
-
-static const struct file_operations dp_debug_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_info,
-};
-
-static const struct file_operations edid_modes_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_edid_modes,
-	.write = dp_debug_write_edid_modes,
-};
-
-static const struct file_operations edid_modes_mst_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_edid_modes_mst,
-	.write = dp_debug_write_edid_modes_mst,
-};
-
-static const struct file_operations mst_conn_info_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_mst_conn_info,
-};
-
-static const struct file_operations mst_con_id_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_mst_con_id,
-	.write = dp_debug_write_mst_con_id,
-};
-
-static const struct file_operations hpd_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_hpd,
-};
-
-static const struct file_operations edid_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_edid,
-};
-
-static const struct file_operations dpcd_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_dpcd,
-	.read = dp_debug_read_dpcd,
-};
-
-static const struct file_operations connected_fops = {
-	.open = simple_open,
-	.read = dp_debug_read_connected,
-};
-
-static const struct file_operations bw_code_fops = {
-	.open = simple_open,
-	.read = dp_debug_bw_code_read,
-	.write = dp_debug_bw_code_write,
-};
-static const struct file_operations exe_mode_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_exe_mode,
-};
-
-static const struct file_operations tpg_fops = {
-	.open = simple_open,
-	.read = dp_debug_tpg_read,
-	.write = dp_debug_tpg_write,
-};
-
-static const struct file_operations hdr_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_hdr,
-	.read = dp_debug_read_hdr,
-};
-
-static const struct file_operations sim_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_sim,
-};
-
-static const struct file_operations attention_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_attention,
-};
-
-static const struct file_operations dump_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_dump,
-	.read = dp_debug_read_dump,
-};
-
-static const struct file_operations mst_mode_fops = {
-	.open = simple_open,
-	.write = dp_debug_mst_mode_write,
-	.read = dp_debug_mst_mode_read,
-};
-
-static const struct file_operations mst_sideband_mode_fops = {
-	.open = simple_open,
-	.write = dp_debug_mst_sideband_mode_write,
-};
-
-static const struct file_operations max_pclk_khz_fops = {
-	.open = simple_open,
-	.write = dp_debug_max_pclk_khz_write,
-	.read = dp_debug_max_pclk_khz_read,
-};
-
-static const struct file_operations hdcp_fops = {
-	.open = simple_open,
-	.write = dp_debug_write_hdcp,
-	.read = dp_debug_read_hdcp,
-};
-
-static const struct file_operations widebus_mode_fops = {
-	.open = simple_open,
-	.write = dp_debug_widebus_mode_write,
-};
-
-static int dp_debug_init(struct dp_debug *dp_debug)
-{
-	int rc = 0;
-	struct dp_debug_private *debug = container_of(dp_debug,
-		struct dp_debug_private, dp_debug);
-	struct dentry *dir, *file;
-
-	dir = debugfs_create_dir(DEBUG_NAME, NULL);
-	if (IS_ERR_OR_NULL(dir)) {
-		if (!dir)
-			rc = -EINVAL;
-		else
-			rc = PTR_ERR(dir);
-		pr_err("[%s] debugfs create dir failed, rc = %d\n",
-		       DEBUG_NAME, rc);
-		goto error;
-	}
-
-	debug->root = dir;
-
-	file = debugfs_create_file("dp_debug", 0444, dir,
-				debug, &dp_debug_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs create file failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("edid_modes", 0644, dir,
-					debug, &edid_modes_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs create edid_modes failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("edid_modes_mst", 0644, dir,
-					debug, &edid_modes_mst_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs create edid_modes_mst failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("mst_con_id", 0644, dir,
-					debug, &mst_con_id_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs create mst_con_id failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("mst_con_info", 0644, dir,
-					debug, &mst_conn_info_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs create mst_conn_info failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("hpd", 0644, dir,
-					debug, &hpd_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs hpd failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("connected", 0444, dir,
-					debug, &connected_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs connected failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("max_bw_code", 0644, dir,
-			debug, &bw_code_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	file = debugfs_create_file("exe_mode", 0644, dir,
-			debug, &exe_mode_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs register failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	file = debugfs_create_file("edid", 0644, dir,
-					debug, &edid_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs edid failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("dpcd", 0644, dir,
-					debug, &dpcd_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs dpcd failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("tpg_ctrl", 0644, dir,
-			debug, &tpg_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs tpg failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("hdr", 0644, dir,
-		debug, &hdr_fops);
-
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs hdr failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("sim", 0644, dir,
-		debug, &sim_fops);
-
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs sim failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("attention", 0644, dir,
-		debug, &attention_fops);
-
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs attention failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("dump", 0644, dir,
-		debug, &dump_fops);
-
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs dump failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("mst_mode", 0644, dir,
-			debug, &mst_mode_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("mst_sideband_mode", 0644, dir,
-			debug, &mst_sideband_mode_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("max_pclk_khz", 0644, dir,
-			debug, &max_pclk_khz_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs max_pclk_khz failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_bool("force_encryption", 0644, dir,
-			&debug->dp_debug.force_encryption);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs force_encryption failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_file("hdcp", 0644, dir,
-					debug, &hdcp_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs hdcp failed, rc=%d\n",
-			DEBUG_NAME, rc);
-		goto error_remove_dir;
-	}
-
-	file = debugfs_create_bool("dsc_feature_enable", 0644, dir,
-			&debug->parser->dsc_feature_enable);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs dsc_feature failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	file = debugfs_create_bool("fec_feature_enable", 0644, dir,
-			&debug->parser->fec_feature_enable);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs fec_feature_enable failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	file = debugfs_create_file("widebus_mode", 0644, dir,
-			debug, &widebus_mode_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs widebus failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	file = debugfs_create_u32("max_lclk_khz", 0644, dir,
-			&debug->parser->max_lclk_khz);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		pr_err("[%s] debugfs max_lclk_khz failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-	}
-
-	return 0;
-
-error_remove_dir:
-	if (!file)
-		rc = -EINVAL;
-	debugfs_remove_recursive(dir);
-error:
-	return rc;
-}
-
-u8 *dp_debug_get_edid(struct dp_debug *dp_debug)
-{
-	struct dp_debug_private *debug;
-
-	if (!dp_debug)
-		return NULL;
-
-	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
-	return debug->edid;
-}
-
-static void dp_debug_abort(struct dp_debug *dp_debug)
-{
-	struct dp_debug_private *debug;
-
-	if (!dp_debug)
-		return;
-
-	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
-	mutex_lock(&debug->lock);
-	dp_debug_set_sim_mode(debug, false);
-	mutex_unlock(&debug->lock);
-}
-
-struct dp_debug *dp_debug_get(struct dp_debug_in *in)
-{
-	int rc = 0;
-	struct dp_debug_private *debug;
-	struct dp_debug *dp_debug;
-
-	if (!in->dev || !in->panel || !in->hpd || !in->link ||
-	    !in->catalog || !in->ctrl) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	debug = devm_kzalloc(in->dev, sizeof(*debug), GFP_KERNEL);
-	if (!debug) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	debug->dp_debug.debug_en = false;
-	debug->hpd = in->hpd;
-	debug->link = in->link;
-	debug->panel = in->panel;
-	debug->aux = in->aux;
-	debug->dev = in->dev;
-	debug->connector = in->connector;
-	debug->catalog = in->catalog;
-	debug->parser = in->parser;
-	debug->ctrl = in->ctrl;
-
-	dp_debug = &debug->dp_debug;
-	dp_debug->vdisplay = 0;
-	dp_debug->hdisplay = 0;
-	dp_debug->vrefresh = 0;
-
-	mutex_init(&debug->lock);
-
-	rc = dp_debug_init(dp_debug);
-	if (rc) {
-		devm_kfree(in->dev, debug);
-		goto error;
-	}
-
-	dp_debug->get_edid = dp_debug_get_edid;
-	dp_debug->abort = dp_debug_abort;
-
-	INIT_LIST_HEAD(&dp_debug->dp_mst_connector_list.list);
-
-	/*
-	 * Do not associate the head of the list with any connector in order to
-	 * maintain backwards compatibility with the SST use case.
-	 */
-	dp_debug->dp_mst_connector_list.con_id = -1;
-	dp_debug->dp_mst_connector_list.conn = NULL;
-	dp_debug->dp_mst_connector_list.debug_en = false;
-
-	dp_debug->max_pclk_khz = debug->parser->max_pclk_khz;
-
-	return dp_debug;
-error:
-	return ERR_PTR(rc);
-}
-
-static int dp_debug_deinit(struct dp_debug *dp_debug)
-{
-	struct dp_debug_private *debug;
-
-	if (!dp_debug)
-		return -EINVAL;
-
-	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
-	debugfs_remove_recursive(debug->root);
-
-	return 0;
-}
-
-void dp_debug_put(struct dp_debug *dp_debug)
-{
-	struct dp_debug_private *debug;
-
-	if (!dp_debug)
-		return;
-
-	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
-	dp_debug_deinit(dp_debug);
-
-	mutex_destroy(&debug->lock);
-
-	if (debug->edid)
-		devm_kfree(debug->dev, debug->edid);
-
-	if (debug->dpcd)
-		devm_kfree(debug->dev, debug->dpcd);
-
-	devm_kfree(debug->dev, debug);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
deleted file mode 100644
index 11b890e..0000000
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_DEBUG_H_
-#define _DP_DEBUG_H_
-
-#include "dp_panel.h"
-#include "dp_ctrl.h"
-#include "dp_link.h"
-#include "dp_usbpd.h"
-#include "dp_aux.h"
-#include "dp_display.h"
-
-/**
- * struct dp_debug
- * @debug_en: specifies whether debug mode enabled
- * @vdisplay: used to filter out vdisplay value
- * @hdisplay: used to filter out hdisplay value
- * @vrefresh: used to filter out vrefresh value
- * @tpg_state: specifies whether tpg feature is enabled
- * @max_pclk_khz: max pclk supported
- * @force_encryption: enable/disable forced encryption for HDCP 2.2
- */
-struct dp_debug {
-	bool debug_en;
-	bool sim_mode;
-	bool psm_enabled;
-	bool hdcp_disabled;
-	int aspect_ratio;
-	int vdisplay;
-	int hdisplay;
-	int vrefresh;
-	bool tpg_state;
-	u32 max_pclk_khz;
-	bool force_encryption;
-	char hdcp_status[SZ_128];
-	struct dp_mst_connector dp_mst_connector_list;
-	bool mst_hpd_sim;
-	u32 mst_port_cnt;
-
-	u8 *(*get_edid)(struct dp_debug *dp_debug);
-	void (*abort)(struct dp_debug *dp_debug);
-};
-
-/**
- * struct dp_debug_in
- * @dev: device instance of the caller
- * @panel: instance of panel module
- * @hpd: instance of hpd module
- * @link: instance of link module
- * @aux: instance of aux module
- * @connector: double pointer to display connector
- * @catalog: instance of catalog module
- * @parser: instance of parser module
- */
-struct dp_debug_in {
-	struct device *dev;
-	struct dp_panel *panel;
-	struct dp_hpd *hpd;
-	struct dp_link *link;
-	struct dp_aux *aux;
-	struct drm_connector **connector;
-	struct dp_catalog *catalog;
-	struct dp_parser *parser;
-	struct dp_ctrl *ctrl;
-};
-
-/**
- * dp_debug_get() - configure and get the DisplayPlot debug module data
- *
- * @in: input structure containing data to initialize the debug module
- * return: pointer to allocated debug module data
- *
- * This function sets up the debug module and provides a way
- * for debugfs input to be communicated with existing modules
- */
-struct dp_debug *dp_debug_get(struct dp_debug_in *in);
-
-/**
- * dp_debug_put()
- *
- * Cleans up dp_debug instance
- *
- * @dp_debug: instance of dp_debug
- */
-void dp_debug_put(struct dp_debug *dp_debug);
-#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
deleted file mode 100644
index 8ac7f46c..0000000
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ /dev/null
@@ -1,2635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/component.h>
-#include <linux/of_irq.h>
-#include <linux/extcon.h>
-#include <linux/soc/qcom/fsa4480-i2c.h>
-#include <linux/usb/usbpd.h>
-
-#include "sde_connector.h"
-
-#include "msm_drv.h"
-#include "dp_hpd.h"
-#include "dp_parser.h"
-#include "dp_power.h"
-#include "dp_catalog.h"
-#include "dp_aux.h"
-#include "dp_link.h"
-#include "dp_panel.h"
-#include "dp_ctrl.h"
-#include "dp_audio.h"
-#include "dp_display.h"
-#include "sde_hdcp.h"
-#include "dp_debug.h"
-
-#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-
-static struct dp_display *g_dp_display;
-#define HPD_STRING_SIZE 30
-
-struct dp_hdcp_dev {
-	void *fd;
-	struct sde_hdcp_ops *ops;
-	enum sde_hdcp_version ver;
-};
-
-struct dp_hdcp {
-	void *data;
-	struct sde_hdcp_ops *ops;
-
-	u32 source_cap;
-
-	struct dp_hdcp_dev dev[HDCP_VERSION_MAX];
-};
-
-struct dp_mst {
-	bool mst_active;
-
-	bool drm_registered;
-	struct dp_mst_drm_cbs cbs;
-};
-
-struct dp_display_private {
-	char *name;
-	int irq;
-
-	/* state variables */
-	bool core_initialized;
-	bool power_on;
-	bool is_connected;
-
-	atomic_t aborted;
-
-	struct platform_device *pdev;
-	struct device_node *aux_switch_node;
-	struct dentry *root;
-	struct completion notification_comp;
-
-	struct dp_hpd     *hpd;
-	struct dp_parser  *parser;
-	struct dp_power   *power;
-	struct dp_catalog *catalog;
-	struct dp_aux     *aux;
-	struct dp_link    *link;
-	struct dp_panel   *panel;
-	struct dp_ctrl    *ctrl;
-	struct dp_debug   *debug;
-
-	struct dp_panel *active_panels[DP_STREAM_MAX];
-	struct dp_hdcp hdcp;
-
-	struct dp_hpd_cb hpd_cb;
-	struct dp_display_mode mode;
-	struct dp_display dp_display;
-	struct msm_drm_private *priv;
-
-	struct workqueue_struct *wq;
-	struct delayed_work hdcp_cb_work;
-	struct work_struct connect_work;
-	struct work_struct attention_work;
-	struct mutex session_lock;
-
-	u32 active_stream_cnt;
-	struct dp_mst mst;
-
-	u32 tot_dsc_blks_in_use;
-
-	bool process_hpd_connect;
-
-	struct notifier_block usb_nb;
-};
-
-static const struct of_device_id dp_dt_match[] = {
-	{.compatible = "qcom,dp-display"},
-	{}
-};
-
-static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
-{
-	return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
-}
-
-static irqreturn_t dp_display_irq(int irq, void *dev_id)
-{
-	struct dp_display_private *dp = dev_id;
-
-	if (!dp) {
-		pr_err("invalid data\n");
-		return IRQ_NONE;
-	}
-
-	/* DP HPD isr */
-	if (dp->hpd->type ==  DP_HPD_LPHW)
-		dp->hpd->isr(dp->hpd);
-
-	/* DP controller isr */
-	dp->ctrl->isr(dp->ctrl);
-
-	/* DP aux isr */
-	dp->aux->isr(dp->aux);
-
-	/* HDCP isr */
-	if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->isr) {
-		if (dp->hdcp.ops->isr(dp->hdcp.data))
-			pr_err("dp_hdcp_isr failed\n");
-	}
-
-	return IRQ_HANDLED;
-}
-static bool dp_display_is_ds_bridge(struct dp_panel *panel)
-{
-	return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-		DP_DWN_STRM_PORT_PRESENT);
-}
-
-static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
-{
-	return dp_display_is_ds_bridge(dp->panel) &&
-		(dp->link->sink_count.count == 0);
-}
-
-static bool dp_display_is_ready(struct dp_display_private *dp)
-{
-	return dp->hpd->hpd_high && dp->is_connected &&
-		!dp_display_is_sink_count_zero(dp) &&
-		dp->hpd->alt_mode_cfg_done;
-}
-
-static void dp_display_update_hdcp_status(struct dp_display_private *dp,
-					bool reset)
-{
-	if (reset) {
-		dp->link->hdcp_status.hdcp_state = HDCP_STATE_INACTIVE;
-		dp->link->hdcp_status.hdcp_version = HDCP_VERSION_NONE;
-	}
-
-	memset(dp->debug->hdcp_status, 0, sizeof(dp->debug->hdcp_status));
-
-	snprintf(dp->debug->hdcp_status, sizeof(dp->debug->hdcp_status),
-		"%s: %s\ncaps: %d\n",
-		sde_hdcp_version(dp->link->hdcp_status.hdcp_version),
-		sde_hdcp_state_name(dp->link->hdcp_status.hdcp_state),
-		dp->hdcp.source_cap);
-}
-
-static void dp_display_update_hdcp_info(struct dp_display_private *dp)
-{
-	void *fd = NULL;
-	struct dp_hdcp_dev *dev = NULL;
-	struct sde_hdcp_ops *ops = NULL;
-	int i = HDCP_VERSION_2P2;
-
-	dp_display_update_hdcp_status(dp, true);
-
-	dp->hdcp.data = NULL;
-	dp->hdcp.ops = NULL;
-
-	if (dp->debug->hdcp_disabled || dp->debug->sim_mode)
-		return;
-
-	while (i) {
-		dev = &dp->hdcp.dev[i];
-		ops = dev->ops;
-		fd = dev->fd;
-
-		i >>= 1;
-
-		if (!(dp->hdcp.source_cap & dev->ver))
-			continue;
-
-		if (ops->sink_support(fd)) {
-			dp->hdcp.data = fd;
-			dp->hdcp.ops = ops;
-			dp->link->hdcp_status.hdcp_version = dev->ver;
-			break;
-		}
-	}
-
-	pr_debug("HDCP version supported: %s\n",
-		sde_hdcp_version(dp->link->hdcp_status.hdcp_version));
-}
-
-static void dp_display_check_source_hdcp_caps(struct dp_display_private *dp)
-{
-	int i;
-	struct dp_hdcp_dev *hdcp_dev = dp->hdcp.dev;
-
-	if (dp->debug->hdcp_disabled) {
-		pr_debug("hdcp disabled\n");
-		return;
-	}
-
-	for (i = 0; i < HDCP_VERSION_MAX; i++) {
-		struct dp_hdcp_dev *dev = &hdcp_dev[i];
-		struct sde_hdcp_ops *ops = dev->ops;
-		void *fd = dev->fd;
-
-		if (!fd || !ops)
-			continue;
-
-		if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
-			continue;
-
-		if (!(dp->hdcp.source_cap & dev->ver) &&
-				ops->feature_supported &&
-				ops->feature_supported(fd))
-			dp->hdcp.source_cap |= dev->ver;
-	}
-
-	dp_display_update_hdcp_status(dp, false);
-}
-
-static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
-{
-	int rc;
-	size_t i;
-	struct sde_hdcp_ops *ops = dp->hdcp.ops;
-	void *data = dp->hdcp.data;
-
-	if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
-			ops->register_streams){
-		struct stream_info streams[DP_STREAM_MAX];
-		int index = 0;
-
-		pr_debug("Registering all active panel streams with HDCP\n");
-		for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
-			if (!dp->active_panels[i])
-				continue;
-			streams[index].stream_id = i;
-			streams[index].virtual_channel =
-				dp->active_panels[i]->vcpi;
-			index++;
-		}
-
-		if (index > 0) {
-			rc = ops->register_streams(data, index, streams);
-			if (rc)
-				pr_err("failed to register streams. rc = %d\n",
-					rc);
-		}
-	}
-}
-
-static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
-		enum dp_stream_id stream_id)
-{
-	if (dp->hdcp.ops->deregister_streams) {
-		struct stream_info stream = {stream_id,
-				dp->active_panels[stream_id]->vcpi};
-
-		pr_debug("Deregistering stream within HDCP library\n");
-		dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
-	}
-}
-
-static void dp_display_hdcp_cb_work(struct work_struct *work)
-{
-	struct dp_display_private *dp;
-	struct delayed_work *dw = to_delayed_work(work);
-	struct sde_hdcp_ops *ops;
-	struct dp_link_hdcp_status *status;
-	void *data;
-	int rc = 0;
-	u32 hdcp_auth_state;
-	u8 sink_status = 0;
-
-	dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
-
-	if (!dp->power_on || !dp->is_connected || atomic_read(&dp->aborted))
-		return;
-
-	drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, &sink_status);
-	sink_status &= (DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS);
-	if (sink_status < 1) {
-		pr_debug("Sink not synchronized. Queuing again then exiting\n");
-		queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
-		return;
-	}
-
-	status = &dp->link->hdcp_status;
-
-	if (status->hdcp_state == HDCP_STATE_INACTIVE) {
-		dp_display_check_source_hdcp_caps(dp);
-		dp_display_update_hdcp_info(dp);
-
-		if (dp_display_is_hdcp_enabled(dp)) {
-			if (dp->hdcp.ops && dp->hdcp.ops->on &&
-					dp->hdcp.ops->on(dp->hdcp.data)) {
-				dp_display_update_hdcp_status(dp, true);
-				return;
-			}
-			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
-		} else {
-			dp_display_update_hdcp_status(dp, true);
-			return;
-		}
-	}
-
-	rc = dp->catalog->ctrl.read_hdcp_status(&dp->catalog->ctrl);
-	if (rc >= 0) {
-		hdcp_auth_state = (rc >> 20) & 0x3;
-		pr_debug("hdcp auth state %d\n", hdcp_auth_state);
-	}
-
-	ops = dp->hdcp.ops;
-	data = dp->hdcp.data;
-
-	pr_debug("%s: %s\n", sde_hdcp_version(status->hdcp_version),
-		sde_hdcp_state_name(status->hdcp_state));
-
-	dp_display_update_hdcp_status(dp, false);
-
-	if (dp->debug->force_encryption && ops && ops->force_encryption)
-		ops->force_encryption(data, dp->debug->force_encryption);
-
-	switch (status->hdcp_state) {
-	case HDCP_STATE_AUTHENTICATING:
-		dp_display_hdcp_register_streams(dp);
-		if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
-			rc = dp->hdcp.ops->authenticate(data);
-		break;
-	case HDCP_STATE_AUTH_FAIL:
-		if (dp_display_is_ready(dp) && dp->power_on) {
-			if (ops && ops->on && ops->on(data)) {
-				dp_display_update_hdcp_status(dp, true);
-				return;
-			}
-			dp_display_hdcp_register_streams(dp);
-			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
-			if (ops && ops->reauthenticate) {
-				rc = ops->reauthenticate(data);
-				if (rc)
-					pr_err("failed rc=%d\n", rc);
-			}
-		} else {
-			pr_debug("not reauthenticating, cable disconnected\n");
-		}
-		break;
-	default:
-		dp_display_hdcp_register_streams(dp);
-		break;
-	}
-}
-
-static void dp_display_notify_hdcp_status_cb(void *ptr,
-		enum sde_hdcp_state state)
-{
-	struct dp_display_private *dp = ptr;
-
-	if (!dp) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp->link->hdcp_status.hdcp_state = state;
-
-	queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4);
-}
-
-static void dp_display_deinitialize_hdcp(struct dp_display_private *dp)
-{
-	if (!dp) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	sde_dp_hdcp2p2_deinit(dp->hdcp.data);
-}
-
-static int dp_display_initialize_hdcp(struct dp_display_private *dp)
-{
-	struct sde_hdcp_init_data hdcp_init_data;
-	struct dp_parser *parser;
-	void *fd;
-	int rc = 0;
-
-	if (!dp) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	parser = dp->parser;
-
-	hdcp_init_data.client_id     = HDCP_CLIENT_DP;
-	hdcp_init_data.drm_aux       = dp->aux->drm_aux;
-	hdcp_init_data.cb_data       = (void *)dp;
-	hdcp_init_data.workq         = dp->wq;
-	hdcp_init_data.sec_access    = true;
-	hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
-	hdcp_init_data.dp_ahb        = &parser->get_io(parser, "dp_ahb")->io;
-	hdcp_init_data.dp_aux        = &parser->get_io(parser, "dp_aux")->io;
-	hdcp_init_data.dp_link       = &parser->get_io(parser, "dp_link")->io;
-	hdcp_init_data.dp_p0         = &parser->get_io(parser, "dp_p0")->io;
-	hdcp_init_data.qfprom_io     = &parser->get_io(parser,
-						"qfprom_physical")->io;
-	hdcp_init_data.hdcp_io       = &parser->get_io(parser,
-						"hdcp_physical")->io;
-	hdcp_init_data.revision      = &dp->panel->link_info.revision;
-	hdcp_init_data.msm_hdcp_dev  = dp->parser->msm_hdcp_dev;
-
-	fd = sde_hdcp_1x_init(&hdcp_init_data);
-	if (IS_ERR_OR_NULL(fd)) {
-		pr_err("Error initializing HDCP 1.x\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	dp->hdcp.dev[HDCP_VERSION_1X].fd = fd;
-	dp->hdcp.dev[HDCP_VERSION_1X].ops = sde_hdcp_1x_get(fd);
-	dp->hdcp.dev[HDCP_VERSION_1X].ver = HDCP_VERSION_1X;
-	pr_debug("HDCP 1.3 initialized\n");
-
-	fd = sde_dp_hdcp2p2_init(&hdcp_init_data);
-	if (IS_ERR_OR_NULL(fd)) {
-		pr_err("Error initializing HDCP 2.x\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	dp->hdcp.dev[HDCP_VERSION_2P2].fd = fd;
-	dp->hdcp.dev[HDCP_VERSION_2P2].ops = sde_dp_hdcp2p2_get(fd);
-	dp->hdcp.dev[HDCP_VERSION_2P2].ver = HDCP_VERSION_2P2;
-	pr_debug("HDCP 2.2 initialized\n");
-
-	return 0;
-error:
-	dp_display_deinitialize_hdcp(dp);
-
-	return rc;
-}
-
-static int dp_display_bind(struct device *dev, struct device *master,
-		void *data)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-	struct drm_device *drm;
-	struct platform_device *pdev = to_platform_device(dev);
-
-	if (!dev || !pdev || !master) {
-		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
-				dev, pdev, master);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	drm = dev_get_drvdata(master);
-	dp = platform_get_drvdata(pdev);
-	if (!drm || !dp) {
-		pr_err("invalid param(s), drm %pK, dp %pK\n",
-				drm, dp);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp->dp_display.drm_dev = drm;
-	dp->priv = drm->dev_private;
-end:
-	return rc;
-}
-
-static void dp_display_unbind(struct device *dev, struct device *master,
-		void *data)
-{
-	struct dp_display_private *dp;
-	struct platform_device *pdev = to_platform_device(dev);
-
-	if (!dev || !pdev) {
-		pr_err("invalid param(s)\n");
-		return;
-	}
-
-	dp = platform_get_drvdata(pdev);
-	if (!dp) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	if (dp->power)
-		(void)dp->power->power_client_deinit(dp->power);
-	if (dp->aux)
-		(void)dp->aux->drm_aux_deregister(dp->aux);
-	dp_display_deinitialize_hdcp(dp);
-}
-
-static const struct component_ops dp_display_comp_ops = {
-	.bind = dp_display_bind,
-	.unbind = dp_display_unbind,
-};
-
-static void dp_display_send_hpd_event(struct dp_display_private *dp)
-{
-	struct drm_device *dev = NULL;
-	struct drm_connector *connector;
-	char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE],
-		bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE];
-	char *envp[5];
-
-	if (dp->mst.mst_active) {
-		pr_debug("skip notification for mst mode\n");
-		return;
-	}
-
-	connector = dp->dp_display.base_connector;
-
-	if (!connector) {
-		pr_err("connector not set\n");
-		return;
-	}
-
-	connector->status = connector->funcs->detect(connector, false);
-
-	dev = connector->dev;
-
-	snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name);
-	snprintf(status, HPD_STRING_SIZE, "status=%s",
-		drm_get_connector_status_name(connector->status));
-	snprintf(bpp, HPD_STRING_SIZE, "bpp=%d",
-		dp_link_bit_depth_to_bpp(
-		dp->link->test_video.test_bit_depth));
-	snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
-		dp->link->test_video.test_video_pattern);
-
-	pr_debug("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
-	envp[0] = name;
-	envp[1] = status;
-	envp[2] = bpp;
-	envp[3] = pattern;
-	envp[4] = NULL;
-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-			envp);
-}
-
-static int dp_display_send_hpd_notification(struct dp_display_private *dp)
-{
-	int ret = 0;
-	bool hpd = dp->is_connected;
-
-	dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
-
-	if (!dp->mst.mst_active)
-		dp->dp_display.is_sst_connected = hpd;
-	else
-		dp->dp_display.is_sst_connected = false;
-
-	reinit_completion(&dp->notification_comp);
-	dp_display_send_hpd_event(dp);
-
-	if (hpd && dp->mst.mst_active)
-		goto skip_wait;
-
-	if (!dp->mst.mst_active && (dp->power_on == hpd))
-		goto skip_wait;
-
-	if (!wait_for_completion_timeout(&dp->notification_comp,
-						HZ * 5)) {
-		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
-		ret = -EINVAL;
-	}
-	return ret;
-skip_wait:
-	return 0;
-}
-
-static void dp_display_update_mst_state(struct dp_display_private *dp,
-					bool state)
-{
-	dp->mst.mst_active = state;
-	dp->panel->mst_state = state;
-}
-
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
-						bool mst_probe)
-{
-	bool is_mst_receiver;
-	struct dp_mst_hpd_info info;
-	int ret;
-
-	if (!dp->parser->has_mst || !dp->mst.drm_registered) {
-		DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
-				dp->parser->has_mst, dp->mst.drm_registered);
-		return;
-	}
-
-	DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
-
-	if (!dp->mst.mst_active) {
-		is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
-
-		if (!is_mst_receiver) {
-			DP_MST_DEBUG("sink doesn't support mst\n");
-			return;
-		}
-
-		/* clear sink mst state */
-		drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
-		ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
-				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
-			pr_err("sink mst enablement failed\n");
-			return;
-		}
-
-		dp_display_update_mst_state(dp, true);
-	} else if (dp->mst.mst_active && mst_probe) {
-		info.mst_protocol = dp->parser->has_mst_sideband;
-		info.mst_port_cnt = dp->debug->mst_port_cnt;
-		info.edid = dp->debug->get_edid(dp->debug);
-
-		if (dp->mst.cbs.hpd)
-			dp->mst.cbs.hpd(&dp->dp_display, true, &info);
-	}
-
-	DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
-}
-
-static void dp_display_host_init(struct dp_display_private *dp)
-{
-	bool flip = false;
-	bool reset;
-
-	if (dp->core_initialized)
-		return;
-
-	if (dp->hpd->orientation == ORIENTATION_CC2)
-		flip = true;
-
-	reset = dp->debug->sim_mode ? false : !dp->hpd->multi_func;
-
-	dp->power->init(dp->power, flip);
-	dp->hpd->host_init(dp->hpd, &dp->catalog->hpd);
-	dp->ctrl->init(dp->ctrl, flip, reset);
-	dp->aux->init(dp->aux, dp->parser->aux_cfg);
-	enable_irq(dp->irq);
-	dp->panel->init(dp->panel);
-	dp->core_initialized = true;
-
-	/* log this as it results from user action of cable connection */
-	pr_info("[OK]\n");
-}
-
-static void dp_display_host_deinit(struct dp_display_private *dp)
-{
-	if (!dp->core_initialized)
-		return;
-
-	if (dp->active_stream_cnt) {
-		pr_debug("active stream present\n");
-		return;
-	}
-
-	dp->aux->deinit(dp->aux);
-	dp->ctrl->deinit(dp->ctrl);
-	dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd);
-	dp->power->deinit(dp->power);
-	disable_irq(dp->irq);
-	dp->core_initialized = false;
-	dp->aux->state = 0;
-
-	/* log this as it results from user action of cable dis-connection */
-	pr_info("[OK]\n");
-}
-
-static int dp_display_process_hpd_high(struct dp_display_private *dp)
-{
-	int rc = -EINVAL;
-
-	mutex_lock(&dp->session_lock);
-
-	if (dp->is_connected) {
-		pr_debug("dp already connected, skipping hpd high\n");
-		mutex_unlock(&dp->session_lock);
-		rc = -EISCONN;
-		goto end;
-	}
-
-	dp->is_connected = true;
-
-	dp->dp_display.max_pclk_khz = min(dp->parser->max_pclk_khz,
-					dp->debug->max_pclk_khz);
-
-	dp_display_host_init(dp);
-
-	dp->link->psm_config(dp->link, &dp->panel->link_info, false);
-	dp->debug->psm_enabled = false;
-
-	if (!dp->dp_display.base_connector)
-		goto end;
-
-	rc = dp->panel->read_sink_caps(dp->panel,
-			dp->dp_display.base_connector, dp->hpd->multi_func);
-	/*
-	 * ETIMEDOUT --> cable may have been removed
-	 * ENOTCONN --> no downstream device connected
-	 */
-	if (rc == -ETIMEDOUT || rc == -ENOTCONN) {
-		dp->is_connected = false;
-		goto end;
-	}
-
-	dp->link->process_request(dp->link);
-	dp->panel->handle_sink_request(dp->panel);
-
-	dp_display_process_mst_hpd_high(dp, false);
-
-	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
-				dp->panel->fec_en, false);
-	if (rc) {
-		dp->is_connected = false;
-		goto end;
-	}
-
-	dp->process_hpd_connect = false;
-
-	dp_display_process_mst_hpd_high(dp, true);
-end:
-	mutex_unlock(&dp->session_lock);
-
-	if (!rc)
-		dp_display_send_hpd_notification(dp);
-
-	return rc;
-}
-
-static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
-{
-	struct dp_mst_hpd_info info = {0};
-
-	if (dp->mst.mst_active) {
-		DP_MST_DEBUG("mst_hpd_low work\n");
-
-		if (dp->mst.cbs.hpd) {
-			info.mst_protocol = dp->parser->has_mst_sideband;
-			dp->mst.cbs.hpd(&dp->dp_display, false, &info);
-		}
-		dp_display_update_mst_state(dp, false);
-	}
-
-	DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
-}
-
-static int dp_display_process_hpd_low(struct dp_display_private *dp)
-{
-	int rc = 0;
-
-	dp->is_connected = false;
-	dp->process_hpd_connect = false;
-
-	dp_display_process_mst_hpd_low(dp);
-
-	rc = dp_display_send_hpd_notification(dp);
-
-	mutex_lock(&dp->session_lock);
-	if (!dp->active_stream_cnt)
-		dp->ctrl->off(dp->ctrl);
-	mutex_unlock(&dp->session_lock);
-
-	dp->panel->video_test = false;
-
-	return rc;
-}
-
-static int dp_display_usbpd_configure_cb(struct device *dev)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dev) {
-		pr_err("invalid dev\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp = dev_get_drvdata(dev);
-	if (!dp) {
-		pr_err("no driver data found\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
-	    && !dp->parser->gpio_aux_switch) {
-		rc = dp->aux->aux_switch(dp->aux, true, dp->hpd->orientation);
-		if (rc)
-			goto end;
-	}
-
-	mutex_lock(&dp->session_lock);
-	dp_display_host_init(dp);
-
-	/* check for hpd high */
-	if (dp->hpd->hpd_high)
-		queue_work(dp->wq, &dp->connect_work);
-	else
-		dp->process_hpd_connect = true;
-	mutex_unlock(&dp->session_lock);
-end:
-	return rc;
-}
-
-static int dp_display_stream_pre_disable(struct dp_display_private *dp,
-			struct dp_panel *dp_panel)
-{
-	dp->ctrl->stream_pre_off(dp->ctrl, dp_panel);
-
-	return 0;
-}
-
-static void dp_display_stream_disable(struct dp_display_private *dp,
-			struct dp_panel *dp_panel)
-{
-	if (!dp->active_stream_cnt) {
-		pr_err("invalid active_stream_cnt (%d)\n",
-				dp->active_stream_cnt);
-		return;
-	}
-
-	pr_debug("stream_id=%d, active_stream_cnt=%d\n",
-			dp_panel->stream_id, dp->active_stream_cnt);
-
-	dp->ctrl->stream_off(dp->ctrl, dp_panel);
-	dp->active_panels[dp_panel->stream_id] = NULL;
-	dp->active_stream_cnt--;
-}
-
-static void dp_display_clean(struct dp_display_private *dp)
-{
-	int idx;
-	struct dp_panel *dp_panel;
-	struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
-
-	if (dp_display_is_hdcp_enabled(dp) &&
-			status->hdcp_state != HDCP_STATE_INACTIVE) {
-		cancel_delayed_work_sync(&dp->hdcp_cb_work);
-		if (dp->hdcp.ops->off)
-			dp->hdcp.ops->off(dp->hdcp.data);
-
-		dp_display_update_hdcp_status(dp, true);
-	}
-
-	for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
-		if (!dp->active_panels[idx])
-			continue;
-
-		dp_panel = dp->active_panels[idx];
-
-		dp_display_stream_pre_disable(dp, dp_panel);
-		dp_display_stream_disable(dp, dp_panel);
-		dp_panel->deinit(dp_panel, 0);
-	}
-
-	dp->power_on = false;
-
-	mutex_lock(&dp->session_lock);
-	dp->ctrl->off(dp->ctrl);
-	mutex_unlock(&dp->session_lock);
-}
-
-static int dp_display_handle_disconnect(struct dp_display_private *dp)
-{
-	int rc;
-
-	rc = dp_display_process_hpd_low(dp);
-	if (rc) {
-		/* cancel any pending request */
-		dp->ctrl->abort(dp->ctrl);
-		dp->aux->abort(dp->aux);
-	}
-
-	mutex_lock(&dp->session_lock);
-	if (rc && dp->power_on)
-		dp_display_clean(dp);
-
-	dp_display_host_deinit(dp);
-
-	mutex_unlock(&dp->session_lock);
-
-	return rc;
-}
-
-static void dp_display_disconnect_sync(struct dp_display_private *dp)
-{
-	/* cancel any pending request */
-	atomic_set(&dp->aborted, 1);
-	dp->ctrl->abort(dp->ctrl);
-	dp->aux->abort(dp->aux);
-
-	/* wait for idle state */
-	cancel_work_sync(&dp->connect_work);
-	cancel_work_sync(&dp->attention_work);
-	flush_workqueue(dp->wq);
-
-	dp_display_handle_disconnect(dp);
-
-	/* Reset abort value to allow future connections */
-	atomic_set(&dp->aborted, 0);
-}
-
-static int dp_display_usbpd_disconnect_cb(struct device *dev)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dev) {
-		pr_err("invalid dev\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp = dev_get_drvdata(dev);
-	if (!dp) {
-		pr_err("no driver data found\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	mutex_lock(&dp->session_lock);
-	if (dp->debug->psm_enabled && dp->core_initialized)
-		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
-	mutex_unlock(&dp->session_lock);
-
-	dp_display_disconnect_sync(dp);
-
-	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
-	    && !dp->parser->gpio_aux_switch)
-		dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE);
-end:
-	return rc;
-}
-
-static int dp_display_stream_enable(struct dp_display_private *dp,
-			struct dp_panel *dp_panel)
-{
-	int rc = 0;
-
-	rc = dp->ctrl->stream_on(dp->ctrl, dp_panel);
-
-	if (dp->debug->tpg_state)
-		dp_panel->tpg_config(dp_panel, true);
-
-	if (!rc) {
-		dp->active_panels[dp_panel->stream_id] = dp_panel;
-		dp->active_stream_cnt++;
-	}
-
-	pr_debug("dp active_stream_cnt:%d\n", dp->active_stream_cnt);
-
-	return rc;
-}
-
-static void dp_display_mst_attention(struct dp_display_private *dp)
-{
-	struct dp_mst_hpd_info hpd_irq = {0};
-
-	if (dp->mst.mst_active && dp->mst.cbs.hpd_irq) {
-		hpd_irq.mst_hpd_sim = dp->debug->mst_hpd_sim;
-		dp->mst.cbs.hpd_irq(&dp->dp_display, &hpd_irq);
-		dp->debug->mst_hpd_sim = false;
-	}
-
-	DP_MST_DEBUG("mst_attention_work. mst_active:%d\n", dp->mst.mst_active);
-}
-
-static void dp_display_attention_work(struct work_struct *work)
-{
-	struct dp_display_private *dp = container_of(work,
-			struct dp_display_private, attention_work);
-
-	mutex_lock(&dp->session_lock);
-
-	if (dp->debug->mst_hpd_sim || !dp->core_initialized) {
-		mutex_unlock(&dp->session_lock);
-		goto mst_attention;
-	}
-
-	if (dp->link->process_request(dp->link)) {
-		mutex_unlock(&dp->session_lock);
-		goto cp_irq;
-	}
-
-	mutex_unlock(&dp->session_lock);
-
-	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
-		if (dp_display_is_sink_count_zero(dp)) {
-			dp_display_handle_disconnect(dp);
-		} else {
-			if (!dp->mst.mst_active)
-				queue_work(dp->wq, &dp->connect_work);
-		}
-
-		goto mst_attention;
-	}
-
-	if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
-		dp_display_handle_disconnect(dp);
-
-		dp->panel->video_test = true;
-		queue_work(dp->wq, &dp->connect_work);
-
-		goto mst_attention;
-	}
-
-	if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		dp->ctrl->process_phy_test_request(dp->ctrl);
-		goto mst_attention;
-	}
-
-	if (dp->link->sink_request & DP_TEST_LINK_TRAINING) {
-		dp->link->send_test_response(dp->link);
-		dp->ctrl->link_maintenance(dp->ctrl);
-		goto mst_attention;
-	}
-
-	if (dp->link->sink_request & DP_LINK_STATUS_UPDATED)
-		dp->ctrl->link_maintenance(dp->ctrl);
-cp_irq:
-	if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq)
-		dp->hdcp.ops->cp_irq(dp->hdcp.data);
-mst_attention:
-	dp_display_mst_attention(dp);
-}
-
-static int dp_display_usbpd_attention_cb(struct device *dev)
-{
-	struct dp_display_private *dp;
-
-	if (!dev) {
-		pr_err("invalid dev\n");
-		return -EINVAL;
-	}
-
-	dp = dev_get_drvdata(dev);
-	if (!dp) {
-		pr_err("no driver data found\n");
-		return -ENODEV;
-	}
-
-	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
-			dp->hpd->hpd_irq, dp->hpd->hpd_high,
-			dp->power_on, dp->is_connected);
-
-	if (!dp->hpd->hpd_high)
-		dp_display_disconnect_sync(dp);
-	else if ((dp->hpd->hpd_irq && dp->core_initialized) ||
-			dp->debug->mst_hpd_sim)
-		queue_work(dp->wq, &dp->attention_work);
-	else if (dp->process_hpd_connect || !dp->is_connected)
-		queue_work(dp->wq, &dp->connect_work);
-	else
-		pr_debug("ignored\n");
-
-	return 0;
-}
-
-static void dp_display_connect_work(struct work_struct *work)
-{
-	int rc = 0;
-	struct dp_display_private *dp = container_of(work,
-			struct dp_display_private, connect_work);
-
-	if (atomic_read(&dp->aborted)) {
-		pr_warn("HPD off requested\n");
-		return;
-	}
-
-	if (!dp->hpd->hpd_high) {
-		pr_warn("Sink disconnected\n");
-		return;
-	}
-
-	rc = dp_display_process_hpd_high(dp);
-
-	if (!rc && dp->panel->video_test)
-		dp->link->send_test_response(dp->link);
-}
-
-static int dp_display_usb_notifier(struct notifier_block *nb,
-	unsigned long event, void *ptr)
-{
-	struct extcon_dev *edev = ptr;
-	struct dp_display_private *dp = container_of(nb,
-			struct dp_display_private, usb_nb);
-	if (!edev)
-		goto end;
-
-	if (!event && dp->debug->sim_mode) {
-		dp_display_disconnect_sync(dp);
-		dp->debug->abort(dp->debug);
-	}
-end:
-	return NOTIFY_DONE;
-}
-
-static int dp_display_get_usb_extcon(struct dp_display_private *dp)
-{
-	struct extcon_dev *edev;
-	int rc;
-
-	edev = extcon_get_edev_by_phandle(&dp->pdev->dev, 0);
-	if (IS_ERR(edev))
-		return PTR_ERR(edev);
-
-	dp->usb_nb.notifier_call = dp_display_usb_notifier;
-	dp->usb_nb.priority = 2;
-	rc = extcon_register_notifier(edev, EXTCON_USB, &dp->usb_nb);
-	if (rc)
-		pr_err("failed to register for usb event: %d\n", rc);
-
-	return rc;
-}
-
-static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
-{
-	dp_audio_put(dp->panel->audio);
-	dp_ctrl_put(dp->ctrl);
-	dp_link_put(dp->link);
-	dp_panel_put(dp->panel);
-	dp_aux_put(dp->aux);
-	dp_power_put(dp->power);
-	dp_catalog_put(dp->catalog);
-	dp_parser_put(dp->parser);
-	dp_hpd_put(dp->hpd);
-	mutex_destroy(&dp->session_lock);
-	dp_debug_put(dp->debug);
-}
-
-static int dp_init_sub_modules(struct dp_display_private *dp)
-{
-	int rc = 0;
-	bool hdcp_disabled;
-	struct device *dev = &dp->pdev->dev;
-	struct dp_hpd_cb *cb = &dp->hpd_cb;
-	struct dp_ctrl_in ctrl_in = {
-		.dev = dev,
-	};
-	struct dp_panel_in panel_in = {
-		.dev = dev,
-	};
-	struct dp_debug_in debug_in = {
-		.dev = dev,
-	};
-
-	mutex_init(&dp->session_lock);
-
-	dp->parser = dp_parser_get(dp->pdev);
-	if (IS_ERR(dp->parser)) {
-		rc = PTR_ERR(dp->parser);
-		pr_err("failed to initialize parser, rc = %d\n", rc);
-		dp->parser = NULL;
-		goto error;
-	}
-
-	rc = dp->parser->parse(dp->parser);
-	if (rc) {
-		pr_err("device tree parsing failed\n");
-		goto error_catalog;
-	}
-
-	g_dp_display->is_mst_supported = dp->parser->has_mst;
-
-	dp->catalog = dp_catalog_get(dev, dp->parser);
-	if (IS_ERR(dp->catalog)) {
-		rc = PTR_ERR(dp->catalog);
-		pr_err("failed to initialize catalog, rc = %d\n", rc);
-		dp->catalog = NULL;
-		goto error_catalog;
-	}
-
-	dp->power = dp_power_get(dp->parser);
-	if (IS_ERR(dp->power)) {
-		rc = PTR_ERR(dp->power);
-		pr_err("failed to initialize power, rc = %d\n", rc);
-		dp->power = NULL;
-		goto error_power;
-	}
-
-	rc = dp->power->power_client_init(dp->power, &dp->priv->phandle);
-	if (rc) {
-		pr_err("Power client create failed\n");
-		goto error_aux;
-	}
-
-	dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser,
-			dp->aux_switch_node);
-	if (IS_ERR(dp->aux)) {
-		rc = PTR_ERR(dp->aux);
-		pr_err("failed to initialize aux, rc = %d\n", rc);
-		dp->aux = NULL;
-		goto error_aux;
-	}
-
-	rc = dp->aux->drm_aux_register(dp->aux);
-	if (rc) {
-		pr_err("DRM DP AUX register failed\n");
-		goto error_link;
-	}
-
-	dp->link = dp_link_get(dev, dp->aux);
-	if (IS_ERR(dp->link)) {
-		rc = PTR_ERR(dp->link);
-		pr_err("failed to initialize link, rc = %d\n", rc);
-		dp->link = NULL;
-		goto error_link;
-	}
-
-	panel_in.aux = dp->aux;
-	panel_in.catalog = &dp->catalog->panel;
-	panel_in.link = dp->link;
-	panel_in.connector = dp->dp_display.base_connector;
-	panel_in.base_panel = NULL;
-	panel_in.parser = dp->parser;
-
-	dp->panel = dp_panel_get(&panel_in);
-	if (IS_ERR(dp->panel)) {
-		rc = PTR_ERR(dp->panel);
-		pr_err("failed to initialize panel, rc = %d\n", rc);
-		dp->panel = NULL;
-		goto error_panel;
-	}
-
-	ctrl_in.link = dp->link;
-	ctrl_in.panel = dp->panel;
-	ctrl_in.aux = dp->aux;
-	ctrl_in.power = dp->power;
-	ctrl_in.catalog = &dp->catalog->ctrl;
-	ctrl_in.parser = dp->parser;
-
-	dp->ctrl = dp_ctrl_get(&ctrl_in);
-	if (IS_ERR(dp->ctrl)) {
-		rc = PTR_ERR(dp->ctrl);
-		pr_err("failed to initialize ctrl, rc = %d\n", rc);
-		dp->ctrl = NULL;
-		goto error_ctrl;
-	}
-
-	dp->panel->audio = dp_audio_get(dp->pdev, dp->panel,
-						&dp->catalog->audio);
-	if (IS_ERR(dp->panel->audio)) {
-		rc = PTR_ERR(dp->panel->audio);
-		pr_err("failed to initialize audio, rc = %d\n", rc);
-		dp->panel->audio = NULL;
-		goto error_audio;
-	}
-
-	memset(&dp->mst, 0, sizeof(dp->mst));
-	dp->active_stream_cnt = 0;
-
-	cb->configure  = dp_display_usbpd_configure_cb;
-	cb->disconnect = dp_display_usbpd_disconnect_cb;
-	cb->attention  = dp_display_usbpd_attention_cb;
-
-	dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd, cb);
-	if (IS_ERR(dp->hpd)) {
-		rc = PTR_ERR(dp->hpd);
-		pr_err("failed to initialize hpd, rc = %d\n", rc);
-		dp->hpd = NULL;
-		goto error_hpd;
-	}
-
-	hdcp_disabled = !!dp_display_initialize_hdcp(dp);
-
-	debug_in.panel = dp->panel;
-	debug_in.hpd = dp->hpd;
-	debug_in.link = dp->link;
-	debug_in.aux = dp->aux;
-	debug_in.connector = &dp->dp_display.base_connector;
-	debug_in.catalog = dp->catalog;
-	debug_in.parser = dp->parser;
-	debug_in.ctrl = dp->ctrl;
-
-	dp->debug = dp_debug_get(&debug_in);
-	if (IS_ERR(dp->debug)) {
-		rc = PTR_ERR(dp->debug);
-		pr_err("failed to initialize debug, rc = %d\n", rc);
-		dp->debug = NULL;
-		goto error_debug;
-	}
-
-	dp->tot_dsc_blks_in_use = 0;
-
-	dp->debug->hdcp_disabled = hdcp_disabled;
-	dp_display_update_hdcp_status(dp, true);
-
-	dp_display_get_usb_extcon(dp);
-
-	rc = dp->hpd->register_hpd(dp->hpd);
-	if (rc) {
-		pr_err("failed register hpd\n");
-		goto error_hpd_reg;
-	}
-
-	return rc;
-error_hpd_reg:
-	dp_debug_put(dp->debug);
-error_debug:
-	dp_hpd_put(dp->hpd);
-error_hpd:
-	dp_audio_put(dp->panel->audio);
-error_audio:
-	dp_ctrl_put(dp->ctrl);
-error_ctrl:
-	dp_panel_put(dp->panel);
-error_panel:
-	dp_link_put(dp->link);
-error_link:
-	dp_aux_put(dp->aux);
-error_aux:
-	dp_power_put(dp->power);
-error_power:
-	dp_catalog_put(dp->catalog);
-error_catalog:
-	dp_parser_put(dp->parser);
-error:
-	mutex_destroy(&dp->session_lock);
-	return rc;
-}
-
-static int dp_display_post_init(struct dp_display *dp_display)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	if (IS_ERR_OR_NULL(dp)) {
-		pr_err("invalid params\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	rc = dp_init_sub_modules(dp);
-	if (rc)
-		goto end;
-
-	dp_display->post_init = NULL;
-end:
-	pr_debug("%s\n", rc ? "failed" : "success");
-	return rc;
-}
-
-static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
-		struct dp_display_mode *mode)
-{
-	const u32 num_components = 3, default_bpp = 24;
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_panel = panel;
-	if (!dp_panel->connector) {
-		pr_err("invalid connector input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-	mode->timing.bpp =
-		dp_panel->connector->display_info.bpc * num_components;
-	if (!mode->timing.bpp)
-		mode->timing.bpp = default_bpp;
-
-	mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel,
-			mode->timing.bpp, mode->timing.pixel_clk_khz);
-
-	dp_panel->pinfo = mode->timing;
-	mutex_unlock(&dp->session_lock);
-
-	return 0;
-}
-
-static int dp_display_prepare(struct dp_display *dp_display, void *panel)
-{
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel;
-	int rc = 0;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_panel = panel;
-	if (!dp_panel->connector) {
-		pr_err("invalid connector input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	if (atomic_read(&dp->aborted))
-		goto end;
-
-	if (dp->power_on)
-		goto end;
-
-	if (!dp_display_is_ready(dp))
-		goto end;
-
-	dp_display_host_init(dp);
-
-	if (dp->debug->psm_enabled) {
-		dp->link->psm_config(dp->link, &dp->panel->link_info, false);
-		dp->debug->psm_enabled = false;
-	}
-
-	/*
-	 * Execute the dp controller power on in shallow mode here.
-	 * In normal cases, controller should have been powered on
-	 * by now. In some cases like suspend/resume or framework
-	 * reboot, we end up here without a powered on controller.
-	 * Cable may have been removed in suspended state. In that
-	 * case, link training is bound to fail on system resume.
-	 * So, we execute in shallow mode here to do only minimal
-	 * and required things.
-	 */
-	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en, true);
-	if (rc)
-		goto end;
-
-end:
-	mutex_unlock(&dp->session_lock);
-
-	return 0;
-}
-
-static int dp_display_set_stream_info(struct dp_display *dp_display,
-			void *panel, u32 strm_id, u32 start_slot,
-			u32 num_slots, u32 pbn, int vcpi)
-{
-	int rc = 0;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-	const int max_slots = 64;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (strm_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", strm_id);
-		return -EINVAL;
-	}
-
-	if (start_slot + num_slots > max_slots) {
-		pr_err("invalid channel info received. start:%d, slots:%d\n",
-				start_slot, num_slots);
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	dp->ctrl->set_mst_channel_info(dp->ctrl, strm_id,
-			start_slot, num_slots);
-
-	if (panel) {
-		dp_panel = panel;
-		dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
-				num_slots, pbn, vcpi);
-	}
-
-	mutex_unlock(&dp->session_lock);
-
-	return rc;
-}
-
-static void dp_display_update_dsc_resources(struct dp_display_private *dp,
-		struct dp_panel *panel, bool enable)
-{
-	u32 dsc_blk_cnt = 0;
-
-	if (panel->pinfo.comp_info.comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
-		panel->pinfo.comp_info.comp_ratio) {
-		dsc_blk_cnt = panel->pinfo.h_active /
-				dp->parser->max_dp_dsc_input_width_pixs;
-		if (panel->pinfo.h_active %
-				dp->parser->max_dp_dsc_input_width_pixs)
-			dsc_blk_cnt++;
-	}
-
-	if (enable) {
-		dp->tot_dsc_blks_in_use += dsc_blk_cnt;
-		panel->tot_dsc_blks_in_use += dsc_blk_cnt;
-	} else {
-		dp->tot_dsc_blks_in_use -= dsc_blk_cnt;
-		panel->tot_dsc_blks_in_use -= dsc_blk_cnt;
-	}
-}
-
-static int dp_display_enable(struct dp_display *dp_display, void *panel)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	if (!dp->core_initialized) {
-		pr_err("host not initialized\n");
-		goto end;
-	}
-
-	rc = dp_display_stream_enable(dp, panel);
-	if (rc)
-		goto end;
-
-	dp_display_update_dsc_resources(dp, panel, true);
-	dp->power_on = true;
-end:
-	mutex_unlock(&dp->session_lock);
-	return rc;
-}
-
-static void dp_display_stream_post_enable(struct dp_display_private *dp,
-			struct dp_panel *dp_panel)
-{
-	dp_panel->spd_config(dp_panel);
-	dp_panel->setup_hdr(dp_panel, NULL, false, 0);
-}
-
-static int dp_display_post_enable(struct dp_display *dp_display, void *panel)
-{
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	dp_panel = panel;
-
-	mutex_lock(&dp->session_lock);
-
-	if (!dp->power_on) {
-		pr_debug("stream not setup, return\n");
-		goto end;
-	}
-
-	if (atomic_read(&dp->aborted))
-		goto end;
-
-	if (!dp_display_is_ready(dp) || !dp->core_initialized) {
-		pr_err("display not ready\n");
-		goto end;
-	}
-
-	dp_display_stream_post_enable(dp, dp_panel);
-
-	if (dp_panel->audio_supported) {
-		dp_panel->audio->bw_code = dp->link->link_params.bw_code;
-		dp_panel->audio->lane_count = dp->link->link_params.lane_count;
-		dp_panel->audio->on(dp_panel->audio);
-	}
-
-	cancel_delayed_work_sync(&dp->hdcp_cb_work);
-	queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
-end:
-	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
-
-	complete_all(&dp->notification_comp);
-	mutex_unlock(&dp->session_lock);
-	return 0;
-}
-
-static int dp_display_pre_disable(struct dp_display *dp_display, void *panel)
-{
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel = panel;
-	struct dp_link_hdcp_status *status;
-	int rc = 0;
-	size_t i;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	status = &dp->link->hdcp_status;
-
-	if (!dp->power_on) {
-		pr_debug("stream already powered off, return\n");
-		goto end;
-	}
-
-	if (dp_display_is_hdcp_enabled(dp) &&
-			status->hdcp_state != HDCP_STATE_INACTIVE) {
-		flush_delayed_work(&dp->hdcp_cb_work);
-		if (dp->mst.mst_active) {
-			dp_display_hdcp_deregister_stream(dp,
-				dp_panel->stream_id);
-			for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
-				if (i != dp_panel->stream_id &&
-						dp->active_panels[i]) {
-					pr_debug("Streams are still active. Skip disabling HDCP\n");
-					goto stream;
-				}
-			}
-		}
-
-		if (dp->hdcp.ops->off)
-			dp->hdcp.ops->off(dp->hdcp.data);
-
-		dp_display_update_hdcp_status(dp, true);
-	}
-
-stream:
-	if (dp_panel->audio_supported)
-		dp_panel->audio->off(dp_panel->audio);
-
-	rc = dp_display_stream_pre_disable(dp, dp_panel);
-
-end:
-	mutex_unlock(&dp->session_lock);
-	return 0;
-}
-
-static int dp_display_disable(struct dp_display *dp_display, void *panel)
-{
-	struct dp_display_private *dp = NULL;
-	struct dp_panel *dp_panel = NULL;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	dp_panel = panel;
-
-	mutex_lock(&dp->session_lock);
-
-	if (!dp->power_on || !dp->core_initialized) {
-		pr_debug("Link already powered off, return\n");
-		goto end;
-	}
-
-	dp_display_stream_disable(dp, dp_panel);
-	dp_display_update_dsc_resources(dp, dp_panel, false);
-end:
-	mutex_unlock(&dp->session_lock);
-	return 0;
-}
-
-static int dp_request_irq(struct dp_display *dp_display)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
-	if (dp->irq < 0) {
-		rc = dp->irq;
-		pr_err("failed to get irq: %d\n", rc);
-		return rc;
-	}
-
-	rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq,
-		IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
-	if (rc < 0) {
-		pr_err("failed to request IRQ%u: %d\n",
-				dp->irq, rc);
-		return rc;
-	}
-	disable_irq(dp->irq);
-
-	return 0;
-}
-
-static struct dp_debug *dp_get_debug(struct dp_display *dp_display)
-{
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	return dp->debug;
-}
-
-static int dp_display_unprepare(struct dp_display *dp_display, void *panel)
-{
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel = panel;
-	u32 flags = 0;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	/*
-	 * Check if the power off sequence was triggered
-	 * by a source initialated action like framework
-	 * reboot or suspend-resume but not from normal
-	 * hot plug.
-	 */
-	if (dp_display_is_ready(dp))
-		flags |= DP_PANEL_SRC_INITIATED_POWER_DOWN;
-
-	if (dp->active_stream_cnt)
-		goto end;
-
-	if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) {
-		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
-		dp->debug->psm_enabled = true;
-
-		dp->ctrl->off(dp->ctrl);
-		dp_display_host_deinit(dp);
-	}
-
-	dp->power_on = false;
-	dp->aux->state = DP_STATE_CTRL_POWERED_OFF;
-
-	complete_all(&dp->notification_comp);
-
-	/* log this as it results from user action of cable dis-connection */
-	pr_info("[OK]\n");
-end:
-	dp_panel->deinit(dp_panel, flags);
-	mutex_unlock(&dp->session_lock);
-
-	return 0;
-}
-
-static enum drm_mode_status dp_display_validate_mode(
-		struct dp_display *dp_display,
-		void *panel, struct drm_display_mode *mode)
-{
-	struct dp_display_private *dp;
-	struct drm_dp_link *link_info;
-	u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
-	struct dp_panel *dp_panel;
-	struct dp_debug *debug;
-	enum drm_mode_status mode_status = MODE_BAD;
-	bool in_list = false;
-	struct dp_mst_connector *mst_connector;
-	int hdis, vdis, vref, ar, _hdis, _vdis, _vref, _ar, rate;
-	struct dp_display_mode dp_mode;
-	bool dsc_en;
-
-	if (!dp_display || !mode || !panel) {
-		pr_err("invalid params\n");
-		return mode_status;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	dp_panel = panel;
-	if (!dp_panel->connector) {
-		pr_err("invalid connector\n");
-		goto end;
-	}
-
-	link_info = &dp->panel->link_info;
-
-	debug = dp->debug;
-	if (!debug)
-		goto end;
-
-	dp_display->convert_to_dp_mode(dp_display, panel, mode, &dp_mode);
-
-	dsc_en = dp_mode.timing.comp_info.comp_ratio ? true : false;
-	mode_bpp = dsc_en ? dp_mode.timing.comp_info.dsc_info.bpp :
-			dp_mode.timing.bpp;
-
-	mode_rate_khz = mode->clock * mode_bpp;
-	rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code);
-	supported_rate_khz = link_info->num_lanes * rate * 8;
-
-	if (mode_rate_khz > supported_rate_khz) {
-		DP_MST_DEBUG("pclk:%d, supported_rate:%d\n",
-				mode->clock, supported_rate_khz);
-		goto end;
-	}
-
-	if (mode->clock > dp_display->max_pclk_khz) {
-		DP_MST_DEBUG("clk:%d, max:%d\n", mode->clock,
-				dp_display->max_pclk_khz);
-		goto end;
-	}
-
-	/*
-	 * If the connector exists in the mst connector list and if debug is
-	 * enabled for that connector, use the mst connector settings from the
-	 * list for validation. Otherwise, use non-mst default settings.
-	 */
-	mutex_lock(&debug->dp_mst_connector_list.lock);
-
-	if (list_empty(&debug->dp_mst_connector_list.list)) {
-		mutex_unlock(&debug->dp_mst_connector_list.lock);
-		goto verify_default;
-	}
-
-	list_for_each_entry(mst_connector, &debug->dp_mst_connector_list.list,
-			list) {
-		if (mst_connector->con_id == dp_panel->connector->base.id) {
-			in_list = true;
-
-			if (!mst_connector->debug_en) {
-				mode_status = MODE_OK;
-				mutex_unlock(
-				&debug->dp_mst_connector_list.lock);
-				goto end;
-			}
-
-			hdis = mst_connector->hdisplay;
-			vdis = mst_connector->vdisplay;
-			vref = mst_connector->vrefresh;
-			ar = mst_connector->aspect_ratio;
-
-			_hdis = mode->hdisplay;
-			_vdis = mode->vdisplay;
-			_vref = mode->vrefresh;
-			_ar = mode->picture_aspect_ratio;
-
-			if (hdis == _hdis && vdis == _vdis && vref == _vref &&
-					ar == _ar) {
-				mode_status = MODE_OK;
-				mutex_unlock(
-				&debug->dp_mst_connector_list.lock);
-				goto end;
-			}
-
-			break;
-		}
-	}
-
-	mutex_unlock(&debug->dp_mst_connector_list.lock);
-
-	if (in_list)
-		goto end;
-
-verify_default:
-	if (debug->debug_en && (mode->hdisplay != debug->hdisplay ||
-			mode->vdisplay != debug->vdisplay ||
-			mode->vrefresh != debug->vrefresh ||
-			mode->picture_aspect_ratio != debug->aspect_ratio))
-		goto end;
-
-	mode_status = MODE_OK;
-end:
-	mutex_unlock(&dp->session_lock);
-	return mode_status;
-}
-
-static int dp_display_get_modes(struct dp_display *dp, void *panel,
-	struct dp_display_mode *dp_mode)
-{
-	struct dp_display_private *dp_display;
-	struct dp_panel *dp_panel;
-	int ret = 0;
-
-	if (!dp || !panel) {
-		pr_err("invalid params\n");
-		return 0;
-	}
-
-	dp_panel = panel;
-	if (!dp_panel->connector) {
-		pr_err("invalid connector\n");
-		return 0;
-	}
-
-	dp_display = container_of(dp, struct dp_display_private, dp_display);
-
-	ret = dp_panel->get_modes(dp_panel, dp_panel->connector, dp_mode);
-	if (dp_mode->timing.pixel_clk_khz)
-		dp->max_pclk_khz = dp_mode->timing.pixel_clk_khz;
-	return ret;
-}
-
-static void dp_display_convert_to_dp_mode(struct dp_display *dp_display,
-		void *panel,
-		const struct drm_display_mode *drm_mode,
-		struct dp_display_mode *dp_mode)
-{
-	struct dp_display_private *dp;
-	struct dp_panel *dp_panel;
-	u32 free_dsc_blks = 0, required_dsc_blks = 0;
-
-	if (!dp_display || !drm_mode || !dp_mode || !panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	dp_panel = panel;
-
-	memset(dp_mode, 0, sizeof(*dp_mode));
-
-	free_dsc_blks = dp->parser->max_dp_dsc_blks -
-				dp->tot_dsc_blks_in_use +
-				dp_panel->tot_dsc_blks_in_use;
-	required_dsc_blks = drm_mode->hdisplay /
-				dp->parser->max_dp_dsc_input_width_pixs;
-	if (drm_mode->hdisplay % dp->parser->max_dp_dsc_input_width_pixs)
-		required_dsc_blks++;
-
-	if (free_dsc_blks >= required_dsc_blks)
-		dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
-
-	if (dp_mode->capabilities & DP_PANEL_CAPS_DSC)
-		pr_debug("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n",
-			dp->tot_dsc_blks_in_use, dp->parser->max_dp_dsc_blks,
-			free_dsc_blks, required_dsc_blks, dp_mode->capabilities,
-			dp->parser->max_dp_dsc_input_width_pixs);
-
-	dp_panel->convert_to_dp_mode(dp_panel, drm_mode, dp_mode);
-}
-
-static int dp_display_config_hdr(struct dp_display *dp_display, void *panel,
-			struct drm_msm_ext_hdr_metadata *hdr, bool dhdr_update)
-{
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-	u64 core_clk_rate;
-
-	if (!dp_display || !panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_panel = panel;
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	core_clk_rate = dp->power->clk_get_rate(dp->power, "core_clk");
-	if (!core_clk_rate) {
-		pr_err("invalid rate for core_clk\n");
-		return -EINVAL;
-	}
-
-	return dp_panel->setup_hdr(dp_panel, hdr, dhdr_update, core_clk_rate);
-}
-
-static int dp_display_create_workqueue(struct dp_display_private *dp)
-{
-	dp->wq = create_singlethread_workqueue("drm_dp");
-	if (IS_ERR_OR_NULL(dp->wq)) {
-		pr_err("Error creating wq\n");
-		return -EPERM;
-	}
-
-	INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
-	INIT_WORK(&dp->connect_work, dp_display_connect_work);
-	INIT_WORK(&dp->attention_work, dp_display_attention_work);
-
-	return 0;
-}
-
-static int dp_display_fsa4480_callback(struct notifier_block *self,
-		unsigned long event, void *data)
-{
-	return 0;
-}
-
-static int dp_display_init_aux_switch(struct dp_display_private *dp)
-{
-	int rc = 0;
-	const char *phandle = "qcom,dp-aux-switch";
-	struct notifier_block nb;
-
-	if (!dp->pdev->dev.of_node) {
-		pr_err("cannot find dev.of_node\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	dp->aux_switch_node = of_parse_phandle(dp->pdev->dev.of_node,
-			phandle, 0);
-	if (!dp->aux_switch_node) {
-		pr_warn("cannot parse %s handle\n", phandle);
-		rc = -ENODEV;
-		goto end;
-	}
-
-	nb.notifier_call = dp_display_fsa4480_callback;
-	nb.priority = 0;
-
-	rc = fsa4480_reg_notifier(&nb, dp->aux_switch_node);
-	if (rc) {
-		pr_err("failed to register notifier (%d)\n", rc);
-		goto end;
-	}
-
-	fsa4480_unreg_notifier(&nb, dp->aux_switch_node);
-end:
-	return rc;
-}
-
-static int dp_display_mst_install(struct dp_display *dp_display,
-			struct dp_mst_drm_install_info *mst_install_info)
-{
-	struct dp_display_private *dp;
-
-	if (!dp_display || !mst_install_info) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	if (!mst_install_info->cbs->hpd || !mst_install_info->cbs->hpd_irq) {
-		pr_err("invalid mst cbs\n");
-		return -EINVAL;
-	}
-
-	dp_display->dp_mst_prv_info = mst_install_info->dp_mst_prv_info;
-
-	if (!dp->parser->has_mst) {
-		pr_debug("mst not enabled\n");
-		return -EPERM;
-	}
-
-	memcpy(&dp->mst.cbs, mst_install_info->cbs, sizeof(dp->mst.cbs));
-	dp->mst.drm_registered = true;
-
-	DP_MST_DEBUG("dp mst drm installed\n");
-
-	return 0;
-}
-
-static int dp_display_mst_uninstall(struct dp_display *dp_display)
-{
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		return -EPERM;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private,
-				dp_display);
-	memset(&dp->mst.cbs, 0, sizeof(dp->mst.cbs));
-	dp->mst.drm_registered = false;
-
-	DP_MST_DEBUG("dp mst drm uninstalled\n");
-
-	return 0;
-}
-
-static int dp_display_mst_connector_install(struct dp_display *dp_display,
-		struct drm_connector *connector)
-{
-	int rc = 0;
-	struct dp_panel_in panel_in;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-	struct dp_mst_connector *mst_connector;
-
-	if (!dp_display || !connector) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		mutex_unlock(&dp->session_lock);
-		return -EPERM;
-	}
-
-	panel_in.dev = &dp->pdev->dev;
-	panel_in.aux = dp->aux;
-	panel_in.catalog = &dp->catalog->panel;
-	panel_in.link = dp->link;
-	panel_in.connector = connector;
-	panel_in.base_panel = dp->panel;
-	panel_in.parser = dp->parser;
-
-	dp_panel = dp_panel_get(&panel_in);
-	if (IS_ERR(dp_panel)) {
-		rc = PTR_ERR(dp_panel);
-		pr_err("failed to initialize panel, rc = %d\n", rc);
-		mutex_unlock(&dp->session_lock);
-		return rc;
-	}
-
-	dp_panel->audio = dp_audio_get(dp->pdev, dp_panel, &dp->catalog->audio);
-	if (IS_ERR(dp_panel->audio)) {
-		rc = PTR_ERR(dp_panel->audio);
-		pr_err("[mst] failed to initialize audio, rc = %d\n", rc);
-		dp_panel->audio = NULL;
-		mutex_unlock(&dp->session_lock);
-		return rc;
-	}
-
-	DP_MST_DEBUG("dp mst connector installed. conn:%d\n",
-			connector->base.id);
-
-	mutex_lock(&dp->debug->dp_mst_connector_list.lock);
-
-	mst_connector = kmalloc(sizeof(struct dp_mst_connector),
-			GFP_KERNEL);
-	if (!mst_connector) {
-		mutex_unlock(&dp->debug->dp_mst_connector_list.lock);
-		mutex_unlock(&dp->session_lock);
-		return -ENOMEM;
-	}
-
-	mst_connector->debug_en = false;
-	mst_connector->conn = connector;
-	mst_connector->con_id = connector->base.id;
-	mst_connector->state = connector_status_unknown;
-	INIT_LIST_HEAD(&mst_connector->list);
-
-	list_add(&mst_connector->list,
-			&dp->debug->dp_mst_connector_list.list);
-
-	mutex_unlock(&dp->debug->dp_mst_connector_list.lock);
-	mutex_unlock(&dp->session_lock);
-
-	return 0;
-}
-
-static int dp_display_mst_connector_uninstall(struct dp_display *dp_display,
-			struct drm_connector *connector)
-{
-	int rc = 0;
-	struct sde_connector *sde_conn;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-	struct dp_mst_connector *con_to_remove, *temp_con;
-
-	if (!dp_display || !connector) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		mutex_unlock(&dp->session_lock);
-		return -EPERM;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid panel for connector:%d\n", connector->base.id);
-		mutex_unlock(&dp->session_lock);
-		return -EINVAL;
-	}
-
-	dp_panel = sde_conn->drv_panel;
-	dp_audio_put(dp_panel->audio);
-	dp_panel_put(dp_panel);
-
-	DP_MST_DEBUG("dp mst connector uninstalled. conn:%d\n",
-			connector->base.id);
-
-	mutex_lock(&dp->debug->dp_mst_connector_list.lock);
-
-	list_for_each_entry_safe(con_to_remove, temp_con,
-			&dp->debug->dp_mst_connector_list.list, list) {
-		if (con_to_remove->conn == connector) {
-			list_del(&con_to_remove->list);
-			kfree(con_to_remove);
-		}
-	}
-
-	mutex_unlock(&dp->debug->dp_mst_connector_list.lock);
-	mutex_unlock(&dp->session_lock);
-
-	return rc;
-}
-
-static int dp_display_mst_get_connector_info(struct dp_display *dp_display,
-			struct drm_connector *connector,
-			struct dp_mst_connector *mst_conn)
-{
-	struct dp_display_private *dp;
-	struct dp_mst_connector *conn, *temp_conn;
-
-	if (!connector || !mst_conn) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mutex_lock(&dp->session_lock);
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		mutex_unlock(&dp->session_lock);
-		return -EPERM;
-	}
-
-	mutex_lock(&dp->debug->dp_mst_connector_list.lock);
-	list_for_each_entry_safe(conn, temp_conn,
-			&dp->debug->dp_mst_connector_list.list, list) {
-		if (conn->con_id == connector->base.id)
-			memcpy(mst_conn, conn, sizeof(*mst_conn));
-	}
-	mutex_unlock(&dp->debug->dp_mst_connector_list.lock);
-	mutex_unlock(&dp->session_lock);
-	return 0;
-}
-
-static int dp_display_mst_connector_update_edid(struct dp_display *dp_display,
-			struct drm_connector *connector,
-			struct edid *edid)
-{
-	int rc = 0;
-	struct sde_connector *sde_conn;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-
-	if (!dp_display || !connector || !edid) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		return -EPERM;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid panel for connector:%d\n", connector->base.id);
-		return -EINVAL;
-	}
-
-	dp_panel = sde_conn->drv_panel;
-	rc = dp_panel->update_edid(dp_panel, edid);
-
-	DP_MST_DEBUG("dp mst connector:%d edid updated. mode_cnt:%d\n",
-			connector->base.id, rc);
-
-	return rc;
-}
-
-static int dp_display_update_pps(struct dp_display *dp_display,
-		struct drm_connector *connector, char *pps_cmd)
-{
-	struct sde_connector *sde_conn;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid panel for connector:%d\n", connector->base.id);
-		return -EINVAL;
-	}
-
-	dp_panel = sde_conn->drv_panel;
-	dp_panel->update_pps(dp_panel, pps_cmd);
-	return 0;
-}
-
-static int dp_display_mst_connector_update_link_info(
-			struct dp_display *dp_display,
-			struct drm_connector *connector)
-{
-	int rc = 0;
-	struct sde_connector *sde_conn;
-	struct dp_panel *dp_panel;
-	struct dp_display_private *dp;
-
-	if (!dp_display || !connector) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	if (!dp->mst.drm_registered) {
-		pr_debug("drm mst not registered\n");
-		return -EPERM;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid panel for connector:%d\n", connector->base.id);
-		return -EINVAL;
-	}
-
-	dp_panel = sde_conn->drv_panel;
-
-	memcpy(dp_panel->dpcd, dp->panel->dpcd,
-			DP_RECEIVER_CAP_SIZE + 1);
-	memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
-			DP_RECEIVER_DSC_CAP_SIZE + 1);
-	memcpy(&dp_panel->link_info, &dp->panel->link_info,
-			sizeof(dp_panel->link_info));
-
-	DP_MST_DEBUG("dp mst connector:%d link info updated\n");
-
-	return rc;
-}
-
-static int dp_display_mst_get_fixed_topology_port(
-			struct dp_display *dp_display,
-			u32 strm_id, u32 *port_num)
-{
-	struct dp_display_private *dp;
-	u32 port;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (strm_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", strm_id);
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	port = dp->parser->mst_fixed_port[strm_id];
-
-	if (!port || port > 255)
-		return -ENOENT;
-
-	if (port_num)
-		*port_num = port;
-
-	return 0;
-}
-
-static int dp_display_get_mst_caps(struct dp_display *dp_display,
-			struct dp_mst_caps *mst_caps)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!dp_display || !mst_caps) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-
-	mst_caps->has_mst = dp->parser->has_mst;
-	mst_caps->max_streams_supported = (mst_caps->has_mst) ? 2 : 0;
-	mst_caps->max_dpcd_transaction_bytes = (mst_caps->has_mst) ? 16 : 0;
-	mst_caps->drm_aux = dp->aux->drm_aux;
-
-	return rc;
-}
-
-static int dp_display_probe(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct dp_display_private *dp;
-
-	if (!pdev || !pdev->dev.of_node) {
-		pr_err("pdev not found\n");
-		rc = -ENODEV;
-		goto bail;
-	}
-
-	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
-	if (!dp) {
-		rc = -ENOMEM;
-		goto bail;
-	}
-
-	init_completion(&dp->notification_comp);
-
-	dp->pdev = pdev;
-	dp->name = "drm_dp";
-
-	memset(&dp->mst, 0, sizeof(dp->mst));
-	atomic_set(&dp->aborted, 0);
-
-	rc = dp_display_init_aux_switch(dp);
-	if (rc) {
-		rc = -EPROBE_DEFER;
-		goto error;
-	}
-
-	rc = dp_display_create_workqueue(dp);
-	if (rc) {
-		pr_err("Failed to create workqueue\n");
-		goto error;
-	}
-
-	platform_set_drvdata(pdev, dp);
-
-	g_dp_display = &dp->dp_display;
-
-	g_dp_display->enable        = dp_display_enable;
-	g_dp_display->post_enable   = dp_display_post_enable;
-	g_dp_display->pre_disable   = dp_display_pre_disable;
-	g_dp_display->disable       = dp_display_disable;
-	g_dp_display->set_mode      = dp_display_set_mode;
-	g_dp_display->validate_mode = dp_display_validate_mode;
-	g_dp_display->get_modes     = dp_display_get_modes;
-	g_dp_display->prepare       = dp_display_prepare;
-	g_dp_display->unprepare     = dp_display_unprepare;
-	g_dp_display->request_irq   = dp_request_irq;
-	g_dp_display->get_debug     = dp_get_debug;
-	g_dp_display->post_open     = NULL;
-	g_dp_display->post_init     = dp_display_post_init;
-	g_dp_display->config_hdr    = dp_display_config_hdr;
-	g_dp_display->mst_install   = dp_display_mst_install;
-	g_dp_display->mst_uninstall = dp_display_mst_uninstall;
-	g_dp_display->mst_connector_install = dp_display_mst_connector_install;
-	g_dp_display->mst_connector_uninstall =
-					dp_display_mst_connector_uninstall;
-	g_dp_display->mst_connector_update_edid =
-					dp_display_mst_connector_update_edid;
-	g_dp_display->mst_connector_update_link_info =
-				dp_display_mst_connector_update_link_info;
-	g_dp_display->get_mst_caps = dp_display_get_mst_caps;
-	g_dp_display->set_stream_info = dp_display_set_stream_info;
-	g_dp_display->update_pps = dp_display_update_pps;
-	g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
-	g_dp_display->mst_get_connector_info =
-					dp_display_mst_get_connector_info;
-	g_dp_display->mst_get_fixed_topology_port =
-					dp_display_mst_get_fixed_topology_port;
-
-	rc = component_add(&pdev->dev, &dp_display_comp_ops);
-	if (rc) {
-		pr_err("component add failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	return 0;
-error:
-	devm_kfree(&pdev->dev, dp);
-bail:
-	return rc;
-}
-
-int dp_display_get_displays(void **displays, int count)
-{
-	if (!displays) {
-		pr_err("invalid data\n");
-		return -EINVAL;
-	}
-
-	if (count != 1) {
-		pr_err("invalid number of displays\n");
-		return -EINVAL;
-	}
-
-	displays[0] = g_dp_display;
-	return count;
-}
-
-int dp_display_get_num_of_displays(void)
-{
-	if (!g_dp_display)
-		return 0;
-
-	return 1;
-}
-
-int dp_display_get_num_of_streams(void)
-{
-	return DP_STREAM_MAX;
-}
-
-static void dp_display_set_mst_state(void *dp_display,
-		enum dp_drv_state mst_state)
-{
-	struct dp_display_private *dp;
-
-	if (!g_dp_display) {
-		pr_debug("dp display not initialized\n");
-		return;
-	}
-
-	dp = container_of(g_dp_display, struct dp_display_private, dp_display);
-	if (dp->mst.mst_active && dp->mst.cbs.set_drv_state)
-		dp->mst.cbs.set_drv_state(g_dp_display, mst_state);
-}
-
-static int dp_display_remove(struct platform_device *pdev)
-{
-	struct dp_display_private *dp;
-
-	if (!pdev)
-		return -EINVAL;
-
-	dp = platform_get_drvdata(pdev);
-
-	dp_display_deinit_sub_modules(dp);
-
-	if (dp->wq)
-		destroy_workqueue(dp->wq);
-
-	platform_set_drvdata(pdev, NULL);
-	devm_kfree(&pdev->dev, dp);
-
-	return 0;
-}
-
-static int dp_pm_prepare(struct device *dev)
-{
-	dp_display_set_mst_state(g_dp_display, PM_SUSPEND);
-
-	return 0;
-}
-
-static void dp_pm_complete(struct device *dev)
-{
-	dp_display_set_mst_state(g_dp_display, PM_DEFAULT);
-}
-
-static const struct dev_pm_ops dp_pm_ops = {
-	.prepare = dp_pm_prepare,
-	.complete = dp_pm_complete,
-};
-
-static struct platform_driver dp_display_driver = {
-	.probe  = dp_display_probe,
-	.remove = dp_display_remove,
-	.driver = {
-		.name = "msm-dp-display",
-		.of_match_table = dp_dt_match,
-		.suppress_bind_attrs = true,
-		.pm = &dp_pm_ops,
-	},
-};
-
-static int __init dp_display_init(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&dp_display_driver);
-	if (ret) {
-		pr_err("driver register failed\n");
-		return ret;
-	}
-
-	return ret;
-}
-late_initcall(dp_display_init);
-
-static void __exit dp_display_cleanup(void)
-{
-	platform_driver_unregister(&dp_display_driver);
-}
-module_exit(dp_display_cleanup);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
deleted file mode 100644
index fe332af..0000000
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_DISPLAY_H_
-#define _DP_DISPLAY_H_
-
-#include <linux/list.h>
-#include <drm/drmP.h>
-#include <drm/msm_drm.h>
-
-#include "dp_panel.h"
-
-#define DP_MST_SIM_MAX_PORTS	2
-
-enum dp_drv_state {
-	PM_DEFAULT,
-	PM_SUSPEND,
-};
-
-struct dp_mst_hpd_info {
-	bool mst_protocol;
-	bool mst_hpd_sim;
-	u32 mst_port_cnt;
-	u8 *edid;
-};
-
-struct dp_mst_drm_cbs {
-	void (*hpd)(void *display, bool hpd_status,
-			struct dp_mst_hpd_info *info);
-	void (*hpd_irq)(void *display, struct dp_mst_hpd_info *info);
-	void (*set_drv_state)(void *dp_display,
-			enum dp_drv_state mst_state);
-};
-
-struct dp_mst_drm_install_info {
-	void *dp_mst_prv_info;
-	const struct dp_mst_drm_cbs *cbs;
-};
-
-struct dp_mst_caps {
-	bool has_mst;
-	u32 max_streams_supported;
-	u32 max_dpcd_transaction_bytes;
-	struct drm_dp_aux *drm_aux;
-};
-
-struct dp_mst_connector {
-	bool debug_en;
-	int con_id;
-	int hdisplay;
-	int vdisplay;
-	int vrefresh;
-	int aspect_ratio;
-	struct drm_connector *conn;
-	struct mutex lock;
-	struct list_head list;
-	enum drm_connector_status state;
-};
-
-struct dp_display {
-	struct drm_device *drm_dev;
-	struct dp_bridge *bridge;
-	struct drm_connector *base_connector;
-	void *base_dp_panel;
-	bool is_sst_connected;
-	bool is_mst_supported;
-	u32 max_pclk_khz;
-	void *dp_mst_prv_info;
-
-	int (*enable)(struct dp_display *dp_display, void *panel);
-	int (*post_enable)(struct dp_display *dp_display, void *panel);
-
-	int (*pre_disable)(struct dp_display *dp_display, void *panel);
-	int (*disable)(struct dp_display *dp_display, void *panel);
-
-	int (*set_mode)(struct dp_display *dp_display, void *panel,
-			struct dp_display_mode *mode);
-	enum drm_mode_status (*validate_mode)(struct dp_display *dp_display,
-			void *panel, struct drm_display_mode *mode);
-	int (*get_modes)(struct dp_display *dp_display, void *panel,
-		struct dp_display_mode *dp_mode);
-	int (*prepare)(struct dp_display *dp_display, void *panel);
-	int (*unprepare)(struct dp_display *dp_display, void *panel);
-	int (*request_irq)(struct dp_display *dp_display);
-	struct dp_debug *(*get_debug)(struct dp_display *dp_display);
-	void (*post_open)(struct dp_display *dp_display);
-	int (*config_hdr)(struct dp_display *dp_display, void *panel,
-				struct drm_msm_ext_hdr_metadata *hdr_meta,
-				bool dhdr_update);
-	int (*post_init)(struct dp_display *dp_display);
-	int (*mst_install)(struct dp_display *dp_display,
-			struct dp_mst_drm_install_info *mst_install_info);
-	int (*mst_uninstall)(struct dp_display *dp_display);
-	int (*mst_connector_install)(struct dp_display *dp_display,
-			struct drm_connector *connector);
-	int (*mst_connector_uninstall)(struct dp_display *dp_display,
-			struct drm_connector *connector);
-	int (*mst_connector_update_edid)(struct dp_display *dp_display,
-			struct drm_connector *connector,
-			struct edid *edid);
-	int (*mst_connector_update_link_info)(struct dp_display *dp_display,
-			struct drm_connector *connector);
-	int (*mst_get_connector_info)(struct dp_display *dp_display,
-			struct drm_connector *connector,
-			struct dp_mst_connector *mst_conn);
-	int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
-			u32 strm_id, u32 *port_num);
-	int (*get_mst_caps)(struct dp_display *dp_display,
-			struct dp_mst_caps *mst_caps);
-	int (*set_stream_info)(struct dp_display *dp_display, void *panel,
-			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
-			int vcpi);
-	void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
-			const struct drm_display_mode *drm_mode,
-			struct dp_display_mode *dp_mode);
-	int (*update_pps)(struct dp_display *dp_display,
-			struct drm_connector *connector, char *pps_cmd);
-};
-
-#ifdef CONFIG_DRM_MSM_DP
-int dp_display_get_num_of_displays(void);
-int dp_display_get_displays(void **displays, int count);
-int dp_display_get_num_of_streams(void);
-#else
-static inline int dp_display_get_num_of_displays(void)
-{
-	return 0;
-}
-static inline int dp_display_get_displays(void **displays, int count)
-{
-	return 0;
-}
-static inline int dp_display_get_num_of_streams(void)
-{
-	return 0;
-}
-static inline int dp_connector_update_pps(struct drm_connector *connector,
-		char *pps_cmd, void *display)
-{
-	return 0;
-}
-#endif
-#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
deleted file mode 100644
index b3b116a..0000000
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ /dev/null
@@ -1,624 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "sde_connector.h"
-#include "dp_drm.h"
-#include "dp_debug.h"
-
-#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-
-#define to_dp_bridge(x)     container_of((x), struct dp_bridge, base)
-
-void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
-				struct drm_display_mode *drm_mode)
-{
-	u32 flags = 0;
-
-	memset(drm_mode, 0, sizeof(*drm_mode));
-
-	drm_mode->hdisplay = dp_mode->timing.h_active;
-	drm_mode->hsync_start = drm_mode->hdisplay +
-				dp_mode->timing.h_front_porch;
-	drm_mode->hsync_end = drm_mode->hsync_start +
-			      dp_mode->timing.h_sync_width;
-	drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch;
-	drm_mode->hskew = dp_mode->timing.h_skew;
-
-	drm_mode->vdisplay = dp_mode->timing.v_active;
-	drm_mode->vsync_start = drm_mode->vdisplay +
-				dp_mode->timing.v_front_porch;
-	drm_mode->vsync_end = drm_mode->vsync_start +
-			      dp_mode->timing.v_sync_width;
-	drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch;
-
-	drm_mode->vrefresh = dp_mode->timing.refresh_rate;
-	drm_mode->clock = dp_mode->timing.pixel_clk_khz;
-
-	if (dp_mode->timing.h_active_low)
-		flags |= DRM_MODE_FLAG_NHSYNC;
-	else
-		flags |= DRM_MODE_FLAG_PHSYNC;
-
-	if (dp_mode->timing.v_active_low)
-		flags |= DRM_MODE_FLAG_NVSYNC;
-	else
-		flags |= DRM_MODE_FLAG_PVSYNC;
-
-	drm_mode->flags = flags;
-
-	drm_mode->type = 0x48;
-	drm_mode_set_name(drm_mode);
-}
-
-static int dp_bridge_attach(struct drm_bridge *dp_bridge)
-{
-	struct dp_bridge *bridge = to_dp_bridge(dp_bridge);
-
-	if (!dp_bridge) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	pr_debug("[%d] attached\n", bridge->id);
-
-	return 0;
-}
-
-static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	dp = bridge->display;
-
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
-	}
-
-	/* By this point mode should have been validated through mode_fixup */
-	rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode);
-	if (rc) {
-		pr_err("[%d] failed to perform a mode set, rc=%d\n",
-		       bridge->id, rc);
-		return;
-	}
-
-	rc = dp->prepare(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display prepare failed, rc=%d\n",
-		       bridge->id, rc);
-		return;
-	}
-
-	/* for SST force stream id, start slot and total slots to 0 */
-	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
-
-	rc = dp->enable(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display enable failed, rc=%d\n",
-		       bridge->id, rc);
-		dp->unprepare(dp, bridge->dp_panel);
-	}
-}
-
-static void dp_bridge_enable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	rc = dp->post_enable(dp, bridge->dp_panel);
-	if (rc)
-		pr_err("[%d] DP display post enable failed, rc=%d\n",
-		       bridge->id, rc);
-}
-
-static void dp_bridge_disable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	if (!dp) {
-		pr_err("dp is null\n");
-		return;
-	}
-
-	if (dp)
-		sde_connector_helper_bridge_disable(bridge->connector);
-
-	rc = dp->pre_disable(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display pre disable failed, rc=%d\n",
-		       bridge->id, rc);
-	}
-}
-
-static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	rc = dp->disable(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display disable failed, rc=%d\n",
-		       bridge->id, rc);
-		return;
-	}
-
-	rc = dp->unprepare(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display unprepare failed, rc=%d\n",
-		       bridge->id, rc);
-		return;
-	}
-}
-
-static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode,
-			&bridge->dp_mode);
-}
-
-static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	bool ret = true;
-	struct dp_display_mode dp_mode;
-	struct dp_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		ret = false;
-		goto end;
-	}
-
-	bridge = to_dp_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		ret = false;
-		goto end;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		ret = false;
-		goto end;
-	}
-
-	dp = bridge->display;
-
-	dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode);
-	convert_to_drm_mode(&dp_mode, adjusted_mode);
-end:
-	return ret;
-}
-
-static const struct drm_bridge_funcs dp_bridge_ops = {
-	.attach       = dp_bridge_attach,
-	.mode_fixup   = dp_bridge_mode_fixup,
-	.pre_enable   = dp_bridge_pre_enable,
-	.enable       = dp_bridge_enable,
-	.disable      = dp_bridge_disable,
-	.post_disable = dp_bridge_post_disable,
-	.mode_set     = dp_bridge_mode_set,
-};
-
-int dp_connector_config_hdr(struct drm_connector *connector, void *display,
-	struct sde_connector_state *c_state)
-{
-	struct dp_display *dp = display;
-	struct sde_connector *sde_conn;
-
-	if (!display || !c_state || !connector) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid dp panel\n");
-		return -EINVAL;
-	}
-
-	return dp->config_hdr(dp, sde_conn->drv_panel, &c_state->hdr_meta,
-			c_state->dyn_hdr_meta.dynamic_hdr_update);
-}
-
-int dp_connector_post_init(struct drm_connector *connector, void *display)
-{
-	int rc;
-	struct dp_display *dp_display = display;
-	struct sde_connector *sde_conn;
-
-	if (!dp_display || !connector)
-		return -EINVAL;
-
-	dp_display->base_connector = connector;
-	dp_display->bridge->connector = connector;
-
-	if (dp_display->post_init) {
-		rc = dp_display->post_init(dp_display);
-		if (rc)
-			goto end;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	dp_display->bridge->dp_panel = sde_conn->drv_panel;
-
-	rc = dp_mst_init(dp_display);
-end:
-	return rc;
-}
-
-int dp_connector_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display)
-{
-	const u32 dual_lm = 2;
-	const u32 single_lm = 1;
-	const u32 single_intf = 1;
-	const u32 no_enc = 0;
-	struct msm_display_topology *topology;
-	struct sde_connector *sde_conn;
-	struct dp_panel *dp_panel;
-	struct dp_display_mode dp_mode;
-	struct dp_display *dp_disp = display;
-
-	if (!drm_mode || !mode_info || !max_mixer_width || !connector ||
-			!display) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	memset(mode_info, 0, sizeof(*mode_info));
-
-	sde_conn = to_sde_connector(connector);
-	dp_panel = sde_conn->drv_panel;
-
-	topology = &mode_info->topology;
-	topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
-							dual_lm : single_lm;
-	topology->num_enc = no_enc;
-	topology->num_intf = single_intf;
-
-	mode_info->frame_rate = drm_mode->vrefresh;
-	mode_info->vtotal = drm_mode->vtotal;
-
-	mode_info->wide_bus_en = dp_panel->widebus_en;
-
-	dp_disp->convert_to_dp_mode(dp_disp, dp_panel, drm_mode, &dp_mode);
-
-	if (dp_mode.timing.comp_info.comp_ratio) {
-		memcpy(&mode_info->comp_info,
-			&dp_mode.timing.comp_info,
-			sizeof(mode_info->comp_info));
-
-		topology->num_enc = topology->num_lm;
-	}
-
-	return 0;
-}
-
-int dp_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *data)
-{
-	struct dp_display *display = data;
-
-	if (!info || !display || !display->drm_dev) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	info->intf_type = DRM_MODE_CONNECTOR_DisplayPort;
-
-	info->num_of_h_tiles = 1;
-	info->h_tile_instance[0] = 0;
-	info->is_connected = display->is_sst_connected;
-	info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
-		MSM_DISPLAY_CAP_HOT_PLUG;
-
-	return 0;
-}
-
-enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
-		bool force,
-		void *display)
-{
-	enum drm_connector_status status = connector_status_unknown;
-	struct msm_display_info info;
-	int rc;
-
-	if (!conn || !display)
-		return status;
-
-	/* get display dp_info */
-	memset(&info, 0x0, sizeof(info));
-	rc = dp_connector_get_info(conn, &info, display);
-	if (rc) {
-		pr_err("failed to get display info, rc=%d\n", rc);
-		return connector_status_disconnected;
-	}
-
-	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
-		status = (info.is_connected ? connector_status_connected :
-					      connector_status_disconnected);
-	else
-		status = connector_status_connected;
-
-	conn->display_info.width_mm = info.width_mm;
-	conn->display_info.height_mm = info.height_mm;
-
-	return status;
-}
-
-void dp_connector_post_open(struct drm_connector *connector, void *display)
-{
-	struct dp_display *dp;
-
-	if (!display) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = display;
-
-	if (dp->post_open)
-		dp->post_open(dp);
-}
-
-int dp_connector_get_modes(struct drm_connector *connector,
-		void *display)
-{
-	int rc = 0;
-	struct dp_display *dp;
-	struct dp_display_mode *dp_mode = NULL;
-	struct drm_display_mode *m, drm_mode;
-	struct sde_connector *sde_conn;
-
-	if (!connector || !display)
-		return 0;
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid dp panel\n");
-		return 0;
-	}
-
-	dp = display;
-
-	dp_mode = kzalloc(sizeof(*dp_mode),  GFP_KERNEL);
-	if (!dp_mode)
-		return 0;
-
-	/* pluggable case assumes EDID is read when HPD */
-	if (dp->is_sst_connected) {
-		rc = dp->get_modes(dp, sde_conn->drv_panel, dp_mode);
-		if (!rc)
-			pr_err("failed to get DP sink modes, rc=%d\n", rc);
-
-		if (dp_mode->timing.pixel_clk_khz) { /* valid DP mode */
-			memset(&drm_mode, 0x0, sizeof(drm_mode));
-			convert_to_drm_mode(dp_mode, &drm_mode);
-			m = drm_mode_duplicate(connector->dev, &drm_mode);
-			if (!m) {
-				pr_err("failed to add mode %ux%u\n",
-				       drm_mode.hdisplay,
-				       drm_mode.vdisplay);
-				kfree(dp_mode);
-				return 0;
-			}
-			m->width_mm = connector->display_info.width_mm;
-			m->height_mm = connector->display_info.height_mm;
-			drm_mode_probed_add(connector, m);
-		}
-	} else {
-		pr_err("No sink connected\n");
-	}
-	kfree(dp_mode);
-
-	return rc;
-}
-
-int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
-{
-	int rc = 0;
-	struct dp_bridge *bridge;
-	struct drm_device *dev;
-	struct dp_display *display = data;
-	struct msm_drm_private *priv = NULL;
-
-	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
-	if (!bridge) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	dev = display->drm_dev;
-	bridge->display = display;
-	bridge->base.funcs = &dp_bridge_ops;
-	bridge->base.encoder = encoder;
-
-	priv = dev->dev_private;
-
-	rc = drm_bridge_attach(encoder, &bridge->base, NULL);
-	if (rc) {
-		pr_err("failed to attach bridge, rc=%d\n", rc);
-		goto error_free_bridge;
-	}
-
-	rc = display->request_irq(display);
-	if (rc) {
-		pr_err("request_irq failed, rc=%d\n", rc);
-		goto error_free_bridge;
-	}
-
-	encoder->bridge = &bridge->base;
-	priv->bridges[priv->num_bridges++] = &bridge->base;
-	display->bridge = bridge;
-
-	return 0;
-error_free_bridge:
-	kfree(bridge);
-error:
-	return rc;
-}
-
-void dp_drm_bridge_deinit(void *data)
-{
-	struct dp_display *display = data;
-	struct dp_bridge *bridge = display->bridge;
-
-	if (bridge && bridge->base.encoder)
-		bridge->base.encoder->bridge = NULL;
-
-	kfree(bridge);
-}
-
-enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
-		struct drm_display_mode *mode, void *display)
-{
-	struct dp_display *dp_disp;
-	struct sde_connector *sde_conn;
-
-	if (!mode || !display || !connector) {
-		pr_err("invalid params\n");
-		return MODE_ERROR;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid dp panel\n");
-		return MODE_ERROR;
-	}
-
-	dp_disp = display;
-	mode->vrefresh = drm_mode_vrefresh(mode);
-
-	return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, mode);
-}
-
-int dp_connector_update_pps(struct drm_connector *connector,
-		char *pps_cmd, void *display)
-{
-	struct dp_display *dp_disp;
-	struct sde_connector *sde_conn;
-
-	if (!display || !connector) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn->drv_panel) {
-		pr_err("invalid dp panel\n");
-		return MODE_ERROR;
-	}
-
-	dp_disp = display;
-	return dp_disp->update_pps(dp_disp, connector, pps_cmd);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
deleted file mode 100644
index f887f82..0000000
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_DRM_H_
-#define _DP_DRM_H_
-
-#include <linux/types.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "msm_drv.h"
-#include "dp_display.h"
-
-struct dp_bridge {
-	struct drm_bridge base;
-	u32 id;
-
-	struct drm_connector *connector;
-	struct dp_display *display;
-	struct dp_display_mode dp_mode;
-	void *dp_panel;
-};
-
-
-#ifdef CONFIG_DRM_MSM_DP
-/**
- * dp_connector_config_hdr - callback to configure HDR
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display handle
- * @c_state: connect state data
- * Returns: Zero on success
- */
-int dp_connector_config_hdr(struct drm_connector *connector,
-		void *display,
-		struct sde_connector_state *c_state);
-
-/**
- * dp_connector_post_init - callback to perform additional initialization steps
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display handle
- * Returns: Zero on success
- */
-int dp_connector_post_init(struct drm_connector *connector, void *display);
-
-/**
- * dp_connector_detect - callback to determine if connector is connected
- * @connector: Pointer to drm connector structure
- * @force: Force detect setting from drm framework
- * @display: Pointer to private display handle
- * Returns: Connector 'is connected' status
- */
-enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
-		bool force,
-		void *display);
-
-/**
- * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display handle
- * Returns: Number of modes added
- */
-int dp_connector_get_modes(struct drm_connector *connector,
-		void *display);
-
-/**
- * dp_connector_mode_valid - callback to determine if specified mode is valid
- * @connector: Pointer to drm connector structure
- * @mode: Pointer to drm mode structure
- * @display: Pointer to private display handle
- * Returns: Validity status for specified mode
- */
-enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *display);
-
-/**
- * dp_connector_get_mode_info - retrieve information of the mode selected
- * @connector: Pointer to drm connector structure
- * @drm_mode: Display mode set for the display
- * @mode_info: Out parameter. Information of the mode
- * @max_mixer_width: max width supported by HW layer mixer
- * @display: Pointer to private display structure
- * Returns: zero on success
- */
-int dp_connector_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display);
-
-/**
- * dp_connector_get_info - retrieve connector display info
- * @connector: Pointer to drm connector structure
- * @info: Out parameter. Information of the connected display
- * @display: Pointer to private display structure
- * Returns: zero on success
- */
-int dp_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *display);
-
-/**
- * dp_connector_post_open - handle the post open functionalites
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display structure
- */
-void dp_connector_post_open(struct drm_connector *connector, void *display);
-
-int dp_drm_bridge_init(void *display,
-	struct drm_encoder *encoder);
-
-void dp_drm_bridge_deinit(void *display);
-
-/**
- * convert_to_drm_mode - convert dp mode to drm mode
- * @dp_mode: Point to dp mode
- * @drm_mode: Pointer to drm mode
- */
-void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
-				struct drm_display_mode *drm_mode);
-
-/**
- * dp_connector_update_pps - update pps for given connector
- * @dp_mode: Point to dp mode
- * @pps_cmd: PPS packet
- * @display: Pointer to private display structure
- */
-int dp_connector_update_pps(struct drm_connector *connector,
-		char *pps_cmd, void *display);
-
-/**
- * dp_mst_drm_bridge_init - initialize mst bridge
- * @display: Pointer to private display structure
- * @encoder: Pointer to encoder for mst bridge mapping
- */
-int dp_mst_drm_bridge_init(void *display,
-	struct drm_encoder *encoder);
-
-/**
- * dp_mst_drm_bridge_deinit - de-initialize mst bridges
- * @display: Pointer to private display structure
- */
-void dp_mst_drm_bridge_deinit(void *display);
-
-/**
- * dp_mst_init - initialize mst objects for the given display
- * @display: Pointer to private display structure
- */
-int dp_mst_init(struct dp_display *dp_display);
-
-/**
- * dp_mst_deinit - de-initialize mst objects for the given display
- * @display: Pointer to private display structure
- */
-void dp_mst_deinit(struct dp_display *dp_display);
-#else
-static inline int dp_connector_config_hdr(struct drm_connector *connector,
-		void *display, struct sde_connector_state *c_state)
-{
-	return 0;
-}
-
-static inline int dp_connector_post_init(struct drm_connector *connector,
-		void *display)
-{
-	return 0;
-}
-
-static inline enum drm_connector_status dp_connector_detect(
-		struct drm_connector *conn,
-		bool force,
-		void *display)
-{
-	return 0;
-}
-
-
-static inline int dp_connector_get_modes(struct drm_connector *connector,
-		void *display)
-{
-	return 0;
-}
-
-static inline enum drm_mode_status dp_connector_mode_valid(
-		struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *display)
-{
-	return MODE_OK;
-}
-
-static inline int dp_connector_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display)
-{
-	return 0;
-}
-
-static inline int dp_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *display)
-{
-	return 0;
-}
-
-static inline void dp_connector_post_open(struct drm_connector *connector,
-		void *display)
-{
-}
-
-static inline int dp_drm_bridge_init(void *display, struct drm_encoder *encoder)
-{
-	return 0;
-}
-
-static inline void dp_drm_bridge_deinit(void *display)
-{
-}
-
-static inline void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
-				struct drm_display_mode *drm_mode)
-{
-}
-
-static inline int dp_mst_drm_bridge_init(void *display,
-	struct drm_encoder *encoder)
-{
-	return 0;
-}
-
-static inline void dp_mst_drm_bridge_deinit(void *display)
-{
-}
-
-static inline int dp_mst_init(struct dp_display *dp_display)
-{
-	return 0;
-}
-
-static inline int dp_mst_deinit(struct dp_display *dp_display)
-{
-	return 0;
-}
-#endif
-
-#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c b/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c
deleted file mode 100644
index 86082ea..0000000
--- a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/sde_io_util.h>
-#include <linux/of_gpio.h>
-#include "dp_gpio_hpd.h"
-
-struct dp_gpio_hpd_private {
-	struct device *dev;
-	struct dp_hpd base;
-	struct dss_gpio gpio_cfg;
-	struct delayed_work work;
-	struct dp_hpd_cb *cb;
-	int irq;
-	bool hpd;
-};
-
-static int dp_gpio_hpd_connect(struct dp_gpio_hpd_private *gpio_hpd, bool hpd)
-{
-	int rc = 0;
-
-	if (!gpio_hpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	gpio_hpd->base.hpd_high = hpd;
-	gpio_hpd->base.alt_mode_cfg_done = hpd;
-	gpio_hpd->base.hpd_irq = false;
-
-	if (!gpio_hpd->cb ||
-		!gpio_hpd->cb->configure ||
-		!gpio_hpd->cb->disconnect) {
-		pr_err("invalid cb\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (hpd)
-		rc = gpio_hpd->cb->configure(gpio_hpd->dev);
-	else
-		rc = gpio_hpd->cb->disconnect(gpio_hpd->dev);
-
-error:
-	return rc;
-}
-
-static int dp_gpio_hpd_attention(struct dp_gpio_hpd_private *gpio_hpd)
-{
-	int rc = 0;
-
-	if (!gpio_hpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	gpio_hpd->base.hpd_irq = true;
-
-	if (gpio_hpd->cb && gpio_hpd->cb->attention)
-		rc = gpio_hpd->cb->attention(gpio_hpd->dev);
-
-error:
-	return rc;
-}
-
-static irqreturn_t dp_gpio_isr(int unused, void *data)
-{
-	struct dp_gpio_hpd_private *gpio_hpd = data;
-	u32 const disconnect_timeout_retry = 50;
-	bool hpd;
-	int i;
-
-	if (!gpio_hpd)
-		return IRQ_NONE;
-
-	hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
-
-	if (!gpio_hpd->hpd && hpd) {
-		gpio_hpd->hpd = true;
-		queue_delayed_work(system_wq, &gpio_hpd->work, 0);
-		return IRQ_HANDLED;
-	}
-
-	if (!gpio_hpd->hpd)
-		return IRQ_HANDLED;
-
-	/* In DP 1.2 spec, 100msec is recommended for the detection
-	 * of HPD connect event. Here we'll poll HPD status for
-	 * 50x2ms = 100ms and if HPD is always low, we know DP is
-	 * disconnected. If HPD is high, HPD_IRQ will be handled
-	 */
-	for (i = 0; i < disconnect_timeout_retry; i++) {
-		if (hpd) {
-			dp_gpio_hpd_attention(gpio_hpd);
-			return IRQ_HANDLED;
-		}
-		usleep_range(2000, 2100);
-		hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
-	}
-
-	gpio_hpd->hpd = false;
-	queue_delayed_work(system_wq, &gpio_hpd->work, 0);
-	return IRQ_HANDLED;
-}
-
-static void dp_gpio_hpd_work(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct dp_gpio_hpd_private *gpio_hpd = container_of(dw,
-		struct dp_gpio_hpd_private, work);
-	int ret;
-
-	if (gpio_hpd->hpd) {
-		devm_free_irq(gpio_hpd->dev,
-			gpio_hpd->irq, gpio_hpd);
-		ret = devm_request_threaded_irq(gpio_hpd->dev,
-			gpio_hpd->irq, NULL,
-			dp_gpio_isr,
-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-			"dp-gpio-intp", gpio_hpd);
-		dp_gpio_hpd_connect(gpio_hpd, true);
-	} else {
-		devm_free_irq(gpio_hpd->dev,
-				gpio_hpd->irq, gpio_hpd);
-		ret = devm_request_threaded_irq(gpio_hpd->dev,
-			gpio_hpd->irq, NULL,
-			dp_gpio_isr,
-			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-			"dp-gpio-intp", gpio_hpd);
-		dp_gpio_hpd_connect(gpio_hpd, false);
-	}
-
-	if (ret < 0)
-		pr_err("Cannot claim IRQ dp-gpio-intp\n");
-}
-
-static int dp_gpio_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
-{
-	int rc = 0;
-	struct dp_gpio_hpd_private *gpio_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
-
-	dp_gpio_hpd_connect(gpio_hpd, hpd);
-error:
-	return rc;
-}
-
-static int dp_gpio_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
-{
-	int rc = 0;
-	struct dp_gpio_hpd_private *gpio_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
-
-	dp_gpio_hpd_attention(gpio_hpd);
-error:
-	return rc;
-}
-
-int dp_gpio_hpd_register(struct dp_hpd *dp_hpd)
-{
-	struct dp_gpio_hpd_private *gpio_hpd;
-	int edge;
-	int rc = 0;
-
-	if (!dp_hpd)
-		return -EINVAL;
-
-	gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
-
-	gpio_hpd->hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
-
-	edge = gpio_hpd->hpd ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
-	rc = devm_request_threaded_irq(gpio_hpd->dev, gpio_hpd->irq, NULL,
-		dp_gpio_isr,
-		edge | IRQF_ONESHOT,
-		"dp-gpio-intp", gpio_hpd);
-	if (rc) {
-		pr_err("Failed to request INTP threaded IRQ: %d\n", rc);
-		return rc;
-	}
-
-	if (gpio_hpd->hpd)
-		queue_delayed_work(system_wq, &gpio_hpd->work, 0);
-
-	return rc;
-}
-
-struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
-	struct dp_hpd_cb *cb)
-{
-	int rc = 0;
-	const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
-	struct dp_gpio_hpd_private *gpio_hpd;
-	struct dp_pinctrl pinctrl = {0};
-
-	if (!dev || !cb) {
-		pr_err("invalid device\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	gpio_hpd = devm_kzalloc(dev, sizeof(*gpio_hpd), GFP_KERNEL);
-	if (!gpio_hpd) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	pinctrl.pin = devm_pinctrl_get(dev);
-	if (!IS_ERR_OR_NULL(pinctrl.pin)) {
-		pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin,
-						"mdss_dp_hpd_active");
-		if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) {
-			rc = pinctrl_select_state(pinctrl.pin,
-					pinctrl.state_hpd_active);
-			if (rc) {
-				pr_err("failed to set hpd active state\n");
-				goto gpio_error;
-			}
-		}
-	}
-
-	gpio_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
-		hpd_gpio_name, 0);
-	if (!gpio_is_valid(gpio_hpd->gpio_cfg.gpio)) {
-		pr_err("%s gpio not specified\n", hpd_gpio_name);
-		rc = -EINVAL;
-		goto gpio_error;
-	}
-
-	strlcpy(gpio_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
-		sizeof(gpio_hpd->gpio_cfg.gpio_name));
-	gpio_hpd->gpio_cfg.value = 0;
-
-	rc = gpio_request(gpio_hpd->gpio_cfg.gpio,
-		gpio_hpd->gpio_cfg.gpio_name);
-	if (rc) {
-		pr_err("%s: failed to request gpio\n", hpd_gpio_name);
-		goto gpio_error;
-	}
-	gpio_direction_input(gpio_hpd->gpio_cfg.gpio);
-
-	gpio_hpd->dev = dev;
-	gpio_hpd->cb = cb;
-	gpio_hpd->irq = gpio_to_irq(gpio_hpd->gpio_cfg.gpio);
-	INIT_DELAYED_WORK(&gpio_hpd->work, dp_gpio_hpd_work);
-
-	gpio_hpd->base.simulate_connect = dp_gpio_hpd_simulate_connect;
-	gpio_hpd->base.simulate_attention = dp_gpio_hpd_simulate_attention;
-	gpio_hpd->base.register_hpd = dp_gpio_hpd_register;
-
-	return &gpio_hpd->base;
-
-gpio_error:
-	devm_kfree(dev, gpio_hpd);
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_gpio_hpd_put(struct dp_hpd *dp_hpd)
-{
-	struct dp_gpio_hpd_private *gpio_hpd;
-
-	if (!dp_hpd)
-		return;
-
-	gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
-
-	gpio_free(gpio_hpd->gpio_cfg.gpio);
-	devm_kfree(gpio_hpd->dev, gpio_hpd);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.h b/drivers/gpu/drm/msm/dp/dp_gpio_hpd.h
deleted file mode 100644
index bb23c3b..0000000
--- a/drivers/gpu/drm/msm/dp/dp_gpio_hpd.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-
-#ifndef _DP_GPIO_HPD_H_
-#define _DP_GPIO_HPD_H_
-
-#include "dp_hpd.h"
-
-/**
- * dp_gpio_hpd_get() - configure and get the DisplayPlot HPD module data
- *
- * @dev: device instance of the caller
- * return: pointer to allocated gpio hpd module data
- *
- * This function sets up the gpio hpd module
- */
-struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
-	struct dp_hpd_cb *cb);
-
-/**
- * dp_gpio_hpd_put()
- *
- * Cleans up dp_hpd instance
- *
- * @hpd: instance of gpio_hpd
- */
-void dp_gpio_hpd_put(struct dp_hpd *hpd);
-
-#endif /* _DP_GPIO_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
deleted file mode 100644
index f71c25e..0000000
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ /dev/null
@@ -1,978 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[dp-hdcp2p2] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/types.h>
-#include <linux/kthread.h>
-#include <linux/msm_hdcp.h>
-#include <linux/kfifo.h>
-#include <drm/drm_dp_helper.h>
-
-#include "sde_hdcp_2x.h"
-
-#define DP_INTR_STATUS2				(0x00000024)
-#define DP_INTR_STATUS3				(0x00000028)
-#define dp_read(offset) readl_relaxed((offset))
-#define dp_write(offset, data) writel_relaxed((data), (offset))
-#define DP_HDCP_RXCAPS_LENGTH 3
-
-enum dp_hdcp2p2_sink_status {
-	SINK_DISCONNECTED,
-	SINK_CONNECTED
-};
-
-struct dp_hdcp2p2_ctrl {
-	DECLARE_KFIFO(cmd_q, enum hdcp_transport_wakeup_cmd, 8);
-	wait_queue_head_t wait_q;
-	atomic_t auth_state;
-	enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */
-	struct dp_hdcp2p2_interrupts *intr;
-	struct sde_hdcp_init_data init_data;
-	struct mutex mutex; /* mutex to protect access to ctrl */
-	struct mutex msg_lock; /* mutex to protect access to msg buffer */
-	struct sde_hdcp_ops *ops;
-	void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
-	struct sde_hdcp_2x_ops *lib; /* Ops for driver to call into TZ */
-
-	struct task_struct *thread;
-	struct hdcp2_buffer response;
-	struct hdcp2_buffer request;
-	uint32_t total_message_length;
-	uint32_t timeout;
-	struct sde_hdcp_2x_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS];
-	u8 sink_rx_status;
-	u8 rx_status;
-	char abort_mask;
-
-	bool polling;
-};
-
-struct dp_hdcp2p2_int_set {
-	u32 interrupt;
-	char *name;
-	void (*func)(struct dp_hdcp2p2_ctrl *ctrl);
-};
-
-struct dp_hdcp2p2_interrupts {
-	u32 reg;
-	struct dp_hdcp2p2_int_set *int_set;
-};
-
-static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (!ctrl->lib_ctx) {
-		pr_err("HDCP library needs to be acquired\n");
-		return -EINVAL;
-	}
-
-	if (!ctrl->lib) {
-		pr_err("invalid lib ops data\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	enum hdcp_transport_wakeup_cmd cmd;
-
-	if (kfifo_peek(&ctrl->cmd_q, &cmd) &&
-			cmd == HDCP_TRANSPORT_CMD_AUTHENTICATE)
-		return true;
-
-	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
-		return true;
-
-	return false;
-}
-
-static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl,
-	struct hdcp_transport_wakeup_data *data)
-{
-	int i = 0;
-	uint32_t num_messages = 0;
-
-	if (!data || !data->message_data)
-		return 0;
-
-	mutex_lock(&ctrl->msg_lock);
-
-	ctrl->timeout = data->timeout;
-	num_messages = data->message_data->num_messages;
-	ctrl->total_message_length = 0; /* Total length of all messages */
-
-	for (i = 0; i < num_messages; i++)
-		ctrl->total_message_length +=
-			data->message_data->messages[i].length;
-
-	memcpy(ctrl->msg_part, data->message_data->messages,
-		sizeof(data->message_data->messages));
-
-	ctrl->rx_status = data->message_data->rx_status;
-	ctrl->abort_mask = data->abort_mask;
-
-	if (!ctrl->total_message_length) {
-		mutex_unlock(&ctrl->msg_lock);
-		return 0;
-	}
-
-	ctrl->response.data = data->buf;
-	ctrl->response.length = ctrl->total_message_length;
-	ctrl->request.data = data->buf;
-	ctrl->request.length = ctrl->total_message_length;
-
-	mutex_unlock(&ctrl->msg_lock);
-
-	return 0;
-}
-
-static void dp_hdcp2p2_send_auth_status(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
-		atomic_read(&ctrl->auth_state));
-}
-
-static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable)
-{
-	void __iomem *base = ctrl->init_data.dp_ahb->base;
-	struct dp_hdcp2p2_interrupts *intr = ctrl->intr;
-
-	while (intr && intr->reg) {
-		struct dp_hdcp2p2_int_set *int_set = intr->int_set;
-		u32 interrupts = 0;
-
-		while (int_set && int_set->interrupt) {
-			interrupts |= int_set->interrupt;
-			int_set++;
-		}
-
-		if (enable)
-			dp_write(base + intr->reg,
-				dp_read(base + intr->reg) | interrupts);
-		else
-			dp_write(base + intr->reg,
-				dp_read(base + intr->reg) & ~interrupts);
-		intr++;
-	}
-}
-
-static int dp_hdcp2p2_wakeup(struct hdcp_transport_wakeup_data *data)
-{
-	struct dp_hdcp2p2_ctrl *ctrl;
-	u32 const default_timeout_us = 500;
-
-	if (!data) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	ctrl = data->context;
-	if (!ctrl) {
-		pr_err("invalid ctrl\n");
-		return -EINVAL;
-	}
-
-	if (data->timeout)
-		ctrl->timeout = (data->timeout) * 2;
-	else
-		ctrl->timeout = default_timeout_us;
-
-	if (dp_hdcp2p2_copy_buf(ctrl, data))
-		goto exit;
-
-	ctrl->polling = false;
-	switch (data->cmd) {
-	case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
-		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
-		break;
-	case HDCP_TRANSPORT_CMD_STATUS_FAILED:
-		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
-		break;
-	default:
-		break;
-	}
-
-	kfifo_put(&ctrl->cmd_q, data->cmd);
-	wake_up(&ctrl->wait_q);
-exit:
-	return 0;
-}
-
-static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl,
-	struct sde_hdcp_2x_wakeup_data *data)
-{
-	int rc = 0;
-
-	if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
-		data && (data->cmd != HDCP_2X_CMD_INVALID)) {
-		rc = ctrl->lib->wakeup(data);
-		if (rc)
-			pr_err("error sending %s to lib\n",
-				sde_hdcp_2x_cmd_to_str(data->cmd));
-	}
-}
-
-static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	ctrl->sink_status = SINK_DISCONNECTED;
-	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
-}
-
-static int dp_hdcp2p2_register(void *input, bool mst_enabled)
-{
-	int rc;
-	enum sde_hdcp_2x_device_type device_type;
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return rc;
-
-	if (mst_enabled)
-		device_type = HDCP_TXMTR_DP_MST;
-	else
-		device_type = HDCP_TXMTR_DP;
-
-	return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
-}
-
-static int dp_hdcp2p2_on(void *input)
-{
-	int rc = 0;
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return rc;
-
-	cdata.cmd = HDCP_2X_CMD_START;
-	cdata.context = ctrl->lib_ctx;
-	rc = ctrl->lib->wakeup(&cdata);
-	if (rc)
-		pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
-
-	return rc;
-}
-
-static void dp_hdcp2p2_off(void *input)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return;
-
-	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
-		cdata.cmd = HDCP_2X_CMD_STOP;
-		cdata.context = ctrl->lib_ctx;
-		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-	}
-
-	dp_hdcp2p2_set_interrupts(ctrl, false);
-
-	dp_hdcp2p2_reset(ctrl);
-
-	kthread_park(ctrl->thread);
-
-	sde_hdcp_2x_disable(ctrl->lib_ctx);
-}
-
-static int dp_hdcp2p2_authenticate(void *input)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	struct hdcp_transport_wakeup_data cdata = {
-					HDCP_TRANSPORT_CMD_AUTHENTICATE};
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return rc;
-
-	dp_hdcp2p2_set_interrupts(ctrl, true);
-
-	ctrl->sink_status = SINK_CONNECTED;
-	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
-
-	kthread_unpark(ctrl->thread);
-
-	cdata.context = input;
-	dp_hdcp2p2_wakeup(&cdata);
-
-	return rc;
-}
-
-static int dp_hdcp2p2_reauthenticate(void *input)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input);
-
-	return  dp_hdcp2p2_authenticate(input);
-}
-
-static void dp_hdcp2p2_min_level_change(void *client_ctx,
-		u8 min_enc_level)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)client_ctx;
-	struct sde_hdcp_2x_wakeup_data cdata = {
-		HDCP_2X_CMD_MIN_ENC_LEVEL};
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (!dp_hdcp2p2_is_valid_state(ctrl)) {
-		pr_err("invalid state\n");
-		return;
-	}
-
-	cdata.context = ctrl->lib_ctx;
-	cdata.min_enc_level = min_enc_level;
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-}
-
-static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	int rc = 0, max_size = 16, read_size = 0, bytes_read = 0;
-	int size = ctrl->request.length, offset = ctrl->msg_part->offset;
-	u8 *buf = ctrl->request.data;
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE ||
-		atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL) {
-		pr_err("invalid hdcp state\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	if (!buf) {
-		pr_err("invalid request buffer\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	pr_debug("offset(0x%x), size(%d)\n", offset, size);
-
-	do {
-		read_size = min(size, max_size);
-
-		bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
-				offset, buf, read_size);
-		if (bytes_read != read_size) {
-			pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
-					offset, read_size, bytes_read);
-			rc = -EINVAL;
-			break;
-		}
-
-		buf += read_size;
-		offset += read_size;
-		size -= read_size;
-	} while (size > 0);
-
-exit:
-	return rc;
-}
-
-static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl,
-	u8 *buf, int size, uint offset, uint timeout)
-{
-	int const max_size = 16;
-	int rc = 0, write_size = 0, bytes_written = 0;
-
-	pr_debug("offset(0x%x), size(%d)\n", offset, size);
-
-	do {
-		write_size = min(size, max_size);
-
-		bytes_written = drm_dp_dpcd_write(ctrl->init_data.drm_aux,
-				offset, buf, write_size);
-		if (bytes_written != write_size) {
-			pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
-					offset, write_size, bytes_written);
-			rc = -EINVAL;
-			break;
-		}
-
-		buf += write_size;
-		offset += write_size;
-		size -= write_size;
-	} while (size > 0);
-
-	return rc;
-}
-
-static bool dp_hdcp2p2_feature_supported(void *input)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	struct sde_hdcp_2x_ops *lib = NULL;
-	bool supported = false;
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return supported;
-
-	lib = ctrl->lib;
-	if (lib->feature_supported)
-		supported = lib->feature_supported(
-			ctrl->lib_ctx);
-
-	return supported;
-}
-
-static void dp_hdcp2p2_force_encryption(void *data, bool enable)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl = data;
-	struct sde_hdcp_2x_ops *lib = NULL;
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return;
-
-	lib = ctrl->lib;
-	if (lib->force_encryption)
-		lib->force_encryption(ctrl->lib_ctx, enable);
-}
-
-static void dp_hdcp2p2_send_msg(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	int rc = 0;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	cdata.context = ctrl->lib_ctx;
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("hdcp is off\n");
-		goto exit;
-	}
-
-	mutex_lock(&ctrl->msg_lock);
-
-	rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->response.data,
-			ctrl->response.length, ctrl->msg_part->offset,
-			ctrl->timeout);
-	if (rc) {
-		pr_err("Error sending msg to sink %d\n", rc);
-		mutex_unlock(&ctrl->msg_lock);
-		goto exit;
-	}
-
-	cdata.cmd = HDCP_2X_CMD_MSG_SEND_SUCCESS;
-	cdata.timeout = ctrl->timeout;
-	mutex_unlock(&ctrl->msg_lock);
-
-exit:
-	if (rc == -ETIMEDOUT)
-		cdata.cmd = HDCP_2X_CMD_MSG_SEND_TIMEOUT;
-	else if (rc)
-		cdata.cmd = HDCP_2X_CMD_MSG_SEND_FAILED;
-
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-}
-
-static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	int rc = 0;
-	struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID };
-
-	cdata.context = ctrl->lib_ctx;
-
-	rc = dp_hdcp2p2_aux_read_message(ctrl);
-	if (rc) {
-		pr_err("error reading message %d\n", rc);
-		goto exit;
-	}
-
-	cdata.total_message_length = ctrl->total_message_length;
-	cdata.timeout = ctrl->timeout;
-exit:
-	if (rc == -ETIMEDOUT)
-		cdata.cmd = HDCP_2X_CMD_MSG_RECV_TIMEOUT;
-	else if (rc)
-		cdata.cmd = HDCP_2X_CMD_MSG_RECV_FAILED;
-	else
-		cdata.cmd = HDCP_2X_CMD_MSG_RECV_SUCCESS;
-
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-
-	return rc;
-}
-
-static void dp_hdcp2p2_recv_msg(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID };
-
-	cdata.context = ctrl->lib_ctx;
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("hdcp is off\n");
-		return;
-	}
-
-	dp_hdcp2p2_get_msg_from_sink(ctrl);
-}
-
-static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	int rc = 0, retries = 10;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
-		atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("invalid hdcp state\n");
-		return;
-	}
-
-	cdata.context = ctrl->lib_ctx;
-
-	if (ctrl->sink_rx_status & ctrl->abort_mask) {
-		if (ctrl->sink_rx_status & BIT(3))
-			pr_err("reauth_req set by sink\n");
-
-		if (ctrl->sink_rx_status & BIT(4))
-			pr_err("link failure reported by sink\n");
-
-		ctrl->sink_rx_status = 0;
-		ctrl->rx_status = 0;
-
-		rc = -ENOLINK;
-
-		cdata.cmd = HDCP_2X_CMD_LINK_FAILED;
-		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
-		goto exit;
-	}
-
-	/* wait for polling to start till spec allowed timeout */
-	while (!ctrl->polling && retries--)
-		msleep(20);
-
-	/* check if sink has made a message available */
-	if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
-		ctrl->sink_rx_status = 0;
-		ctrl->rx_status = 0;
-
-		dp_hdcp2p2_get_msg_from_sink(ctrl);
-
-		ctrl->polling = false;
-	}
-exit:
-	if (rc)
-		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-}
-
-static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
-{
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
-	cdata.context = ctrl->lib_ctx;
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
-		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-}
-
-static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
-		u8 *rx_status)
-{
-	u32 const cp_irq_dpcd_offset = 0x201;
-	u32 const rxstatus_dpcd_offset = 0x69493;
-	ssize_t const bytes_to_read = 1;
-	ssize_t bytes_read = 0;
-	u8 buf = 0;
-	int rc = 0;
-	bool cp_irq = false;
-
-	*rx_status = 0;
-
-	bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
-			cp_irq_dpcd_offset, &buf, bytes_to_read);
-	if (bytes_read != bytes_to_read) {
-		pr_err("cp irq read failed\n");
-		rc = bytes_read;
-		goto error;
-	}
-
-	cp_irq = buf & BIT(2);
-	pr_debug("cp_irq=0x%x\n", cp_irq);
-	buf = 0;
-
-	if (cp_irq) {
-		bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
-				rxstatus_dpcd_offset, &buf, bytes_to_read);
-		if (bytes_read != bytes_to_read) {
-			pr_err("rxstatus read failed\n");
-			rc = bytes_read;
-			goto error;
-		}
-		*rx_status = buf;
-		pr_debug("rx_status=0x%x\n", *rx_status);
-	}
-
-error:
-	return rc;
-}
-
-static int dp_hdcp2p2_cp_irq(void *input)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-
-	rc = dp_hdcp2p2_valid_handle(ctrl);
-	if (rc)
-		return rc;
-
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
-		atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("invalid hdcp state\n");
-		return -EINVAL;
-	}
-
-	ctrl->sink_rx_status = 0;
-	rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
-	if (rc) {
-		pr_err("failed to read rx status\n");
-		return rc;
-	}
-
-	pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
-
-	if (!ctrl->sink_rx_status) {
-		pr_debug("not a hdcp 2.2 irq\n");
-		return -EINVAL;
-	}
-
-
-	kfifo_put(&ctrl->cmd_q, HDCP_TRANSPORT_CMD_LINK_CHECK);
-	wake_up(&ctrl->wait_q);
-
-	return 0;
-}
-
-static int dp_hdcp2p2_isr(void *input)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	int rc = 0;
-	struct dss_io_data *io;
-	struct dp_hdcp2p2_interrupts *intr;
-	u32 hdcp_int_val = 0;
-
-	if (!ctrl || !ctrl->init_data.dp_ahb) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	io = ctrl->init_data.dp_ahb;
-	intr = ctrl->intr;
-
-	while (intr && intr->reg) {
-		struct dp_hdcp2p2_int_set *int_set = intr->int_set;
-
-		hdcp_int_val = dp_read(io->base + intr->reg);
-
-		while (int_set && int_set->interrupt) {
-			if (hdcp_int_val & (int_set->interrupt >> 2)) {
-				pr_debug("%s\n", int_set->name);
-
-				if (int_set->func)
-					int_set->func(ctrl);
-
-				dp_write(io->base + intr->reg, hdcp_int_val |
-					(int_set->interrupt >> 1));
-			}
-			int_set++;
-		}
-		intr++;
-	}
-end:
-	return rc;
-}
-
-static bool dp_hdcp2p2_supported(void *input)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	u32 const rxcaps_dpcd_offset = 0x6921d;
-	ssize_t bytes_read = 0;
-	u8 buf[DP_HDCP_RXCAPS_LENGTH];
-
-	pr_debug("Checking sink capability\n");
-
-	bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
-			rxcaps_dpcd_offset, &buf, DP_HDCP_RXCAPS_LENGTH);
-	if (bytes_read != DP_HDCP_RXCAPS_LENGTH) {
-		pr_err("RxCaps read failed\n");
-		goto error;
-	}
-
-	pr_debug("HDCP_CAPABLE=%lu\n", (buf[2] & BIT(1)) >> 1);
-	pr_debug("VERSION=%d\n", buf[0]);
-
-	if ((buf[2] & BIT(1)) && (buf[0] == 0x2))
-		return true;
-error:
-	return false;
-}
-
-static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
-		struct sde_hdcp_2x_wakeup_data *cdata)
-{
-	if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (!ctrl->lib_ctx) {
-		pr_err("HDCP library needs to be acquired\n");
-		return -EINVAL;
-	}
-
-	if (!ctrl->lib) {
-		pr_err("invalid lib ops data\n");
-		return -EINVAL;
-	}
-
-	cdata->context = ctrl->lib_ctx;
-	return ctrl->lib->wakeup(cdata);
-}
-
-
-static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
-			struct stream_info *streams)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
-
-	cdata.streams = streams;
-	cdata.num_streams = num_streams;
-	return dp_hdcp2p2_change_streams(ctrl, &cdata);
-}
-
-static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
-			struct stream_info *streams)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = input;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
-
-	cdata.streams = streams;
-	cdata.num_streams = num_streams;
-	return dp_hdcp2p2_change_streams(ctrl, &cdata);
-}
-
-void sde_dp_hdcp2p2_deinit(void *input)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
-		cdata.cmd = HDCP_2X_CMD_STOP;
-		cdata.context = ctrl->lib_ctx;
-		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
-	}
-
-	sde_hdcp_2x_deregister(ctrl->lib_ctx);
-
-	kthread_stop(ctrl->thread);
-
-	mutex_destroy(&ctrl->mutex);
-	mutex_destroy(&ctrl->msg_lock);
-	kfree(ctrl);
-}
-
-static int dp_hdcp2p2_main(void *data)
-{
-	struct dp_hdcp2p2_ctrl *ctrl = data;
-	enum hdcp_transport_wakeup_cmd cmd;
-
-	while (1) {
-		wait_event(ctrl->wait_q,
-			!kfifo_is_empty(&ctrl->cmd_q) ||
-			kthread_should_stop() ||
-			kthread_should_park());
-
-		if (kthread_should_stop())
-			break;
-
-		if (kfifo_is_empty(&ctrl->cmd_q) && kthread_should_park()) {
-			kthread_parkme();
-			continue;
-		}
-
-		if (!kfifo_get(&ctrl->cmd_q, &cmd))
-			continue;
-
-		switch (cmd) {
-		case HDCP_TRANSPORT_CMD_SEND_MESSAGE:
-			dp_hdcp2p2_send_msg(ctrl);
-			break;
-		case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
-			if (ctrl->rx_status)
-				ctrl->polling = true;
-			else
-				dp_hdcp2p2_recv_msg(ctrl);
-			break;
-		case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
-			dp_hdcp2p2_send_auth_status(ctrl);
-			break;
-		case HDCP_TRANSPORT_CMD_STATUS_FAILED:
-			dp_hdcp2p2_set_interrupts(ctrl, false);
-			dp_hdcp2p2_send_auth_status(ctrl);
-			break;
-		case HDCP_TRANSPORT_CMD_LINK_POLL:
-			ctrl->polling = true;
-			break;
-		case HDCP_TRANSPORT_CMD_LINK_CHECK:
-			dp_hdcp2p2_link_check(ctrl);
-			break;
-		case HDCP_TRANSPORT_CMD_AUTHENTICATE:
-			dp_hdcp2p2_start_auth(ctrl);
-			break;
-		default:
-			break;
-		}
-	}
-
-	return 0;
-}
-
-void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
-{
-	int rc;
-	struct dp_hdcp2p2_ctrl *ctrl;
-	static struct sde_hdcp_ops ops = {
-		.isr = dp_hdcp2p2_isr,
-		.reauthenticate = dp_hdcp2p2_reauthenticate,
-		.authenticate = dp_hdcp2p2_authenticate,
-		.feature_supported = dp_hdcp2p2_feature_supported,
-		.force_encryption = dp_hdcp2p2_force_encryption,
-		.sink_support = dp_hdcp2p2_supported,
-		.set_mode = dp_hdcp2p2_register,
-		.on = dp_hdcp2p2_on,
-		.off = dp_hdcp2p2_off,
-		.cp_irq = dp_hdcp2p2_cp_irq,
-		.register_streams = dp_hdcp2p2_register_streams,
-		.deregister_streams = dp_hdcp2p2_deregister_streams,
-	};
-
-	static struct hdcp_transport_ops client_ops = {
-		.wakeup = dp_hdcp2p2_wakeup,
-	};
-	static struct dp_hdcp2p2_int_set int_set1[] = {
-		{BIT(17), "authentication successful", NULL},
-		{BIT(20), "authentication failed", NULL},
-		{BIT(24), "encryption enabled", NULL},
-		{BIT(27), "encryption disabled", NULL},
-		{0},
-	};
-	static struct dp_hdcp2p2_int_set int_set2[] = {
-		{BIT(2),  "key fifo underflow", NULL},
-		{0},
-	};
-	static struct dp_hdcp2p2_interrupts intr[] = {
-		{DP_INTR_STATUS2, int_set1},
-		{DP_INTR_STATUS3, int_set2},
-		{0}
-	};
-	static struct sde_hdcp_2x_ops hdcp2x_ops;
-	struct sde_hdcp_2x_register_data register_data = {0};
-
-	if (!init_data || !init_data->cb_data ||
-			!init_data->notify_status || !init_data->drm_aux) {
-		pr_err("invalid input\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
-	if (!ctrl)
-		return ERR_PTR(-ENOMEM);
-
-	ctrl->init_data = *init_data;
-	ctrl->lib = &hdcp2x_ops;
-	ctrl->response.data = NULL;
-	ctrl->request.data = NULL;
-
-	ctrl->sink_status = SINK_DISCONNECTED;
-	ctrl->intr = intr;
-
-	INIT_KFIFO(ctrl->cmd_q);
-
-	init_waitqueue_head(&ctrl->wait_q);
-	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
-
-	ctrl->ops = &ops;
-	mutex_init(&ctrl->mutex);
-	mutex_init(&ctrl->msg_lock);
-
-	register_data.hdcp_data = &ctrl->lib_ctx;
-	register_data.client_ops = &client_ops;
-	register_data.ops = &hdcp2x_ops;
-	register_data.client_data = ctrl;
-
-	rc = sde_hdcp_2x_register(&register_data);
-	if (rc) {
-		pr_err("Unable to register with HDCP 2.2 library\n");
-		goto error;
-	}
-
-	if (IS_ENABLED(CONFIG_HDCP_QSEECOM))
-		msm_hdcp_register_cb(init_data->msm_hdcp_dev, ctrl,
-				dp_hdcp2p2_min_level_change);
-
-	ctrl->thread = kthread_run(dp_hdcp2p2_main, ctrl, "dp_hdcp2p2");
-
-	if (IS_ERR(ctrl->thread)) {
-		pr_err("unable to start DP hdcp2p2 thread\n");
-		rc = PTR_ERR(ctrl->thread);
-		ctrl->thread = NULL;
-		goto error;
-	}
-
-	return ctrl;
-error:
-	kfree(ctrl);
-	return ERR_PTR(rc);
-}
-
-struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input)
-{
-	return ((struct dp_hdcp2p2_ctrl *)input)->ops;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
deleted file mode 100644
index a48fe5f..0000000
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#include "dp_hpd.h"
-#include "dp_usbpd.h"
-#include "dp_gpio_hpd.h"
-#include "dp_lphw_hpd.h"
-
-static void dp_hpd_host_init(struct dp_hpd *dp_hpd,
-		struct dp_catalog_hpd *catalog)
-{
-	if (!catalog) {
-		pr_err("invalid input\n");
-		return;
-	}
-	catalog->config_hpd(catalog, true);
-}
-
-static void dp_hpd_host_deinit(struct dp_hpd *dp_hpd,
-		struct dp_catalog_hpd *catalog)
-{
-	if (!catalog) {
-		pr_err("invalid input\n");
-		return;
-	}
-	catalog->config_hpd(catalog, false);
-}
-
-static void dp_hpd_isr(struct dp_hpd *dp_hpd)
-{
-}
-
-struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
-		struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb)
-{
-	struct dp_hpd *dp_hpd;
-
-	if (parser->no_aux_switch && parser->lphw_hpd) {
-		dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
-		if (IS_ERR(dp_hpd)) {
-			pr_err("failed to get lphw hpd\n");
-			return dp_hpd;
-		}
-		dp_hpd->type = DP_HPD_LPHW;
-	} else if (parser->no_aux_switch) {
-		dp_hpd = dp_gpio_hpd_get(dev, cb);
-		if (IS_ERR(dp_hpd)) {
-			pr_err("failed to get gpio hpd\n");
-			return dp_hpd;
-		}
-		dp_hpd->type = DP_HPD_GPIO;
-	} else {
-		dp_hpd = dp_usbpd_get(dev, cb);
-		if (IS_ERR(dp_hpd)) {
-			pr_err("failed to get usbpd\n");
-			return dp_hpd;
-		}
-		dp_hpd->type = DP_HPD_USBPD;
-	}
-
-	if (!dp_hpd->host_init)
-		dp_hpd->host_init	= dp_hpd_host_init;
-	if (!dp_hpd->host_deinit)
-		dp_hpd->host_deinit	= dp_hpd_host_deinit;
-	if (!dp_hpd->isr)
-		dp_hpd->isr		= dp_hpd_isr;
-
-	return dp_hpd;
-}
-
-void dp_hpd_put(struct dp_hpd *dp_hpd)
-{
-	if (!dp_hpd)
-		return;
-
-	switch (dp_hpd->type) {
-	case DP_HPD_USBPD:
-		dp_usbpd_put(dp_hpd);
-		break;
-	case DP_HPD_GPIO:
-		dp_gpio_hpd_put(dp_hpd);
-		break;
-	case DP_HPD_LPHW:
-		dp_lphw_hpd_put(dp_hpd);
-		break;
-	default:
-		pr_err("unknown hpd type %d\n", dp_hpd->type);
-		break;
-	}
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
deleted file mode 100644
index bdc5512..0000000
--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_HPD_H_
-#define _DP_HPD_H_
-
-#include <linux/types.h>
-#include "dp_parser.h"
-#include "dp_catalog.h"
-
-struct device;
-
-/**
- * enum dp_hpd_type - dp hpd type
- * @DP_HPD_USBPD:   USB type-c based HPD
- * @DP_HPD_GPIO:    GPIO based HPD
- * @DP_HPD_BUILTIN: Controller built-in HPD
- */
-
-enum dp_hpd_type {
-	DP_HPD_USBPD,
-	DP_HPD_GPIO,
-	DP_HPD_LPHW,
-	DP_HPD_BUILTIN,
-};
-
-/**
- * struct dp_hpd_cb - callback functions provided by the client
- *
- * @configure: called when dp connection is ready.
- * @disconnect: notify the cable disconnect event.
- * @attention: notify any attention message event.
- */
-struct dp_hpd_cb {
-	int (*configure)(struct device *dev);
-	int (*disconnect)(struct device *dev);
-	int (*attention)(struct device *dev);
-};
-
-/**
- * struct dp_hpd - DisplayPort HPD status
- *
- * @type: type of HPD
- * @orientation: plug orientation configuration, USBPD type only.
- * @hpd_high: Hot Plug Detect signal is high.
- * @hpd_irq: Change in the status since last message
- * @alt_mode_cfg_done: bool to specify alt mode status
- * @multi_func: multi-function preferred, USBPD type only
- * @isr: event interrupt, BUILTIN and LPHW type only
- * @register_hpd: register hardware callback
- * @host_init: source or host side setup for hpd
- * @host_deinit: source or host side de-initializations
- * @simulate_connect: simulate disconnect or connect for debug mode
- * @simulate_attention: simulate attention messages for debug mode
- */
-struct dp_hpd {
-	enum dp_hpd_type type;
-	u32 orientation;
-	bool hpd_high;
-	bool hpd_irq;
-	bool alt_mode_cfg_done;
-	bool multi_func;
-
-	void (*isr)(struct dp_hpd *dp_hpd);
-	int (*register_hpd)(struct dp_hpd *dp_hpd);
-	void (*host_init)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
-	void (*host_deinit)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
-	int (*simulate_connect)(struct dp_hpd *dp_hpd, bool hpd);
-	int (*simulate_attention)(struct dp_hpd *dp_hpd, int vdo);
-};
-
-/**
- * dp_hpd_get() - configure and get the DisplayPlot HPD module data
- *
- * @dev: device instance of the caller
- * @parser: DP parser
- * @cb: callback function for HPD response
- * return: pointer to allocated hpd module data
- *
- * This function sets up the hpd module
- */
-struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
-		struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
-
-/**
- * dp_hpd_put()
- *
- * Cleans up dp_hpd instance
- *
- * @dp_hpd: instance of dp_hpd
- */
-void dp_hpd_put(struct dp_hpd *dp_hpd);
-
-#endif /* _DP_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
deleted file mode 100644
index d67320e..0000000
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ /dev/null
@@ -1,1526 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include "dp_link.h"
-#include "dp_panel.h"
-
-enum dynamic_range {
-	DP_DYNAMIC_RANGE_RGB_VESA = 0x00,
-	DP_DYNAMIC_RANGE_RGB_CEA = 0x01,
-	DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF,
-};
-
-enum audio_sample_rate {
-	AUDIO_SAMPLE_RATE_32_KHZ	= 0x00,
-	AUDIO_SAMPLE_RATE_44_1_KHZ	= 0x01,
-	AUDIO_SAMPLE_RATE_48_KHZ	= 0x02,
-	AUDIO_SAMPLE_RATE_88_2_KHZ	= 0x03,
-	AUDIO_SAMPLE_RATE_96_KHZ	= 0x04,
-	AUDIO_SAMPLE_RATE_176_4_KHZ	= 0x05,
-	AUDIO_SAMPLE_RATE_192_KHZ	= 0x06,
-};
-
-enum audio_pattern_type {
-	AUDIO_TEST_PATTERN_OPERATOR_DEFINED	= 0x00,
-	AUDIO_TEST_PATTERN_SAWTOOTH		= 0x01,
-};
-
-struct dp_link_request {
-	u32 test_requested;
-	u32 test_link_rate;
-	u32 test_lane_count;
-};
-
-struct dp_link_private {
-	u32 prev_sink_count;
-	struct device *dev;
-	struct dp_aux *aux;
-	struct dp_link dp_link;
-
-	struct dp_link_request request;
-	u8 link_status[DP_LINK_STATUS_SIZE];
-};
-
-static char *dp_link_get_audio_test_pattern(u32 pattern)
-{
-	switch (pattern) {
-	case AUDIO_TEST_PATTERN_OPERATOR_DEFINED:
-		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED);
-	case AUDIO_TEST_PATTERN_SAWTOOTH:
-		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH);
-	default:
-		return "unknown";
-	}
-}
-
-static char *dp_link_get_audio_sample_rate(u32 rate)
-{
-	switch (rate) {
-	case AUDIO_SAMPLE_RATE_32_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ);
-	case AUDIO_SAMPLE_RATE_44_1_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ);
-	case AUDIO_SAMPLE_RATE_48_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ);
-	case AUDIO_SAMPLE_RATE_88_2_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ);
-	case AUDIO_SAMPLE_RATE_96_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ);
-	case AUDIO_SAMPLE_RATE_176_4_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ);
-	case AUDIO_SAMPLE_RATE_192_KHZ:
-		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ);
-	default:
-		return "unknown";
-	}
-}
-
-static int dp_link_get_period(struct dp_link_private *link, int const addr)
-{
-	int ret = 0;
-	u8 bp;
-	u8 data;
-	u32 const param_len = 0x1;
-	u32 const max_audio_period = 0xA;
-
-	/* TEST_AUDIO_PERIOD_CH_XX */
-	if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp,
-		param_len) < param_len) {
-		pr_err("failed to read test_audio_period (0x%x)\n", addr);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	data = bp;
-
-	/* Period - Bits 3:0 */
-	data = data & 0xF;
-	if ((int)data > max_audio_period) {
-		pr_err("invalid test_audio_period_ch_1 = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	ret = data;
-exit:
-	return ret;
-}
-
-static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
-{
-	int ret = 0;
-	struct dp_link_test_audio *req = &link->dp_link.test_audio;
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_1 = ret;
-	pr_debug("test_audio_period_ch_1 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_2 = ret;
-	pr_debug("test_audio_period_ch_2 = 0x%x\n", ret);
-
-	/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_3 = ret;
-	pr_debug("test_audio_period_ch_3 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_4 = ret;
-	pr_debug("test_audio_period_ch_4 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_5 = ret;
-	pr_debug("test_audio_period_ch_5 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_6 = ret;
-	pr_debug("test_audio_period_ch_6 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_7 = ret;
-	pr_debug("test_audio_period_ch_7 = 0x%x\n", ret);
-
-	ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
-	if (ret == -EINVAL)
-		goto exit;
-
-	req->test_audio_period_ch_8 = ret;
-	pr_debug("test_audio_period_ch_8 = 0x%x\n", ret);
-exit:
-	return ret;
-}
-
-static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
-{
-	int ret = 0;
-	u8 bp;
-	u8 data;
-	int rlen;
-	int const param_len = 0x1;
-	int const max_audio_pattern_type = 0x1;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
-		DP_TEST_AUDIO_PATTERN_TYPE, &bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read link audio mode data\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-
-	/* Audio Pattern Type - Bits 7:0 */
-	if ((int)data > max_audio_pattern_type) {
-		pr_err("invalid audio pattern type = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->dp_link.test_audio.test_audio_pattern_type = data;
-	pr_debug("audio pattern type = %s\n",
-			dp_link_get_audio_test_pattern(data));
-exit:
-	return ret;
-}
-
-static int dp_link_parse_audio_mode(struct dp_link_private *link)
-{
-	int ret = 0;
-	u8 bp;
-	u8 data;
-	int rlen;
-	int const param_len = 0x1;
-	int const max_audio_sampling_rate = 0x6;
-	int const max_audio_channel_count = 0x8;
-	int sampling_rate = 0x0;
-	int channel_count = 0x0;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_AUDIO_MODE,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read link audio mode data\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-
-	/* Sampling Rate - Bits 3:0 */
-	sampling_rate = data & 0xF;
-	if (sampling_rate > max_audio_sampling_rate) {
-		pr_err("sampling rate (0x%x) greater than max (0x%x)\n",
-				sampling_rate, max_audio_sampling_rate);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	/* Channel Count - Bits 7:4 */
-	channel_count = ((data & 0xF0) >> 4) + 1;
-	if (channel_count > max_audio_channel_count) {
-		pr_err("channel_count (0x%x) greater than max (0x%x)\n",
-				channel_count, max_audio_channel_count);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
-	link->dp_link.test_audio.test_audio_channel_count = channel_count;
-	pr_debug("sampling_rate = %s, channel_count = 0x%x\n",
-		dp_link_get_audio_sample_rate(sampling_rate), channel_count);
-exit:
-	return ret;
-}
-
-/**
- * dp_parse_audio_pattern_params() - parses audio pattern parameters from DPCD
- * @link: Display Port Driver data
- *
- * Returns 0 if it successfully parses the audio link pattern parameters.
- */
-static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
-{
-	int ret = 0;
-
-	ret = dp_link_parse_audio_mode(link);
-	if (ret)
-		goto exit;
-
-	ret = dp_link_parse_audio_pattern_type(link);
-	if (ret)
-		goto exit;
-
-	ret = dp_link_parse_audio_channel_period(link);
-
-exit:
-	return ret;
-}
-
-/**
- * dp_link_is_video_pattern_valid() - validates the video pattern
- * @pattern: video pattern requested by the sink
- *
- * Returns true if the requested video pattern is supported.
- */
-static bool dp_link_is_video_pattern_valid(u32 pattern)
-{
-	switch (pattern) {
-	case DP_NO_TEST_PATTERN:
-	case DP_COLOR_RAMP:
-	case DP_BLACK_AND_WHITE_VERTICAL_LINES:
-	case DP_COLOR_SQUARE:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static char *dp_link_video_pattern_to_string(u32 test_video_pattern)
-{
-	switch (test_video_pattern) {
-	case DP_NO_TEST_PATTERN:
-		return DP_LINK_ENUM_STR(DP_NO_TEST_PATTERN);
-	case DP_COLOR_RAMP:
-		return DP_LINK_ENUM_STR(DP_COLOR_RAMP);
-	case DP_BLACK_AND_WHITE_VERTICAL_LINES:
-		return DP_LINK_ENUM_STR(DP_BLACK_AND_WHITE_VERTICAL_LINES);
-	case DP_COLOR_SQUARE:
-		return DP_LINK_ENUM_STR(DP_COLOR_SQUARE);
-	default:
-		return "unknown";
-	}
-}
-
-/**
- * dp_link_is_dynamic_range_valid() - validates the dynamic range
- * @bit_depth: the dynamic range value to be checked
- *
- * Returns true if the dynamic range value is supported.
- */
-static bool dp_link_is_dynamic_range_valid(u32 dr)
-{
-	switch (dr) {
-	case DP_DYNAMIC_RANGE_RGB_VESA:
-	case DP_DYNAMIC_RANGE_RGB_CEA:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static char *dp_link_dynamic_range_to_string(u32 dr)
-{
-	switch (dr) {
-	case DP_DYNAMIC_RANGE_RGB_VESA:
-		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA);
-	case DP_DYNAMIC_RANGE_RGB_CEA:
-		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA);
-	case DP_DYNAMIC_RANGE_UNKNOWN:
-	default:
-		return "unknown";
-	}
-}
-
-/**
- * dp_link_is_bit_depth_valid() - validates the bit depth requested
- * @bit_depth: bit depth requested by the sink
- *
- * Returns true if the requested bit depth is supported.
- */
-static bool dp_link_is_bit_depth_valid(u32 tbd)
-{
-	/* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
-	switch (tbd) {
-	case DP_TEST_BIT_DEPTH_6:
-	case DP_TEST_BIT_DEPTH_8:
-	case DP_TEST_BIT_DEPTH_10:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static char *dp_link_bit_depth_to_string(u32 tbd)
-{
-	switch (tbd) {
-	case DP_TEST_BIT_DEPTH_6:
-		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6);
-	case DP_TEST_BIT_DEPTH_8:
-		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8);
-	case DP_TEST_BIT_DEPTH_10:
-		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10);
-	case DP_TEST_BIT_DEPTH_UNKNOWN:
-	default:
-		return "unknown";
-	}
-}
-
-static int dp_link_parse_timing_params1(struct dp_link_private *link,
-	int const addr, int const len, u32 *val)
-{
-	u8 bp[2];
-	int rlen;
-
-	if (len < 2)
-		return -EINVAL;
-
-	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
-	if (rlen < len) {
-		pr_err("failed to read 0x%x\n", addr);
-		return -EINVAL;
-	}
-
-	*val = bp[1] | (bp[0] << 8);
-
-	return 0;
-}
-
-static int dp_link_parse_timing_params2(struct dp_link_private *link,
-	int const addr, int const len, u32 *val1, u32 *val2)
-{
-	u8 bp[2];
-	int rlen;
-
-	if (len < 2)
-		return -EINVAL;
-
-	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
-	if (rlen < len) {
-		pr_err("failed to read 0x%x\n", addr);
-		return -EINVAL;
-	}
-
-	*val1 = (bp[0] & BIT(7)) >> 7;
-	*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
-
-	return 0;
-}
-
-static int dp_link_parse_timing_params3(struct dp_link_private *link,
-	int const addr, u32 *val)
-{
-	u8 bp;
-	u32 len = 1;
-	int rlen;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
-	if (rlen < 1) {
-		pr_err("failed to read 0x%x\n", addr);
-		return -EINVAL;
-	}
-	*val = bp;
-
-	return 0;
-}
-
-/**
- * dp_parse_video_pattern_params() - parses video pattern parameters from DPCD
- * @link: Display Port Driver data
- *
- * Returns 0 if it successfully parses the video link pattern and the link
- * bit depth requested by the sink and, and if the values parsed are valid.
- */
-static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
-{
-	int ret = 0;
-	int rlen;
-	u8 bp;
-	u8 data;
-	u32 dyn_range;
-	int const param_len = 0x1;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_PATTERN,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read link video pattern\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-
-	if (!dp_link_is_video_pattern_valid(data)) {
-		pr_err("invalid link video pattern = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->dp_link.test_video.test_video_pattern = data;
-	pr_debug("link video pattern = 0x%x (%s)\n",
-		link->dp_link.test_video.test_video_pattern,
-		dp_link_video_pattern_to_string(
-			link->dp_link.test_video.test_video_pattern));
-
-	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_MISC0,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read link bit depth\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-
-	/* Dynamic Range */
-	dyn_range = (data & DP_TEST_DYNAMIC_RANGE_CEA) >> 3;
-	if (!dp_link_is_dynamic_range_valid(dyn_range)) {
-		pr_err("invalid link dynamic range = 0x%x\n", dyn_range);
-		ret = -EINVAL;
-		goto exit;
-	}
-	link->dp_link.test_video.test_dyn_range = dyn_range;
-	pr_debug("link dynamic range = 0x%x (%s)\n",
-		link->dp_link.test_video.test_dyn_range,
-		dp_link_dynamic_range_to_string(
-			link->dp_link.test_video.test_dyn_range));
-
-	/* Color bit depth */
-	data &= DP_TEST_BIT_DEPTH_MASK;
-	if (!dp_link_is_bit_depth_valid(data)) {
-		pr_err("invalid link bit depth = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->dp_link.test_video.test_bit_depth = data;
-	pr_debug("link bit depth = 0x%x (%s)\n",
-		link->dp_link.test_video.test_bit_depth,
-		dp_link_bit_depth_to_string(
-		link->dp_link.test_video.test_bit_depth));
-
-	/* resolution timing params */
-	ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
-			&link->dp_link.test_video.test_h_total);
-	if (ret) {
-		pr_err("failed to parse test_h_total (DP_TEST_H_TOTAL_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_H_TOTAL = %d\n", link->dp_link.test_video.test_h_total);
-
-	ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
-			&link->dp_link.test_video.test_v_total);
-	if (ret) {
-		pr_err("failed to parse test_v_total (DP_TEST_V_TOTAL_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_V_TOTAL = %d\n", link->dp_link.test_video.test_v_total);
-
-	ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
-			&link->dp_link.test_video.test_h_start);
-	if (ret) {
-		pr_err("failed to parse test_h_start (DP_TEST_H_START_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_H_START = %d\n", link->dp_link.test_video.test_h_start);
-
-	ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
-			&link->dp_link.test_video.test_v_start);
-	if (ret) {
-		pr_err("failed to parse test_v_start (DP_TEST_V_START_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_V_START = %d\n", link->dp_link.test_video.test_v_start);
-
-	ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
-			&link->dp_link.test_video.test_hsync_pol,
-			&link->dp_link.test_video.test_hsync_width);
-	if (ret) {
-		pr_err("failed to parse (DP_TEST_HSYNC_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_HSYNC_POL = %d\n",
-		link->dp_link.test_video.test_hsync_pol);
-	pr_debug("TEST_HSYNC_WIDTH = %d\n",
-		link->dp_link.test_video.test_hsync_width);
-
-	ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
-			&link->dp_link.test_video.test_vsync_pol,
-			&link->dp_link.test_video.test_vsync_width);
-	if (ret) {
-		pr_err("failed to parse (DP_TEST_VSYNC_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_VSYNC_POL = %d\n",
-		link->dp_link.test_video.test_vsync_pol);
-	pr_debug("TEST_VSYNC_WIDTH = %d\n",
-		link->dp_link.test_video.test_vsync_width);
-
-	ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
-			&link->dp_link.test_video.test_h_width);
-	if (ret) {
-		pr_err("failed to parse test_h_width (DP_TEST_H_WIDTH_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_H_WIDTH = %d\n", link->dp_link.test_video.test_h_width);
-
-	ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
-			&link->dp_link.test_video.test_v_height);
-	if (ret) {
-		pr_err("failed to parse test_v_height (DP_TEST_V_HEIGHT_HI)\n");
-		goto exit;
-	}
-	pr_debug("TEST_V_HEIGHT = %d\n",
-		link->dp_link.test_video.test_v_height);
-
-	ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
-		&link->dp_link.test_video.test_rr_d);
-	link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
-	if (ret) {
-		pr_err("failed to parse test_rr_d (DP_TEST_MISC1)\n");
-		goto exit;
-	}
-	pr_debug("TEST_REFRESH_DENOMINATOR = %d\n",
-		link->dp_link.test_video.test_rr_d);
-
-	ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
-		&link->dp_link.test_video.test_rr_n);
-	if (ret) {
-		pr_err("failed to parse test_rr_n (DP_TEST_REFRESH_RATE_NUMERATOR)\n");
-		goto exit;
-	}
-	pr_debug("TEST_REFRESH_NUMERATOR = %d\n",
-		link->dp_link.test_video.test_rr_n);
-exit:
-	return ret;
-}
-
-/**
- * dp_link_parse_link_training_params() - parses link training parameters from
- * DPCD
- * @link: Display Port Driver data
- *
- * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
- * count (Byte 0x220), and if these values parse are valid.
- */
-static int dp_link_parse_link_training_params(struct dp_link_private *link)
-{
-	u8 bp;
-	u8 data;
-	int ret = 0;
-	int rlen;
-	int const param_len = 0x1;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read link rate\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-
-	if (!is_link_rate_valid(data)) {
-		pr_err("invalid link rate = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->request.test_link_rate = data;
-	pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read lane count\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-	data = bp;
-	data &= 0x1F;
-
-	if (!is_lane_count_valid(data)) {
-		pr_err("invalid lane count = 0x%x\n", data);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	link->request.test_lane_count = data;
-	pr_debug("lane count = 0x%x\n", link->request.test_lane_count);
-exit:
-	return ret;
-}
-
-static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel)
-{
-	switch (phy_test_pattern_sel) {
-	case DP_TEST_PHY_PATTERN_NONE:
-	case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING:
-	case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
-	case DP_TEST_PHY_PATTERN_PRBS7:
-	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-/**
- * dp_parse_phy_test_params() - parses the phy link parameters
- * @link: Display Port Driver data
- *
- * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being
- * requested.
- */
-static int dp_link_parse_phy_test_params(struct dp_link_private *link)
-{
-	u8 bp;
-	u8 data;
-	int rlen;
-	int const param_len = 0x1;
-	int ret = 0;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_PHY_PATTERN,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read phy link pattern\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	data = bp;
-
-	link->dp_link.phy_params.phy_test_pattern_sel = data;
-
-	pr_debug("phy_test_pattern_sel = %s\n",
-			dp_link_get_phy_test_pattern(data));
-
-	if (!dp_link_is_phy_test_pattern_supported(data))
-		ret = -EINVAL;
-end:
-	return ret;
-}
-
-/**
- * dp_link_is_video_audio_test_requested() - checks for audio/video link request
- * @link: link requested by the sink
- *
- * Returns true if the requested link is a permitted audio/video link.
- */
-static bool dp_link_is_video_audio_test_requested(u32 link)
-{
-	return (link == DP_TEST_LINK_VIDEO_PATTERN) ||
-		(link == (DP_TEST_LINK_AUDIO_PATTERN |
-		DP_TEST_LINK_VIDEO_PATTERN)) ||
-		(link == DP_TEST_LINK_AUDIO_PATTERN) ||
-		(link == (DP_TEST_LINK_AUDIO_PATTERN |
-		DP_TEST_LINK_AUDIO_DISABLED_VIDEO));
-}
-
-/**
- * dp_link_supported() - checks if link requested by sink is supported
- * @test_requested: link requested by the sink
- *
- * Returns true if the requested link is supported.
- */
-static bool dp_link_is_test_supported(u32 test_requested)
-{
-	return (test_requested == DP_TEST_LINK_TRAINING) ||
-		(test_requested == DP_TEST_LINK_EDID_READ) ||
-		(test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) ||
-		dp_link_is_video_audio_test_requested(test_requested);
-}
-
-static bool dp_link_is_test_edid_read(struct dp_link_private *link)
-{
-	return (link->request.test_requested == DP_TEST_LINK_EDID_READ);
-}
-
-/**
- * dp_sink_parse_test_request() - parses link request parameters from sink
- * @link: Display Port Driver data
- *
- * Parses the DPCD to check if an automated link is requested (Byte 0x201),
- * and what type of link automation is being requested (Byte 0x218).
- */
-static int dp_link_parse_request(struct dp_link_private *link)
-{
-	int ret = 0;
-	u8 bp;
-	u8 data;
-	int rlen;
-	u32 const param_len = 0x1;
-
-	/**
-	 * Read the device service IRQ vector (Byte 0x201) to determine
-	 * whether an automated link has been requested by the sink.
-	 */
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
-		DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len);
-	if (rlen < param_len) {
-		pr_err("aux read failed\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	data = bp;
-
-	if (!(data & DP_AUTOMATED_TEST_REQUEST))
-		return 0;
-
-	/**
-	 * Read the link request byte (Byte 0x218) to determine what type
-	 * of automated link has been requested by the sink.
-	 */
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("aux read failed\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	data = bp;
-
-	if (!dp_link_is_test_supported(data)) {
-		pr_debug("link 0x%x not supported\n", data);
-		goto end;
-	}
-
-	link->request.test_requested = data;
-
-	if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
-		ret = dp_link_parse_phy_test_params(link);
-		if (ret)
-			goto end;
-		ret = dp_link_parse_link_training_params(link);
-	}
-
-	if (link->request.test_requested == DP_TEST_LINK_TRAINING)
-		ret = dp_link_parse_link_training_params(link);
-
-	if (dp_link_is_video_audio_test_requested(
-			link->request.test_requested)) {
-		ret = dp_link_parse_video_pattern_params(link);
-		if (ret)
-			goto end;
-
-		ret = dp_link_parse_audio_pattern_params(link);
-	}
-end:
-	/**
-	 * Send a DP_TEST_ACK if all link parameters are valid, otherwise send
-	 * a DP_TEST_NAK.
-	 */
-	if (ret) {
-		link->dp_link.test_response = DP_TEST_NAK;
-	} else {
-		if (!dp_link_is_test_edid_read(link))
-			link->dp_link.test_response = DP_TEST_ACK;
-		else
-			link->dp_link.test_response =
-				DP_TEST_EDID_CHECKSUM_WRITE;
-	}
-
-	return ret;
-}
-
-/**
- * dp_link_parse_sink_count() - parses the sink count
- *
- * Parses the DPCD to check if there is an update to the sink count
- * (Byte 0x200), and whether all the sink devices connected have Content
- * Protection enabled.
- */
-static int dp_link_parse_sink_count(struct dp_link *dp_link)
-{
-	int rlen;
-	int const param_len = 0x1;
-	struct dp_link_private *link = container_of(dp_link,
-			struct dp_link_private, dp_link);
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT,
-			&link->dp_link.sink_count.count, param_len);
-	if (rlen < param_len) {
-		pr_err("failed to read sink count\n");
-		return -EINVAL;
-	}
-
-	link->dp_link.sink_count.cp_ready =
-		link->dp_link.sink_count.count & DP_SINK_CP_READY;
-	/* BIT 7, BIT 5:0 */
-	link->dp_link.sink_count.count =
-		DP_GET_SINK_COUNT(link->dp_link.sink_count.count);
-
-	pr_debug("sink_count = 0x%x, cp_ready = 0x%x\n",
-		link->dp_link.sink_count.count,
-		link->dp_link.sink_count.cp_ready);
-	return 0;
-}
-
-static void dp_link_parse_sink_status_field(struct dp_link_private *link)
-{
-	int len = 0;
-
-	link->prev_sink_count = link->dp_link.sink_count.count;
-	dp_link_parse_sink_count(&link->dp_link);
-
-	len = drm_dp_dpcd_read_link_status(link->aux->drm_aux,
-		link->link_status);
-	if (len < DP_LINK_STATUS_SIZE)
-		pr_err("DP link status read failed\n");
-	dp_link_parse_request(link);
-}
-
-static bool dp_link_is_link_training_requested(struct dp_link_private *link)
-{
-	return (link->request.test_requested == DP_TEST_LINK_TRAINING);
-}
-
-/**
- * dp_link_process_link_training_request() - processes new training requests
- * @link: Display Port link data
- *
- * This function will handle new link training requests that are initiated by
- * the sink. In particular, it will update the requested lane count and link
- * link rate, and then trigger the link retraining procedure.
- *
- * The function will return 0 if a link training request has been processed,
- * otherwise it will return -EINVAL.
- */
-static int dp_link_process_link_training_request(struct dp_link_private *link)
-{
-	if (!dp_link_is_link_training_requested(link))
-		return -EINVAL;
-
-	pr_debug("%s link rate = 0x%x, lane count = 0x%x\n",
-			dp_link_get_test_name(DP_TEST_LINK_TRAINING),
-			link->request.test_link_rate,
-			link->request.test_lane_count);
-
-	link->dp_link.link_params.lane_count = link->request.test_lane_count;
-	link->dp_link.link_params.bw_code = link->request.test_link_rate;
-
-	return 0;
-}
-
-static void dp_link_send_test_response(struct dp_link *dp_link)
-{
-	struct dp_link_private *link = NULL;
-	u32 const response_len = 0x1;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_RESPONSE,
-			&dp_link->test_response, response_len);
-}
-
-static int dp_link_psm_config(struct dp_link *dp_link,
-	struct drm_dp_link *link_info, bool enable)
-{
-	struct dp_link_private *link = NULL;
-	int ret = 0;
-
-	if (!dp_link) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	if (enable)
-		ret = drm_dp_link_power_down(link->aux->drm_aux, link_info);
-	else
-		ret = drm_dp_link_power_up(link->aux->drm_aux, link_info);
-
-	if (ret)
-		pr_err("Failed to %s low power mode\n",
-			(enable ? "enter" : "exit"));
-
-	return ret;
-}
-
-static void dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
-{
-	struct dp_link_private *link = NULL;
-	u32 const response_len = 0x1;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_EDID_CHECKSUM,
-			&checksum, response_len);
-}
-
-static int dp_link_parse_vx_px(struct dp_link_private *link)
-{
-	u8 bp;
-	u8 data;
-	int const param_len = 0x1;
-	int ret = 0;
-	u32 v0, p0, v1, p1, v2, p2, v3, p3;
-	int rlen;
-
-	pr_debug("\n");
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE0_1,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed reading lanes 0/1\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	data = bp;
-
-	pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
-
-	v0 = data & 0x3;
-	data = data >> 2;
-	p0 = data & 0x3;
-	data = data >> 2;
-
-	v1 = data & 0x3;
-	data = data >> 2;
-	p1 = data & 0x3;
-	data = data >> 2;
-
-	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE2_3,
-			&bp, param_len);
-	if (rlen < param_len) {
-		pr_err("failed reading lanes 2/3\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	data = bp;
-
-	pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
-
-	v2 = data & 0x3;
-	data = data >> 2;
-	p2 = data & 0x3;
-	data = data >> 2;
-
-	v3 = data & 0x3;
-	data = data >> 2;
-	p3 = data & 0x3;
-	data = data >> 2;
-
-	pr_debug("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3);
-	pr_debug("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3);
-
-	/**
-	 * Update the voltage and pre-emphasis levels as per DPCD request
-	 * vector.
-	 */
-	pr_debug("Current: v_level = 0x%x, p_level = 0x%x\n",
-			link->dp_link.phy_params.v_level,
-			link->dp_link.phy_params.p_level);
-	pr_debug("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0);
-	link->dp_link.phy_params.v_level = v0;
-	link->dp_link.phy_params.p_level = p0;
-
-	pr_debug("Success\n");
-end:
-	return ret;
-}
-
-/**
- * dp_link_process_phy_test_pattern_request() - process new phy link requests
- * @link: Display Port Driver data
- *
- * This function will handle new phy link pattern requests that are initiated
- * by the sink. The function will return 0 if a phy link pattern has been
- * processed, otherwise it will return -EINVAL.
- */
-static int dp_link_process_phy_test_pattern_request(
-		struct dp_link_private *link)
-{
-	u32 test_link_rate = 0, test_lane_count = 0;
-
-	if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
-		pr_debug("no phy test\n");
-		return -EINVAL;
-	}
-
-	test_link_rate = link->request.test_link_rate;
-	test_lane_count = link->request.test_lane_count;
-
-	if (!is_link_rate_valid(test_link_rate) ||
-		!is_lane_count_valid(test_lane_count)) {
-		pr_err("Invalid params: link rate = 0x%x, lane count = 0x%x\n",
-				test_link_rate, test_lane_count);
-		return -EINVAL;
-	}
-
-	pr_debug("start\n");
-
-	pr_info("Current: bw_code = 0x%x, lane count = 0x%x\n",
-			link->dp_link.link_params.bw_code,
-			link->dp_link.link_params.lane_count);
-
-	pr_info("Requested: bw_code = 0x%x, lane count = 0x%x\n",
-			test_link_rate, test_lane_count);
-
-	link->dp_link.link_params.lane_count = link->request.test_lane_count;
-	link->dp_link.link_params.bw_code = link->request.test_link_rate;
-
-	dp_link_parse_vx_px(link);
-
-	pr_debug("end\n");
-
-	return 0;
-}
-
-static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
-	return link_status[r - DP_LANE0_1_STATUS];
-}
-
-/**
- * dp_link_process_link_status_update() - processes link status updates
- * @link: Display Port link module data
- *
- * This function will check for changes in the link status, e.g. clock
- * recovery done on all lanes, and trigger link training if there is a
- * failure/error on the link.
- *
- * The function will return 0 if the a link status update has been processed,
- * otherwise it will return -EINVAL.
- */
-static int dp_link_process_link_status_update(struct dp_link_private *link)
-{
-	if (!(get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
-		DP_LINK_STATUS_UPDATED) || /* link status updated */
-		(drm_dp_clock_recovery_ok(link->link_status,
-			link->dp_link.link_params.lane_count) &&
-	     drm_dp_channel_eq_ok(link->link_status,
-			link->dp_link.link_params.lane_count)))
-		return -EINVAL;
-
-	pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
-			drm_dp_clock_recovery_ok(link->link_status,
-			link->dp_link.link_params.lane_count),
-			drm_dp_clock_recovery_ok(link->link_status,
-			link->dp_link.link_params.lane_count));
-
-	return 0;
-}
-
-static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
-{
-	if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
-		DP_DOWNSTREAM_PORT_STATUS_CHANGED) /* port status changed */
-		return true;
-
-	if (link->prev_sink_count != link->dp_link.sink_count.count)
-		return true;
-
-	return false;
-}
-
-/**
- * dp_link_process_downstream_port_status_change() - process port status changes
- * @link: Display Port Driver data
- *
- * This function will handle downstream port updates that are initiated by
- * the sink. If the downstream port status has changed, the EDID is read via
- * AUX.
- *
- * The function will return 0 if a downstream port update has been
- * processed, otherwise it will return -EINVAL.
- */
-static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
-{
-	if (!dp_link_is_ds_port_status_changed(link))
-		return -EINVAL;
-
-	/* reset prev_sink_count */
-	link->prev_sink_count = link->dp_link.sink_count.count;
-
-	return 0;
-}
-
-static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
-{
-	return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
-		&& !(link->request.test_requested &
-		DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
-}
-
-static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
-{
-	return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
-}
-
-/**
- * dp_link_process_video_pattern_request() - process new video pattern request
- * @link: Display Port link module's data
- *
- * This function will handle a new video pattern request that are initiated by
- * the sink. This is acheieved by first sending a disconnect notification to
- * the sink followed by a subsequent connect notification to the user modules,
- * where it is expected that the user modules would draw the required link
- * pattern.
- */
-static int dp_link_process_video_pattern_request(struct dp_link_private *link)
-{
-	if (!dp_link_is_video_pattern_requested(link))
-		goto end;
-
-	pr_debug("%s: bit depth=%d(%d bpp) pattern=%s\n",
-		dp_link_get_test_name(DP_TEST_LINK_VIDEO_PATTERN),
-		link->dp_link.test_video.test_bit_depth,
-		dp_link_bit_depth_to_bpp(
-		link->dp_link.test_video.test_bit_depth),
-		dp_link_video_pattern_to_string(
-			link->dp_link.test_video.test_video_pattern));
-
-	return 0;
-end:
-	return -EINVAL;
-}
-
-/**
- * dp_link_process_audio_pattern_request() - process new audio pattern request
- * @link: Display Port link module data
- *
- * This function will handle a new audio pattern request that is initiated by
- * the sink. This is acheieved by sending the necessary secondary data packets
- * to the sink. It is expected that any simulatenous requests for video
- * patterns will be handled before the audio pattern is sent to the sink.
- */
-static int dp_link_process_audio_pattern_request(struct dp_link_private *link)
-{
-	if (!dp_link_is_audio_pattern_requested(link))
-		return -EINVAL;
-
-	pr_debug("sampling_rate=%s, channel_count=%d, pattern_type=%s\n",
-		dp_link_get_audio_sample_rate(
-			link->dp_link.test_audio.test_audio_sampling_rate),
-		link->dp_link.test_audio.test_audio_channel_count,
-		dp_link_get_audio_test_pattern(
-			link->dp_link.test_audio.test_audio_pattern_type));
-
-	pr_debug("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n",
-		link->dp_link.test_audio.test_audio_period_ch_1,
-		link->dp_link.test_audio.test_audio_period_ch_2,
-		link->dp_link.test_audio.test_audio_period_ch_3,
-		link->dp_link.test_audio.test_audio_period_ch_4);
-
-	pr_debug("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n",
-		link->dp_link.test_audio.test_audio_period_ch_5,
-		link->dp_link.test_audio.test_audio_period_ch_6,
-		link->dp_link.test_audio.test_audio_period_ch_7,
-		link->dp_link.test_audio.test_audio_period_ch_8);
-
-	return 0;
-}
-
-static void dp_link_reset_data(struct dp_link_private *link)
-{
-	link->request = (const struct dp_link_request){ 0 };
-	link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
-	link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
-	link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
-	link->dp_link.phy_params.phy_test_pattern_sel = 0;
-	link->dp_link.sink_request = 0;
-	link->dp_link.test_response = 0;
-}
-
-/**
- * dp_link_process_request() - handle HPD IRQ transition to HIGH
- * @link: pointer to link module data
- *
- * This function will handle the HPD IRQ state transitions from LOW to HIGH
- * (including cases when there are back to back HPD IRQ HIGH) indicating
- * the start of a new link training request or sink status update.
- */
-static int dp_link_process_request(struct dp_link *dp_link)
-{
-	int ret = 0;
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	dp_link_reset_data(link);
-
-	dp_link_parse_sink_status_field(link);
-
-	if (dp_link_is_test_edid_read(link)) {
-		dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
-		goto exit;
-	}
-
-	ret = dp_link_process_ds_port_status_change(link);
-	if (!ret) {
-		dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
-		goto exit;
-	}
-
-	ret = dp_link_process_link_training_request(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_TEST_LINK_TRAINING;
-		goto exit;
-	}
-
-	ret = dp_link_process_phy_test_pattern_request(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
-		goto exit;
-	}
-
-	ret = dp_link_process_link_status_update(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
-		goto exit;
-	}
-
-	ret = dp_link_process_video_pattern_request(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
-		goto exit;
-	}
-
-	ret = dp_link_process_audio_pattern_request(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
-		goto exit;
-	}
-
-	pr_debug("no test requested\n");
-	return ret;
-exit:
-	/*
-	 * log this as it can be a use initiated action to run a DP CTS
-	 * test or in normal cases, sink has encountered a problem and
-	 * and want source to redo some part of initialization which can
-	 * be helpful in debugging.
-	 */
-	pr_info("test requested: %s\n",
-		dp_link_get_test_name(dp_link->sink_request));
-	return 0;
-}
-
-static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
-{
-	u32 cc;
-	enum dynamic_range dr;
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	/* unless a video pattern CTS test is ongoing, use CEA_VESA */
-	if (dp_link_is_video_pattern_requested(link))
-		dr = link->dp_link.test_video.test_dyn_range;
-	else
-		dr = DP_DYNAMIC_RANGE_RGB_VESA;
-
-	/* Only RGB_VESA nd RGB_CEA supported for now */
-	switch (dr) {
-	case DP_DYNAMIC_RANGE_RGB_CEA:
-		cc = BIT(3);
-		break;
-	case DP_DYNAMIC_RANGE_RGB_VESA:
-	default:
-		cc = 0;
-	}
-
-	return cc;
-}
-
-static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
-{
-	int i;
-	int max = 0;
-	u8 data;
-	struct dp_link_private *link;
-	u8 buf[8] = {0}, offset = 0;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	/* use the max level across lanes */
-	for (i = 0; i < dp_link->link_params.lane_count; i++) {
-		data = drm_dp_get_adjust_request_voltage(link_status, i);
-		data >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
-
-		offset = i * 2;
-		if (offset < sizeof(buf))
-			buf[offset] = data;
-
-		if (max < data)
-			max = data;
-	}
-
-	dp_link->phy_params.v_level = max;
-
-	/* use the max level across lanes */
-	max = 0;
-	for (i = 0; i < dp_link->link_params.lane_count; i++) {
-		data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
-		data >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
-
-		offset = (i * 2) + 1;
-		if (offset < sizeof(buf))
-			buf[offset] = data;
-
-		if (max < data)
-			max = data;
-	}
-
-	dp_link->phy_params.p_level = max;
-
-	print_hex_dump(KERN_DEBUG, "[drm-dp] Req (VxPx): ",
-		DUMP_PREFIX_NONE, 8, 2, buf, sizeof(buf), false);
-
-	/**
-	 * Adjust the voltage swing and pre-emphasis level combination to within
-	 * the allowable range.
-	 */
-	if (dp_link->phy_params.v_level > DP_LINK_VOLTAGE_MAX)
-		dp_link->phy_params.v_level = DP_LINK_VOLTAGE_MAX;
-
-	if (dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_MAX)
-		dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_MAX;
-
-	if ((dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1)
-		&& (dp_link->phy_params.v_level == DP_LINK_VOLTAGE_LEVEL_2))
-		dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1;
-
-	pr_debug("Set (VxPx): %x%x\n",
-		dp_link->phy_params.v_level, dp_link->phy_params.p_level);
-
-	return 0;
-}
-
-static int dp_link_send_psm_request(struct dp_link *dp_link, bool req)
-{
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	return 0;
-}
-
-static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
-{
-	u32 tbd;
-
-	/*
-	 * Few simplistic rules and assumptions made here:
-	 *    1. Test bit depth is bit depth per color component
-	 *    2. Assume 3 color components
-	 */
-	switch (bpp) {
-	case 18:
-		tbd = DP_TEST_BIT_DEPTH_6;
-		break;
-	case 24:
-		tbd = DP_TEST_BIT_DEPTH_8;
-		break;
-	case 30:
-		tbd = DP_TEST_BIT_DEPTH_10;
-		break;
-	default:
-		tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
-		break;
-	}
-
-	if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
-		tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
-
-	return tbd;
-}
-
-struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
-{
-	int rc = 0;
-	struct dp_link_private *link;
-	struct dp_link *dp_link;
-
-	if (!dev || !aux) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
-	if (!link) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	link->dev   = dev;
-	link->aux   = aux;
-
-	dp_link = &link->dp_link;
-
-	dp_link->process_request        = dp_link_process_request;
-	dp_link->get_test_bits_depth    = dp_link_get_test_bits_depth;
-	dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
-	dp_link->adjust_levels          = dp_link_adjust_levels;
-	dp_link->send_psm_request       = dp_link_send_psm_request;
-	dp_link->send_test_response     = dp_link_send_test_response;
-	dp_link->psm_config             = dp_link_psm_config;
-	dp_link->send_edid_checksum     = dp_link_send_edid_checksum;
-
-	return dp_link;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_link_put(struct dp_link *dp_link)
-{
-	struct dp_link_private *link;
-
-	if (!dp_link)
-		return;
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	devm_kfree(link->dev, link);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
deleted file mode 100644
index 740cf01..0000000
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_LINK_H_
-#define _DP_LINK_H_
-
-#include "dp_aux.h"
-
-#define DS_PORT_STATUS_CHANGED 0x200
-#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
-#define DP_LINK_ENUM_STR(x)		#x
-
-enum dp_link_voltage_level {
-	DP_LINK_VOLTAGE_LEVEL_0	= 0,
-	DP_LINK_VOLTAGE_LEVEL_1	= 1,
-	DP_LINK_VOLTAGE_LEVEL_2	= 2,
-	DP_LINK_VOLTAGE_MAX	= DP_LINK_VOLTAGE_LEVEL_2,
-};
-
-enum dp_link_preemaphasis_level {
-	DP_LINK_PRE_EMPHASIS_LEVEL_0	= 0,
-	DP_LINK_PRE_EMPHASIS_LEVEL_1	= 1,
-	DP_LINK_PRE_EMPHASIS_LEVEL_2	= 2,
-	DP_LINK_PRE_EMPHASIS_MAX	= DP_LINK_PRE_EMPHASIS_LEVEL_2,
-};
-
-struct dp_link_sink_count {
-	u32 count;
-	bool cp_ready;
-};
-
-struct dp_link_test_video {
-	u32 test_video_pattern;
-	u32 test_bit_depth;
-	u32 test_dyn_range;
-	u32 test_h_total;
-	u32 test_v_total;
-	u32 test_h_start;
-	u32 test_v_start;
-	u32 test_hsync_pol;
-	u32 test_hsync_width;
-	u32 test_vsync_pol;
-	u32 test_vsync_width;
-	u32 test_h_width;
-	u32 test_v_height;
-	u32 test_rr_d;
-	u32 test_rr_n;
-};
-
-struct dp_link_test_audio {
-	u32 test_audio_sampling_rate;
-	u32 test_audio_channel_count;
-	u32 test_audio_pattern_type;
-	u32 test_audio_period_ch_1;
-	u32 test_audio_period_ch_2;
-	u32 test_audio_period_ch_3;
-	u32 test_audio_period_ch_4;
-	u32 test_audio_period_ch_5;
-	u32 test_audio_period_ch_6;
-	u32 test_audio_period_ch_7;
-	u32 test_audio_period_ch_8;
-};
-
-struct dp_link_hdcp_status {
-	int hdcp_state;
-	int hdcp_version;
-};
-
-struct dp_link_phy_params {
-	u32 phy_test_pattern_sel;
-	u8 v_level;
-	u8 p_level;
-};
-
-struct dp_link_params {
-	u32 lane_count;
-	u32 bw_code;
-};
-
-static inline char *dp_link_get_test_name(u32 test_requested)
-{
-	switch (test_requested) {
-	case DP_TEST_LINK_TRAINING:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
-	case DP_TEST_LINK_VIDEO_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
-	case DP_TEST_LINK_EDID_READ:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
-	case DP_TEST_LINK_PHY_TEST_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
-	case DP_TEST_LINK_AUDIO_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
-	case DS_PORT_STATUS_CHANGED:
-		return DP_LINK_ENUM_STR(DS_PORT_STATUS_CHANGED);
-	case DP_LINK_STATUS_UPDATED:
-		return DP_LINK_ENUM_STR(DP_LINK_STATUS_UPDATED);
-	default:
-		return "unknown";
-	}
-}
-
-struct dp_link {
-	u32 sink_request;
-	u32 test_response;
-
-	struct dp_link_sink_count sink_count;
-	struct dp_link_test_video test_video;
-	struct dp_link_test_audio test_audio;
-	struct dp_link_phy_params phy_params;
-	struct dp_link_params link_params;
-	struct dp_link_hdcp_status hdcp_status;
-
-	u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
-	int (*process_request)(struct dp_link *dp_link);
-	int (*get_colorimetry_config)(struct dp_link *dp_link);
-	int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
-	int (*send_psm_request)(struct dp_link *dp_link, bool req);
-	void (*send_test_response)(struct dp_link *dp_link);
-	int (*psm_config)(struct dp_link *dp_link,
-		struct drm_dp_link *link_info, bool enable);
-	void (*send_edid_checksum)(struct dp_link *dp_link, u8 checksum);
-};
-
-static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
-{
-	switch (phy_test_pattern_sel) {
-	case DP_TEST_PHY_PATTERN_NONE:
-		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_NONE);
-	case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING:
-		return DP_LINK_ENUM_STR(
-			DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING);
-	case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
-		return DP_LINK_ENUM_STR(
-			DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
-	case DP_TEST_PHY_PATTERN_PRBS7:
-		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_PRBS7);
-	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
-		return DP_LINK_ENUM_STR(
-			DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN);
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
-		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_1);
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_2:
-		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_2);
-	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
-		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_3);
-	default:
-		return "unknown";
-	}
-}
-
-/**
- * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp
- * @tbd: test bit depth
- *
- * Returns the bits per pixel (bpp) to be used corresponding to the
- * git bit depth value. This function assumes that bit depth has
- * already been validated.
- */
-static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
-{
-	u32 bpp;
-
-	/*
-	 * Few simplistic rules and assumptions made here:
-	 *    1. Bit depth is per color component
-	 *    2. If bit depth is unknown return 0
-	 *    3. Assume 3 color components
-	 */
-	switch (tbd) {
-	case DP_TEST_BIT_DEPTH_6:
-		bpp = 18;
-		break;
-	case DP_TEST_BIT_DEPTH_8:
-		bpp = 24;
-		break;
-	case DP_TEST_BIT_DEPTH_10:
-		bpp = 30;
-		break;
-	case DP_TEST_BIT_DEPTH_UNKNOWN:
-	default:
-		bpp = 0;
-	}
-
-	return bpp;
-}
-
-/**
- * dp_link_get() - get the functionalities of dp test module
- *
- *
- * return: a pointer to dp_link struct
- */
-struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
-
-/**
- * dp_link_put() - releases the dp test module's resources
- *
- * @dp_link: an instance of dp_link module
- *
- */
-void dp_link_put(struct dp_link *dp_link);
-
-#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_lphw_hpd.c b/drivers/gpu/drm/msm/dp/dp_lphw_hpd.c
deleted file mode 100644
index 8fcb492..0000000
--- a/drivers/gpu/drm/msm/dp/dp_lphw_hpd.c
+++ /dev/null
@@ -1,422 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/sde_io_util.h>
-#include <linux/of_gpio.h>
-#include "dp_lphw_hpd.h"
-
-struct dp_lphw_hpd_private {
-	struct device *dev;
-	struct dp_hpd base;
-	struct dp_parser *parser;
-	struct dp_catalog_hpd *catalog;
-	struct dss_gpio gpio_cfg;
-	struct workqueue_struct *connect_wq;
-	struct delayed_work work;
-	struct work_struct connect;
-	struct work_struct disconnect;
-	struct work_struct attention;
-	struct dp_hpd_cb *cb;
-	int irq;
-	bool hpd;
-};
-
-static void dp_lphw_hpd_attention(struct work_struct *work)
-{
-	struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
-				struct dp_lphw_hpd_private, attention);
-
-	if (!lphw_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd->base.hpd_irq = true;
-
-	if (lphw_hpd->cb && lphw_hpd->cb->attention)
-		lphw_hpd->cb->attention(lphw_hpd->dev);
-}
-
-static void dp_lphw_hpd_connect(struct work_struct *work)
-{
-	struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
-				struct dp_lphw_hpd_private, connect);
-
-	if (!lphw_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd->base.hpd_high = true;
-	lphw_hpd->base.alt_mode_cfg_done = true;
-	lphw_hpd->base.hpd_irq = false;
-
-	if (lphw_hpd->cb && lphw_hpd->cb->configure)
-		lphw_hpd->cb->configure(lphw_hpd->dev);
-}
-
-static void dp_lphw_hpd_disconnect(struct work_struct *work)
-{
-	struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
-				struct dp_lphw_hpd_private, disconnect);
-
-	if (!lphw_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd->base.hpd_high = false;
-	lphw_hpd->base.alt_mode_cfg_done = false;
-	lphw_hpd->base.hpd_irq = false;
-
-	if (lphw_hpd->cb && lphw_hpd->cb->disconnect)
-		lphw_hpd->cb->disconnect(lphw_hpd->dev);
-}
-
-static irqreturn_t dp_tlmm_isr(int unused, void *data)
-{
-	struct dp_lphw_hpd_private *lphw_hpd = data;
-	bool hpd;
-
-	if (!lphw_hpd)
-		return IRQ_NONE;
-
-	/*
-	 * According to the DP spec, HPD high event can be confirmed only after
-	 * the HPD line has een asserted continuously for more than 100ms
-	 */
-	usleep_range(99000, 100000);
-
-	hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio);
-
-	pr_debug("lphw_hpd state = %d, new hpd state = %d\n",
-			lphw_hpd->hpd, hpd);
-	if (!lphw_hpd->hpd && hpd) {
-		lphw_hpd->hpd = true;
-		queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static void dp_lphw_hpd_host_init(struct dp_hpd *dp_hpd,
-		struct dp_catalog_hpd *catalog)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, true);
-
-	/*
-	 * Changing the gpio function to dp controller for the hpd line is not
-	 * stopping the tlmm interrupts generation on function 0.
-	 * So, as an additional step, disable the gpio interrupt irq also
-	 */
-	disable_irq(lphw_hpd->irq);
-}
-
-static void dp_lphw_hpd_host_deinit(struct dp_hpd *dp_hpd,
-		struct dp_catalog_hpd *catalog)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	/* Enable the tlmm interrupt irq which was disabled in host_init */
-	enable_irq(lphw_hpd->irq);
-
-	lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, false);
-}
-
-static void dp_lphw_hpd_isr(struct dp_hpd *dp_hpd)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-	u32 isr = 0;
-	int rc = 0;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	isr = lphw_hpd->catalog->get_interrupt(lphw_hpd->catalog);
-
-	if (isr & DP_HPD_UNPLUG_INT_STATUS) { /* disconnect interrupt */
-
-		pr_debug("disconnect interrupt, hpd isr state: 0x%x\n", isr);
-
-		if (lphw_hpd->base.hpd_high) {
-			lphw_hpd->hpd = false;
-			lphw_hpd->base.hpd_high = false;
-			lphw_hpd->base.alt_mode_cfg_done = false;
-			lphw_hpd->base.hpd_irq = false;
-
-			rc = queue_work(lphw_hpd->connect_wq,
-					&lphw_hpd->disconnect);
-			if (!rc)
-				pr_debug("disconnect not queued\n");
-		} else {
-			pr_err("already disconnected\n");
-		}
-
-	} else if (isr & DP_IRQ_HPD_INT_STATUS) { /* attention interrupt */
-
-		pr_debug("hpd_irq interrupt, hpd isr state: 0x%x\n", isr);
-
-		rc = queue_work(lphw_hpd->connect_wq, &lphw_hpd->attention);
-		if (!rc)
-			pr_debug("attention not queued\n");
-	}
-}
-
-static int dp_lphw_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	lphw_hpd->base.hpd_high = hpd;
-	lphw_hpd->base.alt_mode_cfg_done = hpd;
-	lphw_hpd->base.hpd_irq = false;
-
-	if (!lphw_hpd->cb || !lphw_hpd->cb->configure ||
-			!lphw_hpd->cb->disconnect) {
-		pr_err("invalid callback\n");
-		return -EINVAL;
-	}
-
-	if (hpd)
-		lphw_hpd->cb->configure(lphw_hpd->dev);
-	else
-		lphw_hpd->cb->disconnect(lphw_hpd->dev);
-
-	return 0;
-}
-
-static int dp_lphw_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	lphw_hpd->base.hpd_irq = true;
-
-	if (lphw_hpd->cb && lphw_hpd->cb->attention)
-		lphw_hpd->cb->attention(lphw_hpd->dev);
-
-	return 0;
-}
-
-int dp_lphw_hpd_register(struct dp_hpd *dp_hpd)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-	int rc = 0;
-
-	if (!dp_hpd)
-		return -EINVAL;
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	lphw_hpd->hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio);
-
-	rc = devm_request_threaded_irq(lphw_hpd->dev, lphw_hpd->irq, NULL,
-		dp_tlmm_isr,
-		IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-		"dp-gpio-intp", lphw_hpd);
-	if (rc) {
-		pr_err("Failed to request INTP threaded IRQ: %d\n", rc);
-		return rc;
-	}
-	enable_irq_wake(lphw_hpd->irq);
-
-	if (lphw_hpd->hpd)
-		queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect);
-
-	return rc;
-}
-
-static void dp_lphw_hpd_deinit(struct dp_lphw_hpd_private *lphw_hpd)
-{
-	struct dp_parser *parser = lphw_hpd->parser;
-	int i = 0;
-
-	for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) {
-
-		if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name,
-					"hpd-pwr")) {
-			/* disable the hpd-pwr voltage regulator */
-			if (msm_dss_enable_vreg(
-				&parser->mp[DP_PHY_PM].vreg_config[i], 1,
-				false))
-				pr_err("hpd-pwr vreg not disabled\n");
-
-			break;
-		}
-	}
-}
-
-static void dp_lphw_hpd_init(struct dp_lphw_hpd_private *lphw_hpd)
-{
-	struct dp_pinctrl pinctrl = {0};
-	struct dp_parser *parser = lphw_hpd->parser;
-	int i = 0, rc = 0;
-
-	for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) {
-
-		if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name,
-					"hpd-pwr")) {
-			/* enable the hpd-pwr voltage regulator */
-			if (msm_dss_enable_vreg(
-				&parser->mp[DP_PHY_PM].vreg_config[i], 1,
-				true))
-				pr_err("hpd-pwr vreg not enabled\n");
-
-			break;
-		}
-	}
-
-	pinctrl.pin = devm_pinctrl_get(lphw_hpd->dev);
-
-	if (!IS_ERR_OR_NULL(pinctrl.pin)) {
-		pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin,
-						"mdss_dp_hpd_active");
-
-		if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) {
-			rc = pinctrl_select_state(pinctrl.pin,
-					pinctrl.state_hpd_active);
-			if (rc)
-				pr_err("failed to set hpd_active state\n");
-		}
-	}
-}
-
-static int dp_lphw_hpd_create_workqueue(struct dp_lphw_hpd_private *lphw_hpd)
-{
-	lphw_hpd->connect_wq = create_singlethread_workqueue("dp_lphw_work");
-	if (IS_ERR_OR_NULL(lphw_hpd->connect_wq)) {
-		pr_err("Error creating connect_wq\n");
-		return -EPERM;
-	}
-
-	INIT_WORK(&lphw_hpd->connect, dp_lphw_hpd_connect);
-	INIT_WORK(&lphw_hpd->disconnect, dp_lphw_hpd_disconnect);
-	INIT_WORK(&lphw_hpd->attention, dp_lphw_hpd_attention);
-
-	return 0;
-}
-
-struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
-	struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb)
-{
-	int rc = 0;
-	const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dev || !parser || !cb) {
-		pr_err("invalid device\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	lphw_hpd = devm_kzalloc(dev, sizeof(*lphw_hpd), GFP_KERNEL);
-	if (!lphw_hpd) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	lphw_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
-		hpd_gpio_name, 0);
-	if (!gpio_is_valid(lphw_hpd->gpio_cfg.gpio)) {
-		pr_err("%s gpio not specified\n", hpd_gpio_name);
-		rc = -EINVAL;
-		goto gpio_error;
-	}
-
-	strlcpy(lphw_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
-		sizeof(lphw_hpd->gpio_cfg.gpio_name));
-	lphw_hpd->gpio_cfg.value = 0;
-
-	rc = gpio_request(lphw_hpd->gpio_cfg.gpio,
-		lphw_hpd->gpio_cfg.gpio_name);
-	if (rc) {
-		pr_err("%s: failed to request gpio\n", hpd_gpio_name);
-		goto gpio_error;
-	}
-	gpio_direction_input(lphw_hpd->gpio_cfg.gpio);
-
-	lphw_hpd->dev = dev;
-	lphw_hpd->cb = cb;
-	lphw_hpd->irq = gpio_to_irq(lphw_hpd->gpio_cfg.gpio);
-
-	rc = dp_lphw_hpd_create_workqueue(lphw_hpd);
-	if (rc) {
-		pr_err("Failed to create a dp_hpd workqueue\n");
-		goto gpio_error;
-	}
-
-	lphw_hpd->parser = parser;
-	lphw_hpd->catalog = catalog;
-	lphw_hpd->base.isr = dp_lphw_hpd_isr;
-	lphw_hpd->base.host_init = dp_lphw_hpd_host_init;
-	lphw_hpd->base.host_deinit = dp_lphw_hpd_host_deinit;
-	lphw_hpd->base.simulate_connect = dp_lphw_hpd_simulate_connect;
-	lphw_hpd->base.simulate_attention = dp_lphw_hpd_simulate_attention;
-	lphw_hpd->base.register_hpd = dp_lphw_hpd_register;
-
-	dp_lphw_hpd_init(lphw_hpd);
-
-	return &lphw_hpd->base;
-
-gpio_error:
-	devm_kfree(dev, lphw_hpd);
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_lphw_hpd_put(struct dp_hpd *dp_hpd)
-{
-	struct dp_lphw_hpd_private *lphw_hpd;
-
-	if (!dp_hpd)
-		return;
-
-	lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
-
-	dp_lphw_hpd_deinit(lphw_hpd);
-	gpio_free(lphw_hpd->gpio_cfg.gpio);
-	devm_kfree(lphw_hpd->dev, lphw_hpd);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_lphw_hpd.h b/drivers/gpu/drm/msm/dp/dp_lphw_hpd.h
deleted file mode 100644
index 9779331..0000000
--- a/drivers/gpu/drm/msm/dp/dp_lphw_hpd.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_LPHW_HPD_H_
-#define _DP_LPHW_HPD_H_
-
-#include "dp_hpd.h"
-
-#define DP_HPD_PLUG_INT_STATUS		BIT(0)
-#define DP_IRQ_HPD_INT_STATUS		BIT(1)
-#define DP_HPD_REPLUG_INT_STATUS	BIT(2)
-#define DP_HPD_UNPLUG_INT_STATUS	BIT(3)
-
-/**
- * dp_lphw_hpd_get() - configure and get the DisplayPlot HPD module data
- *
- * @dev: device instance of the caller
- * return: pointer to allocated gpio hpd module data
- *
- * This function sets up the lphw hpd module
- */
-struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
-	struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
-
-/**
- * dp_lphw_hpd_put()
- *
- * Cleans up dp_hpd instance
- *
- * @hpd: instance of lphw_hpd
- */
-void dp_lphw_hpd_put(struct dp_hpd *hpd);
-
-#endif /* _DP_LPHW_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
deleted file mode 100644
index 508c6dc..0000000
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ /dev/null
@@ -1,1992 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp-mst]: %s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_mst_helper.h>
-#include <drm/drm_fixed.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "sde_connector.h"
-#include "dp_drm.h"
-
-#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-#define DP_MST_INFO_LOG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-
-#define MAX_DP_MST_DRM_ENCODERS		2
-#define MAX_DP_MST_DRM_BRIDGES		2
-#define HPD_STRING_SIZE			30
-
-struct dp_drm_mst_fw_helper_ops {
-	int (*calc_pbn_mode)(struct dp_display_mode *dp_mode);
-	int (*find_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, int pbn);
-	int (*atomic_find_vcpi_slots)(struct drm_atomic_state *state,
-				  struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port, int pbn);
-	bool (*allocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr,
-			      struct drm_dp_mst_port *port,
-			      int pbn, int slots);
-	int (*update_payload_part1)(struct drm_dp_mst_topology_mgr *mgr);
-	int (*check_act_status)(struct drm_dp_mst_topology_mgr *mgr);
-	int (*update_payload_part2)(struct drm_dp_mst_topology_mgr *mgr);
-	enum drm_connector_status (*detect_port)(
-		struct drm_connector *connector,
-		struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port);
-	struct edid *(*get_edid)(struct drm_connector *connector,
-		struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port);
-	int (*topology_mgr_set_mst)(struct drm_dp_mst_topology_mgr *mgr,
-		bool mst_state);
-	int (*atomic_release_vcpi_slots)(struct drm_atomic_state *state,
-				     struct drm_dp_mst_topology_mgr *mgr,
-				     int slots);
-	void (*get_vcpi_info)(struct drm_dp_mst_topology_mgr *mgr,
-		int vcpi, int *start_slot, int *num_slots);
-	void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr,
-			struct drm_dp_mst_port *port);
-	void (*deallocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr,
-			struct drm_dp_mst_port *port);
-};
-
-struct dp_mst_sim_port_data {
-	bool input_port;
-	u8 peer_device_type;
-	u8 port_number;
-	bool mcs;
-	bool ddps;
-	bool legacy_device_plug_status;
-	u8 dpcd_revision;
-	u8 peer_guid[16];
-	u8 num_sdp_streams;
-	u8 num_sdp_stream_sinks;
-};
-
-struct dp_mst_sim_mode {
-	bool mst_state;
-	struct edid *edid;
-	struct work_struct probe_work;
-	const struct drm_dp_mst_topology_cbs *cbs;
-	u32 port_cnt;
-};
-
-struct dp_mst_bridge {
-	struct drm_bridge base;
-	u32 id;
-
-	bool in_use;
-
-	struct dp_display *display;
-	struct drm_encoder *encoder;
-	bool encoder_active_sts;
-
-	struct drm_display_mode drm_mode;
-	struct dp_display_mode dp_mode;
-	struct drm_connector *connector;
-	struct drm_connector *old_connector;
-	void *dp_panel;
-	void *old_dp_panel;
-
-	int vcpi;
-	int pbn;
-	int num_slots;
-	int start_slot;
-
-	u32 fixed_port_num;
-	bool fixed_port_added;
-	struct drm_connector *fixed_connector;
-};
-
-struct dp_mst_private {
-	bool mst_initialized;
-	struct dp_mst_caps caps;
-	struct drm_dp_mst_topology_mgr mst_mgr;
-	struct dp_mst_bridge mst_bridge[MAX_DP_MST_DRM_BRIDGES];
-	struct dp_display *dp_display;
-	const struct dp_drm_mst_fw_helper_ops *mst_fw_cbs;
-	struct dp_mst_sim_mode simulator;
-	struct mutex mst_lock;
-	enum dp_drv_state state;
-	bool mst_session_state;
-};
-
-struct dp_mst_encoder_info_cache {
-	u8 cnt;
-	struct drm_encoder *mst_enc[MAX_DP_MST_DRM_BRIDGES];
-};
-
-#define to_dp_mst_bridge(x)     container_of((x), struct dp_mst_bridge, base)
-
-struct dp_mst_private dp_mst;
-struct dp_mst_encoder_info_cache dp_mst_enc_cache;
-
-/* DRM DP MST Framework simulator OPs */
-static void dp_mst_sim_add_port(struct dp_mst_private *mst,
-			struct dp_mst_sim_port_data *port_msg)
-{
-	struct drm_dp_mst_branch *mstb;
-	struct drm_dp_mst_port *port;
-
-	mstb = mst->mst_mgr.mst_primary;
-
-	port = kzalloc(sizeof(*port), GFP_KERNEL);
-	if (!port)
-		return;
-	kref_init(&port->kref);
-	port->parent = mstb;
-	port->port_num = port_msg->port_number;
-	port->mgr = mstb->mgr;
-	port->aux.name = dp_mst.caps.drm_aux->name;
-	port->aux.dev = mst->dp_display->drm_dev->dev;
-
-	port->pdt = port_msg->peer_device_type;
-	port->input = port_msg->input_port;
-	port->mcs = port_msg->mcs;
-	port->ddps = port_msg->ddps;
-	port->ldps = port_msg->legacy_device_plug_status;
-	port->dpcd_rev = port_msg->dpcd_revision;
-	port->num_sdp_streams = port_msg->num_sdp_streams;
-	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-
-	mutex_lock(&mstb->mgr->lock);
-	kref_get(&port->kref);
-	list_add(&port->next, &mstb->ports);
-	mutex_unlock(&mstb->mgr->lock);
-
-	/* use fixed pbn for simulator ports */
-	port->available_pbn = 2520;
-
-	if (!port->input) {
-		port->connector = (*mstb->mgr->cbs->add_connector)
-				(mstb->mgr, port, NULL);
-		if (!port->connector) {
-			/* remove it from the port list */
-			mutex_lock(&mstb->mgr->lock);
-			list_del(&port->next);
-			mutex_unlock(&mstb->mgr->lock);
-			goto put_port;
-		}
-		(*mstb->mgr->cbs->register_connector)(port->connector);
-	}
-
-put_port:
-	kref_put(&port->kref, NULL);
-}
-
-static void dp_mst_sim_link_probe_work(struct work_struct *work)
-{
-	struct dp_mst_sim_mode *sim;
-	struct dp_mst_private *mst;
-	struct dp_mst_sim_port_data port_data;
-	u8 cnt;
-
-	DP_MST_DEBUG("enter\n");
-	sim = container_of(work, struct dp_mst_sim_mode, probe_work);
-	mst = container_of(sim, struct dp_mst_private, simulator);
-
-	port_data.input_port = false;
-	port_data.peer_device_type = DP_PEER_DEVICE_SST_SINK;
-	port_data.mcs = false;
-	port_data.ddps = true;
-	port_data.legacy_device_plug_status = false;
-	port_data.dpcd_revision = 0;
-	port_data.num_sdp_streams = 0;
-	port_data.num_sdp_stream_sinks = 0;
-
-	for (cnt = 0; cnt < sim->port_cnt; cnt++) {
-		port_data.port_number = cnt;
-		dp_mst_sim_add_port(mst, &port_data);
-	}
-
-	mst->mst_mgr.cbs->hotplug(&mst->mst_mgr);
-	DP_MST_DEBUG("completed\n");
-}
-
-static int dp_mst_sim_no_action(struct drm_dp_mst_topology_mgr *mgr)
-{
-	return 0;
-}
-
-static int dp_mst_sim_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
-{
-	int i, j;
-	int cur_slots = 1;
-	struct drm_dp_payload req_payload;
-	struct drm_dp_mst_port *port;
-
-	mutex_lock(&mgr->payload_lock);
-	for (i = 0; i < mgr->max_payloads; i++) {
-		req_payload.start_slot = cur_slots;
-		if (mgr->proposed_vcpis[i]) {
-			port = container_of(mgr->proposed_vcpis[i],
-					struct drm_dp_mst_port, vcpi);
-			req_payload.num_slots =
-					mgr->proposed_vcpis[i]->num_slots;
-			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
-		} else {
-			port = NULL;
-			req_payload.num_slots = 0;
-		}
-
-		if (mgr->payloads[i].start_slot != req_payload.start_slot)
-			mgr->payloads[i].start_slot = req_payload.start_slot;
-
-		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
-			if (req_payload.num_slots) {
-				req_payload.payload_state = DP_PAYLOAD_LOCAL;
-				mgr->payloads[i].num_slots =
-						req_payload.num_slots;
-				mgr->payloads[i].vcpi = req_payload.vcpi;
-			} else if (mgr->payloads[i].num_slots) {
-				mgr->payloads[i].num_slots = 0;
-				mgr->payloads[i].payload_state =
-						DP_PAYLOAD_DELETE_LOCAL;
-				req_payload.payload_state =
-						mgr->payloads[i].payload_state;
-				mgr->payloads[i].start_slot = 0;
-			} else
-				req_payload.payload_state =
-					mgr->payloads[i].payload_state;
-
-			mgr->payloads[i].payload_state =
-				req_payload.payload_state;
-		}
-		cur_slots += req_payload.num_slots;
-	}
-
-	for (i = 0; i < mgr->max_payloads; i++) {
-		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
-			pr_debug("removing payload %d\n", i);
-			for (j = i; j < mgr->max_payloads - 1; j++) {
-				memcpy(&mgr->payloads[j],
-					&mgr->payloads[j + 1],
-					sizeof(struct drm_dp_payload));
-				mgr->proposed_vcpis[j] =
-					mgr->proposed_vcpis[j + 1];
-				if (mgr->proposed_vcpis[j] &&
-					mgr->proposed_vcpis[j]->num_slots) {
-					set_bit(j + 1, &mgr->payload_mask);
-				} else {
-					clear_bit(j + 1, &mgr->payload_mask);
-				}
-			}
-			memset(&mgr->payloads[mgr->max_payloads - 1], 0,
-					sizeof(struct drm_dp_payload));
-			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
-			clear_bit(mgr->max_payloads, &mgr->payload_mask);
-		}
-	}
-	mutex_unlock(&mgr->payload_lock);
-	return 0;
-}
-
-static int dp_mst_sim_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct drm_dp_mst_port *port;
-	int i;
-
-	mutex_lock(&mgr->payload_lock);
-	for (i = 0; i < mgr->max_payloads; i++) {
-
-		if (!mgr->proposed_vcpis[i])
-			continue;
-
-		port = container_of(mgr->proposed_vcpis[i],
-				struct drm_dp_mst_port, vcpi);
-
-		pr_debug("payload %d %d\n", i, mgr->payloads[i].payload_state);
-		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL)
-			mgr->payloads[i].payload_state = DP_PAYLOAD_REMOTE;
-		else if (mgr->payloads[i].payload_state ==
-				DP_PAYLOAD_DELETE_LOCAL)
-			mgr->payloads[i].payload_state = 0;
-	}
-	mutex_unlock(&mgr->payload_lock);
-	return 0;
-}
-
-static struct edid *dp_mst_sim_get_edid(struct drm_connector *connector,
-		struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port)
-{
-	struct dp_mst_private *mst = container_of(mgr,
-			struct dp_mst_private, mst_mgr);
-
-	return drm_edid_duplicate(mst->simulator.edid);
-}
-
-static int dp_mst_sim_topology_mgr_set_mst(
-		struct drm_dp_mst_topology_mgr *mgr,
-		bool mst_state)
-{
-	struct dp_mst_private *mst = container_of(mgr,
-			struct dp_mst_private, mst_mgr);
-
-	drm_dp_mst_topology_mgr_set_mst(mgr, mst_state);
-	if (mst_state)
-		queue_work(system_long_wq, &mst->simulator.probe_work);
-
-	mst->simulator.mst_state = mst_state;
-	return 0;
-}
-
-static void _dp_mst_get_vcpi_info(
-		struct drm_dp_mst_topology_mgr *mgr,
-		int vcpi, int *start_slot, int *num_slots)
-{
-	int i;
-
-	*start_slot = 0;
-	*num_slots = 0;
-
-	mutex_lock(&mgr->payload_lock);
-	for (i = 0; i < mgr->max_payloads; i++) {
-		if (mgr->payloads[i].vcpi == vcpi) {
-			*start_slot = mgr->payloads[i].start_slot;
-			*num_slots = mgr->payloads[i].num_slots;
-			break;
-		}
-	}
-	mutex_unlock(&mgr->payload_lock);
-
-	pr_info("vcpi_info. vcpi:%d, start_slot:%d, num_slots:%d\n",
-			vcpi, *start_slot, *num_slots);
-}
-
-static int dp_mst_calc_pbn_mode(struct dp_display_mode *dp_mode)
-{
-	int pbn, bpp;
-	bool dsc_en;
-	s64 pbn_fp;
-
-	dsc_en = dp_mode->timing.comp_info.comp_ratio ? true : false;
-	bpp = dsc_en ? dp_mode->timing.comp_info.dsc_info.bpp :
-		dp_mode->timing.bpp;
-
-	pbn = drm_dp_calc_pbn_mode(dp_mode->timing.pixel_clk_khz, bpp);
-	pbn_fp = drm_fixp_from_fraction(pbn, 1);
-
-	pr_debug("before overhead pbn:%d, bpp:%d\n", pbn, bpp);
-
-	if (dsc_en)
-		pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->dsc_overhead_fp);
-
-	if (dp_mode->fec_overhead_fp)
-		pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->fec_overhead_fp);
-
-	pbn = drm_fixp2int(pbn_fp);
-
-	pr_debug("after overhead pbn:%d, bpp:%d\n", pbn, bpp);
-	return pbn;
-}
-
-static const struct dp_drm_mst_fw_helper_ops drm_dp_mst_fw_helper_ops = {
-	.calc_pbn_mode             = dp_mst_calc_pbn_mode,
-	.find_vcpi_slots           = drm_dp_find_vcpi_slots,
-	.atomic_find_vcpi_slots    = drm_dp_atomic_find_vcpi_slots,
-	.allocate_vcpi             = drm_dp_mst_allocate_vcpi,
-	.update_payload_part1      = drm_dp_update_payload_part1,
-	.check_act_status          = drm_dp_check_act_status,
-	.update_payload_part2      = drm_dp_update_payload_part2,
-	.detect_port               = drm_dp_mst_detect_port,
-	.get_edid                  = drm_dp_mst_get_edid,
-	.topology_mgr_set_mst      = drm_dp_mst_topology_mgr_set_mst,
-	.get_vcpi_info             = _dp_mst_get_vcpi_info,
-	.atomic_release_vcpi_slots = drm_dp_atomic_release_vcpi_slots,
-	.reset_vcpi_slots          = drm_dp_mst_reset_vcpi_slots,
-	.deallocate_vcpi           = drm_dp_mst_deallocate_vcpi,
-};
-
-static const struct dp_drm_mst_fw_helper_ops drm_dp_sim_mst_fw_helper_ops = {
-	.calc_pbn_mode             = dp_mst_calc_pbn_mode,
-	.find_vcpi_slots           = drm_dp_find_vcpi_slots,
-	.atomic_find_vcpi_slots    = drm_dp_atomic_find_vcpi_slots,
-	.allocate_vcpi             = drm_dp_mst_allocate_vcpi,
-	.update_payload_part1      = dp_mst_sim_update_payload_part1,
-	.check_act_status          = dp_mst_sim_no_action,
-	.update_payload_part2      = dp_mst_sim_update_payload_part2,
-	.detect_port               = drm_dp_mst_detect_port,
-	.get_edid                  = dp_mst_sim_get_edid,
-	.topology_mgr_set_mst      = dp_mst_sim_topology_mgr_set_mst,
-	.get_vcpi_info             = _dp_mst_get_vcpi_info,
-	.atomic_release_vcpi_slots = drm_dp_atomic_release_vcpi_slots,
-	.reset_vcpi_slots          = drm_dp_mst_reset_vcpi_slots,
-	.deallocate_vcpi           = drm_dp_mst_deallocate_vcpi,
-};
-
-/* DP MST Bridge OPs */
-
-static int dp_mst_bridge_attach(struct drm_bridge *dp_bridge)
-{
-	struct dp_mst_bridge *bridge;
-
-	DP_MST_DEBUG("enter\n");
-
-	if (!dp_bridge) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	bridge = to_dp_mst_bridge(dp_bridge);
-
-	DP_MST_DEBUG("mst bridge [%d] attached\n", bridge->id);
-
-	return 0;
-}
-
-static bool dp_mst_bridge_mode_fixup(struct drm_bridge *drm_bridge,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	bool ret = true;
-	struct dp_display_mode dp_mode;
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-
-	DP_MST_DEBUG("enter\n");
-
-	if (!drm_bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		ret = false;
-		goto end;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		ret = false;
-		goto end;
-	}
-
-	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		ret = false;
-		goto end;
-	}
-
-	dp = bridge->display;
-
-	dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode);
-	convert_to_drm_mode(&dp_mode, adjusted_mode);
-
-	DP_MST_DEBUG("mst bridge [%d] mode:%s fixup\n", bridge->id, mode->name);
-end:
-	return ret;
-}
-
-static int _dp_mst_compute_config(struct drm_atomic_state *state,
-		struct dp_mst_private *mst, struct drm_connector *connector,
-		struct dp_display_mode *mode)
-{
-	int slots = 0, pbn;
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	int rc = 0;
-
-	DP_MST_DEBUG("enter\n");
-
-	pbn = mst->mst_fw_cbs->calc_pbn_mode(mode);
-
-	slots = mst->mst_fw_cbs->atomic_find_vcpi_slots(state,
-			&mst->mst_mgr, c_conn->mst_port, pbn);
-	if (slots < 0) {
-		pr_err("mst: failed to find vcpi slots. pbn:%d, slots:%d\n",
-				pbn, slots);
-		return slots;
-	}
-
-	DP_MST_DEBUG("exit\n");
-
-	return rc;
-}
-
-static void _dp_mst_update_timeslots(struct dp_mst_private *mst,
-		struct dp_mst_bridge *mst_bridge)
-{
-	int i;
-	struct dp_mst_bridge *dp_bridge;
-	int pbn, start_slot, num_slots;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		dp_bridge = &mst->mst_bridge[i];
-
-		pbn = 0;
-		start_slot = 0;
-		num_slots = 0;
-
-		if (dp_bridge->vcpi) {
-			mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr,
-					dp_bridge->vcpi,
-					&start_slot, &num_slots);
-			pbn = dp_bridge->pbn;
-		}
-
-		if (mst_bridge == dp_bridge)
-			dp_bridge->num_slots = num_slots;
-
-		mst->dp_display->set_stream_info(mst->dp_display,
-				dp_bridge->dp_panel,
-				dp_bridge->id, start_slot, num_slots, pbn,
-				dp_bridge->vcpi);
-
-		pr_info("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n",
-			dp_bridge->id, dp_bridge->vcpi,
-			start_slot, num_slots, pbn);
-	}
-}
-
-static void _dp_mst_update_single_timeslot(struct dp_mst_private *mst,
-		struct dp_mst_bridge *mst_bridge)
-{
-	int pbn = 0, start_slot = 0, num_slots = 0;
-
-	if (mst->state == PM_SUSPEND) {
-		if (mst_bridge->vcpi) {
-			mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr,
-					mst_bridge->vcpi,
-					&start_slot, &num_slots);
-			pbn = mst_bridge->pbn;
-		}
-
-		mst_bridge->num_slots = num_slots;
-
-		mst->dp_display->set_stream_info(mst->dp_display,
-				mst_bridge->dp_panel,
-				mst_bridge->id, start_slot, num_slots, pbn,
-				mst_bridge->vcpi);
-	}
-}
-
-static void _dp_mst_bridge_pre_enable_part1(struct dp_mst_bridge *dp_bridge)
-{
-	struct dp_display *dp_display = dp_bridge->display;
-	struct sde_connector *c_conn =
-		to_sde_connector(dp_bridge->connector);
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct drm_dp_mst_port *port = c_conn->mst_port;
-	bool ret;
-	int pbn, slots;
-
-	/* skip mst specific disable operations during suspend */
-	if (mst->state == PM_SUSPEND) {
-		_dp_mst_update_single_timeslot(mst, dp_bridge);
-		return;
-	}
-
-	pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_bridge->dp_mode);
-
-	slots = mst->mst_fw_cbs->find_vcpi_slots(&mst->mst_mgr, pbn);
-
-	pr_info("bridge:%d, pbn:%d, slots:%d\n", dp_bridge->id,
-			dp_bridge->pbn, dp_bridge->num_slots);
-
-	ret = mst->mst_fw_cbs->allocate_vcpi(&mst->mst_mgr,
-				       port, pbn, slots);
-	if (!ret) {
-		pr_err("mst: failed to allocate vcpi. bridge:%d\n",
-				dp_bridge->id);
-		return;
-	}
-
-	dp_bridge->vcpi = port->vcpi.vcpi;
-	dp_bridge->pbn = pbn;
-
-	ret = mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr);
-
-	_dp_mst_update_timeslots(mst, dp_bridge);
-}
-
-static void _dp_mst_bridge_pre_enable_part2(struct dp_mst_bridge *dp_bridge)
-{
-	struct dp_display *dp_display = dp_bridge->display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-
-	DP_MST_DEBUG("enter\n");
-
-	/* skip mst specific disable operations during suspend */
-	if (mst->state == PM_SUSPEND)
-		return;
-
-	mst->mst_fw_cbs->check_act_status(&mst->mst_mgr);
-
-	mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr);
-
-	DP_MST_DEBUG("mst bridge [%d] _pre enable part-2 complete\n",
-			dp_bridge->id);
-}
-
-static void _dp_mst_bridge_pre_disable_part1(struct dp_mst_bridge *dp_bridge)
-{
-	struct dp_display *dp_display = dp_bridge->display;
-	struct sde_connector *c_conn =
-		to_sde_connector(dp_bridge->connector);
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct drm_dp_mst_port *port = c_conn->mst_port;
-
-	DP_MST_DEBUG("enter\n");
-
-	/* skip mst specific disable operations during suspend */
-	if (mst->state == PM_SUSPEND) {
-		_dp_mst_update_single_timeslot(mst, dp_bridge);
-		return;
-	}
-
-	mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, port);
-
-	mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr);
-
-	_dp_mst_update_timeslots(mst, dp_bridge);
-
-	DP_MST_DEBUG("mst bridge [%d] _pre disable part-1 complete\n",
-			dp_bridge->id);
-}
-
-static void _dp_mst_bridge_pre_disable_part2(struct dp_mst_bridge *dp_bridge)
-{
-	struct dp_display *dp_display = dp_bridge->display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct sde_connector *c_conn =
-		to_sde_connector(dp_bridge->connector);
-	struct drm_dp_mst_port *port = c_conn->mst_port;
-
-	DP_MST_DEBUG("enter\n");
-
-	/* skip mst specific disable operations during suspend */
-	if (mst->state == PM_SUSPEND)
-		return;
-
-	mst->mst_fw_cbs->check_act_status(&mst->mst_mgr);
-
-	mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr);
-
-	mst->mst_fw_cbs->deallocate_vcpi(&mst->mst_mgr, port);
-
-	dp_bridge->vcpi = 0;
-	dp_bridge->pbn = 0;
-
-	DP_MST_DEBUG("mst bridge [%d] _pre disable part-2 complete\n",
-			dp_bridge->id);
-}
-
-static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-	struct dp_mst_private *mst;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	dp = bridge->display;
-
-	bridge->old_connector = NULL;
-	bridge->old_dp_panel = NULL;
-
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	mst = dp->dp_mst_prv_info;
-
-	mutex_lock(&mst->mst_lock);
-
-	/* By this point mode should have been validated through mode_fixup */
-	rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode);
-	if (rc) {
-		pr_err("[%d] failed to perform a mode set, rc=%d\n",
-		       bridge->id, rc);
-		goto end;
-	}
-
-	rc = dp->prepare(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display prepare failed, rc=%d\n",
-		       bridge->id, rc);
-		goto end;
-	}
-
-	_dp_mst_bridge_pre_enable_part1(bridge);
-
-	rc = dp->enable(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("[%d] DP display enable failed, rc=%d\n",
-		       bridge->id, rc);
-		dp->unprepare(dp, bridge->dp_panel);
-		goto end;
-	} else {
-		_dp_mst_bridge_pre_enable_part2(bridge);
-	}
-
-	DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n",
-			bridge->id, bridge->drm_mode.name,
-			bridge->drm_mode.vrefresh);
-	DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id,
-			bridge->dp_mode.timing.comp_info.comp_ratio);
-	DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n",
-			bridge->id, bridge->vcpi, bridge->start_slot,
-			bridge->num_slots);
-end:
-	mutex_unlock(&mst->mst_lock);
-}
-
-static void dp_mst_bridge_enable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	rc = dp->post_enable(dp, bridge->dp_panel);
-	if (rc) {
-		pr_err("mst bridge [%d] post enable failed, rc=%d\n",
-		       bridge->id, rc);
-		return;
-	}
-
-	DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n",
-			bridge->id);
-}
-
-static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-	struct dp_mst_private *mst;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	dp = bridge->display;
-
-	mst = dp->dp_mst_prv_info;
-
-	sde_connector_helper_bridge_disable(bridge->connector);
-
-	mutex_lock(&mst->mst_lock);
-
-	_dp_mst_bridge_pre_disable_part1(bridge);
-
-	rc = dp->pre_disable(dp, bridge->dp_panel);
-	if (rc)
-		pr_err("[%d] DP display pre disable failed, rc=%d\n",
-		       bridge->id, rc);
-
-	_dp_mst_bridge_pre_disable_part2(bridge);
-
-	DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id);
-
-	mutex_unlock(&mst->mst_lock);
-}
-
-static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge)
-{
-	int rc = 0;
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-	struct dp_mst_private *mst;
-
-	if (!drm_bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
-	}
-
-	dp = bridge->display;
-	mst = dp->dp_mst_prv_info;
-
-	rc = dp->disable(dp, bridge->dp_panel);
-	if (rc)
-		pr_info("[%d] DP display disable failed, rc=%d\n",
-		       bridge->id, rc);
-
-	rc = dp->unprepare(dp, bridge->dp_panel);
-	if (rc)
-		pr_info("[%d] DP display unprepare failed, rc=%d\n",
-		       bridge->id, rc);
-
-	/* maintain the connector to encoder link during suspend/resume */
-	if (mst->state != PM_SUSPEND) {
-		/* Disconnect the connector and panel info from bridge */
-		mst->mst_bridge[bridge->id].old_connector =
-				mst->mst_bridge[bridge->id].connector;
-		mst->mst_bridge[bridge->id].old_dp_panel =
-				mst->mst_bridge[bridge->id].dp_panel;
-		mst->mst_bridge[bridge->id].connector = NULL;
-		mst->mst_bridge[bridge->id].dp_panel = NULL;
-		mst->mst_bridge[bridge->id].encoder_active_sts = false;
-	}
-
-	DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n",
-			bridge->id);
-}
-
-static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	struct dp_mst_bridge *bridge;
-	struct dp_display *dp;
-
-	DP_MST_DEBUG("enter\n");
-
-	if (!drm_bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	bridge = to_dp_mst_bridge(drm_bridge);
-	if (!bridge->connector) {
-		if (!bridge->old_connector) {
-			pr_err("Invalid connector\n");
-			return;
-		}
-		bridge->connector = bridge->old_connector;
-		bridge->old_connector = NULL;
-	}
-
-	if (!bridge->dp_panel) {
-		if (!bridge->old_dp_panel) {
-			pr_err("Invalid dp_panel\n");
-			return;
-		}
-		bridge->dp_panel = bridge->old_dp_panel;
-		bridge->old_dp_panel = NULL;
-	}
-
-	dp = bridge->display;
-
-	memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
-	memcpy(&bridge->drm_mode, adjusted_mode, sizeof(bridge->drm_mode));
-	dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode,
-			&bridge->dp_mode);
-
-	DP_MST_DEBUG("mst bridge [%d] mode set complete\n", bridge->id);
-}
-
-/* DP MST Bridge APIs */
-
-static struct drm_connector *
-dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
-				struct drm_encoder *encoder);
-
-static const struct drm_bridge_funcs dp_mst_bridge_ops = {
-	.attach       = dp_mst_bridge_attach,
-	.mode_fixup   = dp_mst_bridge_mode_fixup,
-	.pre_enable   = dp_mst_bridge_pre_enable,
-	.enable       = dp_mst_bridge_enable,
-	.disable      = dp_mst_bridge_disable,
-	.post_disable = dp_mst_bridge_post_disable,
-	.mode_set     = dp_mst_bridge_mode_set,
-};
-
-int dp_mst_drm_bridge_init(void *data, struct drm_encoder *encoder)
-{
-	int rc = 0;
-	struct dp_mst_bridge *bridge = NULL;
-	struct drm_device *dev;
-	struct dp_display *display = data;
-	struct msm_drm_private *priv = NULL;
-	struct dp_mst_private *mst = display->dp_mst_prv_info;
-	int i;
-
-	if (!mst || !mst->mst_initialized) {
-		if (dp_mst_enc_cache.cnt >= MAX_DP_MST_DRM_BRIDGES) {
-			pr_info("exceeding max bridge cnt %d\n",
-					dp_mst_enc_cache.cnt);
-			return 0;
-		}
-
-		dp_mst_enc_cache.mst_enc[dp_mst_enc_cache.cnt] = encoder;
-		dp_mst_enc_cache.cnt++;
-		pr_info("mst not initialized. cache encoder information\n");
-		return 0;
-	}
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!mst->mst_bridge[i].in_use) {
-			bridge = &mst->mst_bridge[i];
-			bridge->encoder = encoder;
-			bridge->in_use = true;
-			bridge->id = i;
-			break;
-		}
-	}
-
-	if (i == MAX_DP_MST_DRM_BRIDGES) {
-		pr_err("mst supports only %d bridges\n", i);
-		rc = -EACCES;
-		goto end;
-	}
-
-	dev = display->drm_dev;
-	bridge->display = display;
-	bridge->base.funcs = &dp_mst_bridge_ops;
-	bridge->base.encoder = encoder;
-
-	priv = dev->dev_private;
-
-	rc = drm_bridge_attach(encoder, &bridge->base, NULL);
-	if (rc) {
-		pr_err("failed to attach bridge, rc=%d\n", rc);
-		goto end;
-	}
-
-	encoder->bridge = &bridge->base;
-	priv->bridges[priv->num_bridges++] = &bridge->base;
-
-	DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i);
-
-	/*
-	 * If fixed topology port is defined, connector will be created
-	 * immediately.
-	 */
-	rc = display->mst_get_fixed_topology_port(display, bridge->id,
-			&bridge->fixed_port_num);
-	if (!rc) {
-		bridge->fixed_connector =
-			dp_mst_drm_fixed_connector_init(display,
-				bridge->encoder);
-		if (bridge->fixed_connector == NULL) {
-			pr_err("failed to create fixed connector\n");
-			rc = -ENOMEM;
-			goto end;
-		}
-	}
-
-	return 0;
-
-end:
-	return rc;
-}
-
-void dp_mst_drm_bridge_deinit(void *display)
-{
-	DP_MST_DEBUG("mst bridge deinit\n");
-}
-
-/* DP MST Connector OPs */
-
-static enum drm_connector_status
-dp_mst_connector_detect(struct drm_connector *connector, bool force,
-		void *display)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	struct dp_display *dp_display = c_conn->display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	enum drm_connector_status status;
-	struct dp_mst_connector mst_conn;
-
-	DP_MST_DEBUG("enter:\n");
-
-	status = mst->mst_fw_cbs->detect_port(connector,
-			&mst->mst_mgr,
-			c_conn->mst_port);
-
-	memset(&mst_conn, 0, sizeof(mst_conn));
-	dp_display->mst_get_connector_info(dp_display, connector, &mst_conn);
-	if (mst_conn.conn == connector &&
-			mst_conn.state != connector_status_unknown) {
-		status = mst_conn.state;
-	}
-
-	DP_MST_DEBUG("mst connector:%d detect, status:%d\n",
-			connector->base.id, status);
-
-	DP_MST_DEBUG("exit:\n");
-
-	return status;
-}
-
-static int dp_mst_connector_get_modes(struct drm_connector *connector,
-		void *display)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct edid *edid;
-	int rc = 0;
-
-	DP_MST_DEBUG("enter:\n");
-
-	edid = mst->mst_fw_cbs->get_edid(connector, &mst->mst_mgr,
-			c_conn->mst_port);
-
-	if (edid)
-		rc = dp_display->mst_connector_update_edid(dp_display,
-				connector, edid);
-
-	DP_MST_DEBUG("mst connector get modes. id: %d\n", connector->base.id);
-
-	DP_MST_DEBUG("exit:\n");
-
-	return rc;
-}
-
-enum drm_mode_status dp_mst_connector_mode_valid(
-		struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *display)
-{
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst;
-	struct sde_connector *c_conn;
-	struct drm_dp_mst_port *mst_port;
-	struct dp_display_mode dp_mode;
-	uint16_t available_pbn, required_pbn;
-	int i, slots_in_use = 0, active_enc_cnt = 0;
-	int available_slots, required_slots;
-	const u32 tot_slots = 63;
-
-	if (!connector || !mode || !display) {
-		pr_err("invalid input\n");
-		return 0;
-	}
-
-	mst = dp_display->dp_mst_prv_info;
-	c_conn = to_sde_connector(connector);
-	mst_port = c_conn->mst_port;
-
-	mutex_lock(&mst->mst_lock);
-	available_pbn = mst_port->available_pbn;
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].encoder_active_sts &&
-			(mst->mst_bridge[i].connector != connector)) {
-			active_enc_cnt++;
-			slots_in_use += mst->mst_bridge[i].num_slots;
-		}
-	}
-	mutex_unlock(&mst->mst_lock);
-
-	if (active_enc_cnt < DP_STREAM_MAX)
-		available_slots = tot_slots - slots_in_use;
-	else {
-		pr_debug("all mst streams are active\n");
-		return MODE_BAD;
-	}
-
-	dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel,
-			mode, &dp_mode);
-
-	required_pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_mode);
-	required_slots = mst->mst_fw_cbs->find_vcpi_slots(
-			&mst->mst_mgr, required_pbn);
-
-	if (required_pbn > available_pbn || required_slots > available_slots) {
-		pr_debug("mode:%s not supported\n", mode->name);
-		return MODE_BAD;
-	}
-
-	return dp_connector_mode_valid(connector, mode, display);
-}
-
-int dp_mst_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info,
-		void *display)
-{
-	int rc;
-	enum drm_connector_status status = connector_status_unknown;
-
-	DP_MST_DEBUG("enter:\n");
-
-	rc = dp_connector_get_info(connector, info, display);
-
-	if (!rc) {
-		status = dp_mst_connector_detect(connector, false, display);
-
-		if (status == connector_status_connected)
-			info->is_connected = true;
-		else
-			info->is_connected = false;
-	}
-
-	DP_MST_DEBUG("mst connector:%d get info:%d, rc:%d\n",
-			connector->base.id, status, rc);
-
-	DP_MST_DEBUG("exit:\n");
-
-	return rc;
-}
-
-int dp_mst_connector_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display)
-{
-	int rc;
-
-	DP_MST_DEBUG("enter:\n");
-
-	rc = dp_connector_get_mode_info(connector, drm_mode, mode_info,
-			max_mixer_width, display);
-
-	DP_MST_DEBUG("mst connector:%d get mode info. rc:%d\n",
-			connector->base.id, rc);
-
-	DP_MST_DEBUG("exit:\n");
-
-	return rc;
-}
-
-static struct drm_encoder *
-dp_mst_atomic_best_encoder(struct drm_connector *connector,
-			void *display, struct drm_connector_state *state)
-{
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct sde_connector *conn = to_sde_connector(connector);
-	struct drm_encoder *enc = NULL;
-	u32 i;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].connector == connector) {
-			enc = mst->mst_bridge[i].encoder;
-			goto end;
-		}
-	}
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!mst->mst_bridge[i].encoder_active_sts &&
-			!mst->mst_bridge[i].fixed_connector) {
-			mst->mst_bridge[i].encoder_active_sts = true;
-			mst->mst_bridge[i].connector = connector;
-			mst->mst_bridge[i].dp_panel = conn->drv_panel;
-			enc = mst->mst_bridge[i].encoder;
-			break;
-		}
-	}
-
-end:
-	if (enc)
-		DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
-			connector->base.id, i);
-	else
-		DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
-				connector->base.id);
-
-	return enc;
-}
-
-static struct dp_mst_bridge *_dp_mst_get_bridge_from_encoder(
-		struct dp_display *dp_display,
-		struct drm_encoder *encoder)
-{
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	int i;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].encoder == encoder)
-			return &mst->mst_bridge[i];
-	}
-
-	DP_MST_DEBUG("mst bridge detect for encoder failed\n");
-
-	return NULL;
-}
-
-static int dp_mst_connector_atomic_check(struct drm_connector *connector,
-		void *display, struct drm_connector_state *new_conn_state)
-{
-	int rc = 0, slots, i;
-	struct drm_atomic_state *state;
-	struct drm_connector_state *old_conn_state;
-	struct drm_crtc *old_crtc;
-	struct drm_crtc_state *crtc_state;
-	struct dp_mst_bridge *bridge = NULL;
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct sde_connector *c_conn;
-	struct dp_display_mode dp_mode;
-
-	DP_MST_DEBUG("enter:\n");
-
-	/*
-	 * Skip atomic check during mst suspend, to avoid mismanagement of
-	 * available vcpi slots.
-	 */
-	if (mst->state == PM_SUSPEND)
-		return rc;
-
-	if (!new_conn_state)
-		return rc;
-
-	mutex_lock(&mst->mst_lock);
-
-	state = new_conn_state->state;
-
-	old_conn_state = drm_atomic_get_old_connector_state(state, connector);
-	if (!old_conn_state)
-		goto mode_set;
-
-	old_crtc = old_conn_state->crtc;
-	if (!old_crtc)
-		goto mode_set;
-
-	crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		bridge = &mst->mst_bridge[i];
-		DP_MST_DEBUG("bridge id:%d, vcpi:%d, pbn:%d, slots:%d\n",
-				bridge->id, bridge->vcpi, bridge->pbn,
-				bridge->num_slots);
-	}
-
-	bridge = _dp_mst_get_bridge_from_encoder(dp_display,
-			old_conn_state->best_encoder);
-	if (!bridge)
-		goto end;
-
-	slots = bridge->num_slots;
-	if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
-		rc = mst->mst_fw_cbs->atomic_release_vcpi_slots(state,
-				&mst->mst_mgr, slots);
-		if (rc) {
-			pr_err("failed releasing %d vcpi slots rc:%d\n",
-					slots, rc);
-			goto end;
-		}
-	}
-
-mode_set:
-	if (!new_conn_state->crtc)
-		goto end;
-
-	crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
-
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-		c_conn = to_sde_connector(connector);
-
-		dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel,
-				&crtc_state->mode, &dp_mode);
-
-		slots = _dp_mst_compute_config(state, mst, connector, &dp_mode);
-		if (slots < 0) {
-			rc = slots;
-
-			/* Disconnect the conn and panel info from bridge */
-			bridge = _dp_mst_get_bridge_from_encoder(dp_display,
-						new_conn_state->best_encoder);
-			if (!bridge)
-				goto end;
-
-			bridge->connector = NULL;
-			bridge->dp_panel = NULL;
-			bridge->encoder_active_sts = false;
-		}
-	}
-
-end:
-	mutex_unlock(&mst->mst_lock);
-	DP_MST_DEBUG("mst connector:%d atomic check\n", connector->base.id);
-	return rc;
-}
-
-static int dp_mst_connector_config_hdr(struct drm_connector *connector,
-		void *display, struct sde_connector_state *c_state)
-{
-	int rc;
-
-	DP_MST_DEBUG("enter:\n");
-
-	rc = dp_connector_config_hdr(connector, display, c_state);
-
-	DP_MST_DEBUG("mst connector:%d cfg hdr. rc:%d\n",
-			connector->base.id, rc);
-
-	DP_MST_DEBUG("exit:\n");
-
-	return rc;
-}
-
-static void dp_mst_connector_pre_destroy(struct drm_connector *connector,
-		void *display)
-{
-	struct dp_display *dp_display = display;
-
-	DP_MST_DEBUG("enter:\n");
-	dp_display->mst_connector_uninstall(dp_display, connector);
-	DP_MST_DEBUG("exit:\n");
-}
-
-/* DRM MST callbacks */
-
-static struct drm_connector *
-dp_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port, const char *pathprop)
-{
-	static const struct sde_connector_ops dp_mst_connector_ops = {
-		.post_init  = NULL,
-		.detect     = dp_mst_connector_detect,
-		.get_modes  = dp_mst_connector_get_modes,
-		.mode_valid = dp_mst_connector_mode_valid,
-		.get_info   = dp_mst_connector_get_info,
-		.get_mode_info  = dp_mst_connector_get_mode_info,
-		.atomic_best_encoder = dp_mst_atomic_best_encoder,
-		.atomic_check = dp_mst_connector_atomic_check,
-		.config_hdr = dp_mst_connector_config_hdr,
-		.pre_destroy = dp_mst_connector_pre_destroy,
-		.update_pps = dp_connector_update_pps,
-	};
-	struct dp_mst_private *dp_mst;
-	struct drm_device *dev;
-	struct dp_display *dp_display;
-	struct drm_connector *connector;
-	struct sde_connector *c_conn;
-	int rc, i;
-
-	DP_MST_DEBUG("enter\n");
-
-	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
-
-	dp_display = dp_mst->dp_display;
-	dev = dp_display->drm_dev;
-
-	/* make sure connector is not accessed before reset */
-	drm_modeset_lock_all(dev);
-
-	connector = sde_connector_init(dev,
-				dp_mst->mst_bridge[0].encoder,
-				NULL,
-				dp_display,
-				&dp_mst_connector_ops,
-				DRM_CONNECTOR_POLL_HPD,
-				DRM_MODE_CONNECTOR_DisplayPort);
-
-	if (!connector) {
-		pr_err("mst sde_connector_init failed\n");
-		drm_modeset_unlock_all(dev);
-		return connector;
-	}
-
-	rc = dp_display->mst_connector_install(dp_display, connector);
-	if (rc) {
-		pr_err("mst connector install failed\n");
-		sde_connector_destroy(connector);
-		drm_modeset_unlock_all(dev);
-		return NULL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_conn->mst_port = port;
-
-	if (connector->funcs->reset)
-		connector->funcs->reset(connector);
-
-	for (i = 1; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		drm_connector_attach_encoder(connector,
-				dp_mst->mst_bridge[i].encoder);
-	}
-
-	drm_object_attach_property(&connector->base,
-			dev->mode_config.path_property, 0);
-	drm_object_attach_property(&connector->base,
-			dev->mode_config.tile_property, 0);
-
-	/* unlock connector and make it accessible */
-	drm_modeset_unlock_all(dev);
-
-	DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id);
-
-	return connector;
-}
-
-static void dp_mst_register_connector(struct drm_connector *connector)
-{
-	DP_MST_DEBUG("enter\n");
-
-	connector->status = connector->funcs->detect(connector, false);
-
-	DP_MST_INFO_LOG("register mst connector id:%d\n",
-			connector->base.id);
-	drm_connector_register(connector);
-}
-
-static void dp_mst_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
-					   struct drm_connector *connector)
-{
-	DP_MST_DEBUG("enter\n");
-
-	DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id);
-
-	drm_connector_unregister(connector);
-	drm_connector_put(connector);
-}
-
-static enum drm_connector_status
-dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force,
-			void *display)
-{
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	int i;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].fixed_connector != connector)
-			continue;
-
-		if (!mst->mst_bridge[i].fixed_port_added)
-			break;
-
-		return dp_mst_connector_detect(connector, force, display);
-	}
-
-	return connector_status_disconnected;
-}
-
-static struct drm_encoder *
-dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector,
-			void *display, struct drm_connector_state *state)
-{
-	struct dp_display *dp_display = display;
-	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
-	struct sde_connector *conn = to_sde_connector(connector);
-	struct drm_encoder *enc = NULL;
-	u32 i;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].connector == connector) {
-			enc = mst->mst_bridge[i].encoder;
-			goto end;
-		}
-	}
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (mst->mst_bridge[i].fixed_connector == connector) {
-			mst->mst_bridge[i].encoder_active_sts = true;
-			mst->mst_bridge[i].connector = connector;
-			mst->mst_bridge[i].dp_panel = conn->drv_panel;
-			enc = mst->mst_bridge[i].encoder;
-			break;
-		}
-	}
-
-end:
-	if (enc)
-		DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
-			connector->base.id, i);
-	else
-		DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
-				connector->base.id);
-
-	return enc;
-}
-
-static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb,
-		struct drm_dp_mst_port *target)
-{
-	struct drm_dp_mst_port *port;
-	u32 port_num = 0;
-
-	/*
-	 * search through reversed order of adding sequence, so the port number
-	 * will be unique once topology is fixed
-	 */
-	list_for_each_entry_reverse(port, &mstb->ports, next) {
-		if (port->mstb)
-			port_num += dp_mst_find_fixed_port_num(port->mstb,
-						target);
-		else if (!port->input) {
-			++port_num;
-			if (port == target)
-				break;
-		}
-	}
-
-	return port_num;
-}
-
-static struct drm_connector *
-dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst,
-		struct drm_dp_mst_port *port)
-{
-	struct dp_display *dp_display = dp_mst->dp_display;
-	struct drm_connector *connector = NULL;
-	struct sde_connector *c_conn;
-	u32 port_num;
-	int i;
-
-	mutex_lock(&port->mgr->lock);
-	port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port);
-	mutex_unlock(&port->mgr->lock);
-
-	if (!port_num)
-		return NULL;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (dp_mst->mst_bridge[i].fixed_port_num == port_num) {
-			connector = dp_mst->mst_bridge[i].fixed_connector;
-			c_conn = to_sde_connector(connector);
-			c_conn->mst_port = port;
-			dp_display->mst_connector_update_link_info(dp_display,
-					connector);
-			dp_mst->mst_bridge[i].fixed_port_added = true;
-			DP_MST_DEBUG("found fixed connector %d\n",
-					DRMID(connector));
-			break;
-		}
-	}
-
-	return connector;
-}
-
-static int
-dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst)
-{
-	int enc_idx = MAX_DP_MST_DRM_BRIDGES;
-	int i;
-
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!dp_mst->mst_bridge[i].fixed_connector) {
-			enc_idx = i;
-			break;
-		}
-	}
-
-	return enc_idx;
-}
-
-static struct drm_connector *
-dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port, const char *pathprop)
-{
-	struct dp_mst_private *dp_mst;
-	struct drm_device *dev;
-	struct dp_display *dp_display;
-	struct drm_connector *connector;
-	int i, enc_idx;
-
-	DP_MST_DEBUG("enter\n");
-
-	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
-
-	dp_display = dp_mst->dp_display;
-	dev = dp_display->drm_dev;
-
-	if (port->input || port->mstb)
-		enc_idx = MAX_DP_MST_DRM_BRIDGES;
-	else {
-		/* if port is already reserved, return immediately */
-		connector = dp_mst_find_fixed_connector(dp_mst, port);
-		if (connector != NULL)
-			return connector;
-
-		/* first available bridge index for non-reserved port */
-		enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst);
-	}
-
-	/* add normal connector */
-	connector = dp_mst_add_connector(mgr, port, pathprop);
-	if (!connector) {
-		DP_MST_DEBUG("failed to add connector\n");
-		return NULL;
-	}
-
-	drm_modeset_lock_all(dev);
-
-	/* clear encoder list */
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-		connector->encoder_ids[i] = 0;
-
-	/* re-attach encoders from first available encoders */
-	for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++)
-		drm_connector_attach_encoder(connector,
-				dp_mst->mst_bridge[i].encoder);
-
-	drm_modeset_unlock_all(dev);
-
-	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
-
-	return connector;
-}
-
-static void dp_mst_register_fixed_connector(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	struct dp_display *dp_display = c_conn->display;
-	struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info;
-	int i;
-
-	DP_MST_DEBUG("enter\n");
-
-	/* skip connector registered for fixed topology ports */
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
-			DP_MST_DEBUG("found fixed connector %d\n",
-					DRMID(connector));
-			return;
-		}
-	}
-
-	dp_mst_register_connector(connector);
-}
-
-static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
-					   struct drm_connector *connector)
-{
-	struct dp_mst_private *dp_mst;
-	int i;
-
-	DP_MST_DEBUG("enter\n");
-
-	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
-
-	/* skip connector destroy for fixed topology ports */
-	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
-			dp_mst->mst_bridge[i].fixed_port_added = false;
-			DP_MST_DEBUG("destroy fixed connector %d\n",
-					DRMID(connector));
-			return;
-		}
-	}
-
-	dp_mst_destroy_connector(mgr, connector);
-}
-
-static struct drm_connector *
-dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
-			struct drm_encoder *encoder)
-{
-	static const struct sde_connector_ops dp_mst_connector_ops = {
-		.post_init  = NULL,
-		.detect     = dp_mst_fixed_connector_detect,
-		.get_modes  = dp_mst_connector_get_modes,
-		.mode_valid = dp_mst_connector_mode_valid,
-		.get_info   = dp_mst_connector_get_info,
-		.get_mode_info  = dp_mst_connector_get_mode_info,
-		.atomic_best_encoder = dp_mst_fixed_atomic_best_encoder,
-		.atomic_check = dp_mst_connector_atomic_check,
-		.config_hdr = dp_mst_connector_config_hdr,
-		.pre_destroy = dp_mst_connector_pre_destroy,
-	};
-	struct drm_device *dev;
-	struct drm_connector *connector;
-	int rc;
-
-	DP_MST_DEBUG("enter\n");
-
-	dev = dp_display->drm_dev;
-
-	connector = sde_connector_init(dev,
-				encoder,
-				NULL,
-				dp_display,
-				&dp_mst_connector_ops,
-				DRM_CONNECTOR_POLL_HPD,
-				DRM_MODE_CONNECTOR_DisplayPort);
-
-	if (!connector) {
-		pr_err("mst sde_connector_init failed\n");
-		return NULL;
-	}
-
-	rc = dp_display->mst_connector_install(dp_display, connector);
-	if (rc) {
-		pr_err("mst connector install failed\n");
-		sde_connector_destroy(connector);
-		return NULL;
-	}
-
-	drm_object_attach_property(&connector->base,
-			dev->mode_config.path_property, 0);
-	drm_object_attach_property(&connector->base,
-			dev->mode_config.tile_property, 0);
-
-	DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id);
-
-	return connector;
-}
-
-static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private,
-							mst_mgr);
-	struct drm_device *dev = mst->dp_display->drm_dev;
-	char event_string[] = "MST_HOTPLUG=1";
-	char *envp[2];
-
-	envp[0] = event_string;
-	envp[1] = NULL;
-
-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
-
-	DP_MST_INFO_LOG("mst hot plug event\n");
-}
-
-static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status)
-{
-	struct drm_device *dev = mst->dp_display->drm_dev;
-	char event_string[] = "MST_HOTPLUG=1";
-	char status[HPD_STRING_SIZE];
-	char *envp[3];
-
-	if (hpd_status)
-		snprintf(status, HPD_STRING_SIZE, "status=connected");
-	else
-		snprintf(status, HPD_STRING_SIZE, "status=disconnected");
-
-	envp[0] = event_string;
-	envp[1] = status;
-	envp[2] = NULL;
-
-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
-
-	DP_MST_INFO_LOG("%s finished\n", __func__);
-}
-
-/* DP Driver Callback OPs */
-
-static void dp_mst_display_hpd(void *dp_display, bool hpd_status,
-		struct dp_mst_hpd_info *info)
-{
-	int rc;
-	struct dp_display *dp = dp_display;
-	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-
-	mutex_lock(&mst->mst_lock);
-	mst->mst_session_state = hpd_status;
-	mutex_unlock(&mst->mst_lock);
-
-	if (!hpd_status)
-		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
-				hpd_status);
-
-	if (info && !info->mst_protocol) {
-		if (hpd_status) {
-			mst->simulator.edid = (struct edid *)info->edid;
-			mst->simulator.port_cnt = info->mst_port_cnt;
-		}
-		mst->mst_fw_cbs = &drm_dp_sim_mst_fw_helper_ops;
-	} else {
-		mst->mst_fw_cbs = &drm_dp_mst_fw_helper_ops;
-	}
-
-	if (hpd_status)
-		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
-				hpd_status);
-
-	dp_mst_hpd_event_notify(mst, hpd_status);
-
-	DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
-}
-
-static void dp_mst_display_hpd_irq(void *dp_display,
-			struct dp_mst_hpd_info *info)
-{
-	int rc;
-	struct dp_display *dp = dp_display;
-	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-	u8 esi[14];
-	unsigned int esi_res = DP_SINK_COUNT_ESI + 1;
-	bool handled;
-
-	if (info->mst_hpd_sim) {
-		dp_mst_hotplug(&mst->mst_mgr);
-		return;
-	}
-
-	if (!mst->mst_session_state) {
-		pr_err("mst_hpd_irq received before mst session start\n");
-		return;
-	}
-
-	rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI,
-		esi, 14);
-	if (rc != 14) {
-		pr_err("dpcd sink status read failed, rlen=%d\n", rc);
-		return;
-	}
-
-	DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n",
-			esi[1], esi[2], esi[3]);
-
-	rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled);
-
-	/* ack the request */
-	if (handled) {
-		rc = drm_dp_dpcd_write(mst->caps.drm_aux, esi_res, &esi[1], 3);
-
-		if (rc != 3)
-			pr_err("dpcd esi_res failed. rlen=%d\n", rc);
-	}
-
-	DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc);
-}
-
-static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state)
-{
-	struct dp_display *dp = dp_display;
-	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-
-	if (!mst) {
-		pr_debug("mst not initialized\n");
-		return;
-	}
-
-	mst->state = mst_state;
-	DP_MST_INFO_LOG("mst power state:%d\n", mst_state);
-}
-
-/* DP MST APIs */
-
-static const struct dp_mst_drm_cbs dp_mst_display_cbs = {
-	.hpd = dp_mst_display_hpd,
-	.hpd_irq = dp_mst_display_hpd_irq,
-	.set_drv_state = dp_mst_set_state,
-};
-
-static const struct drm_dp_mst_topology_cbs dp_mst_drm_cbs = {
-	.add_connector = dp_mst_add_connector,
-	.register_connector = dp_mst_register_connector,
-	.destroy_connector = dp_mst_destroy_connector,
-	.hotplug = dp_mst_hotplug,
-};
-
-static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = {
-	.add_connector = dp_mst_add_fixed_connector,
-	.register_connector = dp_mst_register_fixed_connector,
-	.destroy_connector = dp_mst_destroy_fixed_connector,
-	.hotplug = dp_mst_hotplug,
-};
-
-static void dp_mst_sim_init(struct dp_mst_private *mst)
-{
-	INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work);
-	mst->simulator.cbs = &dp_mst_drm_cbs;
-}
-
-int dp_mst_init(struct dp_display *dp_display)
-{
-	struct drm_device *dev;
-	int conn_base_id = 0;
-	int ret, i;
-	struct dp_mst_drm_install_info install_info;
-
-	memset(&dp_mst, 0, sizeof(dp_mst));
-
-	if (!dp_display) {
-		pr_err("invalid params\n");
-		return 0;
-	}
-
-	dev = dp_display->drm_dev;
-
-	/* register with DP driver */
-	install_info.dp_mst_prv_info = &dp_mst;
-	install_info.cbs = &dp_mst_display_cbs;
-	dp_display->mst_install(dp_display, &install_info);
-
-	dp_display->get_mst_caps(dp_display, &dp_mst.caps);
-
-	if (!dp_mst.caps.has_mst) {
-		DP_MST_DEBUG("mst not supported\n");
-		return 0;
-	}
-
-	dp_mst.mst_fw_cbs = &drm_dp_mst_fw_helper_ops;
-
-	memset(&dp_mst.mst_mgr, 0, sizeof(dp_mst.mst_mgr));
-	dp_mst.mst_mgr.cbs = &dp_mst_drm_cbs;
-	conn_base_id = dp_display->base_connector->base.id;
-	dp_mst.dp_display = dp_display;
-
-	mutex_init(&dp_mst.mst_lock);
-
-	ret = drm_dp_mst_topology_mgr_init(&dp_mst.mst_mgr, dev,
-					dp_mst.caps.drm_aux,
-					dp_mst.caps.max_dpcd_transaction_bytes,
-					dp_mst.caps.max_streams_supported,
-					conn_base_id);
-	if (ret) {
-		pr_err("dp drm mst topology manager init failed\n");
-		goto error;
-	}
-
-	dp_mst_sim_init(&dp_mst);
-
-	dp_mst.mst_initialized = true;
-
-	/* create drm_bridges for cached mst encoders and clear cache */
-	for (i = 0; i < dp_mst_enc_cache.cnt; i++) {
-		ret = dp_mst_drm_bridge_init(dp_display,
-				dp_mst_enc_cache.mst_enc[i]);
-	}
-	memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache));
-
-	/* choose fixed callback function if fixed topology is found */
-	if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL))
-		dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs;
-
-	DP_MST_INFO_LOG("dp drm mst topology manager init completed\n");
-
-	return ret;
-
-error:
-	mutex_destroy(&dp_mst.mst_lock);
-	return ret;
-}
-
-void dp_mst_deinit(struct dp_display *dp_display)
-{
-	struct dp_mst_private *mst;
-
-	if (!dp_display) {
-		pr_err("invalid params\n");
-		return;
-	}
-
-	mst = dp_display->dp_mst_prv_info;
-
-	if (!mst->mst_initialized)
-		return;
-
-	dp_display->mst_uninstall(dp_display);
-
-	drm_dp_mst_topology_mgr_destroy(&mst->mst_mgr);
-
-	dp_mst.mst_initialized = false;
-
-	mutex_destroy(&mst->mst_lock);
-
-	DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n");
-}
-
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
deleted file mode 100644
index 2862ca59..0000000
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ /dev/null
@@ -1,3004 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include "dp_panel.h"
-#include <drm/drm_fixed.h>
-
-#define DP_KHZ_TO_HZ 1000
-#define DP_PANEL_DEFAULT_BPP 24
-#define DP_MAX_DS_PORT_COUNT 1
-
-#define DPRX_FEATURE_ENUMERATION_LIST 0x2210
-#define DPRX_EXTENDED_DPCD_FIELD 0x2200
-#define VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED BIT(3)
-#define VSC_EXT_VESA_SDP_SUPPORTED BIT(4)
-#define VSC_EXT_VESA_SDP_CHAINING_SUPPORTED BIT(5)
-
-enum dp_panel_hdr_pixel_encoding {
-	RGB,
-	YCbCr444,
-	YCbCr422,
-	YCbCr420,
-	YONLY,
-	RAW,
-};
-
-enum dp_panel_hdr_rgb_colorimetry {
-	sRGB,
-	RGB_WIDE_GAMUT_FIXED_POINT,
-	RGB_WIDE_GAMUT_FLOATING_POINT,
-	ADOBERGB,
-	DCI_P3,
-	CUSTOM_COLOR_PROFILE,
-	ITU_R_BT_2020_RGB,
-};
-
-enum dp_panel_hdr_dynamic_range {
-	VESA,
-	CEA,
-};
-
-enum dp_panel_hdr_content_type {
-	NOT_DEFINED,
-	GRAPHICS,
-	PHOTO,
-	VIDEO,
-	GAME,
-};
-
-enum dp_panel_hdr_state {
-	HDR_DISABLED,
-	HDR_ENABLED,
-};
-
-struct dp_panel_private {
-	struct device *dev;
-	struct dp_panel dp_panel;
-	struct dp_aux *aux;
-	struct dp_link *link;
-	struct dp_parser *parser;
-	struct dp_catalog_panel *catalog;
-	bool custom_edid;
-	bool custom_dpcd;
-	bool panel_on;
-	bool vsc_supported;
-	bool vscext_supported;
-	bool vscext_chaining_supported;
-	enum dp_panel_hdr_state hdr_state;
-	u8 spd_vendor_name[8];
-	u8 spd_product_description[16];
-	u8 major;
-	u8 minor;
-};
-
-static const struct dp_panel_info fail_safe = {
-	.h_active = 640,
-	.v_active = 480,
-	.h_back_porch = 48,
-	.h_front_porch = 16,
-	.h_sync_width = 96,
-	.h_active_low = 0,
-	.v_back_porch = 33,
-	.v_front_porch = 10,
-	.v_sync_width = 2,
-	.v_active_low = 0,
-	.h_skew = 0,
-	.refresh_rate = 60,
-	.pixel_clk_khz = 25200,
-	.bpp = 24,
-};
-
-/* OEM NAME */
-static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109};
-
-/* MODEL NAME */
-static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103,
-	111, 110, 0, 0, 0, 0, 0, 0};
-
-struct dp_dhdr_maxpkt_calc_input {
-	u32 mdp_clk;
-	u32 lclk;
-	u32 pclk;
-	u32 h_active;
-	u32 nlanes;
-	s64 mst_target_sc;
-	bool mst_en;
-	bool fec_en;
-};
-
-struct tu_algo_data {
-	s64 lclk_fp;
-	s64 pclk_fp;
-	s64 lwidth;
-	s64 lwidth_fp;
-	s64 hbp_relative_to_pclk;
-	s64 hbp_relative_to_pclk_fp;
-	int nlanes;
-	int bpp;
-	int pixelEnc;
-	int dsc_en;
-	int async_en;
-	int bpc;
-
-	uint delay_start_link_extra_pixclk;
-	int extra_buffer_margin;
-	s64 ratio_fp;
-	s64 original_ratio_fp;
-
-	s64 err_fp;
-	s64 n_err_fp;
-	s64 n_n_err_fp;
-	int tu_size;
-	int tu_size_desired;
-	int tu_size_minus1;
-
-	int valid_boundary_link;
-	s64 resulting_valid_fp;
-	s64 total_valid_fp;
-	s64 effective_valid_fp;
-	s64 effective_valid_recorded_fp;
-	int n_tus;
-	int n_tus_per_lane;
-	int paired_tus;
-	int remainder_tus;
-	int remainder_tus_upper;
-	int remainder_tus_lower;
-	int extra_bytes;
-	int filler_size;
-	int delay_start_link;
-
-	int extra_pclk_cycles;
-	int extra_pclk_cycles_in_link_clk;
-	s64 ratio_by_tu_fp;
-	s64 average_valid2_fp;
-	int new_valid_boundary_link;
-	int remainder_symbols_exist;
-	int n_symbols;
-	s64 n_remainder_symbols_per_lane_fp;
-	s64 last_partial_tu_fp;
-	s64 TU_ratio_err_fp;
-
-	int n_tus_incl_last_incomplete_tu;
-	int extra_pclk_cycles_tmp;
-	int extra_pclk_cycles_in_link_clk_tmp;
-	int extra_required_bytes_new_tmp;
-	int filler_size_tmp;
-	int lower_filler_size_tmp;
-	int delay_start_link_tmp;
-
-	bool boundary_moderation_en;
-	int boundary_mod_lower_err;
-	int upper_boundary_count;
-	int lower_boundary_count;
-	int i_upper_boundary_count;
-	int i_lower_boundary_count;
-	int valid_lower_boundary_link;
-	int even_distribution_BF;
-	int even_distribution_legacy;
-	int even_distribution;
-	int min_hblank_violated;
-	s64 delay_start_time_fp;
-	s64 hbp_time_fp;
-	s64 hactive_time_fp;
-	s64 diff_abs_fp;
-
-	s64 ratio;
-};
-
-static int _tu_param_compare(s64 a, s64 b)
-{
-	u32 a_int, a_frac, a_sign;
-	u32 b_int, b_frac, b_sign;
-	s64 a_temp, b_temp, minus_1;
-
-	if (a == b)
-		return 0;
-
-	minus_1 = drm_fixp_from_fraction(-1, 1);
-
-	a_int = (a >> 32) & 0x7FFFFFFF;
-	a_frac = a & 0xFFFFFFFF;
-	a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
-
-	b_int = (b >> 32) & 0x7FFFFFFF;
-	b_frac = b & 0xFFFFFFFF;
-	b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
-
-	if (a_sign > b_sign)
-		return 2;
-	else if (b_sign > a_sign)
-		return 1;
-
-	if (!a_sign && !b_sign) { /* positive */
-		if (a > b)
-			return 1;
-		else
-			return 2;
-	} else { /* negative */
-		a_temp = drm_fixp_mul(a, minus_1);
-		b_temp = drm_fixp_mul(b, minus_1);
-
-		if (a_temp > b_temp)
-			return 2;
-		else
-			return 1;
-	}
-}
-
-static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
-					struct tu_algo_data *tu)
-{
-	int nlanes = in->nlanes;
-	int dsc_num_slices = in->num_of_dsc_slices;
-	int dsc_num_bytes  = 0;
-	int numerator;
-	s64 pclk_dsc_fp;
-	s64 dwidth_dsc_fp;
-	s64 hbp_dsc_fp;
-	s64 overhead_dsc;
-
-	int tot_num_eoc_symbols = 0;
-	int tot_num_hor_bytes   = 0;
-	int tot_num_dummy_bytes = 0;
-	int dwidth_dsc_bytes    = 0;
-	int  eoc_bytes           = 0;
-
-	s64 temp1_fp, temp2_fp, temp3_fp;
-
-	tu->lclk_fp              = drm_fixp_from_fraction(in->lclk, 1);
-	tu->pclk_fp              = drm_fixp_from_fraction(in->pclk_khz, 1000);
-	tu->lwidth               = in->hactive;
-	tu->hbp_relative_to_pclk = in->hporch;
-	tu->nlanes               = in->nlanes;
-	tu->bpp                  = in->bpp;
-	tu->pixelEnc             = in->pixel_enc;
-	tu->dsc_en               = in->dsc_en;
-	tu->async_en             = in->async_en;
-	tu->lwidth_fp            = drm_fixp_from_fraction(in->hactive, 1);
-	tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
-
-	if (tu->pixelEnc == 420) {
-		temp1_fp = drm_fixp_from_fraction(2, 1);
-		tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
-		tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
-		tu->hbp_relative_to_pclk_fp =
-				drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2);
-	}
-
-	if (tu->pixelEnc == 422) {
-		switch (tu->bpp) {
-		case 24:
-			tu->bpp = 16;
-			tu->bpc = 8;
-			break;
-		case 30:
-			tu->bpp = 20;
-			tu->bpc = 10;
-			break;
-		default:
-			tu->bpp = 16;
-			tu->bpc = 8;
-			break;
-		}
-	} else
-		tu->bpc = tu->bpp/3;
-
-	if (!in->dsc_en)
-		goto fec_check;
-
-	temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
-	temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
-	temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
-	temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
-
-	temp1_fp = drm_fixp_from_fraction(8, 1);
-	temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
-
-	numerator = drm_fixp2int(temp3_fp);
-
-	dsc_num_bytes  = numerator / dsc_num_slices;
-	eoc_bytes           = dsc_num_bytes % nlanes;
-	tot_num_eoc_symbols = nlanes * dsc_num_slices;
-	tot_num_hor_bytes   = dsc_num_bytes * dsc_num_slices;
-	tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
-
-	if (dsc_num_bytes == 0)
-		pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes);
-
-	dwidth_dsc_bytes = (tot_num_hor_bytes +
-				tot_num_eoc_symbols +
-				(eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
-	overhead_dsc     = dwidth_dsc_bytes / tot_num_hor_bytes;
-
-	dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
-
-	temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
-	temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
-	pclk_dsc_fp = temp1_fp;
-
-	temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
-	temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
-	hbp_dsc_fp = temp2_fp;
-
-	/* output */
-	tu->pclk_fp = pclk_dsc_fp;
-	tu->lwidth_fp = dwidth_dsc_fp;
-	tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
-
-fec_check:
-	if (in->fec_en) {
-		temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */
-		tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
-	}
-}
-
-static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
-{
-	s64 temp1_fp, temp2_fp, temp, temp1, temp2;
-	int compare_result_1, compare_result_2, compare_result_3;
-
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
-
-	tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
-
-	temp = (tu->i_upper_boundary_count *
-				tu->new_valid_boundary_link +
-				tu->i_lower_boundary_count *
-				(tu->new_valid_boundary_link-1));
-	tu->average_valid2_fp = drm_fixp_from_fraction(temp,
-					(tu->i_upper_boundary_count +
-					tu->i_lower_boundary_count));
-
-	temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
-	temp2_fp = tu->lwidth_fp;
-	temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-	temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
-	tu->n_tus = drm_fixp2int(temp2_fp);
-	if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
-		tu->n_tus += 1;
-
-	temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
-	temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
-	temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
-	temp2_fp = temp1_fp - temp2_fp;
-	temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
-	temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
-	tu->n_remainder_symbols_per_lane_fp = temp2_fp;
-
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	tu->last_partial_tu_fp =
-			drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
-					temp1_fp);
-
-	if (tu->n_remainder_symbols_per_lane_fp != 0)
-		tu->remainder_symbols_exist = 1;
-	else
-		tu->remainder_symbols_exist = 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
-	tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
-
-	tu->paired_tus = (int)((tu->n_tus_per_lane) /
-					(tu->i_upper_boundary_count +
-					 tu->i_lower_boundary_count));
-
-	tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
-						(tu->i_upper_boundary_count +
-						tu->i_lower_boundary_count);
-
-	if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
-		tu->remainder_tus_upper = tu->i_upper_boundary_count;
-		tu->remainder_tus_lower = tu->remainder_tus -
-						tu->i_upper_boundary_count;
-	} else {
-		tu->remainder_tus_upper = tu->remainder_tus;
-		tu->remainder_tus_lower = 0;
-	}
-
-	temp = tu->paired_tus * (tu->i_upper_boundary_count *
-				tu->new_valid_boundary_link +
-				tu->i_lower_boundary_count *
-				(tu->new_valid_boundary_link - 1)) +
-				(tu->remainder_tus_upper *
-				 tu->new_valid_boundary_link) +
-				(tu->remainder_tus_lower *
-				(tu->new_valid_boundary_link - 1));
-	tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
-
-	if (tu->remainder_symbols_exist) {
-		temp1_fp = tu->total_valid_fp +
-				tu->n_remainder_symbols_per_lane_fp;
-		temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
-		temp2_fp = temp2_fp + tu->last_partial_tu_fp;
-		temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
-	} else {
-		temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
-		temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
-	}
-	tu->effective_valid_fp = temp1_fp;
-
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
-	tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
-
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
-	tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
-
-	tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
-	temp2_fp = tu->lwidth_fp;
-	temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-	temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
-
-	if (temp2_fp)
-		tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
-	else
-		tu->n_tus_incl_last_incomplete_tu = 0;
-
-	temp1 = 0;
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
-	temp1_fp = tu->average_valid2_fp - temp2_fp;
-	temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
-	temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-
-	if (temp1_fp)
-		temp1 = drm_fixp2int_ceil(temp1_fp);
-
-	temp = tu->i_upper_boundary_count * tu->nlanes;
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
-	temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
-	temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
-	temp2_fp = temp1_fp - temp2_fp;
-	temp1_fp = drm_fixp_from_fraction(temp, 1);
-	temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-
-	if (temp2_fp)
-		temp2 = drm_fixp2int_ceil(temp2_fp);
-	else
-		temp2 = 0;
-	tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
-
-	temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
-	temp2_fp = drm_fixp_from_fraction(
-	tu->extra_required_bytes_new_tmp, 1);
-	temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-
-	if (temp1_fp)
-		tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
-	else
-		tu->extra_pclk_cycles_tmp = 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
-	temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
-	temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-
-	if (temp1_fp)
-		tu->extra_pclk_cycles_in_link_clk_tmp =
-						drm_fixp2int_ceil(temp1_fp);
-	else
-		tu->extra_pclk_cycles_in_link_clk_tmp = 0;
-
-	tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
-
-	tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
-
-	tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
-					tu->lower_filler_size_tmp +
-					tu->extra_buffer_margin;
-
-	temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
-	tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
-
-	compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
-	if (compare_result_1 == 2)
-		compare_result_1 = 1;
-	else
-		compare_result_1 = 0;
-
-	compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
-	if (compare_result_2 == 2)
-		compare_result_2 = 1;
-	else
-		compare_result_2 = 0;
-
-	compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
-					tu->delay_start_time_fp);
-	if (compare_result_3 == 2)
-		compare_result_3 = 0;
-	else
-		compare_result_3 = 1;
-
-	if (((tu->even_distribution == 1) ||
-			((tu->even_distribution_BF == 0) &&
-			(tu->even_distribution_legacy == 0))) &&
-			tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
-			compare_result_2 &&
-			(compare_result_1 || (tu->min_hblank_violated == 1)) &&
-			(tu->new_valid_boundary_link - 1) > 0 &&
-			compare_result_3 &&
-			(tu->delay_start_link_tmp <= 1023)) {
-		tu->upper_boundary_count = tu->i_upper_boundary_count;
-		tu->lower_boundary_count = tu->i_lower_boundary_count;
-		tu->err_fp = tu->n_n_err_fp;
-		tu->boundary_moderation_en = true;
-		tu->tu_size_desired = tu->tu_size;
-		tu->valid_boundary_link = tu->new_valid_boundary_link;
-		tu->effective_valid_recorded_fp = tu->effective_valid_fp;
-		tu->even_distribution_BF = 1;
-		tu->delay_start_link = tu->delay_start_link_tmp;
-	} else if (tu->boundary_mod_lower_err == 0) {
-		compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
-							tu->diff_abs_fp);
-		if (compare_result_1 == 2)
-			tu->boundary_mod_lower_err = 1;
-	}
-}
-
-static void _dp_calc_boundary(struct tu_algo_data *tu)
-{
-
-	s64 temp1_fp = 0, temp2_fp = 0;
-
-	do {
-		tu->err_fp = drm_fixp_from_fraction(1000, 1);
-
-		temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
-		temp2_fp = drm_fixp_from_fraction(
-				tu->delay_start_link_extra_pixclk, 1);
-		temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-
-		if (temp1_fp)
-			tu->extra_buffer_margin =
-				drm_fixp2int_ceil(temp1_fp);
-		else
-			tu->extra_buffer_margin = 0;
-
-		temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
-		temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
-
-		if (temp1_fp)
-			tu->n_symbols = drm_fixp2int_ceil(temp1_fp);
-		else
-			tu->n_symbols = 0;
-
-		for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
-			for (tu->i_upper_boundary_count = 1;
-				tu->i_upper_boundary_count <= 15;
-				tu->i_upper_boundary_count++) {
-				for (tu->i_lower_boundary_count = 1;
-					tu->i_lower_boundary_count <= 15;
-					tu->i_lower_boundary_count++) {
-					_tu_valid_boundary_calc(tu);
-				}
-			}
-		}
-		tu->delay_start_link_extra_pixclk--;
-	} while (!tu->boundary_moderation_en &&
-		tu->boundary_mod_lower_err == 1 &&
-		tu->delay_start_link_extra_pixclk != 0);
-}
-
-static void _dp_calc_extra_bytes(struct tu_algo_data *tu)
-{
-	u64 temp = 0;
-	s64 temp1_fp = 0, temp2_fp = 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
-	temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
-	temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
-	temp2_fp = temp1_fp - temp2_fp;
-	temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1);
-	temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-
-	temp = drm_fixp2int(temp2_fp);
-	if (temp && temp2_fp)
-		tu->extra_bytes = drm_fixp2int_ceil(temp2_fp);
-	else
-		tu->extra_bytes = 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1);
-	temp2_fp = drm_fixp_from_fraction(8, tu->bpp);
-	temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-
-	if (temp1_fp)
-		tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
-	else
-		tu->extra_pclk_cycles = drm_fixp2int(temp1_fp);
-
-	temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
-	temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1);
-	temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-
-	if (temp1_fp)
-		tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
-	else
-		tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
-}
-
-static void _dp_panel_calc_tu(struct dp_tu_calc_input *in,
-				   struct dp_vc_tu_mapping_table *tu_table)
-{
-	struct tu_algo_data tu;
-	int compare_result_1, compare_result_2;
-	u64 temp = 0;
-	s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
-
-	s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
-	s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
-	s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
-	s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
-
-	u8 DP_BRUTE_FORCE = 1;
-	s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
-	uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
-	uint HBLANK_MARGIN = 4;
-
-	memset(&tu, 0, sizeof(tu));
-
-	dp_panel_update_tu_timings(in, &tu);
-
-	tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
-
-	temp1_fp = drm_fixp_from_fraction(4, 1);
-	temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp);
-	temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp);
-	tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
-
-	temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
-	temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp);
-	temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
-	temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
-	tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp);
-
-	tu.original_ratio_fp = tu.ratio_fp;
-	tu.boundary_moderation_en = false;
-	tu.upper_boundary_count = 0;
-	tu.lower_boundary_count = 0;
-	tu.i_upper_boundary_count = 0;
-	tu.i_lower_boundary_count = 0;
-	tu.valid_lower_boundary_link = 0;
-	tu.even_distribution_BF = 0;
-	tu.even_distribution_legacy = 0;
-	tu.even_distribution = 0;
-	tu.delay_start_time_fp = 0;
-
-	tu.err_fp = drm_fixp_from_fraction(1000, 1);
-	tu.n_err_fp = 0;
-	tu.n_n_err_fp = 0;
-
-	tu.ratio = drm_fixp2int(tu.ratio_fp);
-	temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
-	temp2_fp = tu.lwidth_fp % temp1_fp;
-	if (temp2_fp != 0 &&
-			!tu.ratio && tu.dsc_en == 0) {
-		tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp);
-		tu.ratio = drm_fixp2int(tu.ratio_fp);
-		if (tu.ratio)
-			tu.ratio_fp = drm_fixp_from_fraction(1, 1);
-	}
-
-	if (tu.ratio > 1)
-		tu.ratio = 1;
-
-	if (tu.ratio == 1)
-		goto tu_size_calc;
-
-	compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp);
-	if (!compare_result_1 || compare_result_1 == 1)
-		compare_result_1 = 1;
-	else
-		compare_result_1 = 0;
-
-	compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp);
-	if (!compare_result_2 || compare_result_2 == 2)
-		compare_result_2 = 1;
-	else
-		compare_result_2 = 0;
-
-	if (tu.dsc_en && compare_result_1 && compare_result_2) {
-		HBLANK_MARGIN += 4;
-		pr_info("Info: increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN);
-	}
-
-tu_size_calc:
-	for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
-		temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1);
-		temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
-		temp = drm_fixp2int_ceil(temp2_fp);
-		temp1_fp = drm_fixp_from_fraction(temp, 1);
-		tu.n_err_fp = temp1_fp - temp2_fp;
-
-		if (tu.n_err_fp < tu.err_fp) {
-			tu.err_fp = tu.n_err_fp;
-			tu.tu_size_desired = tu.tu_size;
-		}
-	}
-
-	tu.tu_size_minus1 = tu.tu_size_desired - 1;
-
-	temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
-	temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
-	tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
-
-	temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
-	temp2_fp = tu.lwidth_fp;
-	temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
-
-	temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
-	temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
-	tu.n_tus = drm_fixp2int(temp2_fp);
-	if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
-		tu.n_tus += 1;
-
-	tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0;
-	pr_info("Info: n_sym = %d, num_of_tus = %d\n",
-		tu.valid_boundary_link, tu.n_tus);
-
-	_dp_calc_extra_bytes(&tu);
-
-	tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link;
-
-	temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
-	tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
-
-	tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk +
-				tu.filler_size + tu.extra_buffer_margin;
-
-	tu.resulting_valid_fp =
-			drm_fixp_from_fraction(tu.valid_boundary_link, 1);
-
-	temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
-	temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
-	tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
-
-	temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
-	temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp;
-	tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp);
-
-	temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
-	tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
-
-	compare_result_1 = _tu_param_compare(tu.hbp_time_fp,
-					tu.delay_start_time_fp);
-	if (compare_result_1 == 2) /* hbp_time_fp < delay_start_time_fp */
-		tu.min_hblank_violated = 1;
-
-	tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp);
-
-	compare_result_2 = _tu_param_compare(tu.hactive_time_fp,
-						tu.delay_start_time_fp);
-	if (compare_result_2 == 2)
-		tu.min_hblank_violated = 1;
-
-	tu.delay_start_time_fp = 0;
-
-	/* brute force */
-
-	tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
-	tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp;
-
-	temp = drm_fixp2int(tu.diff_abs_fp);
-	if (!temp && tu.diff_abs_fp <= 0xffff)
-		tu.diff_abs_fp = 0;
-
-	/* if(diff_abs < 0) diff_abs *= -1 */
-	if (tu.diff_abs_fp < 0)
-		tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1);
-
-	tu.boundary_mod_lower_err = 0;
-	if ((tu.diff_abs_fp != 0 &&
-			((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
-			 (tu.even_distribution_legacy == 0) ||
-			 (DP_BRUTE_FORCE == 1))) ||
-			(tu.min_hblank_violated == 1)) {
-
-		_dp_calc_boundary(&tu);
-
-		if (tu.boundary_moderation_en) {
-			temp1_fp = drm_fixp_from_fraction(
-					(tu.upper_boundary_count *
-					tu.valid_boundary_link +
-					tu.lower_boundary_count *
-					(tu.valid_boundary_link - 1)), 1);
-			temp2_fp = drm_fixp_from_fraction(
-					(tu.upper_boundary_count +
-					tu.lower_boundary_count), 1);
-			tu.resulting_valid_fp =
-					drm_fixp_div(temp1_fp, temp2_fp);
-
-			temp1_fp = drm_fixp_from_fraction(
-					tu.tu_size_desired, 1);
-			tu.ratio_by_tu_fp =
-				drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
-
-			tu.valid_lower_boundary_link =
-				tu.valid_boundary_link - 1;
-
-			temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
-			temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
-			temp2_fp = drm_fixp_div(temp1_fp,
-						tu.resulting_valid_fp);
-			tu.n_tus = drm_fixp2int(temp2_fp);
-
-			tu.tu_size_minus1 = tu.tu_size_desired - 1;
-			tu.even_distribution_BF = 1;
-
-			temp1_fp =
-				drm_fixp_from_fraction(tu.tu_size_desired, 1);
-			temp2_fp =
-				drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
-			tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
-		}
-	}
-
-	temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp);
-
-	if (temp2_fp)
-		temp = drm_fixp2int_ceil(temp2_fp);
-	else
-		temp = 0;
-
-	temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
-	temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
-	temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
-	temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
-	temp1_fp = drm_fixp_from_fraction(temp, 1);
-	temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-	temp = drm_fixp2int(temp2_fp);
-
-	if (tu.async_en)
-		tu.delay_start_link += (int)temp;
-
-	temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
-	tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
-
-	/* OUTPUTS */
-	tu_table->valid_boundary_link       = tu.valid_boundary_link;
-	tu_table->delay_start_link          = tu.delay_start_link;
-	tu_table->boundary_moderation_en    = tu.boundary_moderation_en;
-	tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link;
-	tu_table->upper_boundary_count      = tu.upper_boundary_count;
-	tu_table->lower_boundary_count      = tu.lower_boundary_count;
-	tu_table->tu_size_minus1            = tu.tu_size_minus1;
-
-	pr_info("TU: valid_boundary_link: %d\n", tu_table->valid_boundary_link);
-	pr_info("TU: delay_start_link: %d\n", tu_table->delay_start_link);
-	pr_info("TU: boundary_moderation_en: %d\n",
-			tu_table->boundary_moderation_en);
-	pr_info("TU: valid_lower_boundary_link: %d\n",
-			tu_table->valid_lower_boundary_link);
-	pr_info("TU: upper_boundary_count: %d\n",
-			tu_table->upper_boundary_count);
-	pr_info("TU: lower_boundary_count: %d\n",
-			tu_table->lower_boundary_count);
-	pr_info("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
-}
-
-static void dp_panel_calc_tu_parameters(struct dp_panel *dp_panel,
-		struct dp_vc_tu_mapping_table *tu_table)
-{
-	struct dp_tu_calc_input in;
-	struct dp_panel_info *pinfo;
-	struct dp_panel_private *panel;
-	int bw_code;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	pinfo = &dp_panel->pinfo;
-	bw_code = panel->link->link_params.bw_code;
-
-	in.lclk = drm_dp_bw_code_to_link_rate(bw_code) / 1000;
-	in.pclk_khz = pinfo->pixel_clk_khz;
-	in.hactive = pinfo->h_active;
-	in.hporch = pinfo->h_back_porch + pinfo->h_front_porch +
-				pinfo->h_sync_width;
-	in.nlanes = panel->link->link_params.lane_count;
-	in.bpp = pinfo->bpp;
-	in.pixel_enc = 444;
-	in.dsc_en = dp_panel->dsc_en;
-	in.async_en = 0;
-	in.fec_en = dp_panel->fec_en;
-	in.num_of_dsc_slices = pinfo->comp_info.dsc_info.slice_per_pkt;
-
-	switch (pinfo->comp_info.comp_ratio) {
-	case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1:
-		in.compress_ratio = 200;
-		break;
-	case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1:
-		in.compress_ratio = 300;
-		break;
-	default:
-		in.compress_ratio = 100;
-	}
-
-	_dp_panel_calc_tu(&in, tu_table);
-}
-
-void dp_panel_calc_tu_test(struct dp_tu_calc_input *in,
-		struct dp_vc_tu_mapping_table *tu_table)
-{
-	_dp_panel_calc_tu(in, tu_table);
-}
-
-static void dp_panel_config_tr_unit(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-	struct dp_catalog_panel *catalog;
-	u32 dp_tu = 0x0;
-	u32 valid_boundary = 0x0;
-	u32 valid_boundary2 = 0x0;
-	struct dp_vc_tu_mapping_table tu_calc_table;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (dp_panel->stream_id != DP_STREAM_0)
-		return;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-
-	dp_panel_calc_tu_parameters(dp_panel, &tu_calc_table);
-
-	dp_tu |= tu_calc_table.tu_size_minus1;
-	valid_boundary |= tu_calc_table.valid_boundary_link;
-	valid_boundary |= (tu_calc_table.delay_start_link << 16);
-
-	valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
-	valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
-	valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
-
-	if (tu_calc_table.boundary_moderation_en)
-		valid_boundary2 |= BIT(0);
-
-	pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
-			dp_tu, valid_boundary, valid_boundary2);
-
-	catalog->dp_tu = dp_tu;
-	catalog->valid_boundary = valid_boundary;
-	catalog->valid_boundary2 = valid_boundary2;
-
-	catalog->update_transfer_unit(catalog);
-}
-
-enum dp_dsc_ratio_type {
-	DSC_8BPC_8BPP,
-	DSC_10BPC_8BPP,
-	DSC_12BPC_8BPP,
-	DSC_RATIO_TYPE_MAX
-};
-
-static u32 dp_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
-		0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
-
-/*
- * DSC 1.1
- * Rate control - Min QP values for each ratio type in dp_dsc_ratio_type
- */
-static char dp_dsc_rc_range_min_qp_1_1[][15] = {
-	{0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13},
-	{0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17},
-	{0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21},
-	};
-
-/*
- * DSC 1.1 SCR
- * Rate control - Min QP values for each ratio type in dp_dsc_ratio_type
- */
-static char dp_dsc_rc_range_min_qp_1_1_scr1[][15] = {
-	{0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 12},
-	{0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 16},
-	{0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 17, 20},
-	};
-
-/*
- * DSC 1.1
- * Rate control - Max QP values for each ratio type in dp_dsc_ratio_type
- */
-static char dp_dsc_rc_range_max_qp_1_1[][15] = {
-	{4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15},
-	{8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19},
-	{12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23},
-	};
-
-/*
- * DSC 1.1 SCR
- * Rate control - Max QP values for each ratio type in dp_dsc_ratio_type
- */
-static char dp_dsc_rc_range_max_qp_1_1_scr1[][15] = {
-	{4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13},
-	{8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17},
-	{12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21},
-	};
-
-/*
- * DSC 1.1 and DSC 1.1 SCR
- * Rate control - bpg offset values
- */
-static char dp_dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
-		-8, -10, -10, -12, -12, -12, -12};
-
-struct dp_dsc_dto_data {
-	enum msm_display_compression_ratio comp_ratio;
-	u32 org_bpp; /* bits */
-	u32 dto_numerator;
-	u32 dto_denominator;
-};
-
-struct dp_dsc_dto_data dto_tbl[] = {
-	{MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1, 24, 1, 2},
-	{MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1, 30, 5, 8},
-	{MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1, 24, 1, 3},
-	{MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1, 30, 5, 12},
-};
-
-static void _dp_panel_get_dto_m_n(enum msm_display_compression_ratio ratio,
-		u32 org_bpp, u32 *dto_n, u32 *dto_d)
-{
-	u32 idx;
-
-	for (idx = 0; idx < ARRAY_SIZE(dto_tbl); idx++) {
-		if (ratio == dto_tbl[idx].comp_ratio &&
-				org_bpp == dto_tbl[idx].org_bpp) {
-			*dto_n = dto_tbl[idx].dto_numerator;
-			*dto_d = dto_tbl[idx].dto_denominator;
-			return;
-		}
-	}
-}
-
-static int dp_panel_dsc_create_pps_buf_cmd(struct msm_display_dsc_info *dsc,
-		char *buf, int pps_id)
-{
-	char *bp = buf;
-	char data;
-	int i, bpp;
-
-	*bp++ = (dsc->version & 0xff);		/* pps0 */
-	*bp++ = (pps_id & 0xff);		/* pps1 */
-	bp++;					/* pps2, reserved */
-
-	data = dsc->line_buf_depth & 0x0f;
-	data |= ((dsc->bpc & 0xf) << 4);
-	*bp++ = data;				/* pps3 */
-
-	bpp = dsc->bpp;
-	bpp <<= 4;				/* 4 fraction bits */
-	data = (bpp >> 8);
-	data &= 0x03;				/* upper two bits */
-	data |= ((dsc->block_pred_enable & 0x1) << 5);
-	data |= ((dsc->convert_rgb & 0x1) << 4);
-	data |= ((dsc->enable_422 & 0x1) << 3);
-	data |= ((dsc->vbr_enable & 0x1) << 2);
-	*bp++ = data;				/* pps4 */
-	*bp++ = (bpp & 0xff);			/* pps5 */
-
-	*bp++ = ((dsc->pic_height >> 8) & 0xff); /* pps6 */
-	*bp++ = (dsc->pic_height & 0x0ff);	/* pps7 */
-	*bp++ = ((dsc->pic_width >> 8) & 0xff);	/* pps8 */
-	*bp++ = (dsc->pic_width & 0x0ff);	/* pps9 */
-
-	*bp++ = ((dsc->slice_height >> 8) & 0xff);/* pps10 */
-	*bp++ = (dsc->slice_height & 0x0ff);	/* pps11 */
-	*bp++ = ((dsc->slice_width >> 8) & 0xff); /* pps12 */
-	*bp++ = (dsc->slice_width & 0x0ff);	/* pps13 */
-
-	*bp++ = ((dsc->chunk_size >> 8) & 0xff);/* pps14 */
-	*bp++ = (dsc->chunk_size & 0x0ff);	/* pps15 */
-
-	*bp++ = (dsc->initial_xmit_delay >> 8) & 0x3; /* pps16*/
-	*bp++ = (dsc->initial_xmit_delay & 0xff);/* pps17 */
-
-	*bp++ = ((dsc->initial_dec_delay >> 8) & 0xff); /* pps18 */
-	*bp++ = (dsc->initial_dec_delay & 0xff);/* pps19 */
-
-	bp++;					/* pps20, reserved */
-
-	*bp++ = (dsc->initial_scale_value & 0x3f); /* pps21 */
-
-	*bp++ = ((dsc->scale_increment_interval >> 8) & 0xff); /* pps22 */
-	*bp++ = (dsc->scale_increment_interval & 0xff); /* pps23 */
-
-	*bp++ = ((dsc->scale_decrement_interval >> 8) & 0xf); /* pps24 */
-	*bp++ = (dsc->scale_decrement_interval & 0x0ff);/* pps25 */
-
-	bp++;					/* pps26, reserved */
-
-	*bp++ = (dsc->first_line_bpg_offset & 0x1f);/* pps27 */
-
-	*bp++ = ((dsc->nfl_bpg_offset >> 8) & 0xff);/* pps28 */
-	*bp++ = (dsc->nfl_bpg_offset & 0x0ff);	/* pps29 */
-	*bp++ = ((dsc->slice_bpg_offset >> 8) & 0xff);/* pps30 */
-	*bp++ = (dsc->slice_bpg_offset & 0x0ff);/* pps31 */
-
-	*bp++ = ((dsc->initial_offset >> 8) & 0xff);/* pps32 */
-	*bp++ = (dsc->initial_offset & 0x0ff);	/* pps33 */
-
-	*bp++ = ((dsc->final_offset >> 8) & 0xff);/* pps34 */
-	*bp++ = (dsc->final_offset & 0x0ff);	/* pps35 */
-
-	*bp++ = (dsc->min_qp_flatness & 0x1f);	/* pps36 */
-	*bp++ = (dsc->max_qp_flatness & 0x1f);	/* pps37 */
-
-	*bp++ = ((dsc->rc_model_size >> 8) & 0xff);/* pps38 */
-	*bp++ = (dsc->rc_model_size & 0x0ff);	/* pps39 */
-
-	*bp++ = (dsc->edge_factor & 0x0f);	/* pps40 */
-
-	*bp++ = (dsc->quant_incr_limit0 & 0x1f);	/* pps41 */
-	*bp++ = (dsc->quant_incr_limit1 & 0x1f);	/* pps42 */
-
-	data = ((dsc->tgt_offset_hi & 0xf) << 4);
-	data |= (dsc->tgt_offset_lo & 0x0f);
-	*bp++ = data;				/* pps43 */
-
-	for (i = 0; i < ARRAY_SIZE(dp_dsc_rc_buf_thresh); i++)
-		*bp++ = (dsc->buf_thresh[i] & 0xff); /* pps44 - pps57 */
-
-	for (i = 0; i < 15; i++) {		/* pps58 - pps87 */
-		data = (dsc->range_min_qp[i] & 0x1f);
-		data <<= 3;
-		data |= ((dsc->range_max_qp[i] >> 2) & 0x07);
-		*bp++ = data;
-		data = (dsc->range_max_qp[i] & 0x03);
-		data <<= 6;
-		data |= (dsc->range_bpg_offset[i] & 0x3f);
-		*bp++ = data;
-	}
-
-	return 88;
-}
-
-static void dp_panel_dsc_prepare_pps_packet(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-	struct dp_dsc_cfg_data *dsc;
-	u8 *pps, *parity;
-	u32 *pps_word, *parity_word;
-	int i, index_4;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	dsc = &panel->catalog->dsc;
-	pps = dsc->pps;
-	pps_word = dsc->pps_word;
-	parity = dsc->parity;
-	parity_word = dsc->parity_word;
-
-	memset(parity, 0, sizeof(dsc->parity));
-
-	dsc->pps_word_len = dsc->pps_len >> 2;
-	dsc->parity_len = dsc->pps_word_len;
-	dsc->parity_word_len = (dsc->parity_len >> 2) + 1;
-
-	for (i = 0; i < dsc->pps_word_len; i++) {
-		index_4 = i << 2;
-		pps_word[i] = pps[index_4 + 0] << 0 |
-				pps[index_4 + 1] << 8 |
-				pps[index_4 + 2] << 16 |
-				pps[index_4 + 3] << 24;
-
-		parity[i] = dp_header_get_parity(pps_word[i]);
-	}
-
-	for (i = 0; i < dsc->parity_word_len; i++) {
-		index_4 = i << 2;
-		parity_word[i] = parity[index_4 + 0] << 0 |
-				   parity[index_4 + 1] << 8 |
-				   parity[index_4 + 2] << 16 |
-				   parity[index_4 + 3] << 24;
-	}
-}
-
-static void _dp_panel_dsc_get_num_extra_pclk(struct msm_display_dsc_info *dsc,
-				enum msm_display_compression_ratio ratio)
-{
-	unsigned int dto_n, dto_d, remainder;
-	int ack_required, last_few_ack_required, accum_ack;
-	int last_few_pclk, last_few_pclk_required;
-	int start, temp, line_width = dsc->pic_width/2;
-	s64 temp1_fp, temp2_fp;
-
-	_dp_panel_get_dto_m_n(ratio, dsc->bpc * 3, &dto_n, &dto_d);
-
-	ack_required = dsc->pclk_per_line;
-
-	/* number of pclk cycles left outside of the complete DTO set */
-	last_few_pclk = line_width % dto_d;
-
-	/* number of pclk cycles outside of the complete dto */
-	temp1_fp = drm_fixp_from_fraction(line_width, dto_d);
-	temp2_fp = drm_fixp_from_fraction(dto_n, 1);
-	temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-	temp = drm_fixp2int(temp1_fp);
-	last_few_ack_required = ack_required - temp;
-
-	/*
-	 * check how many more pclk is needed to
-	 * accommodate the last few ack required
-	 */
-	remainder = dto_n;
-	accum_ack = 0;
-	last_few_pclk_required = 0;
-	while (accum_ack < last_few_ack_required) {
-		last_few_pclk_required++;
-
-		if (remainder >= dto_n)
-			start = remainder;
-		else
-			start = remainder + dto_d;
-
-		remainder = start - dto_n;
-		if (remainder < dto_n)
-			accum_ack++;
-	}
-
-	/* if fewer pclk than required */
-	if (last_few_pclk < last_few_pclk_required)
-		dsc->extra_width = last_few_pclk_required - last_few_pclk;
-	else
-		dsc->extra_width = 0;
-
-	pr_debug("extra pclks required: %d\n", dsc->extra_width);
-}
-
-static void _dp_panel_dsc_bw_overhead_calc(struct dp_panel *dp_panel,
-		struct msm_display_dsc_info *dsc,
-		struct dp_display_mode *dp_mode, u32 dsc_byte_cnt)
-{
-	int num_slices, tot_num_eoc_symbols;
-	int tot_num_hor_bytes, tot_num_dummy_bytes;
-	int dwidth_dsc_bytes, eoc_bytes;
-	u32 num_lanes;
-
-	num_lanes = dp_panel->link_info.num_lanes;
-	num_slices = dsc->slice_per_pkt;
-
-	eoc_bytes = dsc_byte_cnt % num_lanes;
-	tot_num_eoc_symbols = num_lanes * num_slices;
-	tot_num_hor_bytes = dsc_byte_cnt * num_slices;
-	tot_num_dummy_bytes = (num_lanes - eoc_bytes) * num_slices;
-
-	if (!eoc_bytes)
-		tot_num_dummy_bytes = 0;
-
-	dwidth_dsc_bytes = tot_num_hor_bytes + tot_num_eoc_symbols +
-				tot_num_dummy_bytes;
-
-	pr_debug("dwidth_dsc_bytes:%d, tot_num_hor_bytes:%d\n",
-			dwidth_dsc_bytes, tot_num_hor_bytes);
-
-	dp_mode->dsc_overhead_fp = drm_fixp_from_fraction(dwidth_dsc_bytes,
-			tot_num_hor_bytes);
-	dp_mode->timing.dsc_overhead_fp = dp_mode->dsc_overhead_fp;
-}
-
-static void dp_panel_dsc_pclk_param_calc(struct dp_panel *dp_panel,
-		struct msm_display_dsc_info *dsc,
-		enum msm_display_compression_ratio ratio,
-		struct dp_display_mode *dp_mode)
-{
-	int slice_per_pkt, slice_per_intf, intf_width;
-	int bytes_in_slice, total_bytes_per_intf;
-	int comp_ratio;
-	s64 temp1_fp, temp2_fp;
-	s64 numerator_fp, denominator_fp;
-	s64 dsc_byte_count_fp;
-	u32 dsc_byte_count, temp1, temp2;
-
-	intf_width = dp_mode->timing.h_active;
-	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
-		(intf_width < dsc->slice_width))
-		return;
-
-	slice_per_pkt = dsc->slice_per_pkt;
-	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
-
-	if (slice_per_pkt > slice_per_intf)
-		slice_per_pkt = 1;
-
-	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
-	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
-
-	dsc->bytes_in_slice = bytes_in_slice;
-	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
-	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
-
-	switch (ratio) {
-	case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1:
-		comp_ratio = 200;
-		break;
-	case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1:
-		comp_ratio = 300;
-		break;
-	default:
-		comp_ratio = 100;
-		break;
-	}
-
-	temp1_fp = drm_fixp_from_fraction(comp_ratio, 100);
-	temp2_fp = drm_fixp_from_fraction(slice_per_pkt * 8, 1);
-	denominator_fp = drm_fixp_mul(temp1_fp, temp2_fp);
-	numerator_fp = drm_fixp_from_fraction(intf_width * dsc->bpc * 3, 1);
-	dsc_byte_count_fp = drm_fixp_div(numerator_fp, denominator_fp);
-	dsc_byte_count = drm_fixp2int_ceil(dsc_byte_count_fp);
-
-	temp1 = dsc_byte_count * slice_per_intf;
-	temp2 = temp1;
-	if (temp1 % 3 != 0)
-		temp1 += 3 - (temp1 % 3);
-
-	dsc->eol_byte_num = temp1 - temp2;
-
-	temp1_fp = drm_fixp_from_fraction(slice_per_intf, 6);
-	temp2_fp = drm_fixp_mul(dsc_byte_count_fp, temp1_fp);
-	dsc->pclk_per_line = drm_fixp2int_ceil(temp2_fp);
-
-	_dp_panel_dsc_get_num_extra_pclk(dsc, ratio);
-	dsc->pclk_per_line--;
-
-	_dp_panel_dsc_bw_overhead_calc(dp_panel, dsc, dp_mode, dsc_byte_count);
-}
-
-static void dp_panel_dsc_populate_static_params(
-		struct msm_display_dsc_info *dsc)
-{
-	int bpp, bpc;
-	int mux_words_size;
-	int groups_per_line, groups_total;
-	int min_rate_buffer_size;
-	int hrd_delay;
-	int pre_num_extra_mux_bits, num_extra_mux_bits;
-	int slice_bits;
-	int data;
-	int final_value, final_scale;
-	int ratio_index, mod_offset;
-
-	dsc->version = 0x11;
-	dsc->scr_rev = 0;
-	dsc->rc_model_size = 8192;
-
-	if (dsc->version == 0x11 && dsc->scr_rev == 0x1)
-		dsc->first_line_bpg_offset = 15;
-	else
-		dsc->first_line_bpg_offset = 12;
-
-	dsc->edge_factor = 6;
-	dsc->tgt_offset_hi = 3;
-	dsc->tgt_offset_lo = 3;
-	dsc->enable_422 = 0;
-	dsc->convert_rgb = 1;
-	dsc->vbr_enable = 0;
-
-	dsc->buf_thresh = dp_dsc_rc_buf_thresh;
-
-	bpp = dsc->bpp;
-	bpc = dsc->bpc;
-
-	if (bpc == 12)
-		ratio_index = DSC_12BPC_8BPP;
-	else if (bpc == 10)
-		ratio_index = DSC_10BPC_8BPP;
-	else
-		ratio_index = DSC_8BPC_8BPP;
-
-	if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
-		dsc->range_min_qp =
-			dp_dsc_rc_range_min_qp_1_1_scr1[ratio_index];
-		dsc->range_max_qp =
-			dp_dsc_rc_range_max_qp_1_1_scr1[ratio_index];
-	} else {
-		dsc->range_min_qp = dp_dsc_rc_range_min_qp_1_1[ratio_index];
-		dsc->range_max_qp = dp_dsc_rc_range_max_qp_1_1[ratio_index];
-	}
-	dsc->range_bpg_offset = dp_dsc_rc_range_bpg_offset;
-
-	if (bpp <= 10)
-		dsc->initial_offset = 6144;
-	else
-		dsc->initial_offset = 2048;	/* bpp = 12 */
-
-	if (bpc == 12)
-		mux_words_size = 64;
-	else
-		mux_words_size = 48;		/* bpc == 8/10 */
-
-	dsc->line_buf_depth = bpc + 1;
-
-	if (bpc == 8) {
-		dsc->input_10_bits = 0;
-		dsc->min_qp_flatness = 3;
-		dsc->max_qp_flatness = 12;
-		dsc->quant_incr_limit0 = 11;
-		dsc->quant_incr_limit1 = 11;
-	} else if (bpc == 10) { /* 10bpc */
-		dsc->input_10_bits = 1;
-		dsc->min_qp_flatness = 7;
-		dsc->max_qp_flatness = 16;
-		dsc->quant_incr_limit0 = 15;
-		dsc->quant_incr_limit1 = 15;
-	} else { /* 12 bpc */
-		dsc->input_10_bits = 0;
-		dsc->min_qp_flatness = 11;
-		dsc->max_qp_flatness = 20;
-		dsc->quant_incr_limit0 = 19;
-		dsc->quant_incr_limit1 = 19;
-	}
-
-	mod_offset = dsc->slice_width % 3;
-	switch (mod_offset) {
-	case 0:
-		dsc->slice_last_group_size = 2;
-		break;
-	case 1:
-		dsc->slice_last_group_size = 0;
-		break;
-	case 2:
-		dsc->slice_last_group_size = 1;
-		break;
-	default:
-		break;
-	}
-
-	dsc->det_thresh_flatness = 2 << (bpc - 8);
-
-	dsc->initial_xmit_delay = dsc->rc_model_size / (2 * bpp);
-
-	groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
-
-	dsc->chunk_size = dsc->slice_width * bpp / 8;
-	if ((dsc->slice_width * bpp) % 8)
-		dsc->chunk_size++;
-
-	/* rbs-min */
-	min_rate_buffer_size =  dsc->rc_model_size - dsc->initial_offset +
-			dsc->initial_xmit_delay * bpp +
-			groups_per_line * dsc->first_line_bpg_offset;
-
-	hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, bpp);
-
-	dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
-
-	dsc->initial_scale_value = 8 * dsc->rc_model_size /
-			(dsc->rc_model_size - dsc->initial_offset);
-
-	slice_bits = 8 * dsc->chunk_size * dsc->slice_height;
-
-	groups_total = groups_per_line * dsc->slice_height;
-
-	data = dsc->first_line_bpg_offset * 2048;
-
-	dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
-
-	pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * bpc + 4) - 2);
-
-	num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
-		((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
-
-	data = 2048 * (dsc->rc_model_size - dsc->initial_offset
-		+ num_extra_mux_bits);
-	dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
-
-	data = dsc->initial_xmit_delay * bpp;
-	final_value =  dsc->rc_model_size - data + num_extra_mux_bits;
-
-	final_scale = 8 * dsc->rc_model_size /
-		(dsc->rc_model_size - final_value);
-
-	dsc->final_offset = final_value;
-
-	data = (final_scale - 9) * (dsc->nfl_bpg_offset +
-		dsc->slice_bpg_offset);
-	dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
-
-	dsc->scale_decrement_interval = groups_per_line /
-		(dsc->initial_scale_value - 8);
-}
-
-struct dp_dsc_slices_per_line {
-	u32 min_ppr;
-	u32 max_ppr;
-	u8 num_slices;
-};
-
-struct dp_dsc_slices_per_line slice_per_line_tbl[] = {
-	{0,     340,    1   },
-	{340,   680,    2   },
-	{680,   1360,   4   },
-	{1360,  3200,   8   },
-	{3200,  4800,   12  },
-	{4800,  6400,   16  },
-	{6400,  8000,   20  },
-	{8000,  9600,   24  }
-};
-
-static int dp_panel_dsc_prepare_basic_params(
-		struct msm_compression_info *comp_info,
-		const struct dp_display_mode *dp_mode,
-		struct dp_panel *dp_panel)
-{
-	int i;
-	struct dp_dsc_slices_per_line *rec;
-	int slice_width;
-	u32 ppr = dp_mode->timing.pixel_clk_khz/1000;
-	int max_slice_width;
-
-	comp_info->dsc_info.slice_per_pkt = 0;
-	for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) {
-		rec = &slice_per_line_tbl[i];
-		if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) {
-			comp_info->dsc_info.slice_per_pkt = rec->num_slices;
-			i++;
-			break;
-		}
-	}
-
-	if (comp_info->dsc_info.slice_per_pkt == 0)
-		return -EINVAL;
-
-	max_slice_width = dp_panel->dsc_dpcd[12] * 320;
-	slice_width = (dp_mode->timing.h_active /
-				comp_info->dsc_info.slice_per_pkt);
-
-	while (slice_width >= max_slice_width) {
-		if (i == ARRAY_SIZE(slice_per_line_tbl))
-			return -EINVAL;
-
-		rec = &slice_per_line_tbl[i];
-		comp_info->dsc_info.slice_per_pkt = rec->num_slices;
-		slice_width = (dp_mode->timing.h_active /
-				comp_info->dsc_info.slice_per_pkt);
-		i++;
-	}
-
-	comp_info->dsc_info.block_pred_enable =
-			dp_panel->sink_dsc_caps.block_pred_en;
-	comp_info->dsc_info.vbr_enable = 0;
-	comp_info->dsc_info.enable_422 = 0;
-	comp_info->dsc_info.convert_rgb = 1;
-	comp_info->dsc_info.input_10_bits = 0;
-
-	comp_info->dsc_info.pic_width = dp_mode->timing.h_active;
-	comp_info->dsc_info.pic_height = dp_mode->timing.v_active;
-	comp_info->dsc_info.slice_width = slice_width;
-
-	if (comp_info->dsc_info.pic_height % 16 == 0)
-		comp_info->dsc_info.slice_height = 16;
-	else if (comp_info->dsc_info.pic_height % 12 == 0)
-		comp_info->dsc_info.slice_height = 12;
-	else
-		comp_info->dsc_info.slice_height = 15;
-
-	comp_info->dsc_info.bpc = dp_mode->timing.bpp / 3;
-	comp_info->dsc_info.bpp = comp_info->dsc_info.bpc;
-	comp_info->dsc_info.full_frame_slices =
-		DIV_ROUND_UP(dp_mode->timing.h_active, slice_width);
-
-	comp_info->comp_type = MSM_DISPLAY_COMPRESSION_DSC;
-	comp_info->comp_ratio = MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1;
-	return 0;
-}
-
-static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func)
-{
-	int rlen, rc = 0;
-	struct dp_panel_private *panel;
-	struct drm_dp_link *link_info;
-	struct drm_dp_aux *drm_aux;
-	u8 *dpcd, rx_feature, temp;
-	u32 dfp_count = 0, offset = DP_DPCD_REV;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dpcd = dp_panel->dpcd;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	drm_aux = panel->aux->drm_aux;
-	link_info = &dp_panel->link_info;
-
-	/* reset vsc data */
-	panel->vsc_supported = false;
-	panel->vscext_supported = false;
-	panel->vscext_chaining_supported = false;
-
-	if (panel->custom_dpcd) {
-		pr_debug("skip dpcd read in debug mode\n");
-		goto skip_dpcd_read;
-	}
-
-	rlen = drm_dp_dpcd_read(drm_aux, DP_TRAINING_AUX_RD_INTERVAL, &temp, 1);
-	if (rlen != 1) {
-		pr_err("error reading DP_TRAINING_AUX_RD_INTERVAL\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
-	if (temp & BIT(7)) {
-		pr_debug("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
-		offset = DPRX_EXTENDED_DPCD_FIELD;
-	}
-
-	rlen = drm_dp_dpcd_read(drm_aux, offset,
-		dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
-	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
-		pr_err("dpcd read failed, rlen=%d\n", rlen);
-		if (rlen == -ETIMEDOUT)
-			rc = rlen;
-		else
-			rc = -EINVAL;
-
-		goto end;
-	}
-
-	print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ",
-		DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
-
-	rlen = drm_dp_dpcd_read(panel->aux->drm_aux,
-		DPRX_FEATURE_ENUMERATION_LIST, &rx_feature, 1);
-	if (rlen != 1) {
-		pr_debug("failed to read DPRX_FEATURE_ENUMERATION_LIST\n");
-		goto skip_dpcd_read;
-	}
-	panel->vsc_supported = !!(rx_feature &
-		VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED);
-	panel->vscext_supported = !!(rx_feature & VSC_EXT_VESA_SDP_SUPPORTED);
-	panel->vscext_chaining_supported = !!(rx_feature &
-			VSC_EXT_VESA_SDP_CHAINING_SUPPORTED);
-
-	pr_debug("vsc=%d, vscext=%d, vscext_chaining=%d\n",
-		panel->vsc_supported, panel->vscext_supported,
-		panel->vscext_chaining_supported);
-
-skip_dpcd_read:
-	panel->major = (link_info->revision >> 4) & 0x0f;
-	panel->minor = link_info->revision & 0x0f;
-
-	/* override link params updated in dp_panel_init_panel_info */
-	link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz,
-				link_info->rate);
-
-	if (multi_func)
-		link_info->num_lanes = min_t(unsigned int,
-			link_info->num_lanes, 2);
-
-	pr_debug("version:%d.%d, rate:%d, lanes:%d\n", panel->major,
-		panel->minor, link_info->rate, link_info->num_lanes);
-
-	dfp_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-						DP_DOWN_STREAM_PORT_COUNT;
-
-	if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)
-		&& (dpcd[DP_DPCD_REV] > 0x10)) {
-		rlen = drm_dp_dpcd_read(panel->aux->drm_aux,
-			DP_DOWNSTREAM_PORT_0, dp_panel->ds_ports,
-			DP_MAX_DOWNSTREAM_PORTS);
-		if (rlen < DP_MAX_DOWNSTREAM_PORTS) {
-			pr_err("ds port status failed, rlen=%d\n", rlen);
-			rc = -EINVAL;
-			goto end;
-		}
-	}
-
-	if (dfp_count > DP_MAX_DS_PORT_COUNT)
-		pr_debug("DS port count %d greater that max (%d) supported\n",
-			dfp_count, DP_MAX_DS_PORT_COUNT);
-
-end:
-	return rc;
-}
-
-static int dp_panel_set_default_link_params(struct dp_panel *dp_panel)
-{
-	struct drm_dp_link *link_info;
-	const int default_bw_code = 162000;
-	const int default_num_lanes = 1;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-	link_info = &dp_panel->link_info;
-	link_info->rate = default_bw_code;
-	link_info->num_lanes = default_num_lanes;
-	pr_debug("link_rate=%d num_lanes=%d\n",
-		link_info->rate, link_info->num_lanes);
-
-	return 0;
-}
-
-static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (edid) {
-		dp_panel->edid_ctrl->edid = (struct edid *)edid;
-		panel->custom_edid = true;
-	} else {
-		panel->custom_edid = false;
-		dp_panel->edid_ctrl->edid = NULL;
-	}
-
-	pr_debug("%d\n", panel->custom_edid);
-	return 0;
-}
-
-static int dp_panel_set_dpcd(struct dp_panel *dp_panel, u8 *dpcd)
-{
-	struct dp_panel_private *panel;
-	u8 *dp_dpcd;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	dp_dpcd = dp_panel->dpcd;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (dpcd) {
-		memcpy(dp_dpcd, dpcd, DP_RECEIVER_CAP_SIZE + 1);
-		panel->custom_dpcd = true;
-	} else {
-		panel->custom_dpcd = false;
-	}
-
-	pr_debug("%d\n", panel->custom_dpcd);
-
-	return 0;
-}
-
-static int dp_panel_read_edid(struct dp_panel *dp_panel,
-	struct drm_connector *connector)
-{
-	int ret = 0;
-	struct dp_panel_private *panel;
-	struct edid *edid;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (panel->custom_edid) {
-		pr_debug("skip edid read in debug mode\n");
-		goto end;
-	}
-
-	sde_get_edid(connector, &panel->aux->drm_aux->ddc,
-		(void **)&dp_panel->edid_ctrl);
-	if (!dp_panel->edid_ctrl->edid) {
-		pr_err("EDID read failed\n");
-		ret = -EINVAL;
-		goto end;
-	}
-end:
-	edid = dp_panel->edid_ctrl->edid;
-	dp_panel->audio_supported = drm_detect_monitor_audio(edid);
-
-	return ret;
-}
-
-static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel)
-{
-	s64 fec_overhead_fp = drm_fixp_from_fraction(1, 1);
-
-	if (!dp_panel->dsc_feature_enable || !dp_panel->fec_feature_enable) {
-		pr_debug("source dsc is not supported\n");
-		return;
-	}
-
-	if (dp_panel->dsc_dpcd[0] && dp_panel->fec_dpcd) {
-		dp_panel->sink_dsc_caps.dsc_capable = true;
-		dp_panel->sink_dsc_caps.version = dp_panel->dsc_dpcd[1];
-		dp_panel->sink_dsc_caps.block_pred_en =
-			dp_panel->dsc_dpcd[6] ? true : false;
-
-		if (dp_panel->sink_dsc_caps.version >= 0x11)
-			dp_panel->dsc_en = true;
-	} else {
-		dp_panel->sink_dsc_caps.dsc_capable = false;
-		dp_panel->dsc_en = false;
-	}
-
-	dp_panel->fec_en = dp_panel->dsc_en;
-	dp_panel->widebus_en = dp_panel->dsc_en;
-
-	/* fec_overhead = 1.00 / 0.97582 */
-	if (dp_panel->fec_en)
-		fec_overhead_fp = drm_fixp_from_fraction(100000, 97582);
-
-	dp_panel->fec_overhead_fp = fec_overhead_fp;
-}
-
-static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel)
-{
-	int rlen;
-	struct dp_panel_private *panel;
-	const int fec_cap = 0x90;
-	int dpcd_rev;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp_panel->dsc_en = false;
-	dp_panel->fec_en = false;
-
-	dpcd_rev = dp_panel->dpcd[DP_DPCD_REV];
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	dp_panel->fec_overhead_fp = 0;
-	if (panel->parser->dsc_feature_enable && dpcd_rev >= 0x14) {
-		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DSC_SUPPORT,
-			dp_panel->dsc_dpcd, (DP_RECEIVER_DSC_CAP_SIZE + 1));
-		if (rlen < (DP_RECEIVER_DSC_CAP_SIZE + 1)) {
-			pr_debug("dsc dpcd read failed, rlen=%d\n", rlen);
-			return;
-		}
-
-		print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DSC DPCD: ",
-			DUMP_PREFIX_NONE, 8, 1, dp_panel->dsc_dpcd, rlen,
-			false);
-
-		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, fec_cap,
-			&dp_panel->fec_dpcd, 1);
-		if (rlen < 1) {
-			pr_err("fec dpcd read failed, rlen=%d\n", rlen);
-			return;
-		}
-
-		dp_panel_decode_dsc_dpcd(dp_panel);
-	}
-}
-
-static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
-	struct drm_connector *connector, bool multi_func)
-{
-	int rc = 0, rlen, count, downstream_ports;
-	const int count_len = 1;
-	struct dp_panel_private *panel;
-
-	if (!dp_panel || !connector) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	rc = dp_panel_read_dpcd(dp_panel, multi_func);
-	if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code(
-		dp_panel->link_info.rate)) || !is_lane_count_valid(
-		dp_panel->link_info.num_lanes) ||
-		((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) >
-		dp_panel->max_bw_code)) {
-		if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) {
-			pr_err("DPCD read failed, return early\n");
-			goto end;
-		}
-		pr_err("panel dpcd read failed/incorrect, set default params\n");
-		dp_panel_set_default_link_params(dp_panel);
-	}
-
-	downstream_ports = dp_panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-				DP_DWN_STRM_PORT_PRESENT;
-
-	if (downstream_ports) {
-		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT,
-				&count, count_len);
-		if (rlen == count_len) {
-			count = DP_GET_SINK_COUNT(count);
-			if (!count) {
-				pr_err("no downstream ports connected\n");
-				panel->link->sink_count.count = 0;
-				rc = -ENOTCONN;
-				goto end;
-			}
-		}
-	}
-
-	rc = dp_panel_read_edid(dp_panel, connector);
-	if (rc) {
-		pr_err("panel edid read failed, set failsafe mode\n");
-		return rc;
-	}
-
-	dp_panel->widebus_en = panel->parser->has_widebus;
-	dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
-	dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
-
-	dp_panel_read_sink_dsc_caps(dp_panel);
-end:
-	return rc;
-}
-
-static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
-		u32 mode_edid_bpp, u32 mode_pclk_khz)
-{
-	struct drm_dp_link *link_info;
-	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
-	u32 bpp = 0, data_rate_khz = 0;
-
-	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
-
-	link_info = &dp_panel->link_info;
-	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
-
-	while (bpp > min_supported_bpp) {
-		if (mode_pclk_khz * bpp <= data_rate_khz)
-			break;
-		bpp -= 6;
-	}
-
-	return bpp;
-}
-
-static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
-		u32 mode_edid_bpp, u32 mode_pclk_khz)
-{
-	struct dp_panel_private *panel;
-	u32 bpp = mode_edid_bpp;
-
-	if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
-		pr_err("invalid input\n");
-		return 0;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (dp_panel->video_test)
-		bpp = dp_link_bit_depth_to_bpp(
-				panel->link->test_video.test_bit_depth);
-	else
-		bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
-				mode_pclk_khz);
-
-	return bpp;
-}
-
-static void dp_panel_set_test_mode(struct dp_panel_private *panel,
-		struct dp_display_mode *mode)
-{
-	struct dp_panel_info *pinfo = NULL;
-	struct dp_link_test_video *test_info = NULL;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return;
-	}
-
-	pinfo = &mode->timing;
-	test_info = &panel->link->test_video;
-
-	pinfo->h_active = test_info->test_h_width;
-	pinfo->h_sync_width = test_info->test_hsync_width;
-	pinfo->h_back_porch = test_info->test_h_start -
-		test_info->test_hsync_width;
-	pinfo->h_front_porch = test_info->test_h_total -
-		(test_info->test_h_start + test_info->test_h_width);
-
-	pinfo->v_active = test_info->test_v_height;
-	pinfo->v_sync_width = test_info->test_vsync_width;
-	pinfo->v_back_porch = test_info->test_v_start -
-		test_info->test_vsync_width;
-	pinfo->v_front_porch = test_info->test_v_total -
-		(test_info->test_v_start + test_info->test_v_height);
-
-	pinfo->bpp = dp_link_bit_depth_to_bpp(test_info->test_bit_depth);
-	pinfo->h_active_low = test_info->test_hsync_pol;
-	pinfo->v_active_low = test_info->test_vsync_pol;
-
-	pinfo->refresh_rate = test_info->test_rr_n;
-	pinfo->pixel_clk_khz = test_info->test_h_total *
-		test_info->test_v_total * pinfo->refresh_rate;
-
-	if (test_info->test_rr_d == 0)
-		pinfo->pixel_clk_khz /= 1000;
-	else
-		pinfo->pixel_clk_khz /= 1001;
-
-	if (test_info->test_h_width == 640)
-		pinfo->pixel_clk_khz = 25170;
-}
-
-static int dp_panel_get_modes(struct dp_panel *dp_panel,
-	struct drm_connector *connector, struct dp_display_mode *mode)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (dp_panel->video_test) {
-		dp_panel_set_test_mode(panel, mode);
-		return 1;
-	} else if (dp_panel->edid_ctrl->edid) {
-		return _sde_edid_update_modes(connector, dp_panel->edid_ctrl);
-	}
-
-	/* fail-safe mode */
-	memcpy(&mode->timing, &fail_safe,
-		sizeof(fail_safe));
-	return 1;
-}
-
-static void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
-		u8 checksum = sde_get_edid_checksum(dp_panel->edid_ctrl);
-
-		panel->link->send_edid_checksum(panel->link, checksum);
-		panel->link->send_test_response(panel->link);
-	}
-}
-
-static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
-{
-	u32 hsync_start_x, hsync_end_x;
-	struct dp_catalog_panel *catalog;
-	struct dp_panel_private *panel;
-	struct dp_panel_info *pinfo;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (dp_panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", dp_panel->stream_id);
-		return;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-	pinfo = &panel->dp_panel.pinfo;
-
-	if (!panel->panel_on) {
-		pr_debug("DP panel not enabled, handle TPG on next panel on\n");
-		return;
-	}
-
-	if (!enable) {
-		panel->catalog->tpg_config(catalog, false);
-		return;
-	}
-
-	/* TPG config */
-	catalog->hsync_period = pinfo->h_sync_width + pinfo->h_back_porch +
-			pinfo->h_active + pinfo->h_front_porch;
-	catalog->vsync_period = pinfo->v_sync_width + pinfo->v_back_porch +
-			pinfo->v_active + pinfo->v_front_porch;
-
-	catalog->display_v_start = ((pinfo->v_sync_width +
-			pinfo->v_back_porch) * catalog->hsync_period);
-	catalog->display_v_end = ((catalog->vsync_period -
-			pinfo->v_front_porch) * catalog->hsync_period) - 1;
-
-	catalog->display_v_start += pinfo->h_sync_width + pinfo->h_back_porch;
-	catalog->display_v_end -= pinfo->h_front_porch;
-
-	hsync_start_x = pinfo->h_back_porch + pinfo->h_sync_width;
-	hsync_end_x = catalog->hsync_period - pinfo->h_front_porch - 1;
-
-	catalog->v_sync_width = pinfo->v_sync_width;
-
-	catalog->hsync_ctl = (catalog->hsync_period << 16) |
-			pinfo->h_sync_width;
-	catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
-	panel->catalog->tpg_config(catalog, true);
-}
-
-static int dp_panel_config_timing(struct dp_panel *dp_panel)
-{
-	int rc = 0;
-	u32 data, total_ver, total_hor;
-	struct dp_catalog_panel *catalog;
-	struct dp_panel_private *panel;
-	struct dp_panel_info *pinfo;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-	pinfo = &panel->dp_panel.pinfo;
-
-	pr_debug("width=%d hporch= %d %d %d\n",
-		pinfo->h_active, pinfo->h_back_porch,
-		pinfo->h_front_porch, pinfo->h_sync_width);
-
-	pr_debug("height=%d vporch= %d %d %d\n",
-		pinfo->v_active, pinfo->v_back_porch,
-		pinfo->v_front_porch, pinfo->v_sync_width);
-
-	total_hor = pinfo->h_active + pinfo->h_back_porch +
-		pinfo->h_front_porch + pinfo->h_sync_width;
-
-	total_ver = pinfo->v_active + pinfo->v_back_porch +
-			pinfo->v_front_porch + pinfo->v_sync_width;
-
-	data = total_ver;
-	data <<= 16;
-	data |= total_hor;
-
-	catalog->total = data;
-
-	data = (pinfo->v_back_porch + pinfo->v_sync_width);
-	data <<= 16;
-	data |= (pinfo->h_back_porch + pinfo->h_sync_width);
-
-	catalog->sync_start = data;
-
-	data = pinfo->v_sync_width;
-	data <<= 16;
-	data |= (pinfo->v_active_low << 31);
-	data |= pinfo->h_sync_width;
-	data |= (pinfo->h_active_low << 15);
-
-	catalog->width_blanking = data;
-
-	data = pinfo->v_active;
-	data <<= 16;
-	data |= pinfo->h_active;
-
-	catalog->dp_active = data;
-
-	catalog->widebus_en = pinfo->widebus_en;
-
-	panel->catalog->timing_cfg(catalog);
-	panel->panel_on = true;
-end:
-	return rc;
-}
-
-static u32 _dp_panel_calc_be_in_lane(struct dp_panel *dp_panel)
-{
-	struct dp_panel_info *pinfo;
-	struct msm_compression_info *comp_info;
-	u32 dsc_htot_byte_cnt, mod_result;
-	u32 numerator, denominator;
-	s64 temp_fp;
-	u32 be_in_lane = 10;
-
-	pinfo = &dp_panel->pinfo;
-	comp_info = &pinfo->comp_info;
-
-	if (!dp_panel->mst_state)
-		return be_in_lane;
-
-	switch (pinfo->comp_info.comp_ratio) {
-	case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1:
-		denominator = 16; /* 2 * bits-in-byte */
-		break;
-	case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1:
-		denominator = 24; /* 3 * bits-in-byte */
-		break;
-	default:
-		denominator = 8; /* 1 * bits-in-byte */
-	}
-
-	numerator = (pinfo->h_active + pinfo->h_back_porch +
-				pinfo->h_front_porch + pinfo->h_sync_width) *
-				pinfo->bpp;
-	temp_fp = drm_fixp_from_fraction(numerator, denominator);
-	dsc_htot_byte_cnt = drm_fixp2int_ceil(temp_fp);
-
-	mod_result = dsc_htot_byte_cnt % 12;
-	if (mod_result == 0)
-		be_in_lane = 8;
-	else if (mod_result <= 3)
-		be_in_lane = 1;
-	else if (mod_result <= 6)
-		be_in_lane = 2;
-	else if (mod_result <= 9)
-		be_in_lane = 4;
-	else if (mod_result <= 11)
-		be_in_lane = 8;
-	else
-		be_in_lane = 10;
-
-	return be_in_lane;
-}
-
-static void dp_panel_config_dsc(struct dp_panel *dp_panel, bool enable)
-{
-	struct dp_catalog_panel *catalog;
-	struct dp_panel_private *panel;
-	struct dp_panel_info *pinfo;
-	struct msm_compression_info *comp_info;
-	struct dp_dsc_cfg_data *dsc;
-	int pps_len;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	catalog = panel->catalog;
-	dsc = &catalog->dsc;
-	pinfo = &dp_panel->pinfo;
-	comp_info = &pinfo->comp_info;
-
-	if (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC && enable) {
-		pps_len = dp_panel_dsc_create_pps_buf_cmd(&comp_info->dsc_info,
-				dsc->pps, 0);
-		dsc->pps_len = pps_len;
-		dp_panel_dsc_prepare_pps_packet(dp_panel);
-
-		dsc->slice_per_pkt = comp_info->dsc_info.slice_per_pkt - 1;
-		dsc->bytes_per_pkt = comp_info->dsc_info.bytes_per_pkt;
-		dsc->bytes_per_pkt /= comp_info->dsc_info.slice_per_pkt;
-		dsc->eol_byte_num = comp_info->dsc_info.eol_byte_num;
-		dsc->dto_count = comp_info->dsc_info.pclk_per_line;
-		dsc->be_in_lane = _dp_panel_calc_be_in_lane(dp_panel);
-		dsc->dsc_en = true;
-		dsc->dto_en = true;
-
-		_dp_panel_get_dto_m_n(comp_info->comp_ratio, pinfo->bpp,
-				&dsc->dto_n, &dsc->dto_d);
-	} else {
-		dsc->dsc_en = false;
-		dsc->dto_en = false;
-		dsc->dto_n = 0;
-		dsc->dto_d = 0;
-	}
-
-	catalog->stream_id = dp_panel->stream_id;
-	catalog->dsc_cfg(catalog);
-
-	if (catalog->dsc.dsc_en && enable)
-		catalog->pps_flush(catalog);
-}
-
-static int dp_panel_edid_register(struct dp_panel_private *panel)
-{
-	int rc = 0;
-
-	panel->dp_panel.edid_ctrl = sde_edid_init();
-	if (!panel->dp_panel.edid_ctrl) {
-		pr_err("sde edid init for DP failed\n");
-		rc = -ENOMEM;
-	}
-
-	return rc;
-}
-
-static void dp_panel_edid_deregister(struct dp_panel_private *panel)
-{
-	sde_edid_deinit((void **)&panel->dp_panel.edid_ctrl);
-}
-
-static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
-		enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn, int vcpi)
-{
-	if (!dp_panel || stream_id > DP_STREAM_MAX) {
-		pr_err("invalid input. stream_id: %d\n", stream_id);
-		return -EINVAL;
-	}
-
-	dp_panel->vcpi = vcpi;
-	dp_panel->stream_id = stream_id;
-	dp_panel->channel_start_slot = ch_start_slot;
-	dp_panel->channel_total_slots = ch_tot_slots;
-	dp_panel->pbn = pbn;
-
-	return 0;
-}
-
-static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
-{
-	int rc = 0;
-	struct dp_panel_private *panel;
-	struct dp_panel_info *pinfo;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	pinfo = &dp_panel->pinfo;
-
-	drm_dp_dpcd_writeb(panel->aux->drm_aux, DP_SET_POWER, DP_SET_POWER_D0);
-
-	/*
-	* According to the DP 1.1 specification, a "Sink Device must exit the
-	* power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
-	* Control Field" (register 0x600).
-	*/
-	usleep_range(1000, 2000);
-
-	drm_dp_link_probe(panel->aux->drm_aux, &dp_panel->link_info);
-end:
-	return rc;
-}
-
-static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel, u32 flags)
-{
-	int rc = 0;
-	struct dp_panel_private *panel;
-	struct dp_catalog_hdr_data *hdr;
-	struct drm_connector *connector;
-	struct sde_connector_state *c_state;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) {
-		pr_debug("retain states in src initiated power down request\n");
-		return 0;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	hdr = &panel->catalog->hdr_data;
-
-	if (!panel->custom_edid && dp_panel->edid_ctrl->edid)
-		sde_free_edid((void **)&dp_panel->edid_ctrl);
-
-	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
-	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
-	memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
-	panel->panel_on = false;
-
-	connector = dp_panel->connector;
-	c_state = to_sde_connector_state(connector->state);
-
-	connector->hdr_eotf = 0;
-	connector->hdr_metadata_type_one = 0;
-	connector->hdr_max_luminance = 0;
-	connector->hdr_avg_luminance = 0;
-	connector->hdr_min_luminance = 0;
-	connector->hdr_supported = false;
-	connector->hdr_plus_app_ver = 0;
-
-	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
-	memset(&c_state->dyn_hdr_meta, 0, sizeof(c_state->dyn_hdr_meta));
-
-	return rc;
-}
-
-static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
-{
-	const u32 encoding_factx10 = 8;
-	u32 min_link_rate_khz = 0, lane_cnt;
-	struct dp_panel_info *pinfo;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	lane_cnt = dp_panel->link_info.num_lanes;
-	pinfo = &dp_panel->pinfo;
-
-	/* num_lanes * lane_count * 8 >= pclk * bpp * 10 */
-	min_link_rate_khz = pinfo->pixel_clk_khz /
-				(lane_cnt * encoding_factx10);
-	min_link_rate_khz *= pinfo->bpp;
-
-	pr_debug("min lclk req=%d khz for pclk=%d khz, lanes=%d, bpp=%d\n",
-		min_link_rate_khz, pinfo->pixel_clk_khz, lane_cnt,
-		pinfo->bpp);
-end:
-	return min_link_rate_khz;
-}
-
-static bool dp_panel_hdr_supported(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return false;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	return panel->major >= 1 && panel->vsc_supported &&
-		(panel->minor >= 4 || panel->vscext_supported);
-}
-
-static u32 dp_panel_calc_dhdr_pkt_limit(struct dp_panel *dp_panel,
-		struct dp_dhdr_maxpkt_calc_input *input)
-{
-	s64 mdpclk_fp = drm_fixp_from_fraction(input->mdp_clk, 1000000);
-	s64 lclk_fp = drm_fixp_from_fraction(input->lclk, 1000);
-	s64 pclk_fp = drm_fixp_from_fraction(input->pclk, 1000);
-	s64 nlanes_fp = drm_int2fixp(input->nlanes);
-	s64 target_sc = input->mst_target_sc;
-	s64 hactive_fp = drm_int2fixp(input->h_active);
-	const s64 i1_fp = DRM_FIXED_ONE;
-	const s64 i2_fp = drm_int2fixp(2);
-	const s64 i10_fp = drm_int2fixp(10);
-	const s64 i56_fp = drm_int2fixp(56);
-	const s64 i64_fp = drm_int2fixp(64);
-	s64 mst_bw_fp = i1_fp;
-	s64 fec_factor_fp = i1_fp;
-	s64 mst_bw64_fp, mst_bw64_ceil_fp, nlanes56_fp;
-	u32 f1, f2, f3, f4, f5, deploy_period, target_period;
-	s64 f3_f5_slot_fp;
-	u32 calc_pkt_limit;
-	const u32 max_pkt_limit = 64;
-
-	if (input->fec_en && input->mst_en)
-		fec_factor_fp = drm_fixp_from_fraction(64000, 65537);
-
-	if (input->mst_en)
-		mst_bw_fp = drm_fixp_div(target_sc, i64_fp);
-
-	f1 = drm_fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i10_fp, lclk_fp),
-			mdpclk_fp));
-	f2 = drm_fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i2_fp, lclk_fp),
-			mdpclk_fp)) + drm_fixp2int_ceil(drm_fixp_div(
-			drm_fixp_mul(i1_fp, lclk_fp), mdpclk_fp));
-
-	mst_bw64_fp = drm_fixp_mul(mst_bw_fp, i64_fp);
-	if (drm_fixp2int(mst_bw64_fp) == 0)
-		f3_f5_slot_fp = drm_fixp_div(i1_fp, drm_int2fixp(
-				drm_fixp2int_ceil(drm_fixp_div(
-				i1_fp, mst_bw64_fp))));
-	else
-		f3_f5_slot_fp = drm_int2fixp(drm_fixp2int(mst_bw_fp));
-
-	mst_bw64_ceil_fp = drm_int2fixp(drm_fixp2int_ceil(mst_bw64_fp));
-	f3 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int(
-				drm_fixp_div(i2_fp, f3_f5_slot_fp)) + 1),
-				(i64_fp - mst_bw64_ceil_fp))) + 2;
-
-	if (!input->mst_en) {
-		f4 = 1 + drm_fixp2int(drm_fixp_div(drm_int2fixp(50),
-				nlanes_fp)) + drm_fixp2int(drm_fixp_div(
-				nlanes_fp, i2_fp));
-		f5 = 0;
-	} else {
-		f4 = 0;
-		nlanes56_fp = drm_fixp_div(i56_fp, nlanes_fp);
-		f5 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int(
-				drm_fixp_div(i1_fp + nlanes56_fp,
-				f3_f5_slot_fp)) + 1), (i64_fp -
-				mst_bw64_ceil_fp + i1_fp + nlanes56_fp)));
-	}
-
-	deploy_period = f1 + f2 + f3 + f4 + f5 + 19;
-	target_period = drm_fixp2int(drm_fixp_mul(fec_factor_fp, drm_fixp_mul(
-			hactive_fp, drm_fixp_div(lclk_fp, pclk_fp))));
-
-	calc_pkt_limit = target_period / deploy_period;
-
-	pr_debug("input: %d, %d, %d, %d, %d, 0x%llx, %d, %d\n",
-		input->mdp_clk, input->lclk, input->pclk, input->h_active,
-		input->nlanes, input->mst_target_sc, input->mst_en ? 1 : 0,
-		input->fec_en ? 1 : 0);
-	pr_debug("factors: %d, %d, %d, %d, %d\n", f1, f2, f3, f4, f5);
-	pr_debug("d_p: %d, t_p: %d, maxPkts: %d%s\n", deploy_period,
-		target_period, calc_pkt_limit, calc_pkt_limit > max_pkt_limit ?
-		" CAPPED" : "");
-
-	if (calc_pkt_limit > max_pkt_limit)
-		calc_pkt_limit = max_pkt_limit;
-
-	pr_debug("packet limit per line = %d\n", calc_pkt_limit);
-	return calc_pkt_limit;
-}
-
-static int dp_panel_setup_hdr(struct dp_panel *dp_panel,
-		struct drm_msm_ext_hdr_metadata *hdr_meta,
-		bool dhdr_update, u64 core_clk_rate)
-{
-	int rc = 0, max_pkts = 0;
-	struct dp_panel_private *panel;
-	struct dp_catalog_hdr_data *hdr;
-	struct dp_dhdr_maxpkt_calc_input input;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	hdr = &panel->catalog->hdr_data;
-
-	/* use cached meta data in case meta data not provided */
-	if (!hdr_meta) {
-		if (hdr->hdr_meta.hdr_state)
-			goto cached;
-		else
-			goto end;
-	}
-
-	panel->hdr_state = hdr_meta->hdr_state;
-
-	hdr->vsc_header_byte0 = 0x00;
-	hdr->vsc_header_byte1 = 0x07;
-	hdr->vsc_header_byte2 = 0x05;
-	hdr->vsc_header_byte3 = 0x13;
-
-	hdr->shdr_header_byte0 = 0x00;
-	hdr->shdr_header_byte1 = 0x87;
-	hdr->shdr_header_byte2 = 0x1D;
-	hdr->shdr_header_byte3 = 0x13 << 2;
-
-	/* VSC SDP Payload for DB16 */
-	hdr->pixel_encoding = RGB;
-	hdr->colorimetry = ITU_R_BT_2020_RGB;
-
-	/* VSC SDP Payload for DB17 */
-	hdr->dynamic_range = CEA;
-
-	/* VSC SDP Payload for DB18 */
-	hdr->content_type = GRAPHICS;
-
-	hdr->bpc = dp_panel->pinfo.bpp / 3;
-
-	hdr->version = 0x01;
-	hdr->length = 0x1A;
-
-	if (panel->hdr_state)
-		memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
-	else
-		memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
-cached:
-	if (dhdr_update) {
-		hdr->vscext_header_byte0 = 0x00;
-		hdr->vscext_header_byte1 = 0x81;
-		hdr->vscext_header_byte2 = 0x1D;
-		hdr->vscext_header_byte3 = 0x13 << 2;
-
-		input.mdp_clk = core_clk_rate;
-		input.lclk = dp_panel->link_info.rate;
-		input.nlanes = dp_panel->link_info.num_lanes;
-		input.pclk = dp_panel->pinfo.pixel_clk_khz;
-		input.h_active = dp_panel->pinfo.h_active;
-		input.mst_target_sc = dp_panel->mst_target_sc;
-		input.mst_en = dp_panel->mst_state;
-		input.fec_en = dp_panel->fec_en;
-		max_pkts = dp_panel_calc_dhdr_pkt_limit(dp_panel, &input);
-	}
-
-	if (panel->panel_on) {
-		panel->catalog->stream_id = dp_panel->stream_id;
-		panel->catalog->config_hdr(panel->catalog, panel->hdr_state,
-				max_pkts);
-		if (dhdr_update)
-			panel->catalog->dhdr_flush(panel->catalog);
-	}
-end:
-	return rc;
-}
-
-static int dp_panel_spd_config(struct dp_panel *dp_panel)
-{
-	int rc = 0;
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (dp_panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream id:%d\n", dp_panel->stream_id);
-		return -EINVAL;
-	}
-
-	if (!dp_panel->spd_enabled) {
-		pr_debug("SPD Infoframe not enabled\n");
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	panel->catalog->spd_vendor_name = panel->spd_vendor_name;
-	panel->catalog->spd_product_description =
-		panel->spd_product_description;
-
-	panel->catalog->stream_id = dp_panel->stream_id;
-	panel->catalog->config_spd(panel->catalog);
-end:
-	return rc;
-}
-
-static void dp_panel_config_ctrl(struct dp_panel *dp_panel)
-{
-	u32 config = 0, tbd;
-	u8 *dpcd = dp_panel->dpcd;
-	struct dp_panel_private *panel;
-	struct dp_catalog_panel *catalog;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-
-	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
-	config |= (0 << 11); /* RGB */
-
-	tbd = panel->link->get_test_bits_depth(panel->link,
-			dp_panel->pinfo.bpp);
-
-	if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN)
-		tbd = DP_TEST_BIT_DEPTH_8;
-
-	config |= tbd << 8;
-
-	/* Num of Lanes */
-	config |= ((panel->link->link_params.lane_count - 1) << 4);
-
-	if (drm_dp_enhanced_frame_cap(dpcd))
-		config |= 0x40;
-
-	config |= 0x04; /* progressive video */
-
-	config |= 0x03;	/* sycn clock & static Mvid */
-
-	catalog->config_ctrl(catalog, config);
-}
-
-static void dp_panel_config_misc(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-	struct dp_catalog_panel *catalog;
-	u32 misc_val;
-	u32 tb, cc;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-
-	tb = panel->link->get_test_bits_depth(panel->link, dp_panel->pinfo.bpp);
-	cc = panel->link->get_colorimetry_config(panel->link);
-
-	misc_val = cc;
-	misc_val |= (tb << 5);
-	misc_val |= BIT(0); /* Configure clock to synchronous mode */
-
-	catalog->misc_val = misc_val;
-	catalog->config_misc(catalog);
-}
-
-static void dp_panel_config_msa(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-	struct dp_catalog_panel *catalog;
-	u32 rate;
-	u32 stream_rate_khz;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	catalog = panel->catalog;
-
-	catalog->widebus_en = dp_panel->widebus_en;
-
-	rate = drm_dp_bw_code_to_link_rate(panel->link->link_params.bw_code);
-	stream_rate_khz = dp_panel->pinfo.pixel_clk_khz;
-
-	catalog->config_msa(catalog, rate, stream_rate_khz);
-}
-
-static void dp_panel_resolution_info(struct dp_panel_private *panel)
-{
-	struct dp_panel_info *pinfo = &panel->dp_panel.pinfo;
-
-	/*
-	 * print resolution info as this is a result
-	 * of user initiated action of cable connection
-	 */
-	pr_info("DP RESOLUTION: active(back|front|width|low)\n");
-	pr_info("%d(%d|%d|%d|%d)x%d(%d|%d|%d|%d)@%dfps %dbpp %dKhz %dLR %dLn\n",
-		pinfo->h_active, pinfo->h_back_porch, pinfo->h_front_porch,
-		pinfo->h_sync_width, pinfo->h_active_low,
-		pinfo->v_active, pinfo->v_back_porch, pinfo->v_front_porch,
-		pinfo->v_sync_width, pinfo->v_active_low,
-		pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz,
-		panel->link->link_params.bw_code,
-		panel->link->link_params.lane_count);
-}
-
-static int dp_panel_hw_cfg(struct dp_panel *dp_panel, bool enable)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (dp_panel->stream_id >= DP_STREAM_MAX) {
-		pr_err("invalid stream_id: %d\n", dp_panel->stream_id);
-		return -EINVAL;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-	panel->catalog->stream_id = dp_panel->stream_id;
-
-	if (enable) {
-		dp_panel_config_ctrl(dp_panel);
-		dp_panel_config_misc(dp_panel);
-		dp_panel_config_msa(dp_panel);
-		dp_panel_config_dsc(dp_panel, enable);
-		dp_panel_config_tr_unit(dp_panel);
-		dp_panel_config_timing(dp_panel);
-		dp_panel_resolution_info(panel);
-	}
-
-	panel->catalog->config_dto(panel->catalog, !enable);
-
-	return 0;
-}
-
-static int dp_panel_read_sink_sts(struct dp_panel *dp_panel, u8 *sts, u32 size)
-{
-	int rlen, rc = 0;
-	struct dp_panel_private *panel;
-
-	if (!dp_panel || !sts || !size) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		return rc;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT_ESI,
-		sts, size);
-	if (rlen != size) {
-		pr_err("dpcd sink sts fail rlen:%d size:%d\n", rlen, size);
-		rc = -EINVAL;
-		return rc;
-	}
-
-	return 0;
-}
-
-static int dp_panel_update_edid(struct dp_panel *dp_panel, struct edid *edid)
-{
-	int rc;
-
-	dp_panel->edid_ctrl->edid = edid;
-	sde_parse_edid(dp_panel->edid_ctrl);
-
-	rc = _sde_edid_update_modes(dp_panel->connector, dp_panel->edid_ctrl);
-	dp_panel->audio_supported = drm_detect_monitor_audio(edid);
-
-	return rc;
-}
-
-static bool dp_panel_read_mst_cap(struct dp_panel *dp_panel)
-{
-	int rlen;
-	struct dp_panel_private *panel;
-	u8 dpcd;
-	bool mst_cap = false;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_MSTM_CAP,
-		&dpcd, 1);
-	if (rlen < 1) {
-		pr_err("dpcd mstm_cap read failed, rlen=%d\n", rlen);
-		goto end;
-	}
-
-	mst_cap = (dpcd & DP_MST_CAP) ? true : false;
-
-end:
-	pr_debug("dp mst-cap: %d\n", mst_cap);
-
-	return mst_cap;
-}
-
-static void dp_panel_convert_to_dp_mode(struct dp_panel *dp_panel,
-		const struct drm_display_mode *drm_mode,
-		struct dp_display_mode *dp_mode)
-{
-	const u32 num_components = 3, default_bpp = 24;
-	struct msm_compression_info *comp_info;
-	bool dsc_cap = (dp_mode->capabilities & DP_PANEL_CAPS_DSC) ?
-				true : false;
-
-	dp_mode->timing.h_active = drm_mode->hdisplay;
-	dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
-	dp_mode->timing.h_sync_width = drm_mode->htotal -
-			(drm_mode->hsync_start + dp_mode->timing.h_back_porch);
-	dp_mode->timing.h_front_porch = drm_mode->hsync_start -
-					 drm_mode->hdisplay;
-	dp_mode->timing.h_skew = drm_mode->hskew;
-
-	dp_mode->timing.v_active = drm_mode->vdisplay;
-	dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
-	dp_mode->timing.v_sync_width = drm_mode->vtotal -
-		(drm_mode->vsync_start + dp_mode->timing.v_back_porch);
-
-	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
-					 drm_mode->vdisplay;
-
-	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
-
-	dp_mode->timing.pixel_clk_khz = drm_mode->clock;
-
-	dp_mode->timing.v_active_low =
-		!!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC);
-
-	dp_mode->timing.h_active_low =
-		!!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC);
-
-	dp_mode->timing.bpp =
-		dp_panel->connector->display_info.bpc * num_components;
-	if (!dp_mode->timing.bpp)
-		dp_mode->timing.bpp = default_bpp;
-
-	dp_mode->timing.bpp = dp_panel_get_mode_bpp(dp_panel,
-			dp_mode->timing.bpp, dp_mode->timing.pixel_clk_khz);
-
-	dp_mode->timing.widebus_en = dp_panel->widebus_en;
-	dp_mode->timing.dsc_overhead_fp = 0;
-
-	if (dp_panel->dsc_en && dsc_cap) {
-		comp_info = &dp_mode->timing.comp_info;
-
-		if (dp_panel_dsc_prepare_basic_params(comp_info,
-					dp_mode, dp_panel)) {
-			pr_debug("prepare DSC basic params failed\n");
-			return;
-		}
-
-		dp_panel_dsc_populate_static_params(&comp_info->dsc_info);
-		dp_panel_dsc_pclk_param_calc(dp_panel,
-				&comp_info->dsc_info,
-				comp_info->comp_ratio,
-				dp_mode);
-	}
-	dp_mode->fec_overhead_fp = dp_panel->fec_overhead_fp;
-}
-
-static void dp_panel_update_pps(struct dp_panel *dp_panel, char *pps_cmd)
-{
-	struct dp_catalog_panel *catalog;
-	struct dp_panel_private *panel;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	catalog = panel->catalog;
-	catalog->stream_id = dp_panel->stream_id;
-	catalog->pps_flush(catalog);
-}
-
-struct dp_panel *dp_panel_get(struct dp_panel_in *in)
-{
-	int rc = 0;
-	struct dp_panel_private *panel;
-	struct dp_panel *dp_panel;
-	struct sde_connector *sde_conn;
-
-	if (!in->dev || !in->catalog || !in->aux ||
-			!in->link || !in->connector) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
-	if (!panel) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	panel->dev = in->dev;
-	panel->aux = in->aux;
-	panel->catalog = in->catalog;
-	panel->link = in->link;
-	panel->parser = in->parser;
-
-	dp_panel = &panel->dp_panel;
-	dp_panel->max_bw_code = DP_LINK_BW_8_1;
-	dp_panel->spd_enabled = true;
-	memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
-	memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
-	dp_panel->connector = in->connector;
-
-	dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
-	dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
-
-	if (in->base_panel) {
-		memcpy(dp_panel->dpcd, in->base_panel->dpcd,
-				DP_RECEIVER_CAP_SIZE + 1);
-		memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd,
-				DP_RECEIVER_DSC_CAP_SIZE + 1);
-		memcpy(&dp_panel->link_info, &in->base_panel->link_info,
-				sizeof(dp_panel->link_info));
-		dp_panel->mst_state = in->base_panel->mst_state;
-		dp_panel->widebus_en = in->base_panel->widebus_en;
-		dp_panel->fec_en = in->base_panel->fec_en;
-		dp_panel->dsc_en = in->base_panel->dsc_en;
-		dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp;
-	}
-
-	dp_panel->init = dp_panel_init_panel_info;
-	dp_panel->deinit = dp_panel_deinit_panel_info;
-	dp_panel->hw_cfg = dp_panel_hw_cfg;
-	dp_panel->read_sink_caps = dp_panel_read_sink_caps;
-	dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
-	dp_panel->get_mode_bpp = dp_panel_get_mode_bpp;
-	dp_panel->get_modes = dp_panel_get_modes;
-	dp_panel->handle_sink_request = dp_panel_handle_sink_request;
-	dp_panel->set_edid = dp_panel_set_edid;
-	dp_panel->set_dpcd = dp_panel_set_dpcd;
-	dp_panel->tpg_config = dp_panel_tpg_config;
-	dp_panel->spd_config = dp_panel_spd_config;
-	dp_panel->setup_hdr = dp_panel_setup_hdr;
-	dp_panel->hdr_supported = dp_panel_hdr_supported;
-	dp_panel->set_stream_info = dp_panel_set_stream_info;
-	dp_panel->read_sink_status = dp_panel_read_sink_sts;
-	dp_panel->update_edid = dp_panel_update_edid;
-	dp_panel->read_mst_cap = dp_panel_read_mst_cap;
-	dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode;
-	dp_panel->update_pps = dp_panel_update_pps;
-
-	sde_conn = to_sde_connector(dp_panel->connector);
-	sde_conn->drv_panel = dp_panel;
-
-	dp_panel_edid_register(panel);
-
-	return dp_panel;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_panel_put(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-
-	if (!dp_panel)
-		return;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	dp_panel_edid_deregister(panel);
-	devm_kfree(panel->dev, panel);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
deleted file mode 100644
index dc96090..0000000
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_PANEL_H_
-#define _DP_PANEL_H_
-
-#include <drm/msm_drm.h>
-
-#include "dp_aux.h"
-#include "dp_link.h"
-#include "dp_usbpd.h"
-#include "sde_edid_parser.h"
-#include "sde_connector.h"
-#include "msm_drv.h"
-
-#define DP_RECEIVER_DSC_CAP_SIZE    15
-#define DP_RECEIVER_FEC_STATUS_SIZE 3
-
-/*
- * A source initiated power down flag is set
- * when the DP is powered off while physical
- * DP cable is still connected i.e. without
- * HPD or not initiated by sink like HPD_IRQ.
- * This can happen if framework reboots or
- * device suspends.
- */
-#define DP_PANEL_SRC_INITIATED_POWER_DOWN BIT(0)
-
-enum dp_lane_count {
-	DP_LANE_COUNT_1	= 1,
-	DP_LANE_COUNT_2	= 2,
-	DP_LANE_COUNT_4	= 4,
-};
-
-#define DP_MAX_DOWNSTREAM_PORTS 0x10
-
-struct dp_panel_info {
-	u32 h_active;
-	u32 v_active;
-	u32 h_back_porch;
-	u32 h_front_porch;
-	u32 h_sync_width;
-	u32 h_active_low;
-	u32 v_back_porch;
-	u32 v_front_porch;
-	u32 v_sync_width;
-	u32 v_active_low;
-	u32 h_skew;
-	u32 refresh_rate;
-	u32 pixel_clk_khz;
-	u32 bpp;
-	bool widebus_en;
-	struct msm_compression_info comp_info;
-	s64 dsc_overhead_fp;
-};
-
-struct dp_display_mode {
-	struct dp_panel_info timing;
-	u32 capabilities;
-	s64 fec_overhead_fp;
-	s64 dsc_overhead_fp;
-};
-
-struct dp_panel;
-
-struct dp_panel_in {
-	struct device *dev;
-	struct dp_aux *aux;
-	struct dp_link *link;
-	struct dp_catalog_panel *catalog;
-	struct drm_connector *connector;
-	struct dp_panel *base_panel;
-	struct dp_parser *parser;
-};
-
-struct dp_dsc_caps {
-	bool dsc_capable;
-	u8 version;
-	bool block_pred_en;
-};
-
-struct dp_audio;
-
-#define DP_PANEL_CAPS_DSC	BIT(0)
-
-struct dp_panel {
-	/* dpcd raw data */
-	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
-	u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS];
-	u8 dsc_dpcd[DP_RECEIVER_DSC_CAP_SIZE + 1];
-	u8 fec_dpcd;
-	u8 fec_sts_dpcd[DP_RECEIVER_FEC_STATUS_SIZE + 1];
-
-	struct drm_dp_link link_info;
-	struct sde_edid_ctrl *edid_ctrl;
-	struct dp_panel_info pinfo;
-	bool video_test;
-	bool spd_enabled;
-
-	u32 vic;
-	u32 max_pclk_khz;
-	s64 mst_target_sc;
-
-	/* debug */
-	u32 max_bw_code;
-
-	/* By default, stream_id is assigned to DP_INVALID_STREAM.
-	 * Client sets the stream id value using set_stream_id interface.
-	 */
-	enum dp_stream_id stream_id;
-	int vcpi;
-
-	u32 channel_start_slot;
-	u32 channel_total_slots;
-	u32 pbn;
-
-	u32 tot_dsc_blks_in_use;
-	/* DRM connector assosiated with this panel */
-	struct drm_connector *connector;
-
-	struct dp_audio *audio;
-	bool audio_supported;
-
-	struct dp_dsc_caps sink_dsc_caps;
-	bool dsc_feature_enable;
-	bool fec_feature_enable;
-	bool dsc_en;
-	bool fec_en;
-	bool widebus_en;
-	bool mst_state;
-
-	s64 fec_overhead_fp;
-
-	int (*init)(struct dp_panel *dp_panel);
-	int (*deinit)(struct dp_panel *dp_panel, u32 flags);
-	int (*hw_cfg)(struct dp_panel *dp_panel, bool enable);
-	int (*read_sink_caps)(struct dp_panel *dp_panel,
-		struct drm_connector *connector, bool multi_func);
-	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
-	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
-			u32 mode_pclk_khz);
-	int (*get_modes)(struct dp_panel *dp_panel,
-		struct drm_connector *connector, struct dp_display_mode *mode);
-	void (*handle_sink_request)(struct dp_panel *dp_panel);
-	int (*set_edid)(struct dp_panel *dp_panel, u8 *edid);
-	int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd);
-	int (*setup_hdr)(struct dp_panel *dp_panel,
-		struct drm_msm_ext_hdr_metadata *hdr_meta,
-		bool dhdr_update, u64 core_clk_rate);
-	void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
-	int (*spd_config)(struct dp_panel *dp_panel);
-	bool (*hdr_supported)(struct dp_panel *dp_panel);
-
-	int (*set_stream_info)(struct dp_panel *dp_panel,
-			enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn, int vcpi);
-
-	int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
-	int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
-	bool (*read_mst_cap)(struct dp_panel *dp_panel);
-	void (*convert_to_dp_mode)(struct dp_panel *dp_panel,
-		const struct drm_display_mode *drm_mode,
-		struct dp_display_mode *dp_mode);
-	void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
-};
-
-struct dp_tu_calc_input {
-	u64 lclk;        /* 162, 270, 540 and 810 */
-	u64 pclk_khz;    /* in KHz */
-	u64 hactive;     /* active h-width */
-	u64 hporch;      /* bp + fp + pulse */
-	int nlanes;      /* no.of.lanes */
-	int bpp;         /* bits */
-	int pixel_enc;   /* 444, 420, 422 */
-	int dsc_en;     /* dsc on/off */
-	int async_en;   /* async mode */
-	int fec_en;     /* fec */
-	int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
-	int num_of_dsc_slices; /* number of slices per line */
-};
-
-struct dp_vc_tu_mapping_table {
-	u32 vic;
-	u8 lanes;
-	u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
-	u8 bpp;
-	u32 valid_boundary_link;
-	u32 delay_start_link;
-	bool boundary_moderation_en;
-	u32 valid_lower_boundary_link;
-	u32 upper_boundary_count;
-	u32 lower_boundary_count;
-	u32 tu_size_minus1;
-};
-
-/**
- * is_link_rate_valid() - validates the link rate
- * @lane_rate: link rate requested by the sink
- *
- * Returns true if the requested link rate is supported.
- */
-static inline bool is_link_rate_valid(u32 bw_code)
-{
-	return ((bw_code == DP_LINK_BW_1_62) ||
-		(bw_code == DP_LINK_BW_2_7) ||
-		(bw_code == DP_LINK_BW_5_4) ||
-		(bw_code == DP_LINK_BW_8_1));
-}
-
-/**
- * dp_link_is_lane_count_valid() - validates the lane count
- * @lane_count: lane count requested by the sink
- *
- * Returns true if the requested lane count is supported.
- */
-static inline bool is_lane_count_valid(u32 lane_count)
-{
-	return (lane_count == DP_LANE_COUNT_1) ||
-		(lane_count == DP_LANE_COUNT_2) ||
-		(lane_count == DP_LANE_COUNT_4);
-}
-
-struct dp_panel *dp_panel_get(struct dp_panel_in *in);
-void dp_panel_put(struct dp_panel *dp_panel);
-void dp_panel_calc_tu_test(struct dp_tu_calc_input *in,
-		struct dp_vc_tu_mapping_table *tu_table);
-#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
deleted file mode 100644
index bc4369d..0000000
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ /dev/null
@@ -1,933 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
-
-#include "dp_parser.h"
-
-static void dp_parser_unmap_io_resources(struct dp_parser *parser)
-{
-	int i = 0;
-	struct dp_io *io = &parser->io;
-
-	for (i = 0; i < io->len; i++)
-		msm_dss_iounmap(&io->data[i].io);
-}
-
-static int dp_parser_reg(struct dp_parser *parser)
-{
-	int rc = 0, i = 0;
-	u32 reg_count;
-	struct platform_device *pdev = parser->pdev;
-	struct dp_io *io = &parser->io;
-	struct device *dev = &pdev->dev;
-
-	reg_count = of_property_count_strings(dev->of_node, "reg-names");
-	if (reg_count <= 0) {
-		pr_err("no reg defined\n");
-		return -EINVAL;
-	}
-
-	io->len = reg_count;
-	io->data = devm_kzalloc(dev, sizeof(struct dp_io_data) * reg_count,
-			GFP_KERNEL);
-	if (!io->data)
-		return -ENOMEM;
-
-	for (i = 0; i < reg_count; i++) {
-		of_property_read_string_index(dev->of_node,
-				"reg-names", i,	&io->data[i].name);
-		rc = msm_dss_ioremap_byname(pdev, &io->data[i].io,
-			io->data[i].name);
-		if (rc) {
-			pr_err("unable to remap %s resources\n",
-				io->data[i].name);
-			goto err;
-		}
-	}
-
-	return 0;
-err:
-	dp_parser_unmap_io_resources(parser);
-	return rc;
-}
-
-static const char *dp_get_phy_aux_config_property(u32 cfg_type)
-{
-	switch (cfg_type) {
-	case PHY_AUX_CFG0:
-		return "qcom,aux-cfg0-settings";
-	case PHY_AUX_CFG1:
-		return "qcom,aux-cfg1-settings";
-	case PHY_AUX_CFG2:
-		return "qcom,aux-cfg2-settings";
-	case PHY_AUX_CFG3:
-		return "qcom,aux-cfg3-settings";
-	case PHY_AUX_CFG4:
-		return "qcom,aux-cfg4-settings";
-	case PHY_AUX_CFG5:
-		return "qcom,aux-cfg5-settings";
-	case PHY_AUX_CFG6:
-		return "qcom,aux-cfg6-settings";
-	case PHY_AUX_CFG7:
-		return "qcom,aux-cfg7-settings";
-	case PHY_AUX_CFG8:
-		return "qcom,aux-cfg8-settings";
-	case PHY_AUX_CFG9:
-		return "qcom,aux-cfg9-settings";
-	default:
-		return "unknown";
-	}
-}
-
-static void dp_parser_phy_aux_cfg_reset(struct dp_parser *parser)
-{
-	int i = 0;
-
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++)
-		parser->aux_cfg[i] = (const struct dp_aux_cfg){ 0 };
-}
-
-static int dp_parser_aux(struct dp_parser *parser)
-{
-	struct device_node *of_node = parser->pdev->dev.of_node;
-	int len = 0, i = 0, j = 0, config_count = 0;
-	const char *data;
-	int const minimum_config_count = 1;
-
-	for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
-		const char *property = dp_get_phy_aux_config_property(i);
-
-		data = of_get_property(of_node, property, &len);
-		if (!data) {
-			pr_err("Unable to read %s\n", property);
-			goto error;
-		}
-
-		config_count = len - 1;
-		if ((config_count < minimum_config_count) ||
-			(config_count > DP_AUX_CFG_MAX_VALUE_CNT)) {
-			pr_err("Invalid config count (%d) configs for %s\n",
-					config_count, property);
-			goto error;
-		}
-
-		parser->aux_cfg[i].offset = data[0];
-		parser->aux_cfg[i].cfg_cnt = config_count;
-		pr_debug("%s offset=0x%x, cfg_cnt=%d\n",
-				property,
-				parser->aux_cfg[i].offset,
-				parser->aux_cfg[i].cfg_cnt);
-		for (j = 1; j < len; j++) {
-			parser->aux_cfg[i].lut[j - 1] = data[j];
-			pr_debug("%s lut[%d]=0x%x\n",
-					property,
-					i,
-					parser->aux_cfg[i].lut[j - 1]);
-		}
-	}
-		return 0;
-
-error:
-	dp_parser_phy_aux_cfg_reset(parser);
-	return -EINVAL;
-}
-
-static int dp_parser_misc(struct dp_parser *parser)
-{
-	int rc = 0, len = 0, i = 0;
-	const char *data = NULL;
-
-	struct device_node *of_node = parser->pdev->dev.of_node;
-
-	data = of_get_property(of_node, "qcom,logical2physical-lane-map", &len);
-	if (data && (len == DP_MAX_PHY_LN)) {
-		for (i = 0; i < len; i++)
-			parser->l_map[i] = data[i];
-	}
-
-	data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
-	if (data && (len == DP_MAX_PHY_LN)) {
-		for (i = 0; i < len; i++)
-			parser->l_pnswap |= (data[i] & 0x01) << i;
-	}
-
-	rc = of_property_read_u32(of_node,
-		"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
-	if (rc)
-		parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
-
-	rc = of_property_read_u32(of_node,
-		"qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
-	if (rc)
-		parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
-
-	return 0;
-}
-
-static int dp_parser_msm_hdcp_dev(struct dp_parser *parser)
-{
-	struct device_node *node;
-	struct platform_device *pdev;
-
-	node = of_find_compatible_node(NULL, NULL, "qcom,msm-hdcp");
-	if (!node) {
-		// This is a non-fatal error, module initialization can proceed
-		pr_warn("couldn't find msm-hdcp node\n");
-		return 0;
-	}
-
-	pdev = of_find_device_by_node(node);
-	if (!pdev) {
-		// This is a non-fatal error, module initialization can proceed
-		pr_warn("couldn't find msm-hdcp pdev\n");
-		return 0;
-	}
-
-	parser->msm_hdcp_dev = &pdev->dev;
-
-	return 0;
-}
-
-static int dp_parser_pinctrl(struct dp_parser *parser)
-{
-	int rc = 0;
-	struct dp_pinctrl *pinctrl = &parser->pinctrl;
-
-	pinctrl->pin = devm_pinctrl_get(&parser->pdev->dev);
-
-	if (IS_ERR_OR_NULL(pinctrl->pin)) {
-		pr_debug("failed to get pinctrl, rc=%d\n", rc);
-		goto error;
-	}
-
-	if (parser->no_aux_switch && parser->lphw_hpd) {
-		pinctrl->state_hpd_tlmm = pinctrl->state_hpd_ctrl = NULL;
-
-		pinctrl->state_hpd_tlmm = pinctrl_lookup_state(pinctrl->pin,
-					"mdss_dp_hpd_tlmm");
-		if (!IS_ERR_OR_NULL(pinctrl->state_hpd_tlmm)) {
-			pinctrl->state_hpd_ctrl = pinctrl_lookup_state(
-				pinctrl->pin, "mdss_dp_hpd_ctrl");
-		}
-
-		if (!pinctrl->state_hpd_tlmm || !pinctrl->state_hpd_ctrl) {
-			pinctrl->state_hpd_tlmm = NULL;
-			pinctrl->state_hpd_ctrl = NULL;
-			pr_debug("tlmm or ctrl pinctrl state does not exist\n");
-		}
-	}
-
-	pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin,
-					"mdss_dp_active");
-	if (IS_ERR_OR_NULL(pinctrl->state_active)) {
-		rc = PTR_ERR(pinctrl->state_active);
-		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
-		goto error;
-	}
-
-	pinctrl->state_suspend = pinctrl_lookup_state(pinctrl->pin,
-					"mdss_dp_sleep");
-	if (IS_ERR_OR_NULL(pinctrl->state_suspend)) {
-		rc = PTR_ERR(pinctrl->state_suspend);
-		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
-		goto error;
-	}
-error:
-	return rc;
-}
-
-static int dp_parser_gpio(struct dp_parser *parser)
-{
-	int i = 0;
-	struct device *dev = &parser->pdev->dev;
-	struct device_node *of_node = dev->of_node;
-	struct dss_module_power *mp = &parser->mp[DP_CORE_PM];
-	static const char * const dp_gpios[] = {
-		"qcom,aux-en-gpio",
-		"qcom,aux-sel-gpio",
-		"qcom,usbplug-cc-gpio",
-	};
-
-	if (of_find_property(of_node, "qcom,dp-hpd-gpio", NULL)) {
-		parser->no_aux_switch = true;
-		parser->lphw_hpd = of_find_property(of_node,
-				"qcom,dp-low-power-hw-hpd", NULL);
-		return 0;
-	}
-
-	if (of_find_property(of_node, "qcom,dp-gpio-aux-switch", NULL))
-		parser->gpio_aux_switch = true;
-	mp->gpio_config = devm_kzalloc(dev,
-		sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL);
-	if (!mp->gpio_config)
-		return -ENOMEM;
-
-	mp->num_gpio = ARRAY_SIZE(dp_gpios);
-
-	for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) {
-		mp->gpio_config[i].gpio = of_get_named_gpio(of_node,
-			dp_gpios[i], 0);
-
-		if (!gpio_is_valid(mp->gpio_config[i].gpio)) {
-			pr_debug("%s gpio not specified\n", dp_gpios[i]);
-			/* In case any gpio was not specified, we think gpio
-			 * aux switch also was not specified.
-			 */
-			parser->gpio_aux_switch = false;
-			continue;
-		}
-
-		strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i],
-			sizeof(mp->gpio_config[i].gpio_name));
-
-		mp->gpio_config[i].value = 0;
-	}
-
-	return 0;
-}
-
-static const char *dp_parser_supply_node_name(enum dp_pm_type module)
-{
-	switch (module) {
-	case DP_CORE_PM:	return "qcom,core-supply-entries";
-	case DP_CTRL_PM:	return "qcom,ctrl-supply-entries";
-	case DP_PHY_PM:		return "qcom,phy-supply-entries";
-	default:		return "???";
-	}
-}
-
-static int dp_parser_get_vreg(struct dp_parser *parser,
-		enum dp_pm_type module)
-{
-	int i = 0, rc = 0;
-	u32 tmp = 0;
-	const char *pm_supply_name = NULL;
-	struct device_node *supply_node = NULL;
-	struct device_node *of_node = parser->pdev->dev.of_node;
-	struct device_node *supply_root_node = NULL;
-	struct dss_module_power *mp = &parser->mp[module];
-
-	mp->num_vreg = 0;
-	pm_supply_name = dp_parser_supply_node_name(module);
-	supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
-	if (!supply_root_node) {
-		pr_err("no supply entry present: %s\n", pm_supply_name);
-		goto novreg;
-	}
-
-	mp->num_vreg = of_get_available_child_count(supply_root_node);
-
-	if (mp->num_vreg == 0) {
-		pr_debug("no vreg\n");
-		goto novreg;
-	} else {
-		pr_debug("vreg found. count=%d\n", mp->num_vreg);
-	}
-
-	mp->vreg_config = devm_kzalloc(&parser->pdev->dev,
-		sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL);
-	if (!mp->vreg_config) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node) {
-		const char *st = NULL;
-		/* vreg-name */
-		rc = of_property_read_string(supply_node,
-			"qcom,supply-name", &st);
-		if (rc) {
-			pr_err("error reading name. rc=%d\n",
-				 rc);
-			goto error;
-		}
-		snprintf(mp->vreg_config[i].vreg_name,
-			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
-		/* vreg-min-voltage */
-		rc = of_property_read_u32(supply_node,
-			"qcom,supply-min-voltage", &tmp);
-		if (rc) {
-			pr_err("error reading min volt. rc=%d\n",
-				rc);
-			goto error;
-		}
-		mp->vreg_config[i].min_voltage = tmp;
-
-		/* vreg-max-voltage */
-		rc = of_property_read_u32(supply_node,
-			"qcom,supply-max-voltage", &tmp);
-		if (rc) {
-			pr_err("error reading max volt. rc=%d\n",
-				rc);
-			goto error;
-		}
-		mp->vreg_config[i].max_voltage = tmp;
-
-		/* enable-load */
-		rc = of_property_read_u32(supply_node,
-			"qcom,supply-enable-load", &tmp);
-		if (rc) {
-			pr_err("error reading enable load. rc=%d\n",
-				rc);
-			goto error;
-		}
-		mp->vreg_config[i].enable_load = tmp;
-
-		/* disable-load */
-		rc = of_property_read_u32(supply_node,
-			"qcom,supply-disable-load", &tmp);
-		if (rc) {
-			pr_err("error reading disable load. rc=%d\n",
-				rc);
-			goto error;
-		}
-		mp->vreg_config[i].disable_load = tmp;
-
-		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d\n",
-			mp->vreg_config[i].vreg_name,
-			mp->vreg_config[i].min_voltage,
-			mp->vreg_config[i].max_voltage,
-			mp->vreg_config[i].enable_load,
-			mp->vreg_config[i].disable_load
-			);
-		++i;
-	}
-
-	return rc;
-
-error:
-	if (mp->vreg_config) {
-		devm_kfree(&parser->pdev->dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-	}
-novreg:
-	mp->num_vreg = 0;
-
-	return rc;
-}
-
-static void dp_parser_put_vreg_data(struct device *dev,
-	struct dss_module_power *mp)
-{
-	if (!mp) {
-		DEV_ERR("invalid input\n");
-		return;
-	}
-
-	if (mp->vreg_config) {
-		devm_kfree(dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-	}
-	mp->num_vreg = 0;
-}
-
-static int dp_parser_regulator(struct dp_parser *parser)
-{
-	int i, rc = 0;
-	struct platform_device *pdev = parser->pdev;
-
-	/* Parse the regulator information */
-	for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
-		rc = dp_parser_get_vreg(parser, i);
-		if (rc) {
-			pr_err("get_dt_vreg_data failed for %s. rc=%d\n",
-				dp_parser_pm_name(i), rc);
-			i--;
-			for (; i >= DP_CORE_PM; i--)
-				dp_parser_put_vreg_data(&pdev->dev,
-					&parser->mp[i]);
-			break;
-		}
-	}
-
-	return rc;
-}
-
-static bool dp_parser_check_prefix(const char *clk_prefix, const char *clk_name)
-{
-	return !!strnstr(clk_name, clk_prefix, strlen(clk_name));
-}
-
-static void dp_parser_put_clk_data(struct device *dev,
-	struct dss_module_power *mp)
-{
-	if (!mp) {
-		DEV_ERR("%s: invalid input\n", __func__);
-		return;
-	}
-
-	if (mp->clk_config) {
-		devm_kfree(dev, mp->clk_config);
-		mp->clk_config = NULL;
-	}
-
-	mp->num_clk = 0;
-}
-
-static void dp_parser_put_gpio_data(struct device *dev,
-	struct dss_module_power *mp)
-{
-	if (!mp) {
-		DEV_ERR("%s: invalid input\n", __func__);
-		return;
-	}
-
-	if (mp->gpio_config) {
-		devm_kfree(dev, mp->gpio_config);
-		mp->gpio_config = NULL;
-	}
-
-	mp->num_gpio = 0;
-}
-
-static int dp_parser_init_clk_data(struct dp_parser *parser)
-{
-	int num_clk = 0, i = 0, rc = 0;
-	int core_clk_count = 0, link_clk_count = 0;
-	int strm0_clk_count = 0, strm1_clk_count = 0;
-	const char *core_clk = "core";
-	const char *strm0_clk = "strm0";
-	const char *strm1_clk = "strm1";
-	const char *link_clk = "link";
-	const char *clk_name;
-	struct device *dev = &parser->pdev->dev;
-	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
-	struct dss_module_power *strm0_power = &parser->mp[DP_STREAM0_PM];
-	struct dss_module_power *strm1_power = &parser->mp[DP_STREAM1_PM];
-	struct dss_module_power *link_power = &parser->mp[DP_LINK_PM];
-
-	num_clk = of_property_count_strings(dev->of_node, "clock-names");
-	if (num_clk <= 0) {
-		pr_err("no clocks are defined\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	for (i = 0; i < num_clk; i++) {
-		of_property_read_string_index(dev->of_node,
-				"clock-names", i, &clk_name);
-
-		if (dp_parser_check_prefix(core_clk, clk_name))
-			core_clk_count++;
-
-		if (dp_parser_check_prefix(strm0_clk, clk_name))
-			strm0_clk_count++;
-
-		if (dp_parser_check_prefix(strm1_clk, clk_name))
-			strm1_clk_count++;
-
-		if (dp_parser_check_prefix(link_clk, clk_name))
-			link_clk_count++;
-	}
-
-	/* Initialize the CORE power module */
-	if (core_clk_count <= 0) {
-		pr_err("no core clocks are defined\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	core_power->num_clk = core_clk_count;
-	core_power->clk_config = devm_kzalloc(dev,
-			sizeof(struct dss_clk) * core_power->num_clk,
-			GFP_KERNEL);
-	if (!core_power->clk_config) {
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	/* Initialize the STREAM0 power module */
-	if (strm0_clk_count <= 0) {
-		pr_debug("no strm0 clocks are defined\n");
-	} else {
-		strm0_power->num_clk = strm0_clk_count;
-		strm0_power->clk_config = devm_kzalloc(dev,
-			sizeof(struct dss_clk) * strm0_power->num_clk,
-			GFP_KERNEL);
-		if (!strm0_power->clk_config) {
-			strm0_power->num_clk = 0;
-			rc = -EINVAL;
-			goto strm0_clock_error;
-		}
-	}
-
-	/* Initialize the STREAM1 power module */
-	if (strm1_clk_count <= 0) {
-		pr_debug("no strm1 clocks are defined\n");
-	} else {
-		strm1_power->num_clk = strm1_clk_count;
-		strm1_power->clk_config = devm_kzalloc(dev,
-			sizeof(struct dss_clk) * strm1_power->num_clk,
-			GFP_KERNEL);
-		if (!strm1_power->clk_config) {
-			strm1_power->num_clk = 0;
-			rc = -EINVAL;
-			goto strm1_clock_error;
-		}
-	}
-
-	/* Initialize the link power module */
-	if (link_clk_count <= 0) {
-		pr_err("no link clocks are defined\n");
-		rc = -EINVAL;
-		goto link_clock_error;
-	}
-
-	link_power->num_clk = link_clk_count;
-	link_power->clk_config = devm_kzalloc(dev,
-			sizeof(struct dss_clk) * link_power->num_clk,
-			GFP_KERNEL);
-	if (!link_power->clk_config) {
-		link_power->num_clk = 0;
-		rc = -EINVAL;
-		goto link_clock_error;
-	}
-
-	return rc;
-
-link_clock_error:
-	dp_parser_put_clk_data(dev, strm1_power);
-strm1_clock_error:
-	dp_parser_put_clk_data(dev, strm0_power);
-strm0_clock_error:
-	dp_parser_put_clk_data(dev, core_power);
-exit:
-	return rc;
-}
-
-static int dp_parser_clock(struct dp_parser *parser)
-{
-	int rc = 0, i = 0;
-	int num_clk = 0;
-	int core_clk_index = 0, link_clk_index = 0;
-	int core_clk_count = 0, link_clk_count = 0;
-	int strm0_clk_index = 0, strm1_clk_index = 0;
-	int strm0_clk_count = 0, strm1_clk_count = 0;
-	const char *clk_name;
-	const char *core_clk = "core";
-	const char *strm0_clk = "strm0";
-	const char *strm1_clk = "strm1";
-	const char *link_clk = "link";
-	struct device *dev = &parser->pdev->dev;
-	struct dss_module_power *core_power;
-	struct dss_module_power *strm0_power;
-	struct dss_module_power *strm1_power;
-	struct dss_module_power *link_power;
-
-	core_power = &parser->mp[DP_CORE_PM];
-	strm0_power = &parser->mp[DP_STREAM0_PM];
-	strm1_power = &parser->mp[DP_STREAM1_PM];
-	link_power = &parser->mp[DP_LINK_PM];
-
-	rc =  dp_parser_init_clk_data(parser);
-	if (rc) {
-		pr_err("failed to initialize power data\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	core_clk_count = core_power->num_clk;
-	link_clk_count = link_power->num_clk;
-	strm0_clk_count = strm0_power->num_clk;
-	strm1_clk_count = strm1_power->num_clk;
-
-	num_clk = of_property_count_strings(dev->of_node, "clock-names");
-
-	for (i = 0; i < num_clk; i++) {
-		of_property_read_string_index(dev->of_node, "clock-names",
-				i, &clk_name);
-
-		if (dp_parser_check_prefix(core_clk, clk_name) &&
-				core_clk_index < core_clk_count) {
-			struct dss_clk *clk =
-				&core_power->clk_config[core_clk_index];
-			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
-			clk->type = DSS_CLK_AHB;
-			core_clk_index++;
-		} else if (dp_parser_check_prefix(link_clk, clk_name) &&
-			   link_clk_index < link_clk_count) {
-			struct dss_clk *clk =
-				&link_power->clk_config[link_clk_index];
-			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
-			link_clk_index++;
-
-			if (!strcmp(clk_name, "link_clk"))
-				clk->type = DSS_CLK_PCLK;
-			else
-				clk->type = DSS_CLK_AHB;
-		} else if (dp_parser_check_prefix(strm0_clk, clk_name) &&
-			   strm0_clk_index < strm0_clk_count) {
-			struct dss_clk *clk =
-				&strm0_power->clk_config[strm0_clk_index];
-			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
-			strm0_clk_index++;
-
-			clk->type = DSS_CLK_PCLK;
-		} else if (dp_parser_check_prefix(strm1_clk, clk_name) &&
-			   strm1_clk_index < strm1_clk_count) {
-			struct dss_clk *clk =
-				&strm1_power->clk_config[strm1_clk_index];
-			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
-			strm1_clk_index++;
-
-			clk->type = DSS_CLK_PCLK;
-		}
-	}
-
-	pr_debug("clock parsing successful\n");
-
-exit:
-	return rc;
-}
-
-static int dp_parser_catalog(struct dp_parser *parser)
-{
-	int rc;
-	u32 version;
-	struct device *dev = &parser->pdev->dev;
-
-	rc = of_property_read_u32(dev->of_node, "qcom,phy-version", &version);
-
-	if (!rc)
-		parser->hw_cfg.phy_version = version;
-
-	return 0;
-}
-
-static int dp_parser_mst(struct dp_parser *parser)
-{
-	struct device *dev = &parser->pdev->dev;
-	int i;
-
-	parser->has_mst = of_property_read_bool(dev->of_node,
-			"qcom,mst-enable");
-	parser->has_mst_sideband = parser->has_mst;
-
-	pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
-
-	for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
-		of_property_read_u32_index(dev->of_node,
-				"qcom,mst-fixed-topology-ports", i,
-				&parser->mst_fixed_port[i]);
-	}
-
-	return 0;
-}
-
-static void dp_parser_dsc(struct dp_parser *parser)
-{
-	int rc;
-	struct device *dev = &parser->pdev->dev;
-
-	parser->dsc_feature_enable = of_property_read_bool(dev->of_node,
-			"qcom,dsc-feature-enable");
-
-	rc = of_property_read_u32(dev->of_node,
-		"qcom,max-dp-dsc-blks", &parser->max_dp_dsc_blks);
-	if (rc || !parser->max_dp_dsc_blks)
-		parser->dsc_feature_enable = false;
-
-	rc = of_property_read_u32(dev->of_node,
-		"qcom,max-dp-dsc-input-width-pixs",
-		&parser->max_dp_dsc_input_width_pixs);
-	if (rc || !parser->max_dp_dsc_input_width_pixs)
-		parser->dsc_feature_enable = false;
-
-	pr_debug("dsc parsing successful. dsc:%d, blks:%d, width:%d\n",
-			parser->dsc_feature_enable,
-			parser->max_dp_dsc_blks,
-			parser->max_dp_dsc_input_width_pixs);
-}
-
-static void dp_parser_fec(struct dp_parser *parser)
-{
-	struct device *dev = &parser->pdev->dev;
-
-	parser->fec_feature_enable = of_property_read_bool(dev->of_node,
-			"qcom,fec-feature-enable");
-
-	pr_debug("fec parsing successful. fec:%d\n",
-			parser->fec_feature_enable);
-}
-
-static void dp_parser_widebus(struct dp_parser *parser)
-{
-	struct device *dev = &parser->pdev->dev;
-
-	parser->has_widebus = of_property_read_bool(dev->of_node,
-			"qcom,widebus-enable");
-
-	pr_debug("widebus parsing successful. widebus:%d\n",
-			parser->has_widebus);
-}
-
-static int dp_parser_parse(struct dp_parser *parser)
-{
-	int rc = 0;
-
-	if (!parser) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto err;
-	}
-
-	rc = dp_parser_reg(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_aux(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_misc(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_clock(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_regulator(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_gpio(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_catalog(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_pinctrl(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_msm_hdcp_dev(parser);
-	if (rc)
-		goto err;
-
-	rc = dp_parser_mst(parser);
-	if (rc)
-		goto err;
-
-	dp_parser_dsc(parser);
-	dp_parser_fec(parser);
-	dp_parser_widebus(parser);
-err:
-	return rc;
-}
-
-static struct dp_io_data *dp_parser_get_io(struct dp_parser *dp_parser,
-				char *name)
-{
-	int i = 0;
-	struct dp_io *io;
-
-	if (!dp_parser) {
-		pr_err("invalid input\n");
-		goto err;
-	}
-
-	io = &dp_parser->io;
-
-	for (i = 0; i < io->len; i++) {
-		struct dp_io_data *data = &io->data[i];
-
-		if (!strcmp(data->name, name))
-			return data;
-	}
-err:
-	return NULL;
-}
-
-static void dp_parser_get_io_buf(struct dp_parser *dp_parser, char *name)
-{
-	int i = 0;
-	struct dp_io *io;
-
-	if (!dp_parser) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	io = &dp_parser->io;
-
-	for (i = 0; i < io->len; i++) {
-		struct dp_io_data *data = &io->data[i];
-
-		if (!strcmp(data->name, name)) {
-			if (!data->buf)
-				data->buf = devm_kzalloc(&dp_parser->pdev->dev,
-					data->io.len, GFP_KERNEL);
-		}
-	}
-}
-
-static void dp_parser_clear_io_buf(struct dp_parser *dp_parser)
-{
-	int i = 0;
-	struct dp_io *io;
-
-	if (!dp_parser) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	io = &dp_parser->io;
-
-	for (i = 0; i < io->len; i++) {
-		struct dp_io_data *data = &io->data[i];
-
-		if (data->buf)
-			devm_kfree(&dp_parser->pdev->dev, data->buf);
-
-		data->buf = NULL;
-	}
-}
-
-struct dp_parser *dp_parser_get(struct platform_device *pdev)
-{
-	struct dp_parser *parser;
-
-	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
-	if (!parser)
-		return ERR_PTR(-ENOMEM);
-
-	parser->parse = dp_parser_parse;
-	parser->get_io = dp_parser_get_io;
-	parser->get_io_buf = dp_parser_get_io_buf;
-	parser->clear_io_buf = dp_parser_clear_io_buf;
-	parser->pdev = pdev;
-
-	return parser;
-}
-
-void dp_parser_put(struct dp_parser *parser)
-{
-	int i = 0;
-	struct dss_module_power *power = NULL;
-
-	if (!parser) {
-		pr_err("invalid parser module\n");
-		return;
-	}
-
-	power = parser->mp;
-
-	for (i = 0; i < DP_MAX_PM; i++) {
-		dp_parser_put_clk_data(&parser->pdev->dev, &power[i]);
-		dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]);
-		dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]);
-	}
-
-	dp_parser_clear_io_buf(parser);
-	devm_kfree(&parser->pdev->dev, parser->io.data);
-	devm_kfree(&parser->pdev->dev, parser);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
deleted file mode 100644
index 9caa1a7..0000000
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_PARSER_H_
-#define _DP_PARSER_H_
-
-#include <linux/sde_io_util.h>
-
-#define DP_LABEL "MDSS DP DISPLAY"
-#define AUX_CFG_LEN	10
-#define DP_MAX_PIXEL_CLK_KHZ	675000
-#define DP_MAX_LINK_CLK_KHZ	810000
-#define MAX_DP_MST_STREAMS	2
-
-enum dp_pm_type {
-	DP_CORE_PM,
-	DP_CTRL_PM,
-	DP_PHY_PM,
-	DP_STREAM0_PM,
-	DP_STREAM1_PM,
-	DP_LINK_PM,
-	DP_MAX_PM
-};
-
-static inline const char *dp_parser_pm_name(enum dp_pm_type module)
-{
-	switch (module) {
-	case DP_CORE_PM:	return "DP_CORE_PM";
-	case DP_CTRL_PM:	return "DP_CTRL_PM";
-	case DP_PHY_PM:		return "DP_PHY_PM";
-	case DP_STREAM0_PM:	return "DP_STREAM0_PM";
-	case DP_STREAM1_PM:	return "DP_STREAM1_PM";
-	case DP_LINK_PM:	return "DP_LINK_PM";
-	default:		return "???";
-	}
-}
-
-/**
- * struct dp_display_data  - display related device tree data.
- *
- * @ctrl_node: referece to controller device
- * @phy_node:  reference to phy device
- * @is_active: is the controller currently active
- * @name: name of the display
- * @display_type: type of the display
- */
-struct dp_display_data {
-	struct device_node *ctrl_node;
-	struct device_node *phy_node;
-	bool is_active;
-	const char *name;
-	const char *display_type;
-};
-
-/**
- * struct dp_io_data - data structure to store DP IO related info
- * @name: name of the IO
- * @buf: buffer corresponding to IO for debugging
- * @io: io data which give len and mapped address
- */
-struct dp_io_data {
-	const char *name;
-	u8 *buf;
-	struct dss_io_data io;
-};
-
-/**
- * struct dp_io - data struct to store array of DP IO info
- * @len: total number of IOs
- * @data: pointer to an array of DP IO data structures.
- */
-struct dp_io {
-	u32 len;
-	struct dp_io_data *data;
-};
-
-/**
- * struct dp_pinctrl - DP's pin control
- *
- * @pin: pin-controller's instance
- * @state_active: active state pin control
- * @state_hpd_active: hpd active state pin control
- * @state_suspend: suspend state pin control
- */
-struct dp_pinctrl {
-	struct pinctrl *pin;
-	struct pinctrl_state *state_active;
-	struct pinctrl_state *state_hpd_active;
-	struct pinctrl_state *state_hpd_tlmm;
-	struct pinctrl_state *state_hpd_ctrl;
-	struct pinctrl_state *state_suspend;
-};
-
-#define DP_ENUM_STR(x)	#x
-#define DP_AUX_CFG_MAX_VALUE_CNT 3
-/**
- * struct dp_aux_cfg - DP's AUX configuration settings
- *
- * @cfg_cnt: count of the configurable settings for the AUX register
- * @current_index: current index of the AUX config lut
- * @offset: register offset of the AUX config register
- * @lut: look up table for the AUX config values for this register
- */
-struct dp_aux_cfg {
-	u32 cfg_cnt;
-	u32 current_index;
-	u32 offset;
-	u32 lut[DP_AUX_CFG_MAX_VALUE_CNT];
-};
-
-/* PHY AUX config registers */
-enum dp_phy_aux_config_type {
-	PHY_AUX_CFG0,
-	PHY_AUX_CFG1,
-	PHY_AUX_CFG2,
-	PHY_AUX_CFG3,
-	PHY_AUX_CFG4,
-	PHY_AUX_CFG5,
-	PHY_AUX_CFG6,
-	PHY_AUX_CFG7,
-	PHY_AUX_CFG8,
-	PHY_AUX_CFG9,
-	PHY_AUX_CFG_MAX,
-};
-
-/**
- * enum dp_phy_version - version of the dp phy
- * @DP_PHY_VERSION_UNKNOWN: Unknown controller version
- * @DP_PHY_VERSION_4_2_0:   DP phy v4.2.0 controller
- * @DP_PHY_VERSION_MAX:     max version
- */
-enum dp_phy_version {
-	DP_PHY_VERSION_UNKNOWN,
-	DP_PHY_VERSION_2_0_0 = 0x200,
-	DP_PHY_VERSION_4_2_0 = 0x420,
-	DP_PHY_VERSION_MAX
-};
-
-/**
- * struct dp_hw_cfg - DP HW specific configuration
- *
- * @phy_version: DP PHY HW version
- */
-struct dp_hw_cfg {
-	enum dp_phy_version phy_version;
-};
-
-static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
-{
-	switch (cfg_type) {
-	case PHY_AUX_CFG0:
-		return DP_ENUM_STR(PHY_AUX_CFG0);
-	case PHY_AUX_CFG1:
-		return DP_ENUM_STR(PHY_AUX_CFG1);
-	case PHY_AUX_CFG2:
-		return DP_ENUM_STR(PHY_AUX_CFG2);
-	case PHY_AUX_CFG3:
-		return DP_ENUM_STR(PHY_AUX_CFG3);
-	case PHY_AUX_CFG4:
-		return DP_ENUM_STR(PHY_AUX_CFG4);
-	case PHY_AUX_CFG5:
-		return DP_ENUM_STR(PHY_AUX_CFG5);
-	case PHY_AUX_CFG6:
-		return DP_ENUM_STR(PHY_AUX_CFG6);
-	case PHY_AUX_CFG7:
-		return DP_ENUM_STR(PHY_AUX_CFG7);
-	case PHY_AUX_CFG8:
-		return DP_ENUM_STR(PHY_AUX_CFG8);
-	case PHY_AUX_CFG9:
-		return DP_ENUM_STR(PHY_AUX_CFG9);
-	default:
-		return "unknown";
-	}
-}
-
-/**
- * struct dp_parser - DP parser's data exposed to clients
- *
- * @pdev: platform data of the client
- * @msm_hdcp_dev: device pointer for the HDCP driver
- * @mp: gpio, regulator and clock related data
- * @pinctrl: pin-control related data
- * @disp_data: controller's display related data
- * @l_pnswap: P/N swap status on each lane
- * @max_pclk_khz: maximum pixel clock supported for the platform
- * @max_lclk_khz: maximum link clock supported for the platform
- * @hw_cfg: DP HW specific settings
- * @has_mst: MST feature enable status
- * @has_mst_sideband: MST sideband feature enable status
- * @no_aux_switch: presence AUX switch status
- * @gpio_aux_switch: presence GPIO AUX switch status
- * @dsc_feature_enable: DSC feature enable status
- * @fec_feature_enable: FEC feature enable status
- * @max_dp_dsc_blks: maximum DSC blks for DP interface
- * @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
- * @has_widebus: widebus (2PPC) feature eanble status
-  *@mst_fixed_port: mst port_num reserved for fixed topology
- * @parse: function to be called by client to parse device tree.
- * @get_io: function to be called by client to get io data.
- * @get_io_buf: function to be called by client to get io buffers.
- * @clear_io_buf: function to be called by client to clear io buffers.
- */
-struct dp_parser {
-	struct platform_device *pdev;
-	struct device *msm_hdcp_dev;
-	struct dss_module_power mp[DP_MAX_PM];
-	struct dp_pinctrl pinctrl;
-	struct dp_io io;
-	struct dp_display_data disp_data;
-
-	u8 l_map[4];
-	u8 l_pnswap;
-	struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
-	u32 max_pclk_khz;
-	u32 max_lclk_khz;
-	struct dp_hw_cfg hw_cfg;
-	bool has_mst;
-	bool has_mst_sideband;
-	bool no_aux_switch;
-	bool dsc_feature_enable;
-	bool fec_feature_enable;
-	bool has_widebus;
-	bool gpio_aux_switch;
-	u32 max_dp_dsc_blks;
-	u32 max_dp_dsc_input_width_pixs;
-	bool lphw_hpd;
-	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
-
-	int (*parse)(struct dp_parser *parser);
-	struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
-	void (*get_io_buf)(struct dp_parser *parser, char *name);
-	void (*clear_io_buf)(struct dp_parser *parser);
-};
-
-enum dp_phy_lane_num {
-	DP_PHY_LN0 = 0,
-	DP_PHY_LN1 = 1,
-	DP_PHY_LN2 = 2,
-	DP_PHY_LN3 = 3,
-	DP_MAX_PHY_LN = 4,
-};
-
-enum dp_mainlink_lane_num {
-	DP_ML0 = 0,
-	DP_ML1 = 1,
-	DP_ML2 = 2,
-	DP_ML3 = 3,
-};
-
-/**
- * dp_parser_get() - get the DP's device tree parser module
- *
- * @pdev: platform data of the client
- * return: pointer to dp_parser structure.
- *
- * This function provides client capability to parse the
- * device tree and populate the data structures. The data
- * related to clock, regulators, pin-control and other
- * can be parsed using this module.
- */
-struct dp_parser *dp_parser_get(struct platform_device *pdev);
-
-/**
- * dp_parser_put() - cleans the dp_parser module
- *
- * @parser: pointer to the parser's data.
- */
-void dp_parser_put(struct dp_parser *parser);
-#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
deleted file mode 100644
index deb9a1a..0000000
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ /dev/null
@@ -1,720 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/clk.h>
-#include "dp_power.h"
-#include "dp_catalog.h"
-
-#define DP_CLIENT_NAME_SIZE	20
-
-struct dp_power_private {
-	struct dp_parser *parser;
-	struct platform_device *pdev;
-	struct clk *pixel_clk_rcg;
-	struct clk *pixel_parent;
-	struct clk *pixel1_clk_rcg;
-	struct clk *pixel1_parent;
-
-	struct dp_power dp_power;
-	struct sde_power_client *dp_core_client;
-	struct sde_power_handle *phandle;
-
-	bool core_clks_on;
-	bool link_clks_on;
-	bool strm0_clks_on;
-	bool strm1_clks_on;
-};
-
-static int dp_power_regulator_init(struct dp_power_private *power)
-{
-	int rc = 0, i = 0, j = 0;
-	struct platform_device *pdev;
-	struct dp_parser *parser;
-
-	parser = power->parser;
-	pdev = power->pdev;
-
-	for (i = DP_CORE_PM; !rc && (i < DP_MAX_PM); i++) {
-		rc = msm_dss_config_vreg(&pdev->dev,
-			parser->mp[i].vreg_config,
-			parser->mp[i].num_vreg, 1);
-		if (rc) {
-			pr_err("failed to init vregs for %s\n",
-				dp_parser_pm_name(i));
-			for (j = i - 1; j >= DP_CORE_PM; j--) {
-				msm_dss_config_vreg(&pdev->dev,
-				parser->mp[j].vreg_config,
-				parser->mp[j].num_vreg, 0);
-			}
-
-			goto error;
-		}
-	}
-error:
-	return rc;
-}
-
-static void dp_power_regulator_deinit(struct dp_power_private *power)
-{
-	int rc = 0, i = 0;
-	struct platform_device *pdev;
-	struct dp_parser *parser;
-
-	parser = power->parser;
-	pdev = power->pdev;
-
-	for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
-		rc = msm_dss_config_vreg(&pdev->dev,
-			parser->mp[i].vreg_config,
-			parser->mp[i].num_vreg, 0);
-		if (rc)
-			pr_err("failed to deinit vregs for %s\n",
-				dp_parser_pm_name(i));
-	}
-}
-
-static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
-{
-	int rc = 0, i = 0, j = 0;
-	struct dp_parser *parser;
-
-	parser = power->parser;
-
-	for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
-		rc = msm_dss_enable_vreg(
-			parser->mp[i].vreg_config,
-			parser->mp[i].num_vreg, enable);
-		if (rc) {
-			pr_err("failed to '%s' vregs for %s\n",
-					enable ? "enable" : "disable",
-					dp_parser_pm_name(i));
-			if (enable) {
-				for (j = i-1; j >= DP_CORE_PM; j--) {
-					msm_dss_enable_vreg(
-					parser->mp[j].vreg_config,
-					parser->mp[j].num_vreg, 0);
-				}
-			}
-			goto error;
-		}
-	}
-error:
-	return rc;
-}
-
-static int dp_power_pinctrl_set(struct dp_power_private *power, bool active)
-{
-	int rc = -EFAULT;
-	struct pinctrl_state *pin_state;
-	struct dp_parser *parser;
-
-	parser = power->parser;
-
-	if (IS_ERR_OR_NULL(parser->pinctrl.pin))
-		return 0;
-
-	if (parser->no_aux_switch && parser->lphw_hpd) {
-		pin_state = active ? parser->pinctrl.state_hpd_ctrl
-				: parser->pinctrl.state_hpd_tlmm;
-		if (!IS_ERR_OR_NULL(pin_state)) {
-			rc = pinctrl_select_state(parser->pinctrl.pin,
-				pin_state);
-			if (rc) {
-				pr_err("cannot direct hpd line to %s\n",
-					active ? "ctrl" : "tlmm");
-				return rc;
-			}
-		}
-	}
-
-	if (parser->no_aux_switch)
-		return 0;
-
-	pin_state = active ? parser->pinctrl.state_active
-				: parser->pinctrl.state_suspend;
-	if (!IS_ERR_OR_NULL(pin_state)) {
-		rc = pinctrl_select_state(parser->pinctrl.pin,
-				pin_state);
-		if (rc)
-			pr_err("can not set %s pins\n",
-			       active ? "dp_active"
-			       : "dp_sleep");
-	} else {
-		pr_err("invalid '%s' pinstate\n",
-		       active ? "dp_active"
-		       : "dp_sleep");
-	}
-
-	return rc;
-}
-
-static int dp_power_clk_init(struct dp_power_private *power, bool enable)
-{
-	int rc = 0;
-	struct device *dev;
-	enum dp_pm_type module;
-
-	dev = &power->pdev->dev;
-
-	if (enable) {
-		for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
-			struct dss_module_power *pm =
-				&power->parser->mp[module];
-
-			if (!pm->num_clk)
-				continue;
-
-			rc = msm_dss_get_clk(dev, pm->clk_config, pm->num_clk);
-			if (rc) {
-				pr_err("failed to get %s clk. err=%d\n",
-					dp_parser_pm_name(module), rc);
-				goto exit;
-			}
-		}
-
-		power->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
-		if (IS_ERR(power->pixel_clk_rcg)) {
-			pr_debug("Unable to get DP pixel clk RCG\n");
-			power->pixel_clk_rcg = NULL;
-		}
-
-		power->pixel_parent = devm_clk_get(dev, "pixel_parent");
-		if (IS_ERR(power->pixel_parent)) {
-			pr_debug("Unable to get DP pixel RCG parent\n");
-			power->pixel_parent = NULL;
-		}
-
-		power->pixel1_clk_rcg = devm_clk_get(dev, "pixel1_clk_rcg");
-		if (IS_ERR(power->pixel1_clk_rcg)) {
-			pr_debug("Unable to get DP pixel1 clk RCG\n");
-			power->pixel1_clk_rcg = NULL;
-		}
-
-		power->pixel1_parent = devm_clk_get(dev, "pixel1_parent");
-		if (IS_ERR(power->pixel1_parent)) {
-			pr_debug("Unable to get DP pixel1 RCG parent\n");
-			power->pixel1_parent = NULL;
-		}
-	} else {
-		if (power->pixel_parent)
-			devm_clk_put(dev, power->pixel_parent);
-
-		if (power->pixel_clk_rcg)
-			devm_clk_put(dev, power->pixel_clk_rcg);
-
-		if (power->pixel1_parent)
-			devm_clk_put(dev, power->pixel1_parent);
-
-		if (power->pixel1_clk_rcg)
-			devm_clk_put(dev, power->pixel1_clk_rcg);
-
-		for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
-			struct dss_module_power *pm =
-				&power->parser->mp[module];
-
-			if (!pm->num_clk)
-				continue;
-
-			msm_dss_put_clk(pm->clk_config, pm->num_clk);
-		}
-	}
-exit:
-	return rc;
-}
-
-static int dp_power_clk_set_rate(struct dp_power_private *power,
-		enum dp_pm_type module, bool enable)
-{
-	int rc = 0;
-	struct dss_module_power *mp;
-
-	if (!power) {
-		pr_err("invalid power data\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	mp = &power->parser->mp[module];
-
-	if (enable) {
-		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
-		if (rc) {
-			pr_err("failed to set clks rate.\n");
-			goto exit;
-		}
-
-		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1);
-		if (rc) {
-			pr_err("failed to enable clks\n");
-			goto exit;
-		}
-	} else {
-		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 0);
-		if (rc) {
-			pr_err("failed to disable clks\n");
-				goto exit;
-		}
-	}
-exit:
-	return rc;
-}
-
-static int dp_power_clk_enable(struct dp_power *dp_power,
-		enum dp_pm_type pm_type, bool enable)
-{
-	int rc = 0;
-	struct dss_module_power *mp;
-	struct dp_power_private *power;
-
-	if (!dp_power) {
-		pr_err("invalid power data\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	mp = &power->parser->mp[pm_type];
-
-	if (pm_type >= DP_MAX_PM) {
-		pr_err("unsupported power module: %s\n",
-				dp_parser_pm_name(pm_type));
-		return -EINVAL;
-	}
-
-	if (enable) {
-		if (pm_type == DP_CORE_PM && power->core_clks_on) {
-			pr_debug("core clks already enabled\n");
-			return 0;
-		}
-
-		if ((pm_type == DP_STREAM0_PM) && (power->strm0_clks_on)) {
-			pr_debug("strm0 clks already enabled\n");
-			return 0;
-		}
-
-		if ((pm_type == DP_STREAM1_PM) && (power->strm1_clks_on)) {
-			pr_debug("strm1 clks already enabled\n");
-			return 0;
-		}
-
-		if ((pm_type == DP_CTRL_PM) && (!power->core_clks_on)) {
-			pr_debug("Need to enable core clks before link clks\n");
-
-			rc = dp_power_clk_set_rate(power, pm_type, enable);
-			if (rc) {
-				pr_err("failed to enable clks: %s. err=%d\n",
-					dp_parser_pm_name(DP_CORE_PM), rc);
-				goto error;
-			} else {
-				power->core_clks_on = true;
-			}
-		}
-
-		if (pm_type == DP_LINK_PM && power->link_clks_on) {
-			pr_debug("links clks already enabled\n");
-			return 0;
-		}
-	}
-
-	rc = dp_power_clk_set_rate(power, pm_type, enable);
-	if (rc) {
-		pr_err("failed to '%s' clks for: %s. err=%d\n",
-			enable ? "enable" : "disable",
-			dp_parser_pm_name(pm_type), rc);
-			goto error;
-	}
-
-	if (pm_type == DP_CORE_PM)
-		power->core_clks_on = enable;
-	else if (pm_type == DP_STREAM0_PM)
-		power->strm0_clks_on = enable;
-	else if (pm_type == DP_STREAM1_PM)
-		power->strm1_clks_on = enable;
-	else if (pm_type == DP_LINK_PM)
-		power->link_clks_on = enable;
-
-	/*
-	 * This log is printed only when user connects or disconnects
-	 * a DP cable. As this is a user-action and not a frequent
-	 * usecase, it is not going to flood the kernel logs. Also,
-	 * helpful in debugging the NOC issues.
-	 */
-	pr_info("core:%s link:%s strm0:%s strm1:%s\n",
-		power->core_clks_on ? "on" : "off",
-		power->link_clks_on ? "on" : "off",
-		power->strm0_clks_on ? "on" : "off",
-		power->strm1_clks_on ? "on" : "off");
-error:
-	return rc;
-}
-
-static int dp_power_request_gpios(struct dp_power_private *power)
-{
-	int rc = 0, i;
-	struct device *dev;
-	struct dss_module_power *mp;
-	static const char * const gpio_names[] = {
-		"aux_enable", "aux_sel", "usbplug_cc",
-	};
-
-	if (!power) {
-		pr_err("invalid power data\n");
-		return -EINVAL;
-	}
-
-	dev = &power->pdev->dev;
-	mp = &power->parser->mp[DP_CORE_PM];
-
-	for (i = 0; i < ARRAY_SIZE(gpio_names); i++) {
-		unsigned int gpio = mp->gpio_config[i].gpio;
-
-		if (gpio_is_valid(gpio)) {
-			rc = devm_gpio_request(dev, gpio, gpio_names[i]);
-			if (rc) {
-				pr_err("request %s gpio failed, rc=%d\n",
-					       gpio_names[i], rc);
-				goto error;
-			}
-		}
-	}
-	return 0;
-error:
-	for (i = 0; i < ARRAY_SIZE(gpio_names); i++) {
-		unsigned int gpio = mp->gpio_config[i].gpio;
-
-		if (gpio_is_valid(gpio))
-			gpio_free(gpio);
-	}
-	return rc;
-}
-
-static bool dp_power_find_gpio(const char *gpio1, const char *gpio2)
-{
-	return !!strnstr(gpio1, gpio2, strlen(gpio1));
-}
-
-static void dp_power_set_gpio(struct dp_power_private *power, bool flip)
-{
-	int i;
-	struct dss_module_power *mp = &power->parser->mp[DP_CORE_PM];
-	struct dss_gpio *config = mp->gpio_config;
-
-	for (i = 0; i < mp->num_gpio; i++) {
-		if (dp_power_find_gpio(config->gpio_name, "aux-sel"))
-			config->value = flip;
-
-		if (gpio_is_valid(config->gpio)) {
-			pr_debug("gpio %s, value %d\n", config->gpio_name,
-				config->value);
-
-			if (dp_power_find_gpio(config->gpio_name, "aux-en") ||
-			    dp_power_find_gpio(config->gpio_name, "aux-sel"))
-				gpio_direction_output(config->gpio,
-					config->value);
-			else
-				gpio_set_value(config->gpio, config->value);
-
-		}
-		config++;
-	}
-}
-
-static int dp_power_config_gpios(struct dp_power_private *power, bool flip,
-					bool enable)
-{
-	int rc = 0, i;
-	struct dss_module_power *mp;
-	struct dss_gpio *config;
-
-	if (power->parser->no_aux_switch)
-		return 0;
-
-	mp = &power->parser->mp[DP_CORE_PM];
-	config = mp->gpio_config;
-
-	if (enable) {
-		rc = dp_power_request_gpios(power);
-		if (rc) {
-			pr_err("gpio request failed\n");
-			return rc;
-		}
-
-		dp_power_set_gpio(power, flip);
-	} else {
-		for (i = 0; i < mp->num_gpio; i++) {
-			if (gpio_is_valid(config[i].gpio)) {
-				gpio_set_value(config[i].gpio, 0);
-				gpio_free(config[i].gpio);
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int dp_power_client_init(struct dp_power *dp_power,
-		struct sde_power_handle *phandle)
-{
-	int rc = 0;
-	struct dp_power_private *power;
-	char dp_client_name[DP_CLIENT_NAME_SIZE];
-
-	if (!dp_power) {
-		pr_err("invalid power data\n");
-		return -EINVAL;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	rc = dp_power_regulator_init(power);
-	if (rc) {
-		pr_err("failed to init regulators\n");
-		goto error_power;
-	}
-
-	rc = dp_power_clk_init(power, true);
-	if (rc) {
-		pr_err("failed to init clocks\n");
-		goto error_clk;
-	}
-
-	power->phandle = phandle;
-	snprintf(dp_client_name, DP_CLIENT_NAME_SIZE, "dp_core_client");
-	power->dp_core_client = sde_power_client_create(phandle,
-			dp_client_name);
-	if (IS_ERR_OR_NULL(power->dp_core_client)) {
-		pr_err("[%s] client creation failed for DP\n", dp_client_name);
-		rc = -EINVAL;
-		goto error_client;
-	}
-	return 0;
-
-error_client:
-	dp_power_clk_init(power, false);
-error_clk:
-	dp_power_regulator_deinit(power);
-error_power:
-	return rc;
-}
-
-static void dp_power_client_deinit(struct dp_power *dp_power)
-{
-	struct dp_power_private *power;
-
-	if (!dp_power) {
-		pr_err("invalid power data\n");
-		return;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	sde_power_client_destroy(power->phandle, power->dp_core_client);
-	dp_power_clk_init(power, false);
-	dp_power_regulator_deinit(power);
-}
-
-static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power, u32 strm_id)
-{
-	int rc = 0;
-	struct dp_power_private *power;
-
-	if (!dp_power || strm_id >= DP_STREAM_MAX) {
-		pr_err("invalid power data. stream %d\n", strm_id);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	if (strm_id == DP_STREAM_0) {
-		if (power->pixel_clk_rcg && power->pixel_parent)
-			clk_set_parent(power->pixel_clk_rcg,
-					power->pixel_parent);
-	} else if (strm_id == DP_STREAM_1) {
-		if (power->pixel1_clk_rcg && power->pixel1_parent)
-			clk_set_parent(power->pixel1_clk_rcg,
-					power->pixel1_parent);
-	}
-exit:
-	return rc;
-}
-
-static u64 dp_power_clk_get_rate(struct dp_power *dp_power, char *clk_name)
-{
-	size_t i;
-	enum dp_pm_type j;
-	struct dss_module_power *mp;
-	struct dp_power_private *power;
-	bool clk_found = false;
-	u64 rate = 0;
-
-	if (!clk_name) {
-		pr_err("invalid pointer for clk_name\n");
-		return 0;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-	mp = &power->phandle->mp;
-	for (i = 0; i < mp->num_clk; i++) {
-		if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
-			rate = clk_get_rate(mp->clk_config[i].clk);
-			clk_found = true;
-			break;
-		}
-	}
-
-	for (j = DP_CORE_PM; j < DP_MAX_PM && !clk_found; j++) {
-		mp = &power->parser->mp[j];
-		for (i = 0; i < mp->num_clk; i++) {
-			if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
-				rate = clk_get_rate(mp->clk_config[i].clk);
-				clk_found = true;
-				break;
-			}
-		}
-	}
-
-	return rate;
-}
-
-static int dp_power_init(struct dp_power *dp_power, bool flip)
-{
-	int rc = 0;
-	struct dp_power_private *power;
-
-	if (!dp_power) {
-		pr_err("invalid power data\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	rc = dp_power_regulator_ctrl(power, true);
-	if (rc) {
-		pr_err("failed to enable regulators\n");
-		goto exit;
-	}
-
-	rc = dp_power_pinctrl_set(power, true);
-	if (rc) {
-		pr_err("failed to set pinctrl state\n");
-		goto err_pinctrl;
-	}
-
-	rc = dp_power_config_gpios(power, flip, true);
-	if (rc) {
-		pr_err("failed to enable gpios\n");
-		goto err_gpio;
-	}
-
-	rc = sde_power_resource_enable(power->phandle,
-		power->dp_core_client, true);
-	if (rc) {
-		pr_err("Power resource enable failed\n");
-		goto err_sde_power;
-	}
-
-	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
-	if (rc) {
-		pr_err("failed to enable DP core clocks\n");
-		goto err_clk;
-	}
-
-	return 0;
-
-err_clk:
-	sde_power_resource_enable(power->phandle, power->dp_core_client, false);
-err_sde_power:
-	dp_power_config_gpios(power, flip, false);
-err_gpio:
-	dp_power_pinctrl_set(power, false);
-err_pinctrl:
-	dp_power_regulator_ctrl(power, false);
-exit:
-	return rc;
-}
-
-static int dp_power_deinit(struct dp_power *dp_power)
-{
-	int rc = 0;
-	struct dp_power_private *power;
-
-	if (!dp_power) {
-		pr_err("invalid power data\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	if (power->link_clks_on)
-		dp_power_clk_enable(dp_power, DP_LINK_PM, false);
-
-	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
-
-	rc = sde_power_resource_enable(power->phandle,
-			power->dp_core_client, false);
-	if (rc) {
-		pr_err("Power resource disable failed, rc=%d\n", rc);
-		goto exit;
-	}
-	dp_power_config_gpios(power, false, false);
-	dp_power_pinctrl_set(power, false);
-	dp_power_regulator_ctrl(power, false);
-exit:
-	return rc;
-}
-
-struct dp_power *dp_power_get(struct dp_parser *parser)
-{
-	int rc = 0;
-	struct dp_power_private *power;
-	struct dp_power *dp_power;
-
-	if (!parser) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
-	if (!power) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	power->parser = parser;
-	power->pdev = parser->pdev;
-
-	dp_power = &power->dp_power;
-
-	dp_power->init = dp_power_init;
-	dp_power->deinit = dp_power_deinit;
-	dp_power->clk_enable = dp_power_clk_enable;
-	dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
-	dp_power->clk_get_rate = dp_power_clk_get_rate;
-	dp_power->power_client_init = dp_power_client_init;
-	dp_power->power_client_deinit = dp_power_client_deinit;
-
-	return dp_power;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_power_put(struct dp_power *dp_power)
-{
-	struct dp_power_private *power = NULL;
-
-	if (!dp_power)
-		return;
-
-	power = container_of(dp_power, struct dp_power_private, dp_power);
-
-	devm_kfree(&power->pdev->dev, power);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
deleted file mode 100644
index bd58166..0000000
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_POWER_H_
-#define _DP_POWER_H_
-
-#include "dp_parser.h"
-#include "sde_power_handle.h"
-
-/**
- * sruct dp_power - DisplayPort's power related data
- *
- * @init: initializes the regulators/core clocks/GPIOs/pinctrl
- * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
- * @clk_enable: enable/disable the DP clocks
- * @set_pixel_clk_parent: set the parent of DP pixel clock
- * @clk_get_rate: get the current rate for provided clk_name
- */
-struct dp_power {
-	int (*init)(struct dp_power *power, bool flip);
-	int (*deinit)(struct dp_power *power);
-	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
-				bool enable);
-	int (*set_pixel_clk_parent)(struct dp_power *power, u32 stream_id);
-	u64 (*clk_get_rate)(struct dp_power *power, char *clk_name);
-	int (*power_client_init)(struct dp_power *power,
-				struct sde_power_handle *phandle);
-	void (*power_client_deinit)(struct dp_power *power);
-};
-
-/**
- * dp_power_get() - configure and get the DisplayPort power module data
- *
- * @parser: instance of parser module
- * return: pointer to allocated power module data
- *
- * This API will configure the DisplayPort's power module and provides
- * methods to be called by the client to configure the power related
- * modueles.
- */
-struct dp_power *dp_power_get(struct dp_parser *parser);
-
-/**
- * dp_power_put() - release the power related resources
- *
- * @power: pointer to the power module's data
- */
-void dp_power_put(struct dp_power *power);
-#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
deleted file mode 100644
index a66e36d..0000000
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ /dev/null
@@ -1,437 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_REG_H_
-#define _DP_REG_H_
-
-/* DP_TX Registers */
-#define DP_HW_VERSION				(0x00000000)
-#define DP_SW_RESET				(0x00000010)
-#define DP_PHY_CTRL				(0x00000014)
-#define DP_CLK_CTRL				(0x00000018)
-#define DP_CLK_ACTIVE				(0x0000001C)
-#define DP_INTR_STATUS				(0x00000020)
-#define DP_INTR_STATUS2				(0x00000024)
-#define DP_INTR_STATUS3				(0x00000028)
-#define DP_INTR_STATUS5				(0x00000034)
-
-#define DP_DP_HPD_CTRL				(0x00000000)
-#define DP_DP_HPD_INT_STATUS			(0x00000004)
-#define DP_DP_HPD_INT_ACK			(0x00000008)
-#define DP_DP_HPD_INT_MASK			(0x0000000C)
-#define DP_DP_HPD_REFTIMER			(0x00000018)
-#define DP_DP_HPD_EVENT_TIME_0			(0x0000001C)
-#define DP_DP_HPD_EVENT_TIME_1			(0x00000020)
-#define DP_AUX_CTRL				(0x00000030)
-#define DP_AUX_DATA				(0x00000034)
-#define DP_AUX_TRANS_CTRL			(0x00000038)
-#define DP_TIMEOUT_COUNT			(0x0000003C)
-#define DP_AUX_LIMITS				(0x00000040)
-#define DP_AUX_STATUS				(0x00000044)
-
-#define DP_DPCD_CP_IRQ				(0x201)
-#define DP_DPCD_RXSTATUS			(0x69493)
-
-#define DP_INTERRUPT_TRANS_NUM			(0x000000A0)
-
-#define DP_MAINLINK_CTRL			(0x00000000)
-#define DP_STATE_CTRL				(0x00000004)
-#define DP_CONFIGURATION_CTRL			(0x00000008)
-#define DP_SOFTWARE_MVID			(0x00000010)
-#define DP_SOFTWARE_NVID			(0x00000018)
-#define DP_TOTAL_HOR_VER			(0x0000001C)
-#define DP_START_HOR_VER_FROM_SYNC		(0x00000020)
-#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000024)
-#define DP_ACTIVE_HOR_VER			(0x00000028)
-#define DP_MISC1_MISC0				(0x0000002C)
-#define DP_VALID_BOUNDARY			(0x00000030)
-#define DP_VALID_BOUNDARY_2			(0x00000034)
-#define DP_LOGICAL2PHYSICAL_LANE_MAPPING	(0x00000038)
-
-#define DP1_CONFIGURATION_CTRL			(0x00000400)
-#define DP_DP0_TIMESLOT_1_32			(0x00000404)
-#define DP_DP0_TIMESLOT_33_63			(0x00000408)
-#define DP_DP1_TIMESLOT_1_32			(0x0000040C)
-#define DP_DP1_TIMESLOT_33_63			(0x00000410)
-#define DP1_SOFTWARE_MVID			(0x00000414)
-#define DP1_SOFTWARE_NVID			(0x00000418)
-#define DP1_TOTAL_HOR_VER			(0x0000041C)
-#define DP1_START_HOR_VER_FROM_SYNC		(0x00000420)
-#define DP1_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000424)
-#define DP1_ACTIVE_HOR_VER			(0x00000428)
-#define DP1_MISC1_MISC0				(0x0000042C)
-#define DP_DP0_RG				(0x000004F8)
-#define DP_DP1_RG				(0x000004FC)
-
-#define DP_MST_ACT				(0x00000500)
-#define DP_MST_MAINLINK_READY			(0x00000504)
-
-#define DP_MAINLINK_READY			(0x00000040)
-#define DP_MAINLINK_LEVELS			(0x00000044)
-#define DP_TU					(0x0000004C)
-
-#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000054)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000000C0)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000000C4)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000000C8)
-
-#define MMSS_DP_MISC1_MISC0			(0x0000002C)
-#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000080)
-#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000084)
-#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000088)
-#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000008C)
-#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000090)
-#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000094)
-#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000098)
-
-#define MMSS_DP_PSR_CRC_RG			(0x00000154)
-#define MMSS_DP_PSR_CRC_B			(0x00000158)
-
-#define DP_COMPRESSION_MODE_CTRL		(0x00000180)
-#define DP_PPS_HB_0_3				(0x00000184)
-#define DP_PPS_PB_0_3				(0x00000188)
-#define DP_PPS_PB_4_7				(0x0000018C)
-#define DP_PPS_PB_8_11				(0x00000190)
-#define DP_PPS_PB_12_15				(0x00000194)
-#define DP_PPS_PB_16_19				(0x00000198)
-#define DP_PPS_PB_20_23				(0x0000019C)
-#define DP_PPS_PB_24_27				(0x000001A0)
-#define DP_PPS_PB_28_31				(0x000001A4)
-#define DP_PPS_PPS_0_3				(0x000001A8)
-#define DP_PPS_PPS_4_7				(0x000001AC)
-#define DP_PPS_PPS_8_11				(0x000001B0)
-#define DP_PPS_PPS_12_15			(0x000001B4)
-#define DP_PPS_PPS_16_19			(0x000001B8)
-#define DP_PPS_PPS_20_23			(0x000001BC)
-#define DP_PPS_PPS_24_27			(0x000001C0)
-#define DP_PPS_PPS_28_31			(0x000001C4)
-#define DP_PPS_PPS_32_35			(0x000001C8)
-#define DP_PPS_PPS_36_39			(0x000001CC)
-#define DP_PPS_PPS_40_43			(0x000001D0)
-#define DP_PPS_PPS_44_47			(0x000001D4)
-#define DP_PPS_PPS_48_51			(0x000001D8)
-#define DP_PPS_PPS_52_55			(0x000001DC)
-#define DP_PPS_PPS_56_59			(0x000001E0)
-#define DP_PPS_PPS_60_63			(0x000001E4)
-#define DP_PPS_PPS_64_67			(0x000001E8)
-#define DP_PPS_PPS_68_71			(0x000001EC)
-#define DP_PPS_PPS_72_75			(0x000001F0)
-#define DP_PPS_PPS_76_79			(0x000001F4)
-#define DP_PPS_PPS_80_83			(0x000001F8)
-#define DP_PPS_PPS_84_87			(0x000001FC)
-
-#define MMSS_DP_AUDIO_CFG			(0x00000200)
-#define MMSS_DP_AUDIO_STATUS			(0x00000204)
-#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000208)
-#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000020C)
-#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000210)
-#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000214)
-
-#define MMSS_DP_SDP_CFG				(0x00000228)
-#define MMSS_DP_SDP_CFG2			(0x0000022C)
-#define MMSS_DP_SDP_CFG3			(0x0000024C)
-#define MMSS_DP_SDP_CFG4			(0x000004EC)
-#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000230)
-#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000234)
-
-#define MMSS_DP_AUDIO_STREAM_0			(0x00000240)
-#define MMSS_DP_AUDIO_STREAM_1			(0x00000244)
-
-#define MMSS_DP_EXTENSION_0			(0x00000250)
-#define MMSS_DP_EXTENSION_1			(0x00000254)
-#define MMSS_DP_EXTENSION_2			(0x00000258)
-#define MMSS_DP_EXTENSION_3			(0x0000025C)
-#define MMSS_DP_EXTENSION_4			(0x00000260)
-#define MMSS_DP_EXTENSION_5			(0x00000264)
-#define MMSS_DP_EXTENSION_6			(0x00000268)
-#define MMSS_DP_EXTENSION_7			(0x0000026C)
-#define MMSS_DP_EXTENSION_8			(0x00000270)
-#define MMSS_DP_EXTENSION_9			(0x00000274)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000278)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000027C)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000280)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000284)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000288)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000028C)
-#define MMSS_DP_AUDIO_ISRC_0			(0x00000290)
-#define MMSS_DP_AUDIO_ISRC_1			(0x00000294)
-#define MMSS_DP_AUDIO_ISRC_2			(0x00000298)
-#define MMSS_DP_AUDIO_ISRC_3			(0x0000029C)
-#define MMSS_DP_AUDIO_ISRC_4			(0x000002A0)
-#define MMSS_DP_AUDIO_ISRC_5			(0x000002A4)
-#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000002A8)
-#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000002AC)
-#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000002B0)
-
-#define MMSS_DP_FLUSH				(0x000002F8)
-#define MMSS_DP1_FLUSH				(0x000002FC)
-
-#define MMSS_DP_GENERIC0_0			(0x00000300)
-#define MMSS_DP_GENERIC0_1			(0x00000304)
-#define MMSS_DP_GENERIC0_2			(0x00000308)
-#define MMSS_DP_GENERIC0_3			(0x0000030C)
-#define MMSS_DP_GENERIC0_4			(0x00000310)
-#define MMSS_DP_GENERIC0_5			(0x00000314)
-#define MMSS_DP_GENERIC0_6			(0x00000318)
-#define MMSS_DP_GENERIC0_7			(0x0000031C)
-#define MMSS_DP_GENERIC0_8			(0x00000320)
-#define MMSS_DP_GENERIC0_9			(0x00000324)
-#define MMSS_DP_GENERIC1_0			(0x00000328)
-#define MMSS_DP_GENERIC1_1			(0x0000032C)
-#define MMSS_DP_GENERIC1_2			(0x00000330)
-#define MMSS_DP_GENERIC1_3			(0x00000334)
-#define MMSS_DP_GENERIC1_4			(0x00000338)
-#define MMSS_DP_GENERIC1_5			(0x0000033C)
-#define MMSS_DP_GENERIC1_6			(0x00000340)
-#define MMSS_DP_GENERIC1_7			(0x00000344)
-#define MMSS_DP_GENERIC1_8			(0x00000348)
-#define MMSS_DP_GENERIC1_9			(0x0000034C)
-
-#define MMSS_DP1_GENERIC0_0			(0x00000490)
-#define MMSS_DP1_GENERIC0_1			(0x00000494)
-#define MMSS_DP1_GENERIC0_2			(0x00000498)
-#define MMSS_DP1_GENERIC0_3			(0x0000049C)
-#define MMSS_DP1_GENERIC0_4			(0x000004A0)
-#define MMSS_DP1_GENERIC0_5			(0x000004A4)
-#define MMSS_DP1_GENERIC0_6			(0x000004A8)
-#define MMSS_DP1_GENERIC0_7			(0x000004AC)
-#define MMSS_DP1_GENERIC0_8			(0x000004B0)
-#define MMSS_DP1_GENERIC0_9			(0x000004B4)
-#define MMSS_DP1_GENERIC1_0			(0x000004B8)
-#define MMSS_DP1_GENERIC1_1			(0x000004BC)
-#define MMSS_DP1_GENERIC1_2			(0x000004C0)
-#define MMSS_DP1_GENERIC1_3			(0x000004C4)
-#define MMSS_DP1_GENERIC1_4			(0x000004C8)
-#define MMSS_DP1_GENERIC1_5			(0x000004CC)
-#define MMSS_DP1_GENERIC1_6			(0x000004D0)
-#define MMSS_DP1_GENERIC1_7			(0x000004D4)
-#define MMSS_DP1_GENERIC1_8			(0x000004D8)
-#define MMSS_DP1_GENERIC1_9			(0x000004DC)
-
-#define MMSS_DP_GENERIC2_0			(0x000003d8)
-#define MMSS_DP_GENERIC2_1			(0x000003dc)
-#define MMSS_DP_GENERIC2_2			(0x000003e0)
-#define MMSS_DP_GENERIC2_3			(0x000003e4)
-#define MMSS_DP_GENERIC2_4			(0x000003e8)
-#define MMSS_DP_GENERIC2_5			(0x000003ec)
-#define MMSS_DP_GENERIC2_6			(0x000003f0)
-#define MMSS_DP_GENERIC2_7			(0x000003f4)
-#define MMSS_DP_GENERIC2_8			(0x000003f8)
-#define MMSS_DP_GENERIC2_9			(0x000003fc)
-#define MMSS_DP1_GENERIC2_0			(0x00000510)
-#define MMSS_DP1_GENERIC2_1			(0x00000514)
-#define MMSS_DP1_GENERIC2_2			(0x00000518)
-#define MMSS_DP1_GENERIC2_3			(0x0000051c)
-#define MMSS_DP1_GENERIC2_4			(0x00000520)
-#define MMSS_DP1_GENERIC2_5			(0x00000524)
-#define MMSS_DP1_GENERIC2_6			(0x00000528)
-#define MMSS_DP1_GENERIC2_7			(0x0000052C)
-#define MMSS_DP1_GENERIC2_8			(0x00000530)
-#define MMSS_DP1_GENERIC2_9			(0x00000534)
-
-#define MMSS_DP1_SDP_CFG			(0x000004E0)
-#define MMSS_DP1_SDP_CFG2			(0x000004E4)
-#define MMSS_DP1_SDP_CFG3			(0x000004E8)
-#define MMSS_DP1_SDP_CFG4			(0x000004F0)
-
-#define DP1_COMPRESSION_MODE_CTRL		(0x00000560)
-#define DP1_PPS_HB_0_3				(0x00000564)
-#define DP1_PPS_PB_0_3				(0x00000568)
-#define DP1_PPS_PB_4_7				(0x0000056C)
-#define DP1_PPS_PB_8_11				(0x00000570)
-#define DP1_PPS_PB_12_15			(0x00000574)
-#define DP1_PPS_PB_16_19			(0x00000578)
-#define DP1_PPS_PB_20_23			(0x0000057C)
-#define DP1_PPS_PB_24_27			(0x00000580)
-#define DP1_PPS_PB_28_31			(0x00000584)
-#define DP1_PPS_PPS_0_3				(0x00000588)
-#define DP1_PPS_PPS_4_7				(0x0000058C)
-#define DP1_PPS_PPS_8_11			(0x00000590)
-#define DP1_PPS_PPS_12_15			(0x00000594)
-#define DP1_PPS_PPS_16_19			(0x00000598)
-#define DP1_PPS_PPS_20_23			(0x0000059C)
-#define DP1_PPS_PPS_24_27			(0x000005A0)
-#define DP1_PPS_PPS_28_31			(0x000005A4)
-#define DP1_PPS_PPS_32_35			(0x000005A8)
-#define DP1_PPS_PPS_36_39			(0x000005AC)
-#define DP1_PPS_PPS_40_43			(0x000005B0)
-#define DP1_PPS_PPS_44_47			(0x000005B4)
-#define DP1_PPS_PPS_48_51			(0x000005B8)
-#define DP1_PPS_PPS_52_55			(0x000005BC)
-#define DP1_PPS_PPS_56_59			(0x000005C0)
-#define DP1_PPS_PPS_60_63			(0x000005C4)
-#define DP1_PPS_PPS_64_67			(0x000005C8)
-#define DP1_PPS_PPS_68_71			(0x000005CC)
-#define DP1_PPS_PPS_72_75			(0x000005D0)
-#define DP1_PPS_PPS_76_79			(0x000005D4)
-#define DP1_PPS_PPS_80_83			(0x000005D8)
-#define DP1_PPS_PPS_84_87			(0x000005DC)
-
-#define MMSS_DP_VSCEXT_0			(0x000002D0)
-#define MMSS_DP_VSCEXT_1			(0x000002D4)
-#define MMSS_DP_VSCEXT_2			(0x000002D8)
-#define MMSS_DP_VSCEXT_3			(0x000002DC)
-#define MMSS_DP_VSCEXT_4			(0x000002E0)
-#define MMSS_DP_VSCEXT_5			(0x000002E4)
-#define MMSS_DP_VSCEXT_6			(0x000002E8)
-#define MMSS_DP_VSCEXT_7			(0x000002EC)
-#define MMSS_DP_VSCEXT_8			(0x000002F0)
-#define MMSS_DP_VSCEXT_9			(0x000002F4)
-
-#define MMSS_DP1_VSCEXT_0			(0x00000468)
-#define MMSS_DP1_VSCEXT_1			(0x0000046c)
-#define MMSS_DP1_VSCEXT_2			(0x00000470)
-#define MMSS_DP1_VSCEXT_3			(0x00000474)
-#define MMSS_DP1_VSCEXT_4			(0x00000478)
-#define MMSS_DP1_VSCEXT_5			(0x0000047c)
-#define MMSS_DP1_VSCEXT_6			(0x00000480)
-#define MMSS_DP1_VSCEXT_7			(0x00000484)
-#define MMSS_DP1_VSCEXT_8			(0x00000488)
-#define MMSS_DP1_VSCEXT_9			(0x0000048c)
-
-#define MMSS_DP_BIST_ENABLE			(0x00000000)
-#define MMSS_DP_TIMING_ENGINE_EN		(0x00000010)
-#define MMSS_DP_INTF_CONFIG			(0x00000014)
-#define MMSS_DP_INTF_HSYNC_CTL			(0x00000018)
-#define MMSS_DP_INTF_VSYNC_PERIOD_F0		(0x0000001C)
-#define MMSS_DP_INTF_VSYNC_PERIOD_F1		(0x00000020)
-#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0	(0x00000024)
-#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1	(0x00000028)
-#define MMSS_INTF_DISPLAY_V_START_F0		(0x0000002C)
-#define MMSS_INTF_DISPLAY_V_START_F1		(0x00000030)
-#define MMSS_DP_INTF_DISPLAY_V_END_F0		(0x00000034)
-#define MMSS_DP_INTF_DISPLAY_V_END_F1		(0x00000038)
-#define MMSS_DP_INTF_ACTIVE_V_START_F0		(0x0000003C)
-#define MMSS_DP_INTF_ACTIVE_V_START_F1		(0x00000040)
-#define MMSS_DP_INTF_ACTIVE_V_END_F0		(0x00000044)
-#define MMSS_DP_INTF_ACTIVE_V_END_F1		(0x00000048)
-#define MMSS_DP_INTF_DISPLAY_HCTL		(0x0000004C)
-#define MMSS_DP_INTF_ACTIVE_HCTL		(0x00000050)
-#define MMSS_DP_INTF_POLARITY_CTL		(0x00000058)
-#define MMSS_DP_TPG_MAIN_CONTROL		(0x00000060)
-#define MMSS_DP_TPG_VIDEO_CONFIG		(0x00000064)
-#define MMSS_DP_DSC_DTO				(0x0000007C)
-#define MMSS_DP_DSC_DTO_COUNT			(0x00000084)
-#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000088)
-
-#define MMSS_DP1_BIST_ENABLE			(0x00000000)
-#define MMSS_DP1_TIMING_ENGINE_EN		(0x00000010)
-#define MMSS_DP1_INTF_CONFIG			(0x00000014)
-#define MMSS_DP1_INTF_HSYNC_CTL			(0x00000018)
-#define MMSS_DP1_INTF_VSYNC_PERIOD_F0		(0x0000001C)
-#define MMSS_DP1_INTF_VSYNC_PERIOD_F1		(0x00000020)
-#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F0	(0x00000024)
-#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F1	(0x00000028)
-#define MMSS_DP1_INTF_DISPLAY_V_START_F0	(0x0000002C)
-#define MMSS_DP1_INTF_DISPLAY_V_START_F1	(0x00000030)
-#define MMSS_DP1_INTF_DISPLAY_V_END_F0		(0x00000034)
-#define MMSS_DP1_INTF_DISPLAY_V_END_F1		(0x00000038)
-#define MMSS_DP1_INTF_ACTIVE_V_START_F0		(0x0000003C)
-#define MMSS_DP1_INTF_ACTIVE_V_START_F1		(0x00000040)
-#define MMSS_DP1_INTF_ACTIVE_V_END_F0		(0x00000044)
-#define MMSS_DP1_INTF_ACTIVE_V_END_F1		(0x00000048)
-#define MMSS_DP1_INTF_DISPLAY_HCTL		(0x0000004C)
-#define MMSS_DP1_INTF_ACTIVE_HCTL		(0x00000050)
-#define MMSS_DP1_INTF_POLARITY_CTL		(0x00000058)
-#define MMSS_DP1_TPG_MAIN_CONTROL		(0x00000060)
-#define MMSS_DP1_TPG_VIDEO_CONFIG		(0x00000064)
-#define MMSS_DP1_DSC_DTO			(0x0000007C)
-#define MMSS_DP1_DSC_DTO_COUNT			(0x00000084)
-#define MMSS_DP1_ASYNC_FIFO_CONFIG		(0x00000088)
-
-/*DP PHY Register offsets */
-#define DP_PHY_REVISION_ID0                     (0x00000000)
-#define DP_PHY_REVISION_ID1                     (0x00000004)
-#define DP_PHY_REVISION_ID2                     (0x00000008)
-#define DP_PHY_REVISION_ID3                     (0x0000000C)
-
-#define DP_PHY_CFG                              (0x00000010)
-#define DP_PHY_PD_CTL                           (0x00000018)
-#define DP_PHY_MODE                             (0x0000001C)
-
-#define DP_PHY_AUX_CFG0                         (0x00000020)
-#define DP_PHY_AUX_CFG1                         (0x00000024)
-#define DP_PHY_AUX_CFG2                         (0x00000028)
-#define DP_PHY_AUX_CFG3                         (0x0000002C)
-#define DP_PHY_AUX_CFG4                         (0x00000030)
-#define DP_PHY_AUX_CFG5                         (0x00000034)
-#define DP_PHY_AUX_CFG6                         (0x00000038)
-#define DP_PHY_AUX_CFG7                         (0x0000003C)
-#define DP_PHY_AUX_CFG8                         (0x00000040)
-#define DP_PHY_AUX_CFG9                         (0x00000044)
-#define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
-#define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
-#define DP_PHY_AUX_INTERRUPT_STATUS             (0x000000BC)
-#define DP_PHY_AUX_INTERRUPT_MASK_V200          (0x00000048)
-#define DP_PHY_AUX_INTERRUPT_CLEAR_V200         (0x0000004C)
-#define DP_PHY_AUX_INTERRUPT_STATUS_V200        (0x000000BC)
-
-#define DP_PHY_SPARE0				(0x00AC)
-
-#define TXn_TX_EMP_POST1_LVL			(0x000C)
-#define TXn_TX_DRV_LVL				(0x001C)
-#define TXn_TX_POL_INV				(0x0064)
-
-#define DP_PHY_AUX_INTERRUPT_MASK_V420		(0x0054)
-#define DP_PHY_AUX_INTERRUPT_CLEAR_V420		(0x0058)
-#define DP_PHY_AUX_INTERRUPT_STATUS_V420	(0x00D8)
-#define DP_PHY_SPARE0_V420			(0x00C8)
-#define TXn_TX_DRV_LVL_V420			(0x0014)
-#define TXn_TX_POL_INV_V420			(0x005C)
-
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
-
-/* DP MMSS_CC registers */
-#define MMSS_DP_LINK_CMD_RCGR			(0x0138)
-#define MMSS_DP_LINK_CFG_RCGR			(0x013C)
-#define MMSS_DP_PIXEL_M				(0x01B4)
-#define MMSS_DP_PIXEL_N				(0x01B8)
-#define MMSS_DP_PIXEL1_M			(0x01CC)
-#define MMSS_DP_PIXEL1_N			(0x01D0)
-#define MMSS_DP_PIXEL_M_V200			(0x0130)
-#define MMSS_DP_PIXEL_N_V200			(0x0134)
-#define MMSS_DP_PIXEL1_M_V200			(0x0148)
-#define MMSS_DP_PIXEL1_N_V200			(0x014C)
-#define MMSS_DP_PIXEL_M_V420			(0x01B4)
-#define MMSS_DP_PIXEL_N_V420			(0x01B8)
-#define MMSS_DP_PIXEL1_M_V420			(0x01CC)
-#define MMSS_DP_PIXEL1_N_V420			(0x01D0)
-
-/* DP HDCP 1.3 registers */
-#define DP_HDCP_CTRL                                   (0x0A0)
-#define DP_HDCP_STATUS                                 (0x0A4)
-#define DP_HDCP_SW_UPPER_AKSV                          (0x098)
-#define DP_HDCP_SW_LOWER_AKSV                          (0x09C)
-#define DP_HDCP_ENTROPY_CTRL0                          (0x350)
-#define DP_HDCP_ENTROPY_CTRL1                          (0x35C)
-#define DP_HDCP_SHA_STATUS                             (0x0C8)
-#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
-#define DP_HDCP_RCVPORT_DATA3                          (0x0A4)
-#define DP_HDCP_RCVPORT_DATA4                          (0x0A8)
-#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
-#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
-
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
-
-/* USB3 DP COM registers */
-#define USB3_DP_COM_RESET_OVRD_CTRL (0x1C)
-#define USB3_DP_COM_PHY_MODE_CTRL   (0x00)
-#define USB3_DP_COM_SW_RESET        (0x04)
-#define USB3_DP_COM_TYPEC_CTRL      (0x10)
-#define USB3_DP_COM_SWI_CTRL        (0x0c)
-#define USB3_DP_COM_POWER_DOWN_CTRL (0x08)
-
-
-
-#endif /* _DP_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
deleted file mode 100644
index 7b5a480..0000000
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ /dev/null
@@ -1,563 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
-
-#include <linux/usb/usbpd.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-
-#include "dp_usbpd.h"
-
-/* DP specific VDM commands */
-#define DP_USBPD_VDM_STATUS	0x10
-#define DP_USBPD_VDM_CONFIGURE	0x11
-
-/* USBPD-TypeC specific Macros */
-#define VDM_VERSION		0x0
-#define USB_C_DP_SID		0xFF01
-
-enum dp_usbpd_pin_assignment {
-	DP_USBPD_PIN_A,
-	DP_USBPD_PIN_B,
-	DP_USBPD_PIN_C,
-	DP_USBPD_PIN_D,
-	DP_USBPD_PIN_E,
-	DP_USBPD_PIN_F,
-	DP_USBPD_PIN_MAX,
-};
-
-enum dp_usbpd_events {
-	DP_USBPD_EVT_DISCOVER,
-	DP_USBPD_EVT_ENTER,
-	DP_USBPD_EVT_STATUS,
-	DP_USBPD_EVT_CONFIGURE,
-	DP_USBPD_EVT_CC_PIN_POLARITY,
-	DP_USBPD_EVT_EXIT,
-	DP_USBPD_EVT_ATTENTION,
-};
-
-enum dp_usbpd_alt_mode {
-	DP_USBPD_ALT_MODE_NONE	    = 0,
-	DP_USBPD_ALT_MODE_INIT	    = BIT(0),
-	DP_USBPD_ALT_MODE_DISCOVER  = BIT(1),
-	DP_USBPD_ALT_MODE_ENTER	    = BIT(2),
-	DP_USBPD_ALT_MODE_STATUS    = BIT(3),
-	DP_USBPD_ALT_MODE_CONFIGURE = BIT(4),
-};
-
-struct dp_usbpd_capabilities {
-	enum dp_usbpd_port port;
-	bool receptacle_state;
-	u8 ulink_pin_config;
-	u8 dlink_pin_config;
-};
-
-struct dp_usbpd_private {
-	bool forced_disconnect;
-	u32 vdo;
-	struct device *dev;
-	struct usbpd *pd;
-	struct usbpd_svid_handler svid_handler;
-	struct dp_hpd_cb *dp_cb;
-	struct dp_usbpd_capabilities cap;
-	struct dp_usbpd dp_usbpd;
-	enum dp_usbpd_alt_mode alt_mode;
-	u32 dp_usbpd_config;
-};
-
-static const char *dp_usbpd_pin_name(u8 pin)
-{
-	switch (pin) {
-	case DP_USBPD_PIN_A: return "DP_USBPD_PIN_ASSIGNMENT_A";
-	case DP_USBPD_PIN_B: return "DP_USBPD_PIN_ASSIGNMENT_B";
-	case DP_USBPD_PIN_C: return "DP_USBPD_PIN_ASSIGNMENT_C";
-	case DP_USBPD_PIN_D: return "DP_USBPD_PIN_ASSIGNMENT_D";
-	case DP_USBPD_PIN_E: return "DP_USBPD_PIN_ASSIGNMENT_E";
-	case DP_USBPD_PIN_F: return "DP_USBPD_PIN_ASSIGNMENT_F";
-	default: return "UNKNOWN";
-	}
-}
-
-static const char *dp_usbpd_port_name(enum dp_usbpd_port port)
-{
-	switch (port) {
-	case DP_USBPD_PORT_NONE: return "DP_USBPD_PORT_NONE";
-	case DP_USBPD_PORT_UFP_D: return "DP_USBPD_PORT_UFP_D";
-	case DP_USBPD_PORT_DFP_D: return "DP_USBPD_PORT_DFP_D";
-	case DP_USBPD_PORT_D_UFP_D: return "DP_USBPD_PORT_D_UFP_D";
-	default: return "DP_USBPD_PORT_NONE";
-	}
-}
-
-static const char *dp_usbpd_cmd_name(u8 cmd)
-{
-	switch (cmd) {
-	case USBPD_SVDM_DISCOVER_MODES: return "USBPD_SVDM_DISCOVER_MODES";
-	case USBPD_SVDM_ENTER_MODE: return "USBPD_SVDM_ENTER_MODE";
-	case USBPD_SVDM_ATTENTION: return "USBPD_SVDM_ATTENTION";
-	case DP_USBPD_VDM_STATUS: return "DP_USBPD_VDM_STATUS";
-	case DP_USBPD_VDM_CONFIGURE: return "DP_USBPD_VDM_CONFIGURE";
-	default: return "DP_USBPD_VDM_ERROR";
-	}
-}
-
-static void dp_usbpd_init_port(enum dp_usbpd_port *port, u32 in_port)
-{
-	switch (in_port) {
-	case 0:
-		*port = DP_USBPD_PORT_NONE;
-		break;
-	case 1:
-		*port = DP_USBPD_PORT_UFP_D;
-		break;
-	case 2:
-		*port = DP_USBPD_PORT_DFP_D;
-		break;
-	case 3:
-		*port = DP_USBPD_PORT_D_UFP_D;
-		break;
-	default:
-		*port = DP_USBPD_PORT_NONE;
-	}
-	pr_debug("port:%s\n", dp_usbpd_port_name(*port));
-}
-
-static void dp_usbpd_get_capabilities(struct dp_usbpd_private *pd)
-{
-	struct dp_usbpd_capabilities *cap = &pd->cap;
-	u32 buf = pd->vdo;
-	int port = buf & 0x3;
-
-	cap->receptacle_state = (buf & BIT(6)) ? true : false;
-	cap->dlink_pin_config = (buf >> 8) & 0xff;
-	cap->ulink_pin_config = (buf >> 16) & 0xff;
-
-	dp_usbpd_init_port(&cap->port, port);
-}
-
-static void dp_usbpd_get_status(struct dp_usbpd_private *pd)
-{
-	struct dp_usbpd *status = &pd->dp_usbpd;
-	u32 buf = pd->vdo;
-	int port = buf & 0x3;
-
-	status->low_pow_st     = (buf & BIT(2)) ? true : false;
-	status->adaptor_dp_en  = (buf & BIT(3)) ? true : false;
-	status->base.multi_func = (buf & BIT(4)) ? true : false;
-	status->usb_config_req = (buf & BIT(5)) ? true : false;
-	status->exit_dp_mode   = (buf & BIT(6)) ? true : false;
-	status->base.hpd_high  = (buf & BIT(7)) ? true : false;
-	status->base.hpd_irq   = (buf & BIT(8)) ? true : false;
-
-	pr_debug("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n",
-			status->low_pow_st, status->adaptor_dp_en,
-			status->base.multi_func);
-	pr_debug("usb_config_req = %d, exit_dp_mode = %d, hpd_high =%d\n",
-			status->usb_config_req,
-			status->exit_dp_mode, status->base.hpd_high);
-	pr_debug("hpd_irq = %d\n", status->base.hpd_irq);
-
-	dp_usbpd_init_port(&status->port, port);
-}
-
-static u32 dp_usbpd_gen_config_pkt(struct dp_usbpd_private *pd)
-{
-	u8 pin_cfg, pin;
-	u32 config = 0;
-	const u32 ufp_d_config = 0x2, dp_ver = 0x1;
-
-	if (pd->cap.receptacle_state)
-		pin_cfg = pd->cap.ulink_pin_config;
-	else
-		pin_cfg = pd->cap.dlink_pin_config;
-
-	for (pin = DP_USBPD_PIN_A; pin < DP_USBPD_PIN_MAX; pin++) {
-		if (pin_cfg & BIT(pin)) {
-			if (pd->dp_usbpd.base.multi_func) {
-				if (pin == DP_USBPD_PIN_D)
-					break;
-			} else {
-				break;
-			}
-		}
-	}
-
-	if (pin == DP_USBPD_PIN_MAX)
-		pin = DP_USBPD_PIN_C;
-
-	pr_debug("pin assignment: %s\n", dp_usbpd_pin_name(pin));
-
-	config |= BIT(pin) << 8;
-
-	config |= (dp_ver << 2);
-	config |= ufp_d_config;
-
-	pr_debug("config = 0x%x\n", config);
-	return config;
-}
-
-static void dp_usbpd_send_event(struct dp_usbpd_private *pd,
-		enum dp_usbpd_events event)
-{
-	u32 config;
-
-	switch (event) {
-	case DP_USBPD_EVT_DISCOVER:
-		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
-			USBPD_SVDM_DISCOVER_MODES,
-			SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0);
-		break;
-	case DP_USBPD_EVT_ENTER:
-		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
-			USBPD_SVDM_ENTER_MODE,
-			SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
-		break;
-	case DP_USBPD_EVT_EXIT:
-		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
-			USBPD_SVDM_EXIT_MODE,
-			SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
-		break;
-	case DP_USBPD_EVT_STATUS:
-		config = 0x1; /* DFP_D connected */
-		usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_STATUS,
-			SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
-		break;
-	case DP_USBPD_EVT_CONFIGURE:
-		config = dp_usbpd_gen_config_pkt(pd);
-		usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_CONFIGURE,
-			SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
-		break;
-	default:
-		pr_err("unknown event:%d\n", event);
-	}
-}
-
-static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr)
-{
-	struct dp_usbpd_private *pd;
-
-	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
-	if (!pd) {
-		pr_err("get_usbpd phandle failed\n");
-		return;
-	}
-
-	pr_debug("\n");
-	dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
-}
-
-static void dp_usbpd_disconnect_cb(struct usbpd_svid_handler *hdlr)
-{
-	struct dp_usbpd_private *pd;
-
-	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
-	if (!pd) {
-		pr_err("get_usbpd phandle failed\n");
-		return;
-	}
-
-	pd->alt_mode = DP_USBPD_ALT_MODE_NONE;
-	pd->dp_usbpd.base.alt_mode_cfg_done = false;
-	pr_debug("\n");
-
-	if (pd->dp_cb && pd->dp_cb->disconnect)
-		pd->dp_cb->disconnect(pd->dev);
-}
-
-static int dp_usbpd_validate_callback(u8 cmd,
-	enum usbpd_svdm_cmd_type cmd_type, int num_vdos)
-{
-	int ret = 0;
-
-	if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) {
-		pr_err("error: NACK\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) {
-		pr_err("error: BUSY\n");
-		ret = -EBUSY;
-		goto end;
-	}
-
-	if (cmd == USBPD_SVDM_ATTENTION) {
-		if (cmd_type != SVDM_CMD_TYPE_INITIATOR) {
-			pr_err("error: invalid cmd type for attention\n");
-			ret = -EINVAL;
-			goto end;
-		}
-
-		if (!num_vdos) {
-			pr_err("error: no vdo provided\n");
-			ret = -EINVAL;
-			goto end;
-		}
-	} else {
-		if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) {
-			pr_err("error: invalid cmd type\n");
-			ret = -EINVAL;
-		}
-	}
-end:
-	return ret;
-}
-
-
-static int dp_usbpd_get_ss_lanes(struct dp_usbpd_private *pd)
-{
-	int rc = 0;
-	int timeout = 250;
-
-	/*
-	 * By default, USB reserves two lanes for Super Speed.
-	 * Which means DP has remaining two lanes to operate on.
-	 * If multi-function is not supported, request USB to
-	 * release the Super Speed lanes so that DP can use
-	 * all four lanes in case DPCD indicates support for
-	 * four lanes.
-	 */
-	if (!pd->dp_usbpd.base.multi_func) {
-		while (timeout) {
-			rc = pd->svid_handler.request_usb_ss_lane(
-					pd->pd, &pd->svid_handler);
-			if (rc != -EBUSY)
-				break;
-
-			pr_warn("USB busy, retry\n");
-
-			/* wait for hw recommended delay for usb */
-			msleep(20);
-			timeout--;
-		}
-	}
-
-	return rc;
-}
-
-static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
-				enum usbpd_svdm_cmd_type cmd_type,
-				const u32 *vdos, int num_vdos)
-{
-	struct dp_usbpd_private *pd;
-	int rc = 0;
-
-	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
-
-	pr_debug("callback -> cmd: %s, *vdos = 0x%x, num_vdos = %d\n",
-				dp_usbpd_cmd_name(cmd), *vdos, num_vdos);
-
-	if (dp_usbpd_validate_callback(cmd, cmd_type, num_vdos)) {
-		pr_debug("invalid callback received\n");
-		return;
-	}
-
-	switch (cmd) {
-	case USBPD_SVDM_DISCOVER_MODES:
-		pd->vdo = *vdos;
-		dp_usbpd_get_capabilities(pd);
-
-		pd->alt_mode |= DP_USBPD_ALT_MODE_DISCOVER;
-
-		if (pd->cap.port & BIT(0))
-			dp_usbpd_send_event(pd, DP_USBPD_EVT_ENTER);
-		break;
-	case USBPD_SVDM_ENTER_MODE:
-		pd->alt_mode |= DP_USBPD_ALT_MODE_ENTER;
-
-		dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
-		break;
-	case USBPD_SVDM_ATTENTION:
-		if (pd->forced_disconnect)
-			break;
-
-		pd->vdo = *vdos;
-		dp_usbpd_get_status(pd);
-
-		if (!pd->dp_usbpd.base.alt_mode_cfg_done) {
-			if (pd->dp_usbpd.port & BIT(1))
-				dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
-			break;
-		}
-
-		if (pd->dp_cb && pd->dp_cb->attention)
-			pd->dp_cb->attention(pd->dev);
-
-		break;
-	case DP_USBPD_VDM_STATUS:
-		pd->vdo = *vdos;
-		dp_usbpd_get_status(pd);
-
-		if (!(pd->alt_mode & DP_USBPD_ALT_MODE_CONFIGURE)) {
-			pd->alt_mode |= DP_USBPD_ALT_MODE_STATUS;
-
-			if (pd->dp_usbpd.port & BIT(1))
-				dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
-		}
-		break;
-	case DP_USBPD_VDM_CONFIGURE:
-		pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE;
-		pd->dp_usbpd.base.alt_mode_cfg_done = true;
-		dp_usbpd_get_status(pd);
-
-		pd->dp_usbpd.base.orientation =
-			usbpd_get_plug_orientation(pd->pd);
-
-		rc = dp_usbpd_get_ss_lanes(pd);
-		if (rc) {
-			pr_err("failed to get SuperSpeed lanes\n");
-			break;
-		}
-
-		if (pd->dp_cb && pd->dp_cb->configure)
-			pd->dp_cb->configure(pd->dev);
-		break;
-	default:
-		pr_err("unknown cmd: %d\n", cmd);
-		break;
-	}
-}
-
-static int dp_usbpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
-{
-	int rc = 0;
-	struct dp_usbpd *dp_usbpd;
-	struct dp_usbpd_private *pd;
-
-	if (!dp_hpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
-	pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
-
-	dp_usbpd->base.hpd_high = hpd;
-	pd->forced_disconnect = !hpd;
-	pd->dp_usbpd.base.alt_mode_cfg_done = hpd;
-
-	pr_debug("hpd_high=%d, forced_disconnect=%d, orientation=%d\n",
-			dp_usbpd->base.hpd_high, pd->forced_disconnect,
-			pd->dp_usbpd.base.orientation);
-	if (hpd)
-		pd->dp_cb->configure(pd->dev);
-	else
-		pd->dp_cb->disconnect(pd->dev);
-
-error:
-	return rc;
-}
-
-static int dp_usbpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
-{
-	int rc = 0;
-	struct dp_usbpd *dp_usbpd;
-	struct dp_usbpd_private *pd;
-
-	dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
-	if (!dp_usbpd) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
-
-	pd->vdo = vdo;
-	dp_usbpd_get_status(pd);
-
-	if (pd->dp_cb && pd->dp_cb->attention)
-		pd->dp_cb->attention(pd->dev);
-error:
-	return rc;
-}
-
-int dp_usbpd_register(struct dp_hpd *dp_hpd)
-{
-	struct dp_usbpd *dp_usbpd;
-	struct dp_usbpd_private *usbpd;
-	int rc = 0;
-
-	if (!dp_hpd)
-		return -EINVAL;
-
-	dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
-
-	usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
-
-	rc = usbpd_register_svid(usbpd->pd, &usbpd->svid_handler);
-	if (rc)
-		pr_err("pd registration failed\n");
-
-	return rc;
-}
-
-struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb)
-{
-	int rc = 0;
-	const char *pd_phandle = "qcom,dp-usbpd-detection";
-	struct usbpd *pd = NULL;
-	struct dp_usbpd_private *usbpd;
-	struct dp_usbpd *dp_usbpd;
-	struct usbpd_svid_handler svid_handler = {
-		.svid		= USB_C_DP_SID,
-		.vdm_received	= NULL,
-		.connect	= &dp_usbpd_connect_cb,
-		.svdm_received	= &dp_usbpd_response_cb,
-		.disconnect	= &dp_usbpd_disconnect_cb,
-	};
-
-	if (!cb) {
-		pr_err("invalid cb data\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	pd = devm_usbpd_get_by_phandle(dev, pd_phandle);
-	if (IS_ERR(pd)) {
-		pr_err("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
-		rc = PTR_ERR(pd);
-		goto error;
-	}
-
-	usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL);
-	if (!usbpd) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	usbpd->dev = dev;
-	usbpd->pd = pd;
-	usbpd->svid_handler = svid_handler;
-	usbpd->dp_cb = cb;
-
-	dp_usbpd = &usbpd->dp_usbpd;
-	dp_usbpd->base.simulate_connect = dp_usbpd_simulate_connect;
-	dp_usbpd->base.simulate_attention = dp_usbpd_simulate_attention;
-	dp_usbpd->base.register_hpd = dp_usbpd_register;
-
-	return &dp_usbpd->base;
-error:
-	return ERR_PTR(rc);
-}
-
-void dp_usbpd_put(struct dp_hpd *dp_hpd)
-{
-	struct dp_usbpd *dp_usbpd;
-	struct dp_usbpd_private *usbpd;
-
-	dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
-	if (!dp_usbpd)
-		return;
-
-	usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
-
-	usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler);
-
-	devm_kfree(usbpd->dev, usbpd);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
deleted file mode 100644
index 899ac4c..0000000
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_USBPD_H_
-#define _DP_USBPD_H_
-
-#include <linux/types.h>
-#include "dp_hpd.h"
-
-struct device;
-
-/**
- * enum dp_usbpd_port - usb/dp port type
- * @DP_USBPD_PORT_NONE: port not configured
- * @DP_USBPD_PORT_UFP_D: Upstream Facing Port - DisplayPort
- * @DP_USBPD_PORT_DFP_D: Downstream Facing Port - DisplayPort
- * @DP_USBPD_PORT_D_UFP_D: Both UFP & DFP - DisplayPort
- */
-
-enum dp_usbpd_port {
-	DP_USBPD_PORT_NONE,
-	DP_USBPD_PORT_UFP_D,
-	DP_USBPD_PORT_DFP_D,
-	DP_USBPD_PORT_D_UFP_D,
-};
-
-/**
- * struct dp_usbpd - DisplayPort status
- *
- * @port: port configured
- * @low_pow_st: low power state
- * @adaptor_dp_en: adaptor functionality enabled
- * @usb_config_req: request to switch to usb
- * @exit_dp_mode: request exit from displayport mode
- * @debug_en: bool to specify debug mode
- */
-struct dp_usbpd {
-	struct dp_hpd base;
-	enum dp_usbpd_port port;
-	bool low_pow_st;
-	bool adaptor_dp_en;
-	bool usb_config_req;
-	bool exit_dp_mode;
-	bool debug_en;
-};
-
-/**
- * dp_usbpd_get() - setup usbpd module
- *
- * @dev: device instance of the caller
- * @cb: struct containing callback function pointers.
- *
- * This function allows the client to initialize the usbpd
- * module. The module will communicate with usb driver and
- * handles the power delivery (PD) communication with the
- * sink/usb device. This module will notify the client using
- * the callback functions about the connection and status.
- */
-struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb);
-
-void dp_usbpd_put(struct dp_hpd *pd);
-#endif /* _DP_USBPD_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
deleted file mode 100644
index c19372c..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
-#include <linux/errno.h>
-
-#include "dsi_catalog.h"
-
-/**
- * dsi_catalog_cmn_init() - catalog init for dsi controller v1.4
- */
-static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
-		enum dsi_ctrl_version version)
-{
-	/* common functions */
-	ctrl->ops.host_setup             = dsi_ctrl_hw_cmn_host_setup;
-	ctrl->ops.video_engine_en        = dsi_ctrl_hw_cmn_video_engine_en;
-	ctrl->ops.video_engine_setup     = dsi_ctrl_hw_cmn_video_engine_setup;
-	ctrl->ops.set_video_timing       = dsi_ctrl_hw_cmn_set_video_timing;
-	ctrl->ops.set_timing_db          = dsi_ctrl_hw_cmn_set_timing_db;
-	ctrl->ops.cmd_engine_setup       = dsi_ctrl_hw_cmn_cmd_engine_setup;
-	ctrl->ops.setup_cmd_stream       = dsi_ctrl_hw_cmn_setup_cmd_stream;
-	ctrl->ops.ctrl_en                = dsi_ctrl_hw_cmn_ctrl_en;
-	ctrl->ops.cmd_engine_en          = dsi_ctrl_hw_cmn_cmd_engine_en;
-	ctrl->ops.phy_sw_reset           = dsi_ctrl_hw_cmn_phy_sw_reset;
-	ctrl->ops.soft_reset             = dsi_ctrl_hw_cmn_soft_reset;
-	ctrl->ops.kickoff_command        = dsi_ctrl_hw_cmn_kickoff_command;
-	ctrl->ops.kickoff_fifo_command   = dsi_ctrl_hw_cmn_kickoff_fifo_command;
-	ctrl->ops.reset_cmd_fifo         = dsi_ctrl_hw_cmn_reset_cmd_fifo;
-	ctrl->ops.trigger_command_dma    = dsi_ctrl_hw_cmn_trigger_command_dma;
-	ctrl->ops.get_interrupt_status   = dsi_ctrl_hw_cmn_get_interrupt_status;
-	ctrl->ops.get_error_status       = dsi_ctrl_hw_cmn_get_error_status;
-	ctrl->ops.clear_error_status     = dsi_ctrl_hw_cmn_clear_error_status;
-	ctrl->ops.clear_interrupt_status =
-		dsi_ctrl_hw_cmn_clear_interrupt_status;
-	ctrl->ops.enable_status_interrupts =
-		dsi_ctrl_hw_cmn_enable_status_interrupts;
-	ctrl->ops.enable_error_interrupts =
-		dsi_ctrl_hw_cmn_enable_error_interrupts;
-	ctrl->ops.video_test_pattern_setup =
-		dsi_ctrl_hw_cmn_video_test_pattern_setup;
-	ctrl->ops.cmd_test_pattern_setup =
-		dsi_ctrl_hw_cmn_cmd_test_pattern_setup;
-	ctrl->ops.test_pattern_enable    = dsi_ctrl_hw_cmn_test_pattern_enable;
-	ctrl->ops.trigger_cmd_test_pattern =
-		dsi_ctrl_hw_cmn_trigger_cmd_test_pattern;
-	ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err;
-	ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
-	ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
-	ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
-	ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
-	ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
-	ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
-	ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset;
-	ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr;
-	ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl;
-	ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask;
-	ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version;
-	ctrl->ops.wait_for_cmd_mode_mdp_idle =
-		dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
-	ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr;
-	ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
-
-	switch (version) {
-	case DSI_CTRL_VERSION_1_4:
-		ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
-		ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request;
-		ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit;
-		ctrl->ops.wait_for_lane_idle =
-			dsi_ctrl_hw_14_wait_for_lane_idle;
-		ctrl->ops.ulps_ops.get_lanes_in_ulps =
-			dsi_ctrl_hw_cmn_get_lanes_in_ulps;
-		ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
-		ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
-		ctrl->ops.reg_dump_to_buffer =
-			dsi_ctrl_hw_14_reg_dump_to_buffer;
-		ctrl->ops.schedule_dma_cmd = NULL;
-		ctrl->ops.get_cont_splash_status = NULL;
-		ctrl->ops.kickoff_command_non_embedded_mode = NULL;
-		ctrl->ops.config_clk_gating = NULL;
-		break;
-	case DSI_CTRL_VERSION_2_0:
-		ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
-		ctrl->ops.wait_for_lane_idle =
-			dsi_ctrl_hw_20_wait_for_lane_idle;
-		ctrl->ops.reg_dump_to_buffer =
-			dsi_ctrl_hw_20_reg_dump_to_buffer;
-		ctrl->ops.ulps_ops.ulps_request = NULL;
-		ctrl->ops.ulps_ops.ulps_exit = NULL;
-		ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
-		ctrl->ops.clamp_enable = NULL;
-		ctrl->ops.clamp_disable = NULL;
-		ctrl->ops.schedule_dma_cmd = NULL;
-		ctrl->ops.get_cont_splash_status = NULL;
-		ctrl->ops.kickoff_command_non_embedded_mode = NULL;
-		ctrl->ops.config_clk_gating = NULL;
-		break;
-	case DSI_CTRL_VERSION_2_2:
-	case DSI_CTRL_VERSION_2_3:
-	case DSI_CTRL_VERSION_2_4:
-		ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
-		ctrl->ops.config_clk_gating = dsi_ctrl_hw_22_config_clk_gating;
-		ctrl->ops.get_cont_splash_status =
-			dsi_ctrl_hw_22_get_cont_splash_status;
-		ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
-		ctrl->ops.wait_for_lane_idle =
-			dsi_ctrl_hw_20_wait_for_lane_idle;
-		ctrl->ops.reg_dump_to_buffer =
-			dsi_ctrl_hw_20_reg_dump_to_buffer;
-		ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request;
-		ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit;
-		ctrl->ops.ulps_ops.get_lanes_in_ulps =
-			dsi_ctrl_hw_cmn_get_lanes_in_ulps;
-		ctrl->ops.clamp_enable = NULL;
-		ctrl->ops.clamp_disable = NULL;
-		ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
-		ctrl->ops.kickoff_command_non_embedded_mode =
-			dsi_ctrl_hw_kickoff_non_embedded_mode;
-		break;
-	default:
-		break;
-	}
-}
-
-/**
- * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
- * @ctrl:        Pointer to DSI controller hw object.
- * @version:     DSI controller version.
- * @index:       DSI controller instance ID.
- * @phy_isolation_enabled:       DSI controller works isolated from phy.
- * @null_insertion_enabled:      DSI controller inserts null packet.
- *
- * This function setups the catalog information in the dsi_ctrl_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
-		   enum dsi_ctrl_version version, u32 index,
-		   bool phy_isolation_enabled, bool null_insertion_enabled)
-{
-	int rc = 0;
-
-	if (version == DSI_CTRL_VERSION_UNKNOWN ||
-	    version >= DSI_CTRL_VERSION_MAX) {
-		pr_err("Unsupported version: %d\n", version);
-		return -ENOTSUPP;
-	}
-
-	ctrl->index = index;
-	ctrl->null_insertion_enabled = null_insertion_enabled;
-	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
-	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
-	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
-	set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
-	set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
-	set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
-
-	switch (version) {
-	case DSI_CTRL_VERSION_1_4:
-		dsi_catalog_cmn_init(ctrl, version);
-		break;
-	case DSI_CTRL_VERSION_2_0:
-	case DSI_CTRL_VERSION_2_2:
-	case DSI_CTRL_VERSION_2_3:
-	case DSI_CTRL_VERSION_2_4:
-		ctrl->phy_isolation_enabled = phy_isolation_enabled;
-		dsi_catalog_cmn_init(ctrl, version);
-		break;
-	default:
-		return -ENOTSUPP;
-	}
-
-	return rc;
-}
-
-/**
- * dsi_catalog_phy_2_0_init() - catalog init for DSI PHY 14nm
- */
-static void dsi_catalog_phy_2_0_init(struct dsi_phy_hw *phy)
-{
-	phy->ops.regulator_enable = dsi_phy_hw_v2_0_regulator_enable;
-	phy->ops.regulator_disable = dsi_phy_hw_v2_0_regulator_disable;
-	phy->ops.enable = dsi_phy_hw_v2_0_enable;
-	phy->ops.disable = dsi_phy_hw_v2_0_disable;
-	phy->ops.calculate_timing_params =
-		dsi_phy_hw_calculate_timing_params;
-	phy->ops.phy_idle_on = dsi_phy_hw_v2_0_idle_on;
-	phy->ops.phy_idle_off = dsi_phy_hw_v2_0_idle_off;
-	phy->ops.calculate_timing_params =
-		dsi_phy_hw_calculate_timing_params;
-	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v2_0;
-	phy->ops.clamp_ctrl = dsi_phy_hw_v2_0_clamp_ctrl;
-}
-
-/**
- * dsi_catalog_phy_3_0_init() - catalog init for DSI PHY 10nm
- */
-static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
-{
-	phy->ops.regulator_enable = dsi_phy_hw_v3_0_regulator_enable;
-	phy->ops.regulator_disable = dsi_phy_hw_v3_0_regulator_disable;
-	phy->ops.enable = dsi_phy_hw_v3_0_enable;
-	phy->ops.disable = dsi_phy_hw_v3_0_disable;
-	phy->ops.calculate_timing_params =
-		dsi_phy_hw_calculate_timing_params;
-	phy->ops.ulps_ops.wait_for_lane_idle =
-		dsi_phy_hw_v3_0_wait_for_lane_idle;
-	phy->ops.ulps_ops.ulps_request =
-		dsi_phy_hw_v3_0_ulps_request;
-	phy->ops.ulps_ops.ulps_exit =
-		dsi_phy_hw_v3_0_ulps_exit;
-	phy->ops.ulps_ops.get_lanes_in_ulps =
-		dsi_phy_hw_v3_0_get_lanes_in_ulps;
-	phy->ops.ulps_ops.is_lanes_in_ulps =
-		dsi_phy_hw_v3_0_is_lanes_in_ulps;
-	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
-	phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
-	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
-	phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
-}
-
-/**
- * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY 7nm
- */
-static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
-{
-	phy->ops.regulator_enable = NULL;
-	phy->ops.regulator_disable = NULL;
-	phy->ops.enable = dsi_phy_hw_v4_0_enable;
-	phy->ops.disable = dsi_phy_hw_v4_0_disable;
-	phy->ops.calculate_timing_params =
-		dsi_phy_hw_calculate_timing_params;
-	phy->ops.ulps_ops.wait_for_lane_idle =
-		dsi_phy_hw_v4_0_wait_for_lane_idle;
-	phy->ops.ulps_ops.ulps_request =
-		dsi_phy_hw_v4_0_ulps_request;
-	phy->ops.ulps_ops.ulps_exit =
-		dsi_phy_hw_v4_0_ulps_exit;
-	phy->ops.ulps_ops.get_lanes_in_ulps =
-		dsi_phy_hw_v4_0_get_lanes_in_ulps;
-	phy->ops.ulps_ops.is_lanes_in_ulps =
-		dsi_phy_hw_v4_0_is_lanes_in_ulps;
-	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v4_0;
-	phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset;
-	phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo;
-	phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel;
-}
-
-/**
- * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
- * @ctrl:        Pointer to DSI PHY hw object.
- * @version:     DSI PHY version.
- * @index:       DSI PHY instance ID.
- *
- * This function setups the catalog information in the dsi_phy_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
-			  enum dsi_phy_version version,
-			  u32 index)
-{
-	int rc = 0;
-
-	if (version == DSI_PHY_VERSION_UNKNOWN ||
-	    version >= DSI_PHY_VERSION_MAX) {
-		pr_err("Unsupported version: %d\n", version);
-		return -ENOTSUPP;
-	}
-
-	phy->index = index;
-	phy->version = version;
-	set_bit(DSI_PHY_DPHY, phy->feature_map);
-
-	dsi_phy_timing_calc_init(phy, version);
-
-	switch (version) {
-	case DSI_PHY_VERSION_2_0:
-		dsi_catalog_phy_2_0_init(phy);
-		break;
-	case DSI_PHY_VERSION_3_0:
-		dsi_catalog_phy_3_0_init(phy);
-		break;
-	case DSI_PHY_VERSION_4_0:
-	case DSI_PHY_VERSION_4_1:
-		dsi_catalog_phy_4_0_init(phy);
-		break;
-	case DSI_PHY_VERSION_0_0_HPM:
-	case DSI_PHY_VERSION_0_0_LPM:
-	case DSI_PHY_VERSION_1_0:
-	default:
-		return -ENOTSUPP;
-	}
-
-	return rc;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
deleted file mode 100644
index 08aa842..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_CATALOG_H_
-#define _DSI_CATALOG_H_
-
-#include "dsi_ctrl_hw.h"
-#include "dsi_phy_hw.h"
-
-/**
- * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
- * @ctrl:        Pointer to DSI controller hw object.
- * @version:     DSI controller version.
- * @index:       DSI controller instance ID.
- * @phy_isolation_enabled:       DSI controller works isolated from phy.
- * @null_insertion_enabled:      DSI controller inserts null packet.
- *
- * This function setups the catalog information in the dsi_ctrl_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
-		   enum dsi_ctrl_version version, u32 index,
-		   bool phy_isolation_enabled, bool null_insertion_enabled);
-
-/**
- * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
- * @phy:        Pointer to DSI PHY hw object.
- * @version:     DSI PHY version.
- * @index:       DSI PHY instance ID.
- *
- * This function setups the catalog information in the dsi_phy_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
-			  enum dsi_phy_version version,
-			  u32 index);
-
-/**
- * dsi_phy_timing_calc_init() - initialize info for DSI PHY timing calculations
- * @phy:        Pointer to DSI PHY hw object.
- * @version:     DSI PHY version.
- *
- * This function setups the catalog information in the dsi_phy_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
-	enum dsi_phy_version version);
-
-/**
- * dsi_phy_hw_calculate_timing_params() - DSI PHY timing parameter calculations
- * @phy:        Pointer to DSI PHY hw object.
- * @mode:       DSI mode information.
- * @host:       DSI host configuration.
- * @timing:     DSI phy lane configurations.
- *
- * This function setups the catalog information in the dsi_phy_hw object.
- *
- * return: error code for failure and 0 for success.
- */
-int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-	struct dsi_host_common_cfg *host,
-	struct dsi_phy_per_lane_cfgs *timing);
-
-/* Definitions for 14nm PHY hardware driver */
-void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
-				      struct dsi_phy_per_lane_cfgs *cfg);
-void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy);
-void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy);
-int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size);
-void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
-
-/* Definitions for 10nm PHY hardware driver */
-void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy,
-				      struct dsi_phy_per_lane_cfgs *cfg);
-void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy);
-void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-int dsi_phy_hw_v3_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes);
-void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
-		struct dsi_phy_cfg *cfg, u32 lanes);
-void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes);
-u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
-bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
-int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size);
-void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
-int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
-void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
-
-/* Definitions for 7nm PHY hardware driver */
-void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-int dsi_phy_hw_v4_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes);
-void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy,
-		struct dsi_phy_cfg *cfg, u32 lanes);
-void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes);
-u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
-bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
-int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size);
-int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy);
-void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
-void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy);
-
-/* DSI controller common ops */
-u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries,
-			       u32 size);
-void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
-void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
-					     u32 ints);
-
-u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
-void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
-					    u64 errors);
-
-void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
-				 enum dsi_test_pattern type,
-				 u32 init_val);
-void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
-			       enum dsi_test_pattern  type,
-			       u32 init_val,
-			       u32 stream_id);
-void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
-void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
-				 u32 stream_id);
-
-void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl,
-			       struct dsi_host_common_cfg *config);
-void dsi_ctrl_hw_cmn_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
-void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
-				       struct dsi_host_common_cfg *common_cfg,
-				       struct dsi_video_engine_cfg *cfg);
-
-void dsi_ctrl_hw_cmn_setup_avr(struct dsi_ctrl_hw *ctrl, bool enable);
-
-void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl,
-			 struct dsi_mode_info *mode);
-void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl,
-				     bool enable);
-void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_host_common_cfg *common_cfg,
-				     struct dsi_cmd_engine_cfg *cfg);
-
-void dsi_ctrl_hw_cmn_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
-void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
-
-void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_mode_info *mode,
-				     u32 h_stride,
-				     u32 vc_id,
-				     struct dsi_rect *roi);
-void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
-
-void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
-			enum dsi_op_mode panel_mode,
-			bool enable, u32 frame_count);
-u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
-			enum dsi_op_mode panel_mode);
-
-void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
-			struct dsi_ctrl_cmd_dma_info *cmd,
-			u32 flags);
-
-void dsi_ctrl_hw_cmn_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
-			     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
-			     u32 flags);
-void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
-			bool enable);
-void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
-			bool enable);
-u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
-				     u8 *rd_buf,
-				     u32 read_offset,
-				     u32 rx_byte,
-				     u32 pkt_size, u32 *hw_read_cnt);
-void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_on);
-int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
-			int mask);
-void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx,
-			bool en);
-void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en);
-u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl);
-u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl);
-int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl);
-
-/* Definitions specific to 1.4 DSI controller hardware */
-int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
-void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
-		       struct dsi_lane_map *lane_map);
-void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
-void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
-u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
-
-void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
-				 u32 lanes,
-				 bool enable_ulps);
-
-void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
-				  u32 lanes,
-				  bool disable_ulps);
-ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
-					  char *buf,
-					  u32 size);
-
-/* Definitions specific to 2.0 DSI controller hardware */
-void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl,
-		       struct dsi_lane_map *lane_map);
-int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
-ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
-					  char *buf,
-					  u32 size);
-void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
-					struct dsi_ctrl_cmd_dma_info *cmd,
-					u32 flags);
-
-/* Definitions specific to 2.2 DSI controller hardware */
-bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
-		enum dsi_clk_gate_type clk_selection);
-
-void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
-
-#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
deleted file mode 100644
index 2efc2c4..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_CLK_H_
-#define _DSI_CLK_H_
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include "sde_power_handle.h"
-
-#define MAX_STRING_LEN 32
-#define MAX_DSI_CTRL 2
-
-enum dsi_clk_state {
-	DSI_CLK_OFF,
-	DSI_CLK_ON,
-	DSI_CLK_EARLY_GATE,
-};
-
-enum clk_req_client {
-	DSI_CLK_REQ_MDP_CLIENT = 0,
-	DSI_CLK_REQ_DSI_CLIENT,
-};
-
-enum dsi_link_clk_type {
-	DSI_LINK_ESC_CLK,
-	DSI_LINK_BYTE_CLK,
-	DSI_LINK_PIX_CLK,
-	DSI_LINK_BYTE_INTF_CLK,
-	DSI_LINK_CLK_MAX,
-};
-
-enum dsi_link_clk_op_type {
-	DSI_LINK_CLK_SET_RATE = BIT(0),
-	DSI_LINK_CLK_PREPARE = BIT(1),
-	DSI_LINK_CLK_ENABLE = BIT(2),
-	DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2),
-};
-
-enum dsi_clk_type {
-	DSI_CORE_CLK = BIT(0),
-	DSI_LINK_CLK = BIT(1),
-	DSI_ALL_CLKS = (BIT(0) | BIT(1)),
-	DSI_CLKS_MAX = BIT(2),
-};
-
-enum dsi_lclk_type {
-	DSI_LINK_NONE = 0,
-	DSI_LINK_LP_CLK = BIT(0),
-	DSI_LINK_HS_CLK = BIT(1),
-};
-
-struct dsi_clk_ctrl_info {
-	enum dsi_clk_type clk_type;
-	enum dsi_clk_state clk_state;
-	enum clk_req_client client;
-};
-
-struct clk_ctrl_cb {
-	void *priv;
-	int (*dsi_clk_cb)(void *priv, struct dsi_clk_ctrl_info clk_ctrl_info);
-};
-
-/**
- * struct dsi_core_clk_info - Core clock information for DSI hardware
- * @mdp_core_clk:        Handle to MDP core clock.
- * @iface_clk:           Handle to MDP interface clock.
- * @core_mmss_clk:       Handle to MMSS core clock.
- * @bus_clk:             Handle to bus clock.
- * @mnoc_clk:            Handle to MMSS NOC clock.
- * @dsi_core_client:	 Pointer to SDE power client
- * @phandle:             Pointer to SDE power handle
- */
-struct dsi_core_clk_info {
-	struct clk *mdp_core_clk;
-	struct clk *iface_clk;
-	struct clk *core_mmss_clk;
-	struct clk *bus_clk;
-	struct clk *mnoc_clk;
-	struct sde_power_client *dsi_core_client;
-	struct sde_power_handle *phandle;
-};
-
-/**
- * struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW
- * @byte_clk:        Handle to DSI byte_clk.
- * @pixel_clk:       Handle to DSI pixel_clk.
- * @byte_intf_clk:   Handle to DSI byte intf. clock.
- */
-struct dsi_link_hs_clk_info {
-	struct clk *byte_clk;
-	struct clk *pixel_clk;
-	struct clk *byte_intf_clk;
-};
-
-/**
- * struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW.
- * @esc_clk:         Handle to DSI escape clock.
- */
-struct dsi_link_lp_clk_info {
-	struct clk *esc_clk;
-};
-
-/**
- * struct link_clk_freq - Clock frequency information for Link clocks
- * @byte_clk_rate:   Frequency of DSI byte_clk in KHz.
- * @pixel_clk_rate:  Frequency of DSI pixel_clk in KHz.
- * @esc_clk_rate:    Frequency of DSI escape clock in KHz.
- */
-struct link_clk_freq {
-	u32 byte_clk_rate;
-	u32 pix_clk_rate;
-	u32 esc_clk_rate;
-};
-
-/**
- * typedef *pre_clockoff_cb() - Callback before clock is turned off
- * @priv: private data pointer.
- * @clk_type: clock which is being turned off.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @new_state: next state for the clock.
- *
- * @return: error code.
- */
-typedef int (*pre_clockoff_cb)(void *priv,
-			       enum dsi_clk_type clk_type,
-			       enum dsi_lclk_type l_type,
-			       enum dsi_clk_state new_state);
-
-/**
- * typedef *post_clockoff_cb() - Callback after clock is turned off
- * @priv: private data pointer.
- * @clk_type: clock which was turned off.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @curr_state: current state for the clock.
- *
- * @return: error code.
- */
-typedef int (*post_clockoff_cb)(void *priv,
-				enum dsi_clk_type clk_type,
-				enum dsi_lclk_type l_type,
-				enum dsi_clk_state curr_state);
-
-/**
- * typedef *post_clockon_cb() - Callback after clock is turned on
- * @priv: private data pointer.
- * @clk_type: clock which was turned on.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @curr_state: current state for the clock.
- *
- * @return: error code.
- */
-typedef int (*post_clockon_cb)(void *priv,
-			       enum dsi_clk_type clk_type,
-			       enum dsi_lclk_type l_type,
-			       enum dsi_clk_state curr_state);
-
-/**
- * typedef *pre_clockon_cb() - Callback before clock is turned on
- * @priv: private data pointer.
- * @clk_type: clock which is being turned on.
- * @l_type: specifies if the clock is HS or LP type.Valid only for link clocks.
- * @new_state: next state for the clock.
- *
- * @return: error code.
- */
-typedef int (*pre_clockon_cb)(void *priv,
-			      enum dsi_clk_type clk_type,
-			      enum dsi_lclk_type l_type,
-			      enum dsi_clk_state new_state);
-
-
-/**
- * struct dsi_clk_info - clock information for DSI hardware.
- * @name:                    client name.
- * @c_clks[MAX_DSI_CTRL]     array of core clock configurations
- * @l_lp_clks[MAX_DSI_CTRL]  array of low power(esc) clock configurations
- * @l_hs_clks[MAX_DSI_CTRL]  array of high speed clock configurations
- * @bus_handle[MAX_DSI_CTRL] array of bus handles
- * @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped
- *                           to core and link clock configurations
- * @pre_clkoff_cb            callback before clock is turned off
- * @post_clkoff_cb           callback after clock is turned off
- * @post_clkon_cb            callback after clock is turned on
- * @pre_clkon_cb             callback before clock is turned on
- * @priv_data                pointer to private data
- * @master_ndx               master DSI controller index
- * @dsi_ctrl_count           number of DSI controllers
- */
-struct dsi_clk_info {
-	char name[MAX_STRING_LEN];
-	struct dsi_core_clk_info c_clks[MAX_DSI_CTRL];
-	struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL];
-	struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL];
-	u32 bus_handle[MAX_DSI_CTRL];
-	u32 ctrl_index[MAX_DSI_CTRL];
-	pre_clockoff_cb pre_clkoff_cb;
-	post_clockoff_cb post_clkoff_cb;
-	post_clockon_cb post_clkon_cb;
-	pre_clockon_cb pre_clkon_cb;
-	void *priv_data;
-	u32 master_ndx;
-	u32 dsi_ctrl_count;
-};
-
-/**
- * struct dsi_clk_link_set - Pair of clock handles to describe link clocks
- * @byte_clk:     Handle to DSi byte_clk.
- * @pixel_clk:    Handle to DSI pixel_clk.
- */
-struct dsi_clk_link_set {
-	struct clk *byte_clk;
-	struct clk *pixel_clk;
-};
-
-/**
- * dsi_display_clk_mngr_update_splash_status() - Update splash stattus
- * @clk_mngr:     Structure containing DSI clock information
- * @status:     Splash status
- */
-void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status);
-
-/**
- * dsi_display_clk_mgr_register() - Register DSI clock manager
- * @info:     Structure containing DSI clock information
- */
-void *dsi_display_clk_mngr_register(struct dsi_clk_info *info);
-
-/**
- * dsi_display_clk_mngr_deregister() - Deregister DSI clock manager
- * @clk_mngr:  DSI clock manager pointer
- */
-int dsi_display_clk_mngr_deregister(void *clk_mngr);
-
-/**
- * dsi_register_clk_handle() - Register clock handle with DSI clock manager
- * @clk_mngr:  DSI clock manager pointer
- * @client:     DSI clock client pointer.
- */
-void *dsi_register_clk_handle(void *clk_mngr, char *client);
-
-/**
- * dsi_deregister_clk_handle() - Deregister clock handle from DSI clock manager
- * @client:     DSI clock client pointer.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_deregister_clk_handle(void *client);
-
-/**
- * dsi_display_link_clk_force_update_ctrl() - force to set link clks
- * @handle:     Handle of desired DSI clock client.
- *
- * return: error code in case of failure or 0 for success.
- */
-
-int dsi_display_link_clk_force_update_ctrl(void *handle);
-
-/**
- * dsi_display_clk_ctrl() - set frequencies for link clks
- * @handle:     Handle of desired DSI clock client.
- * @clk_type:   Clock which is being controlled.
- * @clk_state:  Desired state of clock
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_display_clk_ctrl(void *handle,
-	enum dsi_clk_type clk_type, enum dsi_clk_state clk_state);
-
-/**
- * dsi_clk_set_link_frequencies() - set frequencies for link clks
- * @client:     DSI clock client pointer.
- * @freq:       Structure containing link clock frequencies.
- * @index:      Index of the DSI controller.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
-					u32 index);
-
-
-/**
- * dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk
- * @client:       DSI clock client pointer.
- * @pixel_clk:    Pixel_clk rate in Hz.
- * @index:        Index of the DSI controller.
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index);
-
-
-/**
- * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client:       DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index);
-
-/**
- * dsi_clk_update_parent() - update parent clocks for specified clock
- * @parent:       link clock pair which are set as parent.
- * @child:        link clock pair whose parent has to be set.
- */
-int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
-			  struct dsi_clk_link_set *child);
-#endif /* _DSI_CLK_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
deleted file mode 100644
index 9a35c7a..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ /dev/null
@@ -1,1470 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/msm-bus.h>
-#include "dsi_clk.h"
-
-struct dsi_core_clks {
-	struct dsi_core_clk_info clks;
-	u32 bus_handle;
-};
-
-struct dsi_link_clks {
-	struct dsi_link_hs_clk_info hs_clks;
-	struct dsi_link_lp_clk_info lp_clks;
-	struct link_clk_freq freq;
-};
-
-struct dsi_clk_mngr {
-	char name[MAX_STRING_LEN];
-	struct mutex clk_mutex;
-	struct list_head client_list;
-
-	u32 dsi_ctrl_count;
-	u32 master_ndx;
-	struct dsi_core_clks core_clks[MAX_DSI_CTRL];
-	struct dsi_link_clks link_clks[MAX_DSI_CTRL];
-	u32 ctrl_index[MAX_DSI_CTRL];
-	u32 core_clk_state;
-	u32 link_clk_state;
-
-	pre_clockoff_cb pre_clkoff_cb;
-	post_clockoff_cb post_clkoff_cb;
-	post_clockon_cb post_clkon_cb;
-	pre_clockon_cb pre_clkon_cb;
-
-	bool is_cont_splash_enabled;
-	void *priv_data;
-};
-
-struct dsi_clk_client_info {
-	char name[MAX_STRING_LEN];
-	u32 core_refcount;
-	u32 link_refcount;
-	u32 core_clk_state;
-	u32 link_clk_state;
-	struct list_head list;
-	struct dsi_clk_mngr *mngr;
-};
-
-static int _get_clk_mngr_index(struct dsi_clk_mngr *mngr,
-				u32 dsi_ctrl_index,
-				u32 *clk_mngr_index)
-{
-	int i;
-
-	for (i = 0; i < mngr->dsi_ctrl_count; i++) {
-		if (mngr->ctrl_index[i] == dsi_ctrl_index) {
-			*clk_mngr_index = i;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-/**
- * dsi_clk_set_link_frequencies() - set frequencies for link clks
- * @clks:         Link clock information
- * @pixel_clk:    pixel clock frequency in KHz.
- * @byte_clk:     Byte clock frequency in KHz.
- * @esc_clk:      Escape clock frequency in KHz.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
-				u32 index)
-{
-	int rc = 0, clk_mngr_index = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-
-	if (!client) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mngr = c->mngr;
-	rc = _get_clk_mngr_index(mngr, index, &clk_mngr_index);
-	if (rc) {
-		pr_err("failed to map control index %d\n", index);
-		return -EINVAL;
-	}
-
-	memcpy(&mngr->link_clks[clk_mngr_index].freq, &freq,
-		sizeof(struct link_clk_freq));
-
-	return rc;
-}
-
-/**
- * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
- * @clks:      DSI link clock information.
- * @pixel_clk: Pixel clock rate in KHz.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index)
-{
-	int rc = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-
-	mngr = c->mngr;
-	rc = clk_set_rate(mngr->link_clks[index].hs_clks.pixel_clk, pixel_clk);
-	if (rc)
-		pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
-	else
-		mngr->link_clks[index].freq.pix_clk_rate = pixel_clk;
-
-	return rc;
-}
-
-/**
- * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client:       DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
- * return: error code in case of failure or 0 for success.
- */
-int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
-{
-	int rc = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-
-	mngr = c->mngr;
-	rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
-	if (rc)
-		pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
-	else
-		mngr->link_clks[index].freq.byte_clk_rate = byte_clk;
-
-	return rc;
-
-}
-
-/**
- * dsi_clk_update_parent() - update parent clocks for specified clock
- * @parent:       link clock pair which are set as parent.
- * @child:        link clock pair whose parent has to be set.
- */
-int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
-			  struct dsi_clk_link_set *child)
-{
-	int rc = 0;
-
-	rc = clk_set_parent(child->byte_clk, parent->byte_clk);
-	if (rc) {
-		pr_err("failed to set byte clk parent\n");
-		goto error;
-	}
-
-	rc = clk_set_parent(child->pixel_clk, parent->pixel_clk);
-	if (rc) {
-		pr_err("failed to set pixel clk parent\n");
-		goto error;
-	}
-error:
-	return rc;
-}
-
-int dsi_core_clk_start(struct dsi_core_clks *c_clks)
-{
-	int rc = 0;
-
-	if (c_clks->clks.mdp_core_clk) {
-		rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
-		if (rc) {
-			pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
-			goto error;
-		}
-	}
-
-	if (c_clks->clks.mnoc_clk) {
-		rc = clk_prepare_enable(c_clks->clks.mnoc_clk);
-		if (rc) {
-			pr_err("failed to enable mnoc_clk, rc=%d\n", rc);
-			goto error_disable_core_clk;
-		}
-	}
-
-	if (c_clks->clks.iface_clk) {
-		rc = clk_prepare_enable(c_clks->clks.iface_clk);
-		if (rc) {
-			pr_err("failed to enable iface_clk, rc=%d\n", rc);
-			goto error_disable_mnoc_clk;
-		}
-	}
-
-	if (c_clks->clks.bus_clk) {
-		rc = clk_prepare_enable(c_clks->clks.bus_clk);
-		if (rc) {
-			pr_err("failed to enable bus_clk, rc=%d\n", rc);
-			goto error_disable_iface_clk;
-		}
-	}
-
-	if (c_clks->clks.core_mmss_clk) {
-		rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
-		if (rc) {
-			pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
-			goto error_disable_bus_clk;
-		}
-	}
-
-	if (c_clks->bus_handle) {
-		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
-		if (rc) {
-			pr_err("bus scale client enable failed, rc=%d\n", rc);
-			goto error_disable_mmss_clk;
-		}
-	}
-
-	return rc;
-
-error_disable_mmss_clk:
-	if (c_clks->clks.core_mmss_clk)
-		clk_disable_unprepare(c_clks->clks.core_mmss_clk);
-error_disable_bus_clk:
-	if (c_clks->clks.bus_clk)
-		clk_disable_unprepare(c_clks->clks.bus_clk);
-error_disable_iface_clk:
-	if (c_clks->clks.iface_clk)
-		clk_disable_unprepare(c_clks->clks.iface_clk);
-error_disable_mnoc_clk:
-	if (c_clks->clks.mnoc_clk)
-		clk_disable_unprepare(c_clks->clks.mnoc_clk);
-error_disable_core_clk:
-	if (c_clks->clks.mdp_core_clk)
-		clk_disable_unprepare(c_clks->clks.mdp_core_clk);
-error:
-	return rc;
-}
-
-int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
-{
-	int rc = 0;
-
-	if (c_clks->bus_handle) {
-		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 0);
-		if (rc) {
-			pr_err("bus scale client disable failed, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (c_clks->clks.core_mmss_clk)
-		clk_disable_unprepare(c_clks->clks.core_mmss_clk);
-
-	if (c_clks->clks.bus_clk)
-		clk_disable_unprepare(c_clks->clks.bus_clk);
-
-	if (c_clks->clks.iface_clk)
-		clk_disable_unprepare(c_clks->clks.iface_clk);
-
-	if (c_clks->clks.mnoc_clk)
-		clk_disable_unprepare(c_clks->clks.mnoc_clk);
-
-	if (c_clks->clks.mdp_core_clk)
-		clk_disable_unprepare(c_clks->clks.mdp_core_clk);
-
-	return rc;
-}
-
-static int dsi_link_hs_clk_set_rate(struct dsi_link_hs_clk_info *link_hs_clks,
-		int index)
-{
-	int rc = 0;
-	struct dsi_clk_mngr *mngr;
-	struct dsi_link_clks *l_clks;
-
-	if (index >= MAX_DSI_CTRL) {
-		pr_err("Invalid DSI ctrl index\n");
-		return -EINVAL;
-	}
-
-	l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
-	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
-
-	/*
-	 * In an ideal world, cont_splash_enabled should not be required inside
-	 * the clock manager. But, in the current driver cont_splash_enabled
-	 * flag is set inside mdp driver and there is no interface event
-	 * associated with this flag setting.
-	 */
-	if (mngr->is_cont_splash_enabled)
-		return 0;
-
-	rc = clk_set_rate(link_hs_clks->byte_clk,
-		l_clks->freq.byte_clk_rate);
-	if (rc) {
-		pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
-		goto error;
-	}
-
-	rc = clk_set_rate(link_hs_clks->pixel_clk,
-		l_clks->freq.pix_clk_rate);
-	if (rc) {
-		pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
-		goto error;
-	}
-
-	/*
-	 * If byte_intf_clk is present, set rate for that too.
-	 * For DPHY: byte_intf_clk_rate = byte_clk_rate / 2
-	 * todo: this needs to be revisited when support for CPHY is added
-	 */
-	if (link_hs_clks->byte_intf_clk) {
-		rc = clk_set_rate(link_hs_clks->byte_intf_clk,
-			(l_clks->freq.byte_clk_rate / 2));
-		if (rc) {
-			pr_err("set_rate failed for byte_intf_clk rc = %d\n",
-				rc);
-			goto error;
-		}
-	}
-error:
-	return rc;
-}
-
-static int dsi_link_hs_clk_prepare(struct dsi_link_hs_clk_info *link_hs_clks)
-{
-	int rc = 0;
-
-	rc = clk_prepare(link_hs_clks->byte_clk);
-	if (rc) {
-		pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
-		goto byte_clk_err;
-	}
-
-	rc = clk_prepare(link_hs_clks->pixel_clk);
-	if (rc) {
-		pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
-		goto pixel_clk_err;
-	}
-
-	if (link_hs_clks->byte_intf_clk) {
-		rc = clk_prepare(link_hs_clks->byte_intf_clk);
-		if (rc) {
-			pr_err("Failed to prepare dsi byte intf clk, rc=%d\n",
-				rc);
-			goto byte_intf_clk_err;
-		}
-	}
-
-	return rc;
-
-byte_intf_clk_err:
-	clk_unprepare(link_hs_clks->pixel_clk);
-pixel_clk_err:
-	clk_unprepare(link_hs_clks->byte_clk);
-byte_clk_err:
-	return rc;
-}
-
-static void dsi_link_hs_clk_unprepare(struct dsi_link_hs_clk_info *link_hs_clks)
-{
-	if (link_hs_clks->byte_intf_clk)
-		clk_unprepare(link_hs_clks->byte_intf_clk);
-	clk_unprepare(link_hs_clks->pixel_clk);
-	clk_unprepare(link_hs_clks->byte_clk);
-}
-
-static int dsi_link_hs_clk_enable(struct dsi_link_hs_clk_info *link_hs_clks)
-{
-	int rc = 0;
-
-	rc = clk_enable(link_hs_clks->byte_clk);
-	if (rc) {
-		pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
-		goto byte_clk_err;
-	}
-
-	rc = clk_enable(link_hs_clks->pixel_clk);
-	if (rc) {
-		pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
-		goto pixel_clk_err;
-	}
-
-	if (link_hs_clks->byte_intf_clk) {
-		rc = clk_enable(link_hs_clks->byte_intf_clk);
-		if (rc) {
-			pr_err("Failed to enable dsi byte intf clk, rc=%d\n",
-				rc);
-			goto byte_intf_clk_err;
-		}
-	}
-
-	return rc;
-
-byte_intf_clk_err:
-	clk_disable(link_hs_clks->pixel_clk);
-pixel_clk_err:
-	clk_disable(link_hs_clks->byte_clk);
-byte_clk_err:
-	return rc;
-}
-
-static void dsi_link_hs_clk_disable(struct dsi_link_hs_clk_info *link_hs_clks)
-{
-	if (link_hs_clks->byte_intf_clk)
-		clk_disable(link_hs_clks->byte_intf_clk);
-	clk_disable(link_hs_clks->pixel_clk);
-	clk_disable(link_hs_clks->byte_clk);
-}
-
-/**
- * dsi_link_clk_start() - enable dsi link clocks
- */
-static int dsi_link_hs_clk_start(struct dsi_link_hs_clk_info *link_hs_clks,
-	enum dsi_link_clk_op_type op_type, int index)
-{
-	int rc = 0;
-
-	if (index >= MAX_DSI_CTRL) {
-		pr_err("Invalid DSI ctrl index\n");
-		return -EINVAL;
-	}
-
-	if (op_type & DSI_LINK_CLK_SET_RATE) {
-		rc = dsi_link_hs_clk_set_rate(link_hs_clks, index);
-		if (rc) {
-			pr_err("failed to set HS clk rates, rc = %d\n", rc);
-			goto error;
-		}
-	}
-
-	if (op_type & DSI_LINK_CLK_PREPARE) {
-		rc = dsi_link_hs_clk_prepare(link_hs_clks);
-		if (rc) {
-			pr_err("failed to prepare link HS clks, rc = %d\n", rc);
-			goto error;
-		}
-	}
-
-	if (op_type & DSI_LINK_CLK_ENABLE) {
-		rc = dsi_link_hs_clk_enable(link_hs_clks);
-		if (rc) {
-			pr_err("failed to enable link HS clks, rc = %d\n", rc);
-			goto error_unprepare;
-		}
-	}
-
-	pr_debug("HS Link clocks are enabled\n");
-	return rc;
-error_unprepare:
-	dsi_link_hs_clk_unprepare(link_hs_clks);
-error:
-	return rc;
-}
-
-/**
- * dsi_link_clk_stop() - Stop DSI link clocks.
- */
-static int dsi_link_hs_clk_stop(struct dsi_link_hs_clk_info *link_hs_clks)
-{
-	struct dsi_link_clks *l_clks;
-
-	l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
-
-	dsi_link_hs_clk_disable(link_hs_clks);
-	dsi_link_hs_clk_unprepare(link_hs_clks);
-
-	pr_debug("HS Link clocks disabled\n");
-
-	return 0;
-}
-
-static int dsi_link_lp_clk_start(struct dsi_link_lp_clk_info *link_lp_clks,
-	int index)
-{
-	int rc = 0;
-	struct dsi_clk_mngr *mngr;
-	struct dsi_link_clks *l_clks;
-
-	if (index >= MAX_DSI_CTRL) {
-		pr_err("Invalid DSI ctrl index\n");
-		return -EINVAL;
-	}
-
-	l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
-
-	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
-	if (!mngr)
-		return -EINVAL;
-
-	/*
-	 * In an ideal world, cont_splash_enabled should not be required inside
-	 * the clock manager. But, in the current driver cont_splash_enabled
-	 * flag is set inside mdp driver and there is no interface event
-	 * associated with this flag setting. Also, set rate for clock need not
-	 * be called for every enable call. It should be done only once when
-	 * coming out of suspend.
-	 */
-	if (mngr->is_cont_splash_enabled)
-		goto prepare;
-
-	rc = clk_set_rate(link_lp_clks->esc_clk, l_clks->freq.esc_clk_rate);
-	if (rc) {
-		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
-		goto error;
-	}
-
-prepare:
-	rc = clk_prepare_enable(link_lp_clks->esc_clk);
-	if (rc) {
-		pr_err("Failed to enable dsi esc clk\n");
-		clk_unprepare(l_clks->lp_clks.esc_clk);
-	}
-error:
-	pr_debug("LP Link clocks are enabled\n");
-	return rc;
-}
-
-static int dsi_link_lp_clk_stop(
-	struct dsi_link_lp_clk_info *link_lp_clks)
-{
-	struct dsi_link_clks *l_clks;
-
-	l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
-
-	clk_disable_unprepare(l_clks->lp_clks.esc_clk);
-
-	pr_debug("LP Link clocks are disabled\n");
-	return 0;
-}
-
-static int dsi_display_core_clk_enable(struct dsi_core_clks *clks,
-	u32 ctrl_count, u32 master_ndx)
-{
-	int rc = 0;
-	int i;
-	struct dsi_core_clks *clk, *m_clks;
-
-	/*
-	 * In case of split DSI usecases, the clock for master controller should
-	 * be enabled before the other controller. Master controller in the
-	 * clock context refers to the controller that sources the clock.
-	 */
-
-	m_clks = &clks[master_ndx];
-	rc = sde_power_resource_enable(m_clks->clks.phandle,
-			m_clks->clks.dsi_core_client, true);
-
-	if (rc) {
-		pr_err("Power resource enable failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_core_clk_start(m_clks);
-	if (rc) {
-		pr_err("failed to turn on master clocks, rc=%d\n", rc);
-		goto error_disable_master_resource;
-	}
-
-	/* Turn on rest of the core clocks */
-	for (i = 0; i < ctrl_count; i++) {
-		clk = &clks[i];
-		if (!clk || (clk == m_clks))
-			continue;
-
-		rc = sde_power_resource_enable(clk->clks.phandle,
-				clk->clks.dsi_core_client, true);
-		if (rc) {
-			pr_err("Power resource enable failed, rc=%d\n", rc);
-			goto error_disable_master;
-		}
-
-		rc = dsi_core_clk_start(clk);
-		if (rc) {
-			pr_err("failed to turn on clocks, rc=%d\n", rc);
-			(void)sde_power_resource_enable(clk->clks.phandle,
-					clk->clks.dsi_core_client, false);
-			goto error_disable_master;
-		}
-	}
-	return rc;
-error_disable_master:
-	(void)dsi_core_clk_stop(m_clks);
-
-error_disable_master_resource:
-	(void)sde_power_resource_enable(m_clks->clks.phandle,
-				m_clks->clks.dsi_core_client, false);
-error:
-	return rc;
-}
-
-static int dsi_display_link_clk_enable(struct dsi_link_clks *clks,
-	enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
-{
-	int rc = 0;
-	int i;
-	struct dsi_link_clks *clk, *m_clks;
-
-	/*
-	 * In case of split DSI usecases, the clock for master controller should
-	 * be enabled before the other controller. Master controller in the
-	 * clock context refers to the controller that sources the clock.
-	 */
-
-	m_clks = &clks[master_ndx];
-
-	if (l_type & DSI_LINK_LP_CLK) {
-		rc = dsi_link_lp_clk_start(&m_clks->lp_clks, master_ndx);
-		if (rc) {
-			pr_err("failed to turn on master lp link clocks, rc=%d\n",
-				rc);
-			goto error;
-		}
-	}
-
-	if (l_type & DSI_LINK_HS_CLK) {
-		rc = dsi_link_hs_clk_start(&m_clks->hs_clks,
-			DSI_LINK_CLK_START, master_ndx);
-		if (rc) {
-			pr_err("failed to turn on master hs link clocks, rc=%d\n",
-				rc);
-			goto error;
-		}
-	}
-
-	for (i = 0; i < ctrl_count; i++) {
-		clk = &clks[i];
-		if (!clk || (clk == m_clks))
-			continue;
-
-		if (l_type & DSI_LINK_LP_CLK) {
-			rc = dsi_link_lp_clk_start(&clk->lp_clks, i);
-			if (rc) {
-				pr_err("failed to turn on lp link clocks, rc=%d\n",
-					rc);
-				goto error_disable_master;
-			}
-		}
-
-		if (l_type & DSI_LINK_HS_CLK) {
-			rc = dsi_link_hs_clk_start(&clk->hs_clks,
-				DSI_LINK_CLK_START, i);
-			if (rc) {
-				pr_err("failed to turn on hs link clocks, rc=%d\n",
-					rc);
-				goto error_disable_master;
-			}
-		}
-	}
-	return rc;
-
-error_disable_master:
-	if (l_type == DSI_LINK_LP_CLK)
-		(void)dsi_link_lp_clk_stop(&m_clks->lp_clks);
-	else if (l_type == DSI_LINK_HS_CLK)
-		(void)dsi_link_hs_clk_stop(&m_clks->hs_clks);
-error:
-	return rc;
-}
-
-static int dsi_display_core_clk_disable(struct dsi_core_clks *clks,
-	u32 ctrl_count, u32 master_ndx)
-{
-	int rc = 0;
-	int i;
-	struct dsi_core_clks *clk, *m_clks;
-
-	/*
-	 * In case of split DSI usecases, clock for slave DSI controllers should
-	 * be disabled first before disabling clock for master controller. Slave
-	 * controllers in the clock context refer to controller which source
-	 * clock from another controller.
-	 */
-
-	m_clks = &clks[master_ndx];
-
-	/* Turn off non-master core clocks */
-	for (i = 0; i < ctrl_count; i++) {
-		clk = &clks[i];
-		if (!clk || (clk == m_clks))
-			continue;
-
-		rc = dsi_core_clk_stop(clk);
-		if (rc) {
-			pr_debug("failed to turn off clocks, rc=%d\n", rc);
-			goto error;
-		}
-
-		rc = sde_power_resource_enable(clk->clks.phandle,
-				clk->clks.dsi_core_client, false);
-		if (rc) {
-			pr_err("Power resource disable failed: %d\n", rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_core_clk_stop(m_clks);
-	if (rc) {
-		pr_err("failed to turn off master clocks, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = sde_power_resource_enable(m_clks->clks.phandle,
-				m_clks->clks.dsi_core_client, false);
-	if (rc)
-		pr_err("Power resource disable failed: %d\n", rc);
-error:
-	return rc;
-}
-
-static int dsi_display_link_clk_disable(struct dsi_link_clks *clks,
-	enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
-{
-	int rc = 0;
-	int i;
-	struct dsi_link_clks *clk, *m_clks;
-
-	/*
-	 * In case of split DSI usecases, clock for slave DSI controllers should
-	 * be disabled first before disabling clock for master controller. Slave
-	 * controllers in the clock context refer to controller which source
-	 * clock from another controller.
-	 */
-
-	m_clks = &clks[master_ndx];
-
-	/* Turn off non-master link clocks */
-	for (i = 0; i < ctrl_count; i++) {
-		clk = &clks[i];
-		if (!clk || (clk == m_clks))
-			continue;
-
-		if (l_type & DSI_LINK_LP_CLK) {
-			rc = dsi_link_lp_clk_stop(&clk->lp_clks);
-			if (rc)
-				pr_err("failed to turn off lp link clocks, rc=%d\n",
-					rc);
-		}
-
-		if (l_type & DSI_LINK_HS_CLK) {
-			rc = dsi_link_hs_clk_stop(&clk->hs_clks);
-			if (rc)
-				pr_err("failed to turn off hs link clocks, rc=%d\n",
-					rc);
-		}
-	}
-
-	if (l_type & DSI_LINK_LP_CLK) {
-		rc = dsi_link_lp_clk_stop(&m_clks->lp_clks);
-		if (rc)
-			pr_err("failed to turn off master lp link clocks, rc=%d\n",
-				rc);
-	}
-
-	if (l_type & DSI_LINK_HS_CLK) {
-		rc = dsi_link_hs_clk_stop(&m_clks->hs_clks);
-		if (rc)
-			pr_err("failed to turn off master hs link clocks, rc=%d\n",
-				rc);
-	}
-
-	return rc;
-}
-
-static int dsi_clk_update_link_clk_state(struct dsi_clk_mngr *mngr,
-	struct dsi_link_clks *l_clks, enum dsi_lclk_type l_type, u32 l_state,
-	bool enable)
-{
-	int rc = 0;
-
-	if (!mngr)
-		return -EINVAL;
-
-	if (enable) {
-		if (mngr->pre_clkon_cb) {
-			rc = mngr->pre_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
-				l_type, l_state);
-			if (rc) {
-				pr_err("pre link clk on cb failed for type %d\n",
-					l_type);
-				goto error;
-			}
-		}
-		rc = dsi_display_link_clk_enable(l_clks, l_type,
-				mngr->dsi_ctrl_count, mngr->master_ndx);
-		if (rc) {
-			pr_err("failed to start link clk type %d rc=%d\n",
-				l_type, rc);
-			goto error;
-		}
-
-		if (mngr->post_clkon_cb) {
-			rc = mngr->post_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
-				l_type, l_state);
-			if (rc) {
-				pr_err("post link clk on cb failed for type %d\n",
-					l_type);
-				goto error;
-			}
-		}
-	} else {
-		if (mngr->pre_clkoff_cb) {
-			rc = mngr->pre_clkoff_cb(mngr->priv_data,
-				DSI_LINK_CLK, l_type, l_state);
-			if (rc)
-				pr_err("pre link clk off cb failed\n");
-		}
-
-		rc = dsi_display_link_clk_disable(l_clks, l_type,
-			mngr->dsi_ctrl_count, mngr->master_ndx);
-		if (rc) {
-			pr_err("failed to stop link clk type %d, rc = %d\n",
-			       l_type, rc);
-			goto error;
-		}
-
-		if (mngr->post_clkoff_cb) {
-			rc = mngr->post_clkoff_cb(mngr->priv_data,
-				DSI_LINK_CLK, l_type, l_state);
-			if (rc)
-				pr_err("post link clk off cb failed\n");
-		}
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_update_core_clks(struct dsi_clk_mngr *mngr,
-		struct dsi_core_clks *c_clks)
-{
-	int rc = 0;
-
-	if (mngr->core_clk_state == DSI_CLK_OFF) {
-		rc = mngr->pre_clkon_cb(mngr->priv_data,
-					DSI_CORE_CLK,
-					DSI_LINK_NONE,
-					DSI_CLK_ON);
-		if (rc) {
-			pr_err("failed to turn on MDP FS rc= %d\n", rc);
-			goto error;
-		}
-	}
-	rc = dsi_display_core_clk_enable(c_clks, mngr->dsi_ctrl_count,
-			mngr->master_ndx);
-	if (rc) {
-		pr_err("failed to turn on core clks rc = %d\n", rc);
-		goto error;
-	}
-
-	if (mngr->post_clkon_cb) {
-		rc = mngr->post_clkon_cb(mngr->priv_data,
-					 DSI_CORE_CLK,
-					 DSI_LINK_NONE,
-					 DSI_CLK_ON);
-		if (rc)
-			pr_err("post clk on cb failed, rc = %d\n", rc);
-	}
-	mngr->core_clk_state = DSI_CLK_ON;
-error:
-	return rc;
-}
-
-static int dsi_update_clk_state(struct dsi_clk_mngr *mngr,
-	struct dsi_core_clks *c_clks, u32 c_state,
-	struct dsi_link_clks *l_clks, u32 l_state)
-{
-	int rc = 0;
-	bool l_c_on = false;
-
-	if (!mngr)
-		return -EINVAL;
-
-	pr_debug("c_state = %d, l_state = %d\n",
-		 c_clks ? c_state : -1, l_clks ? l_state : -1);
-	/*
-	 * Below is the sequence to toggle DSI clocks:
-	 *	1. For ON sequence, Core clocks before link clocks
-	 *	2. For OFF sequence, Link clocks before core clocks.
-	 */
-	if (c_clks && (c_state == DSI_CLK_ON))
-		rc = dsi_update_core_clks(mngr, c_clks);
-
-	if (rc)
-		goto error;
-
-	if (l_clks) {
-		if (l_state == DSI_CLK_ON) {
-			rc = dsi_clk_update_link_clk_state(mngr, l_clks,
-				DSI_LINK_LP_CLK, l_state, true);
-			if (rc)
-				goto error;
-
-			rc = dsi_clk_update_link_clk_state(mngr, l_clks,
-				DSI_LINK_HS_CLK, l_state, true);
-			if (rc)
-				goto error;
-		} else {
-			/*
-			 * Two conditions that need to be checked for Link
-			 * clocks:
-			 * 1. Link clocks need core clocks to be on when
-			 *    transitioning from EARLY_GATE to OFF state.
-			 * 2. ULPS mode might have to be enabled in case of OFF
-			 *    state. For ULPS, Link clocks should be turned ON
-			 *    first before they are turned off again.
-			 *
-			 * If Link is going from EARLY_GATE to OFF state AND
-			 * Core clock is already in EARLY_GATE or OFF state,
-			 * turn on Core clocks and link clocks.
-			 *
-			 * ULPS state is managed as part of the pre_clkoff_cb.
-			 */
-			if ((l_state == DSI_CLK_OFF) &&
-			    (mngr->link_clk_state ==
-			    DSI_CLK_EARLY_GATE) &&
-			    (mngr->core_clk_state !=
-			    DSI_CLK_ON)) {
-				rc = dsi_display_core_clk_enable(
-					mngr->core_clks, mngr->dsi_ctrl_count,
-					mngr->master_ndx);
-				if (rc) {
-					pr_err("core clks did not start\n");
-					goto error;
-				}
-
-				rc = dsi_display_link_clk_enable(l_clks,
-					(DSI_LINK_LP_CLK & DSI_LINK_HS_CLK),
-					mngr->dsi_ctrl_count, mngr->master_ndx);
-				if (rc) {
-					pr_err("LP Link clks did not start\n");
-					goto error;
-				}
-				l_c_on = true;
-				pr_debug("ECG: core and Link_on\n");
-			}
-
-			rc = dsi_clk_update_link_clk_state(mngr, l_clks,
-				DSI_LINK_HS_CLK, l_state, false);
-			if (rc)
-				goto error;
-
-			rc = dsi_clk_update_link_clk_state(mngr, l_clks,
-				DSI_LINK_LP_CLK, l_state, false);
-			if (rc)
-				goto error;
-
-			/*
-			 * This check is to save unnecessary clock state
-			 * change when going from EARLY_GATE to OFF. In the
-			 * case where the request happens for both Core and Link
-			 * clocks in the same call, core clocks need to be
-			 * turned on first before OFF state can be entered.
-			 *
-			 * Core clocks are turned on here for Link clocks to go
-			 * to OFF state. If core clock request is also present,
-			 * then core clocks can be turned off Core clocks are
-			 * transitioned to OFF state.
-			 */
-			if (l_c_on && (!(c_clks && (c_state == DSI_CLK_OFF)
-					 && (mngr->core_clk_state ==
-					     DSI_CLK_EARLY_GATE)))) {
-				rc = dsi_display_core_clk_disable(
-					mngr->core_clks, mngr->dsi_ctrl_count,
-					mngr->master_ndx);
-				if (rc) {
-					pr_err("core clks did not stop\n");
-					goto error;
-				}
-
-				l_c_on = false;
-				pr_debug("ECG: core off\n");
-			} else
-				pr_debug("ECG: core off skip\n");
-		}
-
-		mngr->link_clk_state = l_state;
-	}
-
-	if (c_clks && (c_state != DSI_CLK_ON)) {
-		/*
-		 * When going to OFF state from EARLY GATE state, Core clocks
-		 * should be turned on first so that the IOs can be clamped.
-		 * l_c_on flag is set, then the core clocks were turned before
-		 * to the Link clocks go to OFF state. So Core clocks are
-		 * already ON and this step can be skipped.
-		 *
-		 * IOs are clamped in pre_clkoff_cb callback.
-		 */
-		if ((c_state == DSI_CLK_OFF) &&
-		    (mngr->core_clk_state ==
-		    DSI_CLK_EARLY_GATE) && !l_c_on) {
-			rc = dsi_display_core_clk_enable(mngr->core_clks,
-				mngr->dsi_ctrl_count, mngr->master_ndx);
-			if (rc) {
-				pr_err("core clks did not start\n");
-				goto error;
-			}
-			pr_debug("ECG: core on\n");
-		} else
-			pr_debug("ECG: core on skip\n");
-
-		if (mngr->pre_clkoff_cb) {
-			rc = mngr->pre_clkoff_cb(mngr->priv_data,
-						 DSI_CORE_CLK,
-						 DSI_LINK_NONE,
-						 c_state);
-			if (rc)
-				pr_err("pre core clk off cb failed\n");
-		}
-
-		rc = dsi_display_core_clk_disable(c_clks, mngr->dsi_ctrl_count,
-			mngr->master_ndx);
-		if (rc) {
-			pr_err("failed to turn off core clks rc = %d\n", rc);
-			goto error;
-		}
-
-		if (c_state == DSI_CLK_OFF) {
-			if (mngr->post_clkoff_cb) {
-				rc = mngr->post_clkoff_cb(mngr->priv_data,
-						DSI_CORE_CLK,
-						DSI_LINK_NONE,
-						DSI_CLK_OFF);
-				if (rc)
-					pr_err("post clkoff cb fail, rc = %d\n",
-					       rc);
-			}
-		}
-		mngr->core_clk_state = c_state;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_recheck_clk_state(struct dsi_clk_mngr *mngr)
-{
-	int rc = 0;
-	struct list_head *pos = NULL;
-	struct dsi_clk_client_info *c;
-	u32 new_core_clk_state = DSI_CLK_OFF;
-	u32 new_link_clk_state = DSI_CLK_OFF;
-	u32 old_c_clk_state = DSI_CLK_OFF;
-	u32 old_l_clk_state = DSI_CLK_OFF;
-	struct dsi_core_clks *c_clks = NULL;
-	struct dsi_link_clks *l_clks = NULL;
-
-	/*
-	 * Conditions to maintain DSI manager clock state based on
-	 *		clock states of various clients:
-	 *	1. If any client has clock in ON state, DSI manager clock state
-	 *		should be ON.
-	 *	2. If any client is in ECG state with rest of them turned OFF,
-	 *	   go to Early gate state.
-	 *	3. If all clients have clocks as OFF, then go to OFF state.
-	 */
-	list_for_each(pos, &mngr->client_list) {
-		c = list_entry(pos, struct dsi_clk_client_info, list);
-		if (c->core_clk_state == DSI_CLK_ON) {
-			new_core_clk_state = DSI_CLK_ON;
-			break;
-		} else if (c->core_clk_state == DSI_CLK_EARLY_GATE) {
-			new_core_clk_state = DSI_CLK_EARLY_GATE;
-		}
-	}
-
-	list_for_each(pos, &mngr->client_list) {
-		c = list_entry(pos, struct dsi_clk_client_info, list);
-		if (c->link_clk_state == DSI_CLK_ON) {
-			new_link_clk_state = DSI_CLK_ON;
-			break;
-		} else if (c->link_clk_state == DSI_CLK_EARLY_GATE) {
-			new_link_clk_state = DSI_CLK_EARLY_GATE;
-		}
-	}
-
-	if (new_core_clk_state != mngr->core_clk_state)
-		c_clks = mngr->core_clks;
-
-	if (new_link_clk_state != mngr->link_clk_state)
-		l_clks = mngr->link_clks;
-
-	old_c_clk_state = mngr->core_clk_state;
-	old_l_clk_state = mngr->link_clk_state;
-
-	pr_debug("c_clk_state (%d -> %d)\n",
-		old_c_clk_state, new_core_clk_state);
-	pr_debug("l_clk_state (%d -> %d)\n",
-		old_l_clk_state, new_link_clk_state);
-
-	if (c_clks || l_clks) {
-		rc = dsi_update_clk_state(mngr, c_clks, new_core_clk_state,
-					  l_clks, new_link_clk_state);
-		if (rc) {
-			pr_err("failed to update clock state, rc = %d\n", rc);
-			goto error;
-		}
-	}
-
-error:
-	return rc;
-}
-
-int dsi_clk_req_state(void *client, enum dsi_clk_type clk,
-	enum dsi_clk_state state)
-{
-	int rc = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-	bool changed = false;
-
-	if (!client || !clk || clk > (DSI_CORE_CLK | DSI_LINK_CLK) ||
-	    state > DSI_CLK_EARLY_GATE) {
-		pr_err("Invalid params, client = %pK, clk = 0x%x, state = %d\n",
-		       client, clk, state);
-		return -EINVAL;
-	}
-
-	mngr = c->mngr;
-	mutex_lock(&mngr->clk_mutex);
-
-	pr_debug("[%s]%s: CLK=%d, new_state=%d, core=%d, linkl=%d\n",
-	       mngr->name, c->name, clk, state, c->core_clk_state,
-	       c->link_clk_state);
-
-	/*
-	 * Clock refcount handling as below:
-	 *	i. Increment refcount whenever ON is called.
-	 *	ii. Decrement refcount when transitioning from ON state to
-	 *		either OFF or EARLY_GATE.
-	 *	iii. Do not decrement refcount when changing from
-	 *		EARLY_GATE to OFF.
-	 */
-	if (state == DSI_CLK_ON) {
-		if (clk & DSI_CORE_CLK) {
-			c->core_refcount++;
-			if (c->core_clk_state != DSI_CLK_ON) {
-				c->core_clk_state = DSI_CLK_ON;
-				changed = true;
-			}
-		}
-		if (clk & DSI_LINK_CLK) {
-			c->link_refcount++;
-			if (c->link_clk_state != DSI_CLK_ON) {
-				c->link_clk_state = DSI_CLK_ON;
-				changed = true;
-			}
-		}
-	} else if ((state == DSI_CLK_EARLY_GATE) ||
-		   (state == DSI_CLK_OFF)) {
-		if (clk & DSI_CORE_CLK) {
-			if (c->core_refcount == 0) {
-				if ((c->core_clk_state ==
-				    DSI_CLK_EARLY_GATE) &&
-				    (state == DSI_CLK_OFF)) {
-					changed = true;
-					c->core_clk_state = DSI_CLK_OFF;
-				} else {
-					pr_warn("Core refcount is zero for %s\n",
-						c->name);
-				}
-			} else {
-				c->core_refcount--;
-				if (c->core_refcount == 0) {
-					c->core_clk_state = state;
-					changed = true;
-				}
-			}
-		}
-		if (clk & DSI_LINK_CLK) {
-			if (c->link_refcount == 0) {
-				if ((c->link_clk_state ==
-				    DSI_CLK_EARLY_GATE) &&
-				    (state == DSI_CLK_OFF)) {
-					changed = true;
-					c->link_clk_state = DSI_CLK_OFF;
-				} else {
-					pr_warn("Link refcount is zero for %s\n",
-						c->name);
-				}
-			} else {
-				c->link_refcount--;
-				if (c->link_refcount == 0) {
-					c->link_clk_state = state;
-					changed = true;
-				}
-			}
-		}
-	}
-	pr_debug("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n",
-		 mngr->name, c->name, changed, c->core_refcount,
-		 c->core_clk_state, c->link_refcount, c->link_clk_state);
-
-	if (changed) {
-		rc = dsi_recheck_clk_state(mngr);
-		if (rc)
-			pr_err("Failed to adjust clock state rc = %d\n", rc);
-	}
-
-	mutex_unlock(&mngr->clk_mutex);
-	return rc;
-}
-
-DEFINE_MUTEX(dsi_mngr_clk_mutex);
-
-static int dsi_display_link_clk_force_update(void *client)
-{
-	int rc = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-	struct dsi_link_clks *l_clks;
-
-	mngr = c->mngr;
-	mutex_lock(&mngr->clk_mutex);
-
-	l_clks = mngr->link_clks;
-
-	/*
-	 * When link_clk_state is DSI_CLK_OFF, don't change DSI clock rate
-	 * since it is possible to be overwritten, and return -EAGAIN to
-	 * dynamic DSI writing interface to defer the reenabling to the next
-	 * drm commit.
-	 */
-	if (mngr->link_clk_state == DSI_CLK_OFF) {
-		rc = -EAGAIN;
-		goto error;
-	}
-
-	rc = dsi_display_link_clk_disable(l_clks,
-			(DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
-			mngr->dsi_ctrl_count, mngr->master_ndx);
-	if (rc) {
-		pr_err("%s, failed to stop link clk, rc = %d\n",
-			__func__, rc);
-		goto error;
-	}
-
-	rc = dsi_display_link_clk_enable(l_clks,
-			(DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
-			mngr->dsi_ctrl_count, mngr->master_ndx);
-	if (rc) {
-		pr_err("%s, failed to start link clk rc= %d\n",
-			__func__, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&mngr->clk_mutex);
-	return rc;
-
-}
-
-int dsi_display_link_clk_force_update_ctrl(void *handle)
-{
-	int rc = 0;
-
-	if (!handle) {
-		pr_err("%s: Invalid arg\n", __func__);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_mngr_clk_mutex);
-
-	rc = dsi_display_link_clk_force_update(handle);
-
-	mutex_unlock(&dsi_mngr_clk_mutex);
-
-	return rc;
-}
-
-int dsi_display_clk_ctrl(void *handle,
-	enum dsi_clk_type clk_type, enum dsi_clk_state clk_state)
-{
-	int rc = 0;
-
-	if (!handle) {
-		pr_err("%s: Invalid arg\n", __func__);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_mngr_clk_mutex);
-	rc = dsi_clk_req_state(handle, clk_type, clk_state);
-	if (rc)
-		pr_err("%s: failed set clk state, rc = %d\n", __func__, rc);
-	mutex_unlock(&dsi_mngr_clk_mutex);
-
-	return rc;
-}
-
-void *dsi_register_clk_handle(void *clk_mngr, char *client)
-{
-	void *handle = NULL;
-	struct dsi_clk_mngr *mngr = clk_mngr;
-	struct dsi_clk_client_info *c;
-
-	if (!mngr) {
-		pr_err("bad params\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	mutex_lock(&mngr->clk_mutex);
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c) {
-		handle = ERR_PTR(-ENOMEM);
-		goto error;
-	}
-
-	strlcpy(c->name, client, MAX_STRING_LEN);
-	c->mngr = mngr;
-
-	list_add(&c->list, &mngr->client_list);
-
-	pr_debug("[%s]: Added new client (%s)\n", mngr->name, c->name);
-	handle = c;
-error:
-	mutex_unlock(&mngr->clk_mutex);
-	return handle;
-}
-
-int dsi_deregister_clk_handle(void *client)
-{
-	int rc = 0;
-	struct dsi_clk_client_info *c = client;
-	struct dsi_clk_mngr *mngr;
-	struct list_head *pos = NULL;
-	struct list_head *tmp = NULL;
-	struct dsi_clk_client_info *node = NULL;
-
-	if (!client) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mngr = c->mngr;
-	pr_debug("%s: ENTER\n", mngr->name);
-	mutex_lock(&mngr->clk_mutex);
-	c->core_clk_state = DSI_CLK_OFF;
-	c->link_clk_state = DSI_CLK_OFF;
-
-	rc = dsi_recheck_clk_state(mngr);
-	if (rc) {
-		pr_err("clock state recheck failed rc = %d\n", rc);
-		goto error;
-	}
-
-	list_for_each_safe(pos, tmp, &mngr->client_list) {
-		node = list_entry(pos, struct dsi_clk_client_info,
-			  list);
-		if (node == c) {
-			list_del(&node->list);
-			pr_debug("Removed device (%s)\n", node->name);
-			kfree(node);
-			break;
-		}
-	}
-
-error:
-	mutex_unlock(&mngr->clk_mutex);
-	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
-	return rc;
-}
-
-void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status)
-{
-	struct dsi_clk_mngr *mngr;
-
-	if (!clk_mgr) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	mngr = (struct dsi_clk_mngr *)clk_mgr;
-	mngr->is_cont_splash_enabled = status;
-}
-
-void *dsi_display_clk_mngr_register(struct dsi_clk_info *info)
-{
-	struct dsi_clk_mngr *mngr;
-	int i = 0;
-
-	if (!info) {
-		pr_err("Invalid params\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	mngr = kzalloc(sizeof(*mngr), GFP_KERNEL);
-	if (!mngr) {
-		mngr = ERR_PTR(-ENOMEM);
-		goto error;
-	}
-
-	mutex_init(&mngr->clk_mutex);
-	mngr->dsi_ctrl_count = info->dsi_ctrl_count;
-	mngr->master_ndx = info->master_ndx;
-
-	if (mngr->dsi_ctrl_count > MAX_DSI_CTRL) {
-		kfree(mngr);
-		return ERR_PTR(-EINVAL);
-	}
-
-	for (i = 0; i < mngr->dsi_ctrl_count; i++) {
-		memcpy(&mngr->core_clks[i].clks, &info->c_clks[i],
-			sizeof(struct dsi_core_clk_info));
-		memcpy(&mngr->link_clks[i].hs_clks, &info->l_hs_clks[i],
-			sizeof(struct dsi_link_hs_clk_info));
-		memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i],
-			sizeof(struct dsi_link_lp_clk_info));
-		mngr->core_clks[i].bus_handle = info->bus_handle[i];
-		mngr->ctrl_index[i] = info->ctrl_index[i];
-	}
-
-	INIT_LIST_HEAD(&mngr->client_list);
-	mngr->pre_clkon_cb = info->pre_clkon_cb;
-	mngr->post_clkon_cb = info->post_clkon_cb;
-	mngr->pre_clkoff_cb = info->pre_clkoff_cb;
-	mngr->post_clkoff_cb = info->post_clkoff_cb;
-	mngr->priv_data = info->priv_data;
-	memcpy(mngr->name, info->name, MAX_STRING_LEN);
-
-error:
-	pr_debug("EXIT, rc = %ld\n", PTR_ERR(mngr));
-	return mngr;
-}
-
-int dsi_display_clk_mngr_deregister(void *clk_mngr)
-{
-	int rc = 0;
-	struct dsi_clk_mngr *mngr = clk_mngr;
-	struct list_head *position = NULL;
-	struct list_head *tmp = NULL;
-	struct dsi_clk_client_info *node = NULL;
-
-	if (!mngr) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	pr_debug("%s: ENTER\n", mngr->name);
-	mutex_lock(&mngr->clk_mutex);
-
-	list_for_each_safe(position, tmp, &mngr->client_list) {
-		node = list_entry(position, struct dsi_clk_client_info,
-			  list);
-		list_del(&node->list);
-		pr_debug("Removed device (%s)\n", node->name);
-		kfree(node);
-	}
-
-	rc = dsi_recheck_clk_state(mngr);
-	if (rc)
-		pr_err("failed to disable all clocks\n");
-
-	mutex_unlock(&mngr->clk_mutex);
-	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
-	kfree(mngr);
-	return rc;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
deleted file mode 100644
index cb22ab9..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ /dev/null
@@ -1,3615 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"dsi-ctrl:[%s] " fmt, __func__
-
-#include <linux/of_device.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#include <linux/msm-bus.h>
-#include <linux/of_irq.h>
-#include <video/mipi_display.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "msm_gpu.h"
-#include "msm_mmu.h"
-#include "dsi_ctrl.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "dsi_catalog.h"
-
-#include "sde_dbg.h"
-
-#define DSI_CTRL_DEFAULT_LABEL "MDSS DSI CTRL"
-
-#define DSI_CTRL_TX_TO_MS     200
-
-#define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
-
-#define CEIL(x, y)              (((x) + ((y)-1)) / (y))
-
-#define TICKS_IN_MICRO_SECOND    1000000
-
-/**
- * enum dsi_ctrl_driver_ops - controller driver ops
- */
-enum dsi_ctrl_driver_ops {
-	DSI_CTRL_OP_POWER_STATE_CHANGE,
-	DSI_CTRL_OP_CMD_ENGINE,
-	DSI_CTRL_OP_VID_ENGINE,
-	DSI_CTRL_OP_HOST_ENGINE,
-	DSI_CTRL_OP_CMD_TX,
-	DSI_CTRL_OP_HOST_INIT,
-	DSI_CTRL_OP_TPG,
-	DSI_CTRL_OP_PHY_SW_RESET,
-	DSI_CTRL_OP_ASYNC_TIMING,
-	DSI_CTRL_OP_MAX
-};
-
-struct dsi_ctrl_list_item {
-	struct dsi_ctrl *ctrl;
-	struct list_head list;
-};
-
-static LIST_HEAD(dsi_ctrl_list);
-static DEFINE_MUTEX(dsi_ctrl_list_lock);
-
-static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
-static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
-static const enum dsi_ctrl_version dsi_ctrl_v2_2 = DSI_CTRL_VERSION_2_2;
-static const enum dsi_ctrl_version dsi_ctrl_v2_3 = DSI_CTRL_VERSION_2_3;
-static const enum dsi_ctrl_version dsi_ctrl_v2_4 = DSI_CTRL_VERSION_2_4;
-
-static const struct of_device_id msm_dsi_of_match[] = {
-	{
-		.compatible = "qcom,dsi-ctrl-hw-v1.4",
-		.data = &dsi_ctrl_v1_4,
-	},
-	{
-		.compatible = "qcom,dsi-ctrl-hw-v2.0",
-		.data = &dsi_ctrl_v2_0,
-	},
-	{
-		.compatible = "qcom,dsi-ctrl-hw-v2.2",
-		.data = &dsi_ctrl_v2_2,
-	},
-	{
-		.compatible = "qcom,dsi-ctrl-hw-v2.3",
-		.data = &dsi_ctrl_v2_3,
-	},
-	{
-		.compatible = "qcom,dsi-ctrl-hw-v2.4",
-		.data = &dsi_ctrl_v2_4,
-	},
-	{}
-};
-
-static ssize_t debugfs_state_info_read(struct file *file,
-				       char __user *buff,
-				       size_t count,
-				       loff_t *ppos)
-{
-	struct dsi_ctrl *dsi_ctrl = file->private_data;
-	char *buf;
-	u32 len = 0;
-
-	if (!dsi_ctrl)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	/* Dump current state */
-	len += snprintf((buf + len), (SZ_4K - len), "Current State:\n");
-	len += snprintf((buf + len), (SZ_4K - len),
-			"\tCTRL_ENGINE = %s\n",
-			TO_ON_OFF(dsi_ctrl->current_state.controller_state));
-	len += snprintf((buf + len), (SZ_4K - len),
-			"\tVIDEO_ENGINE = %s\n\tCOMMAND_ENGINE = %s\n",
-			TO_ON_OFF(dsi_ctrl->current_state.vid_engine_state),
-			TO_ON_OFF(dsi_ctrl->current_state.cmd_engine_state));
-
-	/* Dump clock information */
-	len += snprintf((buf + len), (SZ_4K - len), "\nClock Info:\n");
-	len += snprintf((buf + len), (SZ_4K - len),
-			"\tBYTE_CLK = %u, PIXEL_CLK = %u, ESC_CLK = %u\n",
-			dsi_ctrl->clk_freq.byte_clk_rate,
-			dsi_ctrl->clk_freq.pix_clk_rate,
-			dsi_ctrl->clk_freq.esc_clk_rate);
-
-	/* TODO: make sure that this does not exceed 4K */
-	if (copy_to_user(buff, buf, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-	kfree(buf);
-	return len;
-}
-
-static ssize_t debugfs_reg_dump_read(struct file *file,
-				     char __user *buff,
-				     size_t count,
-				     loff_t *ppos)
-{
-	struct dsi_ctrl *dsi_ctrl = file->private_data;
-	char *buf;
-	u32 len = 0;
-	struct dsi_clk_ctrl_info clk_info;
-	int rc = 0;
-
-	if (!dsi_ctrl)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = dsi_ctrl->clk_cb.dsi_clk_cb(dsi_ctrl->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		kfree(buf);
-		return rc;
-	}
-
-	if (dsi_ctrl->hw.ops.reg_dump_to_buffer)
-		len = dsi_ctrl->hw.ops.reg_dump_to_buffer(&dsi_ctrl->hw,
-				buf, SZ_4K);
-
-	clk_info.clk_state = DSI_CLK_OFF;
-	rc = dsi_ctrl->clk_cb.dsi_clk_cb(dsi_ctrl->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to disable DSI core clocks\n");
-		kfree(buf);
-		return rc;
-	}
-
-
-	/* TODO: make sure that this does not exceed 4K */
-	if (copy_to_user(buff, buf, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-	kfree(buf);
-	return len;
-}
-
-static const struct file_operations state_info_fops = {
-	.open = simple_open,
-	.read = debugfs_state_info_read,
-};
-
-static const struct file_operations reg_dump_fops = {
-	.open = simple_open,
-	.read = debugfs_reg_dump_read,
-};
-
-static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
-				 struct dentry *parent)
-{
-	int rc = 0;
-	struct dentry *dir, *state_file, *reg_dump;
-	char dbg_name[DSI_DEBUG_NAME_LEN];
-
-	dir = debugfs_create_dir(dsi_ctrl->name, parent);
-	if (IS_ERR_OR_NULL(dir)) {
-		rc = PTR_ERR(dir);
-		pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	state_file = debugfs_create_file("state_info",
-					 0444,
-					 dir,
-					 dsi_ctrl,
-					 &state_info_fops);
-	if (IS_ERR_OR_NULL(state_file)) {
-		rc = PTR_ERR(state_file);
-		pr_err("[DSI_%d] state file failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error_remove_dir;
-	}
-
-	reg_dump = debugfs_create_file("reg_dump",
-				       0444,
-				       dir,
-				       dsi_ctrl,
-				       &reg_dump_fops);
-	if (IS_ERR_OR_NULL(reg_dump)) {
-		rc = PTR_ERR(reg_dump);
-		pr_err("[DSI_%d] reg dump file failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error_remove_dir;
-	}
-
-	dsi_ctrl->debugfs_root = dir;
-
-	snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_ctrl",
-						dsi_ctrl->cell_index);
-	sde_dbg_reg_register_base(dbg_name, dsi_ctrl->hw.base,
-				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"));
-	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
-				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"), 0);
-error_remove_dir:
-	debugfs_remove(dir);
-error:
-	return rc;
-}
-
-static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
-{
-	debugfs_remove(dsi_ctrl->debugfs_root);
-	return 0;
-}
-
-static inline struct msm_gem_address_space*
-dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
-		int domain)
-{
-	if (!dsi_ctrl || !dsi_ctrl->drm_dev)
-		return NULL;
-
-	return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
-}
-
-static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
-				enum dsi_ctrl_driver_ops op,
-				u32 op_state)
-{
-	int rc = 0;
-	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
-
-	SDE_EVT32(dsi_ctrl->cell_index, op);
-
-	switch (op) {
-	case DSI_CTRL_OP_POWER_STATE_CHANGE:
-		if (state->power_state == op_state) {
-			pr_err("[%d] No change in state, pwr_state=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if (state->power_state == DSI_CTRL_POWER_VREG_ON) {
-			if (state->vid_engine_state == DSI_CTRL_ENGINE_ON) {
-				pr_err("[%d]State error: op=%d: %d\n",
-				       dsi_ctrl->cell_index,
-				       op_state,
-				       state->vid_engine_state);
-				rc = -EINVAL;
-			}
-		}
-		break;
-	case DSI_CTRL_OP_CMD_ENGINE:
-		if (state->cmd_engine_state == op_state) {
-			pr_err("[%d] No change in state, cmd_state=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) ||
-			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
-			pr_err("[%d]State error: op=%d: %d, %d\n",
-			       dsi_ctrl->cell_index,
-			       op,
-			       state->power_state,
-			       state->controller_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_VID_ENGINE:
-		if (state->vid_engine_state == op_state) {
-			pr_err("[%d] No change in state, cmd_state=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) ||
-			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
-			pr_err("[%d]State error: op=%d: %d, %d\n",
-			       dsi_ctrl->cell_index,
-			       op,
-			       state->power_state,
-			       state->controller_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_HOST_ENGINE:
-		if (state->controller_state == op_state) {
-			pr_err("[%d] No change in state, ctrl_state=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if (state->power_state != DSI_CTRL_POWER_VREG_ON) {
-			pr_err("[%d]State error (link is off): op=%d:, %d\n",
-			       dsi_ctrl->cell_index,
-			       op_state,
-			       state->power_state);
-			rc = -EINVAL;
-		} else if ((op_state == DSI_CTRL_ENGINE_OFF) &&
-			   ((state->cmd_engine_state != DSI_CTRL_ENGINE_OFF) ||
-			    (state->vid_engine_state != DSI_CTRL_ENGINE_OFF))) {
-			pr_err("[%d]State error (eng on): op=%d: %d, %d\n",
-				  dsi_ctrl->cell_index,
-				  op_state,
-				  state->cmd_engine_state,
-				  state->vid_engine_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_CMD_TX:
-		if ((state->power_state != DSI_CTRL_POWER_VREG_ON) ||
-		    (!state->host_initialized) ||
-		    (state->cmd_engine_state != DSI_CTRL_ENGINE_ON)) {
-			pr_err("[%d]State error: op=%d: %d, %d, %d\n",
-			       dsi_ctrl->cell_index,
-			       op,
-			       state->power_state,
-			       state->host_initialized,
-			       state->cmd_engine_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_HOST_INIT:
-		if (state->host_initialized == op_state) {
-			pr_err("[%d] No change in state, host_init=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if (state->power_state != DSI_CTRL_POWER_VREG_ON) {
-			pr_err("[%d]State error: op=%d: %d\n",
-			       dsi_ctrl->cell_index, op, state->power_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_TPG:
-		if (state->tpg_enabled == op_state) {
-			pr_err("[%d] No change in state, tpg_enabled=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		} else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) ||
-			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
-			pr_err("[%d]State error: op=%d: %d, %d\n",
-			       dsi_ctrl->cell_index,
-			       op,
-			       state->power_state,
-			       state->controller_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_PHY_SW_RESET:
-		if (state->power_state != DSI_CTRL_POWER_VREG_ON) {
-			pr_err("[%d]State error: op=%d: %d\n",
-			       dsi_ctrl->cell_index, op, state->power_state);
-			rc = -EINVAL;
-		}
-		break;
-	case DSI_CTRL_OP_ASYNC_TIMING:
-		if (state->vid_engine_state != op_state) {
-			pr_err("[%d] Unexpected engine state vid_state=%d\n",
-			       dsi_ctrl->cell_index, op_state);
-			rc = -EINVAL;
-		}
-		break;
-	default:
-		rc = -ENOTSUPP;
-		break;
-	}
-
-	return rc;
-}
-
-bool dsi_ctrl_validate_host_state(struct dsi_ctrl *dsi_ctrl)
-{
-	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
-
-	if (!state) {
-		pr_err("Invalid host state for DSI controller\n");
-		return -EINVAL;
-	}
-
-	if (!state->host_initialized)
-		return false;
-
-	return true;
-}
-
-static void dsi_ctrl_update_state(struct dsi_ctrl *dsi_ctrl,
-				  enum dsi_ctrl_driver_ops op,
-				  u32 op_state)
-{
-	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
-
-	switch (op) {
-	case DSI_CTRL_OP_POWER_STATE_CHANGE:
-		state->power_state = op_state;
-		break;
-	case DSI_CTRL_OP_CMD_ENGINE:
-		state->cmd_engine_state = op_state;
-		break;
-	case DSI_CTRL_OP_VID_ENGINE:
-		state->vid_engine_state = op_state;
-		break;
-	case DSI_CTRL_OP_HOST_ENGINE:
-		state->controller_state = op_state;
-		break;
-	case DSI_CTRL_OP_HOST_INIT:
-		state->host_initialized = (op_state == 1) ? true : false;
-		break;
-	case DSI_CTRL_OP_TPG:
-		state->tpg_enabled = (op_state == 1) ? true : false;
-		break;
-	case DSI_CTRL_OP_CMD_TX:
-	case DSI_CTRL_OP_PHY_SW_RESET:
-	default:
-		break;
-	}
-}
-
-static int dsi_ctrl_init_regmap(struct platform_device *pdev,
-				struct dsi_ctrl *ctrl)
-{
-	int rc = 0;
-	void __iomem *ptr;
-
-	ptr = msm_ioremap(pdev, "dsi_ctrl", ctrl->name);
-	if (IS_ERR(ptr)) {
-		rc = PTR_ERR(ptr);
-		return rc;
-	}
-
-	ctrl->hw.base = ptr;
-	pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
-		 ctrl->hw.base);
-
-	switch (ctrl->version) {
-	case DSI_CTRL_VERSION_1_4:
-	case DSI_CTRL_VERSION_2_0:
-		ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
-		if (IS_ERR(ptr)) {
-			pr_err("mmss_misc base address not found for [%s]\n",
-					ctrl->name);
-			rc = PTR_ERR(ptr);
-			return rc;
-		}
-		ctrl->hw.mmss_misc_base = ptr;
-		ctrl->hw.disp_cc_base = NULL;
-		break;
-	case DSI_CTRL_VERSION_2_2:
-	case DSI_CTRL_VERSION_2_3:
-	case DSI_CTRL_VERSION_2_4:
-		ptr = msm_ioremap(pdev, "disp_cc_base", ctrl->name);
-		if (IS_ERR(ptr)) {
-			pr_err("disp_cc base address not found for [%s]\n",
-					ctrl->name);
-			rc = PTR_ERR(ptr);
-			return rc;
-		}
-		ctrl->hw.disp_cc_base = ptr;
-		ctrl->hw.mmss_misc_base = NULL;
-		break;
-	default:
-		break;
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
-{
-	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
-	struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
-	struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
-	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
-
-	if (core->mdp_core_clk)
-		devm_clk_put(&ctrl->pdev->dev, core->mdp_core_clk);
-	if (core->iface_clk)
-		devm_clk_put(&ctrl->pdev->dev, core->iface_clk);
-	if (core->core_mmss_clk)
-		devm_clk_put(&ctrl->pdev->dev, core->core_mmss_clk);
-	if (core->bus_clk)
-		devm_clk_put(&ctrl->pdev->dev, core->bus_clk);
-	if (core->mnoc_clk)
-		devm_clk_put(&ctrl->pdev->dev, core->mnoc_clk);
-
-	memset(core, 0x0, sizeof(*core));
-
-	if (hs_link->byte_clk)
-		devm_clk_put(&ctrl->pdev->dev, hs_link->byte_clk);
-	if (hs_link->pixel_clk)
-		devm_clk_put(&ctrl->pdev->dev, hs_link->pixel_clk);
-	if (lp_link->esc_clk)
-		devm_clk_put(&ctrl->pdev->dev, lp_link->esc_clk);
-	if (hs_link->byte_intf_clk)
-		devm_clk_put(&ctrl->pdev->dev, hs_link->byte_intf_clk);
-
-	memset(hs_link, 0x0, sizeof(*hs_link));
-	memset(lp_link, 0x0, sizeof(*lp_link));
-
-	if (rcg->byte_clk)
-		devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
-	if (rcg->pixel_clk)
-		devm_clk_put(&ctrl->pdev->dev, rcg->pixel_clk);
-
-	memset(rcg, 0x0, sizeof(*rcg));
-
-	return 0;
-}
-
-static int dsi_ctrl_clocks_init(struct platform_device *pdev,
-				struct dsi_ctrl *ctrl)
-{
-	int rc = 0;
-	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
-	struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
-	struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
-	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
-
-	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
-	if (IS_ERR(core->mdp_core_clk)) {
-		core->mdp_core_clk = NULL;
-		pr_debug("failed to get mdp_core_clk, rc=%d\n", rc);
-	}
-
-	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(core->iface_clk)) {
-		core->iface_clk = NULL;
-		pr_debug("failed to get iface_clk, rc=%d\n", rc);
-	}
-
-	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
-	if (IS_ERR(core->core_mmss_clk)) {
-		core->core_mmss_clk = NULL;
-		pr_debug("failed to get core_mmss_clk, rc=%d\n", rc);
-	}
-
-	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
-	if (IS_ERR(core->bus_clk)) {
-		core->bus_clk = NULL;
-		pr_debug("failed to get bus_clk, rc=%d\n", rc);
-	}
-
-	core->mnoc_clk = devm_clk_get(&pdev->dev, "mnoc_clk");
-	if (IS_ERR(core->mnoc_clk)) {
-		core->mnoc_clk = NULL;
-		pr_debug("can't get mnoc clock, rc=%d\n", rc);
-	}
-
-	hs_link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
-	if (IS_ERR(hs_link->byte_clk)) {
-		rc = PTR_ERR(hs_link->byte_clk);
-		pr_err("failed to get byte_clk, rc=%d\n", rc);
-		goto fail;
-	}
-
-	hs_link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
-	if (IS_ERR(hs_link->pixel_clk)) {
-		rc = PTR_ERR(hs_link->pixel_clk);
-		pr_err("failed to get pixel_clk, rc=%d\n", rc);
-		goto fail;
-	}
-
-	lp_link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
-	if (IS_ERR(lp_link->esc_clk)) {
-		rc = PTR_ERR(lp_link->esc_clk);
-		pr_err("failed to get esc_clk, rc=%d\n", rc);
-		goto fail;
-	}
-
-	hs_link->byte_intf_clk = devm_clk_get(&pdev->dev, "byte_intf_clk");
-	if (IS_ERR(hs_link->byte_intf_clk)) {
-		hs_link->byte_intf_clk = NULL;
-		pr_debug("can't find byte intf clk, rc=%d\n", rc);
-	}
-
-	rcg->byte_clk = devm_clk_get(&pdev->dev, "byte_clk_rcg");
-	if (IS_ERR(rcg->byte_clk)) {
-		rc = PTR_ERR(rcg->byte_clk);
-		pr_err("failed to get byte_clk_rcg, rc=%d\n", rc);
-		goto fail;
-	}
-
-	rcg->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk_rcg");
-	if (IS_ERR(rcg->pixel_clk)) {
-		rc = PTR_ERR(rcg->pixel_clk);
-		pr_err("failed to get pixel_clk_rcg, rc=%d\n", rc);
-		goto fail;
-	}
-
-	return 0;
-fail:
-	dsi_ctrl_clocks_deinit(ctrl);
-	return rc;
-}
-
-static int dsi_ctrl_supplies_deinit(struct dsi_ctrl *ctrl)
-{
-	int i = 0;
-	int rc = 0;
-	struct dsi_regulator_info *regs;
-
-	regs = &ctrl->pwr_info.digital;
-	for (i = 0; i < regs->count; i++) {
-		if (!regs->vregs[i].vreg)
-			pr_err("vreg is NULL, should not reach here\n");
-		else
-			devm_regulator_put(regs->vregs[i].vreg);
-	}
-
-	regs = &ctrl->pwr_info.host_pwr;
-	for (i = 0; i < regs->count; i++) {
-		if (!regs->vregs[i].vreg)
-			pr_err("vreg is NULL, should not reach here\n");
-		else
-			devm_regulator_put(regs->vregs[i].vreg);
-	}
-
-	if (!ctrl->pwr_info.host_pwr.vregs) {
-		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.host_pwr.vregs);
-		ctrl->pwr_info.host_pwr.vregs = NULL;
-		ctrl->pwr_info.host_pwr.count = 0;
-	}
-
-	if (!ctrl->pwr_info.digital.vregs) {
-		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.digital.vregs);
-		ctrl->pwr_info.digital.vregs = NULL;
-		ctrl->pwr_info.digital.count = 0;
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_supplies_init(struct platform_device *pdev,
-				  struct dsi_ctrl *ctrl)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_regulator_info *regs;
-	struct regulator *vreg = NULL;
-
-	rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
-					  &ctrl->pwr_info.digital,
-					  "qcom,core-supply-entries");
-	if (rc)
-		pr_debug("failed to get digital supply, rc = %d\n", rc);
-
-	rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
-					  &ctrl->pwr_info.host_pwr,
-					  "qcom,ctrl-supply-entries");
-	if (rc) {
-		pr_err("failed to get host power supplies, rc = %d\n", rc);
-		goto error_digital;
-	}
-
-	regs = &ctrl->pwr_info.digital;
-	for (i = 0; i < regs->count; i++) {
-		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
-		if (IS_ERR(vreg)) {
-			pr_err("failed to get %s regulator\n",
-			       regs->vregs[i].vreg_name);
-			rc = PTR_ERR(vreg);
-			goto error_host_pwr;
-		}
-		regs->vregs[i].vreg = vreg;
-	}
-
-	regs = &ctrl->pwr_info.host_pwr;
-	for (i = 0; i < regs->count; i++) {
-		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
-		if (IS_ERR(vreg)) {
-			pr_err("failed to get %s regulator\n",
-			       regs->vregs[i].vreg_name);
-			for (--i; i >= 0; i--)
-				devm_regulator_put(regs->vregs[i].vreg);
-			rc = PTR_ERR(vreg);
-			goto error_digital_put;
-		}
-		regs->vregs[i].vreg = vreg;
-	}
-
-	return rc;
-
-error_digital_put:
-	regs = &ctrl->pwr_info.digital;
-	for (i = 0; i < regs->count; i++)
-		devm_regulator_put(regs->vregs[i].vreg);
-error_host_pwr:
-	devm_kfree(&pdev->dev, ctrl->pwr_info.host_pwr.vregs);
-	ctrl->pwr_info.host_pwr.vregs = NULL;
-	ctrl->pwr_info.host_pwr.count = 0;
-error_digital:
-	if (ctrl->pwr_info.digital.vregs)
-		devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
-	ctrl->pwr_info.digital.vregs = NULL;
-	ctrl->pwr_info.digital.count = 0;
-	return rc;
-}
-
-static int dsi_ctrl_axi_bus_client_init(struct platform_device *pdev,
-					struct dsi_ctrl *ctrl)
-{
-	int rc = 0;
-	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
-
-	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
-	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
-		rc = PTR_ERR(bus->bus_scale_table);
-		pr_debug("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
-		bus->bus_scale_table = NULL;
-		return rc;
-	}
-
-	bus->bus_handle = msm_bus_scale_register_client(bus->bus_scale_table);
-	if (!bus->bus_handle) {
-		rc = -EINVAL;
-		pr_err("failed to register axi bus client\n");
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_axi_bus_client_deinit(struct dsi_ctrl *ctrl)
-{
-	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
-
-	if (bus->bus_handle) {
-		msm_bus_scale_unregister_client(bus->bus_handle);
-
-		bus->bus_handle = 0;
-	}
-
-	return 0;
-}
-
-static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl,
-					struct dsi_host_config *config)
-{
-	int rc = 0;
-	struct dsi_host_common_cfg *host_cfg = &config->common_config;
-
-	if (config->panel_mode >= DSI_OP_MODE_MAX) {
-		pr_err("Invalid dsi operation mode (%d)\n", config->panel_mode);
-		rc = -EINVAL;
-		goto err;
-	}
-
-	if ((host_cfg->data_lanes & (DSI_CLOCK_LANE - 1)) == 0) {
-		pr_err("No data lanes are enabled\n");
-		rc = -EINVAL;
-		goto err;
-	}
-err:
-	return rc;
-}
-
-/* Function returns number of bits per pxl */
-int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format)
-{
-	u32 bpp = 0;
-
-	switch (dst_format) {
-	case DSI_PIXEL_FORMAT_RGB111:
-		bpp = 3;
-		break;
-	case DSI_PIXEL_FORMAT_RGB332:
-		bpp = 8;
-		break;
-	case DSI_PIXEL_FORMAT_RGB444:
-		bpp = 12;
-		break;
-	case DSI_PIXEL_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	case DSI_PIXEL_FORMAT_RGB666:
-	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
-		bpp = 18;
-		break;
-	case DSI_PIXEL_FORMAT_RGB888:
-		bpp = 24;
-		break;
-	default:
-		bpp = 24;
-		break;
-	}
-	return bpp;
-}
-
-static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
-	struct dsi_host_config *config, void *clk_handle)
-{
-	int rc = 0;
-	u32 num_of_lanes = 0;
-	u32 bpp, refresh_rate = TICKS_IN_MICRO_SECOND;
-	u64 h_period, v_period, bit_rate, pclk_rate, bit_rate_per_lane,
-	    byte_clk_rate;
-	struct dsi_host_common_cfg *host_cfg = &config->common_config;
-	struct dsi_mode_info *timing = &config->video_timing;
-
-	/* Get bits per pxl in desitnation format */
-	bpp = dsi_ctrl_pixel_format_to_bpp(host_cfg->dst_format);
-
-	if (host_cfg->data_lanes & DSI_DATA_LANE_0)
-		num_of_lanes++;
-	if (host_cfg->data_lanes & DSI_DATA_LANE_1)
-		num_of_lanes++;
-	if (host_cfg->data_lanes & DSI_DATA_LANE_2)
-		num_of_lanes++;
-	if (host_cfg->data_lanes & DSI_DATA_LANE_3)
-		num_of_lanes++;
-
-	if (config->bit_clk_rate_hz_override == 0) {
-		h_period = DSI_H_TOTAL_DSC(timing);
-		v_period = DSI_V_TOTAL(timing);
-
-		if (config->panel_mode == DSI_OP_CMD_MODE)
-			do_div(refresh_rate, timing->mdp_transfer_time_us);
-		else
-			refresh_rate = timing->refresh_rate;
-
-		bit_rate = h_period * v_period * refresh_rate * bpp;
-	} else {
-		bit_rate = config->bit_clk_rate_hz_override * num_of_lanes;
-	}
-
-	bit_rate_per_lane = bit_rate;
-	do_div(bit_rate_per_lane, num_of_lanes);
-	pclk_rate = bit_rate;
-	do_div(pclk_rate, bpp);
-	byte_clk_rate = bit_rate_per_lane;
-	do_div(byte_clk_rate, 8);
-	pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
-		 bit_rate, bit_rate_per_lane);
-	pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
-		  byte_clk_rate, pclk_rate);
-
-	dsi_ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
-	dsi_ctrl->clk_freq.pix_clk_rate = pclk_rate;
-	dsi_ctrl->clk_freq.esc_clk_rate = config->esc_clk_rate_hz;
-	config->bit_clk_rate_hz = dsi_ctrl->clk_freq.byte_clk_rate * 8;
-
-	rc = dsi_clk_set_link_frequencies(clk_handle, dsi_ctrl->clk_freq,
-					dsi_ctrl->cell_index);
-	if (rc)
-		pr_err("Failed to update link frequencies\n");
-
-	return rc;
-}
-
-static int dsi_ctrl_enable_supplies(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	int rc = 0;
-
-	if (enable) {
-		if (!dsi_ctrl->current_state.host_initialized) {
-			rc = dsi_pwr_enable_regulator(
-				&dsi_ctrl->pwr_info.host_pwr, true);
-			if (rc) {
-				pr_err("failed to enable host power regs\n");
-				goto error;
-			}
-		}
-
-		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
-					      true);
-		if (rc) {
-			pr_err("failed to enable gdsc, rc=%d\n", rc);
-			(void)dsi_pwr_enable_regulator(
-						&dsi_ctrl->pwr_info.host_pwr,
-						false
-						);
-			goto error;
-		}
-	} else {
-		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
-					      false);
-		if (rc) {
-			pr_err("failed to disable gdsc, rc=%d\n", rc);
-			goto error;
-		}
-
-		if (!dsi_ctrl->current_state.host_initialized) {
-			rc = dsi_pwr_enable_regulator(
-				&dsi_ctrl->pwr_info.host_pwr, false);
-			if (rc) {
-				pr_err("failed to disable host power regs\n");
-				goto error;
-			}
-		}
-	}
-error:
-	return rc;
-}
-
-static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl,
-				     const struct mipi_dsi_packet *packet,
-				     u8 **buffer,
-				     u32 *size)
-{
-	int rc = 0;
-	u8 *buf = NULL;
-	u32 len, i;
-
-	len = packet->size;
-	len += 0x3; len &= ~0x03; /* Align to 32 bits */
-
-	buf = devm_kzalloc(&dsi_ctrl->pdev->dev, len * sizeof(u8), GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; i < len; i++) {
-		if (i >= packet->size)
-			buf[i] = 0xFF;
-		else if (i < sizeof(packet->header))
-			buf[i] = packet->header[i];
-		else
-			buf[i] = packet->payload[i - sizeof(packet->header)];
-	}
-
-	if (packet->payload_length > 0)
-		buf[3] |= BIT(6);
-
-
-	/* send embedded BTA for read commands */
-	if ((buf[2] & 0x3f) == MIPI_DSI_DCS_READ)
-		buf[3] |= BIT(5);
-
-	*buffer = buf;
-	*size = len;
-
-	return rc;
-}
-
-int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (dsi_ctrl->host_config.panel_mode != DSI_OP_CMD_MODE)
-		return -EINVAL;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl->hw.ops.wait_for_cmd_mode_mdp_idle(&dsi_ctrl->hw);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	return rc;
-}
-
-static void dsi_ctrl_wait_for_video_done(struct dsi_ctrl *dsi_ctrl)
-{
-	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
-	struct dsi_mode_info *timing;
-
-	/**
-	 * No need to wait if the panel is not video mode or
-	 * if DSI controller supports command DMA scheduling or
-	 * if we are sending init commands.
-	 */
-	if ((dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE) ||
-		(dsi_ctrl->version >= DSI_CTRL_VERSION_2_2) ||
-		(dsi_ctrl->current_state.vid_engine_state !=
-					DSI_CTRL_ENGINE_ON))
-		return;
-
-	dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
-				DSI_VIDEO_MODE_FRAME_DONE);
-
-	dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-				DSI_SINT_VIDEO_MODE_FRAME_DONE, NULL);
-	reinit_completion(&dsi_ctrl->irq_info.vid_frame_done);
-	ret = wait_for_completion_timeout(
-			&dsi_ctrl->irq_info.vid_frame_done,
-			msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-	if (ret <= 0)
-		pr_debug("wait for video done failed\n");
-	dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-				DSI_SINT_VIDEO_MODE_FRAME_DONE);
-
-	timing = &(dsi_ctrl->host_config.video_timing);
-	v_total = timing->v_sync_width + timing->v_back_porch +
-			timing->v_front_porch + timing->v_active;
-	v_blank = timing->v_sync_width + timing->v_back_porch;
-	fps = timing->refresh_rate;
-
-	sleep_ms = CEIL((v_blank * 1000), (v_total * fps)) + 1;
-	udelay(sleep_ms * 1000);
-}
-
-void dsi_message_setup_tx_mode(struct dsi_ctrl *dsi_ctrl,
-		u32 cmd_len,
-		u32 *flags)
-{
-	/**
-	 * Setup the mode of transmission
-	 * override cmd fetch mode during secure session
-	 */
-	if (dsi_ctrl->secure_mode) {
-		*flags &= ~DSI_CTRL_CMD_FETCH_MEMORY;
-		*flags |= DSI_CTRL_CMD_FIFO_STORE;
-		pr_debug("[%s] override to TPG during secure session\n",
-				dsi_ctrl->name);
-		return;
-	}
-
-	/* Check to see if cmd len plus header is greater than fifo size */
-	if ((cmd_len + 4) > DSI_EMBEDDED_MODE_DMA_MAX_SIZE_BYTES) {
-		*flags |= DSI_CTRL_CMD_NON_EMBEDDED_MODE;
-		pr_debug("[%s] override to non-embedded mode,cmd len =%d\n",
-				dsi_ctrl->name, cmd_len);
-		return;
-	}
-}
-
-int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl,
-		u32 cmd_len,
-		u32 *flags)
-{
-	int rc = 0;
-
-	if (*flags & DSI_CTRL_CMD_FIFO_STORE) {
-		/* if command size plus header is greater than fifo size */
-		if ((cmd_len + 4) > DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE) {
-			pr_err("Cannot transfer Cmd in FIFO config\n");
-			return -ENOTSUPP;
-		}
-		if (!dsi_ctrl->hw.ops.kickoff_fifo_command) {
-			pr_err("Cannot transfer command,ops not defined\n");
-			return -ENOTSUPP;
-		}
-	}
-
-	if (*flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-		if (*flags & DSI_CTRL_CMD_BROADCAST) {
-			pr_err("Non embedded not supported with broadcast\n");
-			return -ENOTSUPP;
-		}
-		if (!dsi_ctrl->hw.ops.kickoff_command_non_embedded_mode) {
-			pr_err(" Cannot transfer command,ops not defined\n");
-			return -ENOTSUPP;
-		}
-		if ((cmd_len + 4) > SZ_4K) {
-			pr_err("Cannot transfer,size is greater than 4096\n");
-			return -ENOTSUPP;
-		}
-	}
-
-	if (*flags & DSI_CTRL_CMD_FETCH_MEMORY) {
-		if ((dsi_ctrl->cmd_len + cmd_len + 4) > SZ_4K) {
-			pr_err("Cannot transfer,size is greater than 4096\n");
-			return -ENOTSUPP;
-		}
-	}
-
-	return rc;
-}
-
-static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
-				const struct mipi_dsi_msg *msg,
-				struct dsi_ctrl_cmd_dma_fifo_info *cmd,
-				struct dsi_ctrl_cmd_dma_info *cmd_mem,
-				u32 flags)
-{
-	int rc = 0, ret = 0;
-	u32 hw_flags = 0;
-	u32 line_no = 0x1;
-	struct dsi_mode_info *timing;
-	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
-
-	/* check if custom dma scheduling line needed */
-	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
-		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
-		line_no = dsi_ctrl->host_config.u.video_engine.dma_sched_line;
-
-	timing = &(dsi_ctrl->host_config.video_timing);
-	if (timing)
-		line_no += timing->v_back_porch + timing->v_sync_width +
-				timing->v_active;
-	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
-		dsi_hw_ops.schedule_dma_cmd &&
-		(dsi_ctrl->current_state.vid_engine_state ==
-					DSI_CTRL_ENGINE_ON))
-		dsi_hw_ops.schedule_dma_cmd(&dsi_ctrl->hw,
-				line_no);
-
-	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
-			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
-
-	if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND))
-		hw_flags |= DSI_CTRL_CMD_LAST_COMMAND;
-
-	if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) {
-		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
-			if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-				dsi_hw_ops.kickoff_command_non_embedded_mode(
-							&dsi_ctrl->hw,
-							cmd_mem,
-							hw_flags);
-			} else {
-				dsi_hw_ops.kickoff_command(
-						&dsi_ctrl->hw,
-						cmd_mem,
-						hw_flags);
-			}
-		} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
-			dsi_hw_ops.kickoff_fifo_command(&dsi_ctrl->hw,
-							      cmd,
-							      hw_flags);
-		}
-	}
-
-	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
-		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
-		if (dsi_hw_ops.mask_error_intr)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
-		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
-
-		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
-			if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-				dsi_hw_ops.kickoff_command_non_embedded_mode(
-							&dsi_ctrl->hw,
-							cmd_mem,
-							hw_flags);
-			} else {
-				dsi_hw_ops.kickoff_command(
-						&dsi_ctrl->hw,
-						cmd_mem,
-						hw_flags);
-			}
-		} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
-			dsi_hw_ops.kickoff_fifo_command(&dsi_ctrl->hw,
-							      cmd,
-							      hw_flags);
-		}
-
-		ret = wait_for_completion_timeout(
-				&dsi_ctrl->irq_info.cmd_dma_done,
-				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-
-		if (ret == 0) {
-			u32 status = dsi_hw_ops.get_interrupt_status(
-								&dsi_ctrl->hw);
-			u32 mask = DSI_CMD_MODE_DMA_DONE;
-
-			if (status & mask) {
-				status |= (DSI_CMD_MODE_DMA_DONE |
-						DSI_BTA_DONE);
-				dsi_hw_ops.clear_interrupt_status(
-								&dsi_ctrl->hw,
-								status);
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-				pr_warn("dma_tx done but irq not triggered\n");
-			} else {
-				rc = -ETIMEDOUT;
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				pr_err("[DSI_%d]Command transfer failed\n",
-						dsi_ctrl->cell_index);
-			}
-		}
-
-		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), false);
-		dsi_hw_ops.reset_cmd_fifo(&dsi_ctrl->hw);
-
-		/*
-		 * DSI 2.2 needs a soft reset whenever we send non-embedded
-		 * mode command followed by embedded mode. Otherwise it will
-		 * result in smmu write faults with DSI as client.
-		 */
-		if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-			dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
-			dsi_ctrl->cmd_len = 0;
-		}
-	}
-}
-
-static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
-			  const struct mipi_dsi_msg *msg,
-			  u32 flags)
-{
-	int rc = 0;
-	struct mipi_dsi_packet packet;
-	struct dsi_ctrl_cmd_dma_fifo_info cmd;
-	struct dsi_ctrl_cmd_dma_info cmd_mem;
-	u32 length = 0;
-	u8 *buffer = NULL;
-	u32 cnt = 0;
-	u8 *cmdbuf;
-
-	/* Select the tx mode to transfer the command */
-	dsi_message_setup_tx_mode(dsi_ctrl, msg->tx_len, &flags);
-
-	/* Validate the mode before sending the command */
-	rc = dsi_message_validate_tx_mode(dsi_ctrl, msg->tx_len, &flags);
-	if (rc) {
-		pr_err(" Cmd tx validation failed, cannot transfer cmd\n");
-		rc = -ENOTSUPP;
-		goto error;
-	}
-
-	if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
-		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
-			true : false;
-		cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
-			true : false;
-		cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
-			true : false;
-		cmd_mem.datatype = msg->type;
-		cmd_mem.length = msg->tx_len;
-
-		dsi_ctrl->cmd_len = msg->tx_len;
-		memcpy(dsi_ctrl->vaddr, msg->tx_buf, msg->tx_len);
-		pr_debug(" non-embedded mode , size of command =%zd\n",
-					msg->tx_len);
-
-		goto kickoff;
-	}
-
-	rc = mipi_dsi_create_packet(&packet, msg);
-	if (rc) {
-		pr_err("Failed to create message packet, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
-			&packet,
-			&buffer,
-			&length);
-	if (rc) {
-		pr_err("[%s] failed to copy message, rc=%d\n",
-				dsi_ctrl->name, rc);
-		goto error;
-	}
-
-	if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND))
-		buffer[3] |= BIT(7);//set the last cmd bit in header.
-
-	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
-		/* Embedded mode config is selected */
-		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
-		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
-			true : false;
-		cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
-			true : false;
-		cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
-			true : false;
-
-		cmdbuf = (u8 *)(dsi_ctrl->vaddr);
-
-		msm_gem_sync(dsi_ctrl->tx_cmd_buf);
-		for (cnt = 0; cnt < length; cnt++)
-			cmdbuf[dsi_ctrl->cmd_len + cnt] = buffer[cnt];
-
-		dsi_ctrl->cmd_len += length;
-
-		if (!(msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
-			goto error;
-		} else {
-			cmd_mem.length = dsi_ctrl->cmd_len;
-			dsi_ctrl->cmd_len = 0;
-		}
-
-	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
-		cmd.command =  (u32 *)buffer;
-		cmd.size = length;
-		cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
-				     true : false;
-		cmd.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
-				  true : false;
-		cmd.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
-				  true : false;
-	}
-
-kickoff:
-	dsi_kickoff_msg_tx(dsi_ctrl, msg, &cmd, &cmd_mem, flags);
-error:
-	if (buffer)
-		devm_kfree(&dsi_ctrl->pdev->dev, buffer);
-	return rc;
-}
-
-static int dsi_set_max_return_size(struct dsi_ctrl *dsi_ctrl,
-				   const struct mipi_dsi_msg *rx_msg,
-				   u32 size)
-{
-	int rc = 0;
-	u8 tx[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
-	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY;
-	struct mipi_dsi_msg msg = {
-		.channel = rx_msg->channel,
-		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
-		.tx_len = 2,
-		.tx_buf = tx,
-		.flags = rx_msg->flags,
-	};
-
-	rc = dsi_message_tx(dsi_ctrl, &msg, flags);
-	if (rc)
-		pr_err("failed to send max return size packet, rc=%d\n", rc);
-
-	return rc;
-}
-
-/* Helper functions to support DCS read operation */
-static int dsi_parse_short_read1_resp(const struct mipi_dsi_msg *msg,
-		unsigned char *buff)
-{
-	u8 *data = msg->rx_buf;
-	int read_len = 1;
-
-	if (!data)
-		return 0;
-
-	/* remove dcs type */
-	if (msg->rx_len >= 1)
-		data[0] = buff[1];
-	else
-		read_len = 0;
-
-	return read_len;
-}
-
-static int dsi_parse_short_read2_resp(const struct mipi_dsi_msg *msg,
-		unsigned char *buff)
-{
-	u8 *data = msg->rx_buf;
-	int read_len = 2;
-
-	if (!data)
-		return 0;
-
-	/* remove dcs type */
-	if (msg->rx_len >= 2) {
-		data[0] = buff[1];
-		data[1] = buff[2];
-	} else {
-		read_len = 0;
-	}
-
-	return read_len;
-}
-
-static int dsi_parse_long_read_resp(const struct mipi_dsi_msg *msg,
-		unsigned char *buff)
-{
-	if (!msg->rx_buf)
-		return 0;
-
-	/* remove dcs type */
-	if (msg->rx_buf && msg->rx_len)
-		memcpy(msg->rx_buf, buff + 4, msg->rx_len);
-
-	return msg->rx_len;
-}
-
-static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
-			  const struct mipi_dsi_msg *msg,
-			  u32 flags)
-{
-	int rc = 0;
-	u32 rd_pkt_size, total_read_len, hw_read_cnt;
-	u32 current_read_len = 0, total_bytes_read = 0;
-	bool short_resp = false;
-	bool read_done = false;
-	u32 dlen, diff, rlen;
-	unsigned char *buff;
-	char cmd;
-
-	if (!msg) {
-		pr_err("Invalid msg\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	rlen = msg->rx_len;
-	if (msg->rx_len <= 2) {
-		short_resp = true;
-		rd_pkt_size = msg->rx_len;
-		total_read_len = 4;
-	} else {
-		short_resp = false;
-		current_read_len = 10;
-		if (msg->rx_len < current_read_len)
-			rd_pkt_size = msg->rx_len;
-		else
-			rd_pkt_size = current_read_len;
-
-		total_read_len = current_read_len + 6;
-	}
-	buff = msg->rx_buf;
-
-	while (!read_done) {
-		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
-		if (rc) {
-			pr_err("Failed to set max return packet size, rc=%d\n",
-			       rc);
-			goto error;
-		}
-
-		/* clear RDBK_DATA registers before proceeding */
-		dsi_ctrl->hw.ops.clear_rdbk_register(&dsi_ctrl->hw);
-
-		rc = dsi_message_tx(dsi_ctrl, msg, flags);
-		if (rc) {
-			pr_err("Message transmission failed, rc=%d\n", rc);
-			goto error;
-		}
-		/*
-		 * wait before reading rdbk_data register, if any delay is
-		 * required after sending the read command.
-		 */
-		if (msg->wait_ms)
-			usleep_range(msg->wait_ms * 1000,
-				     ((msg->wait_ms * 1000) + 10));
-
-		dlen = dsi_ctrl->hw.ops.get_cmd_read_data(&dsi_ctrl->hw,
-					buff, total_bytes_read,
-					total_read_len, rd_pkt_size,
-					&hw_read_cnt);
-		if (!dlen)
-			goto error;
-
-		if (short_resp)
-			break;
-
-		if (rlen <= current_read_len) {
-			diff = current_read_len - rlen;
-			read_done = true;
-		} else {
-			diff = 0;
-			rlen -= current_read_len;
-		}
-
-		dlen -= 2; /* 2 bytes of CRC */
-		dlen -= diff;
-		buff += dlen;
-		total_bytes_read += dlen;
-		if (!read_done) {
-			current_read_len = 14; /* Not first read */
-			if (rlen < current_read_len)
-				rd_pkt_size += rlen;
-			else
-				rd_pkt_size += current_read_len;
-		}
-	}
-
-	if (hw_read_cnt < 16 && !short_resp)
-		buff = msg->rx_buf + (16 - hw_read_cnt);
-	else
-		buff = msg->rx_buf;
-
-	/* parse the data read from panel */
-	cmd = buff[0];
-	switch (cmd) {
-	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
-		pr_err("Rx ACK_ERROR\n");
-		rc = 0;
-		break;
-	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
-	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
-		rc = dsi_parse_short_read1_resp(msg, buff);
-		break;
-	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
-	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
-		rc = dsi_parse_short_read2_resp(msg, buff);
-		break;
-	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
-	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
-		rc = dsi_parse_long_read_resp(msg, buff);
-		break;
-	default:
-		pr_warn("Invalid response\n");
-		rc = 0;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-	u32 lanes = 0;
-	u32 ulps_lanes;
-
-	lanes = dsi_ctrl->host_config.common_config.data_lanes;
-
-	rc = dsi_ctrl->hw.ops.wait_for_lane_idle(&dsi_ctrl->hw, lanes);
-	if (rc) {
-		pr_err("lanes not entering idle, skip ULPS\n");
-		return rc;
-	}
-
-	if (!dsi_ctrl->hw.ops.ulps_ops.ulps_request ||
-			!dsi_ctrl->hw.ops.ulps_ops.ulps_exit) {
-		pr_debug("DSI controller ULPS ops not present\n");
-		return 0;
-	}
-
-	lanes |= DSI_CLOCK_LANE;
-	dsi_ctrl->hw.ops.ulps_ops.ulps_request(&dsi_ctrl->hw, lanes);
-
-	ulps_lanes = dsi_ctrl->hw.ops.ulps_ops.get_lanes_in_ulps(&dsi_ctrl->hw);
-
-	if ((lanes & ulps_lanes) != lanes) {
-		pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
-		       lanes, ulps_lanes);
-		rc = -EIO;
-	}
-
-	return rc;
-}
-
-static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-	u32 ulps_lanes, lanes = 0;
-
-	dsi_ctrl->hw.ops.clear_phy0_ln_err(&dsi_ctrl->hw);
-
-	if (!dsi_ctrl->hw.ops.ulps_ops.ulps_request ||
-			!dsi_ctrl->hw.ops.ulps_ops.ulps_exit) {
-		pr_debug("DSI controller ULPS ops not present\n");
-		return 0;
-	}
-
-	lanes = dsi_ctrl->host_config.common_config.data_lanes;
-	lanes |= DSI_CLOCK_LANE;
-
-	ulps_lanes = dsi_ctrl->hw.ops.ulps_ops.get_lanes_in_ulps(&dsi_ctrl->hw);
-
-	if ((lanes & ulps_lanes) != lanes)
-		pr_err("Mismatch between lanes in ULPS\n");
-
-	lanes &= ulps_lanes;
-
-	dsi_ctrl->hw.ops.ulps_ops.ulps_exit(&dsi_ctrl->hw, lanes);
-
-	ulps_lanes = dsi_ctrl->hw.ops.ulps_ops.get_lanes_in_ulps(&dsi_ctrl->hw);
-	if (ulps_lanes & lanes) {
-		pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
-		rc = -EIO;
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-	bool splash_enabled = false;
-	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
-
-	if (!splash_enabled) {
-		state->power_state = DSI_CTRL_POWER_VREG_OFF;
-		state->cmd_engine_state = DSI_CTRL_ENGINE_OFF;
-		state->vid_engine_state = DSI_CTRL_ENGINE_OFF;
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
-{
-	struct msm_gem_address_space *aspace = NULL;
-
-	if (dsi_ctrl->tx_cmd_buf) {
-		aspace = dsi_ctrl_get_aspace(dsi_ctrl,
-				MSM_SMMU_DOMAIN_UNSECURE);
-		if (!aspace) {
-			pr_err("failed to get address space\n");
-			return -ENOMEM;
-		}
-
-		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
-
-		mutex_lock(&dsi_ctrl->drm_dev->struct_mutex);
-		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
-		mutex_unlock(&dsi_ctrl->drm_dev->struct_mutex);
-		dsi_ctrl->tx_cmd_buf = NULL;
-	}
-
-	return 0;
-}
-
-int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-	u64 iova = 0;
-	struct msm_gem_address_space *aspace = NULL;
-
-	aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE);
-	if (!aspace) {
-		pr_err("failed to get address space\n");
-		return -ENOMEM;
-	}
-
-	dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
-					   SZ_4K,
-					   MSM_BO_UNCACHED);
-
-	if (IS_ERR(dsi_ctrl->tx_cmd_buf)) {
-		rc = PTR_ERR(dsi_ctrl->tx_cmd_buf);
-		pr_err("failed to allocate gem, rc=%d\n", rc);
-		dsi_ctrl->tx_cmd_buf = NULL;
-		goto error;
-	}
-
-	dsi_ctrl->cmd_buffer_size = SZ_4K;
-
-	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova);
-	if (rc) {
-		pr_err("failed to get iova, rc=%d\n", rc);
-		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
-		goto error;
-	}
-
-	if (iova & 0x07) {
-		pr_err("Tx command buffer is not 8 byte aligned\n");
-		rc = -ENOTSUPP;
-		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
-		goto error;
-	}
-error:
-	return rc;
-}
-
-static int dsi_enable_io_clamp(struct dsi_ctrl *dsi_ctrl,
-		bool enable, bool ulps_enabled)
-{
-	u32 lanes = 0;
-
-	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
-		lanes = dsi_ctrl->host_config.common_config.data_lanes;
-
-	lanes |= DSI_CLOCK_LANE;
-
-	if (enable)
-		dsi_ctrl->hw.ops.clamp_enable(&dsi_ctrl->hw,
-			lanes, ulps_enabled);
-	else
-		dsi_ctrl->hw.ops.clamp_disable(&dsi_ctrl->hw,
-			lanes, ulps_enabled);
-
-	return 0;
-}
-
-static int dsi_ctrl_dts_parse(struct dsi_ctrl *dsi_ctrl,
-				  struct device_node *of_node)
-{
-	u32 index = 0;
-	int rc = 0;
-
-	if (!dsi_ctrl || !of_node) {
-		pr_err("invalid dsi_ctrl:%d or of_node:%d\n",
-					dsi_ctrl != NULL, of_node != NULL);
-		return -EINVAL;
-	}
-
-	rc = of_property_read_u32(of_node, "cell-index", &index);
-	if (rc) {
-		pr_debug("cell index not set, default to 0\n");
-		index = 0;
-	}
-
-	dsi_ctrl->cell_index = index;
-	dsi_ctrl->name = of_get_property(of_node, "label", NULL);
-	if (!dsi_ctrl->name)
-		dsi_ctrl->name = DSI_CTRL_DEFAULT_LABEL;
-
-	dsi_ctrl->phy_isolation_enabled = of_property_read_bool(of_node,
-				    "qcom,dsi-phy-isolation-enabled");
-
-	dsi_ctrl->null_insertion_enabled = of_property_read_bool(of_node,
-					"qcom,null-insertion-enabled");
-
-	return 0;
-}
-
-static int dsi_ctrl_dev_probe(struct platform_device *pdev)
-{
-	struct dsi_ctrl *dsi_ctrl;
-	struct dsi_ctrl_list_item *item;
-	const struct of_device_id *id;
-	enum dsi_ctrl_version version;
-	int rc = 0;
-
-	id = of_match_node(msm_dsi_of_match, pdev->dev.of_node);
-	if (!id)
-		return -ENODEV;
-
-	version = *(enum dsi_ctrl_version *)id->data;
-
-	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
-	if (!item)
-		return -ENOMEM;
-
-	dsi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*dsi_ctrl), GFP_KERNEL);
-	if (!dsi_ctrl)
-		return -ENOMEM;
-
-	dsi_ctrl->version = version;
-	dsi_ctrl->irq_info.irq_num = -1;
-	dsi_ctrl->irq_info.irq_stat_mask = 0x0;
-
-	spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
-
-	rc = dsi_ctrl_dts_parse(dsi_ctrl, pdev->dev.of_node);
-	if (rc) {
-		pr_err("ctrl:%d dts parse failed, rc = %d\n",
-						dsi_ctrl->cell_index, rc);
-		goto fail;
-	}
-
-	rc = dsi_ctrl_init_regmap(pdev, dsi_ctrl);
-	if (rc) {
-		pr_err("Failed to parse register information, rc = %d\n", rc);
-		goto fail;
-	}
-
-	rc = dsi_ctrl_clocks_init(pdev, dsi_ctrl);
-	if (rc) {
-		pr_err("Failed to parse clock information, rc = %d\n", rc);
-		goto fail;
-	}
-
-	rc = dsi_ctrl_supplies_init(pdev, dsi_ctrl);
-	if (rc) {
-		pr_err("Failed to parse voltage supplies, rc = %d\n", rc);
-		goto fail_clks;
-	}
-
-	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
-		dsi_ctrl->cell_index, dsi_ctrl->phy_isolation_enabled,
-		dsi_ctrl->null_insertion_enabled);
-	if (rc) {
-		pr_err("Catalog does not support version (%d)\n",
-		       dsi_ctrl->version);
-		goto fail_supplies;
-	}
-
-	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
-	if (rc)
-		pr_debug("failed to init axi bus client, rc = %d\n", rc);
-
-	item->ctrl = dsi_ctrl;
-
-	mutex_lock(&dsi_ctrl_list_lock);
-	list_add(&item->list, &dsi_ctrl_list);
-	mutex_unlock(&dsi_ctrl_list_lock);
-
-	mutex_init(&dsi_ctrl->ctrl_lock);
-	dsi_ctrl->secure_mode = false;
-
-	dsi_ctrl->pdev = pdev;
-	platform_set_drvdata(pdev, dsi_ctrl);
-	pr_info("Probe successful for %s\n", dsi_ctrl->name);
-
-	return 0;
-
-fail_supplies:
-	(void)dsi_ctrl_supplies_deinit(dsi_ctrl);
-fail_clks:
-	(void)dsi_ctrl_clocks_deinit(dsi_ctrl);
-fail:
-	return rc;
-}
-
-static int dsi_ctrl_dev_remove(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct dsi_ctrl *dsi_ctrl;
-	struct list_head *pos, *tmp;
-
-	dsi_ctrl = platform_get_drvdata(pdev);
-
-	mutex_lock(&dsi_ctrl_list_lock);
-	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
-		struct dsi_ctrl_list_item *n = list_entry(pos,
-						  struct dsi_ctrl_list_item,
-						  list);
-		if (n->ctrl == dsi_ctrl) {
-			list_del(&n->list);
-			break;
-		}
-	}
-	mutex_unlock(&dsi_ctrl_list_lock);
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl_axi_bus_client_deinit(dsi_ctrl);
-	if (rc)
-		pr_err("failed to deinitialize axi bus client, rc = %d\n", rc);
-
-	rc = dsi_ctrl_supplies_deinit(dsi_ctrl);
-	if (rc)
-		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
-
-	rc = dsi_ctrl_clocks_deinit(dsi_ctrl);
-	if (rc)
-		pr_err("failed to deinitialize clocks, rc=%d\n", rc);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	mutex_destroy(&dsi_ctrl->ctrl_lock);
-	devm_kfree(&pdev->dev, dsi_ctrl);
-
-	platform_set_drvdata(pdev, NULL);
-	return 0;
-}
-
-static struct platform_driver dsi_ctrl_driver = {
-	.probe = dsi_ctrl_dev_probe,
-	.remove = dsi_ctrl_dev_remove,
-	.driver = {
-		.name = "drm_dsi_ctrl",
-		.of_match_table = msm_dsi_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-void dsi_ctrl_debug_dump(u32 *entries, u32 size)
-{
-	struct list_head *pos, *tmp;
-	struct dsi_ctrl *ctrl = NULL;
-
-	if (!entries || !size)
-		return;
-
-	mutex_lock(&dsi_ctrl_list_lock);
-	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
-		struct dsi_ctrl_list_item *n;
-
-		n = list_entry(pos, struct dsi_ctrl_list_item, list);
-		ctrl = n->ctrl;
-		pr_err("dsi ctrl:%d\n", ctrl->cell_index);
-		ctrl->hw.ops.debug_bus(&ctrl->hw, entries, size);
-	}
-	mutex_unlock(&dsi_ctrl_list_lock);
-}
-
-#endif
-/**
- * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
- * @of_node:    of_node of the DSI controller.
- *
- * Gets the DSI controller handle for the corresponding of_node. The ref count
- * is incremented to one and all subsequent gets will fail until the original
- * clients calls a put.
- *
- * Return: DSI Controller handle.
- */
-struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node)
-{
-	struct list_head *pos, *tmp;
-	struct dsi_ctrl *ctrl = NULL;
-
-	mutex_lock(&dsi_ctrl_list_lock);
-	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
-		struct dsi_ctrl_list_item *n;
-
-		n = list_entry(pos, struct dsi_ctrl_list_item, list);
-		if (n->ctrl->pdev->dev.of_node == of_node) {
-			ctrl = n->ctrl;
-			break;
-		}
-	}
-	mutex_unlock(&dsi_ctrl_list_lock);
-
-	if (!ctrl) {
-		pr_err("Device with of node not found\n");
-		ctrl = ERR_PTR(-EPROBE_DEFER);
-		return ctrl;
-	}
-
-	mutex_lock(&ctrl->ctrl_lock);
-	if (ctrl->refcount == 1) {
-		pr_err("[%s] Device in use\n", ctrl->name);
-		mutex_unlock(&ctrl->ctrl_lock);
-		ctrl = ERR_PTR(-EBUSY);
-		return ctrl;
-	}
-
-	ctrl->refcount++;
-	mutex_unlock(&ctrl->ctrl_lock);
-	return ctrl;
-}
-
-/**
- * dsi_ctrl_put() - releases a dsi controller handle.
- * @dsi_ctrl:       DSI controller handle.
- *
- * Releases the DSI controller. Driver will clean up all resources and puts back
- * the DSI controller into reset state.
- */
-void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl)
-{
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	if (dsi_ctrl->refcount == 0)
-		pr_err("Unbalanced %s call\n", __func__);
-	else
-		dsi_ctrl->refcount--;
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-}
-
-/**
- * dsi_ctrl_drv_init() - initialize dsi controller driver.
- * @dsi_ctrl:      DSI controller handle.
- * @parent:        Parent directory for debug fs.
- *
- * Initializes DSI controller driver. Driver should be initialized after
- * dsi_ctrl_get() succeeds.
- *
- * Return: error code.
- */
-int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || !parent) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl_drv_state_init(dsi_ctrl);
-	if (rc) {
-		pr_err("Failed to initialize driver state, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent);
-	if (rc) {
-		pr_err("[DSI_%d] failed to init debug fs, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
- * @dsi_ctrl:      DSI controller handle.
- *
- * Releases all resources acquired by dsi_ctrl_drv_init().
- *
- * Return: error code.
- */
-int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_debugfs_deinit(dsi_ctrl);
-	if (rc)
-		pr_err("failed to release debugfs root, rc=%d\n", rc);
-
-	rc = dsi_ctrl_buffer_deinit(dsi_ctrl);
-	if (rc)
-		pr_err("Failed to free cmd buffers, rc=%d\n", rc);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-int dsi_ctrl_clk_cb_register(struct dsi_ctrl *dsi_ctrl,
-	struct clk_ctrl_cb *clk_cb)
-{
-	if (!dsi_ctrl || !clk_cb) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	dsi_ctrl->clk_cb.priv = clk_cb->priv;
-	dsi_ctrl->clk_cb.dsi_clk_cb = clk_cb->dsi_clk_cb;
-	return 0;
-}
-
-/**
- * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
- * @dsi_ctrl:         DSI controller handle.
- *
- * Performs a PHY software reset on the DSI controller. Reset should be done
- * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
- * not enabled.
- *
- * This function will fail if driver is in any other state.
- *
- * Return: error code.
- */
-int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	dsi_ctrl->hw.ops.phy_sw_reset(&dsi_ctrl->hw);
-
-	pr_debug("[DSI_%d] PHY soft reset done\n", dsi_ctrl->cell_index);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_seamless_timing_update() - update only controller timing
- * @dsi_ctrl:          DSI controller handle.
- * @timing:            New DSI timing info
- *
- * Updates host timing values to conduct a seamless transition to new timing
- * For example, to update the porch values in a dynamic fps switch.
- *
- * Return: error code.
- */
-int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
-		struct dsi_mode_info *timing)
-{
-	struct dsi_mode_info *host_mode;
-	int rc = 0;
-
-	if (!dsi_ctrl || !timing) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
-			DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto exit;
-	}
-
-	host_mode = &dsi_ctrl->host_config.video_timing;
-	memcpy(host_mode, timing, sizeof(*host_mode));
-	dsi_ctrl->hw.ops.set_timing_db(&dsi_ctrl->hw, true);
-	dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, host_mode);
-
-exit:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_timing_db_update() - update only controller Timing DB
- * @dsi_ctrl:          DSI controller handle.
- * @enable:            Enable/disable Timing DB register
- *
- *  Update timing db register value during dfps usecases
- *
- * Return: error code.
- */
-int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
-		bool enable)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid dsi_ctrl\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
-			DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto exit;
-	}
-
-	/*
-	 * Add HW recommended delay for dfps feature.
-	 * When prefetch is enabled, MDSS HW works on 2 vsync
-	 * boundaries i.e. mdp_vsync and panel_vsync.
-	 * In the current implementation we are only waiting
-	 * for mdp_vsync. We need to make sure that interface
-	 * flush is after panel_vsync. So, added the recommended
-	 * delays after dfps update.
-	 */
-	usleep_range(2000, 2010);
-
-	dsi_ctrl->hw.ops.set_timing_db(&dsi_ctrl->hw, enable);
-
-exit:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.lane_map);
-
-	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
-				    &dsi_ctrl->host_config.common_config);
-
-	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
-		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.common_config,
-					&dsi_ctrl->host_config.u.cmd_engine);
-
-		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.video_timing,
-				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0,
-				&dsi_ctrl->roi);
-		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
-	} else {
-		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.common_config,
-					&dsi_ctrl->host_config.u.video_engine);
-		dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
-					  &dsi_ctrl->host_config.video_timing);
-		dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, true);
-	}
-
-	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
-	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
-	dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
-		bool *changed)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || !roi || !changed) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	if ((!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) ||
-			dsi_ctrl->modeupdated) {
-		*changed = true;
-		memcpy(&dsi_ctrl->roi, roi, sizeof(dsi_ctrl->roi));
-		dsi_ctrl->modeupdated = false;
-	} else
-		*changed = false;
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_config_clk_gating() - Enable/disable DSI PHY clk gating.
- * @dsi_ctrl:                     DSI controller handle.
- * @enable:                       Enable/disable DSI PHY clk gating
- * @clk_selection:                clock to enable/disable clock gating
- *
- * Return: error code.
- */
-int dsi_ctrl_config_clk_gating(struct dsi_ctrl *dsi_ctrl, bool enable,
-			enum dsi_clk_gate_type clk_selection)
-{
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (dsi_ctrl->hw.ops.config_clk_gating)
-		dsi_ctrl->hw.ops.config_clk_gating(&dsi_ctrl->hw, enable,
-				clk_selection);
-
-	return 0;
-}
-
-/**
- * dsi_ctrl_phy_reset_config() - Mask/unmask propagation of ahb reset signal
- *	to DSI PHY hardware.
- * @dsi_ctrl:        DSI controller handle.
- * @enable:			Mask/unmask the PHY reset signal.
- *
- * Return: error code.
- */
-int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (dsi_ctrl->hw.ops.phy_reset_config)
-		dsi_ctrl->hw.ops.phy_reset_config(&dsi_ctrl->hw, enable);
-
-	return 0;
-}
-
-static bool dsi_ctrl_check_for_spurious_error_interrupts(
-					struct dsi_ctrl *dsi_ctrl)
-{
-	const unsigned long intr_check_interval = msecs_to_jiffies(1000);
-	const unsigned int interrupt_threshold = 15;
-	unsigned long jiffies_now = jiffies;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid DSI controller structure\n");
-		return false;
-	}
-
-	if (dsi_ctrl->jiffies_start == 0)
-		dsi_ctrl->jiffies_start = jiffies;
-
-	dsi_ctrl->error_interrupt_count++;
-
-	if ((jiffies_now - dsi_ctrl->jiffies_start) < intr_check_interval) {
-		if (dsi_ctrl->error_interrupt_count > interrupt_threshold) {
-			pr_warn("Detected spurious interrupts on dsi ctrl\n");
-			return true;
-		}
-	} else {
-		dsi_ctrl->jiffies_start = jiffies;
-		dsi_ctrl->error_interrupt_count = 1;
-	}
-	return false;
-}
-
-static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
-				unsigned long error)
-{
-	struct dsi_event_cb_info cb_info;
-
-	cb_info = dsi_ctrl->irq_info.irq_err_cb;
-
-	/* disable error interrupts */
-	if (dsi_ctrl->hw.ops.error_intr_ctrl)
-		dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, false);
-
-	/* clear error interrupts first */
-	if (dsi_ctrl->hw.ops.clear_error_status)
-		dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					error);
-
-	/* DTLN PHY error */
-	if (error & 0x3000E00)
-		pr_err("dsi PHY contention error: 0x%lx\n", error);
-
-	/* TX timeout error */
-	if (error & 0xE0) {
-		if (error & 0xA0) {
-			if (cb_info.event_cb) {
-				cb_info.event_idx = DSI_LP_Rx_TIMEOUT;
-				(void)cb_info.event_cb(cb_info.event_usr_ptr,
-							cb_info.event_idx,
-							dsi_ctrl->cell_index,
-							0, 0, 0, 0);
-			}
-		}
-		pr_err("tx timeout error: 0x%lx\n", error);
-	}
-
-	/* DSI FIFO OVERFLOW error */
-	if (error & 0xF0000) {
-		u32 mask = 0;
-
-		if (dsi_ctrl->hw.ops.get_error_mask)
-			mask = dsi_ctrl->hw.ops.get_error_mask(&dsi_ctrl->hw);
-		/* no need to report FIFO overflow if already masked */
-		if (cb_info.event_cb && !(mask & 0xf0000)) {
-			cb_info.event_idx = DSI_FIFO_OVERFLOW;
-			(void)cb_info.event_cb(cb_info.event_usr_ptr,
-						cb_info.event_idx,
-						dsi_ctrl->cell_index,
-						0, 0, 0, 0);
-			pr_err("dsi FIFO OVERFLOW error: 0x%lx\n", error);
-		}
-	}
-
-	/* DSI FIFO UNDERFLOW error */
-	if (error & 0xF00000) {
-		if (cb_info.event_cb) {
-			cb_info.event_idx = DSI_FIFO_UNDERFLOW;
-			(void)cb_info.event_cb(cb_info.event_usr_ptr,
-						cb_info.event_idx,
-						dsi_ctrl->cell_index,
-						0, 0, 0, 0);
-		}
-		pr_err("dsi FIFO UNDERFLOW error: 0x%lx\n", error);
-	}
-
-	/* DSI PLL UNLOCK error */
-	if (error & BIT(8))
-		pr_err("dsi PLL unlock error: 0x%lx\n", error);
-
-	/* ACK error */
-	if (error & 0xF)
-		pr_err("ack error: 0x%lx\n", error);
-
-	/*
-	 * DSI Phy can go into bad state during ESD influence. This can
-	 * manifest as various types of spurious error interrupts on
-	 * DSI controller. This check will allow us to handle afore mentioned
-	 * case and prevent us from re enabling interrupts until a full ESD
-	 * recovery is completed.
-	 */
-	if (dsi_ctrl_check_for_spurious_error_interrupts(dsi_ctrl) &&
-				dsi_ctrl->esd_check_underway) {
-		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
-		return;
-	}
-
-	/* enable back DSI interrupts */
-	if (dsi_ctrl->hw.ops.error_intr_ctrl)
-		dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, true);
-}
-
-/**
- * dsi_ctrl_isr - interrupt service routine for DSI CTRL component
- * @irq: Incoming IRQ number
- * @ptr: Pointer to user data structure (struct dsi_ctrl)
- * Returns: IRQ_HANDLED if no further action required
- */
-static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
-{
-	struct dsi_ctrl *dsi_ctrl;
-	struct dsi_event_cb_info cb_info;
-	unsigned long flags;
-	uint32_t status = 0x0, i;
-	uint64_t errors = 0x0;
-
-	if (!ptr)
-		return IRQ_NONE;
-	dsi_ctrl = ptr;
-
-	/* check status interrupts */
-	if (dsi_ctrl->hw.ops.get_interrupt_status)
-		status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
-
-	/* check error interrupts */
-	if (dsi_ctrl->hw.ops.get_error_status)
-		errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
-
-	/* clear interrupts */
-	if (dsi_ctrl->hw.ops.clear_interrupt_status)
-		dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, 0x0);
-
-	SDE_EVT32_IRQ(dsi_ctrl->cell_index, status, errors);
-
-	/* handle DSI error recovery */
-	if (status & DSI_ERROR)
-		dsi_ctrl_handle_error_status(dsi_ctrl, errors);
-
-	if (status & DSI_CMD_MODE_DMA_DONE) {
-		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-					DSI_SINT_CMD_MODE_DMA_DONE);
-		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-	}
-
-	if (status & DSI_CMD_FRAME_DONE) {
-		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-					DSI_SINT_CMD_FRAME_DONE);
-		complete_all(&dsi_ctrl->irq_info.cmd_frame_done);
-	}
-
-	if (status & DSI_VIDEO_MODE_FRAME_DONE) {
-		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-					DSI_SINT_VIDEO_MODE_FRAME_DONE);
-		complete_all(&dsi_ctrl->irq_info.vid_frame_done);
-	}
-
-	if (status & DSI_BTA_DONE) {
-		u32 fifo_overflow_mask = (DSI_DLN0_HS_FIFO_OVERFLOW |
-					DSI_DLN1_HS_FIFO_OVERFLOW |
-					DSI_DLN2_HS_FIFO_OVERFLOW |
-					DSI_DLN3_HS_FIFO_OVERFLOW);
-		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-					DSI_SINT_BTA_DONE);
-		complete_all(&dsi_ctrl->irq_info.bta_done);
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					fifo_overflow_mask);
-	}
-
-	for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
-		if (status & 0x1) {
-			spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
-			cb_info = dsi_ctrl->irq_info.irq_stat_cb[i];
-			spin_unlock_irqrestore(
-					&dsi_ctrl->irq_info.irq_lock, flags);
-
-			if (cb_info.event_cb)
-				(void)cb_info.event_cb(cb_info.event_usr_ptr,
-						cb_info.event_idx,
-						dsi_ctrl->cell_index,
-						irq, 0, 0, 0);
-		}
-		status >>= 1;
-	}
-
-	return IRQ_HANDLED;
-}
-
-/**
- * _dsi_ctrl_setup_isr - register ISR handler
- * @dsi_ctrl: Pointer to associated dsi_ctrl structure
- * Returns: Zero on success
- */
-static int _dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
-{
-	int irq_num, rc;
-
-	if (!dsi_ctrl)
-		return -EINVAL;
-	if (dsi_ctrl->irq_info.irq_num != -1)
-		return 0;
-
-	init_completion(&dsi_ctrl->irq_info.cmd_dma_done);
-	init_completion(&dsi_ctrl->irq_info.vid_frame_done);
-	init_completion(&dsi_ctrl->irq_info.cmd_frame_done);
-	init_completion(&dsi_ctrl->irq_info.bta_done);
-
-	irq_num = platform_get_irq(dsi_ctrl->pdev, 0);
-	if (irq_num < 0) {
-		pr_err("[DSI_%d] Failed to get IRQ number, %d\n",
-				dsi_ctrl->cell_index, irq_num);
-		rc = irq_num;
-	} else {
-		rc = devm_request_threaded_irq(&dsi_ctrl->pdev->dev, irq_num,
-				dsi_ctrl_isr, NULL, 0, "dsi_ctrl", dsi_ctrl);
-		if (rc) {
-			pr_err("[DSI_%d] Failed to request IRQ, %d\n",
-					dsi_ctrl->cell_index, rc);
-		} else {
-			dsi_ctrl->irq_info.irq_num = irq_num;
-			disable_irq_nosync(irq_num);
-
-			pr_info("[DSI_%d] IRQ %d registered\n",
-					dsi_ctrl->cell_index, irq_num);
-		}
-	}
-	return rc;
-}
-
-/**
- * _dsi_ctrl_destroy_isr - unregister ISR handler
- * @dsi_ctrl: Pointer to associated dsi_ctrl structure
- */
-static void _dsi_ctrl_destroy_isr(struct dsi_ctrl *dsi_ctrl)
-{
-	if (!dsi_ctrl || !dsi_ctrl->pdev || dsi_ctrl->irq_info.irq_num < 0)
-		return;
-
-	if (dsi_ctrl->irq_info.irq_num != -1) {
-		devm_free_irq(&dsi_ctrl->pdev->dev,
-				dsi_ctrl->irq_info.irq_num, dsi_ctrl);
-		dsi_ctrl->irq_info.irq_num = -1;
-	}
-}
-
-void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
-		uint32_t intr_idx, struct dsi_event_cb_info *event_info)
-{
-	unsigned long flags;
-
-	if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
-			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
-		return;
-
-	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
-
-	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
-		/* enable irq on first request */
-		if (dsi_ctrl->irq_info.irq_stat_mask == 0)
-			enable_irq(dsi_ctrl->irq_info.irq_num);
-
-		/* update hardware mask */
-		dsi_ctrl->irq_info.irq_stat_mask |= BIT(intr_idx);
-		dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
-				dsi_ctrl->irq_info.irq_stat_mask);
-	}
-	++(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
-
-	if (event_info)
-		dsi_ctrl->irq_info.irq_stat_cb[intr_idx] = *event_info;
-
-	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
-}
-
-void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
-		uint32_t intr_idx)
-{
-	unsigned long flags;
-
-	if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
-			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
-		return;
-
-	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
-
-	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
-		if (--(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) == 0) {
-			dsi_ctrl->irq_info.irq_stat_mask &= ~BIT(intr_idx);
-			dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
-					dsi_ctrl->irq_info.irq_stat_mask);
-
-			/* don't need irq if no lines are enabled */
-			if (dsi_ctrl->irq_info.irq_stat_mask == 0)
-				disable_irq_nosync(dsi_ctrl->irq_info.irq_num);
-		}
-
-	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
-}
-
-int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
-{
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (dsi_ctrl->hw.ops.host_setup)
-		dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.common_config);
-
-	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
-		if (dsi_ctrl->hw.ops.cmd_engine_setup)
-			dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.common_config,
-					&dsi_ctrl->host_config.u.cmd_engine);
-
-		if (dsi_ctrl->hw.ops.setup_cmd_stream)
-			dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.video_timing,
-				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0, NULL);
-	} else {
-		pr_err("invalid panel mode for resolution switch\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * dsi_ctrl_update_host_init_state() - Update the host initialization state.
- * @dsi_ctrl:        DSI controller handle.
- * @enable:        boolean signifying host state.
- *
- * Update the host initialization status only while exiting from ulps during
- * suspend state.
- *
- * Return: error code.
- */
-int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	int rc = 0;
-	u32 state = enable ? 0x1 : 0x0;
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		return rc;
-	}
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, state);
-	return rc;
-}
-
-/**
- * dsi_ctrl_host_init() - Initialize DSI host hardware.
- * @dsi_ctrl:        DSI controller handle.
- * @is_splash_enabled:        boolean signifying splash status.
- *
- * Initializes DSI controller hardware with host configuration provided by
- * dsi_ctrl_update_host_config(). Initialization can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
- * performed.
- *
- * Return: error code.
- */
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	/* For Splash usecases we omit hw operations as bootloader
-	 * already takes care of them
-	 */
-	if (!is_splash_enabled) {
-		dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.lane_map);
-
-		dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
-				    &dsi_ctrl->host_config.common_config);
-
-		if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
-			dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.common_config,
-					&dsi_ctrl->host_config.u.cmd_engine);
-
-			dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.video_timing,
-				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0,
-				NULL);
-		} else {
-			dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.common_config,
-					&dsi_ctrl->host_config.u.video_engine);
-			dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
-					  &dsi_ctrl->host_config.video_timing);
-		}
-	}
-
-	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
-	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
-
-	pr_debug("[DSI_%d]Host initialization complete, continuous splash status:%d\n",
-		dsi_ctrl->cell_index, is_splash_enabled);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_isr_configure() - API to register/deregister dsi isr
- * @dsi_ctrl:              DSI controller handle.
- * @enable:		   variable to control register/deregister isr
- */
-void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	if (!dsi_ctrl)
-		return;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	if (enable)
-		_dsi_ctrl_setup_isr(dsi_ctrl);
-	else
-		_dsi_ctrl_destroy_isr(dsi_ctrl);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-}
-
-void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	if (!dsi_ctrl)
-		return;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	dsi_ctrl->hw.ops.set_continuous_clk(&dsi_ctrl->hw, enable);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-}
-
-int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl)
-{
-	if (!dsi_ctrl)
-		return -EINVAL;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	pr_debug("[DSI_%d]Soft reset complete\n", dsi_ctrl->cell_index);
-	return 0;
-}
-
-int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl)
-		return -EINVAL;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl->hw.ops.ctrl_reset(&dsi_ctrl->hw, mask);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	return rc;
-}
-
-int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl)
-		return -EINVAL;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl->hw.ops.get_hw_version(&dsi_ctrl->hw);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	return rc;
-}
-
-int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl)
-		return -EINVAL;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	return rc;
-}
-
-int dsi_ctrl_setup_avr(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	if (!dsi_ctrl)
-		return -EINVAL;
-
-	if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
-		mutex_lock(&dsi_ctrl->ctrl_lock);
-		dsi_ctrl->hw.ops.setup_avr(&dsi_ctrl->hw, enable);
-		mutex_unlock(&dsi_ctrl->ctrl_lock);
-	}
-
-	return 0;
-}
-
-/**
- * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
- * @dsi_ctrl:        DSI controller handle.
- *
- * De-initializes DSI controller hardware. It can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
- *
- * Return: error code.
- */
-int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		pr_err("driver state check failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	pr_debug("[DSI_%d] Host deinitization complete\n",
-		dsi_ctrl->cell_index);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_update_host_config() - update dsi host configuration
- * @dsi_ctrl:          DSI controller handle.
- * @config:            DSI host configuration.
- * @flags:             dsi_mode_flags modifying the behavior
- *
- * Updates driver with new Host configuration to use for host initialization.
- * This function call will only update the software context. The stored
- * configuration information will be used when the host is initialized.
- *
- * Return: error code.
- */
-int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
-				struct dsi_host_config *config,
-				int flags, void *clk_handle)
-{
-	int rc = 0;
-
-	if (!ctrl || !config) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_validate_panel_info(ctrl, config);
-	if (rc) {
-		pr_err("panel validation failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
-		rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle);
-		if (rc) {
-			pr_err("[%s] failed to update link frequencies, rc=%d\n",
-			       ctrl->name, rc);
-			goto error;
-		}
-	}
-
-	pr_debug("[DSI_%d]Host config updated\n", ctrl->cell_index);
-	memcpy(&ctrl->host_config, config, sizeof(ctrl->host_config));
-	ctrl->mode_bounds.x = ctrl->host_config.video_timing.h_active *
-			ctrl->horiz_index;
-	ctrl->mode_bounds.y = 0;
-	ctrl->mode_bounds.w = ctrl->host_config.video_timing.h_active;
-	ctrl->mode_bounds.h = ctrl->host_config.video_timing.v_active;
-	memcpy(&ctrl->roi, &ctrl->mode_bounds, sizeof(ctrl->mode_bounds));
-	ctrl->modeupdated = true;
-	ctrl->roi.x = 0;
-error:
-	mutex_unlock(&ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_validate_timing() - validate a video timing configuration
- * @dsi_ctrl:       DSI controller handle.
- * @timing:         Pointer to timing data.
- *
- * Driver will validate if the timing configuration is supported on the
- * controller hardware.
- *
- * Return: error code if timing is not supported.
- */
-int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
-			     struct dsi_mode_info *mode)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || !mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-/**
- * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
- * @dsi_ctrl:             DSI controller handle.
- * @msg:                  Message to transfer on DSI link.
- * @flags:                Modifiers for message transfer.
- *
- * Command transfer can be done only when command engine is enabled. The
- * transfer API will block until either the command transfer finishes or
- * the timeout value is reached. If the trigger is deferred, it will return
- * without triggering the transfer. Command parameters are programmed to
- * hardware.
- *
- * Return: error code.
- */
-int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
-			  const struct mipi_dsi_msg *msg,
-			  u32 flags)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || !msg) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	if (flags & DSI_CTRL_CMD_READ) {
-		rc = dsi_message_rx(dsi_ctrl, msg, flags);
-		if (rc <= 0)
-			pr_err("read message failed read length, rc=%d\n", rc);
-	} else {
-		rc = dsi_message_tx(dsi_ctrl, msg, flags);
-		if (rc)
-			pr_err("command msg transfer failed, rc = %d\n", rc);
-	}
-
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
-
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
- * @dsi_ctrl:              DSI controller handle.
- * @flags:                 Modifiers.
- *
- * Return: error code.
- */
-int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
-{
-	int rc = 0, ret = 0;
-	u32 status = 0;
-	u32 mask = (DSI_CMD_MODE_DMA_DONE);
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	/* Dont trigger the command if this is not the last ocmmand */
-	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
-		return rc;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
-		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
-
-	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
-		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
-		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
-		if (dsi_ctrl->hw.ops.mask_error_intr)
-			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
-		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
-
-		/* trigger command */
-		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
-
-		ret = wait_for_completion_timeout(
-				&dsi_ctrl->irq_info.cmd_dma_done,
-				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-
-		if (ret == 0) {
-			status = dsi_ctrl->hw.ops.get_interrupt_status(
-								&dsi_ctrl->hw);
-			if (status & mask) {
-				status |= (DSI_CMD_MODE_DMA_DONE |
-						DSI_BTA_DONE);
-				dsi_ctrl->hw.ops.clear_interrupt_status(
-								&dsi_ctrl->hw,
-								status);
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-				pr_warn("dma_tx done but irq not triggered\n");
-			} else {
-				rc = -ETIMEDOUT;
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				pr_err("[DSI_%d]Command transfer failed\n",
-						dsi_ctrl->cell_index);
-			}
-		}
-		if (dsi_ctrl->hw.ops.mask_error_intr &&
-				!dsi_ctrl->esd_check_underway)
-			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), false);
-
-		if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-			dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
-			dsi_ctrl->cmd_len = 0;
-		}
-	}
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_cache_misr - Cache frame MISR value
- * @dsi_ctrl: Pointer to associated dsi_ctrl structure
- */
-void dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
-{
-	u32 misr;
-
-	if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
-		return;
-
-	misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
-				dsi_ctrl->host_config.panel_mode);
-
-	if (misr)
-		dsi_ctrl->misr_cache = misr;
-
-	pr_debug("DSI_%d misr_cache = %x\n", dsi_ctrl->cell_index,
-		dsi_ctrl->misr_cache);
-}
-
-/**
- * dsi_ctrl_get_host_engine_init_state() - Return host init state
- * @dsi_ctrl:          DSI controller handle.
- * @state:             Controller initialization state
- *
- * Return: error code.
- */
-int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
-		bool *state)
-{
-	if (!dsi_ctrl || !state) {
-		pr_err("Invalid Params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	*state = dsi_ctrl->current_state.host_initialized;
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-
-	return 0;
-}
-
-/**
- * dsi_ctrl_update_host_engine_state_for_cont_splash() -
- *            set engine state for dsi controller during continuous splash
- * @dsi_ctrl:          DSI controller handle.
- * @state:             Engine state.
- *
- * Set host engine state for DSI controller during continuous splash.
- *
- * Return: error code.
- */
-int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
-					enum dsi_engine_state state)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->cell_index,
-		 state);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_power_state() - set power state for dsi controller
- * @dsi_ctrl:          DSI controller handle.
- * @state:             Power state.
- *
- * Set power state for DSI controller. Power state can be changed only when
- * Controller, Video and Command engines are turned off.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
-			     enum dsi_power_state state)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || (state >= DSI_CTRL_POWER_MAX)) {
-		pr_err("Invalid Params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE,
-				  state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	if (state == DSI_CTRL_POWER_VREG_ON) {
-		rc = dsi_ctrl_enable_supplies(dsi_ctrl, true);
-		if (rc) {
-			pr_err("[%d]failed to enable voltage supplies, rc=%d\n",
-			       dsi_ctrl->cell_index, rc);
-			goto error;
-		}
-	} else if (state == DSI_CTRL_POWER_VREG_OFF) {
-		rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
-		if (rc) {
-			pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
-			       dsi_ctrl->cell_index, rc);
-			goto error;
-		}
-	}
-
-	pr_debug("[DSI_%d] Power state updated to %d\n", dsi_ctrl->cell_index,
-		 state);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
- * @dsi_ctrl:          DSI controller handle.
- * @on:                enable/disable test pattern.
- *
- * Test pattern can be enabled only after Video engine (for video mode panels)
- * or command engine (for cmd mode panels) is enabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	if (on) {
-		if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
-			dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw,
-							  DSI_TEST_PATTERN_INC,
-							  0xFFFF);
-		} else {
-			dsi_ctrl->hw.ops.cmd_test_pattern_setup(
-							&dsi_ctrl->hw,
-							DSI_TEST_PATTERN_INC,
-							0xFFFF,
-							0x0);
-		}
-	}
-	dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on);
-
-	pr_debug("[DSI_%d]Set test pattern state=%d\n",
-		dsi_ctrl->cell_index, on);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_host_engine_state() - set host engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Host engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
-				   enum dsi_engine_state state)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	if (state == DSI_CTRL_ENGINE_ON)
-		dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
-	else
-		dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, false);
-
-	pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->cell_index,
-		 state);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_cmd_engine_state() - set command engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Command engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
-				  enum dsi_engine_state state)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	if (state == DSI_CTRL_ENGINE_ON)
-		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
-	else
-		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
-
-	pr_debug("[DSI_%d] Set cmd engine state = %d\n", dsi_ctrl->cell_index,
-		 state);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
-error:
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_vid_engine_state() - set video engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Video engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
-				  enum dsi_engine_state state)
-{
-	int rc = 0;
-	bool on;
-
-	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
-	if (rc) {
-		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		goto error;
-	}
-
-	on = (state == DSI_CTRL_ENGINE_ON) ? true : false;
-	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
-
-	/* perform a reset when turning off video engine */
-	if (!on)
-		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
-
-	pr_debug("[DSI_%d] Set video engine state = %d\n", dsi_ctrl->cell_index,
-		 state);
-	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_ctrl:		DSI controller handle.
- * @enable:		enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	if (enable)
-		rc = dsi_enable_ulps(dsi_ctrl);
-	else
-		rc = dsi_disable_ulps(dsi_ctrl);
-
-	if (rc) {
-		pr_err("[DSI_%d] Ulps state change(%d) failed, rc=%d\n",
-			dsi_ctrl->cell_index, enable, rc);
-		goto error;
-	}
-	pr_debug("[DSI_%d] ULPS state = %d\n", dsi_ctrl->cell_index, enable);
-
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
- * @dsi_ctrl:             DSI controller handle.
- * @enable:               enable/disable clamping.
- *
- * Clamps can be enabled/disabled while DSI controller is still turned on.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl,
-		bool enable, bool ulps_enabled)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!dsi_ctrl->hw.ops.clamp_enable ||
-			!dsi_ctrl->hw.ops.clamp_disable) {
-		pr_debug("No clamp control for DSI controller\n");
-		return 0;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_enable_io_clamp(dsi_ctrl, enable, ulps_enabled);
-	if (rc) {
-		pr_err("[DSI_%d] Failed to enable IO clamp\n",
-			dsi_ctrl->cell_index);
-		goto error;
-	}
-
-	pr_debug("[DSI_%d] Clamp state = %d\n", dsi_ctrl->cell_index, enable);
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_set_clock_source() - set clock source fpr dsi link clocks
- * @dsi_ctrl:        DSI controller handle.
- * @source_clks:     Source clocks for DSI link clocks.
- *
- * Clock source should be changed while link clocks are disabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
-			      struct dsi_clk_link_set *source_clks)
-{
-	int rc = 0;
-
-	if (!dsi_ctrl || !source_clks) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
-	rc = dsi_clk_update_parent(source_clks, &dsi_ctrl->clk_info.rcg_clks);
-	if (rc) {
-		pr_err("[DSI_%d]Failed to update link clk parent, rc=%d\n",
-		       dsi_ctrl->cell_index, rc);
-		(void)dsi_clk_update_parent(&dsi_ctrl->clk_info.pll_op_clks,
-					    &dsi_ctrl->clk_info.rcg_clks);
-		goto error;
-	}
-
-	dsi_ctrl->clk_info.pll_op_clks.byte_clk = source_clks->byte_clk;
-	dsi_ctrl->clk_info.pll_op_clks.pixel_clk = source_clks->pixel_clk;
-
-	pr_debug("[DSI_%d] Source clocks are updated\n", dsi_ctrl->cell_index);
-
-error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return rc;
-}
-
-/**
- * dsi_ctrl_setup_misr() - Setup frame MISR
- * @dsi_ctrl:              DSI controller handle.
- * @enable:                enable/disable MISR.
- * @frame_count:           Number of frames to accumulate MISR.
- *
- * Return: error code.
- */
-int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
-			bool enable,
-			u32 frame_count)
-{
-	if (!dsi_ctrl) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!dsi_ctrl->hw.ops.setup_misr)
-		return 0;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	dsi_ctrl->misr_enable = enable;
-	dsi_ctrl->hw.ops.setup_misr(&dsi_ctrl->hw,
-			dsi_ctrl->host_config.panel_mode,
-			enable, frame_count);
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-	return 0;
-}
-
-/**
- * dsi_ctrl_collect_misr() - Read frame MISR
- * @dsi_ctrl:              DSI controller handle.
- *
- * Return: MISR value.
- */
-u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl)
-{
-	u32 misr;
-
-	if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
-		return 0;
-
-	misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
-				dsi_ctrl->host_config.panel_mode);
-	if (!misr)
-		misr = dsi_ctrl->misr_cache;
-
-	pr_debug("DSI_%d cached misr = %x, final = %x\n",
-		dsi_ctrl->cell_index, dsi_ctrl->misr_cache, misr);
-
-	return misr;
-}
-
-void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
-		bool mask_enable)
-{
-	if (!dsi_ctrl || !dsi_ctrl->hw.ops.error_intr_ctrl
-			|| !dsi_ctrl->hw.ops.clear_error_status) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	/*
-	 * Mask DSI error status interrupts and clear error status
-	 * register
-	 */
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	if (idx & BIT(DSI_ERR_INTR_ALL)) {
-		/*
-		 * The behavior of mask_enable is different in ctrl register
-		 * and mask register and hence mask_enable is manipulated for
-		 * selective error interrupt masking vs total error interrupt
-		 * masking.
-		 */
-
-		dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, !mask_enable);
-		dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					DSI_ERROR_INTERRUPT_COUNT);
-	} else {
-		dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw, idx,
-								mask_enable);
-		dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					DSI_ERROR_INTERRUPT_COUNT);
-	}
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-}
-
-/**
- * dsi_ctrl_irq_update() - Put a irq vote to process DSI error
- *				interrupts at any time.
- * @dsi_ctrl:              DSI controller handle.
- * @enable:		   variable to enable/disable irq
- */
-void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable)
-{
-	if (!dsi_ctrl)
-		return;
-
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-	if (enable)
-		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-					DSI_SINT_ERROR, NULL);
-	else
-		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-					DSI_SINT_ERROR);
-
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
-}
-
-/**
- * dsi_ctrl_drv_register() - register platform driver for dsi controller
- */
-void dsi_ctrl_drv_register(void)
-{
-	platform_driver_register(&dsi_ctrl_driver);
-}
-
-/**
- * dsi_ctrl_drv_unregister() - unregister platform driver
- */
-void dsi_ctrl_drv_unregister(void)
-{
-	platform_driver_unregister(&dsi_ctrl_driver);
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
deleted file mode 100644
index 527cf07..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ /dev/null
@@ -1,797 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_CTRL_H_
-#define _DSI_CTRL_H_
-
-#include <linux/debugfs.h>
-
-#include "dsi_defs.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "drm_mipi_dsi.h"
-
-/*
- * DSI Command transfer modifiers
- * @DSI_CTRL_CMD_READ:             The current transfer involves reading data.
- * @DSI_CTRL_CMD_BROADCAST:        The current transfer needs to be done in
- *				   broadcast mode to multiple slaves.
- * @DSI_CTRL_CMD_BROADCAST_MASTER: This controller is the master and the slaves
- *				   sync to this trigger.
- * @DSI_CTRL_CMD_DEFER_TRIGGER:    Defer the command trigger to later.
- * @DSI_CTRL_CMD_FIFO_STORE:       Use FIFO for command transfer in place of
- *				   reading data from memory.
- * @DSI_CTRL_CMD_FETCH_MEMORY:     Fetch command from memory through AXI bus
- *				   and transfer it.
- * @DSI_CTRL_CMD_LAST_COMMAND:     Trigger the DMA cmd transfer if this is last
- *				   command in the batch.
- * @DSI_CTRL_CMD_NON_EMBEDDED_MODE:Transfer cmd packets in non embedded mode.
- * @DSI_CTRL_CMD_CUSTOM_DMA_SCHED: Use the dma scheduling line number defined in
- *				   display panel dtsi file instead of default.
- */
-#define DSI_CTRL_CMD_READ             0x1
-#define DSI_CTRL_CMD_BROADCAST        0x2
-#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
-#define DSI_CTRL_CMD_DEFER_TRIGGER    0x8
-#define DSI_CTRL_CMD_FIFO_STORE       0x10
-#define DSI_CTRL_CMD_FETCH_MEMORY     0x20
-#define DSI_CTRL_CMD_LAST_COMMAND     0x40
-#define DSI_CTRL_CMD_NON_EMBEDDED_MODE 0x80
-#define DSI_CTRL_CMD_CUSTOM_DMA_SCHED  0x100
-
-/* DSI embedded mode fifo size
- * If the command is greater than 256 bytes it is sent in non-embedded mode.
- */
-#define DSI_EMBEDDED_MODE_DMA_MAX_SIZE_BYTES 256
-
-/* max size supported for dsi cmd transfer using TPG */
-#define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
-
-/**
- * enum dsi_power_state - defines power states for dsi controller.
- * @DSI_CTRL_POWER_VREG_OFF:    Digital and analog supplies for DSI controller
-				turned off
- * @DSI_CTRL_POWER_VREG_ON:     Digital and analog supplies for DSI controller
- * @DSI_CTRL_POWER_MAX:         Maximum value.
- */
-enum dsi_power_state {
-	DSI_CTRL_POWER_VREG_OFF = 0,
-	DSI_CTRL_POWER_VREG_ON,
-	DSI_CTRL_POWER_MAX,
-};
-
-/**
- * enum dsi_engine_state - define engine status for dsi controller.
- * @DSI_CTRL_ENGINE_OFF:  Engine is turned off.
- * @DSI_CTRL_ENGINE_ON:   Engine is turned on.
- * @DSI_CTRL_ENGINE_MAX:  Maximum value.
- */
-enum dsi_engine_state {
-	DSI_CTRL_ENGINE_OFF = 0,
-	DSI_CTRL_ENGINE_ON,
-	DSI_CTRL_ENGINE_MAX,
-};
-
-/**
- * struct dsi_ctrl_power_info - digital and analog power supplies for dsi host
- * @digital:  Digital power supply required to turn on DSI controller hardware.
- * @host_pwr: Analog power supplies required to turn on DSI controller hardware.
- *            Even though DSI controller it self does not require an analog
- *            power supply, supplies required for PLL can be defined here to
- *            allow proper control over these supplies.
- */
-struct dsi_ctrl_power_info {
-	struct dsi_regulator_info digital;
-	struct dsi_regulator_info host_pwr;
-};
-
-/**
- * struct dsi_ctrl_clk_info - clock information for DSI controller
- * @core_clks:          Core clocks needed to access DSI controller registers.
- * @hs_link_clks:       Clocks required to transmit high speed data over DSI
- * @lp_link_clks:       Clocks required to perform low power ops over DSI
- * @rcg_clks:           Root clock generation clocks generated in MMSS_CC. The
- *			output of the PLL is set as parent for these root
- *			clocks. These clocks are specific to controller
- *			instance.
- * @mux_clks:           Mux clocks used for Dynamic refresh feature.
- * @ext_clks:           External byte/pixel clocks from the MMSS block. These
- *			clocks are set as parent to rcg clocks.
- * @pll_op_clks:        TODO:
- * @shadow_clks:        TODO:
- */
-struct dsi_ctrl_clk_info {
-	/* Clocks parsed from DT */
-	struct dsi_core_clk_info core_clks;
-	struct dsi_link_hs_clk_info hs_link_clks;
-	struct dsi_link_lp_clk_info lp_link_clks;
-	struct dsi_clk_link_set rcg_clks;
-
-	/* Clocks set by DSI Manager */
-	struct dsi_clk_link_set mux_clks;
-	struct dsi_clk_link_set ext_clks;
-	struct dsi_clk_link_set pll_op_clks;
-	struct dsi_clk_link_set shadow_clks;
-};
-
-/**
- * struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
- * @bus_scale_table:        Bus scale voting usecases.
- * @bus_handle:             Handle used for voting bandwidth.
- */
-struct dsi_ctrl_bus_scale_info {
-	struct msm_bus_scale_pdata *bus_scale_table;
-	u32 bus_handle;
-};
-
-/**
- * struct dsi_ctrl_state_info - current driver state information
- * @power_state:        Status of power states on DSI controller.
- * @cmd_engine_state:   Status of DSI command engine.
- * @vid_engine_state:   Status of DSI video engine.
- * @controller_state:   Status of DSI Controller engine.
- * @host_initialized:	Boolean to indicate status of DSi host Initialization
- * @tpg_enabled:        Boolean to indicate whether tpg is enabled.
- */
-struct dsi_ctrl_state_info {
-	enum dsi_power_state power_state;
-	enum dsi_engine_state cmd_engine_state;
-	enum dsi_engine_state vid_engine_state;
-	enum dsi_engine_state controller_state;
-	bool host_initialized;
-	bool tpg_enabled;
-};
-
-/**
- * struct dsi_ctrl_interrupts - define interrupt information
- * @irq_lock:            Spinlock for ISR handler.
- * @irq_num:             Linux interrupt number associated with device.
- * @irq_stat_mask:       Hardware mask of currently enabled interrupts.
- * @irq_stat_refcount:   Number of times each interrupt has been requested.
- * @irq_stat_cb:         Status IRQ callback definitions.
- * @irq_err_cb:          IRQ callback definition to handle DSI ERRORs.
- * @cmd_dma_done:          Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
- * @vid_frame_done:        Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
- * @cmd_frame_done:        Completion signal for DSI_CMD_FRAME_DONE interrupt.
- */
-struct dsi_ctrl_interrupts {
-	spinlock_t irq_lock;
-	int irq_num;
-	uint32_t irq_stat_mask;
-	int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
-	struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
-	struct dsi_event_cb_info irq_err_cb;
-
-	struct completion cmd_dma_done;
-	struct completion vid_frame_done;
-	struct completion cmd_frame_done;
-	struct completion bta_done;
-};
-
-/**
- * struct dsi_ctrl - DSI controller object
- * @pdev:                Pointer to platform device.
- * @cell_index:          Instance cell id.
- * @horiz_index:         Index in physical horizontal CTRL layout, 0 = leftmost
- * @name:                Name of the controller instance.
- * @refcount:            ref counter.
- * @ctrl_lock:           Mutex for hardware and object access.
- * @drm_dev:             Pointer to DRM device.
- * @version:             DSI controller version.
- * @hw:                  DSI controller hardware object.
- * @current_state:       Current driver and hardware state.
- * @clk_cb:		 Callback for DSI clock control.
- * @irq_info:            Interrupt information.
- * @recovery_cb:         Recovery call back to SDE.
- * @clk_info:            Clock information.
- * @clk_freq:            DSi Link clock frequency information.
- * @pwr_info:            Power information.
- * @axi_bus_info:        AXI bus information.
- * @host_config:         Current host configuration.
- * @mode_bounds:         Boundaries of the default mode ROI.
- *                       Origin is at top left of all CTRLs.
- * @roi:                 Partial update region of interest.
- *                       Origin is top left of this CTRL.
- * @tx_cmd_buf:          Tx command buffer.
- * @cmd_buffer_iova:     cmd buffer mapped address.
- * @cmd_buffer_size:     Size of command buffer.
- * @vaddr:               CPU virtual address of cmd buffer.
- * @secure_mode:         Indicates if secure-session is in progress
- * @esd_check_underway:  Indicates if esd status check is in progress
- * @debugfs_root:        Root for debugfs entries.
- * @misr_enable:         Frame MISR enable/disable
- * @misr_cache:          Cached Frame MISR value
- * @phy_isolation_enabled:    A boolean property allows to isolate the phy from
- *                          dsi controller and run only dsi controller.
- * @null_insertion_enabled:  A boolean property to allow dsi controller to
- *                           insert null packet.
- * @modeupdated:	  Boolean to send new roi if mode is updated.
- */
-struct dsi_ctrl {
-	struct platform_device *pdev;
-	u32 cell_index;
-	u32 horiz_index;
-	const char *name;
-	u32 refcount;
-	struct mutex ctrl_lock;
-	struct drm_device *drm_dev;
-
-	enum dsi_ctrl_version version;
-	struct dsi_ctrl_hw hw;
-
-	/* Current state */
-	struct dsi_ctrl_state_info current_state;
-	struct clk_ctrl_cb clk_cb;
-
-	struct dsi_ctrl_interrupts irq_info;
-	struct dsi_event_cb_info recovery_cb;
-
-	/* Clock and power states */
-	struct dsi_ctrl_clk_info clk_info;
-	struct link_clk_freq clk_freq;
-	struct dsi_ctrl_power_info pwr_info;
-	struct dsi_ctrl_bus_scale_info axi_bus_info;
-
-	struct dsi_host_config host_config;
-	struct dsi_rect mode_bounds;
-	struct dsi_rect roi;
-
-	/* Command tx and rx */
-	struct drm_gem_object *tx_cmd_buf;
-	u32 cmd_buffer_size;
-	u32 cmd_buffer_iova;
-	u32 cmd_len;
-	void *vaddr;
-	bool secure_mode;
-	bool esd_check_underway;
-
-	/* Debug Information */
-	struct dentry *debugfs_root;
-
-	/* MISR */
-	bool misr_enable;
-	u32 misr_cache;
-
-	/* Check for spurious interrupts */
-	unsigned long jiffies_start;
-	unsigned int error_interrupt_count;
-
-	bool phy_isolation_enabled;
-	bool null_insertion_enabled;
-	bool modeupdated;
-};
-
-/**
- * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
- * @of_node:    of_node of the DSI controller.
- *
- * Gets the DSI controller handle for the corresponding of_node. The ref count
- * is incremented to one and all subsequent gets will fail until the original
- * clients calls a put.
- *
- * Return: DSI Controller handle.
- */
-struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node);
-
-/**
- * dsi_ctrl_put() - releases a dsi controller handle.
- * @dsi_ctrl:       DSI controller handle.
- *
- * Releases the DSI controller. Driver will clean up all resources and puts back
- * the DSI controller into reset state.
- */
-void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_drv_init() - initialize dsi controller driver.
- * @dsi_ctrl:      DSI controller handle.
- * @parent:        Parent directory for debug fs.
- *
- * Initializes DSI controller driver. Driver should be initialized after
- * dsi_ctrl_get() succeeds.
- *
- * Return: error code.
- */
-int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent);
-
-/**
- * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
- * @dsi_ctrl:      DSI controller handle.
- *
- * Releases all resources acquired by dsi_ctrl_drv_init().
- *
- * Return: error code.
- */
-int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_validate_timing() - validate a video timing configuration
- * @dsi_ctrl:       DSI controller handle.
- * @timing:         Pointer to timing data.
- *
- * Driver will validate if the timing configuration is supported on the
- * controller hardware.
- *
- * Return: error code if timing is not supported.
- */
-int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
-			     struct dsi_mode_info *timing);
-
-/**
- * dsi_ctrl_update_host_config() - update dsi host configuration
- * @dsi_ctrl:          DSI controller handle.
- * @config:            DSI host configuration.
- * @flags:             dsi_mode_flags modifying the behavior
- * @clk_handle:        Clock handle for DSI clocks
- *
- * Updates driver with new Host configuration to use for host initialization.
- * This function call will only update the software context. The stored
- * configuration information will be used when the host is initialized.
- *
- * Return: error code.
- */
-int dsi_ctrl_update_host_config(struct dsi_ctrl *dsi_ctrl,
-				struct dsi_host_config *config,
-				int flags, void *clk_handle);
-
-/**
- * dsi_ctrl_timing_db_update() - update only controller Timing DB
- * @dsi_ctrl:          DSI controller handle.
- * @enable:            Enable/disable Timing DB register
- *
- * Update timing db register value during dfps usecases
- *
- * Return: error code.
- */
-int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
-		bool enable);
-
-/**
- * dsi_ctrl_async_timing_update() - update only controller timing
- * @dsi_ctrl:          DSI controller handle.
- * @timing:            New DSI timing info
- *
- * Updates host timing values to asynchronously transition to new timing
- * For example, to update the porch values in a seamless/dynamic fps switch.
- *
- * Return: error code.
- */
-int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
-		struct dsi_mode_info *timing);
-
-/**
- * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
- * @dsi_ctrl:         DSI controller handle.
- *
- * Performs a PHY software reset on the DSI controller. Reset should be done
- * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
- * not enabled.
- *
- * This function will fail if driver is in any other state.
- *
- * Return: error code.
- */
-int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_phy_reset_config() - Mask/unmask propagation of ahb reset signal
- *	to DSI PHY hardware.
- * @dsi_ctrl:        DSI controller handle.
- * @enable:			Mask/unmask the PHY reset signal.
- *
- * Return: error code.
- */
-int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * dsi_ctrl_config_clk_gating() - Enable/Disable DSI PHY clk gating
- * @dsi_ctrl:        DSI controller handle.
- * @enable:          Enable/disable DSI PHY clk gating
- * @clk_selection:   clock selection for gating
- *
- * Return: error code.
- */
-int dsi_ctrl_config_clk_gating(struct dsi_ctrl *dsi_ctrl, bool enable,
-		 enum dsi_clk_gate_type clk_selection);
-
-/**
- * dsi_ctrl_soft_reset() - perform a soft reset on DSI controller
- * @dsi_ctrl:         DSI controller handle.
- *
- * The video, command and controller engines will be disabled before the
- * reset is triggered. After, the engines will be re-enabled to the same state
- * as before the reset.
- *
- * If the reset is done while MDP timing engine is turned on, the video
- * engine should be re-enabled only during the vertical blanking time.
- *
- * Return: error code
- */
-int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_host_timing_update - reinitialize host with new timing values
- * @dsi_ctrl:         DSI controller handle.
- *
- * Reinitialize DSI controller hardware with new display timing values
- * when resolution is switched dynamically.
- *
- * Return: error code
- */
-int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_host_init() - Initialize DSI host hardware.
- * @dsi_ctrl:        DSI controller handle.
- * @is_splash_enabled:       boolean signifying splash status.
- *
- * Initializes DSI controller hardware with host configuration provided by
- * dsi_ctrl_update_host_config(). Initialization can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
- * performed.
- *
- * Return: error code.
- */
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled);
-
-/**
- * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
- * @dsi_ctrl:        DSI controller handle.
- *
- * De-initializes DSI controller hardware. It can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
- *
- * Return: error code.
- */
-int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_ctrl:		DSI controller handle.
- * @enable:		enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
- * @dsi_ctrl:        DSI controller handle.
- *
- * Initializes DSI controller hardware with host configuration provided by
- * dsi_ctrl_update_host_config(). Initialization can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
- * performed.
- *
- * Also used to program the video mode timing values.
- *
- * Return: error code.
- */
-int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_set_roi() - Set DSI controller's region of interest
- * @dsi_ctrl:        DSI controller handle.
- * @roi:             Region of interest rectangle, must be less than mode bounds
- * @changed:         Output parameter, set to true of the controller's ROI was
- *                   dirtied by setting the new ROI, and DCS cmd update needed
- *
- * Return: error code.
- */
-int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
-		bool *changed);
-
-/**
- * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
- * @dsi_ctrl:          DSI controller handle.
- * @on:                enable/disable test pattern.
- *
- * Test pattern can be enabled only after Video engine (for video mode panels)
- * or command engine (for cmd mode panels) is enabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
-
-/**
- * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
- * @dsi_ctrl:             DSI controller handle.
- * @msg:                  Message to transfer on DSI link.
- * @flags:                Modifiers for message transfer.
- *
- * Command transfer can be done only when command engine is enabled. The
- * transfer API will until either the command transfer finishes or the timeout
- * value is reached. If the trigger is deferred, it will return without
- * triggering the transfer. Command parameters are programmed to hardware.
- *
- * Return: error code.
- */
-int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
-			  const struct mipi_dsi_msg *msg,
-			  u32 flags);
-
-/**
- * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
- * @dsi_ctrl:              DSI controller handle.
- * @flags:                 Modifiers.
- *
- * Return: error code.
- */
-int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
-
-/**
- * dsi_ctrl_update_host_engine_state_for_cont_splash() - update engine
- *                                 states for cont splash usecase
- * @dsi_ctrl:              DSI controller handle.
- * @state:                 DSI engine state
- *
- * Return: error code.
- */
-int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
-				enum dsi_engine_state state);
-
-/**
- * dsi_ctrl_set_power_state() - set power state for dsi controller
- * @dsi_ctrl:          DSI controller handle.
- * @state:             Power state.
- *
- * Set power state for DSI controller. Power state can be changed only when
- * Controller, Video and Command engines are turned off.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
-			     enum dsi_power_state state);
-
-/**
- * dsi_ctrl_set_cmd_engine_state() - set command engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Command engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
-				  enum dsi_engine_state state);
-
-/**
- * dsi_ctrl_validate_host_state() - validate DSI ctrl host state
- * @dsi_ctrl:            DSI Controller handle.
- *
- * Validate DSI cotroller host state
- *
- * Return: boolean indicating whether host is not initialized.
- */
-bool dsi_ctrl_validate_host_state(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_set_vid_engine_state() - set video engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Video engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
-				  enum dsi_engine_state state);
-
-/**
- * dsi_ctrl_set_host_engine_state() - set host engine state
- * @dsi_ctrl:            DSI Controller handle.
- * @state:               Engine state.
- *
- * Host engine state can be modified only when DSI controller power state is
- * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
-				   enum dsi_engine_state state);
-
-/**
- * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_ctrl:         DSI controller handle.
- * @enable:           enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * dsi_ctrl_clk_cb_register() - Register DSI controller clk control callback
- * @dsi_ctrl:         DSI controller handle.
- * @clk__cb:      Structure containing callback for clock control.
- *
- * Register call for DSI clock control
- *
- * Return: error code.
- */
-int dsi_ctrl_clk_cb_register(struct dsi_ctrl *dsi_ctrl,
-	struct clk_ctrl_cb *clk_cb);
-
-/**
- * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
- * @dsi_ctrl:             DSI controller handle.
- * @enable:               enable/disable clamping.
- * @ulps_enabled:         ulps state.
- *
- * Clamps can be enabled/disabled while DSI controller is still turned on.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_Ctrl,
-		bool enable, bool ulps_enabled);
-
-/**
- * dsi_ctrl_set_clock_source() - set clock source fpr dsi link clocks
- * @dsi_ctrl:        DSI controller handle.
- * @source_clks:     Source clocks for DSI link clocks.
- *
- * Clock source should be changed while link clocks are disabled.
- *
- * Return: error code.
- */
-int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
-			      struct dsi_clk_link_set *source_clks);
-
-/**
- * dsi_ctrl_enable_status_interrupt() - enable status interrupts
- * @dsi_ctrl:        DSI controller handle.
- * @intr_idx:        Index interrupt to disable.
- * @event_info:      Pointer to event callback definition
- */
-void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
-		uint32_t intr_idx, struct dsi_event_cb_info *event_info);
-
-/**
- * dsi_ctrl_disable_status_interrupt() - disable status interrupts
- * @dsi_ctrl:        DSI controller handle.
- * @intr_idx:        Index interrupt to disable.
- */
-void dsi_ctrl_disable_status_interrupt(
-		struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
-
-/**
- * dsi_ctrl_setup_misr() - Setup frame MISR
- * @dsi_ctrl:              DSI controller handle.
- * @enable:                enable/disable MISR.
- * @frame_count:           Number of frames to accumulate MISR.
- *
- * Return: error code.
- */
-int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
-			bool enable,
-			u32 frame_count);
-
-/**
- * dsi_ctrl_collect_misr() - Read frame MISR
- * @dsi_ctrl:              DSI controller handle.
- *
- * Return: MISR value.
- */
-u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_cache_misr - Cache frame MISR value
- * @dsi_ctrl:              DSI controller handle.
- */
-void dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_drv_register() - register platform driver for dsi controller
- */
-void dsi_ctrl_drv_register(void);
-
-/**
- * dsi_ctrl_drv_unregister() - unregister platform driver
- */
-void dsi_ctrl_drv_unregister(void);
-
-/**
- * dsi_ctrl_reset() - Reset DSI PHY CLK/DATA lane
- * @dsi_ctrl:        DSI controller handle.
- * @mask:	     Mask to indicate if CLK and/or DATA lane needs reset.
- */
-int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask);
-
-/**
- * dsi_ctrl_get_hw_version() - read dsi controller hw revision
- * @dsi_ctrl:        DSI controller handle.
- */
-int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl);
-
-/**
- * dsi_ctrl_vid_engine_en() - Control DSI video engine HW state
- * @dsi_ctrl:        DSI controller handle.
- * @on:		variable to control video engine ON/OFF.
- */
-int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on);
-
-/**
- * dsi_ctrl_setup_avr() - Set/Clear the AVR_SUPPORT_ENABLE bit
- * @dsi_ctrl:        DSI controller handle.
- * @enable:          variable to control AVR support ON/OFF.
- */
-int dsi_ctrl_setup_avr(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * @dsi_ctrl:        DSI controller handle.
- * cmd_len:	     Length of command.
- * flags:	     Config mode flags.
- */
-void dsi_message_setup_tx_mode(struct dsi_ctrl *dsi_ctrl, u32 cmd_len,
-		u32 *flags);
-
-/**
- * @dsi_ctrl:        DSI controller handle.
- * cmd_len:	     Length of command.
- * flags:	     Config mode flags.
- */
-int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl, u32 cmd_len,
-		u32 *flags);
-
-/**
- * dsi_ctrl_isr_configure() - API to register/deregister dsi isr
- * @dsi_ctrl:              DSI controller handle.
- * @enable:		   variable to control register/deregister isr
- */
-void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * dsi_ctrl_mask_error_status_interrupts() - API to mask dsi ctrl error status
- *                                           interrupts
- * @dsi_ctrl:              DSI controller handle.
- * @idx:                   id indicating which interrupts to enable/disable.
- * @mask_enable:           boolean to enable/disable masking.
- */
-void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
-						bool mask_enable);
-
-/**
- * dsi_ctrl_irq_update() - Put a irq vote to process DSI error
- *				interrupts at any time.
- * @dsi_ctrl:              DSI controller handle.
- * @enable:		   variable to control enable/disable irq line
- */
-void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable);
-
-/**
- * dsi_ctrl_get_host_engine_init_state() - Return host init state
- */
-int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
-		bool *state);
-
-/**
- * dsi_ctrl_wait_for_cmd_mode_mdp_idle() - Wait for command mode engine not to
- *				     be busy sending data from display engine.
- * @dsi_ctrl:                     DSI controller handle.
- */
-int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl);
-/**
- * dsi_ctrl_update_host_init_state() - Set the host initialization state
- */
-int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool en);
-
-/**
- * dsi_ctrl_pixel_format_to_bpp() - returns number of bits per pxl
- */
-int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format);
-
-/**
- * dsi_ctrl_set_continuous_clk() - API to set/unset force clock lane HS request.
- * @dsi_ctrl:                      DSI controller handle.
- * @enable:			   variable to control continuous clock.
- */
-void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
-#endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
deleted file mode 100644
index 2029c16..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ /dev/null
@@ -1,868 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_CTRL_HW_H_
-#define _DSI_CTRL_HW_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-
-#include "dsi_defs.h"
-
-/**
- * Modifier flag for command transmission. If this flag is set, command
- * information is programmed to hardware and transmission is not triggered.
- * Caller should call the trigger_command_dma() to start the transmission. This
- * flag is valed for kickoff_command() and kickoff_fifo_command() operations.
- */
-#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER            0x1
-
-/**
- * enum dsi_ctrl_version - version of the dsi host controller
- * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
- * @DSI_CTRL_VERSION_1_4:     DSI host v1.4 controller
- * @DSI_CTRL_VERSION_2_0:     DSI host v2.0 controller
- * @DSI_CTRL_VERSION_2_2:     DSI host v2.2 controller
- * @DSI_CTRL_VERSION_2_3:     DSI host v2.3 controller
- * @DSI_CTRL_VERSION_2_4:     DSI host v2.4 controller
- * @DSI_CTRL_VERSION_MAX:     max version
- */
-enum dsi_ctrl_version {
-	DSI_CTRL_VERSION_UNKNOWN,
-	DSI_CTRL_VERSION_1_4,
-	DSI_CTRL_VERSION_2_0,
-	DSI_CTRL_VERSION_2_2,
-	DSI_CTRL_VERSION_2_3,
-	DSI_CTRL_VERSION_2_4,
-	DSI_CTRL_VERSION_MAX
-};
-
-/**
- * enum dsi_ctrl_hw_features - features supported by dsi host controller
- * @DSI_CTRL_VIDEO_TPG:               Test pattern support for video mode.
- * @DSI_CTRL_CMD_TPG:                 Test pattern support for command mode.
- * @DSI_CTRL_VARIABLE_REFRESH_RATE:   variable panel timing
- * @DSI_CTRL_DYNAMIC_REFRESH:         variable pixel clock rate
- * @DSI_CTRL_NULL_PACKET_INSERTION:   NULL packet insertion
- * @DSI_CTRL_DESKEW_CALIB:            Deskew calibration support
- * @DSI_CTRL_DPHY:                    Controller support for DPHY
- * @DSI_CTRL_CPHY:                    Controller support for CPHY
- * @DSI_CTRL_MAX_FEATURES:
- */
-enum dsi_ctrl_hw_features {
-	DSI_CTRL_VIDEO_TPG,
-	DSI_CTRL_CMD_TPG,
-	DSI_CTRL_VARIABLE_REFRESH_RATE,
-	DSI_CTRL_DYNAMIC_REFRESH,
-	DSI_CTRL_NULL_PACKET_INSERTION,
-	DSI_CTRL_DESKEW_CALIB,
-	DSI_CTRL_DPHY,
-	DSI_CTRL_CPHY,
-	DSI_CTRL_MAX_FEATURES
-};
-
-/**
- * enum dsi_test_pattern - test pattern type
- * @DSI_TEST_PATTERN_FIXED:     Test pattern is fixed, based on init value.
- * @DSI_TEST_PATTERN_INC:       Incremental test pattern, base on init value.
- * @DSI_TEST_PATTERN_POLY:      Pattern generated from polynomial and init val.
- * @DSI_TEST_PATTERN_MAX:
- */
-enum dsi_test_pattern {
-	DSI_TEST_PATTERN_FIXED = 0,
-	DSI_TEST_PATTERN_INC,
-	DSI_TEST_PATTERN_POLY,
-	DSI_TEST_PATTERN_MAX
-};
-
-/**
- * enum dsi_status_int_index - index of interrupts generated by DSI controller
- * @DSI_SINT_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
- * @DSI_SINT_CMD_STREAM0_FRAME_DONE:   A frame of cmd mode stream0 is sent out.
- * @DSI_SINT_CMD_STREAM1_FRAME_DONE:   A frame of cmd mode stream1 is sent out.
- * @DSI_SINT_CMD_STREAM2_FRAME_DONE:   A frame of cmd mode stream2 is sent out.
- * @DSI_SINT_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
- * @DSI_SINT_BTA_DONE:                 A BTA is completed.
- * @DSI_SINT_CMD_FRAME_DONE:           A frame of selected cmd mode stream is
- *                                     sent out by MDP.
- * @DSI_SINT_DYN_REFRESH_DONE:         The dynamic refresh operation completed.
- * @DSI_SINT_DESKEW_DONE:              The deskew calibration operation done.
- * @DSI_SINT_DYN_BLANK_DMA_DONE:       The dynamic blankin DMA operation has
- *                                     completed.
- * @DSI_SINT_ERROR:                    DSI error has happened.
- */
-enum dsi_status_int_index {
-	DSI_SINT_CMD_MODE_DMA_DONE = 0,
-	DSI_SINT_CMD_STREAM0_FRAME_DONE = 1,
-	DSI_SINT_CMD_STREAM1_FRAME_DONE = 2,
-	DSI_SINT_CMD_STREAM2_FRAME_DONE = 3,
-	DSI_SINT_VIDEO_MODE_FRAME_DONE = 4,
-	DSI_SINT_BTA_DONE = 5,
-	DSI_SINT_CMD_FRAME_DONE = 6,
-	DSI_SINT_DYN_REFRESH_DONE = 7,
-	DSI_SINT_DESKEW_DONE = 8,
-	DSI_SINT_DYN_BLANK_DMA_DONE = 9,
-	DSI_SINT_ERROR = 10,
-
-	DSI_STATUS_INTERRUPT_COUNT
-};
-
-/**
- * enum dsi_status_int_type - status interrupts generated by DSI controller
- * @DSI_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
- * @DSI_CMD_STREAM0_FRAME_DONE:   A frame of command mode stream0 is sent out.
- * @DSI_CMD_STREAM1_FRAME_DONE:   A frame of command mode stream1 is sent out.
- * @DSI_CMD_STREAM2_FRAME_DONE:   A frame of command mode stream2 is sent out.
- * @DSI_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
- * @DSI_BTA_DONE:                 A BTA is completed.
- * @DSI_CMD_FRAME_DONE:           A frame of selected command mode stream is
- *                                sent out by MDP.
- * @DSI_DYN_REFRESH_DONE:         The dynamic refresh operation has completed.
- * @DSI_DESKEW_DONE:              The deskew calibration operation has completed
- * @DSI_DYN_BLANK_DMA_DONE:       The dynamic blankin DMA operation has
- *                                completed.
- * @DSI_ERROR:                    DSI error has happened.
- */
-enum dsi_status_int_type {
-	DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
-	DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE),
-	DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE),
-	DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE),
-	DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE),
-	DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE),
-	DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
-	DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
-	DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
-	DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE),
-	DSI_ERROR = BIT(DSI_SINT_ERROR)
-};
-
-/**
- * enum dsi_error_int_index - index of error interrupts from DSI controller
- * @DSI_EINT_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
- * @DSI_EINT_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
- * @DSI_EINT_RDBK_CRC_ERR:               CRC error in read packet.
- * @DSI_EINT_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
- * @DSI_EINT_PERIPH_ERROR_PKT:           Error packet returned from peripheral,
- * @DSI_EINT_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
- * @DSI_EINT_HS_TX_TIMEOUT:              High speed fwd transmission timeout.
- * @DSI_EINT_BTA_TIMEOUT:                BTA timeout.
- * @DSI_EINT_PLL_UNLOCK:                 PLL has unlocked.
- * @DSI_EINT_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
- * @DSI_EINT_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
- * @DSI_EINT_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
- * @DSI_EINT_PANEL_SPECIFIC_ERR:         DSI Protocol violation error.
- * @DSI_EINT_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
- * @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
- * @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
- *                                       receive one complete line from MDP).
- * @DSI_EINT_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO data lane 0 overflows.
- * @DSI_EINT_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO data lane 1 overflows.
- * @DSI_EINT_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO data lane 2 overflows.
- * @DSI_EINT_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO data lane 3 overflows.
- * @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 0 underflows.
- * @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 1 underflows.
- * @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 2 underflows.
- * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 3 undeflows.
- * @DSI_EINT_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 low.
- * @DSI_EINT_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 low.
- * @DSI_EINT_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 low.
- * @DSI_EINT_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 low.
- * @DSI_EINT_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 high.
- * @DSI_EINT_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 high.
- * @DSI_EINT_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 high.
- * @DSI_EINT_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 high.
- */
-enum dsi_error_int_index {
-	DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
-	DSI_EINT_RDBK_MULTI_ECC_ERR = 1,
-	DSI_EINT_RDBK_CRC_ERR = 2,
-	DSI_EINT_RDBK_INCOMPLETE_PKT = 3,
-	DSI_EINT_PERIPH_ERROR_PKT = 4,
-	DSI_EINT_LP_RX_TIMEOUT = 5,
-	DSI_EINT_HS_TX_TIMEOUT = 6,
-	DSI_EINT_BTA_TIMEOUT = 7,
-	DSI_EINT_PLL_UNLOCK = 8,
-	DSI_EINT_DLN0_ESC_ENTRY_ERR = 9,
-	DSI_EINT_DLN0_ESC_SYNC_ERR = 10,
-	DSI_EINT_DLN0_LP_CONTROL_ERR = 11,
-	DSI_EINT_PANEL_SPECIFIC_ERR = 12,
-	DSI_EINT_INTERLEAVE_OP_CONTENTION = 13,
-	DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14,
-	DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15,
-	DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16,
-	DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17,
-	DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18,
-	DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19,
-	DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20,
-	DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21,
-	DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22,
-	DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23,
-	DSI_EINT_DLN0_LP0_CONTENTION = 24,
-	DSI_EINT_DLN1_LP0_CONTENTION = 25,
-	DSI_EINT_DLN2_LP0_CONTENTION = 26,
-	DSI_EINT_DLN3_LP0_CONTENTION = 27,
-	DSI_EINT_DLN0_LP1_CONTENTION = 28,
-	DSI_EINT_DLN1_LP1_CONTENTION = 29,
-	DSI_EINT_DLN2_LP1_CONTENTION = 30,
-	DSI_EINT_DLN3_LP1_CONTENTION = 31,
-
-	DSI_ERROR_INTERRUPT_COUNT
-};
-
-/**
- * enum dsi_error_int_type - error interrupts generated by DSI controller
- * @DSI_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
- * @DSI_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
- * @DSI_RDBK_CRC_ERR:               CRC error in read packet.
- * @DSI_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
- * @DSI_PERIPH_ERROR_PKT:           Error packet returned from peripheral,
- * @DSI_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
- * @DSI_HS_TX_TIMEOUT:              High speed forward transmission timeout.
- * @DSI_BTA_TIMEOUT:                BTA timeout.
- * @DSI_PLL_UNLOCK:                 PLL has unlocked.
- * @DSI_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
- * @DSI_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
- * @DSI_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
- * @DSI_PANEL_SPECIFIC_ERR:         DSI Protocol violation.
- * @DSI_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
- * @DSI_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
- * @DSI_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
- *                                  receive one complete line from MDP).
- * @DSI_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 0 overflows.
- * @DSI_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 1 overflows.
- * @DSI_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 2 overflows.
- * @DSI_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 3 overflows.
- * @DSI_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 0 underflows.
- * @DSI_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 1 underflows.
- * @DSI_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 2 underflows.
- * @DSI_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 3 undeflows.
- * @DSI_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 is low.
- * @DSI_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 is low.
- * @DSI_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 is low.
- * @DSI_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 is low.
- * @DSI_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 is high.
- * @DSI_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 is high.
- * @DSI_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 is high.
- * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
- */
-enum dsi_error_int_type {
-	DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
-	DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR),
-	DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR),
-	DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT),
-	DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT),
-	DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT),
-	DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT),
-	DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT),
-	DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK),
-	DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR),
-	DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR),
-	DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR),
-	DSI_PANEL_SPECIFIC_ERR = BIT(DSI_EINT_PANEL_SPECIFIC_ERR),
-	DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION),
-	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW),
-	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW),
-	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW),
-	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW),
-	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW),
-	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW),
-	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW),
-	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW),
-	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW),
-	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW),
-	DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION),
-	DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION),
-	DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION),
-	DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION),
-	DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION),
-	DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
-	DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
-	DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
-};
-
-/**
- * struct dsi_ctrl_cmd_dma_info - command buffer information
- * @offset:        IOMMU VA for command buffer address.
- * @length:        Length of the command buffer.
- * @datatype:      Datatype of cmd.
- * @en_broadcast:  Enable broadcast mode if set to true.
- * @is_master:     Is master in broadcast mode.
- * @use_lpm:       Use low power mode for command transmission.
- */
-struct dsi_ctrl_cmd_dma_info {
-	u32 offset;
-	u32 length;
-	u8  datatype;
-	bool en_broadcast;
-	bool is_master;
-	bool use_lpm;
-};
-
-/**
- * struct dsi_ctrl_cmd_dma_fifo_info - command payload tp be sent using FIFO
- * @command:        VA for command buffer.
- * @size:           Size of the command buffer.
- * @en_broadcast:   Enable broadcast mode if set to true.
- * @is_master:      Is master in broadcast mode.
- * @use_lpm:        Use low power mode for command transmission.
- */
-struct dsi_ctrl_cmd_dma_fifo_info {
-	u32 *command;
-	u32 size;
-	bool en_broadcast;
-	bool is_master;
-	bool use_lpm;
-};
-
-struct dsi_ctrl_hw;
-
-struct ctrl_ulps_config_ops {
-	/**
-	 * ulps_request() - request ulps entry for specified lanes
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to enter ULPS.
-	 *
-	 * Caller should check if lanes are in ULPS mode by calling
-	 * get_lanes_in_ulps() operation.
-	 */
-	void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
-
-	/**
-	 * ulps_exit() - exit ULPS on specified lanes
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to exit ULPS.
-	 *
-	 * Caller should check if lanes are in active mode by calling
-	 * get_lanes_in_ulps() operation.
-	 */
-	void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
-
-	/**
-	 * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
-	 * @ctrl:          Pointer to the controller host hardware.
-	 *
-	 * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
-	 * state. If 0 is returned, all the lanes are active.
-	 *
-	 * Return: List of lanes in ULPS state.
-	 */
-	u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
-};
-
-/**
- * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
- */
-struct dsi_ctrl_hw_ops {
-
-	/**
-	 * host_setup() - Setup DSI host configuration
-	 * @ctrl:          Pointer to controller host hardware.
-	 * @config:        Configuration for DSI host controller
-	 */
-	void (*host_setup)(struct dsi_ctrl_hw *ctrl,
-			   struct dsi_host_common_cfg *config);
-
-	/**
-	 * video_engine_en() - enable DSI video engine
-	 * @ctrl:          Pointer to controller host hardware.
-	 * @on:            Enable/disabel video engine.
-	 */
-	void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
-
-	/**
-	 * setup_avr() - set the AVR_SUPPORT_ENABLE bit in DSI_VIDEO_MODE_CTRL
-	 * @ctrl:	   Pointer to controller host hardware.
-	 * @enable:	   Controls whether this bit is set or cleared
-	 */
-	void (*setup_avr)(struct dsi_ctrl_hw *ctrl, bool enable);
-
-	/**
-	 * video_engine_setup() - Setup dsi host controller for video mode
-	 * @ctrl:          Pointer to controller host hardware.
-	 * @common_cfg:    Common configuration parameters.
-	 * @cfg:           Video mode configuration.
-	 *
-	 * Set up DSI video engine with a specific configuration. Controller and
-	 * video engine are not enabled as part of this function.
-	 */
-	void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
-				   struct dsi_host_common_cfg *common_cfg,
-				   struct dsi_video_engine_cfg *cfg);
-
-	/**
-	 * set_video_timing() - set up the timing for video frame
-	 * @ctrl:          Pointer to controller host hardware.
-	 * @mode:          Video mode information.
-	 *
-	 * Set up the video timing parameters for the DSI video mode operation.
-	 */
-	void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
-				 struct dsi_mode_info *mode);
-
-	/**
-	 * cmd_engine_setup() - setup dsi host controller for command mode
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @common_cfg:    Common configuration parameters.
-	 * @cfg:           Command mode configuration.
-	 *
-	 * Setup DSI CMD engine with a specific configuration. Controller and
-	 * command engine are not enabled as part of this function.
-	 */
-	void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
-				 struct dsi_host_common_cfg *common_cfg,
-				 struct dsi_cmd_engine_cfg *cfg);
-
-	/**
-	 * setup_cmd_stream() - set up parameters for command pixel streams
-	 * @ctrl:              Pointer to controller host hardware.
-	 * @mode:              Pointer to mode information.
-	 * @h_stride:          Horizontal stride in bytes.
-	 * @vc_id:             stream_id.
-	 *
-	 * Setup parameters for command mode pixel stream size.
-	 */
-	void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
-				 struct dsi_mode_info *mode,
-				 u32 h_stride,
-				 u32 vc_id,
-				 struct dsi_rect *roi);
-
-	/**
-	 * ctrl_en() - enable DSI controller engine
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @on:            turn on/off the DSI controller engine.
-	 */
-	void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
-
-	/**
-	 * cmd_engine_en() - enable DSI controller command engine
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @on:            Turn on/off the DSI command engine.
-	 */
-	void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
-
-	/**
-	 * phy_sw_reset() - perform a soft reset on the PHY.
-	 * @ctrl:        Pointer to the controller host hardware.
-	 */
-	void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * config_clk_gating() - enable/disable DSI PHY clk gating
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @enable:        enable/disable DSI PHY clock gating.
-	 * @clk_selection:        clock to enable/disable clock gating.
-	 */
-	void (*config_clk_gating)(struct dsi_ctrl_hw *ctrl, bool enable,
-			enum dsi_clk_gate_type clk_selection);
-
-	/**
-	 * debug_bus() - get dsi debug bus status.
-	 * @ctrl:        Pointer to the controller host hardware.
-	 * @entries:     Array of dsi debug bus control values.
-	 * @size:        Size of dsi debug bus control array.
-	 */
-	void (*debug_bus)(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size);
-
-	/**
-	 * soft_reset() - perform a soft reset on DSI controller
-	 * @ctrl:          Pointer to the controller host hardware.
-	 *
-	 * The video, command and controller engines will be disabled before the
-	 * reset is triggered. After, the engines will be re-enabled to the same
-	 * state as before the reset.
-	 *
-	 * If the reset is done while MDP timing engine is turned on, the video
-	 * engine should be re-enabled only during the vertical blanking time.
-	 */
-	void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * setup_lane_map() - setup mapping between logical and physical lanes
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @lane_map:      Structure defining the mapping between DSI logical
-	 *                 lanes and physical lanes.
-	 */
-	void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
-			       struct dsi_lane_map *lane_map);
-
-	/**
-	 * kickoff_command() - transmits commands stored in memory
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @cmd:           Command information.
-	 * @flags:         Modifiers for command transmission.
-	 *
-	 * The controller hardware is programmed with address and size of the
-	 * command buffer. The transmission is kicked off if
-	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
-	 * set, caller should make a separate call to trigger_command_dma() to
-	 * transmit the command.
-	 */
-	void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
-				struct dsi_ctrl_cmd_dma_info *cmd,
-				u32 flags);
-
-	/**
-	 * kickoff_command_non_embedded_mode() - cmd in non embedded mode
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @cmd:           Command information.
-	 * @flags:         Modifiers for command transmission.
-	 *
-	 * If command length is greater than DMA FIFO size of 256 bytes we use
-	 * this non- embedded mode.
-	 * The controller hardware is programmed with address and size of the
-	 * command buffer. The transmission is kicked off if
-	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
-	 * set, caller should make a separate call to trigger_command_dma() to
-	 * transmit the command.
-	 */
-
-	void (*kickoff_command_non_embedded_mode)(struct dsi_ctrl_hw *ctrl,
-				struct dsi_ctrl_cmd_dma_info *cmd,
-				u32 flags);
-
-	/**
-	 * kickoff_fifo_command() - transmits a command using FIFO in dsi
-	 *                          hardware.
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @cmd:           Command information.
-	 * @flags:         Modifiers for command transmission.
-	 *
-	 * The controller hardware FIFO is programmed with command header and
-	 * payload. The transmission is kicked off if
-	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
-	 * set, caller should make a separate call to trigger_command_dma() to
-	 * transmit the command.
-	 */
-	void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
-				     u32 flags);
-
-	void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
-	/**
-	 * trigger_command_dma() - trigger transmission of command buffer.
-	 * @ctrl:          Pointer to the controller host hardware.
-	 *
-	 * This trigger can be only used if there was a prior call to
-	 * kickoff_command() of kickoff_fifo_command() with
-	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
-	 */
-	void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * get_cmd_read_data() - get data read from the peripheral
-	 * @ctrl:           Pointer to the controller host hardware.
-	 * @rd_buf:         Buffer where data will be read into.
-	 * @read_offset:    Offset from where to read.
-	 * @rx_byte:        Number of bytes to be read.
-	 * @pkt_size:        Size of response expected.
-	 * @hw_read_cnt:    Actual number of bytes read by HW.
-	 */
-	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
-				 u8 *rd_buf,
-				 u32 read_offset,
-				 u32 rx_byte,
-				 u32 pkt_size,
-				 u32 *hw_read_cnt);
-
-	/**
-	 * get_cont_splash_status() - get continuous splash status
-	 * @ctrl:           Pointer to the controller host hardware.
-	 */
-	bool (*get_cont_splash_status)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * wait_for_lane_idle() - wait for DSI lanes to go to idle state
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to be checked to be in idle state.
-	 */
-	int (*wait_for_lane_idle)(struct dsi_ctrl_hw *ctrl, u32 lanes);
-
-	struct ctrl_ulps_config_ops ulps_ops;
-
-	/**
-	 * clamp_enable() - enable DSI clamps
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @lanes:        ORed list of lanes which need to have clamps released.
-	 * @enable_ulps: ulps state.
-	 */
-
-	/**
-	 * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @lanes:        ORed list of lanes which need to have clamps released.
-	 * @enable_ulps: TODO:??
-	 */
-	void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
-			     u32 lanes,
-			     bool enable_ulps);
-
-	/**
-	 * clamp_disable() - disable DSI clamps
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @lanes:        ORed list of lanes which need to have clamps released.
-	 * @disable_ulps: ulps state.
-	 */
-	void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
-			      u32 lanes,
-			      bool disable_ulps);
-
-	/**
-	 * phy_reset_config() - Disable/enable propagation of  reset signal
-	 *	from ahb domain to DSI PHY
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @enable:	True to mask the reset signal, false to unmask
-	 */
-	void (*phy_reset_config)(struct dsi_ctrl_hw *ctrl,
-			     bool enable);
-
-	/**
-	 * get_interrupt_status() - returns the interrupt status
-	 * @ctrl:          Pointer to the controller host hardware.
-	 *
-	 * Returns the ORed list of interrupts(enum dsi_status_int_type) that
-	 * are active. This list does not include any error interrupts. Caller
-	 * should call get_error_status for error interrupts.
-	 *
-	 * Return: List of active interrupts.
-	 */
-	u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * clear_interrupt_status() - clears the specified interrupts
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @ints:          List of interrupts to be cleared.
-	 */
-	void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
-
-	/**
-	 * enable_status_interrupts() - enable the specified interrupts
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @ints:          List of interrupts to be enabled.
-	 *
-	 * Enables the specified interrupts. This list will override the
-	 * previous interrupts enabled through this function. Caller has to
-	 * maintain the state of the interrupts enabled. To disable all
-	 * interrupts, set ints to 0.
-	 */
-	void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
-
-	/**
-	 * get_error_status() - returns the error status
-	 * @ctrl:          Pointer to the controller host hardware.
-	 *
-	 * Returns the ORed list of errors(enum dsi_error_int_type) that are
-	 * active. This list does not include any status interrupts. Caller
-	 * should call get_interrupt_status for status interrupts.
-	 *
-	 * Return: List of active error interrupts.
-	 */
-	u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * clear_error_status() - clears the specified errors
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @errors:          List of errors to be cleared.
-	 */
-	void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
-
-	/**
-	 * enable_error_interrupts() - enable the specified interrupts
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @errors:        List of errors to be enabled.
-	 *
-	 * Enables the specified interrupts. This list will override the
-	 * previous interrupts enabled through this function. Caller has to
-	 * maintain the state of the interrupts enabled. To disable all
-	 * interrupts, set errors to 0.
-	 */
-	void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
-
-	/**
-	 * video_test_pattern_setup() - setup test pattern engine for video mode
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @type:          Type of test pattern.
-	 * @init_val:      Initial value to use for generating test pattern.
-	 */
-	void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
-					 enum dsi_test_pattern type,
-					 u32 init_val);
-
-	/**
-	 * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @type:          Type of test pattern.
-	 * @init_val:      Initial value to use for generating test pattern.
-	 * @stream_id:     Stream Id on which packets are generated.
-	 */
-	void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
-				       enum dsi_test_pattern  type,
-				       u32 init_val,
-				       u32 stream_id);
-
-	/**
-	 * test_pattern_enable() - enable test pattern engine
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @enable:        Enable/Disable test pattern engine.
-	 */
-	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
-
-	/**
-	 * clear_phy0_ln_err() - clear DSI PHY lane-0 errors
-	 * @ctrl:          Pointer to the controller host hardware.
-	 */
-	void (*clear_phy0_ln_err)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * trigger_cmd_test_pattern() - trigger a command mode frame update with
-	 *                              test pattern
-	 * @ctrl:          Pointer to the controller host hardware.
-	 * @stream_id:     Stream on which frame update is sent.
-	 */
-	void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
-					 u32 stream_id);
-
-	ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
-				      char *buf,
-				      u32 size);
-
-	/**
-	 * setup_misr() - Setup frame MISR
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @panel_mode:   CMD or VIDEO mode indicator
-	 * @enable:       Enable/disable MISR.
-	 * @frame_count:  Number of frames to accumulate MISR.
-	 */
-	void (*setup_misr)(struct dsi_ctrl_hw *ctrl,
-			   enum dsi_op_mode panel_mode,
-			   bool enable, u32 frame_count);
-
-	/**
-	 * collect_misr() - Read frame MISR
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @panel_mode:   CMD or VIDEO mode indicator
-	 */
-	u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
-			    enum dsi_op_mode panel_mode);
-
-	/**
-	 * set_timing_db() - enable/disable Timing DB register
-	 * @ctrl:          Pointer to controller host hardware.
-	 * @enable:        Enable/Disable flag.
-	 *
-	 * Enable or Disabe the Timing DB register.
-	 */
-	void (*set_timing_db)(struct dsi_ctrl_hw *ctrl,
-				 bool enable);
-	/**
-	 * clear_rdbk_register() - Clear and reset read back register
-	 * @ctrl:         Pointer to the controller host hardware.
-	 */
-	void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
-
-	/** schedule_dma_cmd() - Schdeule DMA command transfer on a
-	 *                       particular blanking line.
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @line_no:      Blanking line number on whihch DMA command
-	 *                needs to be sent.
-	 */
-	void (*schedule_dma_cmd)(struct dsi_ctrl_hw *ctrl, int line_no);
-
-	/**
-	 * ctrl_reset() - Reset DSI lanes to recover from DSI errors
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @mask:         Indicates the error type.
-	 */
-	int (*ctrl_reset)(struct dsi_ctrl_hw *ctrl, int mask);
-
-	/**
-	 * mask_error_int() - Mask/Unmask particular DSI error interrupts
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @idx:	  Indicates the errors to be masked.
-	 * @en:		  Bool for mask or unmask of the error
-	 */
-	void (*mask_error_intr)(struct dsi_ctrl_hw *ctrl, u32 idx, bool en);
-
-	/**
-	 * error_intr_ctrl() - Mask/Unmask master DSI error interrupt
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @en:		  Bool for mask or unmask of DSI error
-	 */
-	void (*error_intr_ctrl)(struct dsi_ctrl_hw *ctrl, bool en);
-
-	/**
-	 * get_error_mask() - get DSI error interrupt mask status
-	 * @ctrl:         Pointer to the controller host hardware.
-	 */
-	u32 (*get_error_mask)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * get_hw_version() - get DSI controller hw version
-	 * @ctrl:         Pointer to the controller host hardware.
-	 */
-	u32 (*get_hw_version)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * wait_for_cmd_mode_mdp_idle() - wait for command mode engine not to
-	 *                           be busy sending data from display engine
-	 * @ctrl:         Pointer to the controller host hardware.
-	 */
-	int (*wait_for_cmd_mode_mdp_idle)(struct dsi_ctrl_hw *ctrl);
-
-	/**
-	 * hw.ops.set_continuous_clk() - Set continuous clock
-	 * @ctrl:         Pointer to the controller host hardware.
-	 * @enable:	  Bool to control continuous clock request.
-	 */
-	void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
-};
-
-/*
- * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
- * @base:                   VA for the DSI controller base address.
- * @length:                 Length of the DSI controller register map.
- * @mmss_misc_base:         Base address of mmss_misc register map.
- * @mmss_misc_length:       Length of mmss_misc register map.
- * @disp_cc_base:           Base address of disp_cc register map.
- * @disp_cc_length:         Length of disp_cc register map.
- * @index:                  Instance ID of the controller.
- * @feature_map:            Features supported by the DSI controller.
- * @ops:                    Function pointers to the operations supported by the
- *                          controller.
- * @supported_interrupts:   Number of supported interrupts.
- * @supported_errors:       Number of supported errors.
- * @phy_isolation_enabled:    A boolean property allows to isolate the phy from
- *                          dsi controller and run only dsi controller.
- * @null_insertion_enabled:  A boolean property to allow dsi controller to
- *                           insert null packet.
- */
-struct dsi_ctrl_hw {
-	void __iomem *base;
-	u32 length;
-	void __iomem *mmss_misc_base;
-	u32 mmss_misc_length;
-	void __iomem *disp_cc_base;
-	u32 disp_cc_length;
-	u32 index;
-
-	/* features */
-	DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
-	struct dsi_ctrl_hw_ops ops;
-
-	/* capabilities */
-	u32 supported_interrupts;
-	u64 supported_errors;
-
-	bool phy_isolation_enabled;
-	bool null_insertion_enabled;
-};
-
-#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
deleted file mode 100644
index 7420b9b..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
+++ /dev/null
@@ -1,479 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-hw:" fmt
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-
-#include "dsi_ctrl_hw.h"
-#include "dsi_ctrl_reg.h"
-#include "dsi_hw.h"
-
-#define MMSS_MISC_CLAMP_REG_OFF           0x0014
-
-/**
- * dsi_ctrl_hw_14_setup_lane_map() - setup mapping between
- *	logical and physical lanes
- * @ctrl:          Pointer to the controller host hardware.
- * @lane_map:      Structure defining the mapping between DSI logical
- *                 lanes and physical lanes.
- */
-void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
-			       struct dsi_lane_map *lane_map)
-{
-	DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, lane_map->lane_map_v1);
-
-	pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
-}
-
-/**
- * dsi_ctrl_hw_14_wait_for_lane_idle()
- * This function waits for all the active DSI lanes to be idle by polling all
- * the FIFO_EMPTY bits and polling he lane status to ensure that all the lanes
- * are in stop state. This function assumes that the bus clocks required to
- * access the registers are already turned on.
- *
- * @ctrl:      Pointer to the controller host hardware.
- * @lanes:     ORed list of lanes (enum dsi_data_lanes) which need
- *             to be stopped.
- *
- * return: Error code.
- */
-int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes)
-{
-	int rc = 0, val = 0;
-	u32 stop_state_mask = 0, fifo_empty_mask = 0;
-	u32 const sleep_us = 10;
-	u32 const timeout_us = 100;
-
-	if (lanes & DSI_DATA_LANE_0) {
-		stop_state_mask |= BIT(0);
-		fifo_empty_mask |= (BIT(12) | BIT(16));
-	}
-	if (lanes & DSI_DATA_LANE_1) {
-		stop_state_mask |= BIT(1);
-			fifo_empty_mask |= BIT(20);
-	}
-	if (lanes & DSI_DATA_LANE_2) {
-		stop_state_mask |= BIT(2);
-		fifo_empty_mask |= BIT(24);
-	}
-	if (lanes & DSI_DATA_LANE_3) {
-		stop_state_mask |= BIT(3);
-		fifo_empty_mask |= BIT(28);
-	}
-
-	pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
-		fifo_empty_mask);
-	rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
-			(val & fifo_empty_mask), sleep_us, timeout_us);
-	if (rc) {
-		pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
-				__func__, val);
-		goto error;
-	}
-
-	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
-		__func__, stop_state_mask);
-	rc = readl_poll_timeout(ctrl->base + DSI_LANE_STATUS, val,
-			(val & stop_state_mask), sleep_us, timeout_us);
-	if (rc) {
-		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
-			__func__, val);
-		goto error;
-	}
-
-error:
-	return rc;
-
-}
-
-/**
- * ulps_request() - request ulps entry for specified lanes
- * @ctrl:          Pointer to the controller host hardware.
- * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
- *                 to enter ULPS.
- *
- * Caller should check if lanes are in ULPS mode by calling
- * get_lanes_in_ulps() operation.
- */
-void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg |= BIT(4);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(3);
-
-	/*
-	 * ULPS entry request. Wait for short time to make sure
-	 * that the lanes enter ULPS. Recommended as per HPG.
-	 */
-	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
-	usleep_range(100, 110);
-
-	pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
-		 lanes);
-}
-
-/**
- * ulps_exit() - exit ULPS on specified lanes
- * @ctrl:          Pointer to the controller host hardware.
- * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
- *                 to exit ULPS.
- *
- * Caller should check if lanes are in active mode by calling
- * get_lanes_in_ulps() operation.
- */
-void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
-{
-	u32 reg = 0;
-	u32 prev_reg = 0;
-
-	prev_reg = DSI_R32(ctrl, DSI_LANE_CTRL);
-	prev_reg &= BIT(24);
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg |= BIT(12);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(8);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(9);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(10);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(11);
-
-	/*
-	 * ULPS Exit Request
-	 * Hardware requirement is to wait for at least 1ms
-	 */
-	DSI_W32(ctrl, DSI_LANE_CTRL, reg | prev_reg);
-	usleep_range(1000, 1010);
-	/*
-	 * Sometimes when exiting ULPS, it is possible that some DSI
-	 * lanes are not in the stop state which could lead to DSI
-	 * commands not going through. To avoid this, force the lanes
-	 * to be in stop state.
-	 */
-	DSI_W32(ctrl, DSI_LANE_CTRL, (reg << 8) | prev_reg);
-	wmb(); /* ensure lanes are put to stop state */
-	DSI_W32(ctrl, DSI_LANE_CTRL, 0x0 | prev_reg);
-	wmb(); /* ensure lanes are put to stop state */
-
-	pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
-		 ctrl->index, lanes);
-}
-
-/**
- * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
- * @ctrl:          Pointer to the controller host hardware.
- *
- * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
- * state. If 0 is returned, all the lanes are active.
- *
- * Return: List of lanes in ULPS state.
- */
-u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-	u32 lanes = 0;
-
-	reg = DSI_R32(ctrl, DSI_LANE_STATUS);
-	if (!(reg & BIT(8)))
-		lanes |= DSI_DATA_LANE_0;
-	if (!(reg & BIT(9)))
-		lanes |= DSI_DATA_LANE_1;
-	if (!(reg & BIT(10)))
-		lanes |= DSI_DATA_LANE_2;
-	if (!(reg & BIT(11)))
-		lanes |= DSI_DATA_LANE_3;
-	if (!(reg & BIT(12)))
-		lanes |= DSI_CLOCK_LANE;
-
-	pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
-	return lanes;
-}
-
-/**
- * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
- * @ctrl:          Pointer to the controller host hardware.
- * @lanes:         ORed list of lanes which need to be clamped.
- * @enable_ulps:   Boolean to specify if ULPS is enabled in DSI controller
- */
-void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
-				 u32 lanes,
-				 bool enable_ulps)
-{
-	u32 clamp_reg = 0;
-	u32 bit_shift = 0;
-	u32 reg = 0;
-
-	if (ctrl->index == 1)
-		bit_shift = 16;
-
-	if (lanes & DSI_CLOCK_LANE) {
-		clamp_reg |= BIT(9);
-		if (enable_ulps)
-			clamp_reg |= BIT(8);
-	}
-
-	if (lanes & DSI_DATA_LANE_0) {
-		clamp_reg |= BIT(7);
-		if (enable_ulps)
-			clamp_reg |= BIT(6);
-	}
-
-	if (lanes & DSI_DATA_LANE_1) {
-		clamp_reg |= BIT(5);
-		if (enable_ulps)
-			clamp_reg |= BIT(4);
-	}
-
-	if (lanes & DSI_DATA_LANE_2) {
-		clamp_reg |= BIT(3);
-		if (enable_ulps)
-			clamp_reg |= BIT(2);
-	}
-
-	if (lanes & DSI_DATA_LANE_3) {
-		clamp_reg |= BIT(1);
-		if (enable_ulps)
-			clamp_reg |= BIT(0);
-	}
-
-	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
-	reg |= (clamp_reg << bit_shift);
-	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
-
-	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
-	reg |= (BIT(15) << bit_shift);	/* Enable clamp */
-	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
-
-	pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
-		 lanes);
-}
-
-/**
- * clamp_disable() - disable DSI clamps
- * @ctrl:          Pointer to the controller host hardware.
- * @lanes:         ORed list of lanes which need to have clamps released.
- * @disable_ulps:   Boolean to specify if ULPS is enabled in DSI controller
- */
-void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
-				  u32 lanes,
-				  bool disable_ulps)
-{
-	u32 clamp_reg = 0;
-	u32 bit_shift = 0;
-	u32 reg = 0;
-
-	if (ctrl->index == 1)
-		bit_shift = 16;
-
-	if (lanes & DSI_CLOCK_LANE) {
-		clamp_reg |= BIT(9);
-		if (disable_ulps)
-			clamp_reg |= BIT(8);
-	}
-
-	if (lanes & DSI_DATA_LANE_0) {
-		clamp_reg |= BIT(7);
-		if (disable_ulps)
-			clamp_reg |= BIT(6);
-	}
-
-	if (lanes & DSI_DATA_LANE_1) {
-		clamp_reg |= BIT(5);
-		if (disable_ulps)
-			clamp_reg |= BIT(4);
-	}
-
-	if (lanes & DSI_DATA_LANE_2) {
-		clamp_reg |= BIT(3);
-		if (disable_ulps)
-			clamp_reg |= BIT(2);
-	}
-
-	if (lanes & DSI_DATA_LANE_3) {
-		clamp_reg |= BIT(1);
-		if (disable_ulps)
-			clamp_reg |= BIT(0);
-	}
-
-	clamp_reg |= BIT(15); /* Enable clamp */
-	clamp_reg <<= bit_shift;
-
-	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
-	reg &= ~(clamp_reg);
-	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
-
-	pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
-}
-
-#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
-ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
-					  char *buf,
-					  u32 size)
-{
-	u32 len = 0;
-
-	len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
-
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_HW_VERSION));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_FIFO_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA1));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA3));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TRIG_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EXT_MUX));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EOT_PACKET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_INT_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_SOFT_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLK_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLK_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_PHY_SW_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VBIF_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_AES_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VERSION));
-
-	pr_err("LLENGTH = %d\n", len);
-	return len;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
deleted file mode 100644
index 7de0bad..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-hw:" fmt
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-
-#include "dsi_ctrl_hw.h"
-#include "dsi_ctrl_reg.h"
-#include "dsi_hw.h"
-
-void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl,
-		       struct dsi_lane_map *lane_map)
-{
-	u32 reg_value = lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
-			(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4) |
-			(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] << 8) |
-			(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 12);
-
-	DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
-
-	pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
-}
-
-int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl,
-		u32 lanes)
-{
-	int rc = 0, val = 0;
-	u32 fifo_empty_mask = 0;
-	u32 const sleep_us = 10;
-	u32 const timeout_us = 100;
-
-	if (lanes & DSI_DATA_LANE_0)
-		fifo_empty_mask |= (BIT(12) | BIT(16));
-
-	if (lanes & DSI_DATA_LANE_1)
-		fifo_empty_mask |= BIT(20);
-
-	if (lanes & DSI_DATA_LANE_2)
-		fifo_empty_mask |= BIT(24);
-
-	if (lanes & DSI_DATA_LANE_3)
-		fifo_empty_mask |= BIT(28);
-
-	pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
-		fifo_empty_mask);
-	rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
-			(val & fifo_empty_mask), sleep_us, timeout_us);
-	if (rc) {
-		pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
-				__func__, val);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
-ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
-					  char *buf,
-					  u32 size)
-{
-	u32 len = 0;
-
-	len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
-
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_HW_VERSION));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_FIFO_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA1));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA3));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TRIG_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EXT_MUX));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EOT_PACKET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_INT_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_SOFT_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLK_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_CLK_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_PHY_SW_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VBIF_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_AES_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
-	len += snprintf((buf + len), (size - len),
-			DUMP_REG_VALUE(DSI_VERSION));
-
-	pr_err("LLENGTH = %d\n", len);
-	return len;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
deleted file mode 100644
index 8f6afcc..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-hw:" fmt
-
-#include "dsi_ctrl_hw.h"
-#include "dsi_ctrl_reg.h"
-#include "dsi_hw.h"
-#include "dsi_catalog.h"
-
-#define DISP_CC_MISC_CMD_REG_OFF 0x00
-
-/* register to configure DMA scheduling */
-#define DSI_DMA_SCHEDULE_CTRL 0x100
-
-/**
- * dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
- * @ctrl:          Pointer to the controller host hardware.
- * @enable:      boolean to specify enable/disable.
- */
-void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
-		bool enable)
-{
-	u32 reg = 0;
-
-	reg = DSI_DISP_CC_R32(ctrl, DISP_CC_MISC_CMD_REG_OFF);
-
-	/* Mask/unmask disable PHY reset bit */
-	if (enable)
-		reg &= ~BIT(ctrl->index);
-	else
-		reg |= BIT(ctrl->index);
-	DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
-}
-
-/**
- * dsi_ctrl_hw_22_schedule_dma_cmd() - to schedule DMA command transfer
- * @ctrl:         Pointer to the controller host hardware.
- * @line_no:      Line number at which command needs to be sent.
- */
-void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_no)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, DSI_DMA_SCHEDULE_CTRL);
-	reg |= BIT(28);
-	reg |= (line_no & 0xffff);
-
-	DSI_W32(ctrl, DSI_DMA_SCHEDULE_CTRL, reg);
-}
-
-/*
- * dsi_ctrl_hw_22_get_cont_splash_status() - to verify whether continuous
- *                                           splash is enabled or not
- * @ctrl:          Pointer to the controller host hardware.
- *
- * Return:         Return Continuous splash status
- */
-bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-
-	/**
-	 * DSI scratch register 1 is used to notify whether continuous
-	 * splash is enabled or not by bootloader
-	 */
-	reg = DSI_R32(ctrl, DSI_SCRATCH_REGISTER_1);
-	return reg == 0x1;
-}
-
-/*
- * dsi_ctrl_hw_kickoff_non_embedded_mode()-Kickoff cmd  in non-embedded mode
- * @ctrl:                  - Pointer to the controller host hardware.
- * @dsi_ctrl_cmd_dma_info: - command buffer information.
- * @flags:		   - DSI CTRL Flags.
- */
-void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
-				    struct dsi_ctrl_cmd_dma_info *cmd,
-				    u32 flags)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
-
-	reg &= ~BIT(31);/* disable broadcast */
-	reg &= ~BIT(30);
-
-	if (cmd->use_lpm)
-		reg |= BIT(26);
-	else
-		reg &= ~BIT(26);
-
-	/* Select non EMBEDDED_MODE, pick the packet header from register */
-	reg &= ~BIT(28);
-	reg |= BIT(24);/* long packet */
-	reg |= BIT(29);/* wc_sel = 1 */
-	reg |= (((cmd->datatype) & 0x03f) << 16);/* data type */
-	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
-
-	/* Enable WRITE_WATERMARK_DISABLE and READ_WATERMARK_DISABLE bits */
-	reg = DSI_R32(ctrl, DSI_DMA_FIFO_CTRL);
-	reg |= BIT(20);
-	reg |= BIT(16);
-	reg |= 0x33;/* Set READ and WRITE watermark levels to maximum */
-	DSI_W32(ctrl, DSI_DMA_FIFO_CTRL, reg);
-
-	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
-	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, ((cmd->length) & 0xFFFFFF));
-
-	/* wait for writes to complete before kick off */
-	wmb();
-
-	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
-		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-}
-
-/*
- * dsi_ctrl_hw_22_config_clk_gating() - enable/disable clk gating on DSI PHY
- * @ctrl:          Pointer to the controller host hardware.
- * @enable:        bool to notify enable/disable.
- * @clk_selection:        clock to enable/disable clock gating.
- *
- */
-void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
-				enum dsi_clk_gate_type clk_selection)
-{
-	u32 reg = 0;
-	u32 enable_select = 0;
-
-	reg = DSI_DISP_CC_R32(ctrl, DISP_CC_MISC_CMD_REG_OFF);
-
-	if (clk_selection & PIXEL_CLK)
-		enable_select |= ctrl->index ? BIT(6) : BIT(5);
-
-	if (clk_selection & BYTE_CLK)
-		enable_select |= ctrl->index ? BIT(8) : BIT(7);
-
-	if (clk_selection & DSI_PHY)
-		enable_select |= ctrl->index ? BIT(10) : BIT(9);
-
-	if (enable)
-		reg |= enable_select;
-	else
-		reg &= ~enable_select;
-
-	DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
deleted file mode 100644
index 4821fc4..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ /dev/null
@@ -1,1540 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-hw:" fmt
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-
-#include "dsi_catalog.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_ctrl_reg.h"
-#include "dsi_hw.h"
-#include "dsi_panel.h"
-#include "dsi_catalog.h"
-#include "sde_dbg.h"
-
-#define MMSS_MISC_CLAMP_REG_OFF           0x0014
-#define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
-#define DSI_CTRL_CMD_MISR_ENABLE          BIT(28)
-#define DSI_CTRL_VIDEO_MISR_ENABLE        BIT(16)
-
-/* Unsupported formats default to RGB888 */
-static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
-static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
-
-/**
- * dsi_setup_trigger_controls() - setup dsi trigger configurations
- * @ctrl:             Pointer to the controller host hardware.
- * @cfg:              DSI host configuration that is common to both video and
- *                    command modes.
- */
-static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
-				       struct dsi_host_common_cfg *cfg)
-{
-	u32 reg = 0;
-	const u8 trigger_map[DSI_TRIGGER_MAX] = {
-		0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
-
-	reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
-	reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7);
-	reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
-	DSI_W32(ctrl, DSI_TRIG_CTRL, reg);
-}
-
-/**
- * dsi_ctrl_hw_cmn_host_setup() - setup dsi host configuration
- * @ctrl:             Pointer to the controller host hardware.
- * @cfg:              DSI host configuration that is common to both video and
- *                    command modes.
- */
-void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl,
-			       struct dsi_host_common_cfg *cfg)
-{
-	u32 reg_value = 0;
-
-	dsi_setup_trigger_controls(ctrl, cfg);
-
-	/* Setup clocking timing controls */
-	reg_value = ((cfg->t_clk_post & 0x3F) << 8);
-	reg_value |= (cfg->t_clk_pre & 0x3F);
-	DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
-
-	/* EOT packet control */
-	reg_value = cfg->append_tx_eot ? 1 : 0;
-	reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
-	DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
-
-	/* Turn on dsi clocks */
-	DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
-
-	/* Setup DSI control register */
-	reg_value = DSI_R32(ctrl, DSI_CTRL);
-	reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
-	reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
-	reg_value |= BIT(8); /* Clock lane */
-	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
-	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
-	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
-	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
-
-	DSI_W32(ctrl, DSI_CTRL, reg_value);
-
-	if (ctrl->phy_isolation_enabled)
-		DSI_W32(ctrl, DSI_DEBUG_CTRL, BIT(28));
-	pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
-}
-
-/**
- * phy_sw_reset() - perform a soft reset on the PHY.
- * @ctrl:        Pointer to the controller host hardware.
- */
-void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
-{
-	DSI_W32(ctrl, DSI_PHY_SW_RESET, BIT(24)|BIT(0));
-	wmb(); /* make sure reset is asserted */
-	udelay(1000);
-	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
-	wmb(); /* ensure reset is cleared before waiting */
-	udelay(100);
-
-	pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
-}
-
-/**
- * soft_reset() - perform a soft reset on DSI controller
- * @ctrl:          Pointer to the controller host hardware.
- *
- * The video, command and controller engines will be disabled before the
- * reset is triggered and re-enabled after the reset is complete.
- *
- * If the reset is done while MDP timing engine is turned on, the video
- * enigne should be re-enabled only during the vertical blanking time.
- */
-void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-	u32 reg_ctrl = 0;
-
-	/* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
-	reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
-	DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
-	wmb(); /* wait controller to be disabled before reset */
-
-	/* Force enable PCLK, BYTECLK, AHBM_HCLK */
-	reg = DSI_R32(ctrl, DSI_CLK_CTRL);
-	DSI_W32(ctrl, DSI_CLK_CTRL, reg | DSI_CTRL_DYNAMIC_FORCE_ON);
-	wmb(); /* wait for clocks to be enabled */
-
-	/* Trigger soft reset */
-	DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
-	wmb(); /* wait for reset to assert before waiting */
-	udelay(1);
-	DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
-	wmb(); /* ensure reset is cleared */
-
-	/* Disable force clock on */
-	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
-	wmb(); /* make sure clocks are restored */
-
-	/* Re-enable DSI controller */
-	DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
-	wmb(); /* make sure DSI controller is enabled again */
-	pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
-}
-
-/**
- * setup_misr() - Setup frame MISR
- * @ctrl:	  Pointer to the controller host hardware.
- * @panel_mode:   CMD or VIDEO mode indicator
- * @enable:	  Enable/disable MISR.
- * @frame_count:  Number of frames to accumulate MISR.
- */
-void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
-			enum dsi_op_mode panel_mode,
-			bool enable,
-			u32 frame_count)
-{
-	u32 addr;
-	u32 config = 0;
-
-	if (panel_mode == DSI_OP_CMD_MODE) {
-		addr = DSI_MISR_CMD_CTRL;
-		if (enable)
-			config = DSI_CTRL_CMD_MISR_ENABLE;
-	} else {
-		addr = DSI_MISR_VIDEO_CTRL;
-		if (enable)
-			config = DSI_CTRL_VIDEO_MISR_ENABLE;
-		if (frame_count > 255)
-			frame_count = 255;
-		config |= frame_count << 8;
-	}
-
-	pr_debug("[DSI_%d] MISR ctrl: 0x%x\n", ctrl->index,
-			config);
-	DSI_W32(ctrl, addr, config);
-	wmb(); /* make sure MISR is configured */
-}
-
-/**
- * collect_misr() - Read frame MISR
- * @ctrl:	  Pointer to the controller host hardware.
- * @panel_mode:   CMD or VIDEO mode indicator
- */
-u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
-			enum dsi_op_mode panel_mode)
-{
-	u32 addr;
-	u32 enabled;
-	u32 misr = 0;
-
-	if (panel_mode == DSI_OP_CMD_MODE) {
-		addr = DSI_MISR_CMD_MDP0_32BIT;
-		enabled = DSI_R32(ctrl, DSI_MISR_CMD_CTRL) &
-				DSI_CTRL_CMD_MISR_ENABLE;
-	} else {
-		addr = DSI_MISR_VIDEO_32BIT;
-		enabled = DSI_R32(ctrl, DSI_MISR_VIDEO_CTRL) &
-				DSI_CTRL_VIDEO_MISR_ENABLE;
-	}
-
-	if (enabled)
-		misr = DSI_R32(ctrl, addr);
-
-	pr_debug("[DSI_%d] MISR enabled %x value: 0x%x\n", ctrl->index,
-			enabled, misr);
-	return misr;
-}
-
-/**
- * set_timing_db() - enable/disable Timing DB register
- * @ctrl:          Pointer to controller host hardware.
- * @enable:        Enable/Disable flag.
- *
- * Enable or Disabe the Timing DB register.
- */
-void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl,
-				     bool enable)
-{
-	if (enable)
-		DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
-	else
-		DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0);
-
-	wmb(); /* make sure timing db registers are set */
-	pr_debug("[DSI_%d] ctrl timing DB set:%d\n", ctrl->index,
-				enable);
-	SDE_EVT32(ctrl->index, enable);
-}
-
-/**
- * set_video_timing() - set up the timing for video frame
- * @ctrl:          Pointer to controller host hardware.
- * @mode:          Video mode information.
- *
- * Set up the video timing parameters for the DSI video mode operation.
- */
-void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_mode_info *mode)
-{
-	u32 reg = 0;
-	u32 hs_start = 0;
-	u32 hs_end, active_h_start, active_h_end, h_total, width = 0;
-	u32 vs_start = 0, vs_end = 0;
-	u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
-
-	if (mode->dsc_enabled && mode->dsc) {
-		width = mode->dsc->pclk_per_line;
-		reg = mode->dsc->bytes_per_pkt << 16;
-		reg |= (0x0b << 8);    /* dtype of compressed image */
-		/*
-		 * pkt_per_line:
-		 * 0 == 1 pkt
-		 * 1 == 2 pkt
-		 * 2 == 4 pkt
-		 * 3 pkt is not support
-		 */
-		if (mode->dsc->pkt_per_line == 4)
-			reg |= (mode->dsc->pkt_per_line - 2) << 6;
-		else
-			reg |= (mode->dsc->pkt_per_line - 1) << 6;
-		reg |= mode->dsc->eol_byte_num << 4;
-		reg |= 1;
-		DSI_W32(ctrl, DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
-	} else {
-		width = mode->h_active;
-	}
-
-	hs_end = mode->h_sync_width;
-	active_h_start = mode->h_sync_width + mode->h_back_porch;
-	active_h_end = active_h_start + width;
-	h_total = (mode->h_sync_width + mode->h_back_porch + width +
-		   mode->h_front_porch) - 1;
-
-	vpos_end = mode->v_sync_width;
-	active_v_start = mode->v_sync_width + mode->v_back_porch;
-	active_v_end = active_v_start + mode->v_active;
-	v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
-		   mode->v_front_porch) - 1;
-
-	reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
-
-	reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
-
-	reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
-
-	reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
-
-	reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
-
-	reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
-
-	/* TODO: HS TIMER value? */
-	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
-	DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
-	DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
-	pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
-	SDE_EVT32(v_total, h_total);
-}
-
-/**
- * setup_cmd_stream() - set up parameters for command pixel streams
- * @ctrl:              Pointer to controller host hardware.
- * @mode:              Pointer to mode information.
- * @h_stride:          Horizontal stride in bytes.
- * @vc_id:             stream_id
- *
- * Setup parameters for command mode pixel stream size.
- */
-void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_mode_info *mode,
-				     u32 h_stride,
-				     u32 vc_id,
-				     struct dsi_rect *roi)
-{
-	u32 width_final, stride_final;
-	u32 height_final;
-	u32 stream_total = 0, stream_ctrl = 0;
-	u32 reg_ctrl = 0, reg_ctrl2 = 0, data = 0;
-
-	if (roi && (!roi->w || !roi->h))
-		return;
-
-	if (mode->dsc_enabled && mode->dsc) {
-		u32 reg = 0;
-		u32 offset = 0;
-		int pic_width, this_frame_slices, intf_ip_w;
-		struct msm_display_dsc_info dsc;
-
-		memcpy(&dsc, mode->dsc, sizeof(dsc));
-		pic_width = roi ? roi->w : mode->h_active;
-		this_frame_slices = pic_width / dsc.slice_width;
-		intf_ip_w = this_frame_slices * dsc.slice_width;
-		dsi_dsc_pclk_param_calc(&dsc, intf_ip_w);
-
-		if (vc_id != 0)
-			offset = 16;
-		reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
-		reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
-		width_final = dsc.pclk_per_line;
-		stride_final = dsc.bytes_per_pkt;
-		height_final = roi ? roi->h : mode->v_active;
-
-		reg = 0x39 << 8;
-		/*
-		 * pkt_per_line:
-		 * 0 == 1 pkt
-		 * 1 == 2 pkt
-		 * 2 == 4 pkt
-		 * 3 pkt is not support
-		 */
-		if (dsc.pkt_per_line == 4)
-			reg |= (dsc.pkt_per_line - 2) << 6;
-		else
-			reg |= (dsc.pkt_per_line - 1) << 6;
-		reg |= dsc.eol_byte_num << 4;
-		reg |= 1;
-
-		reg_ctrl &= ~(0xFFFF << offset);
-		reg_ctrl |= (reg << offset);
-		reg_ctrl2 &= ~(0xFFFF << offset);
-		reg_ctrl2 |= (dsc.bytes_in_slice << offset);
-
-		pr_debug("ctrl %d reg_ctrl 0x%x reg_ctrl2 0x%x\n", ctrl->index,
-				reg_ctrl, reg_ctrl2);
-	} else if (roi) {
-		width_final = roi->w;
-		stride_final = roi->w * 3;
-		height_final = roi->h;
-	} else {
-		width_final = mode->h_active;
-		stride_final = h_stride;
-		height_final = mode->v_active;
-	}
-
-	/* HS Timer value */
-	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
-
-	stream_ctrl = (stride_final + 1) << 16;
-	stream_ctrl |= (vc_id & 0x3) << 8;
-	stream_ctrl |= 0x39; /* packet data type */
-
-	DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
-	DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
-
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
-
-	stream_total = (height_final << 16) | width_final;
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
-
-	if (ctrl->null_insertion_enabled) {
-		/* enable null packet insertion */
-		data = (vc_id << 1);
-		data |= 0 << 16;
-		data |= 0x1;
-		DSI_W32(ctrl, DSI_COMMAND_MODE_NULL_INSERTION_CTRL, data);
-	}
-
-	pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
-			stream_ctrl, stream_total);
-}
-
-/**
- * setup_avr() - set the AVR_SUPPORT_ENABLE bit in DSI_VIDEO_MODE_CTRL
- * @ctrl:          Pointer to controller host hardware.
- * @enable:        Controls whether this bit is set or cleared
- *
- * Set or clear the AVR_SUPPORT_ENABLE bit in DSI_VIDEO_MODE_CTRL.
- */
-void dsi_ctrl_hw_cmn_setup_avr(struct dsi_ctrl_hw *ctrl, bool enable)
-{
-	u32 reg = DSI_R32(ctrl, DSI_VIDEO_MODE_CTRL);
-
-	if (enable)
-		reg |= BIT(29);
-	else
-		reg &= ~BIT(29);
-
-	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
-	pr_debug("ctrl %d AVR %s\n", ctrl->index,
-			enable ? "enabled" : "disabled");
-}
-
-/**
- * video_engine_setup() - Setup dsi host controller for video mode
- * @ctrl:          Pointer to controller host hardware.
- * @common_cfg:    Common configuration parameters.
- * @cfg:           Video mode configuration.
- *
- * Set up DSI video engine with a specific configuration. Controller and
- * video engine are not enabled as part of this function.
- */
-void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
-				       struct dsi_host_common_cfg *common_cfg,
-				       struct dsi_video_engine_cfg *cfg)
-{
-	u32 reg = 0;
-
-	reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
-	reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
-	reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
-	reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
-	reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
-	reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
-	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
-	reg |= (cfg->traffic_mode & 0x3) << 8;
-	reg |= (cfg->vc_id & 0x3);
-	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
-	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
-
-	reg = (common_cfg->swap_mode & 0x7) << 12;
-	reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
-	reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
-	reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
-	DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
-	/* Disable Timing double buffering */
-	DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0);
-
-	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
-}
-
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size)
-{
-	u32 reg = 0, i = 0;
-
-	for (i = 0; i < size; i++) {
-		DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, entries[i]);
-		/* make sure that debug test point is enabled */
-		wmb();
-		reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
-		pr_err("[DSI_%d] debug bus ctrl: 0x%x status:0x%x\n",
-				ctrl->index, entries[i], reg);
-	}
-}
-
-/**
- * cmd_engine_setup() - setup dsi host controller for command mode
- * @ctrl:          Pointer to the controller host hardware.
- * @common_cfg:    Common configuration parameters.
- * @cfg:           Command mode configuration.
- *
- * Setup DSI CMD engine with a specific configuration. Controller and
- * command engine are not enabled as part of this function.
- */
-void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
-				     struct dsi_host_common_cfg *common_cfg,
-				     struct dsi_cmd_engine_cfg *cfg)
-{
-	u32 reg = 0;
-
-	reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
-	reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
-	reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
-	reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
-	reg |= cmd_mode_format_map[common_cfg->dst_format];
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
-
-	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2);
-	reg |= BIT(16);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2, reg);
-
-	reg = cfg->wr_mem_start & 0xFF;
-	reg |= (cfg->wr_mem_continue & 0xFF) << 8;
-	reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
-
-	pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
-}
-
-/**
- * video_engine_en() - enable DSI video engine
- * @ctrl:          Pointer to controller host hardware.
- * @on:            Enable/disabel video engine.
- */
-void dsi_ctrl_hw_cmn_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
-{
-	u32 reg = 0;
-
-	/* Set/Clear VIDEO_MODE_EN bit */
-	reg = DSI_R32(ctrl, DSI_CTRL);
-	if (on)
-		reg |= BIT(1);
-	else
-		reg &= ~BIT(1);
-
-	DSI_W32(ctrl, DSI_CTRL, reg);
-
-	pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
-}
-
-/**
- * ctrl_en() - enable DSI controller engine
- * @ctrl:          Pointer to the controller host hardware.
- * @on:            turn on/off the DSI controller engine.
- */
-void dsi_ctrl_hw_cmn_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
-{
-	u32 reg = 0;
-	u32 clk_ctrl;
-
-	clk_ctrl = DSI_R32(ctrl, DSI_CLK_CTRL);
-	DSI_W32(ctrl, DSI_CLK_CTRL, clk_ctrl | DSI_CTRL_DYNAMIC_FORCE_ON);
-	wmb(); /* wait for clocks to enable */
-
-	/* Set/Clear DSI_EN bit */
-	reg = DSI_R32(ctrl, DSI_CTRL);
-	if (on)
-		reg |= BIT(0);
-	else
-		reg &= ~BIT(0);
-
-	DSI_W32(ctrl, DSI_CTRL, reg);
-	wmb(); /* wait for DSI_EN update before disabling clocks */
-
-	DSI_W32(ctrl, DSI_CLK_CTRL, clk_ctrl);
-	wmb(); /* make sure clocks are restored */
-
-	pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
-}
-
-/**
- * cmd_engine_en() - enable DSI controller command engine
- * @ctrl:          Pointer to the controller host hardware.
- * @on:            Turn on/off the DSI command engine.
- */
-void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
-{
-	u32 reg = 0;
-
-	/* Set/Clear CMD_MODE_EN bit */
-	reg = DSI_R32(ctrl, DSI_CTRL);
-	if (on)
-		reg |= BIT(2);
-	else
-		reg &= ~BIT(2);
-
-	DSI_W32(ctrl, DSI_CTRL, reg);
-
-	pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
-}
-
-/**
- * kickoff_command() - transmits commands stored in memory
- * @ctrl:          Pointer to the controller host hardware.
- * @cmd:           Command information.
- * @flags:         Modifiers for command transmission.
- *
- * The controller hardware is programmed with address and size of the
- * command buffer. The transmission is kicked off if
- * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
- * set, caller should make a separate call to trigger_command_dma() to
- * transmit the command.
- */
-void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
-				    struct dsi_ctrl_cmd_dma_info *cmd,
-				    u32 flags)
-{
-	u32 reg = 0;
-
-	/*Set BROADCAST_EN and EMBEDDED_MODE */
-	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
-	if (cmd->en_broadcast)
-		reg |= BIT(31);
-	else
-		reg &= ~BIT(31);
-
-	if (cmd->is_master)
-		reg |= BIT(30);
-	else
-		reg &= ~BIT(30);
-
-	if (cmd->use_lpm)
-		reg |= BIT(26);
-	else
-		reg &= ~BIT(26);
-
-	reg |= BIT(28);/* Select embedded mode */
-	reg &= ~BIT(24);/* packet type */
-	reg &= ~BIT(29);/* WC_SEL to 0 */
-	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
-
-	reg = DSI_R32(ctrl, DSI_DMA_FIFO_CTRL);
-	reg |= BIT(20);/* Disable write watermark*/
-	reg |= BIT(16);/* Disable read watermark */
-
-	DSI_W32(ctrl, DSI_DMA_FIFO_CTRL, reg);
-	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
-	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
-
-	/* wait for writes to complete before kick off */
-	wmb();
-
-	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
-		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-}
-
-/**
- * kickoff_fifo_command() - transmits a command using FIFO in dsi
- *                          hardware.
- * @ctrl:          Pointer to the controller host hardware.
- * @cmd:           Command information.
- * @flags:         Modifiers for command transmission.
- *
- * The controller hardware FIFO is programmed with command header and
- * payload. The transmission is kicked off if
- * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
- * set, caller should make a separate call to trigger_command_dma() to
- * transmit the command.
- */
-void dsi_ctrl_hw_cmn_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
-					 struct dsi_ctrl_cmd_dma_fifo_info *cmd,
-					 u32 flags)
-{
-	u32 reg = 0, i = 0;
-	u32 *ptr = cmd->command;
-	/*
-	 * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
-	 * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
-	 */
-	reg = (BIT(1) | BIT(2) | (0x3 << 16));
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
-
-	/*
-	 * Program the FIFO with command buffer. Hardware requires an extra
-	 * DWORD (set to zero) if the length of command buffer is odd DWORDS.
-	 */
-	for (i = 0; i < cmd->size; i += 4) {
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
-		ptr++;
-	}
-
-	if ((cmd->size / 4) & 0x1)
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
-
-	/*Set BROADCAST_EN and EMBEDDED_MODE */
-	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
-	if (cmd->en_broadcast)
-		reg |= BIT(31);
-	else
-		reg &= ~BIT(31);
-
-	if (cmd->is_master)
-		reg |= BIT(30);
-	else
-		reg &= ~BIT(30);
-
-	if (cmd->use_lpm)
-		reg |= BIT(26);
-	else
-		reg &= ~BIT(26);
-
-	reg |= BIT(28);
-
-	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
-
-	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF));
-	/* Finish writes before command trigger */
-	wmb();
-
-	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
-		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-
-	pr_debug("[DSI_%d]size=%d, trigger = %d\n",
-		 ctrl->index, cmd->size,
-		 (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
-}
-
-void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
-{
-	/* disable cmd dma tpg */
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
-
-	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
-	udelay(1);
-	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
-}
-
-/**
- * trigger_command_dma() - trigger transmission of command buffer.
- * @ctrl:          Pointer to the controller host hardware.
- *
- * This trigger can be only used if there was a prior call to
- * kickoff_command() of kickoff_fifo_command() with
- * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
- */
-void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
-{
-	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-	pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
-}
-
-/**
- * clear_rdbk_reg() - clear previously read panel data.
- * @ctrl:          Pointer to the controller host hardware.
- *
- * This function is called before sending DSI Rx command to
- * panel in order to clear if any stale data remaining from
- * previous read operation.
- */
-void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl)
-{
-	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x1);
-	wmb(); /* ensure read back register is reset */
-	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x0);
-	wmb(); /* ensure read back register is cleared */
-}
-
-/**
- * get_cmd_read_data() - get data read from the peripheral
- * @ctrl:           Pointer to the controller host hardware.
- * @rd_buf:         Buffer where data will be read into.
- * @total_read_len: Number of bytes to read.
- *
- * return: number of bytes read.
- */
-u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
-				     u8 *rd_buf,
-				     u32 read_offset,
-				     u32 rx_byte,
-				     u32 pkt_size,
-				     u32 *hw_read_cnt)
-{
-	u32 *lp, *temp, data;
-	int i, j = 0, cnt, off;
-	u32 read_cnt;
-	u32 repeated_bytes = 0;
-	u8 reg[16] = {0};
-	bool ack_err = false;
-
-	lp = (u32 *)rd_buf;
-	temp = (u32 *)reg;
-	cnt = (rx_byte + 3) >> 2;
-
-	if (cnt > 4)
-		cnt = 4;
-
-	read_cnt = (DSI_R32(ctrl, DSI_RDBK_DATA_CTRL) >> 16);
-	ack_err = (rx_byte == 4) ? (read_cnt == 8) :
-			((read_cnt - 4) == (pkt_size + 6));
-
-	if (ack_err)
-		read_cnt -= 4;
-	if (!read_cnt) {
-		pr_err("Panel detected error, no data read\n");
-		return 0;
-	}
-
-	if (read_cnt > 16) {
-		int bytes_shifted, data_lost = 0, rem_header = 0;
-
-		bytes_shifted = read_cnt - rx_byte;
-		if (bytes_shifted >= 4)
-			data_lost = bytes_shifted - 4; /* remove DCS header */
-		else
-			rem_header = 4 - bytes_shifted; /* remaining header */
-
-		repeated_bytes = (read_offset - 4) - data_lost + rem_header;
-	}
-
-	off = DSI_RDBK_DATA0;
-	off += ((cnt - 1) * 4);
-
-	for (i = 0; i < cnt; i++) {
-		data = DSI_R32(ctrl, off);
-		if (!repeated_bytes)
-			*lp++ = ntohl(data);
-		else
-			*temp++ = ntohl(data);
-		off -= 4;
-	}
-
-	if (repeated_bytes) {
-		for (i = repeated_bytes; i < 16; i++)
-			rd_buf[j++] = reg[i];
-	}
-
-	*hw_read_cnt = read_cnt;
-	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, rx_byte);
-	return rx_byte;
-}
-
-/**
- * get_interrupt_status() - returns the interrupt status
- * @ctrl:          Pointer to the controller host hardware.
- *
- * Returns the ORed list of interrupts(enum dsi_status_int_type) that
- * are active. This list does not include any error interrupts. Caller
- * should call get_error_status for error interrupts.
- *
- * Return: List of active interrupts.
- */
-u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-	u32 ints = 0;
-
-	reg = DSI_R32(ctrl, DSI_INT_CTRL);
-
-	if (reg & BIT(0))
-		ints |= DSI_CMD_MODE_DMA_DONE;
-	if (reg & BIT(8))
-		ints |= DSI_CMD_FRAME_DONE;
-	if (reg & BIT(10))
-		ints |= DSI_CMD_STREAM0_FRAME_DONE;
-	if (reg & BIT(12))
-		ints |= DSI_CMD_STREAM1_FRAME_DONE;
-	if (reg & BIT(14))
-		ints |= DSI_CMD_STREAM2_FRAME_DONE;
-	if (reg & BIT(16))
-		ints |= DSI_VIDEO_MODE_FRAME_DONE;
-	if (reg & BIT(20))
-		ints |= DSI_BTA_DONE;
-	if (reg & BIT(28))
-		ints |= DSI_DYN_REFRESH_DONE;
-	if (reg & BIT(30))
-		ints |= DSI_DESKEW_DONE;
-	if (reg & BIT(24))
-		ints |= DSI_ERROR;
-
-	pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
-		 ctrl->index, ints, reg);
-	return ints;
-}
-
-/**
- * clear_interrupt_status() - clears the specified interrupts
- * @ctrl:          Pointer to the controller host hardware.
- * @ints:          List of interrupts to be cleared.
- */
-void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, DSI_INT_CTRL);
-
-	if (ints & DSI_CMD_MODE_DMA_DONE)
-		reg |= BIT(0);
-	if (ints & DSI_CMD_FRAME_DONE)
-		reg |= BIT(8);
-	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
-		reg |= BIT(10);
-	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
-		reg |= BIT(12);
-	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
-		reg |= BIT(14);
-	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
-		reg |= BIT(16);
-	if (ints & DSI_BTA_DONE)
-		reg |= BIT(20);
-	if (ints & DSI_DYN_REFRESH_DONE)
-		reg |= BIT(28);
-	if (ints & DSI_DESKEW_DONE)
-		reg |= BIT(30);
-
-	/*
-	 * Do not clear error status.
-	 * It will be cleared as part of
-	 * error handler function.
-	 */
-	reg &= ~BIT(24);
-	DSI_W32(ctrl, DSI_INT_CTRL, reg);
-
-	pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
-		 ctrl->index, ints, reg);
-}
-
-/**
- * enable_status_interrupts() - enable the specified interrupts
- * @ctrl:          Pointer to the controller host hardware.
- * @ints:          List of interrupts to be enabled.
- *
- * Enables the specified interrupts. This list will override the
- * previous interrupts enabled through this function. Caller has to
- * maintain the state of the interrupts enabled. To disable all
- * interrupts, set ints to 0.
- */
-void dsi_ctrl_hw_cmn_enable_status_interrupts(
-		struct dsi_ctrl_hw *ctrl, u32 ints)
-{
-	u32 reg = 0;
-
-	/* Do not change value of DSI_ERROR_MASK bit */
-	reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
-	if (ints & DSI_CMD_MODE_DMA_DONE)
-		reg |= BIT(1);
-	if (ints & DSI_CMD_FRAME_DONE)
-		reg |= BIT(9);
-	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
-		reg |= BIT(11);
-	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
-		reg |= BIT(13);
-	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
-		reg |= BIT(15);
-	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
-		reg |= BIT(17);
-	if (ints & DSI_BTA_DONE)
-		reg |= BIT(21);
-	if (ints & DSI_DYN_REFRESH_DONE)
-		reg |= BIT(29);
-	if (ints & DSI_DESKEW_DONE)
-		reg |= BIT(31);
-
-	DSI_W32(ctrl, DSI_INT_CTRL, reg);
-
-	pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
-		 ctrl->index, ints, reg);
-}
-
-/**
- * get_error_status() - returns the error status
- * @ctrl:          Pointer to the controller host hardware.
- *
- * Returns the ORed list of errors(enum dsi_error_int_type) that are
- * active. This list does not include any status interrupts. Caller
- * should call get_interrupt_status for status interrupts.
- *
- * Return: List of active error interrupts.
- */
-u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl)
-{
-	u32 dln0_phy_err;
-	u32 fifo_status;
-	u32 ack_error;
-	u32 timeout_errors;
-	u32 clk_error;
-	u32 dsi_status;
-	u64 errors = 0, shift = 0x1;
-
-	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
-	if (dln0_phy_err & BIT(0))
-		errors |= DSI_DLN0_ESC_ENTRY_ERR;
-	if (dln0_phy_err & BIT(4))
-		errors |= DSI_DLN0_ESC_SYNC_ERR;
-	if (dln0_phy_err & BIT(8))
-		errors |= DSI_DLN0_LP_CONTROL_ERR;
-	if (dln0_phy_err & BIT(12))
-		errors |= DSI_DLN0_LP0_CONTENTION;
-	if (dln0_phy_err & BIT(16))
-		errors |= DSI_DLN0_LP1_CONTENTION;
-
-	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
-	if (fifo_status & BIT(7))
-		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
-	if (fifo_status & BIT(10))
-		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
-	if (fifo_status & BIT(18))
-		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
-	if (fifo_status & BIT(19))
-		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
-	if (fifo_status & BIT(22))
-		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
-	if (fifo_status & BIT(23))
-		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
-	if (fifo_status & BIT(26))
-		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
-	if (fifo_status & BIT(27))
-		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
-	if (fifo_status & BIT(30))
-		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
-	if (fifo_status & BIT(31))
-		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
-
-	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
-	if (ack_error & BIT(16))
-		errors |= DSI_RDBK_SINGLE_ECC_ERR;
-	if (ack_error & BIT(17))
-		errors |= DSI_RDBK_MULTI_ECC_ERR;
-	if (ack_error & BIT(20))
-		errors |= DSI_RDBK_CRC_ERR;
-	if (ack_error & BIT(23))
-		errors |= DSI_RDBK_INCOMPLETE_PKT;
-	if (ack_error & BIT(24))
-		errors |= DSI_PERIPH_ERROR_PKT;
-	if (ack_error & BIT(15))
-		errors |= (shift << DSI_EINT_PANEL_SPECIFIC_ERR);
-
-	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
-	if (timeout_errors & BIT(0))
-		errors |= DSI_HS_TX_TIMEOUT;
-	if (timeout_errors & BIT(4))
-		errors |= DSI_LP_RX_TIMEOUT;
-	if (timeout_errors & BIT(8))
-		errors |= DSI_BTA_TIMEOUT;
-
-	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
-	if (clk_error & BIT(16))
-		errors |= DSI_PLL_UNLOCK;
-
-	dsi_status = DSI_R32(ctrl, DSI_STATUS);
-	if (dsi_status & BIT(31))
-		errors |= DSI_INTERLEAVE_OP_CONTENTION;
-
-	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x\n",
-		 ctrl->index, errors, dln0_phy_err, fifo_status);
-	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
-		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
-	return errors;
-}
-
-/**
- * clear_error_status() - clears the specified errors
- * @ctrl:          Pointer to the controller host hardware.
- * @errors:          List of errors to be cleared.
- */
-void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
-{
-	u32 dln0_phy_err = 0;
-	u32 fifo_status = 0;
-	u32 ack_error = 0;
-	u32 timeout_error = 0;
-	u32 clk_error = 0;
-	u32 dsi_status = 0;
-
-	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
-		ack_error |= BIT(16);
-	if (errors & DSI_RDBK_MULTI_ECC_ERR)
-		ack_error |= BIT(17);
-	if (errors & DSI_RDBK_CRC_ERR)
-		ack_error |= BIT(20);
-	if (errors & DSI_RDBK_INCOMPLETE_PKT)
-		ack_error |= BIT(23);
-	if (errors & DSI_PERIPH_ERROR_PKT)
-		ack_error |= BIT(24);
-	if (errors & DSI_PANEL_SPECIFIC_ERR)
-		ack_error |= BIT(15);
-
-	if (errors & DSI_LP_RX_TIMEOUT)
-		timeout_error |= BIT(4);
-	if (errors & DSI_HS_TX_TIMEOUT)
-		timeout_error |= BIT(0);
-	if (errors & DSI_BTA_TIMEOUT)
-		timeout_error |= BIT(8);
-
-	if (errors & DSI_PLL_UNLOCK)
-		clk_error |= BIT(16);
-
-	if (errors & DSI_DLN0_LP0_CONTENTION)
-		dln0_phy_err |= BIT(12);
-	if (errors & DSI_DLN0_LP1_CONTENTION)
-		dln0_phy_err |= BIT(16);
-	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
-		dln0_phy_err |= BIT(0);
-	if (errors & DSI_DLN0_ESC_SYNC_ERR)
-		dln0_phy_err |= BIT(4);
-	if (errors & DSI_DLN0_LP_CONTROL_ERR)
-		dln0_phy_err |= BIT(8);
-
-	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
-		fifo_status |= BIT(10);
-	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
-		fifo_status |= BIT(7);
-	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
-		fifo_status |= BIT(18);
-	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
-		fifo_status |= BIT(22);
-	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
-		fifo_status |= BIT(26);
-	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
-		fifo_status |= BIT(30);
-	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
-		fifo_status |= BIT(19);
-	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
-		fifo_status |= BIT(23);
-	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
-		fifo_status |= BIT(27);
-	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
-		fifo_status |= BIT(31);
-
-	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
-		dsi_status |= BIT(31);
-
-	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
-	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
-	/* Writing of an extra 0 is needed to clear ack error bits */
-	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
-	wmb(); /* make sure register is committed */
-	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, 0x0);
-	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
-	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
-	DSI_W32(ctrl, DSI_STATUS, dsi_status);
-
-	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x\n",
-		 ctrl->index, errors, dln0_phy_err, fifo_status);
-	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
-		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
-}
-
-/**
- * enable_error_interrupts() - enable the specified interrupts
- * @ctrl:          Pointer to the controller host hardware.
- * @errors:        List of errors to be enabled.
- *
- * Enables the specified interrupts. This list will override the
- * previous interrupts enabled through this function. Caller has to
- * maintain the state of the interrupts enabled. To disable all
- * interrupts, set errors to 0.
- */
-void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
-					    u64 errors)
-{
-	u32 int_ctrl = 0;
-	u32 int_mask0 = 0x7FFF3BFF;
-
-	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
-	if (errors)
-		int_ctrl |= BIT(25);
-	else
-		int_ctrl &= ~BIT(25);
-
-	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
-		int_mask0 &= ~BIT(0);
-	if (errors & DSI_RDBK_MULTI_ECC_ERR)
-		int_mask0 &= ~BIT(1);
-	if (errors & DSI_RDBK_CRC_ERR)
-		int_mask0 &= ~BIT(2);
-	if (errors & DSI_RDBK_INCOMPLETE_PKT)
-		int_mask0 &= ~BIT(3);
-	if (errors & DSI_PERIPH_ERROR_PKT)
-		int_mask0 &= ~BIT(4);
-
-	if (errors & DSI_LP_RX_TIMEOUT)
-		int_mask0 &= ~BIT(5);
-	if (errors & DSI_HS_TX_TIMEOUT)
-		int_mask0 &= ~BIT(6);
-	if (errors & DSI_BTA_TIMEOUT)
-		int_mask0 &= ~BIT(7);
-
-	if (errors & DSI_PLL_UNLOCK)
-		int_mask0 &= ~BIT(28);
-
-	if (errors & DSI_DLN0_LP0_CONTENTION)
-		int_mask0 &= ~BIT(24);
-	if (errors & DSI_DLN0_LP1_CONTENTION)
-		int_mask0 &= ~BIT(25);
-	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
-		int_mask0 &= ~BIT(21);
-	if (errors & DSI_DLN0_ESC_SYNC_ERR)
-		int_mask0 &= ~BIT(22);
-	if (errors & DSI_DLN0_LP_CONTROL_ERR)
-		int_mask0 &= ~BIT(23);
-
-	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(9);
-	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(11);
-	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
-		int_mask0 &= ~BIT(16);
-	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
-		int_mask0 &= ~BIT(17);
-	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
-		int_mask0 &= ~BIT(18);
-	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
-		int_mask0 &= ~BIT(19);
-	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(26);
-	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(27);
-	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(29);
-	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
-		int_mask0 &= ~BIT(30);
-
-	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
-		int_mask0 &= ~BIT(8);
-
-	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
-	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
-
-	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
-		 ctrl->index, errors, int_mask0);
-}
-
-/**
- * video_test_pattern_setup() - setup test pattern engine for video mode
- * @ctrl:          Pointer to the controller host hardware.
- * @type:          Type of test pattern.
- * @init_val:      Initial value to use for generating test pattern.
- */
-void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
-					     enum dsi_test_pattern type,
-					     u32 init_val)
-{
-	u32 reg = 0;
-
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
-
-	switch (type) {
-	case DSI_TEST_PATTERN_FIXED:
-		reg |= (0x2 << 4);
-		break;
-	case DSI_TEST_PATTERN_INC:
-		reg |= (0x1 << 4);
-		break;
-	case DSI_TEST_PATTERN_POLY:
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
-		break;
-	default:
-		break;
-	}
-
-	DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
-	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
-
-	pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
-}
-
-/**
- * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
- * @ctrl:          Pointer to the controller host hardware.
- * @type:          Type of test pattern.
- * @init_val:      Initial value to use for generating test pattern.
- * @stream_id:     Stream Id on which packets are generated.
- */
-void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
-					   enum dsi_test_pattern type,
-					   u32 init_val,
-					   u32 stream_id)
-{
-	u32 reg = 0;
-	u32 init_offset;
-	u32 poly_offset;
-	u32 pattern_sel_shift;
-
-	switch (stream_id) {
-	case 0:
-		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
-		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
-		pattern_sel_shift = 8;
-		break;
-	case 1:
-		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
-		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
-		pattern_sel_shift = 12;
-		break;
-	case 2:
-		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
-		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
-		pattern_sel_shift = 20;
-		break;
-	default:
-		return;
-	}
-
-	DSI_W32(ctrl, init_offset, init_val);
-
-	switch (type) {
-	case DSI_TEST_PATTERN_FIXED:
-		reg |= (0x2 << pattern_sel_shift);
-		break;
-	case DSI_TEST_PATTERN_INC:
-		reg |= (0x1 << pattern_sel_shift);
-		break;
-	case DSI_TEST_PATTERN_POLY:
-		DSI_W32(ctrl, poly_offset, 0xF0F0F);
-		break;
-	default:
-		break;
-	}
-
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
-	pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
-}
-
-/**
- * test_pattern_enable() - enable test pattern engine
- * @ctrl:          Pointer to the controller host hardware.
- * @enable:        Enable/Disable test pattern engine.
- */
-void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
-					bool enable)
-{
-	u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
-
-	if (enable)
-		reg |= BIT(0);
-	else
-		reg &= ~BIT(0);
-
-	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
-
-	pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
-}
-
-/**
- * trigger_cmd_test_pattern() - trigger a command mode frame update with
- *                              test pattern
- * @ctrl:          Pointer to the controller host hardware.
- * @stream_id:     Stream on which frame update is sent.
- */
-void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
-					     u32 stream_id)
-{
-	switch (stream_id) {
-	case 0:
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
-		break;
-	case 1:
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
-		break;
-	case 2:
-		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
-		break;
-	default:
-		break;
-	}
-
-	pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
-}
-
-void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl)
-{
-	u32 status = 0;
-	/*
-	 * Clear out any phy errors prior to exiting ULPS
-	 * This fixes certain instances where phy does not exit
-	 * ULPS cleanly. Also, do not print error during such cases.
-	 */
-	status = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
-	if (status & 0x011111) {
-		DSI_W32(ctrl, DSI_DLN0_PHY_ERR, status);
-		pr_err("%s: phy_err_status = %x\n", __func__, status);
-	}
-}
-
-void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
-		bool enable)
-{
-	u32 reg = 0;
-
-	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
-
-	/* Mask/unmask disable PHY reset bit */
-	if (enable)
-		reg |= BIT(30);
-	else
-		reg &= ~BIT(30);
-
-	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
-}
-
-int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
-		int mask)
-{
-	int rc = 0;
-	u32 data;
-
-	pr_debug("DSI CTRL and PHY reset. ctrl-num = %d %d\n",
-			ctrl->index, mask);
-
-	data = DSI_R32(ctrl, 0x0004);
-	/* Disable DSI video mode */
-	DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
-	wmb(); /* ensure register committed */
-	/* Disable DSI controller */
-	DSI_W32(ctrl, 0x004, (data & ~(BIT(0) | BIT(1))));
-	wmb(); /* ensure register committed */
-	/* "Force On" all dynamic clocks */
-	DSI_W32(ctrl, 0x11c, 0x100a00);
-
-	/* DSI_SW_RESET */
-	DSI_W32(ctrl, 0x118, 0x1);
-	wmb(); /* ensure register is committed */
-	DSI_W32(ctrl, 0x118, 0x0);
-	wmb(); /* ensure register is committed */
-
-	/* Remove "Force On" all dynamic clocks */
-	DSI_W32(ctrl, 0x11c, 0x00);
-	/* Enable DSI controller */
-	DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
-	wmb(); /* ensure register committed */
-
-	return rc;
-}
-
-void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
-{
-	u32 reg = 0;
-	u32 fifo_status = 0, timeout_status = 0;
-	u32 overflow_clear = BIT(10) | BIT(18) | BIT(22) | BIT(26) | BIT(30);
-	u32 underflow_clear = BIT(19) | BIT(23) | BIT(27) | BIT(31);
-	u32 lp_rx_clear = BIT(4);
-
-	reg = DSI_R32(ctrl, 0x10c);
-
-	/*
-	 * Before unmasking we should clear the corresponding error status bits
-	 * that might have been set while we masked these errors. Since these
-	 * are sticky bits, these errors will trigger the moment we unmask
-	 * the error bits.
-	 */
-	if (idx & BIT(DSI_FIFO_OVERFLOW)) {
-		if (en) {
-			reg |= (0x1f << 16);
-			reg |= BIT(9);
-		} else {
-			reg &= ~(0x1f << 16);
-			reg &= ~BIT(9);
-			fifo_status = DSI_R32(ctrl, 0x00c);
-			DSI_W32(ctrl, 0x00c, fifo_status | overflow_clear);
-		}
-	}
-
-	if (idx & BIT(DSI_FIFO_UNDERFLOW)) {
-		if (en)
-			reg |= (0x1b << 26);
-		else {
-			reg &= ~(0x1b << 26);
-			fifo_status = DSI_R32(ctrl, 0x00c);
-			DSI_W32(ctrl, 0x00c, fifo_status | underflow_clear);
-		}
-	}
-
-	if (idx & BIT(DSI_LP_Rx_TIMEOUT)) {
-		if (en)
-			reg |= (0x7 << 23);
-		else {
-			reg &= ~(0x7 << 23);
-			timeout_status = DSI_R32(ctrl, 0x0c0);
-			DSI_W32(ctrl, 0x0c0, timeout_status | lp_rx_clear);
-		}
-	}
-
-	DSI_W32(ctrl, 0x10c, reg);
-	wmb(); /* ensure error is masked */
-}
-
-void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en)
-{
-	u32 reg = 0;
-	u32 dsi_total_mask = 0x2222AA02;
-
-	reg = DSI_R32(ctrl, 0x110);
-	reg &= dsi_total_mask;
-
-	if (en)
-		reg |= (BIT(24) | BIT(25));
-	else
-		reg &= ~BIT(25);
-
-	DSI_W32(ctrl, 0x110, reg);
-	wmb(); /* ensure error is masked */
-}
-
-u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, 0x10c);
-
-	return reg;
-}
-
-u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, 0x0);
-
-	return reg;
-}
-
-int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl)
-{
-	int rc = 0, val = 0;
-	u32 cmd_mode_mdp_busy_mask = BIT(2);
-	u32 const sleep_us = 2 * 1000;
-	u32 const timeout_us = 200 * 1000;
-
-	rc = readl_poll_timeout(ctrl->base + DSI_STATUS, val,
-			!(val & cmd_mode_mdp_busy_mask), sleep_us, timeout_us);
-	if (rc)
-		pr_err("%s: timed out waiting for idle\n", __func__);
-
-	return rc;
-}
-
-void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable)
-{
-	u32 reg = 0;
-
-	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
-	if (enable)
-		reg |= BIT(28);
-	else
-		reg &= ~BIT(28);
-	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
-	wmb(); /* make sure request is set */
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
deleted file mode 100644
index 7794ef8..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_CTRL_REG_H_
-#define _DSI_CTRL_REG_H_
-
-#define DSI_HW_VERSION                             (0x0000)
-#define DSI_CTRL                                   (0x0004)
-#define DSI_STATUS                                 (0x0008)
-#define DSI_FIFO_STATUS                            (0x000C)
-#define DSI_VIDEO_MODE_CTRL                        (0x0010)
-#define DSI_VIDEO_MODE_SYNC_DATATYPE               (0x0014)
-#define DSI_VIDEO_MODE_PIXEL_DATATYPE              (0x0018)
-#define DSI_VIDEO_MODE_BLANKING_DATATYPE           (0x001C)
-#define DSI_VIDEO_MODE_DATA_CTRL                   (0x0020)
-#define DSI_VIDEO_MODE_ACTIVE_H                    (0x0024)
-#define DSI_VIDEO_MODE_ACTIVE_V                    (0x0028)
-#define DSI_VIDEO_MODE_TOTAL                       (0x002C)
-#define DSI_VIDEO_MODE_HSYNC                       (0x0030)
-#define DSI_VIDEO_MODE_VSYNC                       (0x0034)
-#define DSI_VIDEO_MODE_VSYNC_VPOS                  (0x0038)
-#define DSI_COMMAND_MODE_DMA_CTRL                  (0x003C)
-#define DSI_COMMAND_MODE_MDP_CTRL                  (0x0040)
-#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL          (0x0044)
-#define DSI_DMA_CMD_OFFSET                         (0x0048)
-#define DSI_DMA_CMD_LENGTH                         (0x004C)
-#define DSI_DMA_FIFO_CTRL                          (0x0050)
-#define DSI_DMA_NULL_PACKET_DATA                   (0x0054)
-#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL          (0x0058)
-#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL         (0x005C)
-#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL          (0x0060)
-#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL         (0x0064)
-#define DSI_ACK_ERR_STATUS                         (0x0068)
-#define DSI_RDBK_DATA0                             (0x006C)
-#define DSI_RDBK_DATA1                             (0x0070)
-#define DSI_RDBK_DATA2                             (0x0074)
-#define DSI_RDBK_DATA3                             (0x0078)
-#define DSI_RDBK_DATATYPE0                         (0x007C)
-#define DSI_RDBK_DATATYPE1                         (0x0080)
-#define DSI_TRIG_CTRL                              (0x0084)
-#define DSI_EXT_MUX                                (0x0088)
-#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL           (0x008C)
-#define DSI_CMD_MODE_DMA_SW_TRIGGER                (0x0090)
-#define DSI_CMD_MODE_MDP_SW_TRIGGER                (0x0094)
-#define DSI_CMD_MODE_BTA_SW_TRIGGER                (0x0098)
-#define DSI_RESET_SW_TRIGGER                       (0x009C)
-#define DSI_MISR_CMD_CTRL                          (0x00A0)
-#define DSI_MISR_VIDEO_CTRL                        (0x00A4)
-#define DSI_LANE_STATUS                            (0x00A8)
-#define DSI_LANE_CTRL                              (0x00AC)
-#define DSI_LANE_SWAP_CTRL                         (0x00B0)
-#define DSI_DLN0_PHY_ERR                           (0x00B4)
-#define DSI_LP_TIMER_CTRL                          (0x00B8)
-#define DSI_HS_TIMER_CTRL                          (0x00BC)
-#define DSI_TIMEOUT_STATUS                         (0x00C0)
-#define DSI_CLKOUT_TIMING_CTRL                     (0x00C4)
-#define DSI_EOT_PACKET                             (0x00C8)
-#define DSI_EOT_PACKET_CTRL                        (0x00CC)
-#define DSI_GENERIC_ESC_TX_TRIGGER                 (0x00D0)
-#define DSI_CAM_BIST_CTRL                          (0x00D4)
-#define DSI_CAM_BIST_FRAME_SIZE                    (0x00D8)
-#define DSI_CAM_BIST_BLOCK_SIZE                    (0x00DC)
-#define DSI_CAM_BIST_FRAME_CONFIG                  (0x00E0)
-#define DSI_CAM_BIST_LSFR_CTRL                     (0x00E4)
-#define DSI_CAM_BIST_LSFR_INIT                     (0x00E8)
-#define DSI_CAM_BIST_START                         (0x00EC)
-#define DSI_CAM_BIST_STATUS                        (0x00F0)
-#define DSI_ERR_INT_MASK0                          (0x010C)
-#define DSI_INT_CTRL                               (0x0110)
-#define DSI_IOBIST_CTRL                            (0x0114)
-#define DSI_SOFT_RESET                             (0x0118)
-#define DSI_CLK_CTRL                               (0x011C)
-#define DSI_CLK_STATUS                             (0x0120)
-#define DSI_DEBUG_BUS_CTL                          (0x0124)
-#define DSI_DEBUG_BUS_STATUS                       (0x0128)
-#define DSI_PHY_SW_RESET                           (0x012C)
-#define DSI_AXI2AHB_CTRL                           (0x0130)
-#define DSI_MISR_CMD_MDP0_32BIT                    (0x0134)
-#define DSI_MISR_CMD_MDP1_32BIT                    (0x0138)
-#define DSI_MISR_CMD_DMA_32BIT                     (0x013C)
-#define DSI_MISR_VIDEO_32BIT                       (0x0140)
-#define DSI_LANE_MISR_CTRL                         (0x0144)
-#define DSI_LANE0_MISR                             (0x0148)
-#define DSI_LANE1_MISR                             (0x014C)
-#define DSI_LANE2_MISR                             (0x0150)
-#define DSI_LANE3_MISR                             (0x0154)
-#define DSI_TEST_PATTERN_GEN_CTRL                  (0x015C)
-#define DSI_TEST_PATTERN_GEN_VIDEO_POLY            (0x0160)
-#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL        (0x0164)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY  (0x0168)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0     (0x016C)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY  (0x0170)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1     (0x0174)
-#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY          (0x0178)
-#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL      (0x017C)
-#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE          (0x0180)
-#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER   (0x0184)
-#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER   (0x0188)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2     (0x018C)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY  (0x0190)
-#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY  (0x0190)
-#define DSI_COMMAND_MODE_MDP_IDLE_CTRL             (0x0194)
-#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER   (0x0198)
-#define DSI_TPG_MAIN_CONTROL                       (0x019C)
-#define DSI_TPG_MAIN_CONTROL2                      (0x01A0)
-#define DSI_TPG_VIDEO_CONFIG                       (0x01A4)
-#define DSI_TPG_COMPONENT_LIMITS                   (0x01A8)
-#define DSI_TPG_RECTANGLE                          (0x01AC)
-#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES         (0x01B0)
-#define DSI_TPG_RGB_MAPPING                        (0x01B4)
-#define DSI_COMMAND_MODE_MDP_CTRL2                 (0x01B8)
-#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL          (0x01BC)
-#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL         (0x01C0)
-#define DSI_MISR_CMD_MDP2_8BIT                     (0x01C4)
-#define DSI_MISR_CMD_MDP2_32BIT                    (0x01C8)
-#define DSI_VBIF_CTRL                              (0x01CC)
-#define DSI_AES_CTRL                               (0x01D0)
-#define DSI_RDBK_DATA_CTRL                         (0x01D4)
-#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2     (0x01D8)
-#define DSI_TPG_DMA_FIFO_STATUS                    (0x01DC)
-#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER             (0x01E0)
-#define DSI_DSI_TIMING_FLUSH                       (0x01E4)
-#define DSI_DSI_TIMING_DB_MODE                     (0x01E8)
-#define DSI_TPG_DMA_FIFO_RESET                     (0x01EC)
-#define DSI_SCRATCH_REGISTER_0                     (0x01F0)
-#define DSI_VERSION                                (0x01F4)
-#define DSI_SCRATCH_REGISTER_1                     (0x01F8)
-#define DSI_SCRATCH_REGISTER_2                     (0x01FC)
-#define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
-#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
-#define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL2         (0x02AC)
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL3         (0x02B0)
-#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL       (0x02B4)
-#define DSI_READ_BACK_DISABLE_STATUS               (0x02B8)
-#define DSI_DESKEW_CTRL                            (0x02BC)
-#define DSI_DESKEW_DELAY_CTRL                      (0x02C0)
-#define DSI_DESKEW_SW_TRIGGER                      (0x02C4)
-#define DSI_DEBUG_CTRL                             (0x02C8)
-#define DSI_SECURE_DISPLAY_STATUS                  (0x02CC)
-#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR     (0x02D0)
-#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR       (0x02D4)
-#define DSI_LOGICAL_LANE_SWAP_CTRL                 (0x0310)
-
-
-#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
deleted file mode 100644
index 31a90b5..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ /dev/null
@@ -1,625 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_DEFS_H_
-#define _DSI_DEFS_H_
-
-#include <linux/types.h>
-#include <drm/drm_mipi_dsi.h>
-#include "msm_drv.h"
-
-#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
-			((t)->h_sync_width) + ((t)->h_front_porch))
-
-#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
-			((t)->v_sync_width) + ((t)->v_front_porch))
-
-#define DSI_H_TOTAL_DSC(t) \
-	({\
-		u64 value;\
-		if ((t)->dsc_enabled && (t)->dsc)\
-			value = (t)->dsc->pclk_per_line;\
-		else\
-			value = (t)->h_active;\
-		value = value + (t)->h_back_porch + (t)->h_sync_width +\
-			(t)->h_front_porch;\
-		value;\
-	})
-
-#define DSI_DEBUG_NAME_LEN		32
-#define display_for_each_ctrl(index, display) \
-	for (index = 0; (index < (display)->ctrl_count) &&\
-			(index < MAX_DSI_CTRLS_PER_DISPLAY); index++)
-/**
- * enum dsi_pixel_format - DSI pixel formats
- * @DSI_PIXEL_FORMAT_RGB565:
- * @DSI_PIXEL_FORMAT_RGB666:
- * @DSI_PIXEL_FORMAT_RGB666_LOOSE:
- * @DSI_PIXEL_FORMAT_RGB888:
- * @DSI_PIXEL_FORMAT_RGB111:
- * @DSI_PIXEL_FORMAT_RGB332:
- * @DSI_PIXEL_FORMAT_RGB444:
- * @DSI_PIXEL_FORMAT_MAX:
- */
-enum dsi_pixel_format {
-	DSI_PIXEL_FORMAT_RGB565 = 0,
-	DSI_PIXEL_FORMAT_RGB666,
-	DSI_PIXEL_FORMAT_RGB666_LOOSE,
-	DSI_PIXEL_FORMAT_RGB888,
-	DSI_PIXEL_FORMAT_RGB111,
-	DSI_PIXEL_FORMAT_RGB332,
-	DSI_PIXEL_FORMAT_RGB444,
-	DSI_PIXEL_FORMAT_MAX
-};
-
-/**
- * enum dsi_op_mode - dsi operation mode
- * @DSI_OP_VIDEO_MODE: DSI video mode operation
- * @DSI_OP_CMD_MODE:   DSI Command mode operation
- * @DSI_OP_MODE_MAX:
- */
-enum dsi_op_mode {
-	DSI_OP_VIDEO_MODE = 0,
-	DSI_OP_CMD_MODE,
-	DSI_OP_MODE_MAX
-};
-
-/**
- * enum dsi_mode_flags - flags to signal other drm components via private flags
- * @DSI_MODE_FLAG_SEAMLESS:	Seamless transition requested by user
- * @DSI_MODE_FLAG_DFPS:		Seamless transition is DynamicFPS
- * @DSI_MODE_FLAG_VBLANK_PRE_MODESET:	Transition needs VBLANK before Modeset
- * @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
- * @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
- *                     New timing values are sent from DAL.
- */
-enum dsi_mode_flags {
-	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
-	DSI_MODE_FLAG_DFPS			= BIT(1),
-	DSI_MODE_FLAG_VBLANK_PRE_MODESET	= BIT(2),
-	DSI_MODE_FLAG_DMS			= BIT(3),
-	DSI_MODE_FLAG_VRR			= BIT(4),
-};
-
-/**
- * enum dsi_logical_lane - dsi logical lanes
- * @DSI_LOGICAL_LANE_0:     Logical lane 0
- * @DSI_LOGICAL_LANE_1:     Logical lane 1
- * @DSI_LOGICAL_LANE_2:     Logical lane 2
- * @DSI_LOGICAL_LANE_3:     Logical lane 3
- * @DSI_LOGICAL_CLOCK_LANE: Clock lane
- * @DSI_LANE_MAX:           Maximum lanes supported
- */
-enum dsi_logical_lane {
-	DSI_LOGICAL_LANE_0 = 0,
-	DSI_LOGICAL_LANE_1,
-	DSI_LOGICAL_LANE_2,
-	DSI_LOGICAL_LANE_3,
-	DSI_LOGICAL_CLOCK_LANE,
-	DSI_LANE_MAX
-};
-
-/**
- * enum dsi_data_lanes - BIT map for DSI data lanes
- * This is used to identify the active DSI data lanes for
- * various operations like DSI data lane enable/ULPS/clamp
- * configurations.
- * @DSI_DATA_LANE_0: BIT(DSI_LOGICAL_LANE_0)
- * @DSI_DATA_LANE_1: BIT(DSI_LOGICAL_LANE_1)
- * @DSI_DATA_LANE_2: BIT(DSI_LOGICAL_LANE_2)
- * @DSI_DATA_LANE_3: BIT(DSI_LOGICAL_LANE_3)
- * @DSI_CLOCK_LANE:  BIT(DSI_LOGICAL_CLOCK_LANE)
- */
-enum dsi_data_lanes {
-	DSI_DATA_LANE_0 = BIT(DSI_LOGICAL_LANE_0),
-	DSI_DATA_LANE_1 = BIT(DSI_LOGICAL_LANE_1),
-	DSI_DATA_LANE_2 = BIT(DSI_LOGICAL_LANE_2),
-	DSI_DATA_LANE_3 = BIT(DSI_LOGICAL_LANE_3),
-	DSI_CLOCK_LANE  = BIT(DSI_LOGICAL_CLOCK_LANE)
-};
-
-/**
- * enum dsi_phy_data_lanes - dsi physical lanes
- * used for DSI logical to physical lane mapping
- * @DSI_PHYSICAL_LANE_INVALID: Physical lane valid/invalid
- * @DSI_PHYSICAL_LANE_0: Physical lane 0
- * @DSI_PHYSICAL_LANE_1: Physical lane 1
- * @DSI_PHYSICAL_LANE_2: Physical lane 2
- * @DSI_PHYSICAL_LANE_3: Physical lane 3
- */
-enum dsi_phy_data_lanes {
-	DSI_PHYSICAL_LANE_INVALID = 0,
-	DSI_PHYSICAL_LANE_0 = BIT(0),
-	DSI_PHYSICAL_LANE_1 = BIT(1),
-	DSI_PHYSICAL_LANE_2 = BIT(2),
-	DSI_PHYSICAL_LANE_3  = BIT(3)
-};
-
-enum dsi_lane_map_type_v1 {
-	DSI_LANE_MAP_0123,
-	DSI_LANE_MAP_3012,
-	DSI_LANE_MAP_2301,
-	DSI_LANE_MAP_1230,
-	DSI_LANE_MAP_0321,
-	DSI_LANE_MAP_1032,
-	DSI_LANE_MAP_2103,
-	DSI_LANE_MAP_3210,
-};
-
-/**
- * lane_map: DSI logical <-> physical lane mapping
- * lane_map_v1: Lane mapping for DSI controllers < v2.0
- * lane_map_v2: Lane mapping for DSI controllers >= 2.0
- */
-struct dsi_lane_map {
-	enum dsi_lane_map_type_v1 lane_map_v1;
-	u8 lane_map_v2[DSI_LANE_MAX - 1];
-};
-
-/**
- * enum dsi_trigger_type - dsi trigger type
- * @DSI_TRIGGER_NONE:     No trigger.
- * @DSI_TRIGGER_TE:       TE trigger.
- * @DSI_TRIGGER_SEOF:     Start or End of frame.
- * @DSI_TRIGGER_SW:       Software trigger.
- * @DSI_TRIGGER_SW_SEOF:  Software trigger and start/end of frame.
- * @DSI_TRIGGER_SW_TE:    Software and TE triggers.
- * @DSI_TRIGGER_MAX:      Max trigger values.
- */
-enum dsi_trigger_type {
-	DSI_TRIGGER_NONE = 0,
-	DSI_TRIGGER_TE,
-	DSI_TRIGGER_SEOF,
-	DSI_TRIGGER_SW,
-	DSI_TRIGGER_SW_SEOF,
-	DSI_TRIGGER_SW_TE,
-	DSI_TRIGGER_MAX
-};
-
-/**
- * enum dsi_color_swap_mode - color swap mode
- * @DSI_COLOR_SWAP_RGB:
- * @DSI_COLOR_SWAP_RBG:
- * @DSI_COLOR_SWAP_BGR:
- * @DSI_COLOR_SWAP_BRG:
- * @DSI_COLOR_SWAP_GRB:
- * @DSI_COLOR_SWAP_GBR:
- */
-enum dsi_color_swap_mode {
-	DSI_COLOR_SWAP_RGB = 0,
-	DSI_COLOR_SWAP_RBG,
-	DSI_COLOR_SWAP_BGR,
-	DSI_COLOR_SWAP_BRG,
-	DSI_COLOR_SWAP_GRB,
-	DSI_COLOR_SWAP_GBR
-};
-
-/**
- * enum dsi_dfps_type - Dynamic FPS support type
- * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
- * @DSI_DFPS_SUSPEND_RESUME:
- * @DSI_DFPS_IMMEDIATE_CLK:
- * @DSI_DFPS_IMMEDIATE_HFP:
- * @DSI_DFPS_IMMEDIATE_VFP:
- * @DSI_DPFS_MAX:
- */
-enum dsi_dfps_type {
-	DSI_DFPS_NONE = 0,
-	DSI_DFPS_SUSPEND_RESUME,
-	DSI_DFPS_IMMEDIATE_CLK,
-	DSI_DFPS_IMMEDIATE_HFP,
-	DSI_DFPS_IMMEDIATE_VFP,
-	DSI_DFPS_MAX
-};
-
-/**
- * enum dsi_cmd_set_type  - DSI command set type
- * @DSI_CMD_SET_PRE_ON:	                   Panel pre on
- * @DSI_CMD_SET_ON:                        Panel on
- * @DSI_CMD_SET_POST_ON:                   Panel post on
- * @DSI_CMD_SET_PRE_OFF:                   Panel pre off
- * @DSI_CMD_SET_OFF:                       Panel off
- * @DSI_CMD_SET_POST_OFF:                  Panel post off
- * @DSI_CMD_SET_PRE_RES_SWITCH:            Pre resolution switch
- * @DSI_CMD_SET_RES_SWITCH:                Resolution switch
- * @DSI_CMD_SET_POST_RES_SWITCH:           Post resolution switch
- * @DSI_CMD_SET_CMD_TO_VID_SWITCH:         Cmd to video mode switch
- * @DSI_CMD_SET_POST_CMD_TO_VID_SWITCH:    Post cmd to vid switch
- * @DSI_CMD_SET_VID_TO_CMD_SWITCH:         Video to cmd mode switch
- * @DSI_CMD_SET_POST_VID_TO_CMD_SWITCH:    Post vid to cmd switch
- * @DSI_CMD_SET_PANEL_STATUS:              Panel status
- * @DSI_CMD_SET_LP1:                       Low power mode 1
- * @DSI_CMD_SET_LP2:                       Low power mode 2
- * @DSI_CMD_SET_NOLP:                      Low power mode disable
- * @DSI_CMD_SET_PPS:                       DSC PPS command
- * @DSI_CMD_SET_ROI:			   Panel ROI update
- * @DSI_CMD_SET_TIMING_SWITCH:             Timing switch
- * @DSI_CMD_SET_POST_TIMING_SWITCH:        Post timing switch
- * @DSI_CMD_SET_QSYNC_ON                   Enable qsync mode
- * @DSI_CMD_SET_QSYNC_OFF                  Disable qsync mode
- * @DSI_CMD_SET_MAX
- */
-enum dsi_cmd_set_type {
-	DSI_CMD_SET_PRE_ON = 0,
-	DSI_CMD_SET_ON,
-	DSI_CMD_SET_POST_ON,
-	DSI_CMD_SET_PRE_OFF,
-	DSI_CMD_SET_OFF,
-	DSI_CMD_SET_POST_OFF,
-	DSI_CMD_SET_PRE_RES_SWITCH,
-	DSI_CMD_SET_RES_SWITCH,
-	DSI_CMD_SET_POST_RES_SWITCH,
-	DSI_CMD_SET_CMD_TO_VID_SWITCH,
-	DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
-	DSI_CMD_SET_VID_TO_CMD_SWITCH,
-	DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
-	DSI_CMD_SET_PANEL_STATUS,
-	DSI_CMD_SET_LP1,
-	DSI_CMD_SET_LP2,
-	DSI_CMD_SET_NOLP,
-	DSI_CMD_SET_PPS,
-	DSI_CMD_SET_ROI,
-	DSI_CMD_SET_TIMING_SWITCH,
-	DSI_CMD_SET_POST_TIMING_SWITCH,
-	DSI_CMD_SET_QSYNC_ON,
-	DSI_CMD_SET_QSYNC_OFF,
-	DSI_CMD_SET_MAX
-};
-
-/**
- * enum dsi_cmd_set_state - command set state
- * @DSI_CMD_SET_STATE_LP:   dsi low power mode
- * @DSI_CMD_SET_STATE_HS:   dsi high speed mode
- * @DSI_CMD_SET_STATE_MAX
- */
-enum dsi_cmd_set_state {
-	DSI_CMD_SET_STATE_LP = 0,
-	DSI_CMD_SET_STATE_HS,
-	DSI_CMD_SET_STATE_MAX
-};
-
-/**
- * enum dsi_clk_gate_type - Type of clock to be gated.
- * @PIXEL_CLK:  DSI pixel clock.
- * @BYTE_CLK:   DSI byte clock.
- * @DSI_PHY:    DSI PHY.
- */
-enum dsi_clk_gate_type {
-	PIXEL_CLK = 1,
-	BYTE_CLK = 2,
-	DSI_PHY = 4,
-};
-
-/**
- * enum dsi_phy_type - DSI phy types
- * @DSI_PHY_TYPE_DPHY:
- * @DSI_PHY_TYPE_CPHY:
- */
-enum dsi_phy_type {
-	DSI_PHY_TYPE_DPHY,
-	DSI_PHY_TYPE_CPHY
-};
-
-/**
- * enum dsi_te_mode - dsi te source
- * @DSI_TE_ON_DATA_LINK:    TE read from DSI link
- * @DSI_TE_ON_EXT_PIN:      TE signal on an external GPIO
- */
-enum dsi_te_mode {
-	DSI_TE_ON_DATA_LINK = 0,
-	DSI_TE_ON_EXT_PIN,
-};
-
-/**
- * enum dsi_video_traffic_mode - video mode pixel transmission type
- * @DSI_VIDEO_TRAFFIC_SYNC_PULSES:       Non-burst mode with sync pulses.
- * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
- * @DSI_VIDEO_TRAFFIC_BURST_MODE:        Burst mode using sync start events.
- */
-enum dsi_video_traffic_mode {
-	DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
-	DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
-	DSI_VIDEO_TRAFFIC_BURST_MODE,
-};
-
-/**
- * struct dsi_cmd_desc - description of a dsi command
- * @msg:		dsi mipi msg packet
- * @last_command:   indicates whether the cmd is the last one to send
- * @post_wait_ms:   post wait duration
- */
-struct dsi_cmd_desc {
-	struct mipi_dsi_msg msg;
-	bool last_command;
-	u32  post_wait_ms;
-};
-
-/**
- * struct dsi_panel_cmd_set - command set of the panel
- * @type:      type of the command
- * @state:     state of the command
- * @count:     number of cmds
- * @ctrl_idx:  index of the dsi control
- * @cmds:      arry of cmds
- */
-struct dsi_panel_cmd_set {
-	enum dsi_cmd_set_type type;
-	enum dsi_cmd_set_state state;
-	u32 count;
-	u32 ctrl_idx;
-	struct dsi_cmd_desc *cmds;
-};
-
-/**
- * struct dsi_mode_info - video mode information dsi frame
- * @h_active:         Active width of one frame in pixels.
- * @h_back_porch:     Horizontal back porch in pixels.
- * @h_sync_width:     HSYNC width in pixels.
- * @h_front_porch:    Horizontal fron porch in pixels.
- * @h_skew:
- * @h_sync_polarity:  Polarity of HSYNC (false is active low).
- * @v_active:         Active height of one frame in lines.
- * @v_back_porch:     Vertical back porch in lines.
- * @v_sync_width:     VSYNC width in lines.
- * @v_front_porch:    Vertical front porch in lines.
- * @v_sync_polarity:  Polarity of VSYNC (false is active low).
- * @refresh_rate:     Refresh rate in Hz.
- * @clk_rate_hz:      DSI bit clock rate per lane in Hz.
- * @mdp_transfer_time_us:   Specifies the mdp transfer time for command mode
- *                    panels in microseconds.
- * @dsc_enabled:      DSC compression enabled.
- * @dsc:              DSC compression configuration.
- * @roi_caps:         Panel ROI capabilities.
- */
-struct dsi_mode_info {
-	u32 h_active;
-	u32 h_back_porch;
-	u32 h_sync_width;
-	u32 h_front_porch;
-	u32 h_skew;
-	bool h_sync_polarity;
-
-	u32 v_active;
-	u32 v_back_porch;
-	u32 v_sync_width;
-	u32 v_front_porch;
-	bool v_sync_polarity;
-
-	u32 refresh_rate;
-	u64 clk_rate_hz;
-	u32 mdp_transfer_time_us;
-	bool dsc_enabled;
-	struct msm_display_dsc_info *dsc;
-	struct msm_roi_caps roi_caps;
-};
-
-/**
- * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
- * @dst_format:          Destination pixel format.
- * @data_lanes:          Physical data lanes to be enabled.
- * @en_crc_check:        Enable CRC checks.
- * @en_ecc_check:        Enable ECC checks.
- * @te_mode:             Source for TE signalling.
- * @mdp_cmd_trigger:     MDP frame update trigger for command mode.
- * @dma_cmd_trigger:     Command DMA trigger.
- * @cmd_trigger_stream:  Command mode stream to trigger.
- * @swap_mode:           DSI color swap mode.
- * @bit_swap_read:       Is red color bit swapped.
- * @bit_swap_green:      Is green color bit swapped.
- * @bit_swap_blue:       Is blue color bit swapped.
- * @t_clk_post:          Number of byte clock cycles that the transmitter shall
- *                       continue sending after last data lane has transitioned
- *                       to LP mode.
- * @t_clk_pre:           Number of byte clock cycles that the high spped clock
- *                       shall be driven prior to data lane transitions from LP
- *                       to HS mode.
- * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
- * @append_tx_eot:       Append EOT packets for forward transmissions if set to
- *                       true.
- * @ext_bridge_mode:     External bridge is connected.
- * @force_hs_clk_lane:   Send continuous clock to the panel.
- */
-struct dsi_host_common_cfg {
-	enum dsi_pixel_format dst_format;
-	enum dsi_data_lanes data_lanes;
-	bool en_crc_check;
-	bool en_ecc_check;
-	enum dsi_te_mode te_mode;
-	enum dsi_trigger_type mdp_cmd_trigger;
-	enum dsi_trigger_type dma_cmd_trigger;
-	u32 cmd_trigger_stream;
-	enum dsi_color_swap_mode swap_mode;
-	bool bit_swap_red;
-	bool bit_swap_green;
-	bool bit_swap_blue;
-	u32 t_clk_post;
-	u32 t_clk_pre;
-	bool ignore_rx_eot;
-	bool append_tx_eot;
-	bool ext_bridge_mode;
-	bool force_hs_clk_lane;
-};
-
-/**
- * struct dsi_video_engine_cfg - DSI video engine configuration
- * @last_line_interleave_en:   Allow command mode op interleaved on last line of
- *                             video stream.
- * @pulse_mode_hsa_he:         Send HSA and HE following VS/VE packet if set to
- *                             true.
- * @hfp_lp11_en:               Enter low power stop mode (LP-11) during HFP.
- * @hbp_lp11_en:               Enter low power stop mode (LP-11) during HBP.
- * @hsa_lp11_en:               Enter low power stop mode (LP-11) during HSA.
- * @eof_bllp_lp11_en:          Enter low power stop mode (LP-11) during BLLP of
- *                             last line of a frame.
- * @bllp_lp11_en:              Enter low power stop mode (LP-11) during BLLP.
- * @traffic_mode:              Traffic mode for video stream.
- * @vc_id:                     Virtual channel identifier.
- * @dma_sched_line:         Line number, after vactive end, at which command dma
- *			       needs to be triggered.
- */
-struct dsi_video_engine_cfg {
-	bool last_line_interleave_en;
-	bool pulse_mode_hsa_he;
-	bool hfp_lp11_en;
-	bool hbp_lp11_en;
-	bool hsa_lp11_en;
-	bool eof_bllp_lp11_en;
-	bool bllp_lp11_en;
-	bool force_clk_lane_hs;
-	enum dsi_video_traffic_mode traffic_mode;
-	u32 vc_id;
-	u32 dma_sched_line;
-};
-
-/**
- * struct dsi_cmd_engine_cfg - DSI command engine configuration
- * @max_cmd_packets_interleave     Maximum number of command mode RGB packets to
- *                                 send with in one horizontal blanking period
- *                                 of the video mode frame.
- * @wr_mem_start:                  DCS command for write_memory_start.
- * @wr_mem_continue:               DCS command for write_memory_continue.
- * @insert_dcs_command:            Insert DCS command as first byte of payload
- *                                 of the pixel data.
- */
-struct dsi_cmd_engine_cfg {
-	u32 max_cmd_packets_interleave;
-	u32 wr_mem_start;
-	u32 wr_mem_continue;
-	bool insert_dcs_command;
-};
-
-/**
- * struct dsi_host_config - DSI host configuration parameters.
- * @panel_mode:            Operation mode for panel (video or cmd mode).
- * @common_config:         Host configuration common to both Video and Cmd mode.
- * @video_engine:          Video engine configuration if panel is in video mode.
- * @cmd_engine:            Cmd engine configuration if panel is in cmd mode.
- * @esc_clk_rate_khz:      Esc clock frequency in Hz.
- * @bit_clk_rate_hz:       Bit clock frequency in Hz.
- * @bit_clk_rate_hz_override: DSI bit clk rate override from dt/sysfs.
- * @video_timing:          Video timing information of a frame.
- * @lane_map:              Mapping between logical and physical lanes.
- */
-struct dsi_host_config {
-	enum dsi_op_mode panel_mode;
-	struct dsi_host_common_cfg common_config;
-	union {
-		struct dsi_video_engine_cfg video_engine;
-		struct dsi_cmd_engine_cfg cmd_engine;
-	} u;
-	u64 esc_clk_rate_hz;
-	u64 bit_clk_rate_hz;
-	u64 bit_clk_rate_hz_override;
-	struct dsi_mode_info video_timing;
-	struct dsi_lane_map lane_map;
-};
-
-/**
- * struct dsi_display_mode_priv_info - private mode info that will be attached
- *                             with each drm mode
- * @cmd_sets:		  Command sets of the mode
- * @phy_timing_val:       Phy timing values
- * @phy_timing_len:       Phy timing array length
- * @panel_jitter:         Panel jitter for RSC backoff
- * @panel_prefill_lines:  Panel prefill lines for RSC
- * @mdp_transfer_time_us:   Specifies the mdp transfer time for command mode
- *                          panels in microseconds.
- * @clk_rate_hz:          DSI bit clock per lane in hz.
- * @topology:             Topology selected for the panel
- * @dsc:                  DSC compression info
- * @dsc_enabled:          DSC compression enabled
- * @roi_caps:		  Panel ROI capabilities
- */
-struct dsi_display_mode_priv_info {
-	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
-
-	u32 *phy_timing_val;
-	u32 phy_timing_len;
-
-	u32 panel_jitter_numer;
-	u32 panel_jitter_denom;
-	u32 panel_prefill_lines;
-	u32 mdp_transfer_time_us;
-	u64 clk_rate_hz;
-
-	struct msm_display_topology topology;
-	struct msm_display_dsc_info dsc;
-	bool dsc_enabled;
-	struct msm_roi_caps roi_caps;
-};
-
-/**
- * struct dsi_display_mode - specifies mode for dsi display
- * @timing:         Timing parameters for the panel.
- * @pixel_clk_khz:  Pixel clock in Khz.
- * @dsi_mode_flags: Flags to signal other drm components via private flags
- * @priv_info:      Mode private info
- */
-struct dsi_display_mode {
-	struct dsi_mode_info timing;
-	u32 pixel_clk_khz;
-	u32 dsi_mode_flags;
-	struct dsi_display_mode_priv_info *priv_info;
-};
-
-/**
- * struct dsi_rect - dsi rectangle representation
- * Note: sde_rect is also using u16, this must be maintained for memcpy
- */
-struct dsi_rect {
-	u16 x;
-	u16 y;
-	u16 w;
-	u16 h;
-};
-
-/**
- * dsi_rect_intersect - intersect two rectangles
- * @r1: first rectangle
- * @r2: scissor rectangle
- * @result: result rectangle, all 0's on no intersection found
- */
-void dsi_rect_intersect(const struct dsi_rect *r1,
-		const struct dsi_rect *r2,
-		struct dsi_rect *result);
-
-/**
- * dsi_rect_is_equal - compares two rects
- * @r1: rect value to compare
- * @r2: rect value to compare
- *
- * Returns true if the rects are same
- */
-static inline bool dsi_rect_is_equal(struct dsi_rect *r1,
-		struct dsi_rect *r2)
-{
-	return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
-			r1->h == r2->h;
-}
-
-struct dsi_event_cb_info {
-	uint32_t event_idx;
-	void *event_usr_ptr;
-
-	int (*event_cb)(void *event_usr_ptr,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3);
-};
-
-/**
- * enum dsi_error_status - various dsi errors
- * @DSI_FIFO_OVERFLOW:     DSI FIFO Overflow error
- * @DSI_FIFO_UNDERFLOW:    DSI FIFO Underflow error
- * @DSI_LP_Rx_TIMEOUT:     DSI LP/RX Timeout error
- */
-enum dsi_error_status {
-	DSI_FIFO_OVERFLOW = 1,
-	DSI_FIFO_UNDERFLOW,
-	DSI_LP_Rx_TIMEOUT,
-	DSI_ERR_INTR_ALL,
-};
-
-#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
deleted file mode 100644
index 0c1bfaa..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ /dev/null
@@ -1,7014 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"msm-dsi-display:[%s] " fmt, __func__
-
-#include <linux/list.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/err.h>
-
-#include "msm_drv.h"
-#include "sde_connector.h"
-#include "msm_mmu.h"
-#include "dsi_display.h"
-#include "dsi_panel.h"
-#include "dsi_ctrl.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_drm.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "sde_dbg.h"
-#include "dsi_parser.h"
-
-#define to_dsi_display(x) container_of(x, struct dsi_display, host)
-#define INT_BASE_10 10
-#define NO_OVERRIDE -1
-
-#define MISR_BUFF_SIZE	256
-#define ESD_MODE_STRING_MAX_LEN 256
-
-#define MAX_NAME_SIZE	64
-
-#define DSI_CLOCK_BITRATE_RADIX 10
-#define MAX_TE_SOURCE_ID  2
-
-static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
-static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
-static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY] = {
-	{.boot_param = dsi_display_primary},
-	{.boot_param = dsi_display_secondary},
-};
-
-static const struct of_device_id dsi_display_dt_match[] = {
-	{.compatible = "qcom,dsi-display"},
-	{}
-};
-
-static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display,
-			u32 mask, bool enable)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl)
-			continue;
-		dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl, mask, enable);
-	}
-}
-
-static int dsi_display_config_clk_gating(struct dsi_display *display,
-					bool enable)
-{
-	int rc = 0, i = 0;
-	struct dsi_display_ctrl *mctrl, *ctrl;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mctrl = &display->ctrl[display->clk_master_idx];
-	if (!mctrl) {
-		pr_err("Invalid controller\n");
-		return -EINVAL;
-	}
-
-	rc = dsi_ctrl_config_clk_gating(mctrl->ctrl, enable, PIXEL_CLK |
-							DSI_PHY);
-	if (rc) {
-		pr_err("[%s] failed to %s clk gating, rc=%d\n",
-				display->name, enable ? "enable" : "disable",
-				rc);
-		return rc;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == mctrl))
-			continue;
-		/**
-		 * In Split DSI usecase we should not enable clock gating on
-		 * DSI PHY1 to ensure no display atrifacts are seen.
-		 */
-		rc = dsi_ctrl_config_clk_gating(ctrl->ctrl, enable, PIXEL_CLK);
-		if (rc) {
-			pr_err("[%s] failed to %s pixel clk gating, rc=%d\n",
-				display->name, enable ? "enable" : "disable",
-				rc);
-			return rc;
-		}
-	}
-
-	return 0;
-}
-
-static void dsi_display_set_ctrl_esd_check_flag(struct dsi_display *display,
-			bool enable)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl)
-			continue;
-		ctrl->ctrl->esd_check_underway = enable;
-	}
-}
-
-static void dsi_display_ctrl_irq_update(struct dsi_display *display, bool en)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl)
-			continue;
-		dsi_ctrl_irq_update(ctrl->ctrl, en);
-	}
-}
-
-void dsi_rect_intersect(const struct dsi_rect *r1,
-		const struct dsi_rect *r2,
-		struct dsi_rect *result)
-{
-	int l, t, r, b;
-
-	if (!r1 || !r2 || !result)
-		return;
-
-	l = max(r1->x, r2->x);
-	t = max(r1->y, r2->y);
-	r = min((r1->x + r1->w), (r2->x + r2->w));
-	b = min((r1->y + r1->h), (r2->y + r2->h));
-
-	if (r <= l || b <= t) {
-		memset(result, 0, sizeof(*result));
-	} else {
-		result->x = l;
-		result->y = t;
-		result->w = r - l;
-		result->h = b - t;
-	}
-}
-
-int dsi_display_set_backlight(struct drm_connector *connector,
-		void *display, u32 bl_lvl)
-{
-	struct dsi_display *dsi_display = display;
-	struct dsi_panel *panel;
-	u32 bl_scale, bl_scale_sv;
-	u64 bl_temp;
-	int rc = 0;
-
-	if (dsi_display == NULL || dsi_display->panel == NULL)
-		return -EINVAL;
-
-	panel = dsi_display->panel;
-
-	mutex_lock(&panel->panel_lock);
-	if (!dsi_panel_initialized(panel)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	panel->bl_config.bl_level = bl_lvl;
-
-	/* scale backlight */
-	bl_scale = panel->bl_config.bl_scale;
-	bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
-
-	bl_scale_sv = panel->bl_config.bl_scale_sv;
-	bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
-
-	pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
-		bl_scale, bl_scale_sv, (u32)bl_temp);
-	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
-		       dsi_display->name, rc);
-		goto error;
-	}
-
-	rc = dsi_panel_set_backlight(panel, (u32)bl_temp);
-	if (rc)
-		pr_err("unable to set backlight\n");
-
-	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-	if (rc) {
-		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
-		       dsi_display->name, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-static int dsi_display_cmd_engine_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
-
-	if (display->cmd_engine_refcount > 0) {
-		display->cmd_engine_refcount++;
-		goto done;
-	}
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto done;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_ON);
-		if (rc) {
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	display->cmd_engine_refcount++;
-	goto done;
-error_disable_master:
-	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-done:
-	mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
-	return rc;
-}
-
-static int dsi_display_cmd_engine_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
-
-	if (display->cmd_engine_refcount == 0) {
-		pr_err("[%s] Invalid refcount\n", display->name);
-		goto done;
-	} else if (display->cmd_engine_refcount > 1) {
-		display->cmd_engine_refcount--;
-		goto done;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_OFF);
-		if (rc)
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	display->cmd_engine_refcount = 0;
-done:
-	mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
-	return rc;
-}
-
-static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach)
-{
-	struct dsi_display *display;
-	struct dsi_display_ctrl *display_ctrl;
-	int rc, cnt;
-
-	if (!cb_data) {
-		pr_err("aspace cb called with invalid cb_data\n");
-		return;
-	}
-	display = (struct dsi_display *)cb_data;
-
-	/*
-	 * acquire panel_lock to make sure no commands are in-progress
-	 * while detaching the non-secure context banks
-	 */
-	dsi_panel_acquire_panel_lock(display->panel);
-
-	if (is_detach) {
-		/* invalidate the stored iova */
-		display->cmd_buffer_iova = 0;
-
-		/* return the virtual address mapping */
-		msm_gem_put_vaddr(display->tx_cmd_buf);
-		msm_gem_vunmap(display->tx_cmd_buf, OBJ_LOCK_NORMAL);
-
-	} else {
-		rc = msm_gem_get_iova(display->tx_cmd_buf,
-				display->aspace, &(display->cmd_buffer_iova));
-		if (rc) {
-			pr_err("failed to get the iova rc %d\n", rc);
-			goto end;
-		}
-
-		display->vaddr =
-			(void *) msm_gem_get_vaddr(display->tx_cmd_buf);
-
-		if (IS_ERR_OR_NULL(display->vaddr)) {
-			pr_err("failed to get va rc %d\n", rc);
-			goto end;
-		}
-	}
-
-	display_for_each_ctrl(cnt, display) {
-		display_ctrl = &display->ctrl[cnt];
-		display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
-		display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
-		display_ctrl->ctrl->vaddr = display->vaddr;
-		display_ctrl->ctrl->secure_mode = is_detach;
-	}
-
-end:
-	/* release panel_lock */
-	dsi_panel_release_panel_lock(display->panel);
-}
-
-static irqreturn_t dsi_display_panel_te_irq_handler(int irq, void *data)
-{
-	struct dsi_display *display = (struct dsi_display *)data;
-
-	/*
-	 * This irq handler is used for sole purpose of identifying
-	 * ESD attacks on panel and we can safely assume IRQ_HANDLED
-	 * in case of display not being initialized yet
-	 */
-	if (!display)
-		return IRQ_HANDLED;
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
-	complete_all(&display->esd_te_gate);
-	return IRQ_HANDLED;
-}
-
-static void dsi_display_change_te_irq_status(struct dsi_display *display,
-					bool enable)
-{
-	if (!display) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	/* Handle unbalanced irq enable/disable calls */
-	if (enable && !display->is_te_irq_enabled) {
-		enable_irq(gpio_to_irq(display->disp_te_gpio));
-		display->is_te_irq_enabled = true;
-	} else if (!enable && display->is_te_irq_enabled) {
-		disable_irq(gpio_to_irq(display->disp_te_gpio));
-		display->is_te_irq_enabled = false;
-	}
-}
-
-static void dsi_display_register_te_irq(struct dsi_display *display)
-{
-	int rc = 0;
-	struct platform_device *pdev;
-	struct device *dev;
-	unsigned int te_irq;
-
-	pdev = display->pdev;
-	if (!pdev) {
-		pr_err("invalid platform device\n");
-		return;
-	}
-
-	dev = &pdev->dev;
-	if (!dev) {
-		pr_err("invalid device\n");
-		return;
-	}
-
-	if (!gpio_is_valid(display->disp_te_gpio)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	init_completion(&display->esd_te_gate);
-	te_irq = gpio_to_irq(display->disp_te_gpio);
-
-	/* Avoid deferred spurious irqs with disable_irq() */
-	irq_set_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
-
-	rc = devm_request_irq(dev, te_irq, dsi_display_panel_te_irq_handler,
-			      IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-			      "TE_GPIO", display);
-	if (rc) {
-		pr_err("TE request_irq failed for ESD rc:%d\n", rc);
-		irq_clear_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
-		goto error;
-	}
-
-	disable_irq(te_irq);
-	display->is_te_irq_enabled = false;
-
-	return;
-
-error:
-	/* disable the TE based ESD check */
-	pr_warn("Unable to register for TE IRQ\n");
-	if (display->panel->esd_config.status_mode == ESD_MODE_PANEL_TE)
-		display->panel->esd_config.esd_enabled = false;
-}
-
-static bool dsi_display_is_te_based_esd(struct dsi_display *display)
-{
-	u32 status_mode = 0;
-
-	if (!display->panel) {
-		pr_err("Invalid panel data\n");
-		return false;
-	}
-
-	status_mode = display->panel->esd_config.status_mode;
-
-	if (status_mode == ESD_MODE_PANEL_TE &&
-			gpio_is_valid(display->disp_te_gpio))
-		return true;
-	return false;
-}
-
-/* Allocate memory for cmd dma tx buffer */
-static int dsi_host_alloc_cmd_tx_buffer(struct dsi_display *display)
-{
-	int rc = 0, cnt = 0;
-	struct dsi_display_ctrl *display_ctrl;
-
-	display->tx_cmd_buf = msm_gem_new(display->drm_dev,
-			SZ_4K,
-			MSM_BO_UNCACHED);
-
-	if ((display->tx_cmd_buf) == NULL) {
-		pr_err("Failed to allocate cmd tx buf memory\n");
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	display->cmd_buffer_size = SZ_4K;
-
-	display->aspace = msm_gem_smmu_address_space_get(
-			display->drm_dev, MSM_SMMU_DOMAIN_UNSECURE);
-	if (!display->aspace) {
-		pr_err("failed to get aspace\n");
-		rc = -EINVAL;
-		goto free_gem;
-	}
-	/* register to aspace */
-	rc = msm_gem_address_space_register_cb(display->aspace,
-			dsi_display_aspace_cb_locked, (void *)display);
-	if (rc) {
-		pr_err("failed to register callback %d\n", rc);
-		goto free_gem;
-	}
-
-	rc = msm_gem_get_iova(display->tx_cmd_buf, display->aspace,
-				&(display->cmd_buffer_iova));
-	if (rc) {
-		pr_err("failed to get the iova rc %d\n", rc);
-		goto free_aspace_cb;
-	}
-
-	display->vaddr =
-		(void *) msm_gem_get_vaddr(display->tx_cmd_buf);
-	if (IS_ERR_OR_NULL(display->vaddr)) {
-		pr_err("failed to get va rc %d\n", rc);
-		rc = -EINVAL;
-		goto put_iova;
-	}
-
-	display_for_each_ctrl(cnt, display) {
-		display_ctrl = &display->ctrl[cnt];
-		display_ctrl->ctrl->cmd_buffer_size = SZ_4K;
-		display_ctrl->ctrl->cmd_buffer_iova =
-					display->cmd_buffer_iova;
-		display_ctrl->ctrl->vaddr = display->vaddr;
-		display_ctrl->ctrl->tx_cmd_buf = display->tx_cmd_buf;
-	}
-
-	return rc;
-
-put_iova:
-	msm_gem_put_iova(display->tx_cmd_buf, display->aspace);
-free_aspace_cb:
-	msm_gem_address_space_unregister_cb(display->aspace,
-			dsi_display_aspace_cb_locked, display);
-free_gem:
-	mutex_lock(&display->drm_dev->struct_mutex);
-	msm_gem_free_object(display->tx_cmd_buf);
-	mutex_unlock(&display->drm_dev->struct_mutex);
-error:
-	return rc;
-}
-
-static bool dsi_display_validate_reg_read(struct dsi_panel *panel)
-{
-	int i, j = 0;
-	int len = 0, *lenp;
-	int group = 0, count = 0;
-	struct drm_panel_esd_config *config;
-
-	if (!panel)
-		return false;
-
-	config = &(panel->esd_config);
-
-	lenp = config->status_valid_params ?: config->status_cmds_rlen;
-	count = config->status_cmd.count;
-
-	for (i = 0; i < count; i++)
-		len += lenp[i];
-
-	for (i = 0; i < len; i++)
-		j += len;
-
-	for (j = 0; j < config->groups; ++j) {
-		for (i = 0; i < len; ++i) {
-			if (config->return_buf[i] !=
-				config->status_value[group + i])
-				break;
-		}
-
-		if (i == len)
-			return true;
-		group += len;
-	}
-
-	return false;
-}
-
-static void dsi_display_parse_te_data(struct dsi_display *display)
-{
-	struct platform_device *pdev;
-	struct device *dev;
-	int rc = 0;
-	u32 val = 0;
-
-	pdev = display->pdev;
-	if (!pdev) {
-		pr_err("Invalid platform device\n");
-		return;
-	}
-
-	dev = &pdev->dev;
-	if (!dev) {
-		pr_err("Invalid platform device\n");
-		return;
-	}
-
-	display->disp_te_gpio = of_get_named_gpio(dev->of_node,
-					"qcom,platform-te-gpio", 0);
-
-	if (display->fw)
-		rc = dsi_parser_read_u32(display->parser_node,
-			"qcom,panel-te-source", &val);
-	else
-		rc = of_property_read_u32(dev->of_node,
-			"qcom,panel-te-source", &val);
-
-	if (rc || (val  > MAX_TE_SOURCE_ID)) {
-		pr_err("invalid vsync source selection\n");
-		val = 0;
-	}
-
-	display->te_source = val;
-}
-
-static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
-		struct dsi_panel *panel)
-{
-	int i, rc = 0, count = 0, start = 0, *lenp;
-	struct drm_panel_esd_config *config;
-	struct dsi_cmd_desc *cmds;
-	u32 flags = 0;
-
-	if (!panel || !ctrl || !ctrl->ctrl)
-		return -EINVAL;
-
-	/*
-	 * When DSI controller is not in initialized state, we do not want to
-	 * report a false ESD failure and hence we defer until next read
-	 * happen.
-	 */
-	if (!dsi_ctrl_validate_host_state(ctrl->ctrl))
-		return 1;
-
-	config = &(panel->esd_config);
-	lenp = config->status_valid_params ?: config->status_cmds_rlen;
-	count = config->status_cmd.count;
-	cmds = config->status_cmd.cmds;
-	flags |= (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ |
-		  DSI_CTRL_CMD_CUSTOM_DMA_SCHED);
-
-	for (i = 0; i < count; ++i) {
-		memset(config->status_buf, 0x0, SZ_4K);
-		if (cmds[i].last_command) {
-			cmds[i].msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
-			flags |= DSI_CTRL_CMD_LAST_COMMAND;
-		}
-		if (config->status_cmd.state == DSI_CMD_SET_STATE_LP)
-			cmds[i].msg.flags |= MIPI_DSI_MSG_USE_LPM;
-		cmds[i].msg.rx_buf = config->status_buf;
-		cmds[i].msg.rx_len = config->status_cmds_rlen[i];
-		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i].msg, flags);
-		if (rc <= 0) {
-			pr_err("rx cmd transfer failed rc=%d\n", rc);
-			return rc;
-		}
-
-		memcpy(config->return_buf + start,
-			config->status_buf, lenp[i]);
-		start += lenp[i];
-	}
-
-	return rc;
-}
-
-static int dsi_display_validate_status(struct dsi_display_ctrl *ctrl,
-		struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	rc = dsi_display_read_status(ctrl, panel);
-	if (rc <= 0) {
-		goto exit;
-	} else {
-		/*
-		 * panel status read successfully.
-		 * check for validity of the data read back.
-		 */
-		rc = dsi_display_validate_reg_read(panel);
-		if (!rc) {
-			rc = -EINVAL;
-			goto exit;
-		}
-	}
-
-exit:
-	return rc;
-}
-
-static int dsi_display_status_reg_read(struct dsi_display *display)
-{
-	int rc = 0, i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	pr_debug(" ++\n");
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	if (display->tx_cmd_buf == NULL) {
-		rc = dsi_host_alloc_cmd_tx_buffer(display);
-		if (rc) {
-			pr_err("failed to allocate cmd tx buffer memory\n");
-			goto done;
-		}
-	}
-
-	rc = dsi_display_cmd_engine_enable(display);
-	if (rc) {
-		pr_err("cmd engine enable failed\n");
-		return -EPERM;
-	}
-
-	rc = dsi_display_validate_status(m_ctrl, display->panel);
-	if (rc <= 0) {
-		pr_err("[%s] read status failed on master,rc=%d\n",
-		       display->name, rc);
-		goto exit;
-	}
-
-	if (!display->panel->sync_broadcast_en)
-		goto exit;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (ctrl == m_ctrl)
-			continue;
-
-		rc = dsi_display_validate_status(ctrl, display->panel);
-		if (rc <= 0) {
-			pr_err("[%s] read status failed on slave,rc=%d\n",
-			       display->name, rc);
-			goto exit;
-		}
-	}
-exit:
-	dsi_display_cmd_engine_disable(display);
-done:
-	return rc;
-}
-
-static int dsi_display_status_bta_request(struct dsi_display *display)
-{
-	int rc = 0;
-
-	pr_debug(" ++\n");
-	/* TODO: trigger SW BTA and wait for acknowledgment */
-
-	return rc;
-}
-
-static int dsi_display_status_check_te(struct dsi_display *display)
-{
-	int rc = 1;
-	int const esd_te_timeout = msecs_to_jiffies(3*20);
-
-	dsi_display_change_te_irq_status(display, true);
-
-	reinit_completion(&display->esd_te_gate);
-	if (!wait_for_completion_timeout(&display->esd_te_gate,
-				esd_te_timeout)) {
-		pr_err("TE check failed\n");
-		rc = -EINVAL;
-	}
-
-	dsi_display_change_te_irq_status(display, false);
-
-	return rc;
-}
-
-int dsi_display_check_status(struct drm_connector *connector, void *display,
-					bool te_check_override)
-{
-	struct dsi_display *dsi_display = display;
-	struct dsi_panel *panel;
-	u32 status_mode;
-	int rc = 0x1;
-	u32 mask;
-
-	if (!dsi_display || !dsi_display->panel)
-		return -EINVAL;
-
-	panel = dsi_display->panel;
-
-	dsi_panel_acquire_panel_lock(panel);
-
-	if (!panel->panel_initialized) {
-		pr_debug("Panel not initialized\n");
-		goto release_panel_lock;
-	}
-
-	/* Prevent another ESD check,when ESD recovery is underway */
-	if (atomic_read(&panel->esd_recovery_pending))
-		goto release_panel_lock;
-
-	status_mode = panel->esd_config.status_mode;
-
-	if (status_mode == ESD_MODE_SW_SIM_SUCCESS)
-		goto release_panel_lock;
-
-	if (status_mode == ESD_MODE_SW_SIM_FAILURE) {
-		rc = -EINVAL;
-		goto release_panel_lock;
-	}
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-
-	if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
-		status_mode = ESD_MODE_PANEL_TE;
-
-	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-		DSI_ALL_CLKS, DSI_CLK_ON);
-
-	/* Mask error interrupts before attempting ESD read */
-	mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW);
-	dsi_display_set_ctrl_esd_check_flag(dsi_display, true);
-	dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, true);
-
-	if (status_mode == ESD_MODE_REG_READ) {
-		rc = dsi_display_status_reg_read(dsi_display);
-	} else if (status_mode == ESD_MODE_SW_BTA) {
-		rc = dsi_display_status_bta_request(dsi_display);
-	} else if (status_mode == ESD_MODE_PANEL_TE) {
-		rc = dsi_display_status_check_te(dsi_display);
-	} else {
-		pr_warn("unsupported check status mode\n");
-		panel->esd_config.esd_enabled = false;
-	}
-
-	/* Unmask error interrupts */
-	if (rc > 0) {
-		dsi_display_set_ctrl_esd_check_flag(dsi_display, false);
-		dsi_display_mask_ctrl_error_interrupts(dsi_display, mask,
-							false);
-	} else {
-		/* Handle Panel failures during display disable sequence */
-		atomic_set(&panel->esd_recovery_pending, 1);
-	}
-
-	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-		DSI_ALL_CLKS, DSI_CLK_OFF);
-
-release_panel_lock:
-	dsi_panel_release_panel_lock(panel);
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-
-	return rc;
-}
-
-static int dsi_display_cmd_prepare(const char *cmd_buf, u32 cmd_buf_len,
-		struct dsi_cmd_desc *cmd, u8 *payload, u32 payload_len)
-{
-	int i;
-
-	memset(cmd, 0x00, sizeof(*cmd));
-	cmd->msg.type = cmd_buf[0];
-	cmd->last_command = (cmd_buf[1] == 1);
-	cmd->msg.channel = cmd_buf[2];
-	cmd->msg.flags = cmd_buf[3];
-	cmd->msg.ctrl = 0;
-	cmd->post_wait_ms = cmd->msg.wait_ms = cmd_buf[4];
-	cmd->msg.tx_len = ((cmd_buf[5] << 8) | (cmd_buf[6]));
-
-	if (cmd->msg.tx_len > payload_len) {
-		pr_err("Incorrect payload length tx_len %zu, payload_len %d\n",
-		       cmd->msg.tx_len, payload_len);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < cmd->msg.tx_len; i++)
-		payload[i] = cmd_buf[7 + i];
-
-	cmd->msg.tx_buf = payload;
-	return 0;
-}
-
-static int dsi_display_ctrl_get_host_init_state(struct dsi_display *dsi_display,
-		bool *state)
-{
-	struct dsi_display_ctrl *ctrl;
-	int i, rc = -EINVAL;
-
-	display_for_each_ctrl(i, dsi_display) {
-		ctrl = &dsi_display->ctrl[i];
-		rc = dsi_ctrl_get_host_engine_init_state(ctrl->ctrl, state);
-		if (rc)
-			break;
-	}
-	return rc;
-}
-
-int dsi_display_cmd_transfer(struct drm_connector *connector,
-		void *display, const char *cmd_buf,
-		u32 cmd_buf_len)
-{
-	struct dsi_display *dsi_display = display;
-	struct dsi_cmd_desc cmd;
-	u8 cmd_payload[MAX_CMD_PAYLOAD_SIZE];
-	int rc = 0;
-	bool state = false;
-
-	if (!dsi_display || !cmd_buf) {
-		pr_err("[DSI] invalid params\n");
-		return -EINVAL;
-	}
-
-	pr_debug("[DSI] Display command transfer\n");
-
-	rc = dsi_display_cmd_prepare(cmd_buf, cmd_buf_len,
-			&cmd, cmd_payload, MAX_CMD_PAYLOAD_SIZE);
-	if (rc) {
-		pr_err("[DSI] command prepare failed. rc %d\n", rc);
-		return rc;
-	}
-
-	mutex_lock(&dsi_display->display_lock);
-	rc = dsi_display_ctrl_get_host_init_state(dsi_display, &state);
-
-	/**
-	 * Handle scenario where a command transfer is initiated through
-	 * sysfs interface when device is in suepnd state.
-	 */
-	if (!rc && !state) {
-		pr_warn_ratelimited("Command xfer attempted while device is in suspend state\n"
-				);
-		rc = -EPERM;
-		goto end;
-	}
-	if (rc || !state) {
-		pr_err("[DSI] Invalid host state %d rc %d\n",
-				state, rc);
-		rc = -EPERM;
-		goto end;
-	}
-
-	rc = dsi_display->host.ops->transfer(&dsi_display->host,
-			&cmd.msg);
-end:
-	mutex_unlock(&dsi_display->display_lock);
-	return rc;
-}
-
-static void _dsi_display_continuous_clk_ctrl(struct dsi_display *display,
-					     bool enable)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display || !display->panel->host_config.force_hs_clk_lane)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_ctrl_set_continuous_clk(ctrl->ctrl, enable);
-	}
-}
-
-int dsi_display_soft_reset(void *display)
-{
-	struct dsi_display *dsi_display;
-	struct dsi_display_ctrl *ctrl;
-	int rc = 0;
-	int i;
-
-	if (!display)
-		return -EINVAL;
-
-	dsi_display = display;
-
-	display_for_each_ctrl(i, dsi_display) {
-		ctrl = &dsi_display->ctrl[i];
-		rc = dsi_ctrl_soft_reset(ctrl->ctrl);
-		if (rc) {
-			pr_err("[%s] failed to soft reset host_%d, rc=%d\n",
-					dsi_display->name, i, rc);
-			break;
-		}
-	}
-
-	return rc;
-}
-
-enum dsi_pixel_format dsi_display_get_dst_format(
-		struct drm_connector *connector,
-		void *display)
-{
-	enum dsi_pixel_format format = DSI_PIXEL_FORMAT_MAX;
-	struct dsi_display *dsi_display = (struct dsi_display *)display;
-
-	if (!dsi_display || !dsi_display->panel) {
-		pr_err("Invalid params(s) dsi_display %pK, panel %pK\n",
-			dsi_display,
-			((dsi_display) ? dsi_display->panel : NULL));
-		return format;
-	}
-
-	format = dsi_display->panel->host_config.dst_format;
-	return format;
-}
-
-static void _dsi_display_setup_misr(struct dsi_display *display)
-{
-	int i;
-
-	display_for_each_ctrl(i, display) {
-		dsi_ctrl_setup_misr(display->ctrl[i].ctrl,
-				display->misr_enable,
-				display->misr_frame_count);
-	}
-}
-
-/**
- * dsi_display_get_cont_splash_status - Get continuous splash status.
- * @dsi_display:         DSI display handle.
- *
- * Return: boolean to signify whether continuous splash is enabled.
- */
-static bool dsi_display_get_cont_splash_status(struct dsi_display *display)
-{
-	u32 val = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-	struct dsi_ctrl_hw *hw;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &(display->ctrl[i]);
-		if (!ctrl || !ctrl->ctrl)
-			continue;
-
-		hw = &(ctrl->ctrl->hw);
-		val = hw->ops.get_cont_splash_status(hw);
-		if (!val)
-			return false;
-	}
-	return true;
-}
-
-int dsi_display_set_power(struct drm_connector *connector,
-		int power_mode, void *disp)
-{
-	struct dsi_display *display = disp;
-	int rc = 0;
-
-	if (!display || !display->panel) {
-		pr_err("invalid display/panel\n");
-		return -EINVAL;
-	}
-
-	switch (power_mode) {
-	case SDE_MODE_DPMS_LP1:
-		rc = dsi_panel_set_lp1(display->panel);
-		break;
-	case SDE_MODE_DPMS_LP2:
-		rc = dsi_panel_set_lp2(display->panel);
-		break;
-	default:
-		rc = dsi_panel_set_nolp(display->panel);
-		break;
-	}
-	return rc;
-}
-
-static ssize_t debugfs_dump_info_read(struct file *file,
-				      char __user *user_buf,
-				      size_t user_len,
-				      loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	char *buf;
-	u32 len = 0;
-	int i;
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", display->name);
-	len += snprintf(buf + len, (SZ_4K - len),
-			"\tResolution = %dx%d\n",
-			display->config.video_timing.h_active,
-			display->config.video_timing.v_active);
-
-	display_for_each_ctrl(i, display) {
-		len += snprintf(buf + len, (SZ_4K - len),
-				"\tCTRL_%d:\n\t\tctrl = %s\n\t\tphy = %s\n",
-				i, display->ctrl[i].ctrl->name,
-				display->ctrl[i].phy->name);
-	}
-
-	len += snprintf(buf + len, (SZ_4K - len),
-			"\tPanel = %s\n", display->panel->name);
-
-	len += snprintf(buf + len, (SZ_4K - len),
-			"\tClock master = %s\n",
-			display->ctrl[display->clk_master_idx].ctrl->name);
-
-	if (copy_to_user(user_buf, buf, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-
-	kfree(buf);
-	return len;
-}
-
-static ssize_t debugfs_misr_setup(struct file *file,
-				  const char __user *user_buf,
-				  size_t user_len,
-				  loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	char *buf;
-	int rc = 0;
-	size_t len;
-	u32 enable, frame_count;
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(MISR_BUFF_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	/* leave room for termination char */
-	len = min_t(size_t, user_len, MISR_BUFF_SIZE - 1);
-	if (copy_from_user(buf, user_buf, len)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	buf[len] = '\0'; /* terminate the string */
-
-	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	display->misr_enable = enable;
-	display->misr_frame_count = frame_count;
-
-	mutex_lock(&display->display_lock);
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto unlock;
-	}
-
-	_dsi_display_setup_misr(display);
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-	if (rc) {
-		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto unlock;
-	}
-
-	rc = user_len;
-unlock:
-	mutex_unlock(&display->display_lock);
-error:
-	kfree(buf);
-	return rc;
-}
-
-static ssize_t debugfs_misr_read(struct file *file,
-				 char __user *user_buf,
-				 size_t user_len,
-				 loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	char *buf;
-	u32 len = 0;
-	int rc = 0;
-	struct dsi_ctrl *dsi_ctrl;
-	int i;
-	u32 misr;
-	size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE);
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(max_len, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	mutex_lock(&display->display_lock);
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		dsi_ctrl = display->ctrl[i].ctrl;
-		misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl);
-
-		len += snprintf((buf + len), max_len - len,
-			"DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr);
-
-		if (len >= max_len)
-			break;
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-	if (rc) {
-		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	if (copy_to_user(user_buf, buf, max_len)) {
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-
-error:
-	mutex_unlock(&display->display_lock);
-	kfree(buf);
-	return len;
-}
-
-static ssize_t debugfs_esd_trigger_check(struct file *file,
-				  const char __user *user_buf,
-				  size_t user_len,
-				  loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	char *buf;
-	int rc = 0;
-	u32 esd_trigger;
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	if (user_len > sizeof(u32))
-		return -EINVAL;
-
-	if (!user_len || !user_buf)
-		return -EINVAL;
-
-	if (!display->panel ||
-		atomic_read(&display->panel->esd_recovery_pending))
-		return user_len;
-
-	buf = kzalloc(user_len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, user_buf, user_len)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	buf[user_len] = '\0'; /* terminate the string */
-
-	if (kstrtouint(buf, 10, &esd_trigger)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (esd_trigger != 1) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	display->esd_trigger = esd_trigger;
-
-	if (display->esd_trigger) {
-		pr_info("ESD attack triggered by user\n");
-		rc = dsi_panel_trigger_esd_attack(display->panel);
-		if (rc) {
-			pr_err("Failed to trigger ESD attack\n");
-			goto error;
-		}
-	}
-
-	rc = user_len;
-error:
-	kfree(buf);
-	return rc;
-}
-
-static ssize_t debugfs_alter_esd_check_mode(struct file *file,
-				  const char __user *user_buf,
-				  size_t user_len,
-				  loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	struct drm_panel_esd_config *esd_config;
-	char *buf;
-	int rc = 0;
-	size_t len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN);
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	buf = kzalloc(len, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	if (copy_from_user(buf, user_buf, len)) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	buf[len] = '\0'; /* terminate the string */
-	if (!display->panel) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	esd_config = &display->panel->esd_config;
-	if (!esd_config) {
-		pr_err("Invalid panel esd config\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (!esd_config->esd_enabled)
-		goto error;
-
-	if (!strcmp(buf, "te_signal_check\n")) {
-		pr_info("ESD check is switched to TE mode by user\n");
-		esd_config->status_mode = ESD_MODE_PANEL_TE;
-		dsi_display_change_te_irq_status(display, true);
-	}
-
-	if (!strcmp(buf, "reg_read\n")) {
-		pr_info("ESD check is switched to reg read by user\n");
-		rc = dsi_panel_parse_esd_reg_read_configs(display->panel);
-		if (rc) {
-			pr_err("failed to alter esd check mode,rc=%d\n",
-						rc);
-			rc = user_len;
-			goto error;
-		}
-		esd_config->status_mode = ESD_MODE_REG_READ;
-		if (dsi_display_is_te_based_esd(display))
-			dsi_display_change_te_irq_status(display, false);
-	}
-
-	if (!strcmp(buf, "esd_sw_sim_success\n"))
-		esd_config->status_mode = ESD_MODE_SW_SIM_SUCCESS;
-
-	if (!strcmp(buf, "esd_sw_sim_failure\n"))
-		esd_config->status_mode = ESD_MODE_SW_SIM_FAILURE;
-
-	rc = len;
-error:
-	kfree(buf);
-	return rc;
-}
-
-static ssize_t debugfs_read_esd_check_mode(struct file *file,
-				 char __user *user_buf,
-				 size_t user_len,
-				 loff_t *ppos)
-{
-	struct dsi_display *display = file->private_data;
-	struct drm_panel_esd_config *esd_config;
-	char *buf;
-	int rc = 0;
-	size_t len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN);
-
-	if (!display)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	if (!display->panel) {
-		pr_err("invalid panel data\n");
-		return -EINVAL;
-	}
-
-	buf = kzalloc(len, GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(buf))
-		return -ENOMEM;
-
-	esd_config = &display->panel->esd_config;
-	if (!esd_config) {
-		pr_err("Invalid panel esd config\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (!esd_config->esd_enabled) {
-		rc = snprintf(buf, len, "ESD feature not enabled");
-		goto output_mode;
-	}
-
-	switch (esd_config->status_mode) {
-	case ESD_MODE_REG_READ:
-		rc = snprintf(buf, len, "reg_read");
-		break;
-	case ESD_MODE_PANEL_TE:
-		rc = snprintf(buf, len, "te_signal_check");
-		break;
-	case ESD_MODE_SW_SIM_FAILURE:
-		rc = snprintf(buf, len, "esd_sw_sim_failure");
-		break;
-	case ESD_MODE_SW_SIM_SUCCESS:
-		rc = snprintf(buf, len, "esd_sw_sim_success");
-		break;
-	default:
-		rc = snprintf(buf, len, "invalid");
-		break;
-	}
-
-output_mode:
-	if (!rc) {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (copy_to_user(user_buf, buf, len)) {
-		rc = -EFAULT;
-		goto error;
-	}
-
-	*ppos += len;
-
-error:
-	kfree(buf);
-	return len;
-}
-
-static const struct file_operations dump_info_fops = {
-	.open = simple_open,
-	.read = debugfs_dump_info_read,
-};
-
-static const struct file_operations misr_data_fops = {
-	.open = simple_open,
-	.read = debugfs_misr_read,
-	.write = debugfs_misr_setup,
-};
-
-static const struct file_operations esd_trigger_fops = {
-	.open = simple_open,
-	.write = debugfs_esd_trigger_check,
-};
-
-static const struct file_operations esd_check_mode_fops = {
-	.open = simple_open,
-	.write = debugfs_alter_esd_check_mode,
-	.read = debugfs_read_esd_check_mode,
-};
-
-static int dsi_display_debugfs_init(struct dsi_display *display)
-{
-	int rc = 0;
-	struct dentry *dir, *dump_file, *misr_data;
-	char name[MAX_NAME_SIZE];
-	int i;
-
-	dir = debugfs_create_dir(display->name, NULL);
-	if (IS_ERR_OR_NULL(dir)) {
-		rc = PTR_ERR(dir);
-		pr_err("[%s] debugfs create dir failed, rc = %d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	dump_file = debugfs_create_file("dump_info",
-					0400,
-					dir,
-					display,
-					&dump_info_fops);
-	if (IS_ERR_OR_NULL(dump_file)) {
-		rc = PTR_ERR(dump_file);
-		pr_err("[%s] debugfs create dump info file failed, rc=%d\n",
-		       display->name, rc);
-		goto error_remove_dir;
-	}
-
-	dump_file = debugfs_create_file("esd_trigger",
-					0644,
-					dir,
-					display,
-					&esd_trigger_fops);
-	if (IS_ERR_OR_NULL(dump_file)) {
-		rc = PTR_ERR(dump_file);
-		pr_err("[%s] debugfs for esd trigger file failed, rc=%d\n",
-		       display->name, rc);
-		goto error_remove_dir;
-	}
-
-	dump_file = debugfs_create_file("esd_check_mode",
-					0644,
-					dir,
-					display,
-					&esd_check_mode_fops);
-	if (IS_ERR_OR_NULL(dump_file)) {
-		rc = PTR_ERR(dump_file);
-		pr_err("[%s] debugfs for esd check mode failed, rc=%d\n",
-		       display->name, rc);
-		goto error_remove_dir;
-	}
-
-	misr_data = debugfs_create_file("misr_data",
-					0600,
-					dir,
-					display,
-					&misr_data_fops);
-	if (IS_ERR_OR_NULL(misr_data)) {
-		rc = PTR_ERR(misr_data);
-		pr_err("[%s] debugfs create misr datafile failed, rc=%d\n",
-		       display->name, rc);
-		goto error_remove_dir;
-	}
-
-	display_for_each_ctrl(i, display) {
-		struct msm_dsi_phy *phy = display->ctrl[i].phy;
-
-		if (!phy || !phy->name)
-			continue;
-
-		snprintf(name, ARRAY_SIZE(name),
-				"%s_allow_phy_power_off", phy->name);
-		dump_file = debugfs_create_bool(name, 0600, dir,
-				&phy->allow_phy_power_off);
-		if (IS_ERR_OR_NULL(dump_file)) {
-			rc = PTR_ERR(dump_file);
-			pr_err("[%s] debugfs create %s failed, rc=%d\n",
-			       display->name, name, rc);
-			goto error_remove_dir;
-		}
-
-		snprintf(name, ARRAY_SIZE(name),
-				"%s_regulator_min_datarate_bps", phy->name);
-		dump_file = debugfs_create_u32(name, 0600, dir,
-				&phy->regulator_min_datarate_bps);
-		if (IS_ERR_OR_NULL(dump_file)) {
-			rc = PTR_ERR(dump_file);
-			pr_err("[%s] debugfs create %s failed, rc=%d\n",
-			       display->name, name, rc);
-			goto error_remove_dir;
-		}
-	}
-
-	if (!debugfs_create_bool("ulps_feature_enable", 0600, dir,
-			&display->panel->ulps_feature_enabled)) {
-		pr_err("[%s] debugfs create ulps feature enable file failed\n",
-		       display->name);
-		goto error_remove_dir;
-	}
-
-	if (!debugfs_create_bool("ulps_suspend_feature_enable", 0600, dir,
-			&display->panel->ulps_suspend_enabled)) {
-		pr_err("[%s] debugfs create ulps-suspend feature enable file failed\n",
-		       display->name);
-		goto error_remove_dir;
-	}
-
-	if (!debugfs_create_bool("ulps_status", 0400, dir,
-			&display->ulps_enabled)) {
-		pr_err("[%s] debugfs create ulps status file failed\n",
-		       display->name);
-		goto error_remove_dir;
-	}
-
-	display->root = dir;
-	dsi_parser_dbg_init(display->parser, dir);
-
-	return rc;
-error_remove_dir:
-	debugfs_remove(dir);
-error:
-	return rc;
-}
-
-static int dsi_display_debugfs_deinit(struct dsi_display *display)
-{
-	debugfs_remove_recursive(display->root);
-
-	return 0;
-}
-
-static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
-					struct dsi_display_mode *mode)
-{
-	if (display->ctrl_count > 1) {
-		mode->timing.h_active /= display->ctrl_count;
-		mode->timing.h_front_porch /= display->ctrl_count;
-		mode->timing.h_sync_width /= display->ctrl_count;
-		mode->timing.h_back_porch /= display->ctrl_count;
-		mode->timing.h_skew /= display->ctrl_count;
-		mode->pixel_clk_khz /= display->ctrl_count;
-	}
-}
-
-static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
-		bool enable)
-{
-	/* TODO: make checks based on cont. splash */
-
-	pr_debug("checking ulps req validity\n");
-
-	if (atomic_read(&display->panel->esd_recovery_pending)) {
-		pr_debug("%s: ESD recovery sequence underway\n", __func__);
-		return false;
-	}
-
-	if (!dsi_panel_ulps_feature_enabled(display->panel) &&
-			!display->panel->ulps_suspend_enabled) {
-		pr_debug("%s: ULPS feature is not enabled\n", __func__);
-		return false;
-	}
-
-	if (!dsi_panel_initialized(display->panel) &&
-			!display->panel->ulps_suspend_enabled) {
-		pr_debug("%s: panel not yet initialized\n", __func__);
-		return false;
-	}
-
-	if (enable && display->ulps_enabled) {
-		pr_debug("ULPS already enabled\n");
-		return false;
-	} else if (!enable && !display->ulps_enabled) {
-		pr_debug("ULPS already disabled\n");
-		return false;
-	}
-
-	/*
-	 * No need to enter ULPS when transitioning from splash screen to
-	 * boot animation since it is expected that the clocks would be turned
-	 * right back on.
-	 */
-	if (enable && display->is_cont_splash_enabled)
-		return false;
-
-	return true;
-}
-
-
-/**
- * dsi_display_set_ulps() - set ULPS state for DSI lanes.
- * @dsi_display:         DSI display handle.
- * @enable:           enable/disable ULPS.
- *
- * ULPS can be enabled/disabled after DSI host engine is turned on.
- *
- * Return: error code.
- */
-static int dsi_display_set_ulps(struct dsi_display *display, bool enable)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!dsi_display_is_ulps_req_valid(display, enable)) {
-		pr_debug("%s: skipping ULPS config, enable=%d\n",
-			__func__, enable);
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	/*
-	 * ULPS entry-exit can be either through the DSI controller or
-	 * the DSI PHY depending on hardware variation. For some chipsets,
-	 * both controller version and phy version ulps entry-exit ops can
-	 * be present. To handle such cases, send ulps request through PHY,
-	 * if ulps request is handled in PHY, then no need to send request
-	 * through controller.
-	 */
-
-	rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable,
-			display->clamp_enabled);
-
-	if (rc == DSI_PHY_ULPS_ERROR) {
-		pr_err("Ulps PHY state change(%d) failed\n", enable);
-		return -EINVAL;
-	}
-
-	else if (rc == DSI_PHY_ULPS_HANDLED) {
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			if (!ctrl->ctrl || (ctrl == m_ctrl))
-				continue;
-
-			rc = dsi_phy_set_ulps(ctrl->phy, &display->config,
-					enable, display->clamp_enabled);
-			if (rc == DSI_PHY_ULPS_ERROR) {
-				pr_err("Ulps PHY state change(%d) failed\n",
-						enable);
-				return -EINVAL;
-			}
-		}
-	}
-
-	else if (rc == DSI_PHY_ULPS_NOT_HANDLED) {
-		rc = dsi_ctrl_set_ulps(m_ctrl->ctrl, enable);
-		if (rc) {
-			pr_err("Ulps controller state change(%d) failed\n",
-					enable);
-			return rc;
-		}
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			if (!ctrl->ctrl || (ctrl == m_ctrl))
-				continue;
-
-			rc = dsi_ctrl_set_ulps(ctrl->ctrl, enable);
-			if (rc) {
-				pr_err("Ulps controller state change(%d) failed\n",
-						enable);
-				return rc;
-			}
-		}
-	}
-
-	display->ulps_enabled = enable;
-	return 0;
-}
-
-/**
- * dsi_display_set_clamp() - set clamp state for DSI IO.
- * @dsi_display:         DSI display handle.
- * @enable:           enable/disable clamping.
- *
- * Return: error code.
- */
-static int dsi_display_set_clamp(struct dsi_display *display, bool enable)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-	bool ulps_enabled = false;
-
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	ulps_enabled = display->ulps_enabled;
-
-	/*
-	 * Clamp control can be either through the DSI controller or
-	 * the DSI PHY depending on hardware variation
-	 */
-	rc = dsi_ctrl_set_clamp_state(m_ctrl->ctrl, enable, ulps_enabled);
-	if (rc) {
-		pr_err("DSI ctrl clamp state change(%d) failed\n", enable);
-		return rc;
-	}
-
-	rc = dsi_phy_set_clamp_state(m_ctrl->phy, enable);
-	if (rc) {
-		pr_err("DSI phy clamp state change(%d) failed\n", enable);
-		return rc;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_clamp_state(ctrl->ctrl, enable, ulps_enabled);
-		if (rc) {
-			pr_err("DSI Clamp state change(%d) failed\n", enable);
-			return rc;
-		}
-
-		rc = dsi_phy_set_clamp_state(ctrl->phy, enable);
-		if (rc) {
-			pr_err("DSI phy clamp state change(%d) failed\n",
-				enable);
-			return rc;
-		}
-
-		pr_debug("Clamps %s for ctrl%d\n",
-			enable ? "enabled" : "disabled", i);
-	}
-
-	display->clamp_enabled = enable;
-	return 0;
-}
-
-/**
- * dsi_display_setup_ctrl() - setup DSI controller.
- * @dsi_display:         DSI display handle.
- *
- * Return: error code.
- */
-static int dsi_display_ctrl_setup(struct dsi_display *display)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_display_ctrl *ctrl, *m_ctrl;
-
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	rc = dsi_ctrl_setup(m_ctrl->ctrl);
-	if (rc) {
-		pr_err("DSI controller setup failed\n");
-		return rc;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_setup(ctrl->ctrl);
-		if (rc) {
-			pr_err("DSI controller setup failed\n");
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int dsi_display_phy_enable(struct dsi_display *display);
-
-/**
- * dsi_display_phy_idle_on() - enable DSI PHY while coming out of idle screen.
- * @dsi_display:         DSI display handle.
- * @mmss_clamp:          True if clamp is enabled.
- *
- * Return: error code.
- */
-static int dsi_display_phy_idle_on(struct dsi_display *display,
-		bool mmss_clamp)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (mmss_clamp && !display->phy_idle_power_off) {
-		dsi_display_phy_enable(display);
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	rc = dsi_phy_idle_ctrl(m_ctrl->phy, true);
-	if (rc) {
-		pr_err("DSI controller setup failed\n");
-		return rc;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_phy_idle_ctrl(ctrl->phy, true);
-		if (rc) {
-			pr_err("DSI controller setup failed\n");
-			return rc;
-		}
-	}
-	display->phy_idle_power_off = false;
-	return 0;
-}
-
-/**
- * dsi_display_phy_idle_off() - disable DSI PHY while going to idle screen.
- * @dsi_display:         DSI display handle.
- *
- * Return: error code.
- */
-static int dsi_display_phy_idle_off(struct dsi_display *display)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	display_for_each_ctrl(i, display) {
-		struct msm_dsi_phy *phy = display->ctrl[i].phy;
-
-		if (!phy)
-			continue;
-
-		if (!phy->allow_phy_power_off) {
-			pr_debug("phy doesn't support this feature\n");
-			return 0;
-		}
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	rc = dsi_phy_idle_ctrl(m_ctrl->phy, false);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		return rc;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_phy_idle_ctrl(ctrl->phy, false);
-		if (rc) {
-			pr_err("DSI controller setup failed\n");
-			return rc;
-		}
-	}
-	display->phy_idle_power_off = true;
-	return 0;
-}
-
-void dsi_display_enable_event(struct drm_connector *connector,
-		struct dsi_display *display,
-		uint32_t event_idx, struct dsi_event_cb_info *event_info,
-		bool enable)
-{
-	uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT;
-	int i;
-
-	if (!display) {
-		pr_err("invalid display\n");
-		return;
-	}
-
-	if (event_info)
-		event_info->event_idx = event_idx;
-
-	switch (event_idx) {
-	case SDE_CONN_EVENT_VID_DONE:
-		irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE;
-		break;
-	case SDE_CONN_EVENT_CMD_DONE:
-		irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
-		break;
-	case SDE_CONN_EVENT_VID_FIFO_OVERFLOW:
-	case SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW:
-		if (event_info) {
-			display_for_each_ctrl(i, display)
-				display->ctrl[i].ctrl->recovery_cb =
-							*event_info;
-		}
-		break;
-	default:
-		/* nothing to do */
-		pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
-		return;
-	}
-
-	if (enable) {
-		display_for_each_ctrl(i, display)
-			dsi_ctrl_enable_status_interrupt(
-					display->ctrl[i].ctrl, irq_status_idx,
-					event_info);
-	} else {
-		display_for_each_ctrl(i, display)
-			dsi_ctrl_disable_status_interrupt(
-					display->ctrl[i].ctrl, irq_status_idx);
-	}
-}
-
-/**
- * dsi_config_host_engine_state_for_cont_splash()- update host engine state
- *                                                 during continuous splash.
- * @display: Handle to dsi display
- *
- */
-static void dsi_config_host_engine_state_for_cont_splash
-					(struct dsi_display *display)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-	enum dsi_engine_state host_state = DSI_CTRL_ENGINE_ON;
-
-	/* Sequence does not matter for split dsi usecases */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-
-		dsi_ctrl_update_host_engine_state_for_cont_splash(ctrl->ctrl,
-							host_state);
-	}
-}
-
-static int dsi_display_ctrl_power_on(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/* Sequence does not matter for split dsi usecases */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-
-		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
-					      DSI_CTRL_POWER_VREG_ON);
-		if (rc) {
-			pr_err("[%s] Failed to set power state, rc=%d\n",
-			       ctrl->ctrl->name, rc);
-			goto error;
-		}
-	}
-
-	return rc;
-error:
-	for (i = i - 1; i >= 0; i--) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-		(void)dsi_ctrl_set_power_state(ctrl->ctrl,
-			DSI_CTRL_POWER_VREG_OFF);
-	}
-	return rc;
-}
-
-static int dsi_display_ctrl_power_off(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/* Sequence does not matter for split dsi usecases */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-
-		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
-			DSI_CTRL_POWER_VREG_OFF);
-		if (rc) {
-			pr_err("[%s] Failed to power off, rc=%d\n",
-			       ctrl->ctrl->name, rc);
-			goto error;
-		}
-	}
-error:
-	return rc;
-}
-
-static void dsi_display_parse_cmdline_topology(struct dsi_display *display,
-					unsigned int display_type)
-{
-	char *boot_str = NULL;
-	char *str = NULL;
-	char *sw_te = NULL;
-	unsigned long cmdline_topology = NO_OVERRIDE;
-	unsigned long cmdline_timing = NO_OVERRIDE;
-
-	if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
-		pr_err("display_type=%d not supported\n", display_type);
-		goto end;
-	}
-
-	if (display_type == DSI_PRIMARY)
-		boot_str = dsi_display_primary;
-	else
-		boot_str = dsi_display_secondary;
-
-	sw_te = strnstr(boot_str, ":swte", strlen(boot_str));
-	if (sw_te)
-		display->sw_te_using_wd = true;
-
-	str = strnstr(boot_str, ":config", strlen(boot_str));
-	if (!str)
-		goto end;
-
-	if (kstrtol(str + strlen(":config"), INT_BASE_10,
-				(unsigned long *)&cmdline_topology)) {
-		pr_err("invalid config index override: %s\n", boot_str);
-		goto end;
-	}
-
-	str = strnstr(boot_str, ":timing", strlen(boot_str));
-	if (!str)
-		goto end;
-
-	if (kstrtol(str + strlen(":timing"), INT_BASE_10,
-				(unsigned long *)&cmdline_timing)) {
-		pr_err("invalid timing index override: %s. resetting both timing and config\n",
-			boot_str);
-		cmdline_topology = NO_OVERRIDE;
-		goto end;
-	}
-	pr_debug("successfully parsed command line topology and timing\n");
-end:
-	display->cmdline_topology = cmdline_topology;
-	display->cmdline_timing = cmdline_timing;
-}
-
-/**
- * dsi_display_parse_boot_display_selection()- Parse DSI boot display name
- *
- * Return:	returns error status
- */
-static int dsi_display_parse_boot_display_selection(void)
-{
-	char *pos = NULL;
-	char disp_buf[MAX_CMDLINE_PARAM_LEN] = {'\0'};
-	int i, j;
-
-	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
-		strlcpy(disp_buf, boot_displays[i].boot_param,
-			MAX_CMDLINE_PARAM_LEN);
-
-		pos = strnstr(disp_buf, ":", MAX_CMDLINE_PARAM_LEN);
-
-		/* Use ':' as a delimiter to retrieve the display name */
-		if (!pos) {
-			pr_debug("display name[%s]is not valid\n", disp_buf);
-			continue;
-		}
-
-		for (j = 0; (disp_buf + j) < pos; j++)
-			boot_displays[i].name[j] = *(disp_buf + j);
-
-		boot_displays[i].name[j] = '\0';
-
-		boot_displays[i].boot_disp_en = true;
-	}
-
-	return 0;
-}
-
-static int dsi_display_phy_power_on(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/* Sequence does not matter for split dsi usecases */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-
-		rc = dsi_phy_set_power_state(ctrl->phy, true);
-		if (rc) {
-			pr_err("[%s] Failed to set power state, rc=%d\n",
-			       ctrl->phy->name, rc);
-			goto error;
-		}
-	}
-
-	return rc;
-error:
-	for (i = i - 1; i >= 0; i--) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->phy)
-			continue;
-		(void)dsi_phy_set_power_state(ctrl->phy, false);
-	}
-	return rc;
-}
-
-static int dsi_display_phy_power_off(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/* Sequence does not matter for split dsi usecases */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->phy)
-			continue;
-
-		rc = dsi_phy_set_power_state(ctrl->phy, false);
-		if (rc) {
-			pr_err("[%s] Failed to power off, rc=%d\n",
-			       ctrl->ctrl->name, rc);
-			goto error;
-		}
-	}
-error:
-	return rc;
-}
-
-static int dsi_display_set_clk_src(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	/*
-	 * In case of split DSI usecases, the clock for master controller should
-	 * be enabled before the other controller. Master controller in the
-	 * clock context refers to the controller that sources the clock.
-	 */
-	m_ctrl = &display->ctrl[display->clk_master_idx];
-
-	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
-		   &display->clock_info.src_clks);
-	if (rc) {
-		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
-			   display->name, rc);
-		return rc;
-	}
-
-	/* Turn on rest of the controllers */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
-			   &display->clock_info.src_clks);
-		if (rc) {
-			pr_err("[%s] failed to set source clocks, rc=%d\n",
-				   display->name, rc);
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int dsi_display_phy_reset_config(struct dsi_display *display,
-		bool enable)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_phy_reset_config(ctrl->ctrl, enable);
-		if (rc) {
-			pr_err("[%s] failed to %s phy reset, rc=%d\n",
-			       display->name, enable ? "mask" : "unmask", rc);
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static void dsi_display_toggle_resync_fifo(struct dsi_display *display)
-{
-	struct dsi_display_ctrl *ctrl;
-	int i;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_phy_toggle_resync_fifo(ctrl->phy);
-	}
-
-	/*
-	 * After retime buffer synchronization we need to turn of clk_en_sel
-	 * bit on each phy.
-	 */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_phy_reset_clk_en_sel(ctrl->phy);
-	}
-
-}
-
-static int dsi_display_ctrl_update(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_host_timing_update(ctrl->ctrl);
-		if (rc) {
-			pr_err("[%s] failed to update host_%d, rc=%d\n",
-				   display->name, i, rc);
-			goto error_host_deinit;
-		}
-	}
-
-	return 0;
-error_host_deinit:
-	for (i = i - 1; i >= 0; i--) {
-		ctrl = &display->ctrl[i];
-		(void)dsi_ctrl_host_deinit(ctrl->ctrl);
-	}
-
-	return rc;
-}
-
-static int dsi_display_ctrl_init(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/* when ULPS suspend feature is enabled, we will keep the lanes in
-	 * ULPS during suspend state and clamp DSI phy. Hence while resuming
-	 * we will programe DSI controller as part of core clock enable.
-	 * After that we should not re-configure DSI controller again here for
-	 * usecases where we are resuming from ulps suspend as it might put
-	 * the HW in bad state.
-	 */
-	if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) {
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			rc = dsi_ctrl_host_init(ctrl->ctrl,
-					display->is_cont_splash_enabled);
-			if (rc) {
-				pr_err("[%s] failed to init host_%d, rc=%d\n",
-				       display->name, i, rc);
-				goto error_host_deinit;
-			}
-		}
-	} else {
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			rc = dsi_ctrl_update_host_init_state(ctrl->ctrl, true);
-			if (rc)
-				pr_debug("host init update failed rc=%d\n", rc);
-		}
-	}
-
-	return rc;
-error_host_deinit:
-	for (i = i - 1; i >= 0; i--) {
-		ctrl = &display->ctrl[i];
-		(void)dsi_ctrl_host_deinit(ctrl->ctrl);
-	}
-	return rc;
-}
-
-static int dsi_display_ctrl_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_host_deinit(ctrl->ctrl);
-		if (rc) {
-			pr_err("[%s] failed to deinit host_%d, rc=%d\n",
-			       display->name, i, rc);
-		}
-	}
-
-	return rc;
-}
-
-static int dsi_display_ctrl_host_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	/* Host engine states are already taken care for
-	 * continuous splash case
-	 */
-	if (display->is_cont_splash_enabled) {
-		pr_debug("cont splash enabled, host enable not required\n");
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable host engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
-						    DSI_CTRL_ENGINE_ON);
-		if (rc) {
-			pr_err("[%s] failed to enable sl host engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	return rc;
-error_disable_master:
-	(void)dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
-	return rc;
-}
-
-static int dsi_display_ctrl_host_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
-						    DSI_CTRL_ENGINE_OFF);
-		if (rc)
-			pr_err("[%s] failed to disable host engine, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-	if (rc) {
-		pr_err("[%s] failed to disable host engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_display_vid_engine_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->video_master_idx];
-
-	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable vid engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_ON);
-		if (rc) {
-			pr_err("[%s] failed to enable vid engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	return rc;
-error_disable_master:
-	(void)dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
-	return rc;
-}
-
-static int dsi_display_vid_engine_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->video_master_idx];
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_OFF);
-		if (rc)
-			pr_err("[%s] failed to disable vid engine, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-	if (rc)
-		pr_err("[%s] failed to disable mvid engine, rc=%d\n",
-		       display->name, rc);
-
-	return rc;
-}
-
-static int dsi_display_phy_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-	enum dsi_phy_pll_source m_src = DSI_PLL_SOURCE_STANDALONE;
-
-	m_ctrl = &display->ctrl[display->clk_master_idx];
-	if (display->ctrl_count > 1)
-		m_src = DSI_PLL_SOURCE_NATIVE;
-
-	rc = dsi_phy_enable(m_ctrl->phy,
-			    &display->config,
-			    m_src,
-			    true,
-			    display->is_cont_splash_enabled);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_phy_enable(ctrl->phy,
-				    &display->config,
-				    DSI_PLL_SOURCE_NON_NATIVE,
-				    true,
-				    display->is_cont_splash_enabled);
-		if (rc) {
-			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	return rc;
-
-error_disable_master:
-	(void)dsi_phy_disable(m_ctrl->phy);
-error:
-	return rc;
-}
-
-static int dsi_display_phy_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	m_ctrl = &display->ctrl[display->clk_master_idx];
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_phy_disable(ctrl->phy);
-		if (rc)
-			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_phy_disable(m_ctrl->phy);
-	if (rc)
-		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
-		       display->name, rc);
-
-	return rc;
-}
-
-static int dsi_display_wake_up(struct dsi_display *display)
-{
-	return 0;
-}
-
-static int dsi_display_broadcast_cmd(struct dsi_display *display,
-				     const struct mipi_dsi_msg *msg)
-{
-	int rc = 0;
-	u32 flags, m_flags;
-	struct dsi_display_ctrl *ctrl, *m_ctrl;
-	int i;
-
-	m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
-		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FETCH_MEMORY);
-	flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
-		 DSI_CTRL_CMD_FETCH_MEMORY);
-
-	if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
-		flags |= DSI_CTRL_CMD_LAST_COMMAND;
-		m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
-	}
-	/*
-	 * 1. Setup commands in FIFO
-	 * 2. Trigger commands
-	 */
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, m_flags);
-	if (rc) {
-		pr_err("[%s] cmd transfer failed on master,rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (ctrl == m_ctrl)
-			continue;
-
-		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, msg, flags);
-		if (rc) {
-			pr_err("[%s] cmd transfer failed, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-
-		rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags);
-		if (rc) {
-			pr_err("[%s] cmd trigger failed, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags);
-	if (rc) {
-		pr_err("[%s] cmd trigger failed for master, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_display_phy_sw_reset(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	/* For continuous splash use case ctrl states are updated
-	 * separately and hence we do an early return
-	 */
-	if (display->is_cont_splash_enabled) {
-		pr_debug("cont splash enabled, phy sw reset not required\n");
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
-	if (rc) {
-		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_phy_sw_reset(ctrl->ctrl);
-		if (rc) {
-			pr_err("[%s] failed to reset phy, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_host_attach(struct mipi_dsi_host *host,
-			   struct mipi_dsi_device *dsi)
-{
-	return 0;
-}
-
-static int dsi_host_detach(struct mipi_dsi_host *host,
-			   struct mipi_dsi_device *dsi)
-{
-	return 0;
-}
-
-static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
-				 const struct mipi_dsi_msg *msg)
-{
-	struct dsi_display *display;
-	int rc = 0, ret = 0;
-
-	if (!host || !msg) {
-		pr_err("Invalid params\n");
-		return 0;
-	}
-
-	display = to_dsi_display(host);
-
-	/* Avoid sending DCS commands when ESD recovery is pending */
-	if (atomic_read(&display->panel->esd_recovery_pending)) {
-		pr_debug("ESD recovery pending\n");
-		return 0;
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable all DSI clocks, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	rc = dsi_display_wake_up(display);
-	if (rc) {
-		pr_err("[%s] failed to wake up display, rc=%d\n",
-		       display->name, rc);
-		goto error_disable_clks;
-	}
-
-	rc = dsi_display_cmd_engine_enable(display);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error_disable_clks;
-	}
-
-	if (display->tx_cmd_buf == NULL) {
-		rc = dsi_host_alloc_cmd_tx_buffer(display);
-		if (rc) {
-			pr_err("failed to allocate cmd tx buffer memory\n");
-			goto error_disable_cmd_engine;
-		}
-	}
-
-	if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
-		rc = dsi_display_broadcast_cmd(display, msg);
-		if (rc) {
-			pr_err("[%s] cmd broadcast failed, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_cmd_engine;
-		}
-	} else {
-		int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
-				msg->ctrl : 0;
-
-		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
-					  DSI_CTRL_CMD_FETCH_MEMORY);
-		if (rc) {
-			pr_err("[%s] cmd transfer failed, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_cmd_engine;
-		}
-	}
-
-error_disable_cmd_engine:
-	ret = dsi_display_cmd_engine_disable(display);
-	if (ret) {
-		pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n",
-				display->name, ret);
-	}
-error_disable_clks:
-	ret = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-	if (ret) {
-		pr_err("[%s] failed to disable all DSI clocks, rc=%d\n",
-		       display->name, ret);
-	}
-error:
-	return rc;
-}
-
-
-static struct mipi_dsi_host_ops dsi_host_ops = {
-	.attach = dsi_host_attach,
-	.detach = dsi_host_detach,
-	.transfer = dsi_host_transfer,
-};
-
-static int dsi_display_mipi_host_init(struct dsi_display *display)
-{
-	int rc = 0;
-	struct mipi_dsi_host *host = &display->host;
-
-	host->dev = &display->pdev->dev;
-	host->ops = &dsi_host_ops;
-
-	rc = mipi_dsi_host_register(host);
-	if (rc) {
-		pr_err("[%s] failed to register mipi dsi host, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-static int dsi_display_mipi_host_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-	struct mipi_dsi_host *host = &display->host;
-
-	mipi_dsi_host_unregister(host);
-
-	host->dev = NULL;
-	host->ops = NULL;
-
-	return rc;
-}
-
-static int dsi_display_clocks_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
-	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
-	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
-
-	if (src->byte_clk) {
-		devm_clk_put(&display->pdev->dev, src->byte_clk);
-		src->byte_clk = NULL;
-	}
-
-	if (src->pixel_clk) {
-		devm_clk_put(&display->pdev->dev, src->pixel_clk);
-		src->pixel_clk = NULL;
-	}
-
-	if (mux->byte_clk) {
-		devm_clk_put(&display->pdev->dev, mux->byte_clk);
-		mux->byte_clk = NULL;
-	}
-
-	if (mux->pixel_clk) {
-		devm_clk_put(&display->pdev->dev, mux->pixel_clk);
-		mux->pixel_clk = NULL;
-	}
-
-	if (shadow->byte_clk) {
-		devm_clk_put(&display->pdev->dev, shadow->byte_clk);
-		shadow->byte_clk = NULL;
-	}
-
-	if (shadow->pixel_clk) {
-		devm_clk_put(&display->pdev->dev, shadow->pixel_clk);
-		shadow->pixel_clk = NULL;
-	}
-
-	return rc;
-}
-
-static bool dsi_display_check_prefix(const char *clk_prefix,
-					const char *clk_name)
-{
-	return !!strnstr(clk_name, clk_prefix, strlen(clk_name));
-}
-
-static int dsi_display_get_clocks_count(struct dsi_display *display,
-						char *dsi_clk_name)
-{
-	if (display->fw)
-		return dsi_parser_count_strings(display->parser_node,
-			dsi_clk_name);
-	else
-		return of_property_count_strings(display->panel_node,
-			dsi_clk_name);
-}
-
-static void dsi_display_get_clock_name(struct dsi_display *display,
-					char *dsi_clk_name, int index,
-					const char **clk_name)
-{
-	if (display->fw)
-		dsi_parser_read_string_index(display->parser_node,
-			dsi_clk_name, index, clk_name);
-	else
-		of_property_read_string_index(display->panel_node,
-			dsi_clk_name, index, clk_name);
-}
-
-static int dsi_display_clocks_init(struct dsi_display *display)
-{
-	int i, rc = 0, num_clk = 0;
-	const char *clk_name;
-	const char *src_byte = "src_byte", *src_pixel = "src_pixel";
-	const char *mux_byte = "mux_byte", *mux_pixel = "mux_pixel";
-	const char *shadow_byte = "shadow_byte", *shadow_pixel = "shadow_pixel";
-	struct clk *dsi_clk;
-	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
-	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
-	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
-	char *dsi_clock_name;
-
-	if (!strcmp(display->display_type, "primary"))
-		dsi_clock_name = "qcom,dsi-select-clocks";
-	else
-		dsi_clock_name = "qcom,dsi-select-sec-clocks";
-
-	num_clk = dsi_display_get_clocks_count(display, dsi_clock_name);
-
-	pr_debug("clk count=%d\n", num_clk);
-
-	for (i = 0; i < num_clk; i++) {
-		dsi_display_get_clock_name(display, dsi_clock_name, i,
-						&clk_name);
-
-		pr_debug("clock name:%s\n", clk_name);
-
-		dsi_clk = devm_clk_get(&display->pdev->dev, clk_name);
-		if (IS_ERR_OR_NULL(dsi_clk)) {
-			rc = PTR_ERR(dsi_clk);
-
-			pr_err("failed to get %s, rc=%d\n", clk_name, rc);
-			goto error;
-		}
-
-		if (dsi_display_check_prefix(src_byte, clk_name)) {
-			src->byte_clk = dsi_clk;
-			continue;
-		}
-
-		if (dsi_display_check_prefix(src_pixel, clk_name)) {
-			src->pixel_clk = dsi_clk;
-			continue;
-		}
-
-		if (dsi_display_check_prefix(mux_byte, clk_name)) {
-			mux->byte_clk = dsi_clk;
-			continue;
-		}
-
-		if (dsi_display_check_prefix(mux_pixel, clk_name)) {
-			mux->pixel_clk = dsi_clk;
-			continue;
-		}
-
-		if (dsi_display_check_prefix(shadow_byte, clk_name)) {
-			shadow->byte_clk = dsi_clk;
-			continue;
-		}
-
-		if (dsi_display_check_prefix(shadow_pixel, clk_name)) {
-			shadow->pixel_clk = dsi_clk;
-			continue;
-		}
-	}
-
-	return 0;
-error:
-	(void)dsi_display_clocks_deinit(display);
-	return rc;
-}
-
-static int dsi_display_clk_ctrl_cb(void *priv,
-	struct dsi_clk_ctrl_info clk_state_info)
-{
-	int rc = 0;
-	struct dsi_display *display = NULL;
-	void *clk_handle = NULL;
-
-	if (!priv) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	display = priv;
-
-	if (clk_state_info.client == DSI_CLK_REQ_MDP_CLIENT) {
-		clk_handle = display->mdp_clk_handle;
-	} else if (clk_state_info.client == DSI_CLK_REQ_DSI_CLIENT) {
-		clk_handle = display->dsi_clk_handle;
-	} else {
-		pr_err("invalid clk handle, return error\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * TODO: Wait for CMD_MDP_DONE interrupt if MDP client tries
-	 * to turn off DSI clocks.
-	 */
-	rc = dsi_display_clk_ctrl(clk_handle,
-		clk_state_info.clk_type, clk_state_info.clk_state);
-	if (rc) {
-		pr_err("[%s] failed to %d DSI %d clocks, rc=%d\n",
-		       display->name, clk_state_info.clk_state,
-		       clk_state_info.clk_type, rc);
-		return rc;
-	}
-	return 0;
-}
-
-static void dsi_display_ctrl_isr_configure(struct dsi_display *display, bool en)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl)
-			continue;
-		dsi_ctrl_isr_configure(ctrl->ctrl, en);
-	}
-}
-
-int dsi_pre_clkoff_cb(void *priv,
-			   enum dsi_clk_type clk,
-			   enum dsi_lclk_type l_type,
-			   enum dsi_clk_state new_state)
-{
-	int rc = 0, i;
-	struct dsi_display *display = priv;
-	struct dsi_display_ctrl *ctrl;
-
-	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
-		(l_type & DSI_LINK_LP_CLK)) {
-		/*
-		 * If continuous clock is enabled then disable it
-		 * before entering into ULPS Mode.
-		 */
-		if (display->panel->host_config.force_hs_clk_lane)
-			_dsi_display_continuous_clk_ctrl(display, false);
-		/*
-		 * If ULPS feature is enabled, enter ULPS first.
-		 * However, when blanking the panel, we should enter ULPS
-		 * only if ULPS during suspend feature is enabled.
-		 */
-		if (!dsi_panel_initialized(display->panel)) {
-			if (display->panel->ulps_suspend_enabled)
-				rc = dsi_display_set_ulps(display, true);
-		} else if (dsi_panel_ulps_feature_enabled(display->panel)) {
-			rc = dsi_display_set_ulps(display, true);
-		}
-		if (rc)
-			pr_err("%s: failed enable ulps, rc = %d\n",
-			       __func__, rc);
-	}
-
-	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
-		(l_type & DSI_LINK_HS_CLK)) {
-		/*
-		 * PHY clock gating should be disabled before the PLL and the
-		 * branch clocks are turned off. Otherwise, it is possible that
-		 * the clock RCGs may not be turned off correctly resulting
-		 * in clock warnings.
-		 */
-		rc = dsi_display_config_clk_gating(display, false);
-		if (rc)
-			pr_err("[%s] failed to disable clk gating, rc=%d\n",
-					display->name, rc);
-	}
-
-	if ((clk & DSI_CORE_CLK) && (new_state == DSI_CLK_OFF)) {
-		/*
-		 * Enable DSI clamps only if entering idle power collapse or
-		 * when ULPS during suspend is enabled..
-		 */
-		if (dsi_panel_initialized(display->panel) ||
-			display->panel->ulps_suspend_enabled) {
-			dsi_display_phy_idle_off(display);
-			rc = dsi_display_set_clamp(display, true);
-			if (rc)
-				pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
-					__func__, rc);
-
-			rc = dsi_display_phy_reset_config(display, false);
-			if (rc)
-				pr_err("%s: Failed to reset phy, rc=%d\n",
-						__func__, rc);
-		} else {
-			/* Make sure that controller is not in ULPS state when
-			 * the DSI link is not active.
-			 */
-			rc = dsi_display_set_ulps(display, false);
-			if (rc)
-				pr_err("%s: failed to disable ulps. rc=%d\n",
-					__func__, rc);
-		}
-		/* dsi will not be able to serve irqs from here on */
-		dsi_display_ctrl_irq_update(display, false);
-
-		/* cache the MISR values */
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			if (!ctrl->ctrl)
-				continue;
-			dsi_ctrl_cache_misr(ctrl->ctrl);
-		}
-
-	}
-
-	return rc;
-}
-
-int dsi_post_clkon_cb(void *priv,
-			   enum dsi_clk_type clk,
-			   enum dsi_lclk_type l_type,
-			   enum dsi_clk_state curr_state)
-{
-	int rc = 0;
-	struct dsi_display *display = priv;
-	bool mmss_clamp = false;
-
-	if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_LP_CLK)) {
-		mmss_clamp = display->clamp_enabled;
-		/*
-		 * controller setup is needed if coming out of idle
-		 * power collapse with clamps enabled.
-		 */
-		if (mmss_clamp)
-			dsi_display_ctrl_setup(display);
-
-		/*
-		 * Phy setup is needed if coming out of idle
-		 * power collapse with clamps enabled.
-		 */
-		if (display->phy_idle_power_off || mmss_clamp)
-			dsi_display_phy_idle_on(display, mmss_clamp);
-
-		if (display->ulps_enabled && mmss_clamp) {
-			/*
-			 * ULPS Entry Request. This is needed if the lanes were
-			 * in ULPS prior to power collapse, since after
-			 * power collapse and reset, the DSI controller resets
-			 * back to idle state and not ULPS. This ulps entry
-			 * request will transition the state of the DSI
-			 * controller to ULPS which will match the state of the
-			 * DSI phy. This needs to be done prior to disabling
-			 * the DSI clamps.
-			 *
-			 * Also, reset the ulps flag so that ulps_config
-			 * function would reconfigure the controller state to
-			 * ULPS.
-			 */
-			display->ulps_enabled = false;
-			rc = dsi_display_set_ulps(display, true);
-			if (rc) {
-				pr_err("%s: Failed to enter ULPS. rc=%d\n",
-					__func__, rc);
-				goto error;
-			}
-		}
-
-		rc = dsi_display_phy_reset_config(display, true);
-		if (rc) {
-			pr_err("%s: Failed to reset phy, rc=%d\n",
-						__func__, rc);
-			goto error;
-		}
-
-		rc = dsi_display_set_clamp(display, false);
-		if (rc) {
-			pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
-				__func__, rc);
-			goto error;
-		}
-	}
-
-	if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_HS_CLK)) {
-		/*
-		 * Toggle the resync FIFO everytime clock changes, except
-		 * when cont-splash screen transition is going on.
-		 * Toggling resync FIFO during cont splash transition
-		 * can lead to blinks on the display.
-		 */
-		if (!display->is_cont_splash_enabled)
-			dsi_display_toggle_resync_fifo(display);
-
-		if (display->ulps_enabled) {
-			rc = dsi_display_set_ulps(display, false);
-			if (rc) {
-				pr_err("%s: failed to disable ulps, rc= %d\n",
-				       __func__, rc);
-				goto error;
-			}
-		}
-
-		if (display->panel->host_config.force_hs_clk_lane)
-			_dsi_display_continuous_clk_ctrl(display, true);
-
-		rc = dsi_display_config_clk_gating(display, true);
-		if (rc) {
-			pr_err("[%s] failed to enable clk gating %d\n",
-					display->name, rc);
-			goto error;
-		}
-	}
-
-	/* enable dsi to serve irqs */
-	if (clk & DSI_CORE_CLK)
-		dsi_display_ctrl_irq_update(display, true);
-
-error:
-	return rc;
-}
-
-int dsi_post_clkoff_cb(void *priv,
-			    enum dsi_clk_type clk_type,
-			    enum dsi_lclk_type l_type,
-			    enum dsi_clk_state curr_state)
-{
-	int rc = 0;
-	struct dsi_display *display = priv;
-
-	if (!display) {
-		pr_err("%s: Invalid arg\n", __func__);
-		return -EINVAL;
-	}
-
-	if ((clk_type & DSI_CORE_CLK) &&
-	    (curr_state == DSI_CLK_OFF)) {
-		rc = dsi_display_phy_power_off(display);
-		if (rc)
-			pr_err("[%s] failed to power off PHY, rc=%d\n",
-				   display->name, rc);
-
-		rc = dsi_display_ctrl_power_off(display);
-		if (rc)
-			pr_err("[%s] failed to power DSI vregs, rc=%d\n",
-				   display->name, rc);
-	}
-	return rc;
-}
-
-int dsi_pre_clkon_cb(void *priv,
-			  enum dsi_clk_type clk_type,
-			  enum dsi_lclk_type l_type,
-			  enum dsi_clk_state new_state)
-{
-	int rc = 0;
-	struct dsi_display *display = priv;
-
-	if (!display) {
-		pr_err("%s: invalid input\n", __func__);
-		return -EINVAL;
-	}
-
-	if ((clk_type & DSI_CORE_CLK) && (new_state == DSI_CLK_ON)) {
-		/*
-		 * Enable DSI core power
-		 * 1.> PANEL_PM are controlled as part of
-		 *     panel_power_ctrl. Needed not be handled here.
-		 * 2.> CORE_PM are controlled by dsi clk manager.
-		 * 3.> CTRL_PM need to be enabled/disabled
-		 *     only during unblank/blank. Their state should
-		 *     not be changed during static screen.
-		 */
-
-		pr_debug("updating power states for ctrl and phy\n");
-		rc = dsi_display_ctrl_power_on(display);
-		if (rc) {
-			pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
-				   display->name, rc);
-			return rc;
-		}
-
-		rc = dsi_display_phy_power_on(display);
-		if (rc) {
-			pr_err("[%s] failed to power on dsi phy, rc = %d\n",
-				   display->name, rc);
-			return rc;
-		}
-
-		pr_debug("%s: Enable DSI core power\n", __func__);
-	}
-
-	return rc;
-}
-
-static void __set_lane_map_v2(u8 *lane_map_v2,
-	enum dsi_phy_data_lanes lane0,
-	enum dsi_phy_data_lanes lane1,
-	enum dsi_phy_data_lanes lane2,
-	enum dsi_phy_data_lanes lane3)
-{
-	lane_map_v2[DSI_LOGICAL_LANE_0] = lane0;
-	lane_map_v2[DSI_LOGICAL_LANE_1] = lane1;
-	lane_map_v2[DSI_LOGICAL_LANE_2] = lane2;
-	lane_map_v2[DSI_LOGICAL_LANE_3] = lane3;
-}
-
-static int dsi_display_parse_lane_map(struct dsi_display *display)
-{
-	int rc = 0, i = 0;
-	const char *data;
-	u8 temp[DSI_LANE_MAX - 1];
-
-	if (!display) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	/* lane-map-v2 supersedes lane-map-v1 setting */
-	rc = of_property_read_u8_array(display->pdev->dev.of_node,
-		"qcom,lane-map-v2", temp, (DSI_LANE_MAX - 1));
-	if (!rc) {
-		for (i = DSI_LOGICAL_LANE_0; i < (DSI_LANE_MAX - 1); i++)
-			display->lane_map.lane_map_v2[i] = BIT(temp[i]);
-		return 0;
-	} else if (rc != EINVAL) {
-		pr_debug("Incorrect mapping, configure default\n");
-		goto set_default;
-	}
-
-	/* lane-map older version, for DSI controller version < 2.0 */
-	data = of_get_property(display->pdev->dev.of_node,
-		"qcom,lane-map", NULL);
-	if (!data)
-		goto set_default;
-
-	if (!strcmp(data, "lane_map_3012")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_3012;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_1,
-			DSI_PHYSICAL_LANE_2,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_0);
-	} else if (!strcmp(data, "lane_map_2301")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_2301;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_2,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_0,
-			DSI_PHYSICAL_LANE_1);
-	} else if (!strcmp(data, "lane_map_1230")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_1230;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_0,
-			DSI_PHYSICAL_LANE_1,
-			DSI_PHYSICAL_LANE_2);
-	} else if (!strcmp(data, "lane_map_0321")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_0321;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_0,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_2,
-			DSI_PHYSICAL_LANE_1);
-	} else if (!strcmp(data, "lane_map_1032")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_1032;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_1,
-			DSI_PHYSICAL_LANE_0,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_2);
-	} else if (!strcmp(data, "lane_map_2103")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_2103;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_2,
-			DSI_PHYSICAL_LANE_1,
-			DSI_PHYSICAL_LANE_0,
-			DSI_PHYSICAL_LANE_3);
-	} else if (!strcmp(data, "lane_map_3210")) {
-		display->lane_map.lane_map_v1 = DSI_LANE_MAP_3210;
-		__set_lane_map_v2(display->lane_map.lane_map_v2,
-			DSI_PHYSICAL_LANE_3,
-			DSI_PHYSICAL_LANE_2,
-			DSI_PHYSICAL_LANE_1,
-			DSI_PHYSICAL_LANE_0);
-	} else {
-		pr_warn("%s: invalid lane map %s specified. defaulting to lane_map0123\n",
-			__func__, data);
-		goto set_default;
-	}
-	return 0;
-
-set_default:
-	/* default lane mapping */
-	__set_lane_map_v2(display->lane_map.lane_map_v2, DSI_PHYSICAL_LANE_0,
-		DSI_PHYSICAL_LANE_1, DSI_PHYSICAL_LANE_2, DSI_PHYSICAL_LANE_3);
-	display->lane_map.lane_map_v1 = DSI_LANE_MAP_0123;
-	return 0;
-}
-
-static int dsi_display_get_phandle_index(
-			struct dsi_display *display,
-			const char *propname, int count, int index)
-{
-	struct device_node *disp_node = display->panel_node;
-	u32 *val = NULL;
-	int rc = 0;
-
-	val = kcalloc(count, sizeof(*val), GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(val)) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	if (index >= count)
-		goto end;
-
-	if (display->fw)
-		rc = dsi_parser_read_u32_array(display->parser_node,
-			propname, val, count);
-	else
-		rc = of_property_read_u32_array(disp_node, propname,
-			val, count);
-	if (rc)
-		goto end;
-
-	rc = val[index];
-
-	pr_debug("%s index=%d\n", propname, rc);
-end:
-	kfree(val);
-	return rc;
-}
-
-static int dsi_display_get_phandle_count(struct dsi_display *display,
-			const char *propname)
-{
-	if (display->fw)
-		return dsi_parser_count_u32_elems(display->parser_node,
-				propname);
-	else
-		return of_property_count_u32_elems(display->panel_node,
-				propname);
-}
-
-static int dsi_display_parse_dt(struct dsi_display *display)
-{
-	int i, rc = 0;
-	u32 phy_count = 0;
-	struct device_node *of_node = display->pdev->dev.of_node;
-	char *dsi_ctrl_name, *dsi_phy_name;
-
-	if (!strcmp(display->display_type, "primary")) {
-		dsi_ctrl_name = "qcom,dsi-ctrl-num";
-		dsi_phy_name = "qcom,dsi-phy-num";
-	} else {
-		dsi_ctrl_name = "qcom,dsi-sec-ctrl-num";
-		dsi_phy_name = "qcom,dsi-sec-phy-num";
-	}
-
-	display->ctrl_count = dsi_display_get_phandle_count(display,
-					dsi_ctrl_name);
-	phy_count = dsi_display_get_phandle_count(display, dsi_phy_name);
-
-	pr_debug("ctrl count=%d, phy count=%d\n",
-			display->ctrl_count, phy_count);
-
-	if (!phy_count || !display->ctrl_count) {
-		pr_err("no ctrl/phys found\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	if (phy_count != display->ctrl_count) {
-		pr_err("different ctrl and phy counts\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		struct dsi_display_ctrl *ctrl = &display->ctrl[i];
-		int index;
-		index = dsi_display_get_phandle_index(display, dsi_ctrl_name,
-			display->ctrl_count, i);
-		ctrl->ctrl_of_node = of_parse_phandle(of_node,
-				"qcom,dsi-ctrl", index);
-		of_node_put(ctrl->ctrl_of_node);
-
-		index = dsi_display_get_phandle_index(display, dsi_phy_name,
-			display->ctrl_count, i);
-		ctrl->phy_of_node = of_parse_phandle(of_node,
-				"qcom,dsi-phy", index);
-		of_node_put(ctrl->phy_of_node);
-	}
-
-	/* Parse TE data */
-	dsi_display_parse_te_data(display);
-
-	/* Parse all external bridges from port 0 */
-	display_for_each_ctrl(i, display) {
-		display->ext_bridge[i].node_of =
-			of_graph_get_remote_node(of_node, 0, i);
-		if (display->ext_bridge[i].node_of)
-			display->ext_bridge_cnt++;
-		else
-			break;
-	}
-
-	pr_debug("success\n");
-error:
-	return rc;
-}
-
-static int dsi_display_res_init(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		ctrl->ctrl = dsi_ctrl_get(ctrl->ctrl_of_node);
-		if (IS_ERR_OR_NULL(ctrl->ctrl)) {
-			rc = PTR_ERR(ctrl->ctrl);
-			pr_err("failed to get dsi controller, rc=%d\n", rc);
-			ctrl->ctrl = NULL;
-			goto error_ctrl_put;
-		}
-
-		ctrl->phy = dsi_phy_get(ctrl->phy_of_node);
-		if (IS_ERR_OR_NULL(ctrl->phy)) {
-			rc = PTR_ERR(ctrl->phy);
-			pr_err("failed to get phy controller, rc=%d\n", rc);
-			dsi_ctrl_put(ctrl->ctrl);
-			ctrl->phy = NULL;
-			goto error_ctrl_put;
-		}
-	}
-
-	display->panel = dsi_panel_get(&display->pdev->dev,
-				display->panel_node,
-				display->parser_node,
-				display->display_type,
-				display->cmdline_topology);
-	if (IS_ERR_OR_NULL(display->panel)) {
-		rc = PTR_ERR(display->panel);
-		pr_err("failed to get panel, rc=%d\n", rc);
-		display->panel = NULL;
-		goto error_ctrl_put;
-	}
-
-	rc = dsi_display_parse_lane_map(display);
-	if (rc) {
-		pr_err("Lane map not found, rc=%d\n", rc);
-		goto error_ctrl_put;
-	}
-
-	rc = dsi_display_clocks_init(display);
-	if (rc) {
-		pr_err("Failed to parse clock data, rc=%d\n", rc);
-		goto error_ctrl_put;
-	}
-
-	return 0;
-error_ctrl_put:
-	for (i = i - 1; i >= 0; i--) {
-		ctrl = &display->ctrl[i];
-		dsi_ctrl_put(ctrl->ctrl);
-		dsi_phy_put(ctrl->phy);
-	}
-	return rc;
-}
-
-static int dsi_display_res_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	rc = dsi_display_clocks_deinit(display);
-	if (rc)
-		pr_err("clocks deinit failed, rc=%d\n", rc);
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_phy_put(ctrl->phy);
-		dsi_ctrl_put(ctrl->ctrl);
-	}
-
-	if (display->panel)
-		dsi_panel_put(display->panel);
-
-	return rc;
-}
-
-static int dsi_display_validate_mode_set(struct dsi_display *display,
-					 struct dsi_display_mode *mode,
-					 u32 flags)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	/*
-	 * To set a mode:
-	 * 1. Controllers should be turned off.
-	 * 2. Link clocks should be off.
-	 * 3. Phy should be disabled.
-	 */
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if ((ctrl->power_state > DSI_CTRL_POWER_VREG_ON) ||
-		    (ctrl->phy_enabled)) {
-			rc = -EINVAL;
-			goto error;
-		}
-	}
-
-error:
-	return rc;
-}
-
-static bool dsi_display_is_seamless_dfps_possible(
-		const struct dsi_display *display,
-		const struct dsi_display_mode *tgt,
-		const enum dsi_dfps_type dfps_type)
-{
-	struct dsi_display_mode *cur;
-
-	if (!display || !tgt || !display->panel) {
-		pr_err("Invalid params\n");
-		return false;
-	}
-
-	cur = display->panel->cur_mode;
-
-	if (cur->timing.h_active != tgt->timing.h_active) {
-		pr_debug("timing.h_active differs %d %d\n",
-				cur->timing.h_active, tgt->timing.h_active);
-		return false;
-	}
-
-	if (cur->timing.h_back_porch != tgt->timing.h_back_porch) {
-		pr_debug("timing.h_back_porch differs %d %d\n",
-				cur->timing.h_back_porch,
-				tgt->timing.h_back_porch);
-		return false;
-	}
-
-	if (cur->timing.h_sync_width != tgt->timing.h_sync_width) {
-		pr_debug("timing.h_sync_width differs %d %d\n",
-				cur->timing.h_sync_width,
-				tgt->timing.h_sync_width);
-		return false;
-	}
-
-	if (cur->timing.h_front_porch != tgt->timing.h_front_porch) {
-		pr_debug("timing.h_front_porch differs %d %d\n",
-				cur->timing.h_front_porch,
-				tgt->timing.h_front_porch);
-		if (dfps_type != DSI_DFPS_IMMEDIATE_HFP)
-			return false;
-	}
-
-	if (cur->timing.h_skew != tgt->timing.h_skew) {
-		pr_debug("timing.h_skew differs %d %d\n",
-				cur->timing.h_skew,
-				tgt->timing.h_skew);
-		return false;
-	}
-
-	/* skip polarity comparison */
-
-	if (cur->timing.v_active != tgt->timing.v_active) {
-		pr_debug("timing.v_active differs %d %d\n",
-				cur->timing.v_active,
-				tgt->timing.v_active);
-		return false;
-	}
-
-	if (cur->timing.v_back_porch != tgt->timing.v_back_porch) {
-		pr_debug("timing.v_back_porch differs %d %d\n",
-				cur->timing.v_back_porch,
-				tgt->timing.v_back_porch);
-		return false;
-	}
-
-	if (cur->timing.v_sync_width != tgt->timing.v_sync_width) {
-		pr_debug("timing.v_sync_width differs %d %d\n",
-				cur->timing.v_sync_width,
-				tgt->timing.v_sync_width);
-		return false;
-	}
-
-	if (cur->timing.v_front_porch != tgt->timing.v_front_porch) {
-		pr_debug("timing.v_front_porch differs %d %d\n",
-				cur->timing.v_front_porch,
-				tgt->timing.v_front_porch);
-		if (dfps_type != DSI_DFPS_IMMEDIATE_VFP)
-			return false;
-	}
-
-	/* skip polarity comparison */
-
-	if (cur->timing.refresh_rate == tgt->timing.refresh_rate)
-		pr_debug("timing.refresh_rate identical %d %d\n",
-				cur->timing.refresh_rate,
-				tgt->timing.refresh_rate);
-
-	if (cur->pixel_clk_khz != tgt->pixel_clk_khz)
-		pr_debug("pixel_clk_khz differs %d %d\n",
-				cur->pixel_clk_khz, tgt->pixel_clk_khz);
-
-	if (cur->dsi_mode_flags != tgt->dsi_mode_flags)
-		pr_debug("flags differs %d %d\n",
-				cur->dsi_mode_flags, tgt->dsi_mode_flags);
-
-	return true;
-}
-
-static int dsi_display_dfps_update(struct dsi_display *display,
-				   struct dsi_display_mode *dsi_mode)
-{
-	struct dsi_mode_info *timing;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-	struct dsi_display_mode *panel_mode;
-	struct dsi_dfps_capabilities dfps_caps;
-	int rc = 0;
-	int i = 0;
-
-	if (!display || !dsi_mode || !display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-	timing = &dsi_mode->timing;
-
-	dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (!dfps_caps.dfps_support) {
-		pr_err("dfps not supported\n");
-		return -ENOTSUPP;
-	}
-
-	if (dfps_caps.type == DSI_DFPS_IMMEDIATE_CLK) {
-		pr_err("dfps clock method not supported\n");
-		return -ENOTSUPP;
-	}
-
-	/* For split DSI, update the clock master first */
-
-	pr_debug("configuring seamless dynamic fps\n\n");
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-
-	m_ctrl = &display->ctrl[display->clk_master_idx];
-	rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
-	if (rc) {
-		pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
-				display->name, i, rc);
-		goto error;
-	}
-
-	/* Update the rest of the controllers */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_async_timing_update(ctrl->ctrl, timing);
-		if (rc) {
-			pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
-					display->name, i, rc);
-			goto error;
-		}
-	}
-
-	panel_mode = display->panel->cur_mode;
-	memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
-	/*
-	 * dsi_mode_flags flags are used to communicate with other drm driver
-	 * components, and are transient. They aren't inherently part of the
-	 * display panel's mode and shouldn't be saved into the cached currently
-	 * active mode.
-	 */
-	panel_mode->dsi_mode_flags = 0;
-
-error:
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-	return rc;
-}
-
-static int dsi_display_dfps_calc_front_porch(
-		u32 old_fps,
-		u32 new_fps,
-		u32 a_total,
-		u32 b_total,
-		u32 b_fp,
-		u32 *b_fp_out)
-{
-	s32 b_fp_new;
-	int add_porches, diff;
-
-	if (!b_fp_out) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!a_total || !new_fps) {
-		pr_err("Invalid pixel total or new fps in mode request\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Keep clock, other porches constant, use new fps, calc front porch
-	 * new_vtotal = old_vtotal * (old_fps / new_fps )
-	 * new_vfp - old_vfp = new_vtotal - old_vtotal
-	 * new_vfp = old_vfp + old_vtotal * ((old_fps - new_fps)/ new_fps)
-	 */
-	diff = abs(old_fps - new_fps);
-	add_porches = mult_frac(b_total, diff, new_fps);
-
-	if (old_fps > new_fps)
-		b_fp_new = b_fp + add_porches;
-	else
-		b_fp_new = b_fp - add_porches;
-
-	pr_debug("fps %u a %u b %u b_fp %u new_fp %d\n",
-			new_fps, a_total, b_total, b_fp, b_fp_new);
-
-	if (b_fp_new < 0) {
-		pr_err("Invalid new_hfp calcluated%d\n", b_fp_new);
-		return -EINVAL;
-	}
-
-	/**
-	 * TODO: To differentiate from clock method when communicating to the
-	 * other components, perhaps we should set clk here to original value
-	 */
-	*b_fp_out = b_fp_new;
-
-	return 0;
-}
-
-/**
- * dsi_display_get_dfps_timing() - Get the new dfps values.
- * @display:         DSI display handle.
- * @adj_mode:        Mode value structure to be changed.
- *                   It contains old timing values and latest fps value.
- *                   New timing values are updated based on new fps.
- * @curr_refresh_rate:  Current fps rate.
- *                      If zero , current fps rate is taken from
- *                      display->panel->cur_mode.
- * Return: error code.
- */
-static int dsi_display_get_dfps_timing(struct dsi_display *display,
-			struct dsi_display_mode *adj_mode,
-				u32 curr_refresh_rate)
-{
-	struct dsi_dfps_capabilities dfps_caps;
-	struct dsi_display_mode per_ctrl_mode;
-	struct dsi_mode_info *timing;
-	struct dsi_ctrl *m_ctrl;
-
-	int rc = 0;
-
-	if (!display || !adj_mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-	m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
-
-	dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (!dfps_caps.dfps_support) {
-		pr_err("dfps not supported by panel\n");
-		return -EINVAL;
-	}
-
-	per_ctrl_mode = *adj_mode;
-	adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
-
-	if (!curr_refresh_rate) {
-		if (!dsi_display_is_seamless_dfps_possible(display,
-				&per_ctrl_mode, dfps_caps.type)) {
-			pr_err("seamless dynamic fps not supported for mode\n");
-			return -EINVAL;
-		}
-		if (display->panel->cur_mode) {
-			curr_refresh_rate =
-				display->panel->cur_mode->timing.refresh_rate;
-		} else {
-			pr_err("cur_mode is not initialized\n");
-			return -EINVAL;
-		}
-	}
-	/* TODO: Remove this direct reference to the dsi_ctrl */
-	timing = &per_ctrl_mode.timing;
-
-	switch (dfps_caps.type) {
-	case DSI_DFPS_IMMEDIATE_VFP:
-		rc = dsi_display_dfps_calc_front_porch(
-				curr_refresh_rate,
-				timing->refresh_rate,
-				DSI_H_TOTAL_DSC(timing),
-				DSI_V_TOTAL(timing),
-				timing->v_front_porch,
-				&adj_mode->timing.v_front_porch);
-		break;
-
-	case DSI_DFPS_IMMEDIATE_HFP:
-		rc = dsi_display_dfps_calc_front_porch(
-				curr_refresh_rate,
-				timing->refresh_rate,
-				DSI_V_TOTAL(timing),
-				DSI_H_TOTAL_DSC(timing),
-				timing->h_front_porch,
-				&adj_mode->timing.h_front_porch);
-		if (!rc)
-			adj_mode->timing.h_front_porch *= display->ctrl_count;
-		break;
-
-	default:
-		pr_err("Unsupported DFPS mode %d\n", dfps_caps.type);
-		rc = -ENOTSUPP;
-	}
-
-	return rc;
-}
-
-static bool dsi_display_validate_mode_seamless(struct dsi_display *display,
-		struct dsi_display_mode *adj_mode)
-{
-	int rc = 0;
-
-	if (!display || !adj_mode) {
-		pr_err("Invalid params\n");
-		return false;
-	}
-
-	/* Currently the only seamless transition is dynamic fps */
-	rc = dsi_display_get_dfps_timing(display, adj_mode, 0);
-	if (rc) {
-		pr_debug("Dynamic FPS not supported for seamless\n");
-	} else {
-		pr_debug("Mode switch is seamless Dynamic FPS\n");
-		adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DFPS |
-				DSI_MODE_FLAG_VBLANK_PRE_MODESET;
-	}
-
-	return rc;
-}
-
-static int dsi_display_set_mode_sub(struct dsi_display *display,
-				    struct dsi_display_mode *mode,
-				    u32 flags)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-	struct dsi_display_mode_priv_info *priv_info;
-
-	priv_info = mode->priv_info;
-	if (!priv_info) {
-		pr_err("[%s] failed to get private info of the display mode\n",
-			display->name);
-		return -EINVAL;
-	}
-
-	rc = dsi_panel_get_host_cfg_for_mode(display->panel,
-					     mode,
-					     &display->config);
-	if (rc) {
-		pr_err("[%s] failed to get host config for mode, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	memcpy(&display->config.lane_map, &display->lane_map,
-	       sizeof(display->lane_map));
-
-	if (mode->dsi_mode_flags &
-			(DSI_MODE_FLAG_DFPS | DSI_MODE_FLAG_VRR)) {
-		rc = dsi_display_dfps_update(display, mode);
-		if (rc) {
-			pr_err("[%s]DSI dfps update failed, rc=%d\n",
-					display->name, rc);
-			goto error;
-		}
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_update_host_config(ctrl->ctrl, &display->config,
-				mode->dsi_mode_flags, display->dsi_clk_handle);
-		if (rc) {
-			pr_err("[%s] failed to update ctrl config, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
-	if (priv_info->phy_timing_len) {
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			 rc = dsi_phy_set_timing_params(ctrl->phy,
-				priv_info->phy_timing_val,
-				priv_info->phy_timing_len);
-			if (rc)
-				pr_err("failed to add DSI PHY timing params\n");
-		}
-	}
-error:
-	return rc;
-}
-
-/**
- * _dsi_display_dev_init - initializes the display device
- * Initialization will acquire references to the resources required for the
- * display hardware to function.
- * @display:         Handle to the display
- * Returns:          Zero on success
- */
-static int _dsi_display_dev_init(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("invalid display\n");
-		return -EINVAL;
-	}
-
-	if (!display->panel_node)
-		return 0;
-
-	mutex_lock(&display->display_lock);
-
-	display->parser = dsi_parser_get(&display->pdev->dev);
-	if (display->fw && display->parser)
-		display->parser_node = dsi_parser_get_head_node(
-				display->parser, display->fw->data,
-				display->fw->size);
-
-	rc = dsi_display_parse_dt(display);
-	if (rc) {
-		pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	rc = dsi_display_res_init(display);
-	if (rc) {
-		pr_err("[%s] failed to initialize resources, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-/**
- * _dsi_display_dev_deinit - deinitializes the display device
- * All the resources acquired during device init will be released.
- * @display:        Handle to the display
- * Returns:         Zero on success
- */
-static int _dsi_display_dev_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("invalid display\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_res_deinit(display);
-	if (rc)
-		pr_err("[%s] failed to deinitialize resource, rc=%d\n",
-		       display->name, rc);
-
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-/**
- * dsi_display_cont_splash_config() - Initialize resources for continuous splash
- * @dsi_display:    Pointer to dsi display
- * Returns:     Zero on success
- */
-int dsi_display_cont_splash_config(void *dsi_display)
-{
-	struct dsi_display *display = dsi_display;
-	int rc = 0;
-
-	/* Vote for gdsc required to read register address space */
-	if (!display) {
-		pr_err("invalid input display param\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	/* Vote for gdsc required to read register address space */
-	display->cont_splash_client = sde_power_client_create(display->phandle,
-						"cont_splash_client");
-	rc = sde_power_resource_enable(display->phandle,
-			display->cont_splash_client, true);
-	if (rc) {
-		pr_err("failed to vote gdsc for continuous splash, rc=%d\n",
-							rc);
-		mutex_unlock(&display->display_lock);
-		return -EINVAL;
-	}
-
-	/* Verify whether continuous splash is enabled or not */
-	display->is_cont_splash_enabled =
-		dsi_display_get_cont_splash_status(display);
-	if (!display->is_cont_splash_enabled) {
-		pr_err("Continuous splash is not enabled\n");
-		goto splash_disabled;
-	}
-
-	/* Update splash status for clock manager */
-	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
-				display->is_cont_splash_enabled);
-
-	/* Set up ctrl isr before enabling core clk */
-	dsi_display_ctrl_isr_configure(display, true);
-
-	/* Vote for Core clk and link clk. Votes on ctrl and phy
-	 * regulator are inplicit from  pre clk on callback
-	 */
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
-		       display->name, rc);
-		goto clk_manager_update;
-	}
-
-	/* Vote on panel regulator will be removed during suspend path */
-	rc = dsi_pwr_enable_regulator(&display->panel->power_info, true);
-	if (rc) {
-		pr_err("[%s] failed to enable vregs, rc=%d\n",
-				display->panel->name, rc);
-		goto clks_disabled;
-	}
-
-	dsi_config_host_engine_state_for_cont_splash(display);
-	mutex_unlock(&display->display_lock);
-
-	/* Set the current brightness level */
-	dsi_panel_bl_handoff(display->panel);
-
-	return rc;
-
-clks_disabled:
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-
-clk_manager_update:
-	dsi_display_ctrl_isr_configure(display, false);
-	/* Update splash status for clock manager */
-	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
-				false);
-
-splash_disabled:
-	(void)sde_power_resource_enable(display->phandle,
-			display->cont_splash_client, false);
-	display->is_cont_splash_enabled = false;
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-/**
- * dsi_display_splash_res_cleanup() - cleanup for continuous splash
- * @display:    Pointer to dsi display
- * Returns:     Zero on success
- */
-int dsi_display_splash_res_cleanup(struct  dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display->is_cont_splash_enabled)
-		return 0;
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-	if (rc)
-		pr_err("[%s] failed to disable DSI link clocks, rc=%d\n",
-		       display->name, rc);
-
-	rc = sde_power_resource_enable(display->phandle,
-			display->cont_splash_client, false);
-	if (rc)
-		pr_err("failed to remove vote on gdsc for continuous splash, rc=%d\n",
-				rc);
-
-	display->is_cont_splash_enabled = false;
-	/* Update splash status for clock manager */
-	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
-				display->is_cont_splash_enabled);
-
-	return rc;
-}
-
-static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
-{
-	int rc = 0;
-
-	rc = dsi_display_link_clk_force_update_ctrl(display->dsi_clk_handle);
-
-	if (!rc) {
-		pr_info("dsi bit clk has been configured to %d\n",
-			display->cached_clk_rate);
-
-		atomic_set(&display->clkrate_change_pending, 0);
-	} else {
-		pr_err("Failed to configure dsi bit clock '%d'. rc = %d\n",
-			display->cached_clk_rate, rc);
-	}
-
-	return rc;
-}
-
-static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
-					u32 bit_clk_rate)
-{
-	int rc = 0;
-	int i;
-
-	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
-	if (!display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (bit_clk_rate == 0) {
-		pr_err("Invalid bit clock rate\n");
-		return -EINVAL;
-	}
-
-	display->config.bit_clk_rate_hz_override = bit_clk_rate;
-
-	display_for_each_ctrl(i, display) {
-		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
-		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
-		u32 num_of_lanes = 0;
-		u32 bpp = 3;
-		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
-		struct dsi_host_common_cfg *host_cfg;
-
-		mutex_lock(&ctrl->ctrl_lock);
-
-		host_cfg = &display->panel->host_config;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
-			num_of_lanes++;
-
-		if (num_of_lanes == 0) {
-			pr_err("Invalid lane count\n");
-			rc = -EINVAL;
-			goto error;
-		}
-
-		bit_rate = display->config.bit_clk_rate_hz_override *
-						num_of_lanes;
-		bit_rate_per_lane = bit_rate;
-		do_div(bit_rate_per_lane, num_of_lanes);
-		pclk_rate = bit_rate;
-		do_div(pclk_rate, (8 * bpp));
-		byte_clk_rate = bit_rate_per_lane;
-		do_div(byte_clk_rate, 8);
-		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
-			 bit_rate, bit_rate_per_lane);
-		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
-			  byte_clk_rate, pclk_rate);
-
-		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
-		ctrl->clk_freq.pix_clk_rate = pclk_rate;
-		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
-			ctrl->clk_freq, ctrl->cell_index);
-		if (rc) {
-			pr_err("Failed to update link frequencies\n");
-			goto error;
-		}
-
-		ctrl->host_config.bit_clk_rate_hz_override = bit_clk_rate;
-error:
-		mutex_unlock(&ctrl->ctrl_lock);
-
-		/* TODO: recover ctrl->clk_freq in case of failure */
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static ssize_t dynamic_dsi_clock_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	int rc = 0;
-	struct dsi_display *display;
-	struct dsi_display_ctrl *m_ctrl;
-	struct dsi_ctrl *ctrl;
-
-	display = dev_get_drvdata(dev);
-	if (!display) {
-		pr_err("Invalid display\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	ctrl = m_ctrl->ctrl;
-	if (ctrl)
-		display->cached_clk_rate = ctrl->clk_freq.byte_clk_rate
-					     * 8;
-
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", display->cached_clk_rate);
-	pr_debug("%s: read dsi clk rate %d\n", __func__,
-		display->cached_clk_rate);
-
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-static ssize_t dynamic_dsi_clock_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	int rc = 0;
-	int clk_rate;
-	struct dsi_display *display;
-
-	display = dev_get_drvdata(dev);
-	if (!display) {
-		pr_err("Invalid display\n");
-		return -EINVAL;
-	}
-
-	rc = kstrtoint(buf, DSI_CLOCK_BITRATE_RADIX, &clk_rate);
-	if (rc) {
-		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
-		return rc;
-	}
-
-	if (clk_rate <= 0) {
-		pr_err("%s: bitrate should be greater than 0\n", __func__);
-		return -EINVAL;
-	}
-
-	if (clk_rate == display->cached_clk_rate) {
-		pr_info("%s: ignore duplicated DSI clk setting\n", __func__);
-		return count;
-	}
-
-	pr_info("%s: bitrate param value: '%d'\n", __func__, clk_rate);
-
-	mutex_lock(&display->display_lock);
-
-	display->cached_clk_rate = clk_rate;
-	rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
-	if (!rc) {
-		pr_info("%s: bit clk is ready to be configured to '%d'\n",
-			__func__, clk_rate);
-	} else {
-		pr_err("%s: Failed to prepare to configure '%d'. rc = %d\n",
-			__func__, clk_rate, rc);
-		/*Caching clock failed, so don't go on doing so.*/
-		atomic_set(&display->clkrate_change_pending, 0);
-		display->cached_clk_rate = 0;
-
-		mutex_unlock(&display->display_lock);
-
-		return rc;
-	}
-	atomic_set(&display->clkrate_change_pending, 1);
-
-	mutex_unlock(&display->display_lock);
-
-	return count;
-
-}
-
-static DEVICE_ATTR_RW(dynamic_dsi_clock);
-
-static struct attribute *dynamic_dsi_clock_fs_attrs[] = {
-	&dev_attr_dynamic_dsi_clock.attr,
-	NULL,
-};
-static struct attribute_group dynamic_dsi_clock_fs_attrs_group = {
-	.attrs = dynamic_dsi_clock_fs_attrs,
-};
-
-static int dsi_display_sysfs_init(struct dsi_display *display)
-{
-	int rc = 0;
-	struct device *dev = &display->pdev->dev;
-
-	if (display->panel->panel_mode == DSI_OP_CMD_MODE)
-		rc = sysfs_create_group(&dev->kobj,
-			&dynamic_dsi_clock_fs_attrs_group);
-
-	return rc;
-
-}
-
-static int dsi_display_sysfs_deinit(struct dsi_display *display)
-{
-	struct device *dev = &display->pdev->dev;
-
-	if (display->panel->panel_mode == DSI_OP_CMD_MODE)
-		sysfs_remove_group(&dev->kobj,
-			&dynamic_dsi_clock_fs_attrs_group);
-
-	return 0;
-
-}
-
-/**
- * dsi_display_bind - bind dsi device with controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- * Returns:     Zero on success
- */
-static int dsi_display_bind(struct device *dev,
-		struct device *master,
-		void *data)
-{
-	struct dsi_display_ctrl *display_ctrl;
-	struct drm_device *drm;
-	struct dsi_display *display;
-	struct dsi_clk_info info;
-	struct clk_ctrl_cb clk_cb;
-	struct msm_drm_private *priv;
-	void *handle = NULL;
-	struct platform_device *pdev = to_platform_device(dev);
-	char *client1 = "dsi_clk_client";
-	char *client2 = "mdp_event_client";
-	char dsi_client_name[DSI_CLIENT_NAME_SIZE];
-	int i, rc = 0;
-
-	if (!dev || !pdev || !master) {
-		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
-				dev, pdev, master);
-		return -EINVAL;
-	}
-
-	drm = dev_get_drvdata(master);
-	display = platform_get_drvdata(pdev);
-	if (!drm || !display) {
-		pr_err("invalid param(s), drm %pK, display %pK\n",
-				drm, display);
-		return -EINVAL;
-	}
-	priv = drm->dev_private;
-	if (!display->panel_node)
-		return 0;
-
-	if (!display->fw)
-		display->name = display->panel_node->name;
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_debugfs_init(display);
-	if (rc) {
-		pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	atomic_set(&display->clkrate_change_pending, 0);
-	display->cached_clk_rate = 0;
-
-	rc = dsi_display_sysfs_init(display);
-	if (rc) {
-		pr_err("[%s] sysfs init failed, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	memset(&info, 0x0, sizeof(info));
-
-	display_for_each_ctrl(i, display) {
-		display_ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
-		if (rc) {
-			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
-			       display->name, i, rc);
-			goto error_ctrl_deinit;
-		}
-		display_ctrl->ctrl->horiz_index = i;
-
-		rc = dsi_phy_drv_init(display_ctrl->phy);
-		if (rc) {
-			pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
-				display->name, i, rc);
-			(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-			goto error_ctrl_deinit;
-		}
-
-		memcpy(&info.c_clks[i],
-				(&display_ctrl->ctrl->clk_info.core_clks),
-				sizeof(struct dsi_core_clk_info));
-		memcpy(&info.l_hs_clks[i],
-				(&display_ctrl->ctrl->clk_info.hs_link_clks),
-				sizeof(struct dsi_link_hs_clk_info));
-		memcpy(&info.l_lp_clks[i],
-				(&display_ctrl->ctrl->clk_info.lp_link_clks),
-				sizeof(struct dsi_link_lp_clk_info));
-
-		info.c_clks[i].phandle = &priv->phandle;
-		info.bus_handle[i] =
-			display_ctrl->ctrl->axi_bus_info.bus_handle;
-		info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
-		snprintf(dsi_client_name, DSI_CLIENT_NAME_SIZE,
-						"dsi_core_client%u", i);
-		info.c_clks[i].dsi_core_client = sde_power_client_create(
-				info.c_clks[i].phandle, dsi_client_name);
-		if (IS_ERR_OR_NULL(info.c_clks[i].dsi_core_client)) {
-			pr_err("[%s] client creation failed for ctrl[%d]\n",
-					dsi_client_name, i);
-			goto error_ctrl_deinit;
-		}
-	}
-
-	display->phandle = &priv->phandle;
-	info.pre_clkoff_cb = dsi_pre_clkoff_cb;
-	info.pre_clkon_cb = dsi_pre_clkon_cb;
-	info.post_clkoff_cb = dsi_post_clkoff_cb;
-	info.post_clkon_cb = dsi_post_clkon_cb;
-	info.priv_data = display;
-	info.master_ndx = display->clk_master_idx;
-	info.dsi_ctrl_count = display->ctrl_count;
-	snprintf(info.name, MAX_STRING_LEN,
-			"DSI_MNGR-%s", display->name);
-
-	display->clk_mngr = dsi_display_clk_mngr_register(&info);
-	if (IS_ERR_OR_NULL(display->clk_mngr)) {
-		rc = PTR_ERR(display->clk_mngr);
-		display->clk_mngr = NULL;
-		pr_err("dsi clock registration failed, rc = %d\n", rc);
-		goto error_ctrl_deinit;
-	}
-
-	handle = dsi_register_clk_handle(display->clk_mngr, client1);
-	if (IS_ERR_OR_NULL(handle)) {
-		rc = PTR_ERR(handle);
-		pr_err("failed to register %s client, rc = %d\n",
-		       client1, rc);
-		goto error_clk_deinit;
-	} else {
-		display->dsi_clk_handle = handle;
-	}
-
-	handle = dsi_register_clk_handle(display->clk_mngr, client2);
-	if (IS_ERR_OR_NULL(handle)) {
-		rc = PTR_ERR(handle);
-		pr_err("failed to register %s client, rc = %d\n",
-		       client2, rc);
-		goto error_clk_client_deinit;
-	} else {
-		display->mdp_clk_handle = handle;
-	}
-
-	clk_cb.priv = display;
-	clk_cb.dsi_clk_cb = dsi_display_clk_ctrl_cb;
-
-	display_for_each_ctrl(i, display) {
-		display_ctrl = &display->ctrl[i];
-
-		rc = dsi_ctrl_clk_cb_register(display_ctrl->ctrl, &clk_cb);
-		if (rc) {
-			pr_err("[%s] failed to register ctrl clk_cb[%d], rc=%d\n",
-			       display->name, i, rc);
-			goto error_ctrl_deinit;
-		}
-
-		rc = dsi_phy_clk_cb_register(display_ctrl->phy, &clk_cb);
-		if (rc) {
-			pr_err("[%s] failed to register phy clk_cb[%d], rc=%d\n",
-			       display->name, i, rc);
-			goto error_ctrl_deinit;
-		}
-	}
-
-	rc = dsi_display_mipi_host_init(display);
-	if (rc) {
-		pr_err("[%s] failed to initialize mipi host, rc=%d\n",
-		       display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
-	rc = dsi_panel_drv_init(display->panel, &display->host);
-	if (rc) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("[%s] failed to initialize panel driver, rc=%d\n",
-			       display->name, rc);
-		goto error_host_deinit;
-	}
-
-	pr_info("Successfully bind display panel '%s'\n", display->name);
-	display->drm_dev = drm;
-
-	display_for_each_ctrl(i, display) {
-		display_ctrl = &display->ctrl[i];
-
-		if (!display_ctrl->phy || !display_ctrl->ctrl)
-			continue;
-
-		rc = dsi_phy_set_clk_freq(display_ctrl->phy,
-				&display_ctrl->ctrl->clk_freq);
-		if (rc) {
-			pr_err("[%s] failed to set phy clk freq, rc=%d\n",
-					display->name, rc);
-			goto error;
-		}
-	}
-
-	/* register te irq handler */
-	dsi_display_register_te_irq(display);
-
-	goto error;
-
-error_host_deinit:
-	(void)dsi_display_mipi_host_deinit(display);
-error_clk_client_deinit:
-	(void)dsi_deregister_clk_handle(display->dsi_clk_handle);
-error_clk_deinit:
-	(void)dsi_display_clk_mngr_deregister(display->clk_mngr);
-error_ctrl_deinit:
-	for (i = i - 1; i >= 0; i--) {
-		display_ctrl = &display->ctrl[i];
-		(void)dsi_phy_drv_deinit(display_ctrl->phy);
-		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-	}
-	(void)dsi_display_sysfs_deinit(display);
-	(void)dsi_display_debugfs_deinit(display);
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-/**
- * dsi_display_unbind - unbind dsi from controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- */
-static void dsi_display_unbind(struct device *dev,
-		struct device *master, void *data)
-{
-	struct dsi_display_ctrl *display_ctrl;
-	struct dsi_display *display;
-	struct platform_device *pdev = to_platform_device(dev);
-	int i, rc = 0;
-
-	if (!dev || !pdev) {
-		pr_err("invalid param(s)\n");
-		return;
-	}
-
-	display = platform_get_drvdata(pdev);
-	if (!display) {
-		pr_err("invalid display\n");
-		return;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_panel_drv_deinit(display->panel);
-	if (rc)
-		pr_err("[%s] failed to deinit panel driver, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_display_mipi_host_deinit(display);
-	if (rc)
-		pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
-		       display->name,
-		       rc);
-
-	display_for_each_ctrl(i, display) {
-		display_ctrl = &display->ctrl[i];
-
-		rc = dsi_phy_drv_deinit(display_ctrl->phy);
-		if (rc)
-			pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
-			       display->name, i, rc);
-
-		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-		if (rc)
-			pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
-			       display->name, i, rc);
-	}
-
-	atomic_set(&display->clkrate_change_pending, 0);
-	(void)dsi_display_sysfs_deinit(display);
-	(void)dsi_display_debugfs_deinit(display);
-
-	mutex_unlock(&display->display_lock);
-}
-
-static const struct component_ops dsi_display_comp_ops = {
-	.bind = dsi_display_bind,
-	.unbind = dsi_display_unbind,
-};
-
-static struct platform_driver dsi_display_driver = {
-	.probe = dsi_display_dev_probe,
-	.remove = dsi_display_dev_remove,
-	.driver = {
-		.name = "msm-dsi-display",
-		.of_match_table = dsi_display_dt_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-static int dsi_display_init(struct dsi_display *display)
-{
-	int rc = 0;
-	struct platform_device *pdev = display->pdev;
-
-	mutex_init(&display->display_lock);
-
-	rc = _dsi_display_dev_init(display);
-	if (rc) {
-		pr_err("device init failed, rc=%d\n", rc);
-		goto end;
-	}
-
-	rc = component_add(&pdev->dev, &dsi_display_comp_ops);
-	if (rc)
-		pr_err("component add failed, rc=%d\n", rc);
-
-	pr_debug("component add success: %s\n", display->name);
-end:
-	return rc;
-}
-
-static void dsi_display_firmware_display(const struct firmware *fw,
-				void *context)
-{
-	struct dsi_display *display = context;
-
-	if (fw) {
-		pr_debug("reading data from firmware, size=%zd\n",
-			fw->size);
-
-		display->fw = fw;
-		display->name = "dsi_firmware_display";
-	}
-
-	if (dsi_display_init(display))
-		return;
-
-	pr_debug("success\n");
-}
-
-int dsi_display_dev_probe(struct platform_device *pdev)
-{
-	struct dsi_display *display = NULL;
-	struct device_node *node = NULL, *panel_node = NULL, *mdp_node = NULL;
-	int rc = 0, index = DSI_PRIMARY;
-	bool firm_req = false;
-	struct dsi_display_boot_param *boot_disp;
-
-	if (!pdev || !pdev->dev.of_node) {
-		pr_err("pdev not found\n");
-		rc = -ENODEV;
-		goto end;
-	}
-
-	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
-	if (!display) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	display->display_type = of_get_property(pdev->dev.of_node,
-				"label", NULL);
-	if (!display->display_type)
-		display->display_type = "primary";
-
-	if (!strcmp(display->display_type, "secondary"))
-		index = DSI_SECONDARY;
-
-	boot_disp = &boot_displays[index];
-	node = pdev->dev.of_node;
-	if (boot_disp->boot_disp_en) {
-		mdp_node = of_parse_phandle(node, "qcom,mdp", 0);
-		if (!mdp_node) {
-			pr_err("mdp_node not found\n");
-			rc = -ENODEV;
-			goto end;
-		}
-
-		/* The panel name should be same as UEFI name index */
-		panel_node = of_find_node_by_name(mdp_node, boot_disp->name);
-		if (!panel_node)
-			pr_warn("panel_node %s not found\n", boot_disp->name);
-	} else {
-		panel_node = of_parse_phandle(node,
-				"qcom,dsi-default-panel", 0);
-		if (!panel_node)
-			pr_warn("default panel not found\n");
-
-		if (IS_ENABLED(CONFIG_DSI_PARSER))
-			firm_req = !request_firmware_nowait(
-				THIS_MODULE, 1, "dsi_prop",
-				&pdev->dev, GFP_KERNEL, display,
-				dsi_display_firmware_display);
-	}
-
-	boot_disp->node = pdev->dev.of_node;
-	boot_disp->disp = display;
-
-	display->panel_node = panel_node;
-	display->pdev = pdev;
-	display->boot_disp = boot_disp;
-
-	dsi_display_parse_cmdline_topology(display, index);
-
-	platform_set_drvdata(pdev, display);
-
-	/* initialize display in firmware callback */
-	if (!firm_req) {
-		rc = dsi_display_init(display);
-		if (rc)
-			goto end;
-	}
-
-	return 0;
-end:
-	if (display)
-		devm_kfree(&pdev->dev, display);
-
-	return rc;
-}
-
-int dsi_display_dev_remove(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct dsi_display *display;
-
-	if (!pdev) {
-		pr_err("Invalid device\n");
-		return -EINVAL;
-	}
-
-	display = platform_get_drvdata(pdev);
-
-	/* decrement ref count */
-	of_node_put(display->panel_node);
-
-	(void)_dsi_display_dev_deinit(display);
-
-	platform_set_drvdata(pdev, NULL);
-	devm_kfree(&pdev->dev, display);
-	return rc;
-}
-
-int dsi_display_get_num_of_displays(void)
-{
-	int i, count = 0;
-
-	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
-		struct dsi_display *display = boot_displays[i].disp;
-
-		if (display && display->panel_node)
-			count++;
-	}
-
-	return count;
-}
-
-int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
-{
-	int index = 0, count = 0;
-
-	if (!display_array || !max_display_count) {
-		pr_err("invalid params\n");
-		return 0;
-	}
-
-	for (index = 0; index < MAX_DSI_ACTIVE_DISPLAY; index++) {
-		struct dsi_display *display = boot_displays[index].disp;
-
-		if (display && display->panel_node)
-			display_array[count++] = display;
-	}
-
-	return count;
-}
-
-int dsi_display_drm_bridge_init(struct dsi_display *display,
-		struct drm_encoder *enc)
-{
-	int rc = 0;
-	struct dsi_bridge *bridge;
-	struct msm_drm_private *priv = NULL;
-
-	if (!display || !display->drm_dev || !enc) {
-		pr_err("invalid param(s)\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-	priv = display->drm_dev->dev_private;
-
-	if (!priv) {
-		pr_err("Private data is not present\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (display->bridge) {
-		pr_err("display is already initialize\n");
-		goto error;
-	}
-
-	bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
-	if (IS_ERR_OR_NULL(bridge)) {
-		rc = PTR_ERR(bridge);
-		pr_err("[%s] brige init failed, %d\n", display->name, rc);
-		goto error;
-	}
-
-	display->bridge = bridge;
-	priv->bridges[priv->num_bridges++] = &bridge->base;
-
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_drm_bridge_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	dsi_drm_bridge_cleanup(display->bridge);
-	display->bridge = NULL;
-
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-/* Hook functions to call external connector, pointer validation is
- * done in dsi_display_drm_ext_bridge_init.
- */
-static enum drm_connector_status dsi_display_drm_ext_detect(
-		struct drm_connector *connector,
-		bool force,
-		void *disp)
-{
-	struct dsi_display *display = disp;
-
-	return display->ext_conn->funcs->detect(display->ext_conn, force);
-}
-
-static int dsi_display_drm_ext_get_modes(
-		struct drm_connector *connector, void *disp)
-{
-	struct dsi_display *display = disp;
-	struct drm_display_mode *pmode, *pt;
-	int count;
-
-	/* if there are modes defined in panel, ignore external modes */
-	if (display->panel->num_timing_nodes)
-		return dsi_connector_get_modes(connector, disp);
-
-	count = display->ext_conn->helper_private->get_modes(
-			display->ext_conn);
-
-	list_for_each_entry_safe(pmode, pt,
-			&display->ext_conn->probed_modes, head) {
-		list_move_tail(&pmode->head, &connector->probed_modes);
-	}
-
-	connector->display_info = display->ext_conn->display_info;
-
-	return count;
-}
-
-static enum drm_mode_status dsi_display_drm_ext_mode_valid(
-		struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *disp)
-{
-	struct dsi_display *display = disp;
-	enum drm_mode_status status;
-
-	/* always do internal mode_valid check */
-	status = dsi_conn_mode_valid(connector, mode, disp);
-	if (status != MODE_OK)
-		return status;
-
-	return display->ext_conn->helper_private->mode_valid(
-			display->ext_conn, mode);
-}
-
-static int dsi_display_drm_ext_atomic_check(struct drm_connector *connector,
-		void *disp,
-		struct drm_connector_state *c_state)
-{
-	struct dsi_display *display = disp;
-
-	return display->ext_conn->helper_private->atomic_check(
-			display->ext_conn, c_state);
-}
-
-static int dsi_display_ext_get_info(struct drm_connector *connector,
-	struct msm_display_info *info, void *disp)
-{
-	struct dsi_display *display;
-	int i;
-
-	if (!info || !disp) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	display = disp;
-	if (!display->panel) {
-		pr_err("invalid display panel\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	memset(info, 0, sizeof(struct msm_display_info));
-
-	info->intf_type = DRM_MODE_CONNECTOR_DSI;
-	info->num_of_h_tiles = display->ctrl_count;
-	for (i = 0; i < info->num_of_h_tiles; i++)
-		info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
-
-	info->is_connected = connector->status != connector_status_disconnected;
-
-	if (!strcmp(display->display_type, "primary"))
-		info->is_primary = true;
-	else
-		info->is_primary = false;
-
-	info->capabilities |= (MSM_DISPLAY_CAP_VID_MODE |
-		MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_HOT_PLUG);
-
-	mutex_unlock(&display->display_lock);
-	return 0;
-}
-
-static int dsi_display_ext_get_mode_info(struct drm_connector *connector,
-	const struct drm_display_mode *drm_mode,
-	struct msm_mode_info *mode_info,
-	u32 max_mixer_width, void *display)
-{
-	struct msm_display_topology *topology;
-
-	if (!drm_mode || !mode_info)
-		return -EINVAL;
-
-	memset(mode_info, 0, sizeof(*mode_info));
-	mode_info->frame_rate = drm_mode->vrefresh;
-	mode_info->vtotal = drm_mode->vtotal;
-
-	topology = &mode_info->topology;
-	topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ? 2 : 1;
-	topology->num_enc = 0;
-	topology->num_intf = topology->num_lm;
-
-	mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
-
-	return 0;
-}
-
-static struct dsi_display_ext_bridge *dsi_display_ext_get_bridge(
-		struct drm_bridge *bridge)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct list_head *connector_list;
-	struct drm_connector *conn_iter;
-	struct sde_connector *sde_conn;
-	struct dsi_display *display;
-	int i;
-
-	if (!bridge || !bridge->encoder) {
-		SDE_ERROR("invalid argument\n");
-		return NULL;
-	}
-
-	priv = bridge->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	connector_list = &sde_kms->dev->mode_config.connector_list;
-
-	list_for_each_entry(conn_iter, connector_list, head) {
-		sde_conn = to_sde_connector(conn_iter);
-		if (sde_conn->encoder == bridge->encoder) {
-			display = sde_conn->display;
-			for (i = 0; i < display->ctrl_count; i++) {
-				if (display->ext_bridge[i].bridge == bridge)
-					return &display->ext_bridge[i];
-			}
-		}
-	}
-
-	return NULL;
-}
-
-static void dsi_display_drm_ext_adjust_timing(
-		const struct dsi_display *display,
-		struct drm_display_mode *mode)
-{
-	mode->hdisplay /= display->ctrl_count;
-	mode->hsync_start /= display->ctrl_count;
-	mode->hsync_end /= display->ctrl_count;
-	mode->htotal /= display->ctrl_count;
-	mode->hskew /= display->ctrl_count;
-	mode->clock /= display->ctrl_count;
-}
-
-static enum drm_mode_status dsi_display_drm_ext_bridge_mode_valid(
-		struct drm_bridge *bridge,
-		const struct drm_display_mode *mode)
-{
-	struct dsi_display_ext_bridge *ext_bridge;
-	struct drm_display_mode tmp;
-
-	ext_bridge = dsi_display_ext_get_bridge(bridge);
-	if (!ext_bridge)
-		return MODE_ERROR;
-
-	tmp = *mode;
-	dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
-	return ext_bridge->orig_funcs->mode_valid(bridge, &tmp);
-}
-
-static bool dsi_display_drm_ext_bridge_mode_fixup(
-		struct drm_bridge *bridge,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	struct dsi_display_ext_bridge *ext_bridge;
-	struct drm_display_mode tmp;
-
-	ext_bridge = dsi_display_ext_get_bridge(bridge);
-	if (!ext_bridge)
-		return false;
-
-	tmp = *mode;
-	dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
-	return ext_bridge->orig_funcs->mode_fixup(bridge, &tmp, &tmp);
-}
-
-static void dsi_display_drm_ext_bridge_mode_set(
-		struct drm_bridge *bridge,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	struct dsi_display_ext_bridge *ext_bridge;
-	struct drm_display_mode tmp;
-
-	ext_bridge = dsi_display_ext_get_bridge(bridge);
-	if (!ext_bridge)
-		return;
-
-	tmp = *mode;
-	dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
-	ext_bridge->orig_funcs->mode_set(bridge, &tmp, &tmp);
-}
-
-static int dsi_host_ext_attach(struct mipi_dsi_host *host,
-			   struct mipi_dsi_device *dsi)
-{
-	struct dsi_display *display = to_dsi_display(host);
-	struct dsi_panel *panel;
-
-	if (!host || !dsi || !display->panel) {
-		pr_err("Invalid param\n");
-		return -EINVAL;
-	}
-
-	pr_debug("DSI[%s]: channel=%d, lanes=%d, format=%d, mode_flags=%lx\n",
-		dsi->name, dsi->channel, dsi->lanes,
-		dsi->format, dsi->mode_flags);
-
-	panel = display->panel;
-	panel->host_config.data_lanes = 0;
-	if (dsi->lanes > 0)
-		panel->host_config.data_lanes |= DSI_DATA_LANE_0;
-	if (dsi->lanes > 1)
-		panel->host_config.data_lanes |= DSI_DATA_LANE_1;
-	if (dsi->lanes > 2)
-		panel->host_config.data_lanes |= DSI_DATA_LANE_2;
-	if (dsi->lanes > 3)
-		panel->host_config.data_lanes |= DSI_DATA_LANE_3;
-
-	switch (dsi->format) {
-	case MIPI_DSI_FMT_RGB888:
-		panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB888;
-		break;
-	case MIPI_DSI_FMT_RGB666:
-		panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666_LOOSE;
-		break;
-	case MIPI_DSI_FMT_RGB666_PACKED:
-		panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666;
-		break;
-	case MIPI_DSI_FMT_RGB565:
-	default:
-		panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB565;
-		break;
-	}
-
-	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
-		panel->panel_mode = DSI_OP_VIDEO_MODE;
-
-		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
-			panel->video_config.traffic_mode =
-					DSI_VIDEO_TRAFFIC_BURST_MODE;
-		else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-			panel->video_config.traffic_mode =
-					DSI_VIDEO_TRAFFIC_SYNC_PULSES;
-		else
-			panel->video_config.traffic_mode =
-					DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
-
-		panel->video_config.hsa_lp11_en =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA;
-		panel->video_config.hbp_lp11_en =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP;
-		panel->video_config.hfp_lp11_en =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP;
-		panel->video_config.pulse_mode_hsa_he =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE;
-		panel->video_config.bllp_lp11_en =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BLLP;
-		panel->video_config.eof_bllp_lp11_en =
-			dsi->mode_flags & MIPI_DSI_MODE_VIDEO_EOF_BLLP;
-	} else {
-		panel->panel_mode = DSI_OP_CMD_MODE;
-		pr_err("command mode not supported by ext bridge\n");
-		return -ENOTSUPP;
-	}
-
-	panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
-
-	return 0;
-}
-
-static struct mipi_dsi_host_ops dsi_host_ext_ops = {
-	.attach = dsi_host_ext_attach,
-	.detach = dsi_host_detach,
-	.transfer = dsi_host_transfer,
-};
-
-int dsi_display_drm_ext_bridge_init(struct dsi_display *display,
-		struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	struct drm_device *drm = encoder->dev;
-	struct drm_bridge *bridge = encoder->bridge;
-	struct drm_bridge *ext_bridge;
-	struct drm_connector *ext_conn;
-	struct sde_connector *sde_conn = to_sde_connector(connector);
-	struct drm_bridge *prev_bridge = bridge;
-	int rc = 0, i;
-
-	for (i = 0; i < display->ext_bridge_cnt; i++) {
-		struct dsi_display_ext_bridge *ext_bridge_info =
-				&display->ext_bridge[i];
-
-		/* return if ext bridge is already initialized */
-		if (ext_bridge_info->bridge)
-			return 0;
-
-		ext_bridge = of_drm_find_bridge(ext_bridge_info->node_of);
-		if (IS_ERR_OR_NULL(ext_bridge)) {
-			rc = PTR_ERR(ext_bridge);
-			pr_err("failed to find ext bridge\n");
-			goto error;
-		}
-
-		/* override functions for mode adjustment */
-		if (display->ext_bridge_cnt > 1) {
-			ext_bridge_info->bridge_funcs = *ext_bridge->funcs;
-			if (ext_bridge->funcs->mode_fixup)
-				ext_bridge_info->bridge_funcs.mode_fixup =
-					dsi_display_drm_ext_bridge_mode_fixup;
-			if (ext_bridge->funcs->mode_valid)
-				ext_bridge_info->bridge_funcs.mode_valid =
-					dsi_display_drm_ext_bridge_mode_valid;
-			if (ext_bridge->funcs->mode_set)
-				ext_bridge_info->bridge_funcs.mode_set =
-					dsi_display_drm_ext_bridge_mode_set;
-			ext_bridge_info->orig_funcs = ext_bridge->funcs;
-			ext_bridge->funcs = &ext_bridge_info->bridge_funcs;
-		}
-
-		rc = drm_bridge_attach(encoder, ext_bridge, prev_bridge);
-		if (rc) {
-			pr_err("[%s] ext brige attach failed, %d\n",
-				display->name, rc);
-			goto error;
-		}
-
-		ext_bridge_info->display = display;
-		ext_bridge_info->bridge = ext_bridge;
-		prev_bridge = ext_bridge;
-
-		/* ext bridge will init its own connector during attach,
-		 * we need to extract it out of the connector list
-		 */
-		spin_lock_irq(&drm->mode_config.connector_list_lock);
-		ext_conn = list_last_entry(&drm->mode_config.connector_list,
-			struct drm_connector, head);
-		if (ext_conn && ext_conn != connector &&
-			ext_conn->encoder_ids[0] == bridge->encoder->base.id) {
-			list_del_init(&ext_conn->head);
-			display->ext_conn = ext_conn;
-		}
-		spin_unlock_irq(&drm->mode_config.connector_list_lock);
-
-		/* if there is no valid external connector created, or in split
-		 * mode, default setting is used from panel defined in DT file.
-		 */
-		if (!display->ext_conn ||
-		    !display->ext_conn->funcs ||
-		    !display->ext_conn->helper_private ||
-		    display->ext_bridge_cnt > 1) {
-			display->ext_conn = NULL;
-			continue;
-		}
-
-		/* otherwise, hook up the functions to use external connector */
-		if (display->ext_conn->funcs->detect)
-			sde_conn->ops.detect = dsi_display_drm_ext_detect;
-
-		if (display->ext_conn->helper_private->get_modes)
-			sde_conn->ops.get_modes =
-				dsi_display_drm_ext_get_modes;
-
-		if (display->ext_conn->helper_private->mode_valid)
-			sde_conn->ops.mode_valid =
-				dsi_display_drm_ext_mode_valid;
-
-		if (display->ext_conn->helper_private->atomic_check)
-			sde_conn->ops.atomic_check =
-				dsi_display_drm_ext_atomic_check;
-
-		sde_conn->ops.get_info =
-				dsi_display_ext_get_info;
-		sde_conn->ops.get_mode_info =
-				dsi_display_ext_get_mode_info;
-
-		/* add support to attach/detach */
-		display->host.ops = &dsi_host_ext_ops;
-	}
-
-	return 0;
-error:
-	return rc;
-}
-
-int dsi_display_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *disp)
-{
-	struct dsi_display *display;
-	struct dsi_panel_phy_props phy_props;
-	int i, rc;
-
-	if (!info || !disp) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	display = disp;
-	if (!display->panel) {
-		pr_err("invalid display panel\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-	rc = dsi_panel_get_phy_props(display->panel, &phy_props);
-	if (rc) {
-		pr_err("[%s] failed to get panel phy props, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	memset(info, 0, sizeof(struct msm_display_info));
-	info->intf_type = DRM_MODE_CONNECTOR_DSI;
-	info->num_of_h_tiles = display->ctrl_count;
-	for (i = 0; i < info->num_of_h_tiles; i++)
-		info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
-
-	info->is_connected = true;
-	info->is_primary = false;
-
-	if (!strcmp(display->display_type, "primary"))
-		info->is_primary = true;
-
-	info->width_mm = phy_props.panel_width_mm;
-	info->height_mm = phy_props.panel_height_mm;
-	info->max_width = 1920;
-	info->max_height = 1080;
-	info->qsync_min_fps =
-		display->panel->qsync_min_fps;
-
-	switch (display->panel->panel_mode) {
-	case DSI_OP_VIDEO_MODE:
-		info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
-		break;
-	case DSI_OP_CMD_MODE:
-		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
-		info->is_te_using_watchdog_timer =
-			display->panel->te_using_watchdog_timer |
-			display->sw_te_using_wd;
-		break;
-	default:
-		pr_err("unknwown dsi panel mode %d\n",
-				display->panel->panel_mode);
-		break;
-	}
-
-	if (display->panel->esd_config.esd_enabled)
-		info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
-
-	info->te_source = display->te_source;
-
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
-			u32 *count)
-{
-	struct dsi_dfps_capabilities dfps_caps;
-	int num_dfps_rates, rc = 0;
-
-	if (!display || !display->panel) {
-		pr_err("invalid display:%d panel:%d\n", display != NULL,
-				display ? display->panel != NULL : 0);
-		return -EINVAL;
-	}
-
-	*count = display->panel->num_timing_nodes;
-
-	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (rc) {
-		pr_err("[%s] failed to get dfps caps from panel\n",
-				display->name);
-		return rc;
-	}
-
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
-
-	/* Inflate num_of_modes by fps in dfps */
-	*count = display->panel->num_timing_nodes * num_dfps_rates;
-
-	return 0;
-}
-
-int dsi_display_get_mode_count(struct dsi_display *display,
-			u32 *count)
-{
-	int rc;
-
-	if (!display || !display->panel) {
-		pr_err("invalid display:%d panel:%d\n", display != NULL,
-				display ? display->panel != NULL : 0);
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-	rc = dsi_display_get_mode_count_no_lock(display, count);
-	mutex_unlock(&display->display_lock);
-
-	return 0;
-}
-
-void dsi_display_put_mode(struct dsi_display *display,
-	struct dsi_display_mode *mode)
-{
-	dsi_panel_put_mode(mode);
-}
-
-int dsi_display_get_modes(struct dsi_display *display,
-			  struct dsi_display_mode **out_modes)
-{
-	struct dsi_dfps_capabilities dfps_caps;
-	u32 num_dfps_rates, panel_mode_count, total_mode_count;
-	u32 mode_idx, array_idx = 0;
-	int i, rc = -EINVAL;
-
-	if (!display || !out_modes) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	*out_modes = NULL;
-
-	mutex_lock(&display->display_lock);
-
-	if (display->modes)
-		goto exit;
-
-	rc = dsi_display_get_mode_count_no_lock(display, &total_mode_count);
-	if (rc)
-		goto error;
-
-	display->modes = kcalloc(total_mode_count, sizeof(*display->modes),
-			GFP_KERNEL);
-	if (!display->modes) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (rc) {
-		pr_err("[%s] failed to get dfps caps from panel\n",
-				display->name);
-		goto error;
-	}
-
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
-
-	panel_mode_count = display->panel->num_timing_nodes;
-
-	for (mode_idx = 0; mode_idx < panel_mode_count; mode_idx++) {
-		struct dsi_display_mode panel_mode;
-		int topology_override = NO_OVERRIDE;
-
-		if (display->cmdline_timing == mode_idx)
-			topology_override = display->cmdline_topology;
-
-		memset(&panel_mode, 0, sizeof(panel_mode));
-
-		rc = dsi_panel_get_mode(display->panel, mode_idx,
-						&panel_mode, topology_override);
-		if (rc) {
-			pr_err("[%s] failed to get mode idx %d from panel\n",
-				   display->name, mode_idx);
-			goto error;
-		}
-
-		if (display->ctrl_count > 1) { /* TODO: remove if */
-			panel_mode.timing.h_active *= display->ctrl_count;
-			panel_mode.timing.h_front_porch *= display->ctrl_count;
-			panel_mode.timing.h_sync_width *= display->ctrl_count;
-			panel_mode.timing.h_back_porch *= display->ctrl_count;
-			panel_mode.timing.h_skew *= display->ctrl_count;
-			panel_mode.pixel_clk_khz *= display->ctrl_count;
-		}
-
-		for (i = 0; i < num_dfps_rates; i++) {
-			struct dsi_display_mode *sub_mode =
-					&display->modes[array_idx];
-			u32 curr_refresh_rate;
-
-			if (!sub_mode) {
-				pr_err("invalid mode data\n");
-				rc = -EFAULT;
-				goto error;
-			}
-
-			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
-
-			if (dfps_caps.dfps_support) {
-				curr_refresh_rate =
-					sub_mode->timing.refresh_rate;
-				sub_mode->timing.refresh_rate =
-					dfps_caps.min_refresh_rate +
-					(i % num_dfps_rates);
-
-				dsi_display_get_dfps_timing(display,
-					sub_mode, curr_refresh_rate);
-
-				sub_mode->pixel_clk_khz =
-					(DSI_H_TOTAL_DSC(&sub_mode->timing) *
-					DSI_V_TOTAL(&sub_mode->timing) *
-					sub_mode->timing.refresh_rate) / 1000;
-			}
-			array_idx++;
-		}
-	}
-
-exit:
-	*out_modes = display->modes;
-	rc = 0;
-
-error:
-	if (rc)
-		kfree(display->modes);
-
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_get_panel_vfp(void *dsi_display,
-	int h_active, int v_active)
-{
-	int i, rc = 0;
-	u32 count, refresh_rate = 0;
-	struct dsi_dfps_capabilities dfps_caps;
-	struct dsi_display *display = (struct dsi_display *)dsi_display;
-
-	if (!display)
-		return -EINVAL;
-
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
-
-	mutex_lock(&display->display_lock);
-
-	if (display->panel && display->panel->cur_mode)
-		refresh_rate = display->panel->cur_mode->timing.refresh_rate;
-
-	dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (dfps_caps.dfps_support)
-		refresh_rate = dfps_caps.max_refresh_rate;
-
-	if (!refresh_rate) {
-		mutex_unlock(&display->display_lock);
-		pr_err("Null Refresh Rate\n");
-		return -EINVAL;
-	}
-
-	h_active *= display->ctrl_count;
-
-	for (i = 0; i < count; i++) {
-		struct dsi_display_mode *m = &display->modes[i];
-
-		if (m && v_active == m->timing.v_active &&
-			h_active == m->timing.h_active &&
-			refresh_rate == m->timing.refresh_rate) {
-			rc = m->timing.v_front_porch;
-			break;
-		}
-	}
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm)
-{
-	struct dsi_display *display = (struct dsi_display *)dsi_display;
-	u32 count, i;
-	int rc = 0;
-
-	*num_lm = 0;
-
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
-
-	if (!display->modes) {
-		struct dsi_display_mode *m;
-
-		rc = dsi_display_get_modes(display, &m);
-		if (rc)
-			return rc;
-	}
-
-	mutex_lock(&display->display_lock);
-	for (i = 0; i < count; i++) {
-		struct dsi_display_mode *m = &display->modes[i];
-
-		*num_lm = max(m->priv_info->topology.num_lm, *num_lm);
-	}
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-int dsi_display_find_mode(struct dsi_display *display,
-		const struct dsi_display_mode *cmp,
-		struct dsi_display_mode **out_mode)
-{
-	u32 count, i;
-	int rc;
-
-	if (!display || !out_mode)
-		return -EINVAL;
-
-	*out_mode = NULL;
-
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
-
-	if (!display->modes) {
-		struct dsi_display_mode *m;
-
-		rc = dsi_display_get_modes(display, &m);
-		if (rc)
-			return rc;
-	}
-
-	mutex_lock(&display->display_lock);
-	for (i = 0; i < count; i++) {
-		struct dsi_display_mode *m = &display->modes[i];
-
-		if (cmp->timing.v_active == m->timing.v_active &&
-			cmp->timing.h_active == m->timing.h_active &&
-			cmp->timing.refresh_rate == m->timing.refresh_rate) {
-			*out_mode = m;
-			rc = 0;
-			break;
-		}
-	}
-	mutex_unlock(&display->display_lock);
-
-	if (!*out_mode) {
-		pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
-				display->name, cmp->timing.v_active,
-				cmp->timing.h_active, cmp->timing.refresh_rate);
-		rc = -ENOENT;
-	}
-
-	return rc;
-}
-
-/**
- * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
- * @display:     DSI display handle.
- * @cur_dsi_mode:   Current DSI mode.
- * @mode:        Mode value structure to be validated.
- *               MSM_MODE_FLAG_SEAMLESS_VRR flag is set if there
- *               is change in fps but vactive and hactive are same.
- * Return: error code.
- */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
-			struct dsi_display_mode *cur_dsi_mode,
-			struct dsi_display_mode *mode)
-{
-	int rc = 0;
-	struct dsi_display_mode adj_mode, cur_mode;
-	struct dsi_dfps_capabilities dfps_caps;
-	u32 curr_refresh_rate;
-
-	if (!display || !mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!display->panel || !display->panel->cur_mode) {
-		pr_debug("Current panel mode not set\n");
-		return rc;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	adj_mode = *mode;
-	cur_mode = *cur_dsi_mode;
-
-	if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
-		(cur_mode.timing.v_active == adj_mode.timing.v_active) &&
-		(cur_mode.timing.h_active == adj_mode.timing.h_active)) {
-
-		curr_refresh_rate = cur_mode.timing.refresh_rate;
-		rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-		if (rc) {
-			pr_err("[%s] failed to get dfps caps from panel\n",
-					display->name);
-			goto error;
-		}
-
-		cur_mode.timing.refresh_rate =
-			adj_mode.timing.refresh_rate;
-
-		rc = dsi_display_get_dfps_timing(display,
-			&cur_mode, curr_refresh_rate);
-		if (rc) {
-			pr_err("[%s] seamless vrr not possible rc=%d\n",
-			display->name, rc);
-			goto error;
-		}
-		switch (dfps_caps.type) {
-		/*
-		 * Ignore any round off factors in porch calculation.
-		 * Worse case is set to 5.
-		 */
-		case DSI_DFPS_IMMEDIATE_VFP:
-			if (abs(DSI_V_TOTAL(&cur_mode.timing) -
-				DSI_V_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.v_front_porch,
-				adj_mode.timing.v_front_porch);
-			break;
-
-		case DSI_DFPS_IMMEDIATE_HFP:
-			if (abs(DSI_H_TOTAL_DSC(&cur_mode.timing) -
-				DSI_H_TOTAL_DSC(&adj_mode.timing)) > 5)
-				pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
-			break;
-
-		default:
-			pr_err("Unsupported DFPS mode %d\n",
-				dfps_caps.type);
-			rc = -ENOTSUPP;
-		}
-
-		pr_debug("Mode switch is seamless variable refresh\n");
-		mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-		SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
-	}
-
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_validate_mode(struct dsi_display *display,
-			      struct dsi_display_mode *mode,
-			      u32 flags)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-	struct dsi_display_mode adj_mode;
-
-	if (!display || !mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	adj_mode = *mode;
-	adjust_timing_by_ctrl_count(display, &adj_mode);
-
-	rc = dsi_panel_validate_mode(display->panel, &adj_mode);
-	if (rc) {
-		pr_err("[%s] panel mode validation failed, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_validate_timing(ctrl->ctrl, &adj_mode.timing);
-		if (rc) {
-			pr_err("[%s] ctrl mode validation failed, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-
-		rc = dsi_phy_validate_mode(ctrl->phy, &adj_mode.timing);
-		if (rc) {
-			pr_err("[%s] phy mode validation failed, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
-	if ((flags & DSI_VALIDATE_FLAG_ALLOW_ADJUST) &&
-			(mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)) {
-		rc = dsi_display_validate_mode_seamless(display, mode);
-		if (rc) {
-			pr_err("[%s] seamless not possible rc=%d\n",
-				display->name, rc);
-			goto error;
-		}
-	}
-
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_set_mode(struct dsi_display *display,
-			 struct dsi_display_mode *mode,
-			 u32 flags)
-{
-	int rc = 0;
-	struct dsi_display_mode adj_mode;
-
-	if (!display || !mode || !display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	adj_mode = *mode;
-	adjust_timing_by_ctrl_count(display, &adj_mode);
-
-	/*For dynamic DSI setting, use specified clock rate */
-	if (display->cached_clk_rate > 0)
-		adj_mode.priv_info->clk_rate_hz = display->cached_clk_rate;
-
-	rc = dsi_display_validate_mode_set(display, &adj_mode, flags);
-	if (rc) {
-		pr_err("[%s] mode cannot be set\n", display->name);
-		goto error;
-	}
-
-	rc = dsi_display_set_mode_sub(display, &adj_mode, flags);
-	if (rc) {
-		pr_err("[%s] failed to set mode\n", display->name);
-		goto error;
-	}
-
-	if (!display->panel->cur_mode) {
-		display->panel->cur_mode =
-			kzalloc(sizeof(struct dsi_display_mode), GFP_KERNEL);
-		if (!display->panel->cur_mode) {
-			rc = -ENOMEM;
-			goto error;
-		}
-	}
-
-	memcpy(display->panel->cur_mode, &adj_mode, sizeof(adj_mode));
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable);
-		if (rc) {
-			pr_err("[%s] failed to set tpg state for host_%d\n",
-			       display->name, i);
-			goto error;
-		}
-	}
-
-	display->is_tpg_enabled = enable;
-error:
-	return rc;
-}
-
-static int dsi_display_pre_switch(struct dsi_display *display)
-{
-	int rc = 0;
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	rc = dsi_display_ctrl_update(display);
-	if (rc) {
-		pr_err("[%s] failed to update DSI controller, rc=%d\n",
-			   display->name, rc);
-		goto error_ctrl_clk_off;
-	}
-
-	rc = dsi_display_set_clk_src(display);
-	if (rc) {
-		pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
-			display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_LINK_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
-			   display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
-	goto error;
-
-error_ctrl_deinit:
-	(void)dsi_display_ctrl_deinit(display);
-error_ctrl_clk_off:
-	(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-error:
-	return rc;
-}
-
-static bool _dsi_display_validate_host_state(struct dsi_display *display)
-{
-	int i;
-	struct dsi_display_ctrl *ctrl;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl)
-			continue;
-		if (!dsi_ctrl_validate_host_state(ctrl->ctrl))
-			return false;
-	}
-
-	return true;
-}
-
-static void dsi_display_handle_fifo_underflow(struct work_struct *work)
-{
-	struct dsi_display *display = NULL;
-
-	display = container_of(work, struct dsi_display, fifo_underflow_work);
-	if (!display || !display->panel ||
-	    atomic_read(&display->panel->esd_recovery_pending)) {
-		pr_debug("Invalid recovery use case\n");
-		return;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	if (!_dsi_display_validate_host_state(display)) {
-		mutex_unlock(&display->display_lock);
-		return;
-	}
-
-	pr_debug("handle DSI FIFO underflow error\n");
-
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	dsi_display_soft_reset(display);
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-
-	mutex_unlock(&display->display_lock);
-}
-
-static void dsi_display_handle_fifo_overflow(struct work_struct *work)
-{
-	struct dsi_display *display = NULL;
-	struct dsi_display_ctrl *ctrl;
-	int i, rc;
-	int mask = BIT(20); /* clock lane */
-	int (*cb_func)(void *event_usr_ptr,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3);
-	void *data;
-	u32 version = 0;
-
-	display = container_of(work, struct dsi_display, fifo_overflow_work);
-	if (!display || !display->panel ||
-	    (display->panel->panel_mode != DSI_OP_VIDEO_MODE) ||
-	    atomic_read(&display->panel->esd_recovery_pending)) {
-		pr_debug("Invalid recovery use case\n");
-		return;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	if (!_dsi_display_validate_host_state(display)) {
-		mutex_unlock(&display->display_lock);
-		return;
-	}
-
-	pr_debug("handle DSI FIFO overflow error\n");
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-
-	/*
-	 * below recovery sequence is not applicable to
-	 * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
-	 */
-	ctrl = &display->ctrl[display->clk_master_idx];
-	version = dsi_ctrl_get_hw_version(ctrl->ctrl);
-	if (!version || (version < 0x20020001))
-		goto end;
-
-	/* reset ctrl and lanes */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_reset(ctrl->ctrl, mask);
-		rc = dsi_phy_lane_reset(ctrl->phy);
-	}
-
-	/* wait for display line count to be in active area */
-	ctrl = &display->ctrl[display->clk_master_idx];
-	if (ctrl->ctrl->recovery_cb.event_cb) {
-		cb_func = ctrl->ctrl->recovery_cb.event_cb;
-		data = ctrl->ctrl->recovery_cb.event_usr_ptr;
-		rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
-				display->clk_master_idx, 0, 0, 0, 0);
-		if (rc < 0) {
-			pr_debug("sde callback failed\n");
-			goto end;
-		}
-	}
-
-	/* Enable Video mode for DSI controller */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
-	}
-	/*
-	 * Add sufficient delay to make sure
-	 * pixel transmission has started
-	 */
-	udelay(200);
-end:
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-	mutex_unlock(&display->display_lock);
-}
-
-static void dsi_display_handle_lp_rx_timeout(struct work_struct *work)
-{
-	struct dsi_display *display = NULL;
-	struct dsi_display_ctrl *ctrl;
-	int i, rc;
-	int mask = (BIT(20) | (0xF << 16)); /* clock lane and 4 data lane */
-	int (*cb_func)(void *event_usr_ptr,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3);
-	void *data;
-	u32 version = 0;
-
-	display = container_of(work, struct dsi_display, lp_rx_timeout_work);
-	if (!display || !display->panel ||
-	    (display->panel->panel_mode != DSI_OP_VIDEO_MODE) ||
-	    atomic_read(&display->panel->esd_recovery_pending)) {
-		pr_debug("Invalid recovery use case\n");
-		return;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	if (!_dsi_display_validate_host_state(display)) {
-		mutex_unlock(&display->display_lock);
-		return;
-	}
-
-	pr_debug("handle DSI LP RX Timeout error\n");
-
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-
-	/*
-	 * below recovery sequence is not applicable to
-	 * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
-	 */
-	ctrl = &display->ctrl[display->clk_master_idx];
-	version = dsi_ctrl_get_hw_version(ctrl->ctrl);
-	if (!version || (version < 0x20020001))
-		goto end;
-
-	/* reset ctrl and lanes */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_reset(ctrl->ctrl, mask);
-		rc = dsi_phy_lane_reset(ctrl->phy);
-	}
-
-	ctrl = &display->ctrl[display->clk_master_idx];
-	if (ctrl->ctrl->recovery_cb.event_cb) {
-		cb_func = ctrl->ctrl->recovery_cb.event_cb;
-		data = ctrl->ctrl->recovery_cb.event_usr_ptr;
-		rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
-				display->clk_master_idx, 0, 0, 0, 0);
-		if (rc < 0) {
-			pr_debug("Target is in suspend/shutdown\n");
-			goto end;
-		}
-	}
-
-	/* Enable Video mode for DSI controller */
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
-	}
-
-	/*
-	 * Add sufficient delay to make sure
-	 * pixel transmission as started
-	 */
-	udelay(200);
-end:
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-	mutex_unlock(&display->display_lock);
-}
-
-static int dsi_display_cb_error_handler(void *data,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3)
-{
-	struct dsi_display *display =  data;
-
-	if (!display || !(display->err_workq))
-		return -EINVAL;
-
-	switch (event_idx) {
-	case DSI_FIFO_UNDERFLOW:
-		queue_work(display->err_workq, &display->fifo_underflow_work);
-		break;
-	case DSI_FIFO_OVERFLOW:
-		queue_work(display->err_workq, &display->fifo_overflow_work);
-		break;
-	case DSI_LP_Rx_TIMEOUT:
-		queue_work(display->err_workq, &display->lp_rx_timeout_work);
-		break;
-	default:
-		pr_warn("unhandled error interrupt: %d\n", event_idx);
-		break;
-	}
-
-	return 0;
-}
-
-static void dsi_display_register_error_handler(struct dsi_display *display)
-{
-	int i = 0;
-	struct dsi_display_ctrl *ctrl;
-	struct dsi_event_cb_info event_info;
-
-	if (!display)
-		return;
-
-	display->err_workq = create_singlethread_workqueue("dsi_err_workq");
-	if (!display->err_workq) {
-		pr_err("failed to create dsi workq!\n");
-		return;
-	}
-
-	INIT_WORK(&display->fifo_underflow_work,
-				dsi_display_handle_fifo_underflow);
-	INIT_WORK(&display->fifo_overflow_work,
-				dsi_display_handle_fifo_overflow);
-	INIT_WORK(&display->lp_rx_timeout_work,
-				dsi_display_handle_lp_rx_timeout);
-
-	memset(&event_info, 0, sizeof(event_info));
-
-	event_info.event_cb = dsi_display_cb_error_handler;
-	event_info.event_usr_ptr = display;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		ctrl->ctrl->irq_info.irq_err_cb = event_info;
-	}
-}
-
-static void dsi_display_unregister_error_handler(struct dsi_display *display)
-{
-	int i = 0;
-	struct dsi_display_ctrl *ctrl;
-
-	if (!display)
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		memset(&ctrl->ctrl->irq_info.irq_err_cb,
-		       0, sizeof(struct dsi_event_cb_info));
-	}
-
-	if (display->err_workq) {
-		destroy_workqueue(display->err_workq);
-		display->err_workq = NULL;
-	}
-}
-
-int dsi_display_prepare(struct dsi_display *display)
-{
-	int rc = 0;
-	struct dsi_display_mode *mode;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!display->panel->cur_mode) {
-		pr_err("no valid mode set for the display\n");
-		return -EINVAL;
-	}
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-	mutex_lock(&display->display_lock);
-
-	mode = display->panel->cur_mode;
-
-	dsi_display_set_ctrl_esd_check_flag(display, false);
-
-	/* Set up ctrl isr before enabling core clk */
-	dsi_display_ctrl_isr_configure(display, true);
-
-	if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
-		if (display->is_cont_splash_enabled) {
-			pr_err("DMS is not supposed to be set on first frame\n");
-			return -EINVAL;
-		}
-		/* update dsi ctrl for new mode */
-		rc = dsi_display_pre_switch(display);
-		if (rc)
-			pr_err("[%s] panel pre-prepare-res-switch failed, rc=%d\n",
-					display->name, rc);
-		goto error;
-	}
-
-	if (!display->is_cont_splash_enabled) {
-		/*
-		 * For continuous splash usecase we skip panel
-		 * pre prepare since the regulator vote is already
-		 * taken care in splash resource init
-		 */
-		rc = dsi_panel_pre_prepare(display->panel);
-		if (rc) {
-			pr_err("[%s] panel pre-prepare failed, rc=%d\n",
-					display->name, rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
-		       display->name, rc);
-		goto error_panel_post_unprep;
-	}
-
-	/*
-	 * If ULPS during suspend feature is enabled, then DSI PHY was
-	 * left on during suspend. In this case, we do not need to reset/init
-	 * PHY. This would have already been done when the CORE clocks are
-	 * turned on. However, if cont splash is disabled, the first time DSI
-	 * is powered on, phy init needs to be done unconditionally.
-	 */
-	if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) {
-		rc = dsi_display_phy_sw_reset(display);
-		if (rc) {
-			pr_err("[%s] failed to reset phy, rc=%d\n",
-				display->name, rc);
-			goto error_ctrl_clk_off;
-		}
-
-		rc = dsi_display_phy_enable(display);
-		if (rc) {
-			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
-			       display->name, rc);
-			goto error_ctrl_clk_off;
-		}
-	}
-
-	rc = dsi_display_set_clk_src(display);
-	if (rc) {
-		pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
-			display->name, rc);
-		goto error_phy_disable;
-	}
-
-	rc = dsi_display_ctrl_init(display);
-	if (rc) {
-		pr_err("[%s] failed to setup DSI controller, rc=%d\n",
-		       display->name, rc);
-		goto error_phy_disable;
-	}
-	/* Set up DSI ERROR event callback */
-	dsi_display_register_error_handler(display);
-
-	rc = dsi_display_ctrl_host_enable(display);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI host, rc=%d\n",
-		       display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_LINK_CLK, DSI_CLK_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
-		       display->name, rc);
-		goto error_host_engine_off;
-	}
-
-	if (!display->is_cont_splash_enabled) {
-		/*
-		 * For continuous splash usecase, skip panel prepare and
-		 * ctl reset since the pnael and ctrl is already in active
-		 * state and panel on commands are not needed
-		 */
-		rc = dsi_display_soft_reset(display);
-		if (rc) {
-			pr_err("[%s] failed soft reset, rc=%d\n",
-					display->name, rc);
-			goto error_ctrl_link_off;
-		}
-
-		rc = dsi_panel_prepare(display->panel);
-		if (rc) {
-			pr_err("[%s] panel prepare failed, rc=%d\n",
-					display->name, rc);
-			goto error_ctrl_link_off;
-		}
-	}
-	goto error;
-
-error_ctrl_link_off:
-	(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_LINK_CLK, DSI_CLK_OFF);
-error_host_engine_off:
-	(void)dsi_display_ctrl_host_disable(display);
-error_ctrl_deinit:
-	(void)dsi_display_ctrl_deinit(display);
-error_phy_disable:
-	(void)dsi_display_phy_disable(display);
-error_ctrl_clk_off:
-	(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-error_panel_post_unprep:
-	(void)dsi_panel_post_unprepare(display->panel);
-error:
-	mutex_unlock(&display->display_lock);
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-	return rc;
-}
-
-static int dsi_display_calc_ctrl_roi(const struct dsi_display *display,
-		const struct dsi_display_ctrl *ctrl,
-		const struct msm_roi_list *req_rois,
-		struct dsi_rect *out_roi)
-{
-	const struct dsi_rect *bounds = &ctrl->ctrl->mode_bounds;
-	struct dsi_display_mode *cur_mode;
-	struct msm_roi_caps *roi_caps;
-	struct dsi_rect req_roi = { 0 };
-	int rc = 0;
-
-	cur_mode = display->panel->cur_mode;
-	if (!cur_mode)
-		return 0;
-
-	roi_caps = &cur_mode->priv_info->roi_caps;
-	if (req_rois->num_rects > roi_caps->num_roi) {
-		pr_err("request for %d rois greater than max %d\n",
-				req_rois->num_rects,
-				roi_caps->num_roi);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	/**
-	 * if no rois, user wants to reset back to full resolution
-	 * note: h_active is already divided by ctrl_count
-	 */
-	if (!req_rois->num_rects) {
-		*out_roi = *bounds;
-		goto exit;
-	}
-
-	/* intersect with the bounds */
-	req_roi.x = req_rois->roi[0].x1;
-	req_roi.y = req_rois->roi[0].y1;
-	req_roi.w = req_rois->roi[0].x2 - req_rois->roi[0].x1;
-	req_roi.h = req_rois->roi[0].y2 - req_rois->roi[0].y1;
-	dsi_rect_intersect(&req_roi, bounds, out_roi);
-
-exit:
-	/* adjust the ctrl origin to be top left within the ctrl */
-	out_roi->x = out_roi->x - bounds->x;
-
-	pr_debug("ctrl%d:%d: req (%d,%d,%d,%d) bnd (%d,%d,%d,%d) out (%d,%d,%d,%d)\n",
-			ctrl->dsi_ctrl_idx, ctrl->ctrl->cell_index,
-			req_roi.x, req_roi.y, req_roi.w, req_roi.h,
-			bounds->x, bounds->y, bounds->w, bounds->h,
-			out_roi->x, out_roi->y, out_roi->w, out_roi->h);
-
-	return rc;
-}
-
-static int dsi_display_qsync(struct dsi_display *display, bool enable)
-{
-	int i;
-	int rc = 0;
-
-	if (!display->panel->qsync_min_fps) {
-		pr_err("%s:ERROR: qsync set, but no fps\n", __func__);
-		return 0;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	for (i = 0; i < display->ctrl_count; i++) {
-
-		if (enable) {
-			/* send the commands to enable qsync */
-			rc = dsi_panel_send_qsync_on_dcs(display->panel, i);
-			if (rc) {
-				pr_err("fail qsync ON cmds rc:%d\n", rc);
-				goto exit;
-			}
-		} else {
-			/* send the commands to enable qsync */
-			rc = dsi_panel_send_qsync_off_dcs(display->panel, i);
-			if (rc) {
-				pr_err("fail qsync OFF cmds rc:%d\n", rc);
-				goto exit;
-			}
-		}
-
-		dsi_ctrl_setup_avr(display->ctrl[i].ctrl, enable);
-	}
-
-exit:
-	SDE_EVT32(enable, display->panel->qsync_min_fps, rc);
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-static int dsi_display_set_roi(struct dsi_display *display,
-		struct msm_roi_list *rois)
-{
-	struct dsi_display_mode *cur_mode;
-	struct msm_roi_caps *roi_caps;
-	int rc = 0;
-	int i;
-
-	if (!display || !rois || !display->panel)
-		return -EINVAL;
-
-	cur_mode = display->panel->cur_mode;
-	if (!cur_mode)
-		return 0;
-
-	roi_caps = &cur_mode->priv_info->roi_caps;
-	if (!roi_caps->enabled)
-		return 0;
-
-	display_for_each_ctrl(i, display) {
-		struct dsi_display_ctrl *ctrl = &display->ctrl[i];
-		struct dsi_rect ctrl_roi;
-		bool changed = false;
-
-		rc = dsi_display_calc_ctrl_roi(display, ctrl, rois, &ctrl_roi);
-		if (rc) {
-			pr_err("dsi_display_calc_ctrl_roi failed rc %d\n", rc);
-			return rc;
-		}
-
-		rc = dsi_ctrl_set_roi(ctrl->ctrl, &ctrl_roi, &changed);
-		if (rc) {
-			pr_err("dsi_ctrl_set_roi failed rc %d\n", rc);
-			return rc;
-		}
-
-		if (!changed)
-			continue;
-
-		/* send the new roi to the panel via dcs commands */
-		rc = dsi_panel_send_roi_dcs(display->panel, i, &ctrl_roi);
-		if (rc) {
-			pr_err("dsi_panel_set_roi failed rc %d\n", rc);
-			return rc;
-		}
-
-		/* re-program the ctrl with the timing based on the new roi */
-		rc = dsi_ctrl_setup(ctrl->ctrl);
-		if (rc) {
-			pr_err("dsi_ctrl_setup failed rc %d\n", rc);
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
-int dsi_display_pre_kickoff(struct drm_connector *connector,
-		struct dsi_display *display,
-		struct msm_display_kickoff_params *params)
-{
-	int rc = 0;
-	int i;
-	bool enable;
-
-	/* check and setup MISR */
-	if (display->misr_enable)
-		_dsi_display_setup_misr(display);
-
-	if (params->qsync_update) {
-		enable = (params->qsync_mode > 0) ? true : false;
-		rc = dsi_display_qsync(display, enable);
-		if (rc)
-			pr_err("%s failed to send qsync commands\n",
-				__func__);
-		SDE_EVT32(params->qsync_mode, rc);
-	}
-
-	rc = dsi_display_set_roi(display, params->rois);
-
-	/* dynamic DSI clock setting */
-	if (atomic_read(&display->clkrate_change_pending)) {
-		mutex_lock(&display->display_lock);
-		/*
-		 * acquire panel_lock to make sure no commands are in progress
-		 */
-		dsi_panel_acquire_panel_lock(display->panel);
-
-		/*
-		 * Wait for DSI command engine not to be busy sending data
-		 * from display engine.
-		 * If waiting fails, return "rc" instead of below "ret" so as
-		 * not to impact DRM commit. The clock updating would be
-		 * deferred to the next DRM commit.
-		 */
-		display_for_each_ctrl(i, display) {
-			struct dsi_ctrl *ctrl = display->ctrl[i].ctrl;
-			int ret = 0;
-
-			ret = dsi_ctrl_wait_for_cmd_mode_mdp_idle(ctrl);
-			if (ret)
-				goto wait_failure;
-		}
-
-		/*
-		 * Don't check the return value so as not to impact DRM commit
-		 * when error occurs.
-		 */
-		(void)dsi_display_force_update_dsi_clk(display);
-wait_failure:
-		/* release panel_lock */
-		dsi_panel_release_panel_lock(display->panel);
-		mutex_unlock(&display->display_lock);
-	}
-
-	return rc;
-}
-
-int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display || !display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!display->panel->cur_mode) {
-		pr_err("no valid mode set for the display\n");
-		return -EINVAL;
-	}
-
-	if (!display->is_cont_splash_enabled)
-		return 0;
-
-	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
-		rc = dsi_display_vid_engine_enable(display);
-		if (rc) {
-			pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
-			       display->name, rc);
-			goto error_out;
-		}
-	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
-		rc = dsi_display_cmd_engine_enable(display);
-		if (rc) {
-			pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
-			       display->name, rc);
-			goto error_out;
-		}
-	} else {
-		pr_err("[%s] Invalid configuration\n", display->name);
-		rc = -EINVAL;
-	}
-
-error_out:
-	return rc;
-}
-
-int dsi_display_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	struct dsi_display_mode *mode;
-
-	if (!display || !display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!display->panel->cur_mode) {
-		pr_err("no valid mode set for the display\n");
-		return -EINVAL;
-	}
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-
-	/* Engine states and panel states are populated during splash
-	 * resource init and hence we return early
-	 */
-	if (display->is_cont_splash_enabled) {
-
-		dsi_display_config_ctrl_for_cont_splash(display);
-
-		rc = dsi_display_splash_res_cleanup(display);
-		if (rc) {
-			pr_err("Continuous splash res cleanup failed, rc=%d\n",
-				rc);
-			return -EINVAL;
-		}
-
-		display->panel->panel_initialized = true;
-		pr_debug("cont splash enabled, display enable not required\n");
-		return 0;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	mode = display->panel->cur_mode;
-
-	if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
-		rc = dsi_panel_post_switch(display->panel);
-		if (rc) {
-			pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
-				   display->name, rc);
-			goto error;
-		}
-	} else {
-		rc = dsi_panel_enable(display->panel);
-		if (rc) {
-			pr_err("[%s] failed to enable DSI panel, rc=%d\n",
-			       display->name, rc);
-			goto error;
-		}
-	}
-
-	if (mode->priv_info->dsc_enabled) {
-		mode->priv_info->dsc.pic_width *= display->ctrl_count;
-		rc = dsi_panel_update_pps(display->panel);
-		if (rc) {
-			pr_err("[%s] panel pps cmd update failed, rc=%d\n",
-				display->name, rc);
-			goto error;
-		}
-	}
-
-	if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
-		rc = dsi_panel_switch(display->panel);
-		if (rc)
-			pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
-				   display->name, rc);
-
-		goto error;
-	}
-
-	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
-		rc = dsi_display_vid_engine_enable(display);
-		if (rc) {
-			pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_panel;
-		}
-	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
-		rc = dsi_display_cmd_engine_enable(display);
-		if (rc) {
-			pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_panel;
-		}
-	} else {
-		pr_err("[%s] Invalid configuration\n", display->name);
-		rc = -EINVAL;
-		goto error_disable_panel;
-	}
-
-	goto error;
-
-error_disable_panel:
-	(void)dsi_panel_disable(display->panel);
-error:
-	mutex_unlock(&display->display_lock);
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-	return rc;
-}
-
-int dsi_display_post_enable(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_panel_post_enable(display->panel);
-	if (rc)
-		pr_err("[%s] panel post-enable failed, rc=%d\n",
-		       display->name, rc);
-
-	/* remove the clk vote for CMD mode panels */
-	if (display->config.panel_mode == DSI_OP_CMD_MODE)
-		dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_pre_disable(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	/* enable the clk vote for CMD mode panels */
-	if (display->config.panel_mode == DSI_OP_CMD_MODE)
-		dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-
-	rc = dsi_panel_pre_disable(display->panel);
-	if (rc)
-		pr_err("[%s] panel pre-disable failed, rc=%d\n",
-		       display->name, rc);
-
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_disable(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_wake_up(display);
-	if (rc)
-		pr_err("[%s] display wake up failed, rc=%d\n",
-		       display->name, rc);
-
-	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
-		rc = dsi_display_vid_engine_disable(display);
-		if (rc)
-			pr_err("[%s]failed to disable DSI vid engine, rc=%d\n",
-			       display->name, rc);
-	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
-		rc = dsi_display_cmd_engine_disable(display);
-		if (rc)
-			pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n",
-			       display->name, rc);
-	} else {
-		pr_err("[%s] Invalid configuration\n", display->name);
-		rc = -EINVAL;
-	}
-
-	rc = dsi_panel_disable(display->panel);
-	if (rc)
-		pr_err("[%s] failed to disable DSI panel, rc=%d\n",
-		       display->name, rc);
-
-	mutex_unlock(&display->display_lock);
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-	return rc;
-}
-
-int dsi_display_update_pps(char *pps_cmd, void *disp)
-{
-	struct dsi_display *display;
-
-	if (pps_cmd == NULL || disp == NULL) {
-		pr_err("Invalid parameter\n");
-		return -EINVAL;
-	}
-
-	display = disp;
-	mutex_lock(&display->display_lock);
-	memcpy(display->panel->dsc_pps_cmd, pps_cmd, DSI_CMD_PPS_SIZE);
-	mutex_unlock(&display->display_lock);
-
-	return 0;
-}
-
-int dsi_display_unprepare(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_wake_up(display);
-	if (rc)
-		pr_err("[%s] display wake up failed, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_panel_unprepare(display->panel);
-	if (rc)
-		pr_err("[%s] panel unprepare failed, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_display_ctrl_host_disable(display);
-	if (rc)
-		pr_err("[%s] failed to disable DSI host, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_LINK_CLK, DSI_CLK_OFF);
-	if (rc)
-		pr_err("[%s] failed to disable Link clocks, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_display_ctrl_deinit(display);
-	if (rc)
-		pr_err("[%s] failed to deinit controller, rc=%d\n",
-		       display->name, rc);
-
-	if (!display->panel->ulps_suspend_enabled) {
-		rc = dsi_display_phy_disable(display);
-		if (rc)
-			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-	if (rc)
-		pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
-		       display->name, rc);
-
-	/* destrory dsi isr set up */
-	dsi_display_ctrl_isr_configure(display, false);
-
-	rc = dsi_panel_post_unprepare(display->panel);
-	if (rc)
-		pr_err("[%s] panel post-unprepare failed, rc=%d\n",
-		       display->name, rc);
-
-	mutex_unlock(&display->display_lock);
-
-	/* Free up DSI ERROR event callback */
-	dsi_display_unregister_error_handler(display);
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
-	return rc;
-}
-
-static int __init dsi_display_register(void)
-{
-	dsi_phy_drv_register();
-	dsi_ctrl_drv_register();
-
-	dsi_display_parse_boot_display_selection();
-
-	return platform_driver_register(&dsi_display_driver);
-}
-
-static void __exit dsi_display_unregister(void)
-{
-	platform_driver_unregister(&dsi_display_driver);
-	dsi_ctrl_drv_unregister();
-	dsi_phy_drv_unregister();
-}
-module_param_string(dsi_display0, dsi_display_primary, MAX_CMDLINE_PARAM_LEN,
-								0600);
-MODULE_PARM_DESC(dsi_display0,
-	"msm_drm.dsi_display0=<display node>:<configX> where <display node> is 'primary dsi display node name' and <configX> where x represents index in the topology list");
-module_param_string(dsi_display1, dsi_display_secondary, MAX_CMDLINE_PARAM_LEN,
-								0600);
-MODULE_PARM_DESC(dsi_display1,
-	"msm_drm.dsi_display1=<display node>:<configX> where <display node> is 'secondary dsi display node name' and <configX> where x represents index in the topology list");
-module_init(dsi_display_register);
-module_exit(dsi_display_unregister);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
deleted file mode 100644
index 8162fc8..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ /dev/null
@@ -1,699 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation.All rights reserved.
- */
-
-#ifndef _DSI_DISPLAY_H_
-#define _DSI_DISPLAY_H_
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/debugfs.h>
-#include <linux/of_device.h>
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-
-#include "msm_drv.h"
-#include "dsi_defs.h"
-#include "dsi_ctrl.h"
-#include "dsi_phy.h"
-#include "dsi_panel.h"
-
-#define MAX_DSI_CTRLS_PER_DISPLAY             2
-#define DSI_CLIENT_NAME_SIZE		20
-#define MAX_CMDLINE_PARAM_LEN	 512
-#define MAX_CMD_PAYLOAD_SIZE	256
-/*
- * DSI Validate Mode modifiers
- * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
- */
-#define DSI_VALIDATE_FLAG_ALLOW_ADJUST	0x1
-
-/**
- * enum dsi_display_selection_type - enumerates DSI display selection types
- * @DSI_PRIMARY:    primary DSI display selected from module parameter
- * @DSI_SECONDARY:  Secondary DSI display selected from module parameter
- * @MAX_DSI_ACTIVE_DISPLAY: Maximum acive displays that can be selected
- */
-enum dsi_display_selection_type {
-	DSI_PRIMARY = 0,
-	DSI_SECONDARY,
-	MAX_DSI_ACTIVE_DISPLAY,
-};
-
-/**
- * enum dsi_display_type - enumerates DSI display types
- * @DSI_DISPLAY_SINGLE:       A panel connected on a single DSI interface.
- * @DSI_DISPLAY_EXT_BRIDGE:   A bridge is connected between panel and DSI host.
- *			      It utilizes a single DSI interface.
- * @DSI_DISPLAY_SPLIT:        A panel that utilizes more than one DSI
- *			      interfaces.
- * @DSI_DISPLAY_SPLIT_EXT_BRIDGE: A bridge is present between panel and DSI
- *				  host. It utilizes more than one DSI interface.
- */
-enum dsi_display_type {
-	DSI_DISPLAY_SINGLE = 0,
-	DSI_DISPLAY_EXT_BRIDGE,
-	DSI_DISPLAY_SPLIT,
-	DSI_DISPLAY_SPLIT_EXT_BRIDGE,
-	DSI_DISPLAY_MAX,
-};
-
-/**
- * struct dsi_display_ctrl - dsi ctrl/phy information for the display
- * @ctrl:           Handle to the DSI controller device.
- * @ctrl_of_node:   pHandle to the DSI controller device.
- * @dsi_ctrl_idx:   DSI controller instance id.
- * @power_state:    Current power state of the DSI controller.
- * @phy:                  Handle to the DSI PHY device.
- * @phy_of_node:          pHandle to the DSI PHY device.
- * @phy_enabled:          PHY power status.
- */
-struct dsi_display_ctrl {
-	/* controller info */
-	struct dsi_ctrl *ctrl;
-	struct device_node *ctrl_of_node;
-	u32 dsi_ctrl_idx;
-
-	enum dsi_power_state power_state;
-
-	/* phy info */
-	struct msm_dsi_phy *phy;
-	struct device_node *phy_of_node;
-
-	bool phy_enabled;
-};
-/**
- * struct dsi_display_boot_param - defines DSI boot display selection
- * @name:Name of DSI display selected as a boot param.
- * @boot_disp_en:bool to indicate dtsi availability of display node
- * @is_primary:bool to indicate whether current display is primary display
- * @length:length of DSI display.
- * @cmdline_topology: Display topology shared from kernel command line.
- */
-struct dsi_display_boot_param {
-	char name[MAX_CMDLINE_PARAM_LEN];
-	char *boot_param;
-	bool boot_disp_en;
-	int length;
-	struct device_node *node;
-	int cmdline_topology;
-	void *disp;
-};
-
-/**
- * struct dsi_display_clk_info - dsi display clock source information
- * @src_clks:          Source clocks for DSI display.
- * @mux_clks:          Mux clocks used for DFPS.
- * @shadow_clks:       Used for DFPS.
- */
-struct dsi_display_clk_info {
-	struct dsi_clk_link_set src_clks;
-	struct dsi_clk_link_set mux_clks;
-	struct dsi_clk_link_set shadow_clks;
-};
-
-/**
- * struct dsi_display_ext_bridge - dsi display external bridge information
- * @display:           Pointer of DSI display.
- * @node_of:           Bridge node created from bridge driver.
- * @bridge:            Bridge created from bridge driver
- * @orig_funcs:        Bridge function from bridge driver (split mode only)
- * @bridge_funcs:      Overridden function from bridge driver (split mode only)
- */
-struct dsi_display_ext_bridge {
-	void *display;
-	struct device_node *node_of;
-	struct drm_bridge *bridge;
-	const struct drm_bridge_funcs *orig_funcs;
-	struct drm_bridge_funcs bridge_funcs;
-};
-
-/**
- * struct dsi_display - dsi display information
- * @pdev:             Pointer to platform device.
- * @drm_dev:          DRM device associated with the display.
- * @drm_conn:         Pointer to DRM connector associated with the display
- * @ext_conn:         Pointer to external connector attached to DSI connector
- * @name:             Name of the display.
- * @display_type:     Display type as defined in device tree.
- * @list:             List pointer.
- * @is_active:        Is display active.
- * @is_cont_splash_enabled:  Is continuous splash enabled
- * @sw_te_using_wd:   Is software te enabled
- * @display_lock:     Mutex for dsi_display interface.
- * @disp_te_gpio:     GPIO for panel TE interrupt.
- * @is_te_irq_enabled:bool to specify whether TE interrupt is enabled.
- * @esd_te_gate:      completion gate to signal TE interrupt.
- * @ctrl_count:       Number of DSI interfaces required by panel.
- * @ctrl:             Controller information for DSI display.
- * @panel:            Handle to DSI panel.
- * @panel_node:       pHandle to DSI panel actually in use.
- * @ext_bridge:       External bridge information for DSI display.
- * @ext_bridge_cnt:   Number of external bridges
- * @modes:            Array of probed DSI modes
- * @type:             DSI display type.
- * @clk_master_idx:   The master controller for controlling clocks. This is an
- *		      index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
- * @cmd_master_idx:   The master controller for sending DSI commands to panel.
- * @video_master_idx: The master controller for enabling video engine.
- * @cached_clk_rate:  The cached DSI clock rate set dynamically by sysfs.
- * @clkrate_change_pending: Flag indicating the pending DSI clock re-enabling.
- * @clock_info:       Clock sourcing for DSI display.
- * @config:           DSI host configuration information.
- * @lane_map:         Lane mapping between DSI host and Panel.
- * @cmdline_topology: Display topology shared from kernel command line.
- * @cmdline_timing:   Display timing shared from kernel command line.
- * @is_tpg_enabled:   TPG state.
- * @ulps_enabled:     ulps state.
- * @clamp_enabled:    clamp state.
- * @phy_idle_power_off:   PHY power state.
- * @host:             DRM MIPI DSI Host.
- * @bridge:           Pointer to DRM bridge object.
- * @cmd_engine_refcount:  Reference count enforcing single instance of cmd eng
- * @clk_mngr:         DSI clock manager.
- * @dsi_clk_handle:   DSI clock handle.
- * @mdp_clk_handle:   MDP clock handle.
- * @root:             Debugfs root directory
- * @misr_enable       Frame MISR enable/disable
- * @misr_frame_count  Number of frames to accumulate the MISR value
- * @esd_trigger       field indicating ESD trigger through debugfs
- * @te_source         vsync source pin information
- */
-struct dsi_display {
-	struct platform_device *pdev;
-	struct drm_device *drm_dev;
-	struct drm_connector *drm_conn;
-	struct drm_connector *ext_conn;
-
-	const char *name;
-	const char *display_type;
-	struct list_head list;
-	bool is_cont_splash_enabled;
-	bool sw_te_using_wd;
-	struct mutex display_lock;
-	int disp_te_gpio;
-	bool is_te_irq_enabled;
-	struct completion esd_te_gate;
-
-	u32 ctrl_count;
-	struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
-
-	/* panel info */
-	struct dsi_panel *panel;
-	struct device_node *panel_node;
-	struct device_node *parser_node;
-
-	/* external bridge */
-	struct dsi_display_ext_bridge ext_bridge[MAX_DSI_CTRLS_PER_DISPLAY];
-	u32 ext_bridge_cnt;
-
-	struct dsi_display_mode *modes;
-
-	enum dsi_display_type type;
-	u32 clk_master_idx;
-	u32 cmd_master_idx;
-	u32 video_master_idx;
-
-	/* dynamic DSI clock info*/
-	u32  cached_clk_rate;
-	atomic_t clkrate_change_pending;
-
-	struct dsi_display_clk_info clock_info;
-	struct dsi_host_config config;
-	struct dsi_lane_map lane_map;
-	int cmdline_topology;
-	int cmdline_timing;
-	bool is_tpg_enabled;
-	bool ulps_enabled;
-	bool clamp_enabled;
-	bool phy_idle_power_off;
-	struct drm_gem_object *tx_cmd_buf;
-	u32 cmd_buffer_size;
-	u64 cmd_buffer_iova;
-	void *vaddr;
-	struct msm_gem_address_space *aspace;
-
-	struct mipi_dsi_host host;
-	struct dsi_bridge    *bridge;
-	u32 cmd_engine_refcount;
-
-	struct sde_power_handle *phandle;
-	struct sde_power_client *cont_splash_client;
-
-	void *clk_mngr;
-	void *dsi_clk_handle;
-	void *mdp_clk_handle;
-
-	/* DEBUG FS */
-	struct dentry *root;
-
-	bool misr_enable;
-	u32 misr_frame_count;
-	u32 esd_trigger;
-	/* multiple dsi error handlers */
-	struct workqueue_struct *err_workq;
-	struct work_struct fifo_underflow_work;
-	struct work_struct fifo_overflow_work;
-	struct work_struct lp_rx_timeout_work;
-
-	/* firmware panel data */
-	const struct firmware *fw;
-	void *parser;
-
-	struct dsi_display_boot_param *boot_disp;
-
-	u32 te_source;
-};
-
-int dsi_display_dev_probe(struct platform_device *pdev);
-int dsi_display_dev_remove(struct platform_device *pdev);
-
-/**
- * dsi_display_get_num_of_displays() - returns number of display devices
- *				       supported.
- *
- * Return: number of displays.
- */
-int dsi_display_get_num_of_displays(void);
-
-/**
- * dsi_display_get_active_displays - returns pointers for active display devices
- * @display_array: Pointer to display array to be filled
- * @max_display_count: Size of display_array
- * @Returns: Number of display entries filled
- */
-int dsi_display_get_active_displays(void **display_array,
-		u32 max_display_count);
-
-/**
- * dsi_display_get_display_by_name()- finds display by name
- * @name:	name of the display.
- *
- * Return: handle to the display or error code.
- */
-struct dsi_display *dsi_display_get_display_by_name(const char *name);
-
-/**
- * dsi_display_set_active_state() - sets the state of the display
- * @display:        Handle to display.
- * @is_active:      state
- */
-void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
-
-/**
- * dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
- * @display:            Handle to the display.
- * @encoder:            Pointer to the encoder object which is connected to the
- *			display.
- *
- * Return: error code.
- */
-int dsi_display_drm_bridge_init(struct dsi_display *display,
-		struct drm_encoder *enc);
-
-/**
- * dsi_display_drm_bridge_deinit() - destroys DRM bridge for the display
- * @display:        Handle to the display.
- *
- * Return: error code.
- */
-int dsi_display_drm_bridge_deinit(struct dsi_display *display);
-
-/**
- * dsi_display_drm_ext_bridge_init() - initializes DRM bridge for ext bridge
- * @display:            Handle to the display.
- * @enc:                Pointer to the encoder object which is connected to the
- *                      display.
- * @connector:          Pointer to the connector object which is connected to
- *                      the display.
- *
- * Return: error code.
- */
-int dsi_display_drm_ext_bridge_init(struct dsi_display *display,
-		struct drm_encoder *enc, struct drm_connector *connector);
-
-/**
- * dsi_display_get_info() - returns the display properties
- * @connector:        Pointer to drm connector structure
- * @info:             Pointer to the structure where info is stored.
- * @disp:             Handle to the display.
- *
- * Return: error code.
- */
-int dsi_display_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *disp);
-
-/**
- * dsi_display_get_mode_count() - get number of modes supported by the display
- * @display:            Handle to display.
- * @count:              Number of modes supported
- *
- * Return: error code.
- */
-int dsi_display_get_mode_count(struct dsi_display *display, u32 *count);
-
-/**
- * dsi_display_get_modes() - get modes supported by display
- * @display:            Handle to display.
- * @modes;              Output param, list of DSI modes. Number of modes matches
- *                      count returned by dsi_display_get_mode_count
- *
- * Return: error code.
- */
-int dsi_display_get_modes(struct dsi_display *display,
-			  struct dsi_display_mode **modes);
-
-/**
- * dsi_display_put_mode() - free up mode created for the display
- * @display:            Handle to display.
- * @mode:               Display mode to be freed up
- *
- * Return: error code.
- */
-void dsi_display_put_mode(struct dsi_display *display,
-	struct dsi_display_mode *mode);
-
-/**
- * dsi_display_get_default_lms() - retrieve max number of lms used
- *             for dsi display by traversing through all topologies
- * @display:            Handle to display.
- * @num_lm:             Number of LMs used
- *
- * Return: error code.
- */
-int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm);
-
-/**
- * dsi_display_find_mode() - retrieve cached DSI mode given relevant params
- * @display:            Handle to display.
- * @cmp:                Mode to use as comparison to find original
- * @out_mode:           Output parameter, pointer to retrieved mode
- *
- * Return: error code.
- */
-int dsi_display_find_mode(struct dsi_display *display,
-		const struct dsi_display_mode *cmp,
-		struct dsi_display_mode **out_mode);
-/**
- * dsi_display_validate_mode() - validates if mode is supported by display
- * @display:             Handle to display.
- * @mode:                Mode to be validated.
- * @flags:               Modifier flags.
- *
- * Return: 0 if supported or error code.
- */
-int dsi_display_validate_mode(struct dsi_display *display,
-			      struct dsi_display_mode *mode,
-			      u32 flags);
-
-/**
- * dsi_display_validate_mode_vrr() - validates mode if variable refresh case
- * @display:             Handle to display.
- * @mode:                Mode to be validated..
- *
- * Return: 0 if  error code.
- */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
-			struct dsi_display_mode *cur_dsi_mode,
-			struct dsi_display_mode *mode);
-
-/**
- * dsi_display_set_mode() - Set mode on the display.
- * @display:           Handle to display.
- * @mode:              mode to be set.
- * @flags:             Modifier flags.
- *
- * Return: error code.
- */
-int dsi_display_set_mode(struct dsi_display *display,
-			 struct dsi_display_mode *mode,
-			 u32 flags);
-
-/**
- * dsi_display_prepare() - prepare display
- * @display:          Handle to display.
- *
- * Prepare will perform power up sequences for the host and panel hardware.
- * Power and clock resources might be turned on (depending on the panel mode).
- * The video engine is not enabled.
- *
- * Return: error code.
- */
-int dsi_display_prepare(struct dsi_display *display);
-
-/**
- * dsi_display_splash_res_cleanup() - cleanup for continuous splash
- * @display:    Pointer to dsi display
- * Returns:     Zero on success
- */
-int dsi_display_splash_res_cleanup(struct  dsi_display *display);
-
-/**
- * dsi_display_config_ctrl_for_cont_splash()- Enable engine modes for DSI
- *                                     controller during continuous splash
- * @display: Handle to DSI display
- *
- * Return:        returns error code
- */
-int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display);
-
-/**
- * dsi_display_enable() - enable display
- * @display:            Handle to display.
- *
- * Enable will turn on the host engine and the panel. At the end of the enable
- * function, Host and panel hardware are ready to accept pixel data from
- * upstream.
- *
- * Return: error code.
- */
-int dsi_display_enable(struct dsi_display *display);
-
-/**
- * dsi_display_post_enable() - perform post enable operations.
- * @display:         Handle to display.
- *
- * Some panels might require some commands to be sent after pixel data
- * transmission has started. Such commands are sent as part of the post_enable
- * function.
- *
- * Return: error code.
- */
-int dsi_display_post_enable(struct dsi_display *display);
-
-/**
- * dsi_display_pre_disable() - perform pre disable operations.
- * @display:          Handle to display.
- *
- * If a panel requires commands to be sent before pixel data transmission is
- * stopped, those can be sent as part of pre_disable.
- *
- * Return: error code.
- */
-int dsi_display_pre_disable(struct dsi_display *display);
-
-/**
- * dsi_display_disable() - disable panel and host hardware.
- * @display:             Handle to display.
- *
- * Disable host and panel hardware and pixel data transmission can not continue.
- *
- * Return: error code.
- */
-int dsi_display_disable(struct dsi_display *display);
-
-/**
- * dsi_pre_clkoff_cb() - Callback before clock is turned off
- * @priv: private data pointer.
- * @clk_type: clock which is being turned on.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @new_state: next state for the clock.
- *
- * @return: error code.
- */
-int dsi_pre_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
-		enum dsi_lclk_type l_type,
-		enum dsi_clk_state new_state);
-
-/**
- * dsi_display_update_pps() - update PPS buffer.
- * @pps_cmd:             PPS buffer.
- * @display:             Handle to display.
- *
- * Copies new PPS buffer into display structure.
- *
- * Return: error code.
- */
-int dsi_display_update_pps(char *pps_cmd, void *display);
-
-/**
- * dsi_post_clkoff_cb() - Callback after clock is turned off
- * @priv: private data pointer.
- * @clk_type: clock which is being turned on.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @curr_state: current state for the clock.
- *
- * @return: error code.
- */
-int dsi_post_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
-		enum dsi_lclk_type l_type,
-		enum dsi_clk_state curr_state);
-
-/**
- * dsi_post_clkon_cb() - Callback after clock is turned on
- * @priv: private data pointer.
- * @clk_type: clock which is being turned on.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @curr_state: current state for the clock.
- *
- * @return: error code.
- */
-int dsi_post_clkon_cb(void *priv, enum dsi_clk_type clk_type,
-		enum dsi_lclk_type l_type,
-		enum dsi_clk_state curr_state);
-
-/**
- * dsi_pre_clkon_cb() - Callback before clock is turned on
- * @priv: private data pointer.
- * @clk_type: clock which is being turned on.
- * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
- * @new_state: next state for the clock.
- *
- * @return: error code.
- */
-int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
-		enum dsi_lclk_type l_type,
-		enum dsi_clk_state new_state);
-
-/**
- * dsi_display_unprepare() - power off display hardware.
- * @display:            Handle to display.
- *
- * Host and panel hardware is turned off. Panel will be in reset state at the
- * end of the function.
- *
- * Return: error code.
- */
-int dsi_display_unprepare(struct dsi_display *display);
-
-int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
-
-int dsi_display_clock_gate(struct dsi_display *display, bool enable);
-int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
-
-/**
- * dsi_display_enable_event() - enable interrupt based connector event
- * @connector:          Pointer to drm connector structure
- * @display:            Handle to display.
- * @event_idx:          Event index.
- * @event_info:         Event callback definition.
- * @enable:             Whether to enable/disable the event interrupt.
- */
-void dsi_display_enable_event(struct drm_connector *connector,
-		struct dsi_display *display,
-		uint32_t event_idx, struct dsi_event_cb_info *event_info,
-		bool enable);
-
-/**
- * dsi_display_set_backlight() - set backlight
- * @connector:          Pointer to drm connector structure
- * @display:            Handle to display.
- * @bl_lvl:             Backlight level.
- * @event_info:         Event callback definition.
- * @enable:             Whether to enable/disable the event interrupt.
- */
-int dsi_display_set_backlight(struct drm_connector *connector,
-		void *display, u32 bl_lvl);
-
-/**
- * dsi_display_check_status() - check if panel is dead or alive
- * @connector:          Pointer to drm connector structure
- * @display:            Handle to display.
- * @te_check_override:	Whether check for TE from panel or default check
- */
-int dsi_display_check_status(struct drm_connector *connector, void *display,
-				bool te_check_override);
-
-/**
- * dsi_display_cmd_transfer() - transfer command to the panel
- * @connector:          Pointer to drm connector structure
- * @display:            Handle to display.
- * @cmd_buf:            Command buffer
- * @cmd_buf_len:        Command buffer length in bytes
- */
-int dsi_display_cmd_transfer(struct drm_connector *connector,
-		void *display, const char *cmd_buffer,
-		u32 cmd_buf_len);
-
-/**
- * dsi_display_soft_reset() - perform a soft reset on DSI controller
- * @display:         Handle to display
- *
- * The video, command and controller engines will be disabled before the
- * reset is triggered. After, the engines will be re-enabled to the same state
- * as before the reset.
- *
- * If the reset is done while MDP timing engine is turned on, the video
- * engine should be re-enabled only during the vertical blanking time.
- *
- * Return: error code
- */
-int dsi_display_soft_reset(void *display);
-
-/**
- * dsi_display_set_power - update power/dpms setting
- * @connector: Pointer to drm connector structure
- * @power_mode: One of the following,
- *              SDE_MODE_DPMS_ON
- *              SDE_MODE_DPMS_LP1
- *              SDE_MODE_DPMS_LP2
- *              SDE_MODE_DPMS_STANDBY
- *              SDE_MODE_DPMS_SUSPEND
- *              SDE_MODE_DPMS_OFF
- * @display: Pointer to private display structure
- * Returns: Zero on success
- */
-int dsi_display_set_power(struct drm_connector *connector,
-		int power_mode, void *display);
-
-/*
- * dsi_display_pre_kickoff - program kickoff-time features
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display structure
- * @params: Parameters for kickoff-time programming
- * Returns: Zero on success
- */
-int dsi_display_pre_kickoff(struct drm_connector *connector,
-		struct dsi_display *display,
-		struct msm_display_kickoff_params *params);
-/**
- * dsi_display_get_dst_format() - get dst_format from DSI display
- * @connector:        Pointer to drm connector structure
- * @display:         Handle to display
- *
- * Return: enum dsi_pixel_format type
- */
-enum dsi_pixel_format dsi_display_get_dst_format(
-		struct drm_connector *connector,
-		void *display);
-
-/**
- * dsi_display_cont_splash_config() - initialize splash resources
- * @display:         Handle to display
- *
- * Return: Zero on Success
- */
-int dsi_display_cont_splash_config(void *display);
-/*
- * dsi_display_get_panel_vfp - get panel vsync
- * @display: Pointer to private display structure
- * @h_active: width
- * @v_active: height
- * Returns: v_front_porch on success error code on failure
- */
-int dsi_display_get_panel_vfp(void *display,
-	int h_active, int v_active);
-
-#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
deleted file mode 100644
index 6be8674..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include "dsi_display_test.h"
-
-static void dsi_display_test_dump_modes(struct dsi_display_mode *mode, u32
-					count)
-{
-}
-
-static void dsi_display_test_work(struct work_struct *work)
-{
-	struct dsi_display_test *test;
-	struct dsi_display *display;
-	struct dsi_display_mode *modes;
-	u32 count = 0;
-	int rc = 0;
-
-	test = container_of(work, struct dsi_display_test, test_work);
-
-	display = test->display;
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc) {
-		pr_err("failed to get modes count, rc=%d\n", rc);
-		goto test_fail;
-	}
-
-	rc = dsi_display_get_modes(display, &modes);
-	if (rc) {
-		pr_err("failed to get modes, rc=%d\n", rc);
-		goto test_fail_free_modes;
-	}
-
-	dsi_display_test_dump_modes(modes, count);
-
-	rc = dsi_display_set_mode(display, &modes[0], 0x0);
-	if (rc) {
-		pr_err("failed to set mode, rc=%d\n", rc);
-		goto test_fail_free_modes;
-	}
-
-	rc = dsi_display_prepare(display);
-	if (rc) {
-		pr_err("failed to prepare display, rc=%d\n", rc);
-		goto test_fail_free_modes;
-	}
-
-	rc = dsi_display_enable(display);
-	if (rc) {
-		pr_err("failed to enable display, rc=%d\n", rc);
-		goto test_fail_unprep_disp;
-	}
-	return;
-
-test_fail_unprep_disp:
-	if (rc) {
-		pr_err("failed to unprep display, rc=%d\n", rc);
-		goto test_fail_free_modes;
-	}
-
-test_fail_free_modes:
-	kfree(modes);
-test_fail:
-	return;
-}
-
-int dsi_display_test_init(struct dsi_display *display)
-{
-	static int done;
-	int rc = 0;
-	struct dsi_display_test *test;
-
-	if (done)
-		return rc;
-
-	done = 1;
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	test = kzalloc(sizeof(*test), GFP_KERNEL);
-	if (!test)
-		return -ENOMEM;
-
-	test->display = display;
-	INIT_WORK(&test->test_work, dsi_display_test_work);
-
-	dsi_display_test_work(&test->test_work);
-	return rc;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
deleted file mode 100644
index b226ca4..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_DISPLAY_TEST_H_
-#define _DSI_DISPLAY_TEST_H_
-
-#include "dsi_display.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_ctrl.h"
-
-struct dsi_display_test {
-	struct dsi_display *display;
-
-	struct work_struct test_work;
-};
-
-int dsi_display_test_init(struct dsi_display *display);
-
-
-#endif /* _DSI_DISPLAY_TEST_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
deleted file mode 100644
index 68fc901..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ /dev/null
@@ -1,927 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-
-#define pr_fmt(fmt)	"dsi-drm:[%s] " fmt, __func__
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic.h>
-
-#include "msm_kms.h"
-#include "sde_connector.h"
-#include "dsi_drm.h"
-#include "sde_trace.h"
-
-#define to_dsi_bridge(x)     container_of((x), struct dsi_bridge, base)
-#define to_dsi_state(x)      container_of((x), struct dsi_connector_state, base)
-
-#define DEFAULT_PANEL_JITTER_NUMERATOR		2
-#define DEFAULT_PANEL_JITTER_DENOMINATOR	1
-#define DEFAULT_PANEL_JITTER_ARRAY_SIZE		2
-#define DEFAULT_PANEL_PREFILL_LINES	25
-
-static struct dsi_display_mode_priv_info default_priv_info = {
-	.panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR,
-	.panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR,
-	.panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES,
-	.dsc_enabled = false,
-};
-
-static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
-				struct dsi_display_mode *dsi_mode)
-{
-	memset(dsi_mode, 0, sizeof(*dsi_mode));
-
-	dsi_mode->timing.h_active = drm_mode->hdisplay;
-	dsi_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
-	dsi_mode->timing.h_sync_width = drm_mode->htotal -
-			(drm_mode->hsync_start + dsi_mode->timing.h_back_porch);
-	dsi_mode->timing.h_front_porch = drm_mode->hsync_start -
-					 drm_mode->hdisplay;
-	dsi_mode->timing.h_skew = drm_mode->hskew;
-
-	dsi_mode->timing.v_active = drm_mode->vdisplay;
-	dsi_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
-	dsi_mode->timing.v_sync_width = drm_mode->vtotal -
-		(drm_mode->vsync_start + dsi_mode->timing.v_back_porch);
-
-	dsi_mode->timing.v_front_porch = drm_mode->vsync_start -
-					 drm_mode->vdisplay;
-
-	dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
-
-	dsi_mode->pixel_clk_khz = drm_mode->clock;
-
-	dsi_mode->priv_info =
-		(struct dsi_display_mode_priv_info *)drm_mode->private;
-
-	if (dsi_mode->priv_info) {
-		dsi_mode->timing.dsc_enabled = dsi_mode->priv_info->dsc_enabled;
-		dsi_mode->timing.dsc = &dsi_mode->priv_info->dsc;
-	}
-
-	if (msm_is_mode_seamless(drm_mode))
-		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_SEAMLESS;
-	if (msm_is_mode_dynamic_fps(drm_mode))
-		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DFPS;
-	if (msm_needs_vblank_pre_modeset(drm_mode))
-		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
-	if (msm_is_mode_seamless_dms(drm_mode))
-		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
-	if (msm_is_mode_seamless_vrr(drm_mode))
-		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-
-	dsi_mode->timing.h_sync_polarity =
-			!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
-	dsi_mode->timing.v_sync_polarity =
-			!!(drm_mode->flags & DRM_MODE_FLAG_PVSYNC);
-}
-
-void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
-				struct drm_display_mode *drm_mode)
-{
-	memset(drm_mode, 0, sizeof(*drm_mode));
-
-	drm_mode->hdisplay = dsi_mode->timing.h_active;
-	drm_mode->hsync_start = drm_mode->hdisplay +
-				dsi_mode->timing.h_front_porch;
-	drm_mode->hsync_end = drm_mode->hsync_start +
-			      dsi_mode->timing.h_sync_width;
-	drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
-	drm_mode->hskew = dsi_mode->timing.h_skew;
-
-	drm_mode->vdisplay = dsi_mode->timing.v_active;
-	drm_mode->vsync_start = drm_mode->vdisplay +
-				dsi_mode->timing.v_front_porch;
-	drm_mode->vsync_end = drm_mode->vsync_start +
-			      dsi_mode->timing.v_sync_width;
-	drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
-
-	drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
-	drm_mode->clock = dsi_mode->pixel_clk_khz;
-
-	drm_mode->private = (int *)dsi_mode->priv_info;
-
-	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)
-		drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
-	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DFPS)
-		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
-	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
-		drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
-	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DMS)
-		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
-	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
-		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
-
-	if (dsi_mode->timing.h_sync_polarity)
-		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
-	if (dsi_mode->timing.v_sync_polarity)
-		drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
-
-	drm_mode_set_name(drm_mode);
-}
-
-static int dsi_bridge_attach(struct drm_bridge *bridge)
-{
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-
-	if (!bridge) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	pr_debug("[%d] attached\n", c_bridge->id);
-
-	return 0;
-
-}
-
-static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
-{
-	int rc = 0;
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-
-	if (!bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	if (!c_bridge || !c_bridge->display || !c_bridge->display->panel) {
-		pr_err("Incorrect bridge details\n");
-		return;
-	}
-
-	atomic_set(&c_bridge->display->panel->esd_recovery_pending, 0);
-
-	/* By this point mode should have been validated through mode_fixup */
-	rc = dsi_display_set_mode(c_bridge->display,
-			&(c_bridge->dsi_mode), 0x0);
-	if (rc) {
-		pr_err("[%d] failed to perform a mode set, rc=%d\n",
-		       c_bridge->id, rc);
-		return;
-	}
-
-	if (c_bridge->dsi_mode.dsi_mode_flags &
-		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
-		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
-	rc = dsi_display_prepare(c_bridge->display);
-	if (rc) {
-		pr_err("[%d] DSI display prepare failed, rc=%d\n",
-		       c_bridge->id, rc);
-		SDE_ATRACE_END("dsi_bridge_pre_enable");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("dsi_display_enable");
-	rc = dsi_display_enable(c_bridge->display);
-	if (rc) {
-		pr_err("[%d] DSI display enable failed, rc=%d\n",
-				c_bridge->id, rc);
-		(void)dsi_display_unprepare(c_bridge->display);
-	}
-	SDE_ATRACE_END("dsi_display_enable");
-	SDE_ATRACE_END("dsi_bridge_pre_enable");
-
-	rc = dsi_display_splash_res_cleanup(c_bridge->display);
-	if (rc)
-		pr_err("Continuous splash pipeline cleanup failed, rc=%d\n",
-									rc);
-}
-
-static void dsi_bridge_enable(struct drm_bridge *bridge)
-{
-	int rc = 0;
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-	struct dsi_display *display;
-
-	if (!bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	if (c_bridge->dsi_mode.dsi_mode_flags &
-			(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
-		pr_debug("[%d] seamless enable\n", c_bridge->id);
-		return;
-	}
-	display = c_bridge->display;
-
-	rc = dsi_display_post_enable(display);
-	if (rc)
-		pr_err("[%d] DSI display post enabled failed, rc=%d\n",
-		       c_bridge->id, rc);
-
-	if (display && display->drm_conn)
-		sde_connector_helper_bridge_enable(display->drm_conn);
-}
-
-static void dsi_bridge_disable(struct drm_bridge *bridge)
-{
-	int rc = 0;
-	struct dsi_display *display;
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-
-	if (!bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-	display = c_bridge->display;
-
-	if (display && display->drm_conn)
-		sde_connector_helper_bridge_disable(display->drm_conn);
-
-	rc = dsi_display_pre_disable(c_bridge->display);
-	if (rc) {
-		pr_err("[%d] DSI display pre disable failed, rc=%d\n",
-		       c_bridge->id, rc);
-	}
-}
-
-static void dsi_bridge_post_disable(struct drm_bridge *bridge)
-{
-	int rc = 0;
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-
-	if (!bridge) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
-	SDE_ATRACE_BEGIN("dsi_display_disable");
-	rc = dsi_display_disable(c_bridge->display);
-	if (rc) {
-		pr_err("[%d] DSI display disable failed, rc=%d\n",
-		       c_bridge->id, rc);
-		SDE_ATRACE_END("dsi_display_disable");
-		return;
-	}
-	SDE_ATRACE_END("dsi_display_disable");
-
-	rc = dsi_display_unprepare(c_bridge->display);
-	if (rc) {
-		pr_err("[%d] DSI display unprepare failed, rc=%d\n",
-		       c_bridge->id, rc);
-		SDE_ATRACE_END("dsi_bridge_post_disable");
-		return;
-	}
-	SDE_ATRACE_END("dsi_bridge_post_disable");
-}
-
-static void dsi_bridge_mode_set(struct drm_bridge *bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-
-	if (!bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
-	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
-}
-
-static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	int rc = 0;
-	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-	struct dsi_display *display;
-	struct dsi_display_mode dsi_mode, cur_dsi_mode, *panel_dsi_mode;
-	struct drm_display_mode cur_mode;
-	struct drm_crtc_state *crtc_state;
-
-	crtc_state = container_of(mode, struct drm_crtc_state, mode);
-
-	if (!bridge || !mode || !adjusted_mode) {
-		pr_err("Invalid params\n");
-		return false;
-	}
-
-	display = c_bridge->display;
-	if (!display) {
-		pr_err("Invalid params\n");
-		return false;
-	}
-
-	/*
-	 * if no timing defined in panel, it must be external mode
-	 * and we'll use empty priv info to populate the mode
-	 */
-	if (display->panel && !display->panel->num_timing_nodes) {
-		*adjusted_mode = *mode;
-		adjusted_mode->private = (int *)&default_priv_info;
-		adjusted_mode->private_flags = 0;
-		return true;
-	}
-
-	convert_to_dsi_mode(mode, &dsi_mode);
-
-	/*
-	 * retrieve dsi mode from dsi driver's cache since not safe to take
-	 * the drm mode config mutex in all paths
-	 */
-	rc = dsi_display_find_mode(display, &dsi_mode, &panel_dsi_mode);
-	if (rc)
-		return rc;
-
-	/* propagate the private info to the adjusted_mode derived dsi mode */
-	dsi_mode.priv_info = panel_dsi_mode->priv_info;
-	dsi_mode.dsi_mode_flags = panel_dsi_mode->dsi_mode_flags;
-	dsi_mode.timing.dsc_enabled = dsi_mode.priv_info->dsc_enabled;
-	dsi_mode.timing.dsc = &dsi_mode.priv_info->dsc;
-
-	rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
-			DSI_VALIDATE_FLAG_ALLOW_ADJUST);
-	if (rc) {
-		pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
-		return false;
-	}
-
-	if (bridge->encoder && bridge->encoder->crtc &&
-			crtc_state->crtc) {
-
-		convert_to_dsi_mode(&crtc_state->crtc->state->mode,
-							&cur_dsi_mode);
-		cur_dsi_mode.timing.dsc_enabled =
-				dsi_mode.priv_info->dsc_enabled;
-		cur_dsi_mode.timing.dsc = &dsi_mode.priv_info->dsc;
-		rc = dsi_display_validate_mode_vrr(c_bridge->display,
-					&cur_dsi_mode, &dsi_mode);
-		if (rc)
-			pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
-				c_bridge->display->name, rc);
-
-		cur_mode = crtc_state->crtc->mode;
-
-		/* No DMS/VRR when drm pipeline is changing */
-		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
-			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
-			(!crtc_state->active_changed ||
-			 display->is_cont_splash_enabled))
-			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
-	}
-
-	/* convert back to drm mode, propagating the private info & flags */
-	dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
-
-	return true;
-}
-
-int dsi_conn_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display)
-{
-	struct dsi_display_mode dsi_mode;
-	struct dsi_mode_info *timing;
-
-	if (!drm_mode || !mode_info)
-		return -EINVAL;
-
-	convert_to_dsi_mode(drm_mode, &dsi_mode);
-
-	if (!dsi_mode.priv_info)
-		return -EINVAL;
-
-	memset(mode_info, 0, sizeof(*mode_info));
-
-	timing = &dsi_mode.timing;
-	mode_info->frame_rate = dsi_mode.timing.refresh_rate;
-	mode_info->vtotal = DSI_V_TOTAL(timing);
-	mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
-	mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
-	mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
-	mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
-	mode_info->mdp_transfer_time_us =
-		dsi_mode.priv_info->mdp_transfer_time_us;
-
-	memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
-			sizeof(struct msm_display_topology));
-
-	mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
-	if (dsi_mode.priv_info->dsc_enabled) {
-		mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_DSC;
-		memcpy(&mode_info->comp_info.dsc_info, &dsi_mode.priv_info->dsc,
-			sizeof(dsi_mode.priv_info->dsc));
-		mode_info->comp_info.comp_ratio =
-			MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1;
-	}
-
-	if (dsi_mode.priv_info->roi_caps.enabled) {
-		memcpy(&mode_info->roi_caps, &dsi_mode.priv_info->roi_caps,
-			sizeof(dsi_mode.priv_info->roi_caps));
-	}
-
-	return 0;
-}
-
-static const struct drm_bridge_funcs dsi_bridge_ops = {
-	.attach       = dsi_bridge_attach,
-	.mode_fixup   = dsi_bridge_mode_fixup,
-	.pre_enable   = dsi_bridge_pre_enable,
-	.enable       = dsi_bridge_enable,
-	.disable      = dsi_bridge_disable,
-	.post_disable = dsi_bridge_post_disable,
-	.mode_set     = dsi_bridge_mode_set,
-};
-
-int dsi_conn_set_info_blob(struct drm_connector *connector,
-		void *info, void *display, struct msm_mode_info *mode_info)
-{
-	struct dsi_display *dsi_display = display;
-	struct dsi_panel *panel;
-	enum dsi_pixel_format fmt;
-	u32 bpp;
-
-	if (!info || !dsi_display)
-		return -EINVAL;
-
-	dsi_display->drm_conn = connector;
-
-	sde_kms_info_add_keystr(info,
-		"display type", dsi_display->display_type);
-
-	switch (dsi_display->type) {
-	case DSI_DISPLAY_SINGLE:
-		sde_kms_info_add_keystr(info, "display config",
-					"single display");
-		break;
-	case DSI_DISPLAY_EXT_BRIDGE:
-		sde_kms_info_add_keystr(info, "display config", "ext bridge");
-		break;
-	case DSI_DISPLAY_SPLIT:
-		sde_kms_info_add_keystr(info, "display config",
-					"split display");
-		break;
-	case DSI_DISPLAY_SPLIT_EXT_BRIDGE:
-		sde_kms_info_add_keystr(info, "display config",
-					"split ext bridge");
-		break;
-	default:
-		pr_debug("invalid display type:%d\n", dsi_display->type);
-		break;
-	}
-
-	if (!dsi_display->panel) {
-		pr_debug("invalid panel data\n");
-		goto end;
-	}
-
-	panel = dsi_display->panel;
-	sde_kms_info_add_keystr(info, "panel name", panel->name);
-
-	switch (panel->panel_mode) {
-	case DSI_OP_VIDEO_MODE:
-		sde_kms_info_add_keystr(info, "panel mode", "video");
-		sde_kms_info_add_keystr(info, "qsync support",
-				panel->qsync_min_fps ? "true" : "false");
-		break;
-	case DSI_OP_CMD_MODE:
-		sde_kms_info_add_keystr(info, "panel mode", "command");
-		sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
-				mode_info->mdp_transfer_time_us);
-		sde_kms_info_add_keystr(info, "qsync support",
-				panel->qsync_min_fps ? "true" : "false");
-		break;
-	default:
-		pr_debug("invalid panel type:%d\n", panel->panel_mode);
-		break;
-	}
-	sde_kms_info_add_keystr(info, "dfps support",
-			panel->dfps_caps.dfps_support ? "true" : "false");
-
-	if (panel->dfps_caps.dfps_support) {
-		sde_kms_info_add_keyint(info, "min_fps",
-			panel->dfps_caps.min_refresh_rate);
-		sde_kms_info_add_keyint(info, "max_fps",
-			panel->dfps_caps.max_refresh_rate);
-	}
-
-	switch (panel->phy_props.rotation) {
-	case DSI_PANEL_ROTATE_NONE:
-		sde_kms_info_add_keystr(info, "panel orientation", "none");
-		break;
-	case DSI_PANEL_ROTATE_H_FLIP:
-		sde_kms_info_add_keystr(info, "panel orientation", "horz flip");
-		break;
-	case DSI_PANEL_ROTATE_V_FLIP:
-		sde_kms_info_add_keystr(info, "panel orientation", "vert flip");
-		break;
-	case DSI_PANEL_ROTATE_HV_FLIP:
-		sde_kms_info_add_keystr(info, "panel orientation",
-							"horz & vert flip");
-		break;
-	default:
-		pr_debug("invalid panel rotation:%d\n",
-						panel->phy_props.rotation);
-		break;
-	}
-
-	switch (panel->bl_config.type) {
-	case DSI_BACKLIGHT_PWM:
-		sde_kms_info_add_keystr(info, "backlight type", "pwm");
-		break;
-	case DSI_BACKLIGHT_WLED:
-		sde_kms_info_add_keystr(info, "backlight type", "wled");
-		break;
-	case DSI_BACKLIGHT_DCS:
-		sde_kms_info_add_keystr(info, "backlight type", "dcs");
-		break;
-	default:
-		pr_debug("invalid panel backlight type:%d\n",
-						panel->bl_config.type);
-		break;
-	}
-
-	if (mode_info && mode_info->roi_caps.enabled) {
-		sde_kms_info_add_keyint(info, "partial_update_num_roi",
-				mode_info->roi_caps.num_roi);
-		sde_kms_info_add_keyint(info, "partial_update_xstart",
-				mode_info->roi_caps.align.xstart_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_walign",
-				mode_info->roi_caps.align.width_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_wmin",
-				mode_info->roi_caps.align.min_width);
-		sde_kms_info_add_keyint(info, "partial_update_ystart",
-				mode_info->roi_caps.align.ystart_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_halign",
-				mode_info->roi_caps.align.height_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_hmin",
-				mode_info->roi_caps.align.min_height);
-		sde_kms_info_add_keyint(info, "partial_update_roimerge",
-				mode_info->roi_caps.merge_rois);
-	}
-
-	fmt = dsi_display->config.common_config.dst_format;
-	bpp = dsi_ctrl_pixel_format_to_bpp(fmt);
-
-	sde_kms_info_add_keyint(info, "bit_depth", bpp);
-
-end:
-	return 0;
-}
-
-enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
-		bool force,
-		void *display)
-{
-	enum drm_connector_status status = connector_status_unknown;
-	struct msm_display_info info;
-	int rc;
-
-	if (!conn || !display)
-		return status;
-
-	/* get display dsi_info */
-	memset(&info, 0x0, sizeof(info));
-	rc = dsi_display_get_info(conn, &info, display);
-	if (rc) {
-		pr_err("failed to get display info, rc=%d\n", rc);
-		return connector_status_disconnected;
-	}
-
-	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
-		status = (info.is_connected ? connector_status_connected :
-					      connector_status_disconnected);
-	else
-		status = connector_status_connected;
-
-	conn->display_info.width_mm = info.width_mm;
-	conn->display_info.height_mm = info.height_mm;
-
-	return status;
-}
-
-void dsi_connector_put_modes(struct drm_connector *connector,
-	void *display)
-{
-	struct drm_display_mode *drm_mode;
-	struct dsi_display_mode dsi_mode;
-	struct dsi_display *dsi_display;
-
-	if (!connector || !display)
-		return;
-
-	list_for_each_entry(drm_mode, &connector->modes, head) {
-		convert_to_dsi_mode(drm_mode, &dsi_mode);
-		dsi_display_put_mode(display, &dsi_mode);
-	}
-
-	/* free the display structure modes also */
-	dsi_display = display;
-	kfree(dsi_display->modes);
-	dsi_display->modes = NULL;
-}
-
-
-static int dsi_drm_update_edid_name(struct edid *edid, const char *name)
-{
-	u8 *dtd = (u8 *)&edid->detailed_timings[3];
-	u8 standard_header[] = {0x00, 0x00, 0x00, 0xFE, 0x00};
-	u32 dtd_size = 18;
-	u32 header_size = sizeof(standard_header);
-
-	if (!name)
-		return -EINVAL;
-
-	/* Fill standard header */
-	memcpy(dtd, standard_header, header_size);
-
-	dtd_size -= header_size;
-	dtd_size = min_t(u32, dtd_size, strlen(name));
-
-	memcpy(dtd + header_size, name, dtd_size);
-
-	return 0;
-}
-
-static void dsi_drm_update_dtd(struct edid *edid,
-		struct dsi_display_mode *modes, u32 modes_count)
-{
-	u32 i;
-	u32 count = min_t(u32, modes_count, 3);
-
-	for (i = 0; i < count; i++) {
-		struct detailed_timing *dtd = &edid->detailed_timings[i];
-		struct dsi_display_mode *mode = &modes[i];
-		struct dsi_mode_info *timing = &mode->timing;
-		struct detailed_pixel_timing *pd = &dtd->data.pixel_data;
-		u32 h_blank = timing->h_front_porch + timing->h_sync_width +
-				timing->h_back_porch;
-		u32 v_blank = timing->v_front_porch + timing->v_sync_width +
-				timing->v_back_porch;
-		u32 h_img = 0, v_img = 0;
-
-		dtd->pixel_clock = mode->pixel_clk_khz / 10;
-
-		pd->hactive_lo = timing->h_active & 0xFF;
-		pd->hblank_lo = h_blank & 0xFF;
-		pd->hactive_hblank_hi = ((h_blank >> 8) & 0xF) |
-				((timing->h_active >> 8) & 0xF) << 4;
-
-		pd->vactive_lo = timing->v_active & 0xFF;
-		pd->vblank_lo = v_blank & 0xFF;
-		pd->vactive_vblank_hi = ((v_blank >> 8) & 0xF) |
-				((timing->v_active >> 8) & 0xF) << 4;
-
-		pd->hsync_offset_lo = timing->h_front_porch & 0xFF;
-		pd->hsync_pulse_width_lo = timing->h_sync_width & 0xFF;
-		pd->vsync_offset_pulse_width_lo =
-			((timing->v_front_porch & 0xF) << 4) |
-			(timing->v_sync_width & 0xF);
-
-		pd->hsync_vsync_offset_pulse_width_hi =
-			(((timing->h_front_porch >> 8) & 0x3) << 6) |
-			(((timing->h_sync_width >> 8) & 0x3) << 4) |
-			(((timing->v_front_porch >> 4) & 0x3) << 2) |
-			(((timing->v_sync_width >> 4) & 0x3) << 0);
-
-		pd->width_mm_lo = h_img & 0xFF;
-		pd->height_mm_lo = v_img & 0xFF;
-		pd->width_height_mm_hi = (((h_img >> 8) & 0xF) << 4) |
-			((v_img >> 8) & 0xF);
-
-		pd->hborder = 0;
-		pd->vborder = 0;
-		pd->misc = 0;
-	}
-}
-
-static void dsi_drm_update_checksum(struct edid *edid)
-{
-	u8 *data = (u8 *)edid;
-	u32 i, sum = 0;
-
-	for (i = 0; i < EDID_LENGTH - 1; i++)
-		sum += data[i];
-
-	edid->checksum = 0x100 - (sum & 0xFF);
-}
-
-int dsi_connector_get_modes(struct drm_connector *connector, void *data)
-{
-	int rc, i;
-	u32 count = 0, edid_size;
-	struct dsi_display_mode *modes = NULL;
-	struct drm_display_mode drm_mode;
-	struct dsi_display *display = data;
-	struct edid edid;
-	const u8 edid_buf[EDID_LENGTH] = {
-		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D,
-		0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03,
-		0x80, 0x50, 0x2D, 0x78, 0x0A, 0x0D, 0xC9, 0xA0, 0x57, 0x47,
-		0x98, 0x27, 0x12, 0x48, 0x4C, 0x00, 0x00, 0x00, 0x01, 0x01,
-		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-		0x01, 0x01, 0x01, 0x01,
-	};
-
-	edid_size = min_t(u32, sizeof(edid), EDID_LENGTH);
-
-	memcpy(&edid, edid_buf, edid_size);
-
-	if (sde_connector_get_panel(connector)) {
-		/*
-		 * TODO: If drm_panel is attached, query modes from the panel.
-		 * This is complicated in split dsi cases because panel is not
-		 * attached to both connectors.
-		 */
-		goto end;
-	}
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc) {
-		pr_err("failed to get num of modes, rc=%d\n", rc);
-		goto end;
-	}
-
-	rc = dsi_display_get_modes(display, &modes);
-	if (rc) {
-		pr_err("failed to get modes, rc=%d\n", rc);
-		count = 0;
-		goto end;
-	}
-
-	for (i = 0; i < count; i++) {
-		struct drm_display_mode *m;
-
-		memset(&drm_mode, 0x0, sizeof(drm_mode));
-		dsi_convert_to_drm_mode(&modes[i], &drm_mode);
-		m = drm_mode_duplicate(connector->dev, &drm_mode);
-		if (!m) {
-			pr_err("failed to add mode %ux%u\n",
-			       drm_mode.hdisplay,
-			       drm_mode.vdisplay);
-			count = -ENOMEM;
-			goto end;
-		}
-		m->width_mm = connector->display_info.width_mm;
-		m->height_mm = connector->display_info.height_mm;
-		drm_mode_probed_add(connector, m);
-	}
-
-	rc = dsi_drm_update_edid_name(&edid, display->panel->name);
-	if (rc) {
-		count = 0;
-		goto end;
-	}
-
-	dsi_drm_update_dtd(&edid, modes, count);
-	dsi_drm_update_checksum(&edid);
-	rc =  drm_connector_update_edid_property(connector, &edid);
-	if (rc)
-		count = 0;
-end:
-	pr_debug("MODE COUNT =%d\n\n", count);
-	return count;
-}
-
-enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *display)
-{
-	struct dsi_display_mode dsi_mode;
-	int rc;
-
-	if (!connector || !mode) {
-		pr_err("Invalid params\n");
-		return MODE_ERROR;
-	}
-
-	convert_to_dsi_mode(mode, &dsi_mode);
-
-	rc = dsi_display_validate_mode(display, &dsi_mode,
-			DSI_VALIDATE_FLAG_ALLOW_ADJUST);
-	if (rc) {
-		pr_err("mode not supported, rc=%d\n", rc);
-		return MODE_BAD;
-	}
-
-	return MODE_OK;
-}
-
-int dsi_conn_pre_kickoff(struct drm_connector *connector,
-		void *display,
-		struct msm_display_kickoff_params *params)
-{
-	if (!connector || !display || !params) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	return dsi_display_pre_kickoff(connector, display, params);
-}
-
-void dsi_conn_enable_event(struct drm_connector *connector,
-		uint32_t event_idx, bool enable, void *display)
-{
-	struct dsi_event_cb_info event_info;
-
-	memset(&event_info, 0, sizeof(event_info));
-
-	event_info.event_cb = sde_connector_trigger_event;
-	event_info.event_usr_ptr = connector;
-
-	dsi_display_enable_event(connector, display,
-			event_idx, &event_info, enable);
-}
-
-int dsi_conn_post_kickoff(struct drm_connector *connector)
-{
-	struct drm_encoder *encoder;
-	struct dsi_bridge *c_bridge;
-	struct dsi_display_mode adj_mode;
-	struct dsi_display *display;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-	int i, rc = 0;
-
-	if (!connector || !connector->state) {
-		pr_err("invalid connector or connector state\n");
-		return -EINVAL;
-	}
-
-	encoder = connector->state->best_encoder;
-	if (!encoder) {
-		pr_debug("best encoder is not available\n");
-		return 0;
-	}
-
-	c_bridge = to_dsi_bridge(encoder->bridge);
-	adj_mode = c_bridge->dsi_mode;
-	display = c_bridge->display;
-
-	if (adj_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) {
-		m_ctrl = &display->ctrl[display->clk_master_idx];
-		rc = dsi_ctrl_timing_db_update(m_ctrl->ctrl, false);
-		if (rc) {
-			pr_err("[%s] failed to dfps update  rc=%d\n",
-				display->name, rc);
-			return -EINVAL;
-		}
-
-		/* Update the rest of the controllers */
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			if (!ctrl->ctrl || (ctrl == m_ctrl))
-				continue;
-
-			rc = dsi_ctrl_timing_db_update(ctrl->ctrl, false);
-			if (rc) {
-				pr_err("[%s] failed to dfps update rc=%d\n",
-					display->name,  rc);
-				return -EINVAL;
-			}
-		}
-
-		c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
-	}
-
-	return 0;
-}
-
-struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
-				       struct drm_device *dev,
-				       struct drm_encoder *encoder)
-{
-	int rc = 0;
-	struct dsi_bridge *bridge;
-
-	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
-	if (!bridge) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	bridge->display = display;
-	bridge->base.funcs = &dsi_bridge_ops;
-	bridge->base.encoder = encoder;
-
-	rc = drm_bridge_attach(encoder, &bridge->base, NULL);
-	if (rc) {
-		pr_err("failed to attach bridge, rc=%d\n", rc);
-		goto error_free_bridge;
-	}
-
-	encoder->bridge = &bridge->base;
-	return bridge;
-error_free_bridge:
-	kfree(bridge);
-error:
-	return ERR_PTR(rc);
-}
-
-void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge)
-{
-	if (bridge && bridge->base.encoder)
-		bridge->base.encoder->bridge = NULL;
-
-	kfree(bridge);
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
deleted file mode 100644
index 9e18801..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_DRM_H_
-#define _DSI_DRM_H_
-
-#include <linux/types.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "msm_drv.h"
-
-#include "dsi_display.h"
-
-struct dsi_bridge {
-	struct drm_bridge base;
-	u32 id;
-
-	struct dsi_display *display;
-	struct dsi_display_mode dsi_mode;
-};
-
-/**
- * dsi_conn_set_info_blob - callback to perform info blob initialization
- * @connector: Pointer to drm connector structure
- * @info: Pointer to sde connector info structure
- * @display: Pointer to private display handle
- * @mode_info: Pointer to mode info structure
- * Returns: Zero on success
- */
-int dsi_conn_set_info_blob(struct drm_connector *connector,
-		void *info,
-		void *display,
-		struct msm_mode_info *mode_info);
-
-/**
- * dsi_conn_detect - callback to determine if connector is connected
- * @connector: Pointer to drm connector structure
- * @force: Force detect setting from drm framework
- * @display: Pointer to private display handle
- * Returns: Connector 'is connected' status
- */
-enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
-		bool force,
-		void *display);
-
-/**
- * dsi_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display handle
- * Returns: Number of modes added
- */
-int dsi_connector_get_modes(struct drm_connector *connector,
-		void *display);
-
-/**
- * dsi_connector_put_modes - callback to free up drm modes of the connector
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display handle
- */
-void dsi_connector_put_modes(struct drm_connector *connector,
-	void *display);
-
-/**
- * dsi_conn_get_mode_info - retrieve information on the mode selected
- * @drm_mode: Display mode set for the display
- * @mode_info: Out parameter. information of the mode.
- * @max_mixer_width: max width supported by HW layer mixer
- * @display: Pointer to private display structure
- * Returns: Zero on success
- */
-int dsi_conn_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info, u32 max_mixer_width,
-		void *display);
-
-/**
- * dsi_conn_mode_valid - callback to determine if specified mode is valid
- * @connector: Pointer to drm connector structure
- * @mode: Pointer to drm mode structure
- * @display: Pointer to private display handle
- * Returns: Validity status for specified mode
- */
-enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
-		struct drm_display_mode *mode,
-		void *display);
-
-/**
- * dsi_conn_enable_event - callback to notify DSI driver of event registration
- * @connector: Pointer to drm connector structure
- * @event_idx: Connector event index
- * @enable: Whether or not the event is enabled
- * @display: Pointer to private display handle
- */
-void dsi_conn_enable_event(struct drm_connector *connector,
-		uint32_t event_idx, bool enable, void *display);
-
-struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
-		struct drm_device *dev,
-		struct drm_encoder *encoder);
-
-void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
-
-/**
- * dsi_display_pre_kickoff - program kickoff-time features
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display structure
- * @params: Parameters for kickoff-time programming
- * Returns: Zero on success
- */
-int dsi_conn_pre_kickoff(struct drm_connector *connector,
-		void *display,
-		struct msm_display_kickoff_params *params);
-
-/**
- * dsi_display_post_kickoff - program post kickoff-time features
- * @connector: Pointer to drm connector structure
- * Returns: Zero on success
- */
-int dsi_conn_post_kickoff(struct drm_connector *connector);
-
-/**
- * dsi_convert_to_drm_mode - Update drm mode with dsi mode information
- * @dsi_mode: input parameter. structure having dsi mode information.
- * @drm_mode: output parameter. DRM mode set for the display
- */
-void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
-				struct drm_display_mode *drm_mode);
-
-#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
deleted file mode 100644
index cd2173c..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_HW_H_
-#define _DSI_HW_H_
-#include <linux/io.h>
-
-#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
-#define DSI_W32(dsi_hw, off, val) \
-	do {\
-		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
-			(dsi_hw)->index, #off, (uint32_t)(val)); \
-		writel_relaxed((val), (dsi_hw)->base + (off)); \
-	} while (0)
-
-#define DSI_MMSS_MISC_R32(dsi_hw, off) \
-	readl_relaxed((dsi_hw)->mmss_misc_base + (off))
-#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
-	do {\
-		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
-			(dsi_hw)->index, #off, val); \
-		writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
-	} while (0)
-
-#define DSI_MISC_R32(dsi_hw, off) \
-	readl_relaxed((dsi_hw)->phy_clamp_base + (off))
-#define DSI_MISC_W32(dsi_hw, off, val) \
-	do {\
-		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
-			(dsi_hw)->index, #off, val); \
-		writel_relaxed((val), (dsi_hw)->phy_clamp_base + (off)); \
-	} while (0)
-#define DSI_DISP_CC_R32(dsi_hw, off) \
-	readl_relaxed((dsi_hw)->disp_cc_base + (off))
-#define DSI_DISP_CC_W32(dsi_hw, off, val) \
-	do {\
-		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
-			(dsi_hw)->index, #off, val); \
-		writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
-	} while (0)
-
-#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
-#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
-
-#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
deleted file mode 100644
index 730a2c2..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ /dev/null
@@ -1,3882 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"msm-dsi-panel:[%s:%d] " fmt, __func__, __LINE__
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <video/mipi_display.h>
-
-#include "dsi_panel.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_parser.h"
-
-/**
- * topology is currently defined by a set of following 3 values:
- * 1. num of layer mixers
- * 2. num of compression encoders
- * 3. num of interfaces
- */
-#define TOPOLOGY_SET_LEN 3
-#define MAX_TOPOLOGY 5
-
-#define DSI_PANEL_DEFAULT_LABEL  "Default dsi panel"
-
-#define DEFAULT_MDP_TRANSFER_TIME 14000
-
-#define DEFAULT_PANEL_JITTER_NUMERATOR		2
-#define DEFAULT_PANEL_JITTER_DENOMINATOR	1
-#define DEFAULT_PANEL_JITTER_ARRAY_SIZE		2
-#define MAX_PANEL_JITTER		10
-#define DEFAULT_PANEL_PREFILL_LINES	25
-
-enum dsi_dsc_ratio_type {
-	DSC_8BPC_8BPP,
-	DSC_10BPC_8BPP,
-	DSC_12BPC_8BPP,
-	DSC_RATIO_TYPE_MAX
-};
-
-static u32 dsi_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
-		0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
-
-/*
- * DSC 1.1
- * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
- */
-static char dsi_dsc_rc_range_min_qp_1_1[][15] = {
-	{0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12},
-	{0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17},
-	{0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21},
-	};
-
-/*
- * DSC 1.1 SCR
- * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
- */
-static char dsi_dsc_rc_range_min_qp_1_1_scr1[][15] = {
-	{0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 12},
-	{0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 16},
-	{0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 17, 20},
-	};
-
-/*
- * DSC 1.1
- * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
- */
-static char dsi_dsc_rc_range_max_qp_1_1[][15] = {
-	{4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15},
-	{4, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19},
-	{12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23},
-	};
-
-/*
- * DSC 1.1 SCR
- * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
- */
-static char dsi_dsc_rc_range_max_qp_1_1_scr1[][15] = {
-	{4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13},
-	{8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17},
-	{12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 23},
-	};
-
-/*
- * DSC 1.1 and DSC 1.1 SCR
- * Rate control - bpg offset values
- */
-static char dsi_dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
-		-8, -10, -10, -12, -12, -12, -12};
-
-int dsi_dsc_create_pps_buf_cmd(struct msm_display_dsc_info *dsc, char *buf,
-				int pps_id)
-{
-	char *bp;
-	char data;
-	int i, bpp;
-	char *dbgbp;
-
-	dbgbp = buf;
-	bp = buf;
-	/* First 7 bytes are cmd header */
-	*bp++ = 0x0A;
-	*bp++ = 1;
-	*bp++ = 0;
-	*bp++ = 0;
-	*bp++ = 10;
-	*bp++ = 0;
-	*bp++ = 128;
-
-	*bp++ = (dsc->version & 0xff);		/* pps0 */
-	*bp++ = (pps_id & 0xff);		/* pps1 */
-	bp++;					/* pps2, reserved */
-
-	data = dsc->line_buf_depth & 0x0f;
-	data |= ((dsc->bpc & 0xf) << 4);
-	*bp++ = data;				/* pps3 */
-
-	bpp = dsc->bpp;
-	bpp <<= 4;				/* 4 fraction bits */
-	data = (bpp >> 8);
-	data &= 0x03;				/* upper two bits */
-	data |= ((dsc->block_pred_enable & 0x1) << 5);
-	data |= ((dsc->convert_rgb & 0x1) << 4);
-	data |= ((dsc->enable_422 & 0x1) << 3);
-	data |= ((dsc->vbr_enable & 0x1) << 2);
-	*bp++ = data;				/* pps4 */
-	*bp++ = (bpp & 0xff);			/* pps5 */
-
-	*bp++ = ((dsc->pic_height >> 8) & 0xff); /* pps6 */
-	*bp++ = (dsc->pic_height & 0x0ff);	/* pps7 */
-	*bp++ = ((dsc->pic_width >> 8) & 0xff);	/* pps8 */
-	*bp++ = (dsc->pic_width & 0x0ff);	/* pps9 */
-
-	*bp++ = ((dsc->slice_height >> 8) & 0xff);/* pps10 */
-	*bp++ = (dsc->slice_height & 0x0ff);	/* pps11 */
-	*bp++ = ((dsc->slice_width >> 8) & 0xff); /* pps12 */
-	*bp++ = (dsc->slice_width & 0x0ff);	/* pps13 */
-
-	*bp++ = ((dsc->chunk_size >> 8) & 0xff);/* pps14 */
-	*bp++ = (dsc->chunk_size & 0x0ff);	/* pps15 */
-
-	*bp++ = (dsc->initial_xmit_delay >> 8) & 0x3; /* pps16, bit 0, 1 */
-	*bp++ = (dsc->initial_xmit_delay & 0xff);/* pps17 */
-
-	*bp++ = ((dsc->initial_dec_delay >> 8) & 0xff); /* pps18 */
-	*bp++ = (dsc->initial_dec_delay & 0xff);/* pps19 */
-
-	bp++;					/* pps20, reserved */
-
-	*bp++ = (dsc->initial_scale_value & 0x3f); /* pps21 */
-
-	*bp++ = ((dsc->scale_increment_interval >> 8) & 0xff); /* pps22 */
-	*bp++ = (dsc->scale_increment_interval & 0xff); /* pps23 */
-
-	*bp++ = ((dsc->scale_decrement_interval >> 8) & 0xf); /* pps24 */
-	*bp++ = (dsc->scale_decrement_interval & 0x0ff);/* pps25 */
-
-	bp++;					/* pps26, reserved */
-
-	*bp++ = (dsc->first_line_bpg_offset & 0x1f);/* pps27 */
-
-	*bp++ = ((dsc->nfl_bpg_offset >> 8) & 0xff);/* pps28 */
-	*bp++ = (dsc->nfl_bpg_offset & 0x0ff);	/* pps29 */
-	*bp++ = ((dsc->slice_bpg_offset >> 8) & 0xff);/* pps30 */
-	*bp++ = (dsc->slice_bpg_offset & 0x0ff);/* pps31 */
-
-	*bp++ = ((dsc->initial_offset >> 8) & 0xff);/* pps32 */
-	*bp++ = (dsc->initial_offset & 0x0ff);	/* pps33 */
-
-	*bp++ = ((dsc->final_offset >> 8) & 0xff);/* pps34 */
-	*bp++ = (dsc->final_offset & 0x0ff);	/* pps35 */
-
-	*bp++ = (dsc->min_qp_flatness & 0x1f);	/* pps36 */
-	*bp++ = (dsc->max_qp_flatness & 0x1f);	/* pps37 */
-
-	*bp++ = ((dsc->rc_model_size >> 8) & 0xff);/* pps38 */
-	*bp++ = (dsc->rc_model_size & 0x0ff);	/* pps39 */
-
-	*bp++ = (dsc->edge_factor & 0x0f);	/* pps40 */
-
-	*bp++ = (dsc->quant_incr_limit0 & 0x1f);	/* pps41 */
-	*bp++ = (dsc->quant_incr_limit1 & 0x1f);	/* pps42 */
-
-	data = ((dsc->tgt_offset_hi & 0xf) << 4);
-	data |= (dsc->tgt_offset_lo & 0x0f);
-	*bp++ = data;				/* pps43 */
-
-	for (i = 0; i < 14; i++)
-		*bp++ = (dsc->buf_thresh[i] & 0xff); /* pps44 - pps57 */
-
-	for (i = 0; i < 15; i++) {		/* pps58 - pps87 */
-		data = (dsc->range_min_qp[i] & 0x1f);
-		data <<= 3;
-		data |= ((dsc->range_max_qp[i] >> 2) & 0x07);
-		*bp++ = data;
-		data = (dsc->range_max_qp[i] & 0x03);
-		data <<= 6;
-		data |= (dsc->range_bpg_offset[i] & 0x3f);
-		*bp++ = data;
-	}
-
-	return 128;
-}
-
-static int dsi_panel_vreg_get(struct dsi_panel *panel)
-{
-	int rc = 0;
-	int i;
-	struct regulator *vreg = NULL;
-
-	for (i = 0; i < panel->power_info.count; i++) {
-		vreg = devm_regulator_get(panel->parent,
-					  panel->power_info.vregs[i].vreg_name);
-		rc = PTR_RET(vreg);
-		if (rc) {
-			pr_err("failed to get %s regulator\n",
-			       panel->power_info.vregs[i].vreg_name);
-			goto error_put;
-		}
-		panel->power_info.vregs[i].vreg = vreg;
-	}
-
-	return rc;
-error_put:
-	for (i = i - 1; i >= 0; i--) {
-		devm_regulator_put(panel->power_info.vregs[i].vreg);
-		panel->power_info.vregs[i].vreg = NULL;
-	}
-	return rc;
-}
-
-static int dsi_panel_vreg_put(struct dsi_panel *panel)
-{
-	int rc = 0;
-	int i;
-
-	for (i = panel->power_info.count - 1; i >= 0; i--)
-		devm_regulator_put(panel->power_info.vregs[i].vreg);
-
-	return rc;
-}
-
-static int dsi_panel_gpio_request(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_panel_reset_config *r_config = &panel->reset_config;
-
-	if (gpio_is_valid(r_config->reset_gpio)) {
-		rc = gpio_request(r_config->reset_gpio, "reset_gpio");
-		if (rc) {
-			pr_err("request for reset_gpio failed, rc=%d\n", rc);
-			goto error;
-		}
-	}
-
-	if (gpio_is_valid(r_config->disp_en_gpio)) {
-		rc = gpio_request(r_config->disp_en_gpio, "disp_en_gpio");
-		if (rc) {
-			pr_err("request for disp_en_gpio failed, rc=%d\n", rc);
-			goto error_release_reset;
-		}
-	}
-
-	if (gpio_is_valid(panel->bl_config.en_gpio)) {
-		rc = gpio_request(panel->bl_config.en_gpio, "bklt_en_gpio");
-		if (rc) {
-			pr_err("request for bklt_en_gpio failed, rc=%d\n", rc);
-			goto error_release_disp_en;
-		}
-	}
-
-	if (gpio_is_valid(r_config->lcd_mode_sel_gpio)) {
-		rc = gpio_request(r_config->lcd_mode_sel_gpio, "mode_gpio");
-		if (rc) {
-			pr_err("request for mode_gpio failed, rc=%d\n", rc);
-			goto error_release_mode_sel;
-		}
-	}
-
-	goto error;
-error_release_mode_sel:
-	if (gpio_is_valid(panel->bl_config.en_gpio))
-		gpio_free(panel->bl_config.en_gpio);
-error_release_disp_en:
-	if (gpio_is_valid(r_config->disp_en_gpio))
-		gpio_free(r_config->disp_en_gpio);
-error_release_reset:
-	if (gpio_is_valid(r_config->reset_gpio))
-		gpio_free(r_config->reset_gpio);
-error:
-	return rc;
-}
-
-static int dsi_panel_gpio_release(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_panel_reset_config *r_config = &panel->reset_config;
-
-	if (gpio_is_valid(r_config->reset_gpio))
-		gpio_free(r_config->reset_gpio);
-
-	if (gpio_is_valid(r_config->disp_en_gpio))
-		gpio_free(r_config->disp_en_gpio);
-
-	if (gpio_is_valid(panel->bl_config.en_gpio))
-		gpio_free(panel->bl_config.en_gpio);
-
-	if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
-		gpio_free(panel->reset_config.lcd_mode_sel_gpio);
-
-	return rc;
-}
-
-int dsi_panel_trigger_esd_attack(struct dsi_panel *panel)
-{
-	struct dsi_panel_reset_config *r_config;
-
-	if (!panel) {
-		pr_err("Invalid panel param\n");
-		return -EINVAL;
-	}
-
-	r_config = &panel->reset_config;
-	if (!r_config) {
-		pr_err("Invalid panel reset configuration\n");
-		return -EINVAL;
-	}
-
-	if (gpio_is_valid(r_config->reset_gpio)) {
-		gpio_set_value(r_config->reset_gpio, 0);
-		pr_info("GPIO pulled low to simulate ESD\n");
-		return 0;
-	}
-	pr_err("failed to pull down gpio\n");
-	return -EINVAL;
-}
-
-static int dsi_panel_reset(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_panel_reset_config *r_config = &panel->reset_config;
-	int i;
-
-	if (gpio_is_valid(panel->reset_config.disp_en_gpio)) {
-		rc = gpio_direction_output(panel->reset_config.disp_en_gpio, 1);
-		if (rc) {
-			pr_err("unable to set dir for disp gpio rc=%d\n", rc);
-			goto exit;
-		}
-	}
-
-	if (r_config->count) {
-		rc = gpio_direction_output(r_config->reset_gpio,
-			r_config->sequence[0].level);
-		if (rc) {
-			pr_err("unable to set dir for rst gpio rc=%d\n", rc);
-			goto exit;
-		}
-	}
-
-	for (i = 0; i < r_config->count; i++) {
-		gpio_set_value(r_config->reset_gpio,
-			       r_config->sequence[i].level);
-
-
-		if (r_config->sequence[i].sleep_ms)
-			usleep_range(r_config->sequence[i].sleep_ms * 1000,
-				(r_config->sequence[i].sleep_ms * 1000) + 100);
-	}
-
-	if (gpio_is_valid(panel->bl_config.en_gpio)) {
-		rc = gpio_direction_output(panel->bl_config.en_gpio, 1);
-		if (rc)
-			pr_err("unable to set dir for bklt gpio rc=%d\n", rc);
-	}
-
-	if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio)) {
-		bool out = true;
-
-		if ((panel->reset_config.mode_sel_state == MODE_SEL_DUAL_PORT)
-				|| (panel->reset_config.mode_sel_state
-					== MODE_GPIO_LOW))
-			out = false;
-		else if ((panel->reset_config.mode_sel_state
-				== MODE_SEL_SINGLE_PORT) ||
-				(panel->reset_config.mode_sel_state
-				 == MODE_GPIO_HIGH))
-			out = true;
-
-		rc = gpio_direction_output(
-			panel->reset_config.lcd_mode_sel_gpio, out);
-		if (rc)
-			pr_err("unable to set dir for mode gpio rc=%d\n", rc);
-	}
-exit:
-	return rc;
-}
-
-static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable)
-{
-	int rc = 0;
-	struct pinctrl_state *state;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	if (enable)
-		state = panel->pinctrl.active;
-	else
-		state = panel->pinctrl.suspend;
-
-	rc = pinctrl_select_state(panel->pinctrl.pinctrl, state);
-	if (rc)
-		pr_err("[%s] failed to set pin state, rc=%d\n", panel->name,
-		       rc);
-
-	return rc;
-}
-
-
-static int dsi_panel_power_on(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	rc = dsi_pwr_enable_regulator(&panel->power_info, true);
-	if (rc) {
-		pr_err("[%s] failed to enable vregs, rc=%d\n", panel->name, rc);
-		goto exit;
-	}
-
-	rc = dsi_panel_set_pinctrl_state(panel, true);
-	if (rc) {
-		pr_err("[%s] failed to set pinctrl, rc=%d\n", panel->name, rc);
-		goto error_disable_vregs;
-	}
-
-	rc = dsi_panel_reset(panel);
-	if (rc) {
-		pr_err("[%s] failed to reset panel, rc=%d\n", panel->name, rc);
-		goto error_disable_gpio;
-	}
-
-	goto exit;
-
-error_disable_gpio:
-	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
-		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
-
-	if (gpio_is_valid(panel->bl_config.en_gpio))
-		gpio_set_value(panel->bl_config.en_gpio, 0);
-
-	(void)dsi_panel_set_pinctrl_state(panel, false);
-
-error_disable_vregs:
-	(void)dsi_pwr_enable_regulator(&panel->power_info, false);
-
-exit:
-	return rc;
-}
-
-static int dsi_panel_power_off(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
-		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
-
-	if (gpio_is_valid(panel->reset_config.reset_gpio))
-		gpio_set_value(panel->reset_config.reset_gpio, 0);
-
-	if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
-		gpio_set_value(panel->reset_config.lcd_mode_sel_gpio, 0);
-
-	rc = dsi_panel_set_pinctrl_state(panel, false);
-	if (rc) {
-		pr_err("[%s] failed set pinctrl state, rc=%d\n", panel->name,
-		       rc);
-	}
-
-	rc = dsi_pwr_enable_regulator(&panel->power_info, false);
-	if (rc)
-		pr_err("[%s] failed to enable vregs, rc=%d\n", panel->name, rc);
-
-	return rc;
-}
-static int dsi_panel_tx_cmd_set(struct dsi_panel *panel,
-				enum dsi_cmd_set_type type)
-{
-	int rc = 0, i = 0;
-	ssize_t len;
-	struct dsi_cmd_desc *cmds;
-	u32 count;
-	enum dsi_cmd_set_state state;
-	struct dsi_display_mode *mode;
-	const struct mipi_dsi_host_ops *ops = panel->host->ops;
-
-	if (!panel || !panel->cur_mode)
-		return -EINVAL;
-
-	mode = panel->cur_mode;
-
-	cmds = mode->priv_info->cmd_sets[type].cmds;
-	count = mode->priv_info->cmd_sets[type].count;
-	state = mode->priv_info->cmd_sets[type].state;
-
-	if (count == 0) {
-		pr_debug("[%s] No commands to be sent for state(%d)\n",
-			 panel->name, type);
-		goto error;
-	}
-
-	for (i = 0; i < count; i++) {
-		if (state == DSI_CMD_SET_STATE_LP)
-			cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
-
-		if (cmds->last_command)
-			cmds->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
-
-		len = ops->transfer(panel->host, &cmds->msg);
-		if (len < 0) {
-			rc = len;
-			pr_err("failed to set cmds(%d), rc=%d\n", type, rc);
-			goto error;
-		}
-		if (cmds->post_wait_ms)
-			usleep_range(cmds->post_wait_ms*1000,
-					((cmds->post_wait_ms*1000)+10));
-		cmds++;
-	}
-error:
-	return rc;
-}
-
-static int dsi_panel_pinctrl_deinit(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	devm_pinctrl_put(panel->pinctrl.pinctrl);
-
-	return rc;
-}
-
-static int dsi_panel_pinctrl_init(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	/* TODO:  pinctrl is defined in dsi dt node */
-	panel->pinctrl.pinctrl = devm_pinctrl_get(panel->parent);
-	if (IS_ERR_OR_NULL(panel->pinctrl.pinctrl)) {
-		rc = PTR_ERR(panel->pinctrl.pinctrl);
-		pr_err("failed to get pinctrl, rc=%d\n", rc);
-		goto error;
-	}
-
-	panel->pinctrl.active = pinctrl_lookup_state(panel->pinctrl.pinctrl,
-						       "panel_active");
-	if (IS_ERR_OR_NULL(panel->pinctrl.active)) {
-		rc = PTR_ERR(panel->pinctrl.active);
-		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
-		goto error;
-	}
-
-	panel->pinctrl.suspend =
-		pinctrl_lookup_state(panel->pinctrl.pinctrl, "panel_suspend");
-
-	if (IS_ERR_OR_NULL(panel->pinctrl.suspend)) {
-		rc = PTR_ERR(panel->pinctrl.suspend);
-		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_wled_register(struct dsi_panel *panel,
-		struct dsi_backlight_config *bl)
-{
-	int rc = 0;
-	struct backlight_device *bd;
-
-	bd = backlight_device_get_by_type(BACKLIGHT_RAW);
-	if (!bd) {
-		pr_err("[%s] fail raw backlight register\n", panel->name);
-		rc = -EINVAL;
-	}
-
-	bl->raw_bd = bd;
-	return rc;
-}
-
-static int dsi_panel_update_backlight(struct dsi_panel *panel,
-	u32 bl_lvl)
-{
-	int rc = 0;
-	struct mipi_dsi_device *dsi;
-
-	if (!panel || (bl_lvl > 0xffff)) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	dsi = &panel->mipi_device;
-
-	rc = mipi_dsi_dcs_set_display_brightness(dsi, bl_lvl);
-	if (rc < 0)
-		pr_err("failed to update dcs backlight:%d\n", bl_lvl);
-
-	return rc;
-}
-
-int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
-{
-	int rc = 0;
-	struct dsi_backlight_config *bl = &panel->bl_config;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	pr_debug("backlight type:%d lvl:%d\n", bl->type, bl_lvl);
-	switch (bl->type) {
-	case DSI_BACKLIGHT_WLED:
-		rc = backlight_device_set_brightness(bl->raw_bd, bl_lvl);
-		break;
-	case DSI_BACKLIGHT_DCS:
-		rc = dsi_panel_update_backlight(panel, bl_lvl);
-		break;
-	case DSI_BACKLIGHT_EXTERNAL:
-		break;
-	default:
-		pr_err("Backlight type(%d) not supported\n", bl->type);
-		rc = -ENOTSUPP;
-	}
-
-	return rc;
-}
-
-static u32 dsi_panel_get_brightness(struct dsi_backlight_config *bl)
-{
-	u32 cur_bl_level;
-	struct backlight_device *bd = bl->raw_bd;
-
-	/* default the brightness level to 50% */
-	cur_bl_level = bl->bl_max_level >> 1;
-
-	switch (bl->type) {
-	case DSI_BACKLIGHT_WLED:
-		/* Try to query the backlight level from the backlight device */
-		if (bd->ops && bd->ops->get_brightness)
-			cur_bl_level = bd->ops->get_brightness(bd);
-		break;
-	case DSI_BACKLIGHT_DCS:
-	case DSI_BACKLIGHT_EXTERNAL:
-	default:
-		/*
-		 * Ideally, we should read the backlight level from the
-		 * panel. For now, just set it default value.
-		 */
-		break;
-	}
-
-	pr_debug("cur_bl_level=%d\n", cur_bl_level);
-	return cur_bl_level;
-}
-
-void dsi_panel_bl_handoff(struct dsi_panel *panel)
-{
-	struct dsi_backlight_config *bl = &panel->bl_config;
-
-	bl->bl_level = dsi_panel_get_brightness(bl);
-}
-
-static int dsi_panel_bl_register(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_backlight_config *bl = &panel->bl_config;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	switch (bl->type) {
-	case DSI_BACKLIGHT_WLED:
-		rc = dsi_panel_wled_register(panel, bl);
-		break;
-	case DSI_BACKLIGHT_DCS:
-		break;
-	case DSI_BACKLIGHT_EXTERNAL:
-		break;
-	default:
-		pr_err("Backlight type(%d) not supported\n", bl->type);
-		rc = -ENOTSUPP;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_bl_unregister(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_backlight_config *bl = &panel->bl_config;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	switch (bl->type) {
-	case DSI_BACKLIGHT_WLED:
-		break;
-	case DSI_BACKLIGHT_DCS:
-		break;
-	case DSI_BACKLIGHT_EXTERNAL:
-		break;
-	default:
-		pr_err("Backlight type(%d) not supported\n", bl->type);
-		rc = -ENOTSUPP;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
-				  struct dsi_parser_utils *utils)
-{
-	int rc = 0;
-	u64 tmp64 = 0;
-	struct dsi_display_mode *display_mode;
-	struct dsi_display_mode_priv_info *priv_info;
-
-	display_mode = container_of(mode, struct dsi_display_mode, timing);
-
-	priv_info = display_mode->priv_info;
-
-	rc = utils->read_u64(utils->data,
-			"qcom,mdss-dsi-panel-clockrate", &tmp64);
-	if (rc == -EOVERFLOW) {
-		tmp64 = 0;
-		rc = utils->read_u32(utils->data,
-			"qcom,mdss-dsi-panel-clockrate", (u32 *)&tmp64);
-	}
-
-	mode->clk_rate_hz = !rc ? tmp64 : 0;
-	display_mode->priv_info->clk_rate_hz = mode->clk_rate_hz;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-mdp-transfer-time-us",
-				&mode->mdp_transfer_time_us);
-	if (!rc)
-		display_mode->priv_info->mdp_transfer_time_us =
-			mode->mdp_transfer_time_us;
-	else
-		display_mode->priv_info->mdp_transfer_time_us =
-			DEFAULT_MDP_TRANSFER_TIME;
-
-	rc = utils->read_u32(utils->data,
-				"qcom,mdss-dsi-panel-framerate",
-				&mode->refresh_rate);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-panel-framerate, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-panel-width",
-				  &mode->h_active);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-panel-width, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data,
-				"qcom,mdss-dsi-h-front-porch",
-				  &mode->h_front_porch);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-h-front-porch, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data,
-				"qcom,mdss-dsi-h-back-porch",
-				  &mode->h_back_porch);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-h-back-porch, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data,
-				"qcom,mdss-dsi-h-pulse-width",
-				  &mode->h_sync_width);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-h-pulse-width, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-h-sync-skew",
-				  &mode->h_skew);
-	if (rc)
-		pr_err("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n", rc);
-
-	pr_debug("panel horz active:%d front_portch:%d back_porch:%d sync_skew:%d\n",
-		mode->h_active, mode->h_front_porch, mode->h_back_porch,
-		mode->h_sync_width);
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-panel-height",
-				  &mode->v_active);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-panel-height, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-v-back-porch",
-				  &mode->v_back_porch);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-v-front-porch",
-				  &mode->v_front_porch);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
-		       rc);
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-v-pulse-width",
-				  &mode->v_sync_width);
-	if (rc) {
-		pr_err("failed to read qcom,mdss-dsi-v-pulse-width, rc=%d\n",
-		       rc);
-		goto error;
-	}
-	pr_debug("panel vert active:%d front_portch:%d back_porch:%d pulse_width:%d\n",
-		mode->v_active, mode->v_front_porch, mode->v_back_porch,
-		mode->v_sync_width);
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
-					struct dsi_parser_utils *utils,
-					const char *name)
-{
-	int rc = 0;
-	u32 bpp = 0;
-	enum dsi_pixel_format fmt;
-	const char *packing;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bpp", &bpp);
-	if (rc) {
-		pr_err("[%s] failed to read qcom,mdss-dsi-bpp, rc=%d\n",
-		       name, rc);
-		return rc;
-	}
-
-	switch (bpp) {
-	case 3:
-		fmt = DSI_PIXEL_FORMAT_RGB111;
-		break;
-	case 8:
-		fmt = DSI_PIXEL_FORMAT_RGB332;
-		break;
-	case 12:
-		fmt = DSI_PIXEL_FORMAT_RGB444;
-		break;
-	case 16:
-		fmt = DSI_PIXEL_FORMAT_RGB565;
-		break;
-	case 18:
-		fmt = DSI_PIXEL_FORMAT_RGB666;
-		break;
-	case 24:
-	default:
-		fmt = DSI_PIXEL_FORMAT_RGB888;
-		break;
-	}
-
-	if (fmt == DSI_PIXEL_FORMAT_RGB666) {
-		packing = utils->get_property(utils->data,
-					  "qcom,mdss-dsi-pixel-packing",
-					  NULL);
-		if (packing && !strcmp(packing, "loose"))
-			fmt = DSI_PIXEL_FORMAT_RGB666_LOOSE;
-	}
-
-	host->dst_format = fmt;
-	return rc;
-}
-
-static int dsi_panel_parse_lane_states(struct dsi_host_common_cfg *host,
-				       struct dsi_parser_utils *utils,
-				       const char *name)
-{
-	int rc = 0;
-	bool lane_enabled;
-
-	lane_enabled = utils->read_bool(utils->data,
-					    "qcom,mdss-dsi-lane-0-state");
-	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_0 : 0);
-
-	lane_enabled = utils->read_bool(utils->data,
-					     "qcom,mdss-dsi-lane-1-state");
-	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_1 : 0);
-
-	lane_enabled = utils->read_bool(utils->data,
-					    "qcom,mdss-dsi-lane-2-state");
-	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_2 : 0);
-
-	lane_enabled = utils->read_bool(utils->data,
-					     "qcom,mdss-dsi-lane-3-state");
-	host->data_lanes |= (lane_enabled ? DSI_DATA_LANE_3 : 0);
-
-	if (host->data_lanes == 0) {
-		pr_err("[%s] No data lanes are enabled, rc=%d\n", name, rc);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static int dsi_panel_parse_color_swap(struct dsi_host_common_cfg *host,
-				      struct dsi_parser_utils *utils,
-				      const char *name)
-{
-	int rc = 0;
-	const char *swap_mode;
-
-	swap_mode = utils->get_property(utils->data,
-			"qcom,mdss-dsi-color-order", NULL);
-	if (swap_mode) {
-		if (!strcmp(swap_mode, "rgb_swap_rgb")) {
-			host->swap_mode = DSI_COLOR_SWAP_RGB;
-		} else if (!strcmp(swap_mode, "rgb_swap_rbg")) {
-			host->swap_mode = DSI_COLOR_SWAP_RBG;
-		} else if (!strcmp(swap_mode, "rgb_swap_brg")) {
-			host->swap_mode = DSI_COLOR_SWAP_BRG;
-		} else if (!strcmp(swap_mode, "rgb_swap_grb")) {
-			host->swap_mode = DSI_COLOR_SWAP_GRB;
-		} else if (!strcmp(swap_mode, "rgb_swap_gbr")) {
-			host->swap_mode = DSI_COLOR_SWAP_GBR;
-		} else {
-			pr_err("[%s] Unrecognized color order-%s\n",
-			       name, swap_mode);
-			rc = -EINVAL;
-		}
-	} else {
-		pr_debug("[%s] Falling back to default color order\n", name);
-		host->swap_mode = DSI_COLOR_SWAP_RGB;
-	}
-
-	/* bit swap on color channel is not defined in dt */
-	host->bit_swap_red = false;
-	host->bit_swap_green = false;
-	host->bit_swap_blue = false;
-	return rc;
-}
-
-static int dsi_panel_parse_triggers(struct dsi_host_common_cfg *host,
-				    struct dsi_parser_utils *utils,
-				    const char *name)
-{
-	const char *trig;
-	int rc = 0;
-
-	trig = utils->get_property(utils->data,
-			"qcom,mdss-dsi-mdp-trigger", NULL);
-	if (trig) {
-		if (!strcmp(trig, "none")) {
-			host->mdp_cmd_trigger = DSI_TRIGGER_NONE;
-		} else if (!strcmp(trig, "trigger_te")) {
-			host->mdp_cmd_trigger = DSI_TRIGGER_TE;
-		} else if (!strcmp(trig, "trigger_sw")) {
-			host->mdp_cmd_trigger = DSI_TRIGGER_SW;
-		} else if (!strcmp(trig, "trigger_sw_te")) {
-			host->mdp_cmd_trigger = DSI_TRIGGER_SW_TE;
-		} else {
-			pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
-			       name, trig);
-			rc = -EINVAL;
-		}
-
-	} else {
-		pr_debug("[%s] Falling back to default MDP trigger\n",
-			 name);
-		host->mdp_cmd_trigger = DSI_TRIGGER_SW;
-	}
-
-	trig = utils->get_property(utils->data,
-			"qcom,mdss-dsi-dma-trigger", NULL);
-	if (trig) {
-		if (!strcmp(trig, "none")) {
-			host->dma_cmd_trigger = DSI_TRIGGER_NONE;
-		} else if (!strcmp(trig, "trigger_te")) {
-			host->dma_cmd_trigger = DSI_TRIGGER_TE;
-		} else if (!strcmp(trig, "trigger_sw")) {
-			host->dma_cmd_trigger = DSI_TRIGGER_SW;
-		} else if (!strcmp(trig, "trigger_sw_seof")) {
-			host->dma_cmd_trigger = DSI_TRIGGER_SW_SEOF;
-		} else if (!strcmp(trig, "trigger_sw_te")) {
-			host->dma_cmd_trigger = DSI_TRIGGER_SW_TE;
-		} else {
-			pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
-			       name, trig);
-			rc = -EINVAL;
-		}
-
-	} else {
-		pr_debug("[%s] Falling back to default MDP trigger\n", name);
-		host->dma_cmd_trigger = DSI_TRIGGER_SW;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-te-pin-select",
-			&host->te_mode);
-	if (rc) {
-		pr_warn("[%s] fallback to default te-pin-select\n", name);
-		host->te_mode = 1;
-		rc = 0;
-	}
-
-	return rc;
-}
-
-static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
-					    struct dsi_parser_utils *utils,
-					    const char *name)
-{
-	u32 val = 0;
-	int rc = 0;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-t-clk-post", &val);
-	if (!rc) {
-		host->t_clk_post = val;
-		pr_debug("[%s] t_clk_post = %d\n", name, val);
-	}
-
-	val = 0;
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-t-clk-pre", &val);
-	if (!rc) {
-		host->t_clk_pre = val;
-		pr_debug("[%s] t_clk_pre = %d\n", name, val);
-	}
-
-	host->ignore_rx_eot = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-rx-eot-ignore");
-
-	host->append_tx_eot = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-tx-eot-append");
-
-	host->ext_bridge_mode = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-ext-bridge-mode");
-
-	host->force_hs_clk_lane = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-force-clock-lane-hs");
-	return 0;
-}
-
-static int dsi_panel_parse_host_config(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	rc = dsi_panel_parse_pixel_format(&panel->host_config, utils,
-					  panel->name);
-	if (rc) {
-		pr_err("[%s] failed to get pixel format, rc=%d\n",
-		panel->name, rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_lane_states(&panel->host_config, utils,
-					 panel->name);
-	if (rc) {
-		pr_err("[%s] failed to parse lane states, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_color_swap(&panel->host_config, utils,
-					panel->name);
-	if (rc) {
-		pr_err("[%s] failed to parse color swap config, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_triggers(&panel->host_config, utils,
-				      panel->name);
-	if (rc) {
-		pr_err("[%s] failed to parse triggers, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_misc_host_config(&panel->host_config, utils,
-					      panel->name);
-	if (rc) {
-		pr_err("[%s] failed to parse misc host config, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
-				     struct device_node *of_node)
-{
-	int rc = 0;
-	u32 val = 0;
-
-	rc = of_property_read_u32(of_node,
-				  "qcom,mdss-dsi-qsync-min-refresh-rate",
-				  &val);
-	if (rc)
-		pr_err("[%s] qsync min fps not defined rc:%d\n",
-			panel->name, rc);
-
-	panel->qsync_min_fps = val;
-
-	return rc;
-}
-
-static int dsi_panel_parse_dfps_caps(struct dsi_panel *panel)
-{
-	int rc = 0;
-	bool supported = false;
-	struct dsi_dfps_capabilities *dfps_caps = &panel->dfps_caps;
-	struct dsi_parser_utils *utils = &panel->utils;
-	const char *name = panel->name;
-	const char *type;
-	u32 val = 0;
-
-	supported = utils->read_bool(utils->data,
-			"qcom,mdss-dsi-pan-enable-dynamic-fps");
-
-	if (!supported) {
-		pr_debug("[%s] DFPS is not supported\n", name);
-		dfps_caps->dfps_support = false;
-	} else {
-
-		type = utils->get_property(utils->data,
-				       "qcom,mdss-dsi-pan-fps-update",
-				       NULL);
-		if (!type) {
-			pr_err("[%s] dfps type not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
-			dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
-		} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
-		} else {
-			pr_err("[%s] dfps type is not recognized\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-
-		rc = utils->read_u32(utils->data,
-					  "qcom,mdss-dsi-min-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_err("[%s] Min refresh rate is not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-		dfps_caps->min_refresh_rate = val;
-
-		rc = utils->read_u32(utils->data,
-					  "qcom,mdss-dsi-max-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_debug("[%s] Using default refresh rate\n", name);
-			rc = utils->read_u32(utils->data,
-						"qcom,mdss-dsi-panel-framerate",
-						&val);
-			if (rc) {
-				pr_err("[%s] max refresh rate is not defined\n",
-				       name);
-				rc = -EINVAL;
-				goto error;
-			}
-		}
-		dfps_caps->max_refresh_rate = val;
-
-		if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
-			pr_err("[%s] min rate > max rate\n", name);
-			rc = -EINVAL;
-		}
-
-		pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
-				dfps_caps->min_refresh_rate,
-				dfps_caps->max_refresh_rate,
-				dfps_caps->type);
-		dfps_caps->dfps_support = true;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_video_host_config(struct dsi_video_engine_cfg *cfg,
-					     struct dsi_parser_utils *utils,
-					     const char *name)
-{
-	int rc = 0;
-	const char *traffic_mode;
-	u32 vc_id = 0;
-	u32 val = 0;
-	u32 line_no = 0;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-h-sync-pulse", &val);
-	if (rc) {
-		pr_debug("[%s] fallback to default h-sync-pulse\n", name);
-		cfg->pulse_mode_hsa_he = false;
-	} else if (val == 1) {
-		cfg->pulse_mode_hsa_he = true;
-	} else if (val == 0) {
-		cfg->pulse_mode_hsa_he = false;
-	} else {
-		pr_err("[%s] Unrecognized value for mdss-dsi-h-sync-pulse\n",
-		       name);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	cfg->hfp_lp11_en = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-hfp-power-mode");
-
-	cfg->hbp_lp11_en = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-hbp-power-mode");
-
-	cfg->hsa_lp11_en = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-hsa-power-mode");
-
-	cfg->last_line_interleave_en = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-last-line-interleave");
-
-	cfg->eof_bllp_lp11_en = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-bllp-eof-power-mode");
-
-	cfg->bllp_lp11_en = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-bllp-power-mode");
-
-	cfg->force_clk_lane_hs = of_property_read_bool(utils->data,
-					"qcom,mdss-dsi-force-clock-lane-hs");
-
-	traffic_mode = utils->get_property(utils->data,
-				       "qcom,mdss-dsi-traffic-mode",
-				       NULL);
-	if (!traffic_mode) {
-		pr_debug("[%s] Falling back to default traffic mode\n", name);
-		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
-	} else if (!strcmp(traffic_mode, "non_burst_sync_pulse")) {
-		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
-	} else if (!strcmp(traffic_mode, "non_burst_sync_event")) {
-		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
-	} else if (!strcmp(traffic_mode, "burst_mode")) {
-		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_BURST_MODE;
-	} else {
-		pr_err("[%s] Unrecognized traffic mode-%s\n", name,
-		       traffic_mode);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-virtual-channel-id",
-				  &vc_id);
-	if (rc) {
-		pr_debug("[%s] Fallback to default vc id\n", name);
-		cfg->vc_id = 0;
-	} else {
-		cfg->vc_id = vc_id;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-dma-schedule-line",
-				  &line_no);
-	if (rc) {
-		pr_debug("[%s] set default dma scheduling line no\n", name);
-		cfg->dma_sched_line = 0x1;
-		/* do not fail since we have default value */
-		rc = 0;
-	} else {
-		cfg->dma_sched_line = line_no;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_cmd_host_config(struct dsi_cmd_engine_cfg *cfg,
-					   struct dsi_parser_utils *utils,
-					   const char *name)
-{
-	u32 val = 0;
-	int rc = 0;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-wr-mem-start", &val);
-	if (rc) {
-		pr_debug("[%s] Fallback to default wr-mem-start\n", name);
-		cfg->wr_mem_start = 0x2C;
-	} else {
-		cfg->wr_mem_start = val;
-	}
-
-	val = 0;
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-wr-mem-continue",
-				  &val);
-	if (rc) {
-		pr_debug("[%s] Fallback to default wr-mem-continue\n", name);
-		cfg->wr_mem_continue = 0x3C;
-	} else {
-		cfg->wr_mem_continue = val;
-	}
-
-	/* TODO:  fix following */
-	cfg->max_cmd_packets_interleave = 0;
-
-	val = 0;
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-te-dcs-command",
-				  &val);
-	if (rc) {
-		pr_debug("[%s] fallback to default te-dcs-cmd\n", name);
-		cfg->insert_dcs_command = true;
-	} else if (val == 1) {
-		cfg->insert_dcs_command = true;
-	} else if (val == 0) {
-		cfg->insert_dcs_command = false;
-	} else {
-		pr_err("[%s] Unrecognized value for mdss-dsi-te-dcs-command\n",
-		       name);
-		rc = -EINVAL;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_panel_mode(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_parser_utils *utils = &panel->utils;
-	enum dsi_op_mode panel_mode;
-	const char *mode;
-
-	mode = utils->get_property(utils->data,
-			"qcom,mdss-dsi-panel-type", NULL);
-	if (!mode) {
-		pr_debug("[%s] Fallback to default panel mode\n", panel->name);
-		panel_mode = DSI_OP_VIDEO_MODE;
-	} else if (!strcmp(mode, "dsi_video_mode")) {
-		panel_mode = DSI_OP_VIDEO_MODE;
-	} else if (!strcmp(mode, "dsi_cmd_mode")) {
-		panel_mode = DSI_OP_CMD_MODE;
-	} else {
-		pr_err("[%s] Unrecognized panel type-%s\n", panel->name, mode);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (panel_mode == DSI_OP_VIDEO_MODE) {
-		rc = dsi_panel_parse_video_host_config(&panel->video_config,
-						       utils,
-						       panel->name);
-		if (rc) {
-			pr_err("[%s] Failed to parse video host cfg, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
-	}
-
-	if (panel_mode == DSI_OP_CMD_MODE) {
-		rc = dsi_panel_parse_cmd_host_config(&panel->cmd_config,
-						     utils,
-						     panel->name);
-		if (rc) {
-			pr_err("[%s] Failed to parse cmd host config, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
-	}
-
-	panel->panel_mode = panel_mode;
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_phy_props(struct dsi_panel *panel)
-{
-	int rc = 0;
-	u32 val = 0;
-	const char *str;
-	struct dsi_panel_phy_props *props = &panel->phy_props;
-	struct dsi_parser_utils *utils = &panel->utils;
-	const char *name = panel->name;
-
-	rc = utils->read_u32(utils->data,
-		  "qcom,mdss-pan-physical-width-dimension", &val);
-	if (rc) {
-		pr_debug("[%s] Physical panel width is not defined\n", name);
-		props->panel_width_mm = 0;
-		rc = 0;
-	} else {
-		props->panel_width_mm = val;
-	}
-
-	rc = utils->read_u32(utils->data,
-				  "qcom,mdss-pan-physical-height-dimension",
-				  &val);
-	if (rc) {
-		pr_debug("[%s] Physical panel height is not defined\n", name);
-		props->panel_height_mm = 0;
-		rc = 0;
-	} else {
-		props->panel_height_mm = val;
-	}
-
-	str = utils->get_property(utils->data,
-			"qcom,mdss-dsi-panel-orientation", NULL);
-	if (!str) {
-		props->rotation = DSI_PANEL_ROTATE_NONE;
-	} else if (!strcmp(str, "180")) {
-		props->rotation = DSI_PANEL_ROTATE_HV_FLIP;
-	} else if (!strcmp(str, "hflip")) {
-		props->rotation = DSI_PANEL_ROTATE_H_FLIP;
-	} else if (!strcmp(str, "vflip")) {
-		props->rotation = DSI_PANEL_ROTATE_V_FLIP;
-	} else {
-		pr_err("[%s] Unrecognized panel rotation-%s\n", name, str);
-		rc = -EINVAL;
-		goto error;
-	}
-error:
-	return rc;
-}
-const char *cmd_set_prop_map[DSI_CMD_SET_MAX] = {
-	"qcom,mdss-dsi-pre-on-command",
-	"qcom,mdss-dsi-on-command",
-	"qcom,mdss-dsi-post-panel-on-command",
-	"qcom,mdss-dsi-pre-off-command",
-	"qcom,mdss-dsi-off-command",
-	"qcom,mdss-dsi-post-off-command",
-	"qcom,mdss-dsi-pre-res-switch",
-	"qcom,mdss-dsi-res-switch",
-	"qcom,mdss-dsi-post-res-switch",
-	"qcom,cmd-to-video-mode-switch-commands",
-	"qcom,cmd-to-video-mode-post-switch-commands",
-	"qcom,video-to-cmd-mode-switch-commands",
-	"qcom,video-to-cmd-mode-post-switch-commands",
-	"qcom,mdss-dsi-panel-status-command",
-	"qcom,mdss-dsi-lp1-command",
-	"qcom,mdss-dsi-lp2-command",
-	"qcom,mdss-dsi-nolp-command",
-	"PPS not parsed from DTSI, generated dynamically",
-	"ROI not parsed from DTSI, generated dynamically",
-	"qcom,mdss-dsi-timing-switch-command",
-	"qcom,mdss-dsi-post-mode-switch-on-command",
-	"qcom,mdss-dsi-qsync-on-commands",
-	"qcom,mdss-dsi-qsync-off-commands",
-};
-
-const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
-	"qcom,mdss-dsi-pre-on-command-state",
-	"qcom,mdss-dsi-on-command-state",
-	"qcom,mdss-dsi-post-on-command-state",
-	"qcom,mdss-dsi-pre-off-command-state",
-	"qcom,mdss-dsi-off-command-state",
-	"qcom,mdss-dsi-post-off-command-state",
-	"qcom,mdss-dsi-pre-res-switch-state",
-	"qcom,mdss-dsi-res-switch-state",
-	"qcom,mdss-dsi-post-res-switch-state",
-	"qcom,cmd-to-video-mode-switch-commands-state",
-	"qcom,cmd-to-video-mode-post-switch-commands-state",
-	"qcom,video-to-cmd-mode-switch-commands-state",
-	"qcom,video-to-cmd-mode-post-switch-commands-state",
-	"qcom,mdss-dsi-panel-status-command-state",
-	"qcom,mdss-dsi-lp1-command-state",
-	"qcom,mdss-dsi-lp2-command-state",
-	"qcom,mdss-dsi-nolp-command-state",
-	"PPS not parsed from DTSI, generated dynamically",
-	"ROI not parsed from DTSI, generated dynamically",
-	"qcom,mdss-dsi-timing-switch-command-state",
-	"qcom,mdss-dsi-post-mode-switch-on-command-state",
-	"qcom,mdss-dsi-qsync-on-commands-state",
-	"qcom,mdss-dsi-qsync-off-commands-state",
-};
-
-static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
-{
-	const u32 cmd_set_min_size = 7;
-	u32 count = 0;
-	u32 packet_length;
-	u32 tmp;
-
-	while (length >= cmd_set_min_size) {
-		packet_length = cmd_set_min_size;
-		tmp = ((data[5] << 8) | (data[6]));
-		packet_length += tmp;
-		if (packet_length > length) {
-			pr_err("format error\n");
-			return -EINVAL;
-		}
-		length -= packet_length;
-		data += packet_length;
-		count++;
-	}
-
-	*cnt = count;
-	return 0;
-}
-
-static int dsi_panel_create_cmd_packets(const char *data,
-					u32 length,
-					u32 count,
-					struct dsi_cmd_desc *cmd)
-{
-	int rc = 0;
-	int i, j;
-	u8 *payload;
-
-	for (i = 0; i < count; i++) {
-		u32 size;
-
-		cmd[i].msg.type = data[0];
-		cmd[i].last_command = (data[1] == 1);
-		cmd[i].msg.channel = data[2];
-		cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
-		cmd[i].msg.ctrl = 0;
-		cmd[i].post_wait_ms = cmd[i].msg.wait_ms = data[4];
-		cmd[i].msg.tx_len = ((data[5] << 8) | (data[6]));
-
-		size = cmd[i].msg.tx_len * sizeof(u8);
-
-		payload = kzalloc(size, GFP_KERNEL);
-		if (!payload) {
-			rc = -ENOMEM;
-			goto error_free_payloads;
-		}
-
-		for (j = 0; j < cmd[i].msg.tx_len; j++)
-			payload[j] = data[7 + j];
-
-		cmd[i].msg.tx_buf = payload;
-		data += (7 + cmd[i].msg.tx_len);
-	}
-
-	return rc;
-error_free_payloads:
-	for (i = i - 1; i >= 0; i--) {
-		cmd--;
-		kfree(cmd->msg.tx_buf);
-	}
-
-	return rc;
-}
-
-static void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
-{
-	u32 i = 0;
-	struct dsi_cmd_desc *cmd;
-
-	for (i = 0; i < set->count; i++) {
-		cmd = &set->cmds[i];
-		kfree(cmd->msg.tx_buf);
-	}
-}
-
-static void dsi_panel_dealloc_cmd_packets(struct dsi_panel_cmd_set *set)
-{
-	kfree(set->cmds);
-}
-
-static int dsi_panel_alloc_cmd_packets(struct dsi_panel_cmd_set *cmd,
-					u32 packet_count)
-{
-	u32 size;
-
-	size = packet_count * sizeof(*cmd->cmds);
-	cmd->cmds = kzalloc(size, GFP_KERNEL);
-	if (!cmd->cmds)
-		return -ENOMEM;
-
-	cmd->count = packet_count;
-	return 0;
-}
-
-static int dsi_panel_parse_cmd_sets_sub(struct dsi_panel_cmd_set *cmd,
-					enum dsi_cmd_set_type type,
-					struct dsi_parser_utils *utils)
-{
-	int rc = 0;
-	u32 length = 0;
-	const char *data;
-	const char *state;
-	u32 packet_count = 0;
-
-	data = utils->get_property(utils->data, cmd_set_prop_map[type],
-			&length);
-	if (!data) {
-		pr_debug("%s commands not defined\n", cmd_set_prop_map[type]);
-		rc = -ENOTSUPP;
-		goto error;
-	}
-
-	pr_debug("type=%d, name=%s, length=%d\n", type,
-		cmd_set_prop_map[type], length);
-
-	print_hex_dump_debug("", DUMP_PREFIX_NONE,
-		       8, 1, data, length, false);
-
-	rc = dsi_panel_get_cmd_pkt_count(data, length, &packet_count);
-	if (rc) {
-		pr_err("commands failed, rc=%d\n", rc);
-		goto error;
-	}
-	pr_debug("[%s] packet-count=%d, %d\n", cmd_set_prop_map[type],
-		packet_count, length);
-
-	rc = dsi_panel_alloc_cmd_packets(cmd, packet_count);
-	if (rc) {
-		pr_err("failed to allocate cmd packets, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_create_cmd_packets(data, length, packet_count,
-					  cmd->cmds);
-	if (rc) {
-		pr_err("failed to create cmd packets, rc=%d\n", rc);
-		goto error_free_mem;
-	}
-
-	state = utils->get_property(utils->data, cmd_set_state_map[type], NULL);
-	if (!state || !strcmp(state, "dsi_lp_mode")) {
-		cmd->state = DSI_CMD_SET_STATE_LP;
-	} else if (!strcmp(state, "dsi_hs_mode")) {
-		cmd->state = DSI_CMD_SET_STATE_HS;
-	} else {
-		pr_err("[%s] command state unrecognized-%s\n",
-		       cmd_set_state_map[type], state);
-		goto error_free_mem;
-	}
-
-	return rc;
-error_free_mem:
-	kfree(cmd->cmds);
-	cmd->cmds = NULL;
-error:
-	return rc;
-
-}
-
-static int dsi_panel_parse_cmd_sets(
-		struct dsi_display_mode_priv_info *priv_info,
-		struct dsi_parser_utils *utils)
-{
-	int rc = 0;
-	struct dsi_panel_cmd_set *set;
-	u32 i;
-
-	if (!priv_info) {
-		pr_err("invalid mode priv info\n");
-		return -EINVAL;
-	}
-
-	for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
-		set = &priv_info->cmd_sets[i];
-		set->type = i;
-		set->count = 0;
-
-		if (i == DSI_CMD_SET_PPS) {
-			rc = dsi_panel_alloc_cmd_packets(set, 1);
-			if (rc)
-				pr_err("failed to allocate cmd set %d, rc = %d\n",
-					i, rc);
-			set->state = DSI_CMD_SET_STATE_LP;
-		} else {
-			rc = dsi_panel_parse_cmd_sets_sub(set, i, utils);
-			if (rc)
-				pr_debug("failed to parse set %d\n", i);
-		}
-	}
-
-	rc = 0;
-	return rc;
-}
-
-static int dsi_panel_parse_reset_sequence(struct dsi_panel *panel)
-{
-	int rc = 0;
-	int i;
-	u32 length = 0;
-	u32 count = 0;
-	u32 size = 0;
-	u32 *arr_32 = NULL;
-	const u32 *arr;
-	struct dsi_parser_utils *utils = &panel->utils;
-	struct dsi_reset_seq *seq;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	arr = utils->get_property(utils->data,
-			"qcom,mdss-dsi-reset-sequence", &length);
-	if (!arr) {
-		pr_err("[%s] dsi-reset-sequence not found\n", panel->name);
-		rc = -EINVAL;
-		goto error;
-	}
-	if (length & 0x1) {
-		pr_err("[%s] syntax error for dsi-reset-sequence\n",
-		       panel->name);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	pr_err("RESET SEQ LENGTH = %d\n", length);
-	length = length / sizeof(u32);
-
-	size = length * sizeof(u32);
-
-	arr_32 = kzalloc(size, GFP_KERNEL);
-	if (!arr_32) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	rc = utils->read_u32_array(utils->data, "qcom,mdss-dsi-reset-sequence",
-					arr_32, length);
-	if (rc) {
-		pr_err("[%s] cannot read dso-reset-seqience\n", panel->name);
-		goto error_free_arr_32;
-	}
-
-	count = length / 2;
-	size = count * sizeof(*seq);
-	seq = kzalloc(size, GFP_KERNEL);
-	if (!seq) {
-		rc = -ENOMEM;
-		goto error_free_arr_32;
-	}
-
-	panel->reset_config.sequence = seq;
-	panel->reset_config.count = count;
-
-	for (i = 0; i < length; i += 2) {
-		seq->level = arr_32[i];
-		seq->sleep_ms = arr_32[i + 1];
-		seq++;
-	}
-
-
-error_free_arr_32:
-	kfree(arr_32);
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_misc_features(struct dsi_panel *panel)
-{
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	panel->ulps_feature_enabled =
-		utils->read_bool(utils->data, "qcom,ulps-enabled");
-
-	pr_info("%s: ulps feature %s\n", __func__,
-		(panel->ulps_feature_enabled ? "enabled" : "disabled"));
-
-	panel->ulps_suspend_enabled =
-		utils->read_bool(utils->data, "qcom,suspend-ulps-enabled");
-
-	pr_info("%s: ulps during suspend feature %s\n", __func__,
-		(panel->ulps_suspend_enabled ? "enabled" : "disabled"));
-
-	panel->te_using_watchdog_timer = utils->read_bool(utils->data,
-					"qcom,mdss-dsi-te-using-wd");
-
-	panel->sync_broadcast_en = utils->read_bool(utils->data,
-			"qcom,cmd-sync-wait-broadcast");
-
-	panel->lp11_init = utils->read_bool(utils->data,
-			"qcom,mdss-dsi-lp11-init");
-	return 0;
-}
-
-static int dsi_panel_parse_jitter_config(
-				struct dsi_display_mode *mode,
-				struct dsi_parser_utils *utils)
-{
-	int rc;
-	struct dsi_display_mode_priv_info *priv_info;
-	u32 jitter[DEFAULT_PANEL_JITTER_ARRAY_SIZE] = {0, 0};
-	u64 jitter_val = 0;
-
-	priv_info = mode->priv_info;
-
-	rc = utils->read_u32_array(utils->data, "qcom,mdss-dsi-panel-jitter",
-				jitter, DEFAULT_PANEL_JITTER_ARRAY_SIZE);
-	if (rc) {
-		pr_debug("panel jitter not defined rc=%d\n", rc);
-	} else {
-		jitter_val = jitter[0];
-		jitter_val = div_u64(jitter_val, jitter[1]);
-	}
-
-	if (rc || !jitter_val || (jitter_val > MAX_PANEL_JITTER)) {
-		priv_info->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
-		priv_info->panel_jitter_denom =
-					DEFAULT_PANEL_JITTER_DENOMINATOR;
-	} else {
-		priv_info->panel_jitter_numer = jitter[0];
-		priv_info->panel_jitter_denom = jitter[1];
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-panel-prefill-lines",
-				  &priv_info->panel_prefill_lines);
-	if (rc) {
-		pr_debug("panel prefill lines are not defined rc=%d\n", rc);
-		priv_info->panel_prefill_lines = mode->timing.v_back_porch +
-			mode->timing.v_sync_width + mode->timing.v_front_porch;
-	} else if (priv_info->panel_prefill_lines >=
-					DSI_V_TOTAL(&mode->timing)) {
-		pr_debug("invalid prefill lines config=%d setting to:%d\n",
-		priv_info->panel_prefill_lines, DEFAULT_PANEL_PREFILL_LINES);
-
-		priv_info->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
-	}
-
-	return 0;
-}
-
-static int dsi_panel_parse_power_cfg(struct dsi_panel *panel)
-{
-	int rc = 0;
-	char *supply_name;
-
-	if (panel->host_config.ext_bridge_mode)
-		return 0;
-
-	if (!strcmp(panel->type, "primary"))
-		supply_name = "qcom,panel-supply-entries";
-	else
-		supply_name = "qcom,panel-sec-supply-entries";
-
-	rc = dsi_pwr_of_get_vreg_data(&panel->utils,
-			&panel->power_info, supply_name);
-	if (rc) {
-		pr_err("[%s] failed to parse vregs\n", panel->name);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_gpios(struct dsi_panel *panel)
-{
-	int rc = 0;
-	const char *data;
-	struct dsi_parser_utils *utils = &panel->utils;
-	char *reset_gpio_name, *mode_set_gpio_name;
-
-	if (!strcmp(panel->type, "primary")) {
-		reset_gpio_name = "qcom,platform-reset-gpio";
-		mode_set_gpio_name = "qcom,panel-mode-gpio";
-	} else {
-		reset_gpio_name = "qcom,platform-sec-reset-gpio";
-		mode_set_gpio_name = "qcom,panel-sec-mode-gpio";
-	}
-
-	panel->reset_config.reset_gpio = utils->get_named_gpio(utils->data,
-					      reset_gpio_name, 0);
-	if (!gpio_is_valid(panel->reset_config.reset_gpio) &&
-		!panel->host_config.ext_bridge_mode) {
-		rc = panel->reset_config.reset_gpio;
-		pr_err("[%s] failed get reset gpio, rc=%d\n", panel->name, rc);
-		goto error;
-	}
-
-	panel->reset_config.disp_en_gpio = utils->get_named_gpio(utils->data,
-						"qcom,5v-boost-gpio",
-						0);
-	if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
-		pr_debug("[%s] 5v-boot-gpio is not set, rc=%d\n",
-			 panel->name, rc);
-		panel->reset_config.disp_en_gpio =
-				utils->get_named_gpio(utils->data,
-					"qcom,platform-en-gpio", 0);
-		if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
-			pr_debug("[%s] platform-en-gpio is not set, rc=%d\n",
-				 panel->name, rc);
-		}
-	}
-
-	panel->reset_config.lcd_mode_sel_gpio = utils->get_named_gpio(
-		utils->data, mode_set_gpio_name, 0);
-	if (!gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
-		pr_debug("%s:%d mode gpio not specified\n", __func__, __LINE__);
-
-	pr_debug("mode gpio=%d\n", panel->reset_config.lcd_mode_sel_gpio);
-
-	data = utils->get_property(utils->data,
-		"qcom,mdss-dsi-mode-sel-gpio-state", NULL);
-	if (data) {
-		if (!strcmp(data, "single_port"))
-			panel->reset_config.mode_sel_state =
-				MODE_SEL_SINGLE_PORT;
-		else if (!strcmp(data, "dual_port"))
-			panel->reset_config.mode_sel_state =
-				MODE_SEL_DUAL_PORT;
-		else if (!strcmp(data, "high"))
-			panel->reset_config.mode_sel_state =
-				MODE_GPIO_HIGH;
-		else if (!strcmp(data, "low"))
-			panel->reset_config.mode_sel_state =
-				MODE_GPIO_LOW;
-	} else {
-		/* Set default mode as SPLIT mode */
-		panel->reset_config.mode_sel_state = MODE_SEL_DUAL_PORT;
-	}
-
-	/* TODO:  release memory */
-	rc = dsi_panel_parse_reset_sequence(panel);
-	if (rc) {
-		pr_err("[%s] failed to parse reset sequence, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_bl_pwm_config(struct dsi_panel *panel)
-{
-	int rc = 0;
-	u32 val;
-	struct dsi_backlight_config *config = &panel->bl_config;
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	rc = utils->read_u32(utils->data, "qcom,dsi-bl-pmic-bank-select",
-				  &val);
-	if (rc) {
-		pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
-		goto error;
-	}
-	config->pwm_pmic_bank = val;
-
-	rc = utils->read_u32(utils->data, "qcom,dsi-bl-pmic-pwm-frequency",
-				  &val);
-	if (rc) {
-		pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
-		goto error;
-	}
-	config->pwm_period_usecs = val;
-
-	config->pwm_pmi_control = utils->read_bool(utils->data,
-						"qcom,mdss-dsi-bl-pwm-pmi");
-
-	config->pwm_gpio = utils->get_named_gpio(utils->data,
-					     "qcom,mdss-dsi-pwm-gpio",
-					     0);
-	if (!gpio_is_valid(config->pwm_gpio)) {
-		pr_err("pwm gpio is invalid\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_bl_config(struct dsi_panel *panel)
-{
-	int rc = 0;
-	u32 val = 0;
-	const char *bl_type;
-	const char *data;
-	struct dsi_parser_utils *utils = &panel->utils;
-	char *bl_name;
-
-	if (!strcmp(panel->type, "primary"))
-		bl_name = "qcom,mdss-dsi-bl-pmic-control-type";
-	else
-		bl_name = "qcom,mdss-dsi-sec-bl-pmic-control-type";
-
-	bl_type = utils->get_property(utils->data, bl_name, NULL);
-	if (!bl_type) {
-		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
-	} else if (!strcmp(bl_type, "bl_ctrl_pwm")) {
-		panel->bl_config.type = DSI_BACKLIGHT_PWM;
-	} else if (!strcmp(bl_type, "bl_ctrl_wled")) {
-		panel->bl_config.type = DSI_BACKLIGHT_WLED;
-	} else if (!strcmp(bl_type, "bl_ctrl_dcs")) {
-		panel->bl_config.type = DSI_BACKLIGHT_DCS;
-	} else if (!strcmp(bl_type, "bl_ctrl_external")) {
-		panel->bl_config.type = DSI_BACKLIGHT_EXTERNAL;
-	} else {
-		pr_debug("[%s] bl-pmic-control-type unknown-%s\n",
-			 panel->name, bl_type);
-		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
-	}
-
-	data = utils->get_property(utils->data, "qcom,bl-update-flag", NULL);
-	if (!data) {
-		panel->bl_config.bl_update = BL_UPDATE_NONE;
-	} else if (!strcmp(data, "delay_until_first_frame")) {
-		panel->bl_config.bl_update = BL_UPDATE_DELAY_UNTIL_FIRST_FRAME;
-	} else {
-		pr_debug("[%s] No valid bl-update-flag: %s\n",
-						panel->name, data);
-		panel->bl_config.bl_update = BL_UPDATE_NONE;
-	}
-
-	panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
-	panel->bl_config.bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-min-level", &val);
-	if (rc) {
-		pr_debug("[%s] bl-min-level unspecified, defaulting to zero\n",
-			 panel->name);
-		panel->bl_config.bl_min_level = 0;
-	} else {
-		panel->bl_config.bl_min_level = val;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-max-level", &val);
-	if (rc) {
-		pr_debug("[%s] bl-max-level unspecified, defaulting to max level\n",
-			 panel->name);
-		panel->bl_config.bl_max_level = MAX_BL_LEVEL;
-	} else {
-		panel->bl_config.bl_max_level = val;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-brightness-max-level",
-		&val);
-	if (rc) {
-		pr_debug("[%s] brigheness-max-level unspecified, defaulting to 255\n",
-			 panel->name);
-		panel->bl_config.brightness_max_level = 255;
-	} else {
-		panel->bl_config.brightness_max_level = val;
-	}
-
-	if (panel->bl_config.type == DSI_BACKLIGHT_PWM) {
-		rc = dsi_panel_parse_bl_pwm_config(panel);
-		if (rc) {
-			pr_err("[%s] failed to parse pwm config, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
-	}
-
-	panel->bl_config.en_gpio = utils->get_named_gpio(utils->data,
-					      "qcom,platform-bklight-en-gpio",
-					      0);
-	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
-		pr_debug("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
-		rc = 0;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width)
-{
-	int slice_per_pkt, slice_per_intf;
-	int bytes_in_slice, total_bytes_per_intf;
-
-	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
-	    (intf_width < dsc->slice_width)) {
-		pr_err("invalid input, intf_width=%d slice_width=%d\n",
-			intf_width, dsc ? dsc->slice_width : -1);
-		return;
-	}
-
-	slice_per_pkt = dsc->slice_per_pkt;
-	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
-
-	/*
-	 * If slice_per_pkt is greater than slice_per_intf then default to 1.
-	 * This can happen during partial update.
-	 */
-	if (slice_per_pkt > slice_per_intf)
-		slice_per_pkt = 1;
-
-	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
-	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
-
-	dsc->eol_byte_num = total_bytes_per_intf % 3;
-	dsc->pclk_per_line =  DIV_ROUND_UP(total_bytes_per_intf, 3);
-	dsc->bytes_in_slice = bytes_in_slice;
-	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
-	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
-}
-
-
-int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc)
-{
-	int bpp, bpc;
-	int mux_words_size;
-	int groups_per_line, groups_total;
-	int min_rate_buffer_size;
-	int hrd_delay;
-	int pre_num_extra_mux_bits, num_extra_mux_bits;
-	int slice_bits;
-	int data;
-	int final_value, final_scale;
-	int ratio_index, mod_offset;
-
-	dsc->rc_model_size = 8192;
-
-	if (dsc->version == 0x11 && dsc->scr_rev == 0x1)
-		dsc->first_line_bpg_offset = 15;
-	else
-		dsc->first_line_bpg_offset = 12;
-
-	dsc->edge_factor = 6;
-	dsc->tgt_offset_hi = 3;
-	dsc->tgt_offset_lo = 3;
-	dsc->enable_422 = 0;
-	dsc->convert_rgb = 1;
-	dsc->vbr_enable = 0;
-
-	dsc->buf_thresh = dsi_dsc_rc_buf_thresh;
-
-	bpp = dsc->bpp;
-	bpc = dsc->bpc;
-
-	if (bpc == 12)
-		ratio_index = DSC_12BPC_8BPP;
-	else if (bpc == 10)
-		ratio_index = DSC_10BPC_8BPP;
-	else
-		ratio_index = DSC_8BPC_8BPP;
-
-	if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
-		dsc->range_min_qp =
-			dsi_dsc_rc_range_min_qp_1_1_scr1[ratio_index];
-		dsc->range_max_qp =
-			dsi_dsc_rc_range_max_qp_1_1_scr1[ratio_index];
-	} else {
-		dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1[ratio_index];
-		dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1[ratio_index];
-	}
-	dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset;
-
-	if (bpp <= 10)
-		dsc->initial_offset = 6144;
-	else
-		dsc->initial_offset = 2048;	/* bpp = 12 */
-
-	if (bpc == 12)
-		mux_words_size = 64;
-	else
-		mux_words_size = 48;		/* bpc == 8/10 */
-
-	dsc->line_buf_depth = bpc + 1;
-
-	if (bpc == 8) {
-		dsc->input_10_bits = 0;
-		dsc->min_qp_flatness = 3;
-		dsc->max_qp_flatness = 12;
-		dsc->quant_incr_limit0 = 11;
-		dsc->quant_incr_limit1 = 11;
-	} else if (bpc == 10) { /* 10bpc */
-		dsc->input_10_bits = 1;
-		dsc->min_qp_flatness = 7;
-		dsc->max_qp_flatness = 16;
-		dsc->quant_incr_limit0 = 15;
-		dsc->quant_incr_limit1 = 15;
-	} else { /* 12 bpc */
-		dsc->input_10_bits = 0;
-		dsc->min_qp_flatness = 11;
-		dsc->max_qp_flatness = 20;
-		dsc->quant_incr_limit0 = 19;
-		dsc->quant_incr_limit1 = 19;
-	}
-
-	mod_offset = dsc->slice_width % 3;
-	switch (mod_offset) {
-	case 0:
-		dsc->slice_last_group_size = 2;
-		break;
-	case 1:
-		dsc->slice_last_group_size = 0;
-		break;
-	case 2:
-		dsc->slice_last_group_size = 1;
-		break;
-	default:
-		break;
-	}
-
-	dsc->det_thresh_flatness = 2 << (bpc - 8);
-
-	dsc->initial_xmit_delay = dsc->rc_model_size / (2 * bpp);
-
-	groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
-
-	dsc->chunk_size = dsc->slice_width * bpp / 8;
-	if ((dsc->slice_width * bpp) % 8)
-		dsc->chunk_size++;
-
-	/* rbs-min */
-	min_rate_buffer_size =  dsc->rc_model_size - dsc->initial_offset +
-			dsc->initial_xmit_delay * bpp +
-			groups_per_line * dsc->first_line_bpg_offset;
-
-	hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, bpp);
-
-	dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
-
-	dsc->initial_scale_value = 8 * dsc->rc_model_size /
-			(dsc->rc_model_size - dsc->initial_offset);
-
-	slice_bits = 8 * dsc->chunk_size * dsc->slice_height;
-
-	groups_total = groups_per_line * dsc->slice_height;
-
-	data = dsc->first_line_bpg_offset * 2048;
-
-	dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
-
-	pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * bpc + 4) - 2);
-
-	num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
-		((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
-
-	data = 2048 * (dsc->rc_model_size - dsc->initial_offset
-		+ num_extra_mux_bits);
-	dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
-
-	data = dsc->initial_xmit_delay * bpp;
-	final_value =  dsc->rc_model_size - data + num_extra_mux_bits;
-
-	final_scale = 8 * dsc->rc_model_size /
-		(dsc->rc_model_size - final_value);
-
-	dsc->final_offset = final_value;
-
-	data = (final_scale - 9) * (dsc->nfl_bpg_offset +
-		dsc->slice_bpg_offset);
-	dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
-
-	dsc->scale_decrement_interval = groups_per_line /
-		(dsc->initial_scale_value - 8);
-
-	return 0;
-}
-
-
-static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode,
-				struct dsi_parser_utils *utils)
-{
-	const char *data;
-	u32 len, i;
-	int rc = 0;
-	struct dsi_display_mode_priv_info *priv_info;
-
-	priv_info = mode->priv_info;
-
-	data = utils->get_property(utils->data,
-			"qcom,mdss-dsi-panel-phy-timings", &len);
-	if (!data) {
-		pr_debug("Unable to read Phy timing settings\n");
-	} else {
-		priv_info->phy_timing_val =
-			kzalloc((sizeof(u32) * len), GFP_KERNEL);
-		if (!priv_info->phy_timing_val)
-			return -EINVAL;
-
-		for (i = 0; i < len; i++)
-			priv_info->phy_timing_val[i] = data[i];
-
-		priv_info->phy_timing_len = len;
-	}
-
-	mode->pixel_clk_khz = (DSI_H_TOTAL_DSC(&mode->timing) *
-			DSI_V_TOTAL(&mode->timing) *
-			mode->timing.refresh_rate) / 1000;
-	return rc;
-}
-
-static int dsi_panel_parse_dsc_params(struct dsi_display_mode *mode,
-				struct dsi_parser_utils *utils)
-{
-	u32 data;
-	int rc = -EINVAL;
-	int intf_width;
-	const char *compression;
-	struct dsi_display_mode_priv_info *priv_info;
-
-	if (!mode || !mode->priv_info)
-		return -EINVAL;
-
-	priv_info = mode->priv_info;
-
-	priv_info->dsc_enabled = false;
-	compression = utils->get_property(utils->data,
-			"qcom,compression-mode", NULL);
-	if (compression && !strcmp(compression, "dsc"))
-		priv_info->dsc_enabled = true;
-
-	if (!priv_info->dsc_enabled) {
-		pr_debug("dsc compression is not enabled for the mode\n");
-		return 0;
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-version", &data);
-	if (rc) {
-		priv_info->dsc.version = 0x11;
-		rc = 0;
-	} else {
-		priv_info->dsc.version = data & 0xff;
-		/* only support DSC 1.1 rev */
-		if (priv_info->dsc.version != 0x11) {
-			pr_err("%s: DSC version:%d not supported\n", __func__,
-					priv_info->dsc.version);
-			rc = -EINVAL;
-			goto error;
-		}
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-scr-version", &data);
-	if (rc) {
-		priv_info->dsc.scr_rev = 0x0;
-		rc = 0;
-	} else {
-		priv_info->dsc.scr_rev = data & 0xff;
-		/* only one scr rev supported */
-		if (priv_info->dsc.scr_rev > 0x1) {
-			pr_err("%s: DSC scr version:%d not supported\n",
-					__func__, priv_info->dsc.scr_rev);
-			rc = -EINVAL;
-			goto error;
-		}
-	}
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-slice-height", &data);
-	if (rc) {
-		pr_err("failed to parse qcom,mdss-dsc-slice-height\n");
-		goto error;
-	}
-	priv_info->dsc.slice_height = data;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-slice-width", &data);
-	if (rc) {
-		pr_err("failed to parse qcom,mdss-dsc-slice-width\n");
-		goto error;
-	}
-	priv_info->dsc.slice_width = data;
-
-	intf_width = mode->timing.h_active;
-	if (intf_width % priv_info->dsc.slice_width) {
-		pr_err("invalid slice width for the intf width:%d slice width:%d\n",
-			intf_width, priv_info->dsc.slice_width);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	priv_info->dsc.pic_width = mode->timing.h_active;
-	priv_info->dsc.pic_height = mode->timing.v_active;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-slice-per-pkt", &data);
-	if (rc) {
-		pr_err("failed to parse qcom,mdss-dsc-slice-per-pkt\n");
-		goto error;
-	} else if (!data || (data > 2)) {
-		pr_err("invalid dsc slice-per-pkt:%d\n", data);
-		goto error;
-	}
-	priv_info->dsc.slice_per_pkt = data;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-bit-per-component",
-		&data);
-	if (rc) {
-		pr_err("failed to parse qcom,mdss-dsc-bit-per-component\n");
-		goto error;
-	}
-	priv_info->dsc.bpc = data;
-
-	rc = utils->read_u32(utils->data, "qcom,mdss-dsc-bit-per-pixel",
-			&data);
-	if (rc) {
-		pr_err("failed to parse qcom,mdss-dsc-bit-per-pixel\n");
-		goto error;
-	}
-	priv_info->dsc.bpp = data;
-
-	priv_info->dsc.block_pred_enable = utils->read_bool(utils->data,
-		"qcom,mdss-dsc-block-prediction-enable");
-
-	priv_info->dsc.full_frame_slices = DIV_ROUND_UP(intf_width,
-		priv_info->dsc.slice_width);
-
-	dsi_dsc_populate_static_param(&priv_info->dsc);
-	dsi_dsc_pclk_param_calc(&priv_info->dsc, intf_width);
-
-	mode->timing.dsc_enabled = true;
-	mode->timing.dsc = &priv_info->dsc;
-
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_hdr_config(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct drm_panel_hdr_properties *hdr_prop;
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	hdr_prop = &panel->hdr_props;
-	hdr_prop->hdr_enabled = utils->read_bool(utils->data,
-		"qcom,mdss-dsi-panel-hdr-enabled");
-
-	if (hdr_prop->hdr_enabled) {
-		rc = utils->read_u32_array(utils->data,
-				"qcom,mdss-dsi-panel-hdr-color-primaries",
-				hdr_prop->display_primaries,
-				DISPLAY_PRIMARIES_MAX);
-		if (rc) {
-			pr_err("%s:%d, Unable to read color primaries,rc:%u\n",
-					__func__, __LINE__, rc);
-			hdr_prop->hdr_enabled = false;
-			return rc;
-		}
-
-		rc = utils->read_u32(utils->data,
-			"qcom,mdss-dsi-panel-peak-brightness",
-			&(hdr_prop->peak_brightness));
-		if (rc) {
-			pr_err("%s:%d, Unable to read hdr brightness, rc:%u\n",
-				__func__, __LINE__, rc);
-			hdr_prop->hdr_enabled = false;
-			return rc;
-		}
-
-		rc = utils->read_u32(utils->data,
-			"qcom,mdss-dsi-panel-blackness-level",
-			&(hdr_prop->blackness_level));
-		if (rc) {
-			pr_err("%s:%d, Unable to read hdr brightness, rc:%u\n",
-				__func__, __LINE__, rc);
-			hdr_prop->hdr_enabled = false;
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int dsi_panel_parse_topology(
-		struct dsi_display_mode_priv_info *priv_info,
-		struct dsi_parser_utils *utils,
-		int topology_override)
-{
-	struct msm_display_topology *topology;
-	u32 top_count, top_sel, *array = NULL;
-	int i, len = 0;
-	int rc = -EINVAL;
-
-	len = utils->count_u32_elems(utils->data, "qcom,display-topology");
-	if (len <= 0 || len % TOPOLOGY_SET_LEN ||
-			len > (TOPOLOGY_SET_LEN * MAX_TOPOLOGY)) {
-		pr_err("invalid topology list for the panel, rc = %d\n", rc);
-		return rc;
-	}
-
-	top_count = len / TOPOLOGY_SET_LEN;
-
-	array = kcalloc(len, sizeof(u32), GFP_KERNEL);
-	if (!array)
-		return -ENOMEM;
-
-	rc = utils->read_u32_array(utils->data,
-			"qcom,display-topology", array, len);
-	if (rc) {
-		pr_err("unable to read the display topologies, rc = %d\n", rc);
-		goto read_fail;
-	}
-
-	topology = kcalloc(top_count, sizeof(*topology), GFP_KERNEL);
-	if (!topology) {
-		rc = -ENOMEM;
-		goto read_fail;
-	}
-
-	for (i = 0; i < top_count; i++) {
-		struct msm_display_topology *top = &topology[i];
-
-		top->num_lm = array[i * TOPOLOGY_SET_LEN];
-		top->num_enc = array[i * TOPOLOGY_SET_LEN + 1];
-		top->num_intf = array[i * TOPOLOGY_SET_LEN + 2];
-	}
-
-	if (topology_override >= 0 && topology_override < top_count) {
-		pr_info("override topology: cfg:%d lm:%d comp_enc:%d intf:%d\n",
-			topology_override,
-			topology[topology_override].num_lm,
-			topology[topology_override].num_enc,
-			topology[topology_override].num_intf);
-		top_sel = topology_override;
-		goto parse_done;
-	}
-
-	rc = utils->read_u32(utils->data,
-			"qcom,default-topology-index", &top_sel);
-	if (rc) {
-		pr_err("no default topology selected, rc = %d\n", rc);
-		goto parse_fail;
-	}
-
-	if (top_sel >= top_count) {
-		rc = -EINVAL;
-		pr_err("default topology is specified is not valid, rc = %d\n",
-			rc);
-		goto parse_fail;
-	}
-
-	pr_info("default topology: lm: %d comp_enc:%d intf: %d\n",
-		topology[top_sel].num_lm,
-		topology[top_sel].num_enc,
-		topology[top_sel].num_intf);
-
-parse_done:
-	memcpy(&priv_info->topology, &topology[top_sel],
-		sizeof(struct msm_display_topology));
-parse_fail:
-	kfree(topology);
-read_fail:
-	kfree(array);
-
-	return rc;
-}
-
-static int dsi_panel_parse_roi_alignment(struct dsi_parser_utils *utils,
-					 struct msm_roi_alignment *align)
-{
-	int len = 0, rc = 0;
-	u32 value[6];
-	struct property *data;
-
-	if (!align)
-		return -EINVAL;
-
-	memset(align, 0, sizeof(*align));
-
-	data = utils->find_property(utils->data,
-			"qcom,panel-roi-alignment", &len);
-	len /= sizeof(u32);
-	if (!data) {
-		pr_err("panel roi alignment not found\n");
-		rc = -EINVAL;
-	} else if (len != 6) {
-		pr_err("incorrect roi alignment len %d\n", len);
-		rc = -EINVAL;
-	} else {
-		rc = utils->read_u32_array(utils->data,
-				"qcom,panel-roi-alignment", value, len);
-		if (rc)
-			pr_debug("error reading panel roi alignment values\n");
-		else {
-			align->xstart_pix_align = value[0];
-			align->ystart_pix_align = value[1];
-			align->width_pix_align = value[2];
-			align->height_pix_align = value[3];
-			align->min_width = value[4];
-			align->min_height = value[5];
-		}
-
-		pr_info("roi alignment: [%d, %d, %d, %d, %d, %d]\n",
-			align->xstart_pix_align,
-			align->width_pix_align,
-			align->ystart_pix_align,
-			align->height_pix_align,
-			align->min_width,
-			align->min_height);
-	}
-
-	return rc;
-}
-
-static int dsi_panel_parse_partial_update_caps(struct dsi_display_mode *mode,
-				struct dsi_parser_utils *utils)
-{
-	struct msm_roi_caps *roi_caps = NULL;
-	const char *data;
-	int rc = 0;
-
-	if (!mode || !mode->priv_info) {
-		pr_err("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	roi_caps = &mode->priv_info->roi_caps;
-
-	memset(roi_caps, 0, sizeof(*roi_caps));
-
-	data = utils->get_property(utils->data,
-		"qcom,partial-update-enabled", NULL);
-	if (data) {
-		if (!strcmp(data, "dual_roi"))
-			roi_caps->num_roi = 2;
-		else if (!strcmp(data, "single_roi"))
-			roi_caps->num_roi = 1;
-		else {
-			pr_info(
-			"invalid value for qcom,partial-update-enabled: %s\n",
-			data);
-			return 0;
-		}
-	} else {
-		pr_info("partial update disabled as the property is not set\n");
-		return 0;
-	}
-
-	roi_caps->merge_rois = utils->read_bool(utils->data,
-			"qcom,partial-update-roi-merge");
-
-	roi_caps->enabled = roi_caps->num_roi > 0;
-
-	pr_info("partial update num_rois=%d enabled=%d\n", roi_caps->num_roi,
-			roi_caps->enabled);
-
-	if (roi_caps->enabled)
-		rc = dsi_panel_parse_roi_alignment(utils,
-				&roi_caps->align);
-
-	if (rc)
-		memset(roi_caps, 0, sizeof(*roi_caps));
-
-	return rc;
-}
-
-static int dsi_panel_parse_dms_info(struct dsi_panel *panel)
-{
-	int dms_enabled;
-	const char *data;
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	panel->dms_mode = DSI_DMS_MODE_DISABLED;
-	dms_enabled = utils->read_bool(utils->data,
-		"qcom,dynamic-mode-switch-enabled");
-	if (!dms_enabled)
-		return 0;
-
-	data = utils->get_property(utils->data,
-			"qcom,dynamic-mode-switch-type", NULL);
-	if (data && !strcmp(data, "dynamic-resolution-switch-immediate")) {
-		panel->dms_mode = DSI_DMS_MODE_RES_SWITCH_IMMEDIATE;
-	} else {
-		pr_err("[%s] unsupported dynamic switch mode: %s\n",
-							panel->name, data);
-		return -EINVAL;
-	}
-
-	return 0;
-};
-
-/*
- * The length of all the valid values to be checked should not be greater
- * than the length of returned data from read command.
- */
-static bool
-dsi_panel_parse_esd_check_valid_params(struct dsi_panel *panel, u32 count)
-{
-	int i;
-	struct drm_panel_esd_config *config = &panel->esd_config;
-
-	for (i = 0; i < count; ++i) {
-		if (config->status_valid_params[i] >
-				config->status_cmds_rlen[i]) {
-			pr_debug("ignore valid params\n");
-			return false;
-		}
-	}
-
-	return true;
-}
-
-static bool dsi_panel_parse_esd_status_len(struct dsi_parser_utils *utils,
-	char *prop_key, u32 **target, u32 cmd_cnt)
-{
-	int tmp;
-
-	if (!utils->find_property(utils->data, prop_key, &tmp))
-		return false;
-
-	tmp /= sizeof(u32);
-	if (tmp != cmd_cnt) {
-		pr_err("request property(%d) do not match cmd count(%d)\n",
-				tmp, cmd_cnt);
-		return false;
-	}
-
-	*target = kcalloc(tmp, sizeof(u32), GFP_KERNEL);
-	if (IS_ERR_OR_NULL(*target)) {
-		pr_err("Error allocating memory for property\n");
-		return false;
-	}
-
-	if (utils->read_u32_array(utils->data, prop_key, *target, tmp)) {
-		pr_err("cannot get values from dts\n");
-		kfree(*target);
-		*target = NULL;
-		return false;
-	}
-
-	return true;
-}
-
-static void dsi_panel_esd_config_deinit(struct drm_panel_esd_config *esd_config)
-{
-	kfree(esd_config->status_buf);
-	kfree(esd_config->return_buf);
-	kfree(esd_config->status_value);
-	kfree(esd_config->status_valid_params);
-	kfree(esd_config->status_cmds_rlen);
-	kfree(esd_config->status_cmd.cmds);
-}
-
-int dsi_panel_parse_esd_reg_read_configs(struct dsi_panel *panel)
-{
-	struct drm_panel_esd_config *esd_config;
-	int rc = 0;
-	u32 tmp;
-	u32 i, status_len, *lenp;
-	struct property *data;
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	if (!panel) {
-		pr_err("Invalid Params\n");
-		return -EINVAL;
-	}
-
-	esd_config = &panel->esd_config;
-	if (!esd_config)
-		return -EINVAL;
-
-	dsi_panel_parse_cmd_sets_sub(&esd_config->status_cmd,
-				DSI_CMD_SET_PANEL_STATUS, utils);
-	if (!esd_config->status_cmd.count) {
-		pr_err("panel status command parsing failed\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (!dsi_panel_parse_esd_status_len(utils,
-		"qcom,mdss-dsi-panel-status-read-length",
-			&panel->esd_config.status_cmds_rlen,
-				esd_config->status_cmd.count)) {
-		pr_err("Invalid status read length\n");
-		rc = -EINVAL;
-		goto error1;
-	}
-
-	if (dsi_panel_parse_esd_status_len(utils,
-		"qcom,mdss-dsi-panel-status-valid-params",
-			&panel->esd_config.status_valid_params,
-				esd_config->status_cmd.count)) {
-		if (!dsi_panel_parse_esd_check_valid_params(panel,
-					esd_config->status_cmd.count)) {
-			rc = -EINVAL;
-			goto error2;
-		}
-	}
-
-	status_len = 0;
-	lenp = esd_config->status_valid_params ?: esd_config->status_cmds_rlen;
-	for (i = 0; i < esd_config->status_cmd.count; ++i)
-		status_len += lenp[i];
-
-	if (!status_len) {
-		rc = -EINVAL;
-		goto error2;
-	}
-
-	/*
-	 * Some panel may need multiple read commands to properly
-	 * check panel status. Do a sanity check for proper status
-	 * value which will be compared with the value read by dsi
-	 * controller during ESD check. Also check if multiple read
-	 * commands are there then, there should be corresponding
-	 * status check values for each read command.
-	 */
-	data = utils->find_property(utils->data,
-			"qcom,mdss-dsi-panel-status-value", &tmp);
-	tmp /= sizeof(u32);
-	if (!IS_ERR_OR_NULL(data) && tmp != 0 && (tmp % status_len) == 0) {
-		esd_config->groups = tmp / status_len;
-	} else {
-		pr_err("error parse panel-status-value\n");
-		rc = -EINVAL;
-		goto error2;
-	}
-
-	esd_config->status_value =
-		kzalloc(sizeof(u32) * status_len * esd_config->groups,
-			GFP_KERNEL);
-	if (!esd_config->status_value) {
-		rc = -ENOMEM;
-		goto error2;
-	}
-
-	esd_config->return_buf = kcalloc(status_len * esd_config->groups,
-			sizeof(unsigned char), GFP_KERNEL);
-	if (!esd_config->return_buf) {
-		rc = -ENOMEM;
-		goto error3;
-	}
-
-	esd_config->status_buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!esd_config->status_buf) {
-		rc = -ENOMEM;
-		goto error4;
-	}
-
-	rc = utils->read_u32_array(utils->data,
-		"qcom,mdss-dsi-panel-status-value",
-		esd_config->status_value, esd_config->groups * status_len);
-	if (rc) {
-		pr_debug("error reading panel status values\n");
-		memset(esd_config->status_value, 0,
-				esd_config->groups * status_len);
-	}
-
-	return 0;
-
-error4:
-	kfree(esd_config->return_buf);
-error3:
-	kfree(esd_config->status_value);
-error2:
-	kfree(esd_config->status_valid_params);
-	kfree(esd_config->status_cmds_rlen);
-error1:
-	kfree(esd_config->status_cmd.cmds);
-error:
-	return rc;
-}
-
-static int dsi_panel_parse_esd_config(struct dsi_panel *panel)
-{
-	int rc = 0;
-	const char *string;
-	struct drm_panel_esd_config *esd_config;
-	struct dsi_parser_utils *utils = &panel->utils;
-	u8 *esd_mode = NULL;
-
-	esd_config = &panel->esd_config;
-	esd_config->status_mode = ESD_MODE_MAX;
-	esd_config->esd_enabled = utils->read_bool(utils->data,
-		"qcom,esd-check-enabled");
-
-	if (!esd_config->esd_enabled)
-		return 0;
-
-	rc = utils->read_string(utils->data,
-			"qcom,mdss-dsi-panel-status-check-mode", &string);
-	if (!rc) {
-		if (!strcmp(string, "bta_check")) {
-			esd_config->status_mode = ESD_MODE_SW_BTA;
-		} else if (!strcmp(string, "reg_read")) {
-			esd_config->status_mode = ESD_MODE_REG_READ;
-		} else if (!strcmp(string, "te_signal_check")) {
-			if (panel->panel_mode == DSI_OP_CMD_MODE) {
-				esd_config->status_mode = ESD_MODE_PANEL_TE;
-			} else {
-				pr_err("TE-ESD not valid for video mode\n");
-				rc = -EINVAL;
-				goto error;
-			}
-		} else {
-			pr_err("No valid panel-status-check-mode string\n");
-			rc = -EINVAL;
-			goto error;
-		}
-	} else {
-		pr_debug("status check method not defined!\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	if (panel->esd_config.status_mode == ESD_MODE_REG_READ) {
-		rc = dsi_panel_parse_esd_reg_read_configs(panel);
-		if (rc) {
-			pr_err("failed to parse esd reg read mode params, rc=%d\n",
-						rc);
-			goto error;
-		}
-		esd_mode = "register_read";
-	} else if (panel->esd_config.status_mode == ESD_MODE_SW_BTA) {
-		esd_mode = "bta_trigger";
-	} else if (panel->esd_config.status_mode ==  ESD_MODE_PANEL_TE) {
-		esd_mode = "te_check";
-	}
-
-	pr_info("ESD enabled with mode: %s\n", esd_mode);
-
-	return 0;
-
-error:
-	panel->esd_config.esd_enabled = false;
-	return rc;
-}
-
-static void dsi_panel_update_util(struct dsi_panel *panel,
-				  struct device_node *parser_node)
-{
-	struct dsi_parser_utils *utils = &panel->utils;
-
-	if (parser_node) {
-		*utils = *dsi_parser_get_parser_utils();
-		utils->data = parser_node;
-
-		pr_debug("switching to parser APIs\n");
-
-		goto end;
-	}
-
-	*utils = *dsi_parser_get_of_utils();
-	utils->data = panel->panel_of_node;
-end:
-	utils->node = panel->panel_of_node;
-}
-
-struct dsi_panel *dsi_panel_get(struct device *parent,
-				struct device_node *of_node,
-				struct device_node *parser_node,
-				const char *type,
-				int topology_override)
-{
-	struct dsi_panel *panel;
-	struct dsi_parser_utils *utils;
-	int rc = 0;
-
-	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
-	if (!panel)
-		return ERR_PTR(-ENOMEM);
-
-	panel->panel_of_node = of_node;
-	panel->parent = parent;
-	panel->type = type;
-
-	dsi_panel_update_util(panel, parser_node);
-	utils = &panel->utils;
-
-	panel->name = utils->get_property(utils->data,
-				"qcom,mdss-dsi-panel-name", NULL);
-	if (!panel->name)
-		panel->name = DSI_PANEL_DEFAULT_LABEL;
-
-	rc = dsi_panel_parse_host_config(panel);
-	if (rc) {
-		pr_err("failed to parse host configuration, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_panel_mode(panel);
-	if (rc) {
-		pr_err("failed to parse panel mode configuration, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_dfps_caps(panel);
-	if (rc)
-		pr_err("failed to parse dfps configuration, rc=%d\n", rc);
-
-	if (!(panel->dfps_caps.dfps_support)) {
-		/* qsync and dfps are mutually exclusive features */
-		rc = dsi_panel_parse_qsync_caps(panel, of_node);
-		if (rc)
-			pr_err("failed to parse qsync features, rc=%d\n", rc);
-	}
-
-	rc = dsi_panel_parse_phy_props(panel);
-	if (rc) {
-		pr_err("failed to parse panel physical dimension, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_gpios(panel);
-	if (rc) {
-		pr_err("failed to parse panel gpios, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_power_cfg(panel);
-	if (rc)
-		pr_err("failed to parse power config, rc=%d\n", rc);
-
-	rc = dsi_panel_parse_bl_config(panel);
-	if (rc)
-		pr_err("failed to parse backlight config, rc=%d\n", rc);
-
-
-	rc = dsi_panel_parse_misc_features(panel);
-	if (rc)
-		pr_err("failed to parse misc features, rc=%d\n", rc);
-
-	rc = dsi_panel_parse_hdr_config(panel);
-	if (rc)
-		pr_err("failed to parse hdr config, rc=%d\n", rc);
-
-	rc = dsi_panel_get_mode_count(panel);
-	if (rc) {
-		pr_err("failed to get mode count, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_parse_dms_info(panel);
-	if (rc)
-		pr_debug("failed to get dms info, rc=%d\n", rc);
-
-	rc = dsi_panel_parse_esd_config(panel);
-	if (rc)
-		pr_debug("failed to parse esd config, rc=%d\n", rc);
-
-	drm_panel_init(&panel->drm_panel);
-	mutex_init(&panel->panel_lock);
-
-	return panel;
-error:
-	kfree(panel);
-	return ERR_PTR(rc);
-}
-
-void dsi_panel_put(struct dsi_panel *panel)
-{
-	/* free resources allocated for ESD check */
-	dsi_panel_esd_config_deinit(&panel->esd_config);
-
-	kfree(panel);
-}
-
-int dsi_panel_drv_init(struct dsi_panel *panel,
-		       struct mipi_dsi_host *host)
-{
-	int rc = 0;
-	struct mipi_dsi_device *dev;
-
-	if (!panel || !host) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	dev = &panel->mipi_device;
-
-	dev->host = host;
-	/*
-	 * We dont have device structure since panel is not a device node.
-	 * When using drm panel framework, the device is probed when the host is
-	 * create.
-	 */
-	dev->channel = 0;
-	dev->lanes = 4;
-
-	panel->host = host;
-	rc = dsi_panel_vreg_get(panel);
-	if (rc) {
-		pr_err("[%s] failed to get panel regulators, rc=%d\n",
-		       panel->name, rc);
-		goto exit;
-	}
-
-	rc = dsi_panel_pinctrl_init(panel);
-	if (rc) {
-		pr_err("[%s] failed to init pinctrl, rc=%d\n", panel->name, rc);
-		goto error_vreg_put;
-	}
-
-	rc = dsi_panel_gpio_request(panel);
-	if (rc) {
-		pr_err("[%s] failed to request gpios, rc=%d\n", panel->name,
-		       rc);
-		goto error_pinctrl_deinit;
-	}
-
-	rc = dsi_panel_bl_register(panel);
-	if (rc) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("[%s] failed to register backlight, rc=%d\n",
-			       panel->name, rc);
-		goto error_gpio_release;
-	}
-
-	goto exit;
-
-error_gpio_release:
-	(void)dsi_panel_gpio_release(panel);
-error_pinctrl_deinit:
-	(void)dsi_panel_pinctrl_deinit(panel);
-error_vreg_put:
-	(void)dsi_panel_vreg_put(panel);
-exit:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_drv_deinit(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_bl_unregister(panel);
-	if (rc)
-		pr_err("[%s] failed to unregister backlight, rc=%d\n",
-		       panel->name, rc);
-
-	rc = dsi_panel_gpio_release(panel);
-	if (rc)
-		pr_err("[%s] failed to release gpios, rc=%d\n", panel->name,
-		       rc);
-
-	rc = dsi_panel_pinctrl_deinit(panel);
-	if (rc)
-		pr_err("[%s] failed to deinit gpios, rc=%d\n", panel->name,
-		       rc);
-
-	rc = dsi_panel_vreg_put(panel);
-	if (rc)
-		pr_err("[%s] failed to put regs, rc=%d\n", panel->name, rc);
-
-	panel->host = NULL;
-	memset(&panel->mipi_device, 0x0, sizeof(panel->mipi_device));
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_validate_mode(struct dsi_panel *panel,
-			    struct dsi_display_mode *mode)
-{
-	return 0;
-}
-
-int dsi_panel_get_mode_count(struct dsi_panel *panel)
-{
-	const u32 SINGLE_MODE_SUPPORT = 1;
-	struct dsi_parser_utils *utils;
-	struct device_node *timings_np;
-	int count, rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	utils = &panel->utils;
-
-	panel->num_timing_nodes = 0;
-
-	timings_np = utils->get_child_by_name(utils->data,
-			"qcom,mdss-dsi-display-timings");
-	if (!timings_np && !panel->host_config.ext_bridge_mode) {
-		pr_err("no display timing nodes defined\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	count = utils->get_child_count(timings_np);
-	if ((!count && !panel->host_config.ext_bridge_mode) ||
-		count > DSI_MODE_MAX) {
-		pr_err("invalid count of timing nodes: %d\n", count);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	/* No multiresolution support is available for video mode panels */
-	if (panel->panel_mode != DSI_OP_CMD_MODE &&
-		!panel->host_config.ext_bridge_mode)
-		count = SINGLE_MODE_SUPPORT;
-
-	panel->num_timing_nodes = count;
-
-error:
-	return rc;
-}
-
-int dsi_panel_get_phy_props(struct dsi_panel *panel,
-			    struct dsi_panel_phy_props *phy_props)
-{
-	int rc = 0;
-
-	if (!panel || !phy_props) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	memcpy(phy_props, &panel->phy_props, sizeof(*phy_props));
-	return rc;
-}
-
-int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
-			    struct dsi_dfps_capabilities *dfps_caps)
-{
-	int rc = 0;
-
-	if (!panel || !dfps_caps) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	memcpy(dfps_caps, &panel->dfps_caps, sizeof(*dfps_caps));
-	return rc;
-}
-
-void dsi_panel_put_mode(struct dsi_display_mode *mode)
-{
-	int i;
-
-	if (!mode->priv_info)
-		return;
-
-	for (i = 0; i < DSI_CMD_SET_MAX; i++) {
-		dsi_panel_destroy_cmd_packets(&mode->priv_info->cmd_sets[i]);
-		dsi_panel_dealloc_cmd_packets(&mode->priv_info->cmd_sets[i]);
-	}
-
-	kfree(mode->priv_info);
-}
-
-int dsi_panel_get_mode(struct dsi_panel *panel,
-			u32 index, struct dsi_display_mode *mode,
-			int topology_override)
-{
-	struct device_node *timings_np, *child_np;
-	struct dsi_parser_utils *utils;
-	struct dsi_display_mode_priv_info *prv_info;
-	u32 child_idx = 0;
-	int rc = 0, num_timings;
-	void *utils_data = NULL;
-
-	if (!panel || !mode) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-	utils = &panel->utils;
-
-	mode->priv_info = kzalloc(sizeof(*mode->priv_info), GFP_KERNEL);
-	if (!mode->priv_info) {
-		rc = -ENOMEM;
-		goto done;
-	}
-
-	prv_info = mode->priv_info;
-
-	timings_np = utils->get_child_by_name(utils->data,
-		"qcom,mdss-dsi-display-timings");
-	if (!timings_np) {
-		pr_err("no display timing nodes defined\n");
-		rc = -EINVAL;
-		goto parse_fail;
-	}
-
-	num_timings = utils->get_child_count(timings_np);
-	if (!num_timings || num_timings > DSI_MODE_MAX) {
-		pr_err("invalid count of timing nodes: %d\n", num_timings);
-		rc = -EINVAL;
-		goto parse_fail;
-	}
-
-	utils_data = utils->data;
-
-	dsi_for_each_child_node(timings_np, child_np) {
-		if (index != child_idx++)
-			continue;
-
-		utils->data = child_np;
-
-		rc = dsi_panel_parse_timing(&mode->timing, utils);
-		if (rc) {
-			pr_err("failed to parse panel timing, rc=%d\n", rc);
-			goto parse_fail;
-		}
-
-		rc = dsi_panel_parse_dsc_params(mode, utils);
-		if (rc) {
-			pr_err("failed to parse dsc params, rc=%d\n", rc);
-			goto parse_fail;
-		}
-
-		rc = dsi_panel_parse_topology(prv_info, utils,
-				topology_override);
-		if (rc) {
-			pr_err("failed to parse panel topology, rc=%d\n", rc);
-			goto parse_fail;
-		}
-
-		rc = dsi_panel_parse_cmd_sets(prv_info, utils);
-		if (rc) {
-			pr_err("failed to parse command sets, rc=%d\n", rc);
-			goto parse_fail;
-		}
-
-		rc = dsi_panel_parse_jitter_config(mode, utils);
-		if (rc)
-			pr_err(
-			"failed to parse panel jitter config, rc=%d\n", rc);
-
-		rc = dsi_panel_parse_phy_timing(mode, utils);
-		if (rc) {
-			pr_err(
-			"failed to parse panel phy timings, rc=%d\n", rc);
-			goto parse_fail;
-		}
-
-		rc = dsi_panel_parse_partial_update_caps(mode, utils);
-		if (rc)
-			pr_err("failed to partial update caps, rc=%d\n", rc);
-	}
-	goto done;
-
-parse_fail:
-	kfree(mode->priv_info);
-	mode->priv_info = NULL;
-done:
-	utils->data = utils_data;
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
-				    struct dsi_display_mode *mode,
-				    struct dsi_host_config *config)
-{
-	int rc = 0;
-
-	if (!panel || !mode || !config) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	config->panel_mode = panel->panel_mode;
-	memcpy(&config->common_config, &panel->host_config,
-	       sizeof(config->common_config));
-
-	if (panel->panel_mode == DSI_OP_VIDEO_MODE) {
-		memcpy(&config->u.video_engine, &panel->video_config,
-		       sizeof(config->u.video_engine));
-	} else {
-		memcpy(&config->u.cmd_engine, &panel->cmd_config,
-		       sizeof(config->u.cmd_engine));
-	}
-
-	memcpy(&config->video_timing, &mode->timing,
-	       sizeof(config->video_timing));
-	config->video_timing.mdp_transfer_time_us =
-			mode->priv_info->mdp_transfer_time_us;
-	config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
-	config->video_timing.dsc = &mode->priv_info->dsc;
-
-	config->bit_clk_rate_hz_override = mode->priv_info->clk_rate_hz;
-	config->esc_clk_rate_hz = 19200000;
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_pre_prepare(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	/* If LP11_INIT is set, panel will be powered up during prepare() */
-	if (panel->lp11_init)
-		goto error;
-
-	rc = dsi_panel_power_on(panel);
-	if (rc) {
-		pr_err("[%s] panel power on failed, rc=%d\n", panel->name, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_update_pps(struct dsi_panel *panel)
-{
-	int rc = 0;
-	struct dsi_panel_cmd_set *set = NULL;
-	struct dsi_display_mode_priv_info *priv_info = NULL;
-
-	if (!panel || !panel->cur_mode) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	priv_info = panel->cur_mode->priv_info;
-
-	set = &priv_info->cmd_sets[DSI_CMD_SET_PPS];
-
-	dsi_dsc_create_pps_buf_cmd(&priv_info->dsc, panel->dsc_pps_cmd, 0);
-	rc = dsi_panel_create_cmd_packets(panel->dsc_pps_cmd,
-					  DSI_CMD_PPS_SIZE, 1, set->cmds);
-	if (rc) {
-		pr_err("failed to create cmd packets, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PPS);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_PPS cmds, rc=%d\n",
-			panel->name, rc);
-	}
-
-	dsi_panel_destroy_cmd_packets(set);
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_set_lp1(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP1);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_LP1 cmd, rc=%d\n",
-		       panel->name, rc);
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_set_lp2(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP2);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_LP2 cmd, rc=%d\n",
-		       panel->name, rc);
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_set_nolp(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_NOLP);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_NOLP cmd, rc=%d\n",
-		       panel->name, rc);
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_prepare(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	if (panel->lp11_init) {
-		rc = dsi_panel_power_on(panel);
-		if (rc) {
-			pr_err("[%s] panel power on failed, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
-	}
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_ON);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_PRE_ON cmds, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel_cmd_set *set,
-		struct dsi_rect *roi, int ctrl_idx, int unicast)
-{
-	static const int ROI_CMD_LEN = 5;
-
-	int rc = 0;
-
-	/* DTYPE_DCS_LWRITE */
-	static char *caset, *paset;
-
-	set->cmds = NULL;
-
-	caset = kzalloc(ROI_CMD_LEN, GFP_KERNEL);
-	if (!caset) {
-		rc = -ENOMEM;
-		goto exit;
-	}
-	caset[0] = 0x2a;
-	caset[1] = (roi->x & 0xFF00) >> 8;
-	caset[2] = roi->x & 0xFF;
-	caset[3] = ((roi->x - 1 + roi->w) & 0xFF00) >> 8;
-	caset[4] = (roi->x - 1 + roi->w) & 0xFF;
-
-	paset = kzalloc(ROI_CMD_LEN, GFP_KERNEL);
-	if (!paset) {
-		rc = -ENOMEM;
-		goto error_free_mem;
-	}
-	paset[0] = 0x2b;
-	paset[1] = (roi->y & 0xFF00) >> 8;
-	paset[2] = roi->y & 0xFF;
-	paset[3] = ((roi->y - 1 + roi->h) & 0xFF00) >> 8;
-	paset[4] = (roi->y - 1 + roi->h) & 0xFF;
-
-	set->type = DSI_CMD_SET_ROI;
-	set->state = DSI_CMD_SET_STATE_LP;
-	set->count = 2; /* send caset + paset together */
-	set->cmds = kcalloc(set->count, sizeof(*set->cmds), GFP_KERNEL);
-	if (!set->cmds) {
-		rc = -ENOMEM;
-		goto error_free_mem;
-	}
-	set->cmds[0].msg.channel = 0;
-	set->cmds[0].msg.type = MIPI_DSI_DCS_LONG_WRITE;
-	set->cmds[0].msg.flags = unicast ? MIPI_DSI_MSG_UNICAST : 0;
-	set->cmds[0].msg.ctrl = unicast ? ctrl_idx : 0;
-	set->cmds[0].msg.tx_len = ROI_CMD_LEN;
-	set->cmds[0].msg.tx_buf = caset;
-	set->cmds[0].msg.rx_len = 0;
-	set->cmds[0].msg.rx_buf = 0;
-	set->cmds[0].msg.wait_ms = 0;
-	set->cmds[0].last_command = 0;
-	set->cmds[0].post_wait_ms = 0;
-
-	set->cmds[1].msg.channel = 0;
-	set->cmds[1].msg.type = MIPI_DSI_DCS_LONG_WRITE;
-	set->cmds[1].msg.flags = unicast ? MIPI_DSI_MSG_UNICAST : 0;
-	set->cmds[1].msg.ctrl = unicast ? ctrl_idx : 0;
-	set->cmds[1].msg.tx_len = ROI_CMD_LEN;
-	set->cmds[1].msg.tx_buf = paset;
-	set->cmds[1].msg.rx_len = 0;
-	set->cmds[1].msg.rx_buf = 0;
-	set->cmds[1].msg.wait_ms = 0;
-	set->cmds[1].last_command = 1;
-	set->cmds[1].post_wait_ms = 0;
-
-	goto exit;
-
-error_free_mem:
-	kfree(caset);
-	kfree(paset);
-	kfree(set->cmds);
-
-exit:
-	return rc;
-}
-
-int dsi_panel_send_qsync_on_dcs(struct dsi_panel *panel,
-		int ctrl_idx)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	pr_debug("ctrl:%d qsync on\n", ctrl_idx);
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_QSYNC_ON);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_QSYNC_ON cmds rc=%d\n",
-		       panel->name, rc);
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_send_qsync_off_dcs(struct dsi_panel *panel,
-		int ctrl_idx)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	pr_debug("ctrl:%d qsync off\n", ctrl_idx);
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_QSYNC_OFF);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_QSYNC_OFF cmds rc=%d\n",
-		       panel->name, rc);
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
-		struct dsi_rect *roi)
-{
-	int rc = 0;
-	struct dsi_panel_cmd_set *set;
-	struct dsi_display_mode_priv_info *priv_info;
-
-	if (!panel || !panel->cur_mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	priv_info = panel->cur_mode->priv_info;
-	set = &priv_info->cmd_sets[DSI_CMD_SET_ROI];
-
-	rc = dsi_panel_roi_prepare_dcs_cmds(set, roi, ctrl_idx, true);
-	if (rc) {
-		pr_err("[%s] failed to prepare DSI_CMD_SET_ROI cmds, rc=%d\n",
-				panel->name, rc);
-		return rc;
-	}
-	pr_debug("[%s] send roi x %d y %d w %d h %d\n", panel->name,
-			roi->x, roi->y, roi->w, roi->h);
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ROI);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_ROI cmds, rc=%d\n",
-				panel->name, rc);
-
-	mutex_unlock(&panel->panel_lock);
-
-	dsi_panel_destroy_cmd_packets(set);
-
-	return rc;
-}
-
-int dsi_panel_switch(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_TIMING_SWITCH);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_TIMING_SWITCH cmds, rc=%d\n",
-		       panel->name, rc);
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_post_switch(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_TIMING_SWITCH);
-	if (rc)
-		pr_err("[%s] failed to send DSI_CMD_SET_POST_TIMING_SWITCH cmds, rc=%d\n",
-		       panel->name, rc);
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_enable(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ON);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_ON cmds, rc=%d\n",
-		       panel->name, rc);
-	}
-	panel->panel_initialized = true;
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_post_enable(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_ON);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_POST_ON cmds, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_pre_disable(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_OFF);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_PRE_OFF cmds, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_disable(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	/* Avoid sending panel off commands when ESD recovery is underway */
-	if (!atomic_read(&panel->esd_recovery_pending)) {
-		rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF);
-		if (rc) {
-			/*
-			 * Sending panel off commands may fail when  DSI
-			 * controller is in a bad state. These failures can be
-			 * ignored since controller will go for full reset on
-			 * subsequent display enable anyway.
-			 */
-			pr_warn_ratelimited("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n",
-					panel->name, rc);
-			rc = 0;
-		}
-	}
-	panel->panel_initialized = false;
-
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_unprepare(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_OFF);
-	if (rc) {
-		pr_err("[%s] failed to send DSI_CMD_SET_POST_OFF cmds, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
-
-int dsi_panel_post_unprepare(struct dsi_panel *panel)
-{
-	int rc = 0;
-
-	if (!panel) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&panel->panel_lock);
-
-	rc = dsi_panel_power_off(panel);
-	if (rc) {
-		pr_err("[%s] panel power_Off failed, rc=%d\n",
-		       panel->name, rc);
-		goto error;
-	}
-error:
-	mutex_unlock(&panel->panel_lock);
-	return rc;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
deleted file mode 100644
index a2dcebb..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ /dev/null
@@ -1,296 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PANEL_H_
-#define _DSI_PANEL_H_
-
-#include <linux/of_device.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/backlight.h>
-#include <drm/drm_panel.h>
-#include <drm/msm_drm.h>
-
-#include "dsi_defs.h"
-#include "dsi_ctrl_hw.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "dsi_parser.h"
-#include "msm_drv.h"
-
-#define MAX_BL_LEVEL 4096
-#define MAX_BL_SCALE_LEVEL 1024
-#define MAX_SV_BL_SCALE_LEVEL 65535
-#define DSI_CMD_PPS_SIZE 135
-
-#define DSI_MODE_MAX 5
-
-enum dsi_panel_rotation {
-	DSI_PANEL_ROTATE_NONE = 0,
-	DSI_PANEL_ROTATE_HV_FLIP,
-	DSI_PANEL_ROTATE_H_FLIP,
-	DSI_PANEL_ROTATE_V_FLIP
-};
-
-enum dsi_backlight_type {
-	DSI_BACKLIGHT_PWM = 0,
-	DSI_BACKLIGHT_WLED,
-	DSI_BACKLIGHT_DCS,
-	DSI_BACKLIGHT_EXTERNAL,
-	DSI_BACKLIGHT_UNKNOWN,
-	DSI_BACKLIGHT_MAX,
-};
-
-enum bl_update_flag {
-	BL_UPDATE_DELAY_UNTIL_FIRST_FRAME,
-	BL_UPDATE_NONE,
-};
-
-enum {
-	MODE_GPIO_NOT_VALID = 0,
-	MODE_SEL_DUAL_PORT,
-	MODE_SEL_SINGLE_PORT,
-	MODE_GPIO_HIGH,
-	MODE_GPIO_LOW,
-};
-
-enum dsi_dms_mode {
-	DSI_DMS_MODE_DISABLED = 0,
-	DSI_DMS_MODE_RES_SWITCH_IMMEDIATE,
-};
-
-struct dsi_dfps_capabilities {
-	bool dfps_support;
-	enum dsi_dfps_type type;
-	u32 min_refresh_rate;
-	u32 max_refresh_rate;
-};
-
-struct dsi_pinctrl_info {
-	struct pinctrl *pinctrl;
-	struct pinctrl_state *active;
-	struct pinctrl_state *suspend;
-};
-
-struct dsi_panel_phy_props {
-	u32 panel_width_mm;
-	u32 panel_height_mm;
-	enum dsi_panel_rotation rotation;
-};
-
-struct dsi_backlight_config {
-	enum dsi_backlight_type type;
-	enum bl_update_flag bl_update;
-
-	u32 bl_min_level;
-	u32 bl_max_level;
-	u32 brightness_max_level;
-	u32 bl_level;
-	u32 bl_scale;
-	u32 bl_scale_sv;
-
-	int en_gpio;
-	/* PWM params */
-	bool pwm_pmi_control;
-	u32 pwm_pmic_bank;
-	u32 pwm_period_usecs;
-	int pwm_gpio;
-
-	/* WLED params */
-	struct led_trigger *wled;
-	struct backlight_device *raw_bd;
-};
-
-struct dsi_reset_seq {
-	u32 level;
-	u32 sleep_ms;
-};
-
-struct dsi_panel_reset_config {
-	struct dsi_reset_seq *sequence;
-	u32 count;
-
-	int reset_gpio;
-	int disp_en_gpio;
-	int lcd_mode_sel_gpio;
-	u32 mode_sel_state;
-};
-
-enum esd_check_status_mode {
-	ESD_MODE_REG_READ,
-	ESD_MODE_SW_BTA,
-	ESD_MODE_PANEL_TE,
-	ESD_MODE_SW_SIM_SUCCESS,
-	ESD_MODE_SW_SIM_FAILURE,
-	ESD_MODE_MAX
-};
-
-struct drm_panel_esd_config {
-	bool esd_enabled;
-
-	enum esd_check_status_mode status_mode;
-	struct dsi_panel_cmd_set status_cmd;
-	u32 *status_cmds_rlen;
-	u32 *status_valid_params;
-	u32 *status_value;
-	u8 *return_buf;
-	u8 *status_buf;
-	u32 groups;
-};
-
-struct dsi_panel {
-	const char *name;
-	const char *type;
-	struct device_node *panel_of_node;
-	struct mipi_dsi_device mipi_device;
-
-	struct mutex panel_lock;
-	struct drm_panel drm_panel;
-	struct mipi_dsi_host *host;
-	struct device *parent;
-
-	struct dsi_host_common_cfg host_config;
-	struct dsi_video_engine_cfg video_config;
-	struct dsi_cmd_engine_cfg cmd_config;
-	enum dsi_op_mode panel_mode;
-
-	struct dsi_dfps_capabilities dfps_caps;
-	struct dsi_panel_phy_props phy_props;
-
-	struct dsi_display_mode *cur_mode;
-	u32 num_timing_nodes;
-
-	struct dsi_regulator_info power_info;
-	struct dsi_backlight_config bl_config;
-	struct dsi_panel_reset_config reset_config;
-	struct dsi_pinctrl_info pinctrl;
-	struct drm_panel_hdr_properties hdr_props;
-	struct drm_panel_esd_config esd_config;
-
-	struct dsi_parser_utils utils;
-
-	bool lp11_init;
-	bool ulps_feature_enabled;
-	bool ulps_suspend_enabled;
-	bool allow_phy_power_off;
-	atomic_t esd_recovery_pending;
-
-	bool panel_initialized;
-	bool te_using_watchdog_timer;
-	u32 qsync_min_fps;
-
-	char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
-	enum dsi_dms_mode dms_mode;
-
-	bool sync_broadcast_en;
-};
-
-static inline bool dsi_panel_ulps_feature_enabled(struct dsi_panel *panel)
-{
-	return panel->ulps_feature_enabled;
-}
-
-static inline bool dsi_panel_initialized(struct dsi_panel *panel)
-{
-	return panel->panel_initialized;
-}
-
-static inline void dsi_panel_acquire_panel_lock(struct dsi_panel *panel)
-{
-	mutex_lock(&panel->panel_lock);
-}
-
-static inline void dsi_panel_release_panel_lock(struct dsi_panel *panel)
-{
-	mutex_unlock(&panel->panel_lock);
-}
-
-struct dsi_panel *dsi_panel_get(struct device *parent,
-				struct device_node *of_node,
-				struct device_node *parser_node,
-				const char *type,
-				int topology_override);
-
-int dsi_panel_trigger_esd_attack(struct dsi_panel *panel);
-
-void dsi_panel_put(struct dsi_panel *panel);
-
-int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
-
-int dsi_panel_drv_deinit(struct dsi_panel *panel);
-
-int dsi_panel_get_mode_count(struct dsi_panel *panel);
-
-void dsi_panel_put_mode(struct dsi_display_mode *mode);
-
-int dsi_panel_get_mode(struct dsi_panel *panel,
-		       u32 index,
-		       struct dsi_display_mode *mode,
-		       int topology_override);
-
-int dsi_panel_validate_mode(struct dsi_panel *panel,
-			    struct dsi_display_mode *mode);
-
-int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
-				    struct dsi_display_mode *mode,
-				    struct dsi_host_config *config);
-
-int dsi_panel_get_phy_props(struct dsi_panel *panel,
-			    struct dsi_panel_phy_props *phy_props);
-int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
-			    struct dsi_dfps_capabilities *dfps_caps);
-
-int dsi_panel_pre_prepare(struct dsi_panel *panel);
-
-int dsi_panel_set_lp1(struct dsi_panel *panel);
-
-int dsi_panel_set_lp2(struct dsi_panel *panel);
-
-int dsi_panel_set_nolp(struct dsi_panel *panel);
-
-int dsi_panel_prepare(struct dsi_panel *panel);
-
-int dsi_panel_enable(struct dsi_panel *panel);
-
-int dsi_panel_post_enable(struct dsi_panel *panel);
-
-int dsi_panel_pre_disable(struct dsi_panel *panel);
-
-int dsi_panel_disable(struct dsi_panel *panel);
-
-int dsi_panel_unprepare(struct dsi_panel *panel);
-
-int dsi_panel_post_unprepare(struct dsi_panel *panel);
-
-int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
-
-int dsi_panel_update_pps(struct dsi_panel *panel);
-
-int dsi_panel_send_qsync_on_dcs(struct dsi_panel *panel,
-		int ctrl_idx);
-int dsi_panel_send_qsync_off_dcs(struct dsi_panel *panel,
-		int ctrl_idx);
-
-int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
-		struct dsi_rect *roi);
-
-int dsi_panel_switch(struct dsi_panel *panel);
-
-int dsi_panel_post_switch(struct dsi_panel *panel);
-
-void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
-
-void dsi_panel_bl_handoff(struct dsi_panel *panel);
-
-struct dsi_panel *dsi_panel_ext_bridge_get(struct device *parent,
-				struct device_node *of_node,
-				int topology_override);
-
-int dsi_panel_parse_esd_reg_read_configs(struct dsi_panel *panel);
-
-void dsi_panel_ext_bridge_put(struct dsi_panel *panel);
-
-#endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c b/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c
deleted file mode 100644
index 83669c6..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.c
+++ /dev/null
@@ -1,1248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[dsi-parser] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/device.h>
-
-#include "dsi_parser.h"
-
-#define DSI_PARSER_MAX_NODES 20
-
-enum dsi_parser_prop_type {
-	DSI_PROP_TYPE_STR,
-	DSI_PROP_TYPE_STR_ARRAY,
-	DSI_PROP_TYPE_INT_SET,
-	DSI_PROP_TYPE_INT_SET_ARRAY,
-	DSI_PROP_TYPE_INT_ARRAY,
-};
-
-struct dsi_parser_prop {
-	char *name;
-	char *raw;
-	char *value;
-	char **items;
-	enum dsi_parser_prop_type type;
-	int len;
-};
-
-struct dsi_parser_node {
-	char *name;
-	char *data;
-
-	struct dsi_parser_prop *prop;
-	int prop_count;
-
-	struct dsi_parser_node *child[DSI_PARSER_MAX_NODES];
-	int children_count;
-};
-
-struct dsi_parser {
-	const struct firmware *fw;
-	struct dsi_parser_node *head_node;
-	struct dsi_parser_node *current_node;
-	struct device *dev;
-	char *buf;
-	char file_name[SZ_32];
-};
-
-static int dsi_parser_count(char *buf, int item)
-{
-	int count = 0;
-
-	do {
-		buf = strnchr(buf, strlen(buf), item);
-		if (buf)
-			count++;
-	} while (buf++);
-
-	return count;
-}
-
-static char *dsi_parser_clear_tail(char *buf)
-{
-	int size = strlen(buf);
-	char *end;
-
-	if (!size)
-		goto exit;
-
-	end = buf + size - 1;
-	while (end >= buf && *end == '*')
-		end--;
-
-	*(end + 1) = '\0';
-exit:
-	return buf;
-}
-
-static char *dsi_parser_strim(char *buf)
-{
-	strreplace(buf, '*', ' ');
-
-	return strim(buf);
-}
-
-static char *dsi_parser_get_data(char *start, char *end, char *str)
-{
-	strsep(&str, start);
-	if (str)
-		return dsi_parser_clear_tail(strsep(&str, end));
-
-	return NULL;
-}
-
-static bool dsi_parser_get_tuples_data(
-		struct dsi_parser_prop *prop, char *str)
-{
-	bool middle_of_tx = false;
-
-	if (!str) {
-		pr_err("Invalid input\n");
-		return middle_of_tx;
-	}
-
-	while (str) {
-		char *out = strsep(&str, " ");
-
-		if (str || middle_of_tx) {
-			middle_of_tx = true;
-
-			prop->items[prop->len++] = dsi_parser_strim(out);
-		}
-	}
-
-	return middle_of_tx;
-}
-
-static bool dsi_parser_get_strings(struct device *dev,
-			struct dsi_parser_prop *prop, char *str)
-{
-	bool middle_of_tx = false;
-	int i = 0;
-	int count = 0;
-
-	if (!str) {
-		pr_err("Invalid input\n");
-		goto end;
-	}
-
-	if (!dsi_parser_count(str, '"'))
-		goto end;
-
-	count = dsi_parser_count(str, ',');
-	pr_debug("count=%d\n", count);
-
-	if (!count) {
-		prop->value = dsi_parser_get_data("\"", "\"", str);
-		prop->type = DSI_PROP_TYPE_STR;
-		middle_of_tx = prop->value ? true : false;
-
-		goto end;
-	}
-
-	/* number of items are 1 more than separator */
-	count++;
-	prop->items = devm_kzalloc(dev, count, GFP_KERNEL);
-	if (!prop->items)
-		goto end;
-
-	prop->type = DSI_PROP_TYPE_STR_ARRAY;
-
-	while (str) {
-		char *out = strsep(&str, ",");
-
-		if ((str || middle_of_tx) && (i < count)) {
-			prop->items[i++] =
-				dsi_parser_get_data("\"", "\"", out);
-			prop->len++;
-
-			middle_of_tx = true;
-		}
-	}
-end:
-	return middle_of_tx;
-}
-
-static bool dsi_parser_get_tuples(struct device *dev,
-			struct dsi_parser_prop *prop, char *str)
-{
-	bool middle_of_tx = false;
-	char *data = NULL;
-
-	if (!str) {
-		pr_err("Invalid input\n");
-		return middle_of_tx;
-	}
-
-	while (str) {
-		char *out = strsep(&str, ",");
-
-		if (str || middle_of_tx) {
-			data = dsi_parser_get_data("<", ">", out);
-			middle_of_tx = true;
-
-			dsi_parser_get_tuples_data(prop, data);
-		}
-	}
-
-	return middle_of_tx;
-}
-
-static void dsi_parser_get_int_value(struct dsi_parser_prop *prop,
-					int forced_base)
-{
-	int i;
-
-	for (i = 0; i < prop->len; i++) {
-		int base, val;
-		char *to_int, *tmp;
-		char item[SZ_128];
-
-		strlcpy(item, prop->items[i], SZ_128);
-
-		tmp = item;
-
-		if (forced_base) {
-			base = forced_base;
-		} else {
-			to_int = strsep(&tmp, "x");
-
-			if (!tmp) {
-				tmp = to_int;
-				base = 10;
-			} else {
-				base = 16;
-			}
-		}
-
-		if (kstrtoint(tmp, base, &val)) {
-			pr_err("error converting %s at %d\n",
-				tmp, i);
-
-			continue;
-		}
-
-		prop->value[i] = val & 0xFF;
-	}
-}
-
-static bool dsi_parser_parse_prop(struct device *dev,
-		struct dsi_parser_prop *prop, char *buf)
-{
-	bool found = false;
-	char *out = strsep(&buf, "=");
-
-	if (!out || !buf)
-		goto end;
-
-	prop->raw = devm_kzalloc(dev, strlen(buf) + 1, GFP_KERNEL);
-	if (!prop->raw)
-		goto end;
-
-	strlcpy(prop->raw, buf, strlen(buf) + 1);
-
-	found = true;
-
-	prop->name = dsi_parser_strim(out);
-	pr_debug("RAW: %s: %s\n", prop->name, prop->raw);
-
-	prop->len = 0;
-
-	if (dsi_parser_get_strings(dev, prop, buf))
-		goto end;
-
-	prop->items = devm_kzalloc(dev, strlen(buf) * 2, GFP_KERNEL);
-	if (!prop->items)
-		goto end;
-
-	if (dsi_parser_get_tuples(dev, prop, buf)) {
-		prop->value = devm_kzalloc(dev, prop->len, GFP_KERNEL);
-		if (prop->value) {
-			prop->type = DSI_PROP_TYPE_INT_SET_ARRAY;
-			dsi_parser_get_int_value(prop, 0);
-		}
-		goto end;
-	}
-
-	prop->value = dsi_parser_get_data("<", ">", buf);
-	if (prop->value) {
-		if (dsi_parser_get_tuples_data(prop, prop->value)) {
-			prop->value = devm_kzalloc(dev, prop->len, GFP_KERNEL);
-			if (prop->value) {
-				prop->type = DSI_PROP_TYPE_INT_SET;
-				dsi_parser_get_int_value(prop, 0);
-			}
-			goto end;
-		} else {
-			prop->items[prop->len++] = prop->value;
-		}
-
-		goto end;
-	}
-
-	prop->value = dsi_parser_get_data("[", "]", buf);
-	if (prop->value) {
-		char *out5;
-
-		if (!prop->items)
-			goto end;
-
-		out5 = prop->value;
-		while (out5 && strlen(out5)) {
-			char *out6 = strsep(&out5, " ");
-
-			out6 = dsi_parser_strim(out6);
-			if (out6 && strlen(out6))
-				prop->items[prop->len++] = out6;
-		}
-
-		prop->value = devm_kzalloc(dev, prop->len, GFP_KERNEL);
-		if (prop->value) {
-			prop->type = DSI_PROP_TYPE_INT_ARRAY;
-
-			dsi_parser_get_int_value(prop, 16);
-		}
-	} else {
-		found = false;
-	}
-end:
-	return found;
-}
-
-static char *dsi_parser_clean_name(char *name)
-{
-	char *clean_name = name;
-
-	if (!name) {
-		pr_err("Invalid input\n");
-		return NULL;
-	}
-
-	while (name)
-		clean_name = strsep(&name, ";");
-
-	return dsi_parser_strim(clean_name);
-}
-
-static char *dsi_parser_get_blob(char **buf, bool *has_child)
-{
-	char *data = NULL;
-	char *start = *buf;
-
-	data = strpbrk(*buf, "{}");
-	if (!data)
-		goto end;
-
-	if (*data == '{')
-		*has_child = true;
-
-	if (*has_child) {
-		while (data != *buf) {
-			data--;
-			if (*data == ';') {
-				data++;
-				*data = '\0';
-				*buf = data + 1;
-				break;
-			}
-		}
-	} else {
-		*data = '\0';
-		*buf = data + 1;
-	}
-end:
-	return start;
-}
-
-static struct dsi_parser_node *dsi_parser_find_nodes(struct device *dev,
-							char **buf)
-{
-	struct dsi_parser_node *node = NULL, *cnode = NULL;
-	char *name, *data;
-	bool has_child = false;
-
-	if (!buf || !*buf)
-		goto end;
-
-	data = strpbrk(*buf, "{}");
-	if (!data) {
-		pr_debug("{} not found\n");
-		goto end;
-	}
-
-	if (*data == '}') {
-		*buf = data + 1;
-		goto end;
-	}
-
-	name = strsep(buf, "{");
-
-	if (*buf && name) {
-		node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
-		if (!node)
-			goto end;
-
-		node->name = dsi_parser_clean_name(name);
-		node->data = dsi_parser_get_blob(buf, &has_child);
-
-		if (!has_child)
-			goto end;
-
-		do {
-			cnode = dsi_parser_find_nodes(dev, buf);
-			if (cnode &&
-			    (node->children_count < DSI_PARSER_MAX_NODES))
-				node->child[node->children_count++] = cnode;
-		} while (cnode);
-	}
-end:
-	return node;
-}
-
-static void dsi_parser_count_properties(struct dsi_parser_node *node)
-{
-	int count;
-
-	if (node && strlen(node->data)) {
-		node->prop_count = dsi_parser_count(node->data, ';');
-
-		for (count = 0; count < node->children_count; count++)
-			dsi_parser_count_properties(node->child[count]);
-	}
-}
-
-static void dsi_parser_get_properties(struct device *dev,
-		struct dsi_parser_node *node)
-{
-	int count;
-
-	if (!node)
-		return;
-
-	if (node->prop_count) {
-		int i = 0;
-		char *buf = node->data;
-
-		node->prop = devm_kcalloc(dev, node->prop_count,
-				sizeof(struct dsi_parser_prop),
-				GFP_KERNEL);
-		if (!node->prop)
-			return;
-
-		for (i = 0; i < node->prop_count; i++) {
-			char *out = strsep(&buf, ";");
-			struct dsi_parser_prop *prop = &node->prop[i];
-
-			if (!out || !prop)
-				continue;
-
-			if (!dsi_parser_parse_prop(dev, prop, out)) {
-				char *out1 = strsep(&out, "}");
-
-				if (!out1)
-					continue;
-
-				out1 = dsi_parser_strim(out1);
-
-				if (!out && strlen(out1)) {
-					prop->name = out1;
-					prop->value = "1";
-				}
-			}
-		}
-	}
-
-	for (count = 0; count < node->children_count; count++)
-		dsi_parser_get_properties(dev, node->child[count]);
-}
-
-static struct dsi_parser_prop *dsi_parser_search_property(
-			struct dsi_parser_node *node,
-			const char *name)
-{
-	int i = 0;
-	struct dsi_parser_prop *prop = node->prop;
-
-	for (i = 0; i < node->prop_count; i++) {
-		if (prop[i].name && !strcmp(prop[i].name, name))
-			return &prop[i];
-	}
-
-	return NULL;
-}
-
-/* APIs for the clients */
-struct property *dsi_parser_find_property(const struct device_node *np,
-				  const char *name,
-				  int *lenp)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop = NULL;
-
-	if (!node || !name || !lenp)
-		goto end;
-
-	prop = dsi_parser_search_property(node, name);
-	if (!prop) {
-		pr_debug("%s not found\n", name);
-		goto end;
-	}
-
-	if (lenp) {
-		if (prop->type == DSI_PROP_TYPE_INT_ARRAY)
-			*lenp = prop->len;
-		else if (prop->type == DSI_PROP_TYPE_INT_SET_ARRAY ||
-			 prop->type == DSI_PROP_TYPE_INT_SET)
-			*lenp = prop->len * sizeof(u32);
-		else
-			*lenp = strlen(prop->raw) + 1;
-
-		pr_debug("%s len=%d\n", name, *lenp);
-	}
-end:
-	return (struct property *)prop;
-}
-
-bool dsi_parser_read_bool(const struct device_node *np,
-			const char *propname)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	bool prop_set;
-
-	prop_set = dsi_parser_search_property(node, propname) ? true : false;
-
-	pr_debug("%s=%s\n", propname, prop_set ? "set" : "not set");
-
-	return prop_set;
-}
-
-int dsi_parser_read_string(const struct device_node *np,
-			const char *propname, const char **out_string)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-	char *property = NULL;
-	int rc = 0;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		rc = -EINVAL;
-	} else {
-		property = prop->value;
-	}
-
-	*out_string = property;
-
-	pr_debug("%s=%s\n", propname, *out_string);
-	return rc;
-}
-
-int dsi_parser_read_u64(const struct device_node *np, const char *propname,
-			 u64 *out_value)
-{
-	return -EINVAL;
-}
-
-int dsi_parser_read_u32(const struct device_node *np,
-			const char *propname, u32 *out_value)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-	char *property, *to_int, item[SZ_128];
-	int rc = 0, base;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (!prop->value)
-		goto end;
-
-	strlcpy(item, prop->value, SZ_128);
-	property = item;
-	to_int = strsep(&property, "x");
-
-	if (!property) {
-		property = to_int;
-		base = 10;
-	} else {
-		base = 16;
-	}
-
-	rc = kstrtoint(property, base, out_value);
-	if (rc) {
-		pr_err("prop=%s error(%d) converting %s, base=%d\n",
-			propname, rc, property, base);
-		goto end;
-	}
-
-	pr_debug("%s=%d\n", propname, *out_value);
-end:
-	return rc;
-}
-
-int dsi_parser_read_u32_array(const struct device_node *np,
-			      const char *propname,
-			      u32 *out_values, size_t sz)
-{
-	int i, rc = 0;
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	for (i = 0; i < prop->len; i++) {
-		int base, val;
-		char item[SZ_128];
-		char *to_int, *tmp;
-
-		strlcpy(item, prop->items[i], SZ_128);
-
-		tmp = item;
-
-		to_int = strsep(&tmp, "x");
-
-		if (!tmp) {
-			tmp = to_int;
-			base = 10;
-		} else {
-			base = 16;
-		}
-
-		rc = kstrtoint(tmp, base, &val);
-		if (rc) {
-			pr_err("prop=%s error(%d) converting %s(%d), base=%d\n",
-				propname, rc, tmp, i, base);
-			continue;
-		}
-
-		*out_values++ = val;
-
-		pr_debug("%s: [%d]=%d\n", propname, i, *(out_values - 1));
-	}
-end:
-	return rc;
-}
-
-const void *dsi_parser_get_property(const struct device_node *np,
-			const char *name, int *lenp)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-	char *property = NULL;
-
-	prop = dsi_parser_search_property(node, name);
-	if (!prop) {
-		pr_debug("%s not found\n", name);
-		goto end;
-	}
-
-	property = prop->value;
-
-	if (prop->type == DSI_PROP_TYPE_STR)
-		pr_debug("%s=%s\n", name, property);
-
-	if (lenp) {
-		if (prop->type == DSI_PROP_TYPE_INT_ARRAY)
-			*lenp = prop->len;
-		else if (prop->type == DSI_PROP_TYPE_INT_SET_ARRAY ||
-			 prop->type == DSI_PROP_TYPE_INT_SET)
-			*lenp = prop->len * sizeof(u32);
-		else
-			*lenp = strlen(prop->raw) + 1;
-
-		pr_debug("%s len=%d\n", name, *lenp);
-	}
-end:
-	return property;
-}
-
-struct device_node *dsi_parser_get_child_by_name(const struct device_node *np,
-				const char *name)
-{
-	int index = 0;
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_node *matched_node = NULL;
-
-	if (!node || !node->children_count)
-		goto end;
-
-	do {
-		struct dsi_parser_node *child_node = node->child[index++];
-
-		if (!child_node)
-			goto end;
-
-		if (!strcmp(child_node->name, name)) {
-			matched_node = child_node;
-			break;
-		}
-	} while (index < node->children_count);
-end:
-	pr_debug("%s: %s\n", name, matched_node ? "found" : "not found");
-
-	return (struct device_node *)matched_node;
-}
-
-struct dsi_parser_node *dsi_parser_get_node_by_name(
-				struct dsi_parser_node *node,
-				char *name)
-{
-	int count = 0;
-	struct dsi_parser_node *matched_node = NULL;
-
-	if (!node) {
-		pr_err("node is null\n");
-		goto end;
-	}
-
-	if (!strcmp(node->name, name)) {
-		matched_node = node;
-		goto end;
-	}
-
-	for (count = 0; count < node->children_count; count++) {
-		matched_node = dsi_parser_get_node_by_name(
-				node->child[count], name);
-		if (matched_node)
-			break;
-	}
-end:
-	pr_debug("%s: %s\n", name, matched_node ? "found" : "not found");
-
-	return matched_node;
-}
-
-int dsi_parser_get_child_count(const struct device_node *np)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	int count = 0;
-
-	if (node) {
-		count = node->children_count;
-		pr_debug("node %s child count=%d\n", node->name, count);
-	}
-
-	return count;
-}
-
-struct device_node *dsi_parser_get_next_child(const struct device_node *np,
-	struct device_node *prev)
-{
-	int index = 0;
-	struct dsi_parser_node *parent = (struct dsi_parser_node *)np;
-	struct dsi_parser_node *prev_child = (struct dsi_parser_node *)prev;
-	struct dsi_parser_node *matched_node = NULL;
-
-	if (!parent || !parent->children_count)
-		goto end;
-
-	do {
-		struct dsi_parser_node *child_node = parent->child[index++];
-
-		if (!child_node)
-			goto end;
-
-		if (!prev) {
-			matched_node = child_node;
-			goto end;
-		}
-
-		if (!strcmp(child_node->name, prev_child->name)) {
-			if (index < parent->children_count)
-				matched_node = parent->child[index];
-			break;
-		}
-	} while (index < parent->children_count);
-end:
-	if (matched_node)
-		pr_debug("next child: %s\n", matched_node->name);
-
-	return (struct device_node *)matched_node;
-}
-
-int dsi_parser_count_u32_elems(const struct device_node *np,
-				const char *propname)
-{
-	int count = 0;
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		goto end;
-	}
-
-	count = prop->len;
-
-	pr_debug("prop %s has %d items\n", prop->name, count);
-end:
-	return count;
-}
-
-int dsi_parser_count_strings(const struct device_node *np,
-			    const char *propname)
-{
-	int count = 0;
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		goto end;
-	}
-
-	if (prop->type == DSI_PROP_TYPE_STR_ARRAY)
-		count = prop->len;
-	else if (prop->type == DSI_PROP_TYPE_STR)
-		count = 1;
-
-	pr_debug("prop %s has %d items\n", prop->name, count);
-end:
-	return count;
-}
-
-int dsi_parser_read_string_index(const struct device_node *np,
-				const char *propname,
-				int index, const char **output)
-{
-	struct dsi_parser_node *node = (struct dsi_parser_node *)np;
-	struct dsi_parser_prop *prop;
-
-	prop = dsi_parser_search_property(node, propname);
-	if (!prop) {
-		pr_debug("%s not found\n", propname);
-		goto end;
-	}
-
-	if (prop->type != DSI_PROP_TYPE_STR_ARRAY) {
-		pr_err("not a string array property\n");
-		goto end;
-	}
-
-	if (index >= prop->len) {
-		pr_err("out of bond index %d\n", index);
-		goto end;
-	}
-
-	*output = prop->items[index];
-
-	return 0;
-end:
-	return -EINVAL;
-}
-
-int dsi_parser_get_named_gpio(struct device_node *np,
-				const char *propname, int index)
-{
-	int gpio = -EINVAL;
-
-	dsi_parser_read_u32(np, propname, &gpio);
-
-	return gpio;
-}
-
-void *dsi_parser_get_head_node(void *in,
-				const u8 *data, u32 size)
-{
-	struct dsi_parser *parser = in;
-	char *buf;
-
-	if (!parser || !data || !size) {
-		pr_err("invalid input\n");
-		goto err;
-	}
-
-	parser->buf = devm_kzalloc(parser->dev, size, GFP_KERNEL);
-	if (!parser->buf)
-		goto err;
-
-	buf = parser->buf;
-
-	memcpy(buf, data, size);
-
-	strreplace(buf, '\n', ' ');
-	strreplace(buf, '\t', '*');
-
-	parser->head_node = dsi_parser_find_nodes(parser->dev, &buf);
-	if (!parser->head_node) {
-		pr_err("could not get head node\n");
-		devm_kfree(parser->dev, parser->buf);
-		goto err;
-	}
-
-	dsi_parser_count_properties(parser->head_node);
-	dsi_parser_get_properties(parser->dev, parser->head_node);
-
-	parser->current_node = parser->head_node;
-
-	return parser->head_node;
-err:
-	return NULL;
-}
-
-static int dsi_parser_read_file(struct dsi_parser *parser,
-				const u8 **buf, u32 *size)
-{
-	int rc = 0;
-
-	release_firmware(parser->fw);
-
-	rc = request_firmware(&parser->fw, parser->file_name, parser->dev);
-	if (rc || !parser->fw) {
-		pr_err("couldn't read firmware\n");
-		goto end;
-	}
-
-	*buf = parser->fw->data;
-	*size = parser->fw->size;
-
-	pr_debug("file %s: size %zd\n",
-		parser->file_name, parser->fw->size);
-end:
-	return rc;
-}
-
-static void dsi_parser_free_mem(struct device *dev,
-				struct dsi_parser_node *node)
-{
-	int i = 0;
-
-	if (!node)
-		return;
-
-	pr_debug("node=%s, prop_count=%d\n", node->name, node->prop_count);
-
-	for (i = 0; i < node->prop_count; i++) {
-		struct dsi_parser_prop *prop = &node->prop[i];
-
-		if (!prop)
-			continue;
-
-		pr_debug("deleting prop=%s\n", prop->name);
-
-		if (prop->items)
-			devm_kfree(dev, prop->items);
-
-		if (prop->raw)
-			devm_kfree(dev, prop->raw);
-
-		if ((prop->type == DSI_PROP_TYPE_INT_SET_ARRAY ||
-		     prop->type == DSI_PROP_TYPE_INT_SET ||
-		     prop->type == DSI_PROP_TYPE_INT_ARRAY) && prop->value)
-			devm_kfree(dev, prop->value);
-	}
-
-	if (node->prop)
-		devm_kfree(dev, node->prop);
-
-	for (i = 0; i < node->children_count; i++)
-		dsi_parser_free_mem(dev, node->child[i]);
-
-	devm_kfree(dev, node);
-}
-
-static ssize_t dsi_parser_write_init(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dsi_parser *parser = file->private_data;
-	const u8 *data = NULL;
-	u32 size = 0;
-	char buf[SZ_32];
-	size_t len = 0;
-
-	if (!parser)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_32 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	if (sscanf(buf, "%31s", parser->file_name) != 1) {
-		pr_err("failed to get val\n");
-		goto end;
-	}
-
-	if (dsi_parser_read_file(parser, &data, &size)) {
-		pr_err("failed to read file\n");
-		goto end;
-	}
-
-	dsi_parser_free_mem(parser->dev, parser->head_node);
-
-	if (parser->buf) {
-		devm_kfree(parser->dev, parser->buf);
-		parser->buf = NULL;
-	}
-
-	parser->head_node = dsi_parser_get_head_node(parser, data, size);
-	if (!parser->head_node) {
-		pr_err("failed to parse data\n");
-		goto end;
-	}
-end:
-	return len;
-}
-
-static ssize_t dsi_parser_read_node(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	char *buf = NULL;
-	int i, j, len = 0, max_size = SZ_4K;
-	struct dsi_parser *parser = file->private_data;
-	struct dsi_parser_node *node;
-	struct dsi_parser_prop *prop;
-
-	if (!parser)
-		return -ENODEV;
-
-	if (*ppos)
-		return len;
-
-	buf = kzalloc(SZ_4K, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	node = parser->current_node;
-	if (!node) {
-		len = -EINVAL;
-		goto error;
-	}
-
-	prop = node->prop;
-
-	len += scnprintf(buf + len, max_size - len, "node name=%s\n",
-		node->name);
-	if (len == max_size)
-		goto buffer_overflow;
-
-	len += scnprintf(buf + len, max_size - len, "children count=%d\n",
-		node->children_count);
-	if (len == max_size)
-		goto buffer_overflow;
-
-	for (i = 0; i < node->children_count; i++) {
-		len += scnprintf(buf + len, max_size - len, "child[%d]=%s\n",
-			i, node->child[i]->name);
-		if (len == max_size)
-			goto buffer_overflow;
-	}
-
-	for (i = 0; i < node->prop_count; i++) {
-		if (!prop[i].name)
-			continue;
-
-		len += scnprintf(buf + len, max_size - len,
-			"property=%s\n", prop[i].name);
-		if (len == max_size)
-			goto buffer_overflow;
-
-		if (prop[i].value) {
-			if (prop[i].type == DSI_PROP_TYPE_STR) {
-				len += scnprintf(buf + len, max_size - len,
-					"value=%s\n", prop[i].value);
-				if (len == max_size)
-					goto buffer_overflow;
-			} else {
-				for (j = 0; j < prop[i].len; j++) {
-					len += scnprintf(buf + len,
-						max_size - len,
-						"%x", prop[i].value[j]);
-					if (len == max_size)
-						goto buffer_overflow;
-				}
-
-				len += scnprintf(buf + len, max_size - len,
-						"\n");
-				if (len == max_size)
-					goto buffer_overflow;
-
-			}
-		}
-
-		if (prop[i].len) {
-			len += scnprintf(buf + len, max_size - len, "items:\n");
-			if (len == max_size)
-				goto buffer_overflow;
-		}
-
-		for (j = 0; j < prop[i].len; j++) {
-			char delim;
-
-			if (j && !(j % 10))
-				delim = '\n';
-			else
-				delim = ' ';
-
-			len += scnprintf(buf + len, max_size - len, "%s%c",
-				prop[i].items[j], delim);
-			if (len == max_size)
-				goto buffer_overflow;
-		}
-
-		len += scnprintf(buf + len, max_size - len, "\n\n");
-		if (len == max_size)
-			goto buffer_overflow;
-	}
-buffer_overflow:
-	if (simple_read_from_buffer(user_buff, count, ppos, buf, len)) {
-		len = -EFAULT;
-		goto error;
-	}
-error:
-	kfree(buf);
-
-	return len;
-}
-
-static ssize_t dsi_parser_write_node(struct file *file,
-		const char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct dsi_parser *parser = file->private_data;
-	char buf[SZ_512];
-	size_t len = 0;
-
-	if (!parser)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;
-
-	/* Leave room for termination char */
-	len = min_t(size_t, count, SZ_512 - 1);
-	if (copy_from_user(buf, user_buff, len))
-		goto end;
-
-	buf[len] = '\0';
-
-	strreplace(buf, '\n', ' ');
-
-	if (!strcmp(strim(buf), "head_node"))
-		parser->current_node = parser->head_node;
-	else
-		parser->current_node = dsi_parser_get_node_by_name(
-					parser->head_node, strim(buf));
-end:
-	return len;
-}
-
-static const struct file_operations dsi_parser_init_fops = {
-	.open = simple_open,
-	.write = dsi_parser_write_init,
-};
-
-static const struct file_operations dsi_parser_node_fops = {
-	.open = simple_open,
-	.read = dsi_parser_read_node,
-	.write = dsi_parser_write_node,
-};
-
-int dsi_parser_dbg_init(void *parser, struct dentry *parent_dir)
-{
-	int rc = 0;
-	struct dentry *dir, *file;
-
-	if (!parser || !parent_dir) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	dir = debugfs_create_dir("parser", parent_dir);
-	if (IS_ERR_OR_NULL(dir)) {
-		rc = PTR_ERR(dir);
-
-		pr_err("failed to create parser debugfs\n");
-		goto end;
-	}
-
-	file = debugfs_create_file("init", 0644, dir,
-		parser, &dsi_parser_init_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-
-		pr_err("failed to create init debugfs\n");
-		goto dbg;
-	}
-
-	file = debugfs_create_file("node", 0644, dir,
-		parser, &dsi_parser_node_fops);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-
-		pr_err("failed to create init debugfs\n");
-		goto dbg;
-	}
-
-	pr_debug("success\n");
-	return 0;
-dbg:
-	debugfs_remove_recursive(dir);
-end:
-	return rc;
-}
-
-void *dsi_parser_get(struct device *dev)
-{
-	int rc = 0;
-	struct dsi_parser *parser = NULL;
-
-	if (!dev) {
-		pr_err("invalid data\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	parser = devm_kzalloc(dev, sizeof(*parser), GFP_KERNEL);
-	if (!parser) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	parser->dev = dev;
-
-	strlcpy(parser->file_name, "dsi_prop", sizeof(parser->file_name));
-
-	return parser;
-end:
-	return ERR_PTR(rc);
-}
-
-void dsi_parser_put(void *data)
-{
-	struct dsi_parser *parser = data;
-
-	if (!parser)
-		return;
-
-	dsi_parser_free_mem(parser->dev, parser->head_node);
-
-	devm_kfree(parser->dev, parser->buf);
-	devm_kfree(parser->dev, parser);
-}
-
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.h b/drivers/gpu/drm/msm/dsi-staging/dsi_parser.h
deleted file mode 100644
index 28cffa0a..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_parser.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PARSER_H_
-#define _DSI_PARSER_H_
-
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-
-#ifdef CONFIG_DSI_PARSER
-void *dsi_parser_get(struct device *dev);
-void dsi_parser_put(void *data);
-int dsi_parser_dbg_init(void *parser, struct dentry *dir);
-void *dsi_parser_get_head_node(void *parser,
-		const u8 *data, u32 size);
-
-const void *dsi_parser_get_property(const struct device_node *np,
-			const char *name, int *lenp);
-bool dsi_parser_read_bool(const struct device_node *np,
-			const char *propname);
-int dsi_parser_read_u64(const struct device_node *np, const char *propname,
-			 u64 *out_value);
-int dsi_parser_read_u32(const struct device_node *np,
-			const char *propname, u32 *out_value);
-int dsi_parser_read_u32_array(const struct device_node *np,
-			const char *propname,
-			u32 *out_values, size_t sz);
-int dsi_parser_read_string(const struct device_node *np,
-			const char *propname, const char **out_string);
-struct device_node *dsi_parser_get_child_by_name(const struct device_node *node,
-				const char *name);
-int dsi_parser_get_child_count(const struct device_node *np);
-struct property *dsi_parser_find_property(const struct device_node *np,
-			const char *name, int *lenp);
-struct device_node *dsi_parser_get_next_child(const struct device_node *np,
-	struct device_node *prev);
-int dsi_parser_count_u32_elems(const struct device_node *np,
-				const char *propname);
-int dsi_parser_count_strings(const struct device_node *np,
-			    const char *propname);
-int dsi_parser_read_string_index(const struct device_node *np,
-				const char *propname,
-				int index, const char **output);
-int dsi_parser_get_named_gpio(struct device_node *np,
-				const char *propname, int index);
-#else /* CONFIG_DSI_PARSER */
-static inline void *dsi_parser_get(struct device *dev)
-{
-	return NULL;
-}
-
-static inline void dsi_parser_put(void *data)
-{
-}
-
-static inline int dsi_parser_dbg_init(void *parser, struct dentry *dir)
-{
-	return -ENODEV;
-}
-
-static inline void *dsi_parser_get_head_node(void *parser,
-		const u8 *data, u32 size)
-{
-	return NULL;
-}
-
-static inline const void *dsi_parser_get_property(const struct device_node *np,
-			const char *name, int *lenp)
-{
-	return NULL;
-}
-
-static inline bool dsi_parser_read_bool(const struct device_node *np,
-			const char *propname)
-{
-	return false;
-}
-
-static inline int dsi_parser_read_u64(const struct device_node *np,
-			const char *propname, u64 *out_value)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_read_u32(const struct device_node *np,
-			const char *propname, u32 *out_value)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_read_u32_array(const struct device_node *np,
-			const char *propname, u32 *out_values, size_t sz)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_read_string(const struct device_node *np,
-			const char *propname, const char **out_string)
-{
-	return -ENODEV;
-}
-
-static inline struct device_node *dsi_parser_get_child_by_name(
-				const struct device_node *node,
-				const char *name)
-{
-	return NULL;
-}
-
-static inline int dsi_parser_get_child_count(const struct device_node *np)
-{
-	return -ENODEV;
-}
-
-static inline struct property *dsi_parser_find_property(
-			const struct device_node *np,
-			const char *name, int *lenp)
-{
-	return NULL;
-}
-
-static inline struct device_node *dsi_parser_get_next_child(
-				const struct device_node *np,
-				struct device_node *prev)
-{
-	return NULL;
-}
-
-static inline int dsi_parser_count_u32_elems(const struct device_node *np,
-				const char *propname)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_count_strings(const struct device_node *np,
-			    const char *propname)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_read_string_index(const struct device_node *np,
-				const char *propname,
-				int index, const char **output)
-{
-	return -ENODEV;
-}
-
-static inline int dsi_parser_get_named_gpio(struct device_node *np,
-				const char *propname, int index)
-{
-	return -ENODEV;
-}
-
-#endif /* CONFIG_DSI_PARSER */
-
-#define dsi_for_each_child_node(parent, child) \
-	for (child = utils->get_next_child(parent, NULL); \
-	     child != NULL; \
-	     child = utils->get_next_child(parent, child))
-
-struct dsi_parser_utils {
-	void *data;
-	struct device_node *node;
-
-	const void *(*get_property)(const struct device_node *np,
-			const char *name, int *lenp);
-	int (*read_u64)(const struct device_node *np,
-			const char *propname, u64 *out_value);
-	int (*read_u32)(const struct device_node *np,
-			const char *propname, u32 *out_value);
-	bool (*read_bool)(const struct device_node *np,
-			 const char *propname);
-	int (*read_u32_array)(const struct device_node *np,
-			const char *propname, u32 *out_values, size_t sz);
-	int (*read_string)(const struct device_node *np, const char *propname,
-				const char **out_string);
-	struct device_node *(*get_child_by_name)(
-				const struct device_node *node,
-				const char *name);
-	int (*get_child_count)(const struct device_node *np);
-	struct property *(*find_property)(const struct device_node *np,
-			const char *name, int *lenp);
-	struct device_node *(*get_next_child)(const struct device_node *np,
-		struct device_node *prev);
-	int (*count_u32_elems)(const struct device_node *np,
-		const char *propname);
-	int (*get_named_gpio)(struct device_node *np,
-				const char *propname, int index);
-	int (*get_available_child_count)(const struct device_node *np);
-};
-
-static inline struct dsi_parser_utils *dsi_parser_get_of_utils(void)
-{
-	static struct dsi_parser_utils of_utils = {
-		.get_property = of_get_property,
-		.read_bool = of_property_read_bool,
-		.read_u64 = of_property_read_u64,
-		.read_u32 = of_property_read_u32,
-		.read_u32_array = of_property_read_u32_array,
-		.read_string = of_property_read_string,
-		.get_child_by_name = of_get_child_by_name,
-		.get_child_count = of_get_child_count,
-		.get_available_child_count = of_get_available_child_count,
-		.find_property = of_find_property,
-		.get_next_child = of_get_next_child,
-		.count_u32_elems = of_property_count_u32_elems,
-		.get_named_gpio = of_get_named_gpio,
-	};
-
-	return &of_utils;
-}
-
-static inline struct dsi_parser_utils *dsi_parser_get_parser_utils(void)
-{
-	static struct dsi_parser_utils parser_utils = {
-		.get_property = dsi_parser_get_property,
-		.read_bool = dsi_parser_read_bool,
-		.read_u64 = dsi_parser_read_u64,
-		.read_u32 = dsi_parser_read_u32,
-		.read_u32_array = dsi_parser_read_u32_array,
-		.read_string = dsi_parser_read_string,
-		.get_child_by_name = dsi_parser_get_child_by_name,
-		.get_child_count = dsi_parser_get_child_count,
-		.get_available_child_count = dsi_parser_get_child_count,
-		.find_property = dsi_parser_find_property,
-		.get_next_child = dsi_parser_get_next_child,
-		.count_u32_elems = dsi_parser_count_u32_elems,
-		.get_named_gpio = dsi_parser_get_named_gpio,
-	};
-
-	return &parser_utils;
-}
-#endif
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
deleted file mode 100644
index 9a1ccc1..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ /dev/null
@@ -1,1131 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"msm-dsi-phy:[%s] " fmt, __func__
-
-#include <linux/of_device.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#include <linux/msm-bus.h>
-#include <linux/list.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "msm_gpu.h"
-#include "dsi_phy.h"
-#include "dsi_phy_hw.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "dsi_catalog.h"
-
-#include "sde_dbg.h"
-
-#define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
-
-#define BITS_PER_BYTE	8
-
-struct dsi_phy_list_item {
-	struct msm_dsi_phy *phy;
-	struct list_head list;
-};
-
-static LIST_HEAD(dsi_phy_list);
-static DEFINE_MUTEX(dsi_phy_list_lock);
-
-static const struct dsi_ver_spec_info dsi_phy_v0_0_hpm = {
-	.version = DSI_PHY_VERSION_0_0_HPM,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 1,
-	.timing_cfg_count = 8,
-};
-static const struct dsi_ver_spec_info dsi_phy_v0_0_lpm = {
-	.version = DSI_PHY_VERSION_0_0_LPM,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 1,
-	.timing_cfg_count = 8,
-};
-static const struct dsi_ver_spec_info dsi_phy_v1_0 = {
-	.version = DSI_PHY_VERSION_1_0,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 1,
-	.timing_cfg_count = 8,
-};
-static const struct dsi_ver_spec_info dsi_phy_v2_0 = {
-	.version = DSI_PHY_VERSION_2_0,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 1,
-	.timing_cfg_count = 8,
-};
-static const struct dsi_ver_spec_info dsi_phy_v3_0 = {
-	.version = DSI_PHY_VERSION_3_0,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 0,
-	.timing_cfg_count = 12,
-};
-
-static const struct dsi_ver_spec_info dsi_phy_v4_0 = {
-	.version = DSI_PHY_VERSION_4_0,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 0,
-	.timing_cfg_count = 14,
-};
-
-static const struct dsi_ver_spec_info dsi_phy_v4_1 = {
-	.version = DSI_PHY_VERSION_4_1,
-	.lane_cfg_count = 4,
-	.strength_cfg_count = 2,
-	.regulator_cfg_count = 0,
-	.timing_cfg_count = 14,
-};
-
-static const struct of_device_id msm_dsi_phy_of_match[] = {
-	{ .compatible = "qcom,dsi-phy-v0.0-hpm",
-	  .data = &dsi_phy_v0_0_hpm,},
-	{ .compatible = "qcom,dsi-phy-v0.0-lpm",
-	  .data = &dsi_phy_v0_0_lpm,},
-	{ .compatible = "qcom,dsi-phy-v1.0",
-	  .data = &dsi_phy_v1_0,},
-	{ .compatible = "qcom,dsi-phy-v2.0",
-	  .data = &dsi_phy_v2_0,},
-	{ .compatible = "qcom,dsi-phy-v3.0",
-	  .data = &dsi_phy_v3_0,},
-	{ .compatible = "qcom,dsi-phy-v4.0",
-	  .data = &dsi_phy_v4_0,},
-	{ .compatible = "qcom,dsi-phy-v4.1",
-	  .data = &dsi_phy_v4_1,},
-	{}
-};
-
-static int dsi_phy_regmap_init(struct platform_device *pdev,
-			       struct msm_dsi_phy *phy)
-{
-	int rc = 0;
-	void __iomem *ptr;
-
-	ptr = msm_ioremap(pdev, "dsi_phy", phy->name);
-	if (IS_ERR(ptr)) {
-		rc = PTR_ERR(ptr);
-		return rc;
-	}
-
-	phy->hw.base = ptr;
-
-	pr_debug("[%s] map dsi_phy registers to %pK\n",
-		phy->name, phy->hw.base);
-
-	switch (phy->ver_info->version) {
-	case DSI_PHY_VERSION_2_0:
-		ptr = msm_ioremap(pdev, "phy_clamp_base", phy->name);
-		if (IS_ERR(ptr))
-			phy->hw.phy_clamp_base = NULL;
-		else
-			phy->hw.phy_clamp_base = ptr;
-		break;
-	default:
-		break;
-	}
-
-	return rc;
-}
-
-static int dsi_phy_regmap_deinit(struct msm_dsi_phy *phy)
-{
-	pr_debug("[%s] unmap registers\n", phy->name);
-	return 0;
-}
-
-static int dsi_phy_supplies_init(struct platform_device *pdev,
-				 struct msm_dsi_phy *phy)
-{
-	int rc = 0;
-	int i = 0;
-	struct dsi_regulator_info *regs;
-	struct regulator *vreg = NULL;
-
-	regs = &phy->pwr_info.digital;
-	regs->vregs = devm_kzalloc(&pdev->dev, sizeof(struct dsi_vreg),
-				   GFP_KERNEL);
-	if (!regs->vregs)
-		goto error;
-
-	regs->count = 1;
-	snprintf(regs->vregs->vreg_name,
-		 ARRAY_SIZE(regs->vregs[i].vreg_name),
-		 "%s", "gdsc");
-
-	rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
-					  &phy->pwr_info.phy_pwr,
-					  "qcom,phy-supply-entries");
-	if (rc) {
-		pr_err("failed to get host power supplies, rc = %d\n", rc);
-		goto error_digital;
-	}
-
-	regs = &phy->pwr_info.digital;
-	for (i = 0; i < regs->count; i++) {
-		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
-		rc = PTR_RET(vreg);
-		if (rc) {
-			pr_err("failed to get %s regulator\n",
-			       regs->vregs[i].vreg_name);
-			goto error_host_pwr;
-		}
-		regs->vregs[i].vreg = vreg;
-	}
-
-	regs = &phy->pwr_info.phy_pwr;
-	for (i = 0; i < regs->count; i++) {
-		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
-		rc = PTR_RET(vreg);
-		if (rc) {
-			pr_err("failed to get %s regulator\n",
-			       regs->vregs[i].vreg_name);
-			for (--i; i >= 0; i--)
-				devm_regulator_put(regs->vregs[i].vreg);
-			goto error_digital_put;
-		}
-		regs->vregs[i].vreg = vreg;
-	}
-
-	return rc;
-
-error_digital_put:
-	regs = &phy->pwr_info.digital;
-	for (i = 0; i < regs->count; i++)
-		devm_regulator_put(regs->vregs[i].vreg);
-error_host_pwr:
-	devm_kfree(&pdev->dev, phy->pwr_info.phy_pwr.vregs);
-	phy->pwr_info.phy_pwr.vregs = NULL;
-	phy->pwr_info.phy_pwr.count = 0;
-error_digital:
-	devm_kfree(&pdev->dev, phy->pwr_info.digital.vregs);
-	phy->pwr_info.digital.vregs = NULL;
-	phy->pwr_info.digital.count = 0;
-error:
-	return rc;
-}
-
-static int dsi_phy_supplies_deinit(struct msm_dsi_phy *phy)
-{
-	int i = 0;
-	int rc = 0;
-	struct dsi_regulator_info *regs;
-
-	regs = &phy->pwr_info.digital;
-	for (i = 0; i < regs->count; i++) {
-		if (!regs->vregs[i].vreg)
-			pr_err("vreg is NULL, should not reach here\n");
-		else
-			devm_regulator_put(regs->vregs[i].vreg);
-	}
-
-	regs = &phy->pwr_info.phy_pwr;
-	for (i = 0; i < regs->count; i++) {
-		if (!regs->vregs[i].vreg)
-			pr_err("vreg is NULL, should not reach here\n");
-		else
-			devm_regulator_put(regs->vregs[i].vreg);
-	}
-
-	if (phy->pwr_info.phy_pwr.vregs) {
-		devm_kfree(&phy->pdev->dev, phy->pwr_info.phy_pwr.vregs);
-		phy->pwr_info.phy_pwr.vregs = NULL;
-		phy->pwr_info.phy_pwr.count = 0;
-	}
-	if (phy->pwr_info.digital.vregs) {
-		devm_kfree(&phy->pdev->dev, phy->pwr_info.digital.vregs);
-		phy->pwr_info.digital.vregs = NULL;
-		phy->pwr_info.digital.count = 0;
-	}
-
-	return rc;
-}
-
-static int dsi_phy_parse_dt_per_lane_cfgs(struct platform_device *pdev,
-					  struct dsi_phy_per_lane_cfgs *cfg,
-					  char *property)
-{
-	int rc = 0, i = 0, j = 0;
-	const u8 *data;
-	u32 len = 0;
-
-	data = of_get_property(pdev->dev.of_node, property, &len);
-	if (!data) {
-		pr_err("Unable to read Phy %s settings\n", property);
-		return -EINVAL;
-	}
-
-	if (len != DSI_LANE_MAX * cfg->count_per_lane) {
-		pr_err("incorrect phy %s settings, exp=%d, act=%d\n",
-		       property, (DSI_LANE_MAX * cfg->count_per_lane), len);
-		return -EINVAL;
-	}
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		for (j = 0; j < cfg->count_per_lane; j++) {
-			cfg->lane[i][j] = *data;
-			data++;
-		}
-	}
-
-	return rc;
-}
-
-static int dsi_phy_settings_init(struct platform_device *pdev,
-				 struct msm_dsi_phy *phy)
-{
-	int rc = 0;
-	struct dsi_phy_per_lane_cfgs *lane = &phy->cfg.lanecfg;
-	struct dsi_phy_per_lane_cfgs *strength = &phy->cfg.strength;
-	struct dsi_phy_per_lane_cfgs *timing = &phy->cfg.timing;
-	struct dsi_phy_per_lane_cfgs *regs = &phy->cfg.regulators;
-
-	lane->count_per_lane = phy->ver_info->lane_cfg_count;
-	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, lane,
-					    "qcom,platform-lane-config");
-	if (rc) {
-		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
-		goto err;
-	}
-
-	strength->count_per_lane = phy->ver_info->strength_cfg_count;
-	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, strength,
-					    "qcom,platform-strength-ctrl");
-	if (rc) {
-		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
-		goto err;
-	}
-
-	regs->count_per_lane = phy->ver_info->regulator_cfg_count;
-	if (regs->count_per_lane > 0) {
-		rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, regs,
-					    "qcom,platform-regulator-settings");
-		if (rc) {
-			pr_err("failed to parse lane cfgs, rc=%d\n", rc);
-			goto err;
-		}
-	}
-
-	/* Actual timing values are dependent on panel */
-	timing->count_per_lane = phy->ver_info->timing_cfg_count;
-
-	phy->allow_phy_power_off = of_property_read_bool(pdev->dev.of_node,
-			"qcom,panel-allow-phy-poweroff");
-
-	of_property_read_u32(pdev->dev.of_node,
-			"qcom,dsi-phy-regulator-min-datarate-bps",
-			&phy->regulator_min_datarate_bps);
-
-	phy->cfg.force_clk_lane_hs = of_property_read_bool(pdev->dev.of_node,
-			"qcom,panel-force-clock-lane-hs");
-
-	return 0;
-err:
-	lane->count_per_lane = 0;
-	strength->count_per_lane = 0;
-	regs->count_per_lane = 0;
-	timing->count_per_lane = 0;
-	return rc;
-}
-
-static int dsi_phy_settings_deinit(struct msm_dsi_phy *phy)
-{
-	memset(&phy->cfg.lanecfg, 0x0, sizeof(phy->cfg.lanecfg));
-	memset(&phy->cfg.strength, 0x0, sizeof(phy->cfg.strength));
-	memset(&phy->cfg.timing, 0x0, sizeof(phy->cfg.timing));
-	memset(&phy->cfg.regulators, 0x0, sizeof(phy->cfg.regulators));
-	return 0;
-}
-
-static int dsi_phy_driver_probe(struct platform_device *pdev)
-{
-	struct msm_dsi_phy *dsi_phy;
-	struct dsi_phy_list_item *item;
-	const struct of_device_id *id;
-	const struct dsi_ver_spec_info *ver_info;
-	int rc = 0;
-	u32 index = 0;
-
-	if (!pdev || !pdev->dev.of_node) {
-		pr_err("pdev not found\n");
-		return -ENODEV;
-	}
-
-	id = of_match_node(msm_dsi_phy_of_match, pdev->dev.of_node);
-	if (!id)
-		return -ENODEV;
-
-	ver_info = id->data;
-
-	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
-	if (!item)
-		return -ENOMEM;
-
-
-	dsi_phy = devm_kzalloc(&pdev->dev, sizeof(*dsi_phy), GFP_KERNEL);
-	if (!dsi_phy) {
-		devm_kfree(&pdev->dev, item);
-		return -ENOMEM;
-	}
-
-	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
-	if (rc) {
-		pr_debug("cell index not set, default to 0\n");
-		index = 0;
-	}
-
-	dsi_phy->index = index;
-
-	dsi_phy->name = of_get_property(pdev->dev.of_node, "label", NULL);
-	if (!dsi_phy->name)
-		dsi_phy->name = DSI_PHY_DEFAULT_LABEL;
-
-	pr_debug("Probing %s device\n", dsi_phy->name);
-
-	dsi_phy->ver_info = ver_info;
-
-	rc = dsi_phy_regmap_init(pdev, dsi_phy);
-	if (rc) {
-		pr_err("Failed to parse register information, rc=%d\n", rc);
-		goto fail;
-	}
-
-	rc = dsi_phy_supplies_init(pdev, dsi_phy);
-	if (rc) {
-		pr_err("failed to parse voltage supplies, rc = %d\n", rc);
-		goto fail_regmap;
-	}
-
-	rc = dsi_catalog_phy_setup(&dsi_phy->hw, ver_info->version,
-				   dsi_phy->index);
-	if (rc) {
-		pr_err("Catalog does not support version (%d)\n",
-		       ver_info->version);
-		goto fail_supplies;
-	}
-
-	rc = dsi_phy_settings_init(pdev, dsi_phy);
-	if (rc) {
-		pr_err("Failed to parse phy setting, rc=%d\n", rc);
-		goto fail_supplies;
-	}
-
-	item->phy = dsi_phy;
-
-	mutex_lock(&dsi_phy_list_lock);
-	list_add(&item->list, &dsi_phy_list);
-	mutex_unlock(&dsi_phy_list_lock);
-
-	mutex_init(&dsi_phy->phy_lock);
-	/** TODO: initialize debugfs */
-	dsi_phy->pdev = pdev;
-	platform_set_drvdata(pdev, dsi_phy);
-	pr_info("Probe successful for %s\n", dsi_phy->name);
-	return 0;
-
-fail_supplies:
-	(void)dsi_phy_supplies_deinit(dsi_phy);
-fail_regmap:
-	(void)dsi_phy_regmap_deinit(dsi_phy);
-fail:
-	devm_kfree(&pdev->dev, dsi_phy);
-	devm_kfree(&pdev->dev, item);
-	return rc;
-}
-
-static int dsi_phy_driver_remove(struct platform_device *pdev)
-{
-	int rc = 0;
-	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
-	struct list_head *pos, *tmp;
-
-	if (!pdev || !phy) {
-		pr_err("Invalid device\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_phy_list_lock);
-	list_for_each_safe(pos, tmp, &dsi_phy_list) {
-		struct dsi_phy_list_item *n;
-
-		n = list_entry(pos, struct dsi_phy_list_item, list);
-		if (n->phy == phy) {
-			list_del(&n->list);
-			devm_kfree(&pdev->dev, n);
-			break;
-		}
-	}
-	mutex_unlock(&dsi_phy_list_lock);
-
-	mutex_lock(&phy->phy_lock);
-	rc = dsi_phy_settings_deinit(phy);
-	if (rc)
-		pr_err("failed to deinitialize phy settings, rc=%d\n", rc);
-
-	rc = dsi_phy_supplies_deinit(phy);
-	if (rc)
-		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
-
-	rc = dsi_phy_regmap_deinit(phy);
-	if (rc)
-		pr_err("failed to deinitialize regmap, rc=%d\n", rc);
-	mutex_unlock(&phy->phy_lock);
-
-	mutex_destroy(&phy->phy_lock);
-	devm_kfree(&pdev->dev, phy);
-
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static struct platform_driver dsi_phy_platform_driver = {
-	.probe      = dsi_phy_driver_probe,
-	.remove     = dsi_phy_driver_remove,
-	.driver     = {
-		.name   = "dsi_phy",
-		.of_match_table = msm_dsi_phy_of_match,
-	},
-};
-
-static void dsi_phy_enable_hw(struct msm_dsi_phy *phy)
-{
-	if (phy->hw.ops.regulator_enable)
-		phy->hw.ops.regulator_enable(&phy->hw, &phy->cfg.regulators);
-
-	if (phy->hw.ops.enable)
-		phy->hw.ops.enable(&phy->hw, &phy->cfg);
-}
-
-static void dsi_phy_disable_hw(struct msm_dsi_phy *phy)
-{
-	if (phy->hw.ops.disable)
-		phy->hw.ops.disable(&phy->hw, &phy->cfg);
-
-	if (phy->hw.ops.regulator_disable)
-		phy->hw.ops.regulator_disable(&phy->hw);
-}
-
-/**
- * dsi_phy_get() - get a dsi phy handle from device node
- * @of_node:           device node for dsi phy controller
- *
- * Gets the DSI PHY handle for the corresponding of_node. The ref count is
- * incremented to one all subsequents get will fail until the original client
- * calls a put.
- *
- * Return: DSI PHY handle or an error code.
- */
-struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node)
-{
-	struct list_head *pos, *tmp;
-	struct msm_dsi_phy *phy = NULL;
-
-	mutex_lock(&dsi_phy_list_lock);
-	list_for_each_safe(pos, tmp, &dsi_phy_list) {
-		struct dsi_phy_list_item *n;
-
-		n = list_entry(pos, struct dsi_phy_list_item, list);
-		if (n->phy->pdev->dev.of_node == of_node) {
-			phy = n->phy;
-			break;
-		}
-	}
-	mutex_unlock(&dsi_phy_list_lock);
-
-	if (!phy) {
-		pr_err("Device with of node not found\n");
-		phy = ERR_PTR(-EPROBE_DEFER);
-		return phy;
-	}
-
-	mutex_lock(&phy->phy_lock);
-	if (phy->refcount > 0) {
-		pr_err("[PHY_%d] Device under use\n", phy->index);
-		phy = ERR_PTR(-EINVAL);
-	} else {
-		phy->refcount++;
-	}
-	mutex_unlock(&phy->phy_lock);
-	return phy;
-}
-
-/**
- * dsi_phy_put() - release dsi phy handle
- * @dsi_phy:              DSI PHY handle.
- *
- * Release the DSI PHY hardware. Driver will clean up all resources and puts
- * back the DSI PHY into reset state.
- */
-void dsi_phy_put(struct msm_dsi_phy *dsi_phy)
-{
-	mutex_lock(&dsi_phy->phy_lock);
-
-	if (dsi_phy->refcount == 0)
-		pr_err("Unbalanced %s call\n", __func__);
-	else
-		dsi_phy->refcount--;
-
-	mutex_unlock(&dsi_phy->phy_lock);
-}
-
-/**
- * dsi_phy_drv_init() - initialize dsi phy driver
- * @dsi_phy:         DSI PHY handle.
- *
- * Initializes DSI PHY driver. Should be called after dsi_phy_get().
- *
- * Return: error code.
- */
-int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy)
-{
-	char dbg_name[DSI_DEBUG_NAME_LEN];
-
-	snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_phy", dsi_phy->index);
-	sde_dbg_reg_register_base(dbg_name, dsi_phy->hw.base,
-				msm_iomap_size(dsi_phy->pdev, "dsi_phy"));
-	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
-				msm_iomap_size(dsi_phy->pdev, "dsi_phy"), 0);
-	return 0;
-}
-
-/**
- * dsi_phy_drv_deinit() - de-initialize dsi phy driver
- * @dsi_phy:          DSI PHY handle.
- *
- * Release all resources acquired by dsi_phy_drv_init().
- *
- * Return: error code.
- */
-int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy)
-{
-	return 0;
-}
-
-int dsi_phy_clk_cb_register(struct msm_dsi_phy *dsi_phy,
-	struct clk_ctrl_cb *clk_cb)
-{
-	if (!dsi_phy || !clk_cb) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	dsi_phy->clk_cb.priv = clk_cb->priv;
-	dsi_phy->clk_cb.dsi_clk_cb = clk_cb->dsi_clk_cb;
-	return 0;
-}
-
-/**
- * dsi_phy_validate_mode() - validate a display mode
- * @dsi_phy:            DSI PHY handle.
- * @mode:               Mode information.
- *
- * Validation will fail if the mode cannot be supported by the PHY driver or
- * hardware.
- *
- * Return: error code.
- */
-int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
-			  struct dsi_mode_info *mode)
-{
-	int rc = 0;
-
-	if (!dsi_phy || !mode) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_phy->phy_lock);
-
-	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
-
-	mutex_unlock(&dsi_phy->phy_lock);
-	return rc;
-}
-
-/**
- * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
- * @dsi_phy:               DSI PHY handle.
- * @enable:                Boolean flag to enable/disable.
- *
- * Return: error code.
- */
-int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
-{
-	int rc = 0;
-
-	if (!dsi_phy) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dsi_phy->phy_lock);
-
-	if (enable == dsi_phy->power_state) {
-		pr_err("[PHY_%d] No state change\n", dsi_phy->index);
-		goto error;
-	}
-
-	if (enable) {
-		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital, true);
-		if (rc) {
-			pr_err("failed to enable digital regulator\n");
-			goto error;
-		}
-
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
-				dsi_phy->regulator_required) {
-			rc = dsi_pwr_enable_regulator(
-				&dsi_phy->pwr_info.phy_pwr, true);
-			if (rc) {
-				pr_err("failed to enable phy power\n");
-				(void)dsi_pwr_enable_regulator(
-					&dsi_phy->pwr_info.digital, false);
-				goto error;
-			}
-		}
-	} else {
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
-				dsi_phy->regulator_required) {
-			rc = dsi_pwr_enable_regulator(
-				&dsi_phy->pwr_info.phy_pwr, false);
-			if (rc) {
-				pr_err("failed to enable digital regulator\n");
-				goto error;
-			}
-		}
-
-		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital,
-					      false);
-		if (rc) {
-			pr_err("failed to enable phy power\n");
-			goto error;
-		}
-	}
-
-	dsi_phy->power_state = enable;
-error:
-	mutex_unlock(&dsi_phy->phy_lock);
-	return rc;
-}
-
-static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy,
-		struct dsi_host_config *config, bool clamp_enabled)
-{
-	int rc = 0;
-	u32 lanes = 0;
-	u32 ulps_lanes;
-
-	lanes = config->common_config.data_lanes;
-	lanes |= DSI_CLOCK_LANE;
-
-	/*
-	 * If DSI clamps are enabled, it means that the DSI lanes are
-	 * already in idle state. Checking for lanes to be in idle state
-	 * should be skipped during ULPS entry programming while coming
-	 * out of idle screen.
-	 */
-	if (!clamp_enabled) {
-		rc = phy->hw.ops.ulps_ops.wait_for_lane_idle(&phy->hw, lanes);
-		if (rc) {
-			pr_err("lanes not entering idle, skip ULPS\n");
-			return rc;
-		}
-	}
-
-	phy->hw.ops.ulps_ops.ulps_request(&phy->hw, &phy->cfg, lanes);
-
-	ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
-
-	if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
-		pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
-		       lanes, ulps_lanes);
-		rc = -EIO;
-	}
-
-	return rc;
-}
-
-static int dsi_phy_disable_ulps(struct msm_dsi_phy *phy,
-		 struct dsi_host_config *config)
-{
-	u32 ulps_lanes, lanes = 0;
-
-	lanes = config->common_config.data_lanes;
-	lanes |= DSI_CLOCK_LANE;
-
-	ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
-
-	if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
-		pr_err("Mismatch in ULPS: lanes:%d, ulps_lanes:%d\n",
-				lanes, ulps_lanes);
-		return -EIO;
-	}
-
-	phy->hw.ops.ulps_ops.ulps_exit(&phy->hw, &phy->cfg, lanes);
-
-	ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
-
-	if (phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
-		pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-void dsi_phy_toggle_resync_fifo(struct msm_dsi_phy *phy)
-{
-	if (!phy)
-		return;
-
-	if (!phy->hw.ops.toggle_resync_fifo)
-		return;
-
-	phy->hw.ops.toggle_resync_fifo(&phy->hw);
-}
-
-
-void dsi_phy_reset_clk_en_sel(struct msm_dsi_phy *phy)
-{
-	if (!phy)
-		return;
-
-	if (!phy->hw.ops.reset_clk_en_sel)
-		return;
-
-	phy->hw.ops.reset_clk_en_sel(&phy->hw);
-}
-
-int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
-		bool enable, bool clamp_enabled)
-{
-	int rc = 0;
-
-	if (!phy) {
-		pr_err("Invalid params\n");
-		return DSI_PHY_ULPS_ERROR;
-	}
-
-	if (!phy->hw.ops.ulps_ops.ulps_request ||
-			!phy->hw.ops.ulps_ops.ulps_exit ||
-			!phy->hw.ops.ulps_ops.get_lanes_in_ulps ||
-			!phy->hw.ops.ulps_ops.is_lanes_in_ulps ||
-			!phy->hw.ops.ulps_ops.wait_for_lane_idle) {
-		pr_debug("DSI PHY ULPS ops not present\n");
-		return DSI_PHY_ULPS_NOT_HANDLED;
-	}
-
-	mutex_lock(&phy->phy_lock);
-
-	if (enable)
-		rc = dsi_phy_enable_ulps(phy, config, clamp_enabled);
-	else
-		rc = dsi_phy_disable_ulps(phy, config);
-
-	if (rc) {
-		pr_err("[DSI_PHY%d] Ulps state change(%d) failed, rc=%d\n",
-			phy->index, enable, rc);
-		rc = DSI_PHY_ULPS_ERROR;
-		goto error;
-	}
-	pr_debug("[DSI_PHY%d] ULPS state = %d\n", phy->index, enable);
-
-error:
-	mutex_unlock(&phy->phy_lock);
-	return rc;
-}
-
-/**
- * dsi_phy_enable() - enable DSI PHY hardware
- * @dsi_phy:            DSI PHY handle.
- * @config:             DSI host configuration.
- * @pll_source:         Source PLL for PHY clock.
- * @skip_validation:    Validation will not be performed on parameters.
- * @is_cont_splash_enabled:    check whether continuous splash enabled.
- *
- * Validates and enables DSI PHY.
- *
- * Return: error code.
- */
-int dsi_phy_enable(struct msm_dsi_phy *phy,
-		   struct dsi_host_config *config,
-		   enum dsi_phy_pll_source pll_source,
-		   bool skip_validation,
-		   bool is_cont_splash_enabled)
-{
-	int rc = 0;
-
-	if (!phy || !config) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&phy->phy_lock);
-
-	if (!skip_validation)
-		pr_debug("[PHY_%d] TODO: perform validation\n", phy->index);
-
-	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
-	memcpy(&phy->cfg.lane_map, &config->lane_map, sizeof(config->lane_map));
-	phy->data_lanes = config->common_config.data_lanes;
-	phy->dst_format = config->common_config.dst_format;
-	phy->cfg.pll_source = pll_source;
-	phy->cfg.bit_clk_rate_hz = config->bit_clk_rate_hz;
-
-	/**
-	 * If PHY timing parameters are not present in panel dtsi file,
-	 * then calculate them in the driver
-	 */
-	if (!phy->cfg.is_phy_timing_present)
-		rc = phy->hw.ops.calculate_timing_params(&phy->hw,
-						 &phy->mode,
-						 &config->common_config,
-						 &phy->cfg.timing);
-	if (rc) {
-		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
-		goto error;
-	}
-
-	if (!is_cont_splash_enabled) {
-		dsi_phy_enable_hw(phy);
-		pr_debug("cont splash not enabled, phy enable required\n");
-	}
-	phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
-
-error:
-	mutex_unlock(&phy->phy_lock);
-
-	return rc;
-}
-
-int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
-{
-	int ret = 0;
-
-	if (!phy)
-		return ret;
-
-	mutex_lock(&phy->phy_lock);
-	if (phy->hw.ops.phy_lane_reset)
-		ret = phy->hw.ops.phy_lane_reset(&phy->hw);
-	mutex_unlock(&phy->phy_lock);
-
-	return ret;
-}
-
-/**
- * dsi_phy_disable() - disable DSI PHY hardware.
- * @phy:        DSI PHY handle.
- *
- * Return: error code.
- */
-int dsi_phy_disable(struct msm_dsi_phy *phy)
-{
-	int rc = 0;
-
-	if (!phy) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&phy->phy_lock);
-	dsi_phy_disable_hw(phy);
-	phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
-	mutex_unlock(&phy->phy_lock);
-
-	return rc;
-}
-
-/**
- * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
- * @phy:        DSI PHY handle.
- * @enable:     boolean to specify clamp enable/disable.
- *
- * Return: error code.
- */
-int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable)
-{
-	if (!phy)
-		return -EINVAL;
-
-	pr_debug("[%s] enable=%d\n", phy->name, enable);
-
-	if (phy->hw.ops.clamp_ctrl)
-		phy->hw.ops.clamp_ctrl(&phy->hw, enable);
-
-	return 0;
-}
-
-/**
- * dsi_phy_idle_ctrl() - enable/disable DSI PHY during idle screen
- * @phy:          DSI PHY handle
- * @enable:       boolean to specify PHY enable/disable.
- *
- * Return: error code.
- */
-
-int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable)
-{
-	if (!phy) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	pr_debug("[%s] enable=%d\n", phy->name, enable);
-
-	mutex_lock(&phy->phy_lock);
-	if (enable) {
-		if (phy->hw.ops.phy_idle_on)
-			phy->hw.ops.phy_idle_on(&phy->hw, &phy->cfg);
-
-		if (phy->hw.ops.regulator_enable)
-			phy->hw.ops.regulator_enable(&phy->hw,
-				&phy->cfg.regulators);
-
-		if (phy->hw.ops.enable)
-			phy->hw.ops.enable(&phy->hw, &phy->cfg);
-
-		phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
-	} else {
-		phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
-
-		if (phy->hw.ops.disable)
-			phy->hw.ops.disable(&phy->hw, &phy->cfg);
-
-		if (phy->hw.ops.phy_idle_off)
-			phy->hw.ops.phy_idle_off(&phy->hw);
-	}
-	mutex_unlock(&phy->phy_lock);
-
-	return 0;
-}
-
-/**
- * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
- * @phy:          DSI PHY handle
- * @clk_freq:     link clock frequency
- *
- * Return: error code.
- */
-int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
-		struct link_clk_freq *clk_freq)
-{
-	if (!phy || !clk_freq) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	phy->regulator_required = clk_freq->byte_clk_rate >
-		(phy->regulator_min_datarate_bps / BITS_PER_BYTE);
-
-	/*
-	 * DSI PLL needs 0p9 LDO1A for Powering DSI PLL block.
-	 * PLL driver can vote for this regulator in PLL driver file, but for
-	 * the usecase where we come out of idle(static screen), if PLL and
-	 * PHY vote for regulator ,there will be performance delays as both
-	 * votes go through RPM to enable regulators.
-	 */
-	phy->regulator_required = true;
-	pr_debug("[%s] lane_datarate=%u min_datarate=%u required=%d\n",
-			phy->name,
-			clk_freq->byte_clk_rate * BITS_PER_BYTE,
-			phy->regulator_min_datarate_bps,
-			phy->regulator_required);
-
-	return 0;
-}
-
-/**
- * dsi_phy_set_timing_params() - timing parameters for the panel
- * @phy:          DSI PHY handle
- * @timing:       array holding timing params.
- * @size:         size of the array.
- *
- * When PHY timing calculator is not implemented, this array will be used to
- * pass PHY timing information.
- *
- * Return: error code.
- */
-int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
-			      u32 *timing, u32 size)
-{
-	int rc = 0;
-
-	if (!phy || !timing || !size) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&phy->phy_lock);
-
-	if (phy->hw.ops.phy_timing_val)
-		rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
-	if (!rc)
-		phy->cfg.is_phy_timing_present = true;
-	mutex_unlock(&phy->phy_lock);
-	return rc;
-}
-
-/**
- * dsi_phy_conv_phy_to_logical_lane() - Convert physical to logical lane
- * @lane_map:     logical lane
- * @phy_lane:     physical lane
- *
- * Return: Error code on failure. Lane number on success.
- */
-int dsi_phy_conv_phy_to_logical_lane(
-	struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane)
-{
-	int i = 0;
-
-	if (phy_lane > DSI_PHYSICAL_LANE_3)
-		return -EINVAL;
-
-	for (i = DSI_LOGICAL_LANE_0; i < (DSI_LANE_MAX - 1); i++) {
-		if (lane_map->lane_map_v2[i] == phy_lane)
-			break;
-	}
-	return i;
-}
-
-/**
- * dsi_phy_conv_logical_to_phy_lane() - Convert logical to physical lane
- * @lane_map:     physical lane
- * @lane:         logical lane
- *
- * Return: Error code on failure. Lane number on success.
- */
-int dsi_phy_conv_logical_to_phy_lane(
-	struct dsi_lane_map *lane_map, enum dsi_logical_lane lane)
-{
-	int i = 0;
-
-	if (lane > (DSI_LANE_MAX - 1))
-		return -EINVAL;
-
-	for (i = DSI_LOGICAL_LANE_0; i < (DSI_LANE_MAX - 1); i++) {
-		if (BIT(i) == lane_map->lane_map_v2[lane])
-			break;
-	}
-	return i;
-}
-
-void dsi_phy_drv_register(void)
-{
-	platform_driver_register(&dsi_phy_platform_driver);
-}
-
-void dsi_phy_drv_unregister(void)
-{
-	platform_driver_unregister(&dsi_phy_platform_driver);
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
deleted file mode 100644
index 875f2a9..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PHY_H_
-#define _DSI_PHY_H_
-
-#include "dsi_defs.h"
-#include "dsi_clk.h"
-#include "dsi_pwr.h"
-#include "dsi_phy_hw.h"
-
-struct dsi_ver_spec_info {
-	enum dsi_phy_version version;
-	u32 lane_cfg_count;
-	u32 strength_cfg_count;
-	u32 regulator_cfg_count;
-	u32 timing_cfg_count;
-};
-
-/**
- * struct dsi_phy_power_info - digital and analog power supplies for DSI PHY
- * @digital:       Digital power supply for DSI PHY.
- * @phy_pwr:       Analog power supplies for DSI PHY to work.
- */
-struct dsi_phy_power_info {
-	struct dsi_regulator_info digital;
-	struct dsi_regulator_info phy_pwr;
-};
-
-/**
- * enum phy_engine_state - define engine status for dsi phy.
- * @DSI_PHY_ENGINE_OFF:  Engine is turned off.
- * @DSI_PHY_ENGINE_ON:   Engine is turned on.
- * @DSI_PHY_ENGINE_MAX:  Maximum value.
- */
-enum phy_engine_state {
-	DSI_PHY_ENGINE_OFF = 0,
-	DSI_PHY_ENGINE_ON,
-	DSI_PHY_ENGINE_MAX,
-};
-
-/**
- * enum phy_ulps_return_type - define set_ulps return type for dsi phy.
- * @DSI_PHY_ULPS_HANDLED:      ulps is handled in phy.
- * @DSI_PHY_ULPS_NOT_HANDLED:  ulps is not handled in phy.
- * @DSI_PHY_ULPS_ERROR:        ulps request failed in phy.
- */
-enum phy_ulps_return_type {
-	DSI_PHY_ULPS_HANDLED = 0,
-	DSI_PHY_ULPS_NOT_HANDLED,
-	DSI_PHY_ULPS_ERROR,
-};
-
-/**
- * struct msm_dsi_phy - DSI PHY object
- * @pdev:              Pointer to platform device.
- * @index:             Instance id.
- * @name:              Name of the PHY instance.
- * @refcount:          Reference count.
- * @phy_lock:          Mutex for hardware and object access.
- * @ver_info:          Version specific phy parameters.
- * @hw:                DSI PHY hardware object.
- * @pwr_info:          Power information.
- * @cfg:               DSI phy configuration.
- * @clk_cb:	       structure containing call backs for clock control
- * @power_state:       True if PHY is powered on.
- * @dsi_phy_state:     PHY state information.
- * @mode:              Current mode.
- * @data_lanes:        Number of data lanes used.
- * @dst_format:        Destination format.
- * @allow_phy_power_off: True if PHY is allowed to power off when idle
- * @regulator_min_datarate_bps: Minimum per lane data rate to turn on regulator
- * @regulator_required: True if phy regulator is required
- */
-struct msm_dsi_phy {
-	struct platform_device *pdev;
-	int index;
-	const char *name;
-	u32 refcount;
-	struct mutex phy_lock;
-
-	const struct dsi_ver_spec_info *ver_info;
-	struct dsi_phy_hw hw;
-
-	struct dsi_phy_power_info pwr_info;
-
-	struct dsi_phy_cfg cfg;
-	struct clk_ctrl_cb clk_cb;
-
-	enum phy_engine_state dsi_phy_state;
-	bool power_state;
-	struct dsi_mode_info mode;
-	enum dsi_data_lanes data_lanes;
-	enum dsi_pixel_format dst_format;
-
-	bool allow_phy_power_off;
-	u32 regulator_min_datarate_bps;
-	bool regulator_required;
-};
-
-/**
- * dsi_phy_get() - get a dsi phy handle from device node
- * @of_node:           device node for dsi phy controller
- *
- * Gets the DSI PHY handle for the corresponding of_node. The ref count is
- * incremented to one all subsequents get will fail until the original client
- * calls a put.
- *
- * Return: DSI PHY handle or an error code.
- */
-struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node);
-
-/**
- * dsi_phy_put() - release dsi phy handle
- * @dsi_phy:              DSI PHY handle.
- *
- * Release the DSI PHY hardware. Driver will clean up all resources and puts
- * back the DSI PHY into reset state.
- */
-void dsi_phy_put(struct msm_dsi_phy *dsi_phy);
-
-/**
- * dsi_phy_drv_init() - initialize dsi phy driver
- * @dsi_phy:         DSI PHY handle.
- *
- * Initializes DSI PHY driver. Should be called after dsi_phy_get().
- *
- * Return: error code.
- */
-int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy);
-
-/**
- * dsi_phy_drv_deinit() - de-initialize dsi phy driver
- * @dsi_phy:          DSI PHY handle.
- *
- * Release all resources acquired by dsi_phy_drv_init().
- *
- * Return: error code.
- */
-int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy);
-
-/**
- * dsi_phy_validate_mode() - validate a display mode
- * @dsi_phy:            DSI PHY handle.
- * @mode:               Mode information.
- *
- * Validation will fail if the mode cannot be supported by the PHY driver or
- * hardware.
- *
- * Return: error code.
- */
-int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
-			  struct dsi_mode_info *mode);
-
-/**
- * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
- * @dsi_phy:               DSI PHY handle.
- * @enable:                Boolean flag to enable/disable.
- *
- * Return: error code.
- */
-int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
-
-/**
- * dsi_phy_enable() - enable DSI PHY hardware
- * @dsi_phy:            DSI PHY handle.
- * @config:             DSI host configuration.
- * @pll_source:         Source PLL for PHY clock.
- * @skip_validation:    Validation will not be performed on parameters.
- * @is_cont_splash_enabled:    check whether continuous splash enabled.
- *
- * Validates and enables DSI PHY.
- *
- * Return: error code.
- */
-int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
-		   struct dsi_host_config *config,
-		   enum dsi_phy_pll_source pll_source,
-		   bool skip_validation,
-		   bool is_cont_splash_enabled);
-
-/**
- * dsi_phy_disable() - disable DSI PHY hardware.
- * @phy:        DSI PHY handle.
- *
- * Return: error code.
- */
-int dsi_phy_disable(struct msm_dsi_phy *phy);
-
-/**
- * dsi_phy_set_ulps() - set ulps state for DSI pHY
- * @phy:          DSI PHY handle
- * @config:	  DSi host configuration information.
- * @enable:	  Enable/Disable
- * @clamp_enabled: mmss_clamp enabled/disabled
- *
- * Return: error code.
- */
-int dsi_phy_set_ulps(struct msm_dsi_phy *phy,  struct dsi_host_config *config,
-		bool enable, bool clamp_enabled);
-
-/**
- * dsi_phy_clk_cb_register() - Register PHY clock control callback
- * @phy:          DSI PHY handle
- * @clk_cb:	  Structure containing call back for clock control
- *
- * Return: error code.
- */
-int dsi_phy_clk_cb_register(struct msm_dsi_phy *phy,
-	struct clk_ctrl_cb *clk_cb);
-
-/**
- * dsi_phy_idle_ctrl() - enable/disable DSI PHY during idle screen
- * @phy:          DSI PHY handle
- * @enable:       boolean to specify PHY enable/disable.
- *
- * Return: error code.
- */
-int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
-
-/**
- * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
- * @phy:        DSI PHY handle.
- * @enable:     boolean to specify clamp enable/disable.
- *
- * Return: error code.
- */
-int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable);
-
-/**
- * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
- * @phy:          DSI PHY handle
- * @clk_freq:     link clock frequency
- *
- * Return: error code.
- */
-int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
-		struct link_clk_freq *clk_freq);
-
-/**
- * dsi_phy_set_timing_params() - timing parameters for the panel
- * @phy:          DSI PHY handle
- * @timing:       array holding timing params.
- * @size:         size of the array.
- *
- * When PHY timing calculator is not implemented, this array will be used to
- * pass PHY timing information.
- *
- * Return: error code.
- */
-int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
-			      u32 *timing, u32 size);
-
-/**
- * dsi_phy_lane_reset() - Reset DSI PHY lanes in case of error
- * @phy:	DSI PHY handle
- *
- * Return: error code.
- */
-int dsi_phy_lane_reset(struct msm_dsi_phy *phy);
-
-/**
- * dsi_phy_toggle_resync_fifo() - toggle resync retime FIFO
- * @phy:          DSI PHY handle
- *
- * Toggle the resync retime FIFO to synchronize the data paths.
- * This should be done everytime there is a change in the link clock
- * rate
- */
-void dsi_phy_toggle_resync_fifo(struct msm_dsi_phy *phy);
-
-/**
- * dsi_phy_reset_clk_en_sel() - reset clk_en_select on cmn_clk_cfg1 register
- * @phy:          DSI PHY handle
- *
- * After toggling resync fifo regiater, clk_en_sel bit on cmn_clk_cfg1
- * register has to be reset
- */
-void dsi_phy_reset_clk_en_sel(struct msm_dsi_phy *phy);
-
-/**
- * dsi_phy_drv_register() - register platform driver for dsi phy
- */
-void dsi_phy_drv_register(void);
-
-/**
- * dsi_phy_drv_unregister() - unregister platform driver
- */
-void dsi_phy_drv_unregister(void);
-
-#endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
deleted file mode 100644
index 701435a..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PHY_HW_H_
-#define _DSI_PHY_HW_H_
-
-#include "dsi_defs.h"
-
-#define DSI_MAX_SETTINGS 8
-#define DSI_PHY_TIMING_V3_SIZE 12
-#define DSI_PHY_TIMING_V4_SIZE 14
-
-/**
- * enum dsi_phy_version - DSI PHY version enumeration
- * @DSI_PHY_VERSION_UNKNOWN:    Unknown version.
- * @DSI_PHY_VERSION_0_0_HPM:    28nm-HPM.
- * @DSI_PHY_VERSION_0_0_LPM:    28nm-HPM.
- * @DSI_PHY_VERSION_1_0:        20nm
- * @DSI_PHY_VERSION_2_0:        14nm
- * @DSI_PHY_VERSION_3_0:        10nm
- * @DSI_PHY_VERSION_4_0:        7nm
- * @DSI_PHY_VERSION_4_1:	7nm
- * @DSI_PHY_VERSION_MAX:
- */
-enum dsi_phy_version {
-	DSI_PHY_VERSION_UNKNOWN,
-	DSI_PHY_VERSION_0_0_HPM, /* 28nm-HPM */
-	DSI_PHY_VERSION_0_0_LPM, /* 28nm-LPM */
-	DSI_PHY_VERSION_1_0, /* 20nm */
-	DSI_PHY_VERSION_2_0, /* 14nm */
-	DSI_PHY_VERSION_3_0, /* 10nm */
-	DSI_PHY_VERSION_4_0, /* 7nm  */
-	DSI_PHY_VERSION_4_1, /* 7nm */
-	DSI_PHY_VERSION_MAX
-};
-
-/**
- * enum dsi_phy_hw_features - features supported by DSI PHY hardware
- * @DSI_PHY_DPHY:        Supports DPHY
- * @DSI_PHY_CPHY:        Supports CPHY
- * @DSI_PHY_MAX_FEATURES:
- */
-enum dsi_phy_hw_features {
-	DSI_PHY_DPHY,
-	DSI_PHY_CPHY,
-	DSI_PHY_MAX_FEATURES
-};
-
-/**
- * enum dsi_phy_pll_source - pll clock source for PHY.
- * @DSI_PLL_SOURCE_STANDALONE:    Clock is sourced from native PLL and is not
- *				  shared by other PHYs.
- * @DSI_PLL_SOURCE_NATIVE:        Clock is sourced from native PLL and is
- *				  shared by other PHYs.
- * @DSI_PLL_SOURCE_NON_NATIVE:    Clock is sourced from other PHYs.
- * @DSI_PLL_SOURCE_MAX:
- */
-enum dsi_phy_pll_source {
-	DSI_PLL_SOURCE_STANDALONE = 0,
-	DSI_PLL_SOURCE_NATIVE,
-	DSI_PLL_SOURCE_NON_NATIVE,
-	DSI_PLL_SOURCE_MAX
-};
-
-/**
- * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
- * @lane:           A set of maximum 8 values for each lane.
- * @lane_v3:        A set of maximum 12 values for each lane.
- * @count_per_lane: Number of values per each lane.
- */
-struct dsi_phy_per_lane_cfgs {
-	u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
-	u8 lane_v3[DSI_PHY_TIMING_V3_SIZE];
-	u8 lane_v4[DSI_PHY_TIMING_V4_SIZE];
-	u32 count_per_lane;
-};
-
-/**
- * struct dsi_phy_cfg - DSI PHY configuration
- * @lanecfg:          Lane configuration settings.
- * @strength:         Strength settings for lanes.
- * @timing:           Timing parameters for lanes.
- * @is_phy_timing_present:	Boolean whether phy timings are defined.
- * @regulators:       Regulator settings for lanes.
- * @pll_source:       PLL source.
- * @lane_map:         DSI logical to PHY lane mapping.
- * @force_clk_lane_hs:Boolean whether to force clock lane in HS mode.
- * @bit_clk_rate_hz: DSI bit clk rate in HZ.
- */
-struct dsi_phy_cfg {
-	struct dsi_phy_per_lane_cfgs lanecfg;
-	struct dsi_phy_per_lane_cfgs strength;
-	struct dsi_phy_per_lane_cfgs timing;
-	bool is_phy_timing_present;
-	struct dsi_phy_per_lane_cfgs regulators;
-	enum dsi_phy_pll_source pll_source;
-	struct dsi_lane_map lane_map;
-	bool force_clk_lane_hs;
-	unsigned long bit_clk_rate_hz;
-};
-
-struct dsi_phy_hw;
-
-struct phy_ulps_config_ops {
-	/**
-	 * wait_for_lane_idle() - wait for DSI lanes to go to idle state
-	 * @phy:           Pointer to DSI PHY hardware instance.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to be checked to be in idle state.
-	 */
-	int (*wait_for_lane_idle)(struct dsi_phy_hw *phy, u32 lanes);
-
-	/**
-	 * ulps_request() - request ulps entry for specified lanes
-	 * @phy:           Pointer to DSI PHY hardware instance.
-	 * @cfg:           Per lane configurations for timing, strength and lane
-	 *	           configurations.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to enter ULPS.
-	 *
-	 * Caller should check if lanes are in ULPS mode by calling
-	 * get_lanes_in_ulps() operation.
-	 */
-	void (*ulps_request)(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes);
-
-	/**
-	 * ulps_exit() - exit ULPS on specified lanes
-	 * @phy:           Pointer to DSI PHY hardware instance.
-	 * @cfg:           Per lane configurations for timing, strength and lane
-	 *                 configurations.
-	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
-	 *                 to exit ULPS.
-	 *
-	 * Caller should check if lanes are in active mode by calling
-	 * get_lanes_in_ulps() operation.
-	 */
-	void (*ulps_exit)(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes);
-
-	/**
-	 * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
-	 * @phy:           Pointer to DSI PHY hardware instance.
-	 *
-	 * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
-	 * state.
-	 *
-	 * Return: List of lanes in ULPS state.
-	 */
-	u32 (*get_lanes_in_ulps)(struct dsi_phy_hw *phy);
-
-	/**
-	 * is_lanes_in_ulps() - checks if the given lanes are in ulps
-	 * @lanes:           lanes to be checked.
-	 * @ulps_lanes:	   lanes in ulps currenly.
-	 *
-	 * Return: true if all the given lanes are in ulps; false otherwise.
-	 */
-	bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
-};
-
-/**
- * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
- * @regulator_enable:          Enable PHY regulators.
- * @regulator_disable:         Disable PHY regulators.
- * @enable:                    Enable PHY.
- * @disable:                   Disable PHY.
- * @calculate_timing_params:   Calculate PHY timing params from mode information
- */
-struct dsi_phy_hw_ops {
-	/**
-	 * regulator_enable() - enable regulators for DSI PHY
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * @reg_cfg:  Regulator configuration for all DSI lanes.
-	 */
-	void (*regulator_enable)(struct dsi_phy_hw *phy,
-				 struct dsi_phy_per_lane_cfgs *reg_cfg);
-
-	/**
-	 * regulator_disable() - disable regulators
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 */
-	void (*regulator_disable)(struct dsi_phy_hw *phy);
-
-	/**
-	 * enable() - Enable PHY hardware
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * @cfg:      Per lane configurations for timing, strength and lane
-	 *	      configurations.
-	 */
-	void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-
-	/**
-	 * disable() - Disable PHY hardware
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * @cfg:      Per lane configurations for timing, strength and lane
-	 *	      configurations.
-	 */
-	void (*disable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-
-	/**
-	 * phy_idle_on() - Enable PHY hardware when entering idle screen
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * @cfg:      Per lane configurations for timing, strength and lane
-	 *	      configurations.
-	 */
-	void (*phy_idle_on)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
-
-	/**
-	 * phy_idle_off() - Disable PHY hardware when exiting idle screen
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 */
-	void (*phy_idle_off)(struct dsi_phy_hw *phy);
-
-	/**
-	 * calculate_timing_params() - calculates timing parameters.
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * @mode:     Mode information for which timing has to be calculated.
-	 * @config:   DSI host configuration for this mode.
-	 * @timing:   Timing parameters for each lane which will be returned.
-	 */
-	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
-				       struct dsi_mode_info *mode,
-				       struct dsi_host_common_cfg *config,
-				       struct dsi_phy_per_lane_cfgs *timing);
-
-	/**
-	 * phy_timing_val() - Gets PHY timing values.
-	 * @timing_val: Timing parameters for each lane which will be returned.
-	 * @timing: Array containing PHY timing values
-	 * @size: Size of the array
-	 */
-	int (*phy_timing_val)(struct dsi_phy_per_lane_cfgs *timing_val,
-				u32 *timing, u32 size);
-
-	/**
-	 * clamp_ctrl() - configure clamps for DSI lanes
-	 * @phy:        DSI PHY handle.
-	 * @enable:     boolean to specify clamp enable/disable.
-	 * Return:    error code.
-	 */
-	void (*clamp_ctrl)(struct dsi_phy_hw *phy, bool enable);
-
-	/**
-	 * phy_lane_reset() - Reset dsi phy lanes in case of error.
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * Return:    error code.
-	 */
-	int (*phy_lane_reset)(struct dsi_phy_hw *phy);
-
-	/**
-	 * toggle_resync_fifo() - toggle resync retime FIFO to sync data paths
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 * Return:    error code.
-	 */
-	void (*toggle_resync_fifo)(struct dsi_phy_hw *phy);
-
-	/**
-	 * reset_clk_en_sel() - reset clk_en_sel on phy cmn_clk_cfg1 register
-	 * @phy:      Pointer to DSI PHY hardware object.
-	 */
-	void (*reset_clk_en_sel)(struct dsi_phy_hw *phy);
-
-	void *timing_ops;
-	struct phy_ulps_config_ops ulps_ops;
-};
-
-/**
- * struct dsi_phy_hw - DSI phy hardware object specific to an instance
- * @base:                  VA for the DSI PHY base address.
- * @length:                Length of the DSI PHY register base map.
- * @index:                 Instance ID of the controller.
- * @version:               DSI PHY version.
- * @phy_clamp_base:        Base address of phy clamp register map.
- * @feature_map:           Features supported by DSI PHY.
- * @ops:                   Function pointer to PHY operations.
- */
-struct dsi_phy_hw {
-	void __iomem *base;
-	u32 length;
-	u32 index;
-
-	enum dsi_phy_version version;
-	void __iomem *phy_clamp_base;
-
-	DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
-	struct dsi_phy_hw_ops ops;
-};
-
-/**
- * dsi_phy_conv_phy_to_logical_lane() - Convert physical to logical lane
- * @lane_map:     logical lane
- * @phy_lane:     physical lane
- *
- * Return: Error code on failure. Lane number on success.
- */
-int dsi_phy_conv_phy_to_logical_lane(
-	struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane);
-
-/**
- * dsi_phy_conv_logical_to_phy_lane() - Convert logical to physical lane
- * @lane_map:     physical lane
- * @lane:         logical lane
- *
- * Return: Error code on failure. Lane number on success.
- */
-int dsi_phy_conv_logical_to_phy_lane(
-	struct dsi_lane_map *lane_map, enum dsi_logical_lane lane);
-
-#endif /* _DSI_PHY_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v2_0.c
deleted file mode 100644
index 38c8bde..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v2_0.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-hw:" fmt
-#include <linux/math64.h>
-#include <linux/delay.h>
-#include "dsi_hw.h"
-#include "dsi_phy_hw.h"
-
-#define DSIPHY_CMN_REVISION_ID0                   0x0000
-#define DSIPHY_CMN_REVISION_ID1                   0x0004
-#define DSIPHY_CMN_REVISION_ID2                   0x0008
-#define DSIPHY_CMN_REVISION_ID3                   0x000C
-#define DSIPHY_CMN_CLK_CFG0                       0x0010
-#define DSIPHY_CMN_CLK_CFG1                       0x0014
-#define DSIPHY_CMN_GLBL_TEST_CTRL                 0x0018
-#define DSIPHY_CMN_CTRL_0                         0x001C
-#define DSIPHY_CMN_CTRL_1                         0x0020
-#define DSIPHY_CMN_CAL_HW_TRIGGER                 0x0024
-#define DSIPHY_CMN_CAL_SW_CFG0                    0x0028
-#define DSIPHY_CMN_CAL_SW_CFG1                    0x002C
-#define DSIPHY_CMN_CAL_SW_CFG2                    0x0030
-#define DSIPHY_CMN_CAL_HW_CFG0                    0x0034
-#define DSIPHY_CMN_CAL_HW_CFG1                    0x0038
-#define DSIPHY_CMN_CAL_HW_CFG2                    0x003C
-#define DSIPHY_CMN_CAL_HW_CFG3                    0x0040
-#define DSIPHY_CMN_CAL_HW_CFG4                    0x0044
-#define DSIPHY_CMN_PLL_CNTRL                      0x0048
-#define DSIPHY_CMN_LDO_CNTRL                      0x004C
-
-#define DSIPHY_CMN_REGULATOR_CAL_STATUS0          0x0064
-#define DSIPHY_CMN_REGULATOR_CAL_STATUS1          0x0068
-#define DSI_MDP_ULPS_CLAMP_ENABLE_OFF             0x0054
-
-/* n = 0..3 for data lanes and n = 4 for clock lane */
-#define DSIPHY_DLNX_CFG0(n)                     (0x100 + ((n) * 0x80))
-#define DSIPHY_DLNX_CFG1(n)                     (0x104 + ((n) * 0x80))
-#define DSIPHY_DLNX_CFG2(n)                     (0x108 + ((n) * 0x80))
-#define DSIPHY_DLNX_CFG3(n)                     (0x10C + ((n) * 0x80))
-#define DSIPHY_DLNX_TEST_DATAPATH(n)            (0x110 + ((n) * 0x80))
-#define DSIPHY_DLNX_TEST_STR(n)                 (0x114 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_4(n)            (0x118 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_5(n)            (0x11C + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_6(n)            (0x120 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_7(n)            (0x124 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_8(n)            (0x128 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_9(n)            (0x12C + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_10(n)           (0x130 + ((n) * 0x80))
-#define DSIPHY_DLNX_TIMING_CTRL_11(n)           (0x134 + ((n) * 0x80))
-#define DSIPHY_DLNX_STRENGTH_CTRL_0(n)          (0x138 + ((n) * 0x80))
-#define DSIPHY_DLNX_STRENGTH_CTRL_1(n)          (0x13C + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_POLY(n)                (0x140 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_SEED0(n)               (0x144 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_SEED1(n)               (0x148 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_HEAD(n)                (0x14C + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_SOT(n)                 (0x150 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_CTRL0(n)               (0x154 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_CTRL1(n)               (0x158 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_CTRL2(n)               (0x15C + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_CTRL3(n)               (0x160 + ((n) * 0x80))
-#define DSIPHY_DLNX_VREG_CNTRL(n)               (0x164 + ((n) * 0x80))
-#define DSIPHY_DLNX_HSTX_STR_STATUS(n)          (0x168 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_STATUS0(n)             (0x16C + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_STATUS1(n)             (0x170 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_STATUS2(n)             (0x174 + ((n) * 0x80))
-#define DSIPHY_DLNX_BIST_STATUS3(n)             (0x178 + ((n) * 0x80))
-#define DSIPHY_DLNX_MISR_STATUS(n)              (0x17C + ((n) * 0x80))
-
-#define DSIPHY_PLL_CLKBUFLR_EN                  0x041C
-#define DSIPHY_PLL_PLL_BANDGAP                  0x0508
-
-/**
- * regulator_enable() - enable regulators for DSI PHY
- * @phy:      Pointer to DSI PHY hardware object.
- * @reg_cfg:  Regulator configuration for all DSI lanes.
- */
-void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
-				      struct dsi_phy_per_lane_cfgs *reg_cfg)
-{
-	int i;
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
-		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
-
-	/* make sure all values are written to hardware */
-	wmb();
-
-	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
-}
-
-/**
- * regulator_disable() - disable regulators
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy)
-{
-	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
-}
-
-/**
- * enable() - Enable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- * @cfg:      Per lane configurations for timing, strength and lane
- *	      configurations.
- */
-void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	int i;
-	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
-	u32 data;
-
-	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
-
-	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-
-		DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
-		DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
-		DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
-		DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
-
-		DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
-
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
-		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
-
-		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
-			cfg->strength.lane[i][0]);
-		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
-			cfg->strength.lane[i][1]);
-	}
-
-	/* make sure all values are written to hardware before enabling phy */
-	wmb();
-
-	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
-	udelay(100);
-	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
-
-	data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
-
-	switch (cfg->pll_source) {
-	case DSI_PLL_SOURCE_STANDALONE:
-		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
-		data &= ~BIT(2);
-		break;
-	case DSI_PLL_SOURCE_NATIVE:
-		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
-		data &= ~BIT(2);
-		break;
-	case DSI_PLL_SOURCE_NON_NATIVE:
-		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
-		data |= BIT(2);
-		break;
-	default:
-		break;
-	}
-
-	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
-
-	/* Enable bias current for pll1 during split display case */
-	if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
-		DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
-
-	pr_debug("[DSI_%d]Phy enabled\n", phy->index);
-}
-
-/**
- * disable() - Disable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
-	pr_debug("[DSI_%d]Phy disabled\n", phy->index);
-}
-
-/**
- * dsi_phy_hw_v2_0_idle_on() - Enable DSI PHY hardware during idle screen
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
-{
-	int i = 0;
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
-			cfg->strength.lane[i][0]);
-		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
-			cfg->strength.lane[i][1]);
-	}
-	wmb(); /* make sure write happens */
-	pr_debug("[DSI_%d]Phy enabled out of idle screen\n", phy->index);
-}
-
-
-/**
- * dsi_phy_hw_v2_0_idle_off() - Disable DSI PHY hardware during idle screen
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy)
-{
-	int i = 0;
-
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
-		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), 0x1c);
-	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
-		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i), 0x0);
-	wmb(); /* make sure write happens */
-	pr_debug("[DSI_%d]Phy disabled during idle screen\n", phy->index);
-}
-
-int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size)
-{
-	int i = 0, j = 0;
-
-	if (size != (DSI_LANE_MAX * DSI_MAX_SETTINGS)) {
-		pr_err("Unexpected timing array size %d\n", size);
-		return -EINVAL;
-	}
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		for (j = 0; j < DSI_MAX_SETTINGS; j++) {
-			timing_cfg->lane[i][j] = *timing_val;
-			timing_val++;
-		}
-	}
-	return 0;
-}
-
-void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
-{
-	u32 clamp_reg = 0;
-
-	if (!phy->phy_clamp_base) {
-		pr_debug("phy_clamp_base NULL\n");
-		return;
-	}
-
-	if (enable) {
-		clamp_reg |= BIT(0);
-		DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
-				clamp_reg);
-		pr_debug("clamp enabled\n");
-	} else {
-		clamp_reg &= ~BIT(0);
-		DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
-				clamp_reg);
-		pr_debug("clamp disabled\n");
-	}
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
deleted file mode 100644
index 19e1e9d..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ /dev/null
@@ -1,464 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-hw:" fmt
-#include <linux/math64.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include "dsi_hw.h"
-#include "dsi_phy_hw.h"
-#include "dsi_catalog.h"
-
-#define DSIPHY_CMN_CLK_CFG0						0x010
-#define DSIPHY_CMN_CLK_CFG1						0x014
-#define DSIPHY_CMN_GLBL_CTRL						0x018
-#define DSIPHY_CMN_RBUF_CTRL						0x01C
-#define DSIPHY_CMN_VREG_CTRL						0x020
-#define DSIPHY_CMN_CTRL_0						0x024
-#define DSIPHY_CMN_CTRL_1						0x028
-#define DSIPHY_CMN_CTRL_2						0x02C
-#define DSIPHY_CMN_LANE_CFG0						0x030
-#define DSIPHY_CMN_LANE_CFG1						0x034
-#define DSIPHY_CMN_PLL_CNTRL						0x038
-#define DSIPHY_CMN_LANE_CTRL0						0x098
-#define DSIPHY_CMN_LANE_CTRL1						0x09C
-#define DSIPHY_CMN_LANE_CTRL2						0x0A0
-#define DSIPHY_CMN_LANE_CTRL3						0x0A4
-#define DSIPHY_CMN_LANE_CTRL4						0x0A8
-#define DSIPHY_CMN_TIMING_CTRL_0					0x0AC
-#define DSIPHY_CMN_TIMING_CTRL_1					0x0B0
-#define DSIPHY_CMN_TIMING_CTRL_2					0x0B4
-#define DSIPHY_CMN_TIMING_CTRL_3					0x0B8
-#define DSIPHY_CMN_TIMING_CTRL_4					0x0BC
-#define DSIPHY_CMN_TIMING_CTRL_5					0x0C0
-#define DSIPHY_CMN_TIMING_CTRL_6					0x0C4
-#define DSIPHY_CMN_TIMING_CTRL_7					0x0C8
-#define DSIPHY_CMN_TIMING_CTRL_8					0x0CC
-#define DSIPHY_CMN_TIMING_CTRL_9					0x0D0
-#define DSIPHY_CMN_TIMING_CTRL_10					0x0D4
-#define DSIPHY_CMN_TIMING_CTRL_11					0x0D8
-#define DSIPHY_CMN_PHY_STATUS						0x0EC
-#define DSIPHY_CMN_LANE_STATUS0						0x0F4
-#define DSIPHY_CMN_LANE_STATUS1						0x0F8
-
-
-/* n = 0..3 for data lanes and n = 4 for clock lane */
-#define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
-#define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
-#define DSIPHY_LNX_CFG2(n)                         (0x208 + (0x80 * (n)))
-#define DSIPHY_LNX_CFG3(n)                         (0x20C + (0x80 * (n)))
-#define DSIPHY_LNX_TEST_DATAPATH(n)                (0x210 + (0x80 * (n)))
-#define DSIPHY_LNX_PIN_SWAP(n)                     (0x214 + (0x80 * (n)))
-#define DSIPHY_LNX_HSTX_STR_CTRL(n)                (0x218 + (0x80 * (n)))
-#define DSIPHY_LNX_OFFSET_TOP_CTRL(n)              (0x21C + (0x80 * (n)))
-#define DSIPHY_LNX_OFFSET_BOT_CTRL(n)              (0x220 + (0x80 * (n)))
-#define DSIPHY_LNX_LPTX_STR_CTRL(n)                (0x224 + (0x80 * (n)))
-#define DSIPHY_LNX_LPRX_CTRL(n)                    (0x228 + (0x80 * (n)))
-#define DSIPHY_LNX_TX_DCTRL(n)                     (0x22C + (0x80 * (n)))
-
-/**
- * regulator_enable() - enable regulators for DSI PHY
- * @phy:      Pointer to DSI PHY hardware object.
- * @reg_cfg:  Regulator configuration for all DSI lanes.
- */
-void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy,
-				      struct dsi_phy_per_lane_cfgs *reg_cfg)
-{
-	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
-	/* Nothing to be done for DSI PHY regulator enable */
-}
-
-/**
- * regulator_disable() - disable regulators
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy)
-{
-	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
-	/* Nothing to be done for DSI PHY regulator disable */
-}
-
-void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
-{
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
-	/* ensure that the FIFO is off */
-	wmb();
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
-	/* ensure that the FIFO is toggled back on */
-	wmb();
-}
-
-static int dsi_phy_hw_v3_0_is_pll_on(struct dsi_phy_hw *phy)
-{
-	u32 data = 0;
-
-	data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
-	mb(); /*make sure read happened */
-	return (data & BIT(0));
-}
-
-static void dsi_phy_hw_v3_0_config_lpcdrx(struct dsi_phy_hw *phy,
-	struct dsi_phy_cfg *cfg, bool enable)
-{
-	int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map,
-			DSI_LOGICAL_LANE_0);
-	/*
-	 * LPRX and CDRX need to enabled only for physical data lane
-	 * corresponding to the logical data lane 0
-	 */
-
-	if (enable)
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0),
-			cfg->strength.lane[phy_lane_0][1]);
-	else
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
-}
-
-static void dsi_phy_hw_v3_0_lane_swap_config(struct dsi_phy_hw *phy,
-		struct dsi_lane_map *lane_map)
-{
-	DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
-	DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
-}
-
-static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	int i;
-	u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};
-
-	/* Strength ctrl settings */
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		DSI_W32(phy, DSIPHY_LNX_LPTX_STR_CTRL(i),
-			cfg->strength.lane[i][0]);
-		/*
-		 * Disable LPRX and CDRX for all lanes. And later on, it will
-		 * be only enabled for the physical data lane corresponding
-		 * to the logical data lane 0
-		 */
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
-		DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
-		DSI_W32(phy, DSIPHY_LNX_HSTX_STR_CTRL(i), 0x88);
-	}
-	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);
-
-	/* other settings */
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
-		DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
-		DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
-		DSI_W32(phy, DSIPHY_LNX_CFG3(i), cfg->lanecfg.lane[i][3]);
-		DSI_W32(phy, DSIPHY_LNX_OFFSET_TOP_CTRL(i), 0x0);
-		DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
-		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
-	}
-}
-
-void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
-{
-	u32 reg;
-
-	pr_debug("enable=%s\n", enable ? "true" : "false");
-
-	/*
-	 * DSI PHY lane clamps, also referred to as PHY FreezeIO is
-	 * enalbed by default as part of the initialization sequnce.
-	 * This would get triggered anytime the chip FreezeIO is asserted.
-	 */
-	if (enable)
-		return;
-
-	/*
-	 * Toggle BIT 0 to exlplictly release PHY freeze I/0 to disable
-	 * the clamps.
-	 */
-	reg = DSI_R32(phy, DSIPHY_LNX_TX_DCTRL(3));
-	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg | BIT(0));
-	wmb(); /* Ensure that the freezeio bit is toggled */
-	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg & ~BIT(0));
-	wmb(); /* Ensure that the freezeio bit is toggled */
-}
-
-/**
- * enable() - Enable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- * @cfg:      Per lane configurations for timing, strength and lane
- *	      configurations.
- */
-void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	int rc = 0;
-	u32 status;
-	u32 const delay_us = 5;
-	u32 const timeout_us = 1000;
-	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
-	u32 data;
-
-	if (dsi_phy_hw_v3_0_is_pll_on(phy))
-		pr_warn("PLL turned on before configuring PHY\n");
-
-	/* wait for REFGEN READY */
-	rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
-		status, (status & BIT(0)), delay_us, timeout_us);
-	if (rc) {
-		pr_err("Ref gen not ready. Aborting\n");
-		return;
-	}
-
-	/* de-assert digital and pll power down */
-	data = BIT(6) | BIT(5);
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
-
-	/* Assert PLL core reset */
-	DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
-
-	/* turn off resync FIFO */
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
-
-	/* Select MS1 byte-clk */
-	DSI_W32(phy, DSIPHY_CMN_GLBL_CTRL, 0x10);
-
-	/* Enable LDO */
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);
-
-	/* Configure PHY lane swap */
-	dsi_phy_hw_v3_0_lane_swap_config(phy, &cfg->lane_map);
-
-	/* DSI PHY timings */
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v3[0]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v3[1]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v3[2]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v3[3]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v3[4]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v3[5]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v3[6]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v3[7]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v3[8]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v3[9]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v3[10]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v3[11]);
-
-	/* Remove power down from all blocks */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
-
-	/*power up lanes */
-	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
-	/* TODO: only power up lanes that are used */
-	data |= 0x1F;
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);
-
-	/* Select full-rate mode */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);
-
-	switch (cfg->pll_source) {
-	case DSI_PLL_SOURCE_STANDALONE:
-	case DSI_PLL_SOURCE_NATIVE:
-		data = 0x0; /* internal PLL */
-		break;
-	case DSI_PLL_SOURCE_NON_NATIVE:
-		data = 0x1; /* external PLL */
-		break;
-	default:
-		break;
-	}
-	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
-
-	/* DSI lane settings */
-	dsi_phy_hw_v3_0_lane_settings(phy, cfg);
-
-	pr_debug("[DSI_%d]Phy enabled\n", phy->index);
-}
-
-/**
- * disable() - Disable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	u32 data = 0;
-
-	if (dsi_phy_hw_v3_0_is_pll_on(phy))
-		pr_warn("Turning OFF PHY while PLL is on\n");
-
-	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);
-
-	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
-	/* disable all lanes */
-	data &= ~0x1F;
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);
-
-	/* Turn off all PHY blocks */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
-	/* make sure phy is turned off */
-	wmb();
-	pr_debug("[DSI_%d]Phy disabled\n", phy->index);
-}
-
-int dsi_phy_hw_v3_0_wait_for_lane_idle(
-		struct dsi_phy_hw *phy, u32 lanes)
-{
-	int rc = 0, val = 0;
-	u32 stop_state_mask = 0;
-	u32 const sleep_us = 10;
-	u32 const timeout_us = 100;
-
-	stop_state_mask = BIT(4); /* clock lane */
-	if (lanes & DSI_DATA_LANE_0)
-		stop_state_mask |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		stop_state_mask |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		stop_state_mask |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		stop_state_mask |= BIT(3);
-
-	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
-		__func__, stop_state_mask);
-	rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
-				((val & stop_state_mask) == stop_state_mask),
-				sleep_us, timeout_us);
-	if (rc) {
-		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
-			__func__, val);
-		return rc;
-	}
-
-	return 0;
-}
-
-void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
-		struct dsi_phy_cfg *cfg, u32 lanes)
-{
-	u32 reg = 0;
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg = BIT(4);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(3);
-
-	/*
-	 * ULPS entry request. Wait for short time to make sure
-	 * that the lanes enter ULPS. Recommended as per HPG.
-	 */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
-	usleep_range(100, 110);
-
-	/* disable LPRX and CDRX */
-	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);
-	/* disable lane LDOs */
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x19);
-	pr_debug("[DSI_PHY%d] ULPS requested for lanes 0x%x\n", phy->index,
-		 lanes);
-}
-
-int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy)
-{
-	int ret = 0, loop = 10, u_dly = 200;
-	u32 ln_status = 0;
-
-	while ((ln_status != 0x1f) && loop) {
-		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
-		wmb(); /* ensure register is committed */
-		loop--;
-		udelay(u_dly);
-		ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
-		pr_debug("trial no: %d\n", loop);
-	}
-
-	if (!loop)
-		pr_debug("could not reset phy lanes\n");
-
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
-	wmb(); /* ensure register is committed */
-
-	return ret;
-}
-
-void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes)
-{
-	u32 reg = 0;
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg = BIT(4);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(3);
-
-	/* enable lane LDOs */
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);
-	/* enable LPRX and CDRX */
-	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);
-
-	/* ULPS exit request */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
-	usleep_range(1000, 1010);
-
-	/* Clear ULPS request flags on all lanes */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
-	/* Clear ULPS exit flags on all lanes */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);
-
-	/*
-	 * Sometimes when exiting ULPS, it is possible that some DSI
-	 * lanes are not in the stop state which could lead to DSI
-	 * commands not going through. To avoid this, force the lanes
-	 * to be in stop state.
-	 */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
-	usleep_range(100, 110);
-}
-
-u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
-{
-	u32 lanes = 0;
-
-	lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
-	pr_debug("[DSI_PHY%d] lanes in ulps = 0x%x\n", phy->index, lanes);
-	return lanes;
-}
-
-bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
-{
-	if (lanes & ulps_lanes)
-		return false;
-
-	return true;
-}
-
-int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size)
-{
-	int i = 0;
-
-	if (size != DSI_PHY_TIMING_V3_SIZE) {
-		pr_err("Unexpected timing array size %d\n", size);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < size; i++)
-		timing_cfg->lane_v3[i] = timing_val[i];
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
deleted file mode 100644
index 8df20ae..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
+++ /dev/null
@@ -1,476 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-hw-v4: %s:" fmt, __func__
-#include <linux/math64.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include "dsi_hw.h"
-#include "dsi_phy_hw.h"
-#include "dsi_catalog.h"
-
-#define DSIPHY_CMN_REVISION_ID0						0x000
-#define DSIPHY_CMN_REVISION_ID1						0x004
-#define DSIPHY_CMN_REVISION_ID2						0x008
-#define DSIPHY_CMN_REVISION_ID3						0x00C
-#define DSIPHY_CMN_CLK_CFG0						0x010
-#define DSIPHY_CMN_CLK_CFG1						0x014
-#define DSIPHY_CMN_GLBL_CTRL						0x018
-#define DSIPHY_CMN_RBUF_CTRL						0x01C
-#define DSIPHY_CMN_VREG_CTRL_0						0x020
-#define DSIPHY_CMN_CTRL_0						0x024
-#define DSIPHY_CMN_CTRL_1						0x028
-#define DSIPHY_CMN_CTRL_2						0x02C
-#define DSIPHY_CMN_CTRL_3						0x030
-#define DSIPHY_CMN_LANE_CFG0						0x034
-#define DSIPHY_CMN_LANE_CFG1						0x038
-#define DSIPHY_CMN_PLL_CNTRL						0x03C
-#define DSIPHY_CMN_DPHY_SOT						0x040
-#define DSIPHY_CMN_LANE_CTRL0						0x0A0
-#define DSIPHY_CMN_LANE_CTRL1						0x0A4
-#define DSIPHY_CMN_LANE_CTRL2						0x0A8
-#define DSIPHY_CMN_LANE_CTRL3						0x0AC
-#define DSIPHY_CMN_LANE_CTRL4						0x0B0
-#define DSIPHY_CMN_TIMING_CTRL_0					0x0B4
-#define DSIPHY_CMN_TIMING_CTRL_1					0x0B8
-#define DSIPHY_CMN_TIMING_CTRL_2					0x0Bc
-#define DSIPHY_CMN_TIMING_CTRL_3					0x0C0
-#define DSIPHY_CMN_TIMING_CTRL_4					0x0C4
-#define DSIPHY_CMN_TIMING_CTRL_5					0x0C8
-#define DSIPHY_CMN_TIMING_CTRL_6					0x0CC
-#define DSIPHY_CMN_TIMING_CTRL_7					0x0D0
-#define DSIPHY_CMN_TIMING_CTRL_8					0x0D4
-#define DSIPHY_CMN_TIMING_CTRL_9					0x0D8
-#define DSIPHY_CMN_TIMING_CTRL_10					0x0DC
-#define DSIPHY_CMN_TIMING_CTRL_11					0x0E0
-#define DSIPHY_CMN_TIMING_CTRL_12					0x0E4
-#define DSIPHY_CMN_TIMING_CTRL_13					0x0E8
-#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0					0x0EC
-#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_1					0x0F0
-#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL				0x0F4
-#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL				0x0F8
-#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL				0x0FC
-#define DSIPHY_CMN_GLBL_LPTX_STR_CTRL					0x100
-#define DSIPHY_CMN_GLBL_PEMPH_CTRL_0					0x104
-#define DSIPHY_CMN_GLBL_PEMPH_CTRL_1					0x108
-#define DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL				0x10C
-#define DSIPHY_CMN_VREG_CTRL_1						0x110
-#define DSIPHY_CMN_CTRL_4						0x114
-#define DSIPHY_CMN_PHY_STATUS						0x140
-#define DSIPHY_CMN_LANE_STATUS0						0x148
-#define DSIPHY_CMN_LANE_STATUS1						0x14C
-
-
-/* n = 0..3 for data lanes and n = 4 for clock lane */
-#define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
-#define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
-#define DSIPHY_LNX_CFG2(n)                         (0x208 + (0x80 * (n)))
-#define DSIPHY_LNX_TEST_DATAPATH(n)                (0x20C + (0x80 * (n)))
-#define DSIPHY_LNX_PIN_SWAP(n)                     (0x210 + (0x80 * (n)))
-#define DSIPHY_LNX_LPRX_CTRL(n)                    (0x214 + (0x80 * (n)))
-#define DSIPHY_LNX_TX_DCTRL(n)                     (0x218 + (0x80 * (n)))
-
-static int dsi_phy_hw_v4_0_is_pll_on(struct dsi_phy_hw *phy)
-{
-	u32 data = 0;
-
-	data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
-	mb(); /*make sure read happened */
-	return (data & BIT(0));
-}
-
-static void dsi_phy_hw_v4_0_config_lpcdrx(struct dsi_phy_hw *phy,
-	struct dsi_phy_cfg *cfg, bool enable)
-{
-	int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map,
-			DSI_LOGICAL_LANE_0);
-	/*
-	 * LPRX and CDRX need to enabled only for physical data lane
-	 * corresponding to the logical data lane 0
-	 */
-
-	if (enable)
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0),
-			cfg->strength.lane[phy_lane_0][1]);
-	else
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
-}
-
-static void dsi_phy_hw_v4_0_lane_swap_config(struct dsi_phy_hw *phy,
-		struct dsi_lane_map *lane_map)
-{
-	DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
-	DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
-		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
-}
-
-static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	int i;
-	u8 tx_dctrl_v4[] = {0x00, 0x00, 0x00, 0x04, 0x01};
-	u8 tx_dctrl_v4_1[] = {0x40, 0x40, 0x40, 0x46, 0x41};
-	u8 *tx_dctrl;
-
-	if (phy->version == DSI_PHY_VERSION_4_1)
-		tx_dctrl = &tx_dctrl_v4_1[0];
-	else
-		tx_dctrl = &tx_dctrl_v4[0];
-
-	/* Strength ctrl settings */
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		/*
-		 * Disable LPRX and CDRX for all lanes. And later on, it will
-		 * be only enabled for the physical data lane corresponding
-		 * to the logical data lane 0
-		 */
-		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
-		DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
-	}
-	dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, true);
-
-	/* other settings */
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
-		DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
-		DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
-		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
-	}
-
-	if (cfg->force_clk_lane_hs) {
-		u32 reg = DSI_R32(phy, DSIPHY_CMN_LANE_CTRL1);
-
-		reg |= BIT(5) | BIT(6);
-		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
-	}
-}
-
-/**
- * enable() - Enable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- * @cfg:      Per lane configurations for timing, strength and lane
- *	      configurations.
- */
-void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	int rc = 0;
-	u32 status;
-	u32 const delay_us = 5;
-	u32 const timeout_us = 1000;
-	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
-	u32 data;
-	bool less_than_1500_mhz = false;
-	u32 vreg_ctrl_0 = 0;
-	u32 glbl_str_swi_cal_sel_ctrl = 0;
-	u32 glbl_hstx_str_ctrl_0 = 0;
-
-	if (dsi_phy_hw_v4_0_is_pll_on(phy))
-		pr_warn("PLL turned on before configuring PHY\n");
-
-	/* wait for REFGEN READY */
-	rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
-		status, (status & BIT(0)), delay_us, timeout_us);
-	if (rc) {
-		pr_err("Ref gen not ready. Aborting\n");
-		return;
-	}
-
-	if (phy->version == DSI_PHY_VERSION_4_1) {
-		vreg_ctrl_0 = 0x58;
-		glbl_str_swi_cal_sel_ctrl = 0x00;
-		glbl_hstx_str_ctrl_0 = 0x88;
-	} else {
-		/* Alter PHY configurations if data rate less than 1.5GHZ*/
-		if (cfg->bit_clk_rate_hz < 1500000000)
-			less_than_1500_mhz = true;
-		vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
-		glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
-		glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
-	}
-
-	/* de-assert digital and pll power down */
-	data = BIT(6) | BIT(5);
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
-
-	/* Assert PLL core reset */
-	DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
-
-	/* turn off resync FIFO */
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
-
-	/* Configure PHY lane swap */
-	dsi_phy_hw_v4_0_lane_swap_config(phy, &cfg->lane_map);
-
-	/* Enable LDO */
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x5c);
-	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x00);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
-					glbl_str_swi_cal_sel_ctrl);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL, 0x03);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL, 0x3c);
-	DSI_W32(phy, DSIPHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
-
-	/* Remove power down from all blocks */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
-
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);
-
-	/* Select full-rate mode */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);
-
-	switch (cfg->pll_source) {
-	case DSI_PLL_SOURCE_STANDALONE:
-	case DSI_PLL_SOURCE_NATIVE:
-		data = 0x0; /* internal PLL */
-		break;
-	case DSI_PLL_SOURCE_NON_NATIVE:
-		data = 0x1; /* external PLL */
-		break;
-	default:
-		break;
-	}
-	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
-
-	/* DSI PHY timings */
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v4[1]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v4[2]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v4[3]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_12, timing->lane_v4[12]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_13, timing->lane_v4[13]);
-
-	/* DSI lane settings */
-	dsi_phy_hw_v4_0_lane_settings(phy, cfg);
-
-	pr_debug("[DSI_%d]Phy enabled\n", phy->index);
-}
-
-/**
- * disable() - Disable PHY hardware
- * @phy:      Pointer to DSI PHY hardware object.
- */
-void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy,
-			    struct dsi_phy_cfg *cfg)
-{
-	u32 data = 0;
-
-	if (dsi_phy_hw_v4_0_is_pll_on(phy))
-		pr_warn("Turning OFF PHY while PLL is on\n");
-
-	dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, false);
-
-	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
-	/* disable all lanes */
-	data &= ~0x1F;
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);
-
-	/* Turn off all PHY blocks */
-	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
-	/* make sure phy is turned off */
-	wmb();
-	pr_debug("[DSI_%d]Phy disabled\n", phy->index);
-}
-
-void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
-{
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
-	/* ensure that the FIFO is off */
-	wmb();
-	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
-	/* ensure that the FIFO is toggled back on */
-	wmb();
-}
-
-void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy)
-{
-	u32 data = 0;
-
-	/*Turning off CLK_EN_SEL after retime buffer sync */
-	data = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
-	data &= ~BIT(4);
-	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, data);
-	/* ensure that clk_en_sel bit is turned off */
-	wmb();
-}
-
-int dsi_phy_hw_v4_0_wait_for_lane_idle(
-		struct dsi_phy_hw *phy, u32 lanes)
-{
-	int rc = 0, val = 0;
-	u32 stop_state_mask = 0;
-	u32 const sleep_us = 10;
-	u32 const timeout_us = 100;
-
-	stop_state_mask = BIT(4); /* clock lane */
-	if (lanes & DSI_DATA_LANE_0)
-		stop_state_mask |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		stop_state_mask |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		stop_state_mask |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		stop_state_mask |= BIT(3);
-
-	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
-		__func__, stop_state_mask);
-	rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
-				((val & stop_state_mask) == stop_state_mask),
-				sleep_us, timeout_us);
-	if (rc) {
-		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
-			__func__, val);
-		return rc;
-	}
-
-	return 0;
-}
-
-void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy,
-		struct dsi_phy_cfg *cfg, u32 lanes)
-{
-	u32 reg = 0;
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg = BIT(4);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(3);
-
-	if (cfg->force_clk_lane_hs)
-		reg |= BIT(5) | BIT(6);
-
-	/*
-	 * ULPS entry request. Wait for short time to make sure
-	 * that the lanes enter ULPS. Recommended as per HPG.
-	 */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
-	usleep_range(100, 110);
-
-	/* disable LPRX and CDRX */
-	dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, false);
-
-	pr_debug("[DSI_PHY%d] ULPS requested for lanes 0x%x\n", phy->index,
-		 lanes);
-}
-
-int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy)
-{
-	int ret = 0, loop = 10, u_dly = 200;
-	u32 ln_status = 0;
-
-	while ((ln_status != 0x1f) && loop) {
-		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
-		wmb(); /* ensure register is committed */
-		loop--;
-		udelay(u_dly);
-		ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
-		pr_debug("trial no: %d\n", loop);
-	}
-
-	if (!loop)
-		pr_debug("could not reset phy lanes\n");
-
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
-	wmb(); /* ensure register is committed */
-
-	return ret;
-}
-
-void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy,
-			struct dsi_phy_cfg *cfg, u32 lanes)
-{
-	u32 reg = 0;
-
-	if (lanes & DSI_CLOCK_LANE)
-		reg = BIT(4);
-	if (lanes & DSI_DATA_LANE_0)
-		reg |= BIT(0);
-	if (lanes & DSI_DATA_LANE_1)
-		reg |= BIT(1);
-	if (lanes & DSI_DATA_LANE_2)
-		reg |= BIT(2);
-	if (lanes & DSI_DATA_LANE_3)
-		reg |= BIT(3);
-
-	/* enable LPRX and CDRX */
-	dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, true);
-
-	/* ULPS exit request */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
-	usleep_range(1000, 1010);
-
-	/* Clear ULPS request flags on all lanes */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
-	/* Clear ULPS exit flags on all lanes */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);
-
-	/*
-	 * Sometimes when exiting ULPS, it is possible that some DSI
-	 * lanes are not in the stop state which could lead to DSI
-	 * commands not going through. To avoid this, force the lanes
-	 * to be in stop state.
-	 */
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
-	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
-	usleep_range(100, 110);
-
-	if (cfg->force_clk_lane_hs) {
-		reg = BIT(5) | BIT(6);
-		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
-	}
-}
-
-u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
-{
-	u32 lanes = 0;
-
-	lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
-	pr_debug("[DSI_PHY%d] lanes in ulps = 0x%x\n", phy->index, lanes);
-	return lanes;
-}
-
-bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
-{
-	if (lanes & ulps_lanes)
-		return false;
-
-	return true;
-}
-
-int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
-		u32 *timing_val, u32 size)
-{
-	int i = 0;
-
-	if (size != DSI_PHY_TIMING_V4_SIZE) {
-		pr_err("Unexpected timing array size %d\n", size);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < size; i++)
-		timing_cfg->lane_v4[i] = timing_val[i];
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
deleted file mode 100644
index 70e7ee3..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-timing:" fmt
-
-#include "dsi_phy_timing_calc.h"
-
-static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
-	16, 18, 18, 24, 3, 8, 12 };
-
-static int dsi_phy_cmn_validate_and_set(struct timing_entry *t,
-	char const *t_name)
-{
-	if (t->rec & 0xffffff00) {
-		/* Output value can only be 8 bits */
-		pr_err("Incorrect %s rec value - %d\n", t_name, t->rec);
-		return -EINVAL;
-	}
-	t->reg_value = t->rec;
-	return 0;
-}
-
-/**
- * calc_clk_prepare - calculates prepare timing params for clk lane.
- */
-static int calc_clk_prepare(struct dsi_phy_hw *phy,
-				struct phy_clk_params *clk_params,
-			    struct phy_timing_desc *desc,
-			    s32 *actual_frac,
-			    s64 *actual_intermediate)
-{
-	u64 multiplier = BIT(20);
-	struct timing_entry *t = &desc->clk_prepare;
-	int rc = 0;
-	u64 dividend, temp, temp_multiple;
-	s32 frac = 0;
-	s64 intermediate;
-	s64 clk_prep_actual;
-
-	dividend = ((t->rec_max - t->rec_min) *
-		clk_params->clk_prep_buf * multiplier);
-	temp  = roundup(div_s64(dividend, 100), multiplier);
-	temp += (t->rec_min * multiplier);
-	t->rec = div_s64(temp, multiplier);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "clk_prepare");
-	if (rc)
-		goto error;
-
-	/* calculate theoretical value */
-	temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
-			 * multiplier;
-	intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
-	div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
-	clk_prep_actual = div_s64((intermediate + frac), multiplier);
-
-	pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
-	pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
-
-	*actual_frac = frac;
-	*actual_intermediate = intermediate;
-
-error:
-	return rc;
-}
-
-/**
- * calc_clk_zero - calculates zero timing params for clk lane.
- */
-static int calc_clk_zero(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc,
-			s32 actual_frac, s64 actual_intermediate)
-{
-	u64 const multiplier = BIT(20);
-	int rc = 0;
-	struct timing_entry *t = &desc->clk_zero;
-	s64 mipi_min, rec_temp1;
-	struct phy_timing_ops *ops = phy->ops.timing_ops;
-
-	mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
-	t->mipi_min = div_s64(mipi_min, multiplier);
-
-	rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
-			    clk_params->tlpx_numer_ns);
-
-	if (ops->calc_clk_zero) {
-		t->rec_min = ops->calc_clk_zero(rec_temp1, multiplier);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
-
-	t->rec = DIV_ROUND_UP((((t->rec_max - t->rec_min) *
-		clk_params->clk_zero_buf) + (t->rec_min * 100)), 100);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "clk_zero");
-	if (rc)
-		goto error;
-
-
-	pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-error:
-	return rc;
-}
-
-/**
- * calc_clk_trail - calculates prepare trail params for clk lane.
- */
-static int calc_clk_trail(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc,
-			s64 *teot_clk_lane)
-{
-	u64 const multiplier = BIT(20);
-	int rc = 0;
-	struct timing_entry *t = &desc->clk_trail;
-	u64 temp_multiple;
-	s32 frac;
-	s64 mipi_max_tr, rec_temp1, mipi_max;
-	s64 teot_clk_lane1;
-	struct phy_timing_ops *ops = phy->ops.timing_ops;
-
-	temp_multiple = div_s64(
-			(12 * multiplier * clk_params->tlpx_numer_ns),
-			clk_params->bitclk_mbps);
-	div_s64_rem(temp_multiple, multiplier, &frac);
-
-	mipi_max_tr = ((105 * multiplier) +
-		       (temp_multiple + frac));
-	teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
-
-	mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
-	t->mipi_max = div_s64(mipi_max, multiplier);
-
-	temp_multiple = div_s64(
-			(t->mipi_min * multiplier * clk_params->bitclk_mbps),
-			clk_params->tlpx_numer_ns);
-
-	div_s64_rem(temp_multiple, multiplier, &frac);
-	if (ops->calc_clk_trail_rec_min) {
-		t->rec_min = ops->calc_clk_trail_rec_min(temp_multiple,
-			frac, multiplier);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	/* recommended max */
-	rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
-			    clk_params->tlpx_numer_ns);
-	if (ops->calc_clk_trail_rec_max) {
-		t->rec_max = ops->calc_clk_trail_rec_max(rec_temp1, multiplier);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	t->rec = DIV_ROUND_UP(
-		(((t->rec_max - t->rec_min) * clk_params->clk_trail_buf) +
-		 (t->rec_min * 100)), 100);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "clk_trail");
-	if (rc)
-		goto error;
-
-	*teot_clk_lane = teot_clk_lane1;
-	pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-
-}
-
-/**
- * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
- */
-static int calc_hs_prepare(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc,
-			u64 *temp_mul)
-{
-	u64 multiplier = BIT(20);
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_prepare;
-	u64 temp_multiple, dividend, temp;
-	s32 frac;
-	s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
-	u32 low_clk_multiplier = 0;
-
-	if (clk_params->bitclk_mbps <= 120)
-		low_clk_multiplier = 2;
-	/* mipi min */
-	temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
-				clk_params->bitclk_mbps);
-	div_s64_rem(temp_multiple, multiplier, &frac);
-	mipi_min = (40 * multiplier) + (temp_multiple + frac);
-	t->mipi_min = div_s64(mipi_min, multiplier);
-
-	/* mipi_max */
-	temp_multiple = div_s64(
-			(6 * multiplier * clk_params->tlpx_numer_ns),
-			clk_params->bitclk_mbps);
-	div_s64_rem(temp_multiple, multiplier, &frac);
-	mipi_max = (85 * multiplier) + temp_multiple;
-	t->mipi_max = div_s64(mipi_max, multiplier);
-
-	/* recommended min */
-	temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
-				clk_params->tlpx_numer_ns);
-	temp_multiple -= (low_clk_multiplier * multiplier);
-	div_s64_rem(temp_multiple, multiplier, &frac);
-	rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
-	t->rec_min = div_s64(rec_temp1, multiplier);
-
-	/* recommended max */
-	temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
-				clk_params->tlpx_numer_ns);
-	temp_multiple -= (low_clk_multiplier * multiplier);
-	div_s64_rem(temp_multiple, multiplier, &frac);
-	rec_temp2 = rounddown((temp_multiple / 8), multiplier);
-	t->rec_max = div_s64(rec_temp2, multiplier);
-
-	/* register value */
-	dividend = ((rec_temp2 - rec_temp1) * clk_params->hs_prep_buf);
-	temp = roundup(div_u64(dividend, 100), multiplier);
-	t->rec = div_s64((temp + rec_temp1), multiplier);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_prepare");
-	if (rc)
-		goto error;
-
-	temp_multiple = div_s64(
-			(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
-			clk_params->bitclk_mbps);
-
-	*temp_mul = temp_multiple;
-	pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-error:
-	return rc;
-}
-
-/**
- * calc_hs_zero - calculates zero timing params for data lanes in HS.
- */
-static int calc_hs_zero(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc,
-			u64 temp_multiple)
-{
-	u64 const multiplier = BIT(20);
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_zero;
-	s64 rec_temp1, mipi_min;
-	struct phy_timing_ops *ops = phy->ops.timing_ops;
-
-	mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
-			   clk_params->bitclk_mbps);
-	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
-	t->mipi_min = div_s64(rec_temp1, multiplier);
-
-	/* recommended min */
-	rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
-			    clk_params->tlpx_numer_ns);
-
-	if (ops->calc_hs_zero) {
-		t->rec_min = ops->calc_hs_zero(rec_temp1, multiplier);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
-	t->rec = DIV_ROUND_UP(
-			(((t->rec_max - t->rec_min) * clk_params->hs_zero_buf) +
-			 (t->rec_min * 100)),
-			100);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_zero");
-	if (rc)
-		goto error;
-
-	pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-}
-
-/**
- * calc_hs_trail - calculates trail timing params for data lanes in HS.
- */
-static int calc_hs_trail(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc,
-			u64 teot_clk_lane)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_trail;
-	s64 rec_temp1;
-	struct phy_timing_ops *ops = phy->ops.timing_ops;
-
-	t->mipi_min = 60 +
-			mult_frac(clk_params->tlpx_numer_ns, 4,
-				  clk_params->bitclk_mbps);
-
-	t->mipi_max = teot_clk_lane - clk_params->treot_ns;
-
-	if (ops->calc_hs_trail) {
-		ops->calc_hs_trail(clk_params, desc);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	rec_temp1 = DIV_ROUND_UP(
-			((t->rec_max - t->rec_min) * clk_params->hs_trail_buf),
-			100);
-	t->rec = rec_temp1 + t->rec_min;
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_trail");
-	if (rc)
-		goto error;
-
-	pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-}
-
-/**
- * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
- */
-static int calc_hs_rqst(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_rqst;
-
-	t->rec = DIV_ROUND_UP(
-		((t->mipi_min * clk_params->bitclk_mbps) -
-		 (8 * clk_params->tlpx_numer_ns)),
-		(8 * clk_params->tlpx_numer_ns));
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_rqst");
-	if (rc)
-		goto error;
-
-	pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-}
-
-/**
- * calc_hs_exit - calculates exit timing params for data lanes in HS.
- */
-static int calc_hs_exit(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_exit;
-
-	t->rec_min = (DIV_ROUND_UP(
-			(t->mipi_min * clk_params->bitclk_mbps),
-			(8 * clk_params->tlpx_numer_ns)) - 1);
-
-	t->rec = DIV_ROUND_UP(
-		(((t->rec_max - t->rec_min) * clk_params->hs_exit_buf) +
-		 (t->rec_min * 100)), 100);
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_exit");
-	if (rc)
-		goto error;
-
-
-	pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-}
-
-/**
- * calc_hs_rqst_clk - calculates rqst timing params for clock lane..
- */
-static int calc_hs_rqst_clk(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->hs_rqst_clk;
-
-	t->rec = DIV_ROUND_UP(
-		((t->mipi_min * clk_params->bitclk_mbps) -
-		 (8 * clk_params->tlpx_numer_ns)),
-		(8 * clk_params->tlpx_numer_ns));
-
-	rc = dsi_phy_cmn_validate_and_set(t, "hs_rqst_clk");
-	if (rc)
-		goto error;
-
-	pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-
-error:
-	return rc;
-}
-
-/**
- * cal_clk_pulse_time - calculates clk pulse time in nsec
- */
-static s64 cal_clk_pulse_time(u32 inp1, u32 inp2, u32 bitclk_mbps)
-{
-	u64 const multiplier = BIT(20);
-	u64 clk_multiple;
-	s32 frac;
-	s64 temp, result;
-
-	clk_multiple = div_s64((inp1 * multiplier * 1000), bitclk_mbps);
-	div_s64_rem(clk_multiple, multiplier, &frac);
-	temp = (inp2 * multiplier) + (clk_multiple + frac);
-	result = div_s64(temp, multiplier);
-
-	return result;
-}
-
-/**
- * calc_clk_post - calculates clk_post timing params for data lanes in HS.
- */
-static int calc_clk_post(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->clk_post;
-	s64 rec_cal1, rec_cal2;
-	u32 input1;
-
-	/* mipi min */
-	t->mipi_min = cal_clk_pulse_time(52, 60, clk_params->bitclk_mbps);
-
-	/* recommended min
-	 * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
-	 */
-	rec_cal1 = cal_clk_pulse_time(16, 0, clk_params->bitclk_mbps);
-
-	input1 = (desc->hs_trail.reg_value + 1) * 8;
-	rec_cal2 = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
-	rec_cal2 += t->mipi_min;
-
-	t->rec_min = div_s64(rec_cal2, rec_cal1) - 1;
-
-	/* recommended max */
-	t->rec_max = 255;
-
-	/* register value */
-	rec_cal1 = (t->rec_max - t->rec_min);
-	rec_cal2 = clk_params->clk_post_buf/100;
-	t->rec = rec_cal1 * rec_cal2 + t->rec_min;
-
-	rc = dsi_phy_cmn_validate_and_set(t, "clk_post");
-	if (rc)
-		goto error;
-
-	pr_debug("CLK_POST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-error:
-	return rc;
-}
-
-/**
- * calc_clk_pre - calculates clk_pre timing params for data lanes in HS.
- */
-static int calc_clk_pre(struct dsi_phy_hw *phy,
-			struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	struct timing_entry *t = &desc->clk_pre;
-	s64 rec_temp1;
-	s64 clk_prepare, clk_zero, clk_16;
-	u32 input1;
-	s64 rec_cal1, rec_cal2;
-
-	/* mipi min */
-	t->mipi_min = cal_clk_pulse_time(8, 0, clk_params->bitclk_mbps);
-
-	/* recommended min
-	 * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
-	 * val2 = (16 * bit_clk_ns)
-	 * final = roundup(val1/val2, 0) - 1
-	 */
-	input1 = desc->clk_prepare.reg_value * 8;
-	clk_prepare = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
-
-	input1 = (desc->clk_zero.reg_value + 1) * 8;
-	clk_zero = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
-
-	clk_16 = cal_clk_pulse_time(16, 0, clk_params->bitclk_mbps);
-
-	rec_temp1 = 52 + clk_prepare + clk_zero + 54;
-	t->rec_min = div_s64(rec_temp1, clk_16) - 1;
-
-	/* recommended max */
-	t->rec_max = 255;
-
-	/* register value */
-	rec_cal1 = (t->rec_max - t->rec_min);
-	rec_cal2 = clk_params->clk_pre_buf/100;
-	t->rec = rec_cal1 * rec_cal2 + t->rec_min;
-
-	rc = dsi_phy_cmn_validate_and_set(t, "clk_pre");
-	if (rc)
-		goto error;
-
-	pr_debug("CLK_PRE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
-		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
-		 t->reg_value);
-error:
-	return rc;
-}
-
-/**
- * dsi_phy_calc_timing_params - calculates timing paramets for a given bit clock
- */
-static int dsi_phy_cmn_calc_timing_params(struct dsi_phy_hw *phy,
-	struct phy_clk_params *clk_params, struct phy_timing_desc *desc)
-{
-	int rc = 0;
-	s32 actual_frac = 0;
-	s64 actual_intermediate = 0;
-	u64 temp_multiple;
-	s64 teot_clk_lane;
-
-	rc = calc_clk_prepare(phy, clk_params, desc, &actual_frac,
-			      &actual_intermediate);
-	if (rc) {
-		pr_err("clk_prepare calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_clk_zero(phy, clk_params, desc,
-		actual_frac, actual_intermediate);
-	if (rc) {
-		pr_err("clk_zero calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_clk_trail(phy, clk_params, desc, &teot_clk_lane);
-	if (rc) {
-		pr_err("clk_trail calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_prepare(phy, clk_params, desc, &temp_multiple);
-	if (rc) {
-		pr_err("hs_prepare calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_zero(phy, clk_params, desc, temp_multiple);
-	if (rc) {
-		pr_err("hs_zero calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_trail(phy, clk_params, desc, teot_clk_lane);
-	if (rc) {
-		pr_err("hs_trail calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_rqst(phy, clk_params, desc);
-	if (rc) {
-		pr_err("hs_rqst calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_exit(phy, clk_params, desc);
-	if (rc) {
-		pr_err("hs_exit calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_hs_rqst_clk(phy, clk_params, desc);
-	if (rc) {
-		pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_clk_post(phy, clk_params, desc);
-	if (rc) {
-		pr_err("clk_post calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	rc = calc_clk_pre(phy, clk_params, desc);
-	if (rc) {
-		pr_err("clk_pre calculations failed, rc=%d\n", rc);
-		goto error;
-	}
-error:
-	return rc;
-}
-
-/**
- * calculate_timing_params() - calculates timing parameters.
- * @phy:      Pointer to DSI PHY hardware object.
- * @mode:     Mode information for which timing has to be calculated.
- * @config:   DSI host configuration for this mode.
- * @timing:   Timing parameters for each lane which will be returned.
- */
-int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-					    struct dsi_host_common_cfg *host,
-					   struct dsi_phy_per_lane_cfgs *timing)
-{
-	/* constants */
-	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
-	u32 const esc_clk_mmss_cc_prediv = 10;
-	u32 const tlpx_numer = 1000;
-	u32 const tr_eot = 20;
-	u32 const clk_prepare_spec_min = 38;
-	u32 const clk_prepare_spec_max = 95;
-	u32 const clk_trail_spec_min = 60;
-	u32 const hs_exit_spec_min = 100;
-	u32 const hs_exit_reco_max = 255;
-	u32 const hs_rqst_spec_min = 50;
-
-	/* local vars */
-	int rc = 0;
-	u32 h_total, v_total;
-	u32 inter_num;
-	u32 num_of_lanes = 0;
-	u32 bpp;
-	u64 x, y;
-	struct phy_timing_desc desc;
-	struct phy_clk_params clk_params = {0};
-	struct phy_timing_ops *ops = phy->ops.timing_ops;
-
-	memset(&desc, 0x0, sizeof(desc));
-	h_total = DSI_H_TOTAL_DSC(mode);
-	v_total = DSI_V_TOTAL(mode);
-
-	bpp = bits_per_pixel[host->dst_format];
-
-	inter_num = bpp * mode->refresh_rate;
-
-	if (host->data_lanes & DSI_DATA_LANE_0)
-		num_of_lanes++;
-	if (host->data_lanes & DSI_DATA_LANE_1)
-		num_of_lanes++;
-	if (host->data_lanes & DSI_DATA_LANE_2)
-		num_of_lanes++;
-	if (host->data_lanes & DSI_DATA_LANE_3)
-		num_of_lanes++;
-
-
-	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
-	y = rounddown(x, 1);
-
-	clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
-	clk_params.escclk_numer = esc_clk_mhz;
-	clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
-	clk_params.tlpx_numer_ns = tlpx_numer;
-	clk_params.treot_ns = tr_eot;
-
-
-	/* Setup default parameters */
-	desc.clk_prepare.mipi_min = clk_prepare_spec_min;
-	desc.clk_prepare.mipi_max = clk_prepare_spec_max;
-	desc.clk_trail.mipi_min = clk_trail_spec_min;
-	desc.hs_exit.mipi_min = hs_exit_spec_min;
-	desc.hs_exit.rec_max = hs_exit_reco_max;
-	desc.hs_rqst.mipi_min = hs_rqst_spec_min;
-	desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
-
-	if (ops->get_default_phy_params) {
-		ops->get_default_phy_params(&clk_params);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-	desc.clk_prepare.rec_min = DIV_ROUND_UP(
-			(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
-			(8 * clk_params.tlpx_numer_ns)
-			);
-
-	desc.clk_prepare.rec_max = rounddown(
-		mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
-			  1, (8 * clk_params.tlpx_numer_ns)),
-		1);
-
-	pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
-	       clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
-	       clk_params.treot_ns);
-	rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
-	if (rc) {
-		pr_err("Timing calc failed, rc=%d\n", rc);
-		goto error;
-	}
-
-	if (ops->update_timing_params) {
-		ops->update_timing_params(timing, &desc);
-	} else {
-		rc = -EINVAL;
-		goto error;
-	}
-
-error:
-	return rc;
-}
-
-int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
-			enum dsi_phy_version version)
-{
-	struct phy_timing_ops *ops = NULL;
-
-	if (version == DSI_PHY_VERSION_UNKNOWN ||
-	    version >= DSI_PHY_VERSION_MAX || !phy) {
-		pr_err("Unsupported version: %d\n", version);
-		return -ENOTSUPP;
-	}
-
-	ops = kzalloc(sizeof(struct phy_timing_ops), GFP_KERNEL);
-	if (!ops)
-		return -EINVAL;
-	phy->ops.timing_ops = ops;
-
-	switch (version) {
-	case DSI_PHY_VERSION_2_0:
-		ops->get_default_phy_params =
-			dsi_phy_hw_v2_0_get_default_phy_params;
-		ops->calc_clk_zero =
-			dsi_phy_hw_v2_0_calc_clk_zero;
-		ops->calc_clk_trail_rec_min =
-			dsi_phy_hw_v2_0_calc_clk_trail_rec_min;
-		ops->calc_clk_trail_rec_max =
-			dsi_phy_hw_v2_0_calc_clk_trail_rec_max;
-		ops->calc_hs_zero =
-			dsi_phy_hw_v2_0_calc_hs_zero;
-		ops->calc_hs_trail =
-			dsi_phy_hw_v2_0_calc_hs_trail;
-		ops->update_timing_params =
-			dsi_phy_hw_v2_0_update_timing_params;
-		break;
-	case DSI_PHY_VERSION_3_0:
-		ops->get_default_phy_params =
-			dsi_phy_hw_v3_0_get_default_phy_params;
-		ops->calc_clk_zero =
-			dsi_phy_hw_v3_0_calc_clk_zero;
-		ops->calc_clk_trail_rec_min =
-			dsi_phy_hw_v3_0_calc_clk_trail_rec_min;
-		ops->calc_clk_trail_rec_max =
-			dsi_phy_hw_v3_0_calc_clk_trail_rec_max;
-		ops->calc_hs_zero =
-			dsi_phy_hw_v3_0_calc_hs_zero;
-		ops->calc_hs_trail =
-			dsi_phy_hw_v3_0_calc_hs_trail;
-		ops->update_timing_params =
-			dsi_phy_hw_v3_0_update_timing_params;
-		break;
-	case DSI_PHY_VERSION_4_0:
-	case DSI_PHY_VERSION_4_1:
-		ops->get_default_phy_params =
-			dsi_phy_hw_v4_0_get_default_phy_params;
-		ops->calc_clk_zero =
-			dsi_phy_hw_v4_0_calc_clk_zero;
-		ops->calc_clk_trail_rec_min =
-			dsi_phy_hw_v4_0_calc_clk_trail_rec_min;
-		ops->calc_clk_trail_rec_max =
-			dsi_phy_hw_v4_0_calc_clk_trail_rec_max;
-		ops->calc_hs_zero =
-			dsi_phy_hw_v4_0_calc_hs_zero;
-		ops->calc_hs_trail =
-			dsi_phy_hw_v4_0_calc_hs_trail;
-		ops->update_timing_params =
-			dsi_phy_hw_v4_0_update_timing_params;
-		break;
-	case DSI_PHY_VERSION_0_0_HPM:
-	case DSI_PHY_VERSION_0_0_LPM:
-	case DSI_PHY_VERSION_1_0:
-	default:
-		kfree(ops);
-		return -ENOTSUPP;
-	}
-
-	return 0;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.h
deleted file mode 100644
index 0c526dc..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PHY_TIMING_CALC_H_
-#define _DSI_PHY_TIMING_CALC_H_
-
-#include <linux/math64.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/errno.h>
-
-#include "dsi_defs.h"
-#include "dsi_phy_hw.h"
-#include "dsi_catalog.h"
-
-/**
- * struct timing_entry - Calculated values for each timing parameter.
- * @mipi_min:
- * @mipi_max:
- * @rec_min:
- * @rec_max:
- * @rec:
- * @reg_value:       Value to be programmed in register.
- */
-struct timing_entry {
-	s32 mipi_min;
-	s32 mipi_max;
-	s32 rec_min;
-	s32 rec_max;
-	s32 rec;
-	u8 reg_value;
-};
-
-/**
- * struct phy_timing_desc - Timing parameters for DSI PHY.
- */
-struct phy_timing_desc {
-	struct timing_entry clk_prepare;
-	struct timing_entry clk_zero;
-	struct timing_entry clk_trail;
-	struct timing_entry hs_prepare;
-	struct timing_entry hs_zero;
-	struct timing_entry hs_trail;
-	struct timing_entry hs_rqst;
-	struct timing_entry hs_rqst_clk;
-	struct timing_entry hs_exit;
-	struct timing_entry ta_go;
-	struct timing_entry ta_sure;
-	struct timing_entry ta_set;
-	struct timing_entry clk_post;
-	struct timing_entry clk_pre;
-};
-
-/**
- * struct phy_clk_params - Clock parameters for PHY timing calculations.
- */
-struct phy_clk_params {
-	u32 bitclk_mbps;
-	u32 escclk_numer;
-	u32 escclk_denom;
-	u32 tlpx_numer_ns;
-	u32 treot_ns;
-	u32 clk_prep_buf;
-	u32 clk_zero_buf;
-	u32 clk_trail_buf;
-	u32 hs_prep_buf;
-	u32 hs_zero_buf;
-	u32 hs_trail_buf;
-	u32 hs_rqst_buf;
-	u32 hs_exit_buf;
-	u32 clk_pre_buf;
-	u32 clk_post_buf;
-};
-
-/**
- * Various Ops needed for auto-calculation of DSI PHY timing parameters.
- */
-struct phy_timing_ops {
-	void (*get_default_phy_params)(struct phy_clk_params *params);
-
-	int32_t (*calc_clk_zero)(s64 rec_temp1, s64 mult);
-
-	int32_t (*calc_clk_trail_rec_min)(s64 temp_mul,
-		s64 frac, s64 mult);
-
-	int32_t (*calc_clk_trail_rec_max)(s64 temp1, s64 mult);
-
-	int32_t (*calc_hs_zero)(s64 temp1, s64 mult);
-
-	void (*calc_hs_trail)(struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc);
-
-	void (*update_timing_params)(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
-};
-
-#define roundup64(x, y) \
-	({ u64 _tmp = (x)+(y)-1; do_div(_tmp, y); _tmp * y; })
-
-/* DSI PHY timing functions for 14nm */
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params);
-
-int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult);
-
-int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult);
-
-int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
-
-int32_t dsi_phy_hw_v2_0_calc_hs_zero(s64 temp1, s64 mult);
-
-void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
-		struct phy_timing_desc *desc);
-
-void dsi_phy_hw_v2_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
-
-/* DSI PHY timing functions for 10nm */
-void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params);
-
-int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult);
-
-int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult);
-
-int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
-
-int32_t dsi_phy_hw_v3_0_calc_hs_zero(s64 temp1, s64 mult);
-
-void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
-		struct phy_timing_desc *desc);
-
-void dsi_phy_hw_v3_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
-
-/* DSI PHY timing functions for 7nm */
-void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params);
-
-int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult);
-
-int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult);
-
-int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
-
-int32_t dsi_phy_hw_v4_0_calc_hs_zero(s64 temp1, s64 mult);
-
-void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
-		struct phy_timing_desc *desc);
-
-void dsi_phy_hw_v4_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
-
-#endif /* _DSI_PHY_TIMING_CALC_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c
deleted file mode 100644
index 19b2b9e..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v2_0.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-timing:" fmt
-#include "dsi_phy_timing_calc.h"
-
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params)
-{
-	params->clk_prep_buf = 50;
-	params->clk_zero_buf = 2;
-	params->clk_trail_buf = 30;
-	params->hs_prep_buf = 50;
-	params->hs_zero_buf = 10;
-	params->hs_trail_buf = 30;
-	params->hs_rqst_buf = 0;
-	params->hs_exit_buf = 10;
-}
-
-int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult)
-{
-	s64 rec_temp2, rec_temp3;
-
-	rec_temp2 = (rec_temp1 - (11 * mult));
-	rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
-	return (div_s64(rec_temp3, mult) - 3);
-}
-
-int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult)
-{
-	s64 rec_temp1, rec_temp2, rec_temp3;
-
-	rec_temp1 = temp_mul + frac + (3 * mult);
-	rec_temp2 = div_s64(rec_temp1, 8);
-	rec_temp3 = roundup64(rec_temp2, mult);
-
-	return div_s64(rec_temp3, mult);
-}
-
-int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
-{
-	s64 rec_temp2, rec_temp3;
-
-	rec_temp2 = temp1 + (3 * mult);
-	rec_temp3 = rec_temp2 / 8;
-	return div_s64(rec_temp3, mult);
-
-}
-
-int32_t dsi_phy_hw_v2_0_calc_hs_zero(s64 temp1, s64 mult)
-{
-	s64 rec_temp2, rec_temp3, rec_min;
-
-	rec_temp2 = temp1 - (11 * mult);
-	rec_temp3 = roundup64((rec_temp2 / 8), mult);
-	rec_min = rec_temp3 - (3 * mult);
-	return div_s64(rec_min, mult);
-}
-
-void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	s64 rec_temp1;
-	struct timing_entry *t = &desc->hs_trail;
-
-	t->rec_min = DIV_ROUND_UP(
-		((t->mipi_min * clk_params->bitclk_mbps) +
-		 (3 * clk_params->tlpx_numer_ns)),
-		(8 * clk_params->tlpx_numer_ns));
-
-	rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
-		     (3 * clk_params->tlpx_numer_ns));
-	t->rec_max = DIV_ROUND_UP_ULL(rec_temp1,
-				      (8 * clk_params->tlpx_numer_ns));
-}
-
-void dsi_phy_hw_v2_0_update_timing_params(
-	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
-{
-	int i = 0;
-
-	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
-		timing->lane[i][0] = desc->hs_exit.reg_value;
-
-		if (i == DSI_LOGICAL_CLOCK_LANE)
-			timing->lane[i][1] = desc->clk_zero.reg_value;
-		else
-			timing->lane[i][1] = desc->hs_zero.reg_value;
-
-		if (i == DSI_LOGICAL_CLOCK_LANE)
-			timing->lane[i][2] = desc->clk_prepare.reg_value;
-		else
-			timing->lane[i][2] = desc->hs_prepare.reg_value;
-
-		if (i == DSI_LOGICAL_CLOCK_LANE)
-			timing->lane[i][3] = desc->clk_trail.reg_value;
-		else
-			timing->lane[i][3] = desc->hs_trail.reg_value;
-
-		if (i == DSI_LOGICAL_CLOCK_LANE)
-			timing->lane[i][4] = desc->hs_rqst_clk.reg_value;
-		else
-			timing->lane[i][4] = desc->hs_rqst.reg_value;
-
-		timing->lane[i][5] = 0x2;
-		timing->lane[i][6] = 0x4;
-		timing->lane[i][7] = 0xA0;
-		pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
-						    timing->lane[i][1],
-						    timing->lane[i][2],
-						    timing->lane[i][3],
-						    timing->lane[i][4]);
-	}
-	timing->count_per_lane = 8;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c
deleted file mode 100644
index ac9633d..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v3_0.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-timing:" fmt
-#include "dsi_phy_timing_calc.h"
-
-void dsi_phy_hw_v3_0_get_default_phy_params(
-		struct phy_clk_params *params)
-{
-	params->clk_prep_buf = 0;
-	params->clk_zero_buf = 0;
-	params->clk_trail_buf = 0;
-	params->hs_prep_buf = 0;
-	params->hs_zero_buf = 0;
-	params->hs_trail_buf = 0;
-	params->hs_rqst_buf = 0;
-	params->hs_exit_buf = 0;
-}
-
-int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult)
-{
-	s64 rec_temp2, rec_temp3;
-
-	rec_temp2 = (rec_temp1 - mult);
-	rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
-	return (div_s64(rec_temp3, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult)
-{
-	s64 rec_temp1, rec_temp2, rec_temp3;
-
-	rec_temp1 = temp_mul + frac;
-	rec_temp2 = div_s64(rec_temp1, 8);
-	rec_temp3 = roundup64(rec_temp2, mult);
-	return (div_s64(rec_temp3, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
-{
-	s64 rec_temp2;
-
-	rec_temp2 = temp1 / 8;
-	return (div_s64(rec_temp2, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v3_0_calc_hs_zero(s64 temp1, s64 mult)
-{
-	s64 rec_temp2, rec_min;
-
-	rec_temp2 = roundup64((temp1 / 8), mult);
-	rec_min = rec_temp2 - (1 * mult);
-	return div_s64(rec_min, mult);
-}
-
-void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	s64 rec_temp1;
-	struct timing_entry *t = &desc->hs_trail;
-
-	t->rec_min = DIV_ROUND_UP(
-			(t->mipi_min * clk_params->bitclk_mbps),
-			(8 * clk_params->tlpx_numer_ns)) - 1;
-
-	rec_temp1 = (t->mipi_max * clk_params->bitclk_mbps);
-	t->rec_max =
-		 (div_s64(rec_temp1, (8 * clk_params->tlpx_numer_ns))) - 1;
-}
-
-void dsi_phy_hw_v3_0_update_timing_params(
-	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
-{
-	timing->lane_v3[0] = 0x00;
-	timing->lane_v3[1] = desc->clk_zero.reg_value;
-	timing->lane_v3[2] = desc->clk_prepare.reg_value;
-	timing->lane_v3[3] = desc->clk_trail.reg_value;
-	timing->lane_v3[4] = desc->hs_exit.reg_value;
-	timing->lane_v3[5] = desc->hs_zero.reg_value;
-	timing->lane_v3[6] = desc->hs_prepare.reg_value;
-	timing->lane_v3[7] = desc->hs_trail.reg_value;
-	timing->lane_v3[8] = desc->hs_rqst.reg_value;
-	timing->lane_v3[9] = 0x02;
-	timing->lane_v3[10] = 0x04;
-	timing->lane_v3[11] = 0x00;
-
-	pr_debug("[%d %d %d %d]\n", timing->lane_v3[0],
-		timing->lane_v3[1], timing->lane_v3[2], timing->lane_v3[3]);
-	pr_debug("[%d %d %d %d]\n", timing->lane_v3[4],
-		timing->lane_v3[5], timing->lane_v3[6], timing->lane_v3[7]);
-	pr_debug("[%d %d %d %d]\n", timing->lane_v3[8],
-		timing->lane_v3[9], timing->lane_v3[10], timing->lane_v3[11]);
-	timing->count_per_lane = 12;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v4_0.c
deleted file mode 100644
index 0a79044..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_v4_0.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "dsi-phy-timing-v4: %s:" fmt, __func__
-#include "dsi_phy_timing_calc.h"
-
-void dsi_phy_hw_v4_0_get_default_phy_params(
-		struct phy_clk_params *params)
-{
-	params->clk_prep_buf = 0;
-	params->clk_zero_buf = 0;
-	params->clk_trail_buf = 0;
-	params->hs_prep_buf = 0;
-	params->hs_zero_buf = 0;
-	params->hs_trail_buf = 0;
-	params->hs_rqst_buf = 0;
-	params->hs_exit_buf = 0;
-}
-
-int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult)
-{
-	s64 rec_temp2, rec_temp3;
-
-	rec_temp2 = (rec_temp1 - mult);
-	rec_temp3 = roundup(div_s64(rec_temp2, 8), mult);
-	return (div_s64(rec_temp3, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_min(s64 temp_mul,
-		s64 frac, s64 mult)
-{
-	s64 rec_temp1, rec_temp2, rec_temp3;
-
-	rec_temp1 = temp_mul + frac;
-	rec_temp2 = div_s64(rec_temp1, 8);
-	rec_temp3 = roundup(rec_temp2, mult);
-	return (div_s64(rec_temp3, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
-{
-	s64 rec_temp2;
-
-	rec_temp2 = temp1 / 8;
-	return (div_s64(rec_temp2, mult) - 1);
-}
-
-int32_t dsi_phy_hw_v4_0_calc_hs_zero(s64 temp1, s64 mult)
-{
-	s64 rec_temp2, rec_min;
-
-	rec_temp2 = roundup((temp1 / 8), mult);
-	rec_min = rec_temp2 - (1 * mult);
-	return div_s64(rec_min, mult);
-}
-
-void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
-			struct phy_timing_desc *desc)
-{
-	s64 rec_temp1;
-	struct timing_entry *t = &desc->hs_trail;
-
-	t->rec_min = DIV_ROUND_UP(
-			(t->mipi_min * clk_params->bitclk_mbps),
-			(8 * clk_params->tlpx_numer_ns)) - 1;
-
-	rec_temp1 = (t->mipi_max * clk_params->bitclk_mbps);
-	t->rec_max =
-		 (div_s64(rec_temp1, (8 * clk_params->tlpx_numer_ns))) - 1;
-}
-
-void dsi_phy_hw_v4_0_update_timing_params(
-	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
-{
-	timing->lane_v4[0] = 0x00;
-	timing->lane_v4[1] = desc->clk_zero.reg_value;
-	timing->lane_v4[2] = desc->clk_prepare.reg_value;
-	timing->lane_v4[3] = desc->clk_trail.reg_value;
-	timing->lane_v4[4] = desc->hs_exit.reg_value;
-	timing->lane_v4[5] = desc->hs_zero.reg_value;
-	timing->lane_v4[6] = desc->hs_prepare.reg_value;
-	timing->lane_v4[7] = desc->hs_trail.reg_value;
-	timing->lane_v4[8] = desc->hs_rqst.reg_value;
-	timing->lane_v4[9] = 0x03;
-	timing->lane_v4[10] = 0x04;
-	timing->lane_v4[11] = 0x00;
-	timing->lane_v4[12] = 0x00;
-	timing->lane_v4[13] = 0x00;
-
-	pr_debug("[%d %d %d %d]\n", timing->lane_v4[0],
-		timing->lane_v4[1], timing->lane_v4[2], timing->lane_v4[3]);
-	pr_debug("[%d %d %d %d]\n", timing->lane_v4[4],
-		timing->lane_v4[5], timing->lane_v4[6], timing->lane_v4[7]);
-	pr_debug("[%d %d %d %d]\n", timing->lane_v4[8],
-		timing->lane_v4[9], timing->lane_v4[10], timing->lane_v4[11]);
-	pr_debug("[%d %d]\n", timing->lane_v4[12], timing->lane_v4[13]);
-	timing->count_per_lane = 14;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
deleted file mode 100644
index a7b52dc..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ /dev/null
@@ -1,366 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include "dsi_pwr.h"
-#include "dsi_parser.h"
-
-/*
- * dsi_pwr_parse_supply_node() - parse power supply node from root device node
- */
-static int dsi_pwr_parse_supply_node(struct dsi_parser_utils *utils,
-				     struct device_node *root,
-				     struct dsi_regulator_info *regs)
-{
-	int rc = 0;
-	int i = 0;
-	u32 tmp = 0;
-	struct device_node *node = NULL;
-
-	dsi_for_each_child_node(root, node) {
-		const char *st = NULL;
-
-		rc = utils->read_string(node, "qcom,supply-name", &st);
-		if (rc) {
-			pr_err("failed to read name, rc = %d\n", rc);
-			goto error;
-		}
-
-		snprintf(regs->vregs[i].vreg_name,
-			 ARRAY_SIZE(regs->vregs[i].vreg_name),
-			 "%s", st);
-
-		rc = utils->read_u32(node, "qcom,supply-min-voltage", &tmp);
-		if (rc) {
-			pr_err("failed to read min voltage, rc = %d\n", rc);
-			goto error;
-		}
-		regs->vregs[i].min_voltage = tmp;
-
-		rc = utils->read_u32(node, "qcom,supply-max-voltage", &tmp);
-		if (rc) {
-			pr_err("failed to read max voltage, rc = %d\n", rc);
-			goto error;
-		}
-		regs->vregs[i].max_voltage = tmp;
-
-		rc = utils->read_u32(node, "qcom,supply-enable-load", &tmp);
-		if (rc) {
-			pr_err("failed to read enable load, rc = %d\n", rc);
-			goto error;
-		}
-		regs->vregs[i].enable_load = tmp;
-
-		rc = utils->read_u32(node, "qcom,supply-disable-load", &tmp);
-		if (rc) {
-			pr_err("failed to read disable load, rc = %d\n", rc);
-			goto error;
-		}
-		regs->vregs[i].disable_load = tmp;
-
-		/* Optional values */
-		rc = utils->read_u32(node, "qcom,supply-pre-on-sleep", &tmp);
-		if (rc) {
-			pr_debug("pre-on-sleep not specified\n");
-			rc = 0;
-		} else {
-			regs->vregs[i].pre_on_sleep = tmp;
-		}
-
-		rc = utils->read_u32(node, "qcom,supply-pre-off-sleep", &tmp);
-		if (rc) {
-			pr_debug("pre-off-sleep not specified\n");
-			rc = 0;
-		} else {
-			regs->vregs[i].pre_off_sleep = tmp;
-		}
-
-		rc = utils->read_u32(node, "qcom,supply-post-on-sleep", &tmp);
-		if (rc) {
-			pr_debug("post-on-sleep not specified\n");
-			rc = 0;
-		} else {
-			regs->vregs[i].post_on_sleep = tmp;
-		}
-
-		rc = utils->read_u32(node, "qcom,supply-post-off-sleep", &tmp);
-		if (rc) {
-			pr_debug("post-off-sleep not specified\n");
-			rc = 0;
-		} else {
-			regs->vregs[i].post_off_sleep = tmp;
-		}
-
-		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
-			 regs->vregs[i].vreg_name,
-			 regs->vregs[i].min_voltage,
-			 regs->vregs[i].max_voltage,
-			 regs->vregs[i].enable_load,
-			 regs->vregs[i].disable_load);
-		++i;
-	}
-
-error:
-	return rc;
-}
-
-/**
- * dsi_pwr_enable_vregs() - enable/disable regulators
- */
-static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
-{
-	int rc = 0, i = 0;
-	struct dsi_vreg *vreg;
-	int num_of_v = 0;
-
-	if (enable) {
-		for (i = 0; i < regs->count; i++) {
-			vreg = &regs->vregs[i];
-			if (vreg->pre_on_sleep)
-				msleep(vreg->pre_on_sleep);
-
-			rc = regulator_set_load(vreg->vreg,
-						vreg->enable_load);
-			if (rc < 0) {
-				pr_err("Setting optimum mode failed for %s\n",
-				       vreg->vreg_name);
-				goto error;
-			}
-			num_of_v = regulator_count_voltages(vreg->vreg);
-			if (num_of_v > 0) {
-				rc = regulator_set_voltage(vreg->vreg,
-							   vreg->min_voltage,
-							   vreg->max_voltage);
-				if (rc) {
-					pr_err("Set voltage(%s) fail, rc=%d\n",
-						 vreg->vreg_name, rc);
-					goto error_disable_opt_mode;
-				}
-			}
-
-			rc = regulator_enable(vreg->vreg);
-			if (rc) {
-				pr_err("enable failed for %s, rc=%d\n",
-				       vreg->vreg_name, rc);
-				goto error_disable_voltage;
-			}
-
-			if (vreg->post_on_sleep)
-				msleep(vreg->post_on_sleep);
-		}
-	} else {
-		for (i = (regs->count - 1); i >= 0; i--) {
-			if (regs->vregs[i].pre_off_sleep)
-				msleep(regs->vregs[i].pre_off_sleep);
-
-			(void)regulator_set_load(regs->vregs[i].vreg,
-						regs->vregs[i].disable_load);
-			(void)regulator_disable(regs->vregs[i].vreg);
-
-			if (regs->vregs[i].post_off_sleep)
-				msleep(regs->vregs[i].post_off_sleep);
-		}
-	}
-
-	return 0;
-error_disable_opt_mode:
-	(void)regulator_set_load(regs->vregs[i].vreg,
-				 regs->vregs[i].disable_load);
-
-error_disable_voltage:
-	if (num_of_v > 0)
-		(void)regulator_set_voltage(regs->vregs[i].vreg,
-					    0, regs->vregs[i].max_voltage);
-error:
-	for (i--; i >= 0; i--) {
-		if (regs->vregs[i].pre_off_sleep)
-			msleep(regs->vregs[i].pre_off_sleep);
-
-		(void)regulator_set_load(regs->vregs[i].vreg,
-					 regs->vregs[i].disable_load);
-
-		num_of_v = regulator_count_voltages(regs->vregs[i].vreg);
-		if (num_of_v > 0)
-			(void)regulator_set_voltage(regs->vregs[i].vreg,
-					    0, regs->vregs[i].max_voltage);
-
-		(void)regulator_disable(regs->vregs[i].vreg);
-
-		if (regs->vregs[i].post_off_sleep)
-			msleep(regs->vregs[i].post_off_sleep);
-	}
-
-	return rc;
-}
-
-/**
- * dsi_pwr_of_get_vreg_data - Parse regulator supply information
- * @of_node:        Device of node to parse for supply information.
- * @regs:           Pointer where regulator information will be copied to.
- * @supply_name:    Name of the supply node.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_of_get_vreg_data(struct dsi_parser_utils *utils,
-				 struct dsi_regulator_info *regs,
-				 char *supply_name)
-{
-	int rc = 0;
-	struct device_node *supply_root_node = NULL;
-
-	if (!utils || !regs) {
-		pr_err("Bad params\n");
-		return -EINVAL;
-	}
-
-	regs->count = 0;
-	supply_root_node = utils->get_child_by_name(utils->data, supply_name);
-	if (!supply_root_node) {
-		supply_root_node = of_parse_phandle(utils->node,
-					supply_name, 0);
-		if (!supply_root_node) {
-			pr_debug("No supply entry present for %s\n",
-					supply_name);
-			return -EINVAL;
-		}
-	}
-
-	regs->count = utils->get_available_child_count(supply_root_node);
-	if (regs->count == 0) {
-		pr_err("No vregs defined for %s\n", supply_name);
-		return -EINVAL;
-	}
-
-	regs->vregs = kcalloc(regs->count, sizeof(*regs->vregs), GFP_KERNEL);
-	if (!regs->vregs) {
-		regs->count = 0;
-		return -ENOMEM;
-	}
-
-	rc = dsi_pwr_parse_supply_node(utils, supply_root_node, regs);
-	if (rc) {
-		pr_err("failed to parse supply node for %s, rc = %d\n",
-			supply_name, rc);
-
-		kfree(regs->vregs);
-		regs->vregs = NULL;
-		regs->count = 0;
-	}
-
-	return rc;
-}
-
-/**
- * dsi_pwr_get_dt_vreg_data - parse regulator supply information
- * @dev:            Device whose of_node needs to be parsed.
- * @regs:           Pointer where regulator information will be copied to.
- * @supply_name:    Name of the supply node.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_get_dt_vreg_data(struct device *dev,
-				 struct dsi_regulator_info *regs,
-				 char *supply_name)
-{
-	int rc = 0;
-	struct device_node *of_node = NULL;
-	struct device_node *supply_node = NULL;
-	struct device_node *supply_root_node = NULL;
-	struct dsi_parser_utils utils = *dsi_parser_get_of_utils();
-
-	if (!dev || !regs) {
-		pr_err("Bad params\n");
-		return -EINVAL;
-	}
-
-	of_node = dev->of_node;
-	regs->count = 0;
-	supply_root_node = of_get_child_by_name(of_node, supply_name);
-	if (!supply_root_node) {
-		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
-		if (!supply_root_node) {
-			pr_debug("No supply entry present for %s\n",
-					supply_name);
-			return -EINVAL;
-		}
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node)
-		regs->count++;
-
-	if (regs->count == 0) {
-		pr_err("No vregs defined for %s\n", supply_name);
-		return -EINVAL;
-	}
-
-	regs->vregs = devm_kcalloc(dev, regs->count, sizeof(*regs->vregs),
-				   GFP_KERNEL);
-	if (!regs->vregs) {
-		regs->count = 0;
-		return -ENOMEM;
-	}
-
-	utils.data = of_node;
-	utils.node = of_node;
-
-	rc = dsi_pwr_parse_supply_node(&utils, supply_root_node, regs);
-	if (rc) {
-		pr_err("failed to parse supply node for %s, rc = %d\n",
-		       supply_name, rc);
-		devm_kfree(dev, regs->vregs);
-		regs->vregs = NULL;
-		regs->count = 0;
-	}
-
-	return rc;
-}
-
-/**
- * dsi_pwr_enable_regulator() - enable a set of regulators
- * @regs:       Pointer to set of regulators to enable or disable.
- * @enable:     Enable/Disable regulators.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
-{
-	int rc = 0;
-
-	if (regs->count == 0) {
-		pr_debug("No valid regulators to enable\n");
-		return 0;
-	}
-
-	if (!regs->vregs) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (enable) {
-		if (regs->refcount == 0) {
-			rc = dsi_pwr_enable_vregs(regs, true);
-			if (rc)
-				pr_err("failed to enable regulators\n");
-		}
-		regs->refcount++;
-	} else {
-		if (regs->refcount == 0) {
-			pr_err("Unbalanced regulator off:%s\n",
-					regs->vregs->vreg_name);
-		} else {
-			regs->refcount--;
-			if (regs->refcount == 0) {
-				rc = dsi_pwr_enable_vregs(regs, false);
-				if (rc)
-					pr_err("failed to disable vregs\n");
-			}
-		}
-	}
-
-	return rc;
-}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.h b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.h
deleted file mode 100644
index 340dde3..0000000
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DSI_PWR_H_
-#define _DSI_PWR_H_
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/regulator/consumer.h>
-
-struct dsi_parser_utils;
-
-/**
- * struct dsi_vreg - regulator information for DSI regulators
- * @vreg:            Handle to the regulator.
- * @vreg_name:       Regulator name.
- * @min_voltage:     Minimum voltage in uV.
- * @max_voltage:     Maximum voltage in uV.
- * @enable_load:     Load, in uA, when enabled.
- * @disable_load:    Load, in uA, when disabled.
- * @pre_on_sleep:    Sleep, in ms, before enabling the regulator.
- * @post_on_sleep:   Sleep, in ms, after enabling the regulator.
- * @pre_off_sleep:   Sleep, in ms, before disabling the regulator.
- * @post_off_sleep:  Sleep, in ms, after disabling the regulator.
- */
-struct dsi_vreg {
-	struct regulator *vreg;
-	char vreg_name[32];
-	u32 min_voltage;
-	u32 max_voltage;
-	u32 enable_load;
-	u32 disable_load;
-	u32 pre_on_sleep;
-	u32 post_on_sleep;
-	u32 pre_off_sleep;
-	u32 post_off_sleep;
-};
-
-/**
- * struct dsi_regulator_info - set of vregs that are turned on/off together.
- * @vregs:       Array of dsi_vreg structures.
- * @count:       Number of vregs.
- * @refcount:    Reference counting for enabling.
- */
-struct dsi_regulator_info {
-	struct dsi_vreg *vregs;
-	u32 count;
-	u32 refcount;
-};
-
-/**
- * dsi_pwr_of_get_vreg_data - parse regulator supply information
- * @of_node:        Device of node to parse for supply information.
- * @regs:           Pointer where regulator information will be copied to.
- * @supply_name:    Name of the supply node.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_of_get_vreg_data(struct dsi_parser_utils *utils,
-				 struct dsi_regulator_info *regs,
-				 char *supply_name);
-
-/**
- * dsi_pwr_get_dt_vreg_data - parse regulator supply information
- * @dev:            Device whose of_node needs to be parsed.
- * @regs:           Pointer where regulator information will be copied to.
- * @supply_name:    Name of the supply node.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_get_dt_vreg_data(struct device *dev,
-				 struct dsi_regulator_info *regs,
-				 char *supply_name);
-
-/**
- * dsi_pwr_enable_regulator() - enable a set of regulators
- * @regs:       Pointer to set of regulators to enable or disable.
- * @enable:     Enable/Disable regulators.
- *
- * return: error code in case of failure or 0 for success.
- */
-int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
-#endif /* _DSI_PWR_H_ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9a36012..d8c79f5 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,4 @@
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -16,148 +15,11 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/msm_drm_notify.h>
-#include <linux/notifier.h>
-
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_kms.h"
-#include "msm_fence.h"
-#include "sde_trace.h"
 
-#define MULTIPLE_CONN_DETECTED(x) (x > 1)
-
-struct msm_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	uint32_t crtc_mask;
-	bool nonblock;
-	struct kthread_work commit_work;
-};
-
-static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
-
-/**
- * msm_drm_register_client - register a client notifier
- * @nb: notifier block to callback on events
- *
- * This function registers a notifier callback function
- * to msm_drm_notifier_list, which would be called when
- * received unblank/power down event.
- */
-int msm_drm_register_client(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_register(&msm_drm_notifier_list,
-						nb);
-}
-EXPORT_SYMBOL(msm_drm_register_client);
-
-/**
- * msm_drm_unregister_client - unregister a client notifier
- * @nb: notifier block to callback on events
- *
- * This function unregisters the callback function from
- * msm_drm_notifier_list.
- */
-int msm_drm_unregister_client(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
-						  nb);
-}
-EXPORT_SYMBOL(msm_drm_unregister_client);
-
-/**
- * msm_drm_notifier_call_chain - notify clients of drm_events
- * @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
- * @v: notifier data, inculde display id and display blank
- *     event(unblank or power down).
- */
-static int msm_drm_notifier_call_chain(unsigned long val, void *v)
-{
-	return blocking_notifier_call_chain(&msm_drm_notifier_list, val,
-					    v);
-}
-
-static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
-			struct drm_crtc_state *crtc_state, bool enable)
-{
-	struct drm_connector *connector = NULL;
-	struct drm_connector_state  *conn_state = NULL;
-	int i = 0;
-	int conn_cnt = 0;
-
-	if (msm_is_mode_seamless(&crtc_state->mode) ||
-		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
-		return true;
-
-	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
-		return true;
-
-	if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
-		for_each_old_connector_in_state(state, connector,
-				conn_state, i) {
-			if ((conn_state->crtc == crtc_state->crtc) ||
-					(connector->state->crtc ==
-					 crtc_state->crtc))
-				conn_cnt++;
-
-			if (MULTIPLE_CONN_DETECTED(conn_cnt))
-				return true;
-		}
-	}
-
-	return false;
-}
-
-static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
-		struct drm_connector_state *old_conn_state, bool enable)
-{
-	if (!old_conn_state || !old_conn_state->crtc)
-		return false;
-
-	if (!old_conn_state->crtc->state->mode_changed &&
-			!old_conn_state->crtc->state->active_changed &&
-			old_conn_state->crtc->state->connectors_changed) {
-		if (old_conn_state->crtc == connector->state->crtc)
-			return true;
-	}
-
-	if (enable)
-		return false;
-
-	if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
-		return true;
-
-	if (msm_is_mode_seamless_vrr(
-			&connector->encoder->crtc->state->adjusted_mode))
-		return true;
-
-	if (msm_is_mode_seamless_dms(
-			&connector->encoder->crtc->state->adjusted_mode))
-		return true;
-
-	return false;
-}
-
-/* clear specified crtcs (no longer pending update) */
-static void commit_destroy(struct msm_commit *c)
-{
-	struct msm_drm_private *priv = c->dev->dev_private;
-	uint32_t crtc_mask = c->crtc_mask;
-
-	/* End_atomic */
-	spin_lock(&priv->pending_crtcs_event.lock);
-	DBG("end: %08x", crtc_mask);
-	priv->pending_crtcs &= ~crtc_mask;
-	wake_up_all_locked(&priv->pending_crtcs_event);
-	spin_unlock(&priv->pending_crtcs_event.lock);
-
-	if (c->nonblock)
-		kfree(c);
-}
-
-static void msm_atomic_wait_for_commit_done(
-		struct drm_device *dev,
+static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
@@ -179,362 +41,6 @@ static void msm_atomic_wait_for_commit_done(
 	}
 }
 
-static void
-msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
-{
-	struct drm_connector *connector;
-	struct drm_connector_state *old_conn_state;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	struct msm_drm_notifier notifier_data;
-	int i, blank;
-
-	SDE_ATRACE_BEGIN("msm_disable");
-	for_each_old_connector_in_state(old_state, connector,
-			old_conn_state, i) {
-		const struct drm_encoder_helper_funcs *funcs;
-		struct drm_encoder *encoder;
-		struct drm_crtc_state *old_crtc_state;
-		unsigned int crtc_idx;
-
-		/*
-		 * Shut down everything that's in the changeset and currently
-		 * still on. So need to check the old, saved state.
-		 */
-		if (!old_conn_state->crtc)
-			continue;
-
-		crtc_idx = drm_crtc_index(old_conn_state->crtc);
-		old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
-							old_conn_state->crtc);
-
-		if (!old_crtc_state->active ||
-		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
-			continue;
-
-		encoder = old_conn_state->best_encoder;
-
-		/* We shouldn't get this far if we didn't previously have
-		 * an encoder.. but WARN_ON() rather than explode.
-		 */
-		if (WARN_ON(!encoder))
-			continue;
-
-		if (_msm_seamless_for_conn(connector, old_conn_state, false))
-			continue;
-
-		funcs = encoder->helper_private;
-
-		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
-				 encoder->base.id, encoder->name);
-
-		if (connector->state->crtc &&
-			connector->state->crtc->state->active_changed) {
-			blank = MSM_DRM_BLANK_POWERDOWN;
-			notifier_data.data = &blank;
-			notifier_data.id = crtc_idx;
-			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
-						     &notifier_data);
-		}
-		/*
-		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call disable hooks twice.
-		 */
-		drm_bridge_disable(encoder->bridge);
-
-		/* Right function depends upon target state. */
-		if (connector->state->crtc && funcs->prepare)
-			funcs->prepare(encoder);
-		else if (funcs->disable)
-			funcs->disable(encoder);
-		else
-			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-
-		drm_bridge_post_disable(encoder->bridge);
-		if (connector->state->crtc &&
-			connector->state->crtc->state->active_changed) {
-			DRM_DEBUG_ATOMIC("Notify blank\n");
-			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
-						&notifier_data);
-		}
-	}
-
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		const struct drm_crtc_helper_funcs *funcs;
-
-		/* Shut down everything that needs a full modeset. */
-		if (!drm_atomic_crtc_needs_modeset(crtc->state))
-			continue;
-
-		if (!old_crtc_state->active)
-			continue;
-
-		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
-			continue;
-
-		funcs = crtc->helper_private;
-
-		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
-				 crtc->base.id);
-
-		/* Right function depends upon target state. */
-		if (crtc->state->enable && funcs->prepare)
-			funcs->prepare(crtc);
-		else if (funcs->disable)
-			funcs->disable(crtc);
-		else
-			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-	}
-	SDE_ATRACE_END("msm_disable");
-}
-
-static void
-msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *old_conn_state;
-	int i;
-
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		const struct drm_crtc_helper_funcs *funcs;
-
-		if (!crtc->state->mode_changed)
-			continue;
-
-		funcs = crtc->helper_private;
-
-		if (crtc->state->enable && funcs->mode_set_nofb) {
-			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
-					 crtc->base.id);
-
-			funcs->mode_set_nofb(crtc);
-		}
-	}
-
-	for_each_old_connector_in_state(old_state, connector,
-			old_conn_state, i) {
-		const struct drm_encoder_helper_funcs *funcs;
-		struct drm_crtc_state *new_crtc_state;
-		struct drm_encoder *encoder;
-		struct drm_display_mode *mode, *adjusted_mode;
-
-		if (!connector->state->best_encoder)
-			continue;
-
-		encoder = connector->state->best_encoder;
-		funcs = encoder->helper_private;
-		new_crtc_state = connector->state->crtc->state;
-		mode = &new_crtc_state->mode;
-		adjusted_mode = &new_crtc_state->adjusted_mode;
-
-		if (!new_crtc_state->mode_changed &&
-				new_crtc_state->connectors_changed) {
-			if (_msm_seamless_for_conn(connector,
-					old_conn_state, false))
-				continue;
-		} else if (!new_crtc_state->mode_changed) {
-			continue;
-		}
-
-		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
-				 encoder->base.id, encoder->name);
-
-		/*
-		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call mode_set hooks twice.
-		 */
-		if (funcs->mode_set)
-			funcs->mode_set(encoder, mode, adjusted_mode);
-
-		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
-	}
-}
-
-/**
- * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
- * @dev: DRM device
- * @old_state: atomic state object with old state structures
- *
- * This function shuts down all the outputs that need to be shut down and
- * prepares them (if required) with the new mode.
- *
- * For compatibility with legacy crtc helpers this should be called before
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
-		struct drm_atomic_state *old_state)
-{
-	msm_disable_outputs(dev, old_state);
-
-	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
-
-	msm_crtc_set_mode(dev, old_state);
-}
-
-/**
- * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @old_state: atomic state object with old state structures
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy crtc helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
-		struct drm_atomic_state *old_state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	struct drm_crtc_state *new_crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *new_conn_state;
-	struct msm_drm_notifier notifier_data;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	int bridge_enable_count = 0;
-	int i, blank;
-	bool splash = false;
-
-	SDE_ATRACE_BEGIN("msm_enable");
-	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state,
-			new_crtc_state, i) {
-		const struct drm_crtc_helper_funcs *funcs;
-
-		/* Need to filter out CRTCs where only planes change. */
-		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
-			continue;
-
-		if (!new_crtc_state->active)
-			continue;
-
-		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
-			continue;
-
-		funcs = crtc->helper_private;
-
-		if (crtc->state->enable) {
-			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
-					 crtc->base.id);
-
-			if (funcs->atomic_enable)
-				funcs->atomic_enable(crtc, old_crtc_state);
-			else
-				funcs->commit(crtc);
-		}
-
-		if (msm_needs_vblank_pre_modeset(
-					&new_crtc_state->adjusted_mode))
-			drm_crtc_wait_one_vblank(crtc);
-
-	}
-
-	for_each_new_connector_in_state(old_state, connector,
-			new_conn_state, i) {
-		const struct drm_encoder_helper_funcs *funcs;
-		struct drm_encoder *encoder;
-		struct drm_connector_state *old_conn_state;
-
-		if (!new_conn_state->best_encoder)
-			continue;
-
-		if (!new_conn_state->crtc->state->active ||
-				!drm_atomic_crtc_needs_modeset(
-					new_conn_state->crtc->state))
-			continue;
-
-		old_conn_state = drm_atomic_get_old_connector_state(
-				old_state, connector);
-		if (_msm_seamless_for_conn(connector, old_conn_state, true))
-			continue;
-
-		encoder = connector->state->best_encoder;
-		funcs = encoder->helper_private;
-
-		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
-				 encoder->base.id, encoder->name);
-
-		if (kms && kms->funcs && kms->funcs->check_for_splash)
-			splash = kms->funcs->check_for_splash(kms);
-
-		if (splash || (connector->state->crtc &&
-			connector->state->crtc->state->active_changed)) {
-			blank = MSM_DRM_BLANK_UNBLANK;
-			notifier_data.data = &blank;
-			notifier_data.id =
-				connector->state->crtc->index;
-			DRM_DEBUG_ATOMIC("Notify early unblank\n");
-			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
-					    &notifier_data);
-		}
-		/*
-		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call enable hooks twice.
-		 */
-		drm_bridge_pre_enable(encoder->bridge);
-		++bridge_enable_count;
-
-		if (funcs->enable)
-			funcs->enable(encoder);
-		else
-			funcs->commit(encoder);
-	}
-
-	if (kms && kms->funcs && kms->funcs->commit) {
-		DRM_DEBUG_ATOMIC("triggering commit\n");
-		kms->funcs->commit(kms, old_state);
-	}
-
-	/* If no bridges were pre_enabled, skip iterating over them again */
-	if (bridge_enable_count == 0) {
-		SDE_ATRACE_END("msm_enable");
-		return;
-	}
-
-	for_each_new_connector_in_state(old_state, connector,
-			new_conn_state, i) {
-		struct drm_encoder *encoder;
-		struct drm_connector_state *old_conn_state;
-
-		if (!new_conn_state->best_encoder)
-			continue;
-
-		if (!new_conn_state->crtc->state->active ||
-		    !drm_atomic_crtc_needs_modeset(
-				    new_conn_state->crtc->state))
-			continue;
-
-		old_conn_state = drm_atomic_get_old_connector_state(
-				old_state, connector);
-		if (_msm_seamless_for_conn(connector, old_conn_state, true))
-			continue;
-
-		encoder = connector->state->best_encoder;
-
-		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
-				 encoder->base.id, encoder->name);
-
-		drm_bridge_enable(encoder->bridge);
-
-		if (splash || (connector->state->crtc &&
-			connector->state->crtc->state->active_changed)) {
-			DRM_DEBUG_ATOMIC("Notify unblank\n");
-			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
-					    &notifier_data);
-		}
-	}
-	SDE_ATRACE_END("msm_enable");
-}
-
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *new_state)
 {
@@ -556,295 +62,6 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }
 
-/* The (potentially) asynchronous part of the commit.  At this point
- * nothing can fail short of armageddon.
- */
-static void complete_commit(struct msm_commit *c)
-{
-	struct drm_atomic_state *state = c->state;
-	struct drm_device *dev = state->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	drm_atomic_helper_wait_for_fences(dev, state, false);
-
-	kms->funcs->prepare_commit(kms, state);
-
-	msm_atomic_helper_commit_modeset_disables(dev, state);
-
-	drm_atomic_helper_commit_planes(dev, state, 0);
-
-	msm_atomic_helper_commit_modeset_enables(dev, state);
-
-	/* NOTE: _wait_for_vblanks() only waits for vblank on
-	 * enabled CRTCs.  So we end up faulting when disabling
-	 * due to (potentially) unref'ing the outgoing fb's
-	 * before the vblank when the disable has latched.
-	 *
-	 * But if it did wait on disabled (or newly disabled)
-	 * CRTCs, that would be racy (ie. we could have missed
-	 * the irq.  We need some way to poll for pipe shut
-	 * down.  Or just live with occasionally hitting the
-	 * timeout in the CRTC disable path (which really should
-	 * not be critical path)
-	 */
-
-	msm_atomic_wait_for_commit_done(dev, state);
-
-	drm_atomic_helper_cleanup_planes(dev, state);
-
-	kms->funcs->complete_commit(kms, state);
-
-	drm_atomic_state_put(state);
-
-	commit_destroy(c);
-}
-
-static void _msm_drm_commit_work_cb(struct kthread_work *work)
-{
-	struct msm_commit *commit = NULL;
-
-	if (!work) {
-		DRM_ERROR("%s: Invalid commit work data!\n", __func__);
-		return;
-	}
-
-	commit = container_of(work, struct msm_commit, commit_work);
-
-	SDE_ATRACE_BEGIN("complete_commit");
-	complete_commit(commit);
-	SDE_ATRACE_END("complete_commit");
-}
-
-static struct msm_commit *commit_init(struct drm_atomic_state *state,
-	bool nonblock)
-{
-	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
-	if (!c)
-		return NULL;
-
-	c->dev = state->dev;
-	c->state = state;
-	c->nonblock = nonblock;
-
-	kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
-
-	return c;
-}
-
-/* Start display thread function */
-static void msm_atomic_commit_dispatch(struct drm_device *dev,
-		struct drm_atomic_state *state, struct msm_commit *commit)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL;
-	struct drm_crtc_state *crtc_state = NULL;
-	int ret = -EINVAL, i = 0, j = 0;
-	bool nonblock;
-
-	/* cache since work will kfree commit in non-blocking case */
-	nonblock = commit->nonblock;
-
-	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
-		for (j = 0; j < priv->num_crtcs; j++) {
-			if (priv->disp_thread[j].crtc_id ==
-						crtc->base.id) {
-				if (priv->disp_thread[j].thread) {
-					kthread_queue_work(
-						&priv->disp_thread[j].worker,
-							&commit->commit_work);
-					/* only return zero if work is
-					 * queued successfully.
-					 */
-					ret = 0;
-				} else {
-					DRM_ERROR(" Error for crtc_id: %d\n",
-						priv->disp_thread[j].crtc_id);
-				}
-				break;
-			}
-		}
-		/*
-		 * TODO: handle cases where there will be more than
-		 * one crtc per commit cycle. Remove this check then.
-		 * Current assumption is there will be only one crtc
-		 * per commit cycle.
-		 */
-		if (j < priv->num_crtcs)
-			break;
-	}
-
-	if (ret) {
-		/**
-		 * this is not expected to happen, but at this point the state
-		 * has been swapped, but we couldn't dispatch to a crtc thread.
-		 * fallback now to a synchronous complete_commit to try and
-		 * ensure that SW and HW state don't get out of sync.
-		 */
-		DRM_ERROR("failed to dispatch commit to any CRTC\n");
-		complete_commit(commit);
-	} else if (!nonblock) {
-		kthread_flush_work(&commit->commit_work);
-	}
-
-	/* free nonblocking commits in this context, after processing */
-	if (!nonblock)
-		kfree(commit);
-}
-
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_commit *c;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
-	int i, ret;
-
-	if (!priv || priv->shutdown_in_progress) {
-		DRM_ERROR("priv is null or shutdwon is in-progress\n");
-		return -EINVAL;
-	}
-
-	SDE_ATRACE_BEGIN("atomic_commit");
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret) {
-		SDE_ATRACE_END("atomic_commit");
-		return ret;
-	}
-
-	c = commit_init(state, nonblock);
-	if (!c) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/*
-	 * Figure out what crtcs we have:
-	 */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-		c->crtc_mask |= drm_crtc_mask(crtc);
-
-	/*
-	 * Figure out what fence to wait for:
-	 */
-	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
-			new_plane_state, i) {
-		if ((new_plane_state->fb != old_plane_state->fb)
-				&& new_plane_state->fb) {
-			struct drm_gem_object *obj =
-				msm_framebuffer_bo(new_plane_state->fb, 0);
-			struct msm_gem_object *msm_obj = to_msm_bo(obj);
-			struct dma_fence *fence =
-				reservation_object_get_excl_rcu(msm_obj->resv);
-
-			drm_atomic_set_fence_for_plane(new_plane_state, fence);
-		}
-	}
-
-	/*
-	 * Wait for pending updates on any of the same crtc's and then
-	 * mark our set of crtc's as busy:
-	 */
-
-	/* Start Atomic */
-	spin_lock(&priv->pending_crtcs_event.lock);
-	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
-			!(priv->pending_crtcs & c->crtc_mask));
-	if (ret == 0) {
-		DBG("start: %08x", c->crtc_mask);
-		priv->pending_crtcs |= c->crtc_mask;
-	}
-	spin_unlock(&priv->pending_crtcs_event.lock);
-
-	if (ret)
-		goto err_free;
-
-	WARN_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
-	/*
-	 * Provide the driver a chance to prepare for output fences. This is
-	 * done after the point of no return, but before asynchronous commits
-	 * are dispatched to work queues, so that the fence preparation is
-	 * finished before the .atomic_commit returns.
-	 */
-	if (priv && priv->kms && priv->kms->funcs &&
-			priv->kms->funcs->prepare_fence)
-		priv->kms->funcs->prepare_fence(priv->kms, state);
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout
-	 */
-
-	drm_atomic_state_get(state);
-	msm_atomic_commit_dispatch(dev, state, c);
-
-	SDE_ATRACE_END("atomic_commit");
-
-	return 0;
-err_free:
-	kfree(c);
-error:
-	drm_atomic_helper_cleanup_planes(dev, state);
-	SDE_ATRACE_END("atomic_commit");
-	return ret;
-}
-
-struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
-		kfree(state);
-		return NULL;
-	}
-
-	return &state->base;
-}
-
-void msm_atomic_state_clear(struct drm_atomic_state *s)
-{
-	struct msm_kms_state *state = to_kms_state(s);
-
-	drm_atomic_state_default_clear(&state->base);
-	kfree(state->state);
-	state->state = NULL;
-}
-
-void msm_atomic_state_free(struct drm_atomic_state *state)
-{
-	kfree(to_kms_state(state)->state);
-	drm_atomic_state_default_release(state);
-	kfree(state);
-}
-
 void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -859,13 +76,27 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
+	if (kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, state);
+	}
+
 	msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
 
-	drm_atomic_helper_wait_for_vblanks(dev, state);
-
 	drm_atomic_helper_commit_hw_done(state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
+int msm_drm_register_client(struct notifier_block *nb)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_drm_register_client);
+
+int msm_drm_unregister_client(struct notifier_block *nb)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_drm_unregister_client);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b36f62a..8b0605f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,29 +15,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-/*
- * Copyright (c) 2016 Intel Corporation
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
 
-#include <linux/of_address.h>
 #include <linux/kthread.h>
 #include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
@@ -47,9 +25,7 @@
 #include "msm_fence.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
-#include "msm_mmu.h"
-#include "sde_wb.h"
-#include "sde_dbg.h"
+
 
 /*
  * MSM driver version:
@@ -64,58 +40,11 @@
 #define MSM_VERSION_MINOR	3
 #define MSM_VERSION_PATCHLEVEL	0
 
-static void msm_fb_output_poll_changed(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = NULL;
-
-	if (!dev) {
-		DRM_ERROR("output_poll_changed failed, invalid input\n");
-		return;
-	}
-
-	priv = dev->dev_private;
-
-	if (priv->fbdev)
-		drm_fb_helper_hotplug_event(priv->fbdev);
-}
-
-/**
- * msm_atomic_helper_check - validate state object
- * @dev: DRM device
- * @state: the driver state object
- *
- * This is a wrapper for the drm_atomic_helper_check to check the modeset
- * and state checking for planes. Additionally it checks if any secure
- * transition(moving CRTC and planes between secure and non-secure states and
- * vice versa) is allowed or not. When going to secure state, planes
- * with fb_mode as dir translated only can be staged on the CRTC, and only one
- * CRTC should be active.
- * Also mixing of secure and non-secure is not allowed.
- *
- * RETURNS
- * Zero for success or -errorno.
- */
-int msm_atomic_check(struct drm_device *dev,
-			    struct drm_atomic_state *state)
-{
-	struct msm_drm_private *priv;
-
-	priv = dev->dev_private;
-	if (priv && priv->kms && priv->kms->funcs &&
-			priv->kms->funcs->atomic_check)
-		return priv->kms->funcs->atomic_check(priv->kms, state);
-
-	return drm_atomic_helper_check(dev, state);
-}
-
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
-	.output_poll_changed = msm_fb_output_poll_changed,
-	.atomic_check = msm_atomic_check,
-	.atomic_commit = msm_atomic_commit,
-	.atomic_state_alloc = msm_atomic_state_alloc,
-	.atomic_state_clear = msm_atomic_state_clear,
-	.atomic_state_free = msm_atomic_state_free,
+	.output_poll_changed = drm_fb_helper_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
@@ -241,8 +170,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
-									name);
+		dev_err(&pdev->dev, "failed to get memory resource: %s\n",
+			       name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -255,91 +184,83 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 	}
 
 	if (reglog)
-		dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
-			dbgname, ptr, size);
+		printk(KERN_DEBUG "IO:region %s %pk %08lx\n", dbgname,
+				ptr, size);
 
 	return ptr;
 }
 
-unsigned long msm_iomap_size(struct platform_device *pdev, const char *name)
-{
-	struct resource *res;
-
-	if (name)
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	else
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	if (!res) {
-		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
-									name);
-		return 0;
-	}
-
-	return resource_size(res);
-}
-
-void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
-{
-	devm_iounmap(&pdev->dev, addr);
-}
-
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
-		pr_debug("IO:W %pK %08x\n", addr, data);
+		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
 	writel(data, addr);
 }
 
 u32 msm_readl(const void __iomem *addr)
 {
 	u32 val = readl(addr);
-
 	if (reglog)
-		pr_err("IO:R %pK %08x\n", addr, val);
+		pr_err("IO:R %p %08x\n", addr, val);
 	return val;
 }
 
-struct vblank_work {
-	struct kthread_work work;
+struct vblank_event {
+	struct list_head node;
 	int crtc_id;
 	bool enable;
-	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct kthread_work *work)
 {
-	struct vblank_work *cur_work = container_of(work,
-					struct vblank_work, work);
-	struct msm_drm_private *priv = cur_work->priv;
+	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+						struct msm_vblank_ctrl, work);
+	struct msm_drm_private *priv = container_of(vbl_ctrl,
+					struct msm_drm_private, vblank_ctrl);
 	struct msm_kms *kms = priv->kms;
+	struct vblank_event *vbl_ev, *tmp;
+	unsigned long flags;
 
-	if (cur_work->enable)
-		kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
-	else
-		kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	kfree(cur_work);
+		if (vbl_ev->enable)
+			kms->funcs->enable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+		else
+			kms->funcs->disable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+
+		kfree(vbl_ev);
+
+		spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct vblank_work *cur_work;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev;
+	unsigned long flags;
 
-	if (!priv || crtc_id >= priv->num_crtcs)
-		return -EINVAL;
-
-	cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
-	if (!cur_work)
+	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+	if (!vbl_ev)
 		return -ENOMEM;
 
-	kthread_init_work(&cur_work->work, vblank_ctrl_worker);
-	cur_work->crtc_id = crtc_id;
-	cur_work->enable = enable;
-	cur_work->priv = priv;
+	vbl_ev->crtc_id = crtc_id;
+	vbl_ev->enable = enable;
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+			&vbl_ctrl->work);
 
 	return 0;
 }
@@ -350,9 +271,21 @@ static int msm_drm_uninit(struct device *dev)
 	struct drm_device *ddev = platform_get_drvdata(pdev);
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
-	struct msm_gpu *gpu = priv->gpu;
+	struct msm_mdss *mdss = priv->mdss;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
+	/* We must cancel and cleanup any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+	kthread_flush_work(&vbl_ctrl->work);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		kfree(vbl_ev);
+	}
+
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
@@ -372,12 +305,7 @@ static int msm_drm_uninit(struct device *dev)
 
 	drm_kms_helper_poll_fini(ddev);
 
-	drm_mode_config_cleanup(ddev);
-
-	if (priv->registered) {
-		drm_dev_unregister(ddev);
-		priv->registered = false;
-	}
+	drm_dev_unregister(ddev);
 
 	msm_perf_debugfs_cleanup(priv);
 	msm_rd_debugfs_cleanup(priv);
@@ -398,15 +326,6 @@ static int msm_drm_uninit(struct device *dev)
 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
-	if (gpu) {
-		mutex_lock(&ddev->struct_mutex);
-		// XXX what do we do here?
-		//pm_runtime_enable(&pdev->dev);
-		gpu->funcs->pm_suspend(gpu);
-		mutex_unlock(&ddev->struct_mutex);
-		gpu->funcs->destroy(gpu);
-	}
-
 	if (priv->vram.paddr) {
 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
 		drm_mm_takedown(&priv->vram.mm);
@@ -416,49 +335,30 @@ static int msm_drm_uninit(struct device *dev)
 
 	component_unbind_all(dev, ddev);
 
-	sde_dbg_destroy();
-	debugfs_remove_recursive(priv->debug_root);
-
-	sde_power_client_destroy(&priv->phandle, priv->pclient);
-	sde_power_resource_deinit(pdev, &priv->phandle);
-
-	msm_mdss_destroy(ddev);
+	if (mdss && mdss->funcs)
+		mdss->funcs->destroy(ddev);
 
 	ddev->dev_private = NULL;
-	kfree(priv);
+	drm_dev_unref(ddev);
 
-	drm_dev_put(ddev);
+	kfree(priv);
 
 	return 0;
 }
 
 #define KMS_MDP4 4
 #define KMS_MDP5 5
-#define KMS_SDE  3
+#define KMS_DPU  3
 
 static int get_mdp_ver(struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
-	static const struct of_device_id match_types[] = { {
-		.compatible = "qcom,mdss_mdp",
-		.data	= (void	*)KMS_MDP5,
-	},
-	{
-		.compatible = "qcom,sde-kms",
-		.data	= (void	*)KMS_SDE,
-	},
-	{},
-	};
 	struct device *dev = &pdev->dev;
-	const struct of_device_id *match;
 
-	match = of_match_node(match_types, dev->of_node);
-	if (match)
-		return (int)(unsigned long)match->data;
-#endif
-	return KMS_MDP4;
+	return (int) (unsigned long) of_device_get_match_data(dev);
 }
 
+#include <linux/of_address.h>
+
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -533,36 +433,111 @@ static int msm_init_vram(struct drm_device *dev)
 	return ret;
 }
 
-#ifdef CONFIG_OF
-static int msm_component_bind_all(struct device *dev,
-				struct drm_device *drm_dev)
+static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 {
-	int ret;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	struct msm_mdss *mdss;
+	int ret, i;
+	struct sched_param param;
 
-	ret = component_bind_all(dev, drm_dev);
+	ddev = drm_dev_alloc(drv, dev);
+	if (IS_ERR(ddev)) {
+		dev_err(dev, "failed to allocate drm_device\n");
+		return PTR_ERR(ddev);
+	}
+
+	platform_set_drvdata(pdev, ddev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		goto err_unref_drm_dev;
+	}
+
+	ddev->dev_private = priv;
+	priv->dev = ddev;
+
+	switch (get_mdp_ver(pdev)) {
+	case KMS_MDP5:
+		ret = mdp5_mdss_init(ddev);
+		break;
+	case KMS_DPU:
+		ret = dpu_mdss_init(ddev);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
 	if (ret)
-		DRM_ERROR("component_bind_all failed: %d\n", ret);
+		goto err_free_priv;
 
-	return ret;
-}
-#else
-static int msm_component_bind_all(struct device *dev,
-				struct drm_device *drm_dev)
-{
-	return 0;
-}
-#endif
+	mdss = priv->mdss;
 
-static int msm_power_enable_wrapper(void *handle, void *client, bool enable)
-{
-	return sde_power_resource_enable(handle, client, enable);
-}
+	priv->wq = alloc_ordered_workqueue("msm", 0);
 
-static int msm_drm_display_thread_create(struct sched_param param,
-	struct msm_drm_private *priv, struct drm_device *ddev,
-	struct device *dev)
-{
-	int i, ret = 0;
+	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
+
+	drm_mode_config_init(ddev);
+
+	/* Bind all our sub-components: */
+	ret = component_bind_all(dev, ddev);
+	if (ret)
+		goto err_destroy_mdss;
+
+	ret = msm_init_vram(ddev);
+	if (ret)
+		goto err_msm_uninit;
+
+	msm_gem_shrinker_init(ddev);
+
+	switch (get_mdp_ver(pdev)) {
+	case KMS_MDP4:
+		kms = mdp4_kms_init(ddev);
+		priv->kms = kms;
+		break;
+	case KMS_MDP5:
+		kms = mdp5_kms_init(ddev);
+		break;
+	case KMS_DPU:
+		kms = dpu_kms_init(ddev);
+		priv->kms = kms;
+		break;
+	default:
+		kms = ERR_PTR(-ENODEV);
+		break;
+	}
+
+	if (IS_ERR(kms)) {
+		/*
+		 * NOTE: once we have GPU support, having no kms should not
+		 * be considered fatal.. ideally we would still support gpu
+		 * and (for example) use dmabuf/prime to share buffers with
+		 * imx drm driver on iMX5
+		 */
+		dev_err(dev, "failed to load kms\n");
+		ret = PTR_ERR(kms);
+		goto err_msm_uninit;
+	}
+
+	/* Enable normalization of plane zpos */
+	ddev->mode_config.normalize_zpos = true;
+
+	if (kms) {
+		ret = kms->funcs->hw_init(kms);
+		if (ret) {
+			dev_err(dev, "kms hw init failed: %d\n", ret);
+			goto err_msm_uninit;
+		}
+	}
+
+	ddev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
 	/**
 	 * this priority was found during empiric testing to have appropriate
@@ -633,203 +608,32 @@ static int msm_drm_display_thread_create(struct sched_param param,
 					priv->event_thread[i].thread = NULL;
 				}
 			}
-			return -EINVAL;
+			goto err_msm_uninit;
 		}
 	}
 
-	/**
-	 * Since pp interrupt is heavy weight, try to queue the work
-	 * into a dedicated worker thread, so that they dont interrupt
-	 * other important events.
-	 */
-	kthread_init_worker(&priv->pp_event_worker);
-	priv->pp_event_thread = kthread_run(kthread_worker_fn,
-			&priv->pp_event_worker, "pp_event");
-
-	ret = sched_setscheduler(priv->pp_event_thread,
-						SCHED_FIFO, &param);
-	if (ret)
-		pr_warn("pp_event thread priority update failed: %d\n",
-								ret);
-
-	if (IS_ERR(priv->pp_event_thread)) {
-		dev_err(dev, "failed to create pp_event kthread\n");
-		ret = PTR_ERR(priv->pp_event_thread);
-		priv->pp_event_thread = NULL;
-		return ret;
-	}
-
-	return 0;
-
-}
-static struct msm_kms *_msm_drm_init_helper(struct msm_drm_private *priv,
-	struct drm_device *ddev, struct device *dev,
-	struct platform_device *pdev)
-{
-	int ret;
-	struct msm_kms *kms;
-
-	switch (get_mdp_ver(pdev)) {
-	case KMS_MDP4:
-		kms = mdp4_kms_init(ddev);
-		break;
-	case KMS_MDP5:
-		kms = mdp5_kms_init(ddev);
-		break;
-	case KMS_SDE:
-		kms = sde_kms_init(ddev);
-		break;
-	default:
-		kms = ERR_PTR(-ENODEV);
-		break;
-	}
-
-	if (IS_ERR_OR_NULL(kms)) {
-		/*
-		 * NOTE: once we have GPU support, having no kms should not
-		 * be considered fatal.. ideally we would still support gpu
-		 * and (for example) use dmabuf/prime to share buffers with
-		 * imx drm driver on iMX5
-		 */
-		dev_err(dev, "failed to load kms\n");
-		return kms;
-	}
-	priv->kms = kms;
-	pm_runtime_enable(dev);
-
-	/**
-	 * Since kms->funcs->hw_init(kms) might call
-	 * drm_object_property_set_value to initialize some custom
-	 * properties we need to make sure mode_config.funcs are populated
-	 * beforehand to avoid dereferencing an unset value during the
-	 * drm_drv_uses_atomic_modeset check.
-	 */
-	ddev->mode_config.funcs = &mode_config_funcs;
-
-	ret = (kms)->funcs->hw_init(kms);
-	if (ret) {
-		dev_err(dev, "kms hw init failed: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return kms;
-}
-
-static int msm_drm_init(struct device *dev, struct drm_driver *drv)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct drm_device *ddev;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms = NULL;
-	struct sde_dbg_power_ctrl dbg_power_ctrl = { 0 };
-	int ret;
-	struct sched_param param = { 0 };
-
-	ddev = drm_dev_alloc(drv, dev);
-	if (!ddev) {
-		dev_err(dev, "failed to allocate drm_device\n");
-		return -ENOMEM;
-	}
-
-	drm_mode_config_init(ddev);
-	platform_set_drvdata(pdev, ddev);
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		ret = -ENOMEM;
-		goto priv_alloc_fail;
-	}
-
-	ddev->dev_private = priv;
-	priv->dev = ddev;
-
-	ret = msm_mdss_init(ddev);
-	if (ret)
-		goto mdss_init_fail;
-
-	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
-	init_waitqueue_head(&priv->pending_crtcs_event);
-
-	INIT_LIST_HEAD(&priv->client_event_list);
-	INIT_LIST_HEAD(&priv->inactive_list);
-
-	ret = sde_power_resource_init(pdev, &priv->phandle);
-	if (ret) {
-		pr_err("sde power resource init failed\n");
-		goto power_init_fail;
-	}
-
-	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
-	if (IS_ERR_OR_NULL(priv->pclient)) {
-		pr_err("sde power client create failed\n");
-		ret = -EINVAL;
-		goto power_client_fail;
-	}
-
-	dbg_power_ctrl.handle = &priv->phandle;
-	dbg_power_ctrl.client = priv->pclient;
-	dbg_power_ctrl.enable_fn = msm_power_enable_wrapper;
-	ret = sde_dbg_init(&pdev->dev, &dbg_power_ctrl);
-	if (ret) {
-		dev_err(dev, "failed to init sde dbg: %d\n", ret);
-		goto dbg_init_fail;
-	}
-
-	/* Bind all our sub-components: */
-	ret = msm_component_bind_all(dev, ddev);
-	if (ret)
-		goto bind_fail;
-
-	ret = msm_init_vram(ddev);
-	if (ret)
-		goto fail;
-
-	ddev->mode_config.funcs = &mode_config_funcs;
-	ddev->mode_config.helper_private = &mode_config_helper_funcs;
-
-	kms = _msm_drm_init_helper(priv, ddev, dev, pdev);
-	if (IS_ERR_OR_NULL(kms)) {
-		dev_err(dev, "msm_drm_init_helper failed\n");
-		goto fail;
-	}
-
-	ret = msm_drm_display_thread_create(param, priv, ddev, dev);
-	if (ret) {
-		dev_err(dev, "msm_drm_display_thread_create failed\n");
-		goto fail;
-	}
-
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
 		dev_err(dev, "failed to initialize vblank\n");
-		goto fail;
+		goto err_msm_uninit;
 	}
 
 	if (kms) {
 		pm_runtime_get_sync(dev);
-		ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
+		ret = drm_irq_install(ddev, kms->irq);
 		pm_runtime_put_sync(dev);
 		if (ret < 0) {
 			dev_err(dev, "failed to install IRQ handler\n");
-			goto fail;
+			goto err_msm_uninit;
 		}
 	}
 
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
-		goto fail;
-	priv->registered = true;
+		goto err_msm_uninit;
 
 	drm_mode_config_reset(ddev);
 
-	if (kms && kms->funcs && kms->funcs->cont_splash_config) {
-		ret = kms->funcs->cont_splash_config(kms);
-		if (ret) {
-			dev_err(dev, "kms cont_splash config failed.\n");
-			goto fail;
-		}
-	}
-
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
@@ -837,58 +641,28 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	ret = msm_debugfs_late_init(ddev);
 	if (ret)
-		goto fail;
-
-	priv->debug_root = debugfs_create_dir("debug",
-					ddev->primary->debugfs_root);
-	if (IS_ERR_OR_NULL(priv->debug_root)) {
-		pr_err("debugfs_root create_dir fail, error %ld\n",
-		       PTR_ERR(priv->debug_root));
-		priv->debug_root = NULL;
-		goto fail;
-	}
-
-	ret = sde_dbg_debugfs_register(priv->debug_root);
-	if (ret) {
-		dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
-		goto fail;
-	}
-
-	/* perform subdriver post initialization */
-	if (kms && kms->funcs && kms->funcs->postinit) {
-		ret = kms->funcs->postinit(kms);
-		if (ret) {
-			pr_err("kms post init failed: %d\n", ret);
-			goto fail;
-		}
-	}
+		goto err_msm_uninit;
 
 	drm_kms_helper_poll_init(ddev);
 
 	return 0;
 
-fail:
+err_msm_uninit:
 	msm_drm_uninit(dev);
 	return ret;
-bind_fail:
-	sde_dbg_destroy();
-dbg_init_fail:
-	sde_power_client_destroy(&priv->phandle, priv->pclient);
-power_client_fail:
-	sde_power_resource_deinit(pdev, &priv->phandle);
-power_init_fail:
-	msm_mdss_destroy(ddev);
-mdss_init_fail:
+err_destroy_mdss:
+	if (mdss && mdss->funcs)
+		mdss->funcs->destroy(ddev);
+err_free_priv:
 	kfree(priv);
-priv_alloc_fail:
-	drm_dev_put(ddev);
+err_unref_drm_dev:
+	drm_dev_unref(ddev);
 	return ret;
 }
 
 /*
  * DRM operations:
  */
-
 #ifdef CONFIG_QCOM_KGSL
 static void load_gpu(struct drm_device *dev)
 {
@@ -917,19 +691,9 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
 		return -ENOMEM;
 
 	msm_submitqueue_init(dev, ctx);
-	mutex_init(&ctx->power_lock);
 
 	file->driver_priv = ctx;
 
-	if (dev && dev->dev_private) {
-		struct msm_drm_private *priv = dev->dev_private;
-		struct msm_kms *kms;
-
-		kms = priv->kms;
-		if (kms && kms->funcs && kms->funcs->postopen)
-			kms->funcs->postopen(kms, file);
-	}
-
 	return 0;
 }
 
@@ -949,188 +713,19 @@ static void context_close(struct msm_file_private *ctx)
 	kfree(ctx);
 }
 
-static void msm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	if (kms && kms->funcs && kms->funcs->preclose)
-		kms->funcs->preclose(kms, file);
-}
-
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
-	struct msm_kms *kms = priv->kms;
-
-	if (kms && kms->funcs && kms->funcs->postclose)
-		kms->funcs->postclose(kms, file);
 
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
 		priv->lastctx = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
-	mutex_lock(&ctx->power_lock);
-	if (ctx->enable_refcnt) {
-		SDE_EVT32(ctx->enable_refcnt);
-		sde_power_resource_enable(&priv->phandle,
-				priv->pclient, false);
-	}
-	mutex_unlock(&ctx->power_lock);
-
 	context_close(ctx);
 }
 
-static int msm_disable_all_modes_commit(
-		struct drm_device *dev,
-		struct drm_atomic_state *state)
-{
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	unsigned int plane_mask;
-	int ret;
-
-	plane_mask = 0;
-	drm_for_each_plane(plane, dev) {
-		struct drm_plane_state *plane_state;
-
-		plane_state = drm_atomic_get_plane_state(state, plane);
-		if (IS_ERR(plane_state)) {
-			ret = PTR_ERR(plane_state);
-			goto fail;
-		}
-
-		plane_state->rotation = 0;
-
-		plane->old_fb = plane->fb;
-		plane_mask |= 1 << drm_plane_index(plane);
-
-		/* disable non-primary: */
-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
-			continue;
-
-		DRM_DEBUG("disabling plane %d\n", plane->base.id);
-
-		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
-		if (ret != 0)
-			DRM_ERROR("error %d disabling plane %d\n", ret,
-					plane->base.id);
-	}
-
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_mode_set mode_set;
-
-		memset(&mode_set, 0, sizeof(struct drm_mode_set));
-		mode_set.crtc = crtc;
-
-		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
-
-		ret = __drm_atomic_helper_set_config(&mode_set, state);
-		if (ret != 0)
-			DRM_ERROR("error %d disabling crtc %d\n", ret,
-					crtc->base.id);
-	}
-
-	DRM_DEBUG("committing disables\n");
-	ret = drm_atomic_commit(state);
-
-fail:
-	DRM_DEBUG("disables result %d\n", ret);
-	return ret;
-}
-
-/**
- * msm_clear_all_modes - disables all planes and crtcs via an atomic commit
- *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
- * @dev: device pointer
- * @Return: 0 on success, otherwise -error
- */
-static int msm_disable_all_modes(
-		struct drm_device *dev,
-		struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_atomic_state *state;
-	int ret, i;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return -ENOMEM;
-
-	state->acquire_ctx = ctx;
-
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		ret = msm_disable_all_modes_commit(dev, state);
-		if (ret != -EDEADLK || ret != -ERESTARTSYS)
-			break;
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(ctx);
-	}
-
-	drm_atomic_state_put(state);
-
-	return ret;
-}
-
-static void msm_lastclose(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	struct drm_modeset_acquire_ctx ctx;
-	int i, rc;
-
-	/* check for splash status before triggering cleanup
-	 * if we end up here with splash status ON i.e before first
-	 * commit then ignore the last close call
-	 */
-	if (kms && kms->funcs && kms->funcs->check_for_splash
-		&& kms->funcs->check_for_splash(kms))
-		return;
-
-	/*
-	 * clean up vblank disable immediately as this is the last close.
-	 */
-	for (i = 0; i < dev->num_crtcs; i++) {
-		struct drm_vblank_crtc *vblank = &dev->vblank[i];
-		struct timer_list *disable_timer = &vblank->disable_timer;
-
-		if (del_timer_sync(disable_timer))
-			disable_timer->function(disable_timer);
-	}
-
-	/* wait for pending vblank requests to be executed by worker thread */
-	flush_workqueue(priv->wq);
-
-	if (priv->fbdev) {
-		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-		return;
-	}
-
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	rc = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (rc)
-		goto fail;
-
-	rc = msm_disable_all_modes(dev, &ctx);
-	if (rc)
-		goto fail;
-
-	if (kms && kms->funcs && kms->funcs->lastclose)
-		kms->funcs->lastclose(kms, &ctx);
-
-fail:
-	if (rc == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (rc) {
-		pr_err("last close failed: %d\n", rc);
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-}
-
 static irqreturn_t msm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -1170,7 +765,7 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
 		return -ENXIO;
-	DBG("dev=%pK, crtc=%u", dev, pipe);
+	DBG("dev=%p, crtc=%u", dev, pipe);
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
@@ -1180,7 +775,7 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
 		return;
-	DBG("dev=%pK, crtc=%u", dev, pipe);
+	DBG("dev=%p, crtc=%u", dev, pipe);
 	vblank_ctrl_queue_work(priv, pipe, false);
 }
 
@@ -1377,328 +972,6 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 	return ret;
 }
 
-static int msm_drm_object_supports_event(struct drm_device *dev,
-		struct drm_msm_event_req *req)
-{
-	int ret = -EINVAL;
-	struct drm_mode_object *arg_obj;
-
-	arg_obj = drm_mode_object_find(dev, NULL, req->object_id,
-				req->object_type);
-	if (!arg_obj)
-		return -ENOENT;
-
-	switch (arg_obj->type) {
-	case DRM_MODE_OBJECT_CRTC:
-	case DRM_MODE_OBJECT_CONNECTOR:
-		ret = 0;
-		break;
-	default:
-		ret = -EOPNOTSUPP;
-		break;
-	}
-
-	drm_mode_object_put(arg_obj);
-
-	return ret;
-}
-
-static int msm_register_event(struct drm_device *dev,
-	struct drm_msm_event_req *req, struct drm_file *file, bool en)
-{
-	int ret = -EINVAL;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-	struct drm_mode_object *arg_obj;
-
-	arg_obj = drm_mode_object_find(dev, file, req->object_id,
-				req->object_type);
-	if (!arg_obj)
-		return -ENOENT;
-
-	ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
-
-	drm_mode_object_put(arg_obj);
-
-	return ret;
-}
-
-static int msm_event_client_count(struct drm_device *dev,
-		struct drm_msm_event_req *req_event, bool locked)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	unsigned long flag = 0;
-	struct msm_drm_event *node;
-	int count = 0;
-
-	if (!locked)
-		spin_lock_irqsave(&dev->event_lock, flag);
-	list_for_each_entry(node, &priv->client_event_list, base.link) {
-		if (node->event.type == req_event->event &&
-			node->info.object_id == req_event->object_id)
-			count++;
-	}
-	if (!locked)
-		spin_unlock_irqrestore(&dev->event_lock, flag);
-
-	return count;
-}
-
-static int msm_ioctl_register_event(struct drm_device *dev, void *data,
-				    struct drm_file *file)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_msm_event_req *req_event = data;
-	struct msm_drm_event *client, *node;
-	unsigned long flag = 0;
-	bool dup_request = false;
-	int ret = 0, count = 0;
-
-	ret = msm_drm_object_supports_event(dev, req_event);
-	if (ret) {
-		DRM_ERROR("unsupported event %x object %x object id %d\n",
-			req_event->event, req_event->object_type,
-			req_event->object_id);
-		return ret;
-	}
-
-	spin_lock_irqsave(&dev->event_lock, flag);
-	list_for_each_entry(node, &priv->client_event_list, base.link) {
-		if (node->base.file_priv != file)
-			continue;
-		if (node->event.type == req_event->event &&
-			node->info.object_id == req_event->object_id) {
-			DRM_DEBUG("duplicate request for event %x obj id %d\n",
-				node->event.type, node->info.object_id);
-			dup_request = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flag);
-
-	if (dup_request)
-		return -EALREADY;
-
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-
-	client->base.file_priv = file;
-	client->base.event = &client->event;
-	client->event.type = req_event->event;
-	memcpy(&client->info, req_event, sizeof(client->info));
-
-	/* Get the count of clients that have registered for event.
-	 * Event should be enabled for first client, for subsequent enable
-	 * calls add to client list and return.
-	 */
-	count = msm_event_client_count(dev, req_event, false);
-	/* Add current client to list */
-	spin_lock_irqsave(&dev->event_lock, flag);
-	list_add_tail(&client->base.link, &priv->client_event_list);
-	spin_unlock_irqrestore(&dev->event_lock, flag);
-
-	if (count)
-		return 0;
-
-	ret = msm_register_event(dev, req_event, file, true);
-	if (ret) {
-		DRM_ERROR("failed to enable event %x object %x object id %d\n",
-			req_event->event, req_event->object_type,
-			req_event->object_id);
-		spin_lock_irqsave(&dev->event_lock, flag);
-		list_del(&client->base.link);
-		spin_unlock_irqrestore(&dev->event_lock, flag);
-		kfree(client);
-	}
-	return ret;
-}
-
-static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
-				      struct drm_file *file)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_msm_event_req *req_event = data;
-	struct msm_drm_event *client = NULL, *node, *temp;
-	unsigned long flag = 0;
-	int count = 0;
-	bool found = false;
-	int ret = 0;
-
-	ret = msm_drm_object_supports_event(dev, req_event);
-	if (ret) {
-		DRM_ERROR("unsupported event %x object %x object id %d\n",
-			req_event->event, req_event->object_type,
-			req_event->object_id);
-		return ret;
-	}
-
-	spin_lock_irqsave(&dev->event_lock, flag);
-	list_for_each_entry_safe(node, temp, &priv->client_event_list,
-			base.link) {
-		if (node->event.type == req_event->event &&
-		    node->info.object_id == req_event->object_id &&
-		    node->base.file_priv == file) {
-			client = node;
-			list_del(&client->base.link);
-			found = true;
-			kfree(client);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flag);
-
-	if (!found)
-		return -ENOENT;
-
-	count = msm_event_client_count(dev, req_event, false);
-	if (!count)
-		ret = msm_register_event(dev, req_event, file, false);
-
-	return ret;
-}
-
-void msm_mode_object_event_notify(struct drm_mode_object *obj,
-		struct drm_device *dev, struct drm_event *event, u8 *payload)
-{
-	struct msm_drm_private *priv = NULL;
-	unsigned long flags;
-	struct msm_drm_event *notify, *node;
-	int len = 0, ret;
-
-	if (!obj || !event || !event->length || !payload) {
-		DRM_ERROR("err param obj %pK event %pK len %d payload %pK\n",
-			obj, event, ((event) ? (event->length) : -1),
-			payload);
-		return;
-	}
-	priv = (dev) ? dev->dev_private : NULL;
-	if (!dev || !priv) {
-		DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
-		return;
-	}
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	list_for_each_entry(node, &priv->client_event_list, base.link) {
-		if (node->event.type != event->type ||
-			obj->id != node->info.object_id)
-			continue;
-		len = event->length + sizeof(struct msm_drm_event);
-		if (node->base.file_priv->event_space < len) {
-			DRM_ERROR("Insufficient space %d for event %x len %d\n",
-				node->base.file_priv->event_space, event->type,
-				len);
-			continue;
-		}
-		notify = kzalloc(len, GFP_ATOMIC);
-		if (!notify)
-			continue;
-		notify->base.file_priv = node->base.file_priv;
-		notify->base.event = &notify->event;
-		notify->event.type = node->event.type;
-		notify->event.length = event->length +
-					sizeof(struct drm_msm_event_resp);
-		memcpy(&notify->info, &node->info, sizeof(notify->info));
-		memcpy(notify->data, payload, event->length);
-		ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
-			&notify->base, &notify->event);
-		if (ret) {
-			kfree(notify);
-			continue;
-		}
-		drm_send_event_locked(dev, &notify->base);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static int msm_release(struct inode *inode, struct file *filp)
-{
-	struct drm_file *file_priv = filp->private_data;
-	struct drm_minor *minor = file_priv->minor;
-	struct drm_device *dev = minor->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_drm_event *node, *temp, *tmp_node;
-	u32 count;
-	unsigned long flags;
-	LIST_HEAD(tmp_head);
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	list_for_each_entry_safe(node, temp, &priv->client_event_list,
-			base.link) {
-		if (node->base.file_priv != file_priv)
-			continue;
-		list_del(&node->base.link);
-		list_add_tail(&node->base.link, &tmp_head);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	list_for_each_entry_safe(node, temp, &tmp_head,
-			base.link) {
-		list_del(&node->base.link);
-		count = msm_event_client_count(dev, &node->info, false);
-
-		list_for_each_entry(tmp_node, &tmp_head, base.link) {
-			if (tmp_node->event.type == node->info.event &&
-				tmp_node->info.object_id ==
-					node->info.object_id)
-				count++;
-		}
-		if (!count)
-			msm_register_event(dev, &node->info, file_priv, false);
-		kfree(node);
-	}
-
-	return drm_release(inode, filp);
-}
-
-/**
- * msm_ioctl_rmfb2 - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct drm_framebuffer *fb = NULL;
-	struct drm_framebuffer *fbl = NULL;
-	uint32_t *id = data;
-	int found = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, file_priv, *id);
-	if (!fb)
-		return -ENOENT;
-
-	/* drop extra ref from traversing drm_framebuffer_lookup */
-	drm_framebuffer_put(fb);
-
-	mutex_lock(&file_priv->fbs_lock);
-	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
-		if (fb == fbl)
-			found = 1;
-	if (!found) {
-		mutex_unlock(&file_priv->fbs_lock);
-		return -ENOENT;
-	}
-
-	list_del_init(&fb->filp_head);
-	mutex_unlock(&file_priv->fbs_lock);
-
-	drm_framebuffer_put(fb);
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_ioctl_rmfb2);
 
 static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
 		struct drm_file *file)
@@ -1721,62 +994,6 @@ static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
 	return msm_submitqueue_remove(file->driver_priv, id);
 }
 
-/**
- * msm_ioctl_power_ctrl - enable/disable power vote on MDSS Hw
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- */
-int msm_ioctl_power_ctrl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct msm_file_private *ctx = file_priv->driver_priv;
-	struct msm_drm_private *priv;
-	struct drm_msm_power_ctrl *power_ctrl = data;
-	bool vote_req = false;
-	int old_cnt;
-	int rc = 0;
-
-	if (unlikely(!power_ctrl)) {
-		DRM_ERROR("invalid ioctl data\n");
-		return -EINVAL;
-	}
-
-	priv = dev->dev_private;
-
-	mutex_lock(&ctx->power_lock);
-
-	old_cnt = ctx->enable_refcnt;
-	if (power_ctrl->enable) {
-		if (!ctx->enable_refcnt)
-			vote_req = true;
-		ctx->enable_refcnt++;
-	} else if (ctx->enable_refcnt) {
-		ctx->enable_refcnt--;
-		if (!ctx->enable_refcnt)
-			vote_req = true;
-	} else {
-		pr_err("ignoring, unbalanced disable\n");
-	}
-
-	if (vote_req) {
-		rc = sde_power_resource_enable(&priv->phandle,
-				priv->pclient, power_ctrl->enable);
-
-		if (rc)
-			ctx->enable_refcnt = old_cnt;
-	}
-
-	pr_debug("pid %d enable %d, refcnt %d, vote_req %d\n",
-			current->pid, power_ctrl->enable, ctx->enable_refcnt,
-			vote_req);
-	SDE_EVT32(current->pid, power_ctrl->enable, ctx->enable_refcnt,
-			vote_req);
-	mutex_unlock(&ctx->power_lock);
-	return rc;
-}
-
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1788,14 +1005,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT,  msm_ioctl_register_event,
-			  DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT,  msm_ioctl_deregister_event,
-			  DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(MSM_POWER_CTRL, msm_ioctl_power_ctrl,
-			DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -1807,7 +1016,7 @@ static const struct vm_operations_struct vm_ops = {
 static const struct file_operations fops = {
 	.owner              = THIS_MODULE,
 	.open               = drm_open,
-	.release            = msm_release,
+	.release            = drm_release,
 	.unlocked_ioctl     = drm_ioctl,
 	.compat_ioctl       = drm_compat_ioctl,
 	.poll               = drm_poll,
@@ -1824,9 +1033,8 @@ static struct drm_driver msm_driver = {
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
 	.open               = msm_open,
-	.preclose           = msm_preclose,
-	.postclose          = msm_postclose,
-	.lastclose          = msm_lastclose,
+	.postclose           = msm_postclose,
+	.lastclose          = drm_fb_helper_lastclose,
 	.irq_handler        = msm_irq,
 	.irq_preinstall     = msm_irq_preinstall,
 	.irq_postinstall    = msm_irq_postinstall,
@@ -1840,7 +1048,7 @@ static struct drm_driver msm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
-	.gem_prime_import   = msm_gem_prime_import,
+	.gem_prime_import   = drm_gem_prime_import,
 	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
@@ -1855,7 +1063,7 @@ static struct drm_driver msm_driver = {
 	.ioctls             = msm_ioctls,
 	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
-	.name               = "msm_drm",
+	.name               = "msm",
 	.desc               = "MSM Snapdragon DRM",
 	.date               = "20130625",
 	.major              = MSM_VERSION_MAJOR,
@@ -1866,49 +1074,36 @@ static struct drm_driver msm_driver = {
 #ifdef CONFIG_PM_SLEEP
 static int msm_pm_suspend(struct device *dev)
 {
-	struct drm_device *ddev;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms;
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_kms *kms = priv->kms;
 
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev->dev_private)
-		return -EINVAL;
-
-	priv = ddev->dev_private;
-	kms = priv->kms;
-
+	/* TODO: Use atomic helper suspend/resume */
 	if (kms && kms->funcs && kms->funcs->pm_suspend)
 		return kms->funcs->pm_suspend(dev);
 
-	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
 
+	priv->pm_state = drm_atomic_helper_suspend(ddev);
+	if (IS_ERR(priv->pm_state)) {
+		drm_kms_helper_poll_enable(ddev);
+		return PTR_ERR(priv->pm_state);
+	}
+
 	return 0;
 }
 
 static int msm_pm_resume(struct device *dev)
 {
-	struct drm_device *ddev;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms;
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_kms *kms = priv->kms;
 
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev->dev_private)
-		return -EINVAL;
-
-	priv = ddev->dev_private;
-	kms = priv->kms;
-
+	/* TODO: Use atomic helper suspend/resume */
 	if (kms && kms->funcs && kms->funcs->pm_resume)
 		return kms->funcs->pm_resume(dev);
 
-	/* enable hot-plug polling */
+	drm_atomic_helper_resume(ddev, priv->pm_state);
 	drm_kms_helper_poll_enable(ddev);
 
 	return 0;
@@ -1920,11 +1115,12 @@ static int msm_runtime_suspend(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_mdss *mdss = priv->mdss;
 
 	DBG("");
 
-	if (priv->mdss)
-		return msm_mdss_disable(priv->mdss);
+	if (mdss && mdss->funcs)
+		return mdss->funcs->disable(mdss);
 
 	return 0;
 }
@@ -1933,11 +1129,12 @@ static int msm_runtime_resume(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_mdss *mdss = priv->mdss;
 
 	DBG("");
 
-	if (priv->mdss)
-		return msm_mdss_enable(priv->mdss);
+	if (mdss && mdss->funcs)
+		return mdss->funcs->enable(mdss);
 
 	return 0;
 }
@@ -2026,38 +1223,23 @@ static int add_components_mdp(struct device *mdp_dev,
 
 static int compare_name_mdp(struct device *dev, void *data)
 {
-	return (strnstr(dev_name(dev), "mdp", strlen("mdp")) != NULL);
+	return (strnstr(dev_name(dev), "mdp") != NULL);
 }
 
 static int add_display_components(struct device *dev,
 				  struct component_match **matchptr)
 {
-	struct device *mdp_dev = NULL;
-	struct device_node *node;
+	struct device *mdp_dev;
 	int ret;
 
-	if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) {
-		struct device_node *np = dev->of_node;
-		unsigned int i;
-
-		for (i = 0; ; i++) {
-			node = of_parse_phandle(np, "connectors", i);
-			if (!node)
-				break;
-
-			component_match_add(dev, matchptr, compare_of, node);
-		}
-
-		return 0;
-	}
-
 	/*
-	 * MDP5 based devices don't have a flat hierarchy. There is a top level
-	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
-	 * children devices, find the MDP5 node, and then add the interfaces
-	 * to our components list.
+	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+	 * Populate the children devices, find the MDP5/DPU node, and then add
+	 * the interfaces to our components list.
 	 */
-	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
 			dev_err(dev, "failed to populate children devices\n");
@@ -2074,8 +1256,8 @@ static int add_display_components(struct device *dev,
 		put_device(mdp_dev);
 
 		/* add the MDP component itself */
-		component_match_add(dev, matchptr, compare_of,
-				   mdp_dev->of_node);
+		drm_of_component_match_add(dev, matchptr, compare_of,
+					   mdp_dev->of_node);
 	} else {
 		/* MDP4 */
 		mdp_dev = dev;
@@ -2088,30 +1270,6 @@ static int add_display_components(struct device *dev,
 	return ret;
 }
 
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_get(struct drm_device *dev,
-		unsigned int domain)
-{
-	struct msm_drm_private *priv = NULL;
-	struct msm_kms *kms;
-	const struct msm_kms_funcs *funcs;
-
-	if ((!dev) || (!dev->dev_private))
-		return NULL;
-
-	priv = dev->dev_private;
-	kms = priv->kms;
-	if (!kms)
-		return NULL;
-
-	funcs = kms->funcs;
-
-	if ((!funcs) || (!funcs->get_address_space))
-		return NULL;
-
-	return funcs->get_address_space(priv->kms, domain);
-}
-
 /*
  * We don't know what's the best binding to link the gpu with the drm device.
  * Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -2126,9 +1284,9 @@ static const struct of_device_id msm_gpu_match[] = {
 
 #ifdef CONFIG_QCOM_KGSL
 static int add_gpu_components(struct device *dev,
-			      struct component_match **matchptr)
+					      struct component_match **matchptr)
 {
-	return 0;
+		return 0;
 }
 #else
 static int add_gpu_components(struct device *dev,
@@ -2169,8 +1327,8 @@ static const struct component_master_ops msm_drm_ops = {
 
 static int msm_pdev_probe(struct platform_device *pdev)
 {
-	int ret;
 	struct component_match *match = NULL;
+	int ret;
 
 	ret = add_display_components(&pdev->dev, &match);
 	if (ret)
@@ -2180,7 +1338,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	/* on all devices that I am aware of, iommu's which can map
+	 * any address the cpu can see are used:
+	 */
+	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+	if (ret)
+		return ret;
+
 	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
 }
 
@@ -2189,50 +1353,24 @@ static int msm_pdev_remove(struct platform_device *pdev)
 	component_master_del(&pdev->dev, &msm_drm_ops);
 	of_platform_depopulate(&pdev->dev);
 
-	msm_drm_unbind(&pdev->dev);
-	component_master_del(&pdev->dev, &msm_drm_ops);
 	return 0;
 }
 
-static void msm_pdev_shutdown(struct platform_device *pdev)
-{
-	struct drm_device *ddev = platform_get_drvdata(pdev);
-	struct msm_drm_private *priv = NULL;
-
-	if (!ddev) {
-		DRM_ERROR("invalid drm device node\n");
-		return;
-	}
-
-	priv = ddev->dev_private;
-	if (!priv) {
-		DRM_ERROR("invalid msm drm private node\n");
-		return;
-	}
-
-	msm_lastclose(ddev);
-
-	/* set this after lastclose to allow kickoff from lastclose */
-	priv->shutdown_in_progress = true;
-}
-
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
-	{ .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE },
-	{},
+	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver msm_platform_driver = {
 	.probe      = msm_pdev_probe,
 	.remove     = msm_pdev_remove,
-	.shutdown   = msm_pdev_shutdown,
 	.driver     = {
-		.name   = "msm_drm",
+		.name   = "msm",
 		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
-		.suppress_bind_attrs = true,
 	},
 };
 
@@ -2252,7 +1390,8 @@ static int __init msm_drm_register(void)
 		return -EINVAL;
 
 	DBG("init");
-	msm_smmu_driver_init();
+	msm_mdp_register();
+	msm_dpu_register();
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
@@ -2268,7 +1407,8 @@ static void __exit msm_drm_unregister(void)
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
-	msm_smmu_driver_cleanup();
+	msm_mdp_unregister();
+	msm_dpu_unregister();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b8de212..754a437 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,6 @@
 #include <linux/types.h>
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
-#include <linux/sde_io_util.h>
 #include <asm/sizes.h>
 #include <linux/kthread.h>
 
@@ -46,12 +45,6 @@
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
 
-#include "sde_power_handle.h"
-
-#define GET_MAJOR_REV(rev)		((rev) >> 28)
-#define GET_MINOR_REV(rev)		(((rev) >> 16) & 0xFFF)
-#define GET_STEP_REV(rev)		((rev) & 0xFFFF)
-
 struct msm_kms;
 struct msm_gpu;
 struct msm_mmu;
@@ -60,192 +53,51 @@ struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
 struct msm_fence_context;
-struct msm_fence_cb;
 struct msm_gem_address_space;
 struct msm_gem_vma;
 
-#define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS      8
 #define MAX_PLANES     20
 #define MAX_ENCODERS   8
 #define MAX_BRIDGES    8
 #define MAX_CONNECTORS 8
 
-#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 
 struct msm_file_private {
 	rwlock_t queuelock;
 	struct list_head submitqueues;
-
 	int queueid;
-
-	/* update the refcount when user driver calls power_ctrl IOCTL */
-	unsigned short enable_refcnt;
-
-	/* protects enable_refcnt */
-	struct mutex power_lock;
 };
 
 enum msm_mdp_plane_property {
-	/* blob properties, always put these first */
-	PLANE_PROP_CSC_V1,
-	PLANE_PROP_CSC_DMA_V1,
-	PLANE_PROP_INFO,
-	PLANE_PROP_SCALER_LUT_ED,
-	PLANE_PROP_SCALER_LUT_CIR,
-	PLANE_PROP_SCALER_LUT_SEP,
-	PLANE_PROP_SKIN_COLOR,
-	PLANE_PROP_SKY_COLOR,
-	PLANE_PROP_FOLIAGE_COLOR,
-	PLANE_PROP_VIG_GAMUT,
-	PLANE_PROP_VIG_IGC,
-	PLANE_PROP_DMA_IGC,
-	PLANE_PROP_DMA_GC,
-
-	/* # of blob properties */
-	PLANE_PROP_BLOBCOUNT,
-
-	/* range properties */
-	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
+	PLANE_PROP_ZPOS,
 	PLANE_PROP_ALPHA,
-	PLANE_PROP_COLOR_FILL,
-	PLANE_PROP_H_DECIMATE,
-	PLANE_PROP_V_DECIMATE,
-	PLANE_PROP_INPUT_FENCE,
-	PLANE_PROP_HUE_ADJUST,
-	PLANE_PROP_SATURATION_ADJUST,
-	PLANE_PROP_VALUE_ADJUST,
-	PLANE_PROP_CONTRAST_ADJUST,
-	PLANE_PROP_EXCL_RECT_V1,
-	PLANE_PROP_PREFILL_SIZE,
-	PLANE_PROP_PREFILL_TIME,
-	PLANE_PROP_SCALER_V1,
-	PLANE_PROP_SCALER_V2,
-	PLANE_PROP_INVERSE_PMA,
-
-	/* enum/bitmask properties */
-	PLANE_PROP_BLEND_OP,
-	PLANE_PROP_SRC_CONFIG,
-	PLANE_PROP_FB_TRANSLATION_MODE,
-	PLANE_PROP_MULTIRECT_MODE,
-
-	/* total # of properties */
-	PLANE_PROP_COUNT
+	PLANE_PROP_PREMULTIPLIED,
+	PLANE_PROP_MAX_NUM
 };
 
-enum msm_mdp_crtc_property {
-	CRTC_PROP_INFO,
-	CRTC_PROP_DEST_SCALER_LUT_ED,
-	CRTC_PROP_DEST_SCALER_LUT_CIR,
-	CRTC_PROP_DEST_SCALER_LUT_SEP,
-
-	/* # of blob properties */
-	CRTC_PROP_BLOBCOUNT,
-
-	/* range properties */
-	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
-	CRTC_PROP_OUTPUT_FENCE,
-	CRTC_PROP_OUTPUT_FENCE_OFFSET,
-	CRTC_PROP_DIM_LAYER_V1,
-	CRTC_PROP_CORE_CLK,
-	CRTC_PROP_CORE_AB,
-	CRTC_PROP_CORE_IB,
-	CRTC_PROP_LLCC_AB,
-	CRTC_PROP_LLCC_IB,
-	CRTC_PROP_DRAM_AB,
-	CRTC_PROP_DRAM_IB,
-	CRTC_PROP_ROT_PREFILL_BW,
-	CRTC_PROP_ROT_CLK,
-	CRTC_PROP_ROI_V1,
-	CRTC_PROP_SECURITY_LEVEL,
-	CRTC_PROP_IDLE_TIMEOUT,
-	CRTC_PROP_DEST_SCALER,
-	CRTC_PROP_CAPTURE_OUTPUT,
-
-	CRTC_PROP_IDLE_PC_STATE,
-
-	/* total # of properties */
-	CRTC_PROP_COUNT
-};
-
-enum msm_mdp_conn_property {
-	/* blob properties, always put these first */
-	CONNECTOR_PROP_SDE_INFO,
-	CONNECTOR_PROP_MODE_INFO,
-	CONNECTOR_PROP_HDR_INFO,
-	CONNECTOR_PROP_EXT_HDR_INFO,
-	CONNECTOR_PROP_PP_DITHER,
-	CONNECTOR_PROP_HDR_METADATA,
-
-	/* # of blob properties */
-	CONNECTOR_PROP_BLOBCOUNT,
-
-	/* range properties */
-	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
-	CONNECTOR_PROP_RETIRE_FENCE,
-	CONNECTOR_PROP_DST_X,
-	CONNECTOR_PROP_DST_Y,
-	CONNECTOR_PROP_DST_W,
-	CONNECTOR_PROP_DST_H,
-	CONNECTOR_PROP_ROI_V1,
-	CONNECTOR_PROP_BL_SCALE,
-	CONNECTOR_PROP_SV_BL_SCALE,
-
-	/* enum/bitmask properties */
-	CONNECTOR_PROP_TOPOLOGY_NAME,
-	CONNECTOR_PROP_TOPOLOGY_CONTROL,
-	CONNECTOR_PROP_AUTOREFRESH,
-	CONNECTOR_PROP_LP,
-	CONNECTOR_PROP_FB_TRANSLATION_MODE,
-	CONNECTOR_PROP_QSYNC_MODE,
-	CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE,
-
-	/* total # of properties */
-	CONNECTOR_PROP_COUNT
+struct msm_vblank_ctrl {
+	struct kthread_work work;
+	struct list_head event_list;
+	spinlock_t lock;
 };
 
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
 
 /**
- * enum msm_display_compression_type - compression method used for pixel stream
- * @MSM_DISPLAY_COMPRESSION_NONE:     Pixel data is not compressed
- * @MSM_DISPLAY_COMPRESSION_DSC:      DSC compresison is used
- */
-enum msm_display_compression_type {
-	MSM_DISPLAY_COMPRESSION_NONE,
-	MSM_DISPLAY_COMPRESSION_DSC,
-};
-
-/**
- * enum msm_display_compression_ratio - compression ratio
- * @MSM_DISPLAY_COMPRESSION_NONE: no compression
- * @MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: 2 to 1 compression
- * @MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: 3 to 1 compression
- */
-enum msm_display_compression_ratio {
-	MSM_DISPLAY_COMPRESSION_RATIO_NONE,
-	MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1,
-	MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1,
-	MSM_DISPLAY_COMPRESSION_RATIO_MAX,
-};
-
-/**
  * enum msm_display_caps - features/capabilities supported by displays
  * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
  * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
  * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
  * @MSM_DISPLAY_CAP_EDID:               EDID supported
- * @MSM_DISPLAY_ESD_ENABLED:            ESD feature enabled
- * @MSM_DISPLAY_CAP_MST_MODE:           Display with MST support
  */
 enum msm_display_caps {
 	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
 	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
 	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
 	MSM_DISPLAY_CAP_EDID		= BIT(3),
-	MSM_DISPLAY_ESD_ENABLED		= BIT(4),
-	MSM_DISPLAY_CAP_MST_MODE	= BIT(5),
 };
 
 /**
@@ -253,169 +105,11 @@ enum msm_display_caps {
  * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
  * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
  * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
- * @MSM_ENC_ACTIVE_REGION - wait for the TG to be in active pixel region
  */
 enum msm_event_wait {
 	MSM_ENC_COMMIT_DONE = 0,
 	MSM_ENC_TX_COMPLETE,
 	MSM_ENC_VBLANK,
-	MSM_ENC_ACTIVE_REGION,
-};
-
-/**
- * struct msm_roi_alignment - region of interest alignment restrictions
- * @xstart_pix_align: left x offset alignment restriction
- * @width_pix_align: width alignment restriction
- * @ystart_pix_align: top y offset alignment restriction
- * @height_pix_align: height alignment restriction
- * @min_width: minimum width restriction
- * @min_height: minimum height restriction
- */
-struct msm_roi_alignment {
-	uint32_t xstart_pix_align;
-	uint32_t width_pix_align;
-	uint32_t ystart_pix_align;
-	uint32_t height_pix_align;
-	uint32_t min_width;
-	uint32_t min_height;
-};
-
-/**
- * struct msm_roi_caps - display's region of interest capabilities
- * @enabled: true if some region of interest is supported
- * @merge_rois: merge rois before sending to display
- * @num_roi: maximum number of rois supported
- * @align: roi alignment restrictions
- */
-struct msm_roi_caps {
-	bool enabled;
-	bool merge_rois;
-	uint32_t num_roi;
-	struct msm_roi_alignment align;
-};
-
-/**
- * struct msm_display_dsc_info - defines dsc configuration
- * @version:                 DSC version.
- * @scr_rev:                 DSC revision.
- * @pic_height:              Picture height in pixels.
- * @pic_width:               Picture width in pixels.
- * @initial_lines:           Number of initial lines stored in encoder.
- * @pkt_per_line:            Number of packets per line.
- * @bytes_in_slice:          Number of bytes in slice.
- * @eol_byte_num:            Valid bytes at the end of line.
- * @pclk_per_line:           Compressed width.
- * @full_frame_slices:       Number of slice per interface.
- * @slice_height:            Slice height in pixels.
- * @slice_width:             Slice width in pixels.
- * @chunk_size:              Chunk size in bytes for slice multiplexing.
- * @slice_last_group_size:   Size of last group in pixels.
- * @bpp:                     Target bits per pixel.
- * @bpc:                     Number of bits per component.
- * @line_buf_depth:          Line buffer bit depth.
- * @block_pred_enable:       Block prediction enabled/disabled.
- * @vbr_enable:              VBR mode.
- * @enable_422:              Indicates if input uses 4:2:2 sampling.
- * @convert_rgb:             DSC color space conversion.
- * @input_10_bits:           10 bit per component input.
- * @slice_per_pkt:           Number of slices per packet.
- * @initial_dec_delay:       Initial decoding delay.
- * @initial_xmit_delay:      Initial transmission delay.
- * @initial_scale_value:     Scale factor value at the beginning of a slice.
- * @scale_decrement_interval: Scale set up at the beginning of a slice.
- * @scale_increment_interval: Scale set up at the end of a slice.
- * @first_line_bpg_offset:   Extra bits allocated on the first line of a slice.
- * @nfl_bpg_offset:          Slice specific settings.
- * @slice_bpg_offset:        Slice specific settings.
- * @initial_offset:          Initial offset at the start of a slice.
- * @final_offset:            Maximum end-of-slice value.
- * @rc_model_size:           Number of bits in RC model.
- * @det_thresh_flatness:     Flatness threshold.
- * @max_qp_flatness:         Maximum QP for flatness adjustment.
- * @min_qp_flatness:         Minimum QP for flatness adjustment.
- * @edge_factor:             Ratio to detect presence of edge.
- * @quant_incr_limit0:       QP threshold.
- * @quant_incr_limit1:       QP threshold.
- * @tgt_offset_hi:           Upper end of variability range.
- * @tgt_offset_lo:           Lower end of variability range.
- * @buf_thresh:              Thresholds in RC model
- * @range_min_qp:            Min QP allowed.
- * @range_max_qp:            Max QP allowed.
- * @range_bpg_offset:        Bits per group adjustment.
- * @extra_width:             Extra width required in timing calculations.
- */
-struct msm_display_dsc_info {
-	u8 version;
-	u8 scr_rev;
-
-	int pic_height;
-	int pic_width;
-	int slice_height;
-	int slice_width;
-
-	int initial_lines;
-	int pkt_per_line;
-	int bytes_in_slice;
-	int bytes_per_pkt;
-	int eol_byte_num;
-	int pclk_per_line;
-	int full_frame_slices;
-	int slice_last_group_size;
-	int bpp;
-	int bpc;
-	int line_buf_depth;
-
-	int slice_per_pkt;
-	int chunk_size;
-	bool block_pred_enable;
-	int vbr_enable;
-	int enable_422;
-	int convert_rgb;
-	int input_10_bits;
-
-	int initial_dec_delay;
-	int initial_xmit_delay;
-	int initial_scale_value;
-	int scale_decrement_interval;
-	int scale_increment_interval;
-	int first_line_bpg_offset;
-	int nfl_bpg_offset;
-	int slice_bpg_offset;
-	int initial_offset;
-	int final_offset;
-
-	int rc_model_size;
-	int det_thresh_flatness;
-	int max_qp_flatness;
-	int min_qp_flatness;
-	int edge_factor;
-	int quant_incr_limit0;
-	int quant_incr_limit1;
-	int tgt_offset_hi;
-	int tgt_offset_lo;
-
-	u32 *buf_thresh;
-	char *range_min_qp;
-	char *range_max_qp;
-	char *range_bpg_offset;
-
-	u32 extra_width;
-};
-
-/**
- * struct msm_compression_info - defined panel compression
- * @comp_type:        type of compression supported
- * @comp_ratio:       compression ratio
- * @dsc_info:         dsc configuration if the compression
- *                    supported is DSC
- */
-struct msm_compression_info {
-	enum msm_display_compression_type comp_type;
-	enum msm_display_compression_ratio comp_ratio;
-
-	union{
-		struct msm_display_dsc_info dsc_info;
-	};
 };
 
 /**
@@ -431,118 +125,21 @@ struct msm_display_topology {
 };
 
 /**
- * struct msm_mode_info - defines all msm custom mode info
- * @frame_rate:      frame_rate of the mode
- * @vtotal:          vtotal calculated for the mode
- * @prefill_lines:   prefill lines based on porches.
- * @jitter_numer:	display panel jitter numerator configuration
- * @jitter_denom:	display panel jitter denominator configuration
- * @clk_rate:	     DSI bit clock per lane in HZ.
- * @topology:        supported topology for the mode
- * @comp_info:       compression info supported
- * @roi_caps:        panel roi capabilities
- * @wide_bus_en:	wide-bus mode cfg for interface module
- * @mdp_transfer_time_us   Specifies the mdp transfer time for command mode
- *                         panels in microseconds.
- */
-struct msm_mode_info {
-	uint32_t frame_rate;
-	uint32_t vtotal;
-	uint32_t prefill_lines;
-	uint32_t jitter_numer;
-	uint32_t jitter_denom;
-	uint64_t clk_rate;
-	struct msm_display_topology topology;
-	struct msm_compression_info comp_info;
-	struct msm_roi_caps roi_caps;
-	bool wide_bus_en;
-	u32 mdp_transfer_time_us;
-};
-
-/**
  * struct msm_display_info - defines display properties
  * @intf_type:          DRM_MODE_CONNECTOR_ display type
  * @capabilities:       Bitmask of display flags
  * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
  * @h_tile_instance:    Controller instance used per tile. Number of elements is
  *                      based on num_of_h_tiles
- * @is_connected:       Set to true if display is connected
- * @width_mm:           Physical width
- * @height_mm:          Physical height
- * @max_width:          Max width of display. In case of hot pluggable display
- *                      this is max width supported by controller
- * @max_height:         Max height of display. In case of hot pluggable display
- *                      this is max height supported by controller
- * @clk_rate:           DSI bit clock per lane in HZ.
- * @is_primary:         Set to true if display is primary display
  * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
  *				 used instead of panel TE in cmd mode panels
- * @roi_caps:           Region of interest capability info
- * @qsync_min_fps	Minimum fps supported by Qsync feature
- * @te_source		vsync source pin information
  */
 struct msm_display_info {
 	int intf_type;
 	uint32_t capabilities;
-
 	uint32_t num_of_h_tiles;
 	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
-
-	bool is_connected;
-
-	unsigned int width_mm;
-	unsigned int height_mm;
-
-	uint32_t max_width;
-	uint32_t max_height;
-	uint64_t clk_rate;
-
-	bool is_primary;
 	bool is_te_using_watchdog_timer;
-	struct msm_roi_caps roi_caps;
-
-	uint32_t qsync_min_fps;
-	uint32_t te_source;
-};
-
-#define MSM_MAX_ROI	4
-
-/**
- * struct msm_roi_list - list of regions of interest for a drm object
- * @num_rects: number of valid rectangles in the roi array
- * @roi: list of roi rectangles
- */
-struct msm_roi_list {
-	uint32_t num_rects;
-	struct drm_clip_rect roi[MSM_MAX_ROI];
-};
-
-/**
- * struct - msm_display_kickoff_params - info for display features at kickoff
- * @rois: Regions of interest structure for mapping CRTC to Connector output
- * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
- * @qsync_update: Qsync settings were changed/updated
- */
-struct msm_display_kickoff_params {
-	struct msm_roi_list *rois;
-	struct drm_msm_ext_hdr_metadata *hdr_meta;
-	uint32_t qsync_mode;
-	bool qsync_update;
-};
-
-/**
- * struct msm_drm_event - defines custom event notification struct
- * @base: base object required for event notification by DRM framework.
- * @event: event object required for event notification by DRM framework.
- * @info: contains information of DRM object for which events has been
- *        requested.
- * @data: memory location which contains response payload for event.
- */
-struct msm_drm_event {
-	struct drm_pending_event base;
-	struct drm_event event;
-	struct drm_msm_event_req info;
-	u8 data[];
 };
 
 /* Commit/Event thread specific structure */
@@ -559,13 +156,10 @@ struct msm_drm_private {
 
 	struct msm_kms *kms;
 
-	struct sde_power_handle phandle;
-	struct sde_power_client *pclient;
-
 	/* subordinate devices, if present: */
 	struct platform_device *gpu_pdev;
 
-	/* top level MDSS wrapper device (for MDP5 only) */
+	/* top level MDSS wrapper device (for MDP5/DPU only) */
 	struct msm_mdss *mdss;
 
 	/* possibly this should be in the kms component, but it is
@@ -597,10 +191,6 @@ struct msm_drm_private {
 
 	struct workqueue_struct *wq;
 
-	/* crtcs pending async atomic updates: */
-	uint32_t pending_crtcs;
-	wait_queue_head_t pending_crtcs_event;
-
 	unsigned int num_planes;
 	struct drm_plane *planes[MAX_PLANES];
 
@@ -610,9 +200,6 @@ struct msm_drm_private {
 	struct msm_drm_thread disp_thread[MAX_CRTCS];
 	struct msm_drm_thread event_thread[MAX_CRTCS];
 
-	struct task_struct *pp_event_thread;
-	struct kthread_worker pp_event_worker;
-
 	unsigned int num_encoders;
 	struct drm_encoder *encoders[MAX_ENCODERS];
 
@@ -623,12 +210,7 @@ struct msm_drm_private {
 	struct drm_connector *connectors[MAX_CONNECTORS];
 
 	/* Properties */
-	struct drm_property *plane_property[PLANE_PROP_COUNT];
-	struct drm_property *crtc_property[CRTC_PROP_COUNT];
-	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
-
-	/* Color processing properties for the crtc */
-	struct drm_property **cp_property;
+	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
 
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
@@ -644,31 +226,10 @@ struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
+	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
-
-	/* task holding struct_mutex.. currently only used in submit path
-	 * to detect and reject faults from copy_from_user() for submit
-	 * ioctl.
-	 */
-	struct task_struct *struct_mutex_task;
-
-	/* list of clients waiting for events */
-	struct list_head client_event_list;
-
-	/* whether registered and drm_dev_unregister should be called */
-	bool registered;
-
-	/* msm drv debug root node */
-	struct dentry *debug_root;
-
-	/* update the flag when msm driver receives shutdown notification */
-	bool shutdown_in_progress;
 };
 
-/* get struct msm_kms * from drm_device * */
-#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
-		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
-
 struct msm_format {
 	uint32_t pixel_format;
 };
@@ -676,35 +237,14 @@ struct msm_format {
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *new_state);
 void msm_atomic_commit_tail(struct drm_atomic_state *state);
-int msm_atomic_commit(struct drm_device *dev,
-	struct drm_atomic_state *state, bool nonblock);
-
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
-	struct work_struct work;
-	uint32_t fence;
-	void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func)  do {                     \
-		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
-		(_cb)->func = _func;                         \
-	} while (0)
-
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		unsigned int flags);
+		struct msm_gem_vma *vma, struct sg_table *sgt);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
-		unsigned int flags);
-
-struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace);
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
 
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
@@ -712,61 +252,9 @@ struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
 
-/* For SDE  display */
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
-		const char *name);
-
-/**
- * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
- */
-void msm_gem_add_obj_to_aspace_active_list(
-		struct msm_gem_address_space *aspace,
-		struct drm_gem_object *obj);
-
-/**
- * msm_gem_remove_obj_from_aspace_active_list: removes obj from  active obj
- * list in aspace
- */
-void msm_gem_remove_obj_from_aspace_active_list(
-		struct msm_gem_address_space *aspace,
-		struct drm_gem_object *obj);
-
-/**
- * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
- * domain
- */
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_get(struct drm_device *dev,
-		unsigned int domain);
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-/**
- * msm_gem_aspace_domain_attach_detach: function to inform the attach/detach
- * of the domain for this aspace
- */
-void msm_gem_aspace_domain_attach_detach_update(
-		struct msm_gem_address_space *aspace,
-		bool is_detach);
-
-/**
- * msm_gem_address_space_register_cb: function to register callback for attach
- * and detach of the domain
- */
-int msm_gem_address_space_register_cb(
-		struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data);
-
-/**
- * msm_gem_address_space_register_cb: function to unregister callback
- */
-int msm_gem_address_space_unregister_cb(
-		struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data);
-
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
@@ -774,7 +262,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
-void msm_gem_sync(struct drm_gem_object *obj);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -788,7 +275,6 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
-dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -802,8 +288,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
-					    struct dma_buf *dma_buf);
 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
@@ -830,22 +314,15 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
-int msm_gem_delayed_import(struct drm_gem_object *obj);
 
-void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
-void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable);
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane);
-uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd,
-		struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
@@ -855,52 +332,18 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 void msm_fbdev_free(struct drm_device *dev);
 
 struct hdmi;
-#ifdef CONFIG_DRM_MSM_HDMI
 int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
 		struct drm_encoder *encoder);
 void __init msm_hdmi_register(void);
 void __exit msm_hdmi_unregister(void);
-#else
-static inline void __init msm_hdmi_register(void)
-{
-}
-static inline void __exit msm_hdmi_unregister(void)
-{
-}
-#endif
 
 struct msm_edp;
-#ifdef CONFIG_DRM_MSM_EDP
 void __init msm_edp_register(void);
 void __exit msm_edp_unregister(void);
 int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 		struct drm_encoder *encoder);
-#else
-static inline void __init msm_edp_register(void)
-{
-}
-static inline void __exit msm_edp_unregister(void)
-{
-}
-
-static inline int msm_edp_modeset_init(struct msm_edp *edp,
-		struct drm_device *dev, struct drm_encoder *encoder)
-{
-	return -EINVAL;
-}
-#endif
 
 struct msm_dsi;
-
-/* *
- * msm_mode_object_event_notify - notify user-space clients of drm object
- *                                events.
- * @obj: mode object (crtc/connector) that is generating the event.
- * @event: event that needs to be notified.
- * @payload: payload for the event.
- */
-void msm_mode_object_event_notify(struct drm_mode_object *obj,
-		struct drm_device *dev, struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
@@ -921,17 +364,10 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 }
 #endif
 
-#ifdef CONFIG_DRM_MSM_MDP5
 void __init msm_mdp_register(void);
 void __exit msm_mdp_unregister(void);
-#else
-static inline void __init msm_mdp_register(void)
-{
-}
-static inline void __exit msm_mdp_unregister(void)
-{
-}
-#endif
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
 
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -959,8 +395,6 @@ struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
 	const char *name);
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname);
-unsigned long msm_iomap_size(struct platform_device *pdev, const char *name);
-void msm_iounmap(struct platform_device *dev, void __iomem *addr);
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e948b03..17358db 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,5 +1,4 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -16,8 +15,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/dma-mapping.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
@@ -26,17 +23,16 @@
 #include "msm_kms.h"
 #include "msm_gem.h"
 
-#define MSM_FRAMEBUFFER_FLAG_KMAP	BIT(0)
-
 struct msm_framebuffer {
 	struct drm_framebuffer base;
 	const struct msm_format *format;
-	void *vaddr[MAX_PLANE];
-	atomic_t kmap_count;
-	u32 flags;
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		const struct drm_mode_fb_cmd2 *mode_cmd,
+		struct drm_gem_object **bos);
+
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 	.destroy = drm_gem_fb_destroy,
@@ -45,16 +41,8 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-	struct msm_framebuffer *msm_fb;
-	int i, n;
+	int i, n = fb->format->num_planes;
 
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
 	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
 			fb->width, fb->height, (char *)&fb->format->format,
 			drm_framebuffer_read_refcount(fb), fb->base.id);
@@ -67,113 +55,6 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 }
 #endif
 
-void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable)
-{
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-	struct drm_gem_object *bo;
-	struct msm_gem_object *msm_obj;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	if (!fb->format) {
-		DRM_ERROR("from:%pS null fb->format\n",
-				__builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-	for (i = 0; i < n; i++) {
-		bo = msm_framebuffer_bo(fb, i);
-		if (bo) {
-			msm_obj = to_msm_bo(bo);
-			if (enable)
-				msm_obj->flags |= MSM_BO_KEEPATTRS;
-			else
-				msm_obj->flags &= ~MSM_BO_KEEPATTRS;
-		}
-	}
-}
-
-void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
-{
-	struct msm_framebuffer *msm_fb;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	if (enable)
-		msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP;
-	else
-		msm_fb->flags &= ~MSM_FRAMEBUFFER_FLAG_KMAP;
-}
-
-static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
-{
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-	struct drm_gem_object *bo;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return -EINVAL;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-	if (atomic_inc_return(&msm_fb->kmap_count) > 1)
-		return 0;
-
-	for (i = 0; i < n; i++) {
-		bo = msm_framebuffer_bo(fb, i);
-		if (!bo || !bo->dma_buf) {
-			msm_fb->vaddr[i] = NULL;
-			continue;
-		}
-		dma_buf_begin_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
-		msm_fb->vaddr[i] = dma_buf_kmap(bo->dma_buf, 0);
-		DRM_INFO("FB[%u]: vaddr[%d]:%ux%u:0x%llx\n", fb->base.id, i,
-				fb->width, fb->height, (u64) msm_fb->vaddr[i]);
-	}
-
-	return 0;
-}
-
-static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
-{
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-	struct drm_gem_object *bo;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-	if (atomic_dec_return(&msm_fb->kmap_count) > 0)
-		return;
-
-	for (i = 0; i < n; i++) {
-		bo = msm_framebuffer_bo(fb, i);
-		if (!bo || !msm_fb->vaddr[i])
-			continue;
-		if (bo->dma_buf) {
-			dma_buf_kunmap(bo->dma_buf, 0, msm_fb->vaddr[i]);
-			dma_buf_end_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
-		}
-		msm_fb->vaddr[i] = NULL;
-	}
-}
-
 /* prepare/pin all the fb's bo's for scanout.  Note that it is not valid
  * to prepare an fb more multiple different initiator 'id's.  But that
  * should be fine, since only the scanout (mdpN) side of things needs
@@ -182,17 +63,9 @@ static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb;
-	int ret, i, n;
+	int ret, i, n = fb->format->num_planes;
 	uint64_t iova;
 
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return -EINVAL;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
 	for (i = 0; i < n; i++) {
 		ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
@@ -200,28 +73,13 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 			return ret;
 	}
 
-	if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
-		msm_framebuffer_kmap(fb);
-
 	return 0;
 }
 
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-
-	if (fb == NULL) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-
-	if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
-		msm_framebuffer_kunmap(fb);
+	int i, n = fb->format->num_planes;
 
 	for (i = 0; i < n; i++)
 		msm_gem_put_iova(fb->obj[i], aspace);
@@ -230,54 +88,21 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane)
 {
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return -EINVAL;
-	}
-
 	if (!fb->obj[plane])
 		return 0;
-
 	return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
 }
 
-uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
-		int plane)
-{
-	struct msm_framebuffer *msm_fb;
-	dma_addr_t phys_addr;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return -EINVAL;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-
-	if (!msm_fb->base.obj[plane])
-		return 0;
-
-	phys_addr = msm_gem_get_dma_addr(msm_fb->base.obj[plane]);
-	if (!phys_addr)
-		return 0;
-
-	return phys_addr + fb->offsets[plane];
-}
-
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return ERR_PTR(-EINVAL);
-	}
-
 	return drm_gem_fb_get_obj(fb, plane);
 }
 
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 {
-	return fb ? (to_msm_framebuffer(fb))->format : NULL;
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+
+	return msm_fb->format;
 }
 
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
@@ -309,23 +134,23 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		const struct drm_mode_fb_cmd2 *mode_cmd,
+		struct drm_gem_object **bos)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_framebuffer *msm_fb = NULL;
 	struct drm_framebuffer *fb;
 	const struct msm_format *format;
-	int ret, i, num_planes;
+	int ret, i, n;
 	unsigned int hsub, vsub;
-	bool is_modified = false;
 
-	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
+	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
-	num_planes = drm_format_num_planes(mode_cmd->pixel_format);
+	n = drm_format_num_planes(mode_cmd->pixel_format);
 	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
 	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
@@ -347,55 +172,28 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 	fb = &msm_fb->base;
 
 	msm_fb->format = format;
-	atomic_set(&msm_fb->kmap_count, 0);
 
-	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
-		for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
-			if (mode_cmd->modifier[i]) {
-				is_modified = true;
-				break;
-			}
-		}
-	}
-
-	if (num_planes > ARRAY_SIZE(fb->obj)) {
+	if (n > ARRAY_SIZE(fb->obj)) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
-	if (is_modified) {
-		if (!kms->funcs->check_modified_format) {
-			dev_err(dev->dev, "can't check modified fb format\n");
+	for (i = 0; i < n; i++) {
+		unsigned int width = mode_cmd->width / (i ? hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+		unsigned int min_size;
+
+		min_size = (height - 1) * mode_cmd->pitches[i]
+			 + width * drm_format_plane_cpp(mode_cmd->pixel_format,
+					 i) + mode_cmd->offsets[i];
+
+		if (bos[i]->size < min_size) {
 			ret = -EINVAL;
 			goto fail;
-		} else {
-			ret = kms->funcs->check_modified_format(
-				kms, msm_fb->format, mode_cmd, bos);
-			if (ret)
-				goto fail;
 		}
-	} else {
-		for (i = 0; i < num_planes; i++) {
-			unsigned int width = mode_cmd->width / (i ? hsub : 1);
-			unsigned int height = mode_cmd->height / (i ? vsub : 1);
-			unsigned int min_size;
-			unsigned int cpp;
 
-			cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
-
-			min_size = (height - 1) * mode_cmd->pitches[i]
-				 + width * cpp
-				 + mode_cmd->offsets[i];
-
-			if (bos[i]->size < min_size) {
-				ret = -EINVAL;
-				goto fail;
-			}
-		}
-	}
-
-	for (i = 0; i < num_planes; i++)
 		msm_fb->base.obj[i] = bos[i];
+	}
 
 	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
@@ -405,7 +203,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		goto fail;
 	}
 
-	DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
+	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
 
 	return fb;
 
@@ -416,7 +214,8 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 }
 
 struct drm_framebuffer *
-msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h,
+				int p, uint32_t format)
 {
 	struct drm_mode_fb_cmd2 mode_cmd = {
 		.pixel_format = format,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 6923ec2..456622b 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -117,7 +117,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 		goto fail_unlock;
 	}
 
-	DBG("fbi=%pK, dev=%pK", fbi, dev);
+	DBG("fbi=%p, dev=%p", fbi, dev);
 
 	fbdev->fb = fb;
 	helper->fb = fb;
@@ -141,7 +141,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = bo->size;
 
-	DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7655835..c3693df 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1,5 +1,4 @@
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -20,14 +19,12 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
-#include <linux/ion.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_mmu.h"
-#include "sde_dbg.h"
 
 static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
 
@@ -79,10 +76,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct device *aspace_dev;
-
-	if (obj->import_attach)
-		return msm_obj->pages;
 
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
@@ -111,15 +104,12 @@ static struct page **get_pages(struct drm_gem_object *obj)
 			return ptr;
 		}
 
-		/*
-		 * Make sure to flush the CPU cache for newly allocated memory
-		 * so we don't get ourselves into trouble with a dirty cache
+		/* For non-cached buffers, ensure the new pages are clean
+		 * because display controller, GPU, etc. are not coherent:
 		 */
-		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
-			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
-			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-		}
+		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
 
 	return msm_obj->pages;
@@ -139,7 +129,6 @@ static void put_pages_vram(struct drm_gem_object *obj)
 
 static void put_pages(struct drm_gem_object *obj)
 {
-	struct device *aspace_dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	if (msm_obj->pages) {
@@ -148,13 +137,10 @@ static void put_pages(struct drm_gem_object *obj)
 			 * pages are clean because display controller,
 			 * GPU, etc. are not coherent:
 			 */
-			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
-				aspace_dev =
-				    msm_gem_get_aspace_device(msm_obj->aspace);
-				dma_unmap_sg(aspace_dev, msm_obj->sgt->sgl,
+			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
 					     msm_obj->sgt->nents,
 					     DMA_BIDIRECTIONAL);
-			}
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
@@ -191,26 +177,6 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
 	/* when we start tracking the pin count, then do something here */
 }
 
-void msm_gem_sync(struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj;
-	struct device *aspace_dev;
-
-	if (!obj)
-		return;
-
-	msm_obj = to_msm_bo(obj);
-
-	/*
-	 * dma_sync_sg_for_device synchronises a single contiguous or
-	 * scatter/gather mapping for the CPU and device.
-	 */
-	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
-	dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
-		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-}
-
-
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
@@ -291,7 +257,7 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
@@ -332,25 +298,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 	return offset;
 }
 
-dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct sg_table *sgt;
-
-	if (!msm_obj->sgt) {
-		sgt = dma_buf_map_attachment(obj->import_attach,
-						DMA_BIDIRECTIONAL);
-		if (IS_ERR_OR_NULL(sgt)) {
-			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
-					PTR_ERR(sgt));
-			return 0;
-		}
-		msm_obj->sgt = sgt;
-	}
-
-	return sg_phys(msm_obj->sgt->sgl);
-}
-
 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
@@ -405,14 +352,7 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
-				msm_obj->flags);
-		/*
-		 * put_iova removes the domain connected to the obj which makes
-		 * the aspace inaccessible. Store the aspace, as it is used to
-		 * update the active_list during gem_free_obj and gem_purge.
-		 */
-		msm_obj->aspace = vma->aspace;
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
 		del_vma(vma);
 	}
 }
@@ -437,16 +377,6 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 	if (!vma) {
 		struct page **pages;
 
-		/* perform delayed import for buffers without existing sgt */
-		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))) {
-			ret = msm_gem_delayed_import(obj);
-			if (ret) {
-				DRM_ERROR("delayed dma-buf import failed %d\n",
-						ret);
-				goto unlock;
-			}
-		}
-
 		vma = add_vma(obj, aspace);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
@@ -460,20 +390,13 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 		}
 
 		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT,
-				msm_obj->flags);
+				obj->size >> PAGE_SHIFT);
 		if (ret)
 			goto fail;
 	}
 
 	*iova = vma->iova;
 
-	if (aspace &&  !msm_obj->in_active_list) {
-		mutex_lock(&aspace->list_lock);
-		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
-		mutex_unlock(&aspace->list_lock);
-	}
-
 	mutex_unlock(&msm_obj->lock);
 	return 0;
 
@@ -512,60 +435,6 @@ void msm_gem_put_iova(struct drm_gem_object *obj,
 	// things that are no longer needed..
 }
 
-void msm_gem_aspace_domain_attach_detach_update(
-		struct msm_gem_address_space *aspace,
-		bool is_detach)
-{
-	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj;
-	struct aspace_client *aclient;
-	int ret;
-	uint64_t iova;
-
-	if (!aspace)
-		return;
-
-	mutex_lock(&aspace->list_lock);
-	if (is_detach) {
-		/* Indicate to clients domain is getting detached */
-		list_for_each_entry(aclient, &aspace->clients, list) {
-			if (aclient->cb)
-				aclient->cb(aclient->cb_data,
-						is_detach);
-		}
-
-		/**
-		 * Unmap active buffers,
-		 * typically clients should do this when the callback is called,
-		 * but this needs to be done for the buffers which are not
-		 * attached to any planes.
-		 */
-		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
-			obj = &msm_obj->base;
-			if (obj->import_attach)
-				put_iova(obj);
-		}
-	} else {
-		/* map active buffers */
-		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
-			obj = &msm_obj->base;
-			ret = msm_gem_get_iova(obj, aspace, &iova);
-			if (ret) {
-				mutex_unlock(&aspace->list_lock);
-				return;
-			}
-		}
-
-		/* Indicate to clients domain is attached */
-		list_for_each_entry(aclient, &aspace->clients, list) {
-			if (aclient->cb)
-				aclient->cb(aclient->cb_data,
-						is_detach);
-		}
-	}
-	mutex_unlock(&aspace->list_lock);
-}
-
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args)
 {
@@ -624,20 +493,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 			ret = PTR_ERR(pages);
 			goto fail;
 		}
-
-		if (obj->import_attach) {
-			ret = dma_buf_begin_cpu_access(
-				obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
-			if (ret)
-				goto fail;
-
-			msm_obj->vaddr =
-				dma_buf_vmap(obj->import_attach->dmabuf);
-		} else {
-			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-		}
-
 		if (msm_obj->vaddr == NULL) {
 			ret = -ENOMEM;
 			goto fail;
@@ -712,12 +569,6 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 	mutex_lock_nested(&msm_obj->lock, subclass);
 
 	put_iova(obj);
-	if (msm_obj->aspace) {
-		mutex_lock(&msm_obj->aspace->list_lock);
-		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
-				obj);
-		mutex_unlock(&msm_obj->aspace->list_lock);
-	}
 
 	msm_gem_vunmap_locked(obj);
 
@@ -750,14 +601,7 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
-	if (obj->import_attach) {
-		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
-		dma_buf_end_cpu_access(obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL);
-	} else {
-		vunmap(msm_obj->vaddr);
-	}
-
+	vunmap(msm_obj->vaddr);
 	msm_obj->vaddr = NULL;
 }
 
@@ -895,7 +739,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
@@ -959,12 +803,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	mutex_lock(&msm_obj->lock);
 
 	put_iova(obj);
-	if (msm_obj->aspace) {
-		mutex_lock(&msm_obj->aspace->list_lock);
-		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
-				obj);
-		mutex_unlock(&msm_obj->aspace->list_lock);
-	}
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
@@ -1049,9 +887,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
-	INIT_LIST_HEAD(&msm_obj->iova_list);
-	msm_obj->aspace = NULL;
-	msm_obj->in_active_list = false;
 
 	if (struct_mutex_locked) {
 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1145,61 +980,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	return _msm_gem_new(dev, size, flags, false);
 }
 
-int msm_gem_delayed_import(struct drm_gem_object *obj)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	struct msm_gem_object *msm_obj;
-	int ret = 0;
-
-	if (!obj) {
-		DRM_ERROR("NULL drm gem object\n");
-		return -EINVAL;
-	}
-
-	msm_obj = to_msm_bo(obj);
-
-	if (!obj->import_attach) {
-		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
-		return -EINVAL;
-	}
-
-	attach = obj->import_attach;
-	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
-
-	if (msm_obj->flags & MSM_BO_SKIPSYNC)
-		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
-	if (msm_obj->flags & MSM_BO_KEEPATTRS)
-		attach->dma_map_attrs |=
-				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
-
-	/*
-	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
-	 * mapping, and iova will get mapped when the function returns.
-	 */
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
-				ret);
-		goto fail_import;
-	}
-	msm_obj->sgt = sgt;
-	msm_obj->pages = NULL;
-
-fail_import:
-	return ret;
-}
-
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj = NULL;
+	struct drm_gem_object *obj;
 	uint32_t size;
-	int ret;
-	unsigned long flags = 0;
+	int ret, npages;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
 	if (!iommu_present(&platform_bus_type)) {
@@ -1209,39 +996,30 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
-			false);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
 	if (ret)
 		goto fail;
 
 	drm_gem_private_object_init(dev, obj, size);
 
+	npages = size / PAGE_SIZE;
+
 	msm_obj = to_msm_bo(obj);
 	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
-	msm_obj->pages = NULL;
-	/*
-	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
-	 * to add back the sg table to the drm gem object.
-	 *
-	 * 2) Add buffer flag unconditionally for all import cases.
-	 *    # Cached buffer will be attached immediately hence sgt will
-	 *      be available upon gem obj creation.
-	 *    # Un-cached buffer will follow delayed attach hence sgt
-	 *      will be NULL upon gem obj creation.
-	 */
-	msm_obj->flags |= MSM_BO_EXTBUF;
+	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *),
+							GFP_KERNEL);
+	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
+		ret = -ENOMEM;
+		goto fail;
+	}
 
-	/*
-	 * For all uncached buffers, there is no need to perform cache
-	 * maintenance on dma map/unmap time.
-	 */
-	ret = dma_buf_get_flags(dmabuf, &flags);
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL,
+						npages);
 	if (ret) {
-		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
-	} else if ((flags & ION_FLAG_CACHED) == 0) {
-		DRM_DEBUG("Buffer is uncached type\n");
-		msm_obj->flags |= MSM_BO_SKIPSYNC;
+		mutex_unlock(&msm_obj->lock);
+		goto fail;
 	}
 
 	mutex_unlock(&msm_obj->lock);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 49a4412..c5d9bd3 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,39 +24,6 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
-#define MSM_BO_KEEPATTRS     0x20000000    /* keep h/w bus attributes */
-#define MSM_BO_SKIPSYNC      0x40000000    /* skip dmabuf cpu sync */
-#define MSM_BO_EXTBUF        0x80000000    /* indicate BO is an import buffer */
-
-struct msm_gem_object;
-
-struct msm_gem_aspace_ops {
-	int (*map)(struct msm_gem_address_space *space, struct msm_gem_vma *vma,
-		struct sg_table *sgt, int npages, unsigned int flags);
-
-	void (*unmap)(struct msm_gem_address_space *space,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		unsigned int flags);
-
-	void (*destroy)(struct msm_gem_address_space *space);
-	void (*add_to_active)(struct msm_gem_address_space *space,
-		struct msm_gem_object *obj);
-	void (*remove_from_active)(struct msm_gem_address_space *space,
-		struct msm_gem_object *obj);
-	int (*register_cb)(struct msm_gem_address_space *space,
-			void (*cb)(void *cb, bool data),
-			void *cb_data);
-	int (*unregister_cb)(struct msm_gem_address_space *space,
-			void (*cb)(void *cb, bool data),
-			void *cb_data);
-};
-
-struct aspace_client {
-	void (*cb)(void *cb, bool data);
-	void *cb_data;
-	struct list_head list;
-};
-
 
 struct msm_gem_address_space {
 	const char *name;
@@ -67,14 +34,6 @@ struct msm_gem_address_space {
 	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	struct msm_mmu *mmu;
 	struct kref kref;
-	bool domain_attached;
-	const struct msm_gem_aspace_ops *ops;
-	struct drm_device *dev;
-	/* list of mapped objects */
-	struct list_head active_list;
-	/* list of clients */
-	struct list_head clients;
-	struct mutex list_lock; /* Protects active_list & clients */
 };
 
 struct msm_gem_vma {
@@ -132,10 +91,6 @@ struct msm_gem_object {
 	 */
 	struct drm_mm_node *vram_node;
 	struct mutex lock; /* Protects resources associated with bo */
-	struct list_head iova_list;
-
-	struct msm_gem_address_space *aspace;
-	bool in_active_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 021971e..13403c6 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -1,5 +1,4 @@
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,12 +17,8 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
-#include "msm_mmu.h"
-#include "msm_kms.h"
 
 #include <linux/dma-buf.h>
-#include <linux/ion.h>
-#include <linux/msm_ion.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -82,124 +77,3 @@ struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
 
 	return msm_obj->resv;
 }
-
-
-struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
-					    struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt = NULL;
-	struct drm_gem_object *obj;
-	struct device *attach_dev = NULL;
-	unsigned long flags = 0;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms;
-	int ret;
-	u32 domain;
-
-	if (!dma_buf || !dev->dev_private)
-		return ERR_PTR(-EINVAL);
-
-	priv = dev->dev_private;
-	kms = priv->kms;
-
-	if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
-		obj = dma_buf->priv;
-		if (obj->dev == dev) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_get(obj);
-			return obj;
-		}
-	}
-
-	if (!dev->driver->gem_prime_import_sg_table) {
-		DRM_ERROR("NULL gem_prime_import_sg_table\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	get_dma_buf(dma_buf);
-
-	ret = dma_buf_get_flags(dma_buf, &flags);
-	if (ret) {
-		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
-		goto fail_put;
-	}
-
-	if (!kms || !kms->funcs->get_address_space_device) {
-		DRM_ERROR("invalid kms ops\n");
-		goto fail_put;
-	}
-
-	if (flags & ION_FLAG_SECURE) {
-		if (flags & ION_FLAG_CP_PIXEL)
-			attach_dev = kms->funcs->get_address_space_device(kms,
-						MSM_SMMU_DOMAIN_SECURE);
-
-		else if ((flags & ION_FLAG_CP_SEC_DISPLAY)
-				|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
-			attach_dev = dev->dev;
-		else
-			DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
-	} else {
-		attach_dev = kms->funcs->get_address_space_device(kms,
-						MSM_SMMU_DOMAIN_UNSECURE);
-	}
-
-	if (!attach_dev) {
-		DRM_ERROR("aspace device not found for domain:%d\n", domain);
-		ret = -EINVAL;
-		goto fail_put;
-	}
-
-	attach = dma_buf_attach(dma_buf, attach_dev);
-	if (IS_ERR(attach)) {
-		DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
-		return ERR_CAST(attach);
-	}
-
-	/*
-	 * For cached buffers where CPU access is required, dma_map_attachment
-	 * must be called now to allow user-space to perform cpu sync begin/end
-	 * otherwise do delayed mapping during the commit.
-	 */
-	if (flags & ION_FLAG_CACHED) {
-		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
-		sgt = dma_buf_map_attachment(
-				attach, DMA_BIDIRECTIONAL);
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			DRM_ERROR(
-			"dma_buf_map_attachment failure, err=%d\n",
-				ret);
-			goto fail_detach;
-		}
-	}
-
-	/*
-	 * If importing a NULL sg table (i.e. for uncached buffers),
-	 * create a drm gem object with only the dma buf attachment.
-	 */
-	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
-	if (IS_ERR(obj)) {
-		ret = PTR_ERR(obj);
-		DRM_ERROR("gem_prime_import_sg_table failure, err=%d\n", ret);
-		goto fail_unmap;
-	}
-
-	obj->import_attach = attach;
-
-	return obj;
-
-fail_unmap:
-	if (sgt)
-		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-fail_detach:
-	dma_buf_detach(dma_buf, attach);
-fail_put:
-	dma_buf_put(dma_buf);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index da9ae7a..ffbec22 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1,5 +1,4 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2016 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -20,184 +19,15 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
-/* SDE address space operations */
-static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		unsigned int flags)
-{
-	if (!vma->iova)
-		return;
-
-	if (aspace) {
-		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt,
-				DMA_BIDIRECTIONAL, flags);
-	}
-
-	vma->iova = 0;
-	msm_gem_address_space_put(aspace);
-}
-
-static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		int npages, unsigned int flags)
-{
-	int ret = -EINVAL;
-
-	if (!aspace || !aspace->domain_attached)
-		return ret;
-
-	ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt,
-			DMA_BIDIRECTIONAL, flags);
-	if (!ret)
-		vma->iova = sg_dma_address(sgt->sgl);
-
-	/* Get a reference to the aspace to keep it around */
-	kref_get(&aspace->kref);
-
-	return ret;
-}
-
-static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
-{
-	if (aspace->mmu)
-		aspace->mmu->funcs->destroy(aspace->mmu);
-}
-
-static void smmu_aspace_add_to_active(
-		struct msm_gem_address_space *aspace,
-		struct msm_gem_object *msm_obj)
-{
-	WARN_ON(!mutex_is_locked(&aspace->list_lock));
-	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
-}
-
-static void smmu_aspace_remove_from_active(
-		struct msm_gem_address_space *aspace,
-		struct msm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj, *next;
-
-	WARN_ON(!mutex_is_locked(&aspace->list_lock));
-
-	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
-			iova_list) {
-		if (msm_obj == obj) {
-			list_del(&msm_obj->iova_list);
-			break;
-		}
-	}
-}
-
-static int smmu_aspace_register_cb(
-		struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data)
-{
-	struct aspace_client *aclient = NULL;
-	struct aspace_client *temp;
-
-	if (!aspace)
-		return -EINVAL;
-
-	if (!aspace->domain_attached)
-		return -EACCES;
-
-	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
-	if (!aclient)
-		return -ENOMEM;
-
-	aclient->cb = cb;
-	aclient->cb_data = cb_data;
-	INIT_LIST_HEAD(&aclient->list);
-
-	/* check if callback is already registered */
-	mutex_lock(&aspace->list_lock);
-	list_for_each_entry(temp, &aspace->clients, list) {
-		if ((temp->cb == aclient->cb) &&
-			(temp->cb_data == aclient->cb_data)) {
-			kfree(aclient);
-			mutex_unlock(&aspace->list_lock);
-			return -EEXIST;
-		}
-	}
-
-	list_move_tail(&aclient->list, &aspace->clients);
-	mutex_unlock(&aspace->list_lock);
-
-	return 0;
-}
-
-static int smmu_aspace_unregister_cb(
-		struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data)
-{
-	struct aspace_client *aclient = NULL;
-	int rc = -ENOENT;
-
-	if (!aspace || !cb)
-		return -EINVAL;
-
-	mutex_lock(&aspace->list_lock);
-	list_for_each_entry(aclient, &aspace->clients, list) {
-		if ((aclient->cb == cb) &&
-			(aclient->cb_data == cb_data)) {
-			list_del(&aclient->list);
-			kfree(aclient);
-			rc = 0;
-			break;
-		}
-	}
-	mutex_unlock(&aspace->list_lock);
-
-	return rc;
-}
-
-static const struct msm_gem_aspace_ops smmu_aspace_ops = {
-	.map = smmu_aspace_map_vma,
-	.unmap = smmu_aspace_unmap_vma,
-	.destroy = smmu_aspace_destroy,
-	.add_to_active = smmu_aspace_add_to_active,
-	.remove_from_active = smmu_aspace_remove_from_active,
-	.register_cb = smmu_aspace_register_cb,
-	.unregister_cb = smmu_aspace_unregister_cb,
-};
-
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
-		const char *name)
-{
-	struct msm_gem_address_space *aspace;
-
-	if (!mmu)
-		return ERR_PTR(-EINVAL);
-
-	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
-	if (!aspace)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&aspace->lock);
-	aspace->dev = dev;
-	aspace->name = name;
-	aspace->mmu = mmu;
-	aspace->ops = &smmu_aspace_ops;
-	INIT_LIST_HEAD(&aspace->active_list);
-	INIT_LIST_HEAD(&aspace->clients);
-	kref_init(&aspace->kref);
-	mutex_init(&aspace->list_lock);
-
-	return aspace;
-}
-
 static void
 msm_gem_address_space_destroy(struct kref *kref)
 {
 	struct msm_gem_address_space *aspace = container_of(kref,
 			struct msm_gem_address_space, kref);
 
-	if (aspace && aspace->ops->destroy)
-		aspace->ops->destroy(aspace);
-
+	drm_mm_takedown(&aspace->mm);
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
 	kfree(aspace);
 }
 
@@ -208,10 +38,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-/* GPU address space operations */
-static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		unsigned int flags)
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
 	if (!aspace || !vma->iova)
 		return;
@@ -230,19 +59,9 @@ static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
 	msm_gem_address_space_put(aspace);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		unsigned int flags)
-{
-	if (aspace && aspace->ops->unmap)
-		aspace->ops->unmap(aspace, vma, sgt, flags);
-}
-
-
-static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt,
-		int npages, unsigned int flags)
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
 {
 	int ret;
 
@@ -272,19 +91,6 @@ static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
 	return ret;
 }
 
-static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
-{
-	drm_mm_takedown(&aspace->mm);
-	if (aspace->mmu)
-		aspace->mmu->funcs->destroy(aspace->mmu);
-}
-
-static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
-	.map = iommu_aspace_map_vma,
-	.unmap = iommu_aspace_unmap_vma,
-	.destroy = iommu_aspace_destroy,
-};
-
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
@@ -300,7 +106,6 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
-	aspace->ops = &msm_iommu_aspace_ops;
 
 	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
 		size >> PAGE_SHIFT);
@@ -309,65 +114,3 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 
 	return aspace;
 }
-
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
-		unsigned int flags)
-{
-	if (aspace && aspace->ops->map)
-		return aspace->ops->map(aspace, vma, sgt, npages, flags);
-
-	return -EINVAL;
-}
-
-struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace)
-{
-	struct device *client_dev = NULL;
-
-	if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev)
-		client_dev = aspace->mmu->funcs->get_dev(aspace->mmu);
-
-	return client_dev;
-}
-
-void msm_gem_add_obj_to_aspace_active_list(
-		struct msm_gem_address_space *aspace,
-		struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	if (aspace && aspace->ops && aspace->ops->add_to_active)
-		aspace->ops->add_to_active(aspace, msm_obj);
-}
-
-void msm_gem_remove_obj_from_aspace_active_list(
-		struct msm_gem_address_space *aspace,
-		struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	if (aspace && aspace->ops && aspace->ops->remove_from_active)
-		aspace->ops->remove_from_active(aspace, msm_obj);
-}
-
-int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data)
-{
-	if (aspace && aspace->ops && aspace->ops->register_cb)
-		return aspace->ops->register_cb(aspace, cb, cb_data);
-
-	return -EINVAL;
-}
-
-int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
-		void (*cb)(void *, bool),
-		void *cb_data)
-{
-	if (aspace && aspace->ops && aspace->ops->unregister_cb)
-		return aspace->ops->unregister_cb(aspace, cb, cb_data);
-
-	return -EINVAL;
-}
-
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3115822..52a2146 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -877,18 +877,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 
 	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
-	DBG("ebi1_clk: %pK", gpu->ebi1_clk);
+	DBG("ebi1_clk: %p", gpu->ebi1_clk);
 	if (IS_ERR(gpu->ebi1_clk))
 		gpu->ebi1_clk = NULL;
 
 	/* Acquire regulators: */
 	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
-	DBG("gpu_reg: %pK", gpu->gpu_reg);
+	DBG("gpu_reg: %p", gpu->gpu_reg);
 	if (IS_ERR(gpu->gpu_reg))
 		gpu->gpu_reg = NULL;
 
 	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
-	DBG("gpu_cx: %pK", gpu->gpu_cx);
+	DBG("gpu_cx: %p", gpu->gpu_cx);
 	if (IS_ERR(gpu->gpu_cx))
 		gpu->gpu_cx = NULL;
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e062b07..f41a0b6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -26,19 +26,6 @@
 
 #define MAX_PLANE	4
 
-/**
- * Device Private DRM Mode Flags
- * drm_mode->private_flags
- */
-/* Connector has interpreted seamless transition request as dynamic fps */
-#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS	(1<<0)
-/* Transition to new mode requires a wait-for-vblank before the modeset */
-#define MSM_MODE_FLAG_VBLANK_PRE_MODESET	(1<<1)
-/* Request to switch the connector mode */
-#define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
-/* Request to switch the fps */
-#define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
-
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
  * implementation is loaded at runtime.  The kms module is responsible
@@ -47,7 +34,6 @@
 struct msm_kms_funcs {
 	/* hw initialization: */
 	int (*hw_init)(struct msm_kms *kms);
-	int (*postinit)(struct msm_kms *kms);
 	/* irq handling: */
 	void (*irq_preinstall)(struct msm_kms *kms);
 	int (*irq_postinstall)(struct msm_kms *kms);
@@ -56,31 +42,23 @@ struct msm_kms_funcs {
 	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	/* modeset, bracketing atomic_commit(): */
-	void (*prepare_fence)(struct msm_kms *kms,
-			struct drm_atomic_state *state);
 	void (*prepare_commit)(struct msm_kms *kms,
-			struct drm_atomic_state *state);
+				struct drm_atomic_state *state);
 	void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
 	void (*complete_commit)(struct msm_kms *kms,
-			struct drm_atomic_state *state);
+				struct drm_atomic_state *state);
 	/* functions to wait for atomic commit completed on each CRTC */
 	void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
 					struct drm_crtc *crtc);
-	/* function pointer to wait for pixel transfer to panel to complete*/
-	void (*wait_for_tx_complete)(struct msm_kms *kms,
-					struct drm_crtc *crtc);
 	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
 	const struct msm_format *(*get_format)(struct msm_kms *kms,
 					const uint32_t format,
-					const uint64_t modifier);
+					const uint64_t modifiers);
 	/* do format checking on format modified through fb_cmd2 modifiers */
 	int (*check_modified_format)(const struct msm_kms *kms,
 			const struct msm_format *msm_fmt,
 			const struct drm_mode_fb_cmd2 *cmd,
 			struct drm_gem_object **bos);
-	/* perform complete atomic check of given atomic state */
-	int (*atomic_check)(struct msm_kms *kms,
-			struct drm_atomic_state *state);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
@@ -88,13 +66,6 @@ struct msm_kms_funcs {
 			struct drm_encoder *encoder,
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
-	void (*postopen)(struct msm_kms *kms, struct drm_file *file);
-	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
-	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
-	void (*lastclose)(struct msm_kms *kms,
-			struct drm_modeset_acquire_ctx *ctx);
-	int (*register_events)(struct msm_kms *kms,
-			struct drm_mode_object *obj, u32 event, bool en);
 	void (*set_encoder_mode)(struct msm_kms *kms,
 				 struct drm_encoder *encoder,
 				 bool cmd_mode);
@@ -103,21 +74,10 @@ struct msm_kms_funcs {
 	int (*pm_resume)(struct device *dev);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
-	/* get address space */
-	struct msm_gem_address_space *(*get_address_space)(
-			struct msm_kms *kms,
-			unsigned int domain);
-	struct device *(*get_address_space_device)(
-			struct msm_kms *kms,
-			unsigned int domain);
 #ifdef CONFIG_DEBUG_FS
 	/* debugfs: */
 	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
 #endif
-	/* handle continuous splash  */
-	int (*cont_splash_config)(struct msm_kms *kms);
-	/* check for continuous splash status */
-	bool (*check_for_splash)(struct msm_kms *kms);
 };
 
 struct msm_kms {
@@ -130,91 +90,28 @@ struct msm_kms {
 	struct msm_gem_address_space *aspace;
 };
 
-/**
- * Subclass of drm_atomic_state, to allow kms backend to have driver
- * private global state.  The kms backend can do whatever it wants
- * with the ->state ptr.  On ->atomic_state_clear() the ->state ptr
- * is kfree'd and set back to NULL.
- */
-struct msm_kms_state {
-	struct drm_atomic_state base;
-	void *state;
-};
-#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
-
 static inline void msm_kms_init(struct msm_kms *kms,
 		const struct msm_kms_funcs *funcs)
 {
 	kms->funcs = funcs;
 }
 
-#ifdef CONFIG_DRM_MSM_MDP4
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-#else
-static inline
-struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
-#endif
-#ifdef CONFIG_DRM_MSM_MDP5
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
-#else
-static inline int msm_mdss_init(struct drm_device *dev)
-{
-	return 0;
-}
-static inline void msm_mdss_destroy(struct drm_device *dev)
-{
-}
-static inline struct msm_kms *mdp5_kms_init(struct drm_device *dev)
-{
-	return NULL;
-}
-static inline int msm_mdss_enable(struct msm_mdss *mdss)
-{
-	return 0;
-}
-static inline int msm_mdss_disable(struct msm_mdss *mdss)
-{
-	return 0;
-}
-#endif
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
 
-struct msm_kms *sde_kms_init(struct drm_device *dev);
+struct msm_mdss_funcs {
+	int (*enable)(struct msm_mdss *mdss);
+	int (*disable)(struct msm_mdss *mdss);
+	void (*destroy)(struct drm_device *dev);
+};
 
+struct msm_mdss {
+	struct drm_device *dev;
+	const struct msm_mdss_funcs *funcs;
+};
 
-/**
- * Mode Set Utility Functions
- */
-static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
-{
-	return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
-}
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
 
-static inline bool msm_is_mode_seamless_dms(const struct drm_display_mode *mode)
-{
-	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DMS)
-		: false;
-}
-
-static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
-{
-	return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
-		(mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
-}
-
-static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode)
-{
-	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_VRR)
-		: false;
-}
-
-static inline bool msm_needs_vblank_pre_modeset(
-		const struct drm_display_mode *mode)
-{
-	return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
-}
 #endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index ee6cbcd..ffb8a49 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -20,16 +20,6 @@
 
 #include <linux/iommu.h>
 
-struct msm_mmu;
-
-enum msm_mmu_domain_type {
-	MSM_SMMU_DOMAIN_UNSECURE,
-	MSM_SMMU_DOMAIN_NRT_UNSECURE,
-	MSM_SMMU_DOMAIN_SECURE,
-	MSM_SMMU_DOMAIN_NRT_SECURE,
-	MSM_SMMU_DOMAIN_MAX,
-};
-
 struct msm_mmu_funcs {
 	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
@@ -37,23 +27,7 @@ struct msm_mmu_funcs {
 			unsigned int len, int prot);
 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 			unsigned int len);
-	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
-			enum dma_data_direction dir);
-	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
-		enum dma_data_direction dir);
-	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			int dir, u32 flags);
-	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			int dir, u32 flags);
 	void (*destroy)(struct msm_mmu *mmu);
-	bool (*is_domain_secure)(struct msm_mmu *mmu);
-	int (*set_attribute)(struct msm_mmu *mmu,
-			enum iommu_attr attr, void *data);
-	int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
-			uint32_t dest_address, uint32_t size, int prot);
-	int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
-					uint32_t size);
-	struct device *(*get_dev)(struct msm_mmu *mmu);
 };
 
 struct msm_mmu {
@@ -71,8 +45,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_smmu_new(struct device *dev,
-	enum msm_mmu_domain_type domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
 		int (*handler)(void *arg, unsigned long iova, int flags))
@@ -81,8 +54,4 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
 	mmu->handler = handler;
 }
 
-/* SDE smmu driver initialize and cleanup functions */
-int __init msm_smmu_driver_init(void);
-void __exit msm_smmu_driver_cleanup(void);
-
 #endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
deleted file mode 100644
index aa8caee..0000000
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ /dev/null
@@ -1,674 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#include "msm_prop.h"
-
-void msm_property_init(struct msm_property_info *info,
-		struct drm_mode_object *base,
-		struct drm_device *dev,
-		struct drm_property **property_array,
-		struct msm_property_data *property_data,
-		uint32_t property_count,
-		uint32_t blob_count,
-		uint32_t state_size)
-{
-	/* prevent access if any of these are NULL */
-	if (!base || !dev || !property_array || !property_data) {
-		property_count = 0;
-		blob_count = 0;
-
-		DRM_ERROR("invalid arguments, forcing zero properties\n");
-		return;
-	}
-
-	/* can't have more blob properties than total properties */
-	if (blob_count > property_count) {
-		blob_count = property_count;
-
-		DBG("Capping number of blob properties to %d", blob_count);
-	}
-
-	if (!info) {
-		DRM_ERROR("info pointer is NULL\n");
-	} else {
-		info->base = base;
-		info->dev = dev;
-		info->property_array = property_array;
-		info->property_data = property_data;
-		info->property_count = property_count;
-		info->blob_count = blob_count;
-		info->install_request = 0;
-		info->install_count = 0;
-		info->recent_idx = 0;
-		info->is_active = false;
-		info->state_size = state_size;
-		info->state_cache_size = 0;
-		mutex_init(&info->property_lock);
-
-		memset(property_data,
-				0,
-				sizeof(struct msm_property_data) *
-				property_count);
-	}
-}
-
-void msm_property_destroy(struct msm_property_info *info)
-{
-	if (!info)
-		return;
-
-	/* free state cache */
-	while (info->state_cache_size > 0)
-		kfree(info->state_cache[--(info->state_cache_size)]);
-
-	mutex_destroy(&info->property_lock);
-}
-
-int msm_property_pop_dirty(struct msm_property_info *info,
-		struct msm_property_state *property_state)
-{
-	struct list_head *item;
-	int rc = 0;
-
-	if (!info || !property_state || !property_state->values) {
-		DRM_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&info->property_lock);
-	if (list_empty(&property_state->dirty_list)) {
-		rc = -EAGAIN;
-	} else {
-		item = property_state->dirty_list.next;
-		list_del_init(item);
-		rc = container_of(item, struct msm_property_value, dirty_node)
-			- property_state->values;
-		DRM_DEBUG_KMS("property %d dirty\n", rc);
-	}
-	mutex_unlock(&info->property_lock);
-
-	return rc;
-}
-
-/**
- * _msm_property_set_dirty_no_lock - flag given property as being dirty
- *                                   This function doesn't mutex protect the
- *                                   dirty linked list.
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * @property_idx: Property index
- */
-static void _msm_property_set_dirty_no_lock(
-		struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		uint32_t property_idx)
-{
-	if (!info || !property_state || !property_state->values ||
-			property_idx >= info->property_count) {
-		DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
-		return;
-	}
-
-	/* avoid re-inserting if already dirty */
-	if (!list_empty(&property_state->values[property_idx].dirty_node)) {
-		DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
-		return;
-	}
-
-	list_add_tail(&property_state->values[property_idx].dirty_node,
-			&property_state->dirty_list);
-}
-
-bool msm_property_is_dirty(
-		struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		uint32_t property_idx)
-{
-	if (!info || !property_state || !property_state->values ||
-			property_idx >= info->property_count) {
-		DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
-		return false;
-	}
-
-	return !list_empty(&property_state->values[property_idx].dirty_node);
-}
-
-/**
- * _msm_property_install_integer - install standard drm range property
- * @info: Pointer to property info container struct
- * @name: Property name
- * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
- * @min: Min property value
- * @max: Max property value
- * @init: Default Property value
- * @property_idx: Property index
- * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
- */
-static void _msm_property_install_integer(struct msm_property_info *info,
-		const char *name, int flags, uint64_t min, uint64_t max,
-		uint64_t init, uint32_t property_idx, bool force_dirty)
-{
-	struct drm_property **prop;
-
-	if (!info)
-		return;
-
-	++info->install_request;
-
-	if (!name || (property_idx >= info->property_count)) {
-		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
-	} else {
-		prop = &info->property_array[property_idx];
-		/*
-		 * Properties need to be attached to each drm object that
-		 * uses them, but only need to be created once
-		 */
-		if (!*prop) {
-			*prop = drm_property_create_range(info->dev,
-					flags, name, min, max);
-			if (!*prop)
-				DRM_ERROR("create %s property failed\n", name);
-		}
-
-		/* save init value for later */
-		info->property_data[property_idx].default_value = init;
-		info->property_data[property_idx].force_dirty = force_dirty;
-
-		/* always attach property, if created */
-		if (*prop) {
-			drm_object_attach_property(info->base, *prop, init);
-			++info->install_count;
-		}
-	}
-}
-
-void msm_property_install_range(struct msm_property_info *info,
-		const char *name, int flags, uint64_t min, uint64_t max,
-		uint64_t init, uint32_t property_idx)
-{
-	_msm_property_install_integer(info, name, flags,
-			min, max, init, property_idx, false);
-}
-
-void msm_property_install_volatile_range(struct msm_property_info *info,
-		const char *name, int flags, uint64_t min, uint64_t max,
-		uint64_t init, uint32_t property_idx)
-{
-	_msm_property_install_integer(info, name, flags,
-			min, max, init, property_idx, true);
-}
-
-void msm_property_install_enum(struct msm_property_info *info,
-		const char *name, int flags, int is_bitmask,
-		const struct drm_prop_enum_list *values, int num_values,
-		uint32_t property_idx)
-{
-	struct drm_property **prop;
-
-	if (!info)
-		return;
-
-	++info->install_request;
-
-	if (!name || !values || !num_values ||
-			(property_idx >= info->property_count)) {
-		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
-	} else {
-		prop = &info->property_array[property_idx];
-		/*
-		 * Properties need to be attached to each drm object that
-		 * uses them, but only need to be created once
-		 */
-		if (!*prop) {
-			/* 'bitmask' is a special type of 'enum' */
-			if (is_bitmask)
-				*prop = drm_property_create_bitmask(info->dev,
-						DRM_MODE_PROP_BITMASK | flags,
-						name, values, num_values, -1);
-			else
-				*prop = drm_property_create_enum(info->dev,
-						DRM_MODE_PROP_ENUM | flags,
-						name, values, num_values);
-			if (!*prop)
-				DRM_ERROR("create %s property failed\n", name);
-		}
-
-		/* save init value for later */
-		info->property_data[property_idx].default_value = 0;
-		info->property_data[property_idx].force_dirty = false;
-
-		/* select first defined value for enums */
-		if (!is_bitmask)
-			info->property_data[property_idx].default_value =
-				values->type;
-
-		/* always attach property, if created */
-		if (*prop) {
-			drm_object_attach_property(info->base, *prop,
-					info->property_data
-					[property_idx].default_value);
-			++info->install_count;
-		}
-	}
-}
-
-void msm_property_install_blob(struct msm_property_info *info,
-		const char *name, int flags, uint32_t property_idx)
-{
-	struct drm_property **prop;
-
-	if (!info)
-		return;
-
-	++info->install_request;
-
-	if (!name || (property_idx >= info->blob_count)) {
-		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
-	} else {
-		prop = &info->property_array[property_idx];
-		/*
-		 * Properties need to be attached to each drm object that
-		 * uses them, but only need to be created once
-		 */
-		if (!*prop) {
-			/* use 'create' for blob property place holder */
-			*prop = drm_property_create(info->dev,
-					DRM_MODE_PROP_BLOB | flags, name, 0);
-			if (!*prop)
-				DRM_ERROR("create %s property failed\n", name);
-		}
-
-		/* save init value for later */
-		info->property_data[property_idx].default_value = 0;
-		info->property_data[property_idx].force_dirty = true;
-
-		/* always attach property, if created */
-		if (*prop) {
-			drm_object_attach_property(info->base, *prop, -1);
-			++info->install_count;
-		}
-	}
-}
-
-int msm_property_install_get_status(struct msm_property_info *info)
-{
-	int rc = -ENOMEM;
-
-	if (info && (info->install_request == info->install_count))
-		rc = 0;
-
-	return rc;
-}
-
-int msm_property_index(struct msm_property_info *info,
-		struct drm_property *property)
-{
-	uint32_t count;
-	int32_t idx;
-	int rc = -EINVAL;
-
-	if (!info || !property) {
-		DRM_ERROR("invalid argument(s)\n");
-	} else {
-		/*
-		 * Linear search, but start from last found index. This will
-		 * help if any single property is accessed multiple times in a
-		 * row. Ideally, we could keep a list of properties sorted in
-		 * the order of most recent access, but that may be overkill
-		 * for now.
-		 */
-		mutex_lock(&info->property_lock);
-		idx = info->recent_idx;
-		count = info->property_count;
-		while (count) {
-			--count;
-
-			/* stop searching on match */
-			if (info->property_array[idx] == property) {
-				info->recent_idx = idx;
-				rc = idx;
-				break;
-			}
-
-			/* move to next valid index */
-			if (--idx < 0)
-				idx = info->property_count - 1;
-		}
-		mutex_unlock(&info->property_lock);
-	}
-
-	return rc;
-}
-
-int msm_property_set_dirty(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		int property_idx)
-{
-	if (!info || !property_state || !property_state->values) {
-		DRM_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-	mutex_lock(&info->property_lock);
-	_msm_property_set_dirty_no_lock(info, property_state, property_idx);
-	mutex_unlock(&info->property_lock);
-	return 0;
-}
-
-int msm_property_atomic_set(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		struct drm_property *property, uint64_t val)
-{
-	struct drm_property_blob *blob;
-	int property_idx, rc = -EINVAL;
-
-	if (!info || !property_state) {
-		DRM_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
-	property_idx = msm_property_index(info, property);
-	if ((property_idx == -EINVAL) || !property_state->values) {
-		DRM_ERROR("invalid argument(s)\n");
-	} else {
-		/* extra handling for incoming properties */
-		mutex_lock(&info->property_lock);
-		if ((property->flags & DRM_MODE_PROP_BLOB) &&
-			(property_idx < info->blob_count)) {
-
-			/* need to clear previous ref */
-			if (property_state->values[property_idx].blob)
-				drm_property_blob_put(
-					property_state->values[
-						property_idx].blob);
-
-			/* DRM lookup also takes a reference */
-			blob = drm_property_lookup_blob(info->dev,
-				(uint32_t)val);
-			if (val && !blob) {
-				DRM_ERROR("prop %d blob id 0x%llx not found\n",
-						property_idx, val);
-				val = 0;
-			} else {
-				if (blob) {
-					DBG("Blob %u saved", blob->base.id);
-					val = blob->base.id;
-				}
-
-				/* save the new blob */
-				property_state->values[property_idx].blob =
-					blob;
-			}
-		}
-
-		/* update value and flag as dirty */
-		if (property_state->values[property_idx].value != val ||
-				info->property_data[property_idx].force_dirty) {
-			property_state->values[property_idx].value = val;
-			_msm_property_set_dirty_no_lock(info, property_state,
-					property_idx);
-
-			DBG("%s - %lld", property->name, val);
-		}
-		mutex_unlock(&info->property_lock);
-		rc = 0;
-	}
-
-	return rc;
-}
-
-int msm_property_atomic_get(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		struct drm_property *property, uint64_t *val)
-{
-	int property_idx, rc = -EINVAL;
-
-	property_idx = msm_property_index(info, property);
-	if (!info || (property_idx == -EINVAL) ||
-			!property_state->values || !val) {
-		DRM_DEBUG("Invalid argument(s)\n");
-	} else {
-		mutex_lock(&info->property_lock);
-		*val = property_state->values[property_idx].value;
-		mutex_unlock(&info->property_lock);
-		rc = 0;
-	}
-
-	return rc;
-}
-
-void *msm_property_alloc_state(struct msm_property_info *info)
-{
-	void *state = NULL;
-
-	if (!info) {
-		DRM_ERROR("invalid property info\n");
-		return NULL;
-	}
-
-	mutex_lock(&info->property_lock);
-	if (info->state_cache_size)
-		state = info->state_cache[--(info->state_cache_size)];
-	mutex_unlock(&info->property_lock);
-
-	if (!state && info->state_size)
-		state = kmalloc(info->state_size, GFP_KERNEL);
-
-	if (!state)
-		DRM_ERROR("failed to allocate state\n");
-
-	return state;
-}
-
-/**
- * _msm_property_free_state - helper function for freeing local state objects
- * @info: Pointer to property info container struct
- * @st: Pointer to state object
- */
-static void _msm_property_free_state(struct msm_property_info *info, void *st)
-{
-	if (!info || !st)
-		return;
-
-	mutex_lock(&info->property_lock);
-	if (info->state_cache_size < MSM_PROP_STATE_CACHE_SIZE)
-		info->state_cache[(info->state_cache_size)++] = st;
-	else
-		kfree(st);
-	mutex_unlock(&info->property_lock);
-}
-
-void msm_property_reset_state(struct msm_property_info *info, void *state,
-		struct msm_property_state *property_state,
-		struct msm_property_value *property_values)
-{
-	uint32_t i;
-
-	if (!info) {
-		DRM_ERROR("invalid property info\n");
-		return;
-	}
-
-	if (state)
-		memset(state, 0, info->state_size);
-
-	if (property_state) {
-		property_state->property_count = info->property_count;
-		property_state->values = property_values;
-		INIT_LIST_HEAD(&property_state->dirty_list);
-	}
-
-	/*
-	 * Assign default property values. This helper is mostly used
-	 * to initialize newly created state objects.
-	 */
-	if (property_values)
-		for (i = 0; i < info->property_count; ++i) {
-			property_values[i].value =
-				info->property_data[i].default_value;
-			property_values[i].blob = NULL;
-			INIT_LIST_HEAD(&property_values[i].dirty_node);
-		}
-}
-
-void msm_property_duplicate_state(struct msm_property_info *info,
-		void *old_state, void *state,
-		struct msm_property_state *property_state,
-		struct msm_property_value *property_values)
-{
-	uint32_t i;
-
-	if (!info || !old_state || !state) {
-		DRM_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	memcpy(state, old_state, info->state_size);
-
-	if (!property_state)
-		return;
-
-	INIT_LIST_HEAD(&property_state->dirty_list);
-	property_state->values = property_values;
-
-	if (property_state->values)
-		/* add ref count for blobs and initialize dirty nodes */
-		for (i = 0; i < info->property_count; ++i) {
-			if (property_state->values[i].blob)
-				drm_property_blob_get(
-						property_state->values[i].blob);
-			INIT_LIST_HEAD(&property_state->values[i].dirty_node);
-		}
-}
-
-void msm_property_destroy_state(struct msm_property_info *info, void *state,
-		struct msm_property_state *property_state)
-{
-	uint32_t i;
-
-	if (!info || !state) {
-		DRM_ERROR("invalid argument(s)\n");
-		return;
-	}
-	if (property_state && property_state->values) {
-		/* remove ref count for blobs */
-		for (i = 0; i < info->property_count; ++i)
-			if (property_state->values[i].blob) {
-				drm_property_blob_put(
-						property_state->values[i].blob);
-				property_state->values[i].blob = NULL;
-			}
-	}
-
-	_msm_property_free_state(info, state);
-}
-
-void *msm_property_get_blob(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		size_t *byte_len,
-		uint32_t property_idx)
-{
-	struct drm_property_blob *blob;
-	size_t len = 0;
-	void *rc = 0;
-
-	if (!info || !property_state || !property_state->values ||
-			(property_idx >= info->blob_count)) {
-		DRM_ERROR("invalid argument(s)\n");
-	} else {
-		blob = property_state->values[property_idx].blob;
-		if (blob) {
-			len = blob->length;
-			rc = blob->data;
-		}
-	}
-
-	if (byte_len)
-		*byte_len = len;
-
-	return rc;
-}
-
-int msm_property_set_blob(struct msm_property_info *info,
-		struct drm_property_blob **blob_reference,
-		void *blob_data,
-		size_t byte_len,
-		uint32_t property_idx)
-{
-	struct drm_property_blob *blob = NULL;
-	int rc = -EINVAL;
-
-	if (!info || !blob_reference || (property_idx >= info->blob_count)) {
-		DRM_ERROR("invalid argument(s)\n");
-	} else {
-		/* create blob */
-		if (blob_data && byte_len) {
-			blob = drm_property_create_blob(info->dev,
-					byte_len,
-					blob_data);
-			if (IS_ERR_OR_NULL(blob)) {
-				rc = PTR_ERR(blob);
-				DRM_ERROR("failed to create blob, %d\n", rc);
-				goto exit;
-			}
-		}
-
-		/* update drm object */
-		rc = drm_object_property_set_value(info->base,
-				info->property_array[property_idx],
-				blob ? blob->base.id : 0);
-		if (rc) {
-			DRM_ERROR("failed to set blob to property\n");
-			if (blob)
-				drm_property_blob_put(blob);
-			goto exit;
-		}
-
-		/* update local reference */
-		if (*blob_reference)
-			drm_property_blob_put(*blob_reference);
-		*blob_reference = blob;
-	}
-
-exit:
-	return rc;
-}
-
-int msm_property_set_property(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		uint32_t property_idx,
-		uint64_t val)
-{
-	int rc = -EINVAL;
-
-	if (!info || (property_idx >= info->property_count) ||
-			property_idx < info->blob_count ||
-			!property_state || !property_state->values) {
-		DRM_ERROR("invalid argument(s)\n");
-	} else {
-		struct drm_property *drm_prop;
-
-		mutex_lock(&info->property_lock);
-
-		/* update cached value */
-		property_state->values[property_idx].value = val;
-
-		/* update the new default value for immutables */
-		drm_prop = info->property_array[property_idx];
-		if (drm_prop->flags & DRM_MODE_PROP_IMMUTABLE)
-			info->property_data[property_idx].default_value = val;
-
-		mutex_unlock(&info->property_lock);
-
-		/* update drm object */
-		rc = drm_object_property_set_value(info->base, drm_prop, val);
-		if (rc)
-			DRM_ERROR("failed set property value, idx %d rc %d\n",
-					property_idx, rc);
-
-	}
-
-	return rc;
-}
-
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
deleted file mode 100644
index 6403b90..0000000
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ /dev/null
@@ -1,429 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _MSM_PROP_H_
-#define _MSM_PROP_H_
-
-#include <linux/list.h>
-#include "msm_drv.h"
-
-#define MSM_PROP_STATE_CACHE_SIZE	2
-
-/**
- * struct msm_property_data - opaque structure for tracking per
- *                            drm-object per property stuff
- * @default_value: Default property value for this drm object
- * @force_dirty: Always dirty property on incoming sets, rather than checking
- *               for modified values
- */
-struct msm_property_data {
-	uint64_t default_value;
-	bool force_dirty;
-};
-
-/**
- * struct msm_property_value - opaque structure for tracking per
- *                             drm-object per property stuff
- * @value: Current property value for this drm object
- * @blob: Pointer to associated blob data, if available
- * @dirty_node: Linked list node to track if property is dirty or not
- */
-struct msm_property_value {
-	uint64_t value;
-	struct drm_property_blob *blob;
-	struct list_head dirty_node;
-};
-
-/**
- * struct msm_property_info: Structure for property/state helper functions
- * @base: Pointer to base drm object (plane/crtc/etc.)
- * @dev: Pointer to drm device object
- * @property_array: Pointer to array for storing created property objects
- * @property_data: Pointer to array for storing private property data
- * @property_count: Total number of properties
- * @blob_count: Total number of blob properties, should be <= count
- * @install_request: Total number of property 'install' requests
- * @install_count: Total number of successful 'install' requests
- * @recent_idx: Index of property most recently accessed by set/get
- * @is_active: Whether or not drm component properties are 'active'
- * @state_cache: Cache of local states, to prevent alloc/free thrashing
- * @state_size: Size of local state structures
- * @state_cache_size: Number of state structures currently stored in state_cache
- * @property_lock: Mutex to protect local variables
- */
-struct msm_property_info {
-	struct drm_mode_object *base;
-	struct drm_device *dev;
-
-	struct drm_property **property_array;
-	struct msm_property_data *property_data;
-	uint32_t property_count;
-	uint32_t blob_count;
-	uint32_t install_request;
-	uint32_t install_count;
-
-	int32_t recent_idx;
-
-	bool is_active;
-
-	void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
-	uint32_t state_size;
-	int32_t state_cache_size;
-	struct mutex property_lock;
-};
-
-/**
- * struct msm_property_state - Structure for local property state information
- * @property_count: Total number of properties
- * @values: Pointer to array of msm_property_value objects
- * @dirty_list: List of all properties that have been 'atomic_set' but not
- *              yet cleared with 'msm_property_pop_dirty'
- */
-struct msm_property_state {
-	uint32_t property_count;
-	struct msm_property_value *values;
-	struct list_head dirty_list;
-};
-
-/**
- * msm_property_index_to_drm_property - get drm property struct from prop index
- * @info: Pointer to property info container struct
- * @property_idx: Property index
- * Returns: drm_property pointer associated with property index
- */
-static inline
-struct drm_property *msm_property_index_to_drm_property(
-		struct msm_property_info *info, uint32_t property_idx)
-{
-	if (!info || property_idx >= info->property_count)
-		return NULL;
-
-	return info->property_array[property_idx];
-}
-
-/**
- * msm_property_get_default - query default value of a property
- * @info: Pointer to property info container struct
- * @property_idx: Property index
- * Returns: Default value for specified property
- */
-static inline
-uint64_t msm_property_get_default(struct msm_property_info *info,
-		uint32_t property_idx)
-{
-	uint64_t rc = 0;
-
-	if (!info)
-		return 0;
-
-	mutex_lock(&info->property_lock);
-	if (property_idx < info->property_count)
-		rc = info->property_data[property_idx].default_value;
-	mutex_unlock(&info->property_lock);
-
-	return rc;
-}
-
-/**
- * msm_property_set_is_active - set overall 'active' status for all properties
- * @info: Pointer to property info container struct
- * @is_active: New 'is active' status
- */
-static inline
-void msm_property_set_is_active(struct msm_property_info *info, bool is_active)
-{
-	if (info) {
-		mutex_lock(&info->property_lock);
-		info->is_active = is_active;
-		mutex_unlock(&info->property_lock);
-	}
-}
-
-/**
- * msm_property_get_is_active - query property 'is active' status
- * @info: Pointer to property info container struct
- * Returns: Current 'is active's status
- */
-static inline
-bool msm_property_get_is_active(struct msm_property_info *info)
-{
-	bool rc = false;
-
-	if (info) {
-		mutex_lock(&info->property_lock);
-		rc = info->is_active;
-		mutex_unlock(&info->property_lock);
-	}
-
-	return rc;
-}
-
-/**
- * msm_property_pop_dirty - determine next dirty property and clear
- *                          its dirty flag
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * Returns: Valid msm property index on success,
- *          -EAGAIN if no dirty properties are available
- *          Property indicies returned from this function are similar
- *          to those returned by the msm_property_index function.
- */
-int msm_property_pop_dirty(struct msm_property_info *info,
-		struct msm_property_state *property_state);
-
-/**
- * msm_property_init - initialize property info structure
- * @info: Pointer to property info container struct
- * @base: Pointer to base drm object (plane/crtc/etc.)
- * @dev: Pointer to drm device object
- * @property_array: Pointer to array for storing created property objects
- * @property_data: Pointer to array for storing private property data
- * @property_count: Total number of properties
- * @blob_count: Total number of blob properties, should be <= count
- * @state_size: Size of local state object
- */
-void msm_property_init(struct msm_property_info *info,
-		struct drm_mode_object *base,
-		struct drm_device *dev,
-		struct drm_property **property_array,
-		struct msm_property_data *property_data,
-		uint32_t property_count,
-		uint32_t blob_count,
-		uint32_t state_size);
-
-/**
- * msm_property_destroy - destroy helper info structure
- *
- * @info: Pointer to property info container struct
- */
-void msm_property_destroy(struct msm_property_info *info);
-
-/**
- * msm_property_install_range - install standard drm range property
- * @info: Pointer to property info container struct
- * @name: Property name
- * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
- * @min: Min property value
- * @max: Max property value
- * @init: Default Property value
- * @property_idx: Property index
- */
-void msm_property_install_range(struct msm_property_info *info,
-		const char *name,
-		int flags,
-		uint64_t min,
-		uint64_t max,
-		uint64_t init,
-		uint32_t property_idx);
-
-/**
- * msm_property_install_volatile_range - install drm range property
- *	This function is similar to msm_property_install_range, but assumes
- *	that the property is meant for holding user pointers or descriptors
- *	that may reference volatile data without having an updated value.
- * @info: Pointer to property info container struct
- * @name: Property name
- * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
- * @min: Min property value
- * @max: Max property value
- * @init: Default Property value
- * @property_idx: Property index
- */
-void msm_property_install_volatile_range(struct msm_property_info *info,
-		const char *name,
-		int flags,
-		uint64_t min,
-		uint64_t max,
-		uint64_t init,
-		uint32_t property_idx);
-
-/**
- * msm_property_install_enum - install standard drm enum/bitmask property
- * @info: Pointer to property info container struct
- * @name: Property name
- * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
- * @is_bitmask: Set to non-zero to create a bitmask property, rather than an
- *              enumeration one
- * @values: Array of allowable enumeration/bitmask values
- * @num_values: Size of values array
- * @property_idx: Property index
- */
-void msm_property_install_enum(struct msm_property_info *info,
-		const char *name,
-		int flags,
-		int is_bitmask,
-		const struct drm_prop_enum_list *values,
-		int num_values,
-		uint32_t property_idx);
-
-/**
- * msm_property_install_blob - install standard drm blob property
- * @info: Pointer to property info container struct
- * @name: Property name
- * @flags: Extra flags for property creation
- * @property_idx: Property index
- */
-void msm_property_install_blob(struct msm_property_info *info,
-		const char *name,
-		int flags,
-		uint32_t property_idx);
-
-/**
- * msm_property_install_get_status - query overal status of property additions
- * @info: Pointer to property info container struct
- * Returns: Zero if previous property install calls were all successful
- */
-int msm_property_install_get_status(struct msm_property_info *info);
-
-/**
- * msm_property_index - determine property index from drm_property ptr
- * @info: Pointer to property info container struct
- * @property: Incoming property pointer
- * Returns: Valid property index, or -EINVAL on error
- */
-int msm_property_index(struct msm_property_info *info,
-		struct drm_property *property);
-
-/**
- * msm_property_set_dirty - forcibly flag a property as dirty
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * @property_idx: Property index
- * Returns: Zero on success
- */
-int msm_property_set_dirty(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		int property_idx);
-
-/**
- * msm_property_is_dirty - check whether a property is dirty
- *	Note: Intended for use during atomic_check before pop_dirty usage
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * @property_idx: Property index
- * Returns: true if dirty, false otherwise
- */
-bool msm_property_is_dirty(
-		struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		uint32_t property_idx);
-
-/**
- * msm_property_atomic_set - helper function for atomic property set callback
- * @info: Pointer to property info container struct
- * @property_state: Pointer to local state structure
- * @property: Incoming property pointer
- * @val: Incoming property value
- * Returns: Zero on success
- */
-int msm_property_atomic_set(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		struct drm_property *property,
-		uint64_t val);
-
-/**
- * msm_property_atomic_get - helper function for atomic property get callback
- * @info: Pointer to property info container struct
- * @property_state: Pointer to local state structure
- * @property: Incoming property pointer
- * @val: Pointer to variable for receiving property value
- * Returns: Zero on success
- */
-int msm_property_atomic_get(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		struct drm_property *property,
-		uint64_t *val);
-
-/**
- * msm_property_alloc_state - helper function for allocating local state objects
- * @info: Pointer to property info container struct
- */
-void *msm_property_alloc_state(struct msm_property_info *info);
-
-/**
- * msm_property_reset_state - helper function for state reset callback
- * @info: Pointer to property info container struct
- * @state: Pointer to local state structure
- * @property_state: Pointer to property state container struct
- * @property_values: Pointer to property values cache array
- */
-void msm_property_reset_state(struct msm_property_info *info, void *state,
-		struct msm_property_state *property_state,
-		struct msm_property_value *property_values);
-
-/**
- * msm_property_duplicate_state - helper function for duplicate state cb
- * @info: Pointer to property info container struct
- * @old_state: Pointer to original state structure
- * @state: Pointer to newly created state structure
- * @property_state: Pointer to destination property state container struct
- * @property_values: Pointer to property values cache array
- */
-void msm_property_duplicate_state(struct msm_property_info *info,
-		void *old_state,
-		void *state,
-		struct msm_property_state *property_state,
-		struct msm_property_value *property_values);
-
-/**
- * msm_property_destroy_state - helper function for destroy state cb
- * @info: Pointer to property info container struct
- * @state: Pointer to local state structure
- * @property_state: Pointer to property state container struct
- */
-void msm_property_destroy_state(struct msm_property_info *info,
-		void *state,
-		struct msm_property_state *property_state);
-
-/**
- * msm_property_get_blob - obtain cached data pointer for drm blob property
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * @byte_len: Optional pointer to variable for accepting blob size
- * @property_idx: Property index
- * Returns: Pointer to blob data
- */
-void *msm_property_get_blob(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		size_t *byte_len,
-		uint32_t property_idx);
-
-/**
- * msm_property_set_blob - update blob property on a drm object
- * This function updates the blob property value of the given drm object. Its
- * intended use is to update blob properties that have been created with the
- * DRM_MODE_PROP_IMMUTABLE flag set.
- * @info: Pointer to property info container struct
- * @blob_reference: Reference to a pointer that holds the created data blob
- * @blob_data: Pointer to blob data
- * @byte_len: Length of blob data, in bytes
- * @property_idx: Property index
- * Returns: Zero on success
- */
-int msm_property_set_blob(struct msm_property_info *info,
-		struct drm_property_blob **blob_reference,
-		void *blob_data,
-		size_t byte_len,
-		uint32_t property_idx);
-
-/**
- * msm_property_set_property - update property on a drm object
- * This function updates the property value of the given drm object. Its
- * intended use is to update properties that have been created with the
- * DRM_MODE_PROP_IMMUTABLE flag set.
- * Note: This function cannot be called on a blob.
- * @info: Pointer to property info container struct
- * @property_state: Pointer to property state container struct
- * @property_idx: Property index
- * @val: value of the property to set
- * Returns: Zero on success
- */
-int msm_property_set_property(struct msm_property_info *info,
-		struct msm_property_state *property_state,
-		uint32_t property_idx,
-		uint64_t val);
-
-#endif /* _MSM_PROP_H_ */
-
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
deleted file mode 100644
index 8800fb0..0000000
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/pm_runtime.h>
-#include <linux/msm_dma_iommu_mapping.h>
-
-#include <asm/dma-iommu.h>
-#include <soc/qcom/secure_buffer.h>
-
-#include "msm_drv.h"
-#include "msm_gem.h"
-#include "msm_mmu.h"
-#include "sde_dbg.h"
-
-struct msm_smmu_client {
-	struct device *dev;
-	struct iommu_domain *domain;
-	bool domain_attached;
-	bool secure;
-};
-
-struct msm_smmu {
-	struct msm_mmu base;
-	struct device *client_dev;
-	struct msm_smmu_client *client;
-};
-
-struct msm_smmu_domain {
-	const char *label;
-	bool secure;
-};
-
-#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
-#define msm_smmu_to_client(smmu) (smmu->client)
-
-static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
-		int cnt)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	int rc = 0;
-
-	if (!client) {
-		pr_err("undefined smmu client\n");
-		return -EINVAL;
-	}
-
-	/* domain attach only once */
-	if (client->domain_attached)
-		return 0;
-
-	rc = iommu_attach_device(client->domain, client->dev);
-	if (rc) {
-		dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
-		return rc;
-	}
-
-	client->domain_attached = true;
-
-	dev_dbg(client->dev, "iommu domain attached\n");
-
-	return 0;
-}
-
-static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
-		int cnt)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
-	if (!client) {
-		pr_err("undefined smmu client\n");
-		return;
-	}
-
-	if (!client->domain_attached)
-		return;
-
-	pm_runtime_get_sync(mmu->dev);
-	iommu_detach_device(client->domain, client->dev);
-	pm_runtime_put_sync(mmu->dev);
-
-	client->domain_attached = false;
-	dev_dbg(client->dev, "iommu domain detached\n");
-}
-
-static int msm_smmu_set_attribute(struct msm_mmu *mmu,
-		enum iommu_attr attr, void *data)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	int ret = 0;
-
-	if (!client || !client->domain)
-		return -ENODEV;
-
-	ret = iommu_domain_set_attr(client->domain, attr, data);
-	if (ret)
-		DRM_ERROR("set domain attribute failed:%d\n", ret);
-
-	return ret;
-}
-
-static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
-				uint32_t dest_address, uint32_t size)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	int ret = 0;
-
-	if (!client || !client->domain)
-		return -ENODEV;
-
-	ret = iommu_unmap(client->domain, dest_address, size);
-	if (ret != size)
-		pr_err("smmu unmap failed\n");
-
-	return 0;
-}
-
-static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
-		uint32_t dest_address, uint32_t size, int prot)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	int ret = 0;
-
-	if (!client || !client->domain)
-		return -ENODEV;
-
-	ret = iommu_map(client->domain, dest_address, dest_address,
-			size, prot);
-	if (ret)
-		pr_err("smmu map failed\n");
-
-	return ret;
-}
-
-static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned int len, int prot)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	size_t ret = 0;
-
-	if (sgt && sgt->sgl) {
-		ret = iommu_map_sg(client->domain, iova, sgt->sgl,
-				sgt->nents, prot);
-		WARN_ON((int)ret < 0);
-		DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
-				sgt->sgl->dma_length, prot);
-		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, prot);
-	}
-	return (ret == len) ? 0 : -EINVAL;
-}
-
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned int len)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
-	pm_runtime_get_sync(mmu->dev);
-	iommu_unmap(client->domain, iova, len);
-	pm_runtime_put_sync(mmu->dev);
-
-	return 0;
-}
-
-static void msm_smmu_destroy(struct msm_mmu *mmu)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct platform_device *pdev = to_platform_device(smmu->client_dev);
-
-	if (smmu->client_dev)
-		platform_device_unregister(pdev);
-	kfree(smmu);
-}
-
-struct device *msm_smmu_get_dev(struct msm_mmu *mmu)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-
-	return smmu->client_dev;
-}
-
-static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-		int dir, u32 flags)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	unsigned long attrs = 0x0;
-	int ret;
-
-	if (!sgt || !client) {
-		DRM_ERROR("sg table is invalid\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * For import buffer type, dma_map_sg_attrs is called during
-	 * dma_buf_map_attachment and is not required to call again
-	 */
-	if (!(flags & MSM_BO_EXTBUF)) {
-		ret = dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
-				attrs);
-		if (!ret) {
-			DRM_ERROR("dma map sg failed\n");
-			return -ENOMEM;
-		}
-	}
-
-	if (sgt && sgt->sgl) {
-		DRM_DEBUG("%pad/0x%x/0x%x/0x%lx\n",
-				&sgt->sgl->dma_address, sgt->sgl->dma_length,
-				dir, attrs);
-		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
-				dir, attrs, client->secure);
-	}
-
-	return 0;
-}
-
-
-static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-		int dir, u32 flags)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
-	if (!sgt || !client) {
-		DRM_ERROR("sg table is invalid\n");
-		return;
-	}
-
-	if (sgt->sgl) {
-		DRM_DEBUG("%pad/0x%x/0x%x\n",
-				&sgt->sgl->dma_address, sgt->sgl->dma_length,
-				dir);
-		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
-				dir, client->secure);
-	}
-
-	if (!(flags & MSM_BO_EXTBUF))
-		dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
-}
-
-static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
-{
-	struct msm_smmu *smmu = to_msm_smmu(mmu);
-	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
-	return client->secure;
-}
-
-static const struct msm_mmu_funcs funcs = {
-	.attach = msm_smmu_attach,
-	.detach = msm_smmu_detach,
-	.map = msm_smmu_map,
-	.unmap = msm_smmu_unmap,
-	.map_dma_buf = msm_smmu_map_dma_buf,
-	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
-	.destroy = msm_smmu_destroy,
-	.is_domain_secure = msm_smmu_is_domain_secure,
-	.set_attribute = msm_smmu_set_attribute,
-	.one_to_one_map = msm_smmu_one_to_one_map,
-	.one_to_one_unmap = msm_smmu_one_to_one_unmap,
-	.get_dev = msm_smmu_get_dev,
-};
-
-static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
-	[MSM_SMMU_DOMAIN_UNSECURE] = {
-		.label = "mdp_ns",
-		.secure = false,
-	},
-	[MSM_SMMU_DOMAIN_SECURE] = {
-		.label = "mdp_s",
-		.secure = true,
-	},
-	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
-		.label = "rot_ns",
-		.secure = false,
-	},
-	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
-		.label = "rot_s",
-		.secure = true,
-	},
-};
-
-static const struct of_device_id msm_smmu_dt_match[] = {
-	{ .compatible = "qcom,smmu_sde_unsec",
-		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
-	{ .compatible = "qcom,smmu_sde_sec",
-		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
-	{ .compatible = "qcom,smmu_sde_nrt_unsec",
-		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
-	{ .compatible = "qcom,smmu_sde_nrt_sec",
-		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
-	{}
-};
-MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
-
-static struct device *msm_smmu_device_create(struct device *dev,
-		enum msm_mmu_domain_type domain,
-		struct msm_smmu *smmu)
-{
-	struct device_node *child;
-	struct platform_device *pdev;
-	int i;
-	const char *compat = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
-		if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
-			compat = msm_smmu_dt_match[i].compatible;
-			break;
-		}
-	}
-
-	if (!compat) {
-		DRM_DEBUG("unable to find matching domain for %d\n", domain);
-		return ERR_PTR(-ENOENT);
-	}
-	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);
-
-	child = of_find_compatible_node(dev->of_node, NULL, compat);
-	if (!child) {
-		DRM_DEBUG("unable to find compatible node for %s\n", compat);
-		return ERR_PTR(-ENODEV);
-	}
-
-	pdev = of_platform_device_create(child, NULL, dev);
-	if (!pdev) {
-		DRM_ERROR("unable to create smmu platform dev for domain %d\n",
-				domain);
-		return ERR_PTR(-ENODEV);
-	}
-
-	smmu->client = platform_get_drvdata(pdev);
-
-	return &pdev->dev;
-}
-
-struct msm_mmu *msm_smmu_new(struct device *dev,
-		enum msm_mmu_domain_type domain)
-{
-	struct msm_smmu *smmu;
-	struct device *client_dev;
-
-	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
-	if (!smmu)
-		return ERR_PTR(-ENOMEM);
-
-	client_dev = msm_smmu_device_create(dev, domain, smmu);
-	if (IS_ERR(client_dev)) {
-		kfree(smmu);
-		return (void *)client_dev ? : ERR_PTR(-ENODEV);
-	}
-
-	smmu->client_dev = client_dev;
-	msm_mmu_init(&smmu->base, dev, &funcs);
-
-	return &smmu->base;
-}
-
-static int msm_smmu_fault_handler(struct iommu_domain *domain,
-		struct device *dev, unsigned long iova,
-		int flags, void *token)
-{
-	struct msm_smmu_client *client;
-	int rc = -EINVAL;
-
-	if (!token) {
-		DRM_ERROR("Error: token is NULL\n");
-		return -EINVAL;
-	}
-
-	client = (struct msm_smmu_client *)token;
-
-	/* see iommu.h for fault flags definition */
-	SDE_EVT32(iova, flags);
-	DRM_ERROR("trigger dump, iova=0x%08lx, flags=0x%x\n", iova, flags);
-	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");
-
-	/*
-	 * return -ENOSYS to allow smmu driver to dump out useful
-	 * debug info.
-	 */
-	return rc;
-}
-
-/**
- * msm_smmu_probe()
- * @pdev: platform device
- *
- * Each smmu context acts as a separate device and the context banks are
- * configured with a VA range.
- * Registers the clks as each context bank has its own clks, for which voting
- * has to be done everytime before using that context bank.
- */
-static int msm_smmu_probe(struct platform_device *pdev)
-{
-	const struct of_device_id *match;
-	struct msm_smmu_client *client;
-	const struct msm_smmu_domain *domain;
-
-	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
-	if (!match || !match->data) {
-		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
-		return -EINVAL;
-	}
-
-	domain = match->data;
-	if (!domain) {
-		dev_err(&pdev->dev, "no matching device found\n");
-		return -EINVAL;
-	}
-
-	DRM_INFO("probing device %s\n", match->compatible);
-
-	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-
-	client->dev = &pdev->dev;
-	client->domain = iommu_get_domain_for_dev(client->dev);
-	if (!client->domain) {
-		dev_err(&pdev->dev, "iommu get domain for dev failed\n");
-		return -EINVAL;
-	}
-
-	if (!client->dev->dma_parms)
-		client->dev->dma_parms = devm_kzalloc(client->dev,
-				sizeof(*client->dev->dma_parms), GFP_KERNEL);
-	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
-	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
-
-	iommu_set_fault_handler(client->domain,
-			msm_smmu_fault_handler, (void *)client);
-
-	DRM_INFO("Created domain %s, secure=%d\n",
-			domain->label, domain->secure);
-
-	platform_set_drvdata(pdev, client);
-
-	return 0;
-}
-
-static int msm_smmu_remove(struct platform_device *pdev)
-{
-	struct msm_smmu_client *client;
-
-	client = platform_get_drvdata(pdev);
-	client->domain_attached = false;
-
-	return 0;
-}
-
-static struct platform_driver msm_smmu_driver = {
-	.probe = msm_smmu_probe,
-	.remove = msm_smmu_remove,
-	.driver = {
-		.name = "msmdrm_smmu",
-		.of_match_table = msm_smmu_dt_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-int __init msm_smmu_driver_init(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&msm_smmu_driver);
-	if (ret)
-		pr_err("mdss_smmu_register_driver() failed!\n");
-
-	return ret;
-}
-
-void __exit msm_smmu_driver_cleanup(void)
-{
-	platform_driver_unregister(&msm_smmu_driver);
-}
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
deleted file mode 100644
index 85fae2c..0000000
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-#ifndef _SDE_AD4_H_
-#define _SDE_AD4_H_
-
-#include <drm/drm_mode.h>
-#include <drm/drm_property.h>
-#include "sde_hw_dspp.h"
-
-/**
- * enum ad4_modes - ad4 modes supported by driver
- */
-enum ad4_modes {
-	AD4_OFF,
-	AD4_AUTO_STRENGTH,
-	AD4_CALIBRATION,
-	AD4_MANUAL,
-};
-
-/**
- * struct drm_prop_enum_list - drm structure for creating enum property and
- *                             enumerating values
- */
-static const struct drm_prop_enum_list ad4_modes[] = {
-	{AD4_OFF, "off"},
-	{AD4_AUTO_STRENGTH, "auto_strength_mode"},
-	{AD4_CALIBRATION, "calibration_mode"},
-	{AD4_MANUAL, "manual_mode"},
-};
-
-/**
- * enum ad_property - properties that can be set for ad
- */
-enum ad_property {
-	AD_MODE,
-	AD_INIT,
-	AD_CFG,
-	AD_INPUT,
-	AD_SUSPEND,
-	AD_ASSERTIVE,
-	AD_BACKLIGHT,
-	AD_STRENGTH,
-	AD_ROI,
-	AD_IPC_SUSPEND,
-	AD_IPC_RESUME,
-	AD_IPC_RESET,
-	AD_PROPMAX,
-};
-
-/**
- * enum ad_intr_resp_property - ad4 interrupt response enum
- */
-enum ad_intr_resp_property {
-	AD4_IN_OUT_BACKLIGHT,
-	AD4_RESPMAX,
-};
-
-/**
- * struct sde_ad_hw_cfg - structure for setting the ad properties
- * @prop: enum of ad property
- * @hw_cfg: payload for the prop being set.
- */
-struct sde_ad_hw_cfg {
-	enum ad_property prop;
-	struct sde_hw_cp_cfg *hw_cfg;
-};
-
-/**
- * sde_validate_dspp_ad4() - api to validate if ad property is allowed for
- *                           the display with allocated dspp/mixers.
- * @dspp: pointer to dspp info structure.
- * @prop: pointer to u32 pointing to ad property
- */
-int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop);
-
-/**
- * sde_setup_dspp_ad4 - api to apply the ad property, sde_validate_dspp_ad4
- *                      should be called before call this function
- * @dspp: pointer to dspp info structure.
- * @cfg: pointer to struct sde_ad_hw_cfg
- */
-void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
-
-/**
- * sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
- * @dspp: pointer to dspp object
- * @event: event for which response is needed
- * @resp_in: read ad4 input value of event requested
- * @resp_out: read ad4 output value of event requested
- */
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
-			u32 *resp_in, u32 *resp_out);
-
-#endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
deleted file mode 100644
index b380481..0000000
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ /dev/null
@@ -1,3276 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/dma-buf.h>
-#include <drm/msm_drm_pp.h>
-#include "sde_color_processing.h"
-#include "sde_kms.h"
-#include "sde_crtc.h"
-#include "sde_hw_dspp.h"
-#include "sde_hw_lm.h"
-#include "sde_ad4.h"
-#include "sde_hw_interrupts.h"
-#include "sde_core_irq.h"
-#include "dsi_panel.h"
-#include "sde_hw_color_proc_common_v4.h"
-
-struct sde_cp_node {
-	u32 property_id;
-	u32 prop_flags;
-	u32 feature;
-	void *blob_ptr;
-	uint64_t prop_val;
-	const struct sde_pp_blk *pp_blk;
-	struct list_head feature_list;
-	struct list_head active_list;
-	struct list_head dirty_list;
-	bool is_dspp_feature;
-	u32 prop_blob_sz;
-	struct sde_irq_callback *irq;
-};
-
-struct sde_cp_prop_attach {
-	struct drm_crtc *crtc;
-	struct drm_property *prop;
-	struct sde_cp_node *prop_node;
-	u32 feature;
-	uint64_t val;
-};
-
-#define ALIGNED_OFFSET (U32_MAX & ~(LTM_GUARD_BYTES))
-
-static void dspp_pcc_install_property(struct drm_crtc *crtc);
-
-static void dspp_hsic_install_property(struct drm_crtc *crtc);
-
-static void dspp_memcolor_install_property(struct drm_crtc *crtc);
-
-static void dspp_sixzone_install_property(struct drm_crtc *crtc);
-
-static void dspp_ad_install_property(struct drm_crtc *crtc);
-
-static void dspp_ltm_install_property(struct drm_crtc *crtc);
-
-static void dspp_vlut_install_property(struct drm_crtc *crtc);
-
-static void dspp_gamut_install_property(struct drm_crtc *crtc);
-
-static void dspp_gc_install_property(struct drm_crtc *crtc);
-
-static void dspp_igc_install_property(struct drm_crtc *crtc);
-
-static void dspp_hist_install_property(struct drm_crtc *crtc);
-
-static void dspp_dither_install_property(struct drm_crtc *crtc);
-
-typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
-
-static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
-
-static void sde_cp_update_list(struct sde_cp_node *prop_node,
-		struct sde_crtc *crtc, bool dirty_list);
-
-static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
-		struct sde_crtc *crtc);
-
-static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
-
-static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
-		enum ad_property ad_prop);
-
-static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg);
-
-static void _sde_cp_crtc_set_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg);
-static void _sde_cp_crtc_free_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg);
-static void _sde_cp_crtc_queue_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg);
-static int _sde_cp_crtc_get_ltm_buffer(struct sde_crtc *sde_crtc, u64 *addr);
-static void _sde_cp_crtc_enable_ltm_hist(struct sde_crtc *sde_crtc,
-		struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg);
-static void _sde_cp_crtc_disable_ltm_hist(struct sde_crtc *sde_crtc,
-		struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg);
-static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc_drm, void *arg);
-static void sde_cp_notify_ltm_wb_pb(struct drm_crtc *crtc_drm, void *arg);
-static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
-		struct sde_hw_cp_cfg *hw_cfg);
-
-#define setup_dspp_prop_install_funcs(func) \
-do { \
-	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
-	func[SDE_DSPP_HSIC] = dspp_hsic_install_property; \
-	func[SDE_DSPP_MEMCOLOR] = dspp_memcolor_install_property; \
-	func[SDE_DSPP_SIXZONE] = dspp_sixzone_install_property; \
-	func[SDE_DSPP_AD] = dspp_ad_install_property; \
-	func[SDE_DSPP_LTM] = dspp_ltm_install_property; \
-	func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
-	func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
-	func[SDE_DSPP_GC] = dspp_gc_install_property; \
-	func[SDE_DSPP_IGC] = dspp_igc_install_property; \
-	func[SDE_DSPP_HIST] = dspp_hist_install_property; \
-	func[SDE_DSPP_DITHER] = dspp_dither_install_property; \
-} while (0)
-
-typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
-
-static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
-
-static void lm_gc_install_property(struct drm_crtc *crtc);
-
-#define setup_lm_prop_install_funcs(func) \
-	(func[SDE_MIXER_GC] = lm_gc_install_property)
-
-enum {
-	/* Append new DSPP features before SDE_CP_CRTC_DSPP_MAX */
-	/* DSPP Features start */
-	SDE_CP_CRTC_DSPP_IGC,
-	SDE_CP_CRTC_DSPP_PCC,
-	SDE_CP_CRTC_DSPP_GC,
-	SDE_CP_CRTC_DSPP_HSIC,
-	SDE_CP_CRTC_DSPP_MEMCOL_SKIN,
-	SDE_CP_CRTC_DSPP_MEMCOL_SKY,
-	SDE_CP_CRTC_DSPP_MEMCOL_FOLIAGE,
-	SDE_CP_CRTC_DSPP_MEMCOL_PROT,
-	SDE_CP_CRTC_DSPP_SIXZONE,
-	SDE_CP_CRTC_DSPP_GAMUT,
-	SDE_CP_CRTC_DSPP_DITHER,
-	SDE_CP_CRTC_DSPP_HIST_CTRL,
-	SDE_CP_CRTC_DSPP_HIST_IRQ,
-	SDE_CP_CRTC_DSPP_AD,
-	SDE_CP_CRTC_DSPP_VLUT,
-	SDE_CP_CRTC_DSPP_AD_MODE,
-	SDE_CP_CRTC_DSPP_AD_INIT,
-	SDE_CP_CRTC_DSPP_AD_CFG,
-	SDE_CP_CRTC_DSPP_AD_INPUT,
-	SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS,
-	SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
-	SDE_CP_CRTC_DSPP_AD_STRENGTH,
-	SDE_CP_CRTC_DSPP_AD_ROI,
-	SDE_CP_CRTC_DSPP_LTM,
-	SDE_CP_CRTC_DSPP_LTM_INIT,
-	SDE_CP_CRTC_DSPP_LTM_ROI,
-	SDE_CP_CRTC_DSPP_LTM_HIST_CTL,
-	SDE_CP_CRTC_DSPP_LTM_HIST_THRESH,
-	SDE_CP_CRTC_DSPP_LTM_SET_BUF,
-	SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF,
-	SDE_CP_CRTC_DSPP_LTM_VLUT,
-	SDE_CP_CRTC_DSPP_MAX,
-	/* DSPP features end */
-
-	/* Append new LM features before SDE_CP_CRTC_MAX_FEATURES */
-	/* LM feature start*/
-	SDE_CP_CRTC_LM_GC,
-	/* LM feature end*/
-
-	SDE_CP_CRTC_MAX_FEATURES,
-};
-
-static void _sde_cp_crtc_enable_hist_irq(struct sde_crtc *sde_crtc);
-
-typedef int (*set_feature_wrapper)(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc);
-
-static int set_dspp_vlut_feature(struct sde_hw_dspp *hw_dspp,
-				 struct sde_hw_cp_cfg *hw_cfg,
-				 struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_vlut)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_vlut(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_pcc_feature(struct sde_hw_dspp *hw_dspp,
-				struct sde_hw_cp_cfg *hw_cfg,
-				struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pcc)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pcc(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_igc_feature(struct sde_hw_dspp *hw_dspp,
-				struct sde_hw_cp_cfg *hw_cfg,
-				struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_igc)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_igc(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_gc_feature(struct sde_hw_dspp *hw_dspp,
-			       struct sde_hw_cp_cfg *hw_cfg,
-			       struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_gc)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_gc(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_hsic_feature(struct sde_hw_dspp *hw_dspp,
-				 struct sde_hw_cp_cfg *hw_cfg,
-				 struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_hsic)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_hsic(hw_dspp, hw_cfg);
-
-	return ret;
-}
-
-
-static int set_dspp_memcol_skin_feature(struct sde_hw_dspp *hw_dspp,
-					struct sde_hw_cp_cfg *hw_cfg,
-					struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_memcol_skin)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_memcol_skin(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_memcol_sky_feature(struct sde_hw_dspp *hw_dspp,
-				       struct sde_hw_cp_cfg *hw_cfg,
-				       struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_memcol_sky)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_memcol_sky(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_memcol_foliage_feature(struct sde_hw_dspp *hw_dspp,
-					   struct sde_hw_cp_cfg *hw_cfg,
-					   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_memcol_foliage)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_memcol_foliage(hw_dspp, hw_cfg);
-	return ret;
-}
-
-
-static int set_dspp_memcol_prot_feature(struct sde_hw_dspp *hw_dspp,
-					struct sde_hw_cp_cfg *hw_cfg,
-					struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_memcol_prot)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_memcol_prot(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_sixzone_feature(struct sde_hw_dspp *hw_dspp,
-				    struct sde_hw_cp_cfg *hw_cfg,
-				    struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_sixzone)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_sixzone(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_gamut_feature(struct sde_hw_dspp *hw_dspp,
-				  struct sde_hw_cp_cfg *hw_cfg,
-				  struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_gamut)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_gamut(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_dither_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_pa_dither)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_pa_dither(hw_dspp, hw_cfg);
-	return ret;
-}
-
-static int set_dspp_hist_ctrl_feature(struct sde_hw_dspp *hw_dspp,
-				      struct sde_hw_cp_cfg *hw_cfg,
-				      struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-	bool feature_enabled;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_histogram) {
-		ret = -EINVAL;
-	} else {
-		feature_enabled = hw_cfg->payload &&
-			*((u64 *)hw_cfg->payload) != 0;
-		hw_dspp->ops.setup_histogram(hw_dspp, &feature_enabled);
-	}
-	return ret;
-}
-
-static int set_dspp_hist_irq_feature(struct sde_hw_dspp *hw_dspp,
-				     struct sde_hw_cp_cfg *hw_cfg,
-				     struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
-
-	if (!hw_dspp)
-		ret = -EINVAL;
-	else if (!hw_lm->cfg.right_mixer)
-		_sde_cp_crtc_enable_hist_irq(hw_crtc);
-	return ret;
-}
-
-
-static int set_dspp_ad_mode_feature(struct sde_hw_dspp *hw_dspp,
-				    struct sde_hw_cp_cfg *hw_cfg,
-				    struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_MODE;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_init_feature(struct sde_hw_dspp *hw_dspp,
-				    struct sde_hw_cp_cfg *hw_cfg,
-				    struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_INIT;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_cfg_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_CFG;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_input_feature(struct sde_hw_dspp *hw_dspp,
-				     struct sde_hw_cp_cfg *hw_cfg,
-				     struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_INPUT;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_assertive_feature(struct sde_hw_dspp *hw_dspp,
-					 struct sde_hw_cp_cfg *hw_cfg,
-					 struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_ASSERTIVE;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_backlight_feature(struct sde_hw_dspp *hw_dspp,
-					 struct sde_hw_cp_cfg *hw_cfg,
-					 struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_BACKLIGHT;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_strength_feature(struct sde_hw_dspp *hw_dspp,
-					struct sde_hw_cp_cfg *hw_cfg,
-					struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_STRENGTH;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_dspp_ad_roi_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ad) {
-		ret = -EINVAL;
-	} else {
-		struct sde_ad_hw_cfg ad_cfg;
-
-		ad_cfg.prop = AD_ROI;
-		ad_cfg.hw_cfg = hw_cfg;
-		hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-	return ret;
-}
-
-static int set_lm_gc_feature(struct sde_hw_dspp *hw_dspp,
-			     struct sde_hw_cp_cfg *hw_cfg,
-			     struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-	struct sde_hw_mixer *hw_lm = (struct sde_hw_mixer *)hw_cfg->mixer_info;
-
-	if (!hw_lm->ops.setup_gc)
-		ret = -EINVAL;
-	else
-		hw_lm->ops.setup_gc(hw_lm, hw_cfg);
-	return ret;
-}
-
-static int set_ltm_init_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_init)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_ltm_init(hw_dspp, hw_cfg);
-
-	return ret;
-}
-
-static int set_ltm_roi_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_roi) {
-		ret = -EINVAL;
-	} else {
-		hw_dspp->ops.setup_ltm_roi(hw_dspp, hw_cfg);
-		_sde_cp_crtc_update_ltm_roi(hw_crtc, hw_cfg);
-	}
-
-	return ret;
-}
-
-static int set_ltm_vlut_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *hw_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_vlut)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_ltm_vlut(hw_dspp, hw_cfg);
-
-	return ret;
-}
-
-static int set_ltm_thresh_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *sde_crtc)
-{
-	int ret = 0;
-
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_thresh)
-		ret = -EINVAL;
-	else
-		hw_dspp->ops.setup_ltm_thresh(hw_dspp, hw_cfg);
-
-	return ret;
-}
-
-static int set_ltm_buffers_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *sde_crtc)
-{
-	int ret = 0;
-	struct sde_hw_mixer *hw_lm;
-	struct drm_msm_ltm_buffers_ctrl *payload;
-
-	if (!sde_crtc || !hw_dspp) {
-		ret = -EINVAL;
-	} else {
-		hw_lm = hw_cfg->mixer_info;
-		/* in merge mode, both LTM cores use the same buffer */
-		if (!hw_lm->cfg.right_mixer) {
-			payload = hw_cfg->payload;
-			mutex_lock(&sde_crtc->ltm_buffer_lock);
-			if (payload)
-				_sde_cp_crtc_set_ltm_buffer(sde_crtc, hw_cfg);
-			else
-				_sde_cp_crtc_free_ltm_buffer(sde_crtc, hw_cfg);
-			mutex_unlock(&sde_crtc->ltm_buffer_lock);
-		}
-	}
-
-	return ret;
-}
-
-static int set_ltm_queue_buf_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *sde_crtc)
-{
-	int ret = 0;
-	struct sde_hw_mixer *hw_lm;
-
-	if (!sde_crtc || !hw_dspp) {
-		ret = -EINVAL;
-	} else {
-		hw_lm = hw_cfg->mixer_info;
-		/* in merge mode, both LTM cores use the same buffer */
-		if (!hw_lm->cfg.right_mixer) {
-			mutex_lock(&sde_crtc->ltm_buffer_lock);
-			_sde_cp_crtc_queue_ltm_buffer(sde_crtc, hw_cfg);
-			mutex_unlock(&sde_crtc->ltm_buffer_lock);
-		}
-	}
-
-	return ret;
-}
-
-static int set_ltm_hist_crtl_feature(struct sde_hw_dspp *hw_dspp,
-				   struct sde_hw_cp_cfg *hw_cfg,
-				   struct sde_crtc *sde_crtc)
-{
-	int ret = 0;
-	bool feature_enabled = false;
-
-	if (!sde_crtc || !hw_dspp || !hw_dspp->ops.setup_ltm_hist_ctrl) {
-		ret = -EINVAL;
-	} else {
-		mutex_lock(&sde_crtc->ltm_buffer_lock);
-		feature_enabled = hw_cfg->payload &&
-			(*((u64 *)hw_cfg->payload) != 0);
-		if (feature_enabled)
-			_sde_cp_crtc_enable_ltm_hist(sde_crtc, hw_dspp, hw_cfg);
-		else
-			_sde_cp_crtc_disable_ltm_hist(sde_crtc, hw_dspp,
-					hw_cfg);
-		mutex_unlock(&sde_crtc->ltm_buffer_lock);
-	}
-
-	return ret;
-}
-
-set_feature_wrapper crtc_feature_wrappers[SDE_CP_CRTC_MAX_FEATURES];
-
-#define setup_crtc_feature_wrappers(wrappers) \
-do { \
-	memset(wrappers, 0, sizeof(wrappers)); \
-	wrappers[SDE_CP_CRTC_DSPP_VLUT] = set_dspp_vlut_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_PCC] = set_dspp_pcc_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_IGC] = set_dspp_igc_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_GC] = set_dspp_gc_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_HSIC] =\
-		set_dspp_hsic_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_MEMCOL_SKIN] = set_dspp_memcol_skin_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_MEMCOL_SKY] =\
-		set_dspp_memcol_sky_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_MEMCOL_FOLIAGE] =\
-		set_dspp_memcol_foliage_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_MEMCOL_PROT] = set_dspp_memcol_prot_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_SIXZONE] = set_dspp_sixzone_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_GAMUT] = set_dspp_gamut_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_DITHER] = set_dspp_dither_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_HIST_CTRL] = set_dspp_hist_ctrl_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_HIST_IRQ] = set_dspp_hist_irq_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_MODE] = set_dspp_ad_mode_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_INIT] = set_dspp_ad_init_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_CFG] = set_dspp_ad_cfg_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_INPUT] = set_dspp_ad_input_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS] =\
-		set_dspp_ad_assertive_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_BACKLIGHT] =\
-		set_dspp_ad_backlight_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_STRENGTH] = set_dspp_ad_strength_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_AD_ROI] = set_dspp_ad_roi_feature; \
-	wrappers[SDE_CP_CRTC_LM_GC] = set_lm_gc_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_INIT] = set_ltm_init_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_ROI] = set_ltm_roi_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_VLUT] = set_ltm_vlut_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_HIST_THRESH] = set_ltm_thresh_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_SET_BUF] = set_ltm_buffers_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF] = set_ltm_queue_buf_feature; \
-	wrappers[SDE_CP_CRTC_DSPP_LTM_HIST_CTL] = set_ltm_hist_crtl_feature; \
-} while (0)
-
-#define INIT_PROP_ATTACH(p, crtc, prop, node, feature, val) \
-	do { \
-		(p)->crtc = crtc; \
-		(p)->prop = prop; \
-		(p)->prop_node = node; \
-		(p)->feature = feature; \
-		(p)->val = val; \
-	} while (0)
-
-static void sde_cp_get_hw_payload(struct sde_cp_node *prop_node,
-				  struct sde_hw_cp_cfg *hw_cfg,
-				  bool *feature_enabled)
-{
-	struct drm_property_blob *blob = NULL;
-	memset(hw_cfg, 0, sizeof(*hw_cfg));
-	*feature_enabled = false;
-
-	blob = prop_node->blob_ptr;
-	if (prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
-		if (blob) {
-			hw_cfg->len = blob->length;
-			hw_cfg->payload = blob->data;
-			*feature_enabled = true;
-		}
-	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
-		/* Check if local blob is Set */
-		if (!blob) {
-			if (prop_node->prop_val) {
-				hw_cfg->len = sizeof(prop_node->prop_val);
-				hw_cfg->payload = &prop_node->prop_val;
-			}
-		} else {
-			hw_cfg->len = (prop_node->prop_val) ? blob->length :
-					0;
-			hw_cfg->payload = (prop_node->prop_val) ? blob->data
-						: NULL;
-		}
-		if (prop_node->prop_val)
-			*feature_enabled = true;
-	} else if (prop_node->prop_flags & DRM_MODE_PROP_ENUM) {
-		*feature_enabled = (prop_node->prop_val != 0);
-		hw_cfg->len = sizeof(prop_node->prop_val);
-		hw_cfg->payload = &prop_node->prop_val;
-	} else {
-		DRM_ERROR("property type is not supported\n");
-	}
-}
-
-static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
-{
-	struct drm_property_blob *blob = prop_node->blob_ptr;
-
-	if (!blob)
-		return 0;
-	drm_property_blob_put(blob);
-	prop_node->blob_ptr = NULL;
-	return 0;
-}
-
-static int sde_cp_create_local_blob(struct drm_crtc *crtc, u32 feature, int len)
-{
-	int ret = -EINVAL;
-	bool found = false;
-	struct sde_cp_node *prop_node = NULL;
-	struct drm_property_blob *blob_ptr;
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
-	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
-		if (prop_node->feature == feature) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found || !(prop_node->prop_flags & DRM_MODE_PROP_RANGE)) {
-		DRM_ERROR("local blob create failed prop found %d flags %d\n",
-		       found, prop_node->prop_flags);
-		return ret;
-	}
-
-	blob_ptr = drm_property_create_blob(crtc->dev, len, NULL);
-	ret = (IS_ERR_OR_NULL(blob_ptr)) ? PTR_ERR(blob_ptr) : 0;
-	if (!ret)
-		prop_node->blob_ptr = blob_ptr;
-
-	return ret;
-}
-
-static void sde_cp_destroy_local_blob(struct sde_cp_node *prop_node)
-{
-	if (!(prop_node->prop_flags & DRM_MODE_PROP_BLOB) &&
-		prop_node->blob_ptr)
-		drm_property_blob_put(prop_node->blob_ptr);
-}
-
-static int sde_cp_handle_range_property(struct sde_cp_node *prop_node,
-					uint64_t val)
-{
-	int ret = 0;
-	struct drm_property_blob *blob_ptr = prop_node->blob_ptr;
-
-	if (!blob_ptr) {
-		prop_node->prop_val = val;
-		return 0;
-	}
-
-	if (!val) {
-		prop_node->prop_val = 0;
-		return 0;
-	}
-
-	ret = copy_from_user(blob_ptr->data, u64_to_user_ptr(val),
-			blob_ptr->length);
-	if (ret) {
-		DRM_ERROR("failed to get the property info ret %d", ret);
-		ret = -EFAULT;
-	} else {
-		prop_node->prop_val = val;
-	}
-
-	return ret;
-}
-
-static int sde_cp_disable_crtc_property(struct drm_crtc *crtc,
-					 struct drm_property *property,
-					 struct sde_cp_node *prop_node)
-{
-	int ret = -EINVAL;
-
-	if (property->flags & DRM_MODE_PROP_BLOB) {
-		ret = sde_cp_disable_crtc_blob_property(prop_node);
-	} else if (property->flags & DRM_MODE_PROP_RANGE) {
-		ret = sde_cp_handle_range_property(prop_node, 0);
-	} else if (property->flags & DRM_MODE_PROP_ENUM) {
-		ret = 0;
-		prop_node->prop_val = 0;
-	}
-	return ret;
-}
-
-static int sde_cp_enable_crtc_blob_property(struct drm_crtc *crtc,
-					       struct sde_cp_node *prop_node,
-					       uint64_t val)
-{
-	struct drm_property_blob *blob = NULL;
-
-	/**
-	 * For non-blob based properties add support to create a blob
-	 * using the val and store the blob_ptr in prop_node.
-	 */
-	blob = drm_property_lookup_blob(crtc->dev, val);
-	if (!blob) {
-		DRM_ERROR("invalid blob id %lld\n", val);
-		return -EINVAL;
-	}
-	if (blob->length != prop_node->prop_blob_sz) {
-		DRM_ERROR("invalid blob len %zd exp %d feature %d\n",
-		    blob->length, prop_node->prop_blob_sz, prop_node->feature);
-		drm_property_blob_put(blob);
-		return -EINVAL;
-	}
-	/* Release refernce to existing payload of the property */
-	if (prop_node->blob_ptr)
-		drm_property_blob_put(prop_node->blob_ptr);
-
-	prop_node->blob_ptr = blob;
-	return 0;
-}
-
-static int sde_cp_enable_crtc_property(struct drm_crtc *crtc,
-				       struct drm_property *property,
-				       struct sde_cp_node *prop_node,
-				       uint64_t val)
-{
-	int ret = -EINVAL;
-
-	if (property->flags & DRM_MODE_PROP_BLOB) {
-		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
-	} else if (property->flags & DRM_MODE_PROP_RANGE) {
-		ret = sde_cp_handle_range_property(prop_node, val);
-	} else if (property->flags & DRM_MODE_PROP_ENUM) {
-		ret = 0;
-		prop_node->prop_val = val;
-	}
-	return ret;
-}
-
-static struct sde_kms *get_kms(struct drm_crtc *crtc)
-{
-	struct msm_drm_private *priv = crtc->dev->dev_private;
-
-	return to_sde_kms(priv->kms);
-}
-
-static void sde_cp_crtc_prop_attach(struct sde_cp_prop_attach *prop_attach)
-{
-
-	struct sde_crtc *sde_crtc = to_sde_crtc(prop_attach->crtc);
-
-	drm_object_attach_property(&prop_attach->crtc->base,
-				   prop_attach->prop, prop_attach->val);
-
-	INIT_LIST_HEAD(&prop_attach->prop_node->active_list);
-	INIT_LIST_HEAD(&prop_attach->prop_node->dirty_list);
-
-	prop_attach->prop_node->property_id = prop_attach->prop->base.id;
-	prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
-	prop_attach->prop_node->feature = prop_attach->feature;
-
-	if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
-		prop_attach->prop_node->is_dspp_feature = true;
-	else
-		prop_attach->prop_node->is_dspp_feature = false;
-
-	list_add(&prop_attach->prop_node->feature_list,
-		 &sde_crtc->feature_list);
-}
-
-void sde_cp_crtc_init(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = NULL;
-
-	if (!crtc) {
-		DRM_ERROR("invalid crtc %pK\n", crtc);
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	/* create blob to store histogram data */
-	sde_crtc->hist_blob = drm_property_create_blob(crtc->dev,
-				sizeof(struct drm_msm_hist), NULL);
-	if (IS_ERR(sde_crtc->hist_blob))
-		sde_crtc->hist_blob = NULL;
-
-	mutex_init(&sde_crtc->crtc_cp_lock);
-	INIT_LIST_HEAD(&sde_crtc->active_list);
-	INIT_LIST_HEAD(&sde_crtc->dirty_list);
-	INIT_LIST_HEAD(&sde_crtc->feature_list);
-	INIT_LIST_HEAD(&sde_crtc->ad_dirty);
-	INIT_LIST_HEAD(&sde_crtc->ad_active);
-	mutex_init(&sde_crtc->ltm_buffer_lock);
-	spin_lock_init(&sde_crtc->ltm_lock);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-}
-
-static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
-						   char *name,
-						   u32 feature)
-{
-	struct drm_property *prop;
-	struct sde_cp_node *prop_node = NULL;
-	struct msm_drm_private *priv;
-	struct sde_cp_prop_attach prop_attach;
-	uint64_t val = 0;
-
-	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
-		DRM_ERROR("invalid feature %d max %d\n", feature,
-		       SDE_CP_CRTC_MAX_FEATURES);
-		return;
-	}
-
-	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
-	if (!prop_node)
-		return;
-
-	priv = crtc->dev->dev_private;
-	prop = priv->cp_property[feature];
-
-	if (!prop) {
-		prop = drm_property_create_range(crtc->dev,
-				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
-		if (!prop) {
-			DRM_ERROR("property create failed: %s\n", name);
-			kfree(prop_node);
-			return;
-		}
-		priv->cp_property[feature] = prop;
-	}
-
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
-				feature, val);
-	sde_cp_crtc_prop_attach(&prop_attach);
-}
-
-static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
-					     char *name,
-					     u32 feature,
-					     uint64_t min, uint64_t max,
-					     uint64_t val)
-{
-	struct drm_property *prop;
-	struct sde_cp_node *prop_node = NULL;
-	struct msm_drm_private *priv;
-	struct sde_cp_prop_attach prop_attach;
-
-	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
-		DRM_ERROR("invalid feature %d max %d\n", feature,
-			  SDE_CP_CRTC_MAX_FEATURES);
-		return;
-	}
-
-	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
-	if (!prop_node)
-		return;
-
-	priv = crtc->dev->dev_private;
-	prop = priv->cp_property[feature];
-
-	if (!prop) {
-		prop = drm_property_create_range(crtc->dev, 0, name, min, max);
-		if (!prop) {
-			DRM_ERROR("property create failed: %s\n", name);
-			kfree(prop_node);
-			return;
-		}
-		priv->cp_property[feature] = prop;
-	}
-
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
-				feature, val);
-
-	sde_cp_crtc_prop_attach(&prop_attach);
-}
-
-static void sde_cp_crtc_install_blob_property(struct drm_crtc *crtc, char *name,
-			u32 feature, u32 blob_sz)
-{
-	struct drm_property *prop;
-	struct sde_cp_node *prop_node = NULL;
-	struct msm_drm_private *priv;
-	uint64_t val = 0;
-	struct sde_cp_prop_attach prop_attach;
-
-	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
-		DRM_ERROR("invalid feature %d max %d\n", feature,
-		       SDE_CP_CRTC_MAX_FEATURES);
-		return;
-	}
-
-	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
-	if (!prop_node)
-		return;
-
-	priv = crtc->dev->dev_private;
-	prop = priv->cp_property[feature];
-
-	if (!prop) {
-		prop = drm_property_create(crtc->dev,
-					   DRM_MODE_PROP_BLOB, name, 0);
-		if (!prop) {
-			DRM_ERROR("property create failed: %s\n", name);
-			kfree(prop_node);
-			return;
-		}
-		priv->cp_property[feature] = prop;
-	}
-
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
-				feature, val);
-	prop_node->prop_blob_sz = blob_sz;
-
-	sde_cp_crtc_prop_attach(&prop_attach);
-}
-
-static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc,
-	u32 feature, const struct drm_prop_enum_list *list, u32 enum_sz,
-	char *name)
-{
-	struct drm_property *prop;
-	struct sde_cp_node *prop_node = NULL;
-	struct msm_drm_private *priv;
-	uint64_t val = 0;
-	struct sde_cp_prop_attach prop_attach;
-
-	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
-		DRM_ERROR("invalid feature %d max %d\n", feature,
-		       SDE_CP_CRTC_MAX_FEATURES);
-		return;
-	}
-
-	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
-	if (!prop_node)
-		return;
-
-	priv = crtc->dev->dev_private;
-	prop = priv->cp_property[feature];
-
-	if (!prop) {
-		prop = drm_property_create_enum(crtc->dev, 0, name,
-			list, enum_sz);
-		if (!prop) {
-			DRM_ERROR("property create failed: %s\n", name);
-			kfree(prop_node);
-			return;
-		}
-		priv->cp_property[feature] = prop;
-	}
-
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
-				feature, val);
-
-	sde_cp_crtc_prop_attach(&prop_attach);
-}
-
-static struct sde_crtc_irq_info *_sde_cp_get_intr_node(u32 event,
-				struct sde_crtc *sde_crtc)
-{
-	bool found = false;
-	struct sde_crtc_irq_info *node = NULL;
-
-	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
-		if (node->event == event) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found)
-		node = NULL;
-
-	return node;
-}
-
-static void _sde_cp_crtc_enable_hist_irq(struct sde_crtc *sde_crtc)
-{
-	struct drm_crtc *crtc_drm = &sde_crtc->base;
-	struct sde_kms *kms = NULL;
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc_irq_info *node = NULL;
-	int i, irq_idx, ret = 0;
-	unsigned long flags;
-
-	if (!crtc_drm) {
-		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
-		return;
-	}
-
-	kms = get_kms(crtc_drm);
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		hw_lm = sde_crtc->mixers[i].hw_lm;
-		hw_dspp = sde_crtc->mixers[i].hw_dspp;
-		if (!hw_lm->cfg.right_mixer)
-			break;
-	}
-
-	if (!hw_dspp) {
-		DRM_ERROR("invalid dspp\n");
-		return;
-	}
-
-	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
-					hw_dspp->idx);
-	if (irq_idx < 0) {
-		DRM_ERROR("failed to get irq idx\n");
-		return;
-	}
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, sde_crtc);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	if (!node)
-		return;
-
-	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_DISABLED) {
-		ret = sde_core_irq_enable(kms, &irq_idx, 1);
-		if (ret)
-			DRM_ERROR("failed to enable irq %d\n", irq_idx);
-		else
-			node->state = IRQ_ENABLED;
-	}
-	spin_unlock_irqrestore(&node->state_lock, flags);
-}
-
-static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
-				   struct sde_crtc *sde_crtc)
-{
-	struct sde_hw_cp_cfg hw_cfg;
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_dspp *hw_dspp;
-	u32 num_mixers = sde_crtc->num_mixers;
-	int i = 0, ret = 0;
-	bool feature_enabled = false;
-	struct sde_mdss_cfg *catalog = NULL;
-
-	memset(&hw_cfg, 0, sizeof(hw_cfg));
-	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
-	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
-	hw_cfg.last_feature = 0;
-
-	for (i = 0; i < num_mixers; i++) {
-		hw_dspp = sde_crtc->mixers[i].hw_dspp;
-		if (!hw_dspp || i >= DSPP_MAX)
-			continue;
-		hw_cfg.dspp[i] = hw_dspp;
-	}
-
-	if ((prop_node->feature >= SDE_CP_CRTC_MAX_FEATURES) ||
-			crtc_feature_wrappers[prop_node->feature] == NULL) {
-		ret = -EINVAL;
-	} else {
-		set_feature_wrapper set_feature =
-			crtc_feature_wrappers[prop_node->feature];
-		catalog = get_kms(&sde_crtc->base)->catalog;
-		hw_cfg.broadcast_disabled = catalog->dma_cfg.broadcast_disabled;
-
-		for (i = 0; i < num_mixers && !ret; i++) {
-			hw_lm = sde_crtc->mixers[i].hw_lm;
-			hw_dspp = sde_crtc->mixers[i].hw_dspp;
-			if (!hw_lm) {
-				ret = -EINVAL;
-				continue;
-			}
-			hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
-			hw_cfg.mixer_info = hw_lm;
-			hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
-			hw_cfg.displayv = hw_lm->cfg.out_height;
-
-			ret = set_feature(hw_dspp, &hw_cfg, sde_crtc);
-			if (ret)
-				break;
-		}
-
-		if (ret) {
-			DRM_ERROR("failed to %s feature %d\n",
-				((feature_enabled) ? "enable" : "disable"),
-				prop_node->feature);
-			return;
-		}
-	}
-
-	if (feature_enabled) {
-		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
-				 prop_node->property_id);
-		sde_cp_update_list(prop_node, sde_crtc, false);
-	} else {
-		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
-			 prop_node->property_id);
-		list_del_init(&prop_node->active_list);
-	}
-	/* Programming of feature done remove from dirty list */
-	list_del_init(&prop_node->dirty_list);
-}
-
-void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = NULL;
-	bool set_dspp_flush = false, set_lm_flush = false;
-	struct sde_cp_node *prop_node = NULL, *n = NULL;
-	struct sde_hw_ctl *ctl;
-	u32 num_mixers = 0, i = 0;
-
-	if (!crtc || !crtc->dev) {
-		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
-			  (crtc ? crtc->dev : NULL));
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	num_mixers = sde_crtc->num_mixers;
-	if (!num_mixers) {
-		DRM_DEBUG_DRIVER("no mixers for this crtc\n");
-		return;
-	}
-
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-
-	/* Check if dirty lists are empty and ad features are disabled for
-	 * early return. If ad properties are active then we need to issue
-	 * dspp flush.
-	 **/
-	if (list_empty(&sde_crtc->dirty_list) &&
-		list_empty(&sde_crtc->ad_dirty)) {
-		if (list_empty(&sde_crtc->ad_active)) {
-			DRM_DEBUG_DRIVER("Dirty list is empty\n");
-			goto exit;
-		}
-		set_dspp_flush = true;
-	}
-
-	if (!list_empty(&sde_crtc->ad_active))
-		sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
-
-	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
-				dirty_list) {
-		sde_cp_crtc_setfeature(prop_node, sde_crtc);
-		/* Set the flush flag to true */
-		if (prop_node->is_dspp_feature)
-			set_dspp_flush = true;
-		else
-			set_lm_flush = true;
-	}
-
-	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
-				dirty_list) {
-		set_dspp_flush = true;
-		sde_cp_crtc_setfeature(prop_node, sde_crtc);
-	}
-
-	for (i = 0; i < num_mixers; i++) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (!ctl)
-			continue;
-		if (set_dspp_flush && ctl->ops.update_bitmask_dspp
-				&& sde_crtc->mixers[i].hw_dspp) {
-			ctl->ops.update_bitmask_dspp(ctl,
-					sde_crtc->mixers[i].hw_dspp->idx, 1);
-		}
-		if (set_lm_flush && ctl->ops.update_bitmask_mixer
-				&& sde_crtc->mixers[i].hw_lm) {
-			ctl->ops.update_bitmask_mixer(ctl,
-					sde_crtc->mixers[i].hw_lm->idx, 1);
-		}
-	}
-exit:
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-}
-
-void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
-{
-	struct sde_kms *kms = NULL;
-	struct sde_crtc *sde_crtc = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	unsigned long features = 0;
-	int i = 0;
-	struct msm_drm_private *priv;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		DRM_ERROR("invalid crtc %pK dev %pK\n",
-		       crtc, ((crtc) ? crtc->dev : NULL));
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	kms = get_kms(crtc);
-	if (!kms || !kms->catalog) {
-		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
-		 kms, ((kms) ? kms->catalog : NULL), sde_crtc);
-		return;
-	}
-
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-
-	/**
-	 * Function can be called during the atomic_check with test_only flag
-	 * and actual commit. Allocate properties only if feature list is
-	 * empty during the atomic_check with test_only flag.
-	 */
-	if (!list_empty(&sde_crtc->feature_list))
-		goto exit;
-
-	catalog = kms->catalog;
-	priv = crtc->dev->dev_private;
-	/**
-	 * DSPP/LM properties are global to all the CRTCS.
-	 * Properties are created for first CRTC and re-used for later
-	 * crtcs.
-	 */
-	if (!priv->cp_property) {
-		priv->cp_property = kzalloc((sizeof(priv->cp_property) *
-				SDE_CP_CRTC_MAX_FEATURES), GFP_KERNEL);
-		setup_dspp_prop_install_funcs(dspp_prop_install_func);
-		setup_lm_prop_install_funcs(lm_prop_install_func);
-		setup_crtc_feature_wrappers(crtc_feature_wrappers);
-	}
-	if (!priv->cp_property)
-		goto exit;
-
-	if (!catalog->dspp_count)
-		goto lm_property;
-
-	/* Check for all the DSPP properties and attach it to CRTC */
-	features = catalog->dspp[0].features;
-	for (i = 0; i < SDE_DSPP_MAX; i++) {
-		if (!test_bit(i, &features))
-			continue;
-		if (dspp_prop_install_func[i])
-			dspp_prop_install_func[i](crtc);
-	}
-
-lm_property:
-	if (!catalog->mixer_count)
-		goto exit;
-
-	/* Check for all the LM properties and attach it to CRTC */
-	features = catalog->mixer[0].features;
-	for (i = 0; i < SDE_MIXER_MAX; i++) {
-		if (!test_bit(i, &features))
-			continue;
-		if (lm_prop_install_func[i])
-			lm_prop_install_func[i](crtc);
-	}
-exit:
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-
-}
-
-int sde_cp_crtc_set_property(struct drm_crtc *crtc,
-				struct drm_property *property,
-				uint64_t val)
-{
-	struct sde_cp_node *prop_node = NULL;
-	struct sde_crtc *sde_crtc = NULL;
-	int ret = 0, i = 0, dspp_cnt, lm_cnt;
-	u8 found = 0;
-
-	if (!crtc || !property) {
-		DRM_ERROR("invalid crtc %pK property %pK\n", crtc, property);
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return -EINVAL;
-	}
-
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
-		if (property->base.id == prop_node->property_id) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		ret = -ENOENT;
-		goto exit;
-	}
-
-	/**
-	 * sde_crtc is virtual ensure that hardware has been attached to the
-	 * crtc. Check LM and dspp counts based on whether feature is a
-	 * dspp/lm feature.
-	 */
-	if (!sde_crtc->num_mixers ||
-	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
-		DRM_INFO("Invalid mixer config act cnt %d max cnt %ld\n",
-			sde_crtc->num_mixers,
-				(long)ARRAY_SIZE(sde_crtc->mixers));
-		ret = -EPERM;
-		goto exit;
-	}
-
-	dspp_cnt = 0;
-	lm_cnt = 0;
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (sde_crtc->mixers[i].hw_dspp)
-			dspp_cnt++;
-		if (sde_crtc->mixers[i].hw_lm)
-			lm_cnt++;
-	}
-
-	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
-		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
-			sde_crtc->num_mixers);
-		ret = -EINVAL;
-		goto exit;
-	} else if (lm_cnt < sde_crtc->num_mixers) {
-		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
-			sde_crtc->num_mixers);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
-	if (ret) {
-		DRM_ERROR("ad property validation failed ret %d\n", ret);
-		goto exit;
-	}
-
-	/* remove the property from dirty list */
-	list_del_init(&prop_node->dirty_list);
-
-	if (!val)
-		ret = sde_cp_disable_crtc_property(crtc, property, prop_node);
-	else
-		ret = sde_cp_enable_crtc_property(crtc, property,
-						  prop_node, val);
-
-	if (!ret) {
-		/* remove the property from active list */
-		list_del_init(&prop_node->active_list);
-		/* Mark the feature as dirty */
-		sde_cp_update_list(prop_node, sde_crtc, true);
-	}
-exit:
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-	return ret;
-}
-
-int sde_cp_crtc_get_property(struct drm_crtc *crtc,
-			     struct drm_property *property, uint64_t *val)
-{
-	struct sde_cp_node *prop_node = NULL;
-	struct sde_crtc *sde_crtc = NULL;
-
-	if (!crtc || !property || !val) {
-		DRM_ERROR("invalid crtc %pK property %pK val %pK\n",
-			  crtc, property, val);
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return -EINVAL;
-	}
-	/* Return 0 if property is not supported */
-	*val = 0;
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
-		if (property->base.id == prop_node->property_id) {
-			*val = prop_node->prop_val;
-			break;
-		}
-	}
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-	return 0;
-}
-
-void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = NULL;
-	struct sde_cp_node *prop_node = NULL, *n = NULL;
-	u32 i = 0;
-
-	if (!crtc) {
-		DRM_ERROR("invalid crtc %pK\n", crtc);
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	list_for_each_entry_safe(prop_node, n, &sde_crtc->feature_list,
-				 feature_list) {
-		if (prop_node->prop_flags & DRM_MODE_PROP_BLOB
-		    && prop_node->blob_ptr)
-			drm_property_blob_put(prop_node->blob_ptr);
-
-		list_del_init(&prop_node->active_list);
-		list_del_init(&prop_node->dirty_list);
-		list_del_init(&prop_node->feature_list);
-		sde_cp_destroy_local_blob(prop_node);
-		kfree(prop_node);
-	}
-
-	if (sde_crtc->hist_blob)
-		drm_property_blob_put(sde_crtc->hist_blob);
-
-	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++) {
-		if (sde_crtc->ltm_buffers[i]) {
-			msm_gem_put_vaddr(sde_crtc->ltm_buffers[i]->gem);
-			drm_framebuffer_put(sde_crtc->ltm_buffers[i]->fb);
-			msm_gem_put_iova(sde_crtc->ltm_buffers[i]->gem,
-					sde_crtc->ltm_buffers[i]->aspace);
-			kfree(sde_crtc->ltm_buffers[i]);
-			sde_crtc->ltm_buffers[i] = NULL;
-		}
-	}
-	sde_crtc->ltm_buffer_cnt = 0;
-	sde_crtc->ltm_hist_en = false;
-
-	mutex_destroy(&sde_crtc->crtc_cp_lock);
-	INIT_LIST_HEAD(&sde_crtc->active_list);
-	INIT_LIST_HEAD(&sde_crtc->dirty_list);
-	INIT_LIST_HEAD(&sde_crtc->feature_list);
-	INIT_LIST_HEAD(&sde_crtc->ad_dirty);
-	INIT_LIST_HEAD(&sde_crtc->ad_active);
-	mutex_destroy(&sde_crtc->ltm_buffer_lock);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-}
-
-void sde_cp_crtc_suspend(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = NULL;
-	struct sde_cp_node *prop_node = NULL, *n = NULL;
-	bool ad_suspend = false;
-	unsigned long irq_flags;
-
-	if (!crtc) {
-		DRM_ERROR("crtc %pK\n", crtc);
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
-				 active_list) {
-		sde_cp_update_list(prop_node, sde_crtc, true);
-		list_del_init(&prop_node->active_list);
-	}
-
-	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_active,
-				 active_list) {
-		sde_cp_update_list(prop_node, sde_crtc, true);
-		list_del_init(&prop_node->active_list);
-		ad_suspend = true;
-	}
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	sde_crtc->ltm_hist_en = false;
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-
-	if (ad_suspend)
-		sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND);
-}
-
-void sde_cp_crtc_resume(struct drm_crtc *crtc)
-{
-	/* placeholder for operations needed during resume */
-}
-
-void sde_cp_crtc_clear(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = NULL;
-	unsigned long flags;
-
-	if (!crtc) {
-		DRM_ERROR("crtc %pK\n", crtc);
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	mutex_lock(&sde_crtc->crtc_cp_lock);
-	list_del_init(&sde_crtc->active_list);
-	list_del_init(&sde_crtc->dirty_list);
-	list_del_init(&sde_crtc->ad_active);
-	list_del_init(&sde_crtc->ad_dirty);
-	mutex_unlock(&sde_crtc->crtc_cp_lock);
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	list_del_init(&sde_crtc->user_event_list);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-}
-
-static void dspp_pcc_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->pcc.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_PCC_V", version);
-	switch (version) {
-	case 1:
-	case 4:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_hsic_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->hsic.version >> 16;
-	switch (version) {
-	case 1:
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_HSIC_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_HSIC, sizeof(struct drm_msm_pa_hsic));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_memcolor_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->memcolor.version >> 16;
-	switch (version) {
-	case 1:
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_MEMCOL_SKIN_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_MEMCOL_SKIN,
-			sizeof(struct drm_msm_memcol));
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_MEMCOL_SKY_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_MEMCOL_SKY,
-			sizeof(struct drm_msm_memcol));
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_MEMCOL_FOLIAGE_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_MEMCOL_FOLIAGE,
-			sizeof(struct drm_msm_memcol));
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_MEMCOL_PROT_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_MEMCOL_PROT,
-			sizeof(struct drm_msm_memcol));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_sixzone_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->sixzone.version >> 16;
-	switch (version) {
-	case 1:
-		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-			"SDE_DSPP_PA_SIXZONE_V", version);
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_SIXZONE,
-			sizeof(struct drm_msm_sixzone));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_vlut_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->vlut.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_VLUT_V", version);
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_range_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_VLUT, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc,
-			SDE_CP_CRTC_DSPP_VLUT,
-			sizeof(struct drm_msm_pa_vlut));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_ad_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->ad.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_AD_V", version);
-	switch (version) {
-	case 3:
-		sde_cp_crtc_install_immutable_property(crtc,
-			feature_name, SDE_CP_CRTC_DSPP_AD);
-		break;
-	case 4:
-		sde_cp_crtc_install_immutable_property(crtc,
-			feature_name, SDE_CP_CRTC_DSPP_AD);
-
-		sde_cp_crtc_install_enum_property(crtc,
-			SDE_CP_CRTC_DSPP_AD_MODE, ad4_modes,
-			ARRAY_SIZE(ad4_modes), "SDE_DSPP_AD_V4_MODE");
-
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INIT",
-			SDE_CP_CRTC_DSPP_AD_INIT, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_INIT,
-			sizeof(struct drm_msm_ad4_init));
-
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_CFG",
-			SDE_CP_CRTC_DSPP_AD_CFG, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_CFG,
-			sizeof(struct drm_msm_ad4_cfg));
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_AD_V4_ASSERTIVENESS",
-			SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS, 0, (BIT(8) - 1), 0);
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_AD_V4_STRENGTH",
-			SDE_CP_CRTC_DSPP_AD_STRENGTH, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_STRENGTH,
-			sizeof(struct drm_msm_ad4_manual_str_cfg));
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INPUT",
-			SDE_CP_CRTC_DSPP_AD_INPUT, 0, U16_MAX, 0);
-		sde_cp_crtc_install_range_property(crtc,
-				"SDE_DSPP_AD_V4_BACKLIGHT",
-			SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1),
-			0);
-
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_ROI",
-			SDE_CP_CRTC_DSPP_AD_ROI, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_ROI,
-			sizeof(struct drm_msm_ad4_roi_cfg));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_ltm_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->dspp[0].sblk->ltm.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_LTM_V", version);
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_immutable_property(crtc,
-			feature_name, SDE_CP_CRTC_DSPP_LTM);
-
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_LTM_INIT_V1",
-			SDE_CP_CRTC_DSPP_LTM_INIT, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_INIT,
-			sizeof(struct drm_msm_ltm_init_param));
-
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_LTM_ROI_V1",
-			SDE_CP_CRTC_DSPP_LTM_ROI, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_ROI,
-			sizeof(struct drm_msm_ltm_cfg_param));
-
-		sde_cp_crtc_install_enum_property(crtc,
-			SDE_CP_CRTC_DSPP_LTM_HIST_CTL, sde_ltm_hist_modes,
-			ARRAY_SIZE(sde_ltm_hist_modes),
-			"SDE_DSPP_LTM_HIST_CTRL_V1");
-
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_LTM_HIST_THRESH_V1",
-			SDE_CP_CRTC_DSPP_LTM_HIST_THRESH, 0, (BIT(10) - 1), 0);
-
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_LTM_SET_BUF_V1",
-			SDE_CP_CRTC_DSPP_LTM_SET_BUF, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_SET_BUF,
-			sizeof(struct drm_msm_ltm_buffers_ctrl));
-
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_LTM_QUEUE_BUF_V1",
-			SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF, 0, U64_MAX, 0);
-
-		sde_cp_crtc_install_range_property(crtc,
-			"SDE_DSPP_LTM_VLUT_V1",
-			SDE_CP_CRTC_DSPP_LTM_VLUT, 0, U64_MAX, 0);
-		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_VLUT,
-			sizeof(struct drm_msm_ltm_data));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void lm_gc_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-	version = catalog->mixer[0].sblk->gc.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		 "SDE_LM_GC_V", version);
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_LM_GC, sizeof(struct drm_msm_pgc_lut));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_gamut_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->gamut.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_GAMUT_V", version);
-	switch (version) {
-	case 4:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_GAMUT,
-			sizeof(struct drm_msm_3d_gamut));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_gc_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->gc.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_GC_V", version);
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_GC, sizeof(struct drm_msm_pgc_lut));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_igc_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->igc.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_IGC_V", version);
-	switch (version) {
-	case 3:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_IGC, sizeof(struct drm_msm_igc_lut));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_hist_install_property(struct drm_crtc *crtc)
-{
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->hist.version >> 16;
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_enum_property(crtc,
-			SDE_CP_CRTC_DSPP_HIST_CTRL, sde_hist_modes,
-			ARRAY_SIZE(sde_hist_modes), "SDE_DSPP_HIST_CTRL_V1");
-		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_HIST_IRQ_V1",
-			SDE_CP_CRTC_DSPP_HIST_IRQ, 0, U16_MAX, 0);
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void dspp_dither_install_property(struct drm_crtc *crtc)
-{
-	char feature_name[256];
-	struct sde_kms *kms = NULL;
-	struct sde_mdss_cfg *catalog = NULL;
-	u32 version;
-
-	kms = get_kms(crtc);
-	catalog = kms->catalog;
-
-	version = catalog->dspp[0].sblk->dither.version >> 16;
-	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
-		"SDE_DSPP_PA_DITHER_V", version);
-	switch (version) {
-	case 1:
-		sde_cp_crtc_install_blob_property(crtc, feature_name,
-			SDE_CP_CRTC_DSPP_DITHER,
-			sizeof(struct drm_msm_pa_dither));
-		break;
-	default:
-		DRM_ERROR("version %d not supported\n", version);
-		break;
-	}
-}
-
-static void sde_cp_update_list(struct sde_cp_node *prop_node,
-		struct sde_crtc *crtc, bool dirty_list)
-{
-	switch (prop_node->feature) {
-	case SDE_CP_CRTC_DSPP_AD_MODE:
-	case SDE_CP_CRTC_DSPP_AD_INIT:
-	case SDE_CP_CRTC_DSPP_AD_CFG:
-	case SDE_CP_CRTC_DSPP_AD_INPUT:
-	case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
-	case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
-	case SDE_CP_CRTC_DSPP_AD_STRENGTH:
-	case SDE_CP_CRTC_DSPP_AD_ROI:
-		if (dirty_list)
-			list_add_tail(&prop_node->dirty_list, &crtc->ad_dirty);
-		else
-			list_add_tail(&prop_node->active_list,
-					&crtc->ad_active);
-		break;
-	case SDE_CP_CRTC_DSPP_LTM_SET_BUF:
-	case SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF:
-		if (dirty_list)
-			list_add_tail(&prop_node->dirty_list,
-					&crtc->dirty_list);
-		break;
-	default:
-		/* color processing properties handle here */
-		if (dirty_list)
-			list_add_tail(&prop_node->dirty_list,
-					&crtc->dirty_list);
-		else
-			list_add_tail(&prop_node->active_list,
-					&crtc->active_list);
-		break;
-	}
-}
-
-static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
-		struct sde_crtc *crtc)
-{
-	int i = 0, ret = 0;
-	u32 ad_prop;
-
-	for (i = 0; i < crtc->num_mixers && !ret; i++) {
-		if (!crtc->mixers[i].hw_dspp) {
-			ret = -EINVAL;
-			continue;
-		}
-		switch (prop_node->feature) {
-		case SDE_CP_CRTC_DSPP_AD_MODE:
-			ad_prop = AD_MODE;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_INIT:
-			ad_prop = AD_INIT;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_CFG:
-			ad_prop = AD_CFG;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_INPUT:
-			ad_prop = AD_INPUT;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
-			ad_prop = AD_ASSERTIVE;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
-			ad_prop = AD_BACKLIGHT;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_STRENGTH:
-			ad_prop = AD_STRENGTH;
-			break;
-		case SDE_CP_CRTC_DSPP_AD_ROI:
-			ad_prop = AD_ROI;
-			break;
-		default:
-			/* Not an AD property */
-			return 0;
-		}
-		if (!crtc->mixers[i].hw_dspp->ops.validate_ad)
-			ret = -EINVAL;
-		else
-			ret = crtc->mixers[i].hw_dspp->ops.validate_ad(
-				crtc->mixers[i].hw_dspp, &ad_prop);
-	}
-	return ret;
-}
-
-static void sde_cp_ad_interrupt_cb(void *arg, int irq_idx)
-{
-	struct sde_crtc *crtc = arg;
-
-	sde_crtc_event_queue(&crtc->base, sde_cp_notify_ad_event,
-							NULL, true);
-}
-
-static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
-{
-	uint32_t input_bl = 0, output_bl = 0;
-	uint32_t scale = MAX_SV_BL_SCALE_LEVEL;
-	struct sde_hw_mixer *hw_lm = NULL;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	u32 num_mixers;
-	struct sde_crtc *crtc;
-	struct drm_event event;
-	int i;
-	struct msm_drm_private *priv;
-	struct sde_kms *kms;
-	int ret;
-
-	crtc = to_sde_crtc(crtc_drm);
-	num_mixers = crtc->num_mixers;
-	if (!num_mixers)
-		return;
-
-	for (i = 0; i < num_mixers; i++) {
-		hw_lm = crtc->mixers[i].hw_lm;
-		hw_dspp = crtc->mixers[i].hw_dspp;
-		if (!hw_lm->cfg.right_mixer)
-			break;
-	}
-
-	if (!hw_dspp)
-		return;
-
-	kms = get_kms(crtc_drm);
-	if (!kms || !kms->dev) {
-		SDE_ERROR("invalid arg(s)\n");
-		return;
-	}
-
-	priv = kms->dev->dev_private;
-	ret = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
-	if (ret) {
-		SDE_ERROR("failed to enable power resource %d\n", ret);
-		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
-		return;
-	}
-
-	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_IN_OUT_BACKLIGHT,
-			&input_bl, &output_bl);
-
-	sde_power_resource_enable(&priv->phandle, kms->core_client,
-					false);
-	if (!input_bl || input_bl < output_bl)
-		return;
-
-	scale = (output_bl * MAX_SV_BL_SCALE_LEVEL) / input_bl;
-	event.length = sizeof(u32);
-	event.type = DRM_EVENT_AD_BACKLIGHT;
-	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
-			&event, (u8 *)&scale);
-}
-
-int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
-	struct sde_irq_callback *ad_irq)
-{
-	struct sde_kms *kms = NULL;
-	u32 num_mixers;
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc *crtc;
-	int i;
-	int irq_idx, ret;
-	unsigned long flags;
-	struct sde_cp_node prop_node;
-	struct sde_crtc_irq_info *node = NULL;
-
-	if (!crtc_drm || !ad_irq) {
-		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, ad_irq);
-		return -EINVAL;
-	}
-
-	crtc = to_sde_crtc(crtc_drm);
-	if (!crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
-		return -EINVAL;
-	}
-
-	kms = get_kms(crtc_drm);
-	num_mixers = crtc->num_mixers;
-
-	memset(&prop_node, 0, sizeof(prop_node));
-	prop_node.feature = SDE_CP_CRTC_DSPP_AD_BACKLIGHT;
-	ret = sde_cp_ad_validate_prop(&prop_node, crtc);
-	if (ret) {
-		DRM_ERROR("Ad not supported ret %d\n", ret);
-		goto exit;
-	}
-
-	for (i = 0; i < num_mixers; i++) {
-		hw_lm = crtc->mixers[i].hw_lm;
-		hw_dspp = crtc->mixers[i].hw_dspp;
-		if (!hw_lm->cfg.right_mixer)
-			break;
-	}
-
-	if (!hw_dspp) {
-		DRM_ERROR("invalid dspp\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_AD4_BL_DONE,
-			hw_dspp->idx);
-	if (irq_idx < 0) {
-		DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
-		ret = irq_idx;
-		goto exit;
-	}
-
-	node = container_of(ad_irq, struct sde_crtc_irq_info, irq);
-
-	if (!en) {
-		spin_lock_irqsave(&node->state_lock, flags);
-		if (node->state == IRQ_ENABLED) {
-			ret = sde_core_irq_disable(kms, &irq_idx, 1);
-			if (ret)
-				DRM_ERROR("disable irq %d error %d\n",
-					irq_idx, ret);
-			else
-				node->state = IRQ_NOINIT;
-		} else {
-			node->state = IRQ_NOINIT;
-		}
-		spin_unlock_irqrestore(&node->state_lock, flags);
-		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
-		ret = 0;
-		goto exit;
-	}
-
-	ad_irq->arg = crtc;
-	ad_irq->func = sde_cp_ad_interrupt_cb;
-	ret = sde_core_irq_register_callback(kms, irq_idx, ad_irq);
-	if (ret) {
-		DRM_ERROR("failed to register the callback ret %d\n", ret);
-		goto exit;
-	}
-
-	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
-		ret = sde_core_irq_enable(kms, &irq_idx, 1);
-		if (ret) {
-			DRM_ERROR("enable irq %d error %d\n", irq_idx, ret);
-			sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
-		} else {
-			node->state = IRQ_ENABLED;
-		}
-	}
-	spin_unlock_irqrestore(&node->state_lock, flags);
-
-exit:
-	return ret;
-}
-
-static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
-		enum ad_property ad_prop)
-{
-	struct sde_ad_hw_cfg ad_cfg;
-	struct sde_hw_cp_cfg hw_cfg;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_hw_mixer *hw_lm = NULL;
-	u32 num_mixers = sde_crtc->num_mixers;
-	int i = 0, ret = 0;
-
-	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
-
-	for (i = 0; i < num_mixers && !ret; i++) {
-		hw_lm = sde_crtc->mixers[i].hw_lm;
-		hw_dspp = sde_crtc->mixers[i].hw_dspp;
-		if (!hw_lm || !hw_dspp || !hw_dspp->ops.validate_ad ||
-				!hw_dspp->ops.setup_ad) {
-			ret = -EINVAL;
-			continue;
-		}
-
-		hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
-		hw_cfg.displayv = hw_lm->cfg.out_height;
-		hw_cfg.mixer_info = hw_lm;
-		ad_cfg.prop = ad_prop;
-		ad_cfg.hw_cfg = &hw_cfg;
-		ret = hw_dspp->ops.validate_ad(hw_dspp, (u32 *)&ad_prop);
-		if (!ret)
-			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
-	}
-}
-
-void sde_cp_crtc_pre_ipc(struct drm_crtc *drm_crtc)
-{
-	struct sde_crtc *sde_crtc;
-
-	sde_crtc = to_sde_crtc(drm_crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	sde_cp_ad_set_prop(sde_crtc, AD_IPC_SUSPEND);
-}
-
-void sde_cp_crtc_post_ipc(struct drm_crtc *drm_crtc)
-{
-	struct sde_crtc *sde_crtc;
-
-	sde_crtc = to_sde_crtc(drm_crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESUME);
-}
-
-static void sde_cp_hist_interrupt_cb(void *arg, int irq_idx)
-{
-	struct sde_crtc *crtc = arg;
-	struct drm_crtc *crtc_drm = &crtc->base;
-	struct sde_hw_dspp *hw_dspp;
-	struct sde_kms *kms;
-	struct sde_crtc_irq_info *node = NULL;
-	u32 i;
-	int ret = 0;
-	unsigned long flags;
-
-	/* disable histogram irq */
-	kms = get_kms(crtc_drm);
-	spin_lock_irqsave(&crtc->spin_lock, flags);
-	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc);
-	spin_unlock_irqrestore(&crtc->spin_lock, flags);
-
-	if (!node) {
-		DRM_DEBUG_DRIVER("cannot find histogram event node in crtc\n");
-		return;
-	}
-
-	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_ENABLED) {
-		if (sde_core_irq_disable_nolock(kms, irq_idx)) {
-			DRM_ERROR("failed to disable irq %d, ret %d\n",
-				irq_idx, ret);
-			spin_unlock_irqrestore(&node->state_lock, flags);
-			return;
-		}
-		node->state = IRQ_DISABLED;
-	}
-	spin_unlock_irqrestore(&node->state_lock, flags);
-
-	/* lock histogram buffer */
-	for (i = 0; i < crtc->num_mixers; i++) {
-		hw_dspp = crtc->mixers[i].hw_dspp;
-		if (hw_dspp && hw_dspp->ops.lock_histogram)
-			hw_dspp->ops.lock_histogram(hw_dspp, NULL);
-	}
-
-	/* notify histogram event */
-	sde_crtc_event_queue(crtc_drm, sde_cp_notify_hist_event,
-							NULL, true);
-}
-
-static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
-{
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc *crtc;
-	struct drm_event event;
-	struct drm_msm_hist *hist_data;
-	struct msm_drm_private *priv;
-	struct sde_kms *kms;
-	int ret;
-	u32 i;
-
-	if (!crtc_drm) {
-		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
-		return;
-	}
-
-	crtc = to_sde_crtc(crtc_drm);
-	if (!crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
-		return;
-	}
-
-	if (!crtc->hist_blob)
-		return;
-
-	kms = get_kms(crtc_drm);
-	if (!kms || !kms->dev) {
-		SDE_ERROR("invalid arg(s)\n");
-		return;
-	}
-
-	priv = kms->dev->dev_private;
-	ret = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
-	if (ret) {
-		SDE_ERROR("failed to enable power resource %d\n", ret);
-		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
-		return;
-	}
-
-	/* read histogram data into blob */
-	hist_data = (struct drm_msm_hist *)crtc->hist_blob->data;
-	memset(hist_data->data, 0, sizeof(hist_data->data));
-	for (i = 0; i < crtc->num_mixers; i++) {
-		hw_dspp = crtc->mixers[i].hw_dspp;
-		if (!hw_dspp || !hw_dspp->ops.read_histogram) {
-			DRM_ERROR("invalid dspp %pK or read_histogram func\n",
-				hw_dspp);
-			sde_power_resource_enable(&priv->phandle,
-						kms->core_client, false);
-			return;
-		}
-		hw_dspp->ops.read_histogram(hw_dspp, hist_data);
-	}
-
-	sde_power_resource_enable(&priv->phandle, kms->core_client,
-					false);
-	/* send histogram event with blob id */
-	event.length = sizeof(u32);
-	event.type = DRM_EVENT_HISTOGRAM;
-	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
-			&event, (u8 *)(&crtc->hist_blob->base.id));
-}
-
-int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
-	struct sde_irq_callback *hist_irq)
-{
-	struct sde_kms *kms = NULL;
-	u32 num_mixers;
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc *crtc;
-	struct sde_crtc_irq_info *node = NULL;
-	int i, irq_idx, ret = 0;
-	unsigned long flags;
-
-	if (!crtc_drm || !hist_irq) {
-		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, hist_irq);
-		return -EINVAL;
-	}
-
-	crtc = to_sde_crtc(crtc_drm);
-	if (!crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
-		return -EINVAL;
-	}
-
-	kms = get_kms(crtc_drm);
-	num_mixers = crtc->num_mixers;
-
-	for (i = 0; i < num_mixers; i++) {
-		hw_lm = crtc->mixers[i].hw_lm;
-		hw_dspp = crtc->mixers[i].hw_dspp;
-		if (!hw_lm->cfg.right_mixer)
-			break;
-	}
-
-	if (!hw_dspp) {
-		DRM_ERROR("invalid dspp\n");
-		ret = -EPERM;
-		goto exit;
-	}
-
-	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
-			hw_dspp->idx);
-	if (irq_idx < 0) {
-		DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
-		ret = irq_idx;
-		goto exit;
-	}
-
-	node = container_of(hist_irq, struct sde_crtc_irq_info, irq);
-
-	/* deregister histogram irq */
-	if (!en) {
-		spin_lock_irqsave(&node->state_lock, flags);
-		if (node->state == IRQ_ENABLED) {
-			node->state = IRQ_DISABLING;
-			spin_unlock_irqrestore(&node->state_lock, flags);
-			ret = sde_core_irq_disable(kms, &irq_idx, 1);
-			spin_lock_irqsave(&node->state_lock, flags);
-			if (ret) {
-				DRM_ERROR("disable irq %d error %d\n",
-					irq_idx, ret);
-				node->state = IRQ_ENABLED;
-			} else {
-				node->state = IRQ_NOINIT;
-			}
-			spin_unlock_irqrestore(&node->state_lock, flags);
-		} else if (node->state == IRQ_DISABLED) {
-			node->state = IRQ_NOINIT;
-			spin_unlock_irqrestore(&node->state_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&node->state_lock, flags);
-		}
-
-		sde_core_irq_unregister_callback(kms, irq_idx, hist_irq);
-		goto exit;
-	}
-
-	/* register histogram irq */
-	hist_irq->arg = crtc;
-	hist_irq->func = sde_cp_hist_interrupt_cb;
-	ret = sde_core_irq_register_callback(kms, irq_idx, hist_irq);
-	if (ret) {
-		DRM_ERROR("failed to register the callback ret %d\n", ret);
-		goto exit;
-	}
-
-	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
-		ret = sde_core_irq_enable(kms, &irq_idx, 1);
-		if (ret) {
-			DRM_ERROR("enable irq %d error %d\n", irq_idx, ret);
-			sde_core_irq_unregister_callback(kms,
-				irq_idx, hist_irq);
-		} else {
-			node->state = IRQ_ENABLED;
-		}
-	}
-	spin_unlock_irqrestore(&node->state_lock, flags);
-
-exit:
-	return ret;
-}
-
-/* needs to be called within ltm_buffer_lock mutex */
-static void _sde_cp_crtc_free_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
-{
-	u32 i = 0, buffer_count = 0;
-	unsigned long irq_flags;
-
-	if (!sde_crtc) {
-		DRM_ERROR("invalid parameters sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (sde_crtc->ltm_hist_en) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_ERROR("cannot free LTM buffers when hist is enabled\n");
-		return;
-	}
-	if (!sde_crtc->ltm_buffer_cnt) {
-		/* ltm_buffers are already freed */
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		return;
-	}
-	if (!list_empty(&sde_crtc->ltm_buf_busy)) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_ERROR("ltm_buf_busy is not empty\n");
-		return;
-	}
-
-	buffer_count = sde_crtc->ltm_buffer_cnt;
-	sde_crtc->ltm_buffer_cnt = 0;
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-
-	for (i = 0; i < buffer_count && sde_crtc->ltm_buffers[i]; i++) {
-		msm_gem_put_vaddr(sde_crtc->ltm_buffers[i]->gem);
-		drm_framebuffer_put(sde_crtc->ltm_buffers[i]->fb);
-		msm_gem_put_iova(sde_crtc->ltm_buffers[i]->gem,
-			sde_crtc->ltm_buffers[i]->aspace);
-		kfree(sde_crtc->ltm_buffers[i]);
-		sde_crtc->ltm_buffers[i] = NULL;
-	}
-}
-
-/* needs to be called within ltm_buffer_lock mutex */
-static void _sde_cp_crtc_set_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_ltm_buffers_ctrl *buf_cfg;
-	struct drm_framebuffer *fb;
-	struct drm_crtc *crtc;
-	u32 size = 0, expected_size = 0;
-	u32 i = 0, j = 0, num = 0, iova_aligned;
-	int ret = 0;
-	unsigned long irq_flags;
-
-	if (!sde_crtc || !cfg) {
-		DRM_ERROR("invalid parameters sde_crtc %pK cfg %pK\n", sde_crtc,
-				cfg);
-		return;
-	}
-
-	crtc = &sde_crtc->base;
-	if (!crtc) {
-		DRM_ERROR("invalid parameters drm_crtc %pK\n", crtc);
-		return;
-	}
-
-	buf_cfg = hw_cfg->payload;
-	num = buf_cfg->num_of_buffers;
-	if (num == 0 || num > LTM_BUFFER_SIZE) {
-		DRM_ERROR("invalid buffer size %d\n", num);
-		return;
-	}
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (sde_crtc->ltm_buffer_cnt) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_DEBUG("%d ltm_buffers already allocated\n",
-			sde_crtc->ltm_buffer_cnt);
-		return;
-	}
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-
-	expected_size = sizeof(struct drm_msm_ltm_stats_data) + LTM_GUARD_BYTES;
-	for (i = 0; i < num; i++) {
-		sde_crtc->ltm_buffers[i] = kzalloc(
-			sizeof(struct sde_ltm_buffer), GFP_KERNEL);
-		if (IS_ERR_OR_NULL(sde_crtc->ltm_buffers[i]))
-			goto exit;
-
-		sde_crtc->ltm_buffers[i]->drm_fb_id = buf_cfg->fds[i];
-		fb = drm_framebuffer_lookup(crtc->dev, NULL, buf_cfg->fds[i]);
-		if (!fb) {
-			DRM_ERROR("unknown framebuffer ID %d\n",
-					buf_cfg->fds[i]);
-			goto exit;
-		}
-
-		sde_crtc->ltm_buffers[i]->fb = fb;
-		sde_crtc->ltm_buffers[i]->gem = msm_framebuffer_bo(fb, 0);
-		if (!sde_crtc->ltm_buffers[i]->gem) {
-			DRM_ERROR("failed to get gem object\n");
-			goto exit;
-		}
-
-		size = PAGE_ALIGN(sde_crtc->ltm_buffers[i]->gem->size);
-		if (size < expected_size) {
-			DRM_ERROR("Invalid buffer size\n");
-			goto exit;
-		}
-
-		sde_crtc->ltm_buffers[i]->aspace =
-			msm_gem_smmu_address_space_get(crtc->dev,
-			MSM_SMMU_DOMAIN_UNSECURE);
-		if (!sde_crtc->ltm_buffers[i]->aspace) {
-			DRM_ERROR("failed to get aspace\n");
-			goto exit;
-		}
-		ret = msm_gem_get_iova(sde_crtc->ltm_buffers[i]->gem,
-				       sde_crtc->ltm_buffers[i]->aspace,
-				       &sde_crtc->ltm_buffers[i]->iova);
-		if (ret) {
-			DRM_ERROR("failed to get the iova ret %d\n", ret);
-			goto exit;
-		}
-
-		sde_crtc->ltm_buffers[i]->kva = msm_gem_get_vaddr(
-			sde_crtc->ltm_buffers[i]->gem);
-		if (IS_ERR_OR_NULL(sde_crtc->ltm_buffers[i]->kva)) {
-			DRM_ERROR("failed to get kva\n");
-			goto exit;
-		}
-		iova_aligned = (sde_crtc->ltm_buffers[i]->iova +
-				LTM_GUARD_BYTES) & ALIGNED_OFFSET;
-		sde_crtc->ltm_buffers[i]->offset = iova_aligned -
-			sde_crtc->ltm_buffers[i]->iova;
-	}
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	/* Add buffers to ltm_buf_free list */
-	for (i = 0; i < num; i++)
-		list_add(&sde_crtc->ltm_buffers[i]->node,
-			&sde_crtc->ltm_buf_free);
-	sde_crtc->ltm_buffer_cnt = num;
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-
-	return;
-exit:
-	for (j = 0; j < i; j++) {
-		if (sde_crtc->ltm_buffers[i]->aspace)
-			msm_gem_put_iova(sde_crtc->ltm_buffers[i]->gem,
-				sde_crtc->ltm_buffers[i]->aspace);
-		if (sde_crtc->ltm_buffers[i]->gem)
-			msm_gem_put_vaddr(sde_crtc->ltm_buffers[i]->gem);
-		if (sde_crtc->ltm_buffers[i]->fb)
-			drm_framebuffer_put(sde_crtc->ltm_buffers[i]->fb);
-		kfree(sde_crtc->ltm_buffers[i]);
-		sde_crtc->ltm_buffers[i] = NULL;
-	}
-}
-
-/* needs to be called within ltm_buffer_lock mutex */
-static void _sde_cp_crtc_queue_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_ltm_buffer *buf;
-	struct drm_msm_ltm_stats_data *ltm_data = NULL;
-	u32 i;
-	bool found = false;
-	unsigned long irq_flags;
-
-	if (!sde_crtc || !cfg) {
-		DRM_ERROR("invalid parameters sde_crtc %pK cfg %pK\n", sde_crtc,
-				cfg);
-		return;
-	}
-
-	buf = hw_cfg->payload;
-	if (!buf) {
-		DRM_ERROR("invalid parameters payload %pK\n", buf);
-		return;
-	}
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (!sde_crtc->ltm_buffer_cnt) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_ERROR("LTM buffers are not allocated\n");
-		return;
-	}
-
-	for (i = 0; i < LTM_BUFFER_SIZE; i++) {
-		if (sde_crtc->ltm_buffers[i] && buf->fd ==
-				sde_crtc->ltm_buffers[i]->drm_fb_id) {
-			/* clear the status flag */
-			ltm_data = (struct drm_msm_ltm_stats_data *)
-				((u8 *)sde_crtc->ltm_buffers[i]->kva +
-				 sde_crtc->ltm_buffers[i]->offset);
-			ltm_data->status_flag = 0;
-
-			list_add_tail(&sde_crtc->ltm_buffers[i]->node,
-					&sde_crtc->ltm_buf_free);
-			found = true;
-		}
-	}
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-
-	if (!found)
-		DRM_ERROR("failed to found a matching buffer fd %d", buf->fd);
-}
-
-/* this func needs to be called within the ltm_buffer_lock and ltm_lock */
-static int _sde_cp_crtc_get_ltm_buffer(struct sde_crtc *sde_crtc, u64 *addr)
-{
-	struct sde_ltm_buffer *buf;
-
-	if (!sde_crtc || !addr) {
-		DRM_ERROR("invalid parameters sde_crtc %pK cfg %pK\n",
-				sde_crtc, addr);
-		return -EINVAL;
-	}
-
-	/**
-	 * for LTM merge mode, both LTM blocks will use the same buffer for
-	 * hist collection. The first LTM will acquire a buffer from buf_free
-	 * list and move that buffer to buf_busy list; the second LTM block
-	 * will get the same buffer from busy list for HW programming
-	 */
-	if (!list_empty(&sde_crtc->ltm_buf_busy)) {
-		buf = list_first_entry(&sde_crtc->ltm_buf_busy,
-			struct sde_ltm_buffer, node);
-		*addr = buf->iova + buf->offset;
-		DRM_DEBUG_DRIVER("ltm_buf_busy list already has a buffer\n");
-		return 0;
-	}
-
-	buf = list_first_entry(&sde_crtc->ltm_buf_free, struct sde_ltm_buffer,
-				node);
-
-	*addr = buf->iova + buf->offset;
-	list_del_init(&buf->node);
-	list_add_tail(&buf->node, &sde_crtc->ltm_buf_busy);
-
-	return 0;
-}
-
-/* this func needs to be called within the ltm_buffer_lock mutex */
-static void _sde_cp_crtc_enable_ltm_hist(struct sde_crtc *sde_crtc,
-	struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg)
-{
-	int ret = 0;
-	u64 addr = 0;
-	unsigned long irq_flags;
-	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (!sde_crtc->ltm_buffer_cnt) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_ERROR("LTM buffers are not allocated\n");
-		return;
-	}
-
-	if (!hw_lm->cfg.right_mixer && sde_crtc->ltm_hist_en) {
-		/* histogram is already enabled */
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		return;
-	}
-
-	ret = _sde_cp_crtc_get_ltm_buffer(sde_crtc, &addr);
-	if (!ret) {
-		if (!hw_lm->cfg.right_mixer)
-			sde_crtc->ltm_hist_en = true;
-		hw_dspp->ops.setup_ltm_hist_ctrl(hw_dspp, hw_cfg,
-			true, addr);
-	}
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-}
-
-/* this func needs to be called within the ltm_buffer_lock mutex */
-static void _sde_cp_crtc_disable_ltm_hist(struct sde_crtc *sde_crtc,
-	struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg)
-{
-	unsigned long irq_flags;
-	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
-	u32 i = 0;
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (!hw_lm->cfg.right_mixer && !sde_crtc->ltm_hist_en) {
-		/* histogram is already disabled */
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		return;
-	}
-	sde_crtc->ltm_hist_en = false;
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
-		list_add(&sde_crtc->ltm_buffers[i]->node,
-			&sde_crtc->ltm_buf_free);
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-}
-
-static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
-{
-	struct sde_crtc *sde_crtc = arg;
-	struct sde_ltm_buffer *busy_buf, *free_buf;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct drm_msm_ltm_stats_data *ltm_data = NULL;
-	u32 num_mixers = 0, i = 0, status = 0, ltm_hist_status = 0;
-	u64 addr = 0;
-	int idx = -1;
-	unsigned long irq_flags;
-	struct sde_ltm_phase_info phase;
-	struct sde_hw_cp_cfg hw_cfg;
-	struct sde_hw_mixer *hw_lm;
-
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-	/* read intr_status register value */
-	num_mixers = sde_crtc->num_mixers;
-	if (!num_mixers)
-		return;
-
-	for (i = 0; i < num_mixers; i++) {
-		hw_dspp = sde_crtc->mixers[i].hw_dspp;
-		if (!hw_dspp) {
-			DRM_ERROR("invalid dspp for mixer %d\n", i);
-			return;
-		}
-		hw_dspp->ops.ltm_read_intr_status(hw_dspp, &status);
-		if (status & LTM_STATS_SAT)
-			ltm_hist_status |= LTM_STATS_SAT;
-		if (status & LTM_STATS_MERGE_SAT)
-			ltm_hist_status |= LTM_STATS_MERGE_SAT;
-	}
-
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (!sde_crtc->ltm_buffer_cnt) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		/* all LTM buffers are freed, no further action is needed */
-		return;
-	}
-
-	if (!sde_crtc->ltm_hist_en) {
-		/* histogram is disabled, no need to notify user space */
-		for (i = 0; i < sde_crtc->num_mixers; i++) {
-			hw_dspp = sde_crtc->mixers[i].hw_dspp;
-			if (!hw_dspp || i >= DSPP_MAX)
-				continue;
-			hw_dspp->ops.setup_ltm_hist_ctrl(hw_dspp, NULL, false,
-				0);
-		}
-
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		return;
-	}
-
-	/* if no free buffer available, the same buffer is used by HW */
-	if (list_empty(&sde_crtc->ltm_buf_free)) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_DEBUG_DRIVER("no free buffer available\n");
-		return;
-	}
-
-	busy_buf = list_first_entry(&sde_crtc->ltm_buf_busy,
-			struct sde_ltm_buffer, node);
-	free_buf = list_first_entry(&sde_crtc->ltm_buf_free,
-			struct sde_ltm_buffer, node);
-
-	/* find the index of buffer in the ltm_buffers */
-	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++) {
-		if (busy_buf->drm_fb_id == sde_crtc->ltm_buffers[i]->drm_fb_id)
-			idx = i;
-	}
-	if (idx < 0) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		DRM_ERROR("failed to found the buffer in the list fb_id %d\n",
-				busy_buf->drm_fb_id);
-		return;
-	}
-
-	addr = free_buf->iova + free_buf->offset;
-	for (i = 0; i < num_mixers; i++) {
-		hw_dspp = sde_crtc->mixers[i].hw_dspp;
-		if (!hw_dspp) {
-			spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-			DRM_ERROR("invalid dspp for mixer %d\n", i);
-			return;
-		}
-		hw_dspp->ops.setup_ltm_hist_buffer(hw_dspp, addr);
-	}
-
-	list_del_init(&busy_buf->node);
-	list_del_init(&free_buf->node);
-	list_add_tail(&free_buf->node, &sde_crtc->ltm_buf_busy);
-
-	ltm_data = (struct drm_msm_ltm_stats_data *)
-		((u8 *)sde_crtc->ltm_buffers[idx]->kva +
-		sde_crtc->ltm_buffers[idx]->offset);
-	ltm_data->status_flag = ltm_hist_status;
-
-	hw_lm = sde_crtc->mixers[0].hw_lm;
-	if (!hw_lm) {
-		DRM_ERROR("invalid layer mixer\n");
-		return;
-	}
-	hw_cfg.num_of_mixers = num_mixers;
-	hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
-	hw_cfg.displayv = hw_lm->cfg.out_height;
-
-	sde_ltm_get_phase_info(&hw_cfg, &phase);
-	ltm_data->display_h = hw_cfg.displayh;
-	ltm_data->display_v = hw_cfg.displayv;
-	ltm_data->init_h[0] = phase.init_h[LTM_0];
-	ltm_data->init_h[1] = phase.init_h[LTM_1];
-	ltm_data->init_v = phase.init_v;
-	ltm_data->inc_v = phase.inc_v;
-	ltm_data->inc_h = phase.inc_h;
-	ltm_data->portrait_en = phase.portrait_en;
-	ltm_data->merge_en = phase.merge_en;
-	ltm_data->cfg_param_01 = sde_crtc->ltm_cfg.cfg_param_01;
-	ltm_data->cfg_param_02 = sde_crtc->ltm_cfg.cfg_param_02;
-	ltm_data->cfg_param_03 = sde_crtc->ltm_cfg.cfg_param_03;
-	ltm_data->cfg_param_04 = sde_crtc->ltm_cfg.cfg_param_04;
-	sde_crtc_event_queue(&sde_crtc->base, sde_cp_notify_ltm_hist,
-				sde_crtc->ltm_buffers[idx], true);
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-}
-
-static void sde_cp_ltm_wb_pb_interrupt_cb(void *arg, int irq_idx)
-{
-	struct sde_crtc *sde_crtc = arg;
-
-	sde_crtc_event_queue(&sde_crtc->base, sde_cp_notify_ltm_wb_pb, NULL,
-				true);
-}
-
-static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc, void *arg)
-{
-	struct drm_event event;
-	struct drm_msm_ltm_buffer payload = {};
-	struct sde_ltm_buffer *buf;
-	struct sde_crtc *sde_crtc;
-	unsigned long irq_flags;
-
-	if (!crtc || !arg) {
-		DRM_ERROR("invalid drm_crtc %pK or arg %pK\n", crtc, arg);
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return;
-	}
-
-	mutex_lock(&sde_crtc->ltm_buffer_lock);
-	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
-	if (!sde_crtc->ltm_buffer_cnt) {
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		mutex_unlock(&sde_crtc->ltm_buffer_lock);
-		/* all LTM buffers are freed, no further action is needed */
-		return;
-	}
-
-	if (!sde_crtc->ltm_hist_en) {
-		/* histogram is disabled, no need to notify user space */
-		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-		mutex_unlock(&sde_crtc->ltm_buffer_lock);
-		return;
-	}
-
-	buf = (struct sde_ltm_buffer *)arg;
-	payload.fd = buf->drm_fb_id;
-	payload.offset = buf->offset;
-	event.length = sizeof(struct drm_msm_ltm_buffer);
-	event.type = DRM_EVENT_LTM_HIST;
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-					(u8 *)&payload);
-	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
-	mutex_unlock(&sde_crtc->ltm_buffer_lock);
-}
-
-static void sde_cp_notify_ltm_wb_pb(struct drm_crtc *crtc, void *arg)
-{
-	struct drm_event event;
-	struct drm_msm_ltm_buffer payload = {};
-
-	if (!crtc) {
-		DRM_ERROR("invalid drm_crtc %pK\n", crtc);
-		return;
-	}
-
-	payload.fd = 0;
-	payload.offset = 0;
-	event.length = sizeof(struct drm_msm_ltm_buffer);
-	event.type = DRM_EVENT_LTM_WB_PB;
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-					(u8 *)&payload);
-}
-
-static int sde_cp_ltm_register_irq(struct sde_kms *kms,
-		struct sde_crtc *sde_crtc, struct sde_hw_dspp *hw_dspp,
-		struct sde_irq_callback *ltm_irq, enum sde_intr_type irq)
-{
-	int irq_idx, ret = 0;
-
-	if (irq == SDE_IRQ_TYPE_LTM_STATS_DONE) {
-		ltm_irq->func = sde_cp_ltm_hist_interrupt_cb;
-	} else if (irq == SDE_IRQ_TYPE_LTM_STATS_WB_PB) {
-		ltm_irq->func = sde_cp_ltm_wb_pb_interrupt_cb;
-	} else {
-		DRM_ERROR("invalid irq type %d\n", irq);
-		return -EINVAL;
-	}
-
-	irq_idx = sde_core_irq_idx_lookup(kms, irq, hw_dspp->idx);
-	if (irq_idx < 0) {
-		DRM_ERROR("failed to get the irq idx %d\n", irq_idx);
-		return irq_idx;
-	}
-
-	ltm_irq->arg = sde_crtc;
-	ret = sde_core_irq_register_callback(kms, irq_idx, ltm_irq);
-	if (ret) {
-		DRM_ERROR("failed to register the callback ret %d\n", ret);
-		return ret;
-	}
-
-	ret = sde_core_irq_enable(kms, &irq_idx, 1);
-	if (ret) {
-		DRM_ERROR("enable irq %d error %d\n", irq_idx, ret);
-		sde_core_irq_unregister_callback(kms, irq_idx, ltm_irq);
-	}
-
-	return ret;
-}
-
-static int sde_cp_ltm_unregister_irq(struct sde_kms *kms,
-		struct sde_crtc *sde_crtc, struct sde_hw_dspp *hw_dspp,
-		struct sde_irq_callback *ltm_irq, enum sde_intr_type irq)
-{
-	int irq_idx, ret = 0;
-
-	if (!(irq == SDE_IRQ_TYPE_LTM_STATS_DONE ||
-		irq == SDE_IRQ_TYPE_LTM_STATS_WB_PB)) {
-		DRM_ERROR("invalid irq type %d\n", irq);
-		return -EINVAL;
-	}
-
-	irq_idx = sde_core_irq_idx_lookup(kms, irq, hw_dspp->idx);
-	if (irq_idx < 0) {
-		DRM_ERROR("failed to get the irq idx %d\n", irq_idx);
-		return irq_idx;
-	}
-
-	ret = sde_core_irq_disable(kms, &irq_idx, 1);
-	if (ret)
-		DRM_ERROR("disable irq %d error %d\n", irq_idx, ret);
-
-	sde_core_irq_unregister_callback(kms, irq_idx, ltm_irq);
-	return ret;
-}
-
-int sde_cp_ltm_hist_interrupt(struct drm_crtc *crtc, bool en,
-				struct sde_irq_callback *ltm_irq)
-{
-	struct sde_kms *kms = NULL;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc *sde_crtc;
-	int ret = 0;
-
-	if (!crtc || !ltm_irq) {
-		DRM_ERROR("invalid params: crtc %pK irq %pK\n", crtc, ltm_irq);
-		return -EINVAL;
-	}
-
-	kms = get_kms(crtc);
-	sde_crtc = to_sde_crtc(crtc);
-	if (!kms || !sde_crtc) {
-		DRM_ERROR("invalid params: kms %pK sde_crtc %pK\n", kms,
-				sde_crtc);
-		return -EINVAL;
-	}
-
-	/* enable interrupt on master LTM block */
-	hw_dspp = sde_crtc->mixers[0].hw_dspp;
-	if (!hw_dspp) {
-		DRM_ERROR("invalid dspp\n");
-		return -EINVAL;
-	}
-
-	if (en) {
-		ret = sde_cp_ltm_register_irq(kms, sde_crtc, hw_dspp,
-				ltm_irq, SDE_IRQ_TYPE_LTM_STATS_DONE);
-		if (ret)
-			DRM_ERROR("failed to register stats_done irq\n");
-	} else {
-		ret = sde_cp_ltm_unregister_irq(kms, sde_crtc, hw_dspp,
-				ltm_irq, SDE_IRQ_TYPE_LTM_STATS_DONE);
-		if (ret)
-			DRM_ERROR("failed to unregister stats_done irq\n");
-	}
-	return ret;
-}
-
-int sde_cp_ltm_wb_pb_interrupt(struct drm_crtc *crtc, bool en,
-				struct sde_irq_callback *ltm_irq)
-{
-	struct sde_kms *kms = NULL;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_crtc *sde_crtc;
-	int ret = 0;
-
-	if (!crtc || !ltm_irq) {
-		DRM_ERROR("invalid params: crtc %pK irq %pK\n", crtc, ltm_irq);
-		return -EINVAL;
-	}
-
-	kms = get_kms(crtc);
-	sde_crtc = to_sde_crtc(crtc);
-	if (!kms || !sde_crtc) {
-		DRM_ERROR("invalid params: kms %pK sde_crtc %pK\n", kms,
-				sde_crtc);
-		return -EINVAL;
-	}
-
-	/* enable interrupt on master LTM block */
-	hw_dspp = sde_crtc->mixers[0].hw_dspp;
-	if (!hw_dspp) {
-		DRM_ERROR("invalid dspp\n");
-		return -EINVAL;
-	}
-
-	if (en) {
-		ret = sde_cp_ltm_register_irq(kms, sde_crtc, hw_dspp,
-				ltm_irq, SDE_IRQ_TYPE_LTM_STATS_WB_PB);
-		if (ret)
-			DRM_ERROR("failed to register WB_PB irq\n");
-	} else {
-		ret = sde_cp_ltm_unregister_irq(kms, sde_crtc, hw_dspp,
-				ltm_irq, SDE_IRQ_TYPE_LTM_STATS_WB_PB);
-		if (ret)
-			DRM_ERROR("failed to unregister WB_PB irq\n");
-	}
-	return ret;
-}
-
-static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
-		struct sde_hw_cp_cfg *hw_cfg)
-{
-	struct drm_msm_ltm_cfg_param *cfg_param = NULL;
-
-	/* disable case */
-	if (!hw_cfg->payload) {
-		memset(&sde_crtc->ltm_cfg, 0,
-			sizeof(struct drm_msm_ltm_cfg_param));
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_ltm_cfg_param)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_ltm_cfg_param));
-		return;
-	}
-
-	cfg_param = hw_cfg->payload;
-	/* input param exceeds the display width */
-	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
-			hw_cfg->displayh) {
-		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
-			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
-			hw_cfg->displayh);
-		/* set the roi width to max register value */
-		cfg_param->cfg_param_03 = 0xFFFF;
-	}
-
-	/* input param exceeds the display height */
-	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
-			hw_cfg->displayv) {
-		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
-			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
-			hw_cfg->displayv);
-		/* set the roi height to max register value */
-		cfg_param->cfg_param_04 = 0xFFFF;
-	}
-
-	sde_crtc->ltm_cfg = *cfg_param;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
deleted file mode 100644
index ab7a34b..0000000
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_COLOR_PROCESSING_H
-#define _SDE_COLOR_PROCESSING_H
-#include <drm/drm_crtc.h>
-
-struct sde_irq_callback;
-
-/*
- * PA MEMORY COLOR types
- * @MEMCOLOR_SKIN          Skin memory color type
- * @MEMCOLOR_SKY           Sky memory color type
- * @MEMCOLOR_FOLIAGE       Foliage memory color type
- */
-enum sde_memcolor_type {
-	MEMCOLOR_SKIN = 0,
-	MEMCOLOR_SKY,
-	MEMCOLOR_FOLIAGE,
-	MEMCOLOR_MAX
-};
-
-/*
- * PA HISTOGRAM modes
- * @HIST_DISABLED          Histogram disabled
- * @HIST_ENABLED           Histogram enabled
- */
-enum sde_hist_modes {
-	HIST_DISABLED,
-	HIST_ENABLED
-};
-
-/**
- * struct drm_prop_enum_list - drm structure for creating enum property and
- *                             enumerating values
- */
-static const struct drm_prop_enum_list sde_hist_modes[] = {
-	{HIST_DISABLED, "hist_off"},
-	{HIST_ENABLED, "hist_on"},
-};
-
-/*
- * LTM HISTOGRAM modes
- * @LTM_HIST_DISABLED          Histogram disabled
- * @LTM_HIST_ENABLED           Histogram enabled
- */
-enum ltm_hist_modes {
-	LTM_HIST_DISABLED,
-	LTM_HIST_ENABLED
-};
-
-/**
- * struct drm_prop_enum_list - drm structure for creating enum property and
- *                             enumerating values
- */
-static const struct drm_prop_enum_list sde_ltm_hist_modes[] = {
-	{LTM_HIST_DISABLED, "ltm_hist_off"},
-	{LTM_HIST_ENABLED, "ltm_hist_on"},
-};
-
-/**
- * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
- *                     Should be called during crtc initialization.
- * @crtc:  Pointer to sde_crtc.
- */
-void sde_cp_crtc_init(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_install_properties(): Installs the color processing
- *                                properties for a crtc.
- *                                Should be called during crtc initialization.
- * @crtc:  Pointer to crtc.
- */
-void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_destroy_properties: Destroys color processing
- *                                            properties for a crtc.
- * should be called during crtc de-initialization.
- * @crtc:  Pointer to crtc.
- */
-void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_set_property: Set a color processing property
- *                                      for a crtc.
- *                                      Should be during atomic set property.
- * @crtc: Pointer to crtc.
- * @property: Property that needs to enabled/disabled.
- * @val: Value of property.
- */
-int sde_cp_crtc_set_property(struct drm_crtc *crtc,
-				struct drm_property *property, uint64_t val);
-
-/**
- * sde_cp_crtc_apply_properties: Enable/disable properties
- *                               for a crtc.
- *                               Should be called during atomic commit call.
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_get_property: Get value of color processing property
- *                                      for a crtc.
- *                                      Should be during atomic get property.
- * @crtc: Pointer to crtc.
- * @property: Property that needs to enabled/disabled.
- * @val: Value of property.
- *
- */
-int sde_cp_crtc_get_property(struct drm_crtc *crtc,
-				struct drm_property *property, uint64_t *val);
-
-/**
- * sde_cp_crtc_suspend: Suspend the crtc features
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_suspend(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_resume: Resume the crtc features
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_resume(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_clear: Clear the active list and dirty list of crtc features
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_clear(struct drm_crtc *crtc);
-
-/**
- * sde_cp_ad_interrupt: Api to enable/disable ad interrupt
- * @crtc: Pointer to crtc.
- * @en: Variable to enable/disable interrupt.
- * @irq: Pointer to irq callback
- */
-int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
-		struct sde_irq_callback *irq);
-
-/**
- * sde_cp_crtc_pre_ipc: Handle color processing features
- *                      before entering IPC
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_pre_ipc(struct drm_crtc *crtc);
-
-/**
- * sde_cp_crtc_post_ipc: Handle color processing features
- *                       after exiting IPC
- * @crtc: Pointer to crtc.
- */
-void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
-
-/**
- * sde_cp_hist_interrupt: Api to enable/disable histogram interrupt
- * @crtc: Pointer to crtc.
- * @en: Variable to enable/disable interrupt.
- * @irq: Pointer to irq callback
- */
-int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
-	struct sde_irq_callback *hist_irq);
-
-/**
- * sde_cp_ltm_hist_interrupt: API to enable/disable LTM hist interrupt
- * @crtc: Pointer to crtc.
- * @en: Variable to enable/disable interrupt.
- * @irq: Pointer to irq callback
- */
-int sde_cp_ltm_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
-	struct sde_irq_callback *hist_irq);
-
-/**
- * sde_cp_ltm_wb_pb_interrupt: API to enable/disable LTM wb_pb interrupt
- * @crtc: Pointer to crtc.
- * @en: Variable to enable/disable interrupt.
- * @irq: Pointer to irq callback
- */
-int sde_cp_ltm_wb_pb_interrupt(struct drm_crtc *crtc_drm, bool en,
-	struct sde_irq_callback *hist_irq);
-
-#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
deleted file mode 100644
index 3422181..0000000
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ /dev/null
@@ -1,2558 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include "msm_drv.h"
-#include "sde_dbg.h"
-
-#include "sde_kms.h"
-#include "sde_connector.h"
-#include "sde_encoder.h"
-#include <linux/backlight.h>
-#include <linux/string.h>
-#include "dsi_drm.h"
-#include "dsi_display.h"
-#include "sde_crtc.h"
-#include "sde_rm.h"
-
-#define BL_NODE_NAME_SIZE 32
-#define HDR10_PLUS_VSIF_TYPE_CODE      0x81
-
-/* Autorefresh will occur after FRAME_CNT frames. Large values are unlikely */
-#define AUTOREFRESH_MAX_FRAME_CNT 6
-
-#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
-		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
-
-#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
-		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
-static u32 dither_matrix[DITHER_MATRIX_SZ] = {
-	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
-};
-
-static const struct drm_prop_enum_list e_topology_name[] = {
-	{SDE_RM_TOPOLOGY_NONE,	"sde_none"},
-	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
-	{SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,	"sde_singlepipe_dsc"},
-	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
-	{SDE_RM_TOPOLOGY_DUALPIPE_DSC,	"sde_dualpipe_dsc"},
-	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,	"sde_dualpipemerge"},
-	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,	"sde_dualpipemerge_dsc"},
-	{SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,	"sde_dualpipe_dscmerge"},
-	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
-};
-static const struct drm_prop_enum_list e_topology_control[] = {
-	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
-	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
-	{SDE_RM_TOPCTL_DSPP,		"dspp"},
-	{SDE_RM_TOPCTL_DS,		"ds"},
-};
-static const struct drm_prop_enum_list e_power_mode[] = {
-	{SDE_MODE_DPMS_ON,	"ON"},
-	{SDE_MODE_DPMS_LP1,	"LP1"},
-	{SDE_MODE_DPMS_LP2,	"LP2"},
-	{SDE_MODE_DPMS_OFF,	"OFF"},
-};
-static const struct drm_prop_enum_list e_qsync_mode[] = {
-	{SDE_RM_QSYNC_DISABLED,	"none"},
-	{SDE_RM_QSYNC_CONTINUOUS_MODE,	"continuous"},
-};
-static const struct drm_prop_enum_list e_frame_trigger_mode[] = {
-	{FRAME_DONE_WAIT_DEFAULT, "default"},
-	{FRAME_DONE_WAIT_SERIALIZE, "serialize_frame_trigger"},
-	{FRAME_DONE_WAIT_POSTED_START, "posted_start"},
-};
-
-static int sde_backlight_device_update_status(struct backlight_device *bd)
-{
-	int brightness;
-	struct dsi_display *display;
-	struct sde_connector *c_conn;
-	int bl_lvl;
-	struct drm_event event;
-	int rc = 0;
-
-	brightness = bd->props.brightness;
-
-	if ((bd->props.power != FB_BLANK_UNBLANK) ||
-			(bd->props.state & BL_CORE_FBBLANK) ||
-			(bd->props.state & BL_CORE_SUSPENDED))
-		brightness = 0;
-
-	c_conn = bl_get_data(bd);
-	display = (struct dsi_display *) c_conn->display;
-	if (brightness > display->panel->bl_config.bl_max_level)
-		brightness = display->panel->bl_config.bl_max_level;
-
-	/* map UI brightness into driver backlight level with rounding */
-	bl_lvl = mult_frac(brightness, display->panel->bl_config.bl_max_level,
-			display->panel->bl_config.brightness_max_level);
-
-	if (!bl_lvl && brightness)
-		bl_lvl = 1;
-
-	if (display->panel->bl_config.bl_update ==
-		BL_UPDATE_DELAY_UNTIL_FIRST_FRAME && !c_conn->allow_bl_update) {
-		c_conn->unset_bl_level = bl_lvl;
-		return 0;
-	}
-
-	if (c_conn->ops.set_backlight) {
-		/* skip notifying user space if bl is 0 */
-		if (brightness != 0) {
-			event.type = DRM_EVENT_SYS_BACKLIGHT;
-			event.length = sizeof(u32);
-			msm_mode_object_event_notify(&c_conn->base.base,
-				c_conn->base.dev, &event, (u8 *)&brightness);
-		}
-		rc = c_conn->ops.set_backlight(&c_conn->base,
-				c_conn->display, bl_lvl);
-		c_conn->unset_bl_level = 0;
-	}
-
-	return rc;
-}
-
-static int sde_backlight_device_get_brightness(struct backlight_device *bd)
-{
-	return 0;
-}
-
-static const struct backlight_ops sde_backlight_device_ops = {
-	.update_status = sde_backlight_device_update_status,
-	.get_brightness = sde_backlight_device_get_brightness,
-};
-
-static int sde_backlight_setup(struct sde_connector *c_conn,
-					struct drm_device *dev)
-{
-	struct backlight_properties props;
-	struct dsi_display *display;
-	struct dsi_backlight_config *bl_config;
-	static int display_count;
-	char bl_node_name[BL_NODE_NAME_SIZE];
-
-	if (!c_conn || !dev || !dev->dev) {
-		SDE_ERROR("invalid param\n");
-		return -EINVAL;
-	} else if (c_conn->connector_type != DRM_MODE_CONNECTOR_DSI) {
-		return 0;
-	}
-
-	memset(&props, 0, sizeof(props));
-	props.type = BACKLIGHT_RAW;
-	props.power = FB_BLANK_UNBLANK;
-
-	display = (struct dsi_display *) c_conn->display;
-	bl_config = &display->panel->bl_config;
-	props.max_brightness = bl_config->brightness_max_level;
-	props.brightness = bl_config->brightness_max_level;
-	snprintf(bl_node_name, BL_NODE_NAME_SIZE, "panel%u-backlight",
-							display_count);
-	c_conn->bl_device = backlight_device_register(bl_node_name, dev->dev,
-			c_conn, &sde_backlight_device_ops, &props);
-	if (IS_ERR_OR_NULL(c_conn->bl_device)) {
-		SDE_ERROR("Failed to register backlight: %ld\n",
-				    PTR_ERR(c_conn->bl_device));
-		c_conn->bl_device = NULL;
-		return -ENODEV;
-	}
-	display_count++;
-
-	return 0;
-}
-
-int sde_connector_trigger_event(void *drm_connector,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3)
-{
-	struct sde_connector *c_conn;
-	unsigned long irq_flags;
-	int (*cb_func)(uint32_t event_idx,
-			uint32_t instance_idx, void *usr,
-			uint32_t data0, uint32_t data1,
-			uint32_t data2, uint32_t data3);
-	void *usr;
-	int rc = 0;
-
-	/*
-	 * This function may potentially be called from an ISR context, so
-	 * avoid excessive logging/etc.
-	 */
-	if (!drm_connector)
-		return -EINVAL;
-	else if (event_idx >= SDE_CONN_EVENT_COUNT)
-		return -EINVAL;
-	c_conn = to_sde_connector(drm_connector);
-
-	spin_lock_irqsave(&c_conn->event_lock, irq_flags);
-	cb_func = c_conn->event_table[event_idx].cb_func;
-	usr = c_conn->event_table[event_idx].usr;
-	spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
-
-	if (cb_func)
-		rc = cb_func(event_idx, instance_idx, usr,
-			data0, data1, data2, data3);
-	else
-		rc = -EAGAIN;
-
-	return rc;
-}
-
-int sde_connector_register_event(struct drm_connector *connector,
-		uint32_t event_idx,
-		int (*cb_func)(uint32_t event_idx,
-			uint32_t instance_idx, void *usr,
-			uint32_t data0, uint32_t data1,
-			uint32_t data2, uint32_t data3),
-		void *usr)
-{
-	struct sde_connector *c_conn;
-	unsigned long irq_flags;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return -EINVAL;
-	} else if (event_idx >= SDE_CONN_EVENT_COUNT) {
-		SDE_ERROR("conn%d, invalid event %d\n",
-				connector->base.id, event_idx);
-		return -EINVAL;
-	}
-	c_conn = to_sde_connector(connector);
-
-	spin_lock_irqsave(&c_conn->event_lock, irq_flags);
-	c_conn->event_table[event_idx].cb_func = cb_func;
-	c_conn->event_table[event_idx].usr = usr;
-	spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
-
-	/* optionally notify display of event registration */
-	if (c_conn->ops.enable_event && c_conn->display)
-		c_conn->ops.enable_event(connector, event_idx,
-				cb_func != NULL, c_conn->display);
-	return 0;
-}
-
-void sde_connector_unregister_event(struct drm_connector *connector,
-		uint32_t event_idx)
-{
-	(void)sde_connector_register_event(connector, event_idx, 0, 0);
-}
-
-static int _sde_connector_get_default_dither_cfg_v1(
-		struct sde_connector *c_conn, void *cfg)
-{
-	struct drm_msm_dither *dither_cfg = (struct drm_msm_dither *)cfg;
-	enum dsi_pixel_format dst_format = DSI_PIXEL_FORMAT_MAX;
-
-	if (!c_conn || !cfg) {
-		SDE_ERROR("invalid argument(s), c_conn %pK, cfg %pK\n",
-				c_conn, cfg);
-		return -EINVAL;
-	}
-
-	if (!c_conn->ops.get_dst_format) {
-		SDE_DEBUG("get_dst_format is unavailable\n");
-		return 0;
-	}
-
-	dst_format = c_conn->ops.get_dst_format(&c_conn->base, c_conn->display);
-	switch (dst_format) {
-	case DSI_PIXEL_FORMAT_RGB888:
-		dither_cfg->c0_bitdepth = 8;
-		dither_cfg->c1_bitdepth = 8;
-		dither_cfg->c2_bitdepth = 8;
-		dither_cfg->c3_bitdepth = 8;
-		break;
-	case DSI_PIXEL_FORMAT_RGB666:
-	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
-		dither_cfg->c0_bitdepth = 6;
-		dither_cfg->c1_bitdepth = 6;
-		dither_cfg->c2_bitdepth = 6;
-		dither_cfg->c3_bitdepth = 6;
-		break;
-	default:
-		SDE_DEBUG("no default dither config for dst_format %d\n",
-			dst_format);
-		return -ENODATA;
-	}
-
-	memcpy(&dither_cfg->matrix, dither_matrix,
-			sizeof(u32) * DITHER_MATRIX_SZ);
-	dither_cfg->temporal_en = 0;
-	return 0;
-}
-
-static void _sde_connector_install_dither_property(struct drm_device *dev,
-		struct sde_kms *sde_kms, struct sde_connector *c_conn)
-{
-	char prop_name[DRM_PROP_NAME_LEN];
-	struct sde_mdss_cfg *catalog = NULL;
-	struct drm_property_blob *blob_ptr;
-	void *cfg;
-	int ret = 0;
-	u32 version = 0, len = 0;
-	bool defalut_dither_needed = false;
-
-	if (!dev || !sde_kms || !c_conn) {
-		SDE_ERROR("invld args (s), dev %pK, sde_kms %pK, c_conn %pK\n",
-				dev, sde_kms, c_conn);
-		return;
-	}
-
-	catalog = sde_kms->catalog;
-	version = SDE_COLOR_PROCESS_MAJOR(
-			catalog->pingpong[0].sblk->dither.version);
-	snprintf(prop_name, ARRAY_SIZE(prop_name), "%s%d",
-			"SDE_PP_DITHER_V", version);
-	switch (version) {
-	case 1:
-		msm_property_install_blob(&c_conn->property_info, prop_name,
-			DRM_MODE_PROP_BLOB,
-			CONNECTOR_PROP_PP_DITHER);
-		len = sizeof(struct drm_msm_dither);
-		cfg = kzalloc(len, GFP_KERNEL);
-		if (!cfg)
-			return;
-
-		ret = _sde_connector_get_default_dither_cfg_v1(c_conn, cfg);
-		if (!ret)
-			defalut_dither_needed = true;
-		break;
-	default:
-		SDE_ERROR("unsupported dither version %d\n", version);
-		return;
-	}
-
-	if (defalut_dither_needed) {
-		blob_ptr = drm_property_create_blob(dev, len, cfg);
-		if (IS_ERR_OR_NULL(blob_ptr))
-			goto exit;
-		c_conn->blob_dither = blob_ptr;
-	}
-exit:
-	kfree(cfg);
-}
-
-int sde_connector_get_dither_cfg(struct drm_connector *conn,
-			struct drm_connector_state *state, void **cfg,
-			size_t *len)
-{
-	struct sde_connector *c_conn = NULL;
-	struct sde_connector_state *c_state = NULL;
-	size_t dither_sz = 0;
-	u32 *p = (u32 *)cfg;
-
-	if (!conn || !state || !p)
-		return -EINVAL;
-
-	c_conn = to_sde_connector(conn);
-	c_state = to_sde_connector_state(state);
-
-	/* try to get user config data first */
-	*cfg = msm_property_get_blob(&c_conn->property_info,
-					&c_state->property_state,
-					&dither_sz,
-					CONNECTOR_PROP_PP_DITHER);
-	/* if user config data doesn't exist, use default dither blob */
-	if (*cfg == NULL && c_conn->blob_dither) {
-		*cfg = &c_conn->blob_dither->data;
-		dither_sz = c_conn->blob_dither->length;
-	}
-	*len = dither_sz;
-	return 0;
-}
-
-int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
-	struct msm_mode_info *mode_info)
-{
-	struct sde_connector_state *sde_conn_state = NULL;
-
-	if (!conn_state || !mode_info) {
-		SDE_ERROR("Invalid arguments\n");
-		return -EINVAL;
-	}
-
-	sde_conn_state = to_sde_connector_state(conn_state);
-	memcpy(mode_info, &sde_conn_state->mode_info,
-		sizeof(sde_conn_state->mode_info));
-
-	return 0;
-}
-
-static int sde_connector_handle_disp_recovery(uint32_t event_idx,
-			uint32_t instance_idx, void *usr,
-			uint32_t data0, uint32_t data1,
-			uint32_t data2, uint32_t data3)
-{
-	struct sde_connector *c_conn = usr;
-	int rc = 0;
-
-	if (!c_conn)
-		return -EINVAL;
-
-	rc = sde_kms_handle_recovery(c_conn->encoder);
-
-	return rc;
-}
-
-int sde_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector || !info) {
-		SDE_ERROR("invalid argument(s), conn %pK, info %pK\n",
-				connector, info);
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (!c_conn->display || !c_conn->ops.get_info) {
-		SDE_ERROR("display info not supported for %pK\n",
-				c_conn->display);
-		return -EINVAL;
-	}
-
-	return c_conn->ops.get_info(&c_conn->base, info, c_conn->display);
-}
-
-void sde_connector_schedule_status_work(struct drm_connector *connector,
-		bool en)
-{
-	struct sde_connector *c_conn;
-	struct msm_display_info info;
-
-	c_conn = to_sde_connector(connector);
-	if (!c_conn)
-		return;
-
-	/* Return if there is no change in ESD status check condition */
-	if (en == c_conn->esd_status_check)
-		return;
-
-	sde_connector_get_info(connector, &info);
-	if (c_conn->ops.check_status &&
-		(info.capabilities & MSM_DISPLAY_ESD_ENABLED)) {
-		if (en) {
-			u32 interval;
-
-			/*
-			 * If debugfs property is not set then take
-			 * default value
-			 */
-			interval = c_conn->esd_status_interval ?
-				c_conn->esd_status_interval :
-					STATUS_CHECK_INTERVAL_MS;
-			/* Schedule ESD status check */
-			schedule_delayed_work(&c_conn->status_work,
-				msecs_to_jiffies(interval));
-			c_conn->esd_status_check = true;
-		} else {
-			/* Cancel any pending ESD status check */
-			cancel_delayed_work_sync(&c_conn->status_work);
-			c_conn->esd_status_check = false;
-		}
-	}
-}
-
-static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
-{
-	struct drm_connector *connector;
-	void *display;
-	int (*set_power)(struct drm_connector *conn, int status, void *disp);
-	int mode, rc = 0;
-
-	if (!c_conn)
-		return -EINVAL;
-	connector = &c_conn->base;
-
-	switch (c_conn->dpms_mode) {
-	case DRM_MODE_DPMS_ON:
-		mode = c_conn->lp_mode;
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-		mode = SDE_MODE_DPMS_STANDBY;
-		break;
-	case DRM_MODE_DPMS_SUSPEND:
-		mode = SDE_MODE_DPMS_SUSPEND;
-		break;
-	case DRM_MODE_DPMS_OFF:
-		mode = SDE_MODE_DPMS_OFF;
-		break;
-	default:
-		mode = c_conn->lp_mode;
-		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
-				connector->base.id, mode);
-		break;
-	}
-
-	SDE_EVT32(connector->base.id, c_conn->dpms_mode, c_conn->lp_mode, mode);
-	SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id,
-			c_conn->dpms_mode, c_conn->lp_mode, mode);
-
-	if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) {
-		display = c_conn->display;
-		set_power = c_conn->ops.set_power;
-
-		mutex_unlock(&c_conn->lock);
-		rc = set_power(connector, mode, display);
-		mutex_lock(&c_conn->lock);
-	}
-	c_conn->last_panel_power_mode = mode;
-
-	mutex_unlock(&c_conn->lock);
-	if (mode != SDE_MODE_DPMS_ON)
-		sde_connector_schedule_status_work(connector, false);
-	else
-		sde_connector_schedule_status_work(connector, true);
-	mutex_lock(&c_conn->lock);
-
-	return rc;
-}
-
-static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
-{
-	struct dsi_display *dsi_display;
-	struct dsi_backlight_config *bl_config;
-	int rc = 0;
-
-	if (!c_conn) {
-		SDE_ERROR("Invalid params sde_connector null\n");
-		return -EINVAL;
-	}
-
-	dsi_display = c_conn->display;
-	if (!dsi_display || !dsi_display->panel) {
-		SDE_ERROR("Invalid params(s) dsi_display %pK, panel %pK\n",
-			dsi_display,
-			((dsi_display) ? dsi_display->panel : NULL));
-		return -EINVAL;
-	}
-
-	bl_config = &dsi_display->panel->bl_config;
-
-	if (c_conn->bl_scale > MAX_BL_SCALE_LEVEL)
-		bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
-	else
-		bl_config->bl_scale = c_conn->bl_scale;
-
-	if (c_conn->bl_scale_sv > MAX_SV_BL_SCALE_LEVEL)
-		bl_config->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
-	else
-		bl_config->bl_scale_sv = c_conn->bl_scale_sv;
-
-	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
-		bl_config->bl_scale, bl_config->bl_scale_sv,
-		bl_config->bl_level);
-	rc = c_conn->ops.set_backlight(&c_conn->base,
-			dsi_display, bl_config->bl_level);
-
-	return rc;
-}
-
-void sde_connector_set_qsync_params(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	u32 qsync_propval;
-
-	if (!connector)
-		return;
-
-	c_conn->qsync_updated = false;
-	qsync_propval = sde_connector_get_property(c_conn->base.state,
-			CONNECTOR_PROP_QSYNC_MODE);
-
-	if (qsync_propval != c_conn->qsync_mode) {
-		SDE_DEBUG("updated qsync mode %d -> %d\n", c_conn->qsync_mode,
-				qsync_propval);
-		c_conn->qsync_updated = true;
-		c_conn->qsync_mode = qsync_propval;
-	}
-}
-
-static int _sde_connector_update_hdr_metadata(struct sde_connector *c_conn,
-		struct sde_connector_state *c_state)
-{
-	int rc = 0;
-
-	if (c_conn->ops.config_hdr)
-		rc = c_conn->ops.config_hdr(&c_conn->base, c_conn->display,
-				c_state);
-
-	if (rc)
-		SDE_ERROR_CONN(c_conn, "cannot apply hdr metadata %d\n", rc);
-
-	SDE_DEBUG_CONN(c_conn, "updated hdr metadata: %d\n", rc);
-	return rc;
-}
-
-static int _sde_connector_update_dirty_properties(
-				struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	int idx;
-
-	if (!connector) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(connector->state);
-
-	while ((idx = msm_property_pop_dirty(&c_conn->property_info,
-					&c_state->property_state)) >= 0) {
-		switch (idx) {
-		case CONNECTOR_PROP_LP:
-			mutex_lock(&c_conn->lock);
-			c_conn->lp_mode = sde_connector_get_property(
-					connector->state, CONNECTOR_PROP_LP);
-			_sde_connector_update_power_locked(c_conn);
-			mutex_unlock(&c_conn->lock);
-			break;
-		case CONNECTOR_PROP_BL_SCALE:
-		case CONNECTOR_PROP_SV_BL_SCALE:
-			_sde_connector_update_bl_scale(c_conn);
-			break;
-		case CONNECTOR_PROP_HDR_METADATA:
-			_sde_connector_update_hdr_metadata(c_conn, c_state);
-			break;
-		default:
-			/* nothing to do for most properties */
-			break;
-		}
-	}
-
-	/*
-	 * Special handling for postproc properties and
-	 * for updating backlight if any unset backlight level is present
-	 */
-	if (c_conn->bl_scale_dirty || c_conn->unset_bl_level) {
-		_sde_connector_update_bl_scale(c_conn);
-		c_conn->bl_scale_dirty = false;
-	}
-
-	return 0;
-}
-
-struct sde_connector_dyn_hdr_metadata *sde_connector_get_dyn_hdr_meta(
-		struct drm_connector *connector)
-{
-	struct sde_connector_state *c_state;
-
-	if (!connector)
-		return NULL;
-
-	c_state = to_sde_connector_state(connector->state);
-	return &c_state->dyn_hdr_meta;
-}
-
-int sde_connector_pre_kickoff(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	struct msm_display_kickoff_params params;
-	int rc;
-
-	if (!connector) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(connector->state);
-	if (!c_conn->display) {
-		SDE_ERROR("invalid connector display\n");
-		return -EINVAL;
-	}
-
-	rc = _sde_connector_update_dirty_properties(connector);
-	if (rc) {
-		SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
-		goto end;
-	}
-
-	if (!c_conn->ops.pre_kickoff)
-		return 0;
-
-	params.rois = &c_state->rois;
-	params.hdr_meta = &c_state->hdr_meta;
-	params.qsync_update = false;
-
-	if (c_conn->qsync_updated) {
-		params.qsync_mode = c_conn->qsync_mode;
-		params.qsync_update = true;
-		SDE_EVT32(connector->base.id, params.qsync_mode);
-	}
-
-	SDE_EVT32_VERBOSE(connector->base.id);
-
-	rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
-
-end:
-	return rc;
-}
-
-void sde_connector_helper_bridge_disable(struct drm_connector *connector)
-{
-	int rc;
-	struct sde_connector *c_conn = NULL;
-
-	if (!connector)
-		return;
-
-	rc = _sde_connector_update_dirty_properties(connector);
-	if (rc) {
-		SDE_ERROR("conn %d final pre kickoff failed %d\n",
-				connector->base.id, rc);
-		SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
-	}
-
-	/* Disable ESD thread */
-	sde_connector_schedule_status_work(connector, false);
-
-	c_conn = to_sde_connector(connector);
-	if (c_conn->bl_device) {
-		c_conn->bl_device->props.power = FB_BLANK_POWERDOWN;
-		c_conn->bl_device->props.state |= BL_CORE_FBBLANK;
-		backlight_update_status(c_conn->bl_device);
-	}
-
-	c_conn->allow_bl_update = false;
-}
-
-void sde_connector_helper_bridge_enable(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn = NULL;
-	struct dsi_display *display;
-
-	if (!connector)
-		return;
-
-	c_conn = to_sde_connector(connector);
-	display = (struct dsi_display *) c_conn->display;
-
-	/*
-	 * Special handling for some panels which need atleast
-	 * one frame to be transferred to GRAM before enabling backlight.
-	 * So delay backlight update to these panels until the
-	 * first frame commit is received from the HW.
-	 */
-	if (display->panel->bl_config.bl_update ==
-				BL_UPDATE_DELAY_UNTIL_FIRST_FRAME)
-		sde_encoder_wait_for_event(c_conn->encoder,
-				MSM_ENC_TX_COMPLETE);
-	c_conn->allow_bl_update = true;
-
-	if (c_conn->bl_device) {
-		c_conn->bl_device->props.power = FB_BLANK_UNBLANK;
-		c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK;
-		backlight_update_status(c_conn->bl_device);
-	}
-	c_conn->panel_dead = false;
-}
-
-int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
-{
-	struct sde_connector *c_conn;
-	struct dsi_display *display;
-	u32 state = enable ? DSI_CLK_ON : DSI_CLK_OFF;
-	int rc = 0;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	display = (struct dsi_display *) c_conn->display;
-
-	if (display && c_conn->ops.clk_ctrl)
-		rc = c_conn->ops.clk_ctrl(display->mdp_clk_handle,
-				DSI_ALL_CLKS, state);
-
-	return rc;
-}
-
-void sde_connector_destroy(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	/* cancel if any pending esd work */
-	sde_connector_schedule_status_work(connector, false);
-
-	if (c_conn->ops.pre_destroy)
-		c_conn->ops.pre_destroy(connector, c_conn->display);
-
-	if (c_conn->blob_caps)
-		drm_property_blob_put(c_conn->blob_caps);
-	if (c_conn->blob_hdr)
-		drm_property_blob_put(c_conn->blob_hdr);
-	if (c_conn->blob_dither)
-		drm_property_blob_put(c_conn->blob_dither);
-	if (c_conn->blob_mode_info)
-		drm_property_blob_put(c_conn->blob_mode_info);
-	if (c_conn->blob_ext_hdr)
-		drm_property_blob_put(c_conn->blob_ext_hdr);
-
-	if (c_conn->bl_device)
-		backlight_device_unregister(c_conn->bl_device);
-	drm_connector_unregister(connector);
-	mutex_destroy(&c_conn->lock);
-	sde_fence_deinit(c_conn->retire_fence);
-	drm_connector_cleanup(connector);
-	msm_property_destroy(&c_conn->property_info);
-	kfree(c_conn);
-}
-
-/**
- * _sde_connector_destroy_fb - clean up connector state's out_fb buffer
- * @c_conn: Pointer to sde connector structure
- * @c_state: Pointer to sde connector state structure
- */
-static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
-		struct sde_connector_state *c_state)
-{
-	if (!c_state || !c_state->out_fb) {
-		SDE_ERROR("invalid state %pK\n", c_state);
-		return;
-	}
-
-	drm_framebuffer_put(c_state->out_fb);
-	c_state->out_fb = NULL;
-
-	if (c_conn)
-		c_state->property_values[CONNECTOR_PROP_OUT_FB].value =
-			msm_property_get_default(&c_conn->property_info,
-					CONNECTOR_PROP_OUT_FB);
-	else
-		c_state->property_values[CONNECTOR_PROP_OUT_FB].value = ~0;
-}
-
-static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
-		struct drm_connector_state *state)
-{
-	struct sde_connector *c_conn = NULL;
-	struct sde_connector_state *c_state = NULL;
-
-	if (!state) {
-		SDE_ERROR("invalid state\n");
-		return;
-	}
-
-	/*
-	 * The base DRM framework currently always passes in a NULL
-	 * connector pointer. This is not correct, but attempt to
-	 * handle that case as much as possible.
-	 */
-	if (connector)
-		c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(state);
-
-	if (c_state->out_fb)
-		_sde_connector_destroy_fb(c_conn, c_state);
-
-	__drm_atomic_helper_connector_destroy_state(&c_state->base);
-
-	if (!c_conn) {
-		kfree(c_state);
-	} else {
-		/* destroy value helper */
-		msm_property_destroy_state(&c_conn->property_info, c_state,
-				&c_state->property_state);
-	}
-}
-
-static void sde_connector_atomic_reset(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (connector->state &&
-			!sde_crtc_is_reset_required(connector->state->crtc)) {
-		SDE_DEBUG_CONN(c_conn, "avoid reset for connector\n");
-		return;
-	}
-
-	if (connector->state) {
-		sde_connector_atomic_destroy_state(connector, connector->state);
-		connector->state = 0;
-	}
-
-	c_state = msm_property_alloc_state(&c_conn->property_info);
-	if (!c_state) {
-		SDE_ERROR("state alloc failed\n");
-		return;
-	}
-
-	/* reset value helper, zero out state structure and reset properties */
-	msm_property_reset_state(&c_conn->property_info, c_state,
-			&c_state->property_state,
-			c_state->property_values);
-
-	__drm_atomic_helper_connector_reset(connector, &c_state->base);
-}
-
-static struct drm_connector_state *
-sde_connector_atomic_duplicate_state(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state, *c_oldstate;
-
-	if (!connector || !connector->state) {
-		SDE_ERROR("invalid connector %pK\n", connector);
-		return NULL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_oldstate = to_sde_connector_state(connector->state);
-	c_state = msm_property_alloc_state(&c_conn->property_info);
-	if (!c_state) {
-		SDE_ERROR("state alloc failed\n");
-		return NULL;
-	}
-
-	/* duplicate value helper */
-	msm_property_duplicate_state(&c_conn->property_info,
-			c_oldstate, c_state,
-			&c_state->property_state, c_state->property_values);
-
-	__drm_atomic_helper_connector_duplicate_state(connector,
-			&c_state->base);
-
-	/* additional handling for drm framebuffer objects */
-	if (c_state->out_fb)
-		drm_framebuffer_get(c_state->out_fb);
-
-	/* clear dynamic HDR metadata from prev state */
-	if (c_state->dyn_hdr_meta.dynamic_hdr_update) {
-		c_state->dyn_hdr_meta.dynamic_hdr_update = false;
-		c_state->dyn_hdr_meta.dynamic_hdr_payload_size = 0;
-	}
-
-	return &c_state->base;
-}
-
-int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state)
-{
-	const struct msm_roi_alignment *align = NULL;
-	struct sde_connector *c_conn = NULL;
-	struct msm_mode_info mode_info;
-	struct sde_connector_state *c_state;
-	int i, w, h;
-
-	if (!conn_state)
-		return -EINVAL;
-
-	memset(&mode_info, 0, sizeof(mode_info));
-
-	c_state = to_sde_connector_state(conn_state);
-	c_conn = to_sde_connector(conn_state->connector);
-
-	memcpy(&mode_info, &c_state->mode_info, sizeof(c_state->mode_info));
-
-	if (!mode_info.roi_caps.enabled)
-		return 0;
-
-	if (c_state->rois.num_rects > mode_info.roi_caps.num_roi) {
-		SDE_ERROR_CONN(c_conn, "too many rects specified: %d > %d\n",
-				c_state->rois.num_rects,
-				mode_info.roi_caps.num_roi);
-		return -E2BIG;
-	}
-
-	align = &mode_info.roi_caps.align;
-	for (i = 0; i < c_state->rois.num_rects; ++i) {
-		struct drm_clip_rect *roi_conn;
-
-		roi_conn = &c_state->rois.roi[i];
-		w = roi_conn->x2 - roi_conn->x1;
-		h = roi_conn->y2 - roi_conn->y1;
-
-		SDE_EVT32_VERBOSE(DRMID(&c_conn->base),
-				roi_conn->x1, roi_conn->y1,
-				roi_conn->x2, roi_conn->y2);
-
-		if (w <= 0 || h <= 0) {
-			SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n",
-					w, h);
-			return -EINVAL;
-		}
-
-		if (w < align->min_width || w % align->width_pix_align) {
-			SDE_ERROR_CONN(c_conn,
-					"invalid conn roi width %d min %d align %d\n",
-					w, align->min_width,
-					align->width_pix_align);
-			return -EINVAL;
-		}
-
-		if (h < align->min_height || h % align->height_pix_align) {
-			SDE_ERROR_CONN(c_conn,
-					"invalid conn roi height %d min %d align %d\n",
-					h, align->min_height,
-					align->height_pix_align);
-			return -EINVAL;
-		}
-
-		if (roi_conn->x1 % align->xstart_pix_align) {
-			SDE_ERROR_CONN(c_conn,
-					"invalid conn roi x1 %d align %d\n",
-					roi_conn->x1, align->xstart_pix_align);
-			return -EINVAL;
-		}
-
-		if (roi_conn->y1 % align->ystart_pix_align) {
-			SDE_ERROR_CONN(c_conn,
-					"invalid conn roi y1 %d align %d\n",
-					roi_conn->y1, align->ystart_pix_align);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static int _sde_connector_set_roi_v1(
-		struct sde_connector *c_conn,
-		struct sde_connector_state *c_state,
-		void __user *usr_ptr)
-{
-	struct sde_drm_roi_v1 roi_v1;
-	int i;
-
-	if (!c_conn || !c_state) {
-		SDE_ERROR("invalid args\n");
-		return -EINVAL;
-	}
-
-	memset(&c_state->rois, 0, sizeof(c_state->rois));
-
-	if (!usr_ptr) {
-		SDE_DEBUG_CONN(c_conn, "rois cleared\n");
-		return 0;
-	}
-
-	if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
-		SDE_ERROR_CONN(c_conn, "failed to copy roi_v1 data\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG_CONN(c_conn, "num_rects %d\n", roi_v1.num_rects);
-
-	if (roi_v1.num_rects == 0) {
-		SDE_DEBUG_CONN(c_conn, "rois cleared\n");
-		return 0;
-	}
-
-	if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
-		SDE_ERROR_CONN(c_conn, "num roi rects more than supported: %d",
-				roi_v1.num_rects);
-		return -EINVAL;
-	}
-
-	c_state->rois.num_rects = roi_v1.num_rects;
-	for (i = 0; i < roi_v1.num_rects; ++i) {
-		c_state->rois.roi[i] = roi_v1.roi[i];
-		SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
-				c_state->rois.roi[i].x1,
-				c_state->rois.roi[i].y1,
-				c_state->rois.roi[i].x2,
-				c_state->rois.roi[i].y2);
-	}
-
-	return 0;
-}
-
-static int _sde_connector_set_ext_hdr_info(
-	struct sde_connector *c_conn,
-	struct sde_connector_state *c_state,
-	void __user *usr_ptr)
-{
-	int rc = 0;
-	struct drm_connector *connector;
-	struct drm_msm_ext_hdr_metadata *hdr_meta;
-	size_t payload_size = 0;
-	u8 *payload = NULL;
-	int i;
-
-	if (!c_conn || !c_state) {
-		SDE_ERROR_CONN(c_conn, "invalid args\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	connector = &c_conn->base;
-
-	if (!connector->hdr_supported) {
-		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
-		rc = -ENOTSUPP;
-		goto end;
-	}
-
-	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
-
-	if (!usr_ptr) {
-		SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
-		goto end;
-	}
-
-	if (copy_from_user(&c_state->hdr_meta,
-		(void __user *)usr_ptr,
-			sizeof(*hdr_meta))) {
-		SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
-		rc = -EFAULT;
-		goto end;
-	}
-
-	hdr_meta = &c_state->hdr_meta;
-
-	/* dynamic metadata support */
-	if (!hdr_meta->hdr_plus_payload_size || !hdr_meta->hdr_plus_payload)
-		goto skip_dhdr;
-
-	if (!connector->hdr_plus_app_ver) {
-		SDE_ERROR_CONN(c_conn, "sink doesn't support dynamic HDR\n");
-		rc = -ENOTSUPP;
-		goto end;
-	}
-
-	payload_size = hdr_meta->hdr_plus_payload_size;
-	if (payload_size > sizeof(c_state->dyn_hdr_meta.dynamic_hdr_payload)) {
-		SDE_ERROR_CONN(c_conn, "payload size exceeds limit\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	payload = c_state->dyn_hdr_meta.dynamic_hdr_payload;
-	if (copy_from_user(payload,
-			(void __user *)c_state->hdr_meta.hdr_plus_payload,
-			payload_size)) {
-		SDE_ERROR_CONN(c_conn, "failed to copy dhdr metadata\n");
-		rc = -EFAULT;
-		goto end;
-	}
-
-	/* verify 1st header byte, programmed in DP Infoframe SDP header */
-	if (payload_size < 1 || (payload[0] != HDR10_PLUS_VSIF_TYPE_CODE)) {
-		SDE_ERROR_CONN(c_conn, "invalid payload detected, size: %d\n",
-				payload_size);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	c_state->dyn_hdr_meta.dynamic_hdr_update = true;
-
-skip_dhdr:
-	c_state->dyn_hdr_meta.dynamic_hdr_payload_size = payload_size;
-
-	SDE_DEBUG_CONN(c_conn, "hdr_state %d\n", hdr_meta->hdr_state);
-	SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n", hdr_meta->hdr_supported);
-	SDE_DEBUG_CONN(c_conn, "eotf %d\n", hdr_meta->eotf);
-	SDE_DEBUG_CONN(c_conn, "white_point_x %d\n", hdr_meta->white_point_x);
-	SDE_DEBUG_CONN(c_conn, "white_point_y %d\n", hdr_meta->white_point_y);
-	SDE_DEBUG_CONN(c_conn, "max_luminance %d\n", hdr_meta->max_luminance);
-	SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n",
-				hdr_meta->max_content_light_level);
-	SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n",
-				hdr_meta->max_average_light_level);
-
-	for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
-		SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n",
-				   hdr_meta->display_primaries_x[i]);
-		SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n",
-				   hdr_meta->display_primaries_y[i]);
-	}
-	SDE_DEBUG_CONN(c_conn, "hdr_plus payload%s updated, size %d\n",
-			c_state->dyn_hdr_meta.dynamic_hdr_update ? "" : " NOT",
-			c_state->dyn_hdr_meta.dynamic_hdr_payload_size);
-
-end:
-	return rc;
-}
-
-static int sde_connector_atomic_set_property(struct drm_connector *connector,
-		struct drm_connector_state *state,
-		struct drm_property *property,
-		uint64_t val)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	int idx, rc;
-	uint64_t fence_fd;
-
-	if (!connector || !state || !property) {
-		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
-				connector, state, property);
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(state);
-
-	/* generic property handling */
-	rc = msm_property_atomic_set(&c_conn->property_info,
-			&c_state->property_state, property, val);
-	if (rc)
-		goto end;
-
-	/* connector-specific property handling */
-	idx = msm_property_index(&c_conn->property_info, property);
-	switch (idx) {
-	case CONNECTOR_PROP_OUT_FB:
-		/* clear old fb, if present */
-		if (c_state->out_fb)
-			_sde_connector_destroy_fb(c_conn, c_state);
-
-		/* convert fb val to drm framebuffer and prepare it */
-		c_state->out_fb =
-			drm_framebuffer_lookup(connector->dev, NULL, val);
-		if (!c_state->out_fb && val) {
-			SDE_ERROR("failed to look up fb %lld\n", val);
-			rc = -EFAULT;
-		} else if (!c_state->out_fb && !val) {
-			SDE_DEBUG("cleared fb_id\n");
-			rc = 0;
-		} else {
-			msm_framebuffer_set_kmap(c_state->out_fb,
-					c_conn->fb_kmap);
-		}
-		break;
-	case CONNECTOR_PROP_RETIRE_FENCE:
-		if (!val)
-			goto end;
-
-		/*
-		 * update the the offset to a timeline for commit completion
-		 */
-		rc = sde_fence_create(c_conn->retire_fence, &fence_fd, 1);
-		if (rc) {
-			SDE_ERROR("fence create failed rc:%d\n", rc);
-			goto end;
-		}
-
-		rc = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
-			sizeof(uint64_t));
-		if (rc) {
-			SDE_ERROR("copy to user failed rc:%d\n", rc);
-			/* fence will be released with timeline update */
-			put_unused_fd(fence_fd);
-			rc = -EFAULT;
-			goto end;
-		}
-		break;
-	case CONNECTOR_PROP_ROI_V1:
-		rc = _sde_connector_set_roi_v1(c_conn, c_state,
-				(void *)(uintptr_t)val);
-		if (rc)
-			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
-		break;
-	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_SV_BL_SCALE are
-	 * color-processing properties. These two properties require
-	 * special handling since they don't quite fit the current standard
-	 * atomic set property framework.
-	 */
-	case CONNECTOR_PROP_BL_SCALE:
-		c_conn->bl_scale = val;
-		c_conn->bl_scale_dirty = true;
-		break;
-	case CONNECTOR_PROP_SV_BL_SCALE:
-		c_conn->bl_scale_sv = val;
-		c_conn->bl_scale_dirty = true;
-		break;
-	case CONNECTOR_PROP_HDR_METADATA:
-		rc = _sde_connector_set_ext_hdr_info(c_conn,
-			c_state, (void *)(uintptr_t)val);
-		if (rc)
-			SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
-		break;
-	default:
-		break;
-	}
-
-	/* check for custom property handling */
-	if (!rc && c_conn->ops.set_property) {
-		rc = c_conn->ops.set_property(connector,
-				state,
-				idx,
-				val,
-				c_conn->display);
-
-		/* potentially clean up out_fb if rc != 0 */
-		if ((idx == CONNECTOR_PROP_OUT_FB) && rc)
-			_sde_connector_destroy_fb(c_conn, c_state);
-	}
-end:
-	return rc;
-}
-
-static int sde_connector_atomic_get_property(struct drm_connector *connector,
-		const struct drm_connector_state *state,
-		struct drm_property *property,
-		uint64_t *val)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	int idx, rc = -EINVAL;
-
-	if (!connector || !state) {
-		SDE_ERROR("invalid argument(s), conn %pK, state %pK\n",
-				connector, state);
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(state);
-
-	idx = msm_property_index(&c_conn->property_info, property);
-	if (idx == CONNECTOR_PROP_RETIRE_FENCE) {
-		*val = ~0;
-		rc = 0;
-	} else {
-		/* get cached property value */
-		rc = msm_property_atomic_get(&c_conn->property_info,
-				&c_state->property_state, property, val);
-	}
-
-	/* allow for custom override */
-	if (c_conn->ops.get_property)
-		rc = c_conn->ops.get_property(connector,
-				(struct drm_connector_state *)state,
-				idx,
-				val,
-				c_conn->display);
-	return rc;
-}
-
-void sde_conn_timeline_status(struct drm_connector *conn)
-{
-	struct sde_connector *c_conn;
-
-	if (!conn) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	c_conn = to_sde_connector(conn);
-	sde_fence_timeline_status(c_conn->retire_fence, &conn->base);
-}
-
-void sde_connector_prepare_fence(struct drm_connector *connector)
-{
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	sde_fence_prepare(to_sde_connector(connector)->retire_fence);
-}
-
-void sde_connector_complete_commit(struct drm_connector *connector,
-		ktime_t ts, enum sde_fence_event fence_event)
-{
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	/* signal connector's retire fence */
-	sde_fence_signal(to_sde_connector(connector)->retire_fence,
-			ts, fence_event);
-}
-
-void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts)
-{
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return;
-	}
-
-	/* signal connector's retire fence */
-	sde_fence_signal(to_sde_connector(connector)->retire_fence,
-			ts, SDE_FENCE_RESET_TIMELINE);
-}
-
-static void sde_connector_update_hdr_props(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	struct drm_msm_ext_hdr_properties hdr = {0};
-
-	hdr.hdr_metadata_type_one = connector->hdr_metadata_type_one ? 1 : 0;
-	hdr.hdr_supported = connector->hdr_supported ? 1 : 0;
-	hdr.hdr_eotf = connector->hdr_eotf;
-	hdr.hdr_max_luminance = connector->hdr_max_luminance;
-	hdr.hdr_avg_luminance = connector->hdr_avg_luminance;
-	hdr.hdr_min_luminance = connector->hdr_min_luminance;
-	hdr.hdr_plus_supported = connector->hdr_plus_app_ver;
-
-	msm_property_set_blob(&c_conn->property_info, &c_conn->blob_ext_hdr,
-			&hdr, sizeof(hdr), CONNECTOR_PROP_EXT_HDR_INFO);
-}
-
-static enum drm_connector_status
-sde_connector_detect(struct drm_connector *connector, bool force)
-{
-	enum drm_connector_status status = connector_status_unknown;
-	struct sde_connector *c_conn;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return status;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (c_conn->ops.detect)
-		status = c_conn->ops.detect(connector,
-				force,
-				c_conn->display);
-
-	return status;
-}
-
-int sde_connector_get_dpms(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	int rc;
-
-	if (!connector) {
-		SDE_DEBUG("invalid connector\n");
-		return DRM_MODE_DPMS_OFF;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	mutex_lock(&c_conn->lock);
-	rc = c_conn->dpms_mode;
-	mutex_unlock(&c_conn->lock);
-
-	return rc;
-}
-
-int sde_connector_set_property_for_commit(struct drm_connector *connector,
-		struct drm_atomic_state *atomic_state,
-		uint32_t property_idx, uint64_t value)
-{
-	struct drm_connector_state *state;
-	struct drm_property *property;
-	struct sde_connector *c_conn;
-
-	if (!connector || !atomic_state) {
-		SDE_ERROR("invalid argument(s), conn %d, state %d\n",
-				connector != NULL, atomic_state != NULL);
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	property = msm_property_index_to_drm_property(
-			&c_conn->property_info, property_idx);
-	if (!property) {
-		SDE_ERROR("invalid property index %d\n", property_idx);
-		return -EINVAL;
-	}
-
-	state = drm_atomic_get_connector_state(atomic_state, connector);
-	if (IS_ERR_OR_NULL(state)) {
-		SDE_ERROR("failed to get conn %d state\n",
-				connector->base.id);
-		return -EINVAL;
-	}
-
-	return sde_connector_atomic_set_property(
-			connector, state, property, value);
-}
-
-int sde_connector_helper_reset_custom_properties(
-		struct drm_connector *connector,
-		struct drm_connector_state *connector_state)
-{
-	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	struct drm_property *drm_prop;
-	enum msm_mdp_conn_property prop_idx;
-
-	if (!connector || !connector_state) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	c_state = to_sde_connector_state(connector_state);
-
-	for (prop_idx = 0; prop_idx < CONNECTOR_PROP_COUNT; prop_idx++) {
-		uint64_t val = c_state->property_values[prop_idx].value;
-		uint64_t def;
-		int ret;
-
-		drm_prop = msm_property_index_to_drm_property(
-				&c_conn->property_info, prop_idx);
-		if (!drm_prop) {
-			/* not all props will be installed, based on caps */
-			SDE_DEBUG_CONN(c_conn, "invalid property index %d\n",
-					prop_idx);
-			continue;
-		}
-
-		def = msm_property_get_default(&c_conn->property_info,
-				prop_idx);
-		if (val == def)
-			continue;
-
-		SDE_DEBUG_CONN(c_conn, "set prop %s idx %d from %llu to %llu\n",
-				drm_prop->name, prop_idx, val, def);
-
-		ret = sde_connector_atomic_set_property(connector,
-				connector_state, drm_prop, def);
-		if (ret) {
-			SDE_ERROR_CONN(c_conn,
-					"set property failed, idx %d ret %d\n",
-					prop_idx, ret);
-			continue;
-		}
-	}
-
-	return 0;
-}
-
-static int _sde_connector_primary_preference(struct sde_connector *sde_conn,
-		struct sde_kms *sde_kms)
-{
-	int ret = 0;
-	u32 num_lm = 0;
-
-	if (!sde_conn || !sde_kms || !sde_conn->ops.get_default_lms) {
-		SDE_DEBUG("invalid input params");
-		return -EINVAL;
-	}
-
-	ret = sde_conn->ops.get_default_lms(sde_conn->display, &num_lm);
-	if (ret || !num_lm) {
-		SDE_DEBUG("failed to get default lm count");
-		return ret;
-	}
-
-	if (num_lm > sde_kms->catalog->mixer_count) {
-		SDE_DEBUG(
-				"topology requesting more lms [%d] than hw exists [%d]",
-				num_lm, sde_kms->catalog->mixer_count);
-		return -EINVAL;
-	}
-
-	sde_hw_mixer_set_preference(sde_kms->catalog, num_lm);
-
-	return ret;
-}
-
-int sde_connector_get_panel_vfp(struct drm_connector *connector,
-	struct drm_display_mode *mode)
-{
-	struct sde_connector *c_conn;
-	int vfp = -EINVAL;
-
-	if (!connector || !mode) {
-		SDE_ERROR("invalid connector\n");
-		return vfp;
-	}
-	c_conn = to_sde_connector(connector);
-	if (!c_conn->ops.get_panel_vfp)
-		return vfp;
-
-	vfp = c_conn->ops.get_panel_vfp(c_conn->display,
-		mode->hdisplay, mode->vdisplay);
-	if (vfp <= 0)
-		SDE_ERROR("Failed get_panel_vfp %d\n", vfp);
-
-	return vfp;
-}
-
-static int _sde_debugfs_conn_cmd_tx_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->private_data = inode->i_private;
-	return nonseekable_open(inode, file);
-}
-
-static ssize_t _sde_debugfs_conn_cmd_tx_sts_read(struct file *file,
-		char __user *buf, size_t count, loff_t *ppos)
-{
-	struct drm_connector *connector = file->private_data;
-	struct sde_connector *c_conn;
-	char buffer[MAX_CMD_PAYLOAD_SIZE];
-	int blen = 0;
-
-	if (*ppos)
-		return 0;
-
-	if (!connector) {
-		SDE_ERROR("invalid argument, conn is NULL\n");
-		return 0;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	mutex_lock(&c_conn->lock);
-	blen = snprintf(buffer, MAX_CMD_PAYLOAD_SIZE,
-		"last_cmd_tx_sts:0x%x",
-		c_conn->last_cmd_tx_sts);
-	mutex_unlock(&c_conn->lock);
-
-	SDE_DEBUG("output: %s\n", buffer);
-	if (blen <= 0) {
-		SDE_ERROR("snprintf failed, blen %d\n", blen);
-		return 0;
-	}
-
-	if (copy_to_user(buf, buffer, blen)) {
-		SDE_ERROR("copy to user buffer failed\n");
-		return -EFAULT;
-	}
-
-	*ppos += blen;
-	return blen;
-}
-
-static ssize_t _sde_debugfs_conn_cmd_tx_write(struct file *file,
-			const char __user *p, size_t count, loff_t *ppos)
-{
-	struct drm_connector *connector = file->private_data;
-	struct sde_connector *c_conn;
-	char *input, *token, *input_copy, *input_dup = NULL;
-	const char *delim = " ";
-	u32 buf_size = 0;
-	char buffer[MAX_CMD_PAYLOAD_SIZE];
-	int rc = 0, strtoint;
-
-	if (*ppos || !connector) {
-		SDE_ERROR("invalid argument(s), conn %d\n", connector != NULL);
-		return 0;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (!c_conn->ops.cmd_transfer) {
-		SDE_ERROR("no cmd transfer support for connector name %s\n",
-				c_conn->name);
-		return 0;
-	}
-
-	input = kmalloc(count + 1, GFP_KERNEL);
-	if (!input)
-		return -ENOMEM;
-
-	if (copy_from_user(input, p, count)) {
-		SDE_ERROR("copy from user failed\n");
-		rc = -EFAULT;
-		goto end;
-	}
-	input[count] = '\0';
-
-	SDE_INFO("Command requested for trasnfer to panel: %s\n", input);
-
-	input_copy = kstrdup(input, GFP_KERNEL);
-	if (!input_copy) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	input_dup = input_copy;
-	token = strsep(&input_copy, delim);
-	while (token) {
-		rc = kstrtoint(token, 0, &strtoint);
-		if (rc) {
-			SDE_ERROR("input buffer conversion failed\n");
-			goto end;
-		}
-
-		if (buf_size >= MAX_CMD_PAYLOAD_SIZE) {
-			SDE_ERROR("buffer size exceeding the limit %d\n",
-					MAX_CMD_PAYLOAD_SIZE);
-			goto end;
-		}
-		buffer[buf_size++] = (strtoint & 0xff);
-		token = strsep(&input_copy, delim);
-	}
-	SDE_DEBUG("command packet size in bytes: %u\n", buf_size);
-	if (!buf_size)
-		goto end;
-
-	mutex_lock(&c_conn->lock);
-	rc = c_conn->ops.cmd_transfer(&c_conn->base, c_conn->display, buffer,
-			buf_size);
-	c_conn->last_cmd_tx_sts = !rc ? true : false;
-	mutex_unlock(&c_conn->lock);
-
-	rc = count;
-end:
-	kfree(input_dup);
-	kfree(input);
-	return rc;
-}
-
-static const struct file_operations conn_cmd_tx_fops = {
-	.open =		_sde_debugfs_conn_cmd_tx_open,
-	.read =		_sde_debugfs_conn_cmd_tx_sts_read,
-	.write =	_sde_debugfs_conn_cmd_tx_write,
-};
-
-#ifdef CONFIG_DEBUG_FS
-/**
- * sde_connector_init_debugfs - initialize connector debugfs
- * @connector: Pointer to drm connector
- */
-static int sde_connector_init_debugfs(struct drm_connector *connector)
-{
-	struct sde_connector *sde_connector;
-	struct msm_display_info info;
-
-	if (!connector || !connector->debugfs_entry) {
-		SDE_ERROR("invalid connector\n");
-		return -EINVAL;
-	}
-
-	sde_connector = to_sde_connector(connector);
-
-	sde_connector_get_info(connector, &info);
-	if (sde_connector->ops.check_status &&
-		(info.capabilities & MSM_DISPLAY_ESD_ENABLED)) {
-		debugfs_create_u32("esd_status_interval", 0600,
-				connector->debugfs_entry,
-				&sde_connector->esd_status_interval);
-	}
-
-	if (!debugfs_create_bool("fb_kmap", 0600, connector->debugfs_entry,
-			&sde_connector->fb_kmap)) {
-		SDE_ERROR("failed to create connector fb_kmap\n");
-		return -ENOMEM;
-	}
-
-	if (sde_connector->ops.cmd_transfer) {
-		if (!debugfs_create_file("tx_cmd", 0600,
-			connector->debugfs_entry,
-			connector, &conn_cmd_tx_fops)) {
-			SDE_ERROR("failed to create connector cmd_tx\n");
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-#else
-static int sde_connector_init_debugfs(struct drm_connector *connector)
-{
-	return 0;
-}
-#endif
-
-static int sde_connector_late_register(struct drm_connector *connector)
-{
-	return sde_connector_init_debugfs(connector);
-}
-
-static void sde_connector_early_unregister(struct drm_connector *connector)
-{
-	/* debugfs under connector->debugfs are deleted by drm_debugfs */
-}
-
-static int sde_connector_fill_modes(struct drm_connector *connector,
-		uint32_t max_width, uint32_t max_height)
-{
-	int rc, mode_count = 0;
-	struct sde_connector *sde_conn = NULL;
-
-	sde_conn = to_sde_connector(connector);
-	if (!sde_conn) {
-		SDE_ERROR("invalid arguments\n");
-		return 0;
-	}
-
-	mode_count = drm_helper_probe_single_connector_modes(connector,
-			max_width, max_height);
-
-	rc = sde_connector_set_blob_data(connector,
-				connector->state,
-				CONNECTOR_PROP_MODE_INFO);
-	if (rc) {
-		SDE_ERROR_CONN(sde_conn,
-			"failed to setup mode info prop, rc = %d\n", rc);
-		return 0;
-	}
-
-	return mode_count;
-}
-
-static const struct drm_connector_funcs sde_connector_ops = {
-	.reset =                  sde_connector_atomic_reset,
-	.detect =                 sde_connector_detect,
-	.destroy =                sde_connector_destroy,
-	.fill_modes =             sde_connector_fill_modes,
-	.atomic_duplicate_state = sde_connector_atomic_duplicate_state,
-	.atomic_destroy_state =   sde_connector_atomic_destroy_state,
-	.atomic_set_property =    sde_connector_atomic_set_property,
-	.atomic_get_property =    sde_connector_atomic_get_property,
-	.late_register =          sde_connector_late_register,
-	.early_unregister =       sde_connector_early_unregister,
-};
-
-static int sde_connector_get_modes(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-	int mode_count = 0;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return 0;
-	}
-
-	c_conn = to_sde_connector(connector);
-	if (!c_conn->ops.get_modes) {
-		SDE_DEBUG("missing get_modes callback\n");
-		return 0;
-	}
-
-	mode_count = c_conn->ops.get_modes(connector, c_conn->display);
-	if (!mode_count) {
-		SDE_ERROR_CONN(c_conn, "failed to get modes\n");
-		return 0;
-	}
-
-	if (c_conn->hdr_capable)
-		sde_connector_update_hdr_props(connector);
-
-	return mode_count;
-}
-
-static enum drm_mode_status
-sde_connector_mode_valid(struct drm_connector *connector,
-		struct drm_display_mode *mode)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector || !mode) {
-		SDE_ERROR("invalid argument(s), conn %pK, mode %pK\n",
-				connector, mode);
-		return MODE_ERROR;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (c_conn->ops.mode_valid)
-		return c_conn->ops.mode_valid(connector, mode, c_conn->display);
-
-	/* assume all modes okay by default */
-	return MODE_OK;
-}
-
-static struct drm_encoder *
-sde_connector_best_encoder(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn = to_sde_connector(connector);
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return NULL;
-	}
-
-	/*
-	 * This is true for now, revisit this code when multiple encoders are
-	 * supported.
-	 */
-	return c_conn->encoder;
-}
-
-static struct drm_encoder *
-sde_connector_atomic_best_encoder(struct drm_connector *connector,
-		struct drm_connector_state *connector_state)
-{
-	struct sde_connector *c_conn;
-	struct drm_encoder *encoder = NULL;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return NULL;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (c_conn->ops.atomic_best_encoder)
-		encoder = c_conn->ops.atomic_best_encoder(connector,
-				c_conn->display, connector_state);
-
-	return encoder;
-}
-
-static int sde_connector_atomic_check(struct drm_connector *connector,
-		struct drm_connector_state *new_conn_state)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return 0;
-	}
-
-	c_conn = to_sde_connector(connector);
-
-	if (c_conn->ops.atomic_check)
-		return c_conn->ops.atomic_check(connector,
-				c_conn->display, new_conn_state);
-
-	return 0;
-}
-
-static void _sde_connector_report_panel_dead(struct sde_connector *conn)
-{
-	struct drm_event event;
-
-	if (!conn)
-		return;
-
-	/* Panel dead notification can come:
-	 * 1) ESD thread
-	 * 2) Commit thread (if TE stops coming)
-	 * So such case, avoid failure notification twice.
-	 */
-	if (conn->panel_dead)
-		return;
-
-	conn->panel_dead = true;
-	event.type = DRM_EVENT_PANEL_DEAD;
-	event.length = sizeof(bool);
-	msm_mode_object_event_notify(&conn->base.base,
-		conn->base.dev, &event, (u8 *)&conn->panel_dead);
-	sde_encoder_display_failure_notification(conn->encoder);
-	SDE_EVT32(SDE_EVTLOG_ERROR);
-	SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n",
-			conn->base.base.id, conn->encoder->base.id);
-}
-
-int sde_connector_esd_status(struct drm_connector *conn)
-{
-	struct sde_connector *sde_conn = NULL;
-	struct dsi_display *display;
-	int ret = 0;
-
-	if (!conn)
-		return ret;
-
-	sde_conn = to_sde_connector(conn);
-	if (!sde_conn || !sde_conn->ops.check_status)
-		return ret;
-
-	display = sde_conn->display;
-
-	/* protect this call with ESD status check call */
-	mutex_lock(&sde_conn->lock);
-	if (atomic_read(&(display->panel->esd_recovery_pending))) {
-		SDE_ERROR("ESD recovery already pending\n");
-		mutex_unlock(&sde_conn->lock);
-		return -ETIMEDOUT;
-	}
-	ret = sde_conn->ops.check_status(&sde_conn->base,
-					 sde_conn->display, true);
-	mutex_unlock(&sde_conn->lock);
-
-	if (ret <= 0) {
-		/* cancel if any pending esd work */
-		sde_connector_schedule_status_work(conn, false);
-		_sde_connector_report_panel_dead(sde_conn);
-		ret = -ETIMEDOUT;
-	} else {
-		SDE_DEBUG("Successfully received TE from panel\n");
-		ret = 0;
-	}
-	SDE_EVT32(ret);
-
-	return ret;
-}
-
-static void sde_connector_check_status_work(struct work_struct *work)
-{
-	struct sde_connector *conn;
-	int rc = 0;
-
-	conn = container_of(to_delayed_work(work),
-			struct sde_connector, status_work);
-	if (!conn) {
-		SDE_ERROR("not able to get connector object\n");
-		return;
-	}
-
-	mutex_lock(&conn->lock);
-	if (!conn->ops.check_status ||
-			(conn->dpms_mode != DRM_MODE_DPMS_ON)) {
-		SDE_DEBUG("dpms mode: %d\n", conn->dpms_mode);
-		mutex_unlock(&conn->lock);
-		return;
-	}
-
-	rc = conn->ops.check_status(&conn->base, conn->display, false);
-	mutex_unlock(&conn->lock);
-
-	if (rc > 0) {
-		u32 interval;
-
-		SDE_DEBUG("esd check status success conn_id: %d enc_id: %d\n",
-				conn->base.base.id, conn->encoder->base.id);
-
-		/* If debugfs property is not set then take default value */
-		interval = conn->esd_status_interval ?
-			conn->esd_status_interval : STATUS_CHECK_INTERVAL_MS;
-		schedule_delayed_work(&conn->status_work,
-			msecs_to_jiffies(interval));
-		return;
-	}
-
-	_sde_connector_report_panel_dead(conn);
-}
-
-static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
-	.get_modes =    sde_connector_get_modes,
-	.mode_valid =   sde_connector_mode_valid,
-	.best_encoder = sde_connector_best_encoder,
-};
-
-static const struct drm_connector_helper_funcs sde_connector_helper_ops_v2 = {
-	.get_modes =    sde_connector_get_modes,
-	.mode_valid =   sde_connector_mode_valid,
-	.best_encoder = sde_connector_best_encoder,
-	.atomic_best_encoder = sde_connector_atomic_best_encoder,
-	.atomic_check = sde_connector_atomic_check,
-};
-
-static int sde_connector_populate_mode_info(struct drm_connector *conn,
-	struct sde_kms_info *info)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_connector *c_conn = NULL;
-	struct drm_display_mode *mode;
-	struct msm_mode_info mode_info;
-	int rc = 0;
-
-	if (!conn || !conn->dev || !conn->dev->dev_private) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	priv = conn->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-
-	c_conn = to_sde_connector(conn);
-	if (!c_conn->ops.get_mode_info) {
-		SDE_ERROR_CONN(c_conn, "get_mode_info not defined\n");
-		return -EINVAL;
-	}
-
-	list_for_each_entry(mode, &conn->modes, head) {
-		int topology_idx = 0;
-
-		memset(&mode_info, 0, sizeof(mode_info));
-
-		rc = c_conn->ops.get_mode_info(&c_conn->base, mode, &mode_info,
-			sde_kms->catalog->max_mixer_width,
-			c_conn->display);
-		if (rc) {
-			SDE_ERROR_CONN(c_conn,
-				"failed to get mode info for mode %s\n",
-				mode->name);
-			continue;
-		}
-
-		sde_kms_info_add_keystr(info, "mode_name", mode->name);
-
-		topology_idx = (int)sde_rm_get_topology_name(
-							mode_info.topology);
-		if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
-			sde_kms_info_add_keystr(info, "topology",
-					e_topology_name[topology_idx].name);
-		} else {
-			SDE_ERROR_CONN(c_conn, "invalid topology\n");
-			continue;
-		}
-
-		sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
-			mode_info.mdp_transfer_time_us);
-
-		if (!mode_info.roi_caps.num_roi)
-			continue;
-
-		sde_kms_info_add_keyint(info, "partial_update_num_roi",
-			mode_info.roi_caps.num_roi);
-		sde_kms_info_add_keyint(info, "partial_update_xstart",
-			mode_info.roi_caps.align.xstart_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_walign",
-			mode_info.roi_caps.align.width_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_wmin",
-			mode_info.roi_caps.align.min_width);
-		sde_kms_info_add_keyint(info, "partial_update_ystart",
-			mode_info.roi_caps.align.ystart_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_halign",
-			mode_info.roi_caps.align.height_pix_align);
-		sde_kms_info_add_keyint(info, "partial_update_hmin",
-			mode_info.roi_caps.align.min_height);
-		sde_kms_info_add_keyint(info, "partial_update_roimerge",
-			mode_info.roi_caps.merge_rois);
-	}
-
-	return rc;
-}
-
-int sde_connector_set_blob_data(struct drm_connector *conn,
-		struct drm_connector_state *state,
-		enum msm_mdp_conn_property prop_id)
-{
-	struct sde_kms_info *info;
-	struct sde_connector *c_conn = NULL;
-	struct sde_connector_state *sde_conn_state = NULL;
-	struct msm_mode_info mode_info;
-	struct drm_property_blob **blob = NULL;
-	int rc = 0;
-
-	c_conn = to_sde_connector(conn);
-	if (!c_conn) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	sde_kms_info_reset(info);
-
-	switch (prop_id) {
-	case CONNECTOR_PROP_SDE_INFO:
-		memset(&mode_info, 0, sizeof(mode_info));
-
-		if (state) {
-			sde_conn_state = to_sde_connector_state(state);
-			memcpy(&mode_info, &sde_conn_state->mode_info,
-					sizeof(sde_conn_state->mode_info));
-		} else {
-			/**
-			 * connector state is assigned only on first
-			 * atomic_commit. But this function is allowed to be
-			 * invoked during probe/init sequence. So not throwing
-			 * an error.
-			 */
-			SDE_DEBUG_CONN(c_conn, "invalid connector state\n");
-		}
-
-		if (c_conn->ops.set_info_blob) {
-			rc = c_conn->ops.set_info_blob(conn, info,
-					c_conn->display, &mode_info);
-			if (rc) {
-				SDE_ERROR_CONN(c_conn,
-						"set_info_blob failed, %d\n",
-						rc);
-				goto exit;
-			}
-		}
-
-		blob = &c_conn->blob_caps;
-	break;
-	case CONNECTOR_PROP_MODE_INFO:
-		rc = sde_connector_populate_mode_info(conn, info);
-		if (rc) {
-			SDE_ERROR_CONN(c_conn,
-					"mode info population failed, %d\n",
-					rc);
-			goto exit;
-		}
-		blob = &c_conn->blob_mode_info;
-	break;
-	default:
-		SDE_ERROR_CONN(c_conn, "invalid prop_id: %d\n", prop_id);
-		goto exit;
-	}
-
-	msm_property_set_blob(&c_conn->property_info,
-			blob,
-			SDE_KMS_INFO_DATA(info),
-			SDE_KMS_INFO_DATALEN(info),
-			prop_id);
-exit:
-	kfree(info);
-
-	return rc;
-}
-
-static int _sde_connector_install_properties(struct drm_device *dev,
-	struct sde_kms *sde_kms, struct sde_connector *c_conn,
-	int connector_type, void *display,
-	struct msm_display_info *display_info)
-{
-	struct dsi_display *dsi_display;
-	int rc;
-
-	msm_property_install_blob(&c_conn->property_info, "capabilities",
-			DRM_MODE_PROP_IMMUTABLE, CONNECTOR_PROP_SDE_INFO);
-
-	rc = sde_connector_set_blob_data(&c_conn->base,
-			NULL, CONNECTOR_PROP_SDE_INFO);
-	if (rc) {
-		SDE_ERROR_CONN(c_conn,
-			"failed to setup connector info, rc = %d\n", rc);
-		return rc;
-	}
-
-	msm_property_install_blob(&c_conn->property_info, "mode_properties",
-			DRM_MODE_PROP_IMMUTABLE, CONNECTOR_PROP_MODE_INFO);
-
-	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
-		dsi_display = (struct dsi_display *)(display);
-		if (dsi_display && dsi_display->panel &&
-			dsi_display->panel->hdr_props.hdr_enabled == true) {
-			msm_property_install_blob(&c_conn->property_info,
-				"hdr_properties",
-				DRM_MODE_PROP_IMMUTABLE,
-				CONNECTOR_PROP_HDR_INFO);
-
-			msm_property_set_blob(&c_conn->property_info,
-				&c_conn->blob_hdr,
-				&dsi_display->panel->hdr_props,
-				sizeof(dsi_display->panel->hdr_props),
-				CONNECTOR_PROP_HDR_INFO);
-		}
-	}
-
-	msm_property_install_volatile_range(
-			&c_conn->property_info, "sde_drm_roi_v1", 0x0,
-			0, ~0, 0, CONNECTOR_PROP_ROI_V1);
-
-	/* install PP_DITHER properties */
-	_sde_connector_install_dither_property(dev, sde_kms, c_conn);
-
-	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
-		struct drm_msm_ext_hdr_properties hdr = {0};
-
-		msm_property_install_blob(&c_conn->property_info,
-				"ext_hdr_properties",
-				DRM_MODE_PROP_IMMUTABLE,
-				CONNECTOR_PROP_EXT_HDR_INFO);
-
-		/* set default values to avoid reading uninitialized data */
-		msm_property_set_blob(&c_conn->property_info,
-			      &c_conn->blob_ext_hdr,
-			      &hdr,
-			      sizeof(hdr),
-			      CONNECTOR_PROP_EXT_HDR_INFO);
-	}
-
-	msm_property_install_volatile_range(&c_conn->property_info,
-		"hdr_metadata", 0x0, 0, ~0, 0, CONNECTOR_PROP_HDR_METADATA);
-
-	msm_property_install_volatile_range(&c_conn->property_info,
-		"RETIRE_FENCE", 0x0, 0, ~0, 0, CONNECTOR_PROP_RETIRE_FENCE);
-
-	msm_property_install_range(&c_conn->property_info, "autorefresh",
-			0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0,
-			CONNECTOR_PROP_AUTOREFRESH);
-
-	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
-		if (sde_kms->catalog->has_qsync && display_info->qsync_min_fps)
-			msm_property_install_enum(&c_conn->property_info,
-					"qsync_mode", 0, 0, e_qsync_mode,
-					ARRAY_SIZE(e_qsync_mode),
-					CONNECTOR_PROP_QSYNC_MODE);
-
-		if (display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-			msm_property_install_enum(&c_conn->property_info,
-				"frame_trigger_mode", 0, 0,
-				e_frame_trigger_mode,
-				ARRAY_SIZE(e_frame_trigger_mode),
-				CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
-	}
-
-	msm_property_install_range(&c_conn->property_info, "bl_scale",
-		0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
-		CONNECTOR_PROP_BL_SCALE);
-
-	msm_property_install_range(&c_conn->property_info, "sv_bl_scale",
-		0x0, 0, MAX_SV_BL_SCALE_LEVEL, MAX_SV_BL_SCALE_LEVEL,
-		CONNECTOR_PROP_SV_BL_SCALE);
-
-	c_conn->bl_scale_dirty = false;
-	c_conn->bl_scale = MAX_BL_SCALE_LEVEL;
-	c_conn->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
-
-	/* enum/bitmask properties */
-	msm_property_install_enum(&c_conn->property_info, "topology_name",
-			DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
-			ARRAY_SIZE(e_topology_name),
-			CONNECTOR_PROP_TOPOLOGY_NAME);
-	msm_property_install_enum(&c_conn->property_info, "topology_control",
-			0, 1, e_topology_control,
-			ARRAY_SIZE(e_topology_control),
-			CONNECTOR_PROP_TOPOLOGY_CONTROL);
-	msm_property_install_enum(&c_conn->property_info, "LP",
-			0, 0, e_power_mode,
-			ARRAY_SIZE(e_power_mode),
-			CONNECTOR_PROP_LP);
-
-	return 0;
-}
-
-struct drm_connector *sde_connector_init(struct drm_device *dev,
-		struct drm_encoder *encoder,
-		struct drm_panel *panel,
-		void *display,
-		const struct sde_connector_ops *ops,
-		int connector_poll,
-		int connector_type)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_connector *c_conn = NULL;
-	struct msm_display_info display_info;
-	int rc;
-
-	if (!dev || !dev->dev_private || !encoder) {
-		SDE_ERROR("invalid argument(s), dev %pK, enc %pK\n",
-				dev, encoder);
-		return ERR_PTR(-EINVAL);
-	}
-
-	priv = dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms reference\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	c_conn = kzalloc(sizeof(*c_conn), GFP_KERNEL);
-	if (!c_conn) {
-		SDE_ERROR("failed to alloc sde connector\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	memset(&display_info, 0, sizeof(display_info));
-
-	rc = drm_connector_init(dev,
-			&c_conn->base,
-			&sde_connector_ops,
-			connector_type);
-	if (rc)
-		goto error_free_conn;
-
-	spin_lock_init(&c_conn->event_lock);
-
-	c_conn->connector_type = connector_type;
-	c_conn->encoder = encoder;
-	c_conn->panel = panel;
-	c_conn->display = display;
-
-	c_conn->dpms_mode = DRM_MODE_DPMS_ON;
-	c_conn->lp_mode = 0;
-	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
-
-	sde_kms = to_sde_kms(priv->kms);
-	if (sde_kms->vbif[VBIF_NRT]) {
-		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
-			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
-			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
-	} else {
-		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
-			sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
-		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
-			sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
-	}
-
-	if (ops)
-		c_conn->ops = *ops;
-
-	if (ops && ops->atomic_best_encoder && ops->atomic_check)
-		c_conn->base.helper_private = &sde_connector_helper_ops_v2;
-	else
-		c_conn->base.helper_private = &sde_connector_helper_ops;
-
-	c_conn->base.polled = connector_poll;
-	c_conn->base.interlace_allowed = 0;
-	c_conn->base.doublescan_allowed = 0;
-
-	snprintf(c_conn->name,
-			SDE_CONNECTOR_NAME_SIZE,
-			"conn%u",
-			c_conn->base.base.id);
-
-	c_conn->retire_fence = sde_fence_init(c_conn->name,
-			c_conn->base.base.id);
-	if (IS_ERR(c_conn->retire_fence)) {
-		rc = PTR_ERR(c_conn->retire_fence);
-		SDE_ERROR("failed to init fence, %d\n", rc);
-		goto error_cleanup_conn;
-	}
-
-	mutex_init(&c_conn->lock);
-
-	rc = drm_connector_attach_encoder(&c_conn->base, encoder);
-	if (rc) {
-		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
-		goto error_cleanup_fence;
-	}
-
-	rc = sde_backlight_setup(c_conn, dev);
-	if (rc) {
-		SDE_ERROR("failed to setup backlight, rc=%d\n", rc);
-		goto error_cleanup_fence;
-	}
-
-	/* create properties */
-	msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
-			priv->conn_property, c_conn->property_data,
-			CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
-			sizeof(struct sde_connector_state));
-
-	if (c_conn->ops.post_init) {
-		rc = c_conn->ops.post_init(&c_conn->base, display);
-		if (rc) {
-			SDE_ERROR("post-init failed, %d\n", rc);
-			goto error_cleanup_fence;
-		}
-	}
-
-	rc = sde_connector_get_info(&c_conn->base, &display_info);
-	if (!rc && (connector_type == DRM_MODE_CONNECTOR_DSI) &&
-			(display_info.capabilities & MSM_DISPLAY_CAP_VID_MODE))
-		sde_connector_register_event(&c_conn->base,
-			SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
-			sde_connector_handle_disp_recovery,
-			c_conn);
-
-	rc = _sde_connector_install_properties(dev, sde_kms, c_conn,
-		connector_type, display, &display_info);
-	if (rc)
-		goto error_cleanup_fence;
-
-	rc = msm_property_install_get_status(&c_conn->property_info);
-	if (rc) {
-		SDE_ERROR("failed to create one or more properties\n");
-		goto error_destroy_property;
-	}
-
-	if (display_info.is_primary)
-		_sde_connector_primary_preference(c_conn, sde_kms);
-
-	SDE_DEBUG("connector %d attach encoder %d\n",
-			c_conn->base.base.id, encoder->base.id);
-
-	INIT_DELAYED_WORK(&c_conn->status_work,
-			sde_connector_check_status_work);
-
-	return &c_conn->base;
-
-error_destroy_property:
-	if (c_conn->blob_caps)
-		drm_property_blob_put(c_conn->blob_caps);
-	if (c_conn->blob_hdr)
-		drm_property_blob_put(c_conn->blob_hdr);
-	if (c_conn->blob_dither)
-		drm_property_blob_put(c_conn->blob_dither);
-	if (c_conn->blob_mode_info)
-		drm_property_blob_put(c_conn->blob_mode_info);
-	if (c_conn->blob_ext_hdr)
-		drm_property_blob_put(c_conn->blob_ext_hdr);
-
-	msm_property_destroy(&c_conn->property_info);
-error_cleanup_fence:
-	mutex_destroy(&c_conn->lock);
-	sde_fence_deinit(c_conn->retire_fence);
-error_cleanup_conn:
-	drm_connector_cleanup(&c_conn->base);
-error_free_conn:
-	kfree(c_conn);
-
-	return ERR_PTR(rc);
-}
-
-static int _sde_conn_hw_recovery_handler(
-		struct drm_connector *connector, bool val)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return -EINVAL;
-	}
-	c_conn = to_sde_connector(connector);
-
-	if (c_conn->encoder)
-		sde_encoder_recovery_events_handler(c_conn->encoder, val);
-
-	return 0;
-}
-
-int sde_connector_register_custom_event(struct sde_kms *kms,
-		struct drm_connector *conn_drm, u32 event, bool val)
-{
-	int ret = -EINVAL;
-
-	switch (event) {
-	case DRM_EVENT_SYS_BACKLIGHT:
-		ret = 0;
-		break;
-	case DRM_EVENT_PANEL_DEAD:
-		ret = 0;
-		break;
-	case DRM_EVENT_SDE_HW_RECOVERY:
-		ret = _sde_conn_hw_recovery_handler(conn_drm, val);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-int sde_connector_event_notify(struct drm_connector *connector, uint32_t type,
-		uint32_t len, uint32_t val)
-{
-	struct drm_event event;
-	int ret;
-
-	if (!connector) {
-		SDE_ERROR("invalid connector\n");
-		return -EINVAL;
-	}
-
-	switch (type) {
-	case DRM_EVENT_SYS_BACKLIGHT:
-	case DRM_EVENT_PANEL_DEAD:
-	case DRM_EVENT_SDE_HW_RECOVERY:
-		ret = 0;
-		break;
-	default:
-		SDE_ERROR("connector %d, Unsupported event %d\n",
-				connector->base.id, type);
-		return -EINVAL;
-	}
-
-	event.type = type;
-	event.length = len;
-	msm_mode_object_event_notify(&connector->base, connector->dev, &event,
-			(u8 *)&val);
-
-	SDE_EVT32(connector->base.id, type, len, val);
-	SDE_DEBUG("connector:%d hw recovery event(%d) value (%d) notified\n",
-			connector->base.id, type, val);
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
deleted file mode 100644
index f83cc12..0000000
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ /dev/null
@@ -1,908 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_CONNECTOR_H_
-#define _SDE_CONNECTOR_H_
-
-#include <uapi/drm/msm_drm_pp.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_panel.h>
-
-#include "msm_drv.h"
-#include "msm_prop.h"
-#include "sde_kms.h"
-#include "sde_fence.h"
-
-#define SDE_CONNECTOR_NAME_SIZE	16
-#define SDE_CONNECTOR_DHDR_MEMPOOL_MAX_SIZE	SZ_32
-
-struct sde_connector;
-struct sde_connector_state;
-
-/**
- * struct sde_connector_ops - callback functions for generic sde connector
- * Individual callbacks documented below.
- */
-struct sde_connector_ops {
-	/**
-	 * post_init - perform additional initialization steps
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * Returns: Zero on success
-	 */
-	int (*post_init)(struct drm_connector *connector,
-			void *display);
-
-	/**
-	 * set_info_blob - initialize given info blob
-	 * @connector: Pointer to drm connector structure
-	 * @info: Pointer to sde connector info structure
-	 * @display: Pointer to private display handle
-	 * @mode_info: Pointer to mode info structure
-	 * Returns: Zero on success
-	 */
-	int (*set_info_blob)(struct drm_connector *connector,
-			void *info,
-			void *display,
-			struct msm_mode_info *mode_info);
-
-	/**
-	 * detect - determine if connector is connected
-	 * @connector: Pointer to drm connector structure
-	 * @force: Force detect setting from drm framework
-	 * @display: Pointer to private display handle
-	 * Returns: Connector 'is connected' status
-	 */
-	enum drm_connector_status (*detect)(struct drm_connector *connector,
-			bool force,
-			void *display);
-
-	/**
-	 * get_modes - add drm modes via drm_mode_probed_add()
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * Returns: Number of modes added
-	 */
-	int (*get_modes)(struct drm_connector *connector,
-			void *display);
-
-	/**
-	 * update_pps - update pps command for the display panel
-	 * @connector: Pointer to drm connector structure
-	 * @pps_cmd: Pointer to pps command
-	 * @display: Pointer to private display handle
-	 * Returns: Zero on success
-	 */
-	int (*update_pps)(struct drm_connector *connector,
-			char *pps_cmd, void *display);
-
-	/**
-	 * mode_valid - determine if specified mode is valid
-	 * @connector: Pointer to drm connector structure
-	 * @mode: Pointer to drm mode structure
-	 * @display: Pointer to private display handle
-	 * Returns: Validity status for specified mode
-	 */
-	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
-			struct drm_display_mode *mode,
-			void *display);
-
-	/**
-	 * set_property - set property value
-	 * @connector: Pointer to drm connector structure
-	 * @state: Pointer to drm connector state structure
-	 * @property_index: DRM property index
-	 * @value: Incoming property value
-	 * @display: Pointer to private display structure
-	 * Returns: Zero on success
-	 */
-	int (*set_property)(struct drm_connector *connector,
-			struct drm_connector_state *state,
-			int property_index,
-			uint64_t value,
-			void *display);
-
-	/**
-	 * get_property - get property value
-	 * @connector: Pointer to drm connector structure
-	 * @state: Pointer to drm connector state structure
-	 * @property_index: DRM property index
-	 * @value: Pointer to variable for accepting property value
-	 * @display: Pointer to private display structure
-	 * Returns: Zero on success
-	 */
-	int (*get_property)(struct drm_connector *connector,
-			struct drm_connector_state *state,
-			int property_index,
-			uint64_t *value,
-			void *display);
-
-	/**
-	 * get_info - get display information
-	 * @connector: Pointer to drm connector structure
-	 * @info: Pointer to msm display info structure
-	 * @display: Pointer to private display structure
-	 * Returns: Zero on success
-	 */
-	int (*get_info)(struct drm_connector *connector,
-			struct msm_display_info *info, void *display);
-
-	/**
-	 * get_mode_info - retrieve mode information
-	 * @connector: Pointer to drm connector structure
-	 * @drm_mode: Display mode set for the display
-	 * @mode_info: Out parameter. information of the display mode
-	 * @max_mixer_width: max width supported by HW layer mixer
-	 * @display: Pointer to private display structure
-	 * Returns: Zero on success
-	 */
-	int (*get_mode_info)(struct drm_connector *connector,
-			const struct drm_display_mode *drm_mode,
-			struct msm_mode_info *mode_info,
-			u32 max_mixer_width, void *display);
-
-	/**
-	 * enable_event - notify display of event registration/unregistration
-	 * @connector: Pointer to drm connector structure
-	 * @event_idx: SDE connector event index
-	 * @enable: Whether the event is being enabled/disabled
-	 * @display: Pointer to private display structure
-	 */
-	void (*enable_event)(struct drm_connector *connector,
-			uint32_t event_idx, bool enable, void *display);
-
-	/**
-	 * set_backlight - set backlight level
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display structure
-	 * @bl_lvel: Backlight level
-	 */
-	int (*set_backlight)(struct drm_connector *connector,
-			void *display, u32 bl_lvl);
-
-	/**
-	 * soft_reset - perform a soft reset on the connector
-	 * @display: Pointer to private display structure
-	 * Return: Zero on success, -ERROR otherwise
-	 */
-	int (*soft_reset)(void *display);
-
-	/**
-	 * pre_kickoff - trigger display to program kickoff-time features
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display structure
-	 * @params: Parameter bundle of connector-stored information for
-	 *	kickoff-time programming into the display
-	 * Returns: Zero on success
-	 */
-	int (*pre_kickoff)(struct drm_connector *connector,
-			void *display,
-			struct msm_display_kickoff_params *params);
-
-	/**
-	 * clk_ctrl - perform clk enable/disable on the connector
-	 * @handle: Pointer to clk handle
-	 * @type: Type of clks
-	 * @enable: State of clks
-	 */
-	int (*clk_ctrl)(void *handle, u32 type, u32 state);
-
-	/**
-	 * set_power - update dpms setting
-	 * @connector: Pointer to drm connector structure
-	 * @power_mode: One of the following,
-	 *              SDE_MODE_DPMS_ON
-	 *              SDE_MODE_DPMS_LP1
-	 *              SDE_MODE_DPMS_LP2
-	 *              SDE_MODE_DPMS_OFF
-	 * @display: Pointer to private display structure
-	 * Returns: Zero on success
-	 */
-	int (*set_power)(struct drm_connector *connector,
-			int power_mode, void *display);
-
-	/**
-	 * get_dst_format - get dst_format from display
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * Returns: dst_format of display
-	 */
-	enum dsi_pixel_format (*get_dst_format)(struct drm_connector *connector,
-			void *display);
-
-	/**
-	 * post_kickoff - display to program post kickoff-time features
-	 * @connector: Pointer to drm connector structure
-	 * Returns: Zero on success
-	 */
-	int (*post_kickoff)(struct drm_connector *connector);
-
-	/**
-	 * post_open - calls connector to process post open functionalities
-	 * @display: Pointer to private display structure
-	 */
-	void (*post_open)(struct drm_connector *connector, void *display);
-
-	/**
-	 * check_status - check status of connected display panel
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * @te_check_override: Whether check TE from panel or default check
-	 * Returns: positive value for success, negetive or zero for failure
-	 */
-	int (*check_status)(struct drm_connector *connector, void *display,
-					bool te_check_override);
-
-	/**
-	 * cmd_transfer - Transfer command to the connected display panel
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * @cmd_buf: Command buffer
-	 * @cmd_buf_len: Command buffer length in bytes
-	 * Returns: Zero for success, negetive for failure
-	 */
-	int (*cmd_transfer)(struct drm_connector *connector,
-			void *display, const char *cmd_buf,
-			u32 cmd_buf_len);
-
-	/**
-	 * config_hdr - configure HDR
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * @c_state: Pointer to connector state
-	 * Returns: Zero on success, negative error code for failures
-	 */
-	int (*config_hdr)(struct drm_connector *connector, void *display,
-		struct sde_connector_state *c_state);
-
-	/**
-	 * atomic_best_encoder - atomic best encoder selection for connector
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * @c_state: Pointer to connector state
-	 * Returns: valid drm_encoder for success
-	 */
-	struct drm_encoder *(*atomic_best_encoder)(
-			struct drm_connector *connector,
-			void *display,
-			struct drm_connector_state *c_state);
-
-	/**
-	 * atomic_check - atomic check handling for connector
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * @c_state: Pointer to connector state
-	 * Returns: valid drm_encoder for success
-	 */
-	int (*atomic_check)(struct drm_connector *connector,
-			void *display,
-			struct drm_connector_state *c_state);
-
-	/**
-	 * pre_destroy - handle pre destroy operations for the connector
-	 * @connector: Pointer to drm connector structure
-	 * @display: Pointer to private display handle
-	 * Returns: Zero on success, negative error code for failures
-	 */
-	void (*pre_destroy)(struct drm_connector *connector, void *display);
-
-	/**
-	 * cont_splash_config - initialize splash resources
-	 * @display: Pointer to private display handle
-	 * Returns: zero for success, negetive for failure
-	 */
-	int (*cont_splash_config)(void *display);
-
-	/**
-	 * get_panel_vfp - returns original panel vfp
-	 * @display: Pointer to private display handle
-	 * @h_active: width
-	 * @v_active: height
-	 * Returns: v_front_porch on success error-code on failure
-	 */
-	int (*get_panel_vfp)(void *display, int h_active, int v_active);
-
-	/**
-	 * get_default_lm - returns default number of lm
-	 * @display: Pointer to private display handle
-	 * @num_lm: Pointer to number of lms to be populated
-	 * Returns: zero for success, negetive for failure
-	 */
-	int (*get_default_lms)(void *display, u32 *num_lm);
-};
-
-/**
- * enum sde_connector_events - list of recognized connector events
- */
-enum sde_connector_events {
-	SDE_CONN_EVENT_VID_DONE, /* video mode frame done */
-	SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
-	SDE_CONN_EVENT_VID_FIFO_OVERFLOW, /* dsi fifo overflow error */
-	SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW, /* dsi fifo underflow error */
-	SDE_CONN_EVENT_COUNT,
-};
-
-/**
- * struct sde_connector_evt - local event registration entry structure
- * @cb_func: Pointer to desired callback function
- * @usr: User pointer to pass to callback on event trigger
- * Returns: Zero success, negetive for failure
- */
-struct sde_connector_evt {
-	int (*cb_func)(uint32_t event_idx,
-			uint32_t instance_idx, void *usr,
-			uint32_t data0, uint32_t data1,
-			uint32_t data2, uint32_t data3);
-	void *usr;
-};
-
-struct sde_connector_dyn_hdr_metadata {
-	u8 dynamic_hdr_payload[SDE_CONNECTOR_DHDR_MEMPOOL_MAX_SIZE];
-	int dynamic_hdr_payload_size;
-	bool dynamic_hdr_update;
-};
-
-/**
- * struct sde_connector - local sde connector structure
- * @base: Base drm connector structure
- * @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
- * @encoder: Pointer to preferred drm encoder
- * @panel: Pointer to drm panel, if present
- * @display: Pointer to private display data structure
- * @drv_panel: Pointer to interface driver's panel module, if present
- * @mst_port: Pointer to mst port, if present
- * @mmu_secure: MMU id for secure buffers
- * @mmu_unsecure: MMU id for unsecure buffers
- * @name: ASCII name of connector
- * @lock: Mutex lock object for this structure
- * @retire_fence: Retire fence context reference
- * @ops: Local callback function pointer table
- * @dpms_mode: DPMS property setting from user space
- * @lp_mode: LP property setting from user space
- * @last_panel_power_mode: Last consolidated dpms/lp mode setting
- * @property_info: Private structure for generic property handling
- * @property_data: Array of private data for generic property handling
- * @blob_caps: Pointer to blob structure for 'capabilities' property
- * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
- * @blob_ext_hdr: Pointer to blob structure for 'ext_hdr_properties' property
- * @blob_dither: Pointer to blob structure for default dither config
- * @blob_mode_info: Pointer to blob structure for mode info
- * @fb_kmap: true if kernel mapping of framebuffer is requested
- * @event_table: Array of registered events
- * @event_lock: Lock object for event_table
- * @bl_device: backlight device node
- * @status_work: work object to perform status checks
- * @esd_status_interval: variable to change ESD check interval in millisec
- * @panel_dead: Flag to indicate if panel has gone bad
- * @esd_status_check: Flag to indicate if ESD thread is scheduled or not
- * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
- * @bl_scale: BL scale value for ABA feature
- * @bl_scale_sv: BL scale value for sunlight visibility feature
- * @unset_bl_level: BL level that needs to be set later
- * @allow_bl_update: Flag to indicate if BL update is allowed currently or not
- * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
- * @qsync_updated: Qsync settings were updated
- * last_cmd_tx_sts: status of the last command transfer
- * @hdr_capable: external hdr support present
- * @core_clk_rate: MDP core clk rate used for dynamic HDR packet calculation
- */
-struct sde_connector {
-	struct drm_connector base;
-
-	int connector_type;
-
-	struct drm_encoder *encoder;
-	struct drm_panel *panel;
-	void *display;
-	void *drv_panel;
-	void *mst_port;
-
-	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
-
-	char name[SDE_CONNECTOR_NAME_SIZE];
-
-	struct mutex lock;
-	struct sde_fence_context *retire_fence;
-	struct sde_connector_ops ops;
-	int dpms_mode;
-	int lp_mode;
-	int last_panel_power_mode;
-
-	struct msm_property_info property_info;
-	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
-	struct drm_property_blob *blob_caps;
-	struct drm_property_blob *blob_hdr;
-	struct drm_property_blob *blob_ext_hdr;
-	struct drm_property_blob *blob_dither;
-	struct drm_property_blob *blob_mode_info;
-
-	bool fb_kmap;
-	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
-	spinlock_t event_lock;
-
-	struct backlight_device *bl_device;
-	struct delayed_work status_work;
-	u32 esd_status_interval;
-	bool panel_dead;
-	bool esd_status_check;
-
-	bool bl_scale_dirty;
-	u32 bl_scale;
-	u32 bl_scale_sv;
-	u32 unset_bl_level;
-	bool allow_bl_update;
-
-	u32 qsync_mode;
-	bool qsync_updated;
-
-	bool last_cmd_tx_sts;
-	bool hdr_capable;
-};
-
-/**
- * to_sde_connector - convert drm_connector pointer to sde connector pointer
- * @X: Pointer to drm_connector structure
- * Returns: Pointer to sde_connector structure
- */
-#define to_sde_connector(x)     container_of((x), struct sde_connector, base)
-
-/**
- * sde_connector_get_display - get sde connector's private display pointer
- * @C: Pointer to drm connector structure
- * Returns: Pointer to associated private display structure
- */
-#define sde_connector_get_display(C) \
-	((C) ? to_sde_connector((C))->display : NULL)
-
-/**
- * sde_connector_get_panel - get sde connector's private panel pointer
- * @C: Pointer to drm connector structure
- * Returns: Pointer to associated private display structure
- */
-#define sde_connector_get_panel(C) \
-	((C) ? to_sde_connector((C))->panel : NULL)
-
-/**
- * sde_connector_get_encoder - get sde connector's private encoder pointer
- * @C: Pointer to drm connector structure
- * Returns: Pointer to associated private encoder structure
- */
-#define sde_connector_get_encoder(C) \
-	((C) ? to_sde_connector((C))->encoder : NULL)
-
-/**
- * sde_connector_qsync_updated - indicates if connector updated qsync
- * @C: Pointer to drm connector structure
- * Returns: True if qsync is updated; false otherwise
- */
-#define sde_connector_is_qsync_updated(C) \
-	((C) ? to_sde_connector((C))->qsync_updated : 0)
-
-/**
- * sde_connector_get_qsync_mode - get sde connector's qsync_mode
- * @C: Pointer to drm connector structure
- * Returns: Current cached qsync_mode for given connector
- */
-#define sde_connector_get_qsync_mode(C) \
-	((C) ? to_sde_connector((C))->qsync_mode : 0)
-
-/**
- * sde_connector_get_propinfo - get sde connector's property info pointer
- * @C: Pointer to drm connector structure
- * Returns: Pointer to associated private property info structure
- */
-#define sde_connector_get_propinfo(C) \
-	((C) ? &to_sde_connector((C))->property_info : NULL)
-
-/**
- * struct sde_connector_state - private connector status structure
- * @base: Base drm connector structure
- * @out_fb: Pointer to output frame buffer, if applicable
- * @property_state: Local storage for msm_prop properties
- * @property_values: Local cache of current connector property values
- * @rois: Regions of interest structure for mapping CRTC to Connector output
- * @property_blobs: blob properties
- * @mode_info: local copy of msm_mode_info struct
- * @hdr_meta: HDR metadata info passed from userspace
- * @dyn_hdr_meta: Dynamic HDR metadata payload and state tracking
- * @old_topology_name: topology of previous atomic state. remove this in later
- *	kernel versions which provide drm_atomic_state old_state pointers
- */
-struct sde_connector_state {
-	struct drm_connector_state base;
-	struct drm_framebuffer *out_fb;
-	struct msm_property_state property_state;
-	struct msm_property_value property_values[CONNECTOR_PROP_COUNT];
-
-	struct msm_roi_list rois;
-	struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
-	struct msm_mode_info mode_info;
-	struct drm_msm_ext_hdr_metadata hdr_meta;
-	struct sde_connector_dyn_hdr_metadata dyn_hdr_meta;
-	enum sde_rm_topology_name old_topology_name;
-};
-
-/**
- * to_sde_connector_state - convert drm_connector_state pointer to
- *                          sde connector state pointer
- * @X: Pointer to drm_connector_state structure
- * Returns: Pointer to sde_connector_state structure
- */
-#define to_sde_connector_state(x) \
-	container_of((x), struct sde_connector_state, base)
-
-/**
- * sde_connector_get_property - query integer value of connector property
- * @S: Pointer to drm connector state
- * @X: Property index, from enum msm_mdp_connector_property
- * Returns: Integer value of requested property
- */
-#define sde_connector_get_property(S, X) \
-	((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
-	 (to_sde_connector_state((S))->property_values[(X)].value) : 0)
-
-/**
- * sde_connector_get_property_state - retrieve property state cache
- * @S: Pointer to drm connector state
- * Returns: Pointer to local property state structure
- */
-#define sde_connector_get_property_state(S) \
-	((S) ? (&to_sde_connector_state((S))->property_state) : NULL)
-
-/**
- * sde_connector_get_out_fb - query out_fb value from sde connector state
- * @S: Pointer to drm connector state
- * Returns: Output fb associated with specified connector state
- */
-#define sde_connector_get_out_fb(S) \
-	((S) ? to_sde_connector_state((S))->out_fb : 0)
-
-/**
- * sde_connector_get_topology_name - helper accessor to retrieve topology_name
- * @connector: pointer to drm connector
- * Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
- */
-static inline uint64_t sde_connector_get_topology_name(
-		struct drm_connector *connector)
-{
-	if (!connector || !connector->state)
-		return 0;
-	return sde_connector_get_property(connector->state,
-			CONNECTOR_PROP_TOPOLOGY_NAME);
-}
-
-/**
- * sde_connector_get_old_topology_name - helper accessor to retrieve
- *	topology_name for the previous mode
- * @connector: pointer to drm connector state
- * Returns: cached value of the previous topology, or SDE_RM_TOPOLOGY_NONE
- */
-static inline enum sde_rm_topology_name sde_connector_get_old_topology_name(
-		struct drm_connector_state *state)
-{
-	struct sde_connector_state *c_state = to_sde_connector_state(state);
-
-	if (!state)
-		return SDE_RM_TOPOLOGY_NONE;
-
-	return c_state->old_topology_name;
-}
-
-/**
- * sde_connector_set_old_topology_name - helper to cache value of previous
- *	mode's topology
- * @connector: pointer to drm connector state
- * Returns: 0 on success, negative errno on failure
- */
-static inline int sde_connector_set_old_topology_name(
-		struct drm_connector_state *state,
-		enum sde_rm_topology_name top)
-{
-	struct sde_connector_state *c_state = to_sde_connector_state(state);
-
-	if (!state)
-		return -EINVAL;
-
-	c_state->old_topology_name = top;
-
-	return 0;
-}
-
-/**
- * sde_connector_get_lp - helper accessor to retrieve LP state
- * @connector: pointer to drm connector
- * Returns: value of the CONNECTOR_PROP_LP property or 0
- */
-static inline uint64_t sde_connector_get_lp(
-		struct drm_connector *connector)
-{
-	if (!connector || !connector->state)
-		return 0;
-	return sde_connector_get_property(connector->state,
-			CONNECTOR_PROP_LP);
-}
-
-/**
- * sde_connector_set_property_for_commit - add property set to atomic state
- *	Add a connector state property update for the specified property index
- *	to the atomic state in preparation for a drm_atomic_commit.
- * @connector: Pointer to drm connector
- * @atomic_state: Pointer to DRM atomic state structure for commit
- * @property_idx: Connector property index
- * @value: Updated property value
- * Returns: Zero on success
- */
-int sde_connector_set_property_for_commit(struct drm_connector *connector,
-		struct drm_atomic_state *atomic_state,
-		uint32_t property_idx, uint64_t value);
-
-/**
- * sde_connector_init - create drm connector object for a given display
- * @dev: Pointer to drm device struct
- * @encoder: Pointer to associated encoder
- * @panel: Pointer to associated panel, can be NULL
- * @display: Pointer to associated display object
- * @ops: Pointer to callback operations function table
- * @connector_poll: Set to appropriate DRM_CONNECTOR_POLL_ setting
- * @connector_type: Set to appropriate DRM_MODE_CONNECTOR_ type
- * Returns: Pointer to newly created drm connector struct
- */
-struct drm_connector *sde_connector_init(struct drm_device *dev,
-		struct drm_encoder *encoder,
-		struct drm_panel *panel,
-		void *display,
-		const struct sde_connector_ops *ops,
-		int connector_poll,
-		int connector_type);
-
-/**
- * sde_connector_prepare_fence - prepare fence support for current commit
- * @connector: Pointer to drm connector object
- */
-void sde_connector_prepare_fence(struct drm_connector *connector);
-
-/**
- * sde_connector_complete_commit - signal completion of current commit
- * @connector: Pointer to drm connector object
- * @ts: timestamp to be updated in the fence signalling
- * @fence_event: enum value to indicate nature of fence event
- */
-void sde_connector_complete_commit(struct drm_connector *connector,
-		ktime_t ts, enum sde_fence_event fence_event);
-
-/**
- * sde_connector_commit_reset - reset the completion signal
- * @connector: Pointer to drm connector object
- * @ts: timestamp to be updated in the fence signalling
- */
-void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts);
-
-/**
- * sde_connector_get_info - query display specific information
- * @connector: Pointer to drm connector object
- * @info: Pointer to msm display information structure
- * Returns: Zero on success
- */
-int sde_connector_get_info(struct drm_connector *connector,
-		struct msm_display_info *info);
-
-/**
- * sde_connector_clk_ctrl - enables/disables the connector clks
- * @connector: Pointer to drm connector object
- * @enable: true/false to enable/disable
- * Returns: Zero on success
- */
-int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
-
-/**
- * sde_connector_get_dpms - query dpms setting
- * @connector: Pointer to drm connector structure
- * Returns: Current DPMS setting for connector
- */
-int sde_connector_get_dpms(struct drm_connector *connector);
-
-/**
- * sde_connector_set_qsync_params - set status of qsync_updated for current
- *                                  frame and update the cached qsync_mode
- * @connector: pointer to drm connector
- *
- * This must be called after the connector set_property values are applied,
- * and before sde_connector's qsync_updated or qsync_mode fields are accessed.
- * It must only be called once per frame update for the given connector.
- */
-void sde_connector_set_qsync_params(struct drm_connector *connector);
-
-/**
-* sde_connector_get_dyn_hdr_meta - returns pointer to connector state's dynamic
-*				   HDR metadata info
-* @connector: pointer to drm connector
-*/
-
-struct sde_connector_dyn_hdr_metadata *sde_connector_get_dyn_hdr_meta(
-		struct drm_connector *connector);
-
-/**
- * sde_connector_trigger_event - indicate that an event has occurred
- *	Any callbacks that have been registered against this event will
- *	be called from the same thread context.
- * @connector: Pointer to drm connector structure
- * @event_idx: Index of event to trigger
- * @instance_idx: Event-specific "instance index" to pass to callback
- * @data0: Event-specific "data" to pass to callback
- * @data1: Event-specific "data" to pass to callback
- * @data2: Event-specific "data" to pass to callback
- * @data3: Event-specific "data" to pass to callback
- * Returns: Zero on success
- */
-int sde_connector_trigger_event(void *drm_connector,
-		uint32_t event_idx, uint32_t instance_idx,
-		uint32_t data0, uint32_t data1,
-		uint32_t data2, uint32_t data3);
-
-/**
- * sde_connector_register_event - register a callback function for an event
- * @connector: Pointer to drm connector structure
- * @event_idx: Index of event to register
- * @cb_func: Pointer to desired callback function
- * @usr: User pointer to pass to callback on event trigger
- * Returns: Zero on success
- */
-int sde_connector_register_event(struct drm_connector *connector,
-		uint32_t event_idx,
-		int (*cb_func)(uint32_t event_idx,
-			uint32_t instance_idx, void *usr,
-			uint32_t data0, uint32_t data1,
-			uint32_t data2, uint32_t data3),
-		void *usr);
-
-/**
- * sde_connector_unregister_event - unregister all callbacks for an event
- * @connector: Pointer to drm connector structure
- * @event_idx: Index of event to register
- */
-void sde_connector_unregister_event(struct drm_connector *connector,
-		uint32_t event_idx);
-
-/**
- * sde_connector_register_custom_event - register for async events
- * @kms: Pointer to sde_kms
- * @conn_drm: Pointer to drm connector object
- * @event: Event for which request is being sent
- * @en: Flag to enable/disable the event
- * Returns: Zero on success
- */
-int sde_connector_register_custom_event(struct sde_kms *kms,
-		struct drm_connector *conn_drm, u32 event, bool en);
-
-/**
- * sde_connector_pre_kickoff - trigger kickoff time feature programming
- * @connector: Pointer to drm connector object
- * Returns: Zero on success
- */
-int sde_connector_pre_kickoff(struct drm_connector *connector);
-
-/**
- * sde_connector_needs_offset - adjust the output fence offset based on
- *                              display type
- * @connector: Pointer to drm connector object
- * Returns: true if offset is required, false for all other cases.
- */
-static inline bool sde_connector_needs_offset(struct drm_connector *connector)
-{
-	struct sde_connector *c_conn;
-
-	if (!connector)
-		return false;
-
-	c_conn = to_sde_connector(connector);
-	return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
-}
-
-/**
- * sde_connector_get_dither_cfg - get dither property data
- * @conn: Pointer to drm_connector struct
- * @state: Pointer to drm_connector_state struct
- * @cfg: Pointer to pointer to dither cfg
- * @len: length of the dither data
- * Returns: Zero on success
- */
-int sde_connector_get_dither_cfg(struct drm_connector *conn,
-		struct drm_connector_state *state, void **cfg, size_t *len);
-
-/**
- * sde_connector_set_blob_data - set connector blob property data
- * @conn: Pointer to drm_connector struct
- * @state: Pointer to the drm_connector_state struct
- * @prop_id: property id to be populated
- * Returns: Zero on success
- */
-int sde_connector_set_blob_data(struct drm_connector *conn,
-		struct drm_connector_state *state,
-		enum msm_mdp_conn_property prop_id);
-
-/**
- * sde_connector_roi_v1_check_roi - validate connector ROI
- * @conn_state: Pointer to drm_connector_state struct
- * Returns: Zero on success
- */
-int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state);
-
-/**
- * sde_connector_schedule_status_work - manage ESD thread
- * conn: Pointer to drm_connector struct
- * @en: flag to start/stop ESD thread
- */
-void sde_connector_schedule_status_work(struct drm_connector *conn, bool en);
-
-/**
- * sde_connector_helper_reset_properties - reset properties to default values in
- *	the given DRM connector state object
- * @connector: Pointer to DRM connector object
- * @connector_state: Pointer to DRM connector state object
- * Returns: 0 on success, negative errno on failure
- */
-int sde_connector_helper_reset_custom_properties(
-		struct drm_connector *connector,
-		struct drm_connector_state *connector_state);
-
-/**
- * sde_connector_get_mode_info - get information of the current mode in the
- *                               given connector state.
- * conn_state: Pointer to the DRM connector state object
- * mode_info: Pointer to the mode info structure
- */
-int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
-	struct msm_mode_info *mode_info);
-
-/**
- * sde_conn_timeline_status - current buffer timeline status
- * conn: Pointer to drm_connector struct
- */
-void sde_conn_timeline_status(struct drm_connector *conn);
-
-/**
- * sde_connector_helper_bridge_disable - helper function for drm bridge disable
- * @connector: Pointer to DRM connector object
- */
-void sde_connector_helper_bridge_disable(struct drm_connector *connector);
-
-/**
- * sde_connector_destroy - destroy drm connector object
- * @connector: Pointer to DRM connector object
- */
-void sde_connector_destroy(struct drm_connector *connector);
-
-/**
- * sde_connector_event_notify - signal hw recovery event to client
- * @connector: pointer to connector
- * @type:     event type
- * @len:     length of the value of the event
- * @val:     value
- */
-int sde_connector_event_notify(struct drm_connector *connector, uint32_t type,
-		uint32_t len, uint32_t val);
-/**
- * sde_connector_helper_bridge_enable - helper function for drm bridge enable
- * @connector: Pointer to DRM connector object
- */
-void sde_connector_helper_bridge_enable(struct drm_connector *connector);
-
-/**
- * sde_connector_get_panel_vfp - helper to get panel vfp
- * @connector: pointer to drm connector
- * @h_active: panel width
- * @v_active: panel heigth
- * Returns: v_front_porch on success error-code on failure
- */
-int sde_connector_get_panel_vfp(struct drm_connector *connector,
-	struct drm_display_mode *mode);
-/**
- * sde_connector_esd_status - helper function to check te status
- * @connector: Pointer to DRM connector object
- */
-int sde_connector_esd_status(struct drm_connector *connector);
-
-#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
deleted file mode 100644
index 1885cbe..0000000
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ /dev/null
@@ -1,663 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "sde_core_irq.h"
-#include "sde_power_handle.h"
-
-/**
- * sde_core_irq_callback_handler - dispatch core interrupts
- * @arg:		private data of callback handler
- * @irq_idx:		interrupt index
- */
-static void sde_core_irq_callback_handler(void *arg, int irq_idx)
-{
-	struct sde_kms *sde_kms = arg;
-	struct sde_irq *irq_obj = &sde_kms->irq_obj;
-	struct sde_irq_callback *cb;
-	unsigned long irq_flags;
-	bool cb_tbl_error = false;
-	int enable_counts = 0;
-
-	pr_debug("irq_idx=%d\n", irq_idx);
-
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-		/* print error outside lock */
-		cb_tbl_error = true;
-		enable_counts = atomic_read(
-				&sde_kms->irq_obj.enable_counts[irq_idx]);
-	}
-
-	atomic_inc(&irq_obj->irq_counts[irq_idx]);
-
-	/*
-	 * Perform registered function callback
-	 */
-	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
-		if (cb->func)
-			cb->func(cb->arg, irq_idx);
-	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
-
-	if (cb_tbl_error) {
-		/*
-		 * If enable count is zero and callback list is empty, then it's
-		 * not a fatal issue. Log this case as debug. If the enable
-		 * count is nonzero and callback list is empty, then its a real
-		 * issue. Log this case as error to ensure we don't have silent
-		 * IRQs running.
-		 */
-		if (!enable_counts) {
-			SDE_DEBUG("irq has no callback, idx %d enables %d\n",
-					irq_idx, enable_counts);
-			SDE_EVT32_IRQ(irq_idx, enable_counts);
-		} else {
-			SDE_ERROR("irq has no callback, idx %d enables %d\n",
-					irq_idx, enable_counts);
-			SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
-		}
-	}
-
-	/*
-	 * Clear pending interrupt status in HW.
-	 * NOTE: sde_core_irq_callback_handler is protected by top-level
-	 *       spinlock, so it is safe to clear any interrupt status here.
-	 */
-	sde_kms->hw_intr->ops.clear_intr_status_nolock(
-			sde_kms->hw_intr,
-			irq_idx);
-}
-
-int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
-		enum sde_intr_type intr_type, u32 instance_idx)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.irq_idx_lookup)
-		return -EINVAL;
-
-	return sde_kms->hw_intr->ops.irq_idx_lookup(
-			sde_kms->hw_intr, intr_type,
-			instance_idx);
-}
-
-/**
- * _sde_core_irq_enable - enable core interrupt given by the index
- * @sde_kms:		Pointer to sde kms context
- * @irq_idx:		interrupt index
- */
-static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
-{
-	unsigned long irq_flags;
-	int ret = 0;
-
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->irq_obj.enable_counts ||
-			!sde_kms->irq_obj.irq_counts) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
-		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-
-	SDE_EVT32(irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-	spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
-	if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1)
-		ret = sde_kms->hw_intr->ops.enable_irq_nolock(
-				sde_kms->hw_intr, irq_idx);
-	spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
-	if (ret)
-		SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n", irq_idx);
-
-	SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-
-	if (atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
-		spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-		/* empty callback list but interrupt is enabled */
-		if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
-			SDE_ERROR("irq_idx=%d enabled with no callback\n",
-					irq_idx);
-		spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
-	}
-	return ret;
-}
-
-int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
-{
-	int i, ret = 0;
-
-	if (!sde_kms || !irq_idxs || !irq_count) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; (i < irq_count) && !ret; i++)
-		ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
-
-	return ret;
-}
-
-/**
- * _sde_core_irq_disable - disable core interrupt given by the index
- * @sde_kms:		Pointer to sde kms context
- * @irq_idx:		interrupt index
- */
-static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
-{
-	int ret = 0;
-	unsigned long irq_flags;
-
-	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
-		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-
-	SDE_EVT32(irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-
-	spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
-	if (atomic_add_unless(&sde_kms->irq_obj.enable_counts[irq_idx], -1, 0)
-		&& atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0)
-		ret = sde_kms->hw_intr->ops.disable_irq_nolock(
-				sde_kms->hw_intr, irq_idx);
-	spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
-
-	if (ret)
-		SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n", irq_idx);
-	SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-
-	return ret;
-}
-
-int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
-{
-	int i, ret = 0;
-
-	if (!sde_kms || !irq_idxs || !irq_count) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; (i < irq_count) && !ret; i++)
-		ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
-
-	return ret;
-}
-
-/**
- * sde_core_irq_disable_nolock - disable core interrupt given by the index
- *                               without lock
- * @sde_kms:		Pointer to sde kms context
- * @irq_idx:		interrupt index
- */
-int sde_core_irq_disable_nolock(struct sde_kms *sde_kms, int irq_idx)
-{
-	int ret = 0;
-
-	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
-		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-
-	SDE_EVT32(irq_idx,
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
-	if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
-		ret = sde_kms->hw_intr->ops.disable_irq_nolock(
-				sde_kms->hw_intr,
-				irq_idx);
-		if (ret)
-			SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
-					irq_idx);
-		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-	}
-
-	return ret;
-}
-
-u32 sde_core_irq_read_nolock(struct sde_kms *sde_kms, int irq_idx, bool clear)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.get_interrupt_status)
-		return 0;
-
-	if (irq_idx < 0) {
-		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
-				__builtin_return_address(0), irq_idx);
-		return 0;
-	}
-
-	return sde_kms->hw_intr->ops.get_intr_status_nolock(sde_kms->hw_intr,
-			irq_idx, clear);
-}
-
-u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.get_interrupt_status)
-		return 0;
-
-	if (irq_idx < 0) {
-		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
-				__builtin_return_address(0), irq_idx);
-		return 0;
-	}
-
-	return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
-			irq_idx, clear);
-}
-
-int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
-		struct sde_irq_callback *register_irq_cb)
-{
-	unsigned long irq_flags;
-
-	if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!register_irq_cb || !register_irq_cb->func) {
-		SDE_ERROR("invalid irq_cb:%d func:%d\n",
-				register_irq_cb != NULL,
-				register_irq_cb ?
-					register_irq_cb->func != NULL : -1);
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
-		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-	SDE_EVT32(irq_idx, register_irq_cb);
-	list_del_init(&register_irq_cb->list);
-	list_add_tail(&register_irq_cb->list,
-			&sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
-	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
-
-	return 0;
-}
-
-int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
-		struct sde_irq_callback *register_irq_cb)
-{
-	unsigned long irq_flags;
-
-	if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (!register_irq_cb || !register_irq_cb->func) {
-		SDE_ERROR("invalid irq_cb:%d func:%d\n",
-				register_irq_cb != NULL,
-				register_irq_cb ?
-					register_irq_cb->func != NULL : -1);
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
-		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-	SDE_EVT32(irq_idx, register_irq_cb);
-	list_del_init(&register_irq_cb->list);
-	/* empty callback list but interrupt is still enabled */
-	if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
-			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
-		SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
-	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
-
-	return 0;
-}
-
-static void sde_clear_all_irqs(struct sde_kms *sde_kms)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.clear_all_irqs)
-		return;
-
-	sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
-}
-
-static void sde_disable_all_irqs(struct sde_kms *sde_kms)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.disable_all_irqs)
-		return;
-
-	sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
-}
-
-#ifdef CONFIG_DEBUG_FS
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
-static int __prefix ## _open(struct inode *inode, struct file *file)	\
-{									\
-	return single_open(file, __prefix ## _show, inode->i_private);	\
-}									\
-static const struct file_operations __prefix ## _fops = {		\
-	.owner = THIS_MODULE,						\
-	.open = __prefix ## _open,					\
-	.release = single_release,					\
-	.read = seq_read,						\
-	.llseek = seq_lseek,						\
-}
-
-static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
-{
-	struct sde_irq *irq_obj = s->private;
-	struct sde_irq_callback *cb;
-	unsigned long irq_flags;
-	int i, irq_count, enable_count, cb_count;
-
-	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
-		SDE_ERROR("invalid parameters\n");
-		return 0;
-	}
-
-	for (i = 0; i < irq_obj->total_irqs; i++) {
-		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
-		cb_count = 0;
-		irq_count = atomic_read(&irq_obj->irq_counts[i]);
-		enable_count = atomic_read(&irq_obj->enable_counts[i]);
-		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
-			cb_count++;
-		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
-
-		if (irq_count || enable_count || cb_count)
-			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
-					i, irq_count, enable_count, cb_count);
-	}
-
-	return 0;
-}
-
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
-
-int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0400,
-			parent, &sde_kms->irq_obj,
-			&sde_debugfs_core_irq_fops);
-
-	return 0;
-}
-
-void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
-{
-	debugfs_remove(sde_kms->irq_obj.debugfs_file);
-	sde_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-	return 0;
-}
-
-void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
-{
-}
-#endif
-
-void sde_core_irq_preinstall(struct sde_kms *sde_kms)
-{
-	struct msm_drm_private *priv;
-	int i;
-	int rc;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	} else if (!sde_kms->dev) {
-		SDE_ERROR("invalid drm device\n");
-		return;
-	} else if (!sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid device private\n");
-		return;
-	}
-	priv = sde_kms->dev->dev_private;
-
-	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-			true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		return;
-	}
-
-	sde_clear_all_irqs(sde_kms);
-	sde_disable_all_irqs(sde_kms);
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	spin_lock_init(&sde_kms->irq_obj.cb_lock);
-
-	/* Create irq callbacks for all possible irq_idx */
-	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->sde_irq_map_size;
-	sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
-			sizeof(struct list_head), GFP_KERNEL);
-	sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
-			sizeof(atomic_t), GFP_KERNEL);
-	sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
-			sizeof(atomic_t), GFP_KERNEL);
-	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
-		INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
-		atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
-		atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
-	}
-}
-
-int sde_core_irq_postinstall(struct sde_kms *sde_kms)
-{
-	return 0;
-}
-
-void sde_core_irq_uninstall(struct sde_kms *sde_kms)
-{
-	struct msm_drm_private *priv;
-	int i;
-	int rc;
-	unsigned long irq_flags;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	} else if (!sde_kms->dev) {
-		SDE_ERROR("invalid drm device\n");
-		return;
-	} else if (!sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid device private\n");
-		return;
-	}
-	priv = sde_kms->dev->dev_private;
-
-	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-			true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		return;
-	}
-
-	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
-		if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
-				!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
-			SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
-
-	sde_clear_all_irqs(sde_kms);
-	sde_disable_all_irqs(sde_kms);
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-	kfree(sde_kms->irq_obj.irq_cb_tbl);
-	kfree(sde_kms->irq_obj.enable_counts);
-	kfree(sde_kms->irq_obj.irq_counts);
-	sde_kms->irq_obj.irq_cb_tbl = NULL;
-	sde_kms->irq_obj.enable_counts = NULL;
-	sde_kms->irq_obj.irq_counts = NULL;
-	sde_kms->irq_obj.total_irqs = 0;
-	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
-}
-
-static void sde_core_irq_mask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static void sde_core_irq_unmask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_core_irq_chip = {
-	.name = "sde",
-	.irq_mask = sde_core_irq_mask,
-	.irq_unmask = sde_core_irq_unmask,
-};
-
-static int sde_core_irqdomain_map(struct irq_domain *domain,
-		unsigned int irq, irq_hw_number_t hwirq)
-{
-	struct sde_kms *sde_kms;
-	int rc;
-
-	if (!domain || !domain->host_data) {
-		SDE_ERROR("invalid parameters domain %d\n", domain != NULL);
-		return -EINVAL;
-	}
-	sde_kms = domain->host_data;
-
-	irq_set_chip_and_handler(irq, &sde_core_irq_chip, handle_level_irq);
-	rc = irq_set_chip_data(irq, sde_kms);
-
-	return rc;
-}
-
-static const struct irq_domain_ops sde_core_irqdomain_ops = {
-	.map = sde_core_irqdomain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
-int sde_core_irq_domain_add(struct sde_kms *sde_kms)
-{
-	struct device *dev;
-	struct irq_domain *domain;
-
-	if (!sde_kms->dev || !sde_kms->dev->dev) {
-		pr_err("invalid device handles\n");
-		return -EINVAL;
-	}
-
-	dev = sde_kms->dev->dev;
-
-	domain = irq_domain_add_linear(dev->of_node, 32,
-			&sde_core_irqdomain_ops, sde_kms);
-	if (!domain) {
-		pr_err("failed to add irq_domain\n");
-		return -EINVAL;
-	}
-
-	sde_kms->irq_controller.enabled_mask = 0;
-	sde_kms->irq_controller.domain = domain;
-
-	return 0;
-}
-
-int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
-{
-	if (sde_kms->irq_controller.domain) {
-		irq_domain_remove(sde_kms->irq_controller.domain);
-		sde_kms->irq_controller.domain = NULL;
-	}
-	return 0;
-}
-
-irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
-{
-	/*
-	 * Read interrupt status from all sources. Interrupt status are
-	 * stored within hw_intr.
-	 * Function will also clear the interrupt status after reading.
-	 * Individual interrupt status bit will only get stored if it
-	 * is enabled.
-	 */
-	sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
-
-	/*
-	 * Dispatch to HW driver to handle interrupt lookup that is being
-	 * fired. When matching interrupt is located, HW driver will call to
-	 * sde_core_irq_callback_handler with the irq_idx from the lookup table.
-	 * sde_core_irq_callback_handler will perform the registered function
-	 * callback, and do the interrupt status clearing once the registered
-	 * callback is finished.
-	 */
-	sde_kms->hw_intr->ops.dispatch_irqs(
-			sde_kms->hw_intr,
-			sde_core_irq_callback_handler,
-			sde_kms);
-
-	return IRQ_HANDLED;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
deleted file mode 100644
index 3bc42f5..0000000
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_CORE_IRQ_H__
-#define __SDE_CORE_IRQ_H__
-
-#include "sde_kms.h"
-#include "sde_hw_interrupts.h"
-
-/**
- * sde_core_irq_preinstall - perform pre-installation of core IRQ handler
- * @sde_kms:		SDE handle
- * @return:		none
- */
-void sde_core_irq_preinstall(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq_postinstall - perform post-installation of core IRQ handler
- * @sde_kms:		SDE handle
- * @return:		0 if success; error code otherwise
- */
-int sde_core_irq_postinstall(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq_uninstall - uninstall core IRQ handler
- * @sde_kms:		SDE handle
- * @return:		none
- */
-void sde_core_irq_uninstall(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq_domain_add - Add core IRQ domain for SDE
- * @sde_kms:		SDE handle
- * @return:		none
- */
-int sde_core_irq_domain_add(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq_domain_fini - uninstall core IRQ domain
- * @sde_kms:		SDE handle
- * @return:		0 if success; error code otherwise
- */
-int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq - core IRQ handler
- * @sde_kms:		SDE handle
- * @return:		interrupt handling status
- */
-irqreturn_t sde_core_irq(struct sde_kms *sde_kms);
-
-/**
- * sde_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
- *                      interrupt mapping table.
- * @sde_kms:		SDE handle
- * @intr_type:		SDE HW interrupt type for lookup
- * @instance_idx:	SDE HW block instance defined in sde_hw_mdss.h
- * @return:		irq_idx or -EINVAL when fail to lookup
- */
-int sde_core_irq_idx_lookup(
-		struct sde_kms *sde_kms,
-		enum sde_intr_type intr_type,
-		uint32_t instance_idx);
-
-/**
- * sde_core_irq_enable - IRQ helper function for enabling one or more IRQs
- * @sde_kms:		SDE handle
- * @irq_idxs:		Array of irq index
- * @irq_count:		Number of irq_idx provided in the array
- * @return:		0 for success enabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable.  Interrupts is enabled if count is 0 before increment.
- */
-int sde_core_irq_enable(
-		struct sde_kms *sde_kms,
-		int *irq_idxs,
-		uint32_t irq_count);
-
-/**
- * sde_core_irq_disable - IRQ helper function for disabling one of more IRQs
- * @sde_kms:		SDE handle
- * @irq_idxs:		Array of irq index
- * @irq_count:		Number of irq_idx provided in the array
- * @return:		0 for success disabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable.  Interrupts is disabled if count is 0 after decrement.
- */
-int sde_core_irq_disable(
-		struct sde_kms *sde_kms,
-		int *irq_idxs,
-		uint32_t irq_count);
-
-/**
- * sde_core_irq_disable_nolock - no lock version of sde_core_irq_disable
- * @sde_kms:		SDE handle
- * @irq_idx:		Irq index
- * @return:		0 for success disabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable.  Interrupts is disabled if count is 0 after decrement.
- */
-int sde_core_irq_disable_nolock(
-		struct sde_kms *sde_kms,
-		int irq_idx);
-
-/**
- * sde_core_irq_read - IRQ helper function for reading IRQ status
- * @sde_kms:		SDE handle
- * @irq_idx:		irq index
- * @clear:		True to clear the irq after read
- * @return:		non-zero if irq detected; otherwise no irq detected
- */
-u32 sde_core_irq_read(
-		struct sde_kms *sde_kms,
-		int irq_idx,
-		bool clear);
-
-/**
- * sde_core_irq_read - no lock version of sde_core_irq_read
- * @sde_kms:		SDE handle
- * @irq_idx:		irq index
- * @clear:		True to clear the irq after read
- * @return:		non-zero if irq detected; otherwise no irq detected
- */
-u32 sde_core_irq_read_nolock(
-		struct sde_kms *sde_kms,
-		int irq_idx,
-		bool clear);
-
-/**
- * sde_core_irq_register_callback - For registering callback function on IRQ
- *                             interrupt
- * @sde_kms:		SDE handle
- * @irq_idx:		irq index
- * @irq_cb:		IRQ callback structure, containing callback function
- *			and argument. Passing NULL for irq_cb will unregister
- *			the callback for the given irq_idx
- *			This must exist until un-registration.
- * @return:		0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
-int sde_core_irq_register_callback(
-		struct sde_kms *sde_kms,
-		int irq_idx,
-		struct sde_irq_callback *irq_cb);
-
-/**
- * sde_core_irq_unregister_callback - For unregistering callback function on IRQ
- *                             interrupt
- * @sde_kms:		SDE handle
- * @irq_idx:		irq index
- * @irq_cb:		IRQ callback structure, containing callback function
- *			and argument. Passing NULL for irq_cb will unregister
- *			the callback for the given irq_idx
- *			This must match with registration.
- * @return:		0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
-int sde_core_irq_unregister_callback(
-		struct sde_kms *sde_kms,
-		int irq_idx,
-		struct sde_irq_callback *irq_cb);
-
-/**
- * sde_debugfs_core_irq_init - register core irq debugfs
- * @sde_kms: pointer to kms
- * @parent: debugfs directory root
- * @Return: 0 on success
- */
-int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
-		struct dentry *parent);
-
-/**
- * sde_debugfs_core_irq_destroy - deregister core irq debugfs
- * @sde_kms: pointer to kms
- */
-void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms);
-
-#endif /* __SDE_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
deleted file mode 100644
index a044b8e..0000000
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ /dev/null
@@ -1,1252 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/sort.h>
-#include <linux/clk.h>
-#include <linux/bitmap.h>
-#include <linux/sde_rsc.h>
-#include <linux/platform_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-
-#include "msm_prop.h"
-
-#include "sde_kms.h"
-#include "sde_trace.h"
-#include "sde_crtc.h"
-#include "sde_encoder.h"
-#include "sde_core_perf.h"
-
-#define SDE_PERF_MODE_STRING_SIZE	128
-#define SDE_PERF_THRESHOLD_HIGH_MIN     12800000
-
-static DEFINE_MUTEX(sde_core_perf_lock);
-
-/**
- * enum sde_perf_mode - performance tuning mode
- * @SDE_PERF_MODE_NORMAL: performance controlled by user mode client
- * @SDE_PERF_MODE_MINIMUM: performance bounded by minimum setting
- * @SDE_PERF_MODE_FIXED: performance bounded by fixed setting
- */
-enum sde_perf_mode {
-	SDE_PERF_MODE_NORMAL,
-	SDE_PERF_MODE_MINIMUM,
-	SDE_PERF_MODE_FIXED,
-	SDE_PERF_MODE_MAX
-};
-
-/**
- * enum sde_perf_vote_mode: perf vote mode.
- * @APPS_RSC_MODE:	It combines the vote for all displays and votes it
- *                      through APPS rsc. This is default mode when display
- *                      rsc is not available.
- * @DISP_RSC_MODE:	It combines the vote for all displays and votes it
- *                      through display rsc. This is default configuration
- *                      when display rsc is available.
- * @DISP_RSC_PRIMARY_MODE:	The primary display votes through display rsc
- *                      while all other displays votes through apps rsc.
- */
-enum sde_perf_vote_mode {
-	APPS_RSC_MODE,
-	DISP_RSC_MODE,
-	DISP_RSC_PRIMARY_MODE,
-};
-
-static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
-{
-	struct msm_drm_private *priv;
-
-	if (!crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid device\n");
-		return NULL;
-	}
-
-	priv = crtc->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return NULL;
-	}
-
-	return to_sde_kms(priv->kms);
-}
-
-static bool _sde_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
-	return sde_crtc_is_enabled(crtc);
-}
-
-static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
-{
-	struct drm_crtc *tmp_crtc;
-	bool intf_connected = false;
-
-	if (!crtc)
-		goto end;
-
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if ((sde_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
-				_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
-			SDE_DEBUG("video interface connected crtc:%d\n",
-				tmp_crtc->base.id);
-			intf_connected = true;
-			goto end;
-		}
-	}
-
-end:
-	return intf_connected;
-}
-
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
-		struct drm_crtc *crtc,
-		struct drm_crtc_state *state,
-		struct sde_core_perf_params *perf)
-{
-	struct sde_crtc_state *sde_cstate;
-	int i;
-
-	if (!kms || !kms->catalog || !crtc || !state || !perf) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	sde_cstate = to_sde_crtc_state(state);
-	memset(perf, 0, sizeof(struct sde_core_perf_params));
-
-	perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
-		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
-	perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
-		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
-
-	if (sde_cstate->bw_split_vote) {
-		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_AB);
-		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_IB);
-		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_AB);
-		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_IB);
-	} else {
-		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
-		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
-		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
-		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
-	}
-
-	perf->core_clk_rate =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
-	if (!sde_cstate->bw_control) {
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
-					1000ULL;
-			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
-		}
-		perf->core_clk_rate = kms->perf.max_core_clk_rate;
-	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			perf->bw_ctl[i] = 0;
-			perf->max_per_pipe_ib[i] = 0;
-		}
-		perf->core_clk_rate = 0;
-	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
-			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
-		}
-		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
-	}
-
-	SDE_EVT32(crtc->base.id, perf->core_clk_rate);
-	trace_sde_perf_calc_crtc(crtc->base.id,
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI],
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI],
-			perf->core_clk_rate);
-
-	SDE_DEBUG(
-		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
-			crtc->base.id, perf->core_clk_rate,
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI],
-			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI]);
-}
-
-int sde_core_perf_crtc_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	u32 bw, threshold;
-	u64 bw_sum_of_intfs = 0;
-	enum sde_crtc_client_type curr_client_type;
-	bool is_video_mode;
-	struct sde_crtc_state *sde_cstate;
-	struct drm_crtc *tmp_crtc;
-	struct sde_kms *kms;
-	int i;
-
-	if (!crtc || !state) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	kms = _sde_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
-		return 0;
-	}
-
-	/* we only need bandwidth check on real-time clients (interfaces) */
-	if (sde_crtc_get_client_type(crtc) == NRT_CLIENT)
-		return 0;
-
-	sde_cstate = to_sde_crtc_state(state);
-
-	/* obtain new values */
-	_sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
-
-	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
-			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl[i];
-		curr_client_type = sde_crtc_get_client_type(crtc);
-
-		drm_for_each_crtc(tmp_crtc, crtc->dev) {
-			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-			    (sde_crtc_get_client_type(tmp_crtc) ==
-					    curr_client_type) &&
-			    (tmp_crtc != crtc)) {
-				struct sde_crtc_state *tmp_cstate =
-					to_sde_crtc_state(tmp_crtc->state);
-
-				SDE_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
-					tmp_crtc->base.id,
-					tmp_cstate->new_perf.bw_ctl[i],
-					tmp_cstate->bw_control);
-				/*
-				 * For bw check only use the bw if the
-				 * atomic property has been already set
-				 */
-				if (tmp_cstate->bw_control)
-					bw_sum_of_intfs +=
-						tmp_cstate->new_perf.bw_ctl[i];
-			}
-		}
-
-		/* convert bandwidth to kb */
-		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
-		SDE_DEBUG("calculated bandwidth=%uk\n", bw);
-
-		is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
-		threshold = (is_video_mode ||
-			_sde_core_video_mode_intf_connected(crtc)) ?
-			kms->catalog->perf.max_bw_low :
-			kms->catalog->perf.max_bw_high;
-
-		SDE_DEBUG("final threshold bw limit = %d\n", threshold);
-
-		if (!sde_cstate->bw_control) {
-			SDE_DEBUG("bypass bandwidth check\n");
-		} else if (!threshold) {
-			SDE_ERROR("no bandwidth limits specified\n");
-			return -E2BIG;
-		} else if (bw > threshold) {
-			SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
-					threshold);
-			return -E2BIG;
-		}
-	}
-
-	return 0;
-}
-
-static inline bool _is_crtc_client_type_matches(struct drm_crtc *tmp_crtc,
-	enum sde_crtc_client_type curr_client_type,
-	struct sde_core_perf *perf)
-{
-	if (!tmp_crtc)
-		return false;
-	else if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
-							perf->sde_rsc_available)
-		return curr_client_type == sde_crtc_get_client_type(tmp_crtc);
-	else
-		return true;
-}
-
-static inline enum sde_crtc_client_type _get_sde_client_type(
-	enum sde_crtc_client_type curr_client_type,
-	struct sde_core_perf *perf)
-{
-	if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
-						perf->sde_rsc_available)
-		return curr_client_type;
-	else if (perf->bw_vote_mode != APPS_RSC_MODE && perf->sde_rsc_available)
-		return RT_RSC_CLIENT;
-	else
-		return RT_CLIENT;
-}
-
-/**
- * @_sde_core_perf_activate_llcc() - Activates/deactivates the system llcc
- * @kms - pointer to the kms
- * @uid - ID for which the llcc would be activated
- * @activate - boolean to indicate if activate/deactivate the LLCC
- *
- * Function assumes that caller has already acquired the "sde_core_perf_lock",
- * which would protect from any race condition between CRTC's
- */
-static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
-	u32 uid, bool activate)
-{
-	struct llcc_slice_desc *slice;
-	struct drm_device *drm_dev;
-	struct device *dev;
-	struct platform_device *pdev;
-	int rc = 0;
-
-	if (!kms || !kms->dev || !kms->dev->dev) {
-		SDE_ERROR("wrong params won't activate llcc\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	drm_dev = kms->dev;
-	dev = drm_dev->dev;
-	pdev = to_platform_device(dev);
-
-	/* If LLCC is already in the requested state, skip */
-	SDE_EVT32(activate, kms->perf.llcc_active);
-	if ((activate && kms->perf.llcc_active) ||
-		(!activate && !kms->perf.llcc_active)) {
-		SDE_DEBUG("skip llcc request:%d state:%d\n",
-			activate, kms->perf.llcc_active);
-		goto exit;
-	}
-
-	SDE_DEBUG("activate/deactivate the llcc request:%d state:%d\n",
-		activate, kms->perf.llcc_active);
-
-	slice = llcc_slice_getd(uid);
-	if (IS_ERR_OR_NULL(slice))  {
-		SDE_ERROR("failed to get llcc slice for uid:%d\n", uid);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	if (activate) {
-		llcc_slice_activate(slice);
-		kms->perf.llcc_active = true;
-	} else {
-		llcc_slice_deactivate(slice);
-		kms->perf.llcc_active = false;
-	}
-
-exit:
-	if (rc)
-		SDE_ERROR("error activating llcc:%d rc:%d\n",
-			activate, rc);
-	return rc;
-
-}
-
-static void _sde_core_perf_crtc_update_llcc(struct sde_kms *kms,
-		struct drm_crtc *crtc)
-{
-	struct drm_crtc *tmp_crtc;
-	struct sde_crtc *sde_crtc;
-	enum sde_crtc_client_type curr_client_type
-					= sde_crtc_get_client_type(crtc);
-	u32 total_llcc_active = 0;
-
-	if (!kms->perf.catalog->sc_cfg.has_sys_cache) {
-		SDE_DEBUG("System Cache is not enabled!. Won't use\n");
-		return;
-	}
-
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-			_is_crtc_client_type_matches(tmp_crtc, curr_client_type,
-								&kms->perf)) {
-
-			/* use current perf, which are the values voted */
-			sde_crtc = to_sde_crtc(tmp_crtc);
-			total_llcc_active |=
-			  sde_crtc->cur_perf.llcc_active;
-
-			SDE_DEBUG("crtc=%d llcc:%u active:0x%x\n",
-				tmp_crtc->base.id,
-				sde_crtc->cur_perf.llcc_active,
-				total_llcc_active);
-
-			if (total_llcc_active)
-				break;
-		}
-	}
-
-	_sde_core_perf_activate_llcc(kms, LLCC_ROTATOR,
-			total_llcc_active ? true : false);
-}
-
-static void _sde_core_uidle_setup_wd(struct sde_kms *kms,
-	bool enable)
-{
-	struct sde_uidle_wd_cfg wd;
-	struct sde_hw_uidle *uidle;
-
-	uidle = kms->hw_uidle;
-	wd.enable = enable;
-	wd.clear = false;
-	wd.granularity = SDE_UIDLE_WD_GRANULARITY;
-	wd.heart_beat = SDE_UIDLE_WD_HEART_BEAT;
-	wd.load_value = SDE_UIDLE_WD_LOAD_VAL;
-
-	if (uidle->ops.setup_wd_timer)
-		uidle->ops.setup_wd_timer(uidle, &wd);
-}
-
-static void _sde_core_uidle_setup_cfg(struct sde_kms *kms,
-	bool enable)
-{
-	struct sde_uidle_ctl_cfg cfg;
-	struct sde_hw_uidle *uidle;
-
-	uidle = kms->hw_uidle;
-	cfg.uidle_enable = enable;
-	cfg.fal10_danger =
-		kms->catalog->uidle_cfg.fal10_danger;
-	cfg.fal10_exit_cnt =
-		kms->catalog->uidle_cfg.fal10_exit_cnt;
-	cfg.fal10_exit_danger =
-		kms->catalog->uidle_cfg.fal10_exit_danger;
-
-	SDE_DEBUG("fal10_danger:%d fal10_exit_cnt:%d fal10_exit_danger:%d\n",
-		cfg.fal10_danger, cfg.fal10_exit_cnt, cfg.fal10_exit_danger);
-	SDE_EVT32(enable, cfg.fal10_danger, cfg.fal10_exit_cnt,
-		cfg.fal10_exit_danger);
-
-	if (uidle->ops.set_uidle_ctl)
-		uidle->ops.set_uidle_ctl(uidle, &cfg);
-}
-
-static void _sde_core_uidle_setup_ctl(struct drm_crtc *crtc,
-	bool enable)
-{
-	struct drm_encoder *drm_enc;
-
-	/* Disable uidle in the CTL */
-	drm_for_each_encoder(drm_enc, crtc->dev) {
-		if (drm_enc->crtc != crtc)
-			continue;
-
-		sde_encoder_uidle_enable(drm_enc, enable);
-	}
-}
-
-static int _sde_core_perf_enable_uidle(struct sde_kms *kms,
-	struct drm_crtc *crtc, bool enable)
-{
-	int rc = 0;
-
-	if (!kms->dev || !kms->dev->dev || !kms->hw_uidle) {
-		SDE_ERROR("wrong params won't enable uidlen");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	/* if no status change, just return */
-	if ((enable && kms->perf.uidle_enabled) ||
-		(!enable && !kms->perf.uidle_enabled)) {
-		SDE_DEBUG("no status change enable:%d uidle:%d\n",
-			enable, kms->perf.uidle_enabled);
-		goto exit;
-	}
-
-	SDE_EVT32(enable);
-	_sde_core_uidle_setup_wd(kms, enable);
-	_sde_core_uidle_setup_cfg(kms, enable);
-	_sde_core_uidle_setup_ctl(crtc, enable);
-
-	kms->perf.uidle_enabled = enable;
-
-exit:
-	return rc;
-}
-
-static inline bool _sde_core_perf_is_wb(struct drm_crtc *crtc)
-{
-	enum sde_intf_mode if_mode = INTF_MODE_NONE;
-
-	if_mode = sde_crtc_get_intf_mode(crtc);
-	if (if_mode == INTF_MODE_WB_BLOCK ||
-		if_mode == INTF_MODE_WB_LINE)
-		return true;
-
-	return false;
-}
-
-static bool _sde_core_perf_is_cwb(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-
-	/* if any other encoder is connected to same crtc in clone mode */
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc == crtc &&
-				sde_encoder_in_clone_mode(encoder)) {
-			return true;
-		}
-	}
-
-	return false;
-}
-
-static void _sde_core_perf_uidle_setup_cntr(struct sde_kms *sde_kms,
-	bool enable)
-{
-	struct sde_hw_uidle *uidle;
-
-	uidle = sde_kms->hw_uidle;
-
-	SDE_EVT32(enable);
-	if (uidle->ops.uidle_setup_cntr && (enable !=
-			sde_kms->catalog->uidle_cfg.perf_cntr_en)) {
-		uidle->ops.uidle_setup_cntr(uidle, enable);
-		sde_kms->catalog->uidle_cfg.perf_cntr_en = enable;
-	}
-}
-
-void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
-	bool enable)
-{
-	struct drm_crtc *tmp_crtc;
-	struct sde_kms *kms;
-	bool disable_uidle = false;
-	u32 fps;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	kms = _sde_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	mutex_lock(&sde_core_perf_lock);
-
-	if (!kms->perf.catalog->uidle_cfg.uidle_rev ||
-		(enable && !kms->perf.catalog->uidle_cfg.debugfs_ctrl)) {
-		SDE_DEBUG("uidle is not enabled %d %d\n",
-			kms->perf.catalog->uidle_cfg.uidle_rev,
-			kms->perf.catalog->uidle_cfg.debugfs_ctrl);
-		goto exit;
-	}
-
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
-
-			fps = sde_crtc_get_fps_mode(tmp_crtc);
-
-			SDE_DEBUG("crtc=%d fps:%d wb:%d cwb:%d dis:%d en:%d\n",
-				tmp_crtc->base.id, fps,
-				_sde_core_perf_is_wb(tmp_crtc),
-				_sde_core_perf_is_cwb(tmp_crtc),
-				disable_uidle, enable);
-
-			if (_sde_core_perf_is_wb(tmp_crtc) ||
-				_sde_core_perf_is_cwb(tmp_crtc) || (!fps ||
-				 fps > kms->perf.catalog->uidle_cfg.max_fps)) {
-				disable_uidle = true;
-				break;
-			}
-		}
-	}
-
-	_sde_core_perf_enable_uidle(kms, crtc,
-		(enable && !disable_uidle) ? true : false);
-
-	/* If perf counters enabled, set them up now */
-	if (kms->catalog->uidle_cfg.debugfs_perf)
-		_sde_core_perf_uidle_setup_cntr(kms, enable);
-
-exit:
-	mutex_unlock(&sde_core_perf_lock);
-}
-
-static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
-		struct drm_crtc *crtc, u32 bus_id)
-{
-	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
-	struct sde_core_perf_params perf = { { 0 } };
-	enum sde_crtc_client_type client_vote, curr_client_type
-					= sde_crtc_get_client_type(crtc);
-	struct drm_crtc *tmp_crtc;
-	struct sde_crtc_state *sde_cstate;
-	struct msm_drm_private *priv = kms->dev->dev_private;
-	struct sde_crtc *sde_crtc;
-
-	u64 tmp_max_per_pipe_ib;
-	u64 tmp_bw_ctl;
-
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-		    _is_crtc_client_type_matches(tmp_crtc, curr_client_type,
-								&kms->perf)) {
-
-			/* use current perf, which are the values voted */
-			sde_crtc = to_sde_crtc(tmp_crtc);
-			tmp_max_per_pipe_ib =
-			  sde_crtc->cur_perf.max_per_pipe_ib[bus_id];
-			tmp_bw_ctl =
-			  sde_crtc->cur_perf.bw_ctl[bus_id];
-
-			perf.max_per_pipe_ib[bus_id] =
-				max(perf.max_per_pipe_ib[bus_id],
-				tmp_max_per_pipe_ib);
-
-			bw_sum_of_intfs += tmp_bw_ctl;
-
-			SDE_DEBUG("crtc=%d bus_id=%d bw=%llu perf_pipe:%llu\n",
-				tmp_crtc->base.id, bus_id,
-				tmp_bw_ctl, tmp_max_per_pipe_ib);
-		}
-	}
-
-	bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote);
-	bus_ib_quota = perf.max_per_pipe_ib[bus_id];
-
-	if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
-		bus_ab_quota = kms->perf.fix_core_ab_vote;
-		bus_ib_quota = kms->perf.fix_core_ib_vote;
-	}
-
-	client_vote = _get_sde_client_type(curr_client_type, &kms->perf);
-	switch (client_vote) {
-	case NRT_CLIENT:
-		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-				bus_id, bus_ab_quota, bus_ib_quota);
-		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "nrt",
-				bus_id, bus_ab_quota, bus_ib_quota);
-		break;
-
-	case RT_CLIENT:
-		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-				bus_id, bus_ab_quota, bus_ib_quota);
-		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt",
-				bus_id, bus_ab_quota, bus_ib_quota);
-		break;
-
-	case RT_RSC_CLIENT:
-		sde_cstate = to_sde_crtc_state(crtc->state);
-		sde_rsc_client_vote(sde_cstate->rsc_client,
-				bus_id, bus_ab_quota, bus_ib_quota);
-		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt_rsc",
-				bus_id, bus_ab_quota, bus_ib_quota);
-		break;
-
-	default:
-		SDE_ERROR("invalid client type:%d\n", curr_client_type);
-		break;
-	}
-
-	if (kms->perf.bw_vote_mode_updated) {
-		switch (kms->perf.bw_vote_mode) {
-		case DISP_RSC_MODE:
-			sde_power_data_bus_set_quota(&priv->phandle,
-				kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-				bus_id, 0, 0);
-			sde_power_data_bus_set_quota(&priv->phandle,
-				kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-				bus_id, 0, 0);
-			kms->perf.bw_vote_mode_updated = false;
-			break;
-
-		case APPS_RSC_MODE:
-			sde_cstate = to_sde_crtc_state(crtc->state);
-			if (sde_cstate->rsc_client) {
-				sde_rsc_client_vote(sde_cstate->rsc_client,
-								bus_id, 0, 0);
-				kms->perf.bw_vote_mode_updated = false;
-			}
-			break;
-
-		default:
-			break;
-		}
-	}
-}
-
-/**
- * @sde_core_perf_crtc_release_bw() - request zero bandwidth
- * @crtc - pointer to a crtc
- *
- * Function checks a state variable for the crtc, if all pending commit
- * requests are done, meaning no more bandwidth is needed, release
- * bandwidth request.
- */
-void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
-{
-	struct drm_crtc *tmp_crtc;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *sde_cstate;
-	struct sde_kms *kms;
-	int i;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	kms = _sde_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	sde_cstate = to_sde_crtc_state(crtc->state);
-
-	/* only do this for command mode rt client (non-rsc client) */
-	if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
-		(sde_crtc_get_client_type(crtc) != RT_RSC_CLIENT))
-		return;
-
-	/*
-	 * If video interface present, cmd panel bandwidth cannot be
-	 * released.
-	 */
-	if (sde_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
-		drm_for_each_crtc(tmp_crtc, crtc->dev) {
-			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-				sde_crtc_get_intf_mode(tmp_crtc) ==
-						INTF_MODE_VIDEO)
-				return;
-		}
-
-	/* Release the bandwidth */
-	if (kms->perf.enable_bw_release) {
-		trace_sde_cmd_release_bw(crtc->base.id);
-		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			sde_crtc->cur_perf.bw_ctl[i] = 0;
-			_sde_core_perf_crtc_update_bus(kms, crtc, i);
-		}
-	}
-}
-
-static u64 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
-{
-	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
-	struct drm_crtc *tmp_crtc;
-	struct sde_crtc *sde_crtc;
-	u64 tmp_rate;
-
-	drm_for_each_crtc(tmp_crtc, kms->dev) {
-		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
-
-			/* use current perf, which are the values voted */
-			sde_crtc = to_sde_crtc(tmp_crtc);
-			tmp_rate = sde_crtc->cur_perf.core_clk_rate;
-
-			clk_rate = max(tmp_rate, clk_rate);
-
-			clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
-		}
-	}
-
-	if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED)
-		clk_rate = kms->perf.fix_core_clk_rate;
-
-	SDE_DEBUG("clk:%llu\n", clk_rate);
-
-	return clk_rate;
-}
-
-void sde_core_perf_crtc_update(struct drm_crtc *crtc,
-		int params_changed, bool stop_req)
-{
-	struct sde_core_perf_params *new, *old;
-	int update_bus = 0, update_clk = 0, update_llcc = 0;
-	u64 clk_rate = 0;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *sde_cstate;
-	int ret, i;
-	struct msm_drm_private *priv;
-	struct sde_kms *kms;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	kms = _sde_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-	priv = kms->dev->dev_private;
-	sde_crtc = to_sde_crtc(crtc);
-	sde_cstate = to_sde_crtc_state(crtc->state);
-
-	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
-			crtc->base.id, stop_req, kms->perf.core_clk_rate);
-
-	mutex_lock(&sde_core_perf_lock);
-
-	/*
-	 * cache the performance numbers in the crtc prior to the
-	 * crtc kickoff, so the same numbers are used during the
-	 * perf update that happens post kickoff.
-	 */
-	if (params_changed)
-		memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
-			sizeof(struct sde_core_perf_params));
-
-	old = &sde_crtc->cur_perf;
-	new = &sde_crtc->new_perf;
-
-	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-
-		/*
-		 * cases for the llcc update.
-		 * 1. llcc is transitioning: 'inactive->active' during kickoff,
-		 *	for current request.
-		 * 2. llcc is transitioning: 'active->inactive'at the end of the
-		 *	commit or during stop
-		 */
-
-		if ((params_changed &&
-			 new->llcc_active && !old->llcc_active) ||
-		    (!params_changed &&
-			!new->llcc_active && old->llcc_active)) {
-
-			SDE_DEBUG("crtc=%d p=%d new_llcc=%d, old_llcc=%d\n",
-				crtc->base.id, params_changed,
-				new->llcc_active, old->llcc_active);
-
-			old->llcc_active = new->llcc_active;
-			update_llcc = 1;
-		}
-
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			/*
-			 * cases for bus bandwidth update.
-			 * 1. new bandwidth vote - "ab or ib vote" is higher
-			 *    than current vote for update request.
-			 * 2. new bandwidth vote - "ab or ib vote" is lower
-			 *    than current vote at end of commit or stop.
-			 */
-
-			if ((params_changed &&
-				(new->bw_ctl[i] > old->bw_ctl[i])) ||
-			    (!params_changed &&
-				(new->bw_ctl[i] < old->bw_ctl[i]))) {
-
-				SDE_DEBUG(
-					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
-					crtc->base.id, params_changed,
-					new->bw_ctl[i], old->bw_ctl[i]);
-				old->bw_ctl[i] = new->bw_ctl[i];
-				update_bus |= BIT(i);
-			}
-
-			if ((params_changed &&
-				(new->max_per_pipe_ib[i] >
-				 old->max_per_pipe_ib[i])) ||
-			    (!params_changed &&
-				(new->max_per_pipe_ib[i] <
-				old->max_per_pipe_ib[i]))) {
-
-				SDE_DEBUG(
-					"crtc=%d p=%d new_ib=%llu,old_ib=%llu\n",
-					crtc->base.id, params_changed,
-					new->max_per_pipe_ib[i],
-					old->max_per_pipe_ib[i]);
-				old->max_per_pipe_ib[i] =
-						new->max_per_pipe_ib[i];
-				update_bus |= BIT(i);
-			}
-
-			/* display rsc override during solver mode */
-			if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
-				get_sde_rsc_current_state(SDE_RSC_INDEX) !=
-						SDE_RSC_CLK_STATE) {
-				/* update new bandwidth in all cases */
-				if (params_changed && ((new->bw_ctl[i] !=
-						old->bw_ctl[i]) ||
-				      (new->max_per_pipe_ib[i] !=
-						old->max_per_pipe_ib[i]))) {
-					old->bw_ctl[i] = new->bw_ctl[i];
-					old->max_per_pipe_ib[i] =
-							new->max_per_pipe_ib[i];
-					update_bus |= BIT(i);
-				/*
-				 * reduce bw vote is not required in solver
-				 * mode
-				 */
-				} else if (!params_changed) {
-					update_bus &= ~BIT(i);
-				}
-			}
-		}
-
-		if ((params_changed &&
-				(new->core_clk_rate > old->core_clk_rate)) ||
-				(!params_changed &&
-				(new->core_clk_rate < old->core_clk_rate))) {
-			old->core_clk_rate = new->core_clk_rate;
-			update_clk = 1;
-		}
-	} else {
-		SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
-		memset(old, 0, sizeof(*old));
-		memset(new, 0, sizeof(*new));
-		update_bus = ~0;
-		update_clk = 1;
-		update_llcc = 1;
-	}
-	trace_sde_perf_crtc_update(crtc->base.id,
-		new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-		new->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC],
-		new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-		new->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC],
-		new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI],
-		new->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI],
-		new->core_clk_rate, stop_req,
-		update_bus, update_clk, params_changed);
-
-	if (update_llcc)
-		_sde_core_perf_crtc_update_llcc(kms, crtc);
-
-	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		if (update_bus & BIT(i))
-			_sde_core_perf_crtc_update_bus(kms, crtc, i);
-	}
-
-	if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
-	    ((get_sde_rsc_current_state(SDE_RSC_INDEX) != SDE_RSC_CLK_STATE
-	      && params_changed) ||
-	    (get_sde_rsc_current_state(SDE_RSC_INDEX) == SDE_RSC_CLK_STATE
-	      && update_bus)))
-		sde_rsc_client_trigger_vote(sde_cstate->rsc_client,
-				update_bus ? true : false);
-
-	/*
-	 * Update the clock after bandwidth vote to ensure
-	 * bandwidth is available before clock rate is increased.
-	 */
-	if (update_clk) {
-		clk_rate = _sde_core_perf_get_core_clk_rate(kms);
-
-		SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
-			old->core_clk_rate, new->core_clk_rate);
-		ret = sde_power_clk_set_rate(&priv->phandle,
-				kms->perf.clk_name, clk_rate);
-		if (ret) {
-			SDE_ERROR("failed to set %s clock rate %llu\n",
-					kms->perf.clk_name, clk_rate);
-			mutex_unlock(&sde_core_perf_lock);
-			return;
-		}
-
-		kms->perf.core_clk_rate = clk_rate;
-		SDE_DEBUG("update clk rate = %lld HZ\n", clk_rate);
-	}
-	mutex_unlock(&sde_core_perf_lock);
-
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-static ssize_t _sde_core_perf_threshold_high_write(struct file *file,
-		    const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_core_perf *perf = file->private_data;
-	u32 threshold_high = 0;
-	char buf[10];
-
-	if (!perf)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (kstrtouint(buf, 0, &threshold_high))
-		return -EFAULT;
-
-	if (threshold_high < SDE_PERF_THRESHOLD_HIGH_MIN)
-		threshold_high = SDE_PERF_THRESHOLD_HIGH_MIN;
-
-	perf->catalog->perf.max_bw_high = threshold_high;
-
-	return count;
-}
-
-static ssize_t _sde_core_perf_threshold_high_read(struct file *file,
-			char __user *buff, size_t count, loff_t *ppos)
-{
-	struct sde_core_perf *perf = file->private_data;
-	int len = 0;
-	char buf[20] = {'\0'};
-
-	if (!perf)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	len = snprintf(buf, sizeof(buf),
-			"%d\n", perf->catalog->perf.max_bw_high);
-
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;   /* increase offset */
-
-	return len;
-}
-
-static ssize_t _sde_core_perf_mode_write(struct file *file,
-		    const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_core_perf *perf = file->private_data;
-	struct sde_perf_cfg *cfg = &perf->catalog->perf;
-	u32 perf_mode = 0;
-	char buf[10];
-
-	if (!perf)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (kstrtouint(buf, 0, &perf_mode))
-		return -EFAULT;
-
-	if (perf_mode >= SDE_PERF_MODE_MAX)
-		return -EFAULT;
-
-	if (perf_mode == SDE_PERF_MODE_FIXED) {
-		DRM_INFO("fix performance mode\n");
-	} else if (perf_mode == SDE_PERF_MODE_MINIMUM) {
-		/* run the driver with max clk and BW vote */
-		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
-		perf->perf_tune.min_bus_vote =
-				(u64) cfg->max_bw_high * 1000;
-		DRM_INFO("minimum performance mode\n");
-	} else if (perf_mode == SDE_PERF_MODE_NORMAL) {
-		/* reset the perf tune params to 0 */
-		perf->perf_tune.min_core_clk = 0;
-		perf->perf_tune.min_bus_vote = 0;
-		DRM_INFO("normal performance mode\n");
-	}
-	perf->perf_tune.mode = perf_mode;
-
-	return count;
-}
-
-static ssize_t _sde_core_perf_mode_read(struct file *file,
-			char __user *buff, size_t count, loff_t *ppos)
-{
-	struct sde_core_perf *perf = file->private_data;
-	int len = 0;
-	char buf[SDE_PERF_MODE_STRING_SIZE] = {'\0'};
-
-	if (!perf)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	len = snprintf(buf, sizeof(buf),
-			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
-			perf->perf_tune.mode,
-			perf->perf_tune.min_core_clk,
-			perf->perf_tune.min_bus_vote);
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;   /* increase offset */
-
-	return len;
-}
-
-static const struct file_operations sde_core_perf_threshold_high_fops = {
-	.open = simple_open,
-	.read = _sde_core_perf_threshold_high_read,
-	.write = _sde_core_perf_threshold_high_write,
-};
-
-static const struct file_operations sde_core_perf_mode_fops = {
-	.open = simple_open,
-	.read = _sde_core_perf_mode_read,
-	.write = _sde_core_perf_mode_write,
-};
-
-static void sde_core_perf_debugfs_destroy(struct sde_core_perf *perf)
-{
-	debugfs_remove_recursive(perf->debugfs_root);
-	perf->debugfs_root = NULL;
-}
-
-int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
-		struct dentry *parent)
-{
-	struct sde_mdss_cfg *catalog = perf->catalog;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	priv = perf->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-
-	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
-	if (!perf->debugfs_root) {
-		SDE_ERROR("failed to create core perf debugfs\n");
-		return -EINVAL;
-	}
-
-	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
-			&perf->max_core_clk_rate);
-	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
-			&perf->core_clk_rate);
-	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
-			(u32 *)&catalog->perf.max_bw_low);
-	debugfs_create_file("threshold_high", 0600, perf->debugfs_root,
-			(u32 *)perf, &sde_core_perf_threshold_high_fops);
-	debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
-			(u32 *)&catalog->perf.min_core_ib);
-	debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
-			(u32 *)&catalog->perf.min_llcc_ib);
-	debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
-			(u32 *)&catalog->perf.min_dram_ib);
-	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
-			(u32 *)perf, &sde_core_perf_mode_fops);
-	debugfs_create_u32("bw_vote_mode", 0600, perf->debugfs_root,
-			&perf->bw_vote_mode);
-	debugfs_create_bool("bw_vote_mode_updated", 0600, perf->debugfs_root,
-			&perf->bw_vote_mode_updated);
-	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
-			&perf->fix_core_clk_rate);
-	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
-			&perf->fix_core_ib_vote);
-	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
-			&perf->fix_core_ab_vote);
-
-	debugfs_create_u32("uidle_perf_cnt", 0600, perf->debugfs_root,
-			&sde_kms->catalog->uidle_cfg.debugfs_perf);
-	debugfs_create_bool("uidle_enable", 0600, perf->debugfs_root,
-			&sde_kms->catalog->uidle_cfg.debugfs_ctrl);
-
-	return 0;
-}
-#else
-static void sde_core_perf_debugfs_destroy(struct sde_core_perf *perf)
-{
-}
-
-int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
-		struct dentry *parent)
-{
-	return 0;
-}
-#endif
-
-void sde_core_perf_destroy(struct sde_core_perf *perf)
-{
-	if (!perf) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	sde_core_perf_debugfs_destroy(perf);
-	perf->max_core_clk_rate = 0;
-	perf->core_clk = NULL;
-	perf->clk_name = NULL;
-	perf->phandle = NULL;
-	perf->catalog = NULL;
-	perf->dev = NULL;
-}
-
-int sde_core_perf_init(struct sde_core_perf *perf,
-		struct drm_device *dev,
-		struct sde_mdss_cfg *catalog,
-		struct sde_power_handle *phandle,
-		struct sde_power_client *pclient,
-		char *clk_name)
-{
-	if (!perf || !dev || !catalog || !phandle || !pclient || !clk_name) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	perf->dev = dev;
-	perf->catalog = catalog;
-	perf->phandle = phandle;
-	perf->pclient = pclient;
-	perf->clk_name = clk_name;
-	perf->sde_rsc_available = is_sde_rsc_available(SDE_RSC_INDEX);
-	/* set default mode */
-	if (perf->sde_rsc_available)
-		perf->bw_vote_mode = DISP_RSC_MODE;
-	else
-		perf->bw_vote_mode = APPS_RSC_MODE;
-
-	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
-	if (!perf->core_clk) {
-		SDE_ERROR("invalid core clk\n");
-		goto err;
-	}
-
-	perf->max_core_clk_rate = sde_power_clk_get_max_rate(phandle, clk_name);
-	if (!perf->max_core_clk_rate) {
-		SDE_DEBUG("optional max core clk rate, use default\n");
-		perf->max_core_clk_rate = SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE;
-	}
-
-	return 0;
-
-err:
-	sde_core_perf_destroy(perf);
-	return -ENODEV;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
deleted file mode 100644
index 4f2dd04..0000000
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_CORE_PERF_H_
-#define _SDE_CORE_PERF_H_
-
-#include <linux/types.h>
-#include <linux/dcache.h>
-#include <linux/mutex.h>
-#include <drm/drm_crtc.h>
-
-#include "sde_hw_catalog.h"
-#include "sde_power_handle.h"
-
-#define	SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE	320000000
-
-/**
- *  uidle performance counters mode
- * @SDE_PERF_UIDLE_DISABLE: Disable logging (default)
- * @SDE_PERF_UIDLE_CNT: Enable logging of uidle performance counters
- * @SDE_PERF_UIDLE_STATUS: Enable logging of uidle status
- * @SDE_PERF_UIDLE_MAX: Max available mode
- */
-#define SDE_PERF_UIDLE_DISABLE 0x0
-#define SDE_PERF_UIDLE_CNT BIT(0)
-#define SDE_PERF_UIDLE_STATUS BIT(1)
-#define SDE_PERF_UIDLE_MAX BIT(2)
-
-/**
- * struct sde_core_perf_params - definition of performance parameters
- * @max_per_pipe_ib: maximum instantaneous bandwidth request
- * @bw_ctl: arbitrated bandwidth request
- * @core_clk_rate: core clock rate request
- * @llcc_active: request to activate/deactivate the llcc
- */
-struct sde_core_perf_params {
-	u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX];
-	u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX];
-	u64 core_clk_rate;
-	bool llcc_active;
-};
-
-/**
- * struct sde_core_perf_tune - definition of performance tuning control
- * @mode: performance mode
- * @min_core_clk: minimum core clock
- * @min_bus_vote: minimum bus vote
- */
-struct sde_core_perf_tune {
-	u32 mode;
-	u64 min_core_clk;
-	u64 min_bus_vote;
-};
-
-/**
- * struct sde_core_perf - definition of core performance context
- * @dev: Pointer to drm device
- * @debugfs_root: top level debug folder
- * @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
- * @pclient: Pointer to power client
- * @clk_name: core clock name
- * @core_clk: Pointer to core clock structure
- * @core_clk_rate: current core clock rate
- * @max_core_clk_rate: maximum allowable core clock rate
- * @perf_tune: debug control for performance tuning
- * @enable_bw_release: debug control for bandwidth release
- * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
- * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
- * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
- * @bw_vote_mode: apps rsc vs display rsc bandwidth vote mode
- * @sde_rsc_available: is display rsc available
- * @bw_vote_mode_updated: bandwidth vote mode update
- * @llcc_active: status of the llcc, true if active.
- * @uidle_enabled: indicates if uidle is already enabled
- */
-struct sde_core_perf {
-	struct drm_device *dev;
-	struct dentry *debugfs_root;
-	struct sde_mdss_cfg *catalog;
-	struct sde_power_handle *phandle;
-	struct sde_power_client *pclient;
-	char *clk_name;
-	struct clk *core_clk;
-	u64 core_clk_rate;
-	u64 max_core_clk_rate;
-	struct sde_core_perf_tune perf_tune;
-	u32 enable_bw_release;
-	u64 fix_core_clk_rate;
-	u64 fix_core_ib_vote;
-	u64 fix_core_ab_vote;
-	u32 bw_vote_mode;
-	bool sde_rsc_available;
-	bool bw_vote_mode_updated;
-	bool llcc_active;
-	bool uidle_enabled;
-};
-
-/**
- * sde_core_perf_crtc_check - validate performance of the given crtc state
- * @crtc: Pointer to crtc
- * @state: Pointer to new crtc state
- * return: zero if success, or error code otherwise
- */
-int sde_core_perf_crtc_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state);
-
-/**
- * sde_core_perf_crtc_update - update performance of the given crtc
- * @crtc: Pointer to crtc
- * @params_changed: true if crtc parameters are modified
- * @stop_req: true if this is a stop request
- */
-void sde_core_perf_crtc_update(struct drm_crtc *crtc,
-		int params_changed, bool stop_req);
-
-/**
- * sde_core_perf_crtc_release_bw - release bandwidth of the given crtc
- * @crtc: Pointer to crtc
- */
-void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
-
-/**
- * sde_core_perf_crtc_update_uidle - attempts to enable uidle of the given crtc
- * @crtc: Pointer to crtc
- * @enable: enable/disable uidle
- */
-void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc, bool enable);
-
-/**
- * sde_core_perf_destroy - destroy the given core performance context
- * @perf: Pointer to core performance context
- */
-void sde_core_perf_destroy(struct sde_core_perf *perf);
-
-/**
- * sde_core_perf_init - initialize the given core performance context
- * @perf: Pointer to core performance context
- * @dev: Pointer to drm device
- * @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
- * @pclient: Pointer to power client
- * @clk_name: core clock name
- */
-int sde_core_perf_init(struct sde_core_perf *perf,
-		struct drm_device *dev,
-		struct sde_mdss_cfg *catalog,
-		struct sde_power_handle *phandle,
-		struct sde_power_client *pclient,
-		char *clk_name);
-
-/**
- * sde_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
- * @debugfs_parent: Pointer to parent debugfs
- */
-int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
-		struct dentry *parent);
-
-#endif /* _SDE_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
deleted file mode 100644
index 21e6698..0000000
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ /dev/null
@@ -1,6316 +0,0 @@
-/*
- * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/sort.h>
-#include <linux/debugfs.h>
-#include <linux/ktime.h>
-#include <uapi/drm/sde_drm.h>
-#include <drm/drm_mode.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_flip_work.h>
-#include <linux/clk/qcom.h>
-
-#include "sde_kms.h"
-#include "sde_hw_lm.h"
-#include "sde_hw_ctl.h"
-#include "sde_crtc.h"
-#include "sde_plane.h"
-#include "sde_hw_util.h"
-#include "sde_hw_catalog.h"
-#include "sde_color_processing.h"
-#include "sde_encoder.h"
-#include "sde_connector.h"
-#include "sde_vbif.h"
-#include "sde_power_handle.h"
-#include "sde_core_perf.h"
-#include "sde_trace.h"
-
-#define SDE_PSTATES_MAX (SDE_STAGE_MAX * 4)
-#define SDE_MULTIRECT_PLANE_MAX (SDE_STAGE_MAX * 2)
-
-struct sde_crtc_custom_events {
-	u32 event;
-	int (*func)(struct drm_crtc *crtc, bool en,
-			struct sde_irq_callback *irq);
-};
-
-static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
-	bool en, struct sde_irq_callback *ad_irq);
-static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
-	bool en, struct sde_irq_callback *idle_irq);
-static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
-		struct sde_irq_callback *noirq);
-
-static struct sde_crtc_custom_events custom_events[] = {
-	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
-	{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
-	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler},
-	{DRM_EVENT_HISTOGRAM, sde_cp_hist_interrupt},
-	{DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
-	{DRM_EVENT_LTM_HIST, sde_cp_ltm_hist_interrupt},
-	{DRM_EVENT_LTM_WB_PB, sde_cp_ltm_wb_pb_interrupt},
-};
-
-/* default input fence timeout, in ms */
-#define SDE_CRTC_INPUT_FENCE_TIMEOUT    10000
-
-/*
- * The default input fence timeout is 2 seconds while max allowed
- * range is 10 seconds. Any value above 10 seconds adds glitches beyond
- * tolerance limit.
- */
-#define SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT 10000
-
-/* layer mixer index on sde_crtc */
-#define LEFT_MIXER 0
-#define RIGHT_MIXER 1
-
-#define MISR_BUFF_SIZE			256
-
-/*
- * Time period for fps calculation in micro seconds.
- * Default value is set to 1 sec.
- */
-#define DEFAULT_FPS_PERIOD_1_SEC	1000000
-#define MAX_FPS_PERIOD_5_SECONDS	5000000
-#define MAX_FRAME_COUNT			1000
-#define MILI_TO_MICRO			1000
-
-static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
-{
-	struct msm_drm_private *priv;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid crtc\n");
-		return NULL;
-	}
-	priv = crtc->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return NULL;
-	}
-
-	return to_sde_kms(priv->kms);
-}
-
-static inline struct drm_encoder *_sde_crtc_get_encoder(struct drm_crtc *crtc)
-{
-	struct drm_encoder *enc;
-
-	drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
-		return enc;
-
-	return NULL;
-}
-
-static inline int _sde_crtc_power_enable(struct sde_crtc *sde_crtc, bool enable)
-{
-	struct drm_crtc *crtc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!sde_crtc) {
-		SDE_ERROR("invalid sde crtc\n");
-		return -EINVAL;
-	}
-
-	crtc = &sde_crtc->base;
-	if (!crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid drm device\n");
-		return -EINVAL;
-	}
-
-	priv = crtc->dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-
-	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-									enable);
-}
-
-/**
- * sde_crtc_calc_fps() - Calculates fps value.
- * @sde_crtc   : CRTC structure
- *
- * This function is called at frame done. It counts the number
- * of frames done for every 1 sec. Stores the value in measured_fps.
- * measured_fps value is 10 times the calculated fps value.
- * For example, measured_fps= 594 for calculated fps of 59.4
- */
-static void sde_crtc_calc_fps(struct sde_crtc *sde_crtc)
-{
-	ktime_t current_time_us;
-	u64 fps, diff_us;
-
-	current_time_us = ktime_get();
-	diff_us = (u64)ktime_us_delta(current_time_us,
-			sde_crtc->fps_info.last_sampled_time_us);
-	sde_crtc->fps_info.frame_count++;
-
-	if (diff_us >= DEFAULT_FPS_PERIOD_1_SEC) {
-
-		 /* Multiplying with 10 to get fps in floating point */
-		fps = ((u64)sde_crtc->fps_info.frame_count)
-						* DEFAULT_FPS_PERIOD_1_SEC * 10;
-		do_div(fps, diff_us);
-		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
-		SDE_DEBUG(" FPS for crtc%d is %d.%d\n",
-				sde_crtc->base.base.id, (unsigned int)fps/10,
-				(unsigned int)fps%10);
-		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
-		sde_crtc->fps_info.frame_count = 0;
-	}
-
-	if (!sde_crtc->fps_info.time_buf)
-		return;
-
-	/**
-	 * Array indexing is based on sliding window algorithm.
-	 * sde_crtc->time_buf has a maximum capacity of MAX_FRAME_COUNT
-	 * time slots. As the count increases to MAX_FRAME_COUNT + 1, the
-	 * counter loops around and comes back to the first index to store
-	 * the next ktime.
-	 */
-	sde_crtc->fps_info.time_buf[sde_crtc->fps_info.next_time_index++] =
-								ktime_get();
-	sde_crtc->fps_info.next_time_index %= MAX_FRAME_COUNT;
-}
-
-static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
-{
-	if (!sde_crtc)
-		return;
-}
-
-static int _sde_debugfs_fps_status_show(struct seq_file *s, void *data)
-{
-	struct sde_crtc *sde_crtc;
-	u64 fps_int, fps_float;
-	ktime_t current_time_us;
-	u64 fps, diff_us;
-
-	if (!s || !s->private) {
-		SDE_ERROR("invalid input param(s)\n");
-		return -EAGAIN;
-	}
-
-	sde_crtc = s->private;
-
-	current_time_us = ktime_get();
-	diff_us = (u64)ktime_us_delta(current_time_us,
-			sde_crtc->fps_info.last_sampled_time_us);
-
-	if (diff_us >= DEFAULT_FPS_PERIOD_1_SEC) {
-
-		 /* Multiplying with 10 to get fps in floating point */
-		fps = ((u64)sde_crtc->fps_info.frame_count)
-						* DEFAULT_FPS_PERIOD_1_SEC * 10;
-		do_div(fps, diff_us);
-		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
-		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
-		sde_crtc->fps_info.frame_count = 0;
-		SDE_DEBUG("Measured FPS for crtc%d is %d.%d\n",
-				sde_crtc->base.base.id, (unsigned int)fps/10,
-				(unsigned int)fps%10);
-	}
-
-	fps_int = (unsigned int) sde_crtc->fps_info.measured_fps;
-	fps_float = do_div(fps_int, 10);
-
-	seq_printf(s, "fps: %llu.%llu\n", fps_int, fps_float);
-
-	return 0;
-}
-
-
-static int _sde_debugfs_fps_status(struct inode *inode, struct file *file)
-{
-	return single_open(file, _sde_debugfs_fps_status_show,
-			inode->i_private);
-}
-
-static ssize_t fps_periodicity_ms_store(struct device *device,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-	int res;
-
-	/* Base of the input */
-	int cnt = 10;
-
-	if (!device || !buf) {
-		SDE_ERROR("invalid input param(s)\n");
-		return -EAGAIN;
-	}
-
-	crtc = dev_get_drvdata(device);
-	if (!crtc)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-
-	res = kstrtou32(buf, cnt, &sde_crtc->fps_info.fps_periodic_duration);
-	if (res < 0)
-		return res;
-
-	if (sde_crtc->fps_info.fps_periodic_duration <= 0)
-		sde_crtc->fps_info.fps_periodic_duration =
-						DEFAULT_FPS_PERIOD_1_SEC;
-	else if ((sde_crtc->fps_info.fps_periodic_duration) * MILI_TO_MICRO >
-						MAX_FPS_PERIOD_5_SECONDS)
-		sde_crtc->fps_info.fps_periodic_duration =
-						MAX_FPS_PERIOD_5_SECONDS;
-	else
-		sde_crtc->fps_info.fps_periodic_duration *= MILI_TO_MICRO;
-
-	return count;
-}
-
-static ssize_t fps_periodicity_ms_show(struct device *device,
-		struct device_attribute *attr, char *buf)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-
-	if (!device || !buf) {
-		SDE_ERROR("invalid input param(s)\n");
-		return -EAGAIN;
-	}
-
-	crtc = dev_get_drvdata(device);
-	if (!crtc)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-
-	return scnprintf(buf, PAGE_SIZE, "%d\n",
-		(sde_crtc->fps_info.fps_periodic_duration)/MILI_TO_MICRO);
-}
-
-static ssize_t measured_fps_show(struct device *device,
-		struct device_attribute *attr, char *buf)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-	unsigned int fps_int, fps_decimal;
-	u64 fps = 0, frame_count = 1;
-	ktime_t current_time;
-	int i = 0, current_time_index;
-	u64 diff_us;
-
-	if (!device || !buf) {
-		SDE_ERROR("invalid input param(s)\n");
-		return -EAGAIN;
-	}
-
-	crtc = dev_get_drvdata(device);
-	if (!crtc) {
-		scnprintf(buf, PAGE_SIZE, "fps information not available");
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-
-	if (!sde_crtc->fps_info.time_buf) {
-		scnprintf(buf, PAGE_SIZE,
-				"timebuf null - fps information not available");
-		return -EINVAL;
-	}
-
-	/**
-	 * Whenever the time_index counter comes to zero upon decrementing,
-	 * it is set to the last index since it is the next index that we
-	 * should check for calculating the buftime.
-	 */
-	current_time_index = (sde_crtc->fps_info.next_time_index == 0) ?
-		MAX_FRAME_COUNT - 1 : (sde_crtc->fps_info.next_time_index - 1);
-
-	current_time = ktime_get();
-
-	for (i = 0; i < MAX_FRAME_COUNT; i++) {
-		u64 ptime = (u64)ktime_to_us(current_time);
-		u64 buftime = (u64)ktime_to_us(
-			sde_crtc->fps_info.time_buf[current_time_index]);
-		diff_us = (u64)ktime_us_delta(current_time,
-			sde_crtc->fps_info.time_buf[current_time_index]);
-		if (ptime > buftime && diff_us >= (u64)
-				sde_crtc->fps_info.fps_periodic_duration) {
-
-			/* Multiplying with 10 to get fps in floating point */
-			fps = frame_count * DEFAULT_FPS_PERIOD_1_SEC * 10;
-			do_div(fps, diff_us);
-			sde_crtc->fps_info.measured_fps = (unsigned int)fps;
-			SDE_DEBUG("measured fps: %d\n",
-					sde_crtc->fps_info.measured_fps);
-			break;
-		}
-
-		current_time_index = (current_time_index == 0) ?
-			(MAX_FRAME_COUNT - 1) : (current_time_index - 1);
-		SDE_DEBUG("current time index: %d\n", current_time_index);
-
-		frame_count++;
-	}
-
-	if (i == MAX_FRAME_COUNT) {
-
-		current_time_index = (sde_crtc->fps_info.next_time_index == 0) ?
-		MAX_FRAME_COUNT - 1 : (sde_crtc->fps_info.next_time_index - 1);
-
-		diff_us = (u64)ktime_us_delta(current_time,
-			sde_crtc->fps_info.time_buf[current_time_index]);
-
-		if (diff_us >= sde_crtc->fps_info.fps_periodic_duration) {
-
-			/* Multiplying with 10 to get fps in floating point */
-			fps = (frame_count) * DEFAULT_FPS_PERIOD_1_SEC * 10;
-			do_div(fps, diff_us);
-			sde_crtc->fps_info.measured_fps = (unsigned int)fps;
-		}
-	}
-
-	fps_int = (unsigned int) sde_crtc->fps_info.measured_fps;
-	fps_decimal = do_div(fps_int, 10);
-	return scnprintf(buf, PAGE_SIZE,
-		"fps: %d.%d duration:%d frame_count:%lld", fps_int, fps_decimal,
-			sde_crtc->fps_info.fps_periodic_duration, frame_count);
-}
-
-static ssize_t vsync_event_show(struct device *device,
-	struct device_attribute *attr, char *buf)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-
-	if (!device || !buf) {
-		SDE_ERROR("invalid input param(s)\n");
-		return -EAGAIN;
-	}
-
-	crtc = dev_get_drvdata(device);
-	sde_crtc = to_sde_crtc(crtc);
-	return scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n",
-			ktime_to_ns(sde_crtc->vblank_last_cb_time));
-}
-
-static DEVICE_ATTR_RO(vsync_event);
-static DEVICE_ATTR_RO(measured_fps);
-static DEVICE_ATTR_RW(fps_periodicity_ms);
-
-static struct attribute *sde_crtc_dev_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	&dev_attr_measured_fps.attr,
-	&dev_attr_fps_periodicity_ms.attr,
-	NULL
-};
-
-static const struct attribute_group sde_crtc_attr_group = {
-	.attrs = sde_crtc_dev_attrs,
-};
-
-static const struct attribute_group *sde_crtc_attr_groups[] = {
-	&sde_crtc_attr_group,
-	NULL,
-};
-
-static void sde_crtc_destroy(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
-	SDE_DEBUG("\n");
-
-	if (!crtc)
-		return;
-
-	if (sde_crtc->vsync_event_sf)
-		sysfs_put(sde_crtc->vsync_event_sf);
-	if (sde_crtc->sysfs_dev)
-		device_unregister(sde_crtc->sysfs_dev);
-
-	if (sde_crtc->blob_info)
-		drm_property_blob_put(sde_crtc->blob_info);
-	msm_property_destroy(&sde_crtc->property_info);
-	sde_cp_crtc_destroy_properties(crtc);
-
-	sde_fence_deinit(sde_crtc->output_fence);
-	_sde_crtc_deinit_events(sde_crtc);
-
-	drm_crtc_cleanup(crtc);
-	mutex_destroy(&sde_crtc->crtc_lock);
-	kfree(sde_crtc);
-}
-
-static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	SDE_DEBUG("\n");
-
-	if ((msm_is_mode_seamless(adjusted_mode) ||
-			msm_is_mode_seamless_vrr(adjusted_mode)) &&
-		(!crtc->enabled)) {
-		SDE_ERROR("crtc state prevents seamless transition\n");
-		return false;
-	}
-
-	return true;
-}
-
-static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
-	struct sde_plane_state *pstate, struct sde_format *format)
-{
-	uint32_t blend_op, fg_alpha, bg_alpha;
-	uint32_t blend_type;
-	struct sde_hw_mixer *lm = mixer->hw_lm;
-
-	/* default to opaque blending */
-	fg_alpha = sde_plane_get_property(pstate, PLANE_PROP_ALPHA);
-	bg_alpha = 0xFF - fg_alpha;
-	blend_op = SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
-	blend_type = sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP);
-
-	SDE_DEBUG("blend type:0x%x blend alpha:0x%x\n", blend_type, fg_alpha);
-
-	switch (blend_type) {
-
-	case SDE_DRM_BLEND_OP_OPAQUE:
-		blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
-			SDE_BLEND_BG_ALPHA_BG_CONST;
-		break;
-
-	case SDE_DRM_BLEND_OP_PREMULTIPLIED:
-		if (format->alpha_enable) {
-			blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
-				SDE_BLEND_BG_ALPHA_FG_PIXEL;
-			if (fg_alpha != 0xff) {
-				bg_alpha = fg_alpha;
-				blend_op |= SDE_BLEND_BG_MOD_ALPHA |
-					SDE_BLEND_BG_INV_MOD_ALPHA;
-			} else {
-				blend_op |= SDE_BLEND_BG_INV_ALPHA;
-			}
-		}
-		break;
-
-	case SDE_DRM_BLEND_OP_COVERAGE:
-		if (format->alpha_enable) {
-			blend_op = SDE_BLEND_FG_ALPHA_FG_PIXEL |
-				SDE_BLEND_BG_ALPHA_FG_PIXEL;
-			if (fg_alpha != 0xff) {
-				bg_alpha = fg_alpha;
-				blend_op |= SDE_BLEND_FG_MOD_ALPHA |
-					SDE_BLEND_BG_MOD_ALPHA |
-					SDE_BLEND_BG_INV_MOD_ALPHA;
-			} else {
-				blend_op |= SDE_BLEND_BG_INV_ALPHA;
-			}
-		}
-		break;
-	default:
-		/* do nothing */
-		break;
-	}
-
-	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
-						bg_alpha, blend_op);
-	SDE_DEBUG(
-		"format: %4.4s, alpha_enable %u fg alpha:0x%x bg alpha:0x%x blend_op:0x%x\n",
-		(char *) &format->base.pixel_format,
-		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
-}
-
-static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
-		struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer,
-		struct sde_hw_dim_layer *dim_layer)
-{
-	struct sde_crtc_state *cstate;
-	struct sde_hw_mixer *lm;
-	struct sde_hw_dim_layer split_dim_layer;
-	int i;
-
-	if (!dim_layer->rect.w || !dim_layer->rect.h) {
-		SDE_DEBUG("empty dim_layer\n");
-		return;
-	}
-
-	cstate = to_sde_crtc_state(crtc->state);
-
-	SDE_DEBUG("dim_layer - flags:%d, stage:%d\n",
-			dim_layer->flags, dim_layer->stage);
-
-	split_dim_layer.stage = dim_layer->stage;
-	split_dim_layer.color_fill = dim_layer->color_fill;
-
-	/*
-	 * traverse through the layer mixers attached to crtc and find the
-	 * intersecting dim layer rect in each LM and program accordingly.
-	 */
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		split_dim_layer.flags = dim_layer->flags;
-
-		sde_kms_rect_intersect(&cstate->lm_roi[i], &dim_layer->rect,
-					&split_dim_layer.rect);
-		if (sde_kms_rect_is_null(&split_dim_layer.rect)) {
-			/*
-			 * no extra programming required for non-intersecting
-			 * layer mixers with INCLUSIVE dim layer
-			 */
-			if (split_dim_layer.flags & SDE_DRM_DIM_LAYER_INCLUSIVE)
-				continue;
-
-			/*
-			 * program the other non-intersecting layer mixers with
-			 * INCLUSIVE dim layer of full size for uniformity
-			 * with EXCLUSIVE dim layer config.
-			 */
-			split_dim_layer.flags &= ~SDE_DRM_DIM_LAYER_EXCLUSIVE;
-			split_dim_layer.flags |= SDE_DRM_DIM_LAYER_INCLUSIVE;
-			memcpy(&split_dim_layer.rect, &cstate->lm_bounds[i],
-					sizeof(split_dim_layer.rect));
-
-		} else {
-			split_dim_layer.rect.x =
-					split_dim_layer.rect.x -
-						cstate->lm_roi[i].x;
-			split_dim_layer.rect.y =
-					split_dim_layer.rect.y -
-						cstate->lm_roi[i].y;
-		}
-
-		SDE_EVT32_VERBOSE(DRMID(crtc),
-				cstate->lm_roi[i].x,
-				cstate->lm_roi[i].y,
-				cstate->lm_roi[i].w,
-				cstate->lm_roi[i].h,
-				dim_layer->rect.x,
-				dim_layer->rect.y,
-				dim_layer->rect.w,
-				dim_layer->rect.h,
-				split_dim_layer.rect.x,
-				split_dim_layer.rect.y,
-				split_dim_layer.rect.w,
-				split_dim_layer.rect.h);
-
-		SDE_DEBUG("split_dim_layer - LM:%d, rect:{%d,%d,%d,%d}}\n",
-			i, split_dim_layer.rect.x, split_dim_layer.rect.y,
-			split_dim_layer.rect.w, split_dim_layer.rect.h);
-
-		lm = mixer[i].hw_lm;
-		mixer[i].mixer_op_mode |= 1 << split_dim_layer.stage;
-		lm->ops.setup_dim_layer(lm, &split_dim_layer);
-	}
-}
-
-void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
-		const struct sde_rect **crtc_roi)
-{
-	struct sde_crtc_state *crtc_state;
-
-	if (!state || !crtc_roi)
-		return;
-
-	crtc_state = to_sde_crtc_state(state);
-	*crtc_roi = &crtc_state->crtc_roi;
-}
-
-bool sde_crtc_is_crtc_roi_dirty(struct drm_crtc_state *state)
-{
-	struct sde_crtc_state *cstate;
-	struct sde_crtc *sde_crtc;
-
-	if (!state || !state->crtc)
-		return false;
-
-	sde_crtc = to_sde_crtc(state->crtc);
-	cstate = to_sde_crtc_state(state);
-
-	return msm_property_is_dirty(&sde_crtc->property_info,
-			&cstate->property_state, CRTC_PROP_ROI_V1);
-}
-
-static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
-		void __user *usr_ptr)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc_state *cstate;
-	struct sde_drm_roi_v1 roi_v1;
-	int i;
-
-	if (!state) {
-		SDE_ERROR("invalid args\n");
-		return -EINVAL;
-	}
-
-	cstate = to_sde_crtc_state(state);
-	crtc = cstate->base.crtc;
-
-	memset(&cstate->user_roi_list, 0, sizeof(cstate->user_roi_list));
-
-	if (!usr_ptr) {
-		SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
-		return 0;
-	}
-
-	if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
-		SDE_ERROR("crtc%d: failed to copy roi_v1 data\n", DRMID(crtc));
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("crtc%d: num_rects %d\n", DRMID(crtc), roi_v1.num_rects);
-
-	if (roi_v1.num_rects == 0) {
-		SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
-		return 0;
-	}
-
-	if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
-		SDE_ERROR("crtc%d: too many rects specified: %d\n", DRMID(crtc),
-				roi_v1.num_rects);
-		return -EINVAL;
-	}
-
-	cstate->user_roi_list.num_rects = roi_v1.num_rects;
-	for (i = 0; i < roi_v1.num_rects; ++i) {
-		cstate->user_roi_list.roi[i] = roi_v1.roi[i];
-		SDE_DEBUG("crtc%d: roi%d: roi (%d,%d) (%d,%d)\n",
-				DRMID(crtc), i,
-				cstate->user_roi_list.roi[i].x1,
-				cstate->user_roi_list.roi[i].y1,
-				cstate->user_roi_list.roi[i].x2,
-				cstate->user_roi_list.roi[i].y2);
-		SDE_EVT32_VERBOSE(DRMID(crtc),
-				cstate->user_roi_list.roi[i].x1,
-				cstate->user_roi_list.roi[i].y1,
-				cstate->user_roi_list.roi[i].x2,
-				cstate->user_roi_list.roi[i].y2);
-	}
-
-	return 0;
-}
-
-static bool _sde_crtc_setup_is_3dmux_dsc(struct drm_crtc_state *state)
-{
-	int i;
-	struct sde_crtc_state *cstate;
-	bool is_3dmux_dsc = false;
-
-	cstate = to_sde_crtc_state(state);
-
-	for (i = 0; i < cstate->num_connectors; i++) {
-		struct drm_connector *conn = cstate->connectors[i];
-
-		if (sde_connector_get_topology_name(conn) ==
-				SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC)
-			is_3dmux_dsc = true;
-	}
-
-	return is_3dmux_dsc;
-}
-
-static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct drm_connector *conn;
-	struct drm_connector_state *conn_state;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	struct sde_rect *crtc_roi;
-	struct msm_mode_info mode_info;
-	int i = 0;
-	int rc;
-	bool is_crtc_roi_dirty;
-	bool is_any_conn_roi_dirty;
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-	crtc_roi = &crtc_state->crtc_roi;
-
-	is_crtc_roi_dirty = sde_crtc_is_crtc_roi_dirty(state);
-	is_any_conn_roi_dirty = false;
-
-	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
-		struct sde_connector *sde_conn;
-		struct sde_connector_state *sde_conn_state;
-		struct sde_rect conn_roi;
-
-		if (!conn_state || conn_state->crtc != crtc)
-			continue;
-
-		rc = sde_connector_get_mode_info(conn_state, &mode_info);
-		if (rc) {
-			SDE_ERROR("failed to get mode info\n");
-			return -EINVAL;
-		}
-
-		if (!mode_info.roi_caps.enabled)
-			continue;
-
-		sde_conn = to_sde_connector(conn_state->connector);
-		sde_conn_state = to_sde_connector_state(conn_state);
-
-		is_any_conn_roi_dirty = is_any_conn_roi_dirty ||
-				msm_property_is_dirty(
-						&sde_conn->property_info,
-						&sde_conn_state->property_state,
-						CONNECTOR_PROP_ROI_V1);
-
-		/*
-		 * current driver only supports same connector and crtc size,
-		 * but if support for different sizes is added, driver needs
-		 * to check the connector roi here to make sure is full screen
-		 * for dsc 3d-mux topology that doesn't support partial update.
-		 */
-		if (memcmp(&sde_conn_state->rois, &crtc_state->user_roi_list,
-				sizeof(crtc_state->user_roi_list))) {
-			SDE_ERROR("%s: crtc -> conn roi scaling unsupported\n",
-					sde_crtc->name);
-			return -EINVAL;
-		}
-
-		sde_kms_rect_merge_rectangles(&sde_conn_state->rois, &conn_roi);
-		SDE_EVT32_VERBOSE(DRMID(crtc), DRMID(conn),
-				conn_roi.x, conn_roi.y,
-				conn_roi.w, conn_roi.h);
-	}
-
-	/*
-	 * Check against CRTC ROI and Connector ROI not being updated together.
-	 * This restriction should be relaxed when Connector ROI scaling is
-	 * supported.
-	 */
-	if (is_any_conn_roi_dirty != is_crtc_roi_dirty) {
-		SDE_ERROR("connector/crtc rois not updated together\n");
-		return -EINVAL;
-	}
-
-	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
-
-	/* clear the ROI to null if it matches full screen anyways */
-	if (crtc_roi->x == 0 && crtc_roi->y == 0 &&
-			crtc_roi->w == state->adjusted_mode.hdisplay &&
-			crtc_roi->h == state->adjusted_mode.vdisplay)
-		memset(crtc_roi, 0, sizeof(*crtc_roi));
-
-	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
-			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
-	SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w,
-			crtc_roi->h);
-
-	return 0;
-}
-
-static int _sde_crtc_check_autorefresh(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	struct drm_connector *conn;
-	struct drm_connector_state *conn_state;
-	int i;
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-
-	if (sde_kms_rect_is_null(&crtc_state->crtc_roi))
-		return 0;
-
-	/* partial update active, check if autorefresh is also requested */
-	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
-		uint64_t autorefresh;
-
-		if (!conn_state || conn_state->crtc != crtc)
-			continue;
-
-		autorefresh = sde_connector_get_property(conn_state,
-				CONNECTOR_PROP_AUTOREFRESH);
-		if (autorefresh) {
-			SDE_ERROR(
-				"%s: autorefresh & partial crtc roi incompatible %llu\n",
-					sde_crtc->name, autorefresh);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static int _sde_crtc_set_lm_roi(struct drm_crtc *crtc,
-		struct drm_crtc_state *state, int lm_idx)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *crtc_roi;
-	const struct sde_rect *lm_bounds;
-	struct sde_rect *lm_roi;
-
-	if (!crtc || !state || lm_idx >= ARRAY_SIZE(crtc_state->lm_bounds))
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-	crtc_roi = &crtc_state->crtc_roi;
-	lm_bounds = &crtc_state->lm_bounds[lm_idx];
-	lm_roi = &crtc_state->lm_roi[lm_idx];
-
-	if (sde_kms_rect_is_null(crtc_roi))
-		memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
-	else
-		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
-
-	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
-			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
-
-	/*
-	 * partial update is not supported with 3dmux dsc or dest scaler.
-	 * hence, crtc roi must match the mixer dimensions.
-	 */
-	if (crtc_state->num_ds_enabled ||
-		_sde_crtc_setup_is_3dmux_dsc(state)) {
-		if (memcmp(lm_roi, lm_bounds, sizeof(struct sde_rect))) {
-			SDE_ERROR("Unsupported: Dest scaler/3d mux DSC + PU\n");
-			return -EINVAL;
-		}
-	}
-
-	/* if any dimension is zero, clear all dimensions for clarity */
-	if (sde_kms_rect_is_null(lm_roi))
-		memset(lm_roi, 0, sizeof(*lm_roi));
-
-	return 0;
-}
-
-static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	u32 disp_bitmask = 0;
-	int i;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-
-	/* pingpong split: one ROI, one LM, two physical displays */
-	if (crtc_state->is_ppsplit) {
-		u32 lm_split_width = crtc_state->lm_bounds[0].w / 2;
-		struct sde_rect *roi = &crtc_state->lm_roi[0];
-
-		if (sde_kms_rect_is_null(roi))
-			disp_bitmask = 0;
-		else if ((u32)roi->x + (u32)roi->w <= lm_split_width)
-			disp_bitmask = BIT(0);		/* left only */
-		else if (roi->x >= lm_split_width)
-			disp_bitmask = BIT(1);		/* right only */
-		else
-			disp_bitmask = BIT(0) | BIT(1); /* left and right */
-	} else {
-		for (i = 0; i < sde_crtc->num_mixers; i++) {
-			if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
-				disp_bitmask |= BIT(i);
-		}
-	}
-
-	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
-
-	return disp_bitmask;
-}
-
-static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *roi[CRTC_DUAL_MIXERS];
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-
-	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
-		SDE_ERROR("%s: unsupported number of mixers: %d\n",
-				sde_crtc->name, sde_crtc->num_mixers);
-		return -EINVAL;
-	}
-
-	/*
-	 * If using pingpong split: one ROI, one LM, two physical displays
-	 * then the ROI must be centered on the panel split boundary and
-	 * be of equal width across the split.
-	 */
-	if (crtc_state->is_ppsplit) {
-		u16 panel_split_width;
-		u32 display_mask;
-
-		roi[0] = &crtc_state->lm_roi[0];
-
-		if (sde_kms_rect_is_null(roi[0]))
-			return 0;
-
-		display_mask = _sde_crtc_get_displays_affected(crtc, state);
-		if (display_mask != (BIT(0) | BIT(1)))
-			return 0;
-
-		panel_split_width = crtc_state->lm_bounds[0].w / 2;
-		if (roi[0]->x + roi[0]->w / 2 != panel_split_width) {
-			SDE_ERROR("%s: roi x %d w %d split %d\n",
-					sde_crtc->name, roi[0]->x, roi[0]->w,
-					panel_split_width);
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	/*
-	 * On certain HW, if using 2 LM, ROIs must be split evenly between the
-	 * LMs and be of equal width.
-	 */
-	if (sde_crtc->num_mixers < 2)
-		return 0;
-
-	roi[0] = &crtc_state->lm_roi[0];
-	roi[1] = &crtc_state->lm_roi[1];
-
-	/* if one of the roi is null it's a left/right-only update */
-	if (sde_kms_rect_is_null(roi[0]) || sde_kms_rect_is_null(roi[1]))
-		return 0;
-
-	/* check lm rois are equal width & first roi ends at 2nd roi */
-	if (roi[0]->x + roi[0]->w != roi[1]->x || roi[0]->w != roi[1]->w) {
-		SDE_ERROR(
-			"%s: rois not centered and symmetric: roi0 x %d w %d roi1 x %d w %d\n",
-				sde_crtc->name, roi[0]->x, roi[0]->w,
-				roi[1]->x, roi[1]->w);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int _sde_crtc_check_planes_within_crtc_roi(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *crtc_roi;
-	const struct drm_plane_state *pstate;
-	struct drm_plane *plane;
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	/*
-	 * Reject commit if a Plane CRTC destination coordinates fall outside
-	 * the partial CRTC ROI. LM output is determined via connector ROIs,
-	 * if they are specified, not Plane CRTC ROIs.
-	 */
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
-	crtc_roi = &crtc_state->crtc_roi;
-
-	if (sde_kms_rect_is_null(crtc_roi))
-		return 0;
-
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		struct sde_rect plane_roi, intersection;
-
-		if (IS_ERR_OR_NULL(pstate)) {
-			int rc = PTR_ERR(pstate);
-
-			SDE_ERROR("%s: failed to get plane%d state, %d\n",
-					sde_crtc->name, plane->base.id, rc);
-			return rc;
-		}
-
-		plane_roi.x = pstate->crtc_x;
-		plane_roi.y = pstate->crtc_y;
-		plane_roi.w = pstate->crtc_w;
-		plane_roi.h = pstate->crtc_h;
-		sde_kms_rect_intersect(crtc_roi, &plane_roi, &intersection);
-		if (!sde_kms_rect_is_equal(&plane_roi, &intersection)) {
-			SDE_ERROR(
-				"%s: plane%d crtc roi (%d,%d,%d,%d) outside crtc roi (%d,%d,%d,%d)\n",
-					sde_crtc->name, plane->base.id,
-					plane_roi.x, plane_roi.y,
-					plane_roi.w, plane_roi.h,
-					crtc_roi->x, crtc_roi->y,
-					crtc_roi->w, crtc_roi->h);
-			return -E2BIG;
-		}
-	}
-
-	return 0;
-}
-
-static int _sde_crtc_check_rois(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *sde_crtc_state;
-	struct msm_mode_info mode_info;
-	int rc, lm_idx, i;
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	memset(&mode_info, 0, sizeof(mode_info));
-
-	sde_crtc = to_sde_crtc(crtc);
-	sde_crtc_state = to_sde_crtc_state(state);
-
-	/*
-	 * check connector array cached at modeset time since incoming atomic
-	 * state may not include any connectors if they aren't modified
-	 */
-	for (i = 0; i < sde_crtc_state->num_connectors; i++) {
-		struct drm_connector *conn = sde_crtc_state->connectors[i];
-
-		if (!conn || !conn->state)
-			continue;
-
-		rc = sde_connector_get_mode_info(conn->state, &mode_info);
-		if (rc) {
-			SDE_ERROR("failed to get mode info\n");
-			return -EINVAL;
-		}
-
-		if (!mode_info.roi_caps.enabled)
-			continue;
-
-		if (sde_crtc_state->user_roi_list.num_rects >
-				mode_info.roi_caps.num_roi) {
-			SDE_ERROR("roi count is exceeding limit, %d > %d\n",
-					sde_crtc_state->user_roi_list.num_rects,
-					mode_info.roi_caps.num_roi);
-			return -E2BIG;
-		}
-
-		rc = _sde_crtc_set_crtc_roi(crtc, state);
-		if (rc)
-			return rc;
-
-		rc = _sde_crtc_check_autorefresh(crtc, state);
-		if (rc)
-			return rc;
-
-		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-			rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
-			if (rc)
-				return rc;
-		}
-
-		rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
-		if (rc)
-			return rc;
-
-		rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *lm_roi;
-	struct sde_hw_mixer *hw_lm;
-	int lm_idx, lm_horiz_position;
-
-	if (!crtc)
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(crtc->state);
-
-	lm_horiz_position = 0;
-	for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-		struct sde_hw_mixer_cfg cfg;
-
-		lm_roi = &crtc_state->lm_roi[lm_idx];
-		hw_lm = sde_crtc->mixers[lm_idx].hw_lm;
-
-		SDE_EVT32(DRMID(crtc_state->base.crtc), lm_idx,
-			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
-
-		if (sde_kms_rect_is_null(lm_roi))
-			continue;
-
-		hw_lm->cfg.out_width = lm_roi->w;
-		hw_lm->cfg.out_height = lm_roi->h;
-		hw_lm->cfg.right_mixer = lm_horiz_position;
-
-		cfg.out_width = lm_roi->w;
-		cfg.out_height = lm_roi->h;
-		cfg.right_mixer = lm_horiz_position++;
-		cfg.flags = 0;
-		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
-	}
-}
-
-struct plane_state {
-	struct sde_plane_state *sde_pstate;
-	const struct drm_plane_state *drm_pstate;
-	int stage;
-	u32 pipe_id;
-};
-
-static int pstate_cmp(const void *a, const void *b)
-{
-	struct plane_state *pa = (struct plane_state *)a;
-	struct plane_state *pb = (struct plane_state *)b;
-	int rc = 0;
-	int pa_zpos, pb_zpos;
-
-	pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
-	pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
-
-	if (pa_zpos != pb_zpos)
-		rc = pa_zpos - pb_zpos;
-	else
-		rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
-
-	return rc;
-}
-
-/*
- * validate and set source split:
- * use pstates sorted by stage to check planes on same stage
- * we assume that all pipes are in source split so its valid to compare
- * without taking into account left/right mixer placement
- */
-static int _sde_crtc_validate_src_split_order(struct drm_crtc *crtc,
-		struct plane_state *pstates, int cnt)
-{
-	struct plane_state *prv_pstate, *cur_pstate;
-	struct sde_rect left_rect, right_rect;
-	struct sde_kms *sde_kms;
-	int32_t left_pid, right_pid;
-	int32_t stage;
-	int i, rc = 0;
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms || !sde_kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	for (i = 1; i < cnt; i++) {
-		prv_pstate = &pstates[i - 1];
-		cur_pstate = &pstates[i];
-
-		if (prv_pstate->stage != cur_pstate->stage)
-			continue;
-
-		stage = cur_pstate->stage;
-
-		left_pid = prv_pstate->sde_pstate->base.plane->base.id;
-		POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
-			prv_pstate->drm_pstate->crtc_y,
-			prv_pstate->drm_pstate->crtc_w,
-			prv_pstate->drm_pstate->crtc_h, false);
-
-		right_pid = cur_pstate->sde_pstate->base.plane->base.id;
-		POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
-			cur_pstate->drm_pstate->crtc_y,
-			cur_pstate->drm_pstate->crtc_w,
-			cur_pstate->drm_pstate->crtc_h, false);
-
-		if (right_rect.x < left_rect.x) {
-			swap(left_pid, right_pid);
-			swap(left_rect, right_rect);
-			swap(prv_pstate, cur_pstate);
-		}
-
-		/*
-		 * - planes are enumerated in pipe-priority order such that
-		 *   planes with lower drm_id must be left-most in a shared
-		 *   blend-stage when using source split.
-		 * - planes in source split must be contiguous in width
-		 * - planes in source split must have same dest yoff and height
-		 */
-		if ((right_pid < left_pid) &&
-			!sde_kms->catalog->pipe_order_type) {
-			SDE_ERROR(
-			  "invalid src split cfg, stage:%d left:%d right:%d\n",
-				stage, left_pid, right_pid);
-			return -EINVAL;
-		} else if (right_rect.x != (left_rect.x + left_rect.w)) {
-			SDE_ERROR(
-			  "invalid coordinates, stage:%d l:%d-%d r:%d-%d\n",
-				stage, left_rect.x, left_rect.w,
-				right_rect.x, right_rect.w);
-			return -EINVAL;
-		} else if ((left_rect.y != right_rect.y) ||
-				(left_rect.h != right_rect.h)) {
-			SDE_ERROR(
-			  "stage:%d invalid yoff/ht: l_yxh:%dx%d r_yxh:%dx%d\n",
-				stage, left_rect.y, left_rect.h,
-				right_rect.y, right_rect.h);
-			return -EINVAL;
-		}
-	}
-
-	return rc;
-}
-
-static void _sde_crtc_set_src_split_order(struct drm_crtc *crtc,
-		struct plane_state *pstates, int cnt)
-{
-	struct plane_state *prv_pstate, *cur_pstate, *nxt_pstate;
-	struct sde_kms *sde_kms;
-	struct sde_rect left_rect, right_rect;
-	int32_t left_pid, right_pid;
-	int32_t stage;
-	int i;
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms || !sde_kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	if (!sde_kms->catalog->pipe_order_type)
-		return;
-
-	for (i = 0; i < cnt; i++) {
-		prv_pstate = (i > 0) ? &pstates[i - 1] : NULL;
-		cur_pstate = &pstates[i];
-		nxt_pstate = ((i + 1) < cnt) ? &pstates[i + 1] : NULL;
-
-		if ((!prv_pstate) || (prv_pstate->stage != cur_pstate->stage)) {
-			/*
-			 * reset if prv or nxt pipes are not in the same stage
-			 * as the cur pipe
-			 */
-			if ((!nxt_pstate)
-				    || (nxt_pstate->stage != cur_pstate->stage))
-				cur_pstate->sde_pstate->pipe_order_flags = 0;
-
-			continue;
-		}
-
-		stage = cur_pstate->stage;
-
-		left_pid = prv_pstate->sde_pstate->base.plane->base.id;
-		POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
-			prv_pstate->drm_pstate->crtc_y,
-			prv_pstate->drm_pstate->crtc_w,
-			prv_pstate->drm_pstate->crtc_h, false);
-
-		right_pid = cur_pstate->sde_pstate->base.plane->base.id;
-		POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
-			cur_pstate->drm_pstate->crtc_y,
-			cur_pstate->drm_pstate->crtc_w,
-			cur_pstate->drm_pstate->crtc_h, false);
-
-		if (right_rect.x < left_rect.x) {
-			swap(left_pid, right_pid);
-			swap(left_rect, right_rect);
-			swap(prv_pstate, cur_pstate);
-		}
-
-		cur_pstate->sde_pstate->pipe_order_flags = SDE_SSPP_RIGHT;
-		prv_pstate->sde_pstate->pipe_order_flags = 0;
-	}
-
-	for (i = 0; i < cnt; i++) {
-		cur_pstate = &pstates[i];
-		sde_plane_setup_src_split_order(
-			cur_pstate->drm_pstate->plane,
-			cur_pstate->sde_pstate->multirect_index,
-			cur_pstate->sde_pstate->pipe_order_flags);
-	}
-}
-static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
-		struct sde_crtc_mixer *mixer)
-{
-	struct drm_plane *plane;
-	struct drm_framebuffer *fb;
-	struct drm_plane_state *state;
-	struct sde_crtc_state *cstate;
-	struct sde_plane_state *pstate = NULL;
-	struct plane_state *pstates = NULL;
-	struct sde_format *format;
-	struct sde_hw_ctl *ctl;
-	struct sde_hw_mixer *lm;
-	struct sde_hw_stage_cfg *stage_cfg;
-	struct sde_rect plane_crtc_roi;
-	uint32_t stage_idx, lm_idx;
-	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
-	int i, cnt = 0;
-	bool bg_alpha_enable = false;
-
-	if (!sde_crtc || !crtc->state || !mixer) {
-		SDE_ERROR("invalid sde_crtc or mixer\n");
-		return;
-	}
-
-	ctl = mixer->hw_ctl;
-	lm = mixer->hw_lm;
-	stage_cfg = &sde_crtc->stage_cfg;
-	cstate = to_sde_crtc_state(crtc->state);
-	pstates = kcalloc(SDE_PSTATES_MAX,
-			sizeof(struct plane_state), GFP_KERNEL);
-	if (!pstates)
-		return;
-
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		state = plane->state;
-		if (!state)
-			continue;
-
-		plane_crtc_roi.x = state->crtc_x;
-		plane_crtc_roi.y = state->crtc_y;
-		plane_crtc_roi.w = state->crtc_w;
-		plane_crtc_roi.h = state->crtc_h;
-
-		pstate = to_sde_plane_state(state);
-		fb = state->fb;
-
-		sde_plane_ctl_flush(plane, ctl, true);
-
-		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
-				crtc->base.id,
-				pstate->stage,
-				plane->base.id,
-				sde_plane_pipe(plane) - SSPP_VIG0,
-				state->fb ? state->fb->base.id : -1);
-
-		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
-		if (!format) {
-			SDE_ERROR("invalid format\n");
-			goto end;
-		}
-
-		if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
-			bg_alpha_enable = true;
-
-		SDE_EVT32(DRMID(crtc), DRMID(plane),
-				state->fb ? state->fb->base.id : -1,
-				state->src_x >> 16, state->src_y >> 16,
-				state->src_w >> 16, state->src_h >> 16,
-				state->crtc_x, state->crtc_y,
-				state->crtc_w, state->crtc_h,
-				pstate->rotation);
-
-		stage_idx = zpos_cnt[pstate->stage]++;
-		stage_cfg->stage[pstate->stage][stage_idx] =
-					sde_plane_pipe(plane);
-		stage_cfg->multirect_index[pstate->stage][stage_idx] =
-					pstate->multirect_index;
-
-		SDE_EVT32(DRMID(crtc), DRMID(plane), stage_idx,
-			sde_plane_pipe(plane) - SSPP_VIG0, pstate->stage,
-			pstate->multirect_index, pstate->multirect_mode,
-			format->base.pixel_format, fb ? fb->modifier : 0);
-
-		/* blend config update */
-		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
-								format);
-
-			if (bg_alpha_enable && !format->alpha_enable)
-				mixer[lm_idx].mixer_op_mode = 0;
-			else
-				mixer[lm_idx].mixer_op_mode |=
-						1 << pstate->stage;
-		}
-
-		if (cnt >= SDE_PSTATES_MAX)
-			continue;
-
-		pstates[cnt].sde_pstate = pstate;
-		pstates[cnt].drm_pstate = state;
-		pstates[cnt].stage = sde_plane_get_property(
-				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
-		pstates[cnt].pipe_id = sde_plane_pipe(plane);
-
-		cnt++;
-	}
-
-	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
-	_sde_crtc_set_src_split_order(crtc, pstates, cnt);
-
-	if (lm && lm->ops.setup_dim_layer) {
-		cstate = to_sde_crtc_state(crtc->state);
-		for (i = 0; i < cstate->num_dim_layers; i++)
-			_sde_crtc_setup_dim_layer_cfg(crtc, sde_crtc,
-					mixer, &cstate->dim_layer[i]);
-	}
-
-	_sde_crtc_program_lm_output_roi(crtc);
-
-end:
-	kfree(pstates);
-}
-
-static void _sde_crtc_swap_mixers_for_right_partial_update(
-		struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_encoder *drm_enc;
-	bool is_right_only;
-	bool encoder_in_dsc_merge = false;
-
-	if (!crtc || !crtc->state)
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-
-	if (sde_crtc->num_mixers != CRTC_DUAL_MIXERS)
-		return;
-
-	drm_for_each_encoder_mask(drm_enc, crtc->dev,
-			crtc->state->encoder_mask) {
-		if (sde_encoder_is_dsc_merge(drm_enc)) {
-			encoder_in_dsc_merge = true;
-			break;
-		}
-	}
-
-	/**
-	 * For right-only partial update with DSC merge, we swap LM0 & LM1.
-	 * This is due to two reasons:
-	 * - On 8996, there is a DSC HW requirement that in DSC Merge Mode,
-	 *   the left DSC must be used, right DSC cannot be used alone.
-	 *   For right-only partial update, this means swap layer mixers to map
-	 *   Left LM to Right INTF. On later HW this was relaxed.
-	 * - In DSC Merge mode, the physical encoder has already registered
-	 *   PP0 as the master, to switch to right-only we would have to
-	 *   reprogram to be driven by PP1 instead.
-	 * To support both cases, we prefer to support the mixer swap solution.
-	 */
-	if (!encoder_in_dsc_merge)
-		return;
-
-	is_right_only = sde_kms_rect_is_null(&cstate->lm_roi[0]) &&
-			!sde_kms_rect_is_null(&cstate->lm_roi[1]);
-
-	if (is_right_only && !sde_crtc->mixers_swapped) {
-		/* right-only update swap mixers */
-		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
-		sde_crtc->mixers_swapped = true;
-	} else if (!is_right_only && sde_crtc->mixers_swapped) {
-		/* left-only or full update, swap back */
-		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
-		sde_crtc->mixers_swapped = false;
-	}
-
-	SDE_DEBUG("%s: right_only %d swapped %d, mix0->lm%d, mix1->lm%d\n",
-			sde_crtc->name, is_right_only, sde_crtc->mixers_swapped,
-			sde_crtc->mixers[0].hw_lm->idx - LM_0,
-			sde_crtc->mixers[1].hw_lm->idx - LM_0);
-	SDE_EVT32(DRMID(crtc), is_right_only, sde_crtc->mixers_swapped,
-			sde_crtc->mixers[0].hw_lm->idx - LM_0,
-			sde_crtc->mixers[1].hw_lm->idx - LM_0);
-}
-
-/**
- * _sde_crtc_blend_setup - configure crtc mixers
- * @crtc: Pointer to drm crtc structure
- * @old_state: Pointer to old crtc state
- * @add_planes: Whether or not to add planes to mixers
- */
-static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state, bool add_planes)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *sde_crtc_state;
-	struct sde_crtc_mixer *mixer;
-	struct sde_hw_ctl *ctl;
-	struct sde_hw_mixer *lm;
-	struct sde_ctl_flush_cfg cfg = {0,};
-
-	int i;
-
-	if (!crtc)
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	sde_crtc_state = to_sde_crtc_state(crtc->state);
-	mixer = sde_crtc->mixers;
-
-	SDE_DEBUG("%s\n", sde_crtc->name);
-
-	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
-		SDE_ERROR("invalid number mixers: %d\n", sde_crtc->num_mixers);
-		return;
-	}
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
-			SDE_ERROR("invalid lm or ctl assigned to mixer\n");
-			return;
-		}
-		mixer[i].mixer_op_mode = 0;
-		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
-			mixer[i].hw_ctl->ops.clear_all_blendstages(
-					mixer[i].hw_ctl);
-
-		/* clear dim_layer settings */
-		lm = mixer[i].hw_lm;
-		if (lm->ops.clear_dim_layer)
-			lm->ops.clear_dim_layer(lm);
-	}
-
-	_sde_crtc_swap_mixers_for_right_partial_update(crtc);
-
-	/* initialize stage cfg */
-	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
-
-	if (add_planes)
-		_sde_crtc_blend_setup_mixer(crtc, old_state, sde_crtc, mixer);
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
-
-		ctl = mixer[i].hw_ctl;
-		lm = mixer[i].hw_lm;
-
-		if (sde_kms_rect_is_null(lm_roi)) {
-			SDE_DEBUG(
-				"%s: lm%d leave ctl%d mask 0 since null roi\n",
-					sde_crtc->name, lm->idx - LM_0,
-					ctl->idx - CTL_0);
-			continue;
-		}
-
-		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
-
-		/* stage config flush mask */
-		ctl->ops.update_bitmask_mixer(ctl, mixer[i].hw_lm->idx, 1);
-		ctl->ops.get_pending_flush(ctl, &cfg);
-
-		SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
-			mixer[i].hw_lm->idx - LM_0,
-			mixer[i].mixer_op_mode,
-			ctl->idx - CTL_0,
-			cfg.pending_flush_mask);
-
-		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
-			&sde_crtc->stage_cfg);
-	}
-
-	_sde_crtc_program_lm_output_roi(crtc);
-}
-
-int sde_crtc_find_plane_fb_modes(struct drm_crtc *crtc,
-		uint32_t *fb_ns, uint32_t *fb_sec, uint32_t *fb_sec_dir)
-{
-	struct drm_plane *plane;
-	struct sde_plane_state *sde_pstate;
-	uint32_t mode = 0;
-	int rc;
-
-	if (!crtc) {
-		SDE_ERROR("invalid state\n");
-		return -EINVAL;
-	}
-
-	*fb_ns = 0;
-	*fb_sec = 0;
-	*fb_sec_dir = 0;
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		if (IS_ERR_OR_NULL(plane) || IS_ERR_OR_NULL(plane->state)) {
-			rc = PTR_ERR(plane);
-			SDE_ERROR("crtc%d failed to get plane%d state%d\n",
-					DRMID(crtc), DRMID(plane), rc);
-			return rc;
-		}
-		sde_pstate = to_sde_plane_state(plane->state);
-		mode = sde_plane_get_property(sde_pstate,
-				PLANE_PROP_FB_TRANSLATION_MODE);
-
-		switch (mode) {
-		case SDE_DRM_FB_NON_SEC:
-			(*fb_ns)++;
-			break;
-		case SDE_DRM_FB_SEC:
-			(*fb_sec)++;
-			break;
-		case SDE_DRM_FB_SEC_DIR_TRANS:
-			(*fb_sec_dir)++;
-			break;
-		default:
-			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
-					DRMID(plane), mode);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-int sde_crtc_state_find_plane_fb_modes(struct drm_crtc_state *state,
-		uint32_t *fb_ns, uint32_t *fb_sec, uint32_t *fb_sec_dir)
-{
-	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
-	struct sde_plane_state *sde_pstate;
-	uint32_t mode = 0;
-	int rc;
-
-	if (!state) {
-		SDE_ERROR("invalid state\n");
-		return -EINVAL;
-	}
-
-	*fb_ns = 0;
-	*fb_sec = 0;
-	*fb_sec_dir = 0;
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (IS_ERR_OR_NULL(pstate)) {
-			rc = PTR_ERR(pstate);
-			SDE_ERROR("crtc%d failed to get plane%d state%d\n",
-					DRMID(state->crtc), DRMID(plane), rc);
-			return rc;
-		}
-		sde_pstate = to_sde_plane_state(pstate);
-		mode = sde_plane_get_property(sde_pstate,
-				PLANE_PROP_FB_TRANSLATION_MODE);
-
-		switch (mode) {
-		case SDE_DRM_FB_NON_SEC:
-			(*fb_ns)++;
-			break;
-		case SDE_DRM_FB_SEC:
-			(*fb_sec)++;
-			break;
-		case SDE_DRM_FB_SEC_DIR_TRANS:
-			(*fb_sec_dir)++;
-			break;
-		default:
-			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
-					DRMID(plane), mode);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-static void _sde_drm_fb_sec_dir_trans(
-	struct sde_kms_smmu_state_data *smmu_state, uint32_t secure_level,
-	struct sde_mdss_cfg *catalog, bool old_valid_fb, int *ops)
-{
-	/* secure display usecase */
-	if ((smmu_state->state == ATTACHED)
-			&& (secure_level == SDE_DRM_SEC_ONLY)) {
-		smmu_state->state = catalog->sui_ns_allowed ?
-			DETACH_SEC_REQ : DETACH_ALL_REQ;
-		smmu_state->secure_level = secure_level;
-		smmu_state->transition_type = PRE_COMMIT;
-		*ops |= SDE_KMS_OPS_SECURE_STATE_CHANGE;
-		if (old_valid_fb)
-			*ops |= (SDE_KMS_OPS_WAIT_FOR_TX_DONE  |
-					SDE_KMS_OPS_CLEANUP_PLANE_FB);
-		if (catalog->sui_misr_supported)
-			smmu_state->sui_misr_state =
-				SUI_MISR_ENABLE_REQ;
-	/* secure camera usecase */
-	} else if (smmu_state->state == ATTACHED) {
-		smmu_state->state = DETACH_SEC_REQ;
-		smmu_state->secure_level = secure_level;
-		smmu_state->transition_type = PRE_COMMIT;
-		*ops |= SDE_KMS_OPS_SECURE_STATE_CHANGE;
-	}
-}
-
-static void _sde_drm_fb_transactions(
-	struct sde_kms_smmu_state_data *smmu_state,
-	struct sde_mdss_cfg *catalog, bool old_valid_fb, bool post_commit,
-	int *ops)
-{
-	if (((smmu_state->state == DETACHED)
-				|| (smmu_state->state == DETACH_ALL_REQ))
-			|| ((smmu_state->secure_level == SDE_DRM_SEC_ONLY)
-				&& ((smmu_state->state == DETACHED_SEC)
-				|| (smmu_state->state == DETACH_SEC_REQ)))) {
-		smmu_state->state = catalog->sui_ns_allowed ?
-			ATTACH_SEC_REQ : ATTACH_ALL_REQ;
-		smmu_state->transition_type = post_commit ?
-			POST_COMMIT : PRE_COMMIT;
-		*ops |= SDE_KMS_OPS_SECURE_STATE_CHANGE;
-		if (old_valid_fb)
-			*ops |= SDE_KMS_OPS_WAIT_FOR_TX_DONE;
-		if (catalog->sui_misr_supported)
-			smmu_state->sui_misr_state =
-				SUI_MISR_DISABLE_REQ;
-	} else if ((smmu_state->state == DETACHED_SEC)
-			|| (smmu_state->state == DETACH_SEC_REQ)) {
-		smmu_state->state = ATTACH_SEC_REQ;
-		smmu_state->transition_type = post_commit ?
-			POST_COMMIT : PRE_COMMIT;
-		*ops |= SDE_KMS_OPS_SECURE_STATE_CHANGE;
-		if (old_valid_fb)
-			*ops |= SDE_KMS_OPS_WAIT_FOR_TX_DONE;
-	}
-}
-
-/**
- * sde_crtc_get_secure_transition_ops - determines the operations that
- * need to be performed before transitioning to secure state
- * This function should be called after swapping the new state
- * @crtc: Pointer to drm crtc structure
- * Returns the bitmask of operations need to be performed, -Error in
- * case of error cases
- */
-int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state,
-		bool old_valid_fb)
-{
-	struct drm_plane *plane;
-	struct drm_encoder *encoder;
-	struct sde_crtc *sde_crtc;
-	struct sde_kms *sde_kms;
-	struct sde_mdss_cfg *catalog;
-	struct sde_kms_smmu_state_data *smmu_state;
-	uint32_t translation_mode = 0, secure_level;
-	int ops  = 0;
-	bool post_commit = false;
-
-	if (!crtc || !crtc->state) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms)
-		return -EINVAL;
-
-	smmu_state = &sde_kms->smmu_state;
-	sde_crtc = to_sde_crtc(crtc);
-	secure_level = sde_crtc_get_secure_level(crtc, crtc->state);
-	catalog = sde_kms->catalog;
-
-	/*
-	 * SMMU operations need to be delayed in case of video mode panels
-	 * when switching back to non_secure mode
-	 */
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		post_commit |= sde_encoder_check_mode(encoder,
-						MSM_DISPLAY_CAP_VID_MODE);
-	}
-
-	SDE_DEBUG("crtc%d: secure_level %d old_valid_fb %d post_commit %d\n",
-			DRMID(crtc), secure_level, old_valid_fb, post_commit);
-	SDE_EVT32_VERBOSE(DRMID(crtc), secure_level, smmu_state->state,
-			old_valid_fb, post_commit, SDE_EVTLOG_FUNC_ENTRY);
-
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		if (!plane->state)
-			continue;
-
-		translation_mode = sde_plane_get_property(
-				to_sde_plane_state(plane->state),
-				PLANE_PROP_FB_TRANSLATION_MODE);
-		if (translation_mode > SDE_DRM_FB_SEC_DIR_TRANS) {
-			SDE_ERROR("crtc%d: invalid translation_mode %d\n",
-					DRMID(crtc), translation_mode);
-			return -EINVAL;
-		}
-
-		/* we can break if we find sec_dir plane */
-		if (translation_mode == SDE_DRM_FB_SEC_DIR_TRANS)
-			break;
-	}
-
-	mutex_lock(&sde_kms->secure_transition_lock);
-
-	switch (translation_mode) {
-	case SDE_DRM_FB_SEC_DIR_TRANS:
-		_sde_drm_fb_sec_dir_trans(smmu_state, secure_level,
-				catalog, old_valid_fb, &ops);
-		break;
-
-	case SDE_DRM_FB_SEC:
-	case SDE_DRM_FB_NON_SEC:
-		_sde_drm_fb_transactions(smmu_state, catalog,
-				old_valid_fb, post_commit, &ops);
-		break;
-
-	default:
-		SDE_ERROR("crtc%d: invalid plane fb_mode %d\n",
-				DRMID(crtc), translation_mode);
-		ops = -EINVAL;
-	}
-
-	/* log only during actual transition times */
-	if (ops) {
-		SDE_DEBUG("crtc%d: state%d sec%d sec_lvl%d type%d ops%x\n",
-			DRMID(crtc), smmu_state->state,
-			secure_level, smmu_state->secure_level,
-			smmu_state->transition_type, ops);
-		SDE_EVT32(DRMID(crtc), secure_level, translation_mode,
-				smmu_state->state, smmu_state->transition_type,
-				smmu_state->secure_level, old_valid_fb,
-				post_commit, ops, SDE_EVTLOG_FUNC_EXIT);
-	}
-
-	mutex_unlock(&sde_kms->secure_transition_lock);
-
-	return ops;
-}
-
-/**
- * _sde_crtc_setup_scaler3_lut - Set up scaler lut
- * LUTs are configured only once during boot
- * @sde_crtc: Pointer to sde crtc
- * @cstate: Pointer to sde crtc state
- */
-static int _sde_crtc_set_dest_scaler_lut(struct sde_crtc *sde_crtc,
-		struct sde_crtc_state *cstate, uint32_t lut_idx)
-{
-	struct sde_hw_scaler3_lut_cfg *cfg;
-	struct sde_kms *sde_kms;
-	u32 *lut_data = NULL;
-	size_t len = 0;
-	int ret = 0;
-
-	if (!sde_crtc || !cstate) {
-		SDE_ERROR("invalid args\n");
-		return -EINVAL;
-	}
-
-	sde_kms = _sde_crtc_get_kms(&sde_crtc->base);
-	if (!sde_kms)
-		return -EINVAL;
-
-	if (is_qseed3_rev_qseed3lite(sde_kms->catalog))
-		return 0;
-
-	lut_data = msm_property_get_blob(&sde_crtc->property_info,
-			&cstate->property_state, &len, lut_idx);
-	if (!lut_data || !len) {
-		SDE_DEBUG("%s: lut(%d): cleared: %pK, %zu\n", sde_crtc->name,
-				lut_idx, lut_data, len);
-		lut_data = NULL;
-		len = 0;
-	}
-
-	cfg = &cstate->scl3_lut_cfg;
-
-	switch (lut_idx) {
-	case CRTC_PROP_DEST_SCALER_LUT_ED:
-		cfg->dir_lut = lut_data;
-		cfg->dir_len = len;
-		break;
-	case CRTC_PROP_DEST_SCALER_LUT_CIR:
-		cfg->cir_lut = lut_data;
-		cfg->cir_len = len;
-		break;
-	case CRTC_PROP_DEST_SCALER_LUT_SEP:
-		cfg->sep_lut = lut_data;
-		cfg->sep_len = len;
-		break;
-	default:
-		ret = -EINVAL;
-		SDE_ERROR("%s:invalid LUT idx(%d)\n", sde_crtc->name, lut_idx);
-		SDE_EVT32(DRMID(&sde_crtc->base), lut_idx, SDE_EVTLOG_ERROR);
-		break;
-	}
-
-	cfg->is_configured = cfg->dir_lut && cfg->cir_lut && cfg->sep_lut;
-
-	SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), ret, lut_idx, len,
-			cfg->is_configured);
-	return ret;
-}
-
-void sde_crtc_timeline_status(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	sde_fence_timeline_status(sde_crtc->output_fence, &crtc->base);
-}
-
-static int _sde_validate_hw_resources(struct sde_crtc *sde_crtc)
-{
-	int i;
-
-	/**
-	 * Check if sufficient hw resources are
-	 * available as per target caps & topology
-	 */
-	if (!sde_crtc) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	if (!sde_crtc->num_mixers ||
-		sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
-		SDE_ERROR("%s: invalid number mixers: %d\n",
-			sde_crtc->name, sde_crtc->num_mixers);
-		SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
-			SDE_EVTLOG_ERROR);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ctl
-			|| !sde_crtc->mixers[i].hw_ds) {
-			SDE_ERROR("%s:insufficient resources for mixer(%d)\n",
-				sde_crtc->name, i);
-			SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
-				i, sde_crtc->mixers[i].hw_lm,
-				sde_crtc->mixers[i].hw_ctl,
-				sde_crtc->mixers[i].hw_ds, SDE_EVTLOG_ERROR);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * _sde_crtc_dest_scaler_setup - Set up dest scaler block
- * @crtc: Pointer to drm crtc
- */
-static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_ds *hw_ds;
-	struct sde_hw_ds_cfg *cfg;
-	struct sde_kms *kms;
-	u32 op_mode = 0;
-	u32 lm_idx = 0, num_mixers = 0;
-	int i, count = 0;
-	bool ds_dirty = false;
-
-	if (!crtc)
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-	kms = _sde_crtc_get_kms(crtc);
-	num_mixers = sde_crtc->num_mixers;
-	count = cstate->num_ds;
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-	SDE_EVT32(DRMID(crtc), num_mixers, count, cstate->ds_dirty,
-		sde_crtc->ds_reconfig, cstate->num_ds_enabled);
-
-	/**
-	 * destination scaler configuration will be done either
-	 * or on set property or on power collapse (idle/suspend)
-	 */
-	ds_dirty = (cstate->ds_dirty || sde_crtc->ds_reconfig);
-	if (sde_crtc->ds_reconfig) {
-		SDE_DEBUG("reconfigure dest scaler block\n");
-		sde_crtc->ds_reconfig = false;
-	}
-
-	if (!ds_dirty) {
-		SDE_DEBUG("no change in settings, skip commit\n");
-	} else if (!kms || !kms->catalog) {
-		SDE_ERROR("crtc%d:invalid parameters\n", crtc->base.id);
-	} else if (!kms->catalog->mdp[0].has_dest_scaler) {
-		SDE_DEBUG("dest scaler feature not supported\n");
-	} else if (_sde_validate_hw_resources(sde_crtc)) {
-		//do nothing
-	} else if ((!cstate->scl3_lut_cfg.is_configured) &&
-			(!is_qseed3_rev_qseed3lite(kms->catalog))) {
-		SDE_ERROR("crtc%d:no LUT data available\n", crtc->base.id);
-	} else {
-		for (i = 0; i < count; i++) {
-			cfg = &cstate->ds_cfg[i];
-
-			if (!cfg->flags)
-				continue;
-
-			lm_idx = cfg->idx;
-			hw_lm  = sde_crtc->mixers[lm_idx].hw_lm;
-			hw_ctl = sde_crtc->mixers[lm_idx].hw_ctl;
-			hw_ds  = sde_crtc->mixers[lm_idx].hw_ds;
-
-			/* Setup op mode - Dual/single */
-			if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
-				op_mode |= BIT(hw_ds->idx - DS_0);
-
-			if ((i == count-1) && hw_ds->ops.setup_opmode) {
-				op_mode |= (cstate->num_ds_enabled ==
-					CRTC_DUAL_MIXERS) ?
-					SDE_DS_OP_MODE_DUAL : 0;
-				hw_ds->ops.setup_opmode(hw_ds, op_mode);
-				SDE_EVT32_VERBOSE(DRMID(crtc), op_mode);
-			}
-
-			/* Setup scaler */
-			if ((cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE) ||
-				(cfg->flags &
-					SDE_DRM_DESTSCALER_ENHANCER_UPDATE)) {
-				if (hw_ds->ops.setup_scaler)
-					hw_ds->ops.setup_scaler(hw_ds,
-						&cfg->scl3_cfg,
-						&cstate->scl3_lut_cfg);
-
-			}
-
-			/*
-			 * Dest scaler shares the flush bit of the LM in control
-			 */
-			if (hw_ctl && hw_ctl->ops.update_bitmask_mixer)
-				hw_ctl->ops.update_bitmask_mixer(
-						hw_ctl, hw_lm->idx, 1);
-		}
-	}
-}
-
-static void sde_crtc_frame_event_cb(void *data, u32 event)
-{
-	struct drm_crtc *crtc = (struct drm_crtc *)data;
-	struct sde_crtc *sde_crtc;
-	struct msm_drm_private *priv;
-	struct sde_crtc_frame_event *fevent;
-	struct sde_crtc_frame_event_cb_data *cb_data;
-	struct drm_plane *plane;
-	u32 ubwc_error;
-	unsigned long flags;
-	u32 crtc_id;
-
-	cb_data = (struct sde_crtc_frame_event_cb_data *)data;
-	if (!data) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	crtc = cb_data->crtc;
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	priv = crtc->dev->dev_private;
-	crtc_id = drm_crtc_index(crtc);
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-	SDE_EVT32_VERBOSE(DRMID(crtc), event);
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
-			struct sde_crtc_frame_event, list);
-	if (fevent)
-		list_del_init(&fevent->list);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	if (!fevent) {
-		SDE_ERROR("crtc%d event %d overflow\n",
-				crtc->base.id, event);
-		SDE_EVT32(DRMID(crtc), event);
-		return;
-	}
-
-	/* log and clear plane ubwc errors if any */
-	if (event & (SDE_ENCODER_FRAME_EVENT_ERROR
-				| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD
-				| SDE_ENCODER_FRAME_EVENT_DONE)) {
-		drm_for_each_plane_mask(plane, crtc->dev,
-						sde_crtc->plane_mask_old) {
-			ubwc_error = sde_plane_get_ubwc_error(plane);
-			if (ubwc_error) {
-				SDE_EVT32(DRMID(crtc), DRMID(plane),
-						ubwc_error, SDE_EVTLOG_ERROR);
-				SDE_DEBUG("crtc%d plane %d ubwc_error %d\n",
-						DRMID(crtc), DRMID(plane),
-						ubwc_error);
-				sde_plane_clear_ubwc_error(plane);
-			}
-		}
-	}
-
-	fevent->event = event;
-	fevent->crtc = crtc;
-	fevent->connector = cb_data->connector;
-	fevent->ts = ktime_get();
-	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
-}
-
-void sde_crtc_prepare_commit(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
-{
-	struct drm_device *dev;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_connector *conn;
-	struct drm_encoder *encoder;
-	struct drm_connector_list_iter conn_iter;
-
-	if (!crtc || !crtc->state) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	dev = crtc->dev;
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
-
-	SDE_ATRACE_BEGIN("sde_crtc_prepare_commit");
-
-	/* identify connectors attached to this crtc */
-	cstate->num_connectors = 0;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter)
-		if (conn->state && conn->state->crtc == crtc &&
-				cstate->num_connectors < MAX_CONNECTORS) {
-			encoder = conn->state->best_encoder;
-			if (encoder)
-				sde_encoder_register_frame_event_callback(
-						encoder,
-						sde_crtc_frame_event_cb,
-						crtc);
-
-			cstate->connectors[cstate->num_connectors++] = conn;
-			sde_connector_prepare_fence(conn);
-		}
-	drm_connector_list_iter_end(&conn_iter);
-
-	/* prepare main output fence */
-	sde_fence_prepare(sde_crtc->output_fence);
-	SDE_ATRACE_END("sde_crtc_prepare_commit");
-}
-
-/**
- *  sde_crtc_complete_flip - signal pending page_flip events
- * Any pending vblank events are added to the vblank_event_list
- * so that the next vblank interrupt shall signal them.
- * However PAGE_FLIP events are not handled through the vblank_event_list.
- * This API signals any pending PAGE_FLIP events requested through
- * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the sde_crtc->event.
- * if file!=NULL, this is preclose potential cancel-flip path
- * @crtc: Pointer to drm crtc structure
- * @file: Pointer to drm file
- */
-void sde_crtc_complete_flip(struct drm_crtc *crtc,
-		struct drm_file *file)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct drm_pending_vblank_event *event;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = sde_crtc->event;
-	if (!event)
-		goto end;
-
-	/*
-	 * if regular vblank case (!file) or if cancel-flip from
-	 * preclose on file that requested flip, then send the
-	 * event:
-	 */
-	if (!file || (event->base.file_priv == file)) {
-		sde_crtc->event = NULL;
-		DRM_DEBUG_VBL("%s: send event: %pK\n",
-					sde_crtc->name, event);
-		SDE_EVT32_VERBOSE(DRMID(crtc));
-		drm_crtc_send_vblank_event(crtc, event);
-	}
-
-end:
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-
-	if (!crtc || !crtc->dev) {
-		SDE_ERROR("invalid crtc\n");
-		return INTF_MODE_NONE;
-	}
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		/* continue if copy encoder is encountered */
-		if (sde_encoder_in_clone_mode(encoder))
-			continue;
-
-		return sde_encoder_get_intf_mode(encoder);
-	}
-
-	return INTF_MODE_NONE;
-}
-
-u32 sde_crtc_get_fps_mode(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-
-	if (!crtc || !crtc->dev) {
-		SDE_ERROR("invalid crtc\n");
-		return INTF_MODE_NONE;
-	}
-
-	drm_for_each_encoder(encoder, crtc->dev)
-		if ((encoder->crtc == crtc)
-				&& !sde_encoder_in_cont_splash(encoder))
-			return sde_encoder_get_fps(encoder);
-
-	return 0;
-}
-
-static void sde_crtc_vblank_cb(void *data)
-{
-	struct drm_crtc *crtc = (struct drm_crtc *)data;
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
-	/* keep statistics on vblank callback - with auto reset via debugfs */
-	if (ktime_compare(sde_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
-		sde_crtc->vblank_cb_time = ktime_get();
-	else
-		sde_crtc->vblank_cb_count++;
-
-	sde_crtc->vblank_last_cb_time = ktime_get();
-	sysfs_notify_dirent(sde_crtc->vsync_event_sf);
-
-	drm_crtc_handle_vblank(crtc);
-	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
-}
-
-static void _sde_crtc_retire_event(struct drm_connector *connector,
-		ktime_t ts, enum sde_fence_event fence_event)
-{
-	if (!connector) {
-		SDE_ERROR("invalid param\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("signal_retire_fence");
-	sde_connector_complete_commit(connector, ts, fence_event);
-	SDE_ATRACE_END("signal_retire_fence");
-}
-
-static void sde_crtc_frame_event_work(struct kthread_work *work)
-{
-	struct msm_drm_private *priv;
-	struct sde_crtc_frame_event *fevent;
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-	struct sde_kms *sde_kms;
-	unsigned long flags;
-	bool in_clone_mode = false;
-
-	if (!work) {
-		SDE_ERROR("invalid work handle\n");
-		return;
-	}
-
-	fevent = container_of(work, struct sde_crtc_frame_event, work);
-	if (!fevent->crtc || !fevent->crtc->state) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	crtc = fevent->crtc;
-	sde_crtc = to_sde_crtc(crtc);
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms) {
-		SDE_ERROR("invalid kms handle\n");
-		return;
-	}
-	priv = sde_kms->dev->dev_private;
-	SDE_ATRACE_BEGIN("crtc_frame_event");
-
-	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
-			ktime_to_ns(fevent->ts));
-
-	SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_ENTRY);
-
-	in_clone_mode = sde_encoder_in_clone_mode(fevent->connector->encoder);
-
-	if (!in_clone_mode && (fevent->event & (SDE_ENCODER_FRAME_EVENT_ERROR
-					| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD
-					| SDE_ENCODER_FRAME_EVENT_DONE))) {
-		if (atomic_read(&sde_crtc->frame_pending) < 1) {
-			/* this should not happen */
-			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
-					crtc->base.id,
-					ktime_to_ns(fevent->ts),
-					atomic_read(&sde_crtc->frame_pending));
-			SDE_EVT32(DRMID(crtc), fevent->event,
-							SDE_EVTLOG_FUNC_CASE1);
-		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
-			/* release bandwidth and other resources */
-			SDE_DEBUG("crtc%d ts:%lld last pending\n",
-					crtc->base.id,
-					ktime_to_ns(fevent->ts));
-			SDE_EVT32(DRMID(crtc), fevent->event,
-							SDE_EVTLOG_FUNC_CASE2);
-			sde_core_perf_crtc_release_bw(crtc);
-		} else {
-			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
-							SDE_EVTLOG_FUNC_CASE3);
-		}
-	}
-
-	if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) {
-		SDE_ATRACE_BEGIN("signal_release_fence");
-		sde_fence_signal(sde_crtc->output_fence, fevent->ts,
-				(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)
-				? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL);
-		SDE_ATRACE_END("signal_release_fence");
-	}
-
-	if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE)
-		/* this api should be called without spin_lock */
-		_sde_crtc_retire_event(fevent->connector, fevent->ts,
-				(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)
-				? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL);
-
-	if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
-		SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
-				crtc->base.id, ktime_to_ns(fevent->ts));
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-	SDE_ATRACE_END("crtc_frame_event");
-}
-
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
-{
-	struct sde_crtc *sde_crtc;
-
-	if (!crtc || !crtc->state) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
-
-	sde_core_perf_crtc_update(crtc, 0, false);
-}
-
-/**
- * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
- * @cstate: Pointer to sde crtc state
- */
-static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
-{
-	if (!cstate) {
-		SDE_ERROR("invalid cstate\n");
-		return;
-	}
-	cstate->input_fence_timeout_ns =
-		sde_crtc_get_property(cstate, CRTC_PROP_INPUT_FENCE_TIMEOUT);
-	cstate->input_fence_timeout_ns *= NSEC_PER_MSEC;
-}
-
-/**
- * _sde_crtc_clear_dim_layers_v1 - clear all dim layer settings
- * @cstate:      Pointer to sde crtc state
- */
-static void _sde_crtc_clear_dim_layers_v1(struct sde_crtc_state *cstate)
-{
-	u32 i;
-
-	if (!cstate)
-		return;
-
-	for (i = 0; i < cstate->num_dim_layers; i++)
-		memset(&cstate->dim_layer[i], 0, sizeof(cstate->dim_layer[i]));
-
-	cstate->num_dim_layers = 0;
-}
-
-/**
- * _sde_crtc_set_dim_layer_v1 - copy dim layer settings from userspace
- * @cstate:      Pointer to sde crtc state
- * @user_ptr:    User ptr for sde_drm_dim_layer_v1 struct
- */
-static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
-		void __user *usr_ptr)
-{
-	struct sde_drm_dim_layer_v1 dim_layer_v1;
-	struct sde_drm_dim_layer_cfg *user_cfg;
-	struct sde_hw_dim_layer *dim_layer;
-	u32 count, i;
-
-	if (!cstate) {
-		SDE_ERROR("invalid cstate\n");
-		return;
-	}
-	dim_layer = cstate->dim_layer;
-
-	if (!usr_ptr) {
-		/* usr_ptr is null when setting the default property value */
-		_sde_crtc_clear_dim_layers_v1(cstate);
-		SDE_DEBUG("dim_layer data removed\n");
-		return;
-	}
-
-	if (copy_from_user(&dim_layer_v1, usr_ptr, sizeof(dim_layer_v1))) {
-		SDE_ERROR("failed to copy dim_layer data\n");
-		return;
-	}
-
-	count = dim_layer_v1.num_layers;
-	if (count > SDE_MAX_DIM_LAYERS) {
-		SDE_ERROR("invalid number of dim_layers:%d", count);
-		return;
-	}
-
-	/* populate from user space */
-	cstate->num_dim_layers = count;
-	for (i = 0; i < count; i++) {
-		user_cfg = &dim_layer_v1.layer_cfg[i];
-
-		dim_layer[i].flags = user_cfg->flags;
-		dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
-
-		dim_layer[i].rect.x = user_cfg->rect.x1;
-		dim_layer[i].rect.y = user_cfg->rect.y1;
-		dim_layer[i].rect.w = user_cfg->rect.x2 - user_cfg->rect.x1;
-		dim_layer[i].rect.h = user_cfg->rect.y2 - user_cfg->rect.y1;
-
-		dim_layer[i].color_fill = (struct sde_mdss_color) {
-				user_cfg->color_fill.color_0,
-				user_cfg->color_fill.color_1,
-				user_cfg->color_fill.color_2,
-				user_cfg->color_fill.color_3,
-		};
-
-		SDE_DEBUG("dim_layer[%d] - flags:%d, stage:%d\n",
-				i, dim_layer[i].flags, dim_layer[i].stage);
-		SDE_DEBUG(" rect:{%d,%d,%d,%d}, color:{%d,%d,%d,%d}\n",
-				dim_layer[i].rect.x, dim_layer[i].rect.y,
-				dim_layer[i].rect.w, dim_layer[i].rect.h,
-				dim_layer[i].color_fill.color_0,
-				dim_layer[i].color_fill.color_1,
-				dim_layer[i].color_fill.color_2,
-				dim_layer[i].color_fill.color_3);
-	}
-}
-
-/**
- * _sde_crtc_set_dest_scaler - copy dest scaler settings from userspace
- * @sde_crtc   :  Pointer to sde crtc
- * @cstate :  Pointer to sde crtc state
- * @usr_ptr:  User ptr for sde_drm_dest_scaler_data struct
- */
-static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
-				struct sde_crtc_state *cstate,
-				void __user *usr_ptr)
-{
-	struct sde_drm_dest_scaler_data ds_data;
-	struct sde_drm_dest_scaler_cfg *ds_cfg_usr;
-	struct sde_drm_scaler_v2 scaler_v2;
-	void __user *scaler_v2_usr;
-	int i, count;
-
-	if (!sde_crtc || !cstate) {
-		SDE_ERROR("invalid sde_crtc/state\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("crtc %s\n", sde_crtc->name);
-
-	if (!usr_ptr) {
-		SDE_DEBUG("ds data removed\n");
-		return 0;
-	}
-
-	if (copy_from_user(&ds_data, usr_ptr, sizeof(ds_data))) {
-		SDE_ERROR("%s:failed to copy dest scaler data from user\n",
-			sde_crtc->name);
-		return -EINVAL;
-	}
-
-	count = ds_data.num_dest_scaler;
-	if (!count) {
-		SDE_DEBUG("no ds data available\n");
-		return 0;
-	}
-
-	if (count > SDE_MAX_DS_COUNT) {
-		SDE_ERROR("%s: invalid config: num_ds(%d) max(%d)\n",
-			sde_crtc->name, count, SDE_MAX_DS_COUNT);
-		SDE_EVT32(DRMID(&sde_crtc->base), count, SDE_EVTLOG_ERROR);
-		return -EINVAL;
-	}
-
-	/* Populate from user space */
-	for (i = 0; i < count; i++) {
-		ds_cfg_usr = &ds_data.ds_cfg[i];
-
-		cstate->ds_cfg[i].idx = ds_cfg_usr->index;
-		cstate->ds_cfg[i].flags = ds_cfg_usr->flags;
-		cstate->ds_cfg[i].lm_width = ds_cfg_usr->lm_width;
-		cstate->ds_cfg[i].lm_height = ds_cfg_usr->lm_height;
-		memset(&scaler_v2, 0, sizeof(scaler_v2));
-
-		if (ds_cfg_usr->scaler_cfg) {
-			scaler_v2_usr =
-			(void __user *)((uintptr_t)ds_cfg_usr->scaler_cfg);
-
-			if (copy_from_user(&scaler_v2, scaler_v2_usr,
-					sizeof(scaler_v2))) {
-				SDE_ERROR("%s:scaler: copy from user failed\n",
-					sde_crtc->name);
-				return -EINVAL;
-			}
-		}
-
-		sde_set_scaler_v2(&cstate->ds_cfg[i].scl3_cfg, &scaler_v2);
-
-		SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
-			scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
-			scaler_v2.src_width[0], scaler_v2.src_height[0],
-			scaler_v2.dst_width, scaler_v2.dst_height);
-		SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
-			scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
-			scaler_v2.src_width[0], scaler_v2.src_height[0],
-			scaler_v2.dst_width, scaler_v2.dst_height);
-
-		SDE_DEBUG("ds cfg[%d]-ndx(%d) flags(%d) lm(%dx%d)\n",
-			i, ds_cfg_usr->index, ds_cfg_usr->flags,
-			ds_cfg_usr->lm_width, ds_cfg_usr->lm_height);
-		SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), i, ds_cfg_usr->index,
-			ds_cfg_usr->flags, ds_cfg_usr->lm_width,
-			ds_cfg_usr->lm_height);
-	}
-
-	cstate->num_ds = count;
-	cstate->ds_dirty = true;
-	SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), count, cstate->ds_dirty);
-
-	return 0;
-}
-
-static int _sde_crtc_check_dest_scaler_lm(struct drm_crtc *crtc,
-	struct drm_display_mode *mode, struct sde_hw_ds_cfg *cfg, u32 hdisplay,
-	u32 prev_lm_width, u32 prev_lm_height)
-{
-	if (cfg->lm_width > hdisplay || cfg->lm_height > mode->vdisplay
-		|| !cfg->lm_width || !cfg->lm_height) {
-		SDE_ERROR("crtc%d: lm size[%d,%d] display [%d,%d]\n",
-			crtc->base.id, cfg->lm_width, cfg->lm_height,
-			hdisplay, mode->vdisplay);
-		SDE_EVT32(DRMID(crtc), cfg->lm_width, cfg->lm_height,
-			hdisplay, mode->vdisplay, SDE_EVTLOG_ERROR);
-		return -E2BIG;
-	}
-
-	if (!prev_lm_width && !prev_lm_height) {
-		prev_lm_width = cfg->lm_width;
-		prev_lm_height = cfg->lm_height;
-	} else {
-		if (cfg->lm_width != prev_lm_width ||
-			cfg->lm_height != prev_lm_height) {
-			SDE_ERROR("crtc%d:lm left[%d,%d]right[%d %d]\n",
-				crtc->base.id, cfg->lm_width,
-				cfg->lm_height, prev_lm_width,
-				prev_lm_height);
-			SDE_EVT32(DRMID(crtc), cfg->lm_width,
-				cfg->lm_height, prev_lm_width,
-				prev_lm_height, SDE_EVTLOG_ERROR);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-static int _sde_crtc_check_dest_scaler_cfg(struct drm_crtc *crtc,
-	struct sde_crtc *sde_crtc, struct drm_display_mode *mode,
-	struct sde_hw_ds *hw_ds, struct sde_hw_ds_cfg *cfg, u32 hdisplay,
-	u32 max_in_width, u32 max_out_width)
-{
-	if (cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE ||
-		cfg->flags & SDE_DRM_DESTSCALER_ENHANCER_UPDATE) {
-
-		/**
-		 * Scaler src and dst width shouldn't exceed the maximum
-		 * width limitation. Also, if there is no partial update
-		 * dst width and height must match display resolution.
-		 */
-		if (cfg->scl3_cfg.src_width[0] > max_in_width ||
-			cfg->scl3_cfg.dst_width > max_out_width ||
-			!cfg->scl3_cfg.src_width[0] ||
-			!cfg->scl3_cfg.dst_width ||
-			(!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE)
-			 && (cfg->scl3_cfg.dst_width != hdisplay ||
-			 cfg->scl3_cfg.dst_height != mode->vdisplay))) {
-			SDE_ERROR("crtc%d: ", crtc->base.id);
-			SDE_ERROR("src_w(%d) dst(%dx%d) display(%dx%d)",
-				cfg->scl3_cfg.src_width[0],
-				cfg->scl3_cfg.dst_width,
-				cfg->scl3_cfg.dst_height,
-				hdisplay, mode->vdisplay);
-			SDE_ERROR("num_mixers(%d) flags(%d) ds-%d:\n",
-				sde_crtc->num_mixers, cfg->flags,
-				hw_ds->idx - DS_0);
-			SDE_ERROR("scale_en = %d, DE_en =%d\n",
-				cfg->scl3_cfg.enable,
-				cfg->scl3_cfg.de.enable);
-
-			SDE_EVT32(DRMID(crtc), cfg->scl3_cfg.enable,
-				cfg->scl3_cfg.de.enable, cfg->flags,
-				max_in_width, max_out_width,
-				cfg->scl3_cfg.src_width[0],
-				cfg->scl3_cfg.dst_width,
-				cfg->scl3_cfg.dst_height, hdisplay,
-				mode->vdisplay, sde_crtc->num_mixers,
-				SDE_EVTLOG_ERROR);
-
-			cfg->flags &=
-				~SDE_DRM_DESTSCALER_SCALE_UPDATE;
-			cfg->flags &=
-				~SDE_DRM_DESTSCALER_ENHANCER_UPDATE;
-
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-static int _sde_crtc_check_dest_scaler_validate_ds(struct drm_crtc *crtc,
-	struct sde_crtc *sde_crtc, struct sde_crtc_state *cstate,
-	struct drm_display_mode *mode, struct sde_hw_ds *hw_ds,
-	struct sde_hw_ds_cfg *cfg, u32 hdisplay, u32 *num_ds_enable,
-	u32 prev_lm_width, u32 prev_lm_height, u32 max_in_width,
-	u32 max_out_width)
-{
-	int i, ret;
-	u32 lm_idx;
-
-	for (i = 0; i < cstate->num_ds; i++) {
-		cfg = &cstate->ds_cfg[i];
-		lm_idx = cfg->idx;
-
-		/**
-		 * Validate against topology
-		 * No of dest scalers should match the num of mixers
-		 * unless it is partial update left only/right only use case
-		 */
-		if (lm_idx >= sde_crtc->num_mixers || (i != lm_idx &&
-			!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
-			SDE_ERROR("crtc%d: ds_cfg id(%d):idx(%d), flags(%d)\n",
-				crtc->base.id, i, lm_idx, cfg->flags);
-			SDE_EVT32(DRMID(crtc), i, lm_idx, cfg->flags,
-				SDE_EVTLOG_ERROR);
-			return -EINVAL;
-		}
-
-		hw_ds = sde_crtc->mixers[lm_idx].hw_ds;
-
-		if (!max_in_width && !max_out_width) {
-			max_in_width = hw_ds->scl->top->maxinputwidth;
-			max_out_width = hw_ds->scl->top->maxoutputwidth;
-
-			if (cstate->num_ds == CRTC_DUAL_MIXERS)
-				max_in_width -= SDE_DS_OVERFETCH_SIZE;
-
-			SDE_DEBUG("max DS width [%d,%d] for num_ds = %d\n",
-				max_in_width, max_out_width, cstate->num_ds);
-		}
-
-		/* Check LM width and height */
-		ret = _sde_crtc_check_dest_scaler_lm(crtc, mode, cfg, hdisplay,
-				prev_lm_width, prev_lm_height);
-		if (ret)
-			return ret;
-
-		/* Check scaler data */
-		ret = _sde_crtc_check_dest_scaler_cfg(crtc, sde_crtc, mode,
-				hw_ds, cfg, hdisplay,
-				max_in_width, max_out_width);
-		if (ret)
-			return ret;
-
-		if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
-			(*num_ds_enable)++;
-
-		SDE_DEBUG("ds[%d]: flags[0x%X]\n",
-			hw_ds->idx - DS_0, cfg->flags);
-		SDE_EVT32_VERBOSE(DRMID(crtc), hw_ds->idx - DS_0, cfg->flags);
-	}
-
-	return 0;
-}
-
-static void _sde_crtc_check_dest_scaler_data_disable(struct drm_crtc *crtc,
-	struct sde_crtc_state *cstate, struct sde_hw_ds_cfg *cfg,
-	u32 num_ds_enable)
-{
-	int i;
-
-	SDE_DEBUG("dest scaler status : %d -> %d\n",
-		cstate->num_ds_enabled, num_ds_enable);
-	SDE_EVT32_VERBOSE(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
-			cstate->num_ds, cstate->ds_dirty);
-
-	if (cstate->num_ds_enabled != num_ds_enable) {
-		/* Disabling destination scaler */
-		if (!num_ds_enable) {
-			for (i = 0; i < cstate->num_ds; i++) {
-				cfg = &cstate->ds_cfg[i];
-				cfg->idx = i;
-				/* Update scaler settings in disable case */
-				cfg->flags = SDE_DRM_DESTSCALER_SCALE_UPDATE;
-				cfg->scl3_cfg.enable = 0;
-				cfg->scl3_cfg.de.enable = 0;
-			}
-		}
-		cstate->num_ds_enabled = num_ds_enable;
-		cstate->ds_dirty = true;
-	} else {
-		if (!cstate->num_ds_enabled)
-			cstate->ds_dirty = false;
-	}
-}
-
-/**
- * _sde_crtc_check_dest_scaler_data - validate the dest scaler data
- * @crtc  :  Pointer to drm crtc
- * @state :  Pointer to drm crtc state
- */
-static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
-				struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_display_mode *mode;
-	struct sde_kms *kms;
-	struct sde_hw_ds *hw_ds;
-	struct sde_hw_ds_cfg *cfg;
-	u32 ret = 0;
-	u32 num_ds_enable = 0, hdisplay = 0;
-	u32 max_in_width = 0, max_out_width = 0;
-	u32 prev_lm_width = 0, prev_lm_height = 0;
-
-	if (!crtc || !state)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-	kms = _sde_crtc_get_kms(crtc);
-	mode = &state->adjusted_mode;
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	if (!cstate->ds_dirty) {
-		SDE_DEBUG("dest scaler property not set, skip validation\n");
-		return 0;
-	}
-
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("crtc%d: invalid parameters\n", crtc->base.id);
-		return -EINVAL;
-	}
-
-	if (!kms->catalog->mdp[0].has_dest_scaler) {
-		SDE_DEBUG("dest scaler feature not supported\n");
-		return 0;
-	}
-
-	if (!sde_crtc->num_mixers) {
-		SDE_DEBUG("mixers not allocated\n");
-		return 0;
-	}
-
-	ret = _sde_validate_hw_resources(sde_crtc);
-	if (ret)
-		goto err;
-
-	/**
-	 * No of dest scalers shouldn't exceed hw ds block count and
-	 * also, match the num of mixers unless it is partial update
-	 * left only/right only use case - currently PU + DS is not supported
-	 */
-	if (cstate->num_ds > kms->catalog->ds_count ||
-		((cstate->num_ds != sde_crtc->num_mixers) &&
-		!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
-		SDE_ERROR("crtc%d: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
-			crtc->base.id, cstate->num_ds, kms->catalog->ds_count,
-			cstate->ds_cfg[0].flags);
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/**
-	 * Check if DS needs to be enabled or disabled
-	 * In case of enable, validate the data
-	 */
-	if (!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
-		SDE_DEBUG("disable dest scaler, num(%d) flags(%d)\n",
-			cstate->num_ds, cstate->ds_cfg[0].flags);
-		goto disable;
-	}
-
-	/* Display resolution */
-	hdisplay = mode->hdisplay/sde_crtc->num_mixers;
-
-	/* Validate the DS data */
-	ret = _sde_crtc_check_dest_scaler_validate_ds(crtc, sde_crtc, cstate,
-			mode, hw_ds, cfg, hdisplay, &num_ds_enable,
-			prev_lm_width, prev_lm_height,
-			max_in_width, max_out_width);
-	if (ret)
-		goto err;
-
-disable:
-	_sde_crtc_check_dest_scaler_data_disable(crtc, cstate, cfg,
-			num_ds_enable);
-	return 0;
-
-err:
-	cstate->ds_dirty = false;
-	return ret;
-}
-
-/**
- * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
- * @crtc: Pointer to CRTC object
- */
-static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
-{
-	struct drm_plane *plane = NULL;
-	uint32_t wait_ms = 1;
-	ktime_t kt_end, kt_wait;
-	int rc = 0;
-
-	SDE_DEBUG("\n");
-
-	if (!crtc || !crtc->state) {
-		SDE_ERROR("invalid crtc/state %pK\n", crtc);
-		return;
-	}
-
-	/* use monotonic timer to limit total fence wait time */
-	kt_end = ktime_add_ns(ktime_get(),
-		to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
-
-	/*
-	 * Wait for fences sequentially, as all of them need to be signalled
-	 * before we can proceed.
-	 *
-	 * Limit total wait time to INPUT_FENCE_TIMEOUT, but still call
-	 * sde_plane_wait_input_fence with wait_ms == 0 after the timeout so
-	 * that each plane can check its fence status and react appropriately
-	 * if its fence has timed out. Call input fence wait multiple times if
-	 * fence wait is interrupted due to interrupt call.
-	 */
-	SDE_ATRACE_BEGIN("plane_wait_input_fence");
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		do {
-			kt_wait = ktime_sub(kt_end, ktime_get());
-			if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
-				wait_ms = ktime_to_ms(kt_wait);
-			else
-				wait_ms = 0;
-
-			rc = sde_plane_wait_input_fence(plane, wait_ms);
-		} while (wait_ms && rc == -ERESTARTSYS);
-	}
-	SDE_ATRACE_END("plane_wait_input_fence");
-}
-
-static void _sde_crtc_setup_mixer_for_encoder(
-		struct drm_crtc *crtc,
-		struct drm_encoder *enc)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
-	struct sde_rm *rm = &sde_kms->rm;
-	struct sde_crtc_mixer *mixer;
-	struct sde_hw_ctl *last_valid_ctl = NULL;
-	int i;
-	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter, ds_iter;
-
-	sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
-	sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
-	sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
-	sde_rm_init_hw_iter(&ds_iter, enc->base.id, SDE_HW_BLK_DS);
-
-	/* Set up all the mixers and ctls reserved by this encoder */
-	for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		mixer = &sde_crtc->mixers[i];
-
-		if (!sde_rm_get_hw(rm, &lm_iter))
-			break;
-		mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
-
-		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
-		if (!sde_rm_get_hw(rm, &ctl_iter)) {
-			SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
-					mixer->hw_lm->idx - LM_0);
-			mixer->hw_ctl = last_valid_ctl;
-		} else {
-			mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
-			last_valid_ctl = mixer->hw_ctl;
-			sde_crtc->num_ctls++;
-		}
-
-		/* Shouldn't happen, mixers are always >= ctls */
-		if (!mixer->hw_ctl) {
-			SDE_ERROR("no valid ctls found for lm %d\n",
-					mixer->hw_lm->idx - LM_0);
-			return;
-		}
-
-		/* Dspp may be null */
-		(void) sde_rm_get_hw(rm, &dspp_iter);
-		mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
-
-		/* DS may be null */
-		(void) sde_rm_get_hw(rm, &ds_iter);
-		mixer->hw_ds = (struct sde_hw_ds *)ds_iter.hw;
-
-		mixer->encoder = enc;
-
-		sde_crtc->num_mixers++;
-		SDE_DEBUG("setup mixer %d: lm %d\n",
-				i, mixer->hw_lm->idx - LM_0);
-		SDE_DEBUG("setup mixer %d: ctl %d\n",
-				i, mixer->hw_ctl->idx - CTL_0);
-		if (mixer->hw_ds)
-			SDE_DEBUG("setup mixer %d: ds %d\n",
-				i, mixer->hw_ds->idx - DS_0);
-	}
-}
-
-static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct drm_encoder *enc;
-
-	sde_crtc->num_ctls = 0;
-	sde_crtc->num_mixers = 0;
-	sde_crtc->mixers_swapped = false;
-	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
-
-	mutex_lock(&sde_crtc->crtc_lock);
-	/* Check for mixers on all encoders attached to this crtc */
-	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
-		if (enc->crtc != crtc)
-			continue;
-
-		/* avoid overwriting mixers info from a copy encoder */
-		if (sde_encoder_in_clone_mode(enc))
-			continue;
-
-		_sde_crtc_setup_mixer_for_encoder(crtc, enc);
-	}
-
-	mutex_unlock(&sde_crtc->crtc_lock);
-	_sde_crtc_check_dest_scaler_data(crtc, crtc->state);
-}
-
-static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state)
-{
-	int i;
-	struct sde_crtc_state *cstate;
-
-	cstate = to_sde_crtc_state(state);
-
-	cstate->is_ppsplit = false;
-	for (i = 0; i < cstate->num_connectors; i++) {
-		struct drm_connector *conn = cstate->connectors[i];
-
-		if (sde_connector_get_topology_name(conn) ==
-				SDE_RM_TOPOLOGY_PPSPLIT)
-			cstate->is_ppsplit = true;
-	}
-}
-
-static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_display_mode *adj_mode;
-	u32 crtc_split_width;
-	int i;
-
-	if (!crtc || !state) {
-		SDE_ERROR("invalid args\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	adj_mode = &state->adjusted_mode;
-	crtc_split_width = sde_crtc_get_mixer_width(sde_crtc, cstate, adj_mode);
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		cstate->lm_bounds[i].x = crtc_split_width * i;
-		cstate->lm_bounds[i].y = 0;
-		cstate->lm_bounds[i].w = crtc_split_width;
-		cstate->lm_bounds[i].h =
-			sde_crtc_get_mixer_height(sde_crtc, cstate, adj_mode);
-		memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
-				sizeof(cstate->lm_roi[i]));
-		SDE_EVT32_VERBOSE(DRMID(crtc), i,
-				cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
-				cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
-		SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
-				cstate->lm_roi[i].x, cstate->lm_roi[i].y,
-				cstate->lm_roi[i].w, cstate->lm_roi[i].h);
-	}
-
-	drm_mode_debug_printmodeline(adj_mode);
-}
-
-static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
-{
-	struct sde_crtc *sde_crtc;
-	struct drm_encoder *encoder;
-	struct drm_device *dev;
-	struct sde_kms *sde_kms;
-	struct sde_splash_display *splash_display;
-	bool cont_splash_enabled = false;
-	size_t i;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	if (!crtc->state->enable) {
-		SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
-				crtc->base.id, crtc->state->enable);
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms)
-		return;
-
-	SDE_ATRACE_BEGIN("crtc_atomic_begin");
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	sde_crtc = to_sde_crtc(crtc);
-	dev = crtc->dev;
-
-	if (!sde_crtc->num_mixers) {
-		_sde_crtc_setup_mixers(crtc);
-		_sde_crtc_setup_is_ppsplit(crtc->state);
-		_sde_crtc_setup_lm_bounds(crtc, crtc->state);
-	}
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		/* encoder will trigger pending mask now */
-		sde_encoder_trigger_kickoff_pending(encoder);
-	}
-
-	/*
-	 * If no mixers have been allocated in sde_crtc_atomic_check(),
-	 * it means we are trying to flush a CRTC whose state is disabled:
-	 * nothing else needs to be done.
-	 */
-	if (unlikely(!sde_crtc->num_mixers))
-		goto end;
-
-	_sde_crtc_blend_setup(crtc, old_state, true);
-	_sde_crtc_dest_scaler_setup(crtc);
-
-	/* cancel the idle notify delayed work */
-	if (sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
-					MSM_DISPLAY_CAP_VID_MODE) &&
-		kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work))
-		SDE_DEBUG("idle notify work cancelled\n");
-
-	/*
-	 * Since CP properties use AXI buffer to program the
-	 * HW, check if context bank is in attached state,
-	 * apply color processing properties only if
-	 * smmu state is attached,
-	 */
-	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
-		splash_display = &sde_kms->splash_data.splash_display[i];
-		if (splash_display->cont_splash_enabled &&
-			splash_display->encoder &&
-			crtc == splash_display->encoder->crtc)
-			cont_splash_enabled = true;
-	}
-
-	if (sde_kms_is_cp_operation_allowed(sde_kms) &&
-			(cont_splash_enabled || sde_crtc->enabled))
-		sde_cp_crtc_apply_properties(crtc);
-
-	/*
-	 * PP_DONE irq is only used by command mode for now.
-	 * It is better to request pending before FLUSH and START trigger
-	 * to make sure no pp_done irq missed.
-	 * This is safe because no pp_done will happen before SW trigger
-	 * in command mode.
-	 */
-
-end:
-	SDE_ATRACE_END("crtc_atomic_begin");
-}
-
-static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state)
-{
-	struct drm_encoder *encoder;
-	struct sde_crtc *sde_crtc;
-	struct drm_device *dev;
-	struct drm_plane *plane;
-	struct msm_drm_private *priv;
-	struct msm_drm_thread *event_thread;
-	struct sde_crtc_state *cstate;
-	struct sde_kms *sde_kms;
-	int idle_time = 0;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	if (!crtc->state->enable) {
-		SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
-				crtc->base.id, crtc->state->enable);
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-	dev = crtc->dev;
-	priv = dev->dev_private;
-
-	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
-		SDE_ERROR("invalid crtc index[%d]\n", crtc->index);
-		return;
-	}
-
-	event_thread = &priv->event_thread[crtc->index];
-	idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT);
-
-	/*
-	 * If no mixers has been allocated in sde_crtc_atomic_check(),
-	 * it means we are trying to flush a CRTC whose state is disabled:
-	 * nothing else needs to be done.
-	 */
-	if (unlikely(!sde_crtc->num_mixers))
-		return;
-
-	SDE_ATRACE_BEGIN("sde_crtc_atomic_flush");
-
-	/*
-	 * For planes without commit update, drm framework will not add
-	 * those planes to current state since hardware update is not
-	 * required. However, if those planes were power collapsed since
-	 * last commit cycle, driver has to restore the hardware state
-	 * of those planes explicitly here prior to plane flush.
-	 * Also use this iteration to see if any plane requires cache,
-	 * so during the perf update driver can activate/deactivate
-	 * the cache accordingly.
-	 */
-	sde_crtc->new_perf.llcc_active = false;
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		sde_plane_restore(plane);
-
-		if (sde_plane_is_cache_required(plane))
-			sde_crtc->new_perf.llcc_active = true;
-	}
-
-	/* wait for acquire fences before anything else is done */
-	_sde_crtc_wait_for_fences(crtc);
-
-	/* schedule the idle notify delayed work */
-	if (idle_time && sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
-						MSM_DISPLAY_CAP_VID_MODE)) {
-		kthread_queue_delayed_work(&event_thread->worker,
-					&sde_crtc->idle_notify_work,
-					msecs_to_jiffies(idle_time));
-		SDE_DEBUG("schedule idle notify work in %dms\n", idle_time);
-	}
-
-	if (!cstate->rsc_update) {
-		drm_for_each_encoder_mask(encoder, dev,
-				crtc->state->encoder_mask) {
-			cstate->rsc_client =
-				sde_encoder_get_rsc_client(encoder);
-		}
-		cstate->rsc_update = true;
-	}
-
-	/* update performance setting before crtc kickoff */
-	sde_core_perf_crtc_update(crtc, 1, false);
-
-	/*
-	 * Final plane updates: Give each plane a chance to complete all
-	 *                      required writes/flushing before crtc's "flush
-	 *                      everything" call below.
-	 */
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		if (sde_kms->smmu_state.transition_error)
-			sde_plane_set_error(plane, true);
-		sde_plane_flush(plane);
-	}
-
-	/* Kickoff will be scheduled by outer layer */
-	SDE_ATRACE_END("sde_crtc_atomic_flush");
-}
-
-/**
- * sde_crtc_destroy_state - state destroy hook
- * @crtc: drm CRTC
- * @state: CRTC state object to release
- */
-static void sde_crtc_destroy_state(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_encoder *enc;
-	struct sde_kms *sde_kms;
-
-	if (!crtc || !state) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-	enc = _sde_crtc_get_encoder(crtc);
-	sde_kms = _sde_crtc_get_kms(crtc);
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	if (sde_kms && enc && !sde_encoder_in_cont_splash(enc))
-		sde_rm_release(&sde_kms->rm, enc, true);
-
-	__drm_atomic_helper_crtc_destroy_state(state);
-
-	/* destroy value helper */
-	msm_property_destroy_state(&sde_crtc->property_info, cstate,
-			&cstate->property_state);
-}
-
-static int _sde_crtc_flush_event_thread(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	int i;
-
-	if (!crtc) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-
-	if (!atomic_read(&sde_crtc->frame_pending)) {
-		SDE_DEBUG("no frames pending\n");
-		return 0;
-	}
-
-	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
-
-	/*
-	 * flush all the event thread work to make sure all the
-	 * FRAME_EVENTS from encoder are propagated to crtc
-	 */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
-		if (list_empty(&sde_crtc->frame_events[i].list))
-			kthread_flush_work(&sde_crtc->frame_events[i].work);
-	}
-
-	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
-
-	return 0;
-}
-
-/**
- * _sde_crtc_remove_pipe_flush - remove staged pipes from flush mask
- * @crtc: Pointer to crtc structure
- */
-static void _sde_crtc_remove_pipe_flush(struct drm_crtc *crtc)
-{
-	struct drm_plane *plane;
-	struct drm_plane_state *state;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_mixer *mixer;
-	struct sde_hw_ctl *ctl;
-
-	if (!crtc)
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	mixer = sde_crtc->mixers;
-	if (!mixer)
-		return;
-	ctl = mixer->hw_ctl;
-
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		state = plane->state;
-		if (!state)
-			continue;
-
-		/* clear plane flush bitmask */
-		sde_plane_ctl_flush(plane, ctl, false);
-	}
-}
-
-/**
- * sde_crtc_reset_hw - attempt hardware reset on errors
- * @crtc: Pointer to DRM crtc instance
- * @old_state: Pointer to crtc state for previous commit
- * @recovery_events: Whether or not recovery events are enabled
- * Returns: Zero if current commit should still be attempted
- */
-int sde_crtc_reset_hw(struct drm_crtc *crtc, struct drm_crtc_state *old_state,
-	bool recovery_events)
-{
-	struct drm_plane *plane_halt[MAX_PLANES];
-	struct drm_plane *plane;
-	struct drm_encoder *encoder;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct sde_hw_ctl *ctl;
-	signed int i, plane_count;
-	int rc;
-
-	if (!crtc || !crtc->dev || !old_state || !crtc->state)
-		return -EINVAL;
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-
-	SDE_EVT32(DRMID(crtc), recovery_events, SDE_EVTLOG_FUNC_ENTRY);
-
-	/* optionally generate a panic instead of performing a h/w reset */
-	SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic");
-
-	for (i = 0; i < sde_crtc->num_ctls; ++i) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (!ctl || !ctl->ops.reset)
-			continue;
-
-		rc = ctl->ops.reset(ctl);
-		if (rc) {
-			SDE_DEBUG("crtc%d: ctl%d reset failure\n",
-					crtc->base.id, ctl->idx - CTL_0);
-			SDE_EVT32(DRMID(crtc), ctl->idx - CTL_0,
-					SDE_EVTLOG_ERROR);
-			break;
-		}
-	}
-
-	/* Early out if simple ctl reset succeeded */
-	if (i == sde_crtc->num_ctls)
-		return 0;
-
-	SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
-
-	/* force all components in the system into reset at the same time */
-	for (i = 0; i < sde_crtc->num_ctls; ++i) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (!ctl || !ctl->ops.hard_reset)
-			continue;
-
-		SDE_EVT32(DRMID(crtc), ctl->idx - CTL_0);
-		ctl->ops.hard_reset(ctl, true);
-	}
-
-	plane_count = 0;
-	drm_atomic_crtc_state_for_each_plane(plane, old_state) {
-		if (plane_count >= ARRAY_SIZE(plane_halt))
-			break;
-
-		plane_halt[plane_count++] = plane;
-		sde_plane_halt_requests(plane, true);
-		sde_plane_set_revalidate(plane, true);
-	}
-
-	/* provide safe "border color only" commit configuration for later */
-	_sde_crtc_remove_pipe_flush(crtc);
-	_sde_crtc_blend_setup(crtc, old_state, false);
-
-	/* take h/w components out of reset */
-	for (i = plane_count - 1; i >= 0; --i)
-		sde_plane_halt_requests(plane_halt[i], false);
-
-	/* attempt to poll for start of frame cycle before reset release */
-	list_for_each_entry(encoder,
-			&crtc->dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
-			sde_encoder_poll_line_counts(encoder);
-	}
-
-	for (i = 0; i < sde_crtc->num_ctls; ++i) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (!ctl || !ctl->ops.hard_reset)
-			continue;
-
-		ctl->ops.hard_reset(ctl, false);
-	}
-
-	list_for_each_entry(encoder,
-			&crtc->dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
-			sde_encoder_kickoff(encoder, false);
-	}
-
-	/* panic the device if VBIF is not in good state */
-	return !recovery_events ? 0 : -EAGAIN;
-}
-
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev;
-	struct sde_crtc *sde_crtc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_crtc_state *cstate;
-	bool is_error = false, reset_req;
-	unsigned long flags;
-	enum sde_crtc_idle_pc_state idle_pc_state;
-	struct sde_encoder_kickoff_params params = { 0 };
-
-	if (!crtc) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-	dev = crtc->dev;
-	sde_crtc = to_sde_crtc(crtc);
-	sde_kms = _sde_crtc_get_kms(crtc);
-	reset_req = false;
-
-	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	priv = sde_kms->dev->dev_private;
-	cstate = to_sde_crtc_state(crtc->state);
-
-	/*
-	 * If no mixers has been allocated in sde_crtc_atomic_check(),
-	 * it means we are trying to start a CRTC whose state is disabled:
-	 * nothing else needs to be done.
-	 */
-	if (unlikely(!sde_crtc->num_mixers))
-		return;
-
-	SDE_ATRACE_BEGIN("crtc_commit");
-
-	idle_pc_state = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_PC_STATE);
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		/*
-		 * Encoder will flush/start now, unless it has a tx pending.
-		 * If so, it may delay and flush at an irq event (e.g. ppdone)
-		 */
-		params.affected_displays = _sde_crtc_get_displays_affected(crtc,
-				crtc->state);
-		if (sde_encoder_prepare_for_kickoff(encoder, &params))
-			reset_req = true;
-
-		if (idle_pc_state != IDLE_PC_NONE)
-			sde_encoder_control_idle_pc(encoder,
-			    (idle_pc_state == IDLE_PC_ENABLE) ? true : false);
-	}
-
-	/*
-	 * Optionally attempt h/w recovery if any errors were detected while
-	 * preparing for the kickoff
-	 */
-	if (reset_req) {
-		sde_crtc->frame_trigger_mode = params.frame_trigger_mode;
-		if (sde_crtc->frame_trigger_mode
-					!= FRAME_DONE_WAIT_POSTED_START &&
-		    sde_crtc_reset_hw(crtc, old_state,
-						params.recovery_events_enabled))
-			is_error = true;
-	}
-
-	sde_crtc_calc_fps(sde_crtc);
-	SDE_ATRACE_BEGIN("flush_event_thread");
-	_sde_crtc_flush_event_thread(crtc);
-	SDE_ATRACE_END("flush_event_thread");
-	sde_crtc->plane_mask_old = crtc->state->plane_mask;
-
-	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
-		/* acquire bandwidth and other resources */
-		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1);
-	} else {
-		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2);
-	}
-	sde_crtc->play_count++;
-
-	sde_vbif_clear_errors(sde_kms);
-
-	if (is_error) {
-		_sde_crtc_remove_pipe_flush(crtc);
-		_sde_crtc_blend_setup(crtc, old_state, false);
-	}
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		sde_encoder_kickoff(encoder, false);
-	}
-
-	/* store the event after frame trigger */
-	if (sde_crtc->event) {
-		WARN_ON(sde_crtc->event);
-	} else {
-		spin_lock_irqsave(&dev->event_lock, flags);
-		sde_crtc->event = crtc->state->event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	}
-
-	SDE_ATRACE_END("crtc_commit");
-}
-
-/**
- * _sde_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @sde_crtc: Pointer to sde crtc structure
- * @enable: Whether to enable/disable vblanks
- *
- * @Return: error code
- */
-static int _sde_crtc_vblank_enable_no_lock(
-		struct sde_crtc *sde_crtc, bool enable)
-{
-	struct drm_device *dev;
-	struct drm_crtc *crtc;
-	struct drm_encoder *enc;
-
-	if (!sde_crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	crtc = &sde_crtc->base;
-	dev = crtc->dev;
-
-	if (enable) {
-		int ret;
-
-		/* drop lock since power crtc cb may try to re-acquire lock */
-		mutex_unlock(&sde_crtc->crtc_lock);
-		ret = _sde_crtc_power_enable(sde_crtc, true);
-		mutex_lock(&sde_crtc->crtc_lock);
-		if (ret)
-			return ret;
-
-		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			if (enc->crtc != crtc)
-				continue;
-
-			SDE_EVT32(DRMID(&sde_crtc->base), DRMID(enc), enable,
-					sde_crtc->enabled,
-					sde_crtc->suspend,
-					sde_crtc->vblank_requested);
-
-			sde_encoder_register_vblank_callback(enc,
-					sde_crtc_vblank_cb, (void *)crtc);
-		}
-	} else {
-		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			if (enc->crtc != crtc)
-				continue;
-
-			SDE_EVT32(DRMID(&sde_crtc->base), DRMID(enc), enable,
-					sde_crtc->enabled,
-					sde_crtc->suspend,
-					sde_crtc->vblank_requested);
-
-			sde_encoder_register_vblank_callback(enc, NULL, NULL);
-		}
-
-		/* drop lock since power crtc cb may try to re-acquire lock */
-		mutex_unlock(&sde_crtc->crtc_lock);
-		_sde_crtc_power_enable(sde_crtc, false);
-		mutex_lock(&sde_crtc->crtc_lock);
-	}
-
-	return 0;
-}
-
-/**
- * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
-{
-	struct sde_crtc *sde_crtc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	int ret = 0;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	priv = crtc->dev->dev_private;
-
-	if (!priv->kms) {
-		SDE_ERROR("invalid crtc kms\n");
-		return;
-	}
-	sde_kms = to_sde_kms(priv->kms);
-
-	SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
-	SDE_EVT32_VERBOSE(DRMID(crtc), enable);
-
-	mutex_lock(&sde_crtc->crtc_lock);
-
-	/*
-	 * If the vblank is enabled, release a power reference on suspend
-	 * and take it back during resume (if it is still enabled).
-	 */
-	SDE_EVT32(DRMID(&sde_crtc->base), enable, sde_crtc->enabled,
-			sde_crtc->suspend, sde_crtc->vblank_requested);
-	if (sde_crtc->suspend == enable)
-		SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
-				crtc->base.id, enable);
-	else if (sde_crtc->enabled && sde_crtc->vblank_requested) {
-		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, !enable);
-		if (ret)
-			SDE_ERROR("%s vblank enable failed: %d\n",
-					sde_crtc->name, ret);
-	}
-
-	sde_crtc->suspend = enable;
-	mutex_unlock(&sde_crtc->crtc_lock);
-}
-
-/**
- * sde_crtc_duplicate_state - state duplicate hook
- * @crtc: Pointer to drm crtc structure
- * @Returns: Pointer to new drm_crtc_state structure
- */
-static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate, *old_cstate;
-
-	if (!crtc || !crtc->state) {
-		SDE_ERROR("invalid argument(s)\n");
-		return NULL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	old_cstate = to_sde_crtc_state(crtc->state);
-	cstate = msm_property_alloc_state(&sde_crtc->property_info);
-	if (!cstate) {
-		SDE_ERROR("failed to allocate state\n");
-		return NULL;
-	}
-
-	/* duplicate value helper */
-	msm_property_duplicate_state(&sde_crtc->property_info,
-			old_cstate, cstate,
-			&cstate->property_state, cstate->property_values);
-
-	/* clear destination scaler dirty bit */
-	cstate->ds_dirty = false;
-
-	/* duplicate base helper */
-	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
-
-	return &cstate->base;
-}
-
-/**
- * sde_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void sde_crtc_reset(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	/* revert suspend actions, if necessary */
-	if (sde_kms_is_suspend_state(crtc->dev)) {
-		_sde_crtc_set_suspend(crtc, false);
-
-		if (!sde_crtc_is_reset_required(crtc)) {
-			SDE_DEBUG("avoiding reset for crtc:%d\n",
-					crtc->base.id);
-			return;
-		}
-	}
-
-	/* remove previous state, if present */
-	if (crtc->state) {
-		sde_crtc_destroy_state(crtc, crtc->state);
-		crtc->state = 0;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = msm_property_alloc_state(&sde_crtc->property_info);
-	if (!cstate) {
-		SDE_ERROR("failed to allocate state\n");
-		return;
-	}
-
-	/* reset value helper */
-	msm_property_reset_state(&sde_crtc->property_info, cstate,
-			&cstate->property_state,
-			cstate->property_values);
-
-	_sde_crtc_set_input_fence_timeout(cstate);
-
-	cstate->base.crtc = crtc;
-	crtc->state = &cstate->base;
-}
-
-static void sde_crtc_handle_power_event(u32 event_type, void *arg)
-{
-	struct drm_crtc *crtc = arg;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_plane *plane;
-	struct drm_encoder *encoder;
-	u32 power_on;
-	unsigned long flags;
-	struct sde_crtc_irq_info *node = NULL;
-	int ret = 0;
-	struct drm_event event;
-	struct msm_drm_private *priv;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-	priv = crtc->dev->dev_private;
-
-	mutex_lock(&sde_crtc->crtc_lock);
-
-	SDE_EVT32(DRMID(crtc), event_type);
-
-	switch (event_type) {
-	case SDE_POWER_EVENT_POST_ENABLE:
-		/* disable mdp LUT memory retention */
-		ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk",
-					CLKFLAG_NORETAIN_MEM);
-		if (ret)
-			SDE_ERROR("disable LUT memory retention err %d\n", ret);
-
-		/* restore encoder; crtc will be programmed during commit */
-		drm_for_each_encoder_mask(encoder, crtc->dev,
-				crtc->state->encoder_mask) {
-			sde_encoder_virt_restore(encoder);
-		}
-
-		/* restore UIDLE */
-		sde_core_perf_crtc_update_uidle(crtc, true);
-
-		spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-		list_for_each_entry(node, &sde_crtc->user_event_list, list) {
-			ret = 0;
-			if (node->func)
-				ret = node->func(crtc, true, &node->irq);
-			if (ret)
-				SDE_ERROR("%s failed to enable event %x\n",
-						sde_crtc->name, node->event);
-		}
-		spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-		sde_cp_crtc_post_ipc(crtc);
-		break;
-	case SDE_POWER_EVENT_PRE_DISABLE:
-		/* enable mdp LUT memory retention */
-		ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk",
-					CLKFLAG_RETAIN_MEM);
-		if (ret)
-			SDE_ERROR("enable LUT memory retention err %d\n", ret);
-
-		drm_for_each_encoder_mask(encoder, crtc->dev,
-				crtc->state->encoder_mask) {
-			/*
-			 * disable the vsync source after updating the
-			 * rsc state. rsc state update might have vsync wait
-			 * and vsync source must be disabled after it.
-			 * It will avoid generating any vsync from this point
-			 * till mode-2 entry. It is SW workaround for HW
-			 * limitation and should not be removed without
-			 * checking the updated design.
-			 */
-			sde_encoder_control_te(encoder, false);
-		}
-
-		spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-		node = NULL;
-		list_for_each_entry(node, &sde_crtc->user_event_list, list) {
-			ret = 0;
-			if (node->func)
-				ret = node->func(crtc, false, &node->irq);
-			if (ret)
-				SDE_ERROR("%s failed to disable event %x\n",
-						sde_crtc->name, node->event);
-		}
-		spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-		sde_cp_crtc_pre_ipc(crtc);
-		break;
-	case SDE_POWER_EVENT_POST_DISABLE:
-		/*
-		 * set revalidate flag in planes, so it will be re-programmed
-		 * in the next frame update
-		 */
-		drm_atomic_crtc_for_each_plane(plane, crtc)
-			sde_plane_set_revalidate(plane, true);
-
-		sde_cp_crtc_suspend(crtc);
-
-		/**
-		 * destination scaler if enabled should be reconfigured
-		 * in the next frame update
-		 */
-		if (cstate->num_ds_enabled)
-			sde_crtc->ds_reconfig = true;
-
-		event.type = DRM_EVENT_SDE_POWER;
-		event.length = sizeof(power_on);
-		power_on = 0;
-		msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-				(u8 *)&power_on);
-		break;
-	default:
-		SDE_DEBUG("event:%d not handled\n", event_type);
-		break;
-	}
-
-	mutex_unlock(&sde_crtc->crtc_lock);
-}
-
-static void sde_crtc_disable(struct drm_crtc *crtc)
-{
-	struct sde_kms *sde_kms;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_encoder *encoder;
-	struct msm_drm_private *priv;
-	unsigned long flags;
-	struct sde_crtc_irq_info *node = NULL;
-	struct drm_event event;
-	u32 power_on;
-	bool in_cont_splash = false;
-	int ret, i;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc->state);
-	priv = crtc->dev->dev_private;
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	if (sde_kms_is_suspend_state(crtc->dev))
-		_sde_crtc_set_suspend(crtc, true);
-
-	mutex_lock(&sde_crtc->crtc_lock);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
-
-	/* update color processing on suspend */
-	event.type = DRM_EVENT_CRTC_POWER;
-	event.length = sizeof(u32);
-	sde_cp_crtc_suspend(crtc);
-	power_on = 0;
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-			(u8 *)&power_on);
-
-	/* destination scaler if enabled should be reconfigured on resume */
-	if (cstate->num_ds_enabled)
-		sde_crtc->ds_reconfig = true;
-
-	_sde_crtc_flush_event_thread(crtc);
-
-	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
-			sde_crtc->vblank_requested,
-			crtc->state->active, crtc->state->enable);
-	if (sde_crtc->enabled && !sde_crtc->suspend &&
-			sde_crtc->vblank_requested) {
-		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, false);
-		if (ret)
-			SDE_ERROR("%s vblank enable failed: %d\n",
-					sde_crtc->name, ret);
-	}
-	sde_crtc->enabled = false;
-
-	/* Try to disable uidle */
-	sde_core_perf_crtc_update_uidle(crtc, false);
-
-	if (atomic_read(&sde_crtc->frame_pending)) {
-		SDE_ERROR("crtc%d frame_pending%d\n", crtc->base.id,
-				atomic_read(&sde_crtc->frame_pending));
-		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
-							SDE_EVTLOG_FUNC_CASE2);
-		sde_core_perf_crtc_release_bw(crtc);
-		atomic_set(&sde_crtc->frame_pending, 0);
-	}
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
-		ret = 0;
-		if (node->func)
-			ret = node->func(crtc, false, &node->irq);
-		if (ret)
-			SDE_ERROR("%s failed to disable event %x\n",
-					sde_crtc->name, node->event);
-	}
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		if (sde_encoder_in_cont_splash(encoder)) {
-			in_cont_splash = true;
-			break;
-		}
-	}
-
-	/* avoid clk/bw downvote if cont-splash is enabled */
-	if (!in_cont_splash)
-		sde_core_perf_crtc_update(crtc, 0, true);
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
-		cstate->rsc_client = NULL;
-		cstate->rsc_update = false;
-
-		/*
-		 * reset idle power-collapse to original state during suspend;
-		 * user-mode will change the state on resume, if required
-		 */
-		if (sde_kms->catalog->has_idle_pc)
-			sde_encoder_control_idle_pc(encoder, true);
-	}
-
-	if (sde_crtc->power_event)
-		sde_power_handle_unregister_event(&priv->phandle,
-				sde_crtc->power_event);
-
-	/**
-	 * All callbacks are unregistered and frame done waits are complete
-	 * at this point. No buffers are accessed by hardware.
-	 * reset the fence timeline if crtc will not be enabled for this commit
-	 */
-	if (!crtc->state->active || !crtc->state->enable) {
-		sde_fence_signal(sde_crtc->output_fence,
-				ktime_get(), SDE_FENCE_RESET_TIMELINE);
-		for (i = 0; i < cstate->num_connectors; ++i)
-			sde_connector_commit_reset(cstate->connectors[i],
-					ktime_get());
-	}
-
-	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
-	sde_crtc->num_mixers = 0;
-	sde_crtc->mixers_swapped = false;
-
-	/* disable clk & bw control until clk & bw properties are set */
-	cstate->bw_control = false;
-	cstate->bw_split_vote = false;
-
-	mutex_unlock(&sde_crtc->crtc_lock);
-}
-
-static void sde_crtc_enable(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state)
-{
-	struct sde_crtc *sde_crtc;
-	struct drm_encoder *encoder;
-	struct msm_drm_private *priv;
-	unsigned long flags;
-	struct sde_crtc_irq_info *node = NULL;
-	struct drm_event event;
-	u32 power_on;
-	int ret, i;
-	struct sde_crtc_state *cstate;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-	priv = crtc->dev->dev_private;
-	cstate = to_sde_crtc_state(crtc->state);
-
-	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
-	sde_crtc = to_sde_crtc(crtc);
-
-	mutex_lock(&sde_crtc->crtc_lock);
-	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
-			sde_crtc->vblank_requested);
-
-	/*
-	 * Try to enable uidle (if possible), we do this before the call
-	 * to return early during seamless dms mode, so any fps
-	 * change is also consider to enable/disable UIDLE
-	 */
-	sde_core_perf_crtc_update_uidle(crtc, true);
-
-	/* return early if crtc is already enabled, do this after UIDLE check */
-	if (sde_crtc->enabled) {
-		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
-			SDE_DEBUG("%s extra crtc enable expected during DMS\n",
-					sde_crtc->name);
-		else
-			WARN(1, "%s unexpected crtc enable\n", sde_crtc->name);
-
-		mutex_unlock(&sde_crtc->crtc_lock);
-		return;
-	}
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		sde_encoder_register_frame_event_callback(encoder,
-				sde_crtc_frame_event_cb, crtc);
-	}
-
-	if (!sde_crtc->enabled && !sde_crtc->suspend &&
-			sde_crtc->vblank_requested) {
-		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, true);
-		if (ret)
-			SDE_ERROR("%s vblank enable failed: %d\n",
-					sde_crtc->name, ret);
-	}
-	sde_crtc->enabled = true;
-
-	/* update color processing on resume */
-	event.type = DRM_EVENT_CRTC_POWER;
-	event.length = sizeof(u32);
-	sde_cp_crtc_resume(crtc);
-	power_on = 1;
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-			(u8 *)&power_on);
-
-	mutex_unlock(&sde_crtc->crtc_lock);
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
-		ret = 0;
-		if (node->func)
-			ret = node->func(crtc, true, &node->irq);
-		if (ret)
-			SDE_ERROR("%s failed to enable event %x\n",
-				sde_crtc->name, node->event);
-	}
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	sde_crtc->power_event = sde_power_handle_register_event(
-		&priv->phandle,
-		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
-		SDE_POWER_EVENT_PRE_DISABLE,
-		sde_crtc_handle_power_event, crtc, sde_crtc->name);
-
-	/* Enable ESD thread */
-	for (i = 0; i < cstate->num_connectors; i++)
-		sde_connector_schedule_status_work(cstate->connectors[i], true);
-}
-
-/* no input validation - caller API has all the checks */
-static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
-		struct plane_state pstates[], int cnt)
-{
-	struct sde_crtc_state *cstate = to_sde_crtc_state(state);
-	struct drm_display_mode *mode = &state->adjusted_mode;
-	const struct drm_plane_state *pstate;
-	struct sde_plane_state *sde_pstate;
-	int rc = 0, i;
-
-	/* Check dim layer rect bounds and stage */
-	for (i = 0; i < cstate->num_dim_layers; i++) {
-		if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
-			cstate->dim_layer[i].rect.h, mode->vdisplay)) ||
-		    (CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.x,
-			cstate->dim_layer[i].rect.w, mode->hdisplay)) ||
-		    (cstate->dim_layer[i].stage >= SDE_STAGE_MAX) ||
-		    (!cstate->dim_layer[i].rect.w) ||
-		    (!cstate->dim_layer[i].rect.h)) {
-			SDE_ERROR("invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
-					cstate->dim_layer[i].rect.x,
-					cstate->dim_layer[i].rect.y,
-					cstate->dim_layer[i].rect.w,
-					cstate->dim_layer[i].rect.h,
-					cstate->dim_layer[i].stage);
-			SDE_ERROR("display: %dx%d\n", mode->hdisplay,
-					mode->vdisplay);
-			rc = -E2BIG;
-			goto end;
-		}
-	}
-
-	/* log all src and excl_rect, useful for debugging */
-	for (i = 0; i < cnt; i++) {
-		pstate = pstates[i].drm_pstate;
-		sde_pstate = to_sde_plane_state(pstate);
-		SDE_DEBUG("p %d z %d src{%d,%d,%d,%d} excl_rect{%d,%d,%d,%d}\n",
-			pstate->plane->base.id, pstates[i].stage,
-			pstate->crtc_x, pstate->crtc_y,
-			pstate->crtc_w, pstate->crtc_h,
-			sde_pstate->excl_rect.x, sde_pstate->excl_rect.y,
-			sde_pstate->excl_rect.w, sde_pstate->excl_rect.h);
-	}
-
-end:
-	return rc;
-}
-
-static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
-	struct drm_crtc_state *state, struct plane_state pstates[],
-	struct sde_crtc_state *cstate, struct sde_kms *sde_kms,
-	int cnt, int secure, int fb_ns, int fb_sec, int fb_sec_dir)
-{
-	struct drm_plane *plane;
-	int i;
-	if (secure == SDE_DRM_SEC_ONLY) {
-		/*
-		 * validate planes - only fb_sec_dir is allowed during sec_crtc
-		 * - fb_sec_dir is for secure camera preview and
-		 * secure display use case
-		 * - fb_sec is for secure video playback
-		 * - fb_ns is for normal non secure use cases
-		 */
-		if (fb_ns || fb_sec) {
-			SDE_ERROR(
-			 "crtc%d: invalid fb_modes Sec:%d, NS:%d, Sec_Dir:%d\n",
-				DRMID(crtc), fb_sec, fb_ns, fb_sec_dir);
-			return -EINVAL;
-		}
-
-		/*
-		 * - only one blending stage is allowed in sec_crtc
-		 * - validate if pipe is allowed for sec-ui updates
-		 */
-		for (i = 1; i < cnt; i++) {
-			if (!pstates[i].drm_pstate
-					|| !pstates[i].drm_pstate->plane) {
-				SDE_ERROR("crtc%d: invalid pstate at i:%d\n",
-						DRMID(crtc), i);
-				return -EINVAL;
-			}
-			plane = pstates[i].drm_pstate->plane;
-
-			if (!sde_plane_is_sec_ui_allowed(plane)) {
-				SDE_ERROR("crtc%d: sec-ui not allowed in p%d\n",
-						DRMID(crtc), plane->base.id);
-				return -EINVAL;
-
-			} else if (pstates[i].stage != pstates[i-1].stage) {
-				SDE_ERROR(
-				  "crtc%d: invalid blend stages %d:%d, %d:%d\n",
-				  DRMID(crtc), i, pstates[i].stage,
-				  i-1, pstates[i-1].stage);
-				return -EINVAL;
-			}
-		}
-
-		/* check if all the dim_layers are in the same stage */
-		for (i = 1; i < cstate->num_dim_layers; i++) {
-			if (cstate->dim_layer[i].stage !=
-					cstate->dim_layer[i-1].stage) {
-				SDE_ERROR(
-				"crtc%d: invalid dimlayer stage %d:%d, %d:%d\n",
-					DRMID(crtc),
-					i, cstate->dim_layer[i].stage,
-					i-1, cstate->dim_layer[i-1].stage);
-				return -EINVAL;
-			}
-		}
-
-		/*
-		 * if secure-ui supported blendstage is specified,
-		 * - fail empty commit
-		 * - validate dim_layer or plane is staged in the supported
-		 *   blendstage
-		 */
-		if (sde_kms->catalog->sui_supported_blendstage) {
-			int sec_stage = cnt ? pstates[0].sde_pstate->stage :
-						cstate->dim_layer[0].stage;
-
-			if ((!cnt && !cstate->num_dim_layers) ||
-				(sde_kms->catalog->sui_supported_blendstage
-						!= (sec_stage - SDE_STAGE_0))) {
-				SDE_ERROR(
-				  "crtc%d: empty cnt%d/dim%d or bad stage%d\n",
-					DRMID(crtc), cnt,
-					cstate->num_dim_layers, sec_stage);
-				return -EINVAL;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int _sde_crtc_check_secure_single_encoder(struct drm_crtc *crtc,
-	int fb_sec_dir)
-{
-	struct drm_encoder *encoder;
-	int encoder_cnt = 0;
-
-	if (fb_sec_dir) {
-		drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask)
-			encoder_cnt++;
-
-		if (encoder_cnt > MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
-			SDE_ERROR("crtc%d, invalid virtual encoder crtc%d\n",
-				DRMID(crtc), encoder_cnt);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
-	struct drm_crtc_state *state, struct sde_kms *sde_kms, int secure,
-	int fb_ns, int fb_sec, int fb_sec_dir)
-{
-	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
-	struct drm_encoder *encoder;
-	int is_video_mode = false;
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		is_video_mode |= sde_encoder_check_mode(encoder,
-						MSM_DISPLAY_CAP_VID_MODE);
-	}
-
-	/*
-	 * In video mode check for null commit before transition
-	 * from secure to non secure and vice versa
-	 */
-	if (is_video_mode && smmu_state &&
-		state->plane_mask && crtc->state->plane_mask &&
-		((fb_sec_dir && ((smmu_state->state == ATTACHED) &&
-			(secure == SDE_DRM_SEC_ONLY))) ||
-		    (fb_ns && ((smmu_state->state == DETACHED) ||
-			(smmu_state->state == DETACH_ALL_REQ))) ||
-		    (fb_ns && ((smmu_state->state == DETACHED_SEC) ||
-			(smmu_state->state == DETACH_SEC_REQ)) &&
-			(smmu_state->secure_level == SDE_DRM_SEC_ONLY)))) {
-
-		SDE_EVT32(DRMID(crtc), fb_ns, fb_sec_dir,
-			smmu_state->state, smmu_state->secure_level,
-			secure, crtc->state->plane_mask, state->plane_mask);
-		SDE_ERROR(
-		 "crtc%d Invalid transition;sec%d state%d slvl%d ns%d sdir%d\n",
-			DRMID(crtc), secure, smmu_state->state,
-			smmu_state->secure_level, fb_ns, fb_sec_dir);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
-		struct drm_crtc_state *state, struct plane_state pstates[],
-		int cnt)
-{
-	struct sde_crtc_state *cstate;
-	struct sde_kms *sde_kms;
-	uint32_t secure;
-	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
-	int rc;
-
-	if (!crtc || !state) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms || !sde_kms->catalog) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	cstate = to_sde_crtc_state(state);
-
-	secure = sde_crtc_get_property(cstate, CRTC_PROP_SECURITY_LEVEL);
-
-	rc = sde_crtc_state_find_plane_fb_modes(state, &fb_ns,
-					&fb_sec, &fb_sec_dir);
-	if (rc)
-		return rc;
-
-	rc = _sde_crtc_check_secure_blend_config(crtc, state, pstates, cstate,
-			sde_kms, cnt, secure, fb_ns, fb_sec, fb_sec_dir);
-	if (rc)
-		return rc;
-
-	/*
-	 * secure_crtc is not allowed in a shared toppolgy
-	 * across different encoders.
-	 */
-	rc = _sde_crtc_check_secure_single_encoder(crtc, fb_sec_dir);
-	if (rc)
-		return rc;
-
-	rc = _sde_crtc_check_secure_state_smmu_translation(crtc, state, sde_kms,
-			secure, fb_ns, fb_sec, fb_sec_dir);
-	if (rc)
-		return rc;
-
-	SDE_DEBUG("crtc:%d Secure validation successful\n", DRMID(crtc));
-
-	return 0;
-}
-
-static int _sde_crtc_check_get_pstates(struct drm_crtc *crtc,
-		struct drm_crtc_state *state,
-		struct drm_display_mode *mode,
-		struct plane_state *pstates,
-		struct drm_plane *plane,
-		struct sde_multirect_plane_states *multirect_plane,
-		int *cnt)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	const struct drm_plane_state *pstate;
-	const struct drm_plane_state *pipe_staged[SSPP_MAX];
-	int rc = 0, multirect_count = 0, i;
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	memset(pipe_staged, 0, sizeof(pipe_staged));
-
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (IS_ERR_OR_NULL(pstate)) {
-			rc = PTR_ERR(pstate);
-			SDE_ERROR("%s: failed to get plane%d state, %d\n",
-					sde_crtc->name, plane->base.id, rc);
-			return rc;
-		}
-
-		if (*cnt >= SDE_PSTATES_MAX)
-			continue;
-
-		pstates[*cnt].sde_pstate = to_sde_plane_state(pstate);
-		pstates[*cnt].drm_pstate = pstate;
-		pstates[*cnt].stage = sde_plane_get_property(
-				pstates[*cnt].sde_pstate, PLANE_PROP_ZPOS);
-		pstates[*cnt].pipe_id = sde_plane_pipe(plane);
-
-		/* check dim layer stage with every plane */
-		for (i = 0; i < cstate->num_dim_layers; i++) {
-			if (cstate->dim_layer[i].stage ==
-					(pstates[*cnt].stage + SDE_STAGE_0)) {
-				SDE_ERROR(
-					"plane:%d/dim_layer:%i-same stage:%d\n",
-					plane->base.id, i,
-					cstate->dim_layer[i].stage);
-				return -EINVAL;
-			}
-		}
-
-		if (pipe_staged[pstates[*cnt].pipe_id]) {
-			multirect_plane[multirect_count].r0 =
-				pipe_staged[pstates[*cnt].pipe_id];
-			multirect_plane[multirect_count].r1 = pstate;
-			multirect_count++;
-
-			pipe_staged[pstates[*cnt].pipe_id] = NULL;
-		} else {
-			pipe_staged[pstates[*cnt].pipe_id] = pstate;
-		}
-
-		(*cnt)++;
-
-		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
-				mode->vdisplay) ||
-		    CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
-				mode->hdisplay)) {
-			SDE_ERROR("invalid vertical/horizontal destination\n");
-			SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
-				pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
-				pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
-			return -E2BIG;
-		}
-	}
-
-	for (i = 1; i < SSPP_MAX; i++) {
-		if (pipe_staged[i]) {
-			if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
-				SDE_ERROR(
-					"r1 only virt plane:%d not supported\n",
-					pipe_staged[i]->plane->base.id);
-				return -EINVAL;
-			}
-			sde_plane_clear_multirect(pipe_staged[i]);
-		}
-	}
-
-	for (i = 0; i < multirect_count; i++) {
-		if (sde_plane_validate_multirect_v2(&multirect_plane[i])) {
-			SDE_ERROR(
-			"multirect validation failed for planes (%d - %d)\n",
-					multirect_plane[i].r0->plane->base.id,
-					multirect_plane[i].r1->plane->base.id);
-			return -EINVAL;
-		}
-	}
-	return rc;
-}
-
-static int _sde_crtc_check_zpos(struct drm_crtc_state *state,
-		struct sde_crtc *sde_crtc,
-		struct plane_state *pstates,
-		struct sde_crtc_state *cstate,
-		struct drm_display_mode *mode,
-		int cnt)
-{
-	int rc = 0, i, z_pos;
-	u32 zpos_cnt = 0;
-
-	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
-
-	rc = _sde_crtc_excl_dim_layer_check(state, pstates, cnt);
-	if (rc)
-		return rc;
-
-	if (!sde_is_custom_client()) {
-		int stage_old = pstates[0].stage;
-
-		z_pos = 0;
-		for (i = 0; i < cnt; i++) {
-			if (stage_old != pstates[i].stage)
-				++z_pos;
-			stage_old = pstates[i].stage;
-			pstates[i].stage = z_pos;
-		}
-	}
-
-	z_pos = -1;
-	for (i = 0; i < cnt; i++) {
-		/* reset counts at every new blend stage */
-		if (pstates[i].stage != z_pos) {
-			zpos_cnt = 0;
-			z_pos = pstates[i].stage;
-		}
-
-		/* verify z_pos setting before using it */
-		if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
-			SDE_ERROR("> %d plane stages assigned\n",
-					SDE_STAGE_MAX - SDE_STAGE_0);
-			return -EINVAL;
-		} else if (zpos_cnt == 2) {
-			SDE_ERROR("> 2 planes @ stage %d\n", z_pos);
-			return -EINVAL;
-		} else {
-			zpos_cnt++;
-		}
-
-		pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
-		SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos);
-	}
-	return rc;
-}
-
-static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
-		struct drm_crtc_state *state,
-		struct plane_state *pstates,
-		struct sde_multirect_plane_states *multirect_plane)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct sde_kms *kms;
-	struct drm_plane *plane;
-	struct drm_display_mode *mode;
-	int rc = 0, cnt = 0;
-
-	kms = _sde_crtc_get_kms(crtc);
-
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-	mode = &state->adjusted_mode;
-
-	/* get plane state for all drm planes associated with crtc state */
-	rc = _sde_crtc_check_get_pstates(crtc, state, mode, pstates,
-			plane, multirect_plane, &cnt);
-	if (rc)
-		return rc;
-
-	/* assign mixer stages based on sorted zpos property */
-	rc = _sde_crtc_check_zpos(state, sde_crtc, pstates, cstate, mode, cnt);
-	if (rc)
-		return rc;
-
-	rc = _sde_crtc_check_secure_state(crtc, state, pstates, cnt);
-	if (rc)
-		return rc;
-
-	/*
-	 * validate and set source split:
-	 * use pstates sorted by stage to check planes on same stage
-	 * we assume that all pipes are in source split so its valid to compare
-	 * without taking into account left/right mixer placement
-	 */
-	rc = _sde_crtc_validate_src_split_order(crtc, pstates, cnt);
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
-static int sde_crtc_atomic_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	struct drm_device *dev;
-	struct sde_crtc *sde_crtc;
-	struct plane_state *pstates = NULL;
-	struct sde_crtc_state *cstate;
-	struct drm_display_mode *mode;
-	int rc = 0;
-	struct sde_multirect_plane_states *multirect_plane = NULL;
-	struct drm_connector *conn;
-	struct drm_connector_list_iter conn_iter;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	dev = crtc->dev;
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	if (!state->enable || !state->active) {
-		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
-				crtc->base.id, state->enable, state->active);
-		goto end;
-	}
-
-	pstates = kcalloc(SDE_PSTATES_MAX,
-			sizeof(struct plane_state), GFP_KERNEL);
-
-	multirect_plane = kcalloc(SDE_MULTIRECT_PLANE_MAX,
-			sizeof(struct sde_multirect_plane_states),
-			GFP_KERNEL);
-
-	if (!pstates || !multirect_plane) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	mode = &state->adjusted_mode;
-	SDE_DEBUG("%s: check", sde_crtc->name);
-
-	/* force a full mode set if active state changed */
-	if (state->active_changed)
-		state->mode_changed = true;
-
-	rc = _sde_crtc_check_dest_scaler_data(crtc, state);
-	if (rc) {
-		SDE_ERROR("crtc%d failed dest scaler check %d\n",
-			crtc->base.id, rc);
-		goto end;
-	}
-
-	/* identify connectors attached to this crtc */
-	cstate->num_connectors = 0;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter)
-		if (conn->state && conn->state->crtc == crtc &&
-				cstate->num_connectors < MAX_CONNECTORS) {
-			cstate->connectors[cstate->num_connectors++] = conn;
-		}
-	drm_connector_list_iter_end(&conn_iter);
-
-	_sde_crtc_setup_is_ppsplit(state);
-	_sde_crtc_setup_lm_bounds(crtc, state);
-
-	rc = _sde_crtc_atomic_check_pstates(crtc, state, pstates,
-			multirect_plane);
-	if (rc) {
-		SDE_ERROR("crtc%d failed pstate check %d\n", crtc->base.id, rc);
-		goto end;
-	}
-
-	rc = sde_core_perf_crtc_check(crtc, state);
-	if (rc) {
-		SDE_ERROR("crtc%d failed performance check %d\n",
-				crtc->base.id, rc);
-		goto end;
-	}
-
-	rc = _sde_crtc_check_rois(crtc, state);
-	if (rc) {
-		SDE_ERROR("crtc%d failed roi check %d\n", crtc->base.id, rc);
-		goto end;
-	}
-
-end:
-	kfree(pstates);
-	kfree(multirect_plane);
-	return rc;
-}
-
-int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
-{
-	struct sde_crtc *sde_crtc;
-	int ret;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-
-	mutex_lock(&sde_crtc->crtc_lock);
-	SDE_EVT32(DRMID(&sde_crtc->base), en, sde_crtc->enabled,
-			sde_crtc->suspend, sde_crtc->vblank_requested);
-	if (sde_crtc->enabled && !sde_crtc->suspend) {
-		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, en);
-		if (ret)
-			SDE_ERROR("%s vblank enable failed: %d\n",
-					sde_crtc->name, ret);
-	}
-	sde_crtc->vblank_requested = en;
-	mutex_unlock(&sde_crtc->crtc_lock);
-
-	return 0;
-}
-
-/**
- * sde_crtc_install_properties - install all drm properties for crtc
- * @crtc: Pointer to drm crtc structure
- */
-static void sde_crtc_install_properties(struct drm_crtc *crtc,
-				struct sde_mdss_cfg *catalog)
-{
-	struct sde_crtc *sde_crtc;
-	struct drm_device *dev;
-	struct sde_kms_info *info;
-	struct sde_kms *sde_kms;
-	static const struct drm_prop_enum_list e_secure_level[] = {
-		{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
-		{SDE_DRM_SEC_ONLY, "sec_only"},
-	};
-
-	static const struct drm_prop_enum_list e_cwb_data_points[] = {
-		{CAPTURE_MIXER_OUT, "capture_mixer_out"},
-		{CAPTURE_DSPP_OUT, "capture_pp_out"},
-	};
-
-	static const struct drm_prop_enum_list e_idle_pc_state[] = {
-		{IDLE_PC_NONE, "idle_pc_none"},
-		{IDLE_PC_ENABLE, "idle_pc_enable"},
-		{IDLE_PC_DISABLE, "idle_pc_disable"},
-	};
-
-	SDE_DEBUG("\n");
-
-	if (!crtc || !catalog) {
-		SDE_ERROR("invalid crtc or catalog\n");
-		return;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	dev = crtc->dev;
-	sde_kms = _sde_crtc_get_kms(crtc);
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
-	if (!info) {
-		SDE_ERROR("failed to allocate info memory\n");
-		return;
-	}
-
-	/* range properties */
-	msm_property_install_range(&sde_crtc->property_info,
-		"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
-		SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
-
-	msm_property_install_volatile_range(&sde_crtc->property_info,
-		"output_fence", 0x0, 0, ~0, 0, CRTC_PROP_OUTPUT_FENCE);
-
-	msm_property_install_range(&sde_crtc->property_info,
-			"output_fence_offset", 0x0, 0, 1, 0,
-			CRTC_PROP_OUTPUT_FENCE_OFFSET);
-
-	msm_property_install_range(&sde_crtc->property_info,
-			"core_clk", 0x0, 0, U64_MAX,
-			sde_kms->perf.max_core_clk_rate,
-			CRTC_PROP_CORE_CLK);
-	msm_property_install_range(&sde_crtc->property_info,
-			"core_ab", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_CORE_AB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"core_ib", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_CORE_IB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"llcc_ab", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_LLCC_AB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"llcc_ib", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_LLCC_IB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"dram_ab", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_DRAM_AB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"dram_ib", 0x0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_DRAM_IB);
-	msm_property_install_range(&sde_crtc->property_info,
-			"rot_prefill_bw", 0, 0, U64_MAX,
-			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_ROT_PREFILL_BW);
-	msm_property_install_range(&sde_crtc->property_info,
-			"rot_clk", 0, 0, U64_MAX,
-			sde_kms->perf.max_core_clk_rate,
-			CRTC_PROP_ROT_CLK);
-
-	msm_property_install_range(&sde_crtc->property_info,
-		"idle_time", 0, 0, U64_MAX, 0,
-		CRTC_PROP_IDLE_TIMEOUT);
-
-	if (catalog->has_idle_pc)
-		msm_property_install_enum(&sde_crtc->property_info,
-			"idle_pc_state", 0x0, 0, e_idle_pc_state,
-			ARRAY_SIZE(e_idle_pc_state),
-			CRTC_PROP_IDLE_PC_STATE);
-
-	if (catalog->has_cwb_support)
-		msm_property_install_enum(&sde_crtc->property_info,
-				"capture_mode", 0, 0, e_cwb_data_points,
-				ARRAY_SIZE(e_cwb_data_points),
-				CRTC_PROP_CAPTURE_OUTPUT);
-
-	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
-		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
-
-	msm_property_install_volatile_range(&sde_crtc->property_info,
-		"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
-
-	msm_property_install_enum(&sde_crtc->property_info, "security_level",
-			0x0, 0, e_secure_level,
-			ARRAY_SIZE(e_secure_level),
-			CRTC_PROP_SECURITY_LEVEL);
-
-	sde_kms_info_reset(info);
-
-	if (catalog->has_dim_layer) {
-		msm_property_install_volatile_range(&sde_crtc->property_info,
-			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
-		sde_kms_info_add_keyint(info, "dim_layer_v1_max_layers",
-				SDE_MAX_DIM_LAYERS);
-	}
-
-	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
-	sde_kms_info_add_keyint(info, "max_linewidth",
-			catalog->max_mixer_width);
-	sde_kms_info_add_keyint(info, "max_blendstages",
-			catalog->max_mixer_blendstages);
-	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
-		sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
-	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
-		sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
-	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3LITE)
-		sde_kms_info_add_keystr(info, "qseed_type", "qseed3lite");
-
-	sde_kms_info_add_keyint(info, "UBWC version", catalog->ubwc_version);
-	sde_kms_info_add_keyint(info, "UBWC macrotile_mode",
-				catalog->macrotile_mode);
-	sde_kms_info_add_keyint(info, "UBWC highest banking bit",
-				catalog->mdp[0].highest_bank_bit);
-	sde_kms_info_add_keyint(info, "UBWC swizzle",
-				catalog->mdp[0].ubwc_swizzle);
-
-	if (of_fdt_get_ddrtype() == LP_DDR4_TYPE)
-		sde_kms_info_add_keystr(info, "DDR version", "DDR4");
-	else
-		sde_kms_info_add_keystr(info, "DDR version", "DDR5");
-
-	if (sde_is_custom_client()) {
-		/* No support for SMART_DMA_V1 yet */
-		if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V2)
-			sde_kms_info_add_keystr(info,
-					"smart_dma_rev", "smart_dma_v2");
-		else if (catalog->smart_dma_rev == SDE_SSPP_SMART_DMA_V2p5)
-			sde_kms_info_add_keystr(info,
-					"smart_dma_rev", "smart_dma_v2p5");
-	}
-
-	if (catalog->mdp[0].has_dest_scaler) {
-		sde_kms_info_add_keyint(info, "has_dest_scaler",
-				catalog->mdp[0].has_dest_scaler);
-		sde_kms_info_add_keyint(info, "dest_scaler_count",
-					catalog->ds_count);
-
-		if (catalog->ds[0].top) {
-			sde_kms_info_add_keyint(info,
-					"max_dest_scaler_input_width",
-					catalog->ds[0].top->maxinputwidth);
-			sde_kms_info_add_keyint(info,
-					"max_dest_scaler_output_width",
-					catalog->ds[0].top->maxinputwidth);
-			sde_kms_info_add_keyint(info, "max_dest_scale_up",
-					catalog->ds[0].top->maxupscale);
-		}
-
-		if (catalog->ds[0].features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-			msm_property_install_volatile_range(
-					&sde_crtc->property_info, "dest_scaler",
-					0x0, 0, ~0, 0, CRTC_PROP_DEST_SCALER);
-			msm_property_install_blob(&sde_crtc->property_info,
-					"ds_lut_ed", 0,
-					CRTC_PROP_DEST_SCALER_LUT_ED);
-			msm_property_install_blob(&sde_crtc->property_info,
-					"ds_lut_cir", 0,
-					CRTC_PROP_DEST_SCALER_LUT_CIR);
-			msm_property_install_blob(&sde_crtc->property_info,
-					"ds_lut_sep", 0,
-					CRTC_PROP_DEST_SCALER_LUT_SEP);
-		} else if (catalog->ds[0].features
-				& BIT(SDE_SSPP_SCALER_QSEED3LITE)) {
-			msm_property_install_volatile_range(
-					&sde_crtc->property_info, "dest_scaler",
-					0x0, 0, ~0, 0, CRTC_PROP_DEST_SCALER);
-		}
-	}
-
-	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
-	sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
-	sde_kms_info_add_keyint(info, "has_hdr_plus", catalog->has_hdr_plus);
-	if (catalog->perf.max_bw_low)
-		sde_kms_info_add_keyint(info, "max_bandwidth_low",
-				catalog->perf.max_bw_low * 1000LL);
-	if (catalog->perf.max_bw_high)
-		sde_kms_info_add_keyint(info, "max_bandwidth_high",
-				catalog->perf.max_bw_high * 1000LL);
-	if (catalog->perf.min_core_ib)
-		sde_kms_info_add_keyint(info, "min_core_ib",
-				catalog->perf.min_core_ib * 1000LL);
-	if (catalog->perf.min_llcc_ib)
-		sde_kms_info_add_keyint(info, "min_llcc_ib",
-				catalog->perf.min_llcc_ib * 1000LL);
-	if (catalog->perf.min_dram_ib)
-		sde_kms_info_add_keyint(info, "min_dram_ib",
-				catalog->perf.min_dram_ib * 1000LL);
-	if (sde_kms->perf.max_core_clk_rate)
-		sde_kms_info_add_keyint(info, "max_mdp_clk",
-				sde_kms->perf.max_core_clk_rate);
-	sde_kms_info_add_keystr(info, "core_ib_ff",
-			catalog->perf.core_ib_ff);
-	sde_kms_info_add_keystr(info, "core_clk_ff",
-			catalog->perf.core_clk_ff);
-	sde_kms_info_add_keystr(info, "comp_ratio_rt",
-			catalog->perf.comp_ratio_rt);
-	sde_kms_info_add_keystr(info, "comp_ratio_nrt",
-			catalog->perf.comp_ratio_nrt);
-	sde_kms_info_add_keyint(info, "dest_scale_prefill_lines",
-			catalog->perf.dest_scale_prefill_lines);
-	sde_kms_info_add_keyint(info, "undersized_prefill_lines",
-			catalog->perf.undersized_prefill_lines);
-	sde_kms_info_add_keyint(info, "macrotile_prefill_lines",
-			catalog->perf.macrotile_prefill_lines);
-	sde_kms_info_add_keyint(info, "yuv_nv12_prefill_lines",
-			catalog->perf.yuv_nv12_prefill_lines);
-	sde_kms_info_add_keyint(info, "linear_prefill_lines",
-			catalog->perf.linear_prefill_lines);
-	sde_kms_info_add_keyint(info, "downscaling_prefill_lines",
-			catalog->perf.downscaling_prefill_lines);
-	sde_kms_info_add_keyint(info, "xtra_prefill_lines",
-			catalog->perf.xtra_prefill_lines);
-	sde_kms_info_add_keyint(info, "amortizable_threshold",
-			catalog->perf.amortizable_threshold);
-	sde_kms_info_add_keyint(info, "min_prefill_lines",
-			catalog->perf.min_prefill_lines);
-	sde_kms_info_add_keyint(info, "sec_ui_blendstage",
-			catalog->sui_supported_blendstage);
-
-	if (catalog->ubwc_bw_calc_version)
-		sde_kms_info_add_keyint(info, "ubwc_bw_calc_ver",
-				catalog->ubwc_bw_calc_version);
-
-	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
-			info->data, SDE_KMS_INFO_DATALEN(info), CRTC_PROP_INFO);
-
-	kfree(info);
-}
-
-static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,
-	const struct drm_crtc_state *state, uint64_t *val)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	uint32_t offset;
-	bool is_vid = false;
-	struct drm_encoder *encoder;
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
-		is_vid |= sde_encoder_check_mode(encoder,
-						MSM_DISPLAY_CAP_VID_MODE);
-		if (is_vid)
-			break;
-	}
-
-	offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET);
-
-	/*
-	 * Increment trigger offset for vidoe mode alone as its release fence
-	 * can be triggered only after the next frame-update. For cmd mode &
-	 * virtual displays the release fence for the current frame can be
-	 * triggered right after PP_DONE/WB_DONE interrupt
-	 */
-	if (is_vid)
-		offset++;
-
-	/*
-	 * Hwcomposer now queries the fences using the commit list in atomic
-	 * commit ioctl. The offset should be set to next timeline
-	 * which will be incremented during the prepare commit phase
-	 */
-	offset++;
-
-	return sde_fence_create(sde_crtc->output_fence, val, offset);
-}
-
-/**
- * sde_crtc_atomic_set_property - atomically set a crtc drm property
- * @crtc: Pointer to drm crtc structure
- * @state: Pointer to drm crtc state structure
- * @property: Pointer to targeted drm property
- * @val: Updated property value
- * @Returns: Zero on success
- */
-static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
-		struct drm_crtc_state *state,
-		struct drm_property *property,
-		uint64_t val)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	int idx, ret;
-	uint64_t fence_fd;
-
-	if (!crtc || !state || !property) {
-		SDE_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	SDE_ATRACE_BEGIN("sde_crtc_atomic_set_property");
-	/* check with cp property system first */
-	ret = sde_cp_crtc_set_property(crtc, property, val);
-	if (ret != -ENOENT)
-		goto exit;
-
-	/* if not handled by cp, check msm_property system */
-	ret = msm_property_atomic_set(&sde_crtc->property_info,
-			&cstate->property_state, property, val);
-	if (ret)
-		goto exit;
-
-	idx = msm_property_index(&sde_crtc->property_info, property);
-	switch (idx) {
-	case CRTC_PROP_INPUT_FENCE_TIMEOUT:
-		_sde_crtc_set_input_fence_timeout(cstate);
-		break;
-	case CRTC_PROP_DIM_LAYER_V1:
-		_sde_crtc_set_dim_layer_v1(cstate,
-					(void __user *)(uintptr_t)val);
-		break;
-	case CRTC_PROP_ROI_V1:
-		ret = _sde_crtc_set_roi_v1(state,
-					(void __user *)(uintptr_t)val);
-		break;
-	case CRTC_PROP_DEST_SCALER:
-		ret = _sde_crtc_set_dest_scaler(sde_crtc, cstate,
-				(void __user *)(uintptr_t)val);
-		break;
-	case CRTC_PROP_DEST_SCALER_LUT_ED:
-	case CRTC_PROP_DEST_SCALER_LUT_CIR:
-	case CRTC_PROP_DEST_SCALER_LUT_SEP:
-		ret = _sde_crtc_set_dest_scaler_lut(sde_crtc, cstate, idx);
-		break;
-	case CRTC_PROP_CORE_CLK:
-	case CRTC_PROP_CORE_AB:
-	case CRTC_PROP_CORE_IB:
-		cstate->bw_control = true;
-		break;
-	case CRTC_PROP_LLCC_AB:
-	case CRTC_PROP_LLCC_IB:
-	case CRTC_PROP_DRAM_AB:
-	case CRTC_PROP_DRAM_IB:
-		cstate->bw_control = true;
-		cstate->bw_split_vote = true;
-		break;
-	case CRTC_PROP_OUTPUT_FENCE:
-		if (!val)
-			goto exit;
-
-		ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
-		if (ret) {
-			SDE_ERROR("fence create failed rc:%d\n", ret);
-			goto exit;
-		}
-
-		ret = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
-				sizeof(uint64_t));
-		if (ret) {
-			SDE_ERROR("copy to user failed rc:%d\n", ret);
-			put_unused_fd(fence_fd);
-			ret = -EFAULT;
-			goto exit;
-		}
-		break;
-	default:
-		/* nothing to do */
-		break;
-	}
-
-exit:
-	if (ret) {
-		if (ret != -EPERM)
-			SDE_ERROR("%s: failed to set property%d %s: %d\n",
-				crtc->name, DRMID(property),
-				property->name, ret);
-		else
-			SDE_DEBUG("%s: failed to set property%d %s: %d\n",
-				crtc->name, DRMID(property),
-				property->name, ret);
-	} else {
-		SDE_DEBUG("%s: %s[%d] <= 0x%llx\n", crtc->name, property->name,
-				property->base.id, val);
-	}
-
-	SDE_ATRACE_END("sde_crtc_atomic_set_property");
-	return ret;
-}
-
-/**
- * sde_crtc_atomic_get_property - retrieve a crtc drm property
- * @crtc: Pointer to drm crtc structure
- * @state: Pointer to drm crtc state structure
- * @property: Pointer to targeted drm property
- * @val: Pointer to variable for receiving property value
- * @Returns: Zero on success
- */
-static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
-		const struct drm_crtc_state *state,
-		struct drm_property *property,
-		uint64_t *val)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	int ret = -EINVAL, i;
-
-	if (!crtc || !state) {
-		SDE_ERROR("invalid argument(s)\n");
-		goto end;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
-
-	i = msm_property_index(&sde_crtc->property_info, property);
-	if (i == CRTC_PROP_OUTPUT_FENCE) {
-		*val = ~0;
-		ret = 0;
-	} else {
-		ret = msm_property_atomic_get(&sde_crtc->property_info,
-			&cstate->property_state, property, val);
-		if (ret)
-			ret = sde_cp_crtc_get_property(crtc, property, val);
-	}
-	if (ret)
-		DRM_ERROR("get property failed\n");
-
-end:
-	return ret;
-}
-
-int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc,
-		struct drm_crtc_state *crtc_state)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct drm_property *drm_prop;
-	enum msm_mdp_crtc_property prop_idx;
-
-	if (!crtc || !crtc_state) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(crtc_state);
-
-	sde_cp_crtc_clear(crtc);
-
-	for (prop_idx = 0; prop_idx < CRTC_PROP_COUNT; prop_idx++) {
-		uint64_t val = cstate->property_values[prop_idx].value;
-		uint64_t def;
-		int ret;
-
-		drm_prop = msm_property_index_to_drm_property(
-				&sde_crtc->property_info, prop_idx);
-		if (!drm_prop) {
-			/* not all props will be installed, based on caps */
-			SDE_DEBUG("%s: invalid property index %d\n",
-					sde_crtc->name, prop_idx);
-			continue;
-		}
-
-		def = msm_property_get_default(&sde_crtc->property_info,
-				prop_idx);
-		if (val == def)
-			continue;
-
-		SDE_DEBUG("%s: set prop %s idx %d from %llu to %llu\n",
-				sde_crtc->name, drm_prop->name, prop_idx, val,
-				def);
-
-		ret = sde_crtc_atomic_set_property(crtc, crtc_state, drm_prop,
-				def);
-		if (ret) {
-			SDE_ERROR("%s: set property failed, idx %d ret %d\n",
-					sde_crtc->name, prop_idx, ret);
-			continue;
-		}
-	}
-
-	return 0;
-}
-
-void sde_crtc_misr_setup(struct drm_crtc *crtc, bool enable, u32 frame_count)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_mixer *m;
-	int i;
-
-	if (!crtc) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-
-	sde_crtc->misr_enable_sui = enable;
-	sde_crtc->misr_frame_count = frame_count;
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
-		m = &sde_crtc->mixers[i];
-		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
-			continue;
-
-		m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
-	}
-}
-
-void sde_crtc_get_misr_info(struct drm_crtc *crtc,
-			struct sde_crtc_misr_info *crtc_misr_info)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_kms *sde_kms;
-
-	if (!crtc_misr_info) {
-		SDE_ERROR("invalid misr info\n");
-		return;
-	}
-
-	crtc_misr_info->misr_enable = false;
-	crtc_misr_info->misr_frame_count = 0;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	}
-
-	if (sde_kms_is_secure_session_inprogress(sde_kms))
-		return;
-
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_misr_info->misr_enable =
-			sde_crtc->misr_enable_debugfs ? true : false;
-	crtc_misr_info->misr_frame_count = sde_crtc->misr_frame_count;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int _sde_debugfs_status_show(struct seq_file *s, void *data)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_plane_state *pstate = NULL;
-	struct sde_crtc_mixer *m;
-
-	struct drm_crtc *crtc;
-	struct drm_plane *plane;
-	struct drm_display_mode *mode;
-	struct drm_framebuffer *fb;
-	struct drm_plane_state *state;
-	struct sde_crtc_state *cstate;
-
-	int i, out_width, out_height;
-
-	if (!s || !s->private)
-		return -EINVAL;
-
-	sde_crtc = s->private;
-	crtc = &sde_crtc->base;
-	cstate = to_sde_crtc_state(crtc->state);
-
-	mutex_lock(&sde_crtc->crtc_lock);
-	mode = &crtc->state->adjusted_mode;
-	out_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
-	out_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);
-
-	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
-				mode->hdisplay, mode->vdisplay);
-
-	seq_puts(s, "\n");
-
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
-		m = &sde_crtc->mixers[i];
-		if (!m->hw_lm)
-			seq_printf(s, "\tmixer[%d] has no lm\n", i);
-		else if (!m->hw_ctl)
-			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
-		else
-			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
-				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
-				out_width, out_height);
-	}
-
-	seq_puts(s, "\n");
-
-	for (i = 0; i < cstate->num_dim_layers; i++) {
-		struct sde_hw_dim_layer *dim_layer = &cstate->dim_layer[i];
-
-		seq_printf(s, "\tdim_layer:%d] stage:%d flags:%d\n",
-				i, dim_layer->stage, dim_layer->flags);
-		seq_printf(s, "\tdst_x:%d dst_y:%d dst_w:%d dst_h:%d\n",
-				dim_layer->rect.x, dim_layer->rect.y,
-				dim_layer->rect.w, dim_layer->rect.h);
-		seq_printf(s,
-			"\tcolor_0:%d color_1:%d color_2:%d color_3:%d\n",
-				dim_layer->color_fill.color_0,
-				dim_layer->color_fill.color_1,
-				dim_layer->color_fill.color_2,
-				dim_layer->color_fill.color_3);
-		seq_puts(s, "\n");
-	}
-
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		pstate = to_sde_plane_state(plane->state);
-		state = plane->state;
-
-		if (!pstate || !state)
-			continue;
-
-		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
-			pstate->stage);
-
-		if (plane->state->fb) {
-			fb = plane->state->fb;
-
-			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
-				fb->base.id, (char *) &fb->format->format,
-				fb->width, fb->height);
-			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
-				seq_printf(s, "cpp[%d]:%u ",
-						i, fb->format->cpp[i]);
-			seq_puts(s, "\n\t");
-
-			seq_printf(s, "modifier:%8llu ", fb->modifier);
-			seq_puts(s, "\n");
-
-			seq_puts(s, "\t");
-			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
-				seq_printf(s, "pitches[%d]:%8u ", i,
-							fb->pitches[i]);
-			seq_puts(s, "\n");
-
-			seq_puts(s, "\t");
-			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
-				seq_printf(s, "offsets[%d]:%8u ", i,
-							fb->offsets[i]);
-			seq_puts(s, "\n");
-		}
-
-		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
-			state->src_x, state->src_y, state->src_w, state->src_h);
-
-		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
-			state->crtc_x, state->crtc_y, state->crtc_w,
-			state->crtc_h);
-		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
-			pstate->multirect_mode, pstate->multirect_index);
-
-		seq_printf(s, "\texcl_rect: x:%4d y:%4d w:%4d h:%4d\n",
-			pstate->excl_rect.x, pstate->excl_rect.y,
-			pstate->excl_rect.w, pstate->excl_rect.h);
-
-		seq_puts(s, "\n");
-	}
-
-	if (sde_crtc->vblank_cb_count) {
-		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
-		u32 diff_ms = ktime_to_ms(diff);
-		u64 fps = diff_ms ? DIV_ROUND_CLOSEST(
-				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;
-
-		seq_printf(s,
-			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
-				fps, sde_crtc->vblank_cb_count,
-				ktime_to_ms(diff), sde_crtc->play_count);
-
-		/* reset time & count for next measurement */
-		sde_crtc->vblank_cb_count = 0;
-		sde_crtc->vblank_cb_time = ktime_set(0, 0);
-	}
-
-	seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_requested);
-
-	mutex_unlock(&sde_crtc->crtc_lock);
-
-	return 0;
-}
-
-static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, _sde_debugfs_status_show, inode->i_private);
-}
-
-static ssize_t _sde_crtc_misr_setup(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-	int rc;
-	char buf[MISR_BUFF_SIZE + 1];
-	u32 frame_count, enable;
-	size_t buff_copy;
-	struct sde_kms *sde_kms;
-
-	if (!file || !file->private_data)
-		return -EINVAL;
-
-	sde_crtc = file->private_data;
-	crtc = &sde_crtc->base;
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return -EINVAL;
-	}
-
-	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
-	if (copy_from_user(buf, user_buf, buff_copy)) {
-		SDE_ERROR("buffer copy failed\n");
-		return -EINVAL;
-	}
-
-	buf[buff_copy] = 0; /* end of string */
-
-	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
-		return -EINVAL;
-
-	if (sde_kms_is_secure_session_inprogress(sde_kms)) {
-		SDE_DEBUG("crtc:%d misr enable/disable not allowed\n",
-				DRMID(crtc));
-		return -EINVAL;
-	}
-
-	rc = _sde_crtc_power_enable(sde_crtc, true);
-	if (rc)
-		return rc;
-
-	sde_crtc->misr_enable_debugfs = enable;
-	sde_crtc_misr_setup(crtc, enable, frame_count);
-	_sde_crtc_power_enable(sde_crtc, false);
-
-	return count;
-}
-
-static ssize_t _sde_crtc_misr_read(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct drm_crtc *crtc;
-	struct sde_crtc *sde_crtc;
-	struct sde_kms *sde_kms;
-	struct sde_crtc_mixer *m;
-	int i = 0, rc;
-	ssize_t len = 0;
-	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
-
-	if (*ppos)
-		return 0;
-
-	if (!file || !file->private_data)
-		return -EINVAL;
-
-	sde_crtc = file->private_data;
-	crtc = &sde_crtc->base;
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms)
-		return -EINVAL;
-
-	rc = _sde_crtc_power_enable(sde_crtc, true);
-	if (rc)
-		return rc;
-
-	if (sde_kms_is_secure_session_inprogress(sde_kms)) {
-		SDE_DEBUG("crtc:%d misr read not allowed\n", DRMID(crtc));
-		goto end;
-	}
-
-	if (!sde_crtc->misr_enable_debugfs) {
-		len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-				"disabled\n");
-		goto buff_check;
-	}
-
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
-		u32 misr_value = 0;
-
-		m = &sde_crtc->mixers[i];
-		if (!m->hw_lm || !m->hw_lm->ops.collect_misr) {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"invalid\n");
-			SDE_ERROR("crtc:%d invalid misr ops\n", DRMID(crtc));
-			continue;
-		}
-
-		rc = m->hw_lm->ops.collect_misr(m->hw_lm, false, &misr_value);
-		if (rc) {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"invalid\n");
-			SDE_ERROR("crtc:%d failed to collect misr %d\n",
-					DRMID(crtc), rc);
-			continue;
-		} else {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"lm idx:%d\n", m->hw_lm->idx - LM_0);
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"0x%x\n", misr_value);
-		}
-	}
-
-buff_check:
-	if (count <= len) {
-		len = 0;
-		goto end;
-	}
-
-	if (copy_to_user(user_buff, buf, len)) {
-		len = -EFAULT;
-		goto end;
-	}
-
-	*ppos += len;   /* increase offset */
-
-end:
-	_sde_crtc_power_enable(sde_crtc, false);
-	return len;
-}
-
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)	\
-{									\
-	return single_open(file, __prefix ## _show, inode->i_private);	\
-}									\
-static const struct file_operations __prefix ## _fops = {		\
-	.owner = THIS_MODULE,						\
-	.open = __prefix ## _open,					\
-	.release = single_release,					\
-	.read = seq_read,						\
-	.llseek = seq_lseek,						\
-}
-
-static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
-{
-	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
-	int i;
-
-	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
-	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
-	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
-	seq_printf(s, "core_clk_rate: %llu\n",
-			sde_crtc->cur_perf.core_clk_rate);
-	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
-			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		seq_printf(s, "bw_ctl[%s]: %llu\n",
-				sde_power_handle_get_dbus_name(i),
-				sde_crtc->cur_perf.bw_ctl[i]);
-		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
-				sde_power_handle_get_dbus_name(i),
-				sde_crtc->cur_perf.max_per_pipe_ib[i]);
-	}
-
-	return 0;
-}
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
-
-static int _sde_debugfs_fence_status_show(struct seq_file *s, void *data)
-{
-	struct drm_crtc *crtc;
-	struct drm_plane *plane;
-	struct drm_connector *conn;
-	struct drm_mode_object *drm_obj;
-	struct sde_crtc *sde_crtc;
-	struct sde_crtc_state *cstate;
-	struct sde_fence_context *ctx;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_device *dev;
-
-	if (!s || !s->private)
-		return -EINVAL;
-
-	sde_crtc = s->private;
-	crtc = &sde_crtc->base;
-	dev = crtc->dev;
-	cstate = to_sde_crtc_state(crtc->state);
-
-	/* Dump input fence info */
-	seq_puts(s, "===Input fence===\n");
-	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		struct sde_plane_state *pstate;
-		struct dma_fence *fence;
-
-		pstate = to_sde_plane_state(plane->state);
-		if (!pstate)
-			continue;
-
-		seq_printf(s, "plane:%u stage:%d\n", plane->base.id,
-			pstate->stage);
-
-		fence = pstate->input_fence;
-		if (fence)
-			sde_fence_list_dump(fence, &s);
-	}
-
-	/* Dump release fence info */
-	seq_puts(s, "\n");
-	seq_puts(s, "===Release fence===\n");
-	ctx = sde_crtc->output_fence;
-	drm_obj = &crtc->base;
-	sde_debugfs_timeline_dump(ctx, drm_obj, &s);
-	seq_puts(s, "\n");
-
-	/* Dump retire fence info */
-	seq_puts(s, "===Retire fence===\n");
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter)
-		if (conn->state && conn->state->crtc == crtc &&
-				cstate->num_connectors < MAX_CONNECTORS) {
-			struct sde_connector *c_conn;
-
-			c_conn = to_sde_connector(conn);
-			ctx = c_conn->retire_fence;
-			drm_obj = &conn->base;
-			sde_debugfs_timeline_dump(ctx, drm_obj, &s);
-		}
-	drm_connector_list_iter_end(&conn_iter);
-	seq_puts(s, "\n");
-
-	return 0;
-}
-
-static int _sde_debugfs_fence_status(struct inode *inode, struct file *file)
-{
-	return single_open(file, _sde_debugfs_fence_status_show,
-				inode->i_private);
-}
-
-static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	struct sde_kms *sde_kms;
-
-	static const struct file_operations debugfs_status_fops = {
-		.open =		_sde_debugfs_status_open,
-		.read =		seq_read,
-		.llseek =	seq_lseek,
-		.release =	single_release,
-	};
-	static const struct file_operations debugfs_misr_fops = {
-		.open =		simple_open,
-		.read =		_sde_crtc_misr_read,
-		.write =	_sde_crtc_misr_setup,
-	};
-	static const struct file_operations debugfs_fps_fops = {
-		.open =		_sde_debugfs_fps_status,
-		.read =		seq_read,
-	};
-	static const struct file_operations debugfs_fence_fops = {
-		.open =		_sde_debugfs_fence_status,
-		.read =		seq_read,
-	};
-
-	if (!crtc)
-		return -EINVAL;
-	sde_crtc = to_sde_crtc(crtc);
-
-	sde_kms = _sde_crtc_get_kms(crtc);
-	if (!sde_kms)
-		return -EINVAL;
-
-	sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
-			crtc->dev->primary->debugfs_root);
-	if (!sde_crtc->debugfs_root)
-		return -ENOMEM;
-
-	/* don't error check these */
-	debugfs_create_file("status", 0400,
-			sde_crtc->debugfs_root,
-			sde_crtc, &debugfs_status_fops);
-	debugfs_create_file("state", 0400,
-			sde_crtc->debugfs_root,
-			&sde_crtc->base,
-			&sde_crtc_debugfs_state_fops);
-	debugfs_create_file("misr_data", 0600, sde_crtc->debugfs_root,
-					sde_crtc, &debugfs_misr_fops);
-	debugfs_create_file("fps", 0400, sde_crtc->debugfs_root,
-					sde_crtc, &debugfs_fps_fops);
-	debugfs_create_file("fence_status", 0400, sde_crtc->debugfs_root,
-					sde_crtc, &debugfs_fence_fops);
-
-	return 0;
-}
-
-static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-
-	if (!crtc)
-		return;
-	sde_crtc = to_sde_crtc(crtc);
-	debugfs_remove_recursive(sde_crtc->debugfs_root);
-}
-#else
-static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
-{
-	return 0;
-}
-
-static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static int sde_crtc_late_register(struct drm_crtc *crtc)
-{
-	return _sde_crtc_init_debugfs(crtc);
-}
-
-static void sde_crtc_early_unregister(struct drm_crtc *crtc)
-{
-	_sde_crtc_destroy_debugfs(crtc);
-}
-
-static const struct drm_crtc_funcs sde_crtc_funcs = {
-	.set_config = drm_atomic_helper_set_config,
-	.destroy = sde_crtc_destroy,
-	.page_flip = drm_atomic_helper_page_flip,
-	.atomic_set_property = sde_crtc_atomic_set_property,
-	.atomic_get_property = sde_crtc_atomic_get_property,
-	.reset = sde_crtc_reset,
-	.atomic_duplicate_state = sde_crtc_duplicate_state,
-	.atomic_destroy_state = sde_crtc_destroy_state,
-	.late_register = sde_crtc_late_register,
-	.early_unregister = sde_crtc_early_unregister,
-};
-
-static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
-	.mode_fixup = sde_crtc_mode_fixup,
-	.disable = sde_crtc_disable,
-	.atomic_enable = sde_crtc_enable,
-	.atomic_check = sde_crtc_atomic_check,
-	.atomic_begin = sde_crtc_atomic_begin,
-	.atomic_flush = sde_crtc_atomic_flush,
-};
-
-static void _sde_crtc_event_cb(struct kthread_work *work)
-{
-	struct sde_crtc_event *event;
-	struct sde_crtc *sde_crtc;
-	unsigned long irq_flags;
-
-	if (!work) {
-		SDE_ERROR("invalid work item\n");
-		return;
-	}
-
-	event = container_of(work, struct sde_crtc_event, kt_work);
-
-	/* set sde_crtc to NULL for static work structures */
-	sde_crtc = event->sde_crtc;
-	if (!sde_crtc)
-		return;
-
-	if (event->cb_func)
-		event->cb_func(&sde_crtc->base, event->usr);
-
-	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
-	list_add_tail(&event->list, &sde_crtc->event_free_list);
-	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
-}
-
-int sde_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(struct drm_crtc *crtc, void *usr),
-		void *usr, bool color_processing_event)
-{
-	unsigned long irq_flags;
-	struct sde_crtc *sde_crtc;
-	struct msm_drm_private *priv;
-	struct sde_crtc_event *event = NULL;
-	u32 crtc_id;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	priv = crtc->dev->dev_private;
-	crtc_id = drm_crtc_index(crtc);
-
-	/*
-	 * Obtain an event struct from the private cache. This event
-	 * queue may be called from ISR contexts, so use a private
-	 * cache to avoid calling any memory allocation functions.
-	 */
-	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
-	if (!list_empty(&sde_crtc->event_free_list)) {
-		event = list_first_entry(&sde_crtc->event_free_list,
-				struct sde_crtc_event, list);
-		list_del_init(&event->list);
-	}
-	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
-
-	if (!event)
-		return -ENOMEM;
-
-	/* populate event node */
-	event->sde_crtc = sde_crtc;
-	event->cb_func = func;
-	event->usr = usr;
-
-	/* queue new event request */
-	kthread_init_work(&event->kt_work, _sde_crtc_event_cb);
-	if (color_processing_event)
-		kthread_queue_work(&priv->pp_event_worker,
-			&event->kt_work);
-	else
-		kthread_queue_work(&priv->event_thread[crtc_id].worker,
-			&event->kt_work);
-
-	return 0;
-}
-
-static int _sde_crtc_init_events(struct sde_crtc *sde_crtc)
-{
-	int i, rc = 0;
-
-	if (!sde_crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	spin_lock_init(&sde_crtc->event_lock);
-
-	INIT_LIST_HEAD(&sde_crtc->event_free_list);
-	for (i = 0; i < SDE_CRTC_MAX_EVENT_COUNT; ++i)
-		list_add_tail(&sde_crtc->event_cache[i].list,
-				&sde_crtc->event_free_list);
-
-	return rc;
-}
-
-/*
- * __sde_crtc_idle_notify_work - signal idle timeout to user space
- */
-static void __sde_crtc_idle_notify_work(struct kthread_work *work)
-{
-	struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
-				idle_notify_work.work);
-	struct drm_crtc *crtc;
-	struct drm_event event;
-	int ret = 0;
-
-	if (!sde_crtc) {
-		SDE_ERROR("invalid sde crtc\n");
-	} else {
-		crtc = &sde_crtc->base;
-		event.type = DRM_EVENT_IDLE_NOTIFY;
-		event.length = sizeof(u32);
-		msm_mode_object_event_notify(&crtc->base, crtc->dev,
-				&event, (u8 *)&ret);
-
-		SDE_DEBUG("crtc[%d]: idle timeout notified\n", crtc->base.id);
-	}
-}
-
-/* initialize crtc */
-struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
-{
-	struct drm_crtc *crtc = NULL;
-	struct sde_crtc *sde_crtc = NULL;
-	struct msm_drm_private *priv = NULL;
-	struct sde_kms *kms = NULL;
-	int i, rc;
-
-	priv = dev->dev_private;
-	kms = to_sde_kms(priv->kms);
-
-	sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
-	if (!sde_crtc)
-		return ERR_PTR(-ENOMEM);
-
-	crtc = &sde_crtc->base;
-	crtc->dev = dev;
-
-	mutex_init(&sde_crtc->crtc_lock);
-	spin_lock_init(&sde_crtc->spin_lock);
-	atomic_set(&sde_crtc->frame_pending, 0);
-
-	sde_crtc->enabled = false;
-
-	/* Below parameters are for fps calculation for sysfs node */
-	sde_crtc->fps_info.fps_periodic_duration = DEFAULT_FPS_PERIOD_1_SEC;
-	sde_crtc->fps_info.time_buf = kmalloc_array(MAX_FRAME_COUNT,
-			sizeof(ktime_t), GFP_KERNEL);
-
-	if (!sde_crtc->fps_info.time_buf)
-		SDE_ERROR("invalid buffer\n");
-	else
-		memset(sde_crtc->fps_info.time_buf, 0,
-			sizeof(*(sde_crtc->fps_info.time_buf)));
-
-	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
-	INIT_LIST_HEAD(&sde_crtc->user_event_list);
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
-		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
-		list_add(&sde_crtc->frame_events[i].list,
-				&sde_crtc->frame_event_list);
-		kthread_init_work(&sde_crtc->frame_events[i].work,
-				sde_crtc_frame_event_work);
-	}
-
-	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs,
-				NULL);
-
-	drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
-
-	/* save user friendly CRTC name for later */
-	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
-
-	/* initialize event handling */
-	rc = _sde_crtc_init_events(sde_crtc);
-	if (rc) {
-		drm_crtc_cleanup(crtc);
-		kfree(sde_crtc);
-		return ERR_PTR(rc);
-	}
-
-	/* initialize output fence support */
-	sde_crtc->output_fence = sde_fence_init(sde_crtc->name, crtc->base.id);
-
-	if (IS_ERR(sde_crtc->output_fence)) {
-		rc = PTR_ERR(sde_crtc->output_fence);
-		SDE_ERROR("failed to init fence, %d\n", rc);
-		drm_crtc_cleanup(crtc);
-		kfree(sde_crtc);
-		return ERR_PTR(rc);
-	}
-
-	/* create CRTC properties */
-	msm_property_init(&sde_crtc->property_info, &crtc->base, dev,
-			priv->crtc_property, sde_crtc->property_data,
-			CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
-			sizeof(struct sde_crtc_state));
-
-	sde_crtc_install_properties(crtc, kms->catalog);
-
-	/* Install color processing properties */
-	sde_cp_crtc_init(crtc);
-	sde_cp_crtc_install_properties(crtc);
-
-	sde_crtc->cur_perf.llcc_active = false;
-	sde_crtc->new_perf.llcc_active = false;
-
-	kthread_init_delayed_work(&sde_crtc->idle_notify_work,
-					__sde_crtc_idle_notify_work);
-
-	SDE_DEBUG("crtc=%d new_llcc=%d, old_llcc=%d\n",
-		crtc->base.id,
-		sde_crtc->new_perf.llcc_active,
-		sde_crtc->cur_perf.llcc_active);
-
-	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
-	return crtc;
-}
-
-int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-	int rc = 0;
-
-	if (!dev || !dev->primary || !dev->primary->kdev || !crtc) {
-		SDE_ERROR("invalid input param(s)\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	sde_crtc = to_sde_crtc(crtc);
-	sde_crtc->sysfs_dev = device_create_with_groups(
-		dev->primary->kdev->class, dev->primary->kdev, 0, crtc,
-		sde_crtc_attr_groups, "sde-crtc-%d", crtc->index);
-	if (IS_ERR_OR_NULL(sde_crtc->sysfs_dev)) {
-		SDE_ERROR("crtc:%d sysfs create failed rc:%ld\n", crtc->index,
-			PTR_ERR(sde_crtc->sysfs_dev));
-		if (!sde_crtc->sysfs_dev)
-			rc = -EINVAL;
-		else
-			rc = PTR_ERR(sde_crtc->sysfs_dev);
-		goto end;
-	}
-
-	sde_crtc->vsync_event_sf = sysfs_get_dirent(
-		sde_crtc->sysfs_dev->kobj.sd, "vsync_event");
-	if (!sde_crtc->vsync_event_sf)
-		SDE_ERROR("crtc:%d vsync_event sysfs create failed\n",
-						crtc->base.id);
-
-end:
-	return rc;
-}
-
-static int _sde_crtc_event_enable(struct sde_kms *kms,
-		struct drm_crtc *crtc_drm, u32 event)
-{
-	struct sde_crtc *crtc = NULL;
-	struct sde_crtc_irq_info *node;
-	struct msm_drm_private *priv;
-	unsigned long flags;
-	bool found = false;
-	int ret, i = 0;
-	bool add_event = false;
-
-	crtc = to_sde_crtc(crtc_drm);
-	spin_lock_irqsave(&crtc->spin_lock, flags);
-	list_for_each_entry(node, &crtc->user_event_list, list) {
-		if (node->event == event) {
-			found = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&crtc->spin_lock, flags);
-
-	/* event already enabled */
-	if (found)
-		return 0;
-
-	node = NULL;
-	for (i = 0; i < ARRAY_SIZE(custom_events); i++) {
-		if (custom_events[i].event == event &&
-			custom_events[i].func) {
-			node = kzalloc(sizeof(*node), GFP_KERNEL);
-			if (!node)
-				return -ENOMEM;
-			INIT_LIST_HEAD(&node->list);
-			node->func = custom_events[i].func;
-			node->event = event;
-			node->state = IRQ_NOINIT;
-			spin_lock_init(&node->state_lock);
-			break;
-		}
-	}
-
-	if (!node) {
-		SDE_ERROR("unsupported event %x\n", event);
-		return -EINVAL;
-	}
-
-	priv = kms->dev->dev_private;
-	ret = 0;
-	if (crtc_drm->enabled) {
-		ret = sde_power_resource_enable(&priv->phandle,
-				kms->core_client, true);
-		if (ret) {
-			SDE_ERROR("failed to enable power resource %d\n", ret);
-			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
-			kfree(node);
-			return ret;
-		}
-
-		INIT_LIST_HEAD(&node->irq.list);
-
-		mutex_lock(&crtc->crtc_lock);
-		ret = node->func(crtc_drm, true, &node->irq);
-		if (!ret) {
-			spin_lock_irqsave(&crtc->spin_lock, flags);
-			list_add_tail(&node->list, &crtc->user_event_list);
-			add_event = true;
-			spin_unlock_irqrestore(&crtc->spin_lock, flags);
-		}
-		mutex_unlock(&crtc->crtc_lock);
-
-		sde_power_resource_enable(&priv->phandle, kms->core_client,
-				false);
-	}
-
-	if (add_event)
-		return 0;
-
-	if (!ret) {
-		spin_lock_irqsave(&crtc->spin_lock, flags);
-		list_add_tail(&node->list, &crtc->user_event_list);
-		spin_unlock_irqrestore(&crtc->spin_lock, flags);
-	} else {
-		kfree(node);
-	}
-
-	return ret;
-}
-
-static int _sde_crtc_event_disable(struct sde_kms *kms,
-		struct drm_crtc *crtc_drm, u32 event)
-{
-	struct sde_crtc *crtc = NULL;
-	struct sde_crtc_irq_info *node = NULL;
-	struct msm_drm_private *priv;
-	unsigned long flags;
-	bool found = false;
-	int ret;
-
-	crtc = to_sde_crtc(crtc_drm);
-	spin_lock_irqsave(&crtc->spin_lock, flags);
-	list_for_each_entry(node, &crtc->user_event_list, list) {
-		if (node->event == event) {
-			list_del(&node->list);
-			found = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&crtc->spin_lock, flags);
-
-	/* event already disabled */
-	if (!found)
-		return 0;
-
-	/**
-	 * crtc is disabled interrupts are cleared remove from the list,
-	 * no need to disable/de-register.
-	 */
-	if (!crtc_drm->enabled) {
-		kfree(node);
-		return 0;
-	}
-	priv = kms->dev->dev_private;
-	ret = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
-	if (ret) {
-		SDE_ERROR("failed to enable power resource %d\n", ret);
-		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
-		kfree(node);
-		return ret;
-	}
-
-	ret = node->func(crtc_drm, false, &node->irq);
-	kfree(node);
-	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
-	return ret;
-}
-
-int sde_crtc_register_custom_event(struct sde_kms *kms,
-		struct drm_crtc *crtc_drm, u32 event, bool en)
-{
-	struct sde_crtc *crtc = NULL;
-	int ret;
-
-	crtc = to_sde_crtc(crtc_drm);
-	if (!crtc || !kms || !kms->dev) {
-		DRM_ERROR("invalid sde_crtc %pK kms %pK dev %pK\n", crtc,
-			kms, ((kms) ? (kms->dev) : NULL));
-		return -EINVAL;
-	}
-
-	if (en)
-		ret = _sde_crtc_event_enable(kms, crtc_drm, event);
-	else
-		ret = _sde_crtc_event_disable(kms, crtc_drm, event);
-
-	return ret;
-}
-
-static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
-	bool en, struct sde_irq_callback *irq)
-{
-	return 0;
-}
-
-static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
-		struct sde_irq_callback *noirq)
-{
-	/*
-	 * IRQ object noirq is not being used here since there is
-	 * no crtc irq from pm event.
-	 */
-	return 0;
-}
-
-static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
-	bool en, struct sde_irq_callback *irq)
-{
-	return 0;
-}
-
-/**
- * sde_crtc_update_cont_splash_settings - update mixer settings
- *	and initial clk during device bootup for cont_splash use case
- * @crtc: Pointer to drm crtc structure
- */
-void sde_crtc_update_cont_splash_settings(struct drm_crtc *crtc)
-{
-	struct sde_kms *kms = NULL;
-	struct msm_drm_private *priv;
-	struct sde_crtc *sde_crtc;
-
-	if (!crtc || !crtc->state || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	priv = crtc->dev->dev_private;
-	kms = to_sde_kms(priv->kms);
-	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	_sde_crtc_setup_mixers(crtc);
-	crtc->enabled = true;
-
-	/* update core clk value for initial state with cont-splash */
-	sde_crtc = to_sde_crtc(crtc);
-	sde_crtc->cur_perf.core_clk_rate = kms->perf.max_core_clk_rate;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
deleted file mode 100644
index 0171f4c..0000000
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _SDE_CRTC_H_
-#define _SDE_CRTC_H_
-
-#include <linux/kthread.h>
-#include <linux/of_fdt.h>
-#include <drm/drm_crtc.h>
-#include "msm_prop.h"
-#include "sde_fence.h"
-#include "sde_kms.h"
-#include "sde_core_perf.h"
-#include "sde_hw_ds.h"
-
-#define SDE_CRTC_NAME_SIZE	12
-
-/* define the maximum number of in-flight frame events */
-/* Expand it to 2x for handling atleast 2 connectors safely */
-#define SDE_CRTC_FRAME_EVENT_SIZE	(4 * 2)
-
-/**
- * enum sde_crtc_client_type: crtc client type
- * @RT_CLIENT:	RealTime client like video/cmd mode display
- *              voting through apps rsc
- * @NRT_CLIENT:	Non-RealTime client like WB display
- *              voting through apps rsc
- * @RT_RSC_CLIENT:	Realtime display RSC voting client
- */
-enum sde_crtc_client_type {
-	RT_CLIENT,
-	NRT_CLIENT,
-	RT_RSC_CLIENT,
-};
-
-/**
- * enum sde_crtc_output_capture_point
- * @MIXER_OUT : capture mixer output
- * @DSPP_OUT : capture output of dspp
- */
-enum sde_crtc_output_capture_point {
-	CAPTURE_MIXER_OUT,
-	CAPTURE_DSPP_OUT
-};
-
-/**
- * enum sde_crtc_idle_pc_state: states of idle power collapse
- * @IDLE_PC_NONE: no-op
- * @IDLE_PC_ENABLE: enable idle power-collapse
- * @IDLE_PC_DISABLE: disable idle power-collapse
- */
-enum sde_crtc_idle_pc_state {
-	IDLE_PC_NONE,
-	IDLE_PC_ENABLE,
-	IDLE_PC_DISABLE,
-};
-
-/**
- * @connectors    : Currently associated drm connectors for retire event
- * @num_connectors: Number of associated drm connectors for retire event
- * @list:	event list
- */
-struct sde_crtc_retire_event {
-	struct drm_connector *connectors[MAX_CONNECTORS];
-	int num_connectors;
-	struct list_head list;
-};
-
-/**
- * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
- * @hw_lm:	LM HW Driver context
- * @hw_ctl:	CTL Path HW driver context
- * @hw_dspp:	DSPP HW driver context
- * @hw_ds:	DS HW driver context
- * @encoder:	Encoder attached to this lm & ctl
- * @mixer_op_mode: mixer blending operation mode
- */
-struct sde_crtc_mixer {
-	struct sde_hw_mixer *hw_lm;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_dspp *hw_dspp;
-	struct sde_hw_ds *hw_ds;
-	struct drm_encoder *encoder;
-	u32 mixer_op_mode;
-};
-
-/**
- * struct sde_crtc_frame_event_cb_data : info of drm objects of a frame event
- * @crtc:       pointer to drm crtc object registered for frame event
- * @connector:  pointer to drm connector which is source of frame event
- */
-struct sde_crtc_frame_event_cb_data {
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-};
-
-/**
- * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
- * @work:	base work structure
- * @crtc:	Pointer to crtc handling this event
- * @connector:  pointer to drm connector which is source of frame event
- * @list:	event list
- * @ts:		timestamp at queue entry
- * @event:	event identifier
- */
-struct sde_crtc_frame_event {
-	struct kthread_work work;
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	struct list_head list;
-	ktime_t ts;
-	u32 event;
-};
-
-/**
- * struct sde_crtc_event - event callback tracking structure
- * @list:     Linked list tracking node
- * @kt_work:  Kthread worker structure
- * @sde_crtc: Pointer to associated sde_crtc structure
- * @cb_func:  Pointer to callback function
- * @usr:      Pointer to user data to be provided to the callback
- */
-struct sde_crtc_event {
-	struct list_head list;
-	struct kthread_work kt_work;
-	void *sde_crtc;
-
-	void (*cb_func)(struct drm_crtc *crtc, void *usr);
-	void *usr;
-};
-/**
- * struct sde_crtc_fps_info - structure for measuring fps periodicity
- * @frame_count		: Total frames during configured periodic duration
- * @last_sampled_time_us: Stores the last ktime in microsecs when fps
- *                        was calculated
- * @measured_fps	: Last measured fps value
- * @fps_periodic_duration	: Duration in milliseconds to measure the fps.
- *                                Default value is 1 second.
- * @time_buf		: Buffer for storing ktime of the commits
- * @next_time_index	: index into time_buf for storing ktime for next commit
- */
-struct sde_crtc_fps_info {
-	u32 frame_count;
-	ktime_t last_sampled_time_us;
-	u32 measured_fps;
-	u32 fps_periodic_duration;
-	ktime_t *time_buf;
-	u32 next_time_index;
-};
-
-/**
- * struct sde_ltm_buffer - defines LTM buffer structure.
- * @fb: frm framebuffer for the buffer
- * @gem: drm gem handle for the buffer
- * @asapce : pointer to address space
- * @drm_fb_id: framebuffer id associated with this buffer
- * @offset: offset for alignment
- * @iova: device address
- * @kva: kernel virtual address
- * @node: list node for LTM buffer list;
- */
-struct sde_ltm_buffer {
-	struct drm_framebuffer *fb;
-	struct drm_gem_object *gem;
-	struct msm_gem_address_space *aspace;
-	u32 drm_fb_id;
-	u32 offset;
-	u64 iova;
-	void *kva;
-	struct list_head node;
-};
-
-/**
- * struct sde_crtc_misr_info - structure for misr information
- * @misr_enable : enable/disable flag
- * @misr_frame_count : Number of frames for misr calculation.
- */
-struct sde_crtc_misr_info {
-	bool misr_enable;
-	u32 misr_frame_count;
-};
-
-/*
- * Maximum number of free event structures to cache
- */
-#define SDE_CRTC_MAX_EVENT_COUNT	16
-
-/**
- * struct sde_crtc - virtualized CRTC data structure
- * @base          : Base drm crtc structure
- * @name          : ASCII description of this crtc
- * @num_ctls      : Number of ctl paths in use
- * @num_mixers    : Number of mixers in use
- * @mixers_swapped: Whether the mixers have been swapped for left/right update
- *                  especially in the case of DSC Merge.
- * @mixers        : List of active mixers
- * @event         : Pointer to last received drm vblank event. If there is a
- *                  pending vblank event, this will be non-null.
- * @vsync_count   : Running count of received vsync events
- * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
- * @property_info : Opaque structure for generic property support
- * @property_defaults : Array of default values for generic property support
- * @output_fence  : output release fence context
- * @stage_cfg     : H/w mixer stage configuration
- * @debugfs_root  : Parent of debugfs node
- * @vblank_cb_count : count of vblank callback since last reset
- * @play_count    : frame count between crtc enable and disable
- * @vblank_cb_time  : ktime at vblank count reset
- * @vblank_last_cb_time  : ktime at last vblank notification
- * @sysfs_dev  : sysfs device node for crtc
- * @vsync_event_sf : vsync event notifier sysfs device
- * @vblank_requested : whether the user has requested vblank events
- * @suspend         : whether or not a suspend operation is in progress
- * @enabled       : whether the SDE CRTC is currently enabled. updated in the
- *                  commit-thread, not state-swap time which is earlier, so
- *                  safe to make decisions on during VBLANK on/off work
- * @ds_reconfig   : force reconfiguration of the destination scaler block
- * @feature_list  : list of color processing features supported on a crtc
- * @active_list   : list of color processing features are active
- * @dirty_list    : list of color processing features are dirty
- * @ad_dirty      : list containing ad properties that are dirty
- * @ad_active     : list containing ad properties that are active
- * @crtc_lock     : crtc lock around create, destroy and access.
- * @frame_pending : Whether or not an update is pending
- * @frame_events  : static allocation of in-flight frame events
- * @frame_event_list : available frame event list
- * @spin_lock     : spin lock for frame event, transaction status, etc...
- * @event_thread  : Pointer to event handler thread
- * @event_worker  : Event worker queue
- * @event_cache   : Local cache of event worker structures
- * @event_free_list : List of available event structures
- * @event_lock    : Spinlock around event handling code
- * @misr_enable_sui : boolean entry indicates misr enable/disable status
- *                    for secure cases.
- * @misr_enable_debugfs : boolean entry indicates misr enable/disable status
- *                        from debugfs.
- * @misr_frame_count  : misr frame count provided by client
- * @misr_data     : store misr data before turning off the clocks.
- * @idle_notify_work: delayed worker to notify idle timeout to user space
- * @power_event   : registered power event handle
- * @cur_perf      : current performance committed to clock/bandwidth driver
- * @plane_mask_old: keeps track of the planes used in the previous commit
- * @frame_trigger_mode: frame trigger mode
- * @ltm_buffer_cnt  : number of ltm buffers
- * @ltm_buffers     : struct stores ltm buffer related data
- * @ltm_buf_free    : list of LTM buffers that are available
- * @ltm_buf_busy    : list of LTM buffers that are been used by HW
- * @ltm_hist_en     : flag to indicate whether LTM hist is enabled or not
- * @ltm_buffer_lock : muttx to protect ltm_buffers allcation and free
- * @ltm_lock        : Spinlock to protect ltm buffer_cnt, hist_en and ltm lists
- */
-struct sde_crtc {
-	struct drm_crtc base;
-	char name[SDE_CRTC_NAME_SIZE];
-
-	/* HW Resources reserved for the crtc */
-	u32 num_ctls;
-	u32 num_mixers;
-	bool mixers_swapped;
-	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
-
-	struct drm_pending_vblank_event *event;
-	u32 vsync_count;
-
-	struct msm_property_info property_info;
-	struct msm_property_data property_data[CRTC_PROP_COUNT];
-	struct drm_property_blob *blob_info;
-
-	/* output fence support */
-	struct sde_fence_context *output_fence;
-
-	struct sde_hw_stage_cfg stage_cfg;
-	struct dentry *debugfs_root;
-
-	u32 vblank_cb_count;
-	u64 play_count;
-	ktime_t vblank_cb_time;
-	ktime_t vblank_last_cb_time;
-	struct sde_crtc_fps_info fps_info;
-	struct device *sysfs_dev;
-	struct kernfs_node *vsync_event_sf;
-	bool vblank_requested;
-	bool suspend;
-	bool enabled;
-
-	bool ds_reconfig;
-	struct list_head feature_list;
-	struct list_head active_list;
-	struct list_head dirty_list;
-	struct list_head ad_dirty;
-	struct list_head ad_active;
-	struct list_head user_event_list;
-
-	struct mutex crtc_lock;
-	struct mutex crtc_cp_lock;
-
-	atomic_t frame_pending;
-	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
-	struct list_head frame_event_list;
-	spinlock_t spin_lock;
-
-	/* for handling internal event thread */
-	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
-	struct list_head event_free_list;
-	spinlock_t event_lock;
-	bool misr_enable_sui;
-	bool misr_enable_debugfs;
-	u32 misr_frame_count;
-	struct kthread_delayed_work idle_notify_work;
-
-	struct sde_power_event *power_event;
-
-	struct sde_core_perf_params cur_perf;
-	struct sde_core_perf_params new_perf;
-
-	u32 plane_mask_old;
-
-	/* blob for histogram data */
-	struct drm_property_blob *hist_blob;
-	enum frame_trigger_mode_type frame_trigger_mode;
-
-	u32 ltm_buffer_cnt;
-	struct sde_ltm_buffer *ltm_buffers[LTM_BUFFER_SIZE];
-	struct list_head ltm_buf_free;
-	struct list_head ltm_buf_busy;
-	bool ltm_hist_en;
-	struct drm_msm_ltm_cfg_param ltm_cfg;
-	struct mutex ltm_buffer_lock;
-	spinlock_t ltm_lock;
-};
-
-#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
-
-/**
- * struct sde_crtc_state - sde container for atomic crtc state
- * @base: Base drm crtc state structure
- * @connectors    : Currently associated drm connectors
- * @num_connectors: Number of associated drm connectors
- * @rsc_client    : sde rsc client when mode is valid
- * @is_ppsplit    : Whether current topology requires PPSplit special handling
- * @bw_control    : true if bw/clk controlled by core bw/clk properties
- * @bw_split_vote : true if bw controlled by llcc/dram bw properties
- * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
- *                  Origin top left of CRTC.
- * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
- *                  Origin top left of CRTC.
- * @lm_roi        : Current LM ROI, possibly sub-rectangle of mode.
- *                  Origin top left of CRTC.
- * @user_roi_list : List of user's requested ROIs as from set property
- * @property_state: Local storage for msm_prop properties
- * @property_values: Current crtc property values
- * @input_fence_timeout_ns : Cached input fence timeout, in ns
- * @num_dim_layers: Number of dim layers
- * @dim_layer: Dim layer configs
- * @num_ds: Number of destination scalers to be configured
- * @num_ds_enabled: Number of destination scalers enabled
- * @ds_dirty: Boolean to indicate if dirty or not
- * @ds_cfg: Destination scaler config
- * @scl3_lut_cfg: QSEED3 lut config
- * @new_perf: new performance state being requested
- */
-struct sde_crtc_state {
-	struct drm_crtc_state base;
-
-	struct drm_connector *connectors[MAX_CONNECTORS];
-	int num_connectors;
-	struct sde_rsc_client *rsc_client;
-	bool rsc_update;
-	bool bw_control;
-	bool bw_split_vote;
-
-	bool is_ppsplit;
-	struct sde_rect crtc_roi;
-	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
-	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
-	struct msm_roi_list user_roi_list;
-
-	struct msm_property_state property_state;
-	struct msm_property_value property_values[CRTC_PROP_COUNT];
-	uint64_t input_fence_timeout_ns;
-	uint32_t num_dim_layers;
-	struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
-	uint32_t num_ds;
-	uint32_t num_ds_enabled;
-	bool ds_dirty;
-	struct sde_hw_ds_cfg ds_cfg[SDE_MAX_DS_COUNT];
-	struct sde_hw_scaler3_lut_cfg scl3_lut_cfg;
-
-	struct sde_core_perf_params new_perf;
-};
-
-enum sde_crtc_irq_state {
-	IRQ_NOINIT,
-	IRQ_ENABLED,
-	IRQ_DISABLING,
-	IRQ_DISABLED,
-};
-
-/**
- * sde_crtc_irq_info - crtc interrupt info
- * @irq: interrupt callback
- * @event: event type of the interrupt
- * @func: function pointer to enable/disable the interrupt
- * @list: list of user customized event in crtc
- * @state: state of the interrupt
- * @state_lock: spin lock for interrupt state
- */
-struct sde_crtc_irq_info {
-	struct sde_irq_callback irq;
-	u32 event;
-	int (*func)(struct drm_crtc *crtc, bool en,
-			struct sde_irq_callback *irq);
-	struct list_head list;
-	enum sde_crtc_irq_state state;
-	spinlock_t state_lock;
-};
-
-#define to_sde_crtc_state(x) \
-	container_of(x, struct sde_crtc_state, base)
-
-/**
- * sde_crtc_get_property - query integer value of crtc property
- * @S: Pointer to crtc state
- * @X: Property index, from enum msm_mdp_crtc_property
- * Returns: Integer value of requested property
- */
-#define sde_crtc_get_property(S, X) \
-	((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)].value) : 0)
-
-/**
- * sde_crtc_get_mixer_width - get the mixer width
- * Mixer width will be same as panel width(/2 for split)
- * unless destination scaler feature is enabled
- */
-static inline int sde_crtc_get_mixer_width(struct sde_crtc *sde_crtc,
-	struct sde_crtc_state *cstate, struct drm_display_mode *mode)
-{
-	u32 mixer_width;
-
-	if (!sde_crtc || !cstate || !mode)
-		return 0;
-
-	if (cstate->num_ds_enabled)
-		mixer_width = cstate->ds_cfg[0].lm_width;
-	else
-		mixer_width = (sde_crtc->num_mixers == CRTC_DUAL_MIXERS ?
-			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
-
-	return mixer_width;
-}
-
-/**
- * sde_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height unless
- * destination scaler feature is enabled
- */
-static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
-		struct sde_crtc_state *cstate, struct drm_display_mode *mode)
-{
-	if (!sde_crtc || !cstate || !mode)
-		return 0;
-
-	return (cstate->num_ds_enabled ?
-			cstate->ds_cfg[0].lm_height : mode->vdisplay);
-}
-
-/**
- * sde_crtc_frame_pending - retun the number of pending frames
- * @crtc: Pointer to drm crtc object
- */
-static inline int sde_crtc_frame_pending(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc;
-
-	if (!crtc)
-		return -EINVAL;
-
-	sde_crtc = to_sde_crtc(crtc);
-	return atomic_read(&sde_crtc->frame_pending);
-}
-
-/**
- * sde_crtc_reset_hw - attempt hardware reset on errors
- * @crtc: Pointer to DRM crtc instance
- * @old_state: Pointer to crtc state for previous commit
- * @recovery_events: Whether or not recovery events are enabled
- * Returns: Zero if current commit should still be attempted
- */
-int sde_crtc_reset_hw(struct drm_crtc *crtc, struct drm_crtc_state *old_state,
-	bool recovery_events);
-
-/**
- * sde_crtc_request_frame_reset - requests for next frame reset
- * @crtc: Pointer to drm crtc object
- */
-static inline int sde_crtc_request_frame_reset(struct drm_crtc *crtc)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
-	if (sde_crtc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START)
-		sde_crtc_reset_hw(crtc, crtc->state, false);
-
-	return 0;
-}
-
-/**
- * sde_crtc_vblank - enable or disable vblanks for this crtc
- * @crtc: Pointer to drm crtc object
- * @en: true to enable vblanks, false to disable
- */
-int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
-
-/**
- * sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
- * @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
- */
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state);
-
-/**
- * sde_crtc_prepare_commit - callback to prepare for output fences
- * @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
- */
-void sde_crtc_prepare_commit(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state);
-
-/**
- * sde_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
- */
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state);
-
-/**
- * sde_crtc_init - create a new crtc object
- * @dev: sde device
- * @plane: base plane
- * @Return: new crtc object or error
- */
-struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
-
-/**
- * sde_crtc_post_init - update crtc object with post initialization. It
- *      can update the debugfs, sysfs, entires.
- * @dev: sde device
- * @crtc: Pointer to drm crtc structure
- */
-int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc);
-
-/**
- * sde_crtc_complete_flip - complete flip for clients
- * @crtc: Pointer to drm crtc object
- * @file: client to cancel's file handle
- */
-void sde_crtc_complete_flip(struct drm_crtc *crtc, struct drm_file *file);
-
-/**
- * sde_crtc_register_custom_event - api for enabling/disabling crtc event
- * @kms: Pointer to sde_kms
- * @crtc_drm: Pointer to crtc object
- * @event: Event that client is interested
- * @en: Flag to enable/disable the event
- */
-int sde_crtc_register_custom_event(struct sde_kms *kms,
-		struct drm_crtc *crtc_drm, u32 event, bool en);
-
-/**
- * sde_crtc_get_intf_mode - get interface mode of the given crtc
- * @crtc: Pointert to crtc
- */
-enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
-
-/**
- * sde_crtc_get_fps_mode - get frame rate of the given crtc
- * @crtc: Pointert to crtc
- */
-u32 sde_crtc_get_fps_mode(struct drm_crtc *crtc);
-
-/**
- * sde_crtc_get_client_type - check the crtc type- rt, nrt, rsc, etc.
- * @crtc: Pointer to crtc
- */
-static inline enum sde_crtc_client_type sde_crtc_get_client_type(
-						struct drm_crtc *crtc)
-{
-	struct sde_crtc_state *cstate =
-			crtc ? to_sde_crtc_state(crtc->state) : NULL;
-
-	if (!cstate)
-		return NRT_CLIENT;
-
-	return sde_crtc_get_intf_mode(crtc) == INTF_MODE_WB_LINE ? NRT_CLIENT :
-			(cstate->rsc_client ? RT_RSC_CLIENT : RT_CLIENT);
-}
-
-/**
- * sde_crtc_is_enabled - check if sde crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
-{
-	return crtc ? crtc->enabled : false;
-}
-
-/**
- * sde_crtc_is_reset_required - validate the reset request based on the
- *	pm_suspend and crtc's active status. crtc's are left active
- *	on pm_suspend during LP1/LP2 states, as the display is still
- *	left ON. Avoid reset for the subsequent pm_resume in such cases.
- * @crtc: Pointer to crtc
- * return: false if in suspend state and crtc active, true otherwise
- */
-static inline bool sde_crtc_is_reset_required(struct drm_crtc *crtc)
-{
-	/*
-	 * reset is required even when there is no crtc_state as it is required
-	 * to create the initial state object
-	 */
-	if (!crtc || !crtc->state)
-		return true;
-
-	/* reset not required if crtc is active during suspend state */
-	if (sde_kms_is_suspend_state(crtc->dev) && crtc->state->active)
-		return false;
-
-	return true;
-}
-
-/**
- * sde_crtc_event_queue - request event callback
- * @crtc: Pointer to drm crtc structure
- * @func: Pointer to callback function
- * @usr: Pointer to user data to be passed to callback
- * @color_processing_event: True if color processing event
- * Returns: Zero on success
- */
-int sde_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(struct drm_crtc *crtc, void *usr),
-		void *usr, bool color_processing_event);
-
-/**
- * sde_crtc_get_crtc_roi - retrieve the crtc_roi from the given state object
- *	used to allow the planes to adjust their final lm out_xy value in the
- *	case of partial update
- * @crtc_state: Pointer to crtc state
- * @crtc_roi: Output pointer to crtc roi in the given state
- */
-void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
-		const struct sde_rect **crtc_roi);
-
-/**
- * sde_crtc_is_crtc_roi_dirty - retrieve whether crtc_roi was updated this frame
- *	Note: Only use during atomic_check since dirty properties may be popped
- * @crtc_state: Pointer to crtc state
- * Return: true if roi is dirty, false otherwise
- */
-bool sde_crtc_is_crtc_roi_dirty(struct drm_crtc_state *state);
-
-/** sde_crt_get_secure_level - retrieve the secure level from the give state
- *	object, this is used to determine the secure state of the crtc
- * @crtc : Pointer to drm crtc structure
- * @usr: Pointer to drm crtc state
- * return: secure_level
- */
-static inline int sde_crtc_get_secure_level(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
-{
-	if (!crtc || !state)
-		return -EINVAL;
-
-	return sde_crtc_get_property(to_sde_crtc_state(state),
-			CRTC_PROP_SECURITY_LEVEL);
-}
-
-/**
- * sde_crtc_get_secure_transition - determines the operations to be
- * performed before transitioning to secure state
- * This function should be called after swapping the new state
- * @crtc: Pointer to drm crtc structure
- * @old_crtc_state: Poniter to previous CRTC state
- * Returns the bitmask of operations need to be performed, -Error in
- * case of error cases
- */
-int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state,
-		bool old_valid_fb);
-
-/**
- * sde_crtc_find_plane_fb_modes - finds the modes of all planes attached
- *                                  to crtc
- * @crtc: Pointer to DRM crtc object
- * @fb_ns: number of non secure planes
- * @fb_sec: number of secure-playback planes
- * @fb_sec_dir: number of secure-ui/secure-camera planes
- */
-int sde_crtc_find_plane_fb_modes(struct drm_crtc *crtc,
-		uint32_t *fb_ns, uint32_t *fb_sec, uint32_t *fb_sec_dir);
-
-/**
- * sde_crtc_state_find_plane_fb_modes - finds the modes of all planes attached
- *                                       to the crtc state
- * @crtc_state: Pointer to DRM crtc state object
- * @fb_ns: number of non secure planes
- * @fb_sec: number of secure-playback planes
- * @fb_sec_dir: number of secure-ui/secure-camera planes
- */
-int sde_crtc_state_find_plane_fb_modes(struct drm_crtc_state *state,
-		uint32_t *fb_ns, uint32_t *fb_sec, uint32_t *fb_sec_dir);
-
-/**
- * sde_crtc_secure_ctrl - Initiates the transition between secure and
- *                          non-secure world
- * @crtc: Pointer to crtc
- * @post_commit: if this operation is triggered after commit
- */
-int sde_crtc_secure_ctrl(struct drm_crtc *crtc, bool post_commit);
-
-/**
- * sde_crtc_helper_reset_properties - reset properties to default values in the
- *	given DRM CRTC state object
- * @crtc: Pointer to DRM crtc object
- * @crtc_state: Pointer to DRM crtc state object
- * Returns: 0 on success, negative errno on failure
- */
-int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc,
-		struct drm_crtc_state *crtc_state);
-
-/**
- * sde_crtc_timeline_status - current buffer timeline status
- * @crtc: Pointer to crtc
- */
-void sde_crtc_timeline_status(struct drm_crtc *crtc);
-
-/**
- * sde_crtc_update_cont_splash_settings - update mixer settings
- *	during device bootup for cont_splash use case
- * @crtc: Pointer to drm crtc structure
- */
-void sde_crtc_update_cont_splash_settings(
-		struct drm_crtc *crtc);
-
-/**
- * sde_crtc_misr_setup - to configure and enable/disable MISR
- * @crtc: Pointer to drm crtc structure
- * @enable: boolean to indicate enable/disable misr
- * @frame_count: frame_count to be configured
- */
-void sde_crtc_misr_setup(struct drm_crtc *crtc, bool enable, u32 frame_count);
-
-/**
- * sde_crtc_get_misr_info - to configure and enable/disable MISR
- * @crtc: Pointer to drm crtc structure
- * @crtc_misr_info: Pointer to crtc misr info structure
- */
-void sde_crtc_get_misr_info(struct drm_crtc *crtc,
-		struct sde_crtc_misr_info *crtc_misr_info);
-
-#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
deleted file mode 100644
index 41557a5..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ /dev/null
@@ -1,5830 +0,0 @@
-/*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/kthread.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/sde_rsc.h>
-
-#include "msm_drv.h"
-#include "sde_kms.h"
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_intf.h"
-#include "sde_hw_ctl.h"
-#include "sde_formats.h"
-#include "sde_encoder_phys.h"
-#include "sde_power_handle.h"
-#include "sde_hw_dsc.h"
-#include "sde_crtc.h"
-#include "sde_trace.h"
-#include "sde_core_irq.h"
-
-#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
-		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-
-#define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
-		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-
-#define SDE_DEBUG_PHYS(p, fmt, ...) SDE_DEBUG("enc%d intf%d pp%d " fmt,\
-		(p) ? (p)->parent->base.id : -1, \
-		(p) ? (p)->intf_idx - INTF_0 : -1, \
-		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
-		##__VA_ARGS__)
-
-#define SDE_ERROR_PHYS(p, fmt, ...) SDE_ERROR("enc%d intf%d pp%d " fmt,\
-		(p) ? (p)->parent->base.id : -1, \
-		(p) ? (p)->intf_idx - INTF_0 : -1, \
-		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
-		##__VA_ARGS__)
-
-/*
- * Two to anticipate panels that can do cmd/vid dynamic switching
- * plan is to create all possible physical encoder types, and switch between
- * them at runtime
- */
-#define NUM_PHYS_ENCODER_TYPES 2
-
-#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
-	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
-
-#define MAX_CHANNELS_PER_ENC 2
-
-#define MISR_BUFF_SIZE			256
-
-#define IDLE_SHORT_TIMEOUT	1
-
-#define EVT_TIME_OUT_SPLIT 2
-
-/* Maximum number of VSYNC wait attempts for RSC state transition */
-#define MAX_RSC_WAIT	5
-
-#define TOPOLOGY_DUALPIPE_MERGE_MODE(x) \
-		(((x) == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE) || \
-		((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE) || \
-		((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC))
-
-/**
- * enum sde_enc_rc_events - events for resource control state machine
- * @SDE_ENC_RC_EVENT_KICKOFF:
- *	This event happens at NORMAL priority.
- *	Event that signals the start of the transfer. When this event is
- *	received, enable MDP/DSI core clocks and request RSC with CMD state.
- *	Regardless of the previous state, the resource should be in ON state
- *	at the end of this event.
- * @SDE_ENC_RC_EVENT_FRAME_DONE:
- *	This event happens at INTERRUPT level.
- *	Event signals the end of the data transfer after the PP FRAME_DONE
- *	event. At the end of this event, a delayed work is scheduled to go to
- *	IDLE_PC state after IDLE_POWERCOLLAPSE_DURATION time.
- * @SDE_ENC_RC_EVENT_PRE_STOP:
- *	This event happens at NORMAL priority.
- *	This event, when received during the ON state, set RSC to IDLE, and
- *	and leave the RC STATE in the PRE_OFF state.
- *	It should be followed by the STOP event as part of encoder disable.
- *	If received during IDLE or OFF states, it will do nothing.
- * @SDE_ENC_RC_EVENT_STOP:
- *	This event happens at NORMAL priority.
- *	When this event is received, disable all the MDP/DSI core clocks, and
- *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
- *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
- *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
- *	Resource state should be in OFF at the end of the event.
- * @SDE_ENC_RC_EVENT_PRE_MODESET:
- *	This event happens at NORMAL priority from a work item.
- *	Event signals that there is a seamless mode switch is in prgoress. A
- *	client needs to turn of only irq - leave clocks ON to reduce the mode
- *	switch latency.
- * @SDE_ENC_RC_EVENT_POST_MODESET:
- *	This event happens at NORMAL priority from a work item.
- *	Event signals that seamless mode switch is complete and resources are
- *	acquired. Clients wants to turn on the irq again and update the rsc
- *	with new vtotal.
- * @SDE_ENC_RC_EVENT_ENTER_IDLE:
- *	This event happens at NORMAL priority from a work item.
- *	Event signals that there were no frame updates for
- *	IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
- *      and request RSC with IDLE state and change the resource state to IDLE.
- * @SDE_ENC_RC_EVENT_EARLY_WAKEUP:
- *	This event is triggered from the input event thread when touch event is
- *	received from the input device. On receiving this event,
- *      - If the device is in SDE_ENC_RC_STATE_IDLE state, it turns ON the
-	  clocks and enable RSC.
- *      - If the device is in SDE_ENC_RC_STATE_ON state, it resets the delayed
- *        off work since a new commit is imminent.
- */
-enum sde_enc_rc_events {
-	SDE_ENC_RC_EVENT_KICKOFF = 1,
-	SDE_ENC_RC_EVENT_FRAME_DONE,
-	SDE_ENC_RC_EVENT_PRE_STOP,
-	SDE_ENC_RC_EVENT_STOP,
-	SDE_ENC_RC_EVENT_PRE_MODESET,
-	SDE_ENC_RC_EVENT_POST_MODESET,
-	SDE_ENC_RC_EVENT_ENTER_IDLE,
-	SDE_ENC_RC_EVENT_EARLY_WAKEUP,
-};
-
-/*
- * enum sde_enc_rc_states - states that the resource control maintains
- * @SDE_ENC_RC_STATE_OFF: Resource is in OFF state
- * @SDE_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
- * @SDE_ENC_RC_STATE_ON: Resource is in ON state
- * @SDE_ENC_RC_STATE_MODESET: Resource is in modeset state
- * @SDE_ENC_RC_STATE_IDLE: Resource is in IDLE state
- */
-enum sde_enc_rc_states {
-	SDE_ENC_RC_STATE_OFF,
-	SDE_ENC_RC_STATE_PRE_OFF,
-	SDE_ENC_RC_STATE_ON,
-	SDE_ENC_RC_STATE_MODESET,
-	SDE_ENC_RC_STATE_IDLE
-};
-
-/**
- * struct sde_encoder_virt - virtual encoder. Container of one or more physical
- *	encoders. Virtual encoder manages one "logical" display. Physical
- *	encoders manage one intf block, tied to a specific panel/sub-panel.
- *	Virtual encoder defers as much as possible to the physical encoders.
- *	Virtual encoder registers itself with the DRM Framework as the encoder.
- * @base:		drm_encoder base class for registration with DRM
- * @enc_spin_lock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
- * @bus_scaling_client:	Client handle to the bus scaling interface
- * @te_source:		vsync source pin information
- * @num_phys_encs:	Actual number of physical encoders contained.
- * @phys_encs:		Container of physical encoders managed.
- * @cur_master:		Pointer to the current master in this mode. Optimization
- *			Only valid after enable. Cleared as disable.
- * @hw_pp		Handle to the pingpong blocks used for the display. No.
- *			pingpong blocks can be different than num_phys_encs.
- * @hw_dsc:		Array of DSC block handles used for the display.
- * @dirty_dsc_ids:	Cached dsc indexes for dirty DSC blocks needing flush
- * @intfs_swapped	Whether or not the phys_enc interfaces have been swapped
- *			for partial update right-only cases, such as pingpong
- *			split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
- *			notification of the VBLANK
- * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
- * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
- *				all CTL paths
- * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
- * @debugfs_root:		Debug file system root file node
- * @enc_lock:			Lock around physical encoder create/destroy and
-				access.
- * @frame_done_cnt:		Atomic counter for tracking which phys_enc is
- *				done with frame processing.
- * @crtc_frame_event_cb:	callback handler for frame event
- * @crtc_frame_event_cb_data:	callback handler private data
- * @vsync_event_timer:		vsync timer
- * @rsc_client:			rsc client pointer
- * @rsc_state_init:		boolean to indicate rsc config init
- * @disp_info:			local copy of msm_display_info struct
- * @misr_enable:		misr enable/disable status
- * @misr_frame_count:		misr frame count before start capturing the data
- * @idle_pc_enabled:		indicate if idle power collapse is enabled
- *				currently. This can be controlled by user-mode
- * @rc_lock:			resource control mutex lock to protect
- *				virt encoder over various state changes
- * @rc_state:			resource controller state
- * @delayed_off_work:		delayed worker to schedule disabling of
- *				clks and resources after IDLE_TIMEOUT time.
- * @vsync_event_work:		worker to handle vsync event for autorefresh
- * @input_event_work:		worker to handle input device touch events
- * @esd_trigger_work:		worker to handle esd trigger events
- * @input_handler:			handler for input device events
- * @topology:                   topology of the display
- * @vblank_enabled:		boolean to track userspace vblank vote
- * @idle_pc_restore:		flag to indicate idle_pc_restore happened
- * @frame_trigger_mode:		frame trigger mode indication for command
- *				mode display
- * @dynamic_hdr_updated:	flag to indicate if mempool was programmed
- * @rsc_config:			rsc configuration for display vtotal, fps, etc.
- * @cur_conn_roi:		current connector roi
- * @prv_conn_roi:		previous connector roi to optimize if unchanged
- * @crtc			pointer to drm_crtc
- * @recovery_events_enabled:	status of hw recovery feature enable by client
- * @elevated_ahb_vote:		increase AHB bus speed for the first frame
- *				after power collapse
- * @pm_qos_cpu_req:		pm_qos request for cpu frequency
- * @mode_info:                  stores the current mode information
- */
-struct sde_encoder_virt {
-	struct drm_encoder base;
-	spinlock_t enc_spinlock;
-	struct mutex vblank_ctl_lock;
-	uint32_t bus_scaling_client;
-
-	uint32_t display_num_of_h_tiles;
-	uint32_t te_source;
-
-	unsigned int num_phys_encs;
-	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
-	struct sde_encoder_phys *cur_master;
-	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
-	enum sde_dsc dirty_dsc_ids[MAX_CHANNELS_PER_ENC];
-
-	bool intfs_swapped;
-
-	void (*crtc_vblank_cb)(void *data);
-	void *crtc_vblank_cb_data;
-
-	struct dentry *debugfs_root;
-	struct mutex enc_lock;
-	atomic_t frame_done_cnt[MAX_PHYS_ENCODERS_PER_VIRTUAL];
-	void (*crtc_frame_event_cb)(void *data, u32 event);
-	struct sde_crtc_frame_event_cb_data crtc_frame_event_cb_data;
-
-	struct timer_list vsync_event_timer;
-
-	struct sde_rsc_client *rsc_client;
-	bool rsc_state_init;
-	struct msm_display_info disp_info;
-	bool misr_enable;
-	u32 misr_frame_count;
-
-	bool idle_pc_enabled;
-	struct mutex rc_lock;
-	enum sde_enc_rc_states rc_state;
-	struct kthread_delayed_work delayed_off_work;
-	struct kthread_work vsync_event_work;
-	struct kthread_work input_event_work;
-	struct kthread_work esd_trigger_work;
-	struct input_handler *input_handler;
-	struct msm_display_topology topology;
-	bool vblank_enabled;
-	bool idle_pc_restore;
-	enum frame_trigger_mode_type frame_trigger_mode;
-	bool dynamic_hdr_updated;
-
-	struct sde_rsc_cmd_config rsc_config;
-	struct sde_rect cur_conn_roi;
-	struct sde_rect prv_conn_roi;
-	struct drm_crtc *crtc;
-
-	bool recovery_events_enabled;
-	bool elevated_ahb_vote;
-	struct pm_qos_request pm_qos_cpu_req;
-	struct msm_mode_info mode_info;
-};
-
-#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
-
-void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	int i;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable) {
-			SDE_EVT32(DRMID(drm_enc), enable);
-			phys->hw_ctl->ops.uidle_enable(phys->hw_ctl, enable);
-		}
-	}
-}
-
-static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	int cpu;
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	cpu_mask = sde_kms->catalog->perf.cpu_mask;
-	cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
-
-	req = &sde_enc->pm_qos_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
-
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
-}
-
-static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
-}
-
-static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_compression_info *comp_info;
-
-	if (!drm_enc)
-		return false;
-
-	sde_enc  = to_sde_encoder_virt(drm_enc);
-	comp_info = &sde_enc->mode_info.comp_info;
-
-	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
-}
-
-static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
-	s64 timeout_ms, struct sde_encoder_wait_info *info)
-{
-	int rc = 0;
-	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
-	ktime_t cur_ktime;
-	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
-
-	do {
-		rc = wait_event_timeout(*(info->wq),
-			atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
-		cur_ktime = ktime_get();
-
-		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
-			timeout_ms, atomic_read(info->atomic_cnt));
-	/* If we timed out, counter is valid and time is less, wait again */
-	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
-			(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
-
-	return rc;
-}
-
-bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
-{
-	enum sde_rm_topology_name topology;
-	struct sde_encoder_virt *sde_enc;
-	struct drm_connector *drm_conn;
-
-	if (!drm_enc)
-		return false;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->cur_master)
-		return false;
-
-	drm_conn = sde_enc->cur_master->connector;
-	if (!drm_conn)
-		return false;
-
-	topology = sde_connector_get_topology_name(drm_conn);
-	if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
-		return true;
-
-	return false;
-}
-
-bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-
-	return sde_enc && sde_enc->disp_info.is_primary;
-}
-
-int sde_encoder_in_cont_splash(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-
-	return sde_enc && sde_enc->cur_master &&
-		sde_enc->cur_master->cont_splash_enabled;
-}
-
-static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
-								bool enable)
-{
-	struct drm_encoder *drm_enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde enc\n");
-		return -EINVAL;
-	}
-
-	drm_enc = &sde_enc->base;
-	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("drm device invalid\n");
-		return -EINVAL;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-
-	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-									enable);
-}
-
-void sde_encoder_helper_report_irq_timeout(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx)
-{
-	SDE_EVT32(DRMID(phys_enc->parent),
-			phys_enc->intf_idx - INTF_0,
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			intr_idx);
-	SDE_ERROR_PHYS(phys_enc, "irq %d timeout\n", intr_idx);
-
-	if (phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_ERROR);
-}
-
-int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx,
-		struct sde_encoder_wait_info *wait_info)
-{
-	struct sde_encoder_irq *irq;
-	u32 irq_status;
-	int ret, i;
-
-	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-	irq = &phys_enc->irq[intr_idx];
-
-	/* note: do master / slave checking outside */
-
-	/* return EWOULDBLOCK since we know the wait isn't necessary */
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR_PHYS(phys_enc, "encoder is disabled\n");
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx, intr_idx, SDE_EVTLOG_ERROR);
-		return -EWOULDBLOCK;
-	}
-
-	if (irq->irq_idx < 0) {
-		SDE_DEBUG_PHYS(phys_enc, "irq %s hw %d disabled, skip wait\n",
-				irq->name, irq->hw_idx);
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx);
-		return 0;
-	}
-
-	SDE_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
-			atomic_read(wait_info->atomic_cnt));
-	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-		irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
-		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
-
-	/*
-	 * Some module X may disable interrupt for longer duration
-	 * and it may trigger all interrupts including timer interrupt
-	 * when module X again enable the interrupt.
-	 * That may cause interrupt wait timeout API in this API.
-	 * It is handled by split the wait timer in two halves.
-	 */
-
-	for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
-		ret = _sde_encoder_wait_timeout(DRMID(phys_enc->parent),
-				irq->hw_idx,
-				(wait_info->timeout_ms/EVT_TIME_OUT_SPLIT),
-				wait_info);
-		if (ret)
-			break;
-	}
-
-	if (ret <= 0) {
-		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				irq->irq_idx, true);
-		if (irq_status) {
-			unsigned long flags;
-
-			SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
-				irq->hw_idx, irq->irq_idx,
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				atomic_read(wait_info->atomic_cnt));
-			SDE_DEBUG_PHYS(phys_enc,
-					"done but irq %d not triggered\n",
-					irq->irq_idx);
-			local_irq_save(flags);
-			irq->cb.func(phys_enc, irq->irq_idx);
-			local_irq_restore(flags);
-			ret = 0;
-		} else {
-			ret = -ETIMEDOUT;
-			SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
-				irq->hw_idx, irq->irq_idx,
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				atomic_read(wait_info->atomic_cnt), irq_status,
-				SDE_EVTLOG_ERROR);
-		}
-	} else {
-		ret = 0;
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-			irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
-			atomic_read(wait_info->atomic_cnt));
-	}
-
-	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-		irq->irq_idx, ret, phys_enc->hw_pp->idx - PINGPONG_0,
-		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_EXIT);
-
-	return ret;
-}
-
-int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx)
-{
-	struct sde_encoder_irq *irq;
-	int ret = 0;
-
-	if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-	irq = &phys_enc->irq[intr_idx];
-
-	if (irq->irq_idx >= 0) {
-		SDE_DEBUG_PHYS(phys_enc,
-				"skipping already registered irq %s type %d\n",
-				irq->name, irq->intr_type);
-		return 0;
-	}
-
-	irq->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			irq->intr_type, irq->hw_idx);
-	if (irq->irq_idx < 0) {
-		SDE_ERROR_PHYS(phys_enc,
-			"failed to lookup IRQ index for %s type:%d\n",
-			irq->name, irq->intr_type);
-		return -EINVAL;
-	}
-
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms, irq->irq_idx,
-			&irq->cb);
-	if (ret) {
-		SDE_ERROR_PHYS(phys_enc,
-			"failed to register IRQ callback for %s\n",
-			irq->name);
-		irq->irq_idx = -EINVAL;
-		return ret;
-	}
-
-	ret = sde_core_irq_enable(phys_enc->sde_kms, &irq->irq_idx, 1);
-	if (ret) {
-		SDE_ERROR_PHYS(phys_enc,
-			"enable IRQ for intr:%s failed, irq_idx %d\n",
-			irq->name, irq->irq_idx);
-
-		sde_core_irq_unregister_callback(phys_enc->sde_kms,
-				irq->irq_idx, &irq->cb);
-
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx, SDE_EVTLOG_ERROR);
-		irq->irq_idx = -EINVAL;
-		return ret;
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
-	SDE_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
-			irq->name, irq->irq_idx);
-
-	return ret;
-}
-
-int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx)
-{
-	struct sde_encoder_irq *irq;
-	int ret;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-	irq = &phys_enc->irq[intr_idx];
-
-	/* silently skip irqs that weren't registered */
-	if (irq->irq_idx < 0) {
-		SDE_ERROR(
-			"extra unregister irq, enc%d intr_idx:0x%x hw_idx:0x%x irq_idx:0x%x\n",
-				DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx);
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx, SDE_EVTLOG_ERROR);
-		return 0;
-	}
-
-	ret = sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
-	if (ret)
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx, ret, SDE_EVTLOG_ERROR);
-
-	ret = sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
-			&irq->cb);
-	if (ret)
-		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
-				irq->irq_idx, ret, SDE_EVTLOG_ERROR);
-
-	SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
-	SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
-
-	irq->irq_idx = -EINVAL;
-
-	return 0;
-}
-
-void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i = 0;
-
-	if (!hw_res || !drm_enc || !conn_state) {
-		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
-				!drm_enc, !hw_res, !conn_state);
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	/* Query resources used by phys encs, expected to be without overlap */
-	memset(hw_res, 0, sizeof(*hw_res));
-	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.get_hw_resources)
-			phys->ops.get_hw_resources(phys, hw_res, conn_state);
-	}
-
-	sde_connector_get_mode_info(conn_state, &sde_enc->mode_info);
-	hw_res->topology = sde_enc->mode_info.topology;
-	hw_res->is_primary = sde_enc->disp_info.is_primary;
-}
-
-void sde_encoder_destroy(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i = 0;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	mutex_lock(&sde_enc->enc_lock);
-	sde_rsc_client_destroy(sde_enc->rsc_client);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.destroy) {
-			phys->ops.destroy(phys);
-			--sde_enc->num_phys_encs;
-			sde_enc->phys_encs[i] = NULL;
-		}
-	}
-
-	if (sde_enc->num_phys_encs)
-		SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
-				sde_enc->num_phys_encs);
-	sde_enc->num_phys_encs = 0;
-	mutex_unlock(&sde_enc->enc_lock);
-
-	drm_encoder_cleanup(drm_enc);
-	mutex_destroy(&sde_enc->enc_lock);
-
-	kfree(sde_enc->input_handler);
-	sde_enc->input_handler = NULL;
-
-	kfree(sde_enc);
-}
-
-void sde_encoder_helper_update_intf_cfg(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_hw_intf_cfg_v1 *intf_cfg;
-	enum sde_3d_blend_mode mode_3d;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid arg, encoder %d\n", !phys_enc);
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(phys_enc->parent);
-	intf_cfg = &sde_enc->cur_master->intf_cfg_v1;
-
-	SDE_DEBUG_ENC(sde_enc,
-			"intf_cfg updated for %d at idx %d\n",
-			phys_enc->intf_idx,
-			intf_cfg->intf_count);
-
-
-	/* setup interface configuration */
-	if (intf_cfg->intf_count >= MAX_INTF_PER_CTL_V1) {
-		pr_err("invalid inf_count %d\n", intf_cfg->intf_count);
-		return;
-	}
-	intf_cfg->intf[intf_cfg->intf_count++] = phys_enc->intf_idx;
-	if (phys_enc == sde_enc->cur_master) {
-		if (sde_enc->cur_master->intf_mode == INTF_MODE_CMD)
-			intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
-		else
-			intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_VID;
-	}
-
-	/* configure this interface as master for split display */
-	if (phys_enc->split_role == ENC_ROLE_MASTER)
-		intf_cfg->intf_master = phys_enc->hw_intf->idx;
-
-	/* setup which pp blk will connect to this intf */
-	if (phys_enc->hw_intf->ops.bind_pingpong_blk)
-		phys_enc->hw_intf->ops.bind_pingpong_blk(
-				phys_enc->hw_intf,
-				true,
-				phys_enc->hw_pp->idx);
-
-
-	/*setup merge_3d configuration */
-	mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
-
-	if (mode_3d && phys_enc->hw_pp->merge_3d &&
-			intf_cfg->merge_3d_count < MAX_MERGE_3D_PER_CTL_V1)
-		intf_cfg->merge_3d[intf_cfg->merge_3d_count++] =
-			phys_enc->hw_pp->merge_3d->idx;
-
-	if (phys_enc->hw_pp->ops.setup_3d_mode)
-		phys_enc->hw_pp->ops.setup_3d_mode(phys_enc->hw_pp,
-				mode_3d);
-}
-
-void sde_encoder_helper_split_config(
-		struct sde_encoder_phys *phys_enc,
-		enum sde_intf interface)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct split_pipe_cfg cfg = { 0 };
-	struct sde_hw_mdp *hw_mdptop;
-	enum sde_rm_topology_name topology;
-	struct msm_display_info *disp_info;
-
-	if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
-		SDE_ERROR("invalid arg(s), encoder %d\n", !phys_enc);
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(phys_enc->parent);
-	hw_mdptop = phys_enc->hw_mdptop;
-	disp_info = &sde_enc->disp_info;
-
-	if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
-		return;
-
-	/**
-	 * disable split modes since encoder will be operating in as the only
-	 * encoder, either for the entire use case in the case of, for example,
-	 * single DSI, or for this frame in the case of left/right only partial
-	 * update.
-	 */
-	if (phys_enc->split_role == ENC_ROLE_SOLO) {
-		if (hw_mdptop->ops.setup_split_pipe)
-			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
-		if (hw_mdptop->ops.setup_pp_split)
-			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
-		return;
-	}
-
-	cfg.en = true;
-	cfg.mode = phys_enc->intf_mode;
-	cfg.intf = interface;
-
-	if (cfg.en && phys_enc->ops.needs_single_flush &&
-			phys_enc->ops.needs_single_flush(phys_enc))
-		cfg.split_flush_en = true;
-
-	topology = sde_connector_get_topology_name(phys_enc->connector);
-	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
-		cfg.pp_split_slave = cfg.intf;
-	else
-		cfg.pp_split_slave = INTF_MAX;
-
-	if (phys_enc->split_role == ENC_ROLE_MASTER) {
-		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
-
-		if (hw_mdptop->ops.setup_split_pipe)
-			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
-	} else if (sde_enc->hw_pp[0]) {
-		/*
-		 * slave encoder
-		 * - determine split index from master index,
-		 *   assume master is first pp
-		 */
-		cfg.pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
-		SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
-				cfg.pp_split_index);
-
-		if (hw_mdptop->ops.setup_pp_split)
-			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
-	}
-}
-
-bool sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	int i = 0;
-
-	if (!drm_enc)
-		return false;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc)
-		return false;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->in_clone_mode)
-			return true;
-	}
-
-	return false;
-}
-
-static int _sde_encoder_atomic_check_phys_enc(struct sde_encoder_virt *sde_enc,
-	struct drm_crtc_state *crtc_state,
-	struct drm_connector_state *conn_state)
-{
-	const struct drm_display_mode *mode;
-	struct drm_display_mode *adj_mode;
-	int i = 0;
-	int ret = 0;
-
-	mode = &crtc_state->mode;
-	adj_mode = &crtc_state->adjusted_mode;
-
-	/* perform atomic check on the first physical encoder (master) */
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.atomic_check)
-			ret = phys->ops.atomic_check(phys, crtc_state,
-					conn_state);
-		else if (phys && phys->ops.mode_fixup)
-			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
-				ret = -EINVAL;
-
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"mode unsupported, phys idx %d\n", i);
-			break;
-		}
-	}
-
-	return ret;
-}
-
-static int _sde_encoder_atomic_check_pu_roi(struct sde_encoder_virt *sde_enc,
-	struct drm_crtc_state *crtc_state,
-	struct drm_connector_state *conn_state,
-	struct sde_connector_state *sde_conn_state,
-	struct sde_crtc_state *sde_crtc_state)
-{
-	int ret = 0;
-
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-		struct sde_rect mode_roi, roi;
-
-		mode_roi.x = 0;
-		mode_roi.y = 0;
-		mode_roi.w = crtc_state->adjusted_mode.hdisplay;
-		mode_roi.h = crtc_state->adjusted_mode.vdisplay;
-
-		if (sde_conn_state->rois.num_rects) {
-			sde_kms_rect_merge_rectangles(
-					&sde_conn_state->rois, &roi);
-			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
-				SDE_ERROR_ENC(sde_enc,
-					"roi (%d,%d,%d,%d) on connector invalid during modeset\n",
-					roi.x, roi.y, roi.w, roi.h);
-				ret = -EINVAL;
-			}
-		}
-
-		if (sde_crtc_state->user_roi_list.num_rects) {
-			sde_kms_rect_merge_rectangles(
-					&sde_crtc_state->user_roi_list, &roi);
-			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
-				SDE_ERROR_ENC(sde_enc,
-					"roi (%d,%d,%d,%d) on crtc invalid during modeset\n",
-					roi.x, roi.y, roi.w, roi.h);
-				ret = -EINVAL;
-			}
-		}
-	}
-
-	return ret;
-}
-
-static int _sde_encoder_atomic_check_reserve(struct drm_encoder *drm_enc,
-	struct drm_crtc_state *crtc_state,
-	struct drm_connector_state *conn_state,
-	struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms,
-	struct sde_connector *sde_conn,
-	struct sde_connector_state *sde_conn_state)
-{
-	int ret = 0;
-	struct drm_display_mode *adj_mode = &crtc_state->adjusted_mode;
-
-	if (sde_conn && drm_atomic_crtc_needs_modeset(crtc_state)) {
-		struct msm_display_topology *topology = NULL;
-
-		ret = sde_conn->ops.get_mode_info(&sde_conn->base, adj_mode,
-				&sde_conn_state->mode_info,
-				sde_kms->catalog->max_mixer_width,
-				sde_conn->display);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-				"failed to get mode info, rc = %d\n", ret);
-			return ret;
-		}
-
-		if (sde_conn_state->mode_info.comp_info.comp_type &&
-			sde_conn_state->mode_info.comp_info.comp_ratio >=
-					MSM_DISPLAY_COMPRESSION_RATIO_MAX) {
-			SDE_ERROR_ENC(sde_enc,
-				"invalid compression ratio: %d\n",
-				sde_conn_state->mode_info.comp_info.comp_ratio);
-			ret = -EINVAL;
-			return ret;
-		}
-
-		/* Reserve dynamic resources, indicating atomic_check phase */
-		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
-			conn_state, true);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-				"RM failed to reserve resources, rc = %d\n",
-				ret);
-			return ret;
-		}
-
-		/**
-		 * Update connector state with the topology selected for the
-		 * resource set validated. Reset the topology if we are
-		 * de-activating crtc.
-		 */
-		if (crtc_state->active)
-			topology = &sde_conn_state->mode_info.topology;
-
-		ret = sde_rm_update_topology(conn_state, topology);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-				"RM failed to update topology, rc: %d\n", ret);
-			return ret;
-		}
-
-		ret = sde_connector_set_blob_data(conn_state->connector,
-				conn_state,
-				CONNECTOR_PROP_SDE_INFO);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-				"connector failed to update info, rc: %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int sde_encoder_virt_atomic_check(
-	struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state,
-	struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	const struct drm_display_mode *mode;
-	struct drm_display_mode *adj_mode;
-	struct sde_connector *sde_conn = NULL;
-	struct sde_connector_state *sde_conn_state = NULL;
-	struct sde_crtc_state *sde_crtc_state = NULL;
-	enum sde_rm_topology_name old_top;
-	int ret = 0;
-
-	if (!drm_enc || !crtc_state || !conn_state) {
-		SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
-				!drm_enc, !crtc_state, !conn_state);
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	mode = &crtc_state->mode;
-	adj_mode = &crtc_state->adjusted_mode;
-	sde_conn = to_sde_connector(conn_state->connector);
-	sde_conn_state = to_sde_connector_state(conn_state);
-	sde_crtc_state = to_sde_crtc_state(crtc_state);
-
-	SDE_EVT32(DRMID(drm_enc), drm_atomic_crtc_needs_modeset(crtc_state));
-
-	ret = _sde_encoder_atomic_check_phys_enc(sde_enc, crtc_state,
-			conn_state);
-	if (ret)
-		return ret;
-
-	ret = _sde_encoder_atomic_check_pu_roi(sde_enc, crtc_state,
-			conn_state, sde_conn_state, sde_crtc_state);
-	if (ret)
-		return ret;
-
-	/**
-	 * record topology in previous atomic state to be able to handle
-	 * topology transitions correctly.
-	 */
-	old_top  = sde_connector_get_property(conn_state,
-				CONNECTOR_PROP_TOPOLOGY_NAME);
-	ret = sde_connector_set_old_topology_name(conn_state, old_top);
-	if (ret)
-		return ret;
-
-	ret = _sde_encoder_atomic_check_reserve(drm_enc, crtc_state,
-			conn_state, sde_enc, sde_kms, sde_conn, sde_conn_state);
-	if (ret)
-		return ret;
-
-	ret = sde_connector_roi_v1_check_roi(conn_state);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc, "connector roi check failed, rc: %d",
-				ret);
-		return ret;
-	}
-
-	drm_mode_set_crtcinfo(adj_mode, 0);
-	SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
-
-	return ret;
-}
-
-static int _sde_encoder_dsc_update_pic_dim(struct msm_display_dsc_info *dsc,
-		int pic_width, int pic_height)
-{
-	if (!dsc || !pic_width || !pic_height) {
-		SDE_ERROR("invalid input: pic_width=%d pic_height=%d\n",
-			pic_width, pic_height);
-		return -EINVAL;
-	}
-
-	if ((pic_width % dsc->slice_width) ||
-	    (pic_height % dsc->slice_height)) {
-		SDE_ERROR("pic_dim=%dx%d has to be multiple of slice=%dx%d\n",
-			pic_width, pic_height,
-			dsc->slice_width, dsc->slice_height);
-		return -EINVAL;
-	}
-
-	dsc->pic_width = pic_width;
-	dsc->pic_height = pic_height;
-
-	return 0;
-}
-
-static void _sde_encoder_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc,
-		int intf_width)
-{
-	int slice_per_pkt, slice_per_intf;
-	int bytes_in_slice, total_bytes_per_intf;
-
-	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
-	    (intf_width < dsc->slice_width)) {
-		SDE_ERROR("invalid input: intf_width=%d slice_width=%d\n",
-			intf_width, dsc ? dsc->slice_width : -1);
-		return;
-	}
-
-	slice_per_pkt = dsc->slice_per_pkt;
-	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
-
-	/*
-	 * If slice_per_pkt is greater than slice_per_intf then default to 1.
-	 * This can happen during partial update.
-	 */
-	if (slice_per_pkt > slice_per_intf)
-		slice_per_pkt = 1;
-
-	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
-	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
-
-	dsc->eol_byte_num = total_bytes_per_intf % 3;
-	dsc->pclk_per_line =  DIV_ROUND_UP(total_bytes_per_intf, 3);
-	dsc->bytes_in_slice = bytes_in_slice;
-	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
-	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
-}
-
-static int _sde_encoder_dsc_initial_line_calc(struct msm_display_dsc_info *dsc,
-		int enc_ip_width)
-{
-	int max_ssm_delay, max_se_size, obuf_latency;
-	int input_ssm_out_latency, base_hs_latency;
-	int multi_hs_extra_latency,  mux_word_size;
-
-	/* Hardent core config */
-	int max_muxword_size = 48;
-	int output_rate = 64;
-	int rtl_max_bpc = 10;
-	int pipeline_latency = 28;
-
-	max_se_size = 4 * (rtl_max_bpc + 1);
-	max_ssm_delay = max_se_size + max_muxword_size - 1;
-	mux_word_size = (dsc->bpc >= 12 ? 64 : 48);
-	input_ssm_out_latency = pipeline_latency + (3 * (max_ssm_delay + 2));
-	obuf_latency = DIV_ROUND_UP((9 * output_rate +
-				mux_word_size), dsc->bpp) + 1;
-	base_hs_latency = dsc->initial_xmit_delay + input_ssm_out_latency
-				+ obuf_latency;
-	multi_hs_extra_latency = DIV_ROUND_UP((8 * dsc->chunk_size), dsc->bpp);
-	dsc->initial_lines = DIV_ROUND_UP((base_hs_latency +
-				multi_hs_extra_latency), dsc->slice_width);
-
-	return 0;
-}
-
-static bool _sde_encoder_dsc_ich_reset_override_needed(bool pu_en,
-		struct msm_display_dsc_info *dsc)
-{
-	/*
-	 * As per the DSC spec, ICH_RESET can be either end of the slice line
-	 * or at the end of the slice. HW internally generates ich_reset at
-	 * end of the slice line if DSC_MERGE is used or encoder has two
-	 * soft slices. However, if encoder has only 1 soft slice and DSC_MERGE
-	 * is not used then it will generate ich_reset at the end of slice.
-	 *
-	 * Now as per the spec, during one PPS session, position where
-	 * ich_reset is generated should not change. Now if full-screen frame
-	 * has more than 1 soft slice then HW will automatically generate
-	 * ich_reset at the end of slice_line. But for the same panel, if
-	 * partial frame is enabled and only 1 encoder is used with 1 slice,
-	 * then HW will generate ich_reset at end of the slice. This is a
-	 * mismatch. Prevent this by overriding HW's decision.
-	 */
-	return pu_en && dsc && (dsc->full_frame_slices > 1) &&
-		(dsc->slice_width == dsc->pic_width);
-}
-
-static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
-		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
-		u32 common_mode, bool ich_reset, bool enable,
-		struct sde_hw_pingpong *hw_dsc_pp)
-{
-	if (!enable) {
-		if (hw_dsc_pp && hw_dsc_pp->ops.disable_dsc)
-			hw_dsc_pp->ops.disable_dsc(hw_dsc_pp);
-
-		if (hw_dsc && hw_dsc->ops.dsc_disable)
-			hw_dsc->ops.dsc_disable(hw_dsc);
-
-		if (hw_dsc && hw_dsc->ops.bind_pingpong_blk)
-			hw_dsc->ops.bind_pingpong_blk(hw_dsc, false,
-					PINGPONG_MAX);
-		return;
-	}
-
-	if (!dsc || !hw_dsc || !hw_pp || !hw_dsc_pp) {
-		SDE_ERROR("invalid params %d %d %d %d\n", !dsc, !hw_dsc,
-				!hw_pp, !hw_dsc_pp);
-		return;
-	}
-
-	if (hw_dsc->ops.dsc_config)
-		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
-
-	if (hw_dsc->ops.dsc_config_thresh)
-		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
-
-	if (hw_dsc_pp->ops.setup_dsc)
-		hw_dsc_pp->ops.setup_dsc(hw_dsc_pp);
-
-	if (hw_dsc->ops.bind_pingpong_blk)
-		hw_dsc->ops.bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
-
-	if (hw_dsc_pp->ops.enable_dsc)
-		hw_dsc_pp->ops.enable_dsc(hw_dsc_pp);
-}
-
-static void _sde_encoder_get_connector_roi(
-		struct sde_encoder_virt *sde_enc,
-		struct sde_rect *merged_conn_roi)
-{
-	struct drm_connector *drm_conn;
-	struct sde_connector_state *c_state;
-
-	if (!sde_enc || !merged_conn_roi)
-		return;
-
-	drm_conn = sde_enc->phys_encs[0]->connector;
-
-	if (!drm_conn || !drm_conn->state)
-		return;
-
-	c_state = to_sde_connector_state(drm_conn->state);
-	sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
-}
-
-static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
-{
-	int this_frame_slices;
-	int intf_ip_w, enc_ip_w;
-	int ich_res, dsc_common_mode = 0;
-
-	struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *hw_dsc_pp = sde_enc->hw_dsc_pp[0];
-	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct msm_display_dsc_info *dsc = NULL;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_ctl_dsc_cfg cfg;
-
-	if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
-	}
-
-
-	hw_ctl = enc_master->hw_ctl;
-
-	memset(&cfg, 0, sizeof(cfg));
-	dsc = &sde_enc->mode_info.comp_info.dsc_info;
-	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
-
-	this_frame_slices = roi->w / dsc->slice_width;
-	intf_ip_w = this_frame_slices * dsc->slice_width;
-	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
-
-	enc_ip_w = intf_ip_w;
-	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
-
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode = DSC_MODE_VIDEO;
-
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		roi->w, roi->h, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
-
-	_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
-			ich_res, true, hw_dsc_pp);
-	cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;
-
-	/* setup dsc active configuration in the control path */
-	if (hw_ctl->ops.setup_dsc_cfg) {
-		hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
-		SDE_DEBUG_ENC(sde_enc,
-				"setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
-				hw_ctl->idx,
-				cfg.dsc_count,
-				cfg.dsc[0],
-				cfg.dsc[1]);
-	}
-
-	if (hw_ctl->ops.update_bitmask_dsc)
-		hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc->idx, 1);
-
-	return 0;
-}
-
-static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	int this_frame_slices;
-	int intf_ip_w, enc_ip_w;
-	int ich_res, dsc_common_mode;
-
-	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
-	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
-	bool half_panel_partial_update;
-	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
-	struct sde_ctl_dsc_cfg cfg;
-	int i;
-
-	memset(&cfg, 0, sizeof(cfg));
-
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		hw_pp[i] = sde_enc->hw_pp[i];
-		hw_dsc[i] = sde_enc->hw_dsc[i];
-		hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
-
-		if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
-			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-			return -EINVAL;
-		}
-	}
-
-	half_panel_partial_update =
-			hweight_long(params->affected_displays) == 1;
-
-	dsc_common_mode = 0;
-	if (!half_panel_partial_update)
-		dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode |= DSC_MODE_VIDEO;
-
-	memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
-	memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
-
-	/*
-	 * Since both DSC use same pic dimension, set same pic dimension
-	 * to both DSC structures.
-	 */
-	_sde_encoder_dsc_update_pic_dim(&dsc[0], roi->w, roi->h);
-	_sde_encoder_dsc_update_pic_dim(&dsc[1], roi->w, roi->h);
-
-	this_frame_slices = roi->w / dsc[0].slice_width;
-	intf_ip_w = this_frame_slices * dsc[0].slice_width;
-
-	if (!half_panel_partial_update)
-		intf_ip_w /= 2;
-
-	/*
-	 * In this topology when both interfaces are active, they have same
-	 * load so intf_ip_w will be same.
-	 */
-	_sde_encoder_dsc_pclk_param_calc(&dsc[0], intf_ip_w);
-	_sde_encoder_dsc_pclk_param_calc(&dsc[1], intf_ip_w);
-
-	/*
-	 * In this topology, since there is no dsc_merge, uncompressed input
-	 * to encoder and interface is same.
-	 */
-	enc_ip_w = intf_ip_w;
-	_sde_encoder_dsc_initial_line_calc(&dsc[0], enc_ip_w);
-	_sde_encoder_dsc_initial_line_calc(&dsc[1], enc_ip_w);
-
-	/*
-	 * __is_ich_reset_override_needed should be called only after
-	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
-	 */
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
-			half_panel_partial_update, &dsc[0]);
-
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-			roi->w, roi->h, dsc_common_mode);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		bool active = !!((1 << i) & params->affected_displays);
-
-		SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
-				dsc_common_mode, i, active);
-		_sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
-				dsc_common_mode, ich_res, active, hw_dsc_pp[i]);
-
-		if (active) {
-			if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
-				pr_err("Invalid dsc count:%d\n",
-						cfg.dsc_count);
-				return -EINVAL;
-			}
-			cfg.dsc[cfg.dsc_count++] = hw_dsc[i]->idx;
-
-			if (hw_ctl->ops.update_bitmask_dsc)
-				hw_ctl->ops.update_bitmask_dsc(hw_ctl,
-						hw_dsc[i]->idx, 1);
-		}
-	}
-
-	/* setup dsc active configuration in the control path */
-	if (hw_ctl->ops.setup_dsc_cfg) {
-		hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
-		SDE_DEBUG_ENC(sde_enc,
-				"setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
-				hw_ctl->idx,
-				cfg.dsc_count,
-				cfg.dsc[0],
-				cfg.dsc[1]);
-	}
-	return 0;
-}
-
-static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	int this_frame_slices;
-	int intf_ip_w, enc_ip_w;
-	int ich_res, dsc_common_mode;
-
-	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
-	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
-	struct msm_display_dsc_info *dsc = NULL;
-	bool half_panel_partial_update;
-	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
-	struct sde_ctl_dsc_cfg cfg;
-	int i;
-
-	memset(&cfg, 0, sizeof(cfg));
-
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		hw_pp[i] = sde_enc->hw_pp[i];
-		hw_dsc[i] = sde_enc->hw_dsc[i];
-		hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
-
-		if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
-			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-			return -EINVAL;
-		}
-	}
-
-	dsc = &sde_enc->mode_info.comp_info.dsc_info;
-
-	half_panel_partial_update =
-			hweight_long(params->affected_displays) == 1;
-
-	dsc_common_mode = 0;
-	if (!half_panel_partial_update)
-		dsc_common_mode |= DSC_MODE_SPLIT_PANEL | DSC_MODE_MULTIPLEX;
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode |= DSC_MODE_VIDEO;
-
-	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
-
-	this_frame_slices = roi->w / dsc->slice_width;
-	intf_ip_w = this_frame_slices * dsc->slice_width;
-	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
-
-	/*
-	 * dsc merge case: when using 2 encoders for the same stream,
-	 * no. of slices need to be same on both the encoders.
-	 */
-	enc_ip_w = intf_ip_w / 2;
-	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
-
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
-			half_panel_partial_update, dsc);
-
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-			roi->w, roi->h, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
-			dsc_common_mode, i, params->affected_displays);
-
-	_sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
-			ich_res, true, hw_dsc_pp[0]);
-	cfg.dsc[0] = hw_dsc[0]->idx;
-	cfg.dsc_count++;
-	if (hw_ctl->ops.update_bitmask_dsc)
-		hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[0]->idx, 1);
-
-
-	_sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
-			ich_res, !half_panel_partial_update, hw_dsc_pp[1]);
-	if (!half_panel_partial_update) {
-		cfg.dsc[1] = hw_dsc[1]->idx;
-		cfg.dsc_count++;
-		if (hw_ctl->ops.update_bitmask_dsc)
-			hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[1]->idx,
-					1);
-	}
-	/* setup dsc active configuration in the control path */
-	if (hw_ctl->ops.setup_dsc_cfg) {
-		hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
-		SDE_DEBUG_ENC(sde_enc,
-				"setup_dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
-				hw_ctl->idx,
-				cfg.dsc_count,
-				cfg.dsc[0],
-				cfg.dsc[1]);
-	}
-	return 0;
-}
-
-static int _sde_encoder_update_roi(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct drm_connector *drm_conn;
-	struct drm_display_mode *adj_mode;
-	struct sde_rect roi;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder parameter\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->crtc || !sde_enc->crtc->state) {
-		SDE_ERROR("invalid crtc parameter\n");
-		return -EINVAL;
-	}
-
-	if (!sde_enc->cur_master) {
-		SDE_ERROR("invalid cur_master parameter\n");
-		return -EINVAL;
-	}
-
-	adj_mode = &sde_enc->cur_master->cached_mode;
-	drm_conn = sde_enc->cur_master->connector;
-
-	_sde_encoder_get_connector_roi(sde_enc, &roi);
-	if (sde_kms_rect_is_null(&roi)) {
-		roi.w = adj_mode->hdisplay;
-		roi.h = adj_mode->vdisplay;
-	}
-
-	memcpy(&sde_enc->prv_conn_roi, &sde_enc->cur_conn_roi,
-			sizeof(sde_enc->prv_conn_roi));
-	memcpy(&sde_enc->cur_conn_roi, &roi, sizeof(sde_enc->cur_conn_roi));
-
-	return 0;
-}
-
-static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	enum sde_rm_topology_name topology;
-	struct drm_connector *drm_conn;
-	int ret = 0;
-
-	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
-			!sde_enc->phys_encs[0]->connector)
-		return -EINVAL;
-
-	drm_conn = sde_enc->phys_encs[0]->connector;
-
-	topology = sde_connector_get_topology_name(drm_conn);
-	if (topology == SDE_RM_TOPOLOGY_NONE) {
-		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG_ENC(sde_enc, "topology:%d\n", topology);
-	SDE_EVT32(DRMID(&sde_enc->base), topology,
-			sde_enc->cur_conn_roi.x,
-			sde_enc->cur_conn_roi.y,
-			sde_enc->cur_conn_roi.w,
-			sde_enc->cur_conn_roi.h,
-			sde_enc->prv_conn_roi.x,
-			sde_enc->prv_conn_roi.y,
-			sde_enc->prv_conn_roi.w,
-			sde_enc->prv_conn_roi.h,
-			sde_enc->cur_master->cached_mode.hdisplay,
-			sde_enc->cur_master->cached_mode.vdisplay);
-
-	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
-			&sde_enc->prv_conn_roi))
-		return ret;
-
-	switch (topology) {
-	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
-	case SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC:
-		ret = _sde_encoder_dsc_n_lm_1_enc_1_intf(sde_enc);
-		break;
-	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
-		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
-		break;
-	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
-		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc, params);
-		break;
-	default:
-		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
-				topology);
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-void sde_encoder_helper_vsync_config(struct sde_encoder_phys *phys_enc,
-			u32 vsync_source, bool is_dummy)
-{
-	struct sde_vsync_source_cfg vsync_cfg = { 0 };
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_hw_mdp *hw_mdptop;
-	struct drm_encoder *drm_enc;
-	struct sde_encoder_virt *sde_enc;
-	int i;
-
-	sde_enc = to_sde_encoder_virt(phys_enc->parent);
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid param sde_enc:%d\n", sde_enc != NULL);
-		return;
-	} else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
-		SDE_ERROR("invalid num phys enc %d/%d\n",
-				sde_enc->num_phys_encs,
-				(int) ARRAY_SIZE(sde_enc->hw_pp));
-		return;
-	}
-
-	drm_enc = &sde_enc->base;
-	/* this pointers are checked in virt_enable_helper */
-	priv = drm_enc->dev->dev_private;
-
-	sde_kms = to_sde_kms(priv->kms);
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	}
-
-	hw_mdptop = sde_kms->hw_mdp;
-	if (!hw_mdptop) {
-		SDE_ERROR("invalid mdptop\n");
-		return;
-	}
-
-	if (hw_mdptop->ops.setup_vsync_source) {
-		for (i = 0; i < sde_enc->num_phys_encs; i++)
-			vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
-
-		vsync_cfg.pp_count = sde_enc->num_phys_encs;
-		vsync_cfg.frame_rate = sde_enc->mode_info.frame_rate;
-		vsync_cfg.vsync_source = vsync_source;
-		vsync_cfg.is_dummy = is_dummy;
-
-		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
-	}
-}
-
-static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
-			struct msm_display_info *disp_info, bool is_dummy)
-{
-	struct sde_encoder_phys *phys;
-	int i;
-	u32 vsync_source;
-
-	if (!sde_enc || !disp_info) {
-		SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
-					sde_enc != NULL, disp_info != NULL);
-		return;
-	} else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
-		SDE_ERROR("invalid num phys enc %d/%d\n",
-				sde_enc->num_phys_encs,
-				(int) ARRAY_SIZE(sde_enc->hw_pp));
-		return;
-	}
-
-	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
-		if (is_dummy)
-			vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0 -
-					sde_enc->te_source;
-		else if (disp_info->is_te_using_watchdog_timer)
-			vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_4;
-		else
-			vsync_source = sde_enc->te_source;
-
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			phys = sde_enc->phys_encs[i];
-
-			if (phys && phys->ops.setup_vsync_source)
-				phys->ops.setup_vsync_source(phys,
-					vsync_source, is_dummy);
-		}
-	}
-}
-
-static void _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
-{
-	int i;
-	struct sde_hw_pingpong *hw_pp = NULL;
-	struct sde_hw_pingpong *hw_dsc_pp = NULL;
-	struct sde_hw_dsc *hw_dsc = NULL;
-	struct sde_hw_ctl *hw_ctl = NULL;
-	struct sde_ctl_dsc_cfg cfg;
-
-	if (!sde_enc || !sde_enc->phys_encs[0] ||
-			!sde_enc->phys_encs[0]->connector) {
-		SDE_ERROR("invalid params %d %d\n",
-			!sde_enc, sde_enc ? !sde_enc->phys_encs[0] : -1);
-		return;
-	}
-
-	if (sde_enc->cur_master)
-		hw_ctl = sde_enc->cur_master->hw_ctl;
-
-	/* Disable DSC for all the pp's present in this topology */
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		hw_pp = sde_enc->hw_pp[i];
-		hw_dsc = sde_enc->hw_dsc[i];
-		hw_dsc_pp = sde_enc->hw_dsc_pp[i];
-
-		_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, NULL,
-						0, 0, 0, hw_dsc_pp);
-
-		if (hw_dsc)
-			sde_enc->dirty_dsc_ids[i] = hw_dsc->idx;
-	}
-
-	/* Clear the DSC ACTIVE config for this CTL */
-	if (hw_ctl && hw_ctl->ops.setup_dsc_cfg) {
-		memset(&cfg, 0, sizeof(cfg));
-		hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
-	}
-
-	/**
-	 * Since pending flushes from previous commit get cleared
-	 * sometime after this point, setting DSC flush bits now
-	 * will have no effect. Therefore dirty_dsc_ids track which
-	 * DSC blocks must be flushed for the next trigger.
-	 */
-}
-
-static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_display_info disp_info;
-
-	if (!drm_enc) {
-		pr_err("invalid drm encoder\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	sde_encoder_control_te(drm_enc, false);
-
-	memcpy(&disp_info, &sde_enc->disp_info, sizeof(disp_info));
-	disp_info.is_te_using_watchdog_timer = true;
-	_sde_encoder_update_vsync_source(sde_enc, &disp_info, false);
-
-	sde_encoder_control_te(drm_enc, true);
-
-	return 0;
-}
-
-static int _sde_encoder_rsc_client_update_vsync_wait(
-	struct drm_encoder *drm_enc, struct sde_encoder_virt *sde_enc,
-	int wait_vblank_crtc_id)
-{
-	int wait_refcount = 0, ret = 0;
-	int pipe = -1;
-	int wait_count = 0;
-	struct drm_crtc *primary_crtc;
-	struct drm_crtc *crtc;
-
-	crtc = sde_enc->crtc;
-
-	if (wait_vblank_crtc_id)
-		wait_refcount =
-			sde_rsc_client_get_vsync_refcount(sde_enc->rsc_client);
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id, wait_refcount,
-			SDE_EVTLOG_FUNC_ENTRY);
-
-	if (crtc->base.id != wait_vblank_crtc_id) {
-		primary_crtc = drm_crtc_find(drm_enc->dev,
-				NULL, wait_vblank_crtc_id);
-		if (!primary_crtc) {
-			SDE_ERROR_ENC(sde_enc,
-					"failed to find primary crtc id %d\n",
-					wait_vblank_crtc_id);
-			return -EINVAL;
-		}
-		pipe = drm_crtc_index(primary_crtc);
-	}
-
-	/**
-	 * note: VBLANK is expected to be enabled at this point in
-	 * resource control state machine if on primary CRTC
-	 */
-	for (wait_count = 0; wait_count < MAX_RSC_WAIT; wait_count++) {
-		if (sde_rsc_client_is_state_update_complete(
-				sde_enc->rsc_client))
-			break;
-
-		if (crtc->base.id == wait_vblank_crtc_id)
-			ret = sde_encoder_wait_for_event(drm_enc,
-					MSM_ENC_VBLANK);
-		else
-			drm_wait_one_vblank(drm_enc->dev, pipe);
-
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"wait for vblank failed ret:%d\n", ret);
-			/**
-			 * rsc hardware may hang without vsync. avoid rsc hang
-			 * by generating the vsync from watchdog timer.
-			 */
-			if (crtc->base.id == wait_vblank_crtc_id)
-				_sde_encoder_switch_to_watchdog_vsync(drm_enc);
-		}
-	}
-
-	if (wait_count >= MAX_RSC_WAIT)
-		SDE_EVT32(DRMID(drm_enc), wait_vblank_crtc_id, wait_count,
-				SDE_EVTLOG_ERROR);
-
-	if (wait_refcount)
-		sde_rsc_client_reset_vsync_refcount(sde_enc->rsc_client);
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id, wait_refcount,
-			SDE_EVTLOG_FUNC_EXIT);
-
-	return ret;
-}
-
-static int _sde_encoder_update_rsc_client(
-		struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct drm_crtc *crtc;
-	enum sde_rsc_state rsc_state = SDE_RSC_IDLE_STATE;
-	struct sde_rsc_cmd_config *rsc_config;
-	int ret, prefill_lines;
-	struct msm_display_info *disp_info;
-	struct msm_mode_info *mode_info;
-	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
-	u32 qsync_mode = 0;
-
-	if (!drm_enc || !drm_enc->dev) {
-		SDE_ERROR("invalid encoder arguments\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	mode_info = &sde_enc->mode_info;
-
-	crtc = sde_enc->crtc;
-
-	if (!sde_enc->crtc) {
-		SDE_ERROR("invalid crtc parameter\n");
-		return -EINVAL;
-	}
-	disp_info = &sde_enc->disp_info;
-	rsc_config = &sde_enc->rsc_config;
-
-	if (!sde_enc->rsc_client) {
-		SDE_DEBUG_ENC(sde_enc, "rsc client not created\n");
-		return 0;
-	}
-
-	/**
-	 * only primary command mode panel without Qsync can request CMD state.
-	 * all other panels/displays can request for VID state including
-	 * secondary command mode panel.
-	 * Clone mode encoder can request CLK STATE only.
-	 */
-	if (sde_enc->cur_master)
-		qsync_mode = sde_connector_get_qsync_mode(
-				sde_enc->cur_master->connector);
-
-	if (sde_encoder_in_clone_mode(drm_enc) || !disp_info->is_primary ||
-			  (disp_info->is_primary && qsync_mode))
-		rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
-	else if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-		rsc_state = enable ? SDE_RSC_CMD_STATE : SDE_RSC_IDLE_STATE;
-	else if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)
-		rsc_state = enable ? SDE_RSC_VID_STATE : SDE_RSC_IDLE_STATE;
-
-	SDE_EVT32(rsc_state, qsync_mode);
-
-	prefill_lines = mode_info->prefill_lines;
-
-	/* compare specific items and reconfigure the rsc */
-	if ((rsc_config->fps != mode_info->frame_rate) ||
-	    (rsc_config->vtotal != mode_info->vtotal) ||
-	    (rsc_config->prefill_lines != prefill_lines) ||
-	    (rsc_config->jitter_numer != mode_info->jitter_numer) ||
-	    (rsc_config->jitter_denom != mode_info->jitter_denom)) {
-		rsc_config->fps = mode_info->frame_rate;
-		rsc_config->vtotal = mode_info->vtotal;
-		rsc_config->prefill_lines = prefill_lines;
-		rsc_config->jitter_numer = mode_info->jitter_numer;
-		rsc_config->jitter_denom = mode_info->jitter_denom;
-		sde_enc->rsc_state_init = false;
-	}
-
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
-			&& disp_info->is_primary) {
-		/* update it only once */
-		sde_enc->rsc_state_init = true;
-
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, rsc_config, crtc->base.id,
-			&wait_vblank_crtc_id);
-	} else {
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, NULL, crtc->base.id,
-			&wait_vblank_crtc_id);
-	}
-
-	/**
-	 * if RSC performed a state change that requires a VBLANK wait, it will
-	 * set wait_vblank_crtc_id to the CRTC whose VBLANK we must wait on.
-	 *
-	 * if we are the primary display, we will need to enable and wait
-	 * locally since we hold the commit thread
-	 *
-	 * if we are an external display, we must send a signal to the primary
-	 * to enable its VBLANK and wait one, since the RSC hardware is driven
-	 * by the primary panel's VBLANK signals
-	 */
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc,
-				"sde rsc client update failed ret:%d\n", ret);
-		return ret;
-	} else if (wait_vblank_crtc_id == SDE_RSC_INVALID_CRTC_ID) {
-		return ret;
-	}
-
-	ret = _sde_encoder_rsc_client_update_vsync_wait(drm_enc,
-			sde_enc, wait_vblank_crtc_id);
-
-	return ret;
-}
-
-static void _sde_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.irq_control)
-			phys->ops.irq_control(phys, enable);
-	}
-
-}
-
-/* keep track of the userspace vblank during modeset */
-static void _sde_encoder_modeset_helper_locked(struct drm_encoder *drm_enc,
-		u32 sw_event)
-{
-	struct sde_encoder_virt *sde_enc;
-	bool enable;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "sw_event:%d, vblank_enabled:%d\n",
-			sw_event, sde_enc->vblank_enabled);
-
-	/* nothing to do if vblank not enabled by userspace */
-	if (!sde_enc->vblank_enabled)
-		return;
-
-	/* disable vblank on pre_modeset */
-	if (sw_event == SDE_ENC_RC_EVENT_PRE_MODESET)
-		enable = false;
-	/* enable vblank on post_modeset */
-	else if (sw_event == SDE_ENC_RC_EVENT_POST_MODESET)
-		enable = true;
-	else
-		return;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.control_vblank_irq)
-			phys->ops.control_vblank_irq(phys, enable);
-	}
-}
-
-struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc)
-		return NULL;
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	return sde_enc->rsc_client;
-}
-
-static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
-		bool enable)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_encoder_virt *sde_enc;
-	int rc;
-	bool is_cmd_mode, is_primary;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-
-	is_cmd_mode = sde_enc->disp_info.capabilities &
-			MSM_DISPLAY_CAP_CMD_MODE;
-	is_primary = sde_enc->disp_info.is_primary;
-
-	SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
-	SDE_EVT32(DRMID(drm_enc), enable);
-
-	if (!sde_enc->cur_master) {
-		SDE_ERROR("encoder master not set\n");
-		return -EINVAL;
-	}
-
-	if (enable) {
-		/* enable SDE core clks */
-		rc = sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, true);
-		if (rc) {
-			SDE_ERROR("failed to enable power resource %d\n", rc);
-			SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-			return rc;
-		}
-
-		sde_enc->elevated_ahb_vote = true;
-		/* enable DSI clks */
-		rc = sde_connector_clk_ctrl(sde_enc->cur_master->connector,
-				true);
-		if (rc) {
-			SDE_ERROR("failed to enable clk control %d\n", rc);
-			sde_power_resource_enable(&priv->phandle,
-					sde_kms->core_client, false);
-			return rc;
-		}
-
-		/* enable all the irq */
-		_sde_encoder_irq_control(drm_enc, true);
-
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_add_request(drm_enc, sde_kms);
-
-	} else {
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_remove_request(drm_enc, sde_kms);
-
-		/* disable all the irq */
-		_sde_encoder_irq_control(drm_enc, false);
-
-		/* disable DSI clks */
-		sde_connector_clk_ctrl(sde_enc->cur_master->connector, false);
-
-		/* disable SDE core clks */
-		sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, false);
-	}
-
-	return 0;
-}
-
-static void sde_encoder_misr_configure(struct drm_encoder *drm_enc,
-		bool enable, u32 frame_count)
-{
-	struct sde_encoder_virt *sde_enc;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys || !phys->ops.setup_misr)
-			continue;
-
-		phys->ops.setup_misr(phys, enable, frame_count);
-	}
-}
-
-static void sde_encoder_input_event_handler(struct input_handle *handle,
-	unsigned int type, unsigned int code, int value)
-{
-	struct drm_encoder *drm_enc = NULL;
-	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_thread *disp_thread = NULL;
-	struct msm_drm_private *priv = NULL;
-
-	if (!handle || !handle->handler || !handle->handler->private) {
-		SDE_ERROR("invalid encoder for the input event\n");
-		return;
-	}
-
-	drm_enc = (struct drm_encoder *)handle->handler->private;
-	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->disp_thread))) {
-		SDE_DEBUG_ENC(sde_enc,
-			"invalid cached CRTC: %d or crtc index: %d\n",
-			sde_enc->crtc == NULL,
-			sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
-		return;
-	}
-
-	SDE_EVT32_VERBOSE(DRMID(drm_enc));
-
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
-
-	kthread_queue_work(&disp_thread->worker,
-				&sde_enc->input_event_work);
-}
-
-void sde_encoder_control_idle_pc(struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	/* return early if there is no state change */
-	if (sde_enc->idle_pc_enabled == enable)
-		return;
-
-	sde_enc->idle_pc_enabled = enable;
-
-	SDE_DEBUG("idle-pc state:%d\n", sde_enc->idle_pc_enabled);
-	SDE_EVT32(sde_enc->idle_pc_enabled);
-}
-
-static void _sde_encoder_rc_cancel_delayed(struct sde_encoder_virt *sde_enc,
-	u32 sw_event)
-{
-	if (kthread_cancel_delayed_work_sync(
-			&sde_enc->delayed_off_work))
-		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
-				sw_event);
-}
-
-static int _sde_encoder_rc_kickoff(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
-{
-	int ret = 0;
-
-	/* cancel delayed off work, if any */
-	_sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
-
-	mutex_lock(&sde_enc->rc_lock);
-
-	/* return if the resource control is already in ON state */
-	if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
-		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in ON state\n",
-				sw_event);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_EVTLOG_FUNC_CASE1);
-		goto end;
-	} else if (sde_enc->rc_state != SDE_ENC_RC_STATE_OFF &&
-			sde_enc->rc_state != SDE_ENC_RC_STATE_IDLE) {
-		SDE_ERROR_ENC(sde_enc, "sw_event:%d, rc in state %d\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_ERROR);
-		goto end;
-	}
-
-	if (is_vid_mode && sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
-		_sde_encoder_irq_control(drm_enc, true);
-	} else {
-		/* enable all the clks and resources */
-		ret = _sde_encoder_resource_control_helper(drm_enc,
-				true);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"sw_event:%d, rc in state %d\n",
-					sw_event, sde_enc->rc_state);
-			SDE_EVT32(DRMID(drm_enc), sw_event,
-					sde_enc->rc_state,
-					SDE_EVTLOG_ERROR);
-			goto end;
-		}
-		_sde_encoder_update_rsc_client(drm_enc, true);
-	}
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE1);
-	sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return ret;
-}
-
-static int _sde_encoder_rc_frame_done(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc,
-	struct msm_drm_private *priv)
-{
-	unsigned int lp, idle_pc_duration;
-	struct msm_drm_thread *disp_thread;
-	bool autorefresh_enabled = false;
-
-	if (!sde_enc->crtc) {
-		SDE_ERROR("invalid crtc, sw_event:%u\n", sw_event);
-		return -EINVAL;
-	}
-
-	if (sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
-		SDE_ERROR("invalid crtc index :%u\n",
-				sde_enc->crtc->index);
-		return -EINVAL;
-	}
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
-
-	/*
-	 * mutex lock is not used as this event happens at interrupt
-	 * context. And locking is not required as, the other events
-	 * like KICKOFF and STOP does a wait-for-idle before executing
-	 * the resource_control
-	 */
-	if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
-		SDE_ERROR_ENC(sde_enc, "sw_event:%d,rc:%d-unexpected\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_ERROR);
-		return -EINVAL;
-	}
-
-	/*
-	 * schedule off work item only when there are no
-	 * frames pending
-	 */
-	if (sde_crtc_frame_pending(sde_enc->crtc) > 1) {
-		SDE_DEBUG_ENC(sde_enc, "skip schedule work");
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_EVTLOG_FUNC_CASE2);
-		return 0;
-	}
-
-	/* schedule delayed off work if autorefresh is disabled */
-	if (sde_enc->cur_master &&
-		sde_enc->cur_master->ops.is_autorefresh_enabled)
-		autorefresh_enabled =
-			sde_enc->cur_master->ops.is_autorefresh_enabled(
-						sde_enc->cur_master);
-
-	/* set idle timeout based on master connector's lp value */
-	if (sde_enc->cur_master)
-		lp = sde_connector_get_lp(
-				sde_enc->cur_master->connector);
-	else
-		lp = SDE_MODE_DPMS_ON;
-
-	if (lp == SDE_MODE_DPMS_LP2)
-		idle_pc_duration = IDLE_SHORT_TIMEOUT;
-	else
-		idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
-
-	if (!autorefresh_enabled)
-		kthread_mod_delayed_work(
-			&disp_thread->worker,
-			&sde_enc->delayed_off_work,
-			msecs_to_jiffies(idle_pc_duration));
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			autorefresh_enabled,
-			idle_pc_duration, SDE_EVTLOG_FUNC_CASE2);
-	SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
-			sw_event);
-	return 0;
-}
-
-static int _sde_encoder_rc_pre_stop(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
-{
-	/* cancel delayed off work, if any */
-	_sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
-
-	mutex_lock(&sde_enc->rc_lock);
-
-	if (is_vid_mode &&
-		  sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
-		_sde_encoder_irq_control(drm_enc, true);
-	}
-	/* skip if is already OFF or IDLE, resources are off already */
-	else if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF ||
-			sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
-		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in %d state\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_EVTLOG_FUNC_CASE3);
-		goto end;
-	}
-
-	/**
-	 * IRQs are still enabled currently, which allows wait for
-	 * VBLANK which RSC may require to correctly transition to OFF
-	 */
-	_sde_encoder_update_rsc_client(drm_enc, false);
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_PRE_OFF,
-			SDE_EVTLOG_FUNC_CASE3);
-
-	sde_enc->rc_state = SDE_ENC_RC_STATE_PRE_OFF;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return 0;
-}
-
-static int _sde_encoder_rc_stop(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc)
-{
-	int ret = 0;
-
-	/* cancel vsync event work and timer */
-	kthread_cancel_work_sync(&sde_enc->vsync_event_work);
-	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI)
-		del_timer_sync(&sde_enc->vsync_event_timer);
-
-	mutex_lock(&sde_enc->rc_lock);
-	/* return if the resource control is already in OFF state */
-	if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
-		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
-				sw_event);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_EVTLOG_FUNC_CASE4);
-		goto end;
-	} else if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON ||
-		   sde_enc->rc_state == SDE_ENC_RC_STATE_MODESET) {
-		SDE_ERROR_ENC(sde_enc, "sw_event:%d, rc in state %d\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_ERROR);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	/**
-	 * expect to arrive here only if in either idle state or pre-off
-	 * and in IDLE state the resources are already disabled
-	 */
-	if (sde_enc->rc_state == SDE_ENC_RC_STATE_PRE_OFF)
-		_sde_encoder_resource_control_helper(drm_enc, false);
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE4);
-
-	sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return ret;
-}
-
-static int _sde_encoder_rc_pre_modeset(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc)
-{
-	int ret = 0;
-
-	/* cancel delayed off work, if any */
-	_sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
-
-	mutex_lock(&sde_enc->rc_lock);
-
-	/* return if the resource control is already in ON state */
-	if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
-		/* enable all the clks and resources */
-		ret = _sde_encoder_resource_control_helper(drm_enc,
-				true);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"sw_event:%d, rc in state %d\n",
-					sw_event, sde_enc->rc_state);
-			SDE_EVT32(DRMID(drm_enc), sw_event,
-					sde_enc->rc_state,
-					SDE_EVTLOG_ERROR);
-			goto end;
-		}
-
-		_sde_encoder_update_rsc_client(drm_enc, true);
-
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE5);
-		sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
-	}
-
-	ret = sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
-	if (ret && ret != -EWOULDBLOCK) {
-		SDE_ERROR_ENC(sde_enc,
-				"wait for commit done returned %d\n",
-				ret);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				ret, SDE_EVTLOG_ERROR);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	_sde_encoder_irq_control(drm_enc, false);
-	_sde_encoder_modeset_helper_locked(drm_enc, sw_event);
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-		SDE_ENC_RC_STATE_MODESET, SDE_EVTLOG_FUNC_CASE5);
-
-	sde_enc->rc_state = SDE_ENC_RC_STATE_MODESET;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return ret;
-}
-
-static int _sde_encoder_rc_post_modeset(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc)
-{
-	int ret = 0;
-
-	mutex_lock(&sde_enc->rc_lock);
-
-	/* return if the resource control is already in ON state */
-	if (sde_enc->rc_state != SDE_ENC_RC_STATE_MODESET) {
-		SDE_ERROR_ENC(sde_enc,
-				"sw_event:%d, rc:%d !MODESET state\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_ERROR);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	_sde_encoder_modeset_helper_locked(drm_enc, sw_event);
-	_sde_encoder_irq_control(drm_enc, true);
-
-	_sde_encoder_update_rsc_client(drm_enc, true);
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE6);
-
-	sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return ret;
-}
-
-static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
-{
-	mutex_lock(&sde_enc->rc_lock);
-
-	if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
-		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
-				sw_event, sde_enc->rc_state);
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_ERROR);
-		goto end;
-	} else if (sde_crtc_frame_pending(sde_enc->crtc) > 1) {
-		SDE_ERROR_ENC(sde_enc, "skip idle entry");
-		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			sde_crtc_frame_pending(sde_enc->crtc),
-			SDE_EVTLOG_ERROR);
-		goto end;
-	}
-
-	if (is_vid_mode) {
-		_sde_encoder_irq_control(drm_enc, false);
-	} else {
-		/* disable all the clks and resources */
-		_sde_encoder_update_rsc_client(drm_enc, false);
-		_sde_encoder_resource_control_helper(drm_enc, false);
-	}
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_IDLE, SDE_EVTLOG_FUNC_CASE7);
-	sde_enc->rc_state = SDE_ENC_RC_STATE_IDLE;
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return 0;
-}
-
-static int _sde_encoder_rc_early_wakeup(struct drm_encoder *drm_enc,
-	u32 sw_event, struct sde_encoder_virt *sde_enc,
-	struct msm_drm_private *priv, bool is_vid_mode)
-{
-	bool autorefresh_enabled = false;
-	struct msm_drm_thread *disp_thread;
-	int ret = 0;
-
-	if (!sde_enc->crtc ||
-		sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
-		SDE_DEBUG_ENC(sde_enc,
-			"invalid crtc:%d or crtc index:%d , sw_event:%u\n",
-			sde_enc->crtc == NULL,
-			sde_enc->crtc ? sde_enc->crtc->index : -EINVAL,
-			sw_event);
-		return -EINVAL;
-	}
-
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
-
-	mutex_lock(&sde_enc->rc_lock);
-
-	if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
-		if (sde_enc->cur_master &&
-			sde_enc->cur_master->ops.is_autorefresh_enabled)
-			autorefresh_enabled =
-			sde_enc->cur_master->ops.is_autorefresh_enabled(
-						sde_enc->cur_master);
-		if (autorefresh_enabled) {
-			SDE_DEBUG_ENC(sde_enc,
-				"not handling early wakeup since auto refresh is enabled\n");
-			goto end;
-		}
-
-		if (!sde_crtc_frame_pending(sde_enc->crtc))
-			kthread_mod_delayed_work(&disp_thread->worker,
-					&sde_enc->delayed_off_work,
-					msecs_to_jiffies(
-					IDLE_POWERCOLLAPSE_DURATION));
-	} else if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
-		/* enable all the clks and resources */
-		ret = _sde_encoder_resource_control_helper(drm_enc,
-				true);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"sw_event:%d, rc in state %d\n",
-					sw_event, sde_enc->rc_state);
-			SDE_EVT32(DRMID(drm_enc), sw_event,
-					sde_enc->rc_state,
-					SDE_EVTLOG_ERROR);
-			goto end;
-		}
-
-		_sde_encoder_update_rsc_client(drm_enc, true);
-
-		/*
-		 * In some cases, commit comes with slight delay
-		 * (> 80 ms)after early wake up, prevent clock switch
-		 * off to avoid jank in next update. So, increase the
-		 * command mode idle timeout sufficiently to prevent
-		 * such case.
-		 */
-		kthread_mod_delayed_work(&disp_thread->worker,
-				&sde_enc->delayed_off_work,
-				msecs_to_jiffies(
-				IDLE_POWERCOLLAPSE_IN_EARLY_WAKEUP));
-
-		sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
-	}
-
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-			SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE8);
-
-end:
-	mutex_unlock(&sde_enc->rc_lock);
-	return ret;
-}
-
-static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
-		u32 sw_event)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv;
-	int ret = 0;
-	bool is_vid_mode = false;
-
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid encoder parameters, sw_event:%u\n",
-				sw_event);
-		return -EINVAL;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
-	is_vid_mode = sde_enc->disp_info.capabilities &
-						MSM_DISPLAY_CAP_VID_MODE;
-
-	/*
-	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
-	 * events and return early for other events (ie wb display).
-	 */
-	if (!sde_enc->idle_pc_enabled &&
-			(sw_event != SDE_ENC_RC_EVENT_KICKOFF &&
-			sw_event != SDE_ENC_RC_EVENT_PRE_MODESET &&
-			sw_event != SDE_ENC_RC_EVENT_POST_MODESET &&
-			sw_event != SDE_ENC_RC_EVENT_STOP &&
-			sw_event != SDE_ENC_RC_EVENT_PRE_STOP))
-		return 0;
-
-	SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc:%d\n",
-			sw_event, sde_enc->idle_pc_enabled);
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_enabled,
-			sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
-
-	switch (sw_event) {
-	case SDE_ENC_RC_EVENT_KICKOFF:
-		ret = _sde_encoder_rc_kickoff(drm_enc, sw_event, sde_enc,
-				is_vid_mode);
-		break;
-	case SDE_ENC_RC_EVENT_FRAME_DONE:
-		ret = _sde_encoder_rc_frame_done(drm_enc, sw_event, sde_enc,
-				priv);
-		break;
-	case SDE_ENC_RC_EVENT_PRE_STOP:
-		ret = _sde_encoder_rc_pre_stop(drm_enc, sw_event, sde_enc,
-				is_vid_mode);
-		break;
-	case SDE_ENC_RC_EVENT_STOP:
-		ret = _sde_encoder_rc_stop(drm_enc, sw_event, sde_enc);
-		break;
-	case SDE_ENC_RC_EVENT_PRE_MODESET:
-		ret = _sde_encoder_rc_pre_modeset(drm_enc, sw_event, sde_enc);
-		break;
-	case SDE_ENC_RC_EVENT_POST_MODESET:
-		ret = _sde_encoder_rc_post_modeset(drm_enc, sw_event, sde_enc);
-		break;
-	case SDE_ENC_RC_EVENT_ENTER_IDLE:
-		ret = _sde_encoder_rc_idle(drm_enc, sw_event, sde_enc,
-				is_vid_mode);
-		break;
-	case SDE_ENC_RC_EVENT_EARLY_WAKEUP:
-		ret = _sde_encoder_rc_early_wakeup(drm_enc, sw_event, sde_enc,
-				priv, is_vid_mode);
-		break;
-	default:
-		SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
-		SDE_ERROR("unexpected sw_event: %d\n", sw_event);
-		break;
-	}
-
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_enabled,
-			sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
-	return ret;
-}
-
-static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
-				      struct drm_display_mode *mode,
-				      struct drm_display_mode *adj_mode)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct list_head *connector_list;
-	struct drm_connector *conn = NULL, *conn_iter;
-	struct sde_connector_state *sde_conn_state = NULL;
-	struct sde_connector *sde_conn = NULL;
-	struct sde_rm_hw_iter dsc_iter, pp_iter;
-	struct sde_rm_hw_request request_hw;
-	int i = 0, ret;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	connector_list = &sde_kms->dev->mode_config.connector_list;
-
-	SDE_EVT32(DRMID(drm_enc));
-
-	/*
-	 * cache the crtc in sde_enc on enable for duration of use case
-	 * for correctly servicing asynchronous irq events and timers
-	 */
-	if (!drm_enc->crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-	sde_enc->crtc = drm_enc->crtc;
-
-	list_for_each_entry(conn_iter, connector_list, head)
-		if (conn_iter->encoder == drm_enc)
-			conn = conn_iter;
-
-	if (!conn) {
-		SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
-		return;
-	} else if (!conn->state) {
-		SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
-		return;
-	}
-
-	sde_conn = to_sde_connector(conn);
-	sde_conn_state = to_sde_connector_state(conn->state);
-	if (sde_conn && sde_conn_state) {
-		ret = sde_conn->ops.get_mode_info(&sde_conn->base, adj_mode,
-				&sde_conn_state->mode_info,
-				sde_kms->catalog->max_mixer_width,
-				sde_conn->display);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-				"failed to get mode info from the display\n");
-			return;
-		}
-	}
-
-	/* release resources before seamless mode change */
-	if (msm_is_mode_seamless_dms(adj_mode)) {
-		/* restore resource state before releasing them */
-		ret = sde_encoder_resource_control(drm_enc,
-				SDE_ENC_RC_EVENT_PRE_MODESET);
-		if (ret) {
-			SDE_ERROR_ENC(sde_enc,
-					"sde resource control failed: %d\n",
-					ret);
-			return;
-		}
-
-		/*
-		 * Disable dsc before switch the mode and after pre_modeset,
-		 * to guarantee that previous kickoff finished.
-		 */
-		_sde_encoder_dsc_disable(sde_enc);
-	}
-
-	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
-	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
-			conn->state, false);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc,
-				"failed to reserve hw resources, %d\n", ret);
-		return;
-	}
-
-	sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		sde_enc->hw_pp[i] = NULL;
-		if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
-			break;
-		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
-	}
-
-	sde_rm_init_hw_iter(&dsc_iter, drm_enc->base.id, SDE_HW_BLK_DSC);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		sde_enc->hw_dsc[i] = NULL;
-		if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
-			break;
-		sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
-	}
-
-	/* Get PP for DSC configuration */
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		sde_enc->hw_dsc_pp[i] = NULL;
-		if (!sde_enc->hw_dsc[i])
-			continue;
-
-		request_hw.id = sde_enc->hw_dsc[i]->base.id;
-		request_hw.type = SDE_HW_BLK_PINGPONG;
-		if (!sde_rm_request_hw_blk(&sde_kms->rm, &request_hw))
-			break;
-		sde_enc->hw_dsc_pp[i] =
-			(struct sde_hw_pingpong *) request_hw.hw;
-	}
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys) {
-			if (!sde_enc->hw_pp[i]) {
-				SDE_ERROR_ENC(sde_enc,
-				    "invalid pingpong block for the encoder\n");
-				return;
-			}
-			phys->hw_pp = sde_enc->hw_pp[i];
-			phys->connector = conn->state->connector;
-			if (phys->ops.mode_set)
-				phys->ops.mode_set(phys, mode, adj_mode);
-		}
-	}
-
-	/* update resources after seamless mode change */
-	if (msm_is_mode_seamless_dms(adj_mode))
-		sde_encoder_resource_control(&sde_enc->base,
-						SDE_ENC_RC_EVENT_POST_MODESET);
-}
-
-void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde encoder\n");
-		return;
-	}
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (phys && phys->ops.control_te)
-			phys->ops.control_te(phys, enable);
-	}
-}
-
-static int _sde_encoder_input_connect(struct input_handler *handler,
-	struct input_dev *dev, const struct input_device_id *id)
-{
-	struct input_handle *handle;
-	int rc = 0;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	handle->dev = dev;
-	handle->handler = handler;
-	handle->name = handler->name;
-
-	rc = input_register_handle(handle);
-	if (rc) {
-		pr_err("failed to register input handle\n");
-		goto error;
-	}
-
-	rc = input_open_device(handle);
-	if (rc) {
-		pr_err("failed to open input device\n");
-		goto error_unregister;
-	}
-
-	return 0;
-
-error_unregister:
-	input_unregister_handle(handle);
-
-error:
-	kfree(handle);
-
-	return rc;
-}
-
-static void _sde_encoder_input_disconnect(struct input_handle *handle)
-{
-	 input_close_device(handle);
-	 input_unregister_handle(handle);
-	 kfree(handle);
-}
-
-/**
- * Structure for specifying event parameters on which to receive callbacks.
- * This structure will trigger a callback in case of a touch event (specified by
- * EV_ABS) where there is a change in X and Y coordinates,
- */
-static const struct input_device_id sde_input_ids[] = {
-	{
-		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
-		.evbit = { BIT_MASK(EV_ABS) },
-		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
-					BIT_MASK(ABS_MT_POSITION_X) |
-					BIT_MASK(ABS_MT_POSITION_Y) },
-	},
-	{ },
-};
-
-static int _sde_encoder_input_handler_register(
-		struct input_handler *input_handler)
-{
-	int rc = 0;
-
-	rc = input_register_handler(input_handler);
-	if (rc) {
-		pr_err("input_register_handler failed, rc= %d\n", rc);
-		kfree(input_handler);
-		return rc;
-	}
-
-	return rc;
-}
-
-static int _sde_encoder_input_handler(
-		struct sde_encoder_virt *sde_enc)
-{
-	struct input_handler *input_handler = NULL;
-	int rc = 0;
-
-	if (sde_enc->input_handler) {
-		SDE_ERROR_ENC(sde_enc,
-				"input_handle is active. unexpected\n");
-		return -EINVAL;
-	}
-
-	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
-	if (!input_handler)
-		return -ENOMEM;
-
-	input_handler->event = sde_encoder_input_event_handler;
-	input_handler->connect = _sde_encoder_input_connect;
-	input_handler->disconnect = _sde_encoder_input_disconnect;
-	input_handler->name = "sde";
-	input_handler->id_table = sde_input_ids;
-	input_handler->private = sde_enc;
-
-	sde_enc->input_handler = input_handler;
-
-	return rc;
-}
-
-static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc || !sde_enc->cur_master) {
-		SDE_ERROR("invalid sde encoder/master\n");
-		return;
-	}
-
-	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
-	    sde_enc->cur_master->hw_mdptop &&
-	    sde_enc->cur_master->hw_mdptop->ops.intf_audio_select)
-		sde_enc->cur_master->hw_mdptop->ops.intf_audio_select(
-					sde_enc->cur_master->hw_mdptop);
-
-	if (sde_enc->cur_master->hw_mdptop &&
-			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
-		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
-				sde_enc->cur_master->hw_mdptop,
-				sde_kms->catalog);
-
-	if (sde_enc->cur_master->hw_ctl &&
-			sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1 &&
-			!sde_enc->cur_master->cont_splash_enabled)
-		sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1(
-				sde_enc->cur_master->hw_ctl,
-				&sde_enc->cur_master->intf_cfg_v1);
-
-	_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
-	sde_encoder_control_te(drm_enc, true);
-
-	memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
-	memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
-}
-
-void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	memset(&sde_enc->cur_master->intf_cfg_v1, 0,
-			sizeof(sde_enc->cur_master->intf_cfg_v1));
-	sde_enc->idle_pc_restore = true;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys)
-			continue;
-
-		if (phys->hw_ctl && phys->hw_ctl->ops.clear_pending_flush)
-			phys->hw_ctl->ops.clear_pending_flush(phys->hw_ctl);
-
-		if ((phys != sde_enc->cur_master) && phys->ops.restore)
-			phys->ops.restore(phys);
-	}
-
-	if (sde_enc->cur_master && sde_enc->cur_master->ops.restore)
-		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
-
-	_sde_encoder_virt_enable_helper(drm_enc);
-}
-
-static void sde_encoder_off_work(struct kthread_work *work)
-{
-	struct sde_encoder_virt *sde_enc = container_of(work,
-			struct sde_encoder_virt, delayed_off_work.work);
-	struct drm_encoder *drm_enc;
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde encoder\n");
-		return;
-	}
-	drm_enc = &sde_enc->base;
-
-	SDE_ATRACE_BEGIN("sde_encoder_off_work");
-	sde_encoder_idle_request(drm_enc);
-	SDE_ATRACE_END("sde_encoder_off_work");
-}
-
-static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i, ret = 0;
-	struct msm_compression_info *comp_info = NULL;
-	struct drm_display_mode *cur_mode = NULL;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	disp_info = &sde_enc->disp_info;
-
-	if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	if (drm_enc->crtc && !sde_enc->crtc)
-		sde_enc->crtc = drm_enc->crtc;
-
-	comp_info = &sde_enc->mode_info.comp_info;
-	cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
-
-	SDE_DEBUG_ENC(sde_enc, "\n");
-	SDE_EVT32(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay);
-
-	sde_enc->cur_master = NULL;
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
-			SDE_DEBUG_ENC(sde_enc, "master is now idx %d\n", i);
-			sde_enc->cur_master = phys;
-			break;
-		}
-	}
-
-	if (!sde_enc->cur_master) {
-		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
-		return;
-	}
-
-	/* register input handler if not already registered */
-	if (sde_enc->input_handler && !msm_is_mode_seamless_dms(cur_mode)) {
-		ret = _sde_encoder_input_handler_register(
-				sde_enc->input_handler);
-		if (ret)
-			SDE_ERROR(
-			"input handler registration failed, rc = %d\n", ret);
-	}
-
-	if (!(msm_is_mode_seamless_vrr(cur_mode)
-			|| msm_is_mode_seamless_dms(cur_mode)))
-		kthread_init_delayed_work(&sde_enc->delayed_off_work,
-			sde_encoder_off_work);
-
-	ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
-				ret);
-		return;
-	}
-
-	memset(&sde_enc->cur_master->intf_cfg_v1, 0,
-			sizeof(sde_enc->cur_master->intf_cfg_v1));
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys)
-			continue;
-
-		phys->comp_type = comp_info->comp_type;
-		phys->comp_ratio = comp_info->comp_ratio;
-		phys->wide_bus_en = sde_enc->mode_info.wide_bus_en;
-		phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
-		if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
-			phys->dsc_extra_pclk_cycle_cnt =
-				comp_info->dsc_info.pclk_per_line;
-			phys->dsc_extra_disp_width =
-				comp_info->dsc_info.extra_width;
-		}
-		if (phys != sde_enc->cur_master) {
-			/**
-			 * on DMS request, the encoder will be enabled
-			 * already. Invoke restore to reconfigure the
-			 * new mode.
-			 */
-			if (msm_is_mode_seamless_dms(cur_mode) &&
-					phys->ops.restore)
-				phys->ops.restore(phys);
-			else if (phys->ops.enable)
-				phys->ops.enable(phys);
-		}
-
-		if (sde_enc->misr_enable && (sde_enc->disp_info.capabilities &
-		     MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
-			phys->ops.setup_misr(phys, true,
-						sde_enc->misr_frame_count);
-	}
-
-	if (msm_is_mode_seamless_dms(cur_mode) &&
-			sde_enc->cur_master->ops.restore)
-		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
-	else if (sde_enc->cur_master->ops.enable)
-		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
-
-	_sde_encoder_virt_enable_helper(drm_enc);
-}
-
-static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	enum sde_intf_mode intf_mode;
-	int i = 0;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	} else if (!drm_enc->dev) {
-		SDE_ERROR("invalid dev\n");
-		return;
-	} else if (!drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid dev_private\n");
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	intf_mode = sde_encoder_get_intf_mode(drm_enc);
-
-	SDE_EVT32(DRMID(drm_enc));
-
-	/* wait for idle */
-	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
-
-	if (sde_enc->input_handler)
-		input_unregister_handler(sde_enc->input_handler);
-
-	/*
-	 * For primary command mode and video mode encoders, execute the
-	 * resource control pre-stop operations before the physical encoders
-	 * are disabled, to allow the rsc to transition its states properly.
-	 *
-	 * For other encoder types, rsc should not be enabled until after
-	 * they have been fully disabled, so delay the pre-stop operations
-	 * until after the physical disable calls have returned.
-	 */
-	if (sde_enc->disp_info.is_primary &&
-	    (intf_mode == INTF_MODE_CMD || intf_mode == INTF_MODE_VIDEO)) {
-		sde_encoder_resource_control(drm_enc,
-				SDE_ENC_RC_EVENT_PRE_STOP);
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-			if (phys && phys->ops.disable)
-				phys->ops.disable(phys);
-		}
-	} else {
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-			if (phys && phys->ops.disable)
-				phys->ops.disable(phys);
-		}
-		sde_encoder_resource_control(drm_enc,
-				SDE_ENC_RC_EVENT_PRE_STOP);
-	}
-
-	/*
-	 * disable dsc after the transfer is complete (for command mode)
-	 * and after physical encoder is disabled, to make sure timing
-	 * engine is already disabled (for video mode).
-	 */
-	_sde_encoder_dsc_disable(sde_enc);
-
-	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		if (sde_enc->phys_encs[i]) {
-			sde_enc->phys_encs[i]->cont_splash_enabled = false;
-			sde_enc->phys_encs[i]->cont_splash_single_flush = 0;
-			sde_enc->phys_encs[i]->connector = NULL;
-		}
-	}
-
-	sde_enc->cur_master = NULL;
-	/*
-	 * clear the cached crtc in sde_enc on use case finish, after all the
-	 * outstanding events and timers have been completed
-	 */
-	sde_enc->crtc = NULL;
-	memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
-
-	SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
-
-	sde_rm_release(&sde_kms->rm, drm_enc, false);
-}
-
-void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_phys_wb *wb_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (wb_enc) {
-		if (sde_encoder_helper_reset_mixers(phys_enc,
-				wb_enc->fb_disable))
-			return;
-
-		if (wb_enc->hw_wb->ops.bind_pingpong_blk) {
-			wb_enc->hw_wb->ops.bind_pingpong_blk(wb_enc->hw_wb,
-					false, phys_enc->hw_pp->idx);
-
-			if (phys_enc->hw_ctl->ops.update_bitmask_wb)
-				phys_enc->hw_ctl->ops.update_bitmask_wb(
-						phys_enc->hw_ctl,
-						wb_enc->hw_wb->idx, true);
-		}
-	} else {
-		if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
-			phys_enc->hw_intf->ops.bind_pingpong_blk(
-					phys_enc->hw_intf, false,
-					phys_enc->hw_pp->idx);
-
-			if (phys_enc->hw_ctl->ops.update_bitmask_intf)
-				phys_enc->hw_ctl->ops.update_bitmask_intf(
-						phys_enc->hw_ctl,
-						phys_enc->hw_intf->idx, true);
-		}
-	}
-
-	if (phys_enc->hw_pp && phys_enc->hw_pp->ops.reset_3d_mode) {
-		phys_enc->hw_pp->ops.reset_3d_mode(phys_enc->hw_pp);
-
-		if (phys_enc->hw_ctl->ops.update_bitmask_merge3d &&
-				phys_enc->hw_pp->merge_3d)
-			phys_enc->hw_ctl->ops.update_bitmask_merge3d(
-					phys_enc->hw_ctl,
-					phys_enc->hw_pp->merge_3d->idx, true);
-	}
-
-	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.bind_pingpong_blk &&
-			phys_enc->hw_pp) {
-		phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
-				false, phys_enc->hw_pp->idx);
-
-		if (phys_enc->hw_ctl->ops.update_bitmask_cdm)
-			phys_enc->hw_ctl->ops.update_bitmask_cdm(
-					phys_enc->hw_ctl,
-					phys_enc->hw_cdm->idx, true);
-	}
-
-	sde_enc = to_sde_encoder_virt(phys_enc->parent);
-
-	if (phys_enc == sde_enc->cur_master && phys_enc->hw_pp &&
-			phys_enc->hw_ctl->ops.reset_post_disable)
-		phys_enc->hw_ctl->ops.reset_post_disable(
-				phys_enc->hw_ctl, &phys_enc->intf_cfg_v1,
-				phys_enc->hw_pp->merge_3d ?
-				phys_enc->hw_pp->merge_3d->idx : 0);
-
-	phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
-	phys_enc->hw_ctl->ops.trigger_start(phys_enc->hw_ctl);
-}
-
-static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
-		enum sde_intf_type type, u32 controller_id)
-{
-	int i = 0;
-
-	for (i = 0; i < catalog->intf_count; i++) {
-		if (catalog->intf[i].type == type
-		    && catalog->intf[i].controller_id == controller_id) {
-			return catalog->intf[i].id;
-		}
-	}
-
-	return INTF_MAX;
-}
-
-static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
-		enum sde_intf_type type, u32 controller_id)
-{
-	if (controller_id < catalog->wb_count)
-		return catalog->wb[controller_id].id;
-
-	return WB_MAX;
-}
-
-void sde_encoder_perf_uidle_status(struct sde_kms *sde_kms,
-	struct drm_crtc *crtc)
-{
-	struct sde_hw_uidle *uidle;
-	struct sde_uidle_cntr cntr;
-	struct sde_uidle_status status;
-
-	if (!sde_kms || !crtc || !sde_kms->hw_uidle) {
-		pr_err("invalid params %d %d\n",
-			!sde_kms, !crtc);
-		return;
-	}
-
-	/* check if perf counters are enabled and setup */
-	if (!sde_kms->catalog->uidle_cfg.perf_cntr_en)
-		return;
-
-	uidle = sde_kms->hw_uidle;
-	if ((sde_kms->catalog->uidle_cfg.debugfs_perf & SDE_PERF_UIDLE_STATUS)
-			&& uidle->ops.uidle_get_status) {
-
-		uidle->ops.uidle_get_status(uidle, &status);
-		trace_sde_perf_uidle_status(
-			crtc->base.id,
-			status.uidle_danger_status_0,
-			status.uidle_danger_status_1,
-			status.uidle_safe_status_0,
-			status.uidle_safe_status_1,
-			status.uidle_idle_status_0,
-			status.uidle_idle_status_1,
-			status.uidle_fal_status_0,
-			status.uidle_fal_status_1);
-	}
-
-	if ((sde_kms->catalog->uidle_cfg.debugfs_perf & SDE_PERF_UIDLE_CNT)
-			&& uidle->ops.uidle_get_cntr) {
-
-		uidle->ops.uidle_get_cntr(uidle, &cntr);
-		trace_sde_perf_uidle_cntr(
-			crtc->base.id,
-			cntr.fal1_gate_cntr,
-			cntr.fal10_gate_cntr,
-			cntr.fal_wait_gate_cntr,
-			cntr.fal1_num_transitions_cntr,
-			cntr.fal10_num_transitions_cntr,
-			cntr.min_gate_cntr,
-			cntr.max_gate_cntr);
-	}
-}
-
-static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
-		struct sde_encoder_phys *phy_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	unsigned long lock_flags;
-
-	if (!drm_enc || !phy_enc)
-		return;
-
-	SDE_ATRACE_BEGIN("encoder_vblank_callback");
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
-	if (sde_enc->crtc_vblank_cb)
-		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
-	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
-
-	if (phy_enc->sde_kms &&
-			phy_enc->sde_kms->catalog->uidle_cfg.debugfs_perf)
-		sde_encoder_perf_uidle_status(phy_enc->sde_kms, sde_enc->crtc);
-
-	atomic_inc(&phy_enc->vsync_cnt);
-	SDE_ATRACE_END("encoder_vblank_callback");
-}
-
-static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
-		struct sde_encoder_phys *phy_enc)
-{
-	if (!phy_enc)
-		return;
-
-	SDE_ATRACE_BEGIN("encoder_underrun_callback");
-	atomic_inc(&phy_enc->underrun_cnt);
-	SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
-
-	trace_sde_encoder_underrun(DRMID(drm_enc),
-		atomic_read(&phy_enc->underrun_cnt));
-
-	SDE_DBG_CTRL("stop_ftrace");
-	SDE_DBG_CTRL("panic_underrun");
-
-	SDE_ATRACE_END("encoder_underrun_callback");
-}
-
-void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
-		void (*vbl_cb)(void *), void *vbl_data)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	unsigned long lock_flags;
-	bool enable;
-	int i;
-
-	enable = vbl_cb ? true : false;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_DEBUG_ENC(sde_enc, "\n");
-	SDE_EVT32(DRMID(drm_enc), enable);
-
-	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
-	sde_enc->crtc_vblank_cb = vbl_cb;
-	sde_enc->crtc_vblank_cb_data = vbl_data;
-	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->ops.control_vblank_irq)
-			phys->ops.control_vblank_irq(phys, enable);
-	}
-	sde_enc->vblank_enabled = enable;
-}
-
-void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
-			void (*frame_event_cb)(void *, u32 event),
-			struct drm_crtc *crtc)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	unsigned long lock_flags;
-	bool enable;
-
-	enable = frame_event_cb ? true : false;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_DEBUG_ENC(sde_enc, "\n");
-	SDE_EVT32(DRMID(drm_enc), enable, 0);
-
-	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
-	sde_enc->crtc_frame_event_cb = frame_event_cb;
-	sde_enc->crtc_frame_event_cb_data.crtc = crtc;
-	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
-}
-
-static void sde_encoder_frame_done_callback(
-		struct drm_encoder *drm_enc,
-		struct sde_encoder_phys *ready_phys, u32 event)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	unsigned int i;
-	bool trigger = true, is_cmd_mode;
-	enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
-
-	if (!drm_enc || !sde_enc->cur_master) {
-		SDE_ERROR("invalid param: drm_enc %pK, cur_master %pK\n",
-				drm_enc, drm_enc ? sde_enc->cur_master : 0);
-		return;
-	}
-
-	sde_enc->crtc_frame_event_cb_data.connector =
-				sde_enc->cur_master->connector;
-	is_cmd_mode = sde_enc->disp_info.capabilities &
-					MSM_DISPLAY_CAP_CMD_MODE;
-
-	if (event & (SDE_ENCODER_FRAME_EVENT_DONE
-			| SDE_ENCODER_FRAME_EVENT_ERROR
-			| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD) && is_cmd_mode) {
-
-		if (ready_phys->connector)
-			topology = sde_connector_get_topology_name(
-							ready_phys->connector);
-
-		/* One of the physical encoders has become idle */
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			if ((sde_enc->phys_encs[i] == ready_phys) ||
-			     (event & SDE_ENCODER_FRAME_EVENT_ERROR)) {
-				SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
-				     atomic_read(&sde_enc->frame_done_cnt[i]));
-				if (!atomic_add_unless(
-					&sde_enc->frame_done_cnt[i], 1, 1)) {
-					SDE_EVT32(DRMID(drm_enc), event,
-						ready_phys->intf_idx,
-						SDE_EVTLOG_ERROR);
-					SDE_ERROR_ENC(sde_enc,
-						"intf idx:%d, event:%d\n",
-						ready_phys->intf_idx, event);
-					return;
-				}
-			}
-
-			if (topology != SDE_RM_TOPOLOGY_PPSPLIT &&
-			    atomic_read(&sde_enc->frame_done_cnt[i]) != 1)
-				trigger = false;
-		}
-
-		if (trigger) {
-			sde_encoder_resource_control(drm_enc,
-					SDE_ENC_RC_EVENT_FRAME_DONE);
-
-			if (sde_enc->crtc_frame_event_cb)
-				sde_enc->crtc_frame_event_cb(
-					&sde_enc->crtc_frame_event_cb_data,
-					event);
-			for (i = 0; i < sde_enc->num_phys_encs; i++)
-				atomic_set(&sde_enc->frame_done_cnt[i], 0);
-		}
-	} else if (sde_enc->crtc_frame_event_cb) {
-		if (!is_cmd_mode)
-			sde_encoder_resource_control(drm_enc,
-					SDE_ENC_RC_EVENT_FRAME_DONE);
-
-		sde_enc->crtc_frame_event_cb(
-				&sde_enc->crtc_frame_event_cb_data, event);
-	}
-}
-
-static void sde_encoder_get_qsync_fps_callback(
-	struct drm_encoder *drm_enc,
-	u32 *qsync_fps)
-{
-	struct msm_display_info *disp_info;
-	struct sde_encoder_virt *sde_enc;
-
-	if (!qsync_fps)
-		return;
-
-	*qsync_fps = 0;
-	if (!drm_enc) {
-		SDE_ERROR("invalid drm encoder\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	disp_info = &sde_enc->disp_info;
-	*qsync_fps = disp_info->qsync_min_fps;
-}
-
-int sde_encoder_idle_request(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid drm encoder\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	sde_encoder_resource_control(&sde_enc->base,
-						SDE_ENC_RC_EVENT_ENTER_IDLE);
-
-	return 0;
-}
-
-/**
- * _sde_encoder_trigger_flush - trigger flush for a physical encoder
- * drm_enc: Pointer to drm encoder structure
- * phys: Pointer to physical encoder structure
- * extra_flush: Additional bit mask to include in flush trigger
- */
-static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
-		struct sde_encoder_phys *phys,
-		struct sde_ctl_flush_cfg *extra_flush)
-{
-	struct sde_hw_ctl *ctl;
-	unsigned long lock_flags;
-	struct sde_encoder_virt *sde_enc;
-	int pend_ret_fence_cnt;
-	struct sde_connector *c_conn;
-
-	if (!drm_enc || !phys) {
-		SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
-				!drm_enc, !phys);
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	c_conn = to_sde_connector(phys->connector);
-
-	if (!phys->hw_pp) {
-		SDE_ERROR("invalid pingpong hw\n");
-		return;
-	}
-
-	ctl = phys->hw_ctl;
-	if (!ctl || !phys->ops.trigger_flush) {
-		SDE_ERROR("missing ctl/trigger cb\n");
-		return;
-	}
-
-	if (phys->split_role == ENC_ROLE_SKIP) {
-		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
-				"skip flush pp%d ctl%d\n",
-				phys->hw_pp->idx - PINGPONG_0,
-				ctl->idx - CTL_0);
-		return;
-	}
-
-	/* update pending counts and trigger kickoff ctl flush atomically */
-	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
-
-	if (phys->ops.is_master && phys->ops.is_master(phys))
-		atomic_inc(&phys->pending_retire_fence_cnt);
-
-	pend_ret_fence_cnt = atomic_read(&phys->pending_retire_fence_cnt);
-
-	if (phys->hw_intf && phys->hw_intf->cap->type == INTF_DP &&
-			ctl->ops.update_bitmask_periph) {
-		/* perform peripheral flush on every frame update for dp dsc */
-		if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
-				phys->comp_ratio && c_conn->ops.update_pps) {
-			c_conn->ops.update_pps(phys->connector, NULL,
-					c_conn->display);
-			ctl->ops.update_bitmask_periph(ctl,
-					phys->hw_intf->idx, 1);
-		}
-
-		if (sde_enc->dynamic_hdr_updated)
-			ctl->ops.update_bitmask_periph(ctl,
-					phys->hw_intf->idx, 1);
-	}
-
-	if ((extra_flush && extra_flush->pending_flush_mask)
-			&& ctl->ops.update_pending_flush)
-		ctl->ops.update_pending_flush(ctl, extra_flush);
-
-	phys->ops.trigger_flush(phys);
-
-	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
-
-	if (ctl->ops.get_pending_flush) {
-		struct sde_ctl_flush_cfg pending_flush = {0,};
-
-		ctl->ops.get_pending_flush(ctl, &pending_flush);
-		SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
-				ctl->idx - CTL_0,
-				pending_flush.pending_flush_mask,
-				pend_ret_fence_cnt);
-	} else {
-		SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
-				ctl->idx - CTL_0,
-				pend_ret_fence_cnt);
-	}
-}
-
-/**
- * _sde_encoder_trigger_start - trigger start for a physical encoder
- * phys: Pointer to physical encoder structure
- */
-static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
-{
-	struct sde_hw_ctl *ctl;
-	struct sde_encoder_virt *sde_enc;
-
-	if (!phys) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	if (!phys->hw_pp) {
-		SDE_ERROR("invalid pingpong hw\n");
-		return;
-	}
-
-	if (!phys->parent) {
-		SDE_ERROR("invalid parent\n");
-		return;
-	}
-	/* avoid ctrl start for encoder in clone mode */
-	if (phys->in_clone_mode)
-		return;
-
-	ctl = phys->hw_ctl;
-	sde_enc = to_sde_encoder_virt(phys->parent);
-
-	if (phys->split_role == ENC_ROLE_SKIP) {
-		SDE_DEBUG_ENC(sde_enc,
-				"skip start pp%d ctl%d\n",
-				phys->hw_pp->idx - PINGPONG_0,
-				ctl->idx - CTL_0);
-		return;
-	}
-
-	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
-		phys->ops.trigger_start(phys);
-}
-
-void sde_encoder_helper_trigger_flush(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	ctl = phys_enc->hw_ctl;
-	if (ctl && ctl->ops.trigger_flush)
-		ctl->ops.trigger_flush(ctl);
-}
-
-void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	ctl = phys_enc->hw_ctl;
-	if (ctl && ctl->ops.trigger_start) {
-		ctl->ops.trigger_start(ctl);
-		SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
-	}
-}
-
-void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_connector *sde_con;
-	void *sde_con_disp;
-	struct sde_hw_ctl *ctl;
-	int rc;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(phys_enc->parent);
-	ctl = phys_enc->hw_ctl;
-
-	if (!ctl || !ctl->ops.reset)
-		return;
-
-	SDE_DEBUG_ENC(sde_enc, "ctl %d reset\n",  ctl->idx);
-	SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
-
-	if (phys_enc->ops.is_master && phys_enc->ops.is_master(phys_enc) &&
-			phys_enc->connector) {
-		sde_con = to_sde_connector(phys_enc->connector);
-		sde_con_disp = sde_connector_get_display(phys_enc->connector);
-
-		if (sde_con->ops.soft_reset) {
-			rc = sde_con->ops.soft_reset(sde_con_disp);
-			if (rc) {
-				SDE_ERROR_ENC(sde_enc,
-						"connector soft reset failure\n");
-				SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus",
-								"panic");
-			}
-		}
-	}
-
-	phys_enc->enable_state = SDE_ENC_ENABLED;
-}
-
-/**
- * _sde_encoder_kickoff_phys - handle physical encoder kickoff
- *	Iterate through the physical encoders and perform consolidated flush
- *	and/or control start triggering as needed. This is done in the virtual
- *	encoder rather than the individual physical ones in order to handle
- *	use cases that require visibility into multiple physical encoders at
- *	a time.
- * sde_enc: Pointer to virtual encoder structure
- */
-static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
-{
-	struct sde_hw_ctl *ctl;
-	uint32_t i;
-	struct sde_ctl_flush_cfg pending_flush = {0,};
-	u32 pending_kickoff_cnt;
-	struct msm_drm_private *priv = NULL;
-	struct sde_kms *sde_kms = NULL;
-	bool is_vid_mode = false;
-	struct sde_crtc_misr_info crtc_misr_info = {false, 0};
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	is_vid_mode = sde_enc->disp_info.capabilities &
-					MSM_DISPLAY_CAP_VID_MODE;
-
-	/* don't perform flush/start operations for slave encoders */
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
-
-		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
-			continue;
-
-		ctl = phys->hw_ctl;
-		if (!ctl)
-			continue;
-
-		if (phys->connector)
-			topology = sde_connector_get_topology_name(
-					phys->connector);
-
-		if (!phys->ops.needs_single_flush ||
-				!phys->ops.needs_single_flush(phys)) {
-			if (ctl->ops.reg_dma_flush)
-				ctl->ops.reg_dma_flush(ctl, is_vid_mode);
-			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
-		} else if (ctl->ops.get_pending_flush) {
-			ctl->ops.get_pending_flush(ctl, &pending_flush);
-		}
-	}
-
-	/* for split flush, combine pending flush masks and send to master */
-	if (pending_flush.pending_flush_mask && sde_enc->cur_master) {
-		ctl = sde_enc->cur_master->hw_ctl;
-		if (ctl->ops.reg_dma_flush)
-			ctl->ops.reg_dma_flush(ctl, is_vid_mode);
-		_sde_encoder_trigger_flush(&sde_enc->base, sde_enc->cur_master,
-						&pending_flush);
-	}
-
-	/* update pending_kickoff_cnt AFTER flush but before trigger start */
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
-			continue;
-
-		if (!phys->ops.needs_single_flush ||
-				!phys->ops.needs_single_flush(phys)) {
-			pending_kickoff_cnt =
-					sde_encoder_phys_inc_pending(phys);
-			SDE_EVT32(pending_kickoff_cnt, SDE_EVTLOG_FUNC_CASE1);
-		} else {
-			pending_kickoff_cnt =
-					sde_encoder_phys_inc_pending(phys);
-			SDE_EVT32(pending_kickoff_cnt,
-					pending_flush.pending_flush_mask,
-					SDE_EVTLOG_FUNC_CASE2);
-		}
-	}
-
-	if (sde_enc->misr_enable)
-		sde_encoder_misr_configure(&sde_enc->base, true,
-				sde_enc->misr_frame_count);
-
-	sde_crtc_get_misr_info(sde_enc->crtc, &crtc_misr_info);
-	if (crtc_misr_info.misr_enable)
-		sde_crtc_misr_setup(sde_enc->crtc, true,
-				crtc_misr_info.misr_frame_count);
-
-	_sde_encoder_trigger_start(sde_enc->cur_master);
-
-	if (sde_enc->elevated_ahb_vote) {
-		priv = sde_enc->base.dev->dev_private;
-		if (priv != NULL) {
-			sde_kms = to_sde_kms(priv->kms);
-			if (sde_kms != NULL) {
-				sde_power_scale_reg_bus(&priv->phandle,
-						sde_kms->core_client,
-						VOTE_INDEX_LOW,
-						false);
-			}
-		}
-		sde_enc->elevated_ahb_vote = false;
-	}
-
-}
-
-static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
-		struct drm_encoder *drm_enc,
-		unsigned long *affected_displays,
-		int num_active_phys)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *master;
-	enum sde_rm_topology_name topology;
-	bool is_right_only;
-
-	if (!drm_enc || !affected_displays)
-		return;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	master = sde_enc->cur_master;
-	if (!master || !master->connector)
-		return;
-
-	topology = sde_connector_get_topology_name(master->connector);
-	if (topology != SDE_RM_TOPOLOGY_PPSPLIT)
-		return;
-
-	/*
-	 * For pingpong split, the slave pingpong won't generate IRQs. For
-	 * right-only updates, we can't swap pingpongs, or simply swap the
-	 * master/slave assignment, we actually have to swap the interfaces
-	 * so that the master physical encoder will use a pingpong/interface
-	 * that generates irqs on which to wait.
-	 */
-	is_right_only = !test_bit(0, affected_displays) &&
-			test_bit(1, affected_displays);
-
-	if (is_right_only && !sde_enc->intfs_swapped) {
-		/* right-only update swap interfaces */
-		swap(sde_enc->phys_encs[0]->intf_idx,
-				sde_enc->phys_encs[1]->intf_idx);
-		sde_enc->intfs_swapped = true;
-	} else if (!is_right_only && sde_enc->intfs_swapped) {
-		/* left-only or full update, swap back */
-		swap(sde_enc->phys_encs[0]->intf_idx,
-				sde_enc->phys_encs[1]->intf_idx);
-		sde_enc->intfs_swapped = false;
-	}
-
-	SDE_DEBUG_ENC(sde_enc,
-			"right_only %d swapped %d phys0->intf%d, phys1->intf%d\n",
-			is_right_only, sde_enc->intfs_swapped,
-			sde_enc->phys_encs[0]->intf_idx - INTF_0,
-			sde_enc->phys_encs[1]->intf_idx - INTF_0);
-	SDE_EVT32(DRMID(drm_enc), is_right_only, sde_enc->intfs_swapped,
-			sde_enc->phys_encs[0]->intf_idx - INTF_0,
-			sde_enc->phys_encs[1]->intf_idx - INTF_0,
-			*affected_displays);
-
-	/* ppsplit always uses master since ppslave invalid for irqs*/
-	if (num_active_phys == 1)
-		*affected_displays = BIT(0);
-}
-
-static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	int i, num_active_phys;
-	bool master_assigned = false;
-
-	if (!drm_enc || !params)
-		return;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	if (sde_enc->num_phys_encs <= 1)
-		return;
-
-	/* count bits set */
-	num_active_phys = hweight_long(params->affected_displays);
-
-	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
-			params->affected_displays, num_active_phys);
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), params->affected_displays,
-			num_active_phys);
-
-	/* for left/right only update, ppsplit master switches interface */
-	_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
-			&params->affected_displays, num_active_phys);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		enum sde_enc_split_role prv_role, new_role;
-		bool active = false;
-
-		phys = sde_enc->phys_encs[i];
-		if (!phys || !phys->ops.update_split_role || !phys->hw_pp)
-			continue;
-
-		active = test_bit(i, &params->affected_displays);
-		prv_role = phys->split_role;
-
-		if (active && num_active_phys == 1)
-			new_role = ENC_ROLE_SOLO;
-		else if (active && !master_assigned)
-			new_role = ENC_ROLE_MASTER;
-		else if (active)
-			new_role = ENC_ROLE_SLAVE;
-		else
-			new_role = ENC_ROLE_SKIP;
-
-		phys->ops.update_split_role(phys, new_role);
-		if (new_role == ENC_ROLE_SOLO || new_role == ENC_ROLE_MASTER) {
-			sde_enc->cur_master = phys;
-			master_assigned = true;
-		}
-
-		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
-				phys->hw_pp->idx - PINGPONG_0, prv_role,
-				phys->split_role, active);
-		SDE_EVT32(DRMID(drm_enc), params->affected_displays,
-				phys->hw_pp->idx - PINGPONG_0, prv_role,
-				phys->split_role, active, num_active_phys);
-	}
-}
-
-bool sde_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return false;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	disp_info = &sde_enc->disp_info;
-
-	return (disp_info->capabilities & mode);
-}
-
-void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	unsigned int i;
-	struct sde_hw_ctl *ctl;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	disp_info = &sde_enc->disp_info;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-
-		if (phys && phys->hw_ctl) {
-			ctl = phys->hw_ctl;
-			/*
-			 * avoid clearing the pending flush during the first
-			 * frame update after idle power collpase as the
-			 * restore path would have updated the pending flush
-			 */
-			if (!sde_enc->idle_pc_restore &&
-					ctl->ops.clear_pending_flush)
-				ctl->ops.clear_pending_flush(ctl);
-
-			/* update only for command mode primary ctl */
-			if ((phys == sde_enc->cur_master) &&
-			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-			    && ctl->ops.trigger_pending)
-				ctl->ops.trigger_pending(ctl);
-		}
-	}
-	sde_enc->idle_pc_restore = false;
-}
-
-static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
-{
-	void *dither_cfg;
-	int ret = 0, i = 0;
-	size_t len = 0;
-	enum sde_rm_topology_name topology;
-	struct drm_encoder *drm_enc;
-	struct msm_display_dsc_info *dsc = NULL;
-	struct sde_encoder_virt *sde_enc;
-	struct sde_hw_pingpong *hw_pp;
-
-	if (!phys || !phys->connector || !phys->hw_pp ||
-			!phys->hw_pp->ops.setup_dither || !phys->parent)
-		return;
-
-	topology = sde_connector_get_topology_name(phys->connector);
-	if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) &&
-			(phys->split_role == ENC_ROLE_SLAVE))
-		return;
-
-	drm_enc = phys->parent;
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	dsc = &sde_enc->mode_info.comp_info.dsc_info;
-	/* disable dither for 10 bpp or 10bpc dsc config */
-	if (dsc->bpp == 10 || dsc->bpc == 10) {
-		phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0);
-		return;
-	}
-
-	ret = sde_connector_get_dither_cfg(phys->connector,
-			phys->connector->state, &dither_cfg, &len);
-	if (ret)
-		return;
-
-	if (TOPOLOGY_DUALPIPE_MERGE_MODE(topology)) {
-		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-			hw_pp = sde_enc->hw_pp[i];
-			if (hw_pp) {
-				phys->hw_pp->ops.setup_dither(hw_pp, dither_cfg,
-								len);
-			}
-		}
-	} else {
-		phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
-	}
-}
-
-static u32 _sde_encoder_calculate_linetime(struct sde_encoder_virt *sde_enc,
-		struct drm_display_mode *mode)
-{
-	u64 pclk_rate;
-	u32 pclk_period;
-	u32 line_time;
-
-	/*
-	 * For linetime calculation, only operate on master encoder.
-	 */
-	if (!sde_enc->cur_master)
-		return 0;
-
-	if (!sde_enc->cur_master->ops.get_line_count) {
-		SDE_ERROR("get_line_count function not defined\n");
-		return 0;
-	}
-
-	pclk_rate = mode->clock; /* pixel clock in kHz */
-	if (pclk_rate == 0) {
-		SDE_ERROR("pclk is 0, cannot calculate line time\n");
-		return 0;
-	}
-
-	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
-	if (pclk_period == 0) {
-		SDE_ERROR("pclk period is 0\n");
-		return 0;
-	}
-
-	/*
-	 * Line time calculation based on Pixel clock and HTOTAL.
-	 * Final unit is in ns.
-	 */
-	line_time = (pclk_period * mode->htotal) / 1000;
-	if (line_time == 0) {
-		SDE_ERROR("line time calculation is 0\n");
-		return 0;
-	}
-
-	SDE_DEBUG_ENC(sde_enc,
-			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
-			pclk_rate, pclk_period, line_time);
-
-	return line_time;
-}
-
-static int _sde_encoder_wakeup_time(struct drm_encoder *drm_enc,
-		ktime_t *wakeup_time)
-{
-	struct drm_display_mode *mode;
-	struct sde_encoder_virt *sde_enc;
-	u32 cur_line;
-	u32 line_time;
-	u32 vtotal, time_to_vsync;
-	ktime_t cur_time;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	mode = &sde_enc->cur_master->cached_mode;
-
-	line_time = _sde_encoder_calculate_linetime(sde_enc, mode);
-	if (!line_time)
-		return -EINVAL;
-
-	cur_line = sde_enc->cur_master->ops.get_line_count(sde_enc->cur_master);
-
-	vtotal = mode->vtotal;
-	if (cur_line >= vtotal)
-		time_to_vsync = line_time * vtotal;
-	else
-		time_to_vsync = line_time * (vtotal - cur_line);
-
-	if (time_to_vsync == 0) {
-		SDE_ERROR("time to vsync should not be zero, vtotal=%d\n",
-				vtotal);
-		return -EINVAL;
-	}
-
-	cur_time = ktime_get();
-	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
-
-	SDE_DEBUG_ENC(sde_enc,
-			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
-			cur_line, vtotal, time_to_vsync,
-			ktime_to_ms(cur_time),
-			ktime_to_ms(*wakeup_time));
-	return 0;
-}
-
-static void sde_encoder_vsync_event_handler(struct timer_list *t)
-{
-	struct drm_encoder *drm_enc;
-	struct sde_encoder_virt *sde_enc =
-			from_timer(sde_enc, t, vsync_event_timer);
-	struct msm_drm_private *priv;
-	struct msm_drm_thread *event_thread;
-
-	if (!sde_enc || !sde_enc->crtc) {
-		SDE_ERROR("invalid encoder parameters %d\n", !sde_enc);
-		return;
-	}
-
-	drm_enc = &sde_enc->base;
-
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid encoder parameters\n");
-		return;
-	}
-
-	priv = drm_enc->dev->dev_private;
-
-	if (sde_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
-		SDE_ERROR("invalid crtc index:%u\n",
-				sde_enc->crtc->index);
-		return;
-	}
-	event_thread = &priv->event_thread[sde_enc->crtc->index];
-	if (!event_thread) {
-		SDE_ERROR("event_thread not found for crtc:%d\n",
-				sde_enc->crtc->index);
-		return;
-	}
-
-	kthread_queue_work(&event_thread->worker,
-				&sde_enc->vsync_event_work);
-}
-
-static void sde_encoder_esd_trigger_work_handler(struct kthread_work *work)
-{
-	struct sde_encoder_virt *sde_enc = container_of(work,
-				struct sde_encoder_virt, esd_trigger_work);
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde encoder\n");
-		return;
-	}
-
-	sde_encoder_resource_control(&sde_enc->base,
-			SDE_ENC_RC_EVENT_KICKOFF);
-}
-
-static void sde_encoder_input_event_work_handler(struct kthread_work *work)
-{
-	struct sde_encoder_virt *sde_enc = container_of(work,
-				struct sde_encoder_virt, input_event_work);
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde encoder\n");
-		return;
-	}
-
-	sde_encoder_resource_control(&sde_enc->base,
-			SDE_ENC_RC_EVENT_EARLY_WAKEUP);
-}
-
-static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
-{
-	struct sde_encoder_virt *sde_enc = container_of(work,
-			struct sde_encoder_virt, vsync_event_work);
-	bool autorefresh_enabled = false;
-	int rc = 0;
-	ktime_t wakeup_time;
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid sde encoder\n");
-		return;
-	}
-
-	rc = _sde_encoder_power_enable(sde_enc, true);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "sde enc power enabled failed:%d\n", rc);
-		return;
-	}
-
-	if (sde_enc->cur_master &&
-		sde_enc->cur_master->ops.is_autorefresh_enabled)
-		autorefresh_enabled =
-			sde_enc->cur_master->ops.is_autorefresh_enabled(
-						sde_enc->cur_master);
-
-	/* Update timer if autorefresh is enabled else return */
-	if (!autorefresh_enabled)
-		goto exit;
-
-	rc = _sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time);
-	if (rc)
-		goto exit;
-
-	SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
-	mod_timer(&sde_enc->vsync_event_timer,
-			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
-
-exit:
-	_sde_encoder_power_enable(sde_enc, false);
-}
-
-int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc)
-{
-	static const uint64_t timeout_us = 50000;
-	static const uint64_t sleep_us = 20;
-	struct sde_encoder_virt *sde_enc;
-	ktime_t cur_ktime, exp_ktime;
-	uint32_t line_count, tmp, i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->cur_master ||
-			!sde_enc->cur_master->ops.get_line_count) {
-		SDE_DEBUG_ENC(sde_enc, "can't get master line count\n");
-		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
-		return -EINVAL;
-	}
-
-	exp_ktime = ktime_add_ms(ktime_get(), timeout_us / 1000);
-
-	line_count = sde_enc->cur_master->ops.get_line_count(
-			sde_enc->cur_master);
-
-	for (i = 0; i < (timeout_us * 2 / sleep_us); ++i) {
-		tmp = line_count;
-		line_count = sde_enc->cur_master->ops.get_line_count(
-				sde_enc->cur_master);
-		if (line_count < tmp) {
-			SDE_EVT32(DRMID(drm_enc), line_count);
-			return 0;
-		}
-
-		cur_ktime = ktime_get();
-		if (ktime_compare_safe(exp_ktime, cur_ktime) <= 0)
-			break;
-
-		usleep_range(sleep_us / 2, sleep_us);
-	}
-
-	SDE_EVT32(DRMID(drm_enc), line_count, SDE_EVTLOG_ERROR);
-	return -ETIMEDOUT;
-}
-
-static int _helper_flush_qsync(struct sde_encoder_phys *phys_enc)
-{
-	struct drm_encoder *drm_enc;
-	struct sde_rm_hw_iter rm_iter;
-	bool lm_valid = false;
-	bool intf_valid = false;
-
-	if (!phys_enc || !phys_enc->parent) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	drm_enc = phys_enc->parent;
-
-	/* Flush the interfaces for AVR update or Qsync with INTF TE */
-	if (phys_enc->intf_mode == INTF_MODE_VIDEO ||
-			(phys_enc->intf_mode == INTF_MODE_CMD &&
-			phys_enc->has_intf_te)) {
-		sde_rm_init_hw_iter(&rm_iter, drm_enc->base.id,
-				SDE_HW_BLK_INTF);
-		while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &rm_iter)) {
-			struct sde_hw_intf *hw_intf =
-				(struct sde_hw_intf *)rm_iter.hw;
-
-			if (!hw_intf)
-				continue;
-
-			if (phys_enc->hw_ctl->ops.update_bitmask_intf)
-				phys_enc->hw_ctl->ops.update_bitmask_intf(
-						phys_enc->hw_ctl,
-						hw_intf->idx, 1);
-
-			intf_valid = true;
-		}
-
-		if (!intf_valid) {
-			SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
-				"intf not found to flush\n");
-			return -EFAULT;
-		}
-	} else {
-		sde_rm_init_hw_iter(&rm_iter, drm_enc->base.id, SDE_HW_BLK_LM);
-		while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &rm_iter)) {
-			struct sde_hw_mixer *hw_lm =
-					(struct sde_hw_mixer *)rm_iter.hw;
-
-			if (!hw_lm)
-				continue;
-
-			/* update LM flush for HW without INTF TE */
-			if (phys_enc->hw_ctl->ops.update_bitmask_mixer)
-				phys_enc->hw_ctl->ops.update_bitmask_mixer(
-						phys_enc->hw_ctl,
-						hw_lm->idx, 1);
-
-			lm_valid = true;
-		}
-
-		if (!lm_valid) {
-			SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
-				"lm not found to flush\n");
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static bool _sde_encoder_dsc_is_dirty(struct sde_encoder_virt *sde_enc)
-{
-	int i;
-
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		/**
-		 * This dirty_dsc_hw field is set during DSC disable to
-		 * indicate which DSC blocks need to be flushed
-		 */
-		if (sde_enc->dirty_dsc_ids[i])
-			return true;
-	}
-
-	return false;
-}
-
-static void _helper_flush_dsc(struct sde_encoder_virt *sde_enc)
-{
-	int i;
-	struct sde_hw_ctl *hw_ctl = NULL;
-	enum sde_dsc dsc_idx;
-
-	if (sde_enc->cur_master)
-		hw_ctl = sde_enc->cur_master->hw_ctl;
-
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		dsc_idx = sde_enc->dirty_dsc_ids[i];
-		if (dsc_idx && hw_ctl && hw_ctl->ops.update_bitmask_dsc)
-			hw_ctl->ops.update_bitmask_dsc(hw_ctl, dsc_idx, 1);
-
-		sde_enc->dirty_dsc_ids[i] = DSC_NONE;
-	}
-}
-static void _sde_encoder_helper_hdr_plus_mempool_update(
-		struct sde_encoder_virt *sde_enc)
-{
-	struct sde_connector_dyn_hdr_metadata *dhdr_meta = NULL;
-	struct sde_hw_mdp *mdptop = NULL;
-
-	sde_enc->dynamic_hdr_updated = false;
-	if (sde_enc->cur_master) {
-		mdptop = sde_enc->cur_master->hw_mdptop;
-		dhdr_meta = sde_connector_get_dyn_hdr_meta(
-				sde_enc->cur_master->connector);
-	}
-
-	if (!mdptop || !dhdr_meta || !dhdr_meta->dynamic_hdr_update)
-		return;
-
-	if (mdptop->ops.set_hdr_plus_metadata) {
-		sde_enc->dynamic_hdr_updated = true;
-		mdptop->ops.set_hdr_plus_metadata(
-				mdptop, dhdr_meta->dynamic_hdr_payload,
-				dhdr_meta->dynamic_hdr_payload_size,
-				sde_enc->cur_master->intf_idx == INTF_0 ?
-				0 : 1);
-	}
-}
-
-static void _sde_encoder_needs_hw_reset(struct drm_encoder *drm_enc,
-	int ln_cnt1)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	struct sde_encoder_phys *phys;
-	int ln_cnt2, i;
-
-	/* query line count before cur_master is updated */
-	if (sde_enc->cur_master && sde_enc->cur_master->ops.get_wr_line_count)
-		ln_cnt2 = sde_enc->cur_master->ops.get_wr_line_count(
-			sde_enc->cur_master);
-	else
-		ln_cnt2 = -EINVAL;
-
-	SDE_EVT32(DRMID(drm_enc), ln_cnt1, ln_cnt2);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (phys && phys->ops.hw_reset)
-			phys->ops.hw_reset(phys);
-	}
-}
-
-int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	struct sde_kms *sde_kms = NULL;
-	struct msm_drm_private *priv = NULL;
-	bool needs_hw_reset = false;
-	int ln_cnt1 = -EINVAL, i, rc, ret = 0;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc || !params || !drm_enc->dev ||
-		!drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid args\n");
-		return -EINVAL;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	disp_info = &sde_enc->disp_info;
-
-	SDE_DEBUG_ENC(sde_enc, "\n");
-	SDE_EVT32(DRMID(drm_enc));
-
-	/* save this for later, in case of errors */
-	if (sde_enc->cur_master && sde_enc->cur_master->ops.get_wr_line_count)
-		ln_cnt1 = sde_enc->cur_master->ops.get_wr_line_count(
-				sde_enc->cur_master);
-
-	/* update the qsync parameters for the current frame */
-	if (sde_enc->cur_master)
-		sde_connector_set_qsync_params(
-				sde_enc->cur_master->connector);
-
-
-	if (sde_enc->cur_master && sde_enc->cur_master->connector &&
-	    disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-		sde_enc->frame_trigger_mode = sde_connector_get_property(
-			sde_enc->cur_master->connector->state,
-			CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
-
-	_sde_encoder_helper_hdr_plus_mempool_update(sde_enc);
-
-	/* prepare for next kickoff, may include waiting on previous kickoff */
-	SDE_ATRACE_BEGIN("sde_encoder_prepare_for_kickoff");
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		params->is_primary = sde_enc->disp_info.is_primary;
-		params->frame_trigger_mode = sde_enc->frame_trigger_mode;
-		params->recovery_events_enabled =
-					sde_enc->recovery_events_enabled;
-		if (phys) {
-			if (phys->ops.prepare_for_kickoff) {
-				rc = phys->ops.prepare_for_kickoff(
-						phys, params);
-				if (rc)
-					ret = rc;
-			}
-			if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
-				needs_hw_reset = true;
-			_sde_encoder_setup_dither(phys);
-
-			if (sde_enc->cur_master &&
-					sde_connector_is_qsync_updated(
-					sde_enc->cur_master->connector)) {
-				_helper_flush_qsync(phys);
-			}
-		}
-	}
-
-	rc = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "resource kickoff failed rc %d\n", rc);
-		ret = rc;
-		goto end;
-	}
-
-	/* if any phys needs reset, reset all phys, in-order */
-	if (needs_hw_reset)
-		_sde_encoder_needs_hw_reset(drm_enc, ln_cnt1);
-
-	_sde_encoder_update_master(drm_enc, params);
-
-	_sde_encoder_update_roi(drm_enc);
-
-	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
-		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
-		if (rc) {
-			SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
-					sde_enc->cur_master->connector->base.id,
-					rc);
-			ret = rc;
-		}
-	}
-
-	if (_sde_encoder_is_dsc_enabled(drm_enc) && sde_enc->cur_master &&
-			!sde_enc->cur_master->cont_splash_enabled) {
-		rc = _sde_encoder_dsc_setup(sde_enc, params);
-		if (rc) {
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
-			ret = rc;
-		}
-	} else if (_sde_encoder_dsc_is_dirty(sde_enc)) {
-		_helper_flush_dsc(sde_enc);
-	}
-
-end:
-	SDE_ATRACE_END("sde_encoder_prepare_for_kickoff");
-	return ret;
-}
-
-/**
- * _sde_encoder_reset_ctl_hw - reset h/w configuration for all ctl's associated
- *	with the specified encoder, and unstage all pipes from it
- * @encoder:	encoder pointer
- * Returns: 0 on success
- */
-static int _sde_encoder_reset_ctl_hw(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	unsigned int i;
-	int rc = 0;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	SDE_ATRACE_BEGIN("encoder_release_lm");
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (!phys)
-			continue;
-
-		SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0);
-
-		rc = sde_encoder_helper_reset_mixers(phys, NULL);
-		if (rc)
-			SDE_EVT32(DRMID(drm_enc), rc, SDE_EVTLOG_ERROR);
-	}
-
-	SDE_ATRACE_END("encoder_release_lm");
-	return rc;
-}
-
-void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	ktime_t wakeup_time;
-	unsigned int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_ATRACE_BEGIN("encoder_kickoff");
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	/* create a 'no pipes' commit to release buffers on errors */
-	if (is_error)
-		_sde_encoder_reset_ctl_hw(drm_enc);
-
-	/* All phys encs are ready to go, trigger the kickoff */
-	_sde_encoder_kickoff_phys(sde_enc);
-
-	/* allow phys encs to handle any post-kickoff business */
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (phys && phys->ops.handle_post_kickoff)
-			phys->ops.handle_post_kickoff(phys);
-	}
-
-	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
-			!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
-		SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
-		mod_timer(&sde_enc->vsync_event_timer,
-				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
-	}
-
-	SDE_ATRACE_END("encoder_kickoff");
-}
-
-int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb)
-{
-	struct drm_encoder *drm_enc;
-	struct sde_hw_mixer_cfg mixer;
-	struct sde_rm_hw_iter lm_iter;
-	bool lm_valid = false;
-
-	if (!phys_enc || !phys_enc->parent) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	drm_enc = phys_enc->parent;
-	memset(&mixer, 0, sizeof(mixer));
-
-	/* reset associated CTL/LMs */
-	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
-		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
-
-	sde_rm_init_hw_iter(&lm_iter, drm_enc->base.id, SDE_HW_BLK_LM);
-	while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &lm_iter)) {
-		struct sde_hw_mixer *hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
-
-		if (!hw_lm)
-			continue;
-
-		/* need to flush LM to remove it */
-		if (phys_enc->hw_ctl->ops.update_bitmask_mixer)
-			phys_enc->hw_ctl->ops.update_bitmask_mixer(
-					phys_enc->hw_ctl,
-					hw_lm->idx, 1);
-
-		if (fb) {
-			/* assume a single LM if targeting a frame buffer */
-			if (lm_valid)
-				continue;
-
-			mixer.out_height = fb->height;
-			mixer.out_width = fb->width;
-
-			if (hw_lm->ops.setup_mixer_out)
-				hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
-		}
-
-		lm_valid = true;
-
-		/* only enable border color on LM */
-		if (phys_enc->hw_ctl->ops.setup_blendstage)
-			phys_enc->hw_ctl->ops.setup_blendstage(
-					phys_enc->hw_ctl, hw_lm->idx, NULL);
-	}
-
-	if (!lm_valid) {
-		SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
-void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
-	int i;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (phys && phys->ops.prepare_commit)
-			phys->ops.prepare_commit(phys);
-	}
-}
-
-void sde_encoder_helper_setup_misr(struct sde_encoder_phys *phys_enc,
-						bool enable, u32 frame_count)
-{
-	if (!phys_enc)
-		return;
-
-	if (phys_enc->hw_intf && phys_enc->hw_intf->ops.setup_misr)
-		phys_enc->hw_intf->ops.setup_misr(phys_enc->hw_intf,
-				enable, frame_count);
-}
-
-int sde_encoder_helper_collect_misr(struct sde_encoder_phys *phys_enc,
-		bool nonblock, u32 *misr_value)
-{
-	if (!phys_enc)
-		return -EINVAL;
-
-	return phys_enc->hw_intf && phys_enc->hw_intf->ops.collect_misr ?
-			phys_enc->hw_intf->ops.collect_misr(phys_enc->hw_intf,
-			nonblock, misr_value) : -ENOTSUPP;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int _sde_encoder_status_show(struct seq_file *s, void *data)
-{
-	struct sde_encoder_virt *sde_enc;
-	int i;
-
-	if (!s || !s->private)
-		return -EINVAL;
-
-	sde_enc = s->private;
-
-	mutex_lock(&sde_enc->enc_lock);
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys)
-			continue;
-
-		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
-				phys->intf_idx - INTF_0,
-				atomic_read(&phys->vsync_cnt),
-				atomic_read(&phys->underrun_cnt));
-
-		switch (phys->intf_mode) {
-		case INTF_MODE_VIDEO:
-			seq_puts(s, "mode: video\n");
-			break;
-		case INTF_MODE_CMD:
-			seq_puts(s, "mode: command\n");
-			break;
-		case INTF_MODE_WB_BLOCK:
-			seq_puts(s, "mode: wb block\n");
-			break;
-		case INTF_MODE_WB_LINE:
-			seq_puts(s, "mode: wb line\n");
-			break;
-		default:
-			seq_puts(s, "mode: ???\n");
-			break;
-		}
-	}
-	mutex_unlock(&sde_enc->enc_lock);
-
-	return 0;
-}
-
-static int _sde_encoder_debugfs_status_open(struct inode *inode,
-		struct file *file)
-{
-	return single_open(file, _sde_encoder_status_show, inode->i_private);
-}
-
-static ssize_t _sde_encoder_misr_setup(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_encoder_virt *sde_enc;
-	int rc;
-	char buf[MISR_BUFF_SIZE + 1];
-	size_t buff_copy;
-	u32 frame_count, enable;
-	struct msm_drm_private *priv = NULL;
-	struct sde_kms *sde_kms = NULL;
-
-	if (!file || !file->private_data)
-		return -EINVAL;
-
-	sde_enc = file->private_data;
-	priv = sde_enc->base.dev->dev_private;
-	if (!sde_enc || !priv || !priv->kms)
-		return -EINVAL;
-
-	sde_kms = to_sde_kms(priv->kms);
-
-	if (sde_kms_is_secure_session_inprogress(sde_kms)) {
-		SDE_DEBUG_ENC(sde_enc, "misr enable/disable not allowed\n");
-		return -ENOTSUPP;
-	}
-
-	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
-	if (copy_from_user(buf, user_buf, buff_copy))
-		return -EINVAL;
-
-	buf[buff_copy] = 0; /* end of string */
-
-	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
-		return -EINVAL;
-
-	rc = _sde_encoder_power_enable(sde_enc, true);
-	if (rc)
-		return rc;
-
-	sde_enc->misr_enable = enable;
-	sde_enc->misr_frame_count = frame_count;
-	sde_encoder_misr_configure(&sde_enc->base, enable, frame_count);
-	_sde_encoder_power_enable(sde_enc, false);
-	return count;
-}
-
-static ssize_t _sde_encoder_misr_read(struct file *file,
-		char __user *user_buff, size_t count, loff_t *ppos)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv = NULL;
-	struct sde_kms *sde_kms = NULL;
-	int i = 0, len = 0;
-	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
-	int rc;
-
-	if (*ppos)
-		return 0;
-
-	if (!file || !file->private_data)
-		return -EINVAL;
-
-	sde_enc = file->private_data;
-	priv = sde_enc->base.dev->dev_private;
-	if (priv != NULL)
-		sde_kms = to_sde_kms(priv->kms);
-
-	if (sde_kms_is_secure_session_inprogress(sde_kms)) {
-		SDE_DEBUG_ENC(sde_enc, "misr read not allowed\n");
-		return -ENOTSUPP;
-	}
-
-	rc = _sde_encoder_power_enable(sde_enc, true);
-	if (rc)
-		return rc;
-
-	if (!sde_enc->misr_enable) {
-		len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-				"disabled\n");
-		goto buff_check;
-	}
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		u32 misr_value = 0;
-
-		if (!phys || !phys->ops.collect_misr) {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"invalid\n");
-			SDE_ERROR_ENC(sde_enc, "invalid misr ops\n");
-			continue;
-		}
-
-		rc = phys->ops.collect_misr(phys, false, &misr_value);
-		if (rc) {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"invalid\n");
-			SDE_ERROR_ENC(sde_enc, "failed to collect misr %d\n",
-					rc);
-			continue;
-		} else {
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"Intf idx:%d\n",
-					phys->intf_idx - INTF_0);
-			len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
-					"0x%x\n", misr_value);
-		}
-	}
-
-buff_check:
-	if (count <= len) {
-		len = 0;
-		goto end;
-	}
-
-	if (copy_to_user(user_buff, buf, len)) {
-		len = -EFAULT;
-		goto end;
-	}
-
-	*ppos += len;   /* increase offset */
-
-end:
-	_sde_encoder_power_enable(sde_enc, false);
-	return len;
-}
-
-static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	int i;
-
-	static const struct file_operations debugfs_status_fops = {
-		.open =		_sde_encoder_debugfs_status_open,
-		.read =		seq_read,
-		.llseek =	seq_lseek,
-		.release =	single_release,
-	};
-
-	static const struct file_operations debugfs_misr_fops = {
-		.open = simple_open,
-		.read = _sde_encoder_misr_read,
-		.write = _sde_encoder_misr_setup,
-	};
-
-	char name[SDE_NAME_SIZE];
-
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid encoder or kms\n");
-		return -EINVAL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-
-	snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
-
-	/* create overall sub-directory for the encoder */
-	sde_enc->debugfs_root = debugfs_create_dir(name,
-			drm_enc->dev->primary->debugfs_root);
-	if (!sde_enc->debugfs_root)
-		return -ENOMEM;
-
-	/* don't error check these */
-	debugfs_create_file("status", 0400,
-		sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
-
-	debugfs_create_file("misr_data", 0600,
-		sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
-
-	debugfs_create_bool("idle_power_collapse", 0600, sde_enc->debugfs_root,
-			&sde_enc->idle_pc_enabled);
-
-	debugfs_create_u32("frame_trigger_mode", 0400, sde_enc->debugfs_root,
-			&sde_enc->frame_trigger_mode);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++)
-		if (sde_enc->phys_encs[i] &&
-				sde_enc->phys_encs[i]->ops.late_register)
-			sde_enc->phys_encs[i]->ops.late_register(
-					sde_enc->phys_encs[i],
-					sde_enc->debugfs_root);
-
-	return 0;
-}
-
-static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc)
-		return;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	debugfs_remove_recursive(sde_enc->debugfs_root);
-}
-#else
-static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
-{
-	return 0;
-}
-
-static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
-#endif
-
-static int sde_encoder_late_register(struct drm_encoder *encoder)
-{
-	return _sde_encoder_init_debugfs(encoder);
-}
-
-static void sde_encoder_early_unregister(struct drm_encoder *encoder)
-{
-	_sde_encoder_destroy_debugfs(encoder);
-}
-
-static int sde_encoder_virt_add_phys_encs(
-		u32 display_caps,
-		struct sde_encoder_virt *sde_enc,
-		struct sde_enc_phys_init_params *params)
-{
-	struct sde_encoder_phys *enc = NULL;
-
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	/*
-	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
-	 * in this function, check up-front.
-	 */
-	if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
-			ARRAY_SIZE(sde_enc->phys_encs)) {
-		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
-			  sde_enc->num_phys_encs);
-		return -EINVAL;
-	}
-
-	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
-		enc = sde_encoder_phys_vid_init(params);
-
-		if (IS_ERR_OR_NULL(enc)) {
-			SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
-				PTR_ERR(enc));
-			return !enc ? -EINVAL : PTR_ERR(enc);
-		}
-
-		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
-		++sde_enc->num_phys_encs;
-	}
-
-	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
-		enc = sde_encoder_phys_cmd_init(params);
-
-		if (IS_ERR_OR_NULL(enc)) {
-			SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
-				PTR_ERR(enc));
-			return !enc ? -EINVAL : PTR_ERR(enc);
-		}
-
-		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
-		++sde_enc->num_phys_encs;
-	}
-
-	return 0;
-}
-
-static int sde_encoder_virt_add_phys_enc_wb(struct sde_encoder_virt *sde_enc,
-		struct sde_enc_phys_init_params *params)
-{
-	struct sde_encoder_phys *enc = NULL;
-
-	if (!sde_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
-		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
-			  sde_enc->num_phys_encs);
-		return -EINVAL;
-	}
-
-	enc = sde_encoder_phys_wb_init(params);
-
-	if (IS_ERR_OR_NULL(enc)) {
-		SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
-			PTR_ERR(enc));
-		return !enc ? -EINVAL : PTR_ERR(enc);
-	}
-
-	sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
-	++sde_enc->num_phys_encs;
-
-	return 0;
-}
-
-static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
-				 struct sde_kms *sde_kms,
-				 struct msm_display_info *disp_info,
-				 int *drm_enc_mode)
-{
-	int ret = 0;
-	int i = 0;
-	enum sde_intf_type intf_type;
-	struct sde_encoder_virt_ops parent_ops = {
-		sde_encoder_vblank_callback,
-		sde_encoder_underrun_callback,
-		sde_encoder_frame_done_callback,
-		sde_encoder_get_qsync_fps_callback,
-	};
-	struct sde_enc_phys_init_params phys_params;
-
-	if (!sde_enc || !sde_kms) {
-		SDE_ERROR("invalid arg(s), enc %d kms %d\n",
-				!sde_enc, !sde_kms);
-		return -EINVAL;
-	}
-
-	memset(&phys_params, 0, sizeof(phys_params));
-	phys_params.sde_kms = sde_kms;
-	phys_params.parent = &sde_enc->base;
-	phys_params.parent_ops = parent_ops;
-	phys_params.enc_spinlock = &sde_enc->enc_spinlock;
-	phys_params.vblank_ctl_lock = &sde_enc->vblank_ctl_lock;
-
-	SDE_DEBUG("\n");
-
-	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
-		*drm_enc_mode = DRM_MODE_ENCODER_DSI;
-		intf_type = INTF_DSI;
-	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
-		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
-		intf_type = INTF_HDMI;
-	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
-		if (disp_info->capabilities & MSM_DISPLAY_CAP_MST_MODE)
-			*drm_enc_mode = DRM_MODE_ENCODER_DPMST;
-		else
-			*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
-		intf_type = INTF_DP;
-	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
-		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
-		intf_type = INTF_WB;
-	} else {
-		SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
-		return -EINVAL;
-	}
-
-	WARN_ON(disp_info->num_of_h_tiles < 1);
-
-	sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
-	sde_enc->te_source = disp_info->te_source;
-
-	SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
-
-	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
-	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
-		sde_enc->idle_pc_enabled = sde_kms->catalog->has_idle_pc;
-
-	mutex_lock(&sde_enc->enc_lock);
-	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
-		/*
-		 * Left-most tile is at index 0, content is controller id
-		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
-		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
-		 */
-		u32 controller_id = disp_info->h_tile_instance[i];
-
-		if (disp_info->num_of_h_tiles > 1) {
-			if (i == 0)
-				phys_params.split_role = ENC_ROLE_MASTER;
-			else
-				phys_params.split_role = ENC_ROLE_SLAVE;
-		} else {
-			phys_params.split_role = ENC_ROLE_SOLO;
-		}
-
-		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
-				i, controller_id, phys_params.split_role);
-
-		if (intf_type == INTF_WB) {
-			phys_params.intf_idx = INTF_MAX;
-			phys_params.wb_idx = sde_encoder_get_wb(
-					sde_kms->catalog,
-					intf_type, controller_id);
-			if (phys_params.wb_idx == WB_MAX) {
-				SDE_ERROR_ENC(sde_enc,
-					"could not get wb: type %d, id %d\n",
-					intf_type, controller_id);
-				ret = -EINVAL;
-			}
-		} else {
-			phys_params.wb_idx = WB_MAX;
-			phys_params.intf_idx = sde_encoder_get_intf(
-					sde_kms->catalog, intf_type,
-					controller_id);
-			if (phys_params.intf_idx == INTF_MAX) {
-				SDE_ERROR_ENC(sde_enc,
-					"could not get wb: type %d, id %d\n",
-					intf_type, controller_id);
-				ret = -EINVAL;
-			}
-		}
-
-		if (!ret) {
-			if (intf_type == INTF_WB)
-				ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
-						&phys_params);
-			else
-				ret = sde_encoder_virt_add_phys_encs(
-						disp_info->capabilities,
-						sde_enc,
-						&phys_params);
-			if (ret)
-				SDE_ERROR_ENC(sde_enc,
-						"failed to add phys encs\n");
-		}
-	}
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys) {
-			atomic_set(&phys->vsync_cnt, 0);
-			atomic_set(&phys->underrun_cnt, 0);
-		}
-	}
-	mutex_unlock(&sde_enc->enc_lock);
-
-	return ret;
-}
-
-static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
-	.mode_set = sde_encoder_virt_mode_set,
-	.disable = sde_encoder_virt_disable,
-	.enable = sde_encoder_virt_enable,
-	.atomic_check = sde_encoder_virt_atomic_check,
-};
-
-static const struct drm_encoder_funcs sde_encoder_funcs = {
-		.destroy = sde_encoder_destroy,
-		.late_register = sde_encoder_late_register,
-		.early_unregister = sde_encoder_early_unregister,
-};
-
-struct drm_encoder *sde_encoder_init(
-		struct drm_device *dev,
-		struct msm_display_info *disp_info)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
-	struct drm_encoder *drm_enc = NULL;
-	struct sde_encoder_virt *sde_enc = NULL;
-	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
-	char name[SDE_NAME_SIZE];
-	int ret = 0, i, intf_index = INTF_MAX;
-	struct sde_encoder_phys *phys = NULL;
-
-	sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
-	if (!sde_enc) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	mutex_init(&sde_enc->enc_lock);
-	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
-			&drm_enc_mode);
-	if (ret)
-		goto fail;
-
-	sde_enc->cur_master = NULL;
-	spin_lock_init(&sde_enc->enc_spinlock);
-	mutex_init(&sde_enc->vblank_ctl_lock);
-	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
-		atomic_set(&sde_enc->frame_done_cnt[i], 0);
-	drm_enc = &sde_enc->base;
-	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL);
-	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
-
-	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
-		timer_setup(&sde_enc->vsync_event_timer,
-				sde_encoder_vsync_event_handler, 0);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		phys = sde_enc->phys_encs[i];
-		if (!phys)
-			continue;
-		if (phys->ops.is_master && phys->ops.is_master(phys))
-			intf_index = phys->intf_idx - INTF_0;
-	}
-	snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
-	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
-		disp_info->is_primary ? SDE_RSC_PRIMARY_DISP_CLIENT :
-		SDE_RSC_EXTERNAL_DISP_CLIENT, intf_index + 1);
-	if (IS_ERR_OR_NULL(sde_enc->rsc_client)) {
-		SDE_DEBUG("sde rsc client create failed :%ld\n",
-						PTR_ERR(sde_enc->rsc_client));
-		sde_enc->rsc_client = NULL;
-	}
-
-	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
-		ret = _sde_encoder_input_handler(sde_enc);
-		if (ret)
-			SDE_ERROR(
-			"input handler registration failed, rc = %d\n", ret);
-	}
-
-	mutex_init(&sde_enc->rc_lock);
-	kthread_init_delayed_work(&sde_enc->delayed_off_work,
-			sde_encoder_off_work);
-	sde_enc->vblank_enabled = false;
-
-	kthread_init_work(&sde_enc->vsync_event_work,
-			sde_encoder_vsync_event_work_handler);
-
-	kthread_init_work(&sde_enc->input_event_work,
-			sde_encoder_input_event_work_handler);
-
-	kthread_init_work(&sde_enc->esd_trigger_work,
-			sde_encoder_esd_trigger_work_handler);
-
-	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
-
-	SDE_DEBUG_ENC(sde_enc, "created\n");
-
-	return drm_enc;
-
-fail:
-	SDE_ERROR("failed to create encoder\n");
-	if (drm_enc)
-		sde_encoder_destroy(drm_enc);
-
-	return ERR_PTR(ret);
-}
-
-int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
-	enum msm_event_wait event)
-{
-	int (*fn_wait)(struct sde_encoder_phys *phys_enc) = NULL;
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i, ret = 0;
-	char atrace_buf[32];
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		switch (event) {
-		case MSM_ENC_COMMIT_DONE:
-			fn_wait = phys->ops.wait_for_commit_done;
-			break;
-		case MSM_ENC_TX_COMPLETE:
-			fn_wait = phys->ops.wait_for_tx_complete;
-			break;
-		case MSM_ENC_VBLANK:
-			fn_wait = phys->ops.wait_for_vblank;
-			break;
-		case MSM_ENC_ACTIVE_REGION:
-			fn_wait = phys->ops.wait_for_active;
-			break;
-		default:
-			SDE_ERROR_ENC(sde_enc, "unknown wait event %d\n",
-					event);
-			return -EINVAL;
-		}
-
-		if (phys && fn_wait) {
-			snprintf(atrace_buf, sizeof(atrace_buf),
-				"wait_completion_event_%d", event);
-			SDE_ATRACE_BEGIN(atrace_buf);
-			ret = fn_wait(phys);
-			SDE_ATRACE_END(atrace_buf);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return 0;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	return sde_enc->mode_info.frame_rate;
-}
-
-enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i;
-
-	if (!encoder) {
-		SDE_ERROR("invalid encoder\n");
-		return INTF_MODE_NONE;
-	}
-	sde_enc = to_sde_encoder_virt(encoder);
-
-	if (sde_enc->cur_master)
-		return sde_enc->cur_master->intf_mode;
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (phys)
-			return phys->intf_mode;
-	}
-
-	return INTF_MODE_NONE;
-}
-
-static void _sde_encoder_cache_hw_res_cont_splash(
-		struct drm_encoder *encoder,
-		struct sde_kms *sde_kms)
-{
-	int i, idx;
-	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys_enc;
-	struct sde_rm_hw_iter dsc_iter, pp_iter, ctl_iter, intf_iter;
-
-	sde_enc = to_sde_encoder_virt(encoder);
-
-	sde_rm_init_hw_iter(&pp_iter, encoder->base.id, SDE_HW_BLK_PINGPONG);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		sde_enc->hw_pp[i] = NULL;
-		if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
-			break;
-		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
-	}
-
-	sde_rm_init_hw_iter(&dsc_iter, encoder->base.id, SDE_HW_BLK_DSC);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		sde_enc->hw_dsc[i] = NULL;
-		if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
-			break;
-		sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
-	}
-
-	/*
-	 * If we have multiple phys encoders with one controller, make
-	 * sure to populate the controller pointer in both phys encoders.
-	 */
-	for (idx = 0; idx < sde_enc->num_phys_encs; idx++) {
-		phys_enc = sde_enc->phys_encs[idx];
-		phys_enc->hw_ctl = NULL;
-
-		sde_rm_init_hw_iter(&ctl_iter, encoder->base.id,
-				SDE_HW_BLK_CTL);
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			if (sde_rm_get_hw(&sde_kms->rm, &ctl_iter)) {
-				phys_enc->hw_ctl =
-					(struct sde_hw_ctl *) ctl_iter.hw;
-				pr_debug("HW CTL intf_idx:%d hw_ctl:[0x%pK]\n",
-					phys_enc->intf_idx, phys_enc->hw_ctl);
-			}
-		}
-	}
-
-	sde_rm_init_hw_iter(&intf_iter, encoder->base.id, SDE_HW_BLK_INTF);
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		phys->hw_intf = NULL;
-		if (!sde_rm_get_hw(&sde_kms->rm, &intf_iter))
-			break;
-		phys->hw_intf = (struct sde_hw_intf *) intf_iter.hw;
-	}
-}
-
-/**
- * sde_encoder_update_caps_for_cont_splash - update encoder settings during
- *	device bootup when cont_splash is enabled
- * @drm_enc:	Pointer to drm encoder structure
- * @splash_display: Pointer to sde_splash_display corresponding to this encoder
- * @enable:	boolean indicates enable or displae state of splash
- * @Return:	true if successful in updating the encoder structure
- */
-int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder,
-		struct sde_splash_display *splash_display, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct drm_connector *conn = NULL;
-	struct sde_connector *sde_conn = NULL;
-	struct sde_connector_state *sde_conn_state = NULL;
-	struct drm_display_mode *drm_mode = NULL;
-	struct sde_encoder_phys *phys_enc;
-	int ret = 0, i;
-
-	if (!encoder) {
-		SDE_ERROR("invalid drm enc\n");
-		return -EINVAL;
-	}
-
-	if (!encoder->dev || !encoder->dev->dev_private) {
-		SDE_ERROR("drm device invalid\n");
-		return -EINVAL;
-	}
-
-	priv = encoder->dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	sde_enc = to_sde_encoder_virt(encoder);
-	if (!priv->num_connectors) {
-		SDE_ERROR_ENC(sde_enc, "No connectors registered\n");
-		return -EINVAL;
-	}
-	SDE_DEBUG_ENC(sde_enc,
-			"num of connectors: %d\n", priv->num_connectors);
-
-	SDE_DEBUG_ENC(sde_enc, "enable: %d\n", enable);
-	if (!enable) {
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			phys_enc = sde_enc->phys_encs[i];
-			if (phys_enc)
-				phys_enc->cont_splash_enabled = false;
-		}
-		return ret;
-	}
-
-	if (!splash_display) {
-		SDE_ERROR_ENC(sde_enc, "invalid splash data\n");
-		return  -EINVAL;
-	}
-
-	for (i = 0; i < priv->num_connectors; i++) {
-		SDE_DEBUG_ENC(sde_enc, "connector id: %d\n",
-				priv->connectors[i]->base.id);
-		sde_conn = to_sde_connector(priv->connectors[i]);
-		if (!sde_conn->encoder) {
-			SDE_DEBUG_ENC(sde_enc,
-				"encoder not attached to connector\n");
-			continue;
-		}
-		if (sde_conn->encoder->base.id
-				== encoder->base.id) {
-			conn = (priv->connectors[i]);
-			break;
-		}
-	}
-
-	if (!conn || !conn->state) {
-		SDE_ERROR_ENC(sde_enc, "connector not found\n");
-		return -EINVAL;
-	}
-
-	sde_conn_state = to_sde_connector_state(conn->state);
-
-	if (!sde_conn->ops.get_mode_info) {
-		SDE_ERROR_ENC(sde_enc, "conn: get_mode_info ops not found\n");
-		return -EINVAL;
-	}
-
-	ret = sde_conn->ops.get_mode_info(&sde_conn->base,
-			&encoder->crtc->state->adjusted_mode,
-			&sde_conn_state->mode_info,
-			sde_kms->catalog->max_mixer_width,
-			sde_conn->display);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc,
-			"conn: ->get_mode_info failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	ret = sde_rm_reserve(&sde_kms->rm, encoder, encoder->crtc->state,
-			conn->state, false);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc,
-			"failed to reserve hw resources, %d\n", ret);
-		return ret;
-	}
-
-	if (sde_conn->encoder) {
-		conn->state->best_encoder = sde_conn->encoder;
-		SDE_DEBUG_ENC(sde_enc,
-			"configured cstate->best_encoder to ID = %d\n",
-			conn->state->best_encoder->base.id);
-	} else {
-		SDE_ERROR_ENC(sde_enc, "No encoder mapped to connector=%d\n",
-				conn->base.id);
-	}
-
-	SDE_DEBUG_ENC(sde_enc, "connector topology = %llu\n",
-			sde_connector_get_topology_name(conn));
-	drm_mode = &encoder->crtc->state->adjusted_mode;
-	SDE_DEBUG_ENC(sde_enc, "hdisplay = %d, vdisplay = %d\n",
-			drm_mode->hdisplay, drm_mode->vdisplay);
-	drm_set_preferred_mode(conn, drm_mode->hdisplay, drm_mode->vdisplay);
-
-	if (encoder->bridge) {
-		SDE_DEBUG_ENC(sde_enc, "Bridge mapped to encoder\n");
-		/*
-		 * For cont-splash use case, we update the mode
-		 * configurations manually. This will skip the
-		 * usually mode set call when actual frame is
-		 * pushed from framework. The bridge needs to
-		 * be updated with the current drm mode by
-		 * calling the bridge mode set ops.
-		 */
-		if (encoder->bridge->funcs) {
-			SDE_DEBUG_ENC(sde_enc, "calling mode_set\n");
-			encoder->bridge->funcs->mode_set(encoder->bridge,
-						drm_mode, drm_mode);
-		}
-	} else {
-		SDE_ERROR_ENC(sde_enc, "No bridge attached to encoder\n");
-	}
-
-	_sde_encoder_cache_hw_res_cont_splash(encoder, sde_kms);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-		if (!phys) {
-			SDE_ERROR_ENC(sde_enc,
-				"phys encoders not initialized\n");
-			return -EINVAL;
-		}
-
-		/* update connector for master and slave phys encoders */
-		phys->connector = conn;
-		phys->cont_splash_enabled = true;
-		phys->cont_splash_single_flush =
-			splash_display->single_flush_en;
-
-		phys->hw_pp = sde_enc->hw_pp[i];
-		if (phys->ops.cont_splash_mode_set)
-			phys->ops.cont_splash_mode_set(phys, drm_mode);
-
-		if (phys->ops.is_master && phys->ops.is_master(phys))
-			sde_enc->cur_master = phys;
-	}
-
-	return ret;
-}
-
-int sde_encoder_display_failure_notification(struct drm_encoder *enc)
-{
-	struct msm_drm_thread *event_thread = NULL;
-	struct msm_drm_private *priv = NULL;
-	struct sde_encoder_virt *sde_enc = NULL;
-
-	if (!enc || !enc->dev || !enc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	priv = enc->dev->dev_private;
-	sde_enc = to_sde_encoder_virt(enc);
-	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->event_thread))) {
-		SDE_DEBUG_ENC(sde_enc,
-			"invalid cached CRTC: %d or crtc index: %d\n",
-			sde_enc->crtc == NULL,
-			sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
-		return -EINVAL;
-	}
-
-	SDE_EVT32_VERBOSE(DRMID(enc));
-
-	event_thread = &priv->event_thread[sde_enc->crtc->index];
-
-	kthread_queue_work(&event_thread->worker,
-			   &sde_enc->esd_trigger_work);
-	kthread_flush_work(&sde_enc->esd_trigger_work);
-
-	/**
-	 * panel may stop generating te signal (vsync) during esd failure. rsc
-	 * hardware may hang without vsync. Avoid rsc hang by generating the
-	 * vsync from watchdog timer instead of panel.
-	 */
-	_sde_encoder_switch_to_watchdog_vsync(enc);
-
-	sde_encoder_wait_for_event(enc, MSM_ENC_TX_COMPLETE);
-
-	return 0;
-}
-
-bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!encoder) {
-		SDE_ERROR("invalid drm enc\n");
-		return false;
-	}
-
-	sde_enc = to_sde_encoder_virt(encoder);
-
-	return sde_enc->recovery_events_enabled;
-}
-
-void sde_encoder_recovery_events_handler(struct drm_encoder *encoder,
-		bool enabled)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!encoder) {
-		SDE_ERROR("invalid drm enc\n");
-		return;
-	}
-
-	sde_enc = to_sde_encoder_virt(encoder);
-	sde_enc->recovery_events_enabled = enabled;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
deleted file mode 100644
index a03ec6d..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SDE_ENCODER_H__
-#define __SDE_ENCODER_H__
-
-#include <drm/drm_crtc.h>
-
-#include "msm_prop.h"
-#include "sde_hw_mdss.h"
-#include "sde_kms.h"
-
-#define SDE_ENCODER_FRAME_EVENT_DONE			BIT(0)
-#define SDE_ENCODER_FRAME_EVENT_ERROR			BIT(1)
-#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD		BIT(2)
-#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE	BIT(3)
-#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE	BIT(4)
-
-#define IDLE_POWERCOLLAPSE_DURATION	(66 - 16/2)
-#define IDLE_POWERCOLLAPSE_IN_EARLY_WAKEUP (200 - 16/2)
-
-/**
- * Encoder functions and data types
- * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
- * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
- * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
- * @display_num_of_h_tiles: Number of horizontal tiles in case of split
- *                          interface
- * @is_primary: set to true if the display is primary display
- * @topology:   Topology of the display
- */
-struct sde_encoder_hw_resources {
-	enum sde_intf_mode intfs[INTF_MAX];
-	enum sde_intf_mode wbs[WB_MAX];
-	bool needs_cdm;
-	u32 display_num_of_h_tiles;
-	bool is_primary;
-	struct msm_display_topology topology;
-};
-
-/**
- * sde_encoder_kickoff_params - info encoder requires at kickoff
- * @is_primary: set to true if the display is primary display
- * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
- *                      the bounds of the physical display at the bit index
- * @recovery_events_enabled: indicates status of client for recoovery events
- * @frame_trigger_mode: indicates frame trigger mode
- */
-struct sde_encoder_kickoff_params {
-	u32 is_primary;
-	unsigned long affected_displays;
-	bool recovery_events_enabled;
-	enum frame_trigger_mode_type frame_trigger_mode;
-};
-
-/**
- * sde_encoder_get_hw_resources - Populate table of required hardware resources
- * @encoder:	encoder pointer
- * @hw_res:	resource table to populate with encoder required resources
- * @conn_state:	report hw reqs based on this proposed connector state
- */
-void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state);
-
-/**
- * sde_encoder_register_vblank_callback - provide callback to encoder that
- *	will be called on the next vblank.
- * @encoder:	encoder pointer
- * @cb:		callback pointer, provide NULL to deregister and disable IRQs
- * @data:	user data provided to callback
- */
-void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
-		void (*cb)(void *), void *data);
-
-/**
- * sde_encoder_register_frame_event_callback - provide callback to encoder that
- *	will be called after the request is complete, or other events.
- * @encoder:	encoder pointer
- * @cb:		callback pointer, provide NULL to deregister
- * @crtc:	pointer to drm_crtc object interested in frame events
- */
-void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
-		void (*cb)(void *, u32), struct drm_crtc *crtc);
-
-/**
- * sde_encoder_get_rsc_client - gets the rsc client state for primary
- *      for primary display.
- * @encoder:	encoder pointer
- */
-struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_poll_line_counts - poll encoder line counts for start of frame
- * @encoder:	encoder pointer
- * @Returns:	zero on success
- */
-int sde_encoder_poll_line_counts(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
- *	path (i.e. ctl flush and start) at next appropriate time.
- *	Immediately: if no previous commit is outstanding.
- *	Delayed: Block until next trigger can be issued.
- * @encoder:	encoder pointer
- * @params:	kickoff time parameters
- * @Returns:	Zero on success, last detected error otherwise
- */
-int sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
-		struct sde_encoder_kickoff_params *params);
-
-/**
- * sde_encoder_trigger_kickoff_pending - Clear the flush bits from previous
- *        kickoff and trigger the ctl prepare progress for command mode display.
- * @encoder:	encoder pointer
- */
-void sde_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
- *	(i.e. ctl flush and start) immediately.
- * @encoder:	encoder pointer
- * @is_error:	whether the current commit needs to be aborted and replaced
- *		with a 'safe' commit
- */
-void sde_encoder_kickoff(struct drm_encoder *encoder, bool is_error);
-
-/**
- * sde_encoder_wait_for_event - Waits for encoder events
- * @encoder:	encoder pointer
- * @event:      event to wait for
- * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
- *                        frames to hardware at a vblank or ctl_start
- *                        Encoders will map this differently depending on the
- *                        panel type.
- *	                  vid mode -> vsync_irq
- *                        cmd mode -> ctl_start
- * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
- *                        the panel. Encoders will map this differently
- *                        depending on the panel type.
- *                        vid mode -> vsync_irq
- *                        cmd mode -> pp_done
- * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
- */
-int sde_encoder_wait_for_event(struct drm_encoder *drm_encoder,
-						enum msm_event_wait event);
-
-/**
- * sde_encoder_idle_request - request for idle request to avoid 4 vsync cycle
- *                            to turn off the clocks.
- * @encoder:	encoder pointer
- * Returns: 0 on success, errorcode otherwise
- */
-int sde_encoder_idle_request(struct drm_encoder *drm_enc);
-
-/*
- * sde_encoder_get_fps - get interface frame rate of the given encoder
- * @encoder: Pointer to drm encoder object
- */
-u32 sde_encoder_get_fps(struct drm_encoder *encoder);
-
-/*
- * sde_encoder_get_intf_mode - get interface mode of the given encoder
- * @encoder: Pointer to drm encoder object
- */
-enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_control_te - control enabling/disabling VSYNC_IN_EN
- * @encoder:	encoder pointer
- * @enable:	boolean to indicate enable/disable
- */
-void sde_encoder_control_te(struct drm_encoder *encoder, bool enable);
-
-/**
- * sde_encoder_virt_restore - restore the encoder configs
- * @encoder:	encoder pointer
- */
-void sde_encoder_virt_restore(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
- * @drm_enc: Pointer to drm encoder object
- * @Return: true if encoder is in DSC merge mode
- */
-bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
-
-/**
- * sde_encoder_check_mode - check if given mode is supported or not
- * @drm_enc: Pointer to drm encoder object
- * @mode: Mode to be checked
- * @Return: true if it is cmd mode
- */
-bool sde_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode);
-
-/**
- * sde_encoder_init - initialize virtual encoder object
- * @dev:        Pointer to drm device structure
- * @disp_info:  Pointer to display information structure
- * Returns:     Pointer to newly created drm encoder
- */
-struct drm_encoder *sde_encoder_init(
-		struct drm_device *dev,
-		struct msm_display_info *disp_info);
-
-/**
- * sde_encoder_destroy - destroy previously initialized virtual encoder
- * @drm_enc:    Pointer to previously created drm encoder structure
- */
-void sde_encoder_destroy(struct drm_encoder *drm_enc);
-
-/**
- * sde_encoder_prepare_commit - prepare encoder at the very beginning of an
- *	atomic commit, before any registers are written
- * @drm_enc:    Pointer to previously created drm encoder structure
- */
-void sde_encoder_prepare_commit(struct drm_encoder *drm_enc);
-
-/**
- * sde_encoder_update_caps_for_cont_splash - update encoder settings during
- *	device bootup when cont_splash is enabled
- * @drm_enc:    Pointer to drm encoder structure
- * @splash_display: Pointer to sde_splash_display corresponding to this encoder
- * @enable:	boolean indicates enable or displae state of splash
- * @Return:     true if successful in updating the encoder structure
- */
-int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder,
-		struct sde_splash_display *splash_display, bool enable);
-
-/**
- * sde_encoder_display_failure_notification - update sde encoder state for
- * esd timeout or other display failure notification. This event flows from
- * dsi, sde_connector to sde_encoder.
- *
- * This api must not be called from crtc_commit (display) thread because it
- * requests the flush work on same thread. It is called from esd check thread
- * based on current design.
- *
- *      TODO: manage the event at sde_kms level for forward processing.
- * @drm_enc:    Pointer to drm encoder structure
- * @Return:     true if successful in updating the encoder structure
- */
-int sde_encoder_display_failure_notification(struct drm_encoder *enc);
-
-/**
- * sde_encoder_recovery_events_enabled - checks if client has enabled
- * sw recovery mechanism for this connector
- * @drm_enc:    Pointer to drm encoder structure
- * @Return:     true if enabled
- */
-bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_recovery_events_handler - handler to enable/disable the
- * sw recovery for this connector
- * @drm_enc:    Pointer to drm encoder structure
- */
-void sde_encoder_recovery_events_handler(struct drm_encoder *encoder,
-		bool val);
-/**
- * sde_encoder_in_clone_mode - checks if underlying phys encoder is in clone
- *	mode or independent display mode. ref@ WB in Concurrent writeback mode.
- * @drm_enc:    Pointer to drm encoder structure
- * @Return:     true if successful in updating the encoder structure
- */
-bool sde_encoder_in_clone_mode(struct drm_encoder *enc);
-
-/**
- * sde_encoder_is_primary_display - checks if underlying display is primary
- *     display or not.
- * @drm_enc:    Pointer to drm encoder structure
- * @Return:     true if it is primary display. false if secondary display
- */
-bool sde_encoder_is_primary_display(struct drm_encoder *enc);
-
-/**
- * sde_encoder_control_idle_pc - control enable/disable of idle power collapse
- * @drm_enc:    Pointer to drm encoder structure
- * @enable:	enable/disable flag
- */
-void sde_encoder_control_idle_pc(struct drm_encoder *enc, bool enable);
-
-/**
- * sde_encoder_in_cont_splash - checks if display is in continuous splash
- * @drm_enc:    Pointer to drm encoder structure
- * @Return:     true if display in continuous splash
- */
-int sde_encoder_in_cont_splash(struct drm_encoder *enc);
-
-/**
- * sde_encoder_uidle_enable - control enable/disable of uidle
- * @drm_enc:    Pointer to drm encoder structure
- * @enable:	enable/disable flag
- */
-void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable);
-
-#endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
deleted file mode 100644
index 619d3d2..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ /dev/null
@@ -1,727 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ENCODER_PHYS_H__
-#define __SDE_ENCODER_PHYS_H__
-
-#include <linux/jiffies.h>
-#include <linux/sde_rsc.h>
-
-#include "sde_kms.h"
-#include "sde_hw_intf.h"
-#include "sde_hw_pingpong.h"
-#include "sde_hw_ctl.h"
-#include "sde_hw_top.h"
-#include "sde_hw_wb.h"
-#include "sde_hw_cdm.h"
-#include "sde_encoder.h"
-#include "sde_connector.h"
-
-#define SDE_ENCODER_NAME_MAX	16
-
-/* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KICKOFF_TIMEOUT_MS		84
-#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
-
-/**
- * enum sde_enc_split_role - Role this physical encoder will play in a
- *	split-panel configuration, where one panel is master, and others slaves.
- *	Masters have extra responsibilities, like managing the VBLANK IRQ.
- * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
- * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
- * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
- * @ENC_ROLE_SKIP:	This encoder is not participating in kickoffs
- */
-enum sde_enc_split_role {
-	ENC_ROLE_SOLO,
-	ENC_ROLE_MASTER,
-	ENC_ROLE_SLAVE,
-	ENC_ROLE_SKIP
-};
-
-/**
- * enum sde_enc_enable_state - current enabled state of the physical encoder
- * @SDE_ENC_DISABLING:	Encoder transitioning to disable state
- *			Events bounding transition are encoder type specific
- * @SDE_ENC_DISABLED:	Encoder is disabled
- * @SDE_ENC_ENABLING:	Encoder transitioning to enabled
- *			Events bounding transition are encoder type specific
- * @SDE_ENC_ENABLED:	Encoder is enabled
- * @SDE_ENC_ERR_NEEDS_HW_RESET:	Encoder is enabled, but requires a hw_reset
- *				to recover from a previous error
- */
-enum sde_enc_enable_state {
-	SDE_ENC_DISABLING,
-	SDE_ENC_DISABLED,
-	SDE_ENC_ENABLING,
-	SDE_ENC_ENABLED,
-	SDE_ENC_ERR_NEEDS_HW_RESET
-};
-
-struct sde_encoder_phys;
-
-/**
- * struct sde_encoder_virt_ops - Interface the containing virtual encoder
- *	provides for the physical encoders to use to callback.
- * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
- *			Note: This is called from IRQ handler context.
- * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
- *			Note: This is called from IRQ handler context.
- * @handle_frame_done:	Notify virtual encoder that this phys encoder
- *			completes last request frame.
- * @get_qsync_fps:	Returns the min fps for the qsync feature.
- */
-struct sde_encoder_virt_ops {
-	void (*handle_vblank_virt)(struct drm_encoder *parent,
-			struct sde_encoder_phys *phys);
-	void (*handle_underrun_virt)(struct drm_encoder *parent,
-			struct sde_encoder_phys *phys);
-	void (*handle_frame_done)(struct drm_encoder *parent,
-			struct sde_encoder_phys *phys, u32 event);
-	void (*get_qsync_fps)(struct drm_encoder *parent,
-			u32 *qsync_fps);
-};
-
-/**
- * struct sde_encoder_phys_ops - Interface the physical encoders provide to
- *	the containing virtual encoder.
- * @late_register:		DRM Call. Add Userspace interfaces, debugfs.
- * @prepare_commit:		MSM Atomic Call, start of atomic commit sequence
- * @is_master:			Whether this phys_enc is the current master
- *				encoder. Can be switched at enable time. Based
- *				on split_role and current mode (CMD/VID).
- * @mode_fixup:			DRM Call. Fixup a DRM mode.
- * @cont_splash_mode_set:	mode set with specific HW resources during
- *                              cont splash enabled state.
- * @mode_set:			DRM Call. Set a DRM mode.
- *				This likely caches the mode, for use at enable.
- * @enable:			DRM Call. Enable a DRM mode.
- * @disable:			DRM Call. Disable mode.
- * @atomic_check:		DRM Call. Atomic check new DRM state.
- * @destroy:			DRM Call. Destroy and release resources.
- * @get_hw_resources:		Populate the structure with the hardware
- *				resources that this phys_enc is using.
- *				Expect no overlap between phys_encs.
- * @control_vblank_irq		Register/Deregister for VBLANK IRQ
- * @wait_for_commit_done:	Wait for hardware to have flushed the
- *				current pending frames to hardware
- * @wait_for_tx_complete:	Wait for hardware to transfer the pixels
- *				to the panel
- * @wait_for_vblank:		Wait for VBLANK, for sub-driver internal use
- * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
- *				For CMD encoder, may wait for previous tx done
- * @handle_post_kickoff:	Do any work necessary post-kickoff work
- * @trigger_flush:		Process flush event on physical encoder
- * @trigger_start:		Process start event on physical encoder
- * @needs_single_flush:		Whether encoder slaves need to be flushed
- * @setup_misr:		Sets up MISR, enable and disables based on sysfs
- * @collect_misr:		Collects MISR data on frame update
- * @hw_reset:			Issue HW recovery such as CTL reset and clear
- *				SDE_ENC_ERR_NEEDS_HW_RESET state
- * @irq_control:		Handler to enable/disable all the encoder IRQs
- * @update_split_role:		Update the split role of the phys enc
- * @control_te:			Interface to control the vsync_enable status
- * @restore:			Restore all the encoder configs.
- * @is_autorefresh_enabled:	provides the autorefresh current
- *                              enable/disable state.
- * @get_line_count:		Obtain current internal vertical line count
- * @get_wr_line_count:		Obtain current output vertical line count
- * @wait_dma_trigger:		Returns true if lut dma has to trigger and wait
- *                              unitl transaction is complete.
- * @wait_for_active:		Wait for display scan line to be in active area
- * @setup_vsync_source:		Configure vsync source selection for cmd mode.
- */
-
-struct sde_encoder_phys_ops {
-	int (*late_register)(struct sde_encoder_phys *encoder,
-			struct dentry *debugfs_root);
-	void (*prepare_commit)(struct sde_encoder_phys *encoder);
-	bool (*is_master)(struct sde_encoder_phys *encoder);
-	bool (*mode_fixup)(struct sde_encoder_phys *encoder,
-			const struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
-	void (*mode_set)(struct sde_encoder_phys *encoder,
-			struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
-	void (*cont_splash_mode_set)(struct sde_encoder_phys *encoder,
-			struct drm_display_mode *adjusted_mode);
-	void (*enable)(struct sde_encoder_phys *encoder);
-	void (*disable)(struct sde_encoder_phys *encoder);
-	int (*atomic_check)(struct sde_encoder_phys *encoder,
-			    struct drm_crtc_state *crtc_state,
-			    struct drm_connector_state *conn_state);
-	void (*destroy)(struct sde_encoder_phys *encoder);
-	void (*get_hw_resources)(struct sde_encoder_phys *encoder,
-			struct sde_encoder_hw_resources *hw_res,
-			struct drm_connector_state *conn_state);
-	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
-	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
-	int (*wait_for_tx_complete)(struct sde_encoder_phys *phys_enc);
-	int (*wait_for_vblank)(struct sde_encoder_phys *phys_enc);
-	int (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
-			struct sde_encoder_kickoff_params *params);
-	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
-	void (*trigger_flush)(struct sde_encoder_phys *phys_enc);
-	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
-	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
-
-	void (*setup_misr)(struct sde_encoder_phys *phys_encs,
-				bool enable, u32 frame_count);
-	int (*collect_misr)(struct sde_encoder_phys *phys_enc, bool nonblock,
-			u32 *misr_value);
-	void (*hw_reset)(struct sde_encoder_phys *phys_enc);
-	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
-	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
-			enum sde_enc_split_role role);
-	void (*control_te)(struct sde_encoder_phys *phys_enc, bool enable);
-	void (*restore)(struct sde_encoder_phys *phys);
-	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
-	int (*get_line_count)(struct sde_encoder_phys *phys);
-	int (*get_wr_line_count)(struct sde_encoder_phys *phys);
-	bool (*wait_dma_trigger)(struct sde_encoder_phys *phys);
-	int (*wait_for_active)(struct sde_encoder_phys *phys);
-	void (*setup_vsync_source)(struct sde_encoder_phys *phys,
-			u32 vsync_source, bool is_dummy);
-};
-
-/**
- * enum sde_intr_idx - sde encoder interrupt index
- * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
- * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
- * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
- * @INTR_IDX_RDPTR:    Readpointer done unterrupt for cmd mode panel
- * @INTR_IDX_WB_DONE:  Writeback done interrupt for WB
- * @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB
- * @INTR_IDX_PP3_OVFL: Pingpong overflow interrupt on PP3 for Concurrent WB
- * @INTR_IDX_PP4_OVFL: Pingpong overflow interrupt on PP4 for Concurrent WB
- * @INTR_IDX_PP5_OVFL: Pingpong overflow interrupt on PP5 for Concurrent WB
- * @INTR_IDX_AUTOREFRESH_DONE:  Autorefresh done for cmd mode panel meaning
- *                              autorefresh has triggered a double buffer flip
- */
-enum sde_intr_idx {
-	INTR_IDX_VSYNC,
-	INTR_IDX_PINGPONG,
-	INTR_IDX_UNDERRUN,
-	INTR_IDX_CTL_START,
-	INTR_IDX_RDPTR,
-	INTR_IDX_AUTOREFRESH_DONE,
-	INTR_IDX_WB_DONE,
-	INTR_IDX_PP2_OVFL,
-	INTR_IDX_PP3_OVFL,
-	INTR_IDX_PP4_OVFL,
-	INTR_IDX_PP5_OVFL,
-	INTR_IDX_MAX,
-};
-
-/**
- * sde_encoder_irq - tracking structure for interrupts
- * @name:		string name of interrupt
- * @intr_type:		Encoder interrupt type
- * @intr_idx:		Encoder interrupt enumeration
- * @hw_idx:		HW Block ID
- * @irq_idx:		IRQ interface lookup index from SDE IRQ framework
- *			will be -EINVAL if IRQ is not registered
- * @irq_cb:		interrupt callback
- */
-struct sde_encoder_irq {
-	const char *name;
-	enum sde_intr_type intr_type;
-	enum sde_intr_idx intr_idx;
-	int hw_idx;
-	int irq_idx;
-	struct sde_irq_callback cb;
-};
-
-/**
- * struct sde_encoder_phys - physical encoder that drives a single INTF block
- *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
- *	phys_vid or phys_cmd for video mode or command mode encs respectively.
- * @parent:		Pointer to the containing virtual encoder
- * @connector:		If a mode is set, cached pointer to the active connector
- * @ops:		Operations exposed to the virtual encoder
- * @parent_ops:		Callbacks exposed by the parent to the phys_enc
- * @hw_mdptop:		Hardware interface to the top registers
- * @hw_ctl:		Hardware interface to the ctl registers
- * @hw_intf:		Hardware interface to INTF registers
- * @hw_cdm:		Hardware interface to the cdm registers
- * @cdm_cfg:		Chroma-down hardware configuration
- * @hw_pp:		Hardware interface to the ping pong registers
- * @sde_kms:		Pointer to the sde_kms top level
- * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
- * @enabled:		Whether the encoder has enabled and running a mode
- * @split_role:		Role to play in a split-panel configuration
- * @intf_mode:		Interface mode
- * @intf_idx:		Interface index on sde hardware
- * @intf_cfg:		Interface hardware configuration
- * @intf_cfg_v1:        Interface hardware configuration to be used if control
- *                      path supports SDE_CTL_ACTIVE_CFG
- * @comp_type:      Type of compression supported
- * @comp_ratio:		Compression ratio
- * @dsc_extra_pclk_cycle_cnt: Extra pclk cycle count for DSC over DP
- * @dsc_extra_disp_width: Additional display width for DSC over DP
- * @wide_bus_en:	Wide-bus configuraiton
- * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
- * @enable_state:	Enable state tracking
- * @vblank_refcount:	Reference count of vblank request
- * @wbirq_refcount:	Reference count of wb irq request
- * @vsync_cnt:		Vsync count for the physical encoder
- * @underrun_cnt:	Underrun count for the physical encoder
- * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
- *				vs. the number of done/vblank irqs. Should hover
- *				between 0-2 Incremented when a new kickoff is
- *				scheduled. Decremented in irq handler
- * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
- *                              pending.
- * @pending_retire_fence_cnt:   Atomic counter tracking the pending retire
- *                              fences that have to be signalled.
- * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
- * @irq:			IRQ tracking structures
- * @has_intf_te:		Interface TE configuration support
- * @cont_splash_single_flush	Variable to check if single flush is enabled.
- * @cont_splash_enabled:	Variable to store continuous splash settings.
- * @in_clone_mode		Indicates if encoder is in clone mode ref@CWB
- * @vfp_cached:			cached vertical front porch to be used for
- *				programming ROT and MDP fetch start
- * @frame_trigger_mode:		frame trigger mode indication for command
- *				mode display
- */
-struct sde_encoder_phys {
-	struct drm_encoder *parent;
-	struct drm_connector *connector;
-	struct sde_encoder_phys_ops ops;
-	struct sde_encoder_virt_ops parent_ops;
-	struct sde_hw_mdp *hw_mdptop;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_intf *hw_intf;
-	struct sde_hw_cdm *hw_cdm;
-	struct sde_hw_cdm_cfg cdm_cfg;
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_kms *sde_kms;
-	struct drm_display_mode cached_mode;
-	enum sde_enc_split_role split_role;
-	enum sde_intf_mode intf_mode;
-	enum sde_intf intf_idx;
-	struct sde_hw_intf_cfg intf_cfg;
-	struct sde_hw_intf_cfg_v1 intf_cfg_v1;
-	enum msm_display_compression_type comp_type;
-	enum msm_display_compression_ratio comp_ratio;
-	u32 dsc_extra_pclk_cycle_cnt;
-	u32 dsc_extra_disp_width;
-	bool wide_bus_en;
-	spinlock_t *enc_spinlock;
-	enum sde_enc_enable_state enable_state;
-	struct mutex *vblank_ctl_lock;
-	atomic_t vblank_refcount;
-	atomic_t wbirq_refcount;
-	atomic_t vsync_cnt;
-	atomic_t underrun_cnt;
-	atomic_t pending_ctlstart_cnt;
-	atomic_t pending_kickoff_cnt;
-	atomic_t pending_retire_fence_cnt;
-	wait_queue_head_t pending_kickoff_wq;
-	struct sde_encoder_irq irq[INTR_IDX_MAX];
-	bool has_intf_te;
-	u32 cont_splash_single_flush;
-	bool cont_splash_enabled;
-	bool in_clone_mode;
-	int vfp_cached;
-	enum frame_trigger_mode_type frame_trigger_mode;
-};
-
-static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
-{
-	atomic_inc_return(&phys->pending_ctlstart_cnt);
-	return atomic_inc_return(&phys->pending_kickoff_cnt);
-}
-
-/**
- * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
- *	mode specific operations
- * @base:	Baseclass physical encoder structure
- * @timing_params: Current timing parameter
- * @error_count: Number of consecutive kickoffs that experienced an error
- */
-struct sde_encoder_phys_vid {
-	struct sde_encoder_phys base;
-	struct intf_timing_params timing_params;
-	int error_count;
-};
-
-/**
- * struct sde_encoder_phys_cmd_autorefresh - autorefresh state tracking
- * @cfg: current active autorefresh configuration
- * @kickoff_cnt: atomic count tracking autorefresh done irq kickoffs pending
- * @kickoff_wq:	wait queue for waiting on autorefresh done irq
- */
-struct sde_encoder_phys_cmd_autorefresh {
-	struct sde_hw_autorefresh cfg;
-	atomic_t kickoff_cnt;
-	wait_queue_head_t kickoff_wq;
-};
-
-/**
- * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
- *	mode specific operations
- * @base:	Baseclass physical encoder structure
- * @intf_idx:	Intf Block index used by this phys encoder
- * @stream_sel:	Stream selection for multi-stream interfaces
- * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
- * @autorefresh: autorefresh feature state
- * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
- *                      signaled at the next rd_ptr_irq
- * @rd_ptr_timestamp: last rd_ptr_irq timestamp
- * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
- * @pending_vblank_wq: Wait queue for blocking until VBLANK received
- * @ctl_start_threshold: A threshold in microseconds allows command mode
- *   engine to trigger the retire fence without waiting for rd_ptr.
- */
-struct sde_encoder_phys_cmd {
-	struct sde_encoder_phys base;
-	int stream_sel;
-	int pp_timeout_report_cnt;
-	struct sde_encoder_phys_cmd_autorefresh autorefresh;
-	atomic_t pending_rd_ptr_cnt;
-	ktime_t rd_ptr_timestamp;
-	atomic_t pending_vblank_cnt;
-	wait_queue_head_t pending_vblank_wq;
-	u32 ctl_start_threshold;
-};
-
-/**
- * struct sde_encoder_phys_wb - sub-class of sde_encoder_phys to handle
- *	writeback specific operations
- * @base:		Baseclass physical encoder structure
- * @hw_wb:		Hardware interface to the wb registers
- * @wbdone_timeout:	Timeout value for writeback done in msec
- * @bypass_irqreg:	Bypass irq register/unregister if non-zero
- * @wbdone_complete:	for wbdone irq synchronization
- * @wb_cfg:		Writeback hardware configuration
- * @cdp_cfg:		Writeback CDP configuration
- * @wb_roi:		Writeback region-of-interest
- * @wb_fmt:		Writeback pixel format
- * @wb_fb:		Pointer to current writeback framebuffer
- * @wb_aspace:		Pointer to current writeback address space
- * @frame_count:	Counter of completed writeback operations
- * @kickoff_count:	Counter of issued writeback operations
- * @aspace:		address space identifier for non-secure/secure domain
- * @wb_dev:		Pointer to writeback device
- * @start_time:		Start time of writeback latest request
- * @end_time:		End time of writeback latest request
- * @bo_disable:		Buffer object(s) to use during the disabling state
- * @fb_disable:		Frame buffer to use during the disabling state
- * @crtc		Pointer to drm_crtc
- */
-struct sde_encoder_phys_wb {
-	struct sde_encoder_phys base;
-	struct sde_hw_wb *hw_wb;
-	u32 wbdone_timeout;
-	u32 bypass_irqreg;
-	struct completion wbdone_complete;
-	struct sde_hw_wb_cfg wb_cfg;
-	struct sde_hw_wb_cdp_cfg cdp_cfg;
-	struct sde_rect wb_roi;
-	const struct sde_format *wb_fmt;
-	struct drm_framebuffer *wb_fb;
-	struct msm_gem_address_space *wb_aspace;
-	u32 frame_count;
-	u32 kickoff_count;
-	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
-	struct sde_wb_device *wb_dev;
-	ktime_t start_time;
-	ktime_t end_time;
-	struct drm_gem_object *bo_disable[SDE_MAX_PLANES];
-	struct drm_framebuffer *fb_disable;
-	struct drm_crtc *crtc;
-};
-
-/**
- * struct sde_enc_phys_init_params - initialization parameters for phys encs
- * @sde_kms:		Pointer to the sde_kms top level
- * @parent:		Pointer to the containing virtual encoder
- * @parent_ops:		Callbacks exposed by the parent to the phys_enc
- * @split_role:		Role to play in a split-panel configuration
- * @intf_idx:		Interface index this phys_enc will control
- * @wb_idx:		Writeback index this phys_enc will control
- * @comp_type:      Type of compression supported
- * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
- */
-struct sde_enc_phys_init_params {
-	struct sde_kms *sde_kms;
-	struct drm_encoder *parent;
-	struct sde_encoder_virt_ops parent_ops;
-	enum sde_enc_split_role split_role;
-	enum sde_intf intf_idx;
-	enum sde_wb wb_idx;
-	enum msm_display_compression_type comp_type;
-	spinlock_t *enc_spinlock;
-	struct mutex *vblank_ctl_lock;
-};
-
-/**
- * sde_encoder_wait_info - container for passing arguments to irq wait functions
- * @wq: wait queue structure
- * @atomic_cnt: wait until atomic_cnt equals zero
- * @timeout_ms: timeout value in milliseconds
- */
-struct sde_encoder_wait_info {
-	wait_queue_head_t *wq;
-	atomic_t *atomic_cnt;
-	s64 timeout_ms;
-};
-
-/**
- * sde_encoder_phys_vid_init - Construct a new video mode physical encoder
- * @p:	Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
-struct sde_encoder_phys *sde_encoder_phys_vid_init(
-		struct sde_enc_phys_init_params *p);
-
-/**
- * sde_encoder_phys_cmd_init - Construct a new command mode physical encoder
- * @p:	Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
-struct sde_encoder_phys *sde_encoder_phys_cmd_init(
-		struct sde_enc_phys_init_params *p);
-
-/**
- * sde_encoder_phys_wb_init - Construct a new writeback physical encoder
- * @p:	Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
-#ifdef CONFIG_DRM_SDE_WB
-struct sde_encoder_phys *sde_encoder_phys_wb_init(
-		struct sde_enc_phys_init_params *p);
-#else
-static inline
-struct sde_encoder_phys *sde_encoder_phys_wb_init(
-		struct sde_enc_phys_init_params *p)
-{
-	return NULL;
-}
-#endif
-
-void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb, const struct sde_format *format,
-		struct sde_rect *wb_roi);
-
-/**
- * sde_encoder_helper_trigger_flush - control flush helper function
- *	This helper function may be optionally specified by physical
- *	encoders if they require ctl_flush triggering.
- * @phys_enc: Pointer to physical encoder structure
- */
-void sde_encoder_helper_trigger_flush(struct sde_encoder_phys *phys_enc);
-
-/**
- * sde_encoder_helper_trigger_start - control start helper function
- *	This helper function may be optionally specified by physical
- *	encoders if they require ctl_start triggering.
- * @phys_enc: Pointer to physical encoder structure
- */
-void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
-
-/**
- * sde_encoder_helper_vsync_config - configure vsync source for cmd mode
- * @phys_enc: Pointer to physical encoder structure
- * @vsync_source: vsync source selection
- * @is_dummy: used only for RSC
- */
-void sde_encoder_helper_vsync_config(struct sde_encoder_phys *phys_enc,
-			u32 vsync_source, bool is_dummy);
-
-/**
- * sde_encoder_helper_wait_event_timeout - wait for event with timeout
- *	taking into account that jiffies may jump between reads leading to
- *	incorrectly detected timeouts. Prevent failure in this scenario by
- *	making sure that elapsed time during wait is valid.
- * @drm_id: drm object id for logging
- * @hw_id: hw instance id for logging
- * @info: wait info structure
- */
-int sde_encoder_helper_wait_event_timeout(
-		int32_t drm_id,
-		int32_t hw_id,
-		struct sde_encoder_wait_info *info);
-
-/**
- * sde_encoder_helper_hw_reset - issue ctl hw reset
- *	This helper function may be optionally specified by physical
- *	encoders if they require ctl hw reset. If state is currently
- *	SDE_ENC_ERR_NEEDS_HW_RESET, it is set back to SDE_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc);
-
-static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
-		struct sde_encoder_phys *phys_enc)
-{
-	enum sde_rm_topology_name topology;
-
-	if (!phys_enc || phys_enc->enable_state == SDE_ENC_DISABLING)
-		return BLEND_3D_NONE;
-
-	topology = sde_connector_get_topology_name(phys_enc->connector);
-	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			(topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE ||
-			 topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC))
-		return BLEND_3D_H_ROW_INT;
-
-	return BLEND_3D_NONE;
-}
-
-/**
- * sde_encoder_helper_split_config - split display configuration helper function
- *	This helper function may be used by physical encoders to configure
- *	the split display related registers.
- * @phys_enc: Pointer to physical encoder structure
- * @interface: enum sde_intf setting
- */
-void sde_encoder_helper_split_config(
-		struct sde_encoder_phys *phys_enc,
-		enum sde_intf interface);
-
-/**
- * sde_encoder_helper_reset_mixers - reset mixers associated with phys enc
- * @phys_enc: Pointer to physical encoder structure
- * @fb: Optional fb for specifying new mixer output resolution, may be NULL
- * Return: Zero on success
- */
-int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb);
-
-/**
- * sde_encoder_helper_report_irq_timeout - utility to report error that irq has
- *	timed out, including reporting frame error event to crtc and debug dump
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: Failing interrupt index
- */
-void sde_encoder_helper_report_irq_timeout(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx);
-
-/**
- * sde_encoder_helper_wait_for_irq - utility to wait on an irq.
- *	note: will call sde_encoder_helper_wait_for_irq on timeout
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @wait_info: wait info struct
- * @Return: 0 or -ERROR
- */
-int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx,
-		struct sde_encoder_wait_info *wait_info);
-
-/**
- * sde_encoder_helper_register_irq - register and enable an irq
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
- */
-int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx);
-
-/**
- * sde_encoder_helper_unregister_irq - unregister and disable an irq
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
- */
-int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
-		enum sde_intr_idx intr_idx);
-
-/**
- * sde_encoder_helper_update_intf_cfg - update interface configuration for
- *                                      single control path.
- * @phys_enc: Pointer to physical encoder structure
- */
-void sde_encoder_helper_update_intf_cfg(
-		struct sde_encoder_phys *phys_enc);
-
-/**
- * _sde_encoder_phys_is_dual_ctl - check if encoder needs dual ctl path.
- * @phys_enc: Pointer to physical encoder structure
- * @Return: true if dual ctl paths else false
- */
-static inline bool _sde_encoder_phys_is_dual_ctl(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_kms *sde_kms;
-	enum sde_rm_topology_name topology;
-
-	if (!phys_enc) {
-		pr_err("invalid phys_enc\n");
-		return false;
-	}
-
-	sde_kms = phys_enc->sde_kms;
-	if (!sde_kms) {
-		pr_err("invalid kms\n");
-		return false;
-	}
-
-	topology = sde_connector_get_topology_name(phys_enc->connector);
-
-	return sde_rm_topology_is_dual_ctl(&sde_kms->rm, topology);
-}
-
-/**
- * _sde_encoder_phys_is_ppsplit - check if pp_split is enabled
- * @phys_enc: Pointer to physical encoder structure
- * @Return: true or false
- */
-static inline bool _sde_encoder_phys_is_ppsplit(
-		struct sde_encoder_phys *phys_enc)
-{
-	enum sde_rm_topology_name topology;
-
-	if (!phys_enc) {
-		pr_err("invalid phys_enc\n");
-		return false;
-	}
-
-	topology = sde_connector_get_topology_name(phys_enc->connector);
-	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
-		return true;
-
-	return false;
-}
-
-static inline bool sde_encoder_phys_needs_single_flush(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
-		return false;
-
-	return (_sde_encoder_phys_is_ppsplit(phys_enc) ||
-				!_sde_encoder_phys_is_dual_ctl(phys_enc));
-}
-
-/**
- * sde_encoder_helper_phys_disable - helper function to disable virt encoder
- * @phys_enc: Pointer to physical encoder structure
- * @wb_enc: Pointer to writeback encoder structure
- */
-void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_phys_wb *wb_enc);
-
-/**
- * sde_encoder_helper_setup_misr - helper function to setup misr
- * @enable: enable/disable flag
- * @frame_count: frame count for misr
- */
-void sde_encoder_helper_setup_misr(struct sde_encoder_phys *phys_enc,
-		bool enable, u32 frame_count);
-
-/**
- * sde_encoder_helper_collect_misr - helper function to collect misr
- * @nonblock:  blocking/non-blocking flag
- * @misr_value:  pointer to misr value
- * @Return: zero on success
- */
-int sde_encoder_helper_collect_misr(struct sde_encoder_phys *phys_enc,
-		bool nonblock, u32 *misr_value);
-
-#endif /* __sde_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
deleted file mode 100644
index 5657480..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ /dev/null
@@ -1,1808 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include "sde_encoder_phys.h"
-#include "sde_hw_interrupts.h"
-#include "sde_core_irq.h"
-#include "sde_formats.h"
-#include "sde_trace.h"
-
-#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
-		(e) && (e)->base.parent ? \
-		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
-
-#define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
-		(e) && (e)->base.parent ? \
-		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
-
-#define to_sde_encoder_phys_cmd(x) \
-	container_of(x, struct sde_encoder_phys_cmd, base)
-
-#define PP_TIMEOUT_MAX_TRIALS	4
-
-/*
- * Tearcheck sync start and continue thresholds are empirically found
- * based on common panels In the future, may want to allow panels to override
- * these default values
- */
-#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
-#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
-
-#define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
-
-/*
- * Threshold for signalling retire fences in cases where
- * CTL_START_IRQ is received just after RD_PTR_IRQ
- */
-#define SDE_ENC_CTL_START_THRESHOLD_US 500
-
-#define SDE_ENC_MAX_POLL_TIMEOUT_US	2000
-
-static inline int _sde_encoder_phys_cmd_get_idle_timeout(
-		struct sde_encoder_phys_cmd *cmd_enc)
-{
-	return cmd_enc->autorefresh.cfg.frame_count ?
-			cmd_enc->autorefresh.cfg.frame_count *
-			KICKOFF_TIMEOUT_MS : KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool sde_encoder_phys_cmd_is_master(
-		struct sde_encoder_phys *phys_enc)
-{
-	return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
-}
-
-static bool sde_encoder_phys_cmd_mode_fixup(
-		struct sde_encoder_phys *phys_enc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
-{
-	if (phys_enc)
-		SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
-	return true;
-}
-
-static uint64_t _sde_encoder_phys_cmd_get_autorefresh_property(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct drm_connector *conn = phys_enc->connector;
-
-	if (!conn || !conn->state)
-		return 0;
-
-	return sde_connector_get_property(conn->state,
-				CONNECTOR_PROP_AUTOREFRESH);
-}
-
-static void _sde_encoder_phys_cmd_config_autorefresh(
-		struct sde_encoder_phys *phys_enc,
-		u32 new_frame_count)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
-	struct sde_hw_intf *hw_intf = phys_enc->hw_intf;
-	struct drm_connector *conn = phys_enc->connector;
-	struct sde_hw_autorefresh *cfg_cur, cfg_nxt;
-
-	if (!conn || !conn->state || !hw_pp || !hw_intf)
-		return;
-
-	cfg_cur = &cmd_enc->autorefresh.cfg;
-
-	/* autorefresh property value should be validated already */
-	memset(&cfg_nxt, 0, sizeof(cfg_nxt));
-	cfg_nxt.frame_count = new_frame_count;
-	cfg_nxt.enable = (cfg_nxt.frame_count != 0);
-
-	SDE_DEBUG_CMDENC(cmd_enc, "autorefresh state %d->%d framecount %d\n",
-			cfg_cur->enable, cfg_nxt.enable, cfg_nxt.frame_count);
-	SDE_EVT32(DRMID(phys_enc->parent), hw_pp->idx, hw_intf->idx,
-			cfg_cur->enable, cfg_nxt.enable, cfg_nxt.frame_count);
-
-	/* only proceed on state changes */
-	if (cfg_nxt.enable == cfg_cur->enable)
-		return;
-
-	memcpy(cfg_cur, &cfg_nxt, sizeof(*cfg_cur));
-
-	if (phys_enc->has_intf_te && hw_intf->ops.setup_autorefresh)
-		hw_intf->ops.setup_autorefresh(hw_intf, cfg_cur);
-	else if (hw_pp->ops.setup_autorefresh)
-		hw_pp->ops.setup_autorefresh(hw_pp, cfg_cur);
-}
-
-static void _sde_encoder_phys_cmd_update_flush_mask(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc;
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_pp)
-		return;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-	ctl = phys_enc->hw_ctl;
-
-	if (!ctl)
-		return;
-
-	if (!ctl->ops.update_bitmask_intf ||
-		(test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
-		!ctl->ops.update_bitmask_merge3d)) {
-		SDE_ERROR("invalid hw_ctl ops %d\n", ctl->idx);
-		return;
-	}
-
-	ctl->ops.update_bitmask_intf(ctl, phys_enc->intf_idx, 1);
-
-	if (ctl->ops.update_bitmask_merge3d && phys_enc->hw_pp->merge_3d)
-		ctl->ops.update_bitmask_merge3d(ctl,
-			phys_enc->hw_pp->merge_3d->idx, 1);
-
-	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d intf_idx %x\n",
-			ctl->idx - CTL_0, phys_enc->intf_idx);
-}
-
-static void _sde_encoder_phys_cmd_update_intf_cfg(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc)
-		return;
-
-	ctl = phys_enc->hw_ctl;
-	if (!ctl)
-		return;
-
-	if (ctl->ops.setup_intf_cfg) {
-		struct sde_hw_intf_cfg intf_cfg = { 0 };
-
-		intf_cfg.intf = phys_enc->intf_idx;
-		intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
-		intf_cfg.stream_sel = cmd_enc->stream_sel;
-		intf_cfg.mode_3d =
-			sde_encoder_helper_get_3d_blend_mode(phys_enc);
-		ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
-	} else if (test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features)) {
-		sde_encoder_helper_update_intf_cfg(phys_enc);
-	}
-}
-
-static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-	unsigned long lock_flags;
-	int new_cnt;
-	u32 event = SDE_ENCODER_FRAME_EVENT_DONE |
-			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
-
-	if (!phys_enc || !phys_enc->hw_pp)
-		return;
-
-	SDE_ATRACE_BEGIN("pp_done_irq");
-
-	/* notify all synchronous clients first, then asynchronous clients */
-	if (phys_enc->parent_ops.handle_frame_done &&
-	    atomic_read(&phys_enc->pending_kickoff_cnt))
-		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
-				phys_enc, event);
-
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt, event);
-
-	/*
-	 * Reduce the refcount for the retire fence as well as for the ctl_start
-	 * if the counters are greater than zero. Signal retire fence if there
-	 * was a retire fence count pending and kickoff count is zero.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc) && (new_cnt == 0)) {
-		while (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0)) {
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(&phys_enc->pending_ctlstart_cnt,
-				-1, 0);
-		}
-	}
-
-	/* Signal any waiting atomic commit thread */
-	wake_up_all(&phys_enc->pending_kickoff_wq);
-	SDE_ATRACE_END("pp_done_irq");
-}
-
-static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	unsigned long lock_flags;
-	int new_cnt;
-
-	if (!cmd_enc)
-		return;
-
-	phys_enc = &cmd_enc->base;
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	new_cnt = atomic_add_unless(&cmd_enc->autorefresh.kickoff_cnt, -1, 0);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			new_cnt);
-
-	/* Signal any waiting atomic commit thread */
-	wake_up_all(&cmd_enc->autorefresh.kickoff_wq);
-}
-
-static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_encoder_phys_cmd *cmd_enc;
-	u32 event = 0;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return;
-
-	SDE_ATRACE_BEGIN("rd_ptr_irq");
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	/**
-	 * signal only for master, when the ctl_start irq is
-	 * done and incremented the pending_rd_ptr_cnt.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-		    && atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
-		    && atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-		if (phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc, event);
-	}
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			event, 0xfff);
-
-	if (phys_enc->parent_ops.handle_vblank_virt)
-		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
-			phys_enc);
-
-	cmd_enc->rd_ptr_timestamp = ktime_get();
-
-	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
-	wake_up_all(&cmd_enc->pending_vblank_wq);
-	SDE_ATRACE_END("rd_ptr_irq");
-}
-
-static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_encoder_phys_cmd *cmd_enc;
-	struct sde_hw_ctl *ctl;
-	u32 event = 0;
-	s64 time_diff_us;
-
-	if (!phys_enc || !phys_enc->hw_ctl)
-		return;
-
-	SDE_ATRACE_BEGIN("ctl_start_irq");
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	ctl = phys_enc->hw_ctl;
-	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-
-	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
-
-	/* handle retire fence based on only master */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_read(&phys_enc->pending_retire_fence_cnt)) {
-		/**
-		 * Handle rare cases where the ctl_start_irq is received
-		 * after rd_ptr_irq. If it falls within a threshold, it is
-		 * guaranteed the frame would be picked up in the current TE.
-		 * Signal retire fence immediately in such case. The threshold
-		 * timer adds extra line time duration based on lowest panel
-		 * fps for qsync enabled case.
-		 */
-		if ((time_diff_us <= cmd_enc->ctl_start_threshold)
-			    && atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc, event);
-
-		/**
-		 * In ideal cases, ctl_start_irq is received before the
-		 * rd_ptr_irq, so set the atomic flag to indicate the event
-		 * and rd_ptr_irq will handle signalling the retire fence
-		 */
-		} else {
-			atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
-		}
-	}
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0,
-				time_diff_us, event, 0xfff);
-
-	/* Signal any waiting ctl start interrupt */
-	wake_up_all(&phys_enc->pending_kickoff_wq);
-	SDE_ATRACE_END("ctl_start_irq");
-}
-
-static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-
-	if (!phys_enc)
-		return;
-
-	if (phys_enc->parent_ops.handle_underrun_virt)
-		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
-			phys_enc);
-}
-
-static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_irq *irq;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid args %d %d\n", !phys_enc,
-			phys_enc ? !phys_enc->hw_pp : 0);
-		return;
-	}
-
-	if (phys_enc->has_intf_te && !phys_enc->hw_intf) {
-		SDE_ERROR("invalid intf configuration\n");
-		return;
-	}
-
-	irq = &phys_enc->irq[INTR_IDX_CTL_START];
-	irq->hw_idx = phys_enc->hw_ctl->idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
-	irq->hw_idx = phys_enc->hw_pp->idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_RDPTR];
-	irq->irq_idx = -EINVAL;
-	if (phys_enc->has_intf_te)
-		irq->hw_idx = phys_enc->hw_intf->idx;
-	else
-		irq->hw_idx = phys_enc->hw_pp->idx;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->hw_idx = phys_enc->intf_idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
-	irq->irq_idx = -EINVAL;
-	if (phys_enc->has_intf_te)
-		irq->hw_idx = phys_enc->hw_intf->idx;
-	else
-		irq->hw_idx = phys_enc->hw_pp->idx;
-}
-
-static void sde_encoder_phys_cmd_cont_splash_mode_set(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *adj_mode)
-{
-	struct sde_hw_intf *hw_intf;
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_encoder_phys_cmd *cmd_enc;
-
-	if (!phys_enc || !adj_mode) {
-		SDE_ERROR("invalid args\n");
-		return;
-	}
-
-	phys_enc->cached_mode = *adj_mode;
-	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	if (!phys_enc->hw_ctl || !phys_enc->hw_pp) {
-		SDE_DEBUG("invalid ctl:%d pp:%d\n",
-			(phys_enc->hw_ctl == NULL),
-			(phys_enc->hw_pp == NULL));
-		return;
-	}
-
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-		hw_pp = phys_enc->hw_pp;
-		hw_intf = phys_enc->hw_intf;
-
-		if (phys_enc->has_intf_te && hw_intf &&
-				hw_intf->ops.get_autorefresh) {
-			hw_intf->ops.get_autorefresh(hw_intf,
-					&cmd_enc->autorefresh.cfg);
-		} else if (hw_pp && hw_pp->ops.get_autorefresh) {
-			hw_pp->ops.get_autorefresh(hw_pp,
-					&cmd_enc->autorefresh.cfg);
-		}
-	}
-
-	_sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
-}
-
-static void sde_encoder_phys_cmd_mode_set(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_rm *rm = &phys_enc->sde_kms->rm;
-	struct sde_rm_hw_iter iter;
-	int i, instance;
-
-	if (!phys_enc || !mode || !adj_mode) {
-		SDE_ERROR("invalid args\n");
-		return;
-	}
-	phys_enc->cached_mode = *adj_mode;
-	SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
-	drm_mode_debug_printmodeline(adj_mode);
-
-	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
-	/* Retrieve previously allocated HW Resources. Shouldn't fail */
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
-	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
-			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
-	}
-
-	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
-		SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
-				PTR_ERR(phys_enc->hw_ctl));
-		phys_enc->hw_ctl = NULL;
-		return;
-	}
-
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_INTF);
-	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
-			phys_enc->hw_intf = (struct sde_hw_intf *)iter.hw;
-	}
-
-	if (IS_ERR_OR_NULL(phys_enc->hw_intf)) {
-		SDE_ERROR_CMDENC(cmd_enc, "failed to init intf: %ld\n",
-				PTR_ERR(phys_enc->hw_intf));
-		phys_enc->hw_intf = NULL;
-		return;
-	}
-
-	_sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
-}
-
-static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
-		struct sde_encoder_phys *phys_enc,
-		bool recovery_events)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR
-				| SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
-	struct drm_connector *conn;
-	int event;
-	u32 pending_kickoff_cnt;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
-		return -EINVAL;
-
-	conn = phys_enc->connector;
-
-	if (atomic_read(&phys_enc->pending_kickoff_cnt) == 0)
-		return 0;
-
-	cmd_enc->pp_timeout_report_cnt++;
-	pending_kickoff_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
-
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		 /* trigger the retire fence if it was missed */
-		if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0))
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent,
-				phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-		atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			cmd_enc->pp_timeout_report_cnt,
-			pending_kickoff_cnt,
-			frame_event);
-
-	/* decrement the kickoff_cnt before checking for ESD status */
-	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-
-	/* check if panel is still sending TE signal or not */
-	if (sde_connector_esd_status(phys_enc->connector))
-		goto exit;
-
-	/* to avoid flooding, only log first time, and "dead" time */
-	if (cmd_enc->pp_timeout_report_cnt == 1) {
-		SDE_ERROR_CMDENC(cmd_enc,
-				"pp:%d kickoff timed out ctl %d koff_cnt %d\n",
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				phys_enc->hw_ctl->idx - CTL_0,
-				pending_kickoff_cnt);
-
-		SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
-		SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
-	}
-
-	/*
-	 * if the recovery event is registered by user, don't panic
-	 * trigger panic on first timeout if no listener registered
-	 */
-	if (recovery_events) {
-		event = cmd_enc->pp_timeout_report_cnt > PP_TIMEOUT_MAX_TRIALS ?
-			SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE;
-		sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
-				sizeof(uint8_t), event);
-	} else if (cmd_enc->pp_timeout_report_cnt) {
-		SDE_DBG_DUMP("panic");
-	}
-
-	/* request a ctl reset before the next kickoff */
-	phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;
-
-exit:
-	if (phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc, frame_event);
-
-	return -ETIMEDOUT;
-}
-
-static bool _sde_encoder_phys_is_ppsplit_slave(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
-		return false;
-
-	return _sde_encoder_phys_is_ppsplit(phys_enc) &&
-			phys_enc->split_role == ENC_ROLE_SLAVE;
-}
-
-static bool _sde_encoder_phys_is_disabling_ppsplit_slave(
-		struct sde_encoder_phys *phys_enc)
-{
-	enum sde_rm_topology_name old_top;
-
-	if (!phys_enc || !phys_enc->connector ||
-			phys_enc->split_role != ENC_ROLE_SLAVE)
-		return false;
-
-	old_top = sde_connector_get_old_topology_name(
-			phys_enc->connector->state);
-
-	return old_top == SDE_RM_TOPOLOGY_PPSPLIT;
-}
-
-static int _sde_encoder_phys_cmd_poll_write_pointer_started(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
-	struct sde_hw_intf *hw_intf = phys_enc->hw_intf;
-	struct sde_hw_pp_vsync_info info;
-	u32 timeout_us = SDE_ENC_WR_PTR_START_TIMEOUT_US;
-	int ret = 0;
-
-	if (!hw_pp || !hw_intf)
-		return 0;
-
-	if (phys_enc->has_intf_te) {
-		if (!hw_intf->ops.get_vsync_info ||
-				!hw_intf->ops.poll_timeout_wr_ptr)
-			goto end;
-	} else {
-		if (!hw_pp->ops.get_vsync_info ||
-				!hw_pp->ops.poll_timeout_wr_ptr)
-			goto end;
-	}
-
-	if (phys_enc->has_intf_te)
-		ret = hw_intf->ops.get_vsync_info(hw_intf, &info);
-	else
-		ret = hw_pp->ops.get_vsync_info(hw_pp, &info);
-
-	if (ret)
-		return ret;
-
-	SDE_DEBUG_CMDENC(cmd_enc,
-			"pp:%d intf:%d rd_ptr %d wr_ptr %d\n",
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			info.rd_ptr_line_count,
-			info.wr_ptr_line_count);
-	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			info.wr_ptr_line_count);
-
-	if (phys_enc->has_intf_te)
-		ret = hw_intf->ops.poll_timeout_wr_ptr(hw_intf, timeout_us);
-	else
-		ret = hw_pp->ops.poll_timeout_wr_ptr(hw_pp, timeout_us);
-
-	if (ret) {
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				phys_enc->hw_intf->idx - INTF_0,
-				timeout_us,
-				ret);
-		SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
-	}
-
-end:
-	return ret;
-}
-
-static bool _sde_encoder_phys_cmd_is_ongoing_pptx(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_hw_pp_vsync_info info;
-	struct sde_hw_intf *hw_intf;
-
-	if (!phys_enc)
-		return false;
-
-	if (phys_enc->has_intf_te) {
-		hw_intf = phys_enc->hw_intf;
-		if (!hw_intf || !hw_intf->ops.get_vsync_info)
-			return false;
-
-		hw_intf->ops.get_vsync_info(hw_intf, &info);
-	} else {
-		hw_pp = phys_enc->hw_pp;
-		if (!hw_pp || !hw_pp->ops.get_vsync_info)
-			return false;
-
-		hw_pp->ops.get_vsync_info(hw_pp, &info);
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			atomic_read(&phys_enc->pending_kickoff_cnt),
-			info.wr_ptr_line_count,
-			phys_enc->cached_mode.vdisplay);
-
-	if (info.wr_ptr_line_count > 0 && info.wr_ptr_line_count <
-			phys_enc->cached_mode.vdisplay)
-		return true;
-
-	return false;
-}
-
-static int _sde_encoder_phys_cmd_wait_for_idle(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_encoder_wait_info wait_info;
-	bool recovery_events;
-	int ret, i, pending_cnt;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	wait_info.wq = &phys_enc->pending_kickoff_wq;
-	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
-	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-	recovery_events = sde_encoder_recovery_events_enabled(
-			phys_enc->parent);
-
-	/* slave encoder doesn't enable for ppsplit */
-	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
-		return 0;
-
-	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
-			&wait_info);
-	if (ret == -ETIMEDOUT) {
-		pending_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
-		for (i = 0; i < pending_cnt; i++)
-			_sde_encoder_phys_cmd_handle_ppdone_timeout(phys_enc,
-				recovery_events);
-	} else if (!ret) {
-		if (cmd_enc->pp_timeout_report_cnt && recovery_events) {
-			struct drm_connector *conn = phys_enc->connector;
-
-			sde_connector_event_notify(conn,
-					DRM_EVENT_SDE_HW_RECOVERY,
-					sizeof(uint8_t),
-					SDE_RECOVERY_SUCCESS);
-		}
-		cmd_enc->pp_timeout_report_cnt = 0;
-	}
-
-	return ret;
-}
-
-static int _sde_encoder_phys_cmd_wait_for_autorefresh_done(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_encoder_wait_info wait_info;
-	int ret = 0;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	/* only master deals with autorefresh */
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return 0;
-
-	wait_info.wq = &cmd_enc->autorefresh.kickoff_wq;
-	wait_info.atomic_cnt = &cmd_enc->autorefresh.kickoff_cnt;
-	wait_info.timeout_ms = _sde_encoder_phys_cmd_get_idle_timeout(cmd_enc);
-
-	/* wait for autorefresh kickoff to start */
-	ret = sde_encoder_helper_wait_for_irq(phys_enc,
-			INTR_IDX_AUTOREFRESH_DONE, &wait_info);
-
-	/* double check that kickoff has started by reading write ptr reg */
-	if (!ret)
-		ret = _sde_encoder_phys_cmd_poll_write_pointer_started(
-			phys_enc);
-	else
-		sde_encoder_helper_report_irq_timeout(phys_enc,
-				INTR_IDX_AUTOREFRESH_DONE);
-
-	return ret;
-}
-
-static int sde_encoder_phys_cmd_control_vblank_irq(
-		struct sde_encoder_phys *phys_enc,
-		bool enable)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	int ret = 0;
-	int refcount;
-
-	if (!phys_enc || !phys_enc->hw_pp) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(phys_enc->vblank_ctl_lock);
-	refcount = atomic_read(&phys_enc->vblank_refcount);
-
-	/* Slave encoders don't report vblank */
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		goto end;
-
-	/* protect against negative */
-	if (!enable && refcount == 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
-			__builtin_return_address(0), enable, refcount);
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			enable, refcount);
-
-	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
-		ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
-	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
-		ret = sde_encoder_helper_unregister_irq(phys_enc,
-				INTR_IDX_RDPTR);
-
-end:
-	if (ret) {
-		SDE_ERROR_CMDENC(cmd_enc,
-				"control vblank irq error %d, enable %d, refcount %d\n",
-				ret, enable, refcount);
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				enable, refcount, SDE_EVTLOG_ERROR);
-	}
-
-	mutex_unlock(phys_enc->vblank_ctl_lock);
-	return ret;
-}
-
-void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
-		bool enable)
-{
-	struct sde_encoder_phys_cmd *cmd_enc;
-
-	if (!phys_enc)
-		return;
-
-	/**
-	 * pingpong split slaves do not register for IRQs
-	 * check old and new topologies
-	 */
-	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc) ||
-			_sde_encoder_phys_is_disabling_ppsplit_slave(phys_enc))
-		return;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			enable, atomic_read(&phys_enc->vblank_refcount));
-
-	if (enable) {
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
-		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
-
-		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-			sde_encoder_helper_register_irq(phys_enc,
-					INTR_IDX_CTL_START);
-			sde_encoder_helper_register_irq(phys_enc,
-					INTR_IDX_AUTOREFRESH_DONE);
-		}
-
-	} else {
-		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-			sde_encoder_helper_unregister_irq(phys_enc,
-					INTR_IDX_CTL_START);
-			sde_encoder_helper_unregister_irq(phys_enc,
-					INTR_IDX_AUTOREFRESH_DONE);
-		}
-
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
-		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
-	}
-}
-
-static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc,
-	u32 *extra_frame_trigger_time)
-{
-	struct drm_connector *conn = phys_enc->connector;
-	u32 qsync_mode;
-	struct drm_display_mode *mode;
-	u32 threshold_lines = 0;
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-
-	*extra_frame_trigger_time = 0;
-	if (!conn || !conn->state)
-		return 0;
-
-	mode = &phys_enc->cached_mode;
-	qsync_mode = sde_connector_get_qsync_mode(conn);
-
-	if (mode && (qsync_mode == SDE_RM_QSYNC_CONTINUOUS_MODE)) {
-		u32 qsync_min_fps = 0;
-		u32 default_fps = mode->vrefresh;
-		u32 yres = mode->vtotal;
-		u32 slow_time_ns;
-		u32 default_time_ns;
-		u32 extra_time_ns;
-		u32 total_extra_lines;
-		u32 default_line_time_ns;
-
-		if (phys_enc->parent_ops.get_qsync_fps)
-			phys_enc->parent_ops.get_qsync_fps(
-				phys_enc->parent, &qsync_min_fps);
-
-		if (!qsync_min_fps || !default_fps || !yres) {
-			SDE_ERROR_CMDENC(cmd_enc,
-				"wrong qsync params %d %d %d\n",
-				qsync_min_fps, default_fps, yres);
-			goto exit;
-		}
-
-		if (qsync_min_fps >= default_fps) {
-			SDE_ERROR_CMDENC(cmd_enc,
-				"qsync fps:%d must be less than default:%d\n",
-				qsync_min_fps, default_fps);
-			goto exit;
-		}
-
-		/* Calculate the number of extra lines*/
-		slow_time_ns = (1 * 1000000000) / qsync_min_fps;
-		default_time_ns = (1 * 1000000000) / default_fps;
-		extra_time_ns = slow_time_ns - default_time_ns;
-		default_line_time_ns = (1 * 1000000000) / (default_fps * yres);
-
-		total_extra_lines = extra_time_ns / default_line_time_ns;
-		threshold_lines += total_extra_lines;
-
-		SDE_DEBUG_CMDENC(cmd_enc, "slow:%d default:%d extra:%d(ns)\n",
-			slow_time_ns, default_time_ns, extra_time_ns);
-		SDE_DEBUG_CMDENC(cmd_enc, "extra_lines:%d threshold:%d\n",
-			total_extra_lines, threshold_lines);
-		SDE_DEBUG_CMDENC(cmd_enc, "min_fps:%d fps:%d yres:%d\n",
-			qsync_min_fps, default_fps, yres);
-
-		SDE_EVT32(qsync_mode, qsync_min_fps, extra_time_ns, default_fps,
-			yres, threshold_lines);
-
-		*extra_frame_trigger_time = extra_time_ns;
-	}
-
-exit:
-	threshold_lines += DEFAULT_TEARCHECK_SYNC_THRESH_START;
-
-	return threshold_lines;
-}
-
-static void sde_encoder_phys_cmd_tearcheck_config(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_tear_check tc_cfg = { 0 };
-	struct drm_display_mode *mode;
-	bool tc_enable = true;
-	u32 vsync_hz, extra_frame_trigger_time;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	mode = &phys_enc->cached_mode;
-
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, intf %d\n",
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0);
-
-	if (phys_enc->has_intf_te) {
-		if (!phys_enc->hw_intf->ops.setup_tearcheck ||
-			!phys_enc->hw_intf->ops.enable_tearcheck) {
-			SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
-			return;
-		}
-	} else {
-		if (!phys_enc->hw_pp->ops.setup_tearcheck ||
-			!phys_enc->hw_pp->ops.enable_tearcheck) {
-			SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
-			return;
-		}
-	}
-
-	sde_kms = phys_enc->sde_kms;
-	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid device\n");
-		return;
-	}
-	priv = sde_kms->dev->dev_private;
-
-	/*
-	 * TE default: dsi byte clock calculated base on 70 fps;
-	 * around 14 ms to complete a kickoff cycle if te disabled;
-	 * vclk_line base on 60 fps; write is faster than read;
-	 * init == start == rdptr;
-	 *
-	 * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
-	 * frequency divided by the no. of rows (lines) in the LCDpanel.
-	 */
-	vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
-	if (!vsync_hz || !mode->vtotal || !mode->vrefresh) {
-		SDE_DEBUG_CMDENC(cmd_enc,
-			"invalid params - vsync_hz %u vtot %u vrefresh %u\n",
-			vsync_hz, mode->vtotal, mode->vrefresh);
-		return;
-	}
-
-	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
-
-	/* enable external TE after kickoff to avoid premature autorefresh */
-	tc_cfg.hw_vsync_mode = 0;
-
-	/*
-	 * By setting sync_cfg_height to near max register value, we essentially
-	 * disable sde hw generated TE signal, since hw TE will arrive first.
-	 * Only caveat is if due to error, we hit wrap-around.
-	 */
-	tc_cfg.sync_cfg_height = 0xFFF0;
-	tc_cfg.vsync_init_val = mode->vdisplay;
-	tc_cfg.sync_threshold_start = _get_tearcheck_threshold(phys_enc,
-			&extra_frame_trigger_time);
-	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
-	tc_cfg.start_pos = mode->vdisplay;
-	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
-
-	cmd_enc->ctl_start_threshold = (extra_frame_trigger_time / 1000) +
-			SDE_ENC_CTL_START_THRESHOLD_US;
-
-	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
-		phys_enc->hw_pp->idx - PINGPONG_0,
-		phys_enc->hw_intf->idx - INTF_0,
-		vsync_hz, mode->vtotal, mode->vrefresh);
-	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d enable %u start_pos %u rd_ptr_irq %u\n",
-		phys_enc->hw_pp->idx - PINGPONG_0,
-		phys_enc->hw_intf->idx - INTF_0,
-		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
-	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
-		phys_enc->hw_pp->idx - PINGPONG_0,
-		phys_enc->hw_intf->idx - INTF_0,
-		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
-		tc_cfg.vsync_init_val);
-	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u ctl_start_threshold:%d\n",
-		phys_enc->hw_pp->idx - PINGPONG_0,
-		phys_enc->hw_intf->idx - INTF_0,
-		tc_cfg.sync_cfg_height,
-		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue,
-		cmd_enc->ctl_start_threshold);
-
-	if (phys_enc->has_intf_te) {
-		phys_enc->hw_intf->ops.setup_tearcheck(phys_enc->hw_intf,
-				&tc_cfg);
-		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf,
-				tc_enable);
-	} else {
-		phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
-		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp,
-				tc_enable);
-	}
-}
-
-static void _sde_encoder_phys_cmd_pingpong_config(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
-		SDE_ERROR("invalid arg(s), enc %d\n", !phys_enc);
-		return;
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
-			phys_enc->hw_pp->idx - PINGPONG_0);
-	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
-
-	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc))
-		_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
-	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
-}
-
-static void sde_encoder_phys_cmd_enable_helper(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
-		SDE_ERROR("invalid arg(s), encoder %d\n", !phys_enc);
-		return;
-	}
-
-	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
-
-	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
-
-	/*
-	 * For pp-split, skip setting the flush bit for the slave intf, since
-	 * both intfs use same ctl and HW will only flush the master.
-	 */
-	if (_sde_encoder_phys_is_ppsplit(phys_enc) &&
-		!sde_encoder_phys_cmd_is_master(phys_enc))
-		goto skip_flush;
-
-	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
-
-skip_flush:
-	return;
-}
-
-static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc || !phys_enc->hw_pp) {
-		SDE_ERROR("invalid phys encoder\n");
-		return;
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
-
-	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
-		if (!phys_enc->cont_splash_enabled)
-			SDE_ERROR("already enabled\n");
-		return;
-	}
-
-	sde_encoder_phys_cmd_enable_helper(phys_enc);
-	phys_enc->enable_state = SDE_ENC_ENABLED;
-}
-
-static bool sde_encoder_phys_cmd_is_autorefresh_enabled(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_hw_intf *hw_intf;
-	struct sde_hw_autorefresh cfg;
-	int ret;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return false;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return false;
-
-	if (phys_enc->has_intf_te) {
-		hw_intf = phys_enc->hw_intf;
-		if (!hw_intf->ops.get_autorefresh)
-			return false;
-
-		ret = hw_intf->ops.get_autorefresh(hw_intf, &cfg);
-	} else {
-		hw_pp = phys_enc->hw_pp;
-		if (!hw_pp->ops.get_autorefresh)
-			return false;
-
-		ret = hw_pp->ops.get_autorefresh(hw_pp, &cfg);
-	}
-
-	if (ret)
-		return false;
-
-	return cfg.enable;
-}
-
-static void sde_encoder_phys_cmd_connect_te(
-		struct sde_encoder_phys *phys_enc, bool enable)
-{
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return;
-
-	if (phys_enc->has_intf_te &&
-			phys_enc->hw_intf->ops.connect_external_te)
-		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf,
-				enable);
-	else if (phys_enc->hw_pp->ops.connect_external_te)
-		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp,
-				enable);
-	else
-		return;
-
-	SDE_EVT32(DRMID(phys_enc->parent), enable);
-}
-
-static int sde_encoder_phys_cmd_te_get_line_count(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_hw_intf *hw_intf;
-	u32 line_count;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return -EINVAL;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return -EINVAL;
-
-	if (phys_enc->has_intf_te) {
-		hw_intf = phys_enc->hw_intf;
-		if (!hw_intf->ops.get_line_count)
-			return -EINVAL;
-
-		line_count = hw_intf->ops.get_line_count(hw_intf);
-	} else {
-		hw_pp = phys_enc->hw_pp;
-		if (!hw_pp->ops.get_line_count)
-			return -EINVAL;
-
-		line_count = hw_pp->ops.get_line_count(hw_pp);
-	}
-
-	return line_count;
-}
-
-static int sde_encoder_phys_cmd_get_write_line_count(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_hw_intf *hw_intf;
-	struct sde_hw_pp_vsync_info info;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return -EINVAL;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return -EINVAL;
-
-	if (phys_enc->has_intf_te) {
-		hw_intf = phys_enc->hw_intf;
-		if (!hw_intf->ops.get_vsync_info)
-			return -EINVAL;
-
-		if (hw_intf->ops.get_vsync_info(hw_intf, &info))
-			return -EINVAL;
-	} else {
-		hw_pp = phys_enc->hw_pp;
-		if (!hw_pp->ops.get_vsync_info)
-			return -EINVAL;
-
-		if (hw_pp->ops.get_vsync_info(hw_pp, &info))
-			return -EINVAL;
-	}
-
-	return (int)info.wr_ptr_line_count;
-}
-
-static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d intf %d state %d\n",
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			phys_enc->enable_state);
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			phys_enc->enable_state);
-
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
-		return;
-	}
-
-	if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.enable_tearcheck)
-		phys_enc->hw_intf->ops.enable_tearcheck(
-				phys_enc->hw_intf,
-				false);
-	else if (phys_enc->hw_pp->ops.enable_tearcheck)
-		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp,
-				false);
-
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-}
-
-static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	kfree(cmd_enc);
-}
-
-static void sde_encoder_phys_cmd_get_hw_resources(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
-		SDE_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
-		return;
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "\n");
-	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
-}
-
-static int sde_encoder_phys_cmd_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	struct sde_hw_tear_check tc_cfg = {0};
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	int ret = 0;
-	u32 extra_frame_trigger_time;
-
-	if (!phys_enc || !phys_enc->hw_pp) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
-
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			atomic_read(&phys_enc->pending_kickoff_cnt),
-			atomic_read(&cmd_enc->autorefresh.kickoff_cnt));
-
-	if (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_DEFAULT) {
-		/*
-		 * Mark kickoff request as outstanding. If there are more
-		 * than one outstanding frame, then we have to wait for the
-		 * previous frame to complete
-		 */
-		ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
-		if (ret) {
-			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-			SDE_EVT32(DRMID(phys_enc->parent),
-					phys_enc->hw_pp->idx - PINGPONG_0);
-			SDE_ERROR("failed wait_for_idle: %d\n", ret);
-		}
-	}
-
-	if (sde_connector_is_qsync_updated(phys_enc->connector)) {
-		tc_cfg.sync_threshold_start =
-			_get_tearcheck_threshold(phys_enc,
-				&extra_frame_trigger_time);
-		if (phys_enc->has_intf_te &&
-				phys_enc->hw_intf->ops.update_tearcheck)
-			phys_enc->hw_intf->ops.update_tearcheck(
-					phys_enc->hw_intf, &tc_cfg);
-		else if (phys_enc->hw_pp->ops.update_tearcheck)
-			phys_enc->hw_pp->ops.update_tearcheck(
-					phys_enc->hw_pp, &tc_cfg);
-
-		cmd_enc->ctl_start_threshold =
-			(extra_frame_trigger_time / 1000) +
-				SDE_ENC_CTL_START_THRESHOLD_US;
-		SDE_EVT32(DRMID(phys_enc->parent),
-		    tc_cfg.sync_threshold_start, cmd_enc->ctl_start_threshold);
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			atomic_read(&phys_enc->pending_kickoff_cnt));
-	return ret;
-}
-
-static int _sde_encoder_phys_cmd_wait_for_ctl_start(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_encoder_wait_info wait_info;
-	int ret;
-	bool frame_pending = true;
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-	ctl = phys_enc->hw_ctl;
-
-	wait_info.wq = &phys_enc->pending_kickoff_wq;
-	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
-	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
-	/* slave encoder doesn't enable for ppsplit */
-	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
-		return 0;
-
-	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
-			&wait_info);
-	if (ret == -ETIMEDOUT) {
-		struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
-
-		if (ctl && ctl->ops.get_start_state)
-			frame_pending = ctl->ops.get_start_state(ctl);
-
-		if (frame_pending)
-			SDE_ERROR_CMDENC(cmd_enc,
-					"ctl start interrupt wait failed\n");
-		else
-			ret = 0;
-
-		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-			/*
-			 * Signaling the retire fence at ctl start timeout
-			 * to allow the next commit and avoid device freeze.
-			 * As ctl start timeout can occurs due to no read ptr,
-			 * updating pending_rd_ptr_cnt here may not cover all
-			 * cases. Hence signaling the retire fence.
-			 */
-			if (atomic_add_unless(
-			 &phys_enc->pending_retire_fence_cnt, -1, 0))
-				phys_enc->parent_ops.handle_frame_done(
-				 phys_enc->parent,
-				 phys_enc,
-				 SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(
-				&phys_enc->pending_ctlstart_cnt, -1, 0);
-		}
-	} else if ((ret == 0) &&
-	    (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&
-	    atomic_read(&phys_enc->pending_kickoff_cnt) &&
-	    ctl->ops.get_scheduler_status &&
-	    (ctl->ops.get_scheduler_status(ctl) & BIT(0)) &&
-	    phys_enc->parent_ops.handle_frame_done) {
-		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-
-		phys_enc->parent_ops.handle_frame_done(
-			phys_enc->parent, phys_enc,
-			SDE_ENCODER_FRAME_EVENT_DONE |
-			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE);
-	}
-
-	return ret;
-}
-
-static int sde_encoder_phys_cmd_wait_for_tx_complete(
-		struct sde_encoder_phys *phys_enc)
-{
-	int rc;
-	struct sde_encoder_phys_cmd *cmd_enc;
-
-	if (!phys_enc)
-		return -EINVAL;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	rc = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
-	if (rc) {
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->intf_idx - INTF_0);
-		SDE_ERROR("failed wait_for_idle: %d\n", rc);
-	}
-
-	return rc;
-}
-
-static int sde_encoder_phys_cmd_wait_for_commit_done(
-		struct sde_encoder_phys *phys_enc)
-{
-	int rc = 0;
-	struct sde_encoder_phys_cmd *cmd_enc;
-
-	if (!phys_enc)
-		return -EINVAL;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	/* only required for master controller */
-	if (sde_encoder_phys_cmd_is_master(phys_enc))
-		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
-
-	if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
-			cmd_enc->autorefresh.cfg.enable)
-		rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc);
-
-	/* wait for posted start or serialize trigger */
-	if ((atomic_read(&phys_enc->pending_kickoff_cnt) > 1) ||
-	    (!rc && phys_enc->frame_trigger_mode ==
-						FRAME_DONE_WAIT_SERIALIZE)) {
-		rc = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
-		if (rc) {
-			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-			SDE_EVT32(DRMID(phys_enc->parent),
-					phys_enc->hw_pp->idx - PINGPONG_0);
-			SDE_ERROR("failed wait_for_idle: %d\n", rc);
-		}
-	}
-
-	return rc;
-}
-
-static int sde_encoder_phys_cmd_wait_for_vblank(
-		struct sde_encoder_phys *phys_enc)
-{
-	int rc = 0;
-	struct sde_encoder_phys_cmd *cmd_enc;
-	struct sde_encoder_wait_info wait_info;
-
-	if (!phys_enc)
-		return -EINVAL;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
-	/* only required for master controller */
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return rc;
-
-	wait_info.wq = &cmd_enc->pending_vblank_wq;
-	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
-	wait_info.timeout_ms = _sde_encoder_phys_cmd_get_idle_timeout(cmd_enc);
-
-	atomic_inc(&cmd_enc->pending_vblank_cnt);
-
-	rc = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
-			&wait_info);
-
-	return rc;
-}
-
-static void sde_encoder_phys_cmd_update_split_role(
-		struct sde_encoder_phys *phys_enc,
-		enum sde_enc_split_role role)
-{
-	struct sde_encoder_phys_cmd *cmd_enc;
-	enum sde_enc_split_role old_role;
-	bool is_ppsplit;
-
-	if (!phys_enc)
-		return;
-
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-	old_role = phys_enc->split_role;
-	is_ppsplit = _sde_encoder_phys_is_ppsplit(phys_enc);
-
-	phys_enc->split_role = role;
-
-	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
-			old_role, role);
-
-	/*
-	 * ppsplit solo needs to reprogram because intf may have swapped without
-	 * role changing on left-only, right-only back-to-back commits
-	 */
-	if (!(is_ppsplit && role == ENC_ROLE_SOLO) &&
-			(role == old_role || role == ENC_ROLE_SKIP))
-		return;
-
-	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
-	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
-	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
-}
-
-static void sde_encoder_phys_cmd_prepare_commit(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	int trial = 0;
-
-	if (!phys_enc)
-		return;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return;
-
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
-			cmd_enc->autorefresh.cfg.enable);
-
-	if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
-		return;
-
-	/*
-	 * If autorefresh is enabled, disable it and make sure it is safe to
-	 * proceed with current frame commit/push. Sequence fallowed is,
-	 * 1. Disable TE
-	 * 2. Disable autorefresh config
-	 * 4. Poll for frame transfer ongoing to be false
-	 * 5. Enable TE back
-	 */
-	sde_encoder_phys_cmd_connect_te(phys_enc, false);
-
-	_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
-
-	do {
-		udelay(SDE_ENC_MAX_POLL_TIMEOUT_US);
-		if ((trial * SDE_ENC_MAX_POLL_TIMEOUT_US)
-				> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
-			SDE_ERROR_CMDENC(cmd_enc,
-					"disable autorefresh failed\n");
-			break;
-		}
-
-		trial++;
-	} while (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
-
-	sde_encoder_phys_cmd_connect_te(phys_enc, true);
-
-	SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
-}
-
-static void sde_encoder_phys_cmd_trigger_start(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	u32 frame_cnt;
-
-	if (!phys_enc)
-		return;
-
-	/* we don't issue CTL_START when using autorefresh */
-	frame_cnt = _sde_encoder_phys_cmd_get_autorefresh_property(phys_enc);
-	if (frame_cnt) {
-		_sde_encoder_phys_cmd_config_autorefresh(phys_enc, frame_cnt);
-		atomic_inc(&cmd_enc->autorefresh.kickoff_cnt);
-	} else {
-		sde_encoder_helper_trigger_start(phys_enc);
-	}
-}
-
-static void sde_encoder_phys_cmd_setup_vsync_source(
-		struct sde_encoder_phys *phys_enc,
-		u32 vsync_source, bool is_dummy)
-{
-	if (!phys_enc || !phys_enc->hw_intf)
-		return;
-
-	sde_encoder_helper_vsync_config(phys_enc, vsync_source, is_dummy);
-
-	if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
-		phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
-				vsync_source);
-}
-
-static void sde_encoder_phys_cmd_init_ops(struct sde_encoder_phys_ops *ops)
-{
-	ops->prepare_commit = sde_encoder_phys_cmd_prepare_commit;
-	ops->is_master = sde_encoder_phys_cmd_is_master;
-	ops->mode_set = sde_encoder_phys_cmd_mode_set;
-	ops->cont_splash_mode_set = sde_encoder_phys_cmd_cont_splash_mode_set;
-	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
-	ops->enable = sde_encoder_phys_cmd_enable;
-	ops->disable = sde_encoder_phys_cmd_disable;
-	ops->destroy = sde_encoder_phys_cmd_destroy;
-	ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
-	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
-	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
-	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
-	ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete;
-	ops->wait_for_vblank = sde_encoder_phys_cmd_wait_for_vblank;
-	ops->trigger_flush = sde_encoder_helper_trigger_flush;
-	ops->trigger_start = sde_encoder_phys_cmd_trigger_start;
-	ops->needs_single_flush = sde_encoder_phys_needs_single_flush;
-	ops->hw_reset = sde_encoder_helper_hw_reset;
-	ops->irq_control = sde_encoder_phys_cmd_irq_control;
-	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
-	ops->restore = sde_encoder_phys_cmd_enable_helper;
-	ops->control_te = sde_encoder_phys_cmd_connect_te;
-	ops->is_autorefresh_enabled =
-			sde_encoder_phys_cmd_is_autorefresh_enabled;
-	ops->get_line_count = sde_encoder_phys_cmd_te_get_line_count;
-	ops->get_wr_line_count = sde_encoder_phys_cmd_get_write_line_count;
-	ops->wait_for_active = NULL;
-	ops->setup_vsync_source = sde_encoder_phys_cmd_setup_vsync_source;
-	ops->setup_misr = sde_encoder_helper_setup_misr;
-	ops->collect_misr = sde_encoder_helper_collect_misr;
-}
-
-struct sde_encoder_phys *sde_encoder_phys_cmd_init(
-		struct sde_enc_phys_init_params *p)
-{
-	struct sde_encoder_phys *phys_enc = NULL;
-	struct sde_encoder_phys_cmd *cmd_enc = NULL;
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_encoder_irq *irq;
-	int i, ret = 0;
-
-	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
-
-	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
-	if (!cmd_enc) {
-		ret = -ENOMEM;
-		SDE_ERROR("failed to allocate\n");
-		goto fail;
-	}
-	phys_enc = &cmd_enc->base;
-
-	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
-	if (IS_ERR_OR_NULL(hw_mdp)) {
-		ret = PTR_ERR(hw_mdp);
-		SDE_ERROR("failed to get mdptop\n");
-		goto fail_mdp_init;
-	}
-	phys_enc->hw_mdptop = hw_mdp;
-	phys_enc->intf_idx = p->intf_idx;
-
-	phys_enc->parent = p->parent;
-	phys_enc->parent_ops = p->parent_ops;
-	phys_enc->sde_kms = p->sde_kms;
-	phys_enc->split_role = p->split_role;
-	phys_enc->intf_mode = INTF_MODE_CMD;
-	phys_enc->enc_spinlock = p->enc_spinlock;
-	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
-	cmd_enc->stream_sel = 0;
-	cmd_enc->ctl_start_threshold = SDE_ENC_CTL_START_THRESHOLD_US;
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
-	phys_enc->comp_type = p->comp_type;
-
-	if (sde_hw_intf_te_supported(phys_enc->sde_kms->catalog))
-		phys_enc->has_intf_te = true;
-	else
-		phys_enc->has_intf_te = false;
-
-	for (i = 0; i < INTR_IDX_MAX; i++) {
-		irq = &phys_enc->irq[i];
-		INIT_LIST_HEAD(&irq->cb.list);
-		irq->irq_idx = -EINVAL;
-		irq->hw_idx = -EINVAL;
-		irq->cb.arg = phys_enc;
-	}
-
-	irq = &phys_enc->irq[INTR_IDX_CTL_START];
-	irq->name = "ctl_start";
-	irq->intr_type = SDE_IRQ_TYPE_CTL_START;
-	irq->intr_idx = INTR_IDX_CTL_START;
-	irq->cb.func = sde_encoder_phys_cmd_ctl_start_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
-	irq->name = "pp_done";
-	irq->intr_type = SDE_IRQ_TYPE_PING_PONG_COMP;
-	irq->intr_idx = INTR_IDX_PINGPONG;
-	irq->cb.func = sde_encoder_phys_cmd_pp_tx_done_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_RDPTR];
-	irq->intr_idx = INTR_IDX_RDPTR;
-	irq->name = "te_rd_ptr";
-
-	if (phys_enc->has_intf_te)
-		irq->intr_type = SDE_IRQ_TYPE_INTF_TEAR_RD_PTR;
-	else
-		irq->intr_type = SDE_IRQ_TYPE_PING_PONG_RD_PTR;
-
-	irq->cb.func = sde_encoder_phys_cmd_te_rd_ptr_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->name = "underrun";
-	irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
-	irq->intr_idx = INTR_IDX_UNDERRUN;
-	irq->cb.func = sde_encoder_phys_cmd_underrun_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
-	irq->name = "autorefresh_done";
-
-	if (phys_enc->has_intf_te)
-		irq->intr_type = SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF;
-	else
-		irq->intr_type = SDE_IRQ_TYPE_PING_PONG_AUTO_REF;
-
-	irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE;
-	irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq;
-
-	atomic_set(&phys_enc->vblank_refcount, 0);
-	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
-	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
-	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
-	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
-	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
-	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
-	init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
-
-	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
-
-	return phys_enc;
-
-fail_mdp_init:
-	kfree(cmd_enc);
-fail:
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
deleted file mode 100644
index 41eb834..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ /dev/null
@@ -1,1332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include "sde_encoder_phys.h"
-#include "sde_hw_interrupts.h"
-#include "sde_core_irq.h"
-#include "sde_formats.h"
-#include "dsi_display.h"
-#include "sde_trace.h"
-
-#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
-		(e) && (e)->base.parent ? \
-		(e)->base.parent->base.id : -1, \
-		(e) && (e)->base.hw_intf ? \
-		(e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
-
-#define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
-		(e) && (e)->base.parent ? \
-		(e)->base.parent->base.id : -1, \
-		(e) && (e)->base.hw_intf ? \
-		(e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
-
-#define to_sde_encoder_phys_vid(x) \
-	container_of(x, struct sde_encoder_phys_vid, base)
-
-/* maximum number of consecutive kickoff errors */
-#define KICKOFF_MAX_ERRORS	2
-
-/* Poll time to do recovery during active region */
-#define POLL_TIME_USEC_FOR_LN_CNT 500
-#define MAX_POLL_CNT 10
-
-static bool sde_encoder_phys_vid_is_master(
-		struct sde_encoder_phys *phys_enc)
-{
-	bool ret = false;
-
-	if (phys_enc->split_role != ENC_ROLE_SLAVE)
-		ret = true;
-
-	return ret;
-}
-
-static void drm_mode_to_intf_timing_params(
-		const struct sde_encoder_phys_vid *vid_enc,
-		const struct drm_display_mode *mode,
-		struct intf_timing_params *timing)
-{
-	const struct sde_encoder_phys *phys_enc = &vid_enc->base;
-	enum msm_display_compression_ratio comp_ratio =
-				MSM_DISPLAY_COMPRESSION_RATIO_NONE;
-
-	memset(timing, 0, sizeof(*timing));
-
-	if ((mode->htotal < mode->hsync_end)
-			|| (mode->hsync_start < mode->hdisplay)
-			|| (mode->vtotal < mode->vsync_end)
-			|| (mode->vsync_start < mode->vdisplay)
-			|| (mode->hsync_end < mode->hsync_start)
-			|| (mode->vsync_end < mode->vsync_start)) {
-		SDE_ERROR(
-		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
-				mode->hsync_start, mode->hsync_end,
-				mode->htotal, mode->hdisplay);
-		SDE_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
-				mode->vsync_start, mode->vsync_end,
-				mode->vtotal, mode->vdisplay);
-		return;
-	}
-
-	/*
-	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
-	 *  Active Region      Front Porch   Sync   Back Porch
-	 * <-----------------><------------><-----><----------->
-	 * <- [hv]display --->
-	 * <--------- [hv]sync_start ------>
-	 * <----------------- [hv]sync_end ------->
-	 * <---------------------------- [hv]total ------------->
-	 */
-	timing->width = mode->hdisplay;	/* active width */
-
-	if (phys_enc->hw_intf->cap->type != INTF_DP &&
-		vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
-		comp_ratio = vid_enc->base.comp_ratio;
-		if (comp_ratio == MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1)
-			timing->width = DIV_ROUND_UP(timing->width, 2);
-		else
-			timing->width = DIV_ROUND_UP(timing->width, 3);
-	}
-
-	timing->height = mode->vdisplay;	/* active height */
-	timing->xres = timing->width;
-	timing->yres = timing->height;
-	timing->h_back_porch = mode->htotal - mode->hsync_end;
-	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
-	timing->v_back_porch = mode->vtotal - mode->vsync_end;
-	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
-	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
-	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
-	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
-	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
-	timing->border_clr = 0;
-	timing->underflow_clr = 0xff;
-	timing->hsync_skew = mode->hskew;
-	timing->v_front_porch_fixed = vid_enc->base.vfp_cached;
-	timing->compression_en = false;
-
-	/* DSI controller cannot handle active-low sync signals. */
-	if (phys_enc->hw_intf->cap->type == INTF_DSI) {
-		timing->hsync_polarity = 0;
-		timing->vsync_polarity = 0;
-	}
-
-	/* for DP/EDP, Shift timings to align it to bottom right */
-	if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
-		(phys_enc->hw_intf->cap->type == INTF_EDP)) {
-		timing->h_back_porch += timing->h_front_porch;
-		timing->h_front_porch = 0;
-		timing->v_back_porch += timing->v_front_porch;
-		timing->v_front_porch = 0;
-	}
-
-	timing->wide_bus_en = vid_enc->base.wide_bus_en;
-
-	/*
-	 * for DP, divide the horizonal parameters by 2 when
-	 * widebus or compression is enabled, irrespective of
-	 * compression ratio
-	 */
-	if (phys_enc->hw_intf->cap->type == INTF_DP &&
-		(timing->wide_bus_en || vid_enc->base.comp_ratio)) {
-		timing->width = timing->width >> 1;
-		timing->xres = timing->xres >> 1;
-		timing->h_back_porch = timing->h_back_porch >> 1;
-		timing->h_front_porch = timing->h_front_porch >> 1;
-		timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
-
-		if (vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
-				vid_enc->base.comp_ratio) {
-			timing->compression_en = true;
-			timing->extra_dto_cycles =
-				vid_enc->base.dsc_extra_pclk_cycle_cnt;
-			timing->width += vid_enc->base.dsc_extra_disp_width;
-			timing->h_back_porch +=
-				vid_enc->base.dsc_extra_disp_width;
-		}
-	}
-
-	/*
-	 * For edp only:
-	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
-	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
-	 */
-	/*
-	 * if (vid_enc->hw->cap->type == INTF_EDP) {
-	 * display_v_start += mode->htotal - mode->hsync_start;
-	 * display_v_end -= mode->hsync_start - mode->hdisplay;
-	 * }
-	 */
-}
-
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
-{
-	u32 active = timing->xres;
-	u32 inactive =
-	    timing->h_back_porch + timing->h_front_porch +
-	    timing->hsync_pulse_width;
-	return active + inactive;
-}
-
-static inline u32 get_vertical_total(const struct intf_timing_params *timing,
-	bool use_fixed_vfp)
-{
-	u32 inactive;
-	u32 active = timing->yres;
-	u32 v_front_porch = use_fixed_vfp ?
-		timing->v_front_porch_fixed : timing->v_front_porch;
-
-	inactive = timing->v_back_porch + v_front_porch +
-			    timing->vsync_pulse_width;
-	return active + inactive;
-}
-
-/*
- * programmable_fetch_get_num_lines:
- *	Number of fetch lines in vertical front porch
- * @timing: Pointer to the intf timing information for the requested mode
- *
- * Returns the number of fetch lines in vertical front porch at which mdp
- * can start fetching the next frame.
- *
- * Number of needed prefetch lines is anything that cannot be absorbed in the
- * start of frame time (back porch + vsync pulse width).
- *
- * Some panels have very large VFP, however we only need a total number of
- * lines based on the chip worst case latencies.
- */
-static u32 programmable_fetch_get_num_lines(
-		struct sde_encoder_phys_vid *vid_enc,
-		const struct intf_timing_params *timing,
-		bool use_fixed_vfp)
-{
-	struct sde_encoder_phys *phys_enc = &vid_enc->base;
-	u32 worst_case_needed_lines =
-	    phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
-	u32 start_of_frame_lines =
-	    timing->v_back_porch + timing->vsync_pulse_width;
-	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
-	u32 actual_vfp_lines = 0;
-	u32 v_front_porch = use_fixed_vfp ?
-		timing->v_front_porch_fixed : timing->v_front_porch;
-
-	/* Fetch must be outside active lines, otherwise undefined. */
-	if (start_of_frame_lines >= worst_case_needed_lines) {
-		SDE_DEBUG_VIDENC(vid_enc,
-				"prog fetch is not needed, large vbp+vsw\n");
-		actual_vfp_lines = 0;
-	} else if (v_front_porch < needed_vfp_lines) {
-		/* Warn fetch needed, but not enough porch in panel config */
-		pr_warn_once
-			("low vbp+vfp may lead to perf issues in some cases\n");
-		SDE_DEBUG_VIDENC(vid_enc,
-				"less vfp than fetch req, using entire vfp\n");
-		actual_vfp_lines = v_front_porch;
-	} else {
-		SDE_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
-		actual_vfp_lines = needed_vfp_lines;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc,
-		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
-		v_front_porch, timing->v_back_porch,
-		timing->vsync_pulse_width);
-	SDE_DEBUG_VIDENC(vid_enc,
-		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
-		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
-
-	return actual_vfp_lines;
-}
-
-/*
- * programmable_fetch_config: Programs HW to prefetch lines by offsetting
- *	the start of fetch into the vertical front porch for cases where the
- *	vsync pulse width and vertical back porch time is insufficient
- *
- *	Gets # of lines to pre-fetch, then calculate VSYNC counter value.
- *	HW layer requires VSYNC counter of first pixel of tgt VFP line.
- *
- * @timing: Pointer to the intf timing information for the requested mode
- */
-static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
-				      const struct intf_timing_params *timing)
-{
-	struct sde_encoder_phys_vid *vid_enc =
-		to_sde_encoder_phys_vid(phys_enc);
-	struct intf_prog_fetch f = { 0 };
-	u32 vfp_fetch_lines = 0;
-	u32 horiz_total = 0;
-	u32 vert_total = 0;
-	u32 vfp_fetch_start_vsync_counter = 0;
-	unsigned long lock_flags;
-	struct sde_mdss_cfg *m;
-
-	if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
-		return;
-
-	m = phys_enc->sde_kms->catalog;
-
-	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc,
-							   timing, true);
-	if (vfp_fetch_lines) {
-		vert_total = get_vertical_total(timing, true);
-		horiz_total = get_horizontal_total(timing);
-		vfp_fetch_start_vsync_counter =
-			(vert_total - vfp_fetch_lines) * horiz_total + 1;
-
-		/**
-		 * Check if we need to throttle the fetch to start
-		 * from second line after the active region.
-		 */
-		if (m->delay_prg_fetch_start)
-			vfp_fetch_start_vsync_counter += horiz_total;
-
-		f.enable = 1;
-		f.fetch_start = vfp_fetch_start_vsync_counter;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc,
-		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
-		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
-
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-}
-
-static bool sde_encoder_phys_vid_mode_fixup(
-		struct sde_encoder_phys *phys_enc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
-{
-	if (phys_enc)
-		SDE_DEBUG_VIDENC(to_sde_encoder_phys_vid(phys_enc), "\n");
-
-	/*
-	 * Modifying mode has consequences when the mode comes back to us
-	 */
-	return true;
-}
-
-/* vid_enc timing_params must be configured before calling this function */
-static void _sde_encoder_phys_vid_setup_avr(
-		struct sde_encoder_phys *phys_enc, u32 qsync_min_fps)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-	struct drm_display_mode mode;
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	mode = phys_enc->cached_mode;
-	if (vid_enc->base.hw_intf->ops.avr_setup) {
-		struct intf_avr_params avr_params = {0};
-		u32 default_fps = mode.vrefresh;
-		int ret;
-
-		if (!default_fps) {
-			SDE_ERROR_VIDENC(vid_enc,
-					"invalid default fps %d\n",
-					default_fps);
-			return;
-		}
-
-		if (qsync_min_fps >= default_fps) {
-			SDE_ERROR_VIDENC(vid_enc,
-				"qsync fps %d must be less than default %d\n",
-				qsync_min_fps, default_fps);
-			return;
-		}
-
-		avr_params.default_fps = default_fps;
-		avr_params.min_fps = qsync_min_fps;
-
-		ret = vid_enc->base.hw_intf->ops.avr_setup(
-				vid_enc->base.hw_intf,
-				&vid_enc->timing_params, &avr_params);
-		if (ret)
-			SDE_ERROR_VIDENC(vid_enc,
-				"bad settings, can't configure AVR\n");
-
-		SDE_EVT32(DRMID(phys_enc->parent), default_fps,
-				qsync_min_fps, ret);
-	}
-}
-
-static void _sde_encoder_phys_vid_avr_ctrl(struct sde_encoder_phys *phys_enc)
-{
-	struct intf_avr_params avr_params;
-	struct sde_encoder_phys_vid *vid_enc =
-			to_sde_encoder_phys_vid(phys_enc);
-
-	avr_params.avr_mode = sde_connector_get_qsync_mode(
-			phys_enc->connector);
-
-	if (vid_enc->base.hw_intf->ops.avr_ctrl) {
-		vid_enc->base.hw_intf->ops.avr_ctrl(
-				vid_enc->base.hw_intf,
-				&avr_params);
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent),
-		phys_enc->hw_intf->idx - INTF_0,
-		avr_params.avr_mode);
-}
-
-static void sde_encoder_phys_vid_setup_timing_engine(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-	struct drm_display_mode mode;
-	struct intf_timing_params timing_params = { 0 };
-	const struct sde_format *fmt = NULL;
-	u32 fmt_fourcc = DRM_FORMAT_RGB888;
-	u32 qsync_min_fps = 0;
-	unsigned long lock_flags;
-	struct sde_hw_intf_cfg intf_cfg = { 0 };
-
-	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid encoder %d\n", !phys_enc);
-		return;
-	}
-
-	mode = phys_enc->cached_mode;
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	if (!phys_enc->hw_intf->ops.setup_timing_gen) {
-		SDE_ERROR("timing engine setup is not supported\n");
-		return;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
-	drm_mode_debug_printmodeline(&mode);
-
-	if (phys_enc->split_role != ENC_ROLE_SOLO) {
-		mode.hdisplay >>= 1;
-		mode.htotal >>= 1;
-		mode.hsync_start >>= 1;
-		mode.hsync_end >>= 1;
-
-		SDE_DEBUG_VIDENC(vid_enc,
-			"split_role %d, halve horizontal %d %d %d %d\n",
-			phys_enc->split_role,
-			mode.hdisplay, mode.htotal,
-			mode.hsync_start, mode.hsync_end);
-	}
-
-	if (!phys_enc->vfp_cached) {
-		phys_enc->vfp_cached =
-			sde_connector_get_panel_vfp(phys_enc->connector, &mode);
-		if (phys_enc->vfp_cached <= 0)
-			phys_enc->vfp_cached = mode.vsync_start - mode.vdisplay;
-	}
-
-	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
-
-	vid_enc->timing_params = timing_params;
-
-	if (phys_enc->cont_splash_enabled) {
-		SDE_DEBUG_VIDENC(vid_enc,
-			"skipping intf programming since cont splash is enabled\n");
-		goto exit;
-	}
-
-	fmt = sde_get_sde_format(fmt_fourcc);
-	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
-
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
-			&timing_params, fmt);
-
-	if (test_bit(SDE_CTL_ACTIVE_CFG,
-				&phys_enc->hw_ctl->caps->features)) {
-		sde_encoder_helper_update_intf_cfg(phys_enc);
-	} else if (phys_enc->hw_ctl->ops.setup_intf_cfg) {
-		intf_cfg.intf = phys_enc->hw_intf->idx;
-		intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
-		intf_cfg.stream_sel = 0; /* Don't care value for video mode */
-		intf_cfg.mode_3d =
-			sde_encoder_helper_get_3d_blend_mode(phys_enc);
-
-		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
-				&intf_cfg);
-	}
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-	if (phys_enc->hw_intf->cap->type == INTF_DSI)
-		programmable_fetch_config(phys_enc, &timing_params);
-
-exit:
-	if (phys_enc->parent_ops.get_qsync_fps)
-		phys_enc->parent_ops.get_qsync_fps(
-				phys_enc->parent, &qsync_min_fps);
-
-	/* only panels which support qsync will have a non-zero min fps */
-	if (qsync_min_fps) {
-		_sde_encoder_phys_vid_setup_avr(phys_enc, qsync_min_fps);
-		_sde_encoder_phys_vid_avr_ctrl(phys_enc);
-	}
-}
-
-static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_hw_ctl *hw_ctl;
-	unsigned long lock_flags;
-	u32 flush_register = ~0;
-	u32 reset_status = 0;
-	int new_cnt = -1, old_cnt = -1;
-	u32 event = 0;
-	int pend_ret_fence_cnt = 0;
-
-	if (!phys_enc)
-		return;
-
-	hw_ctl = phys_enc->hw_ctl;
-	if (!hw_ctl)
-		return;
-
-	SDE_ATRACE_BEGIN("vblank_irq");
-
-	/*
-	 * only decrement the pending flush count if we've actually flushed
-	 * hardware. due to sw irq latency, vblank may have already happened
-	 * so we need to double-check with hw that it accepted the flush bits
-	 */
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-
-	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
-
-	if (hw_ctl && hw_ctl->ops.get_flush_register)
-		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
-
-	if (flush_register)
-		goto not_flushed;
-
-	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-	pend_ret_fence_cnt = atomic_read(&phys_enc->pending_retire_fence_cnt);
-
-	/* signal only for master, where there is a pending kickoff */
-	if (sde_encoder_phys_vid_is_master(phys_enc) &&
-	    atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-		event = SDE_ENCODER_FRAME_EVENT_DONE |
-			SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE |
-			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
-	}
-
-not_flushed:
-	if (hw_ctl && hw_ctl->ops.get_reset)
-		reset_status = hw_ctl->ops.get_reset(hw_ctl);
-
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-	if (event && phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
-			phys_enc, event);
-
-	if (phys_enc->parent_ops.handle_vblank_virt)
-		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
-				phys_enc);
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
-			old_cnt, atomic_read(&phys_enc->pending_kickoff_cnt),
-			reset_status ? SDE_EVTLOG_ERROR : 0,
-			flush_register, event,
-			atomic_read(&phys_enc->pending_retire_fence_cnt));
-
-	/* Signal any waiting atomic commit thread */
-	wake_up_all(&phys_enc->pending_kickoff_wq);
-	SDE_ATRACE_END("vblank_irq");
-}
-
-static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-
-	if (!phys_enc)
-		return;
-
-	if (phys_enc->parent_ops.handle_underrun_virt)
-		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
-			phys_enc);
-}
-
-static void _sde_encoder_phys_vid_setup_irq_hw_idx(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_irq *irq;
-
-	/*
-	 * Initialize irq->hw_idx only when irq is not registered.
-	 * Prevent invalidating irq->irq_idx as modeset may be
-	 * called many times during dfps.
-	 */
-
-	irq = &phys_enc->irq[INTR_IDX_VSYNC];
-	if (irq->irq_idx < 0)
-		irq->hw_idx = phys_enc->intf_idx;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	if (irq->irq_idx < 0)
-		irq->hw_idx = phys_enc->intf_idx;
-}
-
-static void sde_encoder_phys_vid_cont_splash_mode_set(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *adj_mode)
-{
-	if (!phys_enc || !adj_mode) {
-		SDE_ERROR("invalid args\n");
-		return;
-	}
-
-	phys_enc->cached_mode = *adj_mode;
-	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	_sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
-}
-
-static void sde_encoder_phys_vid_mode_set(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
-{
-	struct sde_rm *rm;
-	struct sde_rm_hw_iter iter;
-	int i, instance;
-	struct sde_encoder_phys_vid *vid_enc;
-
-	if (!phys_enc || !phys_enc->sde_kms) {
-		SDE_ERROR("invalid encoder/kms\n");
-		return;
-	}
-
-	rm = &phys_enc->sde_kms->rm;
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-
-	if (adj_mode) {
-		phys_enc->cached_mode = *adj_mode;
-		drm_mode_debug_printmodeline(adj_mode);
-		SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
-	}
-
-	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
-	/* Retrieve previously allocated HW Resources. Shouldn't fail */
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
-	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
-			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
-	}
-	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
-		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
-				PTR_ERR(phys_enc->hw_ctl));
-		phys_enc->hw_ctl = NULL;
-		return;
-	}
-
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_INTF);
-	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
-			phys_enc->hw_intf = (struct sde_hw_intf *)iter.hw;
-	}
-
-	if (IS_ERR_OR_NULL(phys_enc->hw_intf)) {
-		SDE_ERROR_VIDENC(vid_enc, "failed to init intf: %ld\n",
-				PTR_ERR(phys_enc->hw_intf));
-		phys_enc->hw_intf = NULL;
-		return;
-	}
-
-	_sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
-}
-
-static int sde_encoder_phys_vid_control_vblank_irq(
-		struct sde_encoder_phys *phys_enc,
-		bool enable)
-{
-	int ret = 0;
-	struct sde_encoder_phys_vid *vid_enc;
-	int refcount;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(phys_enc->vblank_ctl_lock);
-	refcount = atomic_read(&phys_enc->vblank_refcount);
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-
-	/* Slave encoders don't report vblank */
-	if (!sde_encoder_phys_vid_is_master(phys_enc))
-		goto end;
-
-	/* protect against negative */
-	if (!enable && refcount == 0) {
-		ret = -EINVAL;
-		goto end;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
-			__builtin_return_address(0),
-			enable, atomic_read(&phys_enc->vblank_refcount));
-
-	SDE_EVT32(DRMID(phys_enc->parent), enable,
-			atomic_read(&phys_enc->vblank_refcount));
-
-	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) {
-		ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
-		if (ret)
-			atomic_dec_return(&phys_enc->vblank_refcount);
-	} else if (!enable &&
-			atomic_dec_return(&phys_enc->vblank_refcount) == 0) {
-		ret = sde_encoder_helper_unregister_irq(phys_enc,
-				INTR_IDX_VSYNC);
-		if (ret)
-			atomic_inc_return(&phys_enc->vblank_refcount);
-	}
-
-end:
-	if (ret) {
-		SDE_ERROR_VIDENC(vid_enc,
-				"control vblank irq error %d, enable %d\n",
-				ret, enable);
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_intf->idx - INTF_0,
-				enable, refcount, SDE_EVTLOG_ERROR);
-	}
-	mutex_unlock(phys_enc->vblank_ctl_lock);
-	return ret;
-}
-
-static bool sde_encoder_phys_vid_wait_dma_trigger(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-	struct sde_hw_intf *intf;
-	struct sde_hw_ctl *ctl;
-	struct intf_status status;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return false;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	intf = phys_enc->hw_intf;
-	ctl = phys_enc->hw_ctl;
-	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
-			phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
-		return false;
-	}
-
-	if (!intf->ops.get_status)
-		return false;
-
-	intf->ops.get_status(intf, &status);
-
-	/* if interface is not enabled, return true to wait for dma trigger */
-	return status.is_en ? false : true;
-}
-
-static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
-{
-	struct msm_drm_private *priv;
-	struct sde_encoder_phys_vid *vid_enc;
-	struct sde_hw_intf *intf;
-	struct sde_hw_ctl *ctl;
-
-	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
-			!phys_enc->parent->dev->dev_private ||
-			!phys_enc->sde_kms) {
-		SDE_ERROR("invalid encoder/device\n");
-		return;
-	}
-	priv = phys_enc->parent->dev->dev_private;
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	intf = phys_enc->hw_intf;
-	ctl = phys_enc->hw_ctl;
-	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
-				!phys_enc->hw_intf, !phys_enc->hw_ctl);
-		return;
-	}
-	if (!ctl->ops.update_bitmask_intf ||
-		(test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
-		!ctl->ops.update_bitmask_merge3d)) {
-		SDE_ERROR("invalid hw_ctl ops %d\n", ctl->idx);
-		return;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-
-	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
-		return;
-
-	if (!phys_enc->cont_splash_enabled)
-		sde_encoder_helper_split_config(phys_enc,
-				phys_enc->hw_intf->idx);
-
-	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
-
-	/*
-	 * For cases where both the interfaces are connected to same ctl,
-	 * set the flush bit for both master and slave.
-	 * For single flush cases (dual-ctl or pp-split), skip setting the
-	 * flush bit for the slave intf, since both intfs use same ctl
-	 * and HW will only flush the master.
-	 */
-	if (!test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
-			sde_encoder_phys_needs_single_flush(phys_enc) &&
-		!sde_encoder_phys_vid_is_master(phys_enc))
-		goto skip_flush;
-
-	/**
-	 * skip flushing intf during cont. splash handoff since bootloader
-	 * has already enabled the hardware and is single buffered.
-	 */
-	if (phys_enc->cont_splash_enabled) {
-		SDE_DEBUG_VIDENC(vid_enc,
-		"skipping intf flush bit set as cont. splash is enabled\n");
-		goto skip_flush;
-	}
-
-	ctl->ops.update_bitmask_intf(ctl, intf->idx, 1);
-
-	if (ctl->ops.update_bitmask_merge3d && phys_enc->hw_pp->merge_3d)
-		ctl->ops.update_bitmask_merge3d(ctl,
-			phys_enc->hw_pp->merge_3d->idx, 1);
-
-	if (phys_enc->hw_intf->cap->type == INTF_DP &&
-		phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
-		phys_enc->comp_ratio && ctl->ops.update_bitmask_periph)
-		ctl->ops.update_bitmask_periph(ctl, intf->idx, 1);
-
-skip_flush:
-	SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d intf %d\n",
-		ctl->idx - CTL_0, intf->idx);
-	SDE_EVT32(DRMID(phys_enc->parent),
-		atomic_read(&phys_enc->pending_retire_fence_cnt));
-
-	/* ctl_flush & timing engine enable will be triggered by framework */
-	if (phys_enc->enable_state == SDE_ENC_DISABLED)
-		phys_enc->enable_state = SDE_ENC_ENABLING;
-}
-
-static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-	kfree(vid_enc);
-}
-
-static void sde_encoder_phys_vid_get_hw_resources(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-
-	if (!phys_enc || !hw_res) {
-		SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
-				!phys_enc, !hw_res, !conn_state);
-		return;
-	}
-
-	if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
-		SDE_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
-		return;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
-}
-
-static int _sde_encoder_phys_vid_wait_for_vblank(
-		struct sde_encoder_phys *phys_enc, bool notify)
-{
-	struct sde_encoder_wait_info wait_info;
-	int ret = 0;
-	u32 event = SDE_ENCODER_FRAME_EVENT_ERROR |
-		SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE |
-		SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
-	if (!phys_enc) {
-		pr_err("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	wait_info.wq = &phys_enc->pending_kickoff_wq;
-	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
-	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
-	/* Wait for kickoff to complete */
-	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
-			&wait_info);
-
-	if (notify && (ret == -ETIMEDOUT) &&
-	    atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0) &&
-	    phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(
-			phys_enc->parent, phys_enc, event);
-
-	SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret,
-			ret ? SDE_EVTLOG_FATAL : 0);
-	return ret;
-}
-
-static int sde_encoder_phys_vid_wait_for_vblank(
-		struct sde_encoder_phys *phys_enc)
-{
-	return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
-}
-
-static int sde_encoder_phys_vid_wait_for_vblank_no_notify(
-		struct sde_encoder_phys *phys_enc)
-{
-	return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
-}
-
-static int sde_encoder_phys_vid_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-	struct sde_hw_ctl *ctl;
-	bool recovery_events;
-	struct drm_connector *conn;
-	int event;
-	int rc;
-
-	if (!phys_enc || !params || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid encoder/parameters\n");
-		return -EINVAL;
-	}
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-
-	ctl = phys_enc->hw_ctl;
-	if (!ctl->ops.wait_reset_status)
-		return 0;
-
-	conn = phys_enc->connector;
-	recovery_events = sde_encoder_recovery_events_enabled(
-			phys_enc->parent);
-	/*
-	 * hw supports hardware initiated ctl reset, so before we kickoff a new
-	 * frame, need to check and wait for hw initiated ctl reset completion
-	 */
-	rc = ctl->ops.wait_reset_status(ctl);
-	if (rc) {
-		SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
-				ctl->idx, rc);
-
-		++vid_enc->error_count;
-
-		/* to avoid flooding, only log first time, and "dead" time */
-		if (vid_enc->error_count == 1) {
-			SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
-
-			sde_encoder_helper_unregister_irq(
-					phys_enc, INTR_IDX_VSYNC);
-			SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
-			sde_encoder_helper_register_irq(
-					phys_enc, INTR_IDX_VSYNC);
-		}
-
-		/*
-		 * if the recovery event is registered by user, don't panic
-		 * trigger panic on first timeout if no listener registered
-		 */
-		if (recovery_events) {
-			event = vid_enc->error_count > KICKOFF_MAX_ERRORS ?
-				SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE;
-			sde_connector_event_notify(conn,
-					DRM_EVENT_SDE_HW_RECOVERY,
-					sizeof(uint8_t), event);
-		} else {
-			SDE_DBG_DUMP("panic");
-		}
-
-		/* request a ctl reset before the next flush */
-		phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;
-	} else {
-		if (recovery_events && vid_enc->error_count)
-			sde_connector_event_notify(conn,
-					DRM_EVENT_SDE_HW_RECOVERY,
-					sizeof(uint8_t),
-					SDE_RECOVERY_SUCCESS);
-		vid_enc->error_count = 0;
-	}
-
-	if (sde_connector_is_qsync_updated(phys_enc->connector))
-		_sde_encoder_phys_vid_avr_ctrl(phys_enc);
-
-	return rc;
-}
-
-static void sde_encoder_phys_vid_single_vblank_wait(
-		struct sde_encoder_phys *phys_enc)
-{
-	int ret;
-	struct sde_encoder_phys_vid *vid_enc
-					= to_sde_encoder_phys_vid(phys_enc);
-
-	/*
-	 * Wait for a vsync so we know the ENABLE=0 latched before
-	 * the (connector) source of the vsync's gets disabled,
-	 * otherwise we end up in a funny state if we re-enable
-	 * before the disable latches, which results that some of
-	 * the settings changes for the new modeset (like new
-	 * scanout buffer) don't latch properly..
-	 */
-	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
-	if (ret) {
-		SDE_ERROR_VIDENC(vid_enc,
-				"failed to enable vblank irq: %d\n",
-				ret);
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_intf->idx - INTF_0, ret,
-				SDE_EVTLOG_FUNC_CASE1,
-				SDE_EVTLOG_ERROR);
-	} else {
-		ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
-		if (ret) {
-			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-			SDE_ERROR_VIDENC(vid_enc,
-					"failure waiting for disable: %d\n",
-					ret);
-			SDE_EVT32(DRMID(phys_enc->parent),
-					phys_enc->hw_intf->idx - INTF_0, ret,
-					SDE_EVTLOG_FUNC_CASE2,
-					SDE_EVTLOG_ERROR);
-		}
-		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
-	}
-}
-
-static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
-{
-	struct msm_drm_private *priv;
-	struct sde_encoder_phys_vid *vid_enc;
-	unsigned long lock_flags;
-	struct intf_status intf_status = {0};
-
-	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
-			!phys_enc->parent->dev->dev_private) {
-		SDE_ERROR("invalid encoder/device\n");
-		return;
-	}
-	priv = phys_enc->parent->dev->dev_private;
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
-				!phys_enc->hw_intf, !phys_enc->hw_ctl);
-		return;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-
-	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
-		return;
-	else if (!sde_encoder_phys_vid_is_master(phys_enc))
-		goto exit;
-
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR("already disabled\n");
-		return;
-	}
-
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
-	sde_encoder_phys_inc_pending(phys_enc);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-	sde_encoder_phys_vid_single_vblank_wait(phys_enc);
-	if (phys_enc->hw_intf->ops.get_status)
-		phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf,
-			&intf_status);
-
-	if (intf_status.is_en) {
-		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-		sde_encoder_phys_inc_pending(phys_enc);
-		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-		sde_encoder_phys_vid_single_vblank_wait(phys_enc);
-	}
-
-	sde_encoder_helper_phys_disable(phys_enc, NULL);
-exit:
-	SDE_EVT32(DRMID(phys_enc->parent),
-		atomic_read(&phys_enc->pending_retire_fence_cnt));
-	phys_enc->vfp_cached = 0;
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-}
-
-static void sde_encoder_phys_vid_handle_post_kickoff(
-		struct sde_encoder_phys *phys_enc)
-{
-	unsigned long lock_flags;
-	struct sde_encoder_phys_vid *vid_enc;
-	u32 avr_mode;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
-
-	/*
-	 * Video mode must flush CTL before enabling timing engine
-	 * Video encoders need to turn on their interfaces now
-	 */
-	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
-		if (sde_encoder_phys_vid_is_master(phys_enc)) {
-			SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_intf->idx - INTF_0);
-			spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-			phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf,
-				1);
-			spin_unlock_irqrestore(phys_enc->enc_spinlock,
-				lock_flags);
-		}
-		phys_enc->enable_state = SDE_ENC_ENABLED;
-	}
-
-	avr_mode = sde_connector_get_qsync_mode(phys_enc->connector);
-
-	if (avr_mode && vid_enc->base.hw_intf->ops.avr_trigger) {
-		vid_enc->base.hw_intf->ops.avr_trigger(vid_enc->base.hw_intf);
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_intf->idx - INTF_0,
-				SDE_EVTLOG_FUNC_CASE9);
-	}
-}
-
-static void sde_encoder_phys_vid_irq_control(struct sde_encoder_phys *phys_enc,
-		bool enable)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-	int ret;
-
-	if (!phys_enc)
-		return;
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
-			enable, atomic_read(&phys_enc->vblank_refcount));
-
-	if (enable) {
-		ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
-		if (ret)
-			return;
-
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
-	} else {
-		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
-	}
-}
-
-static int sde_encoder_phys_vid_get_line_count(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
-		return -EINVAL;
-
-	if (!sde_encoder_phys_vid_is_master(phys_enc))
-		return -EINVAL;
-
-	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
-		return -EINVAL;
-
-	return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
-}
-
-static int sde_encoder_phys_vid_wait_for_active(
-			struct sde_encoder_phys *phys_enc)
-{
-	struct drm_display_mode mode;
-	struct sde_encoder_phys_vid *vid_enc;
-	u32 ln_cnt, min_ln_cnt, active_lns_cnt;
-	u32 clk_period, time_of_line;
-	u32 delay, retry = MAX_POLL_CNT;
-
-	vid_enc =  to_sde_encoder_phys_vid(phys_enc);
-
-	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count) {
-		SDE_ERROR_VIDENC(vid_enc, "invalid vid_enc params\n");
-		return -EINVAL;
-	}
-
-	mode = phys_enc->cached_mode;
-
-	/*
-	 * calculate clk_period as pico second to maintain good
-	 * accuracy with high pclk rate and this number is in 17 bit
-	 * range.
-	 */
-	clk_period = DIV_ROUND_UP_ULL(1000000000, mode.clock);
-	if (!clk_period) {
-		SDE_ERROR_VIDENC(vid_enc, "Unable to calculate clock period\n");
-		return -EINVAL;
-	}
-
-	min_ln_cnt = (mode.vtotal - mode.vsync_start) +
-		(mode.vsync_end - mode.vsync_start);
-	active_lns_cnt = mode.vdisplay;
-	time_of_line = mode.htotal * clk_period;
-
-	/* delay in micro seconds */
-	delay = (time_of_line * (min_ln_cnt +
-		(mode.vsync_start - mode.vdisplay))) / 1000000;
-
-	/*
-	 * Wait for max delay before
-	 * polling to check active region
-	 */
-	if (delay > POLL_TIME_USEC_FOR_LN_CNT)
-		delay = POLL_TIME_USEC_FOR_LN_CNT;
-
-	while (retry) {
-		ln_cnt = phys_enc->hw_intf->ops.get_line_count(
-				phys_enc->hw_intf);
-
-		if ((ln_cnt >= min_ln_cnt) &&
-			(ln_cnt < (active_lns_cnt + min_ln_cnt))) {
-			SDE_DEBUG_VIDENC(vid_enc,
-					"Needed lines left line_cnt=%d\n",
-					ln_cnt);
-			return 0;
-		}
-
-		SDE_ERROR_VIDENC(vid_enc, "line count is less. line_cnt = %d\n",
-				ln_cnt);
-		/* Add delay so that line count is in active region */
-		udelay(delay);
-		retry--;
-	}
-
-	return -EINVAL;
-}
-
-static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
-{
-	ops->is_master = sde_encoder_phys_vid_is_master;
-	ops->mode_set = sde_encoder_phys_vid_mode_set;
-	ops->cont_splash_mode_set = sde_encoder_phys_vid_cont_splash_mode_set;
-	ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
-	ops->enable = sde_encoder_phys_vid_enable;
-	ops->disable = sde_encoder_phys_vid_disable;
-	ops->destroy = sde_encoder_phys_vid_destroy;
-	ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
-	ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
-	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_vblank;
-	ops->wait_for_vblank = sde_encoder_phys_vid_wait_for_vblank_no_notify;
-	ops->wait_for_tx_complete = sde_encoder_phys_vid_wait_for_vblank;
-	ops->irq_control = sde_encoder_phys_vid_irq_control;
-	ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
-	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
-	ops->needs_single_flush = sde_encoder_phys_needs_single_flush;
-	ops->setup_misr = sde_encoder_helper_setup_misr;
-	ops->collect_misr = sde_encoder_helper_collect_misr;
-	ops->trigger_flush = sde_encoder_helper_trigger_flush;
-	ops->hw_reset = sde_encoder_helper_hw_reset;
-	ops->get_line_count = sde_encoder_phys_vid_get_line_count;
-	ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
-	ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
-	ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
-}
-
-struct sde_encoder_phys *sde_encoder_phys_vid_init(
-		struct sde_enc_phys_init_params *p)
-{
-	struct sde_encoder_phys *phys_enc = NULL;
-	struct sde_encoder_phys_vid *vid_enc = NULL;
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_encoder_irq *irq;
-	int i, ret = 0;
-
-	if (!p) {
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
-	if (!vid_enc) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	phys_enc = &vid_enc->base;
-
-	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
-	if (IS_ERR_OR_NULL(hw_mdp)) {
-		ret = PTR_ERR(hw_mdp);
-		SDE_ERROR("failed to get mdptop\n");
-		goto fail;
-	}
-	phys_enc->hw_mdptop = hw_mdp;
-	phys_enc->intf_idx = p->intf_idx;
-
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-
-	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
-	phys_enc->parent = p->parent;
-	phys_enc->parent_ops = p->parent_ops;
-	phys_enc->sde_kms = p->sde_kms;
-	phys_enc->split_role = p->split_role;
-	phys_enc->intf_mode = INTF_MODE_VIDEO;
-	phys_enc->enc_spinlock = p->enc_spinlock;
-	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
-	phys_enc->comp_type = p->comp_type;
-	for (i = 0; i < INTR_IDX_MAX; i++) {
-		irq = &phys_enc->irq[i];
-		INIT_LIST_HEAD(&irq->cb.list);
-		irq->irq_idx = -EINVAL;
-		irq->hw_idx = -EINVAL;
-		irq->cb.arg = phys_enc;
-	}
-
-	irq = &phys_enc->irq[INTR_IDX_VSYNC];
-	irq->name = "vsync_irq";
-	irq->intr_type = SDE_IRQ_TYPE_INTF_VSYNC;
-	irq->intr_idx = INTR_IDX_VSYNC;
-	irq->cb.func = sde_encoder_phys_vid_vblank_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->name = "underrun";
-	irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
-	irq->intr_idx = INTR_IDX_UNDERRUN;
-	irq->cb.func = sde_encoder_phys_vid_underrun_irq;
-
-	atomic_set(&phys_enc->vblank_refcount, 0);
-	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-
-	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
-
-	return phys_enc;
-
-fail:
-	SDE_ERROR("failed to create encoder\n");
-	if (vid_enc)
-		sde_encoder_phys_vid_destroy(phys_enc);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
deleted file mode 100644
index 6754977..0000000
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ /dev/null
@@ -1,1869 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/debugfs.h>
-#include <uapi/drm/sde_drm.h>
-
-#include "sde_encoder_phys.h"
-#include "sde_formats.h"
-#include "sde_hw_top.h"
-#include "sde_hw_interrupts.h"
-#include "sde_core_irq.h"
-#include "sde_wb.h"
-#include "sde_vbif.h"
-#include "sde_crtc.h"
-
-#define to_sde_encoder_phys_wb(x) \
-	container_of(x, struct sde_encoder_phys_wb, base)
-
-#define WBID(wb_enc) \
-	((wb_enc && wb_enc->wb_dev) ? wb_enc->wb_dev->wb_idx - WB_0 : -1)
-
-#define TO_S15D16(_x_)	((_x_) << 7)
-
-static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, SDE_NONE,
-	INTR_IDX_PP2_OVFL, INTR_IDX_PP3_OVFL, INTR_IDX_PP4_OVFL,
-	INTR_IDX_PP5_OVFL, SDE_NONE, SDE_NONE};
-
-/**
- * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix
- *
- */
-static struct sde_csc_cfg sde_encoder_phys_wb_rgb2yuv_601l = {
-	{
-		TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
-		TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1),
-		TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc)
-	},
-	{ 0x00, 0x00, 0x00 },
-	{ 0x0040, 0x0200, 0x0200 },
-	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
-	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
-/**
- * sde_encoder_phys_wb_is_master - report wb always as master encoder
- */
-static bool sde_encoder_phys_wb_is_master(struct sde_encoder_phys *phys_enc)
-{
-	return true;
-}
-
-/**
- * sde_encoder_phys_wb_get_intr_type - get interrupt type based on block mode
- * @hw_wb:	Pointer to h/w writeback driver
- */
-static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
-		struct sde_hw_wb *hw_wb)
-{
-	return (hw_wb->caps->features & BIT(SDE_WB_BLOCK_MODE)) ?
-			SDE_IRQ_TYPE_WB_ROT_COMP : SDE_IRQ_TYPE_WB_WFD_COMP;
-}
-
-/**
- * sde_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_set_ot_limit(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct sde_vbif_set_ot_params ot_params;
-
-	memset(&ot_params, 0, sizeof(ot_params));
-	ot_params.xin_id = hw_wb->caps->xin_id;
-	ot_params.num = hw_wb->idx - WB_0;
-	ot_params.width = wb_enc->wb_roi.w;
-	ot_params.height = wb_enc->wb_roi.h;
-	ot_params.is_wfd = true;
-	ot_params.frame_rate = phys_enc->cached_mode.vrefresh;
-	ot_params.vbif_idx = hw_wb->caps->vbif_idx;
-	ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
-	ot_params.rd = false;
-
-	sde_vbif_set_ot_limit(phys_enc->sde_kms, &ot_params);
-}
-
-/**
- * sde_encoder_phys_wb_set_traffic_shaper - set traffic shaper for writeback
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_set_traffic_shaper(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb_cfg *wb_cfg = &wb_enc->wb_cfg;
-
-	/* traffic shaper is only enabled for rotator */
-	wb_cfg->ts_cfg.en = false;
-}
-
-/**
- * sde_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_set_qos_remap(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_wb *hw_wb;
-	struct drm_crtc *crtc;
-	struct sde_vbif_set_qos_params qos_params;
-
-	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	if (!wb_enc->crtc) {
-		SDE_ERROR("invalid crtc");
-		return;
-	}
-
-	crtc = wb_enc->crtc;
-
-	if (!wb_enc->hw_wb || !wb_enc->hw_wb->caps) {
-		SDE_ERROR("invalid writeback hardware\n");
-		return;
-	}
-
-	hw_wb = wb_enc->hw_wb;
-
-	memset(&qos_params, 0, sizeof(qos_params));
-	qos_params.vbif_idx = hw_wb->caps->vbif_idx;
-	qos_params.xin_id = hw_wb->caps->xin_id;
-	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
-	qos_params.num = hw_wb->idx - WB_0;
-	qos_params.client_type = phys_enc->in_clone_mode ?
-					VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;
-
-	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d clone:%d\n",
-			qos_params.num,
-			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.client_type);
-
-	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
-}
-
-static u64 _sde_encoder_phys_wb_get_qos_lut(const struct sde_qos_lut_tbl *tbl,
-		u32 total_fl)
-{
-	int i;
-
-	if (!tbl || !tbl->nentry || !tbl->entries)
-		return 0;
-
-	for (i = 0; i < tbl->nentry; i++)
-		if (total_fl <= tbl->entries[i].fl)
-			return tbl->entries[i].lut;
-
-	/* if last fl is zero, use as default */
-	if (!tbl->entries[i-1].fl)
-		return tbl->entries[i-1].lut;
-
-	return 0;
-}
-
-/**
- * sde_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_set_qos(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_wb *hw_wb;
-	struct sde_hw_wb_qos_cfg qos_cfg;
-	struct sde_mdss_cfg *catalog;
-
-	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
-		SDE_ERROR("invalid parameter(s)\n");
-		return;
-	}
-	catalog = phys_enc->sde_kms->catalog;
-
-	wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	if (!wb_enc->hw_wb) {
-		SDE_ERROR("invalid writeback hardware\n");
-		return;
-	}
-
-	hw_wb = wb_enc->hw_wb;
-
-	memset(&qos_cfg, 0, sizeof(struct sde_hw_wb_qos_cfg));
-	qos_cfg.danger_safe_en = true;
-	qos_cfg.danger_lut =
-		catalog->perf.danger_lut_tbl[SDE_QOS_LUT_USAGE_NRT];
-
-	if (phys_enc->in_clone_mode)
-		qos_cfg.safe_lut = (u32) _sde_encoder_phys_wb_get_qos_lut(
-			&catalog->perf.sfe_lut_tbl[SDE_QOS_LUT_USAGE_CWB], 0);
-	else
-		qos_cfg.safe_lut = (u32) _sde_encoder_phys_wb_get_qos_lut(
-			&catalog->perf.sfe_lut_tbl[SDE_QOS_LUT_USAGE_NRT], 0);
-
-	if (phys_enc->in_clone_mode)
-		qos_cfg.creq_lut = _sde_encoder_phys_wb_get_qos_lut(
-			&catalog->perf.qos_lut_tbl[SDE_QOS_LUT_USAGE_CWB], 0);
-	else
-		qos_cfg.creq_lut = _sde_encoder_phys_wb_get_qos_lut(
-			&catalog->perf.qos_lut_tbl[SDE_QOS_LUT_USAGE_NRT], 0);
-
-	if (hw_wb->ops.setup_danger_safe_lut)
-		hw_wb->ops.setup_danger_safe_lut(hw_wb, &qos_cfg);
-
-	if (hw_wb->ops.setup_creq_lut)
-		hw_wb->ops.setup_creq_lut(hw_wb, &qos_cfg);
-
-	if (hw_wb->ops.setup_qos_ctrl)
-		hw_wb->ops.setup_qos_ctrl(hw_wb, &qos_cfg);
-}
-
-/**
- * sde_encoder_phys_setup_cdm - setup chroma down block
- * @phys_enc:	Pointer to physical encoder
- * @fb:		Pointer to output framebuffer
- * @format:	Output format
- */
-void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb, const struct sde_format *format,
-		struct sde_rect *wb_roi)
-{
-	struct sde_hw_cdm *hw_cdm;
-	struct sde_hw_cdm_cfg *cdm_cfg;
-	struct sde_hw_pingpong *hw_pp;
-	int ret;
-
-	if (!phys_enc || !format)
-		return;
-
-	cdm_cfg = &phys_enc->cdm_cfg;
-	hw_pp = phys_enc->hw_pp;
-	hw_cdm = phys_enc->hw_cdm;
-	if (!hw_cdm)
-		return;
-
-	if (!SDE_FORMAT_IS_YUV(format)) {
-		SDE_DEBUG("[cdm_disable fmt:%x]\n",
-				format->base.pixel_format);
-
-		if (hw_cdm && hw_cdm->ops.disable)
-			hw_cdm->ops.disable(hw_cdm);
-
-		return;
-	}
-
-	memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
-
-	if (!wb_roi)
-		return;
-
-	cdm_cfg->output_width = wb_roi->w;
-	cdm_cfg->output_height = wb_roi->h;
-	cdm_cfg->output_fmt = format;
-	cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
-	cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
-		CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
-
-	/* enable 10 bit logic */
-	switch (cdm_cfg->output_fmt->chroma_sample) {
-	case SDE_CHROMA_RGB:
-		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
-		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
-		break;
-	case SDE_CHROMA_H2V1:
-		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
-		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
-		break;
-	case SDE_CHROMA_420:
-		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
-		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
-		break;
-	case SDE_CHROMA_H1V2:
-	default:
-		SDE_ERROR("unsupported chroma sampling type\n");
-		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
-		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
-		break;
-	}
-
-	SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
-			cdm_cfg->output_width,
-			cdm_cfg->output_height,
-			cdm_cfg->output_fmt->base.pixel_format,
-			cdm_cfg->output_type,
-			cdm_cfg->output_bit_depth,
-			cdm_cfg->h_cdwn_type,
-			cdm_cfg->v_cdwn_type);
-
-	if (hw_cdm && hw_cdm->ops.setup_csc_data) {
-		ret = hw_cdm->ops.setup_csc_data(hw_cdm,
-				&sde_encoder_phys_wb_rgb2yuv_601l);
-		if (ret < 0) {
-			SDE_ERROR("failed to setup CSC %d\n", ret);
-			return;
-		}
-	}
-
-	if (hw_cdm && hw_cdm->ops.setup_cdwn) {
-		ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
-		if (ret < 0) {
-			SDE_ERROR("failed to setup CDM %d\n", ret);
-			return;
-		}
-	}
-
-	if (hw_cdm && hw_pp && hw_cdm->ops.enable) {
-		cdm_cfg->pp_id = hw_pp->idx;
-		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
-		if (ret < 0) {
-			SDE_ERROR("failed to enable CDM %d\n", ret);
-			return;
-		}
-	}
-}
-
-/**
- * sde_encoder_phys_wb_setup_fb - setup output framebuffer
- * @phys_enc:	Pointer to physical encoder
- * @fb:		Pointer to output framebuffer
- * @wb_roi:	Pointer to output region of interest
- */
-static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb, struct sde_rect *wb_roi)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb;
-	struct sde_hw_wb_cfg *wb_cfg;
-	struct sde_hw_wb_cdp_cfg *cdp_cfg;
-	const struct msm_format *format;
-	int ret;
-	struct msm_gem_address_space *aspace;
-	u32 fb_mode;
-
-	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog ||
-			!phys_enc->connector) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	hw_wb = wb_enc->hw_wb;
-	wb_cfg = &wb_enc->wb_cfg;
-	cdp_cfg = &wb_enc->cdp_cfg;
-	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
-
-	wb_cfg->intf_mode = phys_enc->intf_mode;
-
-	fb_mode = sde_connector_get_property(phys_enc->connector->state,
-			CONNECTOR_PROP_FB_TRANSLATION_MODE);
-	if (phys_enc->enable_state == SDE_ENC_DISABLING)
-		wb_cfg->is_secure = false;
-	else if (fb_mode == SDE_DRM_FB_SEC)
-		wb_cfg->is_secure = true;
-	else
-		wb_cfg->is_secure = false;
-
-	aspace = (wb_cfg->is_secure) ?
-			wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
-			wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
-
-	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
-
-	ret = msm_framebuffer_prepare(fb, aspace);
-	if (ret) {
-		SDE_ERROR("prep fb failed, %d\n", ret);
-		return;
-	}
-
-	/* cache framebuffer for cleanup in writeback done */
-	wb_enc->wb_fb = fb;
-	wb_enc->wb_aspace = aspace;
-
-	format = msm_framebuffer_format(fb);
-	if (!format) {
-		SDE_DEBUG("invalid format for fb\n");
-		return;
-	}
-
-	wb_cfg->dest.format = sde_get_sde_format_ext(
-			format->pixel_format,
-			fb->modifier);
-	if (!wb_cfg->dest.format) {
-		/* this error should be detected during atomic_check */
-		SDE_ERROR("failed to get format %x\n", format->pixel_format);
-		return;
-	}
-	wb_cfg->roi = *wb_roi;
-
-	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
-		ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
-		if (ret) {
-			SDE_DEBUG("failed to populate layout %d\n", ret);
-			return;
-		}
-		wb_cfg->dest.width = fb->width;
-		wb_cfg->dest.height = fb->height;
-		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
-	} else {
-		ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
-			&wb_cfg->dest);
-		if (ret) {
-			/* this error should be detected during atomic_check */
-			SDE_DEBUG("failed to populate layout %d\n", ret);
-			return;
-		}
-	}
-
-	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
-			(wb_cfg->dest.format->element[0] == C1_B_Cb))
-		swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
-
-	SDE_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
-			wb_cfg->dest.plane_addr[0],
-			wb_cfg->dest.plane_addr[1],
-			wb_cfg->dest.plane_addr[2],
-			wb_cfg->dest.plane_addr[3]);
-	SDE_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
-			wb_cfg->dest.plane_pitch[0],
-			wb_cfg->dest.plane_pitch[1],
-			wb_cfg->dest.plane_pitch[2],
-			wb_cfg->dest.plane_pitch[3]);
-
-	if (hw_wb->ops.setup_roi)
-		hw_wb->ops.setup_roi(hw_wb, wb_cfg);
-
-	if (hw_wb->ops.setup_outformat)
-		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
-
-	if (hw_wb->ops.setup_cdp) {
-		memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
-
-		cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
-				[SDE_PERF_CDP_USAGE_NRT].wr_enable;
-		cdp_cfg->ubwc_meta_enable =
-				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
-		cdp_cfg->tile_amortize_enable =
-				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
-				SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
-		cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
-
-		hw_wb->ops.setup_cdp(hw_wb, cdp_cfg);
-	}
-
-	if (hw_wb->ops.setup_outaddress) {
-		SDE_EVT32(hw_wb->idx,
-				wb_cfg->dest.width,
-				wb_cfg->dest.height,
-				wb_cfg->dest.plane_addr[0],
-				wb_cfg->dest.plane_size[0],
-				wb_cfg->dest.plane_addr[1],
-				wb_cfg->dest.plane_size[1],
-				wb_cfg->dest.plane_addr[2],
-				wb_cfg->dest.plane_size[2],
-				wb_cfg->dest.plane_addr[3],
-				wb_cfg->dest.plane_size[3]);
-		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
-	}
-}
-
-static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc,
-					bool enable)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
-	struct sde_crtc *crtc = to_sde_crtc(wb_enc->crtc);
-	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
-	bool need_merge = (crtc->num_mixers > 1);
-	int i = 0;
-
-	if (!phys_enc->in_clone_mode) {
-		SDE_DEBUG("not in CWB mode. early return\n");
-		return;
-	}
-
-	if (!hw_pp || !hw_ctl || !hw_wb || hw_pp->idx >= PINGPONG_MAX) {
-		SDE_ERROR("invalid hw resources - return\n");
-		return;
-	}
-
-	hw_ctl = crtc->mixers[0].hw_ctl;
-	if (hw_ctl && hw_ctl->ops.setup_intf_cfg_v1 &&
-			test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) {
-		struct sde_hw_intf_cfg_v1 intf_cfg = { 0, };
-
-		for (i = 0; i < crtc->num_mixers; i++)
-			intf_cfg.cwb[intf_cfg.cwb_count++] =
-				(enum sde_cwb)(hw_pp->idx + i);
-
-		if (enable && hw_pp->merge_3d && (intf_cfg.merge_3d_count <
-				MAX_MERGE_3D_PER_CTL_V1) && need_merge)
-			intf_cfg.merge_3d[intf_cfg.merge_3d_count++] =
-				hw_pp->merge_3d->idx;
-
-		if (hw_pp->ops.setup_3d_mode)
-			hw_pp->ops.setup_3d_mode(hw_pp, (enable && need_merge) ?
-					BLEND_3D_H_ROW_INT : 0);
-
-		if (hw_wb->ops.bind_pingpong_blk)
-			hw_wb->ops.bind_pingpong_blk(hw_wb, enable, hw_pp->idx);
-
-		if (hw_ctl->ops.update_cwb_cfg) {
-			hw_ctl->ops.update_cwb_cfg(hw_ctl, &intf_cfg);
-			SDE_DEBUG("in CWB mode on CTL_%d PP-%d merge3d:%d\n",
-					hw_ctl->idx - CTL_0,
-					hw_pp->idx - PINGPONG_0,
-					hw_pp->merge_3d ?
-					hw_pp->merge_3d->idx - MERGE_3D_0 : -1);
-		}
-	} else {
-		struct sde_hw_intf_cfg *intf_cfg = &phys_enc->intf_cfg;
-
-		memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
-		intf_cfg->intf = SDE_NONE;
-		intf_cfg->wb = hw_wb->idx;
-
-		if (hw_ctl && hw_ctl->ops.update_wb_cfg) {
-			hw_ctl->ops.update_wb_cfg(hw_ctl, intf_cfg, enable);
-			SDE_DEBUG("in CWB mode adding WB for CTL_%d\n",
-					hw_ctl->idx - CTL_0);
-		}
-	}
-}
-
-/**
- * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc,
-		const struct sde_format *format)
-{
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_wb *hw_wb;
-	struct sde_hw_cdm *hw_cdm;
-	struct sde_hw_ctl *ctl;
-	const int num_wb = 1;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	if (phys_enc->in_clone_mode) {
-		SDE_DEBUG("in CWB mode. early return\n");
-		return;
-	}
-
-	wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	hw_wb = wb_enc->hw_wb;
-	hw_cdm = phys_enc->hw_cdm;
-	ctl = phys_enc->hw_ctl;
-
-	if (test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
-		(phys_enc->hw_ctl &&
-		 phys_enc->hw_ctl->ops.setup_intf_cfg_v1)) {
-		struct sde_hw_intf_cfg_v1 *intf_cfg_v1 = &phys_enc->intf_cfg_v1;
-		struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
-		enum sde_3d_blend_mode mode_3d;
-
-		memset(intf_cfg_v1, 0, sizeof(struct sde_hw_intf_cfg_v1));
-
-		mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
-		intf_cfg_v1->intf_count = SDE_NONE;
-		intf_cfg_v1->wb_count = num_wb;
-		intf_cfg_v1->wb[0] = hw_wb->idx;
-		if (SDE_FORMAT_IS_YUV(format)) {
-			intf_cfg_v1->cdm_count = num_wb;
-			intf_cfg_v1->cdm[0] = hw_cdm->idx;
-		}
-
-		if (mode_3d && hw_pp && hw_pp->merge_3d &&
-			intf_cfg_v1->merge_3d_count < MAX_MERGE_3D_PER_CTL_V1)
-			intf_cfg_v1->merge_3d[intf_cfg_v1->merge_3d_count++] =
-					hw_pp->merge_3d->idx;
-
-		if (hw_pp && hw_pp->ops.setup_3d_mode)
-			hw_pp->ops.setup_3d_mode(hw_pp, mode_3d);
-
-		/* setup which pp blk will connect to this wb */
-		if (hw_pp && hw_wb->ops.bind_pingpong_blk)
-			hw_wb->ops.bind_pingpong_blk(hw_wb, true,
-					hw_pp->idx);
-
-		phys_enc->hw_ctl->ops.setup_intf_cfg_v1(phys_enc->hw_ctl,
-				intf_cfg_v1);
-	} else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
-		struct sde_hw_intf_cfg *intf_cfg = &phys_enc->intf_cfg;
-
-		memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
-
-		intf_cfg->intf = SDE_NONE;
-		intf_cfg->wb = hw_wb->idx;
-		intf_cfg->mode_3d =
-			sde_encoder_helper_get_3d_blend_mode(phys_enc);
-		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
-				intf_cfg);
-	}
-
-}
-
-static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
-		struct drm_crtc_state *crtc_state)
-{
-	struct drm_encoder *encoder;
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	const struct sde_wb_cfg *wb_cfg = wb_enc->hw_wb->caps;
-
-	phys_enc->in_clone_mode = false;
-
-	/* Check if WB has CWB support */
-	if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB)))
-		return;
-
-	/* if any other encoder is connected to same crtc enable clone mode*/
-	drm_for_each_encoder(encoder, crtc_state->crtc->dev) {
-		if (encoder->crtc != crtc_state->crtc)
-			continue;
-
-		if (phys_enc->parent != encoder) {
-			phys_enc->in_clone_mode = true;
-			break;
-		}
-	}
-
-	SDE_DEBUG("detect CWB - status:%d\n", phys_enc->in_clone_mode);
-}
-
-static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
-			struct drm_crtc_state *crtc_state,
-			 struct drm_connector_state *conn_state)
-{
-	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
-	struct sde_rect wb_roi = {0,};
-	struct sde_rect pu_roi = {0,};
-	int data_pt;
-	int ds_outw = 0;
-	int ds_outh = 0;
-	int ds_in_use = false;
-	int i = 0;
-	int ret = 0;
-
-	if (!phys_enc->in_clone_mode) {
-		SDE_DEBUG("not in CWB mode. early return\n");
-		goto exit;
-	}
-
-	ret = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
-	if (ret) {
-		SDE_ERROR("failed to get roi %d\n", ret);
-		goto exit;
-	}
-
-	data_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT);
-
-	/* compute cumulative ds output dimensions if in use */
-	for (i = 0; i < cstate->num_ds; i++)
-		if (cstate->ds_cfg[i].scl3_cfg.enable) {
-			ds_in_use = true;
-			ds_outw += cstate->ds_cfg[i].scl3_cfg.dst_width;
-			ds_outh += cstate->ds_cfg[i].scl3_cfg.dst_height;
-		}
-
-	/* if ds in use check wb roi against ds output dimensions */
-	if ((data_pt == CAPTURE_DSPP_OUT) &&  ds_in_use &&
-			((wb_roi.w != ds_outw) || (wb_roi.h != ds_outh))) {
-		SDE_ERROR("invalid wb roi with dest scalar [%dx%d vs %dx%d]\n",
-				wb_roi.w, wb_roi.h, ds_outw, ds_outh);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	/* validate conn roi against pu rect */
-	if (cstate->user_roi_list.num_rects) {
-		sde_kms_rect_merge_rectangles(&cstate->user_roi_list, &pu_roi);
-		if (wb_roi.w != pu_roi.w || wb_roi.h != pu_roi.h) {
-			SDE_ERROR("invalid wb roi with pu [%dx%d vs %dx%d]\n",
-					wb_roi.w, wb_roi.h, pu_roi.w, pu_roi.h);
-			ret = -EINVAL;
-			goto exit;
-		}
-	}
-exit:
-	return ret;
-}
-
-/**
- * sde_encoder_phys_wb_atomic_check - verify and fixup given atomic states
- * @phys_enc:	Pointer to physical encoder
- * @crtc_state:	Pointer to CRTC atomic state
- * @conn_state:	Pointer to connector atomic state
- */
-static int sde_encoder_phys_wb_atomic_check(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	const struct sde_wb_cfg *wb_cfg = hw_wb->caps;
-	struct drm_framebuffer *fb;
-	const struct sde_format *fmt;
-	struct sde_rect wb_roi;
-	const struct drm_display_mode *mode = &crtc_state->mode;
-	int rc;
-
-	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id, mode->name,
-			mode->hdisplay, mode->vdisplay);
-
-	if (!conn_state || !conn_state->connector) {
-		SDE_ERROR("invalid connector state\n");
-		return -EINVAL;
-	} else if (conn_state->connector->status !=
-			connector_status_connected) {
-		SDE_ERROR("connector not connected %d\n",
-				conn_state->connector->status);
-		return -EINVAL;
-	}
-
-	_sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
-
-	memset(&wb_roi, 0, sizeof(struct sde_rect));
-
-	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
-	if (rc) {
-		SDE_ERROR("failed to get roi %d\n", rc);
-		return rc;
-	}
-
-	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
-			wb_roi.w, wb_roi.h);
-
-	/* bypass check if commit with no framebuffer */
-	fb = sde_wb_connector_state_get_output_fb(conn_state);
-	if (!fb) {
-		SDE_DEBUG("no output framebuffer\n");
-		return 0;
-	}
-
-	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
-			fb->width, fb->height);
-
-	fmt = sde_get_sde_format_ext(fb->format->format, fb->modifier);
-	if (!fmt) {
-		SDE_ERROR("unsupported output pixel format:%x\n",
-				fb->format->format);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->format->format,
-			fb->modifier);
-
-	if (SDE_FORMAT_IS_YUV(fmt) &&
-			!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
-		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
-		return -EINVAL;
-	}
-
-	if (SDE_FORMAT_IS_UBWC(fmt) &&
-			!(wb_cfg->features & BIT(SDE_WB_UBWC))) {
-		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
-		return -EINVAL;
-	}
-
-	if (SDE_FORMAT_IS_YUV(fmt) != !!phys_enc->hw_cdm)
-		crtc_state->mode_changed = true;
-
-	if (wb_roi.w && wb_roi.h) {
-		if (wb_roi.w != mode->hdisplay) {
-			SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w,
-					mode->hdisplay);
-			return -EINVAL;
-		} else if (wb_roi.h != mode->vdisplay) {
-			SDE_ERROR("invalid roi h=%d, mode h=%d\n", wb_roi.h,
-					mode->vdisplay);
-			return -EINVAL;
-		} else if (wb_roi.x + wb_roi.w > fb->width) {
-			SDE_ERROR("invalid roi x=%d, w=%d, fb w=%d\n",
-					wb_roi.x, wb_roi.w, fb->width);
-			return -EINVAL;
-		} else if (wb_roi.y + wb_roi.h > fb->height) {
-			SDE_ERROR("invalid roi y=%d, h=%d, fb h=%d\n",
-					wb_roi.y, wb_roi.h, fb->height);
-			return -EINVAL;
-		} else if (wb_roi.w > wb_cfg->sblk->maxlinewidth) {
-			SDE_ERROR("invalid roi w=%d, maxlinewidth=%u\n",
-					wb_roi.w, wb_cfg->sblk->maxlinewidth);
-			return -EINVAL;
-		}
-	} else {
-		if (wb_roi.x || wb_roi.y) {
-			SDE_ERROR("invalid roi x=%d, y=%d\n",
-					wb_roi.x, wb_roi.y);
-			return -EINVAL;
-		} else if (fb->width != mode->hdisplay) {
-			SDE_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
-					mode->hdisplay);
-			return -EINVAL;
-		} else if (fb->height != mode->vdisplay) {
-			SDE_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
-					mode->vdisplay);
-			return -EINVAL;
-		} else if (fb->width > wb_cfg->sblk->maxlinewidth) {
-			SDE_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
-					fb->width, wb_cfg->sblk->maxlinewidth);
-			return -EINVAL;
-		}
-	}
-
-	rc = _sde_enc_phys_wb_validate_cwb(phys_enc, crtc_state, conn_state);
-	if (rc) {
-		SDE_ERROR("failed in cwb validation %d\n", rc);
-		return rc;
-	}
-
-	return rc;
-}
-
-static void _sde_encoder_phys_wb_update_cwb_flush(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_wb *hw_wb;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_cdm *hw_cdm;
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_crtc *crtc;
-	struct sde_crtc_state *crtc_state;
-	int i = 0;
-	int cwb_capture_mode = 0;
-	enum sde_cwb cwb_idx = 0;
-	enum sde_cwb src_pp_idx = 0;
-	bool dspp_out = false;
-	bool need_merge = false;
-
-	if (!phys_enc->in_clone_mode) {
-		SDE_DEBUG("not in CWB mode. early return\n");
-		return;
-	}
-
-	wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	crtc = to_sde_crtc(wb_enc->crtc);
-	crtc_state = to_sde_crtc_state(wb_enc->crtc->state);
-	cwb_capture_mode = sde_crtc_get_property(crtc_state,
-			CRTC_PROP_CAPTURE_OUTPUT);
-
-	hw_pp = phys_enc->hw_pp;
-	hw_wb = wb_enc->hw_wb;
-	hw_cdm = phys_enc->hw_cdm;
-
-	/* In CWB mode, program actual source master sde_hw_ctl from crtc */
-	hw_ctl = crtc->mixers[0].hw_ctl;
-	if (!hw_ctl || !hw_wb || !hw_pp) {
-		SDE_ERROR("[wb] HW resource not available for CWB\n");
-		return;
-	}
-
-	/* treating LM idx of primary display ctl path as source ping-pong idx*/
-	src_pp_idx = (enum sde_cwb)crtc->mixers[0].hw_lm->idx;
-	cwb_idx = (enum sde_cwb)hw_pp->idx;
-	dspp_out = (cwb_capture_mode == CAPTURE_DSPP_OUT);
-	need_merge = (crtc->num_mixers > 1) ? true : false;
-
-	if (src_pp_idx > CWB_0 ||  ((cwb_idx + crtc->num_mixers) > CWB_MAX)) {
-		SDE_ERROR("invalid hw config for CWB\n");
-		return;
-	}
-
-	if (hw_ctl->ops.update_bitmask_wb)
-		hw_ctl->ops.update_bitmask_wb(hw_ctl, hw_wb->idx, 1);
-
-	if (hw_ctl->ops.update_bitmask_cdm && hw_cdm)
-		hw_ctl->ops.update_bitmask_cdm(hw_ctl, hw_cdm->idx, 1);
-
-	if (test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) {
-		for (i = 0; i < crtc->num_mixers; i++) {
-			cwb_idx = (enum sde_cwb) (hw_pp->idx + i);
-			src_pp_idx = (enum sde_cwb) (src_pp_idx + i);
-
-			if (hw_wb->ops.program_cwb_ctrl)
-				hw_wb->ops.program_cwb_ctrl(hw_wb, cwb_idx,
-						src_pp_idx, dspp_out);
-
-			if (hw_ctl->ops.update_bitmask_cwb)
-				hw_ctl->ops.update_bitmask_cwb(hw_ctl,
-						cwb_idx, 1);
-		}
-
-		if (need_merge && hw_ctl->ops.update_bitmask_merge3d
-				&& hw_pp && hw_pp->merge_3d)
-			hw_ctl->ops.update_bitmask_merge3d(hw_ctl,
-					hw_pp->merge_3d->idx, 1);
-	} else {
-		phys_enc->hw_mdptop->ops.set_cwb_ppb_cntl(phys_enc->hw_mdptop,
-				need_merge, dspp_out);
-	}
-}
-
-/**
- * _sde_encoder_phys_wb_update_flush - flush hardware update
- * @phys_enc:	Pointer to physical encoder
- */
-static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_wb *hw_wb;
-	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_cdm *hw_cdm;
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_ctl_flush_cfg pending_flush = {0,};
-
-	if (!phys_enc)
-		return;
-
-	wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	hw_wb = wb_enc->hw_wb;
-	hw_cdm = phys_enc->hw_cdm;
-	hw_pp = phys_enc->hw_pp;
-	hw_ctl = phys_enc->hw_ctl;
-
-	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
-
-	if (phys_enc->in_clone_mode) {
-		SDE_DEBUG("in CWB mode. early return\n");
-		return;
-	}
-
-	if (!hw_ctl) {
-		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
-		return;
-	}
-
-	if (hw_ctl->ops.update_bitmask_wb)
-		hw_ctl->ops.update_bitmask_wb(hw_ctl, hw_wb->idx, 1);
-
-	if (hw_ctl->ops.update_bitmask_cdm && hw_cdm)
-		hw_ctl->ops.update_bitmask_cdm(hw_ctl, hw_cdm->idx, 1);
-
-	if (hw_ctl->ops.update_bitmask_merge3d && hw_pp && hw_pp->merge_3d)
-		hw_ctl->ops.update_bitmask_merge3d(hw_ctl,
-				hw_pp->merge_3d->idx, 1);
-
-	if (hw_ctl->ops.get_pending_flush)
-		hw_ctl->ops.get_pending_flush(hw_ctl,
-				&pending_flush);
-
-	SDE_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
-			hw_ctl->idx - CTL_0, pending_flush.pending_flush_mask,
-			hw_wb->idx - WB_0);
-}
-
-/**
- * sde_encoder_phys_wb_setup - setup writeback encoder
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_setup(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct drm_display_mode mode = phys_enc->cached_mode;
-	struct drm_framebuffer *fb;
-	struct sde_rect *wb_roi = &wb_enc->wb_roi;
-
-	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode.base.id, mode.name,
-			mode.hdisplay, mode.vdisplay);
-
-	memset(wb_roi, 0, sizeof(struct sde_rect));
-
-	/* clear writeback framebuffer - will be updated in setup_fb */
-	wb_enc->wb_fb = NULL;
-	wb_enc->wb_aspace = NULL;
-
-	if (phys_enc->enable_state == SDE_ENC_DISABLING) {
-		fb = wb_enc->fb_disable;
-		wb_roi->w = 0;
-		wb_roi->h = 0;
-	} else {
-		fb = sde_wb_get_output_fb(wb_enc->wb_dev);
-		sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
-	}
-
-	if (!fb) {
-		SDE_DEBUG("no output framebuffer\n");
-		return;
-	}
-
-	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
-			fb->width, fb->height);
-
-	if (wb_roi->w == 0 || wb_roi->h == 0) {
-		wb_roi->x = 0;
-		wb_roi->y = 0;
-		wb_roi->w = fb->width;
-		wb_roi->h = fb->height;
-	}
-
-	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi->x, wb_roi->y,
-			wb_roi->w, wb_roi->h);
-
-	wb_enc->wb_fmt = sde_get_sde_format_ext(fb->format->format,
-							fb->modifier);
-	if (!wb_enc->wb_fmt) {
-		SDE_ERROR("unsupported output pixel format: %d\n",
-				fb->format->format);
-		return;
-	}
-
-	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->format->format,
-			fb->modifier);
-
-	sde_encoder_phys_wb_set_ot_limit(phys_enc);
-
-	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
-
-	sde_encoder_phys_wb_set_qos_remap(phys_enc);
-
-	sde_encoder_phys_wb_set_qos(phys_enc);
-
-	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
-
-	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
-
-	sde_encoder_phys_wb_setup_cdp(phys_enc, wb_enc->wb_fmt);
-
-	_sde_encoder_phys_wb_setup_cwb(phys_enc, true);
-}
-
-static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error)
-{
-	struct sde_encoder_phys_wb *wb_enc = arg;
-	struct sde_encoder_phys *phys_enc = &wb_enc->base;
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	u32 event = frame_error ? SDE_ENCODER_FRAME_EVENT_ERROR : 0;
-
-	event |= SDE_ENCODER_FRAME_EVENT_DONE |
-		SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
-	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, wb_enc->frame_count);
-
-	/* don't notify upper layer for internal commit */
-	if (phys_enc->enable_state == SDE_ENC_DISABLING)
-		goto complete;
-
-	if (!phys_enc->in_clone_mode)
-		event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
-
-	atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0);
-	if (phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
-				phys_enc, event);
-
-	if (phys_enc->parent_ops.handle_vblank_virt)
-		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
-				phys_enc);
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), hw_wb->idx - WB_0, event);
-
-complete:
-	complete_all(&wb_enc->wbdone_complete);
-}
-
-/**
- * sde_encoder_phys_wb_done_irq - Pingpong overflow interrupt handler for CWB
- * @arg:	Pointer to writeback encoder
- * @irq_idx:	interrupt index
- */
-static void sde_encoder_phys_cwb_ovflow(void *arg, int irq_idx)
-{
-	_sde_encoder_phys_wb_frame_done_helper(arg, true);
-}
-
-/**
- * sde_encoder_phys_wb_done_irq - writeback interrupt handler
- * @arg:	Pointer to writeback encoder
- * @irq_idx:	interrupt index
- */
-static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
-{
-	_sde_encoder_phys_wb_frame_done_helper(arg, false);
-}
-
-/**
- * sde_encoder_phys_wb_irq_ctrl - irq control of WB
- * @phys:	Pointer to physical encoder
- * @enable:	indicates enable or disable interrupts
- */
-static void sde_encoder_phys_wb_irq_ctrl(
-		struct sde_encoder_phys *phys, bool enable)
-{
-
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys);
-	int index = 0, refcount;
-	int ret = 0, pp = 0;
-
-	if (!wb_enc)
-		return;
-
-	if (wb_enc->bypass_irqreg)
-		return;
-
-	pp = phys->hw_pp->idx - PINGPONG_0;
-	if ((pp + CRTC_DUAL_MIXERS) >= PINGPONG_MAX) {
-		SDE_ERROR("invalid pingpong index for WB or CWB\n");
-		return;
-	}
-
-	refcount = atomic_read(&phys->wbirq_refcount);
-
-	if (enable && atomic_inc_return(&phys->wbirq_refcount) == 1) {
-		sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE);
-		if (ret)
-			atomic_dec_return(&phys->wbirq_refcount);
-
-		for (index = 0; index < CRTC_DUAL_MIXERS; index++)
-			if (cwb_irq_tbl[index + pp] != SDE_NONE)
-				sde_encoder_helper_register_irq(phys,
-					cwb_irq_tbl[index + pp]);
-	} else if (!enable &&
-			atomic_dec_return(&phys->wbirq_refcount) == 0) {
-		sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE);
-		if (ret)
-			atomic_inc_return(&phys->wbirq_refcount);
-
-		for (index = 0; index < CRTC_DUAL_MIXERS; index++)
-			if (cwb_irq_tbl[index + pp] != SDE_NONE)
-				sde_encoder_helper_unregister_irq(phys,
-					cwb_irq_tbl[index + pp]);
-	}
-}
-
-/**
- * sde_encoder_phys_wb_mode_set - set display mode
- * @phys_enc:	Pointer to physical encoder
- * @mode:	Pointer to requested display mode
- * @adj_mode:	Pointer to adjusted display mode
- */
-static void sde_encoder_phys_wb_mode_set(
-		struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_rm *rm = &phys_enc->sde_kms->rm;
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct sde_rm_hw_iter iter;
-	int i, instance;
-
-	phys_enc->cached_mode = *adj_mode;
-	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
-	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id,
-			mode->name, mode->hdisplay, mode->vdisplay);
-
-	phys_enc->hw_ctl = NULL;
-	phys_enc->hw_cdm = NULL;
-
-	/* Retrieve previously allocated HW Resources. CTL shouldn't fail */
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
-	for (i = 0; i <= instance; i++) {
-		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
-			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
-	}
-
-	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
-		SDE_ERROR("failed init ctl: %ld\n",
-			(!phys_enc->hw_ctl) ?
-			-EINVAL : PTR_ERR(phys_enc->hw_ctl));
-		phys_enc->hw_ctl = NULL;
-		return;
-	}
-
-	/* CDM is optional */
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
-	for (i = 0; i <= instance; i++) {
-		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
-			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
-	}
-
-	if (IS_ERR(phys_enc->hw_cdm)) {
-		SDE_ERROR("CDM required but not allocated: %ld\n",
-			PTR_ERR(phys_enc->hw_cdm));
-		phys_enc->hw_cdm = NULL;
-	}
-}
-
-/**
- * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
- * @phys_enc:	Pointer to physical encoder
- */
-static int sde_encoder_phys_wb_wait_for_commit_done(
-		struct sde_encoder_phys *phys_enc)
-{
-	unsigned long ret;
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	u32 irq_status, event = 0;
-	u64 wb_time = 0;
-	int rc = 0;
-	int irq_idx = phys_enc->irq[INTR_IDX_WB_DONE].irq_idx;
-	u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS);
-
-	/* Return EWOULDBLOCK since we know the wait isn't necessary */
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR("encoder already disabled\n");
-		return -EWOULDBLOCK;
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
-			!!wb_enc->wb_fb);
-
-	/* signal completion if commit with no framebuffer */
-	if (!wb_enc->wb_fb) {
-		SDE_DEBUG("no output framebuffer\n");
-		_sde_encoder_phys_wb_frame_done_helper(wb_enc, false);
-	}
-
-	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
-			msecs_to_jiffies(timeout));
-
-	if (!ret) {
-		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
-				wb_enc->frame_count);
-		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				irq_idx, true);
-		if (irq_status) {
-			SDE_DEBUG("wb:%d done but irq not triggered\n",
-					WBID(wb_enc));
-			_sde_encoder_phys_wb_frame_done_helper(wb_enc, false);
-		} else {
-			SDE_ERROR("wb:%d kickoff timed out\n",
-					WBID(wb_enc));
-			atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0);
-
-			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
-				| SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
-				| SDE_ENCODER_FRAME_EVENT_ERROR;
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc, event);
-			rc = -ETIMEDOUT;
-		}
-	}
-
-	if (!rc)
-		wb_enc->end_time = ktime_get();
-
-	/* once operation is done, disable traffic shaper */
-	if (wb_enc->wb_cfg.ts_cfg.en && wb_enc->hw_wb &&
-			wb_enc->hw_wb->ops.setup_trafficshaper) {
-		wb_enc->wb_cfg.ts_cfg.en = false;
-		wb_enc->hw_wb->ops.setup_trafficshaper(
-				wb_enc->hw_wb, &wb_enc->wb_cfg);
-	}
-
-	/* remove vote for iommu/clk/bus */
-	wb_enc->frame_count++;
-
-	if (!rc) {
-		wb_time = (u64)ktime_to_us(wb_enc->end_time) -
-				(u64)ktime_to_us(wb_enc->start_time);
-		SDE_DEBUG("wb:%d took %llu us\n", WBID(wb_enc), wb_time);
-	}
-
-	/* cleanup writeback framebuffer */
-	if (wb_enc->wb_fb && wb_enc->wb_aspace) {
-		msm_framebuffer_cleanup(wb_enc->wb_fb, wb_enc->wb_aspace);
-		wb_enc->wb_fb = NULL;
-		wb_enc->wb_aspace = NULL;
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
-			wb_time, event, rc);
-
-	return rc;
-}
-
-/**
- * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
- * @phys_enc:	Pointer to physical encoder
- * @params:	kickoff parameters
- * Returns:	Zero on success
- */
-static int sde_encoder_phys_wb_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_kickoff_params *params)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-
-	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
-			wb_enc->kickoff_count);
-
-	reinit_completion(&wb_enc->wbdone_complete);
-
-	wb_enc->kickoff_count++;
-
-	/* set OT limit & enable traffic shaper */
-	sde_encoder_phys_wb_setup(phys_enc);
-
-	_sde_encoder_phys_wb_update_flush(phys_enc);
-
-	_sde_encoder_phys_wb_update_cwb_flush(phys_enc);
-
-	/* vote for iommu/clk/bus */
-	wb_enc->start_time = ktime_get();
-
-	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
-	return 0;
-}
-
-/**
- * sde_encoder_phys_wb_trigger_flush - trigger flush processing
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_trigger_flush(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-
-	if (!phys_enc || !wb_enc->hw_wb) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	/*
-	 * Bail out iff in CWB mode. In case of CWB, primary control-path
-	 * which is actually driving would trigger the flush
-	 */
-	if (phys_enc->in_clone_mode) {
-		SDE_DEBUG("in CWB mode. early return\n");
-		return;
-	}
-
-	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
-
-	/* clear pending flush if commit with no framebuffer */
-	if (!wb_enc->wb_fb) {
-		SDE_DEBUG("no output framebuffer\n");
-		return;
-	}
-
-	sde_encoder_helper_trigger_flush(phys_enc);
-}
-
-/**
- * sde_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_handle_post_kickoff(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-
-	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
-
-	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc));
-}
-
-/**
- * _sde_encoder_phys_wb_init_internal_fb - create fb for internal commit
- * @wb_enc:		Pointer to writeback encoder
- * @pixel_format:	DRM pixel format
- * @width:		Desired fb width
- * @height:		Desired fb height
- * @pitch:		Desired fb pitch
- */
-static int _sde_encoder_phys_wb_init_internal_fb(
-		struct sde_encoder_phys_wb *wb_enc,
-		uint32_t pixel_format, uint32_t width,
-		uint32_t height, uint32_t pitch)
-{
-	struct drm_device *dev;
-	struct drm_framebuffer *fb;
-	struct drm_mode_fb_cmd2 mode_cmd;
-	uint32_t size;
-	int nplanes, i, ret;
-	struct msm_gem_address_space *aspace;
-
-	if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
-	if (!aspace) {
-		SDE_ERROR("invalid address space\n");
-		return -EINVAL;
-	}
-
-	dev = wb_enc->base.sde_kms->dev;
-	if (!dev) {
-		SDE_ERROR("invalid dev\n");
-		return -EINVAL;
-	}
-
-	memset(&mode_cmd, 0, sizeof(mode_cmd));
-	mode_cmd.pixel_format = pixel_format;
-	mode_cmd.width = width;
-	mode_cmd.height = height;
-	mode_cmd.pitches[0] = pitch;
-
-	size = sde_format_get_framebuffer_size(pixel_format,
-			mode_cmd.width, mode_cmd.height,
-			mode_cmd.pitches, 0);
-	if (!size) {
-		SDE_DEBUG("not creating zero size buffer\n");
-		return -EINVAL;
-	}
-
-	/* allocate gem tracking object */
-	nplanes = drm_format_num_planes(pixel_format);
-	if (nplanes >= SDE_MAX_PLANES) {
-		SDE_ERROR("requested format has too many planes\n");
-		return -EINVAL;
-	}
-
-	wb_enc->bo_disable[0] = msm_gem_new(dev, size,
-			MSM_BO_SCANOUT | MSM_BO_WC);
-	if (IS_ERR_OR_NULL(wb_enc->bo_disable[0])) {
-		ret = PTR_ERR(wb_enc->bo_disable[0]);
-		wb_enc->bo_disable[0] = NULL;
-
-		SDE_ERROR("failed to create bo, %d\n", ret);
-		return ret;
-	}
-
-	for (i = 0; i < nplanes; ++i) {
-		wb_enc->bo_disable[i] = wb_enc->bo_disable[0];
-		mode_cmd.pitches[i] = width *
-			drm_format_plane_cpp(pixel_format, i);
-	}
-
-	fb = msm_framebuffer_init(dev, &mode_cmd, wb_enc->bo_disable);
-	if (IS_ERR_OR_NULL(fb)) {
-		ret = PTR_ERR(fb);
-		drm_gem_object_put(wb_enc->bo_disable[0]);
-		wb_enc->bo_disable[0] = NULL;
-
-		SDE_ERROR("failed to init fb, %d\n", ret);
-		return ret;
-	}
-
-	/* prepare the backing buffer now so that it's available later */
-	ret = msm_framebuffer_prepare(fb, aspace);
-	if (!ret)
-		wb_enc->fb_disable = fb;
-	return ret;
-}
-
-/**
- * _sde_encoder_phys_wb_destroy_internal_fb - deconstruct internal fb
- * @wb_enc:		Pointer to writeback encoder
- */
-static void _sde_encoder_phys_wb_destroy_internal_fb(
-		struct sde_encoder_phys_wb *wb_enc)
-{
-	if (!wb_enc)
-		return;
-
-	if (wb_enc->fb_disable) {
-		drm_framebuffer_unregister_private(wb_enc->fb_disable);
-		drm_framebuffer_remove(wb_enc->fb_disable);
-		wb_enc->fb_disable = NULL;
-	}
-
-	if (wb_enc->bo_disable[0]) {
-		drm_gem_object_put(wb_enc->bo_disable[0]);
-		wb_enc->bo_disable[0] = NULL;
-	}
-}
-
-/**
- * sde_encoder_phys_wb_enable - enable writeback encoder
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_enable(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct drm_device *dev;
-	struct drm_connector *connector;
-
-	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
-
-	if (!wb_enc->base.parent || !wb_enc->base.parent->dev) {
-		SDE_ERROR("invalid drm device\n");
-		return;
-	}
-	dev = wb_enc->base.parent->dev;
-
-	/* find associated writeback connector */
-	connector = phys_enc->connector;
-
-	if (!connector || connector->encoder != phys_enc->parent) {
-		SDE_ERROR("failed to find writeback connector\n");
-		return;
-	}
-	wb_enc->wb_dev = sde_wb_connector_get_wb(connector);
-
-	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	/*
-	 * cache the crtc in wb_enc on enable for duration of use case
-	 * for correctly servicing asynchronous irq events and timers
-	 */
-	wb_enc->crtc = phys_enc->parent->crtc;
-}
-
-/**
- * sde_encoder_phys_wb_disable - disable writeback encoder
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-
-	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
-
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR("encoder is already disabled\n");
-		return;
-	}
-
-	if (wb_enc->frame_count != wb_enc->kickoff_count) {
-		SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
-				hw_wb->idx - WB_0, wb_enc->frame_count,
-				wb_enc->kickoff_count);
-		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
-	}
-
-	if (!phys_enc->hw_ctl || !phys_enc->parent ||
-			!phys_enc->sde_kms || !wb_enc->fb_disable) {
-		SDE_DEBUG("invalid enc, skipping extra commit\n");
-		goto exit;
-	}
-
-	/* avoid reset frame for CWB */
-	if (phys_enc->in_clone_mode) {
-		_sde_encoder_phys_wb_setup_cwb(phys_enc, false);
-		phys_enc->in_clone_mode = false;
-		goto exit;
-	}
-
-	/* reset h/w before final flush */
-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
-
-	/*
-	 * New CTL reset sequence from 5.0 MDP onwards.
-	 * If has_3d_merge_reset is not set, legacy reset
-	 * sequence is executed.
-	 */
-	if (hw_wb->catalog->has_3d_merge_reset) {
-		sde_encoder_helper_phys_disable(phys_enc, wb_enc);
-		goto exit;
-	}
-
-	if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
-		goto exit;
-
-	phys_enc->enable_state = SDE_ENC_DISABLING;
-
-	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc, NULL);
-	sde_encoder_phys_wb_irq_ctrl(phys_enc, true);
-	if (phys_enc->hw_ctl->ops.trigger_flush)
-		phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
-
-	sde_encoder_helper_trigger_start(phys_enc);
-	sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
-	sde_encoder_phys_wb_irq_ctrl(phys_enc, false);
-exit:
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-	wb_enc->crtc = NULL;
-}
-
-/**
- * sde_encoder_phys_wb_get_hw_resources - get hardware resources
- * @phys_enc:	Pointer to physical encoder
- * @hw_res:	Pointer to encoder resources
- */
-static void sde_encoder_phys_wb_get_hw_resources(
-		struct sde_encoder_phys *phys_enc,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb;
-	struct drm_framebuffer *fb;
-	const struct sde_format *fmt = NULL;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	fb = sde_wb_connector_state_get_output_fb(conn_state);
-	if (fb) {
-		fmt = sde_get_sde_format_ext(fb->format->format, fb->modifier);
-		if (!fmt) {
-			SDE_ERROR("unsupported output pixel format:%d\n",
-					fb->format->format);
-			return;
-		}
-	}
-
-	hw_wb = wb_enc->hw_wb;
-	hw_res->wbs[hw_wb->idx - WB_0] = phys_enc->intf_mode;
-	hw_res->needs_cdm = fmt ? SDE_FORMAT_IS_YUV(fmt) : false;
-	SDE_DEBUG("[wb:%d] intf_mode=%d needs_cdm=%d\n", hw_wb->idx - WB_0,
-			hw_res->wbs[hw_wb->idx - WB_0],
-			hw_res->needs_cdm);
-}
-
-#ifdef CONFIG_DEBUG_FS
-/**
- * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
- * @phys_enc:		Pointer to physical encoder
- * @debugfs_root:	Pointer to virtual encoder's debugfs_root dir
- */
-static int sde_encoder_phys_wb_init_debugfs(
-		struct sde_encoder_phys *phys_enc, struct dentry *debugfs_root)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-
-	if (!phys_enc || !wb_enc->hw_wb || !debugfs_root)
-		return -EINVAL;
-
-	if (!debugfs_create_u32("wbdone_timeout", 0600,
-			debugfs_root, &wb_enc->wbdone_timeout)) {
-		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-#else
-static int sde_encoder_phys_wb_init_debugfs(
-		struct sde_encoder_phys *phys_enc, struct dentry *debugfs_root)
-{
-	return 0;
-}
-#endif
-
-static int sde_encoder_phys_wb_late_register(struct sde_encoder_phys *phys_enc,
-		struct dentry *debugfs_root)
-{
-	return sde_encoder_phys_wb_init_debugfs(phys_enc, debugfs_root);
-}
-
-/**
- * sde_encoder_phys_wb_destroy - destroy writeback encoder
- * @phys_enc:	Pointer to physical encoder
- */
-static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-
-	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
-
-	if (!phys_enc)
-		return;
-
-	_sde_encoder_phys_wb_destroy_internal_fb(wb_enc);
-
-	kfree(wb_enc);
-}
-
-/**
- * sde_encoder_phys_wb_init_ops - initialize writeback operations
- * @ops:	Pointer to encoder operation table
- */
-static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops)
-{
-	ops->late_register = sde_encoder_phys_wb_late_register;
-	ops->is_master = sde_encoder_phys_wb_is_master;
-	ops->mode_set = sde_encoder_phys_wb_mode_set;
-	ops->enable = sde_encoder_phys_wb_enable;
-	ops->disable = sde_encoder_phys_wb_disable;
-	ops->destroy = sde_encoder_phys_wb_destroy;
-	ops->atomic_check = sde_encoder_phys_wb_atomic_check;
-	ops->get_hw_resources = sde_encoder_phys_wb_get_hw_resources;
-	ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
-	ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
-	ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
-	ops->trigger_flush = sde_encoder_phys_wb_trigger_flush;
-	ops->trigger_start = sde_encoder_helper_trigger_start;
-	ops->hw_reset = sde_encoder_helper_hw_reset;
-	ops->irq_control = sde_encoder_phys_wb_irq_ctrl;
-}
-
-/**
- * sde_encoder_phys_wb_init - initialize writeback encoder
- * @init:	Pointer to init info structure with initialization params
- */
-struct sde_encoder_phys *sde_encoder_phys_wb_init(
-		struct sde_enc_phys_init_params *p)
-{
-	struct sde_encoder_phys *phys_enc;
-	struct sde_encoder_phys_wb *wb_enc;
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_encoder_irq *irq;
-	int ret = 0;
-
-	SDE_DEBUG("\n");
-
-	if (!p || !p->parent) {
-		SDE_ERROR("invalid params\n");
-		ret = -EINVAL;
-		goto fail_alloc;
-	}
-
-	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
-	if (!wb_enc) {
-		SDE_ERROR("failed to allocate wb enc\n");
-		ret = -ENOMEM;
-		goto fail_alloc;
-	}
-	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
-	init_completion(&wb_enc->wbdone_complete);
-
-	phys_enc = &wb_enc->base;
-
-	if (p->sde_kms->vbif[VBIF_NRT]) {
-		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
-			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
-			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
-	} else {
-		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
-			p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
-		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
-			p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
-	}
-
-	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
-	if (IS_ERR_OR_NULL(hw_mdp)) {
-		ret = PTR_ERR(hw_mdp);
-		SDE_ERROR("failed to init hw_top: %d\n", ret);
-		goto fail_mdp_init;
-	}
-	phys_enc->hw_mdptop = hw_mdp;
-
-	/**
-	 * hw_wb resource permanently assigned to this encoder
-	 * Other resources allocated at atomic commit time by use case
-	 */
-	if (p->wb_idx != SDE_NONE) {
-		struct sde_rm_hw_iter iter;
-
-		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB);
-		while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
-			struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw;
-
-			if (hw_wb->idx == p->wb_idx) {
-				wb_enc->hw_wb = hw_wb;
-				break;
-			}
-		}
-
-		if (!wb_enc->hw_wb) {
-			ret = -EINVAL;
-			SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0);
-			goto fail_wb_init;
-		}
-	} else {
-		ret = -EINVAL;
-		SDE_ERROR("invalid wb_idx\n");
-		goto fail_wb_check;
-	}
-
-	sde_encoder_phys_wb_init_ops(&phys_enc->ops);
-	phys_enc->parent = p->parent;
-	phys_enc->parent_ops = p->parent_ops;
-	phys_enc->sde_kms = p->sde_kms;
-	phys_enc->split_role = p->split_role;
-	phys_enc->intf_mode = INTF_MODE_WB_LINE;
-	phys_enc->intf_idx = p->intf_idx;
-	phys_enc->enc_spinlock = p->enc_spinlock;
-	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
-	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	atomic_set(&phys_enc->wbirq_refcount, 0);
-
-	irq = &phys_enc->irq[INTR_IDX_WB_DONE];
-	INIT_LIST_HEAD(&irq->cb.list);
-	irq->name = "wb_done";
-	irq->hw_idx =  wb_enc->hw_wb->idx;
-	irq->irq_idx = -1;
-	irq->intr_type = sde_encoder_phys_wb_get_intr_type(wb_enc->hw_wb);
-	irq->intr_idx = INTR_IDX_WB_DONE;
-	irq->cb.arg = wb_enc;
-	irq->cb.func = sde_encoder_phys_wb_done_irq;
-
-	irq = &phys_enc->irq[INTR_IDX_PP2_OVFL];
-	INIT_LIST_HEAD(&irq->cb.list);
-	irq->name = "pp2_overflow";
-	irq->hw_idx = CWB_2;
-	irq->irq_idx = -1;
-	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
-	irq->intr_idx = INTR_IDX_PP2_OVFL;
-	irq->cb.arg = wb_enc;
-	irq->cb.func = sde_encoder_phys_cwb_ovflow;
-
-	irq = &phys_enc->irq[INTR_IDX_PP3_OVFL];
-	INIT_LIST_HEAD(&irq->cb.list);
-	irq->name = "pp3_overflow";
-	irq->hw_idx = CWB_3;
-	irq->irq_idx = -1;
-	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
-	irq->intr_idx = INTR_IDX_PP3_OVFL;
-	irq->cb.arg = wb_enc;
-	irq->cb.func = sde_encoder_phys_cwb_ovflow;
-
-	irq = &phys_enc->irq[INTR_IDX_PP4_OVFL];
-	INIT_LIST_HEAD(&irq->cb.list);
-	irq->name = "pp4_overflow";
-	irq->hw_idx = CWB_4;
-	irq->irq_idx = -1;
-	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
-	irq->intr_idx = INTR_IDX_PP4_OVFL;
-	irq->cb.arg = wb_enc;
-	irq->cb.func = sde_encoder_phys_cwb_ovflow;
-
-	irq = &phys_enc->irq[INTR_IDX_PP5_OVFL];
-	INIT_LIST_HEAD(&irq->cb.list);
-	irq->name = "pp5_overflow";
-	irq->hw_idx = CWB_5;
-	irq->irq_idx = -1;
-	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
-	irq->intr_idx = INTR_IDX_PP5_OVFL;
-	irq->cb.arg = wb_enc;
-	irq->cb.func = sde_encoder_phys_cwb_ovflow;
-
-	/* create internal buffer for disable logic */
-	if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
-				DRM_FORMAT_RGB888, 2, 1, 6)) {
-		SDE_ERROR("failed to init internal fb\n");
-		goto fail_wb_init;
-	}
-
-	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
-			wb_enc->hw_wb->idx - WB_0);
-
-	return phys_enc;
-
-fail_wb_init:
-fail_wb_check:
-fail_mdp_init:
-	kfree(wb_enc);
-fail_alloc:
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
deleted file mode 100644
index dc9cc77..0000000
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ /dev/null
@@ -1,494 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/sync_file.h>
-#include <linux/dma-fence.h>
-#include "msm_drv.h"
-#include "sde_kms.h"
-#include "sde_fence.h"
-
-#define TIMELINE_VAL_LENGTH		128
-
-void *sde_sync_get(uint64_t fd)
-{
-	/* force signed compare, fdget accepts an int argument */
-	return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
-}
-
-void sde_sync_put(void *fence)
-{
-	if (fence)
-		dma_fence_put(fence);
-}
-
-signed long sde_sync_wait(void *fnc, long timeout_ms)
-{
-	struct dma_fence *fence = fnc;
-	int rc;
-	char timeline_str[TIMELINE_VAL_LENGTH];
-
-	if (!fence)
-		return -EINVAL;
-	else if (dma_fence_is_signaled(fence))
-		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;
-
-	rc = dma_fence_wait_timeout(fence, true,
-				msecs_to_jiffies(timeout_ms));
-	if (!rc || (rc == -EINVAL)) {
-		if (fence->ops->timeline_value_str)
-			fence->ops->timeline_value_str(fence,
-					timeline_str, TIMELINE_VAL_LENGTH);
-
-		SDE_ERROR(
-			"fence driver name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
-			fence->ops->get_driver_name(fence),
-			fence->ops->get_timeline_name(fence),
-			fence->seqno, timeline_str,
-			fence->ops->signaled ?
-				fence->ops->signaled(fence) : 0xffffffff);
-	}
-
-	return rc;
-}
-
-uint32_t sde_sync_get_name_prefix(void *fence)
-{
-	const char *name;
-	uint32_t i, prefix;
-	struct dma_fence *f = fence;
-
-	if (!fence)
-		return 0;
-
-	name = f->ops->get_driver_name(f);
-	if (!name)
-		return 0;
-
-	prefix = 0x0;
-	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
-		prefix = (prefix << CHAR_BIT) | name[i];
-
-	return prefix;
-}
-
-/**
- * struct sde_fence - release/retire fence structure
- * @fence: base fence structure
- * @name: name of each fence- it is fence timeline + commit_count
- * @fence_list: list to associated this fence on timeline/context
- * @fd: fd attached to this fence - debugging purpose.
- */
-struct sde_fence {
-	struct dma_fence base;
-	struct sde_fence_context *ctx;
-	char name[SDE_FENCE_NAME_SIZE];
-	struct list_head	fence_list;
-	int fd;
-};
-
-static void sde_fence_destroy(struct kref *kref)
-{
-	struct sde_fence_context *ctx;
-
-	if (!kref) {
-		SDE_ERROR("received invalid kref\n");
-		return;
-	}
-
-	ctx = container_of(kref, struct sde_fence_context, kref);
-	kfree(ctx);
-}
-
-static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
-{
-	return container_of(fence, struct sde_fence, base);
-}
-
-static const char *sde_fence_get_driver_name(struct dma_fence *fence)
-{
-	struct sde_fence *f = to_sde_fence(fence);
-
-	return f->name;
-}
-
-static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
-{
-	struct sde_fence *f = to_sde_fence(fence);
-
-	return f->ctx->name;
-}
-
-static bool sde_fence_enable_signaling(struct dma_fence *fence)
-{
-	return true;
-}
-
-static bool sde_fence_signaled(struct dma_fence *fence)
-{
-	struct sde_fence *f = to_sde_fence(fence);
-	bool status;
-
-	status = (int)((fence->seqno - f->ctx->done_count) <= 0);
-	SDE_DEBUG("status:%d fence seq:%d and timeline:%d\n",
-			status, fence->seqno, f->ctx->done_count);
-	return status;
-}
-
-static void sde_fence_release(struct dma_fence *fence)
-{
-	struct sde_fence *f;
-
-	if (fence) {
-		f = to_sde_fence(fence);
-		kref_put(&f->ctx->kref, sde_fence_destroy);
-		kfree(f);
-	}
-}
-
-static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
-	if (!fence || !str)
-		return;
-
-	snprintf(str, size, "%d", fence->seqno);
-}
-
-static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
-		int size)
-{
-	struct sde_fence *f = to_sde_fence(fence);
-
-	if (!fence || !f->ctx || !str)
-		return;
-
-	snprintf(str, size, "%d", f->ctx->done_count);
-}
-
-static struct dma_fence_ops sde_fence_ops = {
-	.get_driver_name = sde_fence_get_driver_name,
-	.get_timeline_name = sde_fence_get_timeline_name,
-	.enable_signaling = sde_fence_enable_signaling,
-	.signaled = sde_fence_signaled,
-	.wait = dma_fence_default_wait,
-	.release = sde_fence_release,
-	.fence_value_str = sde_fence_value_str,
-	.timeline_value_str = sde_fence_timeline_value_str,
-};
-
-/**
- * _sde_fence_create_fd - create fence object and return an fd for it
- * This function is NOT thread-safe.
- * @timeline: Timeline to associate with fence
- * @val: Timeline value at which to signal the fence
- * Return: File descriptor on success, or error code on error
- */
-static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
-{
-	struct sde_fence *sde_fence;
-	struct sync_file *sync_file;
-	signed int fd = -EINVAL;
-	struct sde_fence_context *ctx = fence_ctx;
-
-	if (!ctx) {
-		SDE_ERROR("invalid context\n");
-		goto exit;
-	}
-
-	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
-	if (!sde_fence)
-		return -ENOMEM;
-
-	sde_fence->ctx = fence_ctx;
-	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
-						sde_fence->ctx->name, val);
-	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
-		ctx->context, val);
-	kref_get(&ctx->kref);
-
-	/* create fd */
-	fd = get_unused_fd_flags(0);
-	if (fd < 0) {
-		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
-							sde_fence->name);
-		dma_fence_put(&sde_fence->base);
-		goto exit;
-	}
-
-	/* create fence */
-	sync_file = sync_file_create(&sde_fence->base);
-	if (sync_file == NULL) {
-		put_unused_fd(fd);
-		fd = -EINVAL;
-		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
-		dma_fence_put(&sde_fence->base);
-		goto exit;
-	}
-
-	fd_install(fd, sync_file->file);
-	sde_fence->fd = fd;
-
-	spin_lock(&ctx->list_lock);
-	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
-	spin_unlock(&ctx->list_lock);
-
-exit:
-	return fd;
-}
-
-struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
-{
-	struct sde_fence_context *ctx;
-
-	if (!name) {
-		SDE_ERROR("invalid argument(s)\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-
-	if (!ctx) {
-		SDE_ERROR("failed to alloc fence ctx\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
-	ctx->drm_id = drm_id;
-	kref_init(&ctx->kref);
-	ctx->context = dma_fence_context_alloc(1);
-
-	spin_lock_init(&ctx->lock);
-	spin_lock_init(&ctx->list_lock);
-	INIT_LIST_HEAD(&ctx->fence_list_head);
-
-	return ctx;
-}
-
-void sde_fence_deinit(struct sde_fence_context *ctx)
-{
-	if (!ctx) {
-		SDE_ERROR("invalid fence\n");
-		return;
-	}
-
-	kref_put(&ctx->kref, sde_fence_destroy);
-}
-
-void sde_fence_prepare(struct sde_fence_context *ctx)
-{
-	unsigned long flags;
-
-	if (!ctx) {
-		SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
-	} else {
-		spin_lock_irqsave(&ctx->lock, flags);
-		++ctx->commit_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
-	}
-}
-
-static void _sde_fence_trigger(struct sde_fence_context *ctx,
-		ktime_t ts, bool error)
-{
-	unsigned long flags;
-	struct sde_fence *fc, *next;
-	bool is_signaled = false;
-
-	kref_get(&ctx->kref);
-
-	spin_lock(&ctx->list_lock);
-	if (list_empty(&ctx->fence_list_head)) {
-		SDE_DEBUG("nothing to trigger!\n");
-		goto end;
-	}
-
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
-		spin_lock_irqsave(&ctx->lock, flags);
-		fc->base.error = error ? -EBUSY : 0;
-		fc->base.timestamp = ts;
-		is_signaled = dma_fence_is_signaled_locked(&fc->base);
-		spin_unlock_irqrestore(&ctx->lock, flags);
-
-		if (is_signaled) {
-			list_del_init(&fc->fence_list);
-			dma_fence_put(&fc->base);
-		}
-	}
-end:
-	spin_unlock(&ctx->list_lock);
-	kref_put(&ctx->kref, sde_fence_destroy);
-}
-
-int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
-							uint32_t offset)
-{
-	uint32_t trigger_value;
-	int fd = -1, rc = -EINVAL;
-	unsigned long flags;
-	struct sde_fence *fc;
-
-	if (!ctx || !val) {
-		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
-				ctx != NULL, val != NULL);
-		return rc;
-	}
-
-	/*
-	 * Allow created fences to have a constant offset with respect
-	 * to the timeline. This allows us to delay the fence signalling
-	 * w.r.t. the commit completion (e.g., an offset of +1 would
-	 * cause fences returned during a particular commit to signal
-	 * after an additional delay of one commit, rather than at the
-	 * end of the current one.
-	 */
-	spin_lock_irqsave(&ctx->lock, flags);
-	trigger_value = ctx->commit_count + offset;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-
-	spin_lock(&ctx->list_lock);
-	list_for_each_entry(fc, &ctx->fence_list_head, fence_list) {
-		if (trigger_value == fc->base.seqno) {
-			fd = fc->fd;
-			*val = fd;
-			break;
-		}
-	}
-	spin_unlock(&ctx->list_lock);
-
-	if (fd < 0) {
-		fd = _sde_fence_create_fd(ctx, trigger_value);
-		*val = fd;
-		SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
-				fd, trigger_value, ctx->commit_count, offset);
-	}
-
-	SDE_EVT32(ctx->drm_id, trigger_value, fd);
-	rc = (fd >= 0) ? 0 : fd;
-
-	return rc;
-}
-
-void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
-		enum sde_fence_event fence_event)
-{
-	unsigned long flags;
-
-	if (!ctx) {
-		SDE_ERROR("invalid ctx, %pK\n", ctx);
-		return;
-	}
-
-	spin_lock_irqsave(&ctx->lock, flags);
-	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
-		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
-			SDE_ERROR(
-				"timeline reset attempt! done count:%d commit:%d\n",
-				ctx->done_count, ctx->commit_count);
-			ctx->done_count = ctx->commit_count;
-			SDE_EVT32(ctx->drm_id, ctx->done_count,
-				ctx->commit_count, ktime_to_us(ts),
-				fence_event, SDE_EVTLOG_FATAL);
-		} else {
-			spin_unlock_irqrestore(&ctx->lock, flags);
-			return;
-		}
-	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
-		++ctx->done_count;
-		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
-					ctx->done_count, ctx->commit_count);
-	} else {
-		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
-					ctx->done_count, ctx->commit_count);
-		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
-			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
-		spin_unlock_irqrestore(&ctx->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&ctx->lock, flags);
-
-	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
-			ktime_to_us(ts));
-
-	_sde_fence_trigger(ctx, ts, (fence_event == SDE_FENCE_SIGNAL_ERROR));
-}
-
-void sde_fence_timeline_status(struct sde_fence_context *ctx,
-					struct drm_mode_object *drm_obj)
-{
-	char *obj_name;
-
-	if (!ctx || !drm_obj) {
-		SDE_ERROR("invalid input params\n");
-		return;
-	}
-
-	switch (drm_obj->type) {
-	case DRM_MODE_OBJECT_CRTC:
-		obj_name = "crtc";
-		break;
-	case DRM_MODE_OBJECT_CONNECTOR:
-		obj_name = "connector";
-		break;
-	default:
-		obj_name = "unknown";
-		break;
-	}
-
-	SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
-		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
-		ctx->commit_count);
-}
-
-void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
-{
-	char timeline_str[TIMELINE_VAL_LENGTH];
-
-	if (fence->ops->timeline_value_str)
-		fence->ops->timeline_value_str(fence,
-		timeline_str, TIMELINE_VAL_LENGTH);
-
-	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
-		fence->ops->get_driver_name(fence),
-		fence->ops->get_timeline_name(fence),
-		fence->seqno, timeline_str,
-		fence->ops->signaled ?
-		fence->ops->signaled(fence) : 0xffffffff);
-}
-
-void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
-		struct drm_mode_object *drm_obj, struct seq_file **s)
-{
-	char *obj_name;
-	struct sde_fence *fc, *next;
-	struct dma_fence *fence;
-
-	if (!ctx || !drm_obj) {
-		SDE_ERROR("invalid input params\n");
-		return;
-	}
-
-	switch (drm_obj->type) {
-	case DRM_MODE_OBJECT_CRTC:
-		obj_name = "crtc";
-		break;
-	case DRM_MODE_OBJECT_CONNECTOR:
-		obj_name = "connector";
-		break;
-	default:
-		obj_name = "unknown";
-		break;
-	}
-
-	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
-		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
-		ctx->commit_count);
-
-	spin_lock(&ctx->list_lock);
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
-		fence = &fc->base;
-		sde_fence_list_dump(fence, s);
-	}
-	spin_unlock(&ctx->list_lock);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
deleted file mode 100644
index 15f50ae..0000000
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_FENCE_H_
-#define _SDE_FENCE_H_
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-
-#ifndef CHAR_BIT
-#define CHAR_BIT 8 /* define this if limits.h not available */
-#endif
-
-#define SDE_FENCE_NAME_SIZE	24
-
-/**
- * struct sde_fence_context - release/retire fence context/timeline structure
- * @commit_count: Number of detected commits since bootup
- * @done_count: Number of completed commits since bootup
- * @drm_id: ID number of owning DRM Object
- * @ref: kref counter on timeline
- * @lock: spinlock for fence counter protection
- * @list_lock: spinlock for timeline protection
- * @context: fence context
- * @list_head: fence list to hold all the fence created on this context
- * @name: name of fence context/timeline
- */
-struct sde_fence_context {
-	unsigned int commit_count;
-	unsigned int done_count;
-	uint32_t drm_id;
-	struct kref kref;
-	spinlock_t lock;
-	spinlock_t list_lock;
-	u64 context;
-	struct list_head fence_list_head;
-	char name[SDE_FENCE_NAME_SIZE];
-};
-
-/**
- * enum sde_fence_event - sde fence event as hint fence operation
- * @SDE_FENCE_SIGNAL: Signal the fence cleanly with current timeline
- * @SDE_FENCE_RESET_TIMELINE: Reset timeline of the fence context
- * @SDE_FENCE_SIGNAL: Signal the fence but indicate error throughfence status
- */
-enum sde_fence_event {
-	SDE_FENCE_SIGNAL,
-	SDE_FENCE_RESET_TIMELINE,
-	SDE_FENCE_SIGNAL_ERROR
-};
-
-#if IS_ENABLED(CONFIG_SYNC_FILE)
-/**
- * sde_sync_get - Query sync fence object from a file handle
- *
- * On success, this function also increments the refcount of the sync fence
- *
- * @fd: Integer sync fence handle
- *
- * Return: Pointer to sync fence object, or NULL
- */
-void *sde_sync_get(uint64_t fd);
-
-/**
- * sde_sync_put - Releases a sync fence object acquired by @sde_sync_get
- *
- * This function decrements the sync fence's reference count; the object will
- * be released if the reference count goes to zero.
- *
- * @fence: Pointer to sync fence
- */
-void sde_sync_put(void *fence);
-
-/**
- * sde_sync_wait - Query sync fence object from a file handle
- *
- * @fence: Pointer to sync fence
- * @timeout_ms: Time to wait, in milliseconds. Waits forever if timeout_ms < 0
- *
- * Return:
- * Zero if timed out
- * -ERESTARTSYS if wait interrupted
- * remaining jiffies in all other success cases.
- */
-signed long sde_sync_wait(void *fence, long timeout_ms);
-
-/**
- * sde_sync_get_name_prefix - get integer representation of fence name prefix
- * @fence: Pointer to opaque fence structure
- *
- * Return: 32-bit integer containing first 4 characters of fence name,
- *         big-endian notation
- */
-uint32_t sde_sync_get_name_prefix(void *fence);
-
-/**
- * sde_fence_init - initialize fence object
- * @drm_id: ID number of owning DRM Object
- * @name: Timeline name
- * Returns: fence context object on success
- */
-struct sde_fence_context *sde_fence_init(const char *name,
-		uint32_t drm_id);
-
-/**
- * sde_fence_deinit - deinit fence container
- * @fence: Pointer fence container
- */
-void sde_fence_deinit(struct sde_fence_context *fence);
-
-/**
- * sde_fence_prepare - prepare to return fences for current commit
- * @fence: Pointer fence container
- * Returns: Zero on success
- */
-void sde_fence_prepare(struct sde_fence_context *fence);
-/**
- * sde_fence_create - create output fence object
- * @fence: Pointer fence container
- * @val: Pointer to output value variable, fence fd will be placed here
- * @offset: Fence signal commit offset, e.g., +1 to signal on next commit
- * Returns: Zero on success
- */
-int sde_fence_create(struct sde_fence_context *fence, uint64_t *val,
-							uint32_t offset);
-
-/**
- * sde_fence_signal - advance fence timeline to signal outstanding fences
- * @fence: Pointer fence container
- * @ts: fence timestamp
- * @fence_event: fence event to indicate nature of fence signal.
- */
-void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
-		enum sde_fence_event fence_event);
-
-/**
- * sde_fence_timeline_status - prints fence timeline status
- * @fence: Pointer fence container
- * @drm_obj Pointer to drm object associated with fence timeline
- */
-void sde_fence_timeline_status(struct sde_fence_context *ctx,
-					struct drm_mode_object *drm_obj);
-
-/**
- * sde_fence_timeline_dump - utility to dump fence list info in debugfs node
- * @fence: Pointer fence container
- * @drm_obj: Pointer to drm object associated with fence timeline
- * @s: used to writing on debugfs node
- */
-void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
-		struct drm_mode_object *drm_obj, struct seq_file **s);
-
-/**
- * sde_fence_timeline_status - dumps fence timeline in debugfs node
- * @fence: Pointer fence container
- * @s: used to writing on debugfs node
- */
-void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s);
-
-#else
-static inline void *sde_sync_get(uint64_t fd)
-{
-	return NULL;
-}
-
-static inline void sde_sync_put(void *fence)
-{
-}
-
-static inline signed long sde_sync_wait(void *fence, long timeout_ms)
-{
-	return 0;
-}
-
-static inline uint32_t sde_sync_get_name_prefix(void *fence)
-{
-	return 0x0;
-}
-
-static inline struct sde_fence_context *sde_fence_init(const char *name,
-		uint32_t drm_id)
-{
-	/* do nothing */
-	return NULL;
-}
-
-static inline void sde_fence_deinit(struct sde_fence_context *fence)
-{
-	/* do nothing */
-}
-
-static inline int sde_fence_get(struct sde_fence_context *fence, uint64_t *val)
-{
-	return -EINVAL;
-}
-
-static inline void sde_fence_signal(struct sde_fence_context *fence,
-						ktime_t ts, bool reset_timeline)
-{
-	/* do nothing */
-}
-
-static inline void sde_fence_prepare(struct sde_fence_context *fence)
-{
-	/* do nothing */
-}
-
-static inline int sde_fence_create(struct sde_fence_context *fence,
-						uint64_t *val, uint32_t offset)
-{
-	return 0;
-}
-
-static inline void sde_fence_timeline_status(struct sde_fence_context *ctx,
-					struct drm_mode_object *drm_obj);
-{
-	/* do nothing */
-}
-
-void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
-		struct drm_mode_object *drm_obj, struct seq_file **s)
-{
-	/* do nothing */
-}
-
-void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
-{
-	/* do nothing */
-}
-
-#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-
-#endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
deleted file mode 100644
index 19206f4..0000000
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ /dev/null
@@ -1,1382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <uapi/drm/drm_fourcc.h>
-#include <uapi/media/msm_media_info.h>
-
-#include "sde_kms.h"
-#include "sde_formats.h"
-
-#define SDE_UBWC_META_MACRO_W_H		16
-#define SDE_UBWC_META_BLOCK_SIZE	256
-#define SDE_UBWC_PLANE_SIZE_ALIGNMENT	4096
-
-#define SDE_TILE_HEIGHT_DEFAULT	1
-#define SDE_TILE_HEIGHT_TILED	4
-#define SDE_TILE_HEIGHT_UBWC	4
-#define SDE_TILE_HEIGHT_NV12	8
-
-#define SDE_MAX_IMG_WIDTH		0x3FFF
-#define SDE_MAX_IMG_HEIGHT		0x3FFF
-
-/**
- * SDE supported format packing, bpp, and other format
- * information.
- * SDE currently only supports interleaved RGB formats
- * UBWC support for a pixel format is indicated by the flag,
- * there is additional meta data plane for such formats
- */
-
-#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha,   \
-bp, flg, fm, np)                                                          \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
-	.alpha_enable = alpha,                                            \
-	.element = { (e0), (e1), (e2), (e3) },                            \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = SDE_CHROMA_RGB,                                  \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = uc,                                               \
-	.bpp = bp,                                                        \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
-}
-
-#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc,    \
-alpha, bp, flg, fm, np, th)                                               \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
-	.alpha_enable = alpha,                                            \
-	.element = { (e0), (e1), (e2), (e3) },                            \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = SDE_CHROMA_RGB,                                  \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = uc,                                               \
-	.bpp = bp,                                                        \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = th                                                 \
-}
-
-
-#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3,              \
-alpha, chroma, count, bp, flg, fm, np)                                    \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_INTERLEAVED,                            \
-	.alpha_enable = alpha,                                            \
-	.element = { (e0), (e1), (e2), (e3)},                             \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = count,                                            \
-	.bpp = bp,                                                        \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
-}
-
-#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
-	.alpha_enable = false,                                            \
-	.element = { (e0), (e1), 0, 0 },                                  \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = 2,                                                \
-	.bpp = 2,                                                         \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
-}
-
-#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma,             \
-flg, fm, np, th)                                                          \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
-	.alpha_enable = false,                                            \
-	.element = { (e0), (e1), 0, 0 },                                  \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = 2,                                                \
-	.bpp = 2,                                                         \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = th                                                 \
-}
-
-#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
-	.alpha_enable = false,                                            \
-	.element = { (e0), (e1), 0, 0 },                                  \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 1,                                            \
-	.unpack_tight = 0,                                                \
-	.unpack_count = 2,                                                \
-	.bpp = 2,                                                         \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
-}
-
-#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma,       \
-flg, fm, np, th)                                                          \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
-	.alpha_enable = false,                                            \
-	.element = { (e0), (e1), 0, 0 },                                  \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 1,                                            \
-	.unpack_tight = 0,                                                \
-	.unpack_count = 2,                                                \
-	.bpp = 2,                                                         \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = th                                                 \
-}
-
-
-#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp,    \
-flg, fm, np)                                                      \
-{                                                                         \
-	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
-	.fetch_planes = SDE_PLANE_PLANAR,                                 \
-	.alpha_enable = alpha,                                            \
-	.element = { (e0), (e1), (e2), 0 },                               \
-	.bits = { g, b, r, a },                                           \
-	.chroma_sample = chroma,                                          \
-	.unpack_align_msb = 0,                                            \
-	.unpack_tight = 1,                                                \
-	.unpack_count = 1,                                                \
-	.bpp = bp,                                                        \
-	.fetch_mode = fm,                                                 \
-	.flag = {(flg)},                                                  \
-	.num_planes = np,                                                 \
-	.tile_height = SDE_TILE_HEIGHT_DEFAULT                            \
-}
-
-/*
- * struct sde_media_color_map - maps drm format to media format
- * @format: DRM base pixel format
- * @color: Media API color related to DRM format
- */
-struct sde_media_color_map {
-	uint32_t format;
-	uint32_t color;
-};
-
-static const struct sde_format sde_format_map[] = {
-	INTERLEAVED_RGB_FMT(ARGB8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		true, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ABGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XBGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBA8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		true, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRA8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		true, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRX8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		false, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XRGB8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		false, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBX8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		false, 4, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGB888,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
-		false, 3, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGR888,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
-		false, 3, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGB565,
-		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGR565,
-		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ARGB1555,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ABGR1555,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBA5551,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRA5551,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XRGB1555,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XBGR1555,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBX5551,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRX5551,
-		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ARGB4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ABGR4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBA4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRA4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		true, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XRGB4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XBGR4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBX4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRX4444,
-		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		false, 2, 0,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRA1010102,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBA1010102,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ABGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(ARGB2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XRGB2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		false, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(BGRX1010102,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		false, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(XBGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	INTERLEAVED_RGB_FMT(RGBX1010102,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		false, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_LINEAR, 1),
-
-	PSEUDO_YUV_FMT(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	PSEUDO_YUV_FMT(NV21,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C1_B_Cb,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	PSEUDO_YUV_FMT(NV16,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	PSEUDO_YUV_FMT(NV61,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C1_B_Cb,
-		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	INTERLEAVED_YUV_FMT(VYUY,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
-		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	INTERLEAVED_YUV_FMT(UYVY,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
-		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	INTERLEAVED_YUV_FMT(YUYV,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
-		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	INTERLEAVED_YUV_FMT(YVYU,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
-		false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 2),
-
-	PLANAR_YUV_FMT(YUV420,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C1_B_Cb, C0_G_Y,
-		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 3),
-
-	PLANAR_YUV_FMT(YVU420,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr, C0_G_Y,
-		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_LINEAR, 3),
-};
-
-/*
- * A5x tile formats tables:
- * These tables hold the A5x tile formats supported.
- */
-static const struct sde_format sde_format_map_tile[] = {
-	INTERLEAVED_RGB_FMT_TILED(BGR565,
-		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
-		false, 2, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(ARGB8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		true, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
-		true, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(RGBA8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(BGRA8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		true, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(BGRX8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
-		false, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(XRGB8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
-		false, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(RGBX8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 4, 0,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX,
-		SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
-
-	PSEUDO_YUV_FMT_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
-
-	PSEUDO_YUV_FMT_TILED(NV21,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C1_B_Cb,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
-};
-
-static const struct sde_format sde_format_map_p010_tile[] = {
-	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
-};
-
-static const struct sde_format sde_format_map_tp10_tile[] = {
-	PSEUDO_YUV_FMT_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
-};
-
-/*
- * UBWC formats table:
- * This table holds the UBWC formats supported.
- * If a compression ratio needs to be used for this or any other format,
- * the data will be passed by user-space.
- */
-static const struct sde_format sde_format_map_ubwc[] = {
-	INTERLEAVED_RGB_FMT_TILED(BGR565,
-		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
-		false, 2, SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
-
-	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
-
-	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		false, 4, SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
-
-	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
-
-	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
-		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-		true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
-
-	PSEUDO_YUV_FMT_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV |
-				SDE_FORMAT_FLAG_COMPRESSED,
-		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
-};
-
-static const struct sde_format sde_format_map_p010[] = {
-	PSEUDO_YUV_FMT_LOOSE(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
-		SDE_FETCH_LINEAR, 2),
-};
-
-static const struct sde_format sde_format_map_p010_ubwc[] = {
-	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
-				SDE_FORMAT_FLAG_COMPRESSED),
-		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
-};
-
-static const struct sde_format sde_format_map_tp10_ubwc[] = {
-	PSEUDO_YUV_FMT_TILED(NV12,
-		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr,
-		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
-				SDE_FORMAT_FLAG_COMPRESSED),
-		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
-};
-
-bool sde_format_is_tp10_ubwc(const struct sde_format *fmt)
-{
-	if (SDE_FORMAT_IS_YUV(fmt) && SDE_FORMAT_IS_DX(fmt) &&
-			SDE_FORMAT_IS_UBWC(fmt) &&
-			(fmt->num_planes == 4) && fmt->unpack_tight)
-		return true;
-	else
-		return false;
-}
-
-/* _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support
- *   Note: Not using the drm_format_*_subsampling since we have formats
- */
-static void _sde_get_v_h_subsample_rate(
-	enum sde_chroma_samp_type chroma_sample,
-	uint32_t *v_sample,
-	uint32_t *h_sample)
-{
-	if (!v_sample || !h_sample)
-		return;
-
-	switch (chroma_sample) {
-	case SDE_CHROMA_H2V1:
-		*v_sample = 1;
-		*h_sample = 2;
-		break;
-	case SDE_CHROMA_H1V2:
-		*v_sample = 2;
-		*h_sample = 1;
-		break;
-	case SDE_CHROMA_420:
-		*v_sample = 2;
-		*h_sample = 2;
-		break;
-	default:
-		*v_sample = 1;
-		*h_sample = 1;
-		break;
-	}
-}
-
-static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt)
-{
-	static const struct sde_media_color_map sde_media_ubwc_map[] = {
-		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
-		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
-		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
-		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
-		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
-	};
-	int color_fmt = -1;
-	int i;
-
-	if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
-		if (SDE_FORMAT_IS_DX(fmt)) {
-			if (fmt->unpack_tight)
-				color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
-			else
-				color_fmt = COLOR_FMT_P010_UBWC;
-		} else
-			color_fmt = COLOR_FMT_NV12_UBWC;
-		return color_fmt;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(sde_media_ubwc_map); ++i)
-		if (fmt->base.pixel_format == sde_media_ubwc_map[i].format) {
-			color_fmt = sde_media_ubwc_map[i].color;
-			break;
-		}
-	return color_fmt;
-}
-
-static int _sde_format_get_plane_sizes_ubwc(
-		const struct sde_format *fmt,
-		const uint32_t width,
-		const uint32_t height,
-		struct sde_hw_fmt_layout *layout)
-{
-	int i;
-	int color;
-	bool meta = SDE_FORMAT_IS_UBWC(fmt);
-
-	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
-	layout->format = fmt;
-	layout->width = width;
-	layout->height = height;
-	layout->num_planes = fmt->num_planes;
-
-	color = _sde_format_get_media_color_ubwc(fmt);
-	if (color < 0) {
-		DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
-			(char *)&fmt->base.pixel_format);
-		return -EINVAL;
-	}
-
-	if (SDE_FORMAT_IS_YUV(layout->format)) {
-		uint32_t y_sclines, uv_sclines;
-		uint32_t y_meta_scanlines = 0;
-		uint32_t uv_meta_scanlines = 0;
-
-		layout->num_planes = 2;
-		layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
-		y_sclines = VENUS_Y_SCANLINES(color, height);
-		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
-			y_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-
-		layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
-		uv_sclines = VENUS_UV_SCANLINES(color, height);
-		layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
-			uv_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-
-		if (!meta)
-			goto done;
-
-		layout->num_planes += 2;
-		layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
-		y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
-		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
-			y_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-
-		layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
-		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
-		layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
-			uv_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-
-	} else {
-		uint32_t rgb_scanlines, rgb_meta_scanlines;
-
-		layout->num_planes = 1;
-
-		layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
-		rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
-		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
-			rgb_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-
-		if (!meta)
-			goto done;
-		layout->num_planes += 2;
-		layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
-		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
-		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
-			rgb_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
-	}
-
-done:
-	for (i = 0; i < SDE_MAX_PLANES; i++)
-		layout->total_size += layout->plane_size[i];
-
-	return 0;
-}
-
-static int _sde_format_get_plane_sizes_linear(
-		const struct sde_format *fmt,
-		const uint32_t width,
-		const uint32_t height,
-		struct sde_hw_fmt_layout *layout,
-		const uint32_t *pitches)
-{
-	int i;
-
-	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
-	layout->format = fmt;
-	layout->width = width;
-	layout->height = height;
-	layout->num_planes = fmt->num_planes;
-
-	/* Due to memset above, only need to set planes of interest */
-	if (fmt->fetch_planes == SDE_PLANE_INTERLEAVED) {
-		layout->num_planes = 1;
-		layout->plane_size[0] = width * height * layout->format->bpp;
-		layout->plane_pitch[0] = width * layout->format->bpp;
-	} else {
-		uint32_t v_subsample, h_subsample;
-		uint32_t chroma_samp;
-		uint32_t bpp = 1;
-
-		chroma_samp = fmt->chroma_sample;
-		_sde_get_v_h_subsample_rate(chroma_samp, &v_subsample,
-				&h_subsample);
-
-		if (width % h_subsample || height % v_subsample) {
-			DRM_ERROR("mismatch in subsample vs dimensions\n");
-			return -EINVAL;
-		}
-
-		if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
-			(SDE_FORMAT_IS_DX(fmt)))
-			bpp = 2;
-		layout->plane_pitch[0] = width * bpp;
-		layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
-		layout->plane_size[0] = layout->plane_pitch[0] * height;
-		layout->plane_size[1] = layout->plane_pitch[1] *
-				(height / v_subsample);
-
-		if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
-			layout->num_planes = 2;
-			layout->plane_size[1] *= 2;
-			layout->plane_pitch[1] *= 2;
-		} else {
-			/* planar */
-			layout->num_planes = 3;
-			layout->plane_size[2] = layout->plane_size[1];
-			layout->plane_pitch[2] = layout->plane_pitch[1];
-		}
-	}
-
-	/*
-	 * linear format: allow user allocated pitches if they are greater than
-	 * the requirement.
-	 * ubwc format: pitch values are computed uniformly across
-	 * all the components based on ubwc specifications.
-	 */
-	for (i = 0; i < layout->num_planes && i < SDE_MAX_PLANES; ++i) {
-		if (pitches && layout->plane_pitch[i] < pitches[i])
-			layout->plane_pitch[i] = pitches[i];
-	}
-
-	for (i = 0; i < SDE_MAX_PLANES; i++)
-		layout->total_size += layout->plane_size[i];
-
-	return 0;
-}
-
-int sde_format_get_plane_sizes(
-		const struct sde_format *fmt,
-		const uint32_t w,
-		const uint32_t h,
-		struct sde_hw_fmt_layout *layout,
-		const uint32_t *pitches)
-{
-	if (!layout || !fmt) {
-		DRM_ERROR("invalid pointer\n");
-		return -EINVAL;
-	}
-
-	if ((w > SDE_MAX_IMG_WIDTH) || (h > SDE_MAX_IMG_HEIGHT)) {
-		DRM_ERROR("image dimensions outside max range\n");
-		return -ERANGE;
-	}
-
-	if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
-		return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
-
-	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
-}
-
-int sde_format_get_block_size(const struct sde_format *fmt,
-		uint32_t *w, uint32_t *h)
-{
-	if (!fmt || !w || !h) {
-		DRM_ERROR("invalid pointer\n");
-		return -EINVAL;
-	}
-
-	/* TP10 is 96x96 and all others are 128x128 */
-	if (SDE_FORMAT_IS_YUV(fmt) && SDE_FORMAT_IS_DX(fmt) &&
-			(fmt->num_planes == 2) && fmt->unpack_tight)
-		*w = *h = 96;
-	else
-		*w = *h = 128;
-
-	return 0;
-}
-
-uint32_t sde_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifier)
-{
-	const struct sde_format *fmt;
-	struct sde_hw_fmt_layout layout;
-
-	fmt = sde_get_sde_format_ext(format, modifier);
-	if (!fmt)
-		return 0;
-
-	if (!pitches)
-		return -EINVAL;
-
-	if (sde_format_get_plane_sizes(fmt, width, height, &layout, pitches))
-		layout.total_size = 0;
-
-	return layout.total_size;
-}
-
-static int _sde_format_populate_addrs_ubwc(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_hw_fmt_layout *layout)
-{
-	uint32_t base_addr;
-	bool meta;
-
-	if (!fb || !layout) {
-		DRM_ERROR("invalid pointers\n");
-		return -EINVAL;
-	}
-
-	if (aspace)
-		base_addr = msm_framebuffer_iova(fb, aspace, 0);
-	else
-		base_addr = msm_framebuffer_phys(fb, 0);
-	if (!base_addr) {
-		DRM_ERROR("failed to retrieve base addr\n");
-		return -EFAULT;
-	}
-
-	meta = SDE_FORMAT_IS_UBWC(layout->format);
-
-	/* Per-format logic for verifying active planes */
-	if (SDE_FORMAT_IS_YUV(layout->format)) {
-		/************************************************/
-		/*      UBWC            **                      */
-		/*      buffer          **      SDE PLANE       */
-		/*      format          **                      */
-		/************************************************/
-		/* -------------------  ** -------------------- */
-		/* |      Y meta     |  ** |    Y bitstream   | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |   Cbcr metadata |  ** |       Y meta     | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |  CbCr bitstream |  ** |     CbCr meta    | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/************************************************/
-
-		/* configure Y bitstream plane */
-		layout->plane_addr[0] = base_addr + layout->plane_size[2];
-
-		/* configure CbCr bitstream plane */
-		layout->plane_addr[1] = base_addr + layout->plane_size[0]
-			+ layout->plane_size[2] + layout->plane_size[3];
-
-		if (!meta)
-			goto done;
-
-		/* configure Y metadata plane */
-		layout->plane_addr[2] = base_addr;
-
-		/* configure CbCr metadata plane */
-		layout->plane_addr[3] = base_addr + layout->plane_size[0]
-			+ layout->plane_size[2];
-
-	} else {
-		/************************************************/
-		/*      UBWC            **                      */
-		/*      buffer          **      SDE PLANE       */
-		/*      format          **                      */
-		/************************************************/
-		/* -------------------  ** -------------------- */
-		/* |      RGB meta   |  ** |   RGB bitstream  | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |  RGB bitstream  |  ** |       NONE       | */
-		/* |       data      |  ** |                  | */
-		/* -------------------  ** -------------------- */
-		/*                      ** |     RGB meta     | */
-		/*                      ** |       plane      | */
-		/*                      ** -------------------- */
-		/************************************************/
-
-		layout->plane_addr[0] = base_addr + layout->plane_size[2];
-		layout->plane_addr[1] = 0;
-
-		if (!meta)
-			goto done;
-
-		layout->plane_addr[2] = base_addr;
-		layout->plane_addr[3] = 0;
-	}
-done:
-	return 0;
-}
-
-static int _sde_format_populate_addrs_linear(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_hw_fmt_layout *layout)
-{
-	unsigned int i;
-
-	/* Can now check the pitches given vs pitches expected */
-	for (i = 0; i < layout->num_planes; ++i) {
-		if (layout->plane_pitch[i] > fb->pitches[i]) {
-			DRM_ERROR("plane %u expected pitch %u, fb %u\n",
-				i, layout->plane_pitch[i], fb->pitches[i]);
-			return -EINVAL;
-		}
-	}
-
-	/* Populate addresses for simple formats here */
-	for (i = 0; i < layout->num_planes; ++i) {
-		if (aspace)
-			layout->plane_addr[i] =
-				msm_framebuffer_iova(fb, aspace, i);
-		else
-			layout->plane_addr[i] = msm_framebuffer_phys(fb, i);
-		if (!layout->plane_addr[i]) {
-			DRM_ERROR("failed to retrieve base addr\n");
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-int sde_format_populate_layout(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_hw_fmt_layout *layout)
-{
-	uint32_t plane_addr[SDE_MAX_PLANES];
-	int i, ret;
-
-	if (!fb || !layout) {
-		DRM_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	if ((fb->width > SDE_MAX_IMG_WIDTH) ||
-			(fb->height > SDE_MAX_IMG_HEIGHT)) {
-		DRM_ERROR("image dimensions outside max range\n");
-		return -ERANGE;
-	}
-
-	layout->format = to_sde_format(msm_framebuffer_format(fb));
-
-	/* Populate the plane sizes etc via get_format */
-	ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
-			layout, fb->pitches);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < SDE_MAX_PLANES; ++i)
-		plane_addr[i] = layout->plane_addr[i];
-
-	/* Populate the addresses given the fb */
-	if (SDE_FORMAT_IS_UBWC(layout->format) ||
-			SDE_FORMAT_IS_TILE(layout->format))
-		ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
-	else
-		ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
-
-	/* check if anything changed */
-	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
-		ret = -EAGAIN;
-
-	return ret;
-}
-
-static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
-		u32 x, u32 y)
-{
-	if ((x == 0) && (y == 0))
-		return;
-
-	source->plane_addr[0] += y * source->plane_pitch[0];
-
-	if (source->num_planes == 1) {
-		source->plane_addr[0] += x * source->format->bpp;
-	} else {
-		uint32_t xoff, yoff;
-		uint32_t v_subsample = 1;
-		uint32_t h_subsample = 1;
-
-		_sde_get_v_h_subsample_rate(source->format->chroma_sample,
-				&v_subsample, &h_subsample);
-
-		xoff = x / h_subsample;
-		yoff = y / v_subsample;
-
-		source->plane_addr[0] += x;
-		source->plane_addr[1] += xoff +
-				(yoff * source->plane_pitch[1]);
-		if (source->num_planes == 2) /* pseudo planar */
-			source->plane_addr[1] += xoff;
-		else /* planar */
-			source->plane_addr[2] += xoff +
-				(yoff * source->plane_pitch[2]);
-	}
-}
-
-int sde_format_populate_layout_with_roi(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_rect *roi,
-		struct sde_hw_fmt_layout *layout)
-{
-	int ret;
-
-	ret = sde_format_populate_layout(aspace, fb, layout);
-	if (ret || !roi)
-		return ret;
-
-	if (!roi->w || !roi->h || (roi->x + roi->w > fb->width) ||
-			(roi->y + roi->h > fb->height)) {
-		DRM_ERROR("invalid roi=[%d,%d,%d,%d], fb=[%u,%u]\n",
-				roi->x, roi->y, roi->w, roi->h,
-				fb->width, fb->height);
-		ret = -EINVAL;
-	} else if (SDE_FORMAT_IS_LINEAR(layout->format)) {
-		_sde_format_calc_offset_linear(layout, roi->x, roi->y);
-		layout->width = roi->w;
-		layout->height = roi->h;
-	} else if (roi->x || roi->y || (roi->w != fb->width) ||
-			(roi->h != fb->height)) {
-		DRM_ERROR("non-linear layout with roi not supported\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-int sde_format_check_modified_format(
-		const struct msm_kms *kms,
-		const struct msm_format *msm_fmt,
-		const struct drm_mode_fb_cmd2 *cmd,
-		struct drm_gem_object **bos)
-{
-	int ret, i, num_base_fmt_planes;
-	const struct sde_format *fmt;
-	struct sde_hw_fmt_layout layout;
-	uint32_t bos_total_size = 0;
-
-	if (!msm_fmt || !cmd || !bos) {
-		DRM_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	fmt = to_sde_format(msm_fmt);
-	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
-
-	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
-			&layout, cmd->pitches);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < num_base_fmt_planes; i++) {
-		if (!bos[i]) {
-			DRM_ERROR("invalid handle for plane %d\n", i);
-			return -EINVAL;
-		}
-		if ((i == 0) || (bos[i] != bos[0]))
-			bos_total_size += bos[i]->size;
-	}
-
-	if (bos_total_size < layout.total_size) {
-		DRM_ERROR("buffers total size too small %u expected %u\n",
-				bos_total_size, layout.total_size);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-const struct sde_format *sde_get_sde_format_ext(
-		const uint32_t format,
-		const uint64_t modifier)
-{
-	uint32_t i = 0;
-	const struct sde_format *fmt = NULL;
-	const struct sde_format *map = NULL;
-	ssize_t map_size = 0;
-
-	/*
-	 * Currently only support exactly zero or one modifier.
-	 * All planes use the same modifier.
-	 */
-	SDE_DEBUG("plane format modifier 0x%llX\n", modifier);
-
-	switch (modifier) {
-	case 0:
-		map = sde_format_map;
-		map_size = ARRAY_SIZE(sde_format_map);
-		break;
-	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
-	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
-		map = sde_format_map_ubwc;
-		map_size = ARRAY_SIZE(sde_format_map_ubwc);
-		SDE_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
-				(char *)&format);
-		break;
-	case DRM_FORMAT_MOD_QCOM_DX:
-		map = sde_format_map_p010;
-		map_size = ARRAY_SIZE(sde_format_map_p010);
-		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_DX\n",
-				(char *)&format);
-		break;
-	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
-	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
-			DRM_FORMAT_MOD_QCOM_TILE):
-		map = sde_format_map_p010_ubwc;
-		map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
-		SDE_DEBUG(
-			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
-				(char *)&format);
-		break;
-	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
-		DRM_FORMAT_MOD_QCOM_TIGHT):
-	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
-		DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE):
-		map = sde_format_map_tp10_ubwc;
-		map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
-		SDE_DEBUG(
-			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
-				(char *)&format);
-		break;
-	case DRM_FORMAT_MOD_QCOM_TILE:
-		map = sde_format_map_tile;
-		map_size = ARRAY_SIZE(sde_format_map_tile);
-		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE\n",
-				(char *)&format);
-		break;
-	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
-		map = sde_format_map_p010_tile;
-		map_size = ARRAY_SIZE(sde_format_map_p010_tile);
-		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX\n",
-				(char *)&format);
-		break;
-	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
-			DRM_FORMAT_MOD_QCOM_TIGHT):
-		map = sde_format_map_tp10_tile;
-		map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
-		SDE_DEBUG(
-			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
-				(char *)&format);
-		break;
-	default:
-		SDE_ERROR("unsupported format modifier %llX\n", modifier);
-		return NULL;
-	}
-
-	for (i = 0; i < map_size; i++) {
-		if (format == map[i].base.pixel_format) {
-			fmt = &map[i];
-			break;
-		}
-	}
-
-	if (fmt == NULL)
-		SDE_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
-				(char *)&format, modifier);
-	else
-		SDE_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
-				(char *)&format, modifier,
-				SDE_FORMAT_IS_UBWC(fmt),
-				SDE_FORMAT_IS_YUV(fmt));
-
-	return fmt;
-}
-
-const struct msm_format *sde_get_msm_format(
-		struct msm_kms *kms,
-		const uint32_t format,
-		const uint64_t modifier)
-{
-	const struct sde_format *fmt = sde_get_sde_format_ext(format,
-			modifier);
-	if (fmt)
-		return &fmt->base;
-	return NULL;
-}
-
-uint32_t sde_populate_formats(
-		const struct sde_format_extended *format_list,
-		uint32_t *pixel_formats,
-		uint64_t *pixel_modifiers,
-		uint32_t pixel_formats_max)
-{
-	uint32_t i, fourcc_format;
-
-	if (!format_list || !pixel_formats)
-		return 0;
-
-	for (i = 0, fourcc_format = 0;
-			format_list->fourcc_format && i < pixel_formats_max;
-			++format_list) {
-		/* verify if listed format is in sde_format_map? */
-
-		/* optionally return modified formats */
-		if (pixel_modifiers) {
-			/* assume same modifier for all fb planes */
-			pixel_formats[i] = format_list->fourcc_format;
-			pixel_modifiers[i++] = format_list->modifier;
-		} else {
-			/* assume base formats grouped together */
-			if (fourcc_format != format_list->fourcc_format) {
-				fourcc_format = format_list->fourcc_format;
-				pixel_formats[i++] = fourcc_format;
-			}
-		}
-	}
-
-	return i;
-}
-
-int sde_format_validate_fmt(struct msm_kms *kms,
-	const struct sde_format *sde_fmt,
-	const struct sde_format_extended *fmt_list)
-{
-	const struct sde_format *fmt_tmp;
-	bool valid_format = false;
-	int ret = 0;
-
-	if (!sde_fmt || !fmt_list) {
-		SDE_ERROR("invalid fmt:%d list:%d\n",
-			!sde_fmt, !fmt_list);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	while (fmt_list->fourcc_format) {
-		fmt_tmp = sde_get_sde_format_ext(fmt_list->fourcc_format,
-					fmt_list->modifier);
-		if (fmt_tmp
-		  && (fmt_tmp->base.pixel_format == sde_fmt->base.pixel_format)
-		  && (fmt_tmp->fetch_mode == sde_fmt->fetch_mode)) {
-			valid_format = true;
-			break;
-		}
-		++fmt_list;
-	}
-
-	if (!valid_format) {
-		SDE_ERROR("fmt:%d mode:%d not found within the list!\n",
-			sde_fmt->base.pixel_format, sde_fmt->fetch_mode);
-		ret = -EINVAL;
-	}
-exit:
-	return ret;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
deleted file mode 100644
index 3c47172..0000000
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_FORMATS_H
-#define _SDE_FORMATS_H
-
-#include <drm/drm_fourcc.h>
-#include "msm_gem.h"
-#include "sde_hw_mdss.h"
-
-/**
- * sde_get_sde_format_ext() - Returns sde format structure pointer.
- * @format:          DRM FourCC Code
- * @modifier:        format modifier from client
- */
-const struct sde_format *sde_get_sde_format_ext(
-		const uint32_t format,
-		const uint64_t modifier);
-
-#define sde_get_sde_format(f) sde_get_sde_format_ext(f, 0)
-
-/**
- * sde_get_msm_format - get an sde_format by its msm_format base
- *                     callback function registers with the msm_kms layer
- * @kms:             kms driver
- * @format:          DRM FourCC Code
- * @modifier:        data layout modifier
- */
-const struct msm_format *sde_get_msm_format(
-		struct msm_kms *kms,
-		const uint32_t format,
-		const uint64_t modifier);
-
-/**
- * sde_populate_formats - populate the given array with fourcc codes supported
- * @format_list:       pointer to list of possible formats
- * @pixel_formats:     array to populate with fourcc codes
- * @pixel_modifiers:   array to populate with drm modifiers, can be NULL
- * @pixel_formats_max: length of pixel formats array
- * Return: number of elements populated
- */
-uint32_t sde_populate_formats(
-		const struct sde_format_extended *format_list,
-		uint32_t *pixel_formats,
-		uint64_t *pixel_modifiers,
-		uint32_t pixel_formats_max);
-
-/**
- * sde_format_get_plane_sizes - calculate size and layout of given buffer format
- * @fmt:             pointer to sde_format
- * @w:               width of the buffer
- * @h:               height of the buffer
- * @layout:          layout of the buffer
- * @pitches:         array of size [SDE_MAX_PLANES] to populate
- *		     pitch for each plane
- *
- * Return: size of the buffer
- */
-int sde_format_get_plane_sizes(
-		const struct sde_format *fmt,
-		const uint32_t w,
-		const uint32_t h,
-		struct sde_hw_fmt_layout *layout,
-		const uint32_t *pitches);
-
-/**
- * sde_format_get_block_size - get block size of given format when
- *	operating in block mode
- * @fmt:             pointer to sde_format
- * @w:               pointer to width of the block
- * @h:               pointer to height of the block
- *
- * Return: 0 if success; error oode otherwise
- */
-int sde_format_get_block_size(const struct sde_format *fmt,
-		uint32_t *w, uint32_t *h);
-
-/**
- * sde_format_check_modified_format - validate format and buffers for
- *                   sde non-standard, i.e. modified format
- * @kms:             kms driver
- * @msm_fmt:         pointer to the msm_fmt base pointer of an sde_format
- * @cmd:             fb_cmd2 structure user request
- * @bos:             gem buffer object list
- *
- * Return: error code on failure, 0 on success
- */
-int sde_format_check_modified_format(
-		const struct msm_kms *kms,
-		const struct msm_format *msm_fmt,
-		const struct drm_mode_fb_cmd2 *cmd,
-		struct drm_gem_object **bos);
-
-/**
- * sde_format_populate_layout - populate the given format layout based on
- *                     mmu, fb, and format found in the fb
- * @aspace:            address space pointer
- * @fb:                framebuffer pointer
- * @fmtl:              format layout structure to populate
- *
- * Return: error code on failure, -EAGAIN if success but the addresses
- *         are the same as before or 0 if new addresses were populated
- */
-int sde_format_populate_layout(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_hw_fmt_layout *fmtl);
-
-/**
- * sde_format_populate_layout_with_roi - populate the given format layout
- *                     based on mmu, fb, roi, and format found in the fb
- * @aspace:            address space pointer
- * @fb:                framebuffer pointer
- * @roi:               region of interest (optional)
- * @fmtl:              format layout structure to populate
- *
- * Return: error code on failure, 0 on success
- */
-int sde_format_populate_layout_with_roi(
-		struct msm_gem_address_space *aspace,
-		struct drm_framebuffer *fb,
-		struct sde_rect *roi,
-		struct sde_hw_fmt_layout *fmtl);
-
-/**
- * sde_format_get_framebuffer_size - get framebuffer memory size
- * @format:            DRM pixel format
- * @width:             pixel width
- * @height:            pixel height
- * @pitches:           array of size [SDE_MAX_PLANES] to populate
- *		       pitch for each plane
- * @modifier:          drm modifier
- *
- * Return: memory size required for frame buffer
- */
-uint32_t sde_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifier);
-
-/**
- * sde_format_is_tp10_ubwc - check if the format is tp10 ubwc
- * @format:            DRM pixel format
- *
- * Return: returns true if the format is tp10 ubwc, otherwise false.
- */
-bool sde_format_is_tp10_ubwc(const struct sde_format *fmt);
-
-/**
- * sde_format_validate_fmt - validates if the format "sde_fmt" is within
- *	the list "fmt_list"
- * @kms: pointer to the kms object
- * @sde_fmt: pointer to the format to look within the list
- * @fmt_list: list where driver will loop to look for the 'sde_fmt' format.
- * @result: returns 0 if the format is found, otherwise will return an
- *	error code.
- */
-int sde_format_validate_fmt(struct msm_kms *kms,
-	const struct sde_format *sde_fmt,
-	const struct sde_format_extended *fmt_list);
-
-#endif /*_SDE_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
deleted file mode 100644
index 4a2b73f..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ /dev/null
@@ -1,1771 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-#include <drm/msm_drm_pp.h>
-#include "sde_hw_catalog.h"
-#include "sde_hw_util.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_lm.h"
-#include "sde_ad4.h"
-
-#define AD_STATE_READY(x) \
-	(((x) & ad4_init) && \
-	((x) & ad4_cfg) && \
-	((x) & ad4_mode) && \
-	(((x) & ad4_input) | ((x) & ad4_strength)))
-
-#define MERGE_WIDTH_RIGHT 6
-#define MERGE_WIDTH_LEFT 5
-#define AD_IPC_FRAME_COUNT 2
-
-enum ad4_ops_bitmask {
-	ad4_init = BIT(AD_INIT),
-	ad4_cfg = BIT(AD_CFG),
-	ad4_mode = BIT(AD_MODE),
-	ad4_input = BIT(AD_INPUT),
-	ad4_strength = BIT(AD_STRENGTH),
-	ad4_ops_max = BIT(31),
-};
-
-enum ad4_state {
-	ad4_state_idle,
-	ad4_state_startup,
-	ad4_state_run,
-	/* idle power collapse suspend state */
-	ad4_state_ipcs,
-	/* idle power collapse resume state */
-	ad4_state_ipcr,
-	/* manual mode state */
-	ad4_state_manual,
-	ad4_state_max,
-};
-
-struct ad4_roi_info {
-	u32 h_start;
-	u32 h_end;
-	u32 v_start;
-	u32 v_end;
-	u32 f_in;
-	u32 f_out;
-};
-
-typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *ad);
-
-static int ad4_params_check(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-
-static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_setup_debug_manual(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
-static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_input_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_roi_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_roi_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_roi_coordinate_offset(struct sde_hw_cp_cfg *hw_cfg,
-		struct ad4_roi_info *output);
-static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_assertive_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_backlight_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_strength_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_strength_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-
-static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_suspend_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_resume_setup_ipcs(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_reset_setup_startup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-
-static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
-	[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
-	[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
-	[ad4_state_idle][AD_CFG] = ad4_cfg_setup_idle,
-	[ad4_state_idle][AD_INPUT] = ad4_input_setup_idle,
-	[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
-	[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
-	[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
-	[ad4_state_idle][AD_STRENGTH] = ad4_strength_setup_idle,
-	[ad4_state_idle][AD_ROI] = ad4_roi_setup,
-	[ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup,
-	[ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup,
-
-	[ad4_state_startup][AD_MODE] = ad4_mode_setup_common,
-	[ad4_state_startup][AD_INIT] = ad4_init_setup,
-	[ad4_state_startup][AD_CFG] = ad4_cfg_setup,
-	[ad4_state_startup][AD_INPUT] = ad4_input_setup,
-	[ad4_state_startup][AD_SUSPEND] = ad4_suspend_setup,
-	[ad4_state_startup][AD_ASSERTIVE] = ad4_assertive_setup,
-	[ad4_state_startup][AD_BACKLIGHT] = ad4_backlight_setup,
-	[ad4_state_startup][AD_IPC_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_startup][AD_STRENGTH] = ad4_no_op_setup,
-	[ad4_state_startup][AD_ROI] = ad4_roi_setup,
-	[ad4_state_startup][AD_IPC_RESUME] = ad4_no_op_setup,
-	[ad4_state_startup][AD_IPC_RESET] = ad4_ipc_reset_setup_startup,
-
-	[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
-	[ad4_state_run][AD_INIT] = ad4_init_setup_run,
-	[ad4_state_run][AD_CFG] = ad4_cfg_setup_run,
-	[ad4_state_run][AD_INPUT] = ad4_input_setup,
-	[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
-	[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
-	[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
-	[ad4_state_run][AD_STRENGTH] = ad4_no_op_setup,
-	[ad4_state_run][AD_ROI] = ad4_roi_setup,
-	[ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run,
-	[ad4_state_run][AD_IPC_RESUME] = ad4_no_op_setup,
-	[ad4_state_run][AD_IPC_RESET] = ad4_setup_debug,
-
-	[ad4_state_ipcs][AD_MODE] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_INIT] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_CFG] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_INPUT] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_ASSERTIVE] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_BACKLIGHT] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_STRENGTH] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_ROI] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_IPC_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_ipcs][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcs,
-	[ad4_state_ipcs][AD_IPC_RESET] = ad4_no_op_setup,
-
-	[ad4_state_ipcr][AD_MODE] = ad4_mode_setup_common,
-	[ad4_state_ipcr][AD_INIT] = ad4_init_setup_ipcr,
-	[ad4_state_ipcr][AD_CFG] = ad4_cfg_setup_ipcr,
-	[ad4_state_ipcr][AD_INPUT] = ad4_input_setup_ipcr,
-	[ad4_state_ipcr][AD_SUSPEND] = ad4_suspend_setup,
-	[ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup_ipcr,
-	[ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup_ipcr,
-	[ad4_state_ipcr][AD_STRENGTH] = ad4_no_op_setup,
-	[ad4_state_ipcr][AD_ROI] = ad4_roi_setup_ipcr,
-	[ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_ipcr,
-	[ad4_state_ipcr][AD_IPC_RESUME] = ad4_no_op_setup,
-	[ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr,
-
-	[ad4_state_manual][AD_MODE] = ad4_mode_setup_common,
-	[ad4_state_manual][AD_INIT] = ad4_init_setup,
-	[ad4_state_manual][AD_CFG] = ad4_cfg_setup,
-	[ad4_state_manual][AD_INPUT] = ad4_no_op_setup,
-	[ad4_state_manual][AD_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_manual][AD_ASSERTIVE] = ad4_no_op_setup,
-	[ad4_state_manual][AD_BACKLIGHT] = ad4_no_op_setup,
-	[ad4_state_manual][AD_STRENGTH] = ad4_strength_setup,
-	[ad4_state_manual][AD_ROI] = ad4_roi_setup,
-	[ad4_state_manual][AD_IPC_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_manual][AD_IPC_RESUME] = ad4_no_op_setup,
-	[ad4_state_manual][AD_IPC_RESET] = ad4_setup_debug_manual,
-};
-
-struct ad4_info {
-	enum ad4_state state;
-	u32 completed_ops_mask;
-	bool ad4_support;
-	enum ad4_modes mode;
-	bool is_master;
-	u32 last_assertive;
-	u32 cached_assertive;
-	u32 last_str_inroi;
-	u32 last_str_outroi;
-	u64 last_als;
-	u64 cached_als;
-	u64 last_bl;
-	u64 cached_bl;
-	u32 frame_count;
-	u32 frmt_mode;
-	u32 irdx_control_0;
-	u32 tf_ctrl;
-	u32 vc_control_0;
-	struct ad4_roi_info last_roi_cfg;
-	struct ad4_roi_info cached_roi_cfg;
-};
-
-static struct ad4_info info[DSPP_MAX] = {
-	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF, false, 0x80, 0x80},
-	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF, false, 0x80, 0x80},
-	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF, false, 0x80, 0x80},
-	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF, false, 0x80, 0x80},
-};
-
-void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
-{
-	int ret = 0;
-	struct sde_ad_hw_cfg *cfg = ad_cfg;
-
-	ret = ad4_params_check(dspp, ad_cfg);
-	if (ret)
-		return;
-
-	ret = prop_set_func[info[dspp->idx].state][cfg->prop](dspp, ad_cfg);
-	if (ret)
-		DRM_ERROR("op failed %d ret %d\n", cfg->prop, ret);
-}
-
-int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop)
-{
-
-	if (!dspp || !prop) {
-		DRM_ERROR("invalid params dspp %pK prop %pK\n", dspp, prop);
-		return -EINVAL;
-	}
-
-	if (*prop >= AD_PROPMAX) {
-		DRM_ERROR("invalid prop set %d\n", *prop);
-		return -EINVAL;
-	}
-
-	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
-		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int ad4_params_check(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	struct sde_hw_mixer *hw_lm;
-
-	if (!dspp || !cfg || !cfg->hw_cfg) {
-		DRM_ERROR("invalid dspp %pK cfg %pK hw_cfg %pK\n",
-			dspp, cfg, ((cfg) ? (cfg->hw_cfg) : NULL));
-		return -EINVAL;
-	}
-
-	if (!cfg->hw_cfg->mixer_info) {
-		DRM_ERROR("invalid mixed info\n");
-		return -EINVAL;
-	}
-
-	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
-		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
-		return -EINVAL;
-	}
-
-	if (cfg->prop >= AD_PROPMAX) {
-		DRM_ERROR("invalid prop set %d\n", cfg->prop);
-		return -EINVAL;
-	}
-
-	if (info[dspp->idx].state >= ad4_state_max) {
-		DRM_ERROR("in max state for dspp idx %d\n", dspp->idx);
-		return -EINVAL;
-	}
-
-	if (!prop_set_func[info[dspp->idx].state][cfg->prop]) {
-		DRM_ERROR("prop set not implemented for state %d prop %d\n",
-				info[dspp->idx].state, cfg->prop);
-		return -EINVAL;
-	}
-
-	if (!cfg->hw_cfg->num_of_mixers ||
-	    cfg->hw_cfg->num_of_mixers > CRTC_DUAL_MIXERS) {
-		DRM_ERROR("invalid mixer cnt %d\n",
-				cfg->hw_cfg->num_of_mixers);
-		return -EINVAL;
-	}
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if (!hw_lm) {
-		DRM_ERROR("invalid mixer info\n");
-		return -EINVAL;
-	}
-
-	if (cfg->hw_cfg->num_of_mixers == 1 &&
-	    hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
-	    hw_lm->cfg.out_width != cfg->hw_cfg->displayh) {
-		DRM_ERROR("single_lm lmh %d lmw %d displayh %d displayw %d\n",
-			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
-			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
-		return -EINVAL;
-	} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
-		   hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
-		DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayw %d\n",
-			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
-			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
-{
-	return 0;
-}
-
-static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
-{
-	u32 in_str = 0, out_str = 0;
-	struct sde_hw_mixer *hw_lm;
-
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer)
-		/* this AD core is the salve core */
-		return 0;
-
-	in_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x4c);
-	out_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x50);
-	pr_debug("%s(): AD in strength %d, out strength %d\n", __func__,
-				    in_str, out_str);
-	return 0;
-}
-
-static int ad4_setup_debug_manual(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 in_str = 0, out_str = 0;
-	struct sde_hw_mixer *hw_lm;
-
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer)
-		/* this AD core is the salve core */
-		return 0;
-
-	in_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x15c);
-	out_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x160);
-	pr_debug("%s(): AD in strength = %d, out strength = %d in manual mode\n",
-			 __func__, in_str, out_str);
-
-	return 0;
-}
-
-static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
-{
-	u32 blk_offset;
-
-	if (mode == AD4_OFF) {
-		blk_offset = 0x04;
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				0x101);
-		info[dspp->idx].state = ad4_state_idle;
-		pr_debug("%s(): AD state move to idle\n", __func__);
-		info[dspp->idx].completed_ops_mask = 0;
-		/* reset last values to register default */
-		info[dspp->idx].last_assertive = 0x80;
-		info[dspp->idx].cached_assertive = U8_MAX;
-		info[dspp->idx].last_bl = 0xFFFF;
-		info[dspp->idx].cached_bl = U64_MAX;
-		info[dspp->idx].last_als = 0x0;
-		info[dspp->idx].cached_als = U64_MAX;
-		info[dspp->idx].last_roi_cfg.h_start = 0x0;
-		info[dspp->idx].last_roi_cfg.h_end = 0xffff;
-		info[dspp->idx].last_roi_cfg.v_start = 0x0;
-		info[dspp->idx].last_roi_cfg.v_end = 0xffff;
-		info[dspp->idx].last_roi_cfg.f_in = 0x400;
-		info[dspp->idx].last_roi_cfg.f_out = 0x400;
-		info[dspp->idx].cached_roi_cfg.h_start = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.h_end = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.v_start = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.v_end = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.f_in = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.f_out = U32_MAX;
-	} else {
-		if (mode == AD4_MANUAL) {
-			/*vc_control_0 */
-			blk_offset = 0x138;
-			SDE_REG_WRITE(&dspp->hw,
-				dspp->cap->sblk->ad.base + blk_offset, 0);
-			/* irdx_control_0 */
-			blk_offset = 0x13c;
-			SDE_REG_WRITE(&dspp->hw,
-				dspp->cap->sblk->ad.base + blk_offset,
-				info[dspp->idx].irdx_control_0);
-		}
-		if (info[dspp->idx].state == ad4_state_idle) {
-			if (mode == AD4_MANUAL) {
-				info[dspp->idx].state = ad4_state_manual;
-				pr_debug("%s(): AD state move to manual\n",
-					__func__);
-			} else {
-				info[dspp->idx].frame_count = 0;
-				info[dspp->idx].state = ad4_state_startup;
-				pr_debug("%s(): AD state move to startup\n",
-					__func__);
-			}
-		}
-		blk_offset = 0x04;
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				0x100);
-	}
-
-	return 0;
-}
-
-static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
-{
-	u32 frame_start, frame_end, proc_start, proc_end;
-	struct sde_hw_mixer *hw_lm;
-	u32 blk_offset, tile_ctl, val, i;
-	u32 off1, off2, off3, off4, off5, off6;
-	struct drm_msm_ad4_init *init;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
-			cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if (cfg->hw_cfg->num_of_mixers == 1) {
-		frame_start = 0;
-		frame_end = 0xffff;
-		proc_start = 0;
-		proc_end = 0xffff;
-		tile_ctl = 0;
-		info[dspp->idx].is_master = true;
-	} else {
-		tile_ctl = 0x5;
-		if (hw_lm->cfg.right_mixer) {
-			frame_start = (cfg->hw_cfg->displayh >> 1) -
-				MERGE_WIDTH_RIGHT;
-			frame_end = cfg->hw_cfg->displayh - 1;
-			proc_start = (cfg->hw_cfg->displayh >> 1);
-			proc_end = frame_end;
-			tile_ctl |= 0x10;
-			info[dspp->idx].is_master = false;
-		} else {
-			frame_start = 0;
-			frame_end = (cfg->hw_cfg->displayh >> 1) +
-				MERGE_WIDTH_LEFT;
-			proc_start = 0;
-			proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
-			tile_ctl |= 0x10;
-			info[dspp->idx].is_master = true;
-		}
-	}
-
-	init = cfg->hw_cfg->payload;
-
-	info[dspp->idx].frmt_mode = (init->init_param_009 & (BIT(14) - 1));
-
-	blk_offset = 0xc;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			init->init_param_010);
-
-	init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
-	init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
-	blk_offset = 0x10;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			((init->init_param_011 << 16) | init->init_param_012));
-
-	blk_offset = 0x14;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			tile_ctl);
-
-	blk_offset = 0x44;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			((((init->init_param_013) & (BIT(17) - 1)) << 16) |
-			(init->init_param_014 & (BIT(17) - 1))));
-
-	blk_offset = 0x5c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_015 & (BIT(16) - 1)));
-	blk_offset = 0x60;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_016 & (BIT(8) - 1)));
-	blk_offset = 0x64;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_017 & (BIT(12) - 1)));
-	blk_offset = 0x68;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_018 & (BIT(12) - 1)));
-	blk_offset = 0x6c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_019 & (BIT(12) - 1)));
-	blk_offset = 0x70;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_020 & (BIT(16) - 1)));
-	blk_offset = 0x74;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_021 & (BIT(8) - 1)));
-	blk_offset = 0x78;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_022 & (BIT(8) - 1)));
-	blk_offset = 0x7c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_023 & (BIT(16) - 1)));
-	blk_offset = 0x80;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_024 & (BIT(16) - 1)) << 16) |
-		((init->init_param_025 & (BIT(16) - 1)))));
-	blk_offset = 0x84;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_026 & (BIT(16) - 1)) << 16) |
-		((init->init_param_027 & (BIT(16) - 1)))));
-
-	blk_offset = 0x90;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_028 & (BIT(16) - 1)));
-	blk_offset = 0x94;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_029 & (BIT(16) - 1)));
-
-	blk_offset = 0x98;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_035 & (BIT(16) - 1)) << 16) |
-		((init->init_param_030 & (BIT(16) - 1)))));
-
-	blk_offset = 0x9c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_032 & (BIT(16) - 1)) << 16) |
-		((init->init_param_031 & (BIT(16) - 1)))));
-	blk_offset = 0xa0;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_034 & (BIT(16) - 1)) << 16) |
-		((init->init_param_033 & (BIT(16) - 1)))));
-
-	blk_offset = 0xb4;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_036 & (BIT(8) - 1)));
-	blk_offset = 0xcc;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_037 & (BIT(8) - 1)));
-	blk_offset = 0xc0;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_038 & (BIT(8) - 1)));
-	blk_offset = 0xd8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_039 & (BIT(8) - 1)));
-
-	blk_offset = 0xe8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_040 & (BIT(16) - 1)));
-
-	blk_offset = 0xf4;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_041 & (BIT(8) - 1)));
-
-	blk_offset = 0x100;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_042 & (BIT(16) - 1)));
-
-	blk_offset = 0x10c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_043 & (BIT(8) - 1)));
-
-	blk_offset = 0x120;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_044 & (BIT(16) - 1)));
-	blk_offset = 0x124;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_045 & (BIT(16) - 1)));
-
-	blk_offset = 0x128;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_046 & (BIT(1) - 1)));
-	blk_offset = 0x12c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_047 & (BIT(8) - 1)));
-
-	info[dspp->idx].irdx_control_0 = (init->init_param_048 & (BIT(5) - 1));
-
-	blk_offset = 0x140;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_049 & (BIT(8) - 1)));
-
-	blk_offset = 0x144;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_050 & (BIT(8) - 1)));
-	blk_offset = 0x148;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_051 & (BIT(8) - 1)) << 8) |
-		((init->init_param_052 & (BIT(8) - 1)))));
-
-	blk_offset = 0x14c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_053 & (BIT(10) - 1)));
-	blk_offset = 0x150;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_054 & (BIT(10) - 1)));
-	blk_offset = 0x154;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_055 & (BIT(8) - 1)));
-
-	blk_offset = 0x158;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_056 & (BIT(8) - 1)));
-	blk_offset = 0x164;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_057 & (BIT(8) - 1)));
-	blk_offset = 0x168;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_058 & (BIT(4) - 1)));
-
-	blk_offset = 0x17c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(frame_start & (BIT(16) - 1)));
-	blk_offset = 0x180;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(frame_end & (BIT(16) - 1)));
-	blk_offset = 0x184;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(proc_start & (BIT(16) - 1)));
-	blk_offset = 0x188;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(proc_end & (BIT(16) - 1)));
-
-	blk_offset = 0x18c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_059 & (BIT(4) - 1)));
-
-	blk_offset = 0x190;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		(((init->init_param_061 & (BIT(8) - 1)) << 8) |
-		((init->init_param_060 & (BIT(8) - 1)))));
-
-	blk_offset = 0x194;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_062 & (BIT(10) - 1)));
-
-	blk_offset = 0x1a0;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_063 & (BIT(10) - 1)));
-	blk_offset = 0x1a4;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_064 & (BIT(10) - 1)));
-	blk_offset = 0x1a8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_065 & (BIT(10) - 1)));
-	blk_offset = 0x1ac;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_066 & (BIT(8) - 1)));
-	blk_offset = 0x1b0;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_067 & (BIT(8) - 1)));
-	blk_offset = 0x1b4;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_068 & (BIT(6) - 1)));
-
-	blk_offset = 0x460;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_069 & (BIT(16) - 1)));
-	blk_offset = 0x464;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_070 & (BIT(10) - 1)));
-	blk_offset = 0x468;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_071 & (BIT(10) - 1)));
-	blk_offset = 0x46c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_072 & (BIT(10) - 1)));
-	blk_offset = 0x470;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_073 & (BIT(8) - 1)));
-	blk_offset = 0x474;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_074 & (BIT(10) - 1)));
-	blk_offset = 0x478;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_075 & (BIT(10) - 1)));
-
-	off1 = 0x1c0;
-	off2 = 0x210;
-	off3 = 0x260;
-	off4 = 0x2b0;
-	off5 = 0x380;
-	off6 = 0x3d0;
-	for (i = 0; i < AD4_LUT_GRP0_SIZE - 1; i = i + 2) {
-		val = (init->init_param_001[i] & (BIT(16) - 1));
-		val |= ((init->init_param_001[i + 1] & (BIT(16) - 1))
-				<< 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
-		off1 += 4;
-
-		val = (init->init_param_002[i] & (BIT(16) - 1));
-		val |= ((init->init_param_002[i + 1] & (BIT(16) - 1))
-				<< 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
-		off2 += 4;
-
-		val = (init->init_param_003[i] & (BIT(16) - 1));
-		val |= ((init->init_param_003[i + 1] & (BIT(16) - 1))
-				<< 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
-		off3 += 4;
-
-		val = (init->init_param_004[i] & (BIT(16) - 1));
-		val |= ((init->init_param_004[i + 1] & (BIT(16) - 1))
-				<< 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
-		off4 += 4;
-
-		val = (init->init_param_007[i] & (BIT(16) - 1));
-		val |= ((init->init_param_007[i + 1] &
-				(BIT(16) - 1)) << 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
-		off5 += 4;
-
-		val = (init->init_param_008[i] & (BIT(12) - 1));
-		val |= ((init->init_param_008[i + 1] &
-				(BIT(12) - 1)) << 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
-		off6 += 4;
-	}
-	/* write last index data */
-	i = AD4_LUT_GRP0_SIZE - 1;
-	val = ((init->init_param_001[i] & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
-	val = ((init->init_param_002[i] & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
-	val = ((init->init_param_003[i] & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
-	val = ((init->init_param_004[i] & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
-	val = ((init->init_param_007[i] & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
-	val = ((init->init_param_008[i] & (BIT(12) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
-
-	off1 = 0x300;
-	off2 = 0x340;
-	for (i = 0; i < AD4_LUT_GRP1_SIZE; i = i + 2) {
-		val = (init->init_param_005[i] & (BIT(16) - 1));
-		val |= ((init->init_param_005[i + 1] &
-				(BIT(16) - 1)) << 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
-		off1 += 4;
-
-		val = (init->init_param_006[i] & (BIT(16) - 1));
-		val |= ((init->init_param_006[i + 1] & (BIT(16) - 1))
-				<< 16);
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
-		off2 += 4;
-	}
-
-	return 0;
-}
-
-static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset, val;
-	struct drm_msm_ad4_cfg *ad_cfg;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
-		return 0;
-	}
-
-	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_cfg)) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(struct drm_msm_ad4_cfg), cfg->hw_cfg->len,
-			cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-	ad_cfg = cfg->hw_cfg->payload;
-
-	blk_offset = 0x20;
-	val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset = 0x24;
-	val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	info[dspp->idx].tf_ctrl = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
-
-	blk_offset = 0x38;
-	val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0x3c;
-	val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0x88;
-	val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_014 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xa4;
-	val = (ad_cfg->cfg_param_015 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_016 & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_017 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_018 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xc4;
-	val = (ad_cfg->cfg_param_019 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_020 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xb8;
-	val = (ad_cfg->cfg_param_021 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_022 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xd0;
-	val = (ad_cfg->cfg_param_023 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_024 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xdc;
-	val = (ad_cfg->cfg_param_025 & (BIT(16) - 1));
-	val |= ((ad_cfg->cfg_param_026 & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_027 & (BIT(16) - 1));
-	val |= ((ad_cfg->cfg_param_028 & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_029 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xec;
-	val = (ad_cfg->cfg_param_030 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_031 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0xf8;
-	val = (ad_cfg->cfg_param_032 & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_033 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0x104;
-	val = (ad_cfg->cfg_param_034 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_035 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0x110;
-	val = (ad_cfg->cfg_param_036 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_037 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_038 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_039 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	blk_offset = 0x134;
-	val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	info[dspp->idx].vc_control_0 = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
-
-	blk_offset = 0x16c;
-	val = (ad_cfg->cfg_param_044 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_045 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (ad_cfg->cfg_param_046 & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	return 0;
-}
-
-static int ad4_input_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, als;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x28;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		als = 0;
-		val = &als;
-	}
-	info[dspp->idx].last_als = (*val & (BIT(16) - 1));
-	info[dspp->idx].completed_ops_mask |= ad4_input;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_als);
-	return 0;
-}
-
-static int ad4_roi_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret = 0;
-	u32 blk_offset = 0, val = 0;
-	struct ad4_roi_info roi_cfg = {};
-
-	ret = ad4_roi_coordinate_offset(cfg->hw_cfg, &roi_cfg);
-	if (ret) {
-		DRM_ERROR("params invalid\n");
-		return -EINVAL;
-	}
-	info[dspp->idx].last_roi_cfg = roi_cfg;
-
-	/*roi h start and end*/
-	blk_offset = 0x18;
-	val = (info[dspp->idx].last_roi_cfg.h_end & (BIT(16) - 1));
-	val |= ((info[dspp->idx].last_roi_cfg.h_start & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/*roi v start and end*/
-	blk_offset += 4;
-	val = (info[dspp->idx].last_roi_cfg.v_end & (BIT(16) - 1));
-	val |= ((info[dspp->idx].last_roi_cfg.v_start & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/*roi factor in and out*/
-	blk_offset = 0x40;
-	val = ((info[dspp->idx].last_roi_cfg.f_in & (BIT(16) - 1)) << 16);
-	val |= (info[dspp->idx].last_roi_cfg.f_out & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	return ret;
-}
-
-static int ad4_roi_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret = 0;
-	struct ad4_roi_info roi_cfg = {};
-
-	ret = ad4_roi_coordinate_offset(cfg->hw_cfg, &roi_cfg);
-	if (ret) {
-		DRM_ERROR("params invalid\n");
-		return -EINVAL;
-	}
-
-	info[dspp->idx].cached_roi_cfg = roi_cfg;
-
-	return 0;
-}
-
-static int ad4_roi_coordinate_offset(struct sde_hw_cp_cfg *hw_cfg,
-						struct ad4_roi_info *output)
-{
-	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
-	struct drm_msm_ad4_roi_cfg *roi = NULL;
-
-	if (!hw_cfg->payload) {
-		output->h_start = 0x0;
-		output->h_end = hw_cfg->displayh;
-		output->v_start = 0x0;
-		output->v_end = hw_cfg->displayv;
-		output->f_in = 0x400;
-		output->f_out = 0x400;
-		return 0;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_ad4_roi_cfg)) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(struct drm_msm_ad4_roi_cfg), hw_cfg->len,
-			hw_cfg->payload);
-		return -EINVAL;
-	}
-	roi = (struct drm_msm_ad4_roi_cfg *)hw_cfg->payload;
-
-	if (roi->h_x >= hw_cfg->displayh || roi->v_x >= hw_cfg->displayv) {
-		DRM_ERROR("invalid roi=[%u,%u,%u,%u], display=[%u,%u]\n",
-			roi->h_x, roi->h_y, roi->v_x, roi->v_y,
-			hw_cfg->displayh, hw_cfg->displayv);
-		return -EINVAL;
-	}
-
-	if (roi->h_x >= roi->h_y || roi->v_x >= roi->v_y) {
-		DRM_ERROR("invalid roi=[%u,%u,%u,%u], display=[%u,%u]\n",
-			roi->h_x, roi->h_y, roi->v_x, roi->v_y,
-			hw_cfg->displayh, hw_cfg->displayv);
-		return -EINVAL;
-	}
-
-	if (roi->h_y > hw_cfg->displayh)
-		roi->h_y = hw_cfg->displayh;
-
-	if (roi->v_y > hw_cfg->displayv)
-		roi->v_y = hw_cfg->displayv;
-
-	/* single dspp cfg */
-	output->h_start = roi->h_x;
-	output->h_end = roi->h_y;
-	output->v_start = roi->v_x;
-	output->v_end = roi->v_y;
-	output->f_in = roi->factor_in;
-	output->f_out = roi->factor_out;
-
-	/* check whether dual dspp */
-	if (hw_cfg->num_of_mixers != 2)
-		return 0;
-
-	if (roi->h_y <= hw_lm->cfg.out_width) {
-		if (hw_lm->cfg.right_mixer) {
-			/* the region on the left of screen, clear right info */
-			output->h_start = 0;
-			output->h_end = 0;
-			output->v_start = 0;
-			output->v_end = 0;
-		}
-	} else if (roi->h_x < hw_lm->cfg.out_width) {
-		/* the region occupy both sides of screen: left and right */
-		if (hw_lm->cfg.right_mixer) {
-			output->h_start = 0;
-			output->h_end -= (hw_lm->cfg.out_width -
-					MERGE_WIDTH_RIGHT);
-		} else {
-			output->h_end = hw_lm->cfg.out_width;
-		}
-	} else {
-		/* the region on the right of the screen*/
-		if (hw_lm->cfg.right_mixer) {
-			output->h_start -= (hw_lm->cfg.out_width -
-					MERGE_WIDTH_RIGHT);
-			output->h_end -= (hw_lm->cfg.out_width -
-					MERGE_WIDTH_RIGHT);
-		} else {
-			output->h_start = 0;
-			output->h_end = 0;
-			output->v_start = 0;
-			output->v_end = 0;
-		}
-	}
-	return 0;
-}
-
-static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	info[dspp->idx].state = ad4_state_idle;
-	pr_debug("%s(): AD state move to idle\n", __func__);
-	info[dspp->idx].completed_ops_mask = 0;
-	return 0;
-}
-
-static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-
-	if (cfg->hw_cfg->len != sizeof(u64) || !cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	info[dspp->idx].mode = *((enum ad4_modes *)
-					(cfg->hw_cfg->payload));
-	info[dspp->idx].completed_ops_mask |= ad4_mode;
-
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask) ||
-					info[dspp->idx].mode == AD4_OFF)
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-
-	return 0;
-}
-
-static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	ret = ad4_init_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	/* enable memory initialization*/
-	/* frmt mode */
-	blk_offset = 0x8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(info[dspp->idx].frmt_mode & 0x1fff));
-	/* memory init */
-	blk_offset = 0x450;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x1);
-
-	/* enforce 0 initial strength when powering up AD config */
-	/* irdx_control_0 */
-	blk_offset = 0x13c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x6);
-
-	info[dspp->idx].completed_ops_mask |= ad4_init;
-
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-
-	return 0;
-}
-
-static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	ret = ad4_init_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	/* disable memory initialization*/
-	/* frmt mode */
-	blk_offset = 0x8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(info[dspp->idx].frmt_mode | 0x2000));
-	/* no need to explicitly set memory initialization sequence,
-	 * since AD hw were not powered off.
-	 */
-
-	/* irdx_control_0 */
-	blk_offset = 0x13c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].irdx_control_0);
-
-	return 0;
-}
-
-static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	ret = ad4_init_setup(dspp, cfg);
-	if (ret)
-		return ret;
-	/* no need to explicitly set memory initialization sequence,
-	 * since register reset values are the correct configuration
-	 */
-	/* frmt mode */
-	blk_offset = 0x8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(info[dspp->idx].frmt_mode | 0x2000));
-	/* irdx_control_0 */
-	blk_offset = 0x13c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].irdx_control_0);
-
-	info[dspp->idx].completed_ops_mask |= ad4_init;
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-
-	return 0;
-}
-
-static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
-		return 0;
-	}
-
-	ret = ad4_cfg_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	/* enforce 0 initial strength when powering up AD config */
-	/* assertiveness */
-	blk_offset = 0x30;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x0);
-	/* tf control */
-	blk_offset = 0x34;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x55);
-
-	/* vc_control_0 */
-	blk_offset = 0x138;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		info[dspp->idx].vc_control_0);
-
-	info[dspp->idx].completed_ops_mask |= ad4_cfg;
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-	return 0;
-}
-
-static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
-		return 0;
-	}
-
-	ret = ad4_cfg_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	/* assertiveness */
-	blk_offset = 0x30;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_assertive);
-	/* tf control */
-	blk_offset = 0x34;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		info[dspp->idx].tf_ctrl);
-	/* vc_control_0 */
-	blk_offset = 0x138;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		info[dspp->idx].vc_control_0);
-
-	return 0;
-}
-
-static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 blk_offset;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
-		return 0;
-	}
-
-	ret = ad4_cfg_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	/* assertiveness */
-	blk_offset = 0x30;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_assertive);
-
-	info[dspp->idx].completed_ops_mask |= ad4_cfg;
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-	return 0;
-}
-
-static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-
-	ret = ad4_input_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	info[dspp->idx].completed_ops_mask |= ad4_input;
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-
-	return 0;
-}
-
-static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, als;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x28;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		als = 0;
-		val = &als;
-	}
-	info[dspp->idx].cached_als = *val & (BIT(16) - 1);
-	info[dspp->idx].completed_ops_mask |= ad4_input;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_als);
-
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-
-	return 0;
-}
-
-static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, assertive;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x30;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		assertive = 0;
-		val = &assertive;
-	}
-
-	info[dspp->idx].last_assertive = *val & (BIT(8) - 1);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(info[dspp->idx].last_assertive));
-	return 0;
-}
-
-static int ad4_assertive_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, assertive;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x30;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		assertive = 0;
-		val = &assertive;
-	}
-
-	info[dspp->idx].cached_assertive = *val & (BIT(8) - 1);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_assertive);
-
-	return 0;
-}
-
-static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, bl;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x2c;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		bl = 0;
-		val = &bl;
-	}
-
-	info[dspp->idx].last_bl = *val & (BIT(16) - 1);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_bl);
-	return 0;
-}
-
-static int ad4_backlight_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 *val, bl;
-	u32 blk_offset;
-
-	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	blk_offset = 0x2c;
-	if (cfg->hw_cfg->payload) {
-		val = cfg->hw_cfg->payload;
-	} else {
-		bl = 0;
-		val = &bl;
-	}
-
-	info[dspp->idx].cached_bl = *val & (BIT(16) - 1);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			info[dspp->idx].last_bl);
-
-	return 0;
-}
-
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
-		u32 *resp_in, u32 *resp_out)
-{
-	if (!dspp || !resp_in || !resp_out) {
-		DRM_ERROR("invalid params dspp %pK resp_in %pK resp_out %pK\n",
-				dspp, resp_in, resp_out);
-		return;
-	}
-
-	switch (event) {
-	case AD4_IN_OUT_BACKLIGHT:
-		*resp_in = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x2c);
-		*resp_out = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x48);
-		pr_debug("%s(): AD4 input BL %u, output BL %u\n", __func__,
-			(*resp_in), (*resp_out));
-		break;
-	default:
-		break;
-	}
-}
-
-static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 in_str = 0, out_str = 0, i = 0;
-	struct sde_hw_mixer *hw_lm;
-
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
-		/* this AD core is the salve core */
-		for (i = DSPP_0; i < DSPP_MAX; i++) {
-			if (info[i].is_master) {
-				in_str = info[i].last_str_inroi;
-				out_str = info[i].last_str_outroi;
-				break;
-			}
-		}
-	} else {
-		in_str = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x4c);
-		out_str = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x50);
-		pr_debug("%s(): AD in strength %d, out %d\n", __func__,
-				in_str, out_str);
-	}
-	info[dspp->idx].last_str_inroi = in_str;
-	info[dspp->idx].last_str_outroi = out_str;
-	info[dspp->idx].state = ad4_state_ipcs;
-	pr_debug("%s(): AD state move to ipcs\n", __func__);
-
-	return 0;
-}
-
-static int ad4_ipc_resume_setup_ipcs(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset, val;
-
-	info[dspp->idx].frame_count = 0;
-	info[dspp->idx].state = ad4_state_ipcr;
-	pr_debug("%s(): AD state move to ipcr\n", __func__);
-
-	/* no need to rewrite frmt_mode bit 13 and mem_init,
-	 * since the default register values are exactly what
-	 * we wanted.
-	 */
-
-	/* ipc resume with manual strength */
-	/* tf control */
-	blk_offset = 0x34;
-	val = (0x55 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/* set roi config */
-	blk_offset = 0x18;
-	val = (info[dspp->idx].last_roi_cfg.h_end & (BIT(16) - 1));
-	val |= ((info[dspp->idx].last_roi_cfg.h_start & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (info[dspp->idx].last_roi_cfg.v_end & (BIT(16) - 1));
-	val |= ((info[dspp->idx].last_roi_cfg.v_start & (BIT(16) - 1)) << 16);
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset = 0x40;
-	val = ((info[dspp->idx].last_roi_cfg.f_in & (BIT(16) - 1)) << 16);
-	val |= (info[dspp->idx].last_roi_cfg.f_out & (BIT(16) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/* set manual strength */
-	blk_offset = 0x15c;
-	val = (info[dspp->idx].last_str_inroi & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset = 0x160;
-	val = (info[dspp->idx].last_str_outroi & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/* enable manual mode */
-	blk_offset = 0x138;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
-
-	return 0;
-}
-
-static int ad4_ipc_suspend_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	info[dspp->idx].state = ad4_state_ipcs;
-	pr_debug("%s(): AD state move to ipcs\n", __func__);
-	return 0;
-}
-
-static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-	u32 in_str = 0, out_str = 0, i = 0;
-	struct sde_hw_mixer *hw_lm;
-
-	/* Read AD calculator strength output during the 2 frames of manual
-	 * strength mode, and assign the strength output to last_str
-	 * when frame count reaches AD_IPC_FRAME_COUNT to avoid flickers
-	 * caused by strength was not converged before entering IPC mode
-	 */
-	hw_lm = cfg->hw_cfg->mixer_info;
-	if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
-		/* this AD core is the salve core */
-		for (i = DSPP_0; i < DSPP_MAX; i++) {
-			if (info[i].is_master) {
-				in_str = info[i].last_str_inroi;
-				out_str = info[i].last_str_outroi;
-				break;
-			}
-		}
-	} else {
-		in_str = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x4c);
-		out_str = SDE_REG_READ(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x50);
-		pr_debug("%s(): AD in strength %d, out %d\n", __func__,
-				in_str, out_str);
-	}
-
-	if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
-		info[dspp->idx].state = ad4_state_run;
-		pr_debug("%s(): AD state move to run\n", __func__);
-		info[dspp->idx].last_str_inroi = in_str;
-		info[dspp->idx].last_str_outroi = out_str;
-		ret = ad4_cfg_ipc_reset(dspp, cfg);
-		if (ret)
-			return ret;
-	} else {
-		info[dspp->idx].frame_count++;
-	}
-
-	return 0;
-}
-
-static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset, val = 0;
-
-	/* revert manual strength */
-	/* tf control */
-	blk_offset = 0x34;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		info[dspp->idx].tf_ctrl);
-	/* vc_control_0 */
-	blk_offset = 0x138;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-		info[dspp->idx].vc_control_0);
-
-	/* reset cached ALS, backlight and assertiveness */
-	if (info[dspp->idx].cached_als != U64_MAX) {
-		SDE_REG_WRITE(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x28,
-				info[dspp->idx].cached_als);
-		info[dspp->idx].last_als = info[dspp->idx].cached_als;
-		info[dspp->idx].cached_als = U64_MAX;
-	}
-	if (info[dspp->idx].cached_bl != U64_MAX) {
-		SDE_REG_WRITE(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x2c,
-				info[dspp->idx].cached_bl);
-		info[dspp->idx].last_bl = info[dspp->idx].cached_bl;
-		info[dspp->idx].cached_bl = U64_MAX;
-	}
-	if (info[dspp->idx].cached_assertive != U8_MAX) {
-		SDE_REG_WRITE(&dspp->hw,
-				dspp->cap->sblk->ad.base + 0x30,
-				info[dspp->idx].cached_assertive);
-		info[dspp->idx].last_assertive =
-				info[dspp->idx].cached_assertive;
-		info[dspp->idx].cached_assertive = U8_MAX;
-	}
-
-	/*reset cached roi config*/
-	if (info[dspp->idx].cached_roi_cfg.h_start != U32_MAX) {
-		blk_offset = 0x18;
-		val = (info[dspp->idx].cached_roi_cfg.h_end & (BIT(16) - 1));
-		val |= ((info[dspp->idx].cached_roi_cfg.h_start &
-			(BIT(16) - 1)) << 16);
-		SDE_REG_WRITE(&dspp->hw,
-			dspp->cap->sblk->ad.base + blk_offset, val);
-		blk_offset += 4;
-		val = (info[dspp->idx].cached_roi_cfg.v_end & (BIT(16) - 1));
-		val |= ((info[dspp->idx].cached_roi_cfg.v_start &
-			(BIT(16) - 1)) << 16);
-		SDE_REG_WRITE(&dspp->hw,
-			dspp->cap->sblk->ad.base + blk_offset, val);
-		blk_offset = 0x40;
-		val = ((info[dspp->idx].cached_roi_cfg.f_in &
-			(BIT(16) - 1)) << 16);
-		val |= (info[dspp->idx].cached_roi_cfg.f_out & (BIT(16) - 1));
-		SDE_REG_WRITE(&dspp->hw,
-			dspp->cap->sblk->ad.base + blk_offset, val);
-
-		info[dspp->idx].last_roi_cfg = info[dspp->idx].cached_roi_cfg;
-		info[dspp->idx].cached_roi_cfg.h_start = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.h_end = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.v_start = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.v_end = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.f_in = U32_MAX;
-		info[dspp->idx].cached_roi_cfg.f_out = U32_MAX;
-	}
-	return 0;
-}
-
-static int ad4_ipc_reset_setup_startup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset;
-
-	if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
-		info[dspp->idx].state = ad4_state_run;
-		pr_debug("%s(): AD state move to run\n", __func__);
-
-		/* revert enforce 0 initial strength */
-		/* irdx_control_0 */
-		blk_offset = 0x13c;
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				info[dspp->idx].irdx_control_0);
-		/* assertiveness */
-		blk_offset = 0x30;
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				info[dspp->idx].last_assertive);
-		/* tf control */
-		blk_offset = 0x34;
-		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				info[dspp->idx].tf_ctrl);
-	} else {
-		info[dspp->idx].frame_count++;
-	}
-
-	return 0;
-}
-
-static int ad4_strength_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u64 in_str = 0, out_str = 0, val;
-	u32 blk_offset = 0x15c;
-	struct drm_msm_ad4_manual_str_cfg *str_cfg = NULL;
-
-	if (cfg->hw_cfg->payload && (cfg->hw_cfg->len !=
-		sizeof(struct drm_msm_ad4_manual_str_cfg))) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-				sizeof(struct drm_msm_ad4_manual_str_cfg),
-				cfg->hw_cfg->len, cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	if (cfg->hw_cfg->payload) {
-		str_cfg = (struct drm_msm_ad4_manual_str_cfg *)
-			cfg->hw_cfg->payload;
-		in_str = str_cfg->in_str;
-		out_str = str_cfg->out_str;
-	}
-
-	/* set manual strength */
-	info[dspp->idx].completed_ops_mask |= ad4_strength;
-	val = (in_str & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-	blk_offset += 4;
-	val = (out_str & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	return 0;
-}
-
-static int ad4_strength_setup_idle(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	int ret;
-
-	ret = ad4_strength_setup(dspp, cfg);
-	if (ret)
-		return ret;
-
-	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].mode);
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.c b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
deleted file mode 100644
index 8facfa8..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_blk.h"
-
-/* Serialization lock for sde_hw_blk_list */
-static DEFINE_MUTEX(sde_hw_blk_lock);
-
-/* List of all hw block objects */
-static LIST_HEAD(sde_hw_blk_list);
-
-/**
- * sde_hw_blk_init - initialize hw block object
- * @type: hw block type - enum sde_hw_blk_type
- * @id: instance id of the hw block
- * @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
- */
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
-		struct sde_hw_blk_ops *ops)
-{
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	INIT_LIST_HEAD(&hw_blk->list);
-	hw_blk->type = type;
-	hw_blk->id = id;
-	atomic_set(&hw_blk->refcount, 0);
-
-	if (ops)
-		hw_blk->ops = *ops;
-
-	mutex_lock(&sde_hw_blk_lock);
-	list_add(&hw_blk->list, &sde_hw_blk_list);
-	mutex_unlock(&sde_hw_blk_lock);
-
-	return 0;
-}
-
-/**
- * sde_hw_blk_destroy - destroy hw block object.
- * @hw_blk:  pointer to hw block object
- * return: none
- */
-void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk)
-{
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return;
-	}
-
-	if (atomic_read(&hw_blk->refcount))
-		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
-				hw_blk->id);
-
-	mutex_lock(&sde_hw_blk_lock);
-	list_del(&hw_blk->list);
-	mutex_unlock(&sde_hw_blk_lock);
-}
-
-/**
- * sde_hw_blk_get - get hw_blk from free pool
- * @hw_blk: if specified, increment reference count only
- * @type: if hw_blk is not specified, allocate the next available of this type
- * @id: if specified (>= 0), allocate the given instance of the above type
- * return: pointer to hw block object
- */
-struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id)
-{
-	struct sde_hw_blk *curr;
-	int rc, refcount;
-
-	if (!hw_blk) {
-		mutex_lock(&sde_hw_blk_lock);
-		list_for_each_entry(curr, &sde_hw_blk_list, list) {
-			if ((curr->type != type) ||
-					(id >= 0 && curr->id != id) ||
-					(id < 0 &&
-						atomic_read(&curr->refcount)))
-				continue;
-
-			hw_blk = curr;
-			break;
-		}
-		mutex_unlock(&sde_hw_blk_lock);
-	}
-
-	if (!hw_blk) {
-		pr_debug("no hw_blk:%d\n", type);
-		return NULL;
-	}
-
-	refcount = atomic_inc_return(&hw_blk->refcount);
-
-	if (refcount == 1 && hw_blk->ops.start) {
-		rc = hw_blk->ops.start(hw_blk);
-		if (rc) {
-			pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
-			goto error_start;
-		}
-	}
-
-	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
-			hw_blk->id, refcount);
-	return hw_blk;
-
-error_start:
-	sde_hw_blk_put(hw_blk);
-	return ERR_PTR(rc);
-}
-
-/**
- * sde_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
- * @hw_blk: hw block to be freed
- * @free_blk: function to be called when reference count goes to zero
- */
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk)
-{
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return;
-	}
-
-	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
-			atomic_read(&hw_blk->refcount));
-
-	if (!atomic_read(&hw_blk->refcount)) {
-		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
-		return;
-	}
-
-	if (atomic_dec_return(&hw_blk->refcount))
-		return;
-
-	if (hw_blk->ops.stop)
-		hw_blk->ops.stop(hw_blk);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.h b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
deleted file mode 100644
index f49c75d..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_BLK_H
-#define _SDE_HW_BLK_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-
-struct sde_hw_blk;
-
-/**
- * struct sde_hw_blk_ops - common hardware block operations
- * @start: start operation on first get
- * @stop: stop operation on last put
- */
-struct sde_hw_blk_ops {
-	int (*start)(struct sde_hw_blk *hw_blk);
-	void (*stop)(struct sde_hw_blk *hw_blk);
-};
-
-/**
- * struct sde_hw_blk - definition of hardware block object
- * @list: list of hardware blocks
- * @type: hardware block type
- * @id: instance id
- * @refcount: reference/usage count
- */
-struct sde_hw_blk {
-	struct list_head list;
-	u32 type;
-	int id;
-	atomic_t refcount;
-	struct sde_hw_blk_ops ops;
-};
-
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
-		struct sde_hw_blk_ops *ops);
-void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk);
-
-struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id);
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk);
-#endif /*_SDE_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
deleted file mode 100644
index 9052e0f..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ /dev/null
@@ -1,4109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/slab.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-#include <linux/pm_qos.h>
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_catalog_format.h"
-#include "sde_kms.h"
-#include "sde_hw_uidle.h"
-
-/*************************************************************
- * MACRO DEFINITION
- *************************************************************/
-
-/**
- * Max hardware block in certain hardware. For ex: sspp pipes
- * can have QSEED, pcc, igc, pa, csc, qos entries, etc. This count is
- * 64 based on software design. It should be increased if any of the
- * hardware block has more subblocks.
- */
-#define MAX_SDE_HW_BLK  64
-
-/* each entry will have register address and bit offset in that register */
-#define MAX_BIT_OFFSET 2
-
-/* default line width for sspp, mixer, ds (input), wb */
-#define DEFAULT_SDE_LINE_WIDTH 2048
-
-/* default output line width for ds */
-#define DEFAULT_SDE_OUTPUT_LINE_WIDTH 2560
-
-/* max mixer blend stages */
-#define DEFAULT_SDE_MIXER_BLENDSTAGES 7
-
-/* max bank bit for macro tile and ubwc format */
-#define DEFAULT_SDE_HIGHEST_BANK_BIT 15
-
-/* default ubwc version */
-#define DEFAULT_SDE_UBWC_VERSION SDE_HW_UBWC_VER_10
-
-/* default ubwc static config register value */
-#define DEFAULT_SDE_UBWC_STATIC 0x0
-
-/* default ubwc swizzle register value */
-#define DEFAULT_SDE_UBWC_SWIZZLE 0x0
-
-/* default ubwc macrotile mode value */
-#define DEFAULT_SDE_UBWC_MACROTILE_MODE 0x0
-
-/* default hardware block size if dtsi entry is not present */
-#define DEFAULT_SDE_HW_BLOCK_LEN 0x100
-
-/* total number of intf - dp, dsi, hdmi */
-#define INTF_COUNT			3
-
-#define MAX_UPSCALE_RATIO		20
-#define MAX_DOWNSCALE_RATIO		4
-#define SSPP_UNITY_SCALE		1
-
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR	11
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR	5
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT	4
-#define MAX_PRE_ROT_HEIGHT_INLINE_ROT_DEFAULT	1088
-
-#define MAX_HORZ_DECIMATION		4
-#define MAX_VERT_DECIMATION		4
-
-#define MAX_SPLIT_DISPLAY_CTL		2
-#define MAX_PP_SPLIT_DISPLAY_CTL	1
-
-#define MDSS_BASE_OFFSET		0x0
-
-#define ROT_LM_OFFSET			3
-#define LINE_LM_OFFSET			5
-#define LINE_MODE_WB_OFFSET		2
-
-/**
- * these configurations are decided based on max mdp clock. It accounts
- * for max and min display resolution based on virtual hardware resource
- * support.
- */
-#define MAX_DISPLAY_HEIGHT_WITH_DECIMATION		2160
-#define MAX_DISPLAY_HEIGHT				5120
-#define MIN_DISPLAY_HEIGHT				0
-#define MIN_DISPLAY_WIDTH				0
-#define MAX_LM_PER_DISPLAY				2
-
-/* maximum XIN halt timeout in usec */
-#define VBIF_XIN_HALT_TIMEOUT		0x4000
-
-#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
-
-/* access property value based on prop_type and hardware index */
-#define PROP_VALUE_ACCESS(p, i, j)		((p + i)->value[j])
-
-/*
- * access element within PROP_TYPE_BIT_OFFSET_ARRAYs based on prop_type,
- * hardware index and offset array index
- */
-#define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
-
-#define DEFAULT_SBUF_HEADROOM		(20)
-#define DEFAULT_SBUF_PREFILL		(128)
-
-/*
- * Default parameter values
- */
-#define DEFAULT_MAX_BW_HIGH			7000000
-#define DEFAULT_MAX_BW_LOW			7000000
-#define DEFAULT_UNDERSIZED_PREFILL_LINES	2
-#define DEFAULT_XTRA_PREFILL_LINES		2
-#define DEFAULT_DEST_SCALE_PREFILL_LINES	3
-#define DEFAULT_MACROTILE_PREFILL_LINES		4
-#define DEFAULT_YUV_NV12_PREFILL_LINES		8
-#define DEFAULT_LINEAR_PREFILL_LINES		1
-#define DEFAULT_DOWNSCALING_PREFILL_LINES	1
-#define DEFAULT_CORE_IB_FF			"6.0"
-#define DEFAULT_CORE_CLK_FF			"1.0"
-#define DEFAULT_COMP_RATIO_RT \
-		"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23"
-#define DEFAULT_COMP_RATIO_NRT \
-		"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
-#define DEFAULT_MAX_PER_PIPE_BW			2400000
-#define DEFAULT_AMORTIZABLE_THRESHOLD		25
-#define DEFAULT_CPU_MASK			0
-#define DEFAULT_CPU_DMA_LATENCY			PM_QOS_DEFAULT_VALUE
-
-/* Uidle values */
-#define SDE_UIDLE_FAL10_EXIT_CNT 128
-#define SDE_UIDLE_FAL10_EXIT_DANGER 4
-#define SDE_UIDLE_FAL10_DANGER 6
-#define SDE_UIDLE_FAL10_TARGET_IDLE 50
-#define SDE_UIDLE_FAL1_TARGET_IDLE 10
-#define SDE_UIDLE_FAL10_THRESHOLD 12
-#define SDE_UIDLE_MAX_DWNSCALE 1500
-#define SDE_UIDLE_MAX_FPS 60
-
-
-/*************************************************************
- *  DTSI PROPERTY INDEX
- *************************************************************/
-enum {
-	HW_OFF,
-	HW_LEN,
-	HW_DISP,
-	HW_PROP_MAX,
-};
-
-enum sde_prop {
-	SDE_OFF,
-	SDE_LEN,
-	SSPP_LINEWIDTH,
-	MIXER_LINEWIDTH,
-	MIXER_BLEND,
-	WB_LINEWIDTH,
-	BANK_BIT,
-	UBWC_VERSION,
-	UBWC_STATIC,
-	UBWC_SWIZZLE,
-	QSEED_TYPE,
-	CSC_TYPE,
-	PANIC_PER_PIPE,
-	SRC_SPLIT,
-	DIM_LAYER,
-	SMART_DMA_REV,
-	IDLE_PC,
-	DEST_SCALER,
-	SMART_PANEL_ALIGN_MODE,
-	MACROTILE_MODE,
-	UBWC_BW_CALC_VERSION,
-	PIPE_ORDER_VERSION,
-	SEC_SID_MASK,
-	SDE_PROP_MAX,
-};
-
-enum {
-	PERF_MAX_BW_LOW,
-	PERF_MAX_BW_HIGH,
-	PERF_MIN_CORE_IB,
-	PERF_MIN_LLCC_IB,
-	PERF_MIN_DRAM_IB,
-	PERF_CORE_IB_FF,
-	PERF_CORE_CLK_FF,
-	PERF_COMP_RATIO_RT,
-	PERF_COMP_RATIO_NRT,
-	PERF_UNDERSIZED_PREFILL_LINES,
-	PERF_DEST_SCALE_PREFILL_LINES,
-	PERF_MACROTILE_PREFILL_LINES,
-	PERF_YUV_NV12_PREFILL_LINES,
-	PERF_LINEAR_PREFILL_LINES,
-	PERF_DOWNSCALING_PREFILL_LINES,
-	PERF_XTRA_PREFILL_LINES,
-	PERF_AMORTIZABLE_THRESHOLD,
-	PERF_DANGER_LUT,
-	PERF_SAFE_LUT_LINEAR,
-	PERF_SAFE_LUT_MACROTILE,
-	PERF_SAFE_LUT_NRT,
-	PERF_SAFE_LUT_CWB,
-	PERF_QOS_LUT_LINEAR,
-	PERF_QOS_LUT_MACROTILE,
-	PERF_QOS_LUT_NRT,
-	PERF_QOS_LUT_CWB,
-	PERF_CDP_SETTING,
-	PERF_CPU_MASK,
-	PERF_CPU_DMA_LATENCY,
-	PERF_QOS_LUT_MACROTILE_QSEED,
-	PERF_SAFE_LUT_MACROTILE_QSEED,
-	PERF_PROP_MAX,
-};
-
-enum {
-	SSPP_OFF,
-	SSPP_SIZE,
-	SSPP_TYPE,
-	SSPP_XIN,
-	SSPP_CLK_CTRL,
-	SSPP_CLK_STATUS,
-	SSPP_SCALE_SIZE,
-	SSPP_VIG_BLOCKS,
-	SSPP_RGB_BLOCKS,
-	SSPP_DMA_BLOCKS,
-	SSPP_EXCL_RECT,
-	SSPP_SMART_DMA,
-	SSPP_MAX_PER_PIPE_BW,
-	SSPP_PROP_MAX,
-};
-
-enum {
-	VIG_QSEED_OFF,
-	VIG_QSEED_LEN,
-	VIG_CSC_OFF,
-	VIG_HSIC_PROP,
-	VIG_MEMCOLOR_PROP,
-	VIG_PCC_PROP,
-	VIG_GAMUT_PROP,
-	VIG_IGC_PROP,
-	VIG_INVERSE_PMA,
-	VIG_PROP_MAX,
-};
-
-enum {
-	RGB_SCALER_OFF,
-	RGB_SCALER_LEN,
-	RGB_PCC_PROP,
-	RGB_PROP_MAX,
-};
-
-enum {
-	DMA_IGC_PROP,
-	DMA_GC_PROP,
-	DMA_DGM_INVERSE_PMA,
-	DMA_CSC_OFF,
-	DMA_PROP_MAX,
-};
-
-enum {
-	INTF_OFF,
-	INTF_LEN,
-	INTF_PREFETCH,
-	INTF_TYPE,
-	INTF_PROP_MAX,
-};
-
-enum {
-	PP_OFF,
-	PP_LEN,
-	TE_OFF,
-	TE_LEN,
-	TE2_OFF,
-	TE2_LEN,
-	PP_SLAVE,
-	DITHER_OFF,
-	DITHER_LEN,
-	DITHER_VER,
-	PP_MERGE_3D_ID,
-	PP_PROP_MAX,
-};
-
-enum {
-	DSC_OFF,
-	DSC_LEN,
-	DSC_PROP_MAX,
-};
-
-enum {
-	DS_TOP_OFF,
-	DS_TOP_LEN,
-	DS_TOP_INPUT_LINEWIDTH,
-	DS_TOP_OUTPUT_LINEWIDTH,
-	DS_TOP_PROP_MAX,
-};
-
-enum {
-	DS_OFF,
-	DS_LEN,
-	DS_PROP_MAX,
-};
-
-enum {
-	DSPP_TOP_OFF,
-	DSPP_TOP_SIZE,
-	DSPP_TOP_PROP_MAX,
-};
-
-enum {
-	DSPP_OFF,
-	DSPP_SIZE,
-	DSPP_BLOCKS,
-	DSPP_PROP_MAX,
-};
-
-enum {
-	DSPP_IGC_PROP,
-	DSPP_PCC_PROP,
-	DSPP_GC_PROP,
-	DSPP_HSIC_PROP,
-	DSPP_MEMCOLOR_PROP,
-	DSPP_SIXZONE_PROP,
-	DSPP_GAMUT_PROP,
-	DSPP_DITHER_PROP,
-	DSPP_HIST_PROP,
-	DSPP_VLUT_PROP,
-	DSPP_BLOCKS_PROP_MAX,
-};
-
-enum {
-	AD_OFF,
-	AD_VERSION,
-	AD_PROP_MAX,
-};
-
-enum {
-	LTM_OFF,
-	LTM_VERSION,
-	LTM_PROP_MAX,
-};
-
-enum {
-	MIXER_OFF,
-	MIXER_LEN,
-	MIXER_PAIR_MASK,
-	MIXER_BLOCKS,
-	MIXER_DISP,
-	MIXER_CWB,
-	MIXER_PROP_MAX,
-};
-
-enum {
-	MIXER_GC_PROP,
-	MIXER_BLOCKS_PROP_MAX,
-};
-
-enum {
-	MIXER_BLEND_OP_OFF,
-	MIXER_BLEND_PROP_MAX,
-};
-
-enum {
-	WB_OFF,
-	WB_LEN,
-	WB_ID,
-	WB_XIN_ID,
-	WB_CLK_CTRL,
-	WB_PROP_MAX,
-};
-
-enum {
-	VBIF_OFF,
-	VBIF_LEN,
-	VBIF_ID,
-	VBIF_DEFAULT_OT_RD_LIMIT,
-	VBIF_DEFAULT_OT_WR_LIMIT,
-	VBIF_DYNAMIC_OT_RD_LIMIT,
-	VBIF_DYNAMIC_OT_WR_LIMIT,
-	VBIF_MEMTYPE_0,
-	VBIF_MEMTYPE_1,
-	VBIF_QOS_RT_REMAP,
-	VBIF_QOS_NRT_REMAP,
-	VBIF_QOS_CWB_REMAP,
-	VBIF_QOS_LUTDMA_REMAP,
-	VBIF_PROP_MAX,
-};
-
-enum {
-	UIDLE_OFF,
-	UIDLE_LEN,
-	UIDLE_PROP_MAX,
-};
-
-enum {
-	REG_DMA_OFF,
-	REG_DMA_VERSION,
-	REG_DMA_TRIGGER_OFF,
-	REG_DMA_BROADCAST_DISABLED,
-	REG_DMA_XIN_ID,
-	REG_DMA_CLK_CTRL,
-	REG_DMA_PROP_MAX
-};
-
-/*************************************************************
- * dts property definition
- *************************************************************/
-enum prop_type {
-	PROP_TYPE_BOOL,
-	PROP_TYPE_U32,
-	PROP_TYPE_U32_ARRAY,
-	PROP_TYPE_STRING,
-	PROP_TYPE_STRING_ARRAY,
-	PROP_TYPE_BIT_OFFSET_ARRAY,
-	PROP_TYPE_NODE,
-};
-
-struct sde_prop_type {
-	/* use property index from enum property for readability purpose */
-	u8 id;
-	/* it should be property name based on dtsi documentation */
-	char *prop_name;
-	/**
-	 * if property is marked mandatory then it will fail parsing
-	 * when property is not present
-	 */
-	u32  is_mandatory;
-	/* property type based on "enum prop_type"  */
-	enum prop_type type;
-};
-
-struct sde_prop_value {
-	u32 value[MAX_SDE_HW_BLK];
-	u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
-};
-
-/*************************************************************
- * dts property list
- *************************************************************/
-static struct sde_prop_type sde_prop[] = {
-	{SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
-	{SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
-	{SSPP_LINEWIDTH, "qcom,sde-sspp-linewidth", false, PROP_TYPE_U32},
-	{MIXER_LINEWIDTH, "qcom,sde-mixer-linewidth", false, PROP_TYPE_U32},
-	{MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
-	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
-	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
-	{UBWC_VERSION, "qcom,sde-ubwc-version", false, PROP_TYPE_U32},
-	{UBWC_STATIC, "qcom,sde-ubwc-static", false, PROP_TYPE_U32},
-	{UBWC_SWIZZLE, "qcom,sde-ubwc-swizzle", false, PROP_TYPE_U32},
-	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
-	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
-	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
-	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
-	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
-	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
-	{IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
-	{DEST_SCALER, "qcom,sde-has-dest-scaler", false, PROP_TYPE_BOOL},
-	{SMART_PANEL_ALIGN_MODE, "qcom,sde-smart-panel-align-mode",
-			false, PROP_TYPE_U32},
-	{MACROTILE_MODE, "qcom,sde-macrotile-mode", false, PROP_TYPE_U32},
-	{UBWC_BW_CALC_VERSION, "qcom,sde-ubwc-bw-calc-version", false,
-			PROP_TYPE_U32},
-	{PIPE_ORDER_VERSION, "qcom,sde-pipe-order-version", false,
-			PROP_TYPE_U32},
-	{SEC_SID_MASK, "qcom,sde-secure-sid-mask", false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type sde_perf_prop[] = {
-	{PERF_MAX_BW_LOW, "qcom,sde-max-bw-low-kbps", false, PROP_TYPE_U32},
-	{PERF_MAX_BW_HIGH, "qcom,sde-max-bw-high-kbps", false, PROP_TYPE_U32},
-	{PERF_MIN_CORE_IB, "qcom,sde-min-core-ib-kbps", false, PROP_TYPE_U32},
-	{PERF_MIN_LLCC_IB, "qcom,sde-min-llcc-ib-kbps", false, PROP_TYPE_U32},
-	{PERF_MIN_DRAM_IB, "qcom,sde-min-dram-ib-kbps", false, PROP_TYPE_U32},
-	{PERF_CORE_IB_FF, "qcom,sde-core-ib-ff", false, PROP_TYPE_STRING},
-	{PERF_CORE_CLK_FF, "qcom,sde-core-clk-ff", false, PROP_TYPE_STRING},
-	{PERF_COMP_RATIO_RT, "qcom,sde-comp-ratio-rt", false,
-			PROP_TYPE_STRING},
-	{PERF_COMP_RATIO_NRT, "qcom,sde-comp-ratio-nrt", false,
-			PROP_TYPE_STRING},
-	{PERF_UNDERSIZED_PREFILL_LINES, "qcom,sde-undersizedprefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_DEST_SCALE_PREFILL_LINES, "qcom,sde-dest-scaleprefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_MACROTILE_PREFILL_LINES, "qcom,sde-macrotileprefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_YUV_NV12_PREFILL_LINES, "qcom,sde-yuv-nv12prefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_LINEAR_PREFILL_LINES, "qcom,sde-linearprefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_DOWNSCALING_PREFILL_LINES, "qcom,sde-downscalingprefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_XTRA_PREFILL_LINES, "qcom,sde-xtra-prefill-lines",
-			false, PROP_TYPE_U32},
-	{PERF_AMORTIZABLE_THRESHOLD, "qcom,sde-amortizable-threshold",
-			false, PROP_TYPE_U32},
-	{PERF_DANGER_LUT, "qcom,sde-danger-lut", false, PROP_TYPE_U32_ARRAY},
-	{PERF_SAFE_LUT_LINEAR, "qcom,sde-safe-lut-linear", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_SAFE_LUT_MACROTILE, "qcom,sde-safe-lut-macrotile", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_SAFE_LUT_NRT, "qcom,sde-safe-lut-nrt", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_SAFE_LUT_CWB, "qcom,sde-safe-lut-cwb", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_QOS_LUT_LINEAR, "qcom,sde-qos-lut-linear", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_QOS_LUT_MACROTILE, "qcom,sde-qos-lut-macrotile", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_QOS_LUT_NRT, "qcom,sde-qos-lut-nrt", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_QOS_LUT_CWB, "qcom,sde-qos-lut-cwb", false,
-			PROP_TYPE_U32_ARRAY},
-
-	{PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
-			PROP_TYPE_U32_ARRAY},
-	{PERF_CPU_MASK, "qcom,sde-qos-cpu-mask", false, PROP_TYPE_U32},
-	{PERF_CPU_DMA_LATENCY, "qcom,sde-qos-cpu-dma-latency", false,
-			PROP_TYPE_U32},
-	{PERF_QOS_LUT_MACROTILE_QSEED, "qcom,sde-qos-lut-macrotile-qseed",
-			false, PROP_TYPE_U32_ARRAY},
-	{PERF_SAFE_LUT_MACROTILE_QSEED, "qcom,sde-safe-lut-macrotile-qseed",
-			false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type sspp_prop[] = {
-	{SSPP_OFF, "qcom,sde-sspp-off", true, PROP_TYPE_U32_ARRAY},
-	{SSPP_SIZE, "qcom,sde-sspp-src-size", false, PROP_TYPE_U32},
-	{SSPP_TYPE, "qcom,sde-sspp-type", true, PROP_TYPE_STRING_ARRAY},
-	{SSPP_XIN, "qcom,sde-sspp-xin-id", true, PROP_TYPE_U32_ARRAY},
-	{SSPP_CLK_CTRL, "qcom,sde-sspp-clk-ctrl", false,
-		PROP_TYPE_BIT_OFFSET_ARRAY},
-	{SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
-		PROP_TYPE_BIT_OFFSET_ARRAY},
-	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
-	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
-	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
-	{SSPP_DMA_BLOCKS, "qcom,sde-sspp-dma-blocks", false, PROP_TYPE_NODE},
-	{SSPP_EXCL_RECT, "qcom,sde-sspp-excl-rect", false, PROP_TYPE_U32_ARRAY},
-	{SSPP_SMART_DMA, "qcom,sde-sspp-smart-dma-priority", false,
-		PROP_TYPE_U32_ARRAY},
-	{SSPP_MAX_PER_PIPE_BW, "qcom,sde-max-per-pipe-bw-kbps", false,
-		PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type vig_prop[] = {
-	{VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32},
-	{VIG_QSEED_LEN, "qcom,sde-vig-qseed-size", false, PROP_TYPE_U32},
-	{VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32},
-	{VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY},
-	{VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false,
-		PROP_TYPE_U32_ARRAY},
-	{VIG_PCC_PROP, "qcom,sde-vig-pcc", false, PROP_TYPE_U32_ARRAY},
-	{VIG_GAMUT_PROP, "qcom,sde-vig-gamut", false, PROP_TYPE_U32_ARRAY},
-	{VIG_IGC_PROP, "qcom,sde-vig-igc", false, PROP_TYPE_U32_ARRAY},
-	{VIG_INVERSE_PMA, "qcom,sde-vig-inverse-pma", false, PROP_TYPE_BOOL},
-};
-
-static struct sde_prop_type rgb_prop[] = {
-	{RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
-	{RGB_SCALER_LEN, "qcom,sde-rgb-scaler-size", false, PROP_TYPE_U32},
-	{RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type dma_prop[] = {
-	{DMA_IGC_PROP, "qcom,sde-dma-igc", false, PROP_TYPE_U32_ARRAY},
-	{DMA_GC_PROP, "qcom,sde-dma-gc", false, PROP_TYPE_U32_ARRAY},
-	{DMA_DGM_INVERSE_PMA, "qcom,sde-dma-inverse-pma", false,
-		PROP_TYPE_BOOL},
-	{DMA_CSC_OFF, "qcom,sde-dma-csc-off", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type ctl_prop[] = {
-	{HW_OFF, "qcom,sde-ctl-off", true, PROP_TYPE_U32_ARRAY},
-	{HW_LEN, "qcom,sde-ctl-size", false, PROP_TYPE_U32},
-	{HW_DISP, "qcom,sde-ctl-display-pref", false, PROP_TYPE_STRING_ARRAY},
-};
-
-struct sde_prop_type mixer_blend_prop[] = {
-	{MIXER_BLEND_OP_OFF, "qcom,sde-mixer-blend-op-off", true,
-		PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type mixer_prop[] = {
-	{MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
-	{MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
-	{MIXER_PAIR_MASK, "qcom,sde-mixer-pair-mask", true,
-		PROP_TYPE_U32_ARRAY},
-	{MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE},
-	{MIXER_DISP, "qcom,sde-mixer-display-pref", false,
-		PROP_TYPE_STRING_ARRAY},
-	{MIXER_CWB, "qcom,sde-mixer-cwb-pref", false,
-		PROP_TYPE_STRING_ARRAY},
-};
-
-static struct sde_prop_type mixer_blocks_prop[] = {
-	{MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type dspp_top_prop[] = {
-	{DSPP_TOP_OFF, "qcom,sde-dspp-top-off", true, PROP_TYPE_U32},
-	{DSPP_TOP_SIZE, "qcom,sde-dspp-top-size", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type dspp_prop[] = {
-	{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
-	{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
-	{DSPP_BLOCKS, "qcom,sde-dspp-blocks", false, PROP_TYPE_NODE},
-};
-
-static struct sde_prop_type dspp_blocks_prop[] = {
-	{DSPP_IGC_PROP, "qcom,sde-dspp-igc", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_PCC_PROP, "qcom,sde-dspp-pcc", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_GC_PROP, "qcom,sde-dspp-gc", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_HSIC_PROP, "qcom,sde-dspp-hsic", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_MEMCOLOR_PROP, "qcom,sde-dspp-memcolor", false,
-		PROP_TYPE_U32_ARRAY},
-	{DSPP_SIXZONE_PROP, "qcom,sde-dspp-sixzone", false,
-		PROP_TYPE_U32_ARRAY},
-	{DSPP_GAMUT_PROP, "qcom,sde-dspp-gamut", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_DITHER_PROP, "qcom,sde-dspp-dither", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_HIST_PROP, "qcom,sde-dspp-hist", false, PROP_TYPE_U32_ARRAY},
-	{DSPP_VLUT_PROP, "qcom,sde-dspp-vlut", false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type ad_prop[] = {
-	{AD_OFF, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
-	{AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type ltm_prop[] = {
-	{LTM_OFF, "qcom,sde-dspp-ltm-off", false, PROP_TYPE_U32_ARRAY},
-	{LTM_VERSION, "qcom,sde-dspp-ltm-version", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type ds_top_prop[] = {
-	{DS_TOP_OFF, "qcom,sde-dest-scaler-top-off", false, PROP_TYPE_U32},
-	{DS_TOP_LEN, "qcom,sde-dest-scaler-top-size", false, PROP_TYPE_U32},
-	{DS_TOP_INPUT_LINEWIDTH, "qcom,sde-max-dest-scaler-input-linewidth",
-		false, PROP_TYPE_U32},
-	{DS_TOP_OUTPUT_LINEWIDTH, "qcom,sde-max-dest-scaler-output-linewidth",
-		false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type ds_prop[] = {
-	{DS_OFF, "qcom,sde-dest-scaler-off", false, PROP_TYPE_U32_ARRAY},
-	{DS_LEN, "qcom,sde-dest-scaler-size", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type pp_prop[] = {
-	{PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
-	{PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
-	{TE_OFF, "qcom,sde-te-off", false, PROP_TYPE_U32_ARRAY},
-	{TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
-	{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
-	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
-	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
-	{DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY},
-	{DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32},
-	{DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32},
-	{PP_MERGE_3D_ID, "qcom,sde-pp-merge-3d-id", false, PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type dsc_prop[] = {
-	{DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
-	{DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type cdm_prop[] = {
-	{HW_OFF, "qcom,sde-cdm-off", false, PROP_TYPE_U32_ARRAY},
-	{HW_LEN, "qcom,sde-cdm-size", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type intf_prop[] = {
-	{INTF_OFF, "qcom,sde-intf-off", true, PROP_TYPE_U32_ARRAY},
-	{INTF_LEN, "qcom,sde-intf-size", false, PROP_TYPE_U32},
-	{INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
-						PROP_TYPE_U32_ARRAY},
-	{INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
-};
-
-static struct sde_prop_type wb_prop[] = {
-	{WB_OFF, "qcom,sde-wb-off", true, PROP_TYPE_U32_ARRAY},
-	{WB_LEN, "qcom,sde-wb-size", false, PROP_TYPE_U32},
-	{WB_ID, "qcom,sde-wb-id", true, PROP_TYPE_U32_ARRAY},
-	{WB_XIN_ID, "qcom,sde-wb-xin-id", false, PROP_TYPE_U32_ARRAY},
-	{WB_CLK_CTRL, "qcom,sde-wb-clk-ctrl", false,
-		PROP_TYPE_BIT_OFFSET_ARRAY},
-};
-
-static struct sde_prop_type vbif_prop[] = {
-	{VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
-	{VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
-	{VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
-		PROP_TYPE_U32},
-	{VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
-		PROP_TYPE_U32},
-	{VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_CWB_REMAP, "qcom,sde-vbif-qos-cwb-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_LUTDMA_REMAP, "qcom,sde-vbif-qos-lutdma-remap", false,
-		PROP_TYPE_U32_ARRAY},
-};
-
-static struct sde_prop_type uidle_prop[] = {
-	{UIDLE_OFF, "qcom,sde-uidle-off", false, PROP_TYPE_U32},
-	{UIDLE_LEN, "qcom,sde-uidle-size", false, PROP_TYPE_U32},
-};
-
-static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
-	[REG_DMA_OFF] =  {REG_DMA_OFF, "qcom,sde-reg-dma-off", false,
-		PROP_TYPE_U32},
-	[REG_DMA_VERSION] = {REG_DMA_VERSION, "qcom,sde-reg-dma-version",
-		false, PROP_TYPE_U32},
-	[REG_DMA_TRIGGER_OFF] = {REG_DMA_TRIGGER_OFF,
-		"qcom,sde-reg-dma-trigger-off", false,
-		PROP_TYPE_U32},
-	[REG_DMA_BROADCAST_DISABLED] = {REG_DMA_BROADCAST_DISABLED,
-		"qcom,sde-reg-dma-broadcast-disabled", false, PROP_TYPE_BOOL},
-	[REG_DMA_XIN_ID] = {REG_DMA_XIN_ID,
-		"qcom,sde-reg-dma-xin-id", false, PROP_TYPE_U32},
-	[REG_DMA_CLK_CTRL] = {REG_DMA_XIN_ID,
-		"qcom,sde-reg-dma-clk-ctrl", false, PROP_TYPE_BIT_OFFSET_ARRAY},
-};
-
-static struct sde_prop_type merge_3d_prop[] = {
-	{HW_OFF, "qcom,sde-merge-3d-off", false, PROP_TYPE_U32_ARRAY},
-	{HW_LEN, "qcom,sde-merge-3d-size", false, PROP_TYPE_U32},
-};
-/*************************************************************
- * static API list
- *************************************************************/
-
-static int _parse_dt_u32_handler(struct device_node *np,
-	char *prop_name, u32 *offsets, int len, bool mandatory)
-{
-	int rc = -EINVAL;
-
-	if (len > MAX_SDE_HW_BLK) {
-		SDE_ERROR(
-			"prop: %s tries out of bound access for u32 array read len: %d\n",
-				prop_name, len);
-		return -E2BIG;
-	}
-
-	rc = of_property_read_u32_array(np, prop_name, offsets, len);
-	if (rc && mandatory)
-		SDE_ERROR("mandatory prop: %s u32 array read len:%d\n",
-				prop_name, len);
-	else if (rc)
-		SDE_DEBUG("optional prop: %s u32 array read len:%d\n",
-				prop_name, len);
-
-	return rc;
-}
-
-static int _parse_dt_bit_offset(struct device_node *np,
-	char *prop_name, struct sde_prop_value *prop_value, u32 prop_index,
-	u32 count, bool mandatory)
-{
-	int rc = 0, len, i, j;
-	const u32 *arr;
-
-	arr = of_get_property(np, prop_name, &len);
-	if (arr) {
-		len /= sizeof(u32);
-		len &= ~0x1;
-
-		if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) {
-			SDE_ERROR(
-				"prop: %s len: %d will lead to out of bound access\n",
-				prop_name, len / MAX_BIT_OFFSET);
-			return -E2BIG;
-		}
-
-		for (i = 0, j = 0; i < len; j++) {
-			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
-				be32_to_cpu(arr[i]);
-			i++;
-			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 1) =
-				be32_to_cpu(arr[i]);
-			i++;
-		}
-	} else {
-		if (mandatory) {
-			SDE_ERROR("error mandatory property '%s' not found\n",
-				prop_name);
-			rc = -EINVAL;
-		} else {
-			SDE_DEBUG("error optional property '%s' not found\n",
-				prop_name);
-		}
-	}
-
-	return rc;
-}
-
-static int _validate_dt_entry(struct device_node *np,
-	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
-	int *off_count)
-{
-	int rc = 0, i, val;
-	struct device_node *snp = NULL;
-
-	if (off_count) {
-		*off_count = of_property_count_u32_elems(np,
-				sde_prop[0].prop_name);
-		if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
-			if (sde_prop[0].is_mandatory) {
-				SDE_ERROR(
-					"invalid hw offset prop name:%s count: %d\n",
-					sde_prop[0].prop_name, *off_count);
-				rc = -EINVAL;
-			}
-			*off_count = 0;
-			memset(prop_count, 0, sizeof(int) * prop_size);
-			return rc;
-		}
-	}
-
-	for (i = 0; i < prop_size; i++) {
-		switch (sde_prop[i].type) {
-		case PROP_TYPE_U32:
-			rc = of_property_read_u32(np, sde_prop[i].prop_name,
-				&val);
-			break;
-		case PROP_TYPE_U32_ARRAY:
-			prop_count[i] = of_property_count_u32_elems(np,
-				sde_prop[i].prop_name);
-			if (prop_count[i] < 0)
-				rc = prop_count[i];
-			break;
-		case PROP_TYPE_STRING_ARRAY:
-			prop_count[i] = of_property_count_strings(np,
-				sde_prop[i].prop_name);
-			if (prop_count[i] < 0)
-				rc = prop_count[i];
-			break;
-		case PROP_TYPE_BIT_OFFSET_ARRAY:
-			of_get_property(np, sde_prop[i].prop_name, &val);
-			prop_count[i] = val / (MAX_BIT_OFFSET * sizeof(u32));
-			break;
-		case PROP_TYPE_NODE:
-			snp = of_get_child_by_name(np,
-					sde_prop[i].prop_name);
-			if (!snp)
-				rc = -EINVAL;
-			break;
-		default:
-			SDE_DEBUG("invalid property type:%d\n",
-							sde_prop[i].type);
-			break;
-		}
-		SDE_DEBUG(
-			"prop id:%d prop name:%s prop type:%d prop_count:%d\n",
-			i, sde_prop[i].prop_name,
-			sde_prop[i].type, prop_count[i]);
-
-		if (rc && sde_prop[i].is_mandatory &&
-		   ((sde_prop[i].type == PROP_TYPE_U32) ||
-		    (sde_prop[i].type == PROP_TYPE_NODE))) {
-			SDE_ERROR("prop:%s not present\n",
-						sde_prop[i].prop_name);
-			goto end;
-		} else if (sde_prop[i].type == PROP_TYPE_U32 ||
-			sde_prop[i].type == PROP_TYPE_BOOL ||
-			sde_prop[i].type == PROP_TYPE_NODE) {
-			rc = 0;
-			continue;
-		}
-
-		if (off_count && (prop_count[i] != *off_count) &&
-				sde_prop[i].is_mandatory) {
-			SDE_ERROR(
-				"prop:%s count:%d is different compared to offset array:%d\n",
-				sde_prop[i].prop_name,
-				prop_count[i], *off_count);
-			rc = -EINVAL;
-			goto end;
-		} else if (off_count && prop_count[i] != *off_count) {
-			SDE_DEBUG(
-				"prop:%s count:%d is different compared to offset array:%d\n",
-				sde_prop[i].prop_name,
-				prop_count[i], *off_count);
-			rc = 0;
-			prop_count[i] = 0;
-		}
-		if (prop_count[i] < 0) {
-			prop_count[i] = 0;
-			if (sde_prop[i].is_mandatory) {
-				SDE_ERROR("prop:%s count:%d is negative\n",
-					sde_prop[i].prop_name, prop_count[i]);
-				rc = -EINVAL;
-			} else {
-				rc = 0;
-				SDE_DEBUG("prop:%s count:%d is negative\n",
-					sde_prop[i].prop_name, prop_count[i]);
-			}
-		}
-	}
-
-end:
-	return rc;
-}
-
-static int _read_dt_entry(struct device_node *np,
-	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
-	bool *prop_exists,
-	struct sde_prop_value *prop_value)
-{
-	int rc = 0, i, j;
-
-	for (i = 0; i < prop_size; i++) {
-		prop_exists[i] = true;
-		switch (sde_prop[i].type) {
-		case PROP_TYPE_U32:
-			rc = of_property_read_u32(np, sde_prop[i].prop_name,
-				&PROP_VALUE_ACCESS(prop_value, i, 0));
-			SDE_DEBUG(
-				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
-				i, sde_prop[i].prop_name,
-				sde_prop[i].type,
-				PROP_VALUE_ACCESS(prop_value, i, 0));
-			if (rc)
-				prop_exists[i] = false;
-			break;
-		case PROP_TYPE_BOOL:
-			PROP_VALUE_ACCESS(prop_value, i, 0) =
-				of_property_read_bool(np,
-					sde_prop[i].prop_name);
-			SDE_DEBUG(
-				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
-				i, sde_prop[i].prop_name,
-				sde_prop[i].type,
-				PROP_VALUE_ACCESS(prop_value, i, 0));
-			break;
-		case PROP_TYPE_U32_ARRAY:
-			rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
-				&PROP_VALUE_ACCESS(prop_value, i, 0),
-				prop_count[i], sde_prop[i].is_mandatory);
-			if (rc && sde_prop[i].is_mandatory) {
-				SDE_ERROR(
-					"%s prop validation success but read failed\n",
-					sde_prop[i].prop_name);
-				prop_exists[i] = false;
-				goto end;
-			} else {
-				if (rc)
-					prop_exists[i] = false;
-				/* only for debug purpose */
-				SDE_DEBUG(
-					"prop id:%d prop name:%s prop type:%d",
-					i, sde_prop[i].prop_name,
-					sde_prop[i].type);
-				for (j = 0; j < prop_count[i]; j++)
-					SDE_DEBUG(" value[%d]:0x%x ", j,
-						PROP_VALUE_ACCESS(prop_value, i,
-								j));
-				SDE_DEBUG("\n");
-			}
-			break;
-		case PROP_TYPE_BIT_OFFSET_ARRAY:
-			rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
-				prop_value, i, prop_count[i],
-				sde_prop[i].is_mandatory);
-			if (rc && sde_prop[i].is_mandatory) {
-				SDE_ERROR(
-					"%s prop validation success but read failed\n",
-					sde_prop[i].prop_name);
-				prop_exists[i] = false;
-				goto end;
-			} else {
-				if (rc)
-					prop_exists[i] = false;
-				SDE_DEBUG(
-					"prop id:%d prop name:%s prop type:%d",
-					i, sde_prop[i].prop_name,
-					sde_prop[i].type);
-				for (j = 0; j < prop_count[i]; j++)
-					SDE_DEBUG(
-					"count[%d]: bit:0x%x off:0x%x\n", j,
-					PROP_BITVALUE_ACCESS(prop_value,
-						i, j, 0),
-					PROP_BITVALUE_ACCESS(prop_value,
-						i, j, 1));
-				SDE_DEBUG("\n");
-			}
-			break;
-		case PROP_TYPE_NODE:
-			/* Node will be parsed in calling function */
-			rc = 0;
-			break;
-		default:
-			SDE_DEBUG("invalid property type:%d\n",
-							sde_prop[i].type);
-			break;
-		}
-		rc = 0;
-	}
-
-end:
-	return rc;
-}
-
-static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
-	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
-{
-	sblk->maxupscale = MAX_UPSCALE_RATIO;
-	sblk->maxdwnscale = MAX_DOWNSCALE_RATIO;
-	sspp->id = SSPP_VIG0 + *vig_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-			sspp->id - SSPP_VIG0);
-	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
-	sspp->type = SSPP_TYPE_VIG;
-	set_bit(SDE_PERF_SSPP_QOS, &sspp->perf_features);
-	if (sde_cfg->vbif_qos_nlvl == 8)
-		set_bit(SDE_PERF_SSPP_QOS_8LVL, &sspp->perf_features);
-	(*vig_count)++;
-
-	if (!prop_value)
-		return;
-
-	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
-		set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
-		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_OFF, 0);
-		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_LEN, 0);
-		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-				"sspp_scaler%u", sspp->id - SSPP_VIG0);
-	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
-		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
-		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_OFF, 0);
-		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_LEN, 0);
-		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id - SSPP_VIG0);
-	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3LITE) {
-		set_bit(SDE_SSPP_SCALER_QSEED3LITE, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3LITE;
-		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_OFF, 0);
-		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
-			VIG_QSEED_LEN, 0);
-		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id - SSPP_VIG0);
-	}
-
-	sblk->csc_blk.id = SDE_SSPP_CSC;
-	snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_csc%u", sspp->id - SSPP_VIG0);
-	if (sde_cfg->csc_type == SDE_SSPP_CSC) {
-		set_bit(SDE_SSPP_CSC, &sspp->features);
-		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
-							VIG_CSC_OFF, 0);
-	} else if (sde_cfg->csc_type == SDE_SSPP_CSC_10BIT) {
-		set_bit(SDE_SSPP_CSC_10BIT, &sspp->features);
-		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
-							VIG_CSC_OFF, 0);
-	}
-
-	sblk->hsic_blk.id = SDE_SSPP_HSIC;
-	snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_hsic%u", sspp->id - SSPP_VIG0);
-	if (prop_exists[VIG_HSIC_PROP]) {
-		sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_HSIC_PROP, 0);
-		sblk->hsic_blk.version = PROP_VALUE_ACCESS(prop_value,
-			VIG_HSIC_PROP, 1);
-		sblk->hsic_blk.len = 0;
-		set_bit(SDE_SSPP_HSIC, &sspp->features);
-	}
-
-	sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
-	snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_memcolor%u", sspp->id - SSPP_VIG0);
-	if (prop_exists[VIG_MEMCOLOR_PROP]) {
-		sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_MEMCOLOR_PROP, 0);
-		sblk->memcolor_blk.version = PROP_VALUE_ACCESS(prop_value,
-			VIG_MEMCOLOR_PROP, 1);
-		sblk->memcolor_blk.len = 0;
-		set_bit(SDE_SSPP_MEMCOLOR, &sspp->features);
-	}
-
-	sblk->pcc_blk.id = SDE_SSPP_PCC;
-	snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_pcc%u", sspp->id - SSPP_VIG0);
-	if (prop_exists[VIG_PCC_PROP]) {
-		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_PCC_PROP, 0);
-		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
-			VIG_PCC_PROP, 1);
-		sblk->pcc_blk.len = 0;
-		set_bit(SDE_SSPP_PCC, &sspp->features);
-	}
-
-	if (prop_exists[VIG_GAMUT_PROP]) {
-		sblk->gamut_blk.id = SDE_SSPP_VIG_GAMUT;
-		snprintf(sblk->gamut_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_vig_gamut%u", sspp->id - SSPP_VIG0);
-		sblk->gamut_blk.base = PROP_VALUE_ACCESS(prop_value,
-			VIG_GAMUT_PROP, 0);
-		sblk->gamut_blk.version = PROP_VALUE_ACCESS(prop_value,
-			VIG_GAMUT_PROP, 1);
-		sblk->gamut_blk.len = 0;
-		set_bit(SDE_SSPP_VIG_GAMUT, &sspp->features);
-	}
-
-	if (prop_exists[VIG_IGC_PROP]) {
-		sblk->igc_blk[0].id = SDE_SSPP_VIG_IGC;
-		snprintf(sblk->igc_blk[0].name, SDE_HW_BLK_NAME_LEN,
-			"sspp_vig_igc%u", sspp->id - SSPP_VIG0);
-		sblk->igc_blk[0].base = PROP_VALUE_ACCESS(prop_value,
-			VIG_IGC_PROP, 0);
-		sblk->igc_blk[0].version = PROP_VALUE_ACCESS(prop_value,
-			VIG_IGC_PROP, 1);
-		sblk->igc_blk[0].len = 0;
-		set_bit(SDE_SSPP_VIG_IGC, &sspp->features);
-	}
-
-	if (PROP_VALUE_ACCESS(prop_value, VIG_INVERSE_PMA, 0))
-		set_bit(SDE_SSPP_INVERSE_PMA, &sspp->features);
-
-	sblk->format_list = sde_cfg->vig_formats;
-	sblk->virt_format_list = sde_cfg->virt_vig_formats;
-	if (IS_SDE_INLINE_ROT_REV_100(sde_cfg->true_inline_rot_rev)) {
-		set_bit(SDE_SSPP_TRUE_INLINE_ROT_V1, &sspp->features);
-		sblk->in_rot_format_list = sde_cfg->inline_rot_formats;
-		sblk->in_rot_maxdwnscale_rt_num =
-			sde_cfg->true_inline_dwnscale_rt_num;
-		sblk->in_rot_maxdwnscale_rt_denom =
-			sde_cfg->true_inline_dwnscale_rt_denom;
-		sblk->in_rot_maxdwnscale_nrt =
-			sde_cfg->true_inline_dwnscale_nrt;
-		sblk->in_rot_maxheight =
-			MAX_PRE_ROT_HEIGHT_INLINE_ROT_DEFAULT;
-		sblk->in_rot_prefill_fudge_lines =
-			sde_cfg->true_inline_prefill_fudge_lines;
-		sblk->in_rot_prefill_lines_nv12 =
-			sde_cfg->true_inline_prefill_lines_nv12;
-		sblk->in_rot_prefill_lines =
-			sde_cfg->true_inline_prefill_lines;
-	}
-
-	if (sde_cfg->sc_cfg.has_sys_cache) {
-		set_bit(SDE_PERF_SSPP_SYS_CACHE, &sspp->perf_features);
-		sblk->llcc_scid = sde_cfg->sc_cfg.llcc_scid;
-		sblk->llcc_slice_size =
-			sde_cfg->sc_cfg.llcc_slice_size;
-	}
-}
-
-static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
-	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
-{
-	sblk->maxupscale = MAX_UPSCALE_RATIO;
-	sblk->maxdwnscale = MAX_DOWNSCALE_RATIO;
-	sspp->id = SSPP_RGB0 + *rgb_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-			sspp->id - SSPP_VIG0);
-	sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
-	sspp->type = SSPP_TYPE_RGB;
-	set_bit(SDE_PERF_SSPP_QOS, &sspp->perf_features);
-	if (sde_cfg->vbif_qos_nlvl == 8)
-		set_bit(SDE_PERF_SSPP_QOS_8LVL, &sspp->perf_features);
-	(*rgb_count)++;
-
-	if (!prop_value)
-		return;
-
-	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
-		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
-		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
-			RGB_SCALER_OFF, 0);
-		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
-			RGB_SCALER_LEN, 0);
-		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id - SSPP_VIG0);
-	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
-		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
-		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
-			RGB_SCALER_LEN, 0);
-		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
-			SSPP_SCALE_SIZE, 0);
-		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id - SSPP_VIG0);
-	}
-
-	sblk->pcc_blk.id = SDE_SSPP_PCC;
-	if (prop_exists[RGB_PCC_PROP]) {
-		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
-			RGB_PCC_PROP, 0);
-		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
-			RGB_PCC_PROP, 1);
-		sblk->pcc_blk.len = 0;
-		set_bit(SDE_SSPP_PCC, &sspp->features);
-	}
-
-	sblk->format_list = sde_cfg->dma_formats;
-	sblk->virt_format_list = NULL;
-}
-
-static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
-	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	struct sde_prop_value *prop_value, u32 *cursor_count)
-{
-	if (!IS_SDE_MAJOR_MINOR_SAME(sde_cfg->hwversion, SDE_HW_VER_300))
-		SDE_ERROR("invalid sspp type %d, xin id %d\n",
-				sspp->type, sspp->xin_id);
-	set_bit(SDE_SSPP_CURSOR, &sspp->features);
-	sblk->maxupscale = SSPP_UNITY_SCALE;
-	sblk->maxdwnscale = SSPP_UNITY_SCALE;
-	sblk->format_list = sde_cfg->cursor_formats;
-	sblk->virt_format_list = NULL;
-	sspp->id = SSPP_CURSOR0 + *cursor_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-			sspp->id - SSPP_VIG0);
-	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
-	sspp->type = SSPP_TYPE_CURSOR;
-	(*cursor_count)++;
-}
-
-static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
-	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	bool prop_exists[][DMA_PROP_MAX], struct sde_prop_value *prop_value,
-	u32 *dma_count, u32 dgm_count)
-{
-	u32 i = 0;
-
-	sblk->maxupscale = SSPP_UNITY_SCALE;
-	sblk->maxdwnscale = SSPP_UNITY_SCALE;
-	sblk->format_list = sde_cfg->dma_formats;
-	sblk->virt_format_list = sde_cfg->dma_formats;
-	sspp->id = SSPP_DMA0 + *dma_count;
-	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-			sspp->id - SSPP_VIG0);
-	sspp->type = SSPP_TYPE_DMA;
-	set_bit(SDE_PERF_SSPP_QOS, &sspp->perf_features);
-	if (sde_cfg->vbif_qos_nlvl == 8)
-		set_bit(SDE_PERF_SSPP_QOS_8LVL, &sspp->perf_features);
-	(*dma_count)++;
-
-	if (!prop_value)
-		return;
-
-	sblk->num_igc_blk = dgm_count;
-	sblk->num_gc_blk = dgm_count;
-	sblk->num_dgm_csc_blk = dgm_count;
-	for (i = 0; i < dgm_count; i++) {
-		if (prop_exists[i][DMA_IGC_PROP]) {
-			sblk->igc_blk[i].id = SDE_SSPP_DMA_IGC;
-			snprintf(sblk->igc_blk[i].name, SDE_HW_BLK_NAME_LEN,
-				"sspp_dma_igc%u", sspp->id - SSPP_DMA0);
-			sblk->igc_blk[i].base = PROP_VALUE_ACCESS(
-				&prop_value[i * DMA_PROP_MAX], DMA_IGC_PROP, 0);
-			sblk->igc_blk[i].version = PROP_VALUE_ACCESS(
-				&prop_value[i * DMA_PROP_MAX], DMA_IGC_PROP, 1);
-			sblk->igc_blk[i].len = 0;
-			set_bit(SDE_SSPP_DMA_IGC, &sspp->features);
-		}
-
-		if (prop_exists[i][DMA_GC_PROP]) {
-			sblk->gc_blk[i].id = SDE_SSPP_DMA_GC;
-			snprintf(sblk->gc_blk[0].name, SDE_HW_BLK_NAME_LEN,
-				"sspp_dma_gc%u", sspp->id - SSPP_DMA0);
-			sblk->gc_blk[i].base = PROP_VALUE_ACCESS(
-				&prop_value[i * DMA_PROP_MAX], DMA_GC_PROP, 0);
-			sblk->gc_blk[i].version = PROP_VALUE_ACCESS(
-				&prop_value[i * DMA_PROP_MAX], DMA_GC_PROP, 1);
-			sblk->gc_blk[i].len = 0;
-			set_bit(SDE_SSPP_DMA_GC, &sspp->features);
-		}
-
-		if (PROP_VALUE_ACCESS(&prop_value[i * DMA_PROP_MAX],
-			DMA_DGM_INVERSE_PMA, 0))
-			set_bit(SDE_SSPP_DGM_INVERSE_PMA, &sspp->features);
-
-		if (prop_exists[i][DMA_CSC_OFF]) {
-			sblk->dgm_csc_blk[i].id = SDE_SSPP_DGM_CSC;
-			snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
-				"sspp_dgm_csc%u", sspp->id - SSPP_DMA0);
-			set_bit(SDE_SSPP_DGM_CSC, &sspp->features);
-			sblk->dgm_csc_blk[i].base = PROP_VALUE_ACCESS(
-				&prop_value[i * DMA_PROP_MAX], DMA_CSC_OFF, 0);
-		}
-	}
-}
-
-static int sde_dgm_parse_dt(struct device_node *np, u32 index,
-	struct sde_prop_value *prop_value, bool *prop_exists)
-{
-	int rc = 0;
-	u32 child_idx = 0;
-	int prop_count[DMA_PROP_MAX] = {0};
-	struct device_node *dgm_snp = NULL;
-
-	for_each_child_of_node(np, dgm_snp) {
-		if (index != child_idx++)
-			continue;
-		rc = _validate_dt_entry(dgm_snp, dma_prop, ARRAY_SIZE(dma_prop),
-				prop_count, NULL);
-		if (rc)
-			return rc;
-		rc = _read_dt_entry(dgm_snp, dma_prop, ARRAY_SIZE(dma_prop),
-				prop_count, prop_exists,
-				prop_value);
-	}
-
-	return rc;
-}
-
-static int sde_sspp_parse_dt(struct device_node *np,
-	struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[SSPP_PROP_MAX], off_count, i, j;
-	int vig_prop_count[VIG_PROP_MAX], rgb_prop_count[RGB_PROP_MAX];
-	bool prop_exists[SSPP_PROP_MAX], vig_prop_exists[VIG_PROP_MAX];
-	bool rgb_prop_exists[RGB_PROP_MAX];
-	bool dgm_prop_exists[SSPP_SUBBLK_COUNT_MAX][DMA_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	struct sde_prop_value *vig_prop_value = NULL, *rgb_prop_value = NULL;
-	struct sde_prop_value *dgm_prop_value = NULL;
-	const char *type;
-	struct sde_sspp_cfg *sspp;
-	struct sde_sspp_sub_blks *sblk;
-	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
-	u32 dgm_count = 0;
-	struct device_node *snp = NULL;
-
-	prop_value = kcalloc(SSPP_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
-					prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	sde_cfg->sspp_count = off_count;
-
-	/* get vig feature dt properties if they exist */
-	snp = of_get_child_by_name(np, sspp_prop[SSPP_VIG_BLOCKS].prop_name);
-	if (snp) {
-		vig_prop_value = kcalloc(VIG_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-		if (!vig_prop_value) {
-			rc = -ENOMEM;
-			goto end;
-		}
-		rc = _validate_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
-			vig_prop_count, NULL);
-		if (rc)
-			goto end;
-		rc = _read_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
-				vig_prop_count, vig_prop_exists,
-				vig_prop_value);
-	}
-
-	/* get rgb feature dt properties if they exist */
-	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
-	if (snp) {
-		rgb_prop_value = kcalloc(RGB_PROP_MAX,
-					sizeof(struct sde_prop_value),
-					GFP_KERNEL);
-		if (!rgb_prop_value) {
-			rc = -ENOMEM;
-			goto end;
-		}
-		rc = _validate_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
-			rgb_prop_count, NULL);
-		if (rc)
-			goto end;
-		rc = _read_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
-				rgb_prop_count, rgb_prop_exists,
-				rgb_prop_value);
-	}
-
-	/* get dma feature dt properties if they exist */
-	snp = of_get_child_by_name(np, sspp_prop[SSPP_DMA_BLOCKS].prop_name);
-	if (snp) {
-		dgm_count = of_get_child_count(snp);
-		if (dgm_count > 0 && dgm_count <= SSPP_SUBBLK_COUNT_MAX) {
-			dgm_prop_value = kzalloc(dgm_count * DMA_PROP_MAX *
-					sizeof(struct sde_prop_value),
-					GFP_KERNEL);
-			if (!dgm_prop_value) {
-				rc = -ENOMEM;
-				goto end;
-			}
-			for (i = 0; i < dgm_count; i++)
-				sde_dgm_parse_dt(snp, i,
-					&dgm_prop_value[i * DMA_PROP_MAX],
-					&dgm_prop_exists[i][0]);
-		}
-	}
-
-	for (i = 0; i < off_count; i++) {
-		sspp = sde_cfg->sspp + i;
-		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-		if (!sblk) {
-			rc = -ENOMEM;
-			/* catalog deinit will release the allocated blocks */
-			goto end;
-		}
-		sspp->sblk = sblk;
-
-		sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
-		sspp->len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
-		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
-
-		set_bit(SDE_SSPP_SRC, &sspp->features);
-
-		if (sde_cfg->has_cdp)
-			set_bit(SDE_PERF_SSPP_CDP, &sspp->perf_features);
-
-		if (sde_cfg->ts_prefill_rev == 1) {
-			set_bit(SDE_PERF_SSPP_TS_PREFILL, &sspp->perf_features);
-		} else if (sde_cfg->ts_prefill_rev == 2) {
-			set_bit(SDE_PERF_SSPP_TS_PREFILL, &sspp->perf_features);
-			set_bit(SDE_PERF_SSPP_TS_PREFILL_REC1,
-					&sspp->perf_features);
-		}
-
-		sblk->smart_dma_priority =
-			PROP_VALUE_ACCESS(prop_value, SSPP_SMART_DMA, i);
-
-		if (sblk->smart_dma_priority && sde_cfg->smart_dma_rev)
-			set_bit(sde_cfg->smart_dma_rev, &sspp->features);
-
-		sblk->src_blk.id = SDE_SSPP_SRC;
-
-		of_property_read_string_index(np,
-				sspp_prop[SSPP_TYPE].prop_name, i, &type);
-		if (!strcmp(type, "vig")) {
-			_sde_sspp_setup_vig(sde_cfg, sspp, sblk,
-				vig_prop_exists, vig_prop_value, &vig_count);
-		} else if (!strcmp(type, "rgb")) {
-			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk,
-				rgb_prop_exists, rgb_prop_value, &rgb_count);
-		} else if (!strcmp(type, "cursor")) {
-			/* No prop values for cursor pipes */
-			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
-								&cursor_count);
-		} else if (!strcmp(type, "dma")) {
-			_sde_sspp_setup_dma(sde_cfg, sspp, sblk,
-				dgm_prop_exists, dgm_prop_value, &dma_count,
-				dgm_count);
-		} else {
-			SDE_ERROR("invalid sspp type:%s\n", type);
-			rc = -EINVAL;
-			goto end;
-		}
-
-		if (sde_cfg->uidle_cfg.uidle_rev)
-			set_bit(SDE_PERF_SSPP_UIDLE, &sspp->perf_features);
-
-		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
-				sspp->id - SSPP_VIG0);
-
-		if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) {
-			SDE_ERROR("%s: invalid clk ctrl: %d\n",
-					sblk->src_blk.name, sspp->clk_ctrl);
-			rc = -EINVAL;
-			goto end;
-		}
-
-		if (sde_cfg->has_decimation) {
-			sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
-			sblk->maxvdeciexp = MAX_VERT_DECIMATION;
-		} else {
-			sblk->maxhdeciexp = 0;
-			sblk->maxvdeciexp = 0;
-		}
-
-		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
-		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
-		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
-
-		if (PROP_VALUE_ACCESS(prop_value, SSPP_EXCL_RECT, i) == 1)
-			set_bit(SDE_SSPP_EXCL_RECT, &sspp->features);
-
-		if (prop_exists[SSPP_MAX_PER_PIPE_BW])
-			sblk->max_per_pipe_bw = PROP_VALUE_ACCESS(prop_value,
-					SSPP_MAX_PER_PIPE_BW, i);
-		else
-			sblk->max_per_pipe_bw = DEFAULT_MAX_PER_PIPE_BW;
-
-		for (j = 0; j < sde_cfg->mdp_count; j++) {
-			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
-				PROP_BITVALUE_ACCESS(prop_value,
-						SSPP_CLK_CTRL, i, 0);
-			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
-				PROP_BITVALUE_ACCESS(prop_value,
-						SSPP_CLK_CTRL, i, 1);
-		}
-
-		SDE_DEBUG(
-			"xin:%d ram:%d clk%d:%x/%d\n",
-			sspp->xin_id,
-			sblk->pixel_ram_size,
-			sspp->clk_ctrl,
-			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
-			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].bit_off);
-	}
-
-end:
-	kfree(prop_value);
-	kfree(vig_prop_value);
-	kfree(rgb_prop_value);
-	kfree(dgm_prop_value);
-	return rc;
-}
-
-static int sde_ctl_parse_dt(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[HW_PROP_MAX], i;
-	bool prop_exists[HW_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	struct sde_ctl_cfg *ctl;
-	u32 off_count;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument input param\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(HW_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->ctl_count = off_count;
-
-	rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		const char *disp_pref = NULL;
-
-		ctl = sde_cfg->ctl + i;
-		ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
-		ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
-		ctl->id = CTL_0 + i;
-		snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u",
-				ctl->id - CTL_0);
-
-		of_property_read_string_index(np,
-				ctl_prop[HW_DISP].prop_name, i, &disp_pref);
-		if (disp_pref && !strcmp(disp_pref, "primary"))
-			set_bit(SDE_CTL_PRIMARY_PREF, &ctl->features);
-		if (i < MAX_SPLIT_DISPLAY_CTL)
-			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
-		if (i < MAX_PP_SPLIT_DISPLAY_CTL)
-			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
-		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-			set_bit(SDE_CTL_ACTIVE_CFG, &ctl->features);
-		if (IS_SDE_UIDLE_REV_100(sde_cfg->uidle_cfg.uidle_rev))
-			set_bit(SDE_CTL_UIDLE, &ctl->features);
-	}
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm)
-{
-	u32 i;
-
-	for (i = 0; i < sde_cfg->mixer_count; i++) {
-		clear_bit(SDE_DISP_PRIMARY_PREF,
-				&sde_cfg->mixer[i].features);
-		if (i < num_lm)
-			set_bit(SDE_DISP_PRIMARY_PREF,
-					&sde_cfg->mixer[i].features);
-	}
-}
-
-static int sde_mixer_parse_dt(struct device_node *np,
-						struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[MIXER_PROP_MAX], i, j;
-	int blocks_prop_count[MIXER_BLOCKS_PROP_MAX];
-	int blend_prop_count[MIXER_BLEND_PROP_MAX];
-	bool prop_exists[MIXER_PROP_MAX];
-	bool blocks_prop_exists[MIXER_BLOCKS_PROP_MAX];
-	bool blend_prop_exists[MIXER_BLEND_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
-	struct sde_prop_value *blend_prop_value = NULL;
-	u32 off_count, blend_off_count, max_blendstages, lm_pair_mask;
-	struct sde_lm_cfg *mixer;
-	struct sde_lm_sub_blks *sblk;
-	int pp_count, dspp_count, ds_count, mixer_count;
-	u32 pp_idx, dspp_idx, ds_idx;
-	u32 mixer_base;
-	struct device_node *snp = NULL;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument input param\n");
-		rc = -EINVAL;
-		goto end;
-	}
-	max_blendstages = sde_cfg->max_mixer_blendstages;
-
-	prop_value = kcalloc(MIXER_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	pp_count = sde_cfg->pingpong_count;
-	dspp_count = sde_cfg->dspp_count;
-	ds_count = sde_cfg->ds_count;
-
-	/* get mixer feature dt properties if they exist */
-	snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
-	if (snp) {
-		blocks_prop_value = kzalloc(MIXER_BLOCKS_PROP_MAX *
-				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
-				GFP_KERNEL);
-		if (!blocks_prop_value) {
-			rc = -ENOMEM;
-			goto end;
-		}
-		rc = _validate_dt_entry(snp, mixer_blocks_prop,
-			ARRAY_SIZE(mixer_blocks_prop), blocks_prop_count, NULL);
-		if (rc)
-			goto end;
-		rc = _read_dt_entry(snp, mixer_blocks_prop,
-				ARRAY_SIZE(mixer_blocks_prop),
-				blocks_prop_count, blocks_prop_exists,
-				blocks_prop_value);
-	}
-
-	/* get the blend_op register offsets */
-	blend_prop_value = kzalloc(MIXER_BLEND_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!blend_prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-	rc = _validate_dt_entry(np, mixer_blend_prop,
-		ARRAY_SIZE(mixer_blend_prop), blend_prop_count,
-		&blend_off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, mixer_blend_prop, ARRAY_SIZE(mixer_blend_prop),
-		blend_prop_count, blend_prop_exists, blend_prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0, mixer_count = 0, pp_idx = 0, dspp_idx = 0,
-			ds_idx = 0; i < off_count; i++) {
-		const char *disp_pref = NULL;
-		const char *cwb_pref = NULL;
-
-		mixer_base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
-		if (!mixer_base)
-			continue;
-
-		mixer = sde_cfg->mixer + mixer_count;
-
-		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-		if (!sblk) {
-			rc = -ENOMEM;
-			/* catalog deinit will release the allocated blocks */
-			goto end;
-		}
-		mixer->sblk = sblk;
-
-		mixer->base = mixer_base;
-		mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
-		mixer->id = LM_0 + i;
-		snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u",
-				mixer->id - LM_0);
-
-		if (!prop_exists[MIXER_LEN])
-			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-		lm_pair_mask = PROP_VALUE_ACCESS(prop_value,
-				MIXER_PAIR_MASK, i);
-		if (lm_pair_mask)
-			mixer->lm_pair_mask = 1 << lm_pair_mask;
-
-		sblk->maxblendstages = max_blendstages;
-		sblk->maxwidth = sde_cfg->max_mixer_width;
-
-		for (j = 0; j < blend_off_count; j++)
-			sblk->blendstage_base[j] =
-				PROP_VALUE_ACCESS(blend_prop_value,
-						MIXER_BLEND_OP_OFF, j);
-
-		if (sde_cfg->has_src_split)
-			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
-		if (sde_cfg->has_dim_layer)
-			set_bit(SDE_DIM_LAYER, &mixer->features);
-
-		of_property_read_string_index(np,
-			mixer_prop[MIXER_DISP].prop_name, i, &disp_pref);
-		if (disp_pref && !strcmp(disp_pref, "primary"))
-			set_bit(SDE_DISP_PRIMARY_PREF, &mixer->features);
-
-		of_property_read_string_index(np,
-			mixer_prop[MIXER_CWB].prop_name, i, &cwb_pref);
-		if (cwb_pref && !strcmp(cwb_pref, "cwb"))
-			set_bit(SDE_DISP_CWB_PREF, &mixer->features);
-
-		mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
-							: PINGPONG_MAX;
-		mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
-							: DSPP_MAX;
-		mixer->ds = ds_count > 0 ? ds_idx + DS_0 : DS_MAX;
-		pp_count--;
-		dspp_count--;
-		ds_count--;
-		pp_idx++;
-		dspp_idx++;
-		ds_idx++;
-
-		mixer_count++;
-
-		sblk->gc.id = SDE_MIXER_GC;
-		if (blocks_prop_value && blocks_prop_exists[MIXER_GC_PROP]) {
-			sblk->gc.base = PROP_VALUE_ACCESS(blocks_prop_value,
-					MIXER_GC_PROP, 0);
-			sblk->gc.version = PROP_VALUE_ACCESS(blocks_prop_value,
-					MIXER_GC_PROP, 1);
-			sblk->gc.len = 0;
-			set_bit(SDE_MIXER_GC, &mixer->features);
-		}
-	}
-	sde_cfg->mixer_count = mixer_count;
-
-end:
-	kfree(prop_value);
-	kfree(blocks_prop_value);
-	kfree(blend_prop_value);
-	return rc;
-}
-
-static int sde_intf_parse_dt(struct device_node *np,
-						struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[INTF_PROP_MAX], i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[INTF_PROP_MAX];
-	u32 off_count;
-	u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
-	const char *type;
-	struct sde_intf_cfg *intf;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(INTF_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->intf_count = off_count;
-
-	rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		intf = sde_cfg->intf + i;
-		intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
-		intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
-		intf->id = INTF_0 + i;
-		snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u",
-				intf->id - INTF_0);
-
-		if (!prop_exists[INTF_LEN])
-			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-		intf->prog_fetch_lines_worst_case =
-				!prop_exists[INTF_PREFETCH] ?
-				sde_cfg->perf.min_prefill_lines :
-				PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
-
-		of_property_read_string_index(np,
-				intf_prop[INTF_TYPE].prop_name, i, &type);
-		if (!strcmp(type, "dsi")) {
-			intf->type = INTF_DSI;
-			intf->controller_id = dsi_count;
-			dsi_count++;
-		} else if (!strcmp(type, "hdmi")) {
-			intf->type = INTF_HDMI;
-			intf->controller_id = hdmi_count;
-			hdmi_count++;
-		} else if (!strcmp(type, "dp")) {
-			intf->type = INTF_DP;
-			intf->controller_id = dp_count;
-			dp_count++;
-		} else {
-			intf->type = INTF_NONE;
-			intf->controller_id = none_count;
-			none_count++;
-		}
-		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-			set_bit(SDE_INTF_INPUT_CTRL, &intf->features);
-
-		if (IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
-				SDE_HW_VER_500) ||
-				IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
-				SDE_HW_VER_600))
-			set_bit(SDE_INTF_TE, &intf->features);
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[WB_PROP_MAX], i, j;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[WB_PROP_MAX];
-	u32 off_count;
-	struct sde_wb_cfg *wb;
-	struct sde_wb_sub_blocks *sblk;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(WB_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->wb_count = off_count;
-
-	rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		wb = sde_cfg->wb + i;
-		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-		if (!sblk) {
-			rc = -ENOMEM;
-			/* catalog deinit will release the allocated blocks */
-			goto end;
-		}
-		wb->sblk = sblk;
-
-		wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
-		wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
-		snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u",
-				wb->id - WB_0);
-		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
-			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
-		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
-
-		if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) {
-			SDE_ERROR("%s: invalid clk ctrl: %d\n",
-					wb->name, wb->clk_ctrl);
-			rc = -EINVAL;
-			goto end;
-		}
-
-		if (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion),
-				SDE_HW_VER_170))
-			wb->vbif_idx = VBIF_NRT;
-		else
-			wb->vbif_idx = VBIF_RT;
-
-		wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
-		if (!prop_exists[WB_LEN])
-			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
-		sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
-
-		if (wb->id >= LINE_MODE_WB_OFFSET)
-			set_bit(SDE_WB_LINE_MODE, &wb->features);
-		else
-			set_bit(SDE_WB_BLOCK_MODE, &wb->features);
-		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
-		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
-
-		if (sde_cfg->has_cdp)
-			set_bit(SDE_WB_CDP, &wb->features);
-
-		set_bit(SDE_WB_QOS, &wb->features);
-		if (sde_cfg->vbif_qos_nlvl == 8)
-			set_bit(SDE_WB_QOS_8LVL, &wb->features);
-
-		if (sde_cfg->has_wb_ubwc)
-			set_bit(SDE_WB_UBWC, &wb->features);
-
-		set_bit(SDE_WB_XY_ROI_OFFSET, &wb->features);
-
-		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-			set_bit(SDE_WB_INPUT_CTRL, &wb->features);
-
-		if (sde_cfg->has_cwb_support) {
-			set_bit(SDE_WB_HAS_CWB, &wb->features);
-			if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-				set_bit(SDE_WB_CWB_CTRL, &wb->features);
-		}
-
-		for (j = 0; j < sde_cfg->mdp_count; j++) {
-			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
-				PROP_BITVALUE_ACCESS(prop_value,
-						WB_CLK_CTRL, i, 0);
-			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
-				PROP_BITVALUE_ACCESS(prop_value,
-						WB_CLK_CTRL, i, 1);
-		}
-
-		wb->format_list = sde_cfg->wb_formats;
-
-		SDE_DEBUG(
-			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
-			wb->id - WB_0,
-			wb->xin_id,
-			wb->vbif_idx,
-			wb->clk_ctrl,
-			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].reg_off,
-			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].bit_off);
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static void _sde_dspp_setup_blocks(struct sde_mdss_cfg *sde_cfg,
-	struct sde_dspp_cfg *dspp, struct sde_dspp_sub_blks *sblk,
-	bool *prop_exists, struct sde_prop_value *prop_value)
-{
-	sblk->igc.id = SDE_DSPP_IGC;
-	if (prop_exists[DSPP_IGC_PROP]) {
-		sblk->igc.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_IGC_PROP, 0);
-		sblk->igc.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_IGC_PROP, 1);
-		sblk->igc.len = 0;
-		set_bit(SDE_DSPP_IGC, &dspp->features);
-	}
-
-	sblk->pcc.id = SDE_DSPP_PCC;
-	if (prop_exists[DSPP_PCC_PROP]) {
-		sblk->pcc.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_PCC_PROP, 0);
-		sblk->pcc.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_PCC_PROP, 1);
-		sblk->pcc.len = 0;
-		set_bit(SDE_DSPP_PCC, &dspp->features);
-	}
-
-	sblk->gc.id = SDE_DSPP_GC;
-	if (prop_exists[DSPP_GC_PROP]) {
-		sblk->gc.base = PROP_VALUE_ACCESS(prop_value, DSPP_GC_PROP, 0);
-		sblk->gc.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_GC_PROP, 1);
-		sblk->gc.len = 0;
-		set_bit(SDE_DSPP_GC, &dspp->features);
-	}
-
-	sblk->gamut.id = SDE_DSPP_GAMUT;
-	if (prop_exists[DSPP_GAMUT_PROP]) {
-		sblk->gamut.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_GAMUT_PROP, 0);
-		sblk->gamut.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_GAMUT_PROP, 1);
-		sblk->gamut.len = 0;
-		set_bit(SDE_DSPP_GAMUT, &dspp->features);
-	}
-
-	sblk->dither.id = SDE_DSPP_DITHER;
-	if (prop_exists[DSPP_DITHER_PROP]) {
-		sblk->dither.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_DITHER_PROP, 0);
-		sblk->dither.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_DITHER_PROP, 1);
-		sblk->dither.len = 0;
-		set_bit(SDE_DSPP_DITHER, &dspp->features);
-	}
-
-	sblk->hist.id = SDE_DSPP_HIST;
-	if (prop_exists[DSPP_HIST_PROP]) {
-		sblk->hist.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_HIST_PROP, 0);
-		sblk->hist.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_HIST_PROP, 1);
-		sblk->hist.len = 0;
-		set_bit(SDE_DSPP_HIST, &dspp->features);
-	}
-
-	sblk->hsic.id = SDE_DSPP_HSIC;
-	if (prop_exists[DSPP_HSIC_PROP]) {
-		sblk->hsic.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_HSIC_PROP, 0);
-		sblk->hsic.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_HSIC_PROP, 1);
-		sblk->hsic.len = 0;
-		set_bit(SDE_DSPP_HSIC, &dspp->features);
-	}
-
-	sblk->memcolor.id = SDE_DSPP_MEMCOLOR;
-	if (prop_exists[DSPP_MEMCOLOR_PROP]) {
-		sblk->memcolor.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_MEMCOLOR_PROP, 0);
-		sblk->memcolor.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_MEMCOLOR_PROP, 1);
-		sblk->memcolor.len = 0;
-		set_bit(SDE_DSPP_MEMCOLOR, &dspp->features);
-	}
-
-	sblk->sixzone.id = SDE_DSPP_SIXZONE;
-	if (prop_exists[DSPP_SIXZONE_PROP]) {
-		sblk->sixzone.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_SIXZONE_PROP, 0);
-		sblk->sixzone.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_SIXZONE_PROP, 1);
-		sblk->sixzone.len = 0;
-		set_bit(SDE_DSPP_SIXZONE, &dspp->features);
-	}
-
-	sblk->vlut.id = SDE_DSPP_VLUT;
-	if (prop_exists[DSPP_VLUT_PROP]) {
-		sblk->vlut.base = PROP_VALUE_ACCESS(prop_value,
-			DSPP_VLUT_PROP, 0);
-		sblk->vlut.version = PROP_VALUE_ACCESS(prop_value,
-			DSPP_VLUT_PROP, 1);
-		sblk->sixzone.len = 0;
-		set_bit(SDE_DSPP_VLUT, &dspp->features);
-	}
-}
-
-static int sde_rot_parse_dt(struct device_node *np,
-	struct sde_mdss_cfg *sde_cfg)
-{
-	struct platform_device *pdev;
-	struct of_phandle_args phargs;
-	struct llcc_slice_desc *slice;
-	int rc = 0;
-
-	rc = of_parse_phandle_with_args(np,
-		"qcom,sde-inline-rotator", "#list-cells",
-		0, &phargs);
-
-	if (rc) {
-		/*
-		 * This is not a fatal error, system cache can be disabled
-		 * in device tree, anyways recommendation is to have it
-		 * enabled, so print an error but don't fail
-		 */
-		SDE_DEBUG("sys cache will be disabled rc:%d\n", rc);
-		rc = 0;
-		goto exit;
-	}
-
-	if (!phargs.np || !phargs.args_count) {
-		SDE_ERROR("wrong phandle args %d %d\n",
-			!phargs.np, !phargs.args_count);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	pdev = of_find_device_by_node(phargs.np);
-	if (!pdev) {
-		SDE_ERROR("invalid sde rotator node\n");
-		goto exit;
-	}
-
-	slice = llcc_slice_getd(LLCC_ROTATOR);
-	if (IS_ERR_OR_NULL(slice))  {
-		SDE_ERROR("failed to get rotator slice!\n");
-		rc = -EINVAL;
-		goto cleanup;
-	}
-
-	sde_cfg->sc_cfg.llcc_scid = llcc_get_slice_id(slice);
-	sde_cfg->sc_cfg.llcc_slice_size = llcc_get_slice_size(slice);
-	llcc_slice_putd(slice);
-
-	sde_cfg->sc_cfg.has_sys_cache = true;
-
-	SDE_DEBUG("rotator llcc scid:%d slice_size:%zukb\n",
-		sde_cfg->sc_cfg.llcc_scid, sde_cfg->sc_cfg.llcc_slice_size);
-cleanup:
-	of_node_put(phargs.np);
-exit:
-	return rc;
-}
-
-static int sde_dspp_top_parse_dt(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[DSPP_TOP_PROP_MAX];
-	bool prop_exists[DSPP_TOP_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	u32 off_count;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(DSPP_TOP_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
-		prop_count, prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	if (off_count != 1) {
-		SDE_ERROR("invalid dspp_top off_count:%d\n", off_count);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	sde_cfg->dspp_top.base =
-		PROP_VALUE_ACCESS(prop_value, DSPP_TOP_OFF, 0);
-	sde_cfg->dspp_top.len =
-		PROP_VALUE_ACCESS(prop_value, DSPP_TOP_SIZE, 0);
-	snprintf(sde_cfg->dspp_top.name, SDE_HW_BLK_NAME_LEN, "dspp_top");
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int sde_dspp_parse_dt(struct device_node *np,
-						struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[DSPP_PROP_MAX], i;
-	int ad_prop_count[AD_PROP_MAX];
-	int ltm_prop_count[LTM_PROP_MAX];
-	bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
-	bool ltm_prop_exists[LTM_PROP_MAX];
-	bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
-	struct sde_prop_value *ad_prop_value = NULL, *ltm_prop_value = NULL;
-	int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
-	u32 off_count, ad_off_count, ltm_off_count;
-	struct sde_dspp_cfg *dspp;
-	struct sde_dspp_sub_blks *sblk;
-	struct device_node *snp = NULL;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(DSPP_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->dspp_count = off_count;
-
-	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	/* Parse AD dtsi entries */
-	ad_prop_value = kcalloc(AD_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!ad_prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-	rc = _validate_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop),
-		ad_prop_count, &ad_off_count);
-	if (rc)
-		goto end;
-	rc = _read_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop), ad_prop_count,
-		ad_prop_exists, ad_prop_value);
-	if (rc)
-		goto end;
-
-	/* Parse LTM dtsi entries */
-	ltm_prop_value = kcalloc(LTM_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!ltm_prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-	rc = _validate_dt_entry(np, ltm_prop, ARRAY_SIZE(ltm_prop),
-		ltm_prop_count, &ltm_off_count);
-	if (rc)
-		goto end;
-	rc = _read_dt_entry(np, ltm_prop, ARRAY_SIZE(ltm_prop), ltm_prop_count,
-		ltm_prop_exists, ltm_prop_value);
-	if (rc)
-		goto end;
-
-	/* get DSPP feature dt properties if they exist */
-	snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
-	if (snp) {
-		blocks_prop_value = kzalloc(DSPP_BLOCKS_PROP_MAX *
-				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
-				GFP_KERNEL);
-		if (!blocks_prop_value) {
-			rc = -ENOMEM;
-			goto end;
-		}
-		rc = _validate_dt_entry(snp, dspp_blocks_prop,
-			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count, NULL);
-		if (rc)
-			goto end;
-		rc = _read_dt_entry(snp, dspp_blocks_prop,
-			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count,
-			blocks_prop_exists, blocks_prop_value);
-		if (rc)
-			goto end;
-	}
-
-	for (i = 0; i < off_count; i++) {
-		dspp = sde_cfg->dspp + i;
-		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
-		dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0);
-		dspp->id = DSPP_0 + i;
-		snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u",
-				dspp->id - DSPP_0);
-
-		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-		if (!sblk) {
-			rc = -ENOMEM;
-			/* catalog deinit will release the allocated blocks */
-			goto end;
-		}
-		dspp->sblk = sblk;
-
-		if (blocks_prop_value)
-			_sde_dspp_setup_blocks(sde_cfg, dspp, sblk,
-					blocks_prop_exists, blocks_prop_value);
-
-		sblk->ad.id = SDE_DSPP_AD;
-		sde_cfg->ad_count = ad_off_count;
-		if (ad_prop_value && (i < ad_off_count) &&
-		    ad_prop_exists[AD_OFF]) {
-			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
-				AD_OFF, i);
-			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
-				AD_VERSION, 0);
-			set_bit(SDE_DSPP_AD, &dspp->features);
-		}
-
-		sblk->ltm.id = SDE_DSPP_LTM;
-		sde_cfg->ltm_count = ltm_off_count;
-		if (ltm_prop_value && (i < ltm_off_count) &&
-		    ltm_prop_exists[LTM_OFF]) {
-			sblk->ltm.base = PROP_VALUE_ACCESS(ltm_prop_value,
-				LTM_OFF, i);
-			sblk->ltm.version = PROP_VALUE_ACCESS(ltm_prop_value,
-				LTM_VERSION, 0);
-			set_bit(SDE_DSPP_LTM, &dspp->features);
-		}
-
-	}
-
-end:
-	kfree(prop_value);
-	kfree(ad_prop_value);
-	kfree(ltm_prop_value);
-	kfree(blocks_prop_value);
-	return rc;
-}
-
-static int sde_ds_parse_dt(struct device_node *np,
-			struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[DS_PROP_MAX], top_prop_count[DS_TOP_PROP_MAX], i;
-	struct sde_prop_value *prop_value = NULL, *top_prop_value = NULL;
-	bool prop_exists[DS_PROP_MAX], top_prop_exists[DS_TOP_PROP_MAX];
-	u32 off_count = 0, top_off_count = 0;
-	struct sde_ds_cfg *ds;
-	struct sde_ds_top_cfg *ds_top = NULL;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (!sde_cfg->mdp[0].has_dest_scaler) {
-		SDE_DEBUG("dest scaler feature not supported\n");
-		rc = 0;
-		goto end;
-	}
-
-	/* Parse the dest scaler top register offset and capabilities */
-	top_prop_value = kzalloc(DS_TOP_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!top_prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, ds_top_prop,
-				ARRAY_SIZE(ds_top_prop),
-				top_prop_count, &top_off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, ds_top_prop,
-			ARRAY_SIZE(ds_top_prop), top_prop_count,
-			top_prop_exists, top_prop_value);
-	if (rc)
-		goto end;
-
-	/* Parse the offset of each dest scaler block */
-	prop_value = kcalloc(DS_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, ds_prop, ARRAY_SIZE(ds_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->ds_count = off_count;
-
-	rc = _read_dt_entry(np, ds_prop, ARRAY_SIZE(ds_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	if (!off_count)
-		goto end;
-
-	ds_top = kzalloc(sizeof(struct sde_ds_top_cfg), GFP_KERNEL);
-	if (!ds_top) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	ds_top->id = DS_TOP;
-	snprintf(ds_top->name, SDE_HW_BLK_NAME_LEN, "ds_top_%u",
-		ds_top->id - DS_TOP);
-	ds_top->base = PROP_VALUE_ACCESS(top_prop_value, DS_TOP_OFF, 0);
-	ds_top->len = PROP_VALUE_ACCESS(top_prop_value, DS_TOP_LEN, 0);
-	ds_top->maxupscale = MAX_UPSCALE_RATIO;
-
-	ds_top->maxinputwidth = PROP_VALUE_ACCESS(top_prop_value,
-			DS_TOP_INPUT_LINEWIDTH, 0);
-	if (!top_prop_exists[DS_TOP_INPUT_LINEWIDTH])
-		ds_top->maxinputwidth = DEFAULT_SDE_LINE_WIDTH;
-
-	ds_top->maxoutputwidth = PROP_VALUE_ACCESS(top_prop_value,
-			DS_TOP_OUTPUT_LINEWIDTH, 0);
-	if (!top_prop_exists[DS_TOP_OUTPUT_LINEWIDTH])
-		ds_top->maxoutputwidth = DEFAULT_SDE_OUTPUT_LINE_WIDTH;
-
-	for (i = 0; i < off_count; i++) {
-		ds = sde_cfg->ds + i;
-		ds->top = ds_top;
-		ds->base = PROP_VALUE_ACCESS(prop_value, DS_OFF, i);
-		ds->id = DS_0 + i;
-		ds->len = PROP_VALUE_ACCESS(prop_value, DS_LEN, 0);
-		snprintf(ds->name, SDE_HW_BLK_NAME_LEN, "ds_%u",
-			ds->id - DS_0);
-
-		if (!prop_exists[DS_LEN])
-			ds->len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-		if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3)
-			set_bit(SDE_SSPP_SCALER_QSEED3, &ds->features);
-		else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3LITE)
-			set_bit(SDE_SSPP_SCALER_QSEED3LITE, &ds->features);
-
-	}
-
-end:
-	kfree(top_prop_value);
-	kfree(prop_value);
-	return rc;
-};
-
-static int sde_dsc_parse_dt(struct device_node *np,
-			struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[MAX_BLOCKS], i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[DSC_PROP_MAX];
-	u32 off_count;
-	struct sde_dsc_cfg *dsc;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(DSC_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, dsc_prop, ARRAY_SIZE(dsc_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->dsc_count = off_count;
-
-	rc = _read_dt_entry(np, dsc_prop, ARRAY_SIZE(dsc_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		dsc = sde_cfg->dsc + i;
-		dsc->base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
-		dsc->id = DSC_0 + i;
-		dsc->len = PROP_VALUE_ACCESS(prop_value, DSC_LEN, 0);
-		snprintf(dsc->name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
-				dsc->id - DSC_0);
-
-		if (!prop_exists[DSC_LEN])
-			dsc->len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-			set_bit(SDE_DSC_OUTPUT_CTRL, &dsc->features);
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-};
-
-static int sde_cdm_parse_dt(struct device_node *np,
-				struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[HW_PROP_MAX], i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[HW_PROP_MAX];
-	u32 off_count;
-	struct sde_cdm_cfg *cdm;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(HW_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->cdm_count = off_count;
-
-	rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		cdm = sde_cfg->cdm + i;
-		cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
-		cdm->id = CDM_0 + i;
-		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
-				cdm->id - CDM_0);
-		cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
-
-		/* intf3 and wb2 for cdm block */
-		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
-		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
-
-		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
-			set_bit(SDE_CDM_INPUT_CTRL, &cdm->features);
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int sde_uidle_parse_dt(struct device_node *np,
-				struct sde_mdss_cfg *sde_cfg)
-{
-	int rc = 0, prop_count[UIDLE_PROP_MAX];
-	bool prop_exists[UIDLE_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	u32 off_count;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (!sde_cfg->uidle_cfg.uidle_rev)
-		goto end;
-
-	prop_value = kcalloc(UIDLE_PROP_MAX,
-		sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, uidle_prop, ARRAY_SIZE(uidle_prop),
-			prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, uidle_prop, ARRAY_SIZE(uidle_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	if (!prop_exists[UIDLE_LEN] || !prop_exists[UIDLE_OFF]) {
-		SDE_DEBUG("offset/len missing, will disable uidle:%d,%d\n",
-			prop_exists[UIDLE_LEN], prop_exists[UIDLE_OFF]);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	sde_cfg->uidle_cfg.id = UIDLE;
-	sde_cfg->uidle_cfg.base =
-		PROP_VALUE_ACCESS(prop_value, UIDLE_OFF, 0);
-	sde_cfg->uidle_cfg.len =
-		PROP_VALUE_ACCESS(prop_value, UIDLE_LEN, 0);
-
-	/* validate */
-	if (!sde_cfg->uidle_cfg.base || !sde_cfg->uidle_cfg.len) {
-		SDE_ERROR("invalid reg/len [%d, %d], will disable uidle\n",
-			sde_cfg->uidle_cfg.base, sde_cfg->uidle_cfg.len);
-		rc = -EINVAL;
-	}
-
-end:
-	if (rc && sde_cfg->uidle_cfg.uidle_rev) {
-		SDE_DEBUG("wrong dt entries, will disable uidle\n");
-		sde_cfg->uidle_cfg.uidle_rev = 0;
-	}
-
-	kfree(prop_value);
-	/* optional feature, so always return success */
-	return 0;
-}
-
-static int _sde_vbif_populate_ot_parsing(struct sde_vbif_cfg *vbif,
-	struct sde_prop_value *prop_value, int *prop_count)
-{
-	int j, k;
-
-	vbif->default_ot_rd_limit = PROP_VALUE_ACCESS(prop_value,
-			VBIF_DEFAULT_OT_RD_LIMIT, 0);
-	SDE_DEBUG("default_ot_rd_limit=%u\n",
-			vbif->default_ot_rd_limit);
-
-	vbif->default_ot_wr_limit = PROP_VALUE_ACCESS(prop_value,
-			VBIF_DEFAULT_OT_WR_LIMIT, 0);
-	SDE_DEBUG("default_ot_wr_limit=%u\n",
-			vbif->default_ot_wr_limit);
-
-	vbif->dynamic_ot_rd_tbl.count =
-			prop_count[VBIF_DYNAMIC_OT_RD_LIMIT] / 2;
-	SDE_DEBUG("dynamic_ot_rd_tbl.count=%u\n",
-			vbif->dynamic_ot_rd_tbl.count);
-	if (vbif->dynamic_ot_rd_tbl.count) {
-		vbif->dynamic_ot_rd_tbl.cfg = kcalloc(
-			vbif->dynamic_ot_rd_tbl.count,
-			sizeof(struct sde_vbif_dynamic_ot_cfg),
-			GFP_KERNEL);
-		if (!vbif->dynamic_ot_rd_tbl.cfg)
-			return -ENOMEM;
-	}
-
-	for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
-		vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
-			PROP_VALUE_ACCESS(prop_value,
-			VBIF_DYNAMIC_OT_RD_LIMIT, k++);
-		vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
-			PROP_VALUE_ACCESS(prop_value,
-			VBIF_DYNAMIC_OT_RD_LIMIT, k++);
-		SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
-			vbif->dynamic_ot_rd_tbl.cfg[j].pps,
-			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
-	}
-
-	vbif->dynamic_ot_wr_tbl.count =
-			prop_count[VBIF_DYNAMIC_OT_WR_LIMIT] / 2;
-	SDE_DEBUG("dynamic_ot_wr_tbl.count=%u\n",
-			vbif->dynamic_ot_wr_tbl.count);
-	if (vbif->dynamic_ot_wr_tbl.count) {
-		vbif->dynamic_ot_wr_tbl.cfg = kcalloc(
-			vbif->dynamic_ot_wr_tbl.count,
-			sizeof(struct sde_vbif_dynamic_ot_cfg),
-			GFP_KERNEL);
-		if (!vbif->dynamic_ot_wr_tbl.cfg)
-			return -ENOMEM;
-	}
-
-	for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
-		vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
-			PROP_VALUE_ACCESS(prop_value,
-			VBIF_DYNAMIC_OT_WR_LIMIT, k++);
-		vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
-			PROP_VALUE_ACCESS(prop_value,
-			VBIF_DYNAMIC_OT_WR_LIMIT, k++);
-		SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
-			vbif->dynamic_ot_wr_tbl.cfg[j].pps,
-			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
-	}
-
-	if (vbif->default_ot_rd_limit || vbif->default_ot_wr_limit ||
-			vbif->dynamic_ot_rd_tbl.count ||
-			vbif->dynamic_ot_wr_tbl.count)
-		set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
-
-	return 0;
-}
-
-static int _sde_vbif_populate_qos_parsing(struct sde_mdss_cfg *sde_cfg,
-	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value,
-	int *prop_count)
-{
-	int i, j;
-	int prop_index = VBIF_QOS_RT_REMAP;
-
-	for (i = VBIF_RT_CLIENT;
-			((i < VBIF_MAX_CLIENT) && (prop_index < VBIF_PROP_MAX));
-				i++, prop_index++) {
-		vbif->qos_tbl[i].npriority_lvl = prop_count[prop_index];
-		SDE_DEBUG("qos_tbl[%d].npriority_lvl=%u\n",
-				i, vbif->qos_tbl[i].npriority_lvl);
-
-		if (vbif->qos_tbl[i].npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-			vbif->qos_tbl[i].priority_lvl = kcalloc(
-					vbif->qos_tbl[i].npriority_lvl,
-					sizeof(u32), GFP_KERNEL);
-			if (!vbif->qos_tbl[i].priority_lvl)
-				return -ENOMEM;
-		} else if (vbif->qos_tbl[i].npriority_lvl) {
-			vbif->qos_tbl[i].npriority_lvl = 0;
-			vbif->qos_tbl[i].priority_lvl = NULL;
-			SDE_ERROR("invalid qos table for client:%d, prop:%d\n",
-					i, prop_index);
-		}
-
-		for (j = 0; j < vbif->qos_tbl[i].npriority_lvl; j++) {
-			vbif->qos_tbl[i].priority_lvl[j] =
-				PROP_VALUE_ACCESS(prop_value, prop_index, j);
-			SDE_DEBUG("client:%d, prop:%d, lvl[%d]=%u\n",
-					i, prop_index, j,
-					vbif->qos_tbl[i].priority_lvl[j]);
-		}
-
-		if (vbif->qos_tbl[i].npriority_lvl)
-			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
-	}
-
-	return 0;
-}
-
-static int _sde_vbif_populate(struct sde_mdss_cfg *sde_cfg,
-	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value,
-	int *prop_count, u32 vbif_len, int i)
-{
-	int j, k, rc;
-
-	vbif = sde_cfg->vbif + i;
-	vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
-	vbif->len = vbif_len;
-	vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
-	snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u",
-			vbif->id - VBIF_0);
-
-	SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
-
-	vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;
-
-	rc = _sde_vbif_populate_ot_parsing(vbif, prop_value, prop_count);
-	if (rc)
-		return rc;
-
-	rc = _sde_vbif_populate_qos_parsing(sde_cfg, vbif, prop_value,
-			prop_count);
-	if (rc)
-		return rc;
-
-	vbif->memtype_count = prop_count[VBIF_MEMTYPE_0] +
-				prop_count[VBIF_MEMTYPE_1];
-	if (vbif->memtype_count > MAX_XIN_COUNT) {
-		vbif->memtype_count = 0;
-		SDE_ERROR("too many memtype defs, ignoring entries\n");
-	}
-	for (j = 0, k = 0; j < prop_count[VBIF_MEMTYPE_0]; j++)
-		vbif->memtype[k++] = PROP_VALUE_ACCESS(
-				prop_value, VBIF_MEMTYPE_0, j);
-	for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
-		vbif->memtype[k++] = PROP_VALUE_ACCESS(
-				prop_value, VBIF_MEMTYPE_1, j);
-
-	return 0;
-}
-
-static int sde_vbif_parse_dt(struct device_node *np,
-				struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[VBIF_PROP_MAX], i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[VBIF_PROP_MAX];
-	u32 off_count, vbif_len;
-	struct sde_vbif_cfg *vbif;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(VBIF_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
-			prop_count, &off_count);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
-			&prop_count[VBIF_MEMTYPE_0], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
-			&prop_count[VBIF_MEMTYPE_1], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
-			&prop_count[VBIF_QOS_RT_REMAP], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_NRT_REMAP], 1,
-			&prop_count[VBIF_QOS_NRT_REMAP], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_CWB_REMAP], 1,
-			&prop_count[VBIF_QOS_CWB_REMAP], NULL);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_LUTDMA_REMAP], 1,
-			&prop_count[VBIF_QOS_LUTDMA_REMAP], NULL);
-	if (rc)
-		goto end;
-
-	sde_cfg->vbif_count = off_count;
-
-	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	vbif_len = PROP_VALUE_ACCESS(prop_value, VBIF_LEN, 0);
-	if (!prop_exists[VBIF_LEN])
-		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-	for (i = 0; i < off_count; i++) {
-		rc = _sde_vbif_populate(sde_cfg, vbif, prop_value,
-				prop_count, vbif_len, i);
-		if (rc)
-			goto end;
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[PP_PROP_MAX], i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[PP_PROP_MAX];
-	u32 off_count, major_version;
-	struct sde_pingpong_cfg *pp;
-	struct sde_pingpong_sub_blks *sblk;
-
-	if (!sde_cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(PP_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
-		&off_count);
-	if (rc)
-		goto end;
-
-	sde_cfg->pingpong_count = off_count;
-
-	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	for (i = 0; i < off_count; i++) {
-		pp = sde_cfg->pingpong + i;
-		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-		if (!sblk) {
-			rc = -ENOMEM;
-			/* catalog deinit will release the allocated blocks */
-			goto end;
-		}
-		pp->sblk = sblk;
-
-		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
-		pp->id = PINGPONG_0 + i;
-		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
-				pp->id - PINGPONG_0);
-		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
-
-		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
-		sblk->te.id = SDE_PINGPONG_TE;
-		snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u",
-				pp->id - PINGPONG_0);
-
-		major_version = SDE_HW_MAJOR(sde_cfg->hwversion);
-		if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500))
-			set_bit(SDE_PINGPONG_TE, &pp->features);
-
-		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
-		if (sblk->te2.base) {
-			sblk->te2.id = SDE_PINGPONG_TE2;
-			snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u",
-					pp->id - PINGPONG_0);
-			set_bit(SDE_PINGPONG_TE2, &pp->features);
-			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
-		}
-
-		if (PROP_VALUE_ACCESS(prop_value, PP_SLAVE, i))
-			set_bit(SDE_PINGPONG_SLAVE, &pp->features);
-
-		sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
-		if (sblk->dsc.base) {
-			sblk->dsc.id = SDE_PINGPONG_DSC;
-			snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
-					pp->id - PINGPONG_0);
-			set_bit(SDE_PINGPONG_DSC, &pp->features);
-		}
-
-		sblk->dither.base = PROP_VALUE_ACCESS(prop_value, DITHER_OFF,
-							i);
-		if (sblk->dither.base) {
-			sblk->dither.id = SDE_PINGPONG_DITHER;
-			snprintf(sblk->dither.name, SDE_HW_BLK_NAME_LEN,
-					"dither_%u", pp->id);
-			set_bit(SDE_PINGPONG_DITHER, &pp->features);
-		}
-		sblk->dither.len = PROP_VALUE_ACCESS(prop_value, DITHER_LEN, 0);
-		sblk->dither.version = PROP_VALUE_ACCESS(prop_value, DITHER_VER,
-								0);
-
-		if (prop_exists[PP_MERGE_3D_ID]) {
-			set_bit(SDE_PINGPONG_MERGE_3D, &pp->features);
-			pp->merge_3d_id = PROP_VALUE_ACCESS(prop_value,
-					PP_MERGE_3D_ID, i) + 1;
-		}
-	}
-
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int _sde_parse_prop_check(struct sde_mdss_cfg *cfg,
-	bool prop_exists[SDE_PROP_MAX], struct sde_prop_value *prop_value)
-{
-	cfg->max_sspp_linewidth = PROP_VALUE_ACCESS(prop_value,
-			SSPP_LINEWIDTH, 0);
-	if (!prop_exists[SSPP_LINEWIDTH])
-		cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
-
-	cfg->max_mixer_width = PROP_VALUE_ACCESS(prop_value,
-			MIXER_LINEWIDTH, 0);
-	if (!prop_exists[MIXER_LINEWIDTH])
-		cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
-
-	cfg->max_mixer_blendstages = PROP_VALUE_ACCESS(prop_value,
-			MIXER_BLEND, 0);
-	if (!prop_exists[MIXER_BLEND])
-		cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
-
-	cfg->max_wb_linewidth = PROP_VALUE_ACCESS(prop_value, WB_LINEWIDTH, 0);
-	if (!prop_exists[WB_LINEWIDTH])
-		cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
-
-	cfg->mdp[0].highest_bank_bit = PROP_VALUE_ACCESS(prop_value,
-			BANK_BIT, 0);
-	if (!prop_exists[BANK_BIT])
-		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
-
-	if (of_fdt_get_ddrtype() == LP_DDR4_TYPE)
-		cfg->mdp[0].highest_bank_bit = 0x02;
-
-	cfg->ubwc_version = SDE_HW_UBWC_VER(PROP_VALUE_ACCESS(prop_value,
-			UBWC_VERSION, 0));
-	if (!prop_exists[UBWC_VERSION])
-		cfg->ubwc_version = DEFAULT_SDE_UBWC_VERSION;
-
-	cfg->macrotile_mode = PROP_VALUE_ACCESS(prop_value, MACROTILE_MODE, 0);
-	if (!prop_exists[MACROTILE_MODE])
-		cfg->macrotile_mode = DEFAULT_SDE_UBWC_MACROTILE_MODE;
-
-	cfg->ubwc_bw_calc_version =
-		PROP_VALUE_ACCESS(prop_value, UBWC_BW_CALC_VERSION, 0);
-
-	cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
-	if (!prop_exists[UBWC_STATIC])
-		cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
-
-	cfg->mdp[0].ubwc_swizzle = PROP_VALUE_ACCESS(prop_value,
-			UBWC_SWIZZLE, 0);
-	if (!prop_exists[UBWC_SWIZZLE])
-		cfg->mdp[0].ubwc_swizzle = DEFAULT_SDE_UBWC_SWIZZLE;
-
-	cfg->mdp[0].has_dest_scaler =
-		PROP_VALUE_ACCESS(prop_value, DEST_SCALER, 0);
-
-	cfg->mdp[0].smart_panel_align_mode =
-		PROP_VALUE_ACCESS(prop_value, SMART_PANEL_ALIGN_MODE, 0);
-	return 0;
-}
-
-static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
-{
-	int rc, i, dma_rc, len, prop_count[SDE_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[SDE_PROP_MAX];
-	const char *type;
-	u32 major_version;
-
-	if (!cfg) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	prop_value = kzalloc(SDE_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value)
-		return -ENOMEM;
-
-	rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
-		&len);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &sde_prop[SEC_SID_MASK], 1,
-				&prop_count[SEC_SID_MASK], NULL);
-	if (rc)
-		goto end;
-
-	rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
-		prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	cfg->mdss_count = 1;
-	cfg->mdss[0].base = MDSS_BASE_OFFSET;
-	cfg->mdss[0].id = MDP_TOP;
-	snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u",
-			cfg->mdss[0].id - MDP_TOP);
-
-	cfg->mdp_count = 1;
-	cfg->mdp[0].id = MDP_TOP;
-	snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u",
-		cfg->mdp[0].id - MDP_TOP);
-	cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
-	cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
-	if (!prop_exists[SDE_LEN])
-		cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
-
-	rc = _sde_parse_prop_check(cfg, prop_exists, prop_value);
-	if (rc)
-		SDE_ERROR("sde parse property check failed\n");
-
-	major_version = SDE_HW_MAJOR(cfg->hwversion);
-	if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500))
-		set_bit(SDE_MDP_VSYNC_SEL, &cfg->mdp[0].features);
-
-	if (prop_exists[SEC_SID_MASK]) {
-		cfg->sec_sid_mask_count = prop_count[SEC_SID_MASK];
-		for (i = 0; i < cfg->sec_sid_mask_count; i++)
-			cfg->sec_sid_mask[i] =
-				PROP_VALUE_ACCESS(prop_value, SEC_SID_MASK, i);
-	}
-
-	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
-	if (!rc && !strcmp(type, "qseedv3")) {
-		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
-	} else if (!rc && !strcmp(type, "qseedv3lite")) {
-		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3LITE;
-	} else if (!rc && !strcmp(type, "qseedv2")) {
-		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
-	} else if (rc) {
-		SDE_DEBUG("invalid QSEED configuration\n");
-		rc = 0;
-	}
-
-	rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
-	if (!rc && !strcmp(type, "csc")) {
-		cfg->csc_type = SDE_SSPP_CSC;
-	} else if (!rc && !strcmp(type, "csc-10bit")) {
-		cfg->csc_type = SDE_SSPP_CSC_10BIT;
-	} else if (rc) {
-		SDE_DEBUG("invalid csc configuration\n");
-		rc = 0;
-	}
-
-	/*
-	 * Current SDE support only Smart DMA 2.0-2.5.
-	 * No support for Smart DMA 1.0 yet.
-	 */
-	cfg->smart_dma_rev = 0;
-	dma_rc = of_property_read_string(np, sde_prop[SMART_DMA_REV].prop_name,
-			&type);
-	if (dma_rc) {
-		SDE_DEBUG("invalid SMART_DMA_REV node in device tree: %d\n",
-				dma_rc);
-	} else if (!strcmp(type, "smart_dma_v2p5")) {
-		cfg->smart_dma_rev = SDE_SSPP_SMART_DMA_V2p5;
-	} else if (!strcmp(type, "smart_dma_v2")) {
-		cfg->smart_dma_rev = SDE_SSPP_SMART_DMA_V2;
-	} else if (!strcmp(type, "smart_dma_v1")) {
-		SDE_ERROR("smart dma 1.0 is not supported in SDE\n");
-	} else {
-		SDE_DEBUG("unknown smart dma version\n");
-	}
-
-	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
-	cfg->has_dim_layer = PROP_VALUE_ACCESS(prop_value, DIM_LAYER, 0);
-	cfg->has_idle_pc = PROP_VALUE_ACCESS(prop_value, IDLE_PC, 0);
-	cfg->pipe_order_type = PROP_VALUE_ACCESS(prop_value,
-		PIPE_ORDER_VERSION, 0);
-end:
-	kfree(prop_value);
-	return rc;
-}
-
-static int sde_parse_reg_dma_dt(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	int rc = 0, i, prop_count[REG_DMA_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	u32 off_count;
-	bool prop_exists[REG_DMA_PROP_MAX];
-
-	prop_value = kcalloc(REG_DMA_PROP_MAX,
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _validate_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
-			prop_count, &off_count);
-	if (rc || !off_count)
-		goto end;
-
-	rc = _read_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
-			prop_count, prop_exists, prop_value);
-	if (rc)
-		goto end;
-
-	sde_cfg->reg_dma_count = off_count;
-	sde_cfg->dma_cfg.base = PROP_VALUE_ACCESS(prop_value, REG_DMA_OFF, 0);
-	sde_cfg->dma_cfg.version = PROP_VALUE_ACCESS(prop_value,
-						REG_DMA_VERSION, 0);
-	sde_cfg->dma_cfg.trigger_sel_off = PROP_VALUE_ACCESS(prop_value,
-						REG_DMA_TRIGGER_OFF, 0);
-	sde_cfg->dma_cfg.broadcast_disabled = PROP_VALUE_ACCESS(prop_value,
-						REG_DMA_BROADCAST_DISABLED, 0);
-	sde_cfg->dma_cfg.xin_id = PROP_VALUE_ACCESS(prop_value,
-						REG_DMA_XIN_ID, 0);
-	sde_cfg->dma_cfg.clk_ctrl = SDE_CLK_CTRL_LUTDMA;
-	sde_cfg->dma_cfg.vbif_idx = VBIF_RT;
-
-	for (i = 0; i < sde_cfg->mdp_count; i++) {
-		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].reg_off =
-			PROP_BITVALUE_ACCESS(prop_value,
-					REG_DMA_CLK_CTRL, 0, 0);
-		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off =
-			PROP_BITVALUE_ACCESS(prop_value,
-					REG_DMA_CLK_CTRL, 0, 1);
-	}
-
-end:
-	kfree(prop_value);
-	/* reg dma is optional feature hence return 0 */
-	return 0;
-}
-
-static int _sde_perf_parse_dt_validate(struct device_node *np, int *prop_count)
-{
-	int rc, len;
-
-	rc = _validate_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
-			prop_count, &len);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_DANGER_LUT], 1,
-			&prop_count[PERF_DANGER_LUT], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT_LINEAR], 1,
-			&prop_count[PERF_SAFE_LUT_LINEAR], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT_MACROTILE], 1,
-			&prop_count[PERF_SAFE_LUT_MACROTILE], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT_NRT], 1,
-			&prop_count[PERF_SAFE_LUT_NRT], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT_CWB], 1,
-			&prop_count[PERF_SAFE_LUT_CWB], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_LINEAR], 1,
-			&prop_count[PERF_QOS_LUT_LINEAR], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_MACROTILE], 1,
-			&prop_count[PERF_QOS_LUT_MACROTILE], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_NRT], 1,
-			&prop_count[PERF_QOS_LUT_NRT], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_CWB], 1,
-			&prop_count[PERF_QOS_LUT_CWB], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_CDP_SETTING], 1,
-			&prop_count[PERF_CDP_SETTING], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np,
-			&sde_perf_prop[PERF_QOS_LUT_MACROTILE_QSEED], 1,
-			&prop_count[PERF_QOS_LUT_MACROTILE_QSEED], NULL);
-	if (rc)
-		return rc;
-
-	rc = _validate_dt_entry(np,
-			&sde_perf_prop[PERF_SAFE_LUT_MACROTILE_QSEED], 1,
-			&prop_count[PERF_SAFE_LUT_MACROTILE_QSEED], NULL);
-
-	return rc;
-}
-
-static int _sde_perf_parse_dt_cfg_qos(struct sde_mdss_cfg *cfg, int *prop_count,
-	struct sde_prop_value *prop_value, bool *prop_exists)
-{
-	int j, k;
-
-	if (prop_exists[PERF_DANGER_LUT] && prop_count[PERF_DANGER_LUT] <=
-			SDE_QOS_LUT_USAGE_MAX) {
-		for (j = 0; j < prop_count[PERF_DANGER_LUT]; j++) {
-			cfg->perf.danger_lut_tbl[j] =
-					PROP_VALUE_ACCESS(prop_value,
-						PERF_DANGER_LUT, j);
-			SDE_DEBUG("danger usage:%d lut:0x%x\n",
-					j, cfg->perf.danger_lut_tbl[j]);
-		}
-	}
-
-	for (j = 0; j < SDE_QOS_LUT_USAGE_MAX; j++) {
-		static const u32 safe_key[SDE_QOS_LUT_USAGE_MAX] = {
-			[SDE_QOS_LUT_USAGE_LINEAR] =
-					PERF_SAFE_LUT_LINEAR,
-			[SDE_QOS_LUT_USAGE_MACROTILE] =
-					PERF_SAFE_LUT_MACROTILE,
-			[SDE_QOS_LUT_USAGE_NRT] =
-					PERF_SAFE_LUT_NRT,
-			[SDE_QOS_LUT_USAGE_CWB] =
-					PERF_SAFE_LUT_CWB,
-			[SDE_QOS_LUT_USAGE_MACROTILE_QSEED] =
-					PERF_SAFE_LUT_MACROTILE_QSEED,
-		};
-		const u32 entry_size = 2;
-		int m, count;
-		int key = safe_key[j];
-
-		if (!prop_exists[key])
-			continue;
-
-		count = prop_count[key] / entry_size;
-
-		cfg->perf.sfe_lut_tbl[j].entries = kcalloc(count,
-			sizeof(struct sde_qos_lut_entry), GFP_KERNEL);
-		if (!cfg->perf.sfe_lut_tbl[j].entries)
-			return -ENOMEM;
-
-		for (k = 0, m = 0; k < count; k++, m += entry_size) {
-			u64 lut_lo;
-
-			cfg->perf.sfe_lut_tbl[j].entries[k].fl =
-					PROP_VALUE_ACCESS(prop_value, key, m);
-			lut_lo = PROP_VALUE_ACCESS(prop_value, key, m + 1);
-			cfg->perf.sfe_lut_tbl[j].entries[k].lut = lut_lo;
-			SDE_DEBUG("safe usage:%d.%d fl:%d lut:0x%llx\n",
-				j, k,
-				cfg->perf.sfe_lut_tbl[j].entries[k].fl,
-				cfg->perf.sfe_lut_tbl[j].entries[k].lut);
-		}
-		cfg->perf.sfe_lut_tbl[j].nentry = count;
-	}
-
-	for (j = 0; j < SDE_QOS_LUT_USAGE_MAX; j++) {
-		static const u32 prop_key[SDE_QOS_LUT_USAGE_MAX] = {
-			[SDE_QOS_LUT_USAGE_LINEAR] =
-					PERF_QOS_LUT_LINEAR,
-			[SDE_QOS_LUT_USAGE_MACROTILE] =
-					PERF_QOS_LUT_MACROTILE,
-			[SDE_QOS_LUT_USAGE_NRT] =
-					PERF_QOS_LUT_NRT,
-			[SDE_QOS_LUT_USAGE_CWB] =
-					PERF_QOS_LUT_CWB,
-			[SDE_QOS_LUT_USAGE_MACROTILE_QSEED] =
-					PERF_QOS_LUT_MACROTILE_QSEED,
-		};
-		const u32 entry_size = 3;
-		int m, count;
-		int key = prop_key[j];
-
-		if (!prop_exists[key])
-			continue;
-
-		count = prop_count[key] / entry_size;
-
-		cfg->perf.qos_lut_tbl[j].entries = kcalloc(count,
-			sizeof(struct sde_qos_lut_entry), GFP_KERNEL);
-		if (!cfg->perf.qos_lut_tbl[j].entries)
-			return -ENOMEM;
-
-		for (k = 0, m = 0; k < count; k++, m += entry_size) {
-			u64 lut_hi, lut_lo;
-
-			cfg->perf.qos_lut_tbl[j].entries[k].fl =
-					PROP_VALUE_ACCESS(prop_value, key, m);
-			lut_hi = PROP_VALUE_ACCESS(prop_value, key, m + 1);
-			lut_lo = PROP_VALUE_ACCESS(prop_value, key, m + 2);
-			cfg->perf.qos_lut_tbl[j].entries[k].lut =
-					(lut_hi << 32) | lut_lo;
-			SDE_DEBUG("usage:%d.%d fl:%d lut:0x%llx\n",
-				j, k,
-				cfg->perf.qos_lut_tbl[j].entries[k].fl,
-				cfg->perf.qos_lut_tbl[j].entries[k].lut);
-		}
-		cfg->perf.qos_lut_tbl[j].nentry = count;
-	}
-
-	return 0;
-}
-
-static void _sde_perf_parse_dt_cfg_populate(struct sde_mdss_cfg *cfg,
-		int *prop_count,
-		struct sde_prop_value *prop_value,
-		bool *prop_exists)
-{
-	cfg->perf.max_bw_low =
-			prop_exists[PERF_MAX_BW_LOW] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_LOW, 0) :
-			DEFAULT_MAX_BW_LOW;
-	cfg->perf.max_bw_high =
-			prop_exists[PERF_MAX_BW_HIGH] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_HIGH, 0) :
-			DEFAULT_MAX_BW_HIGH;
-	cfg->perf.min_core_ib =
-			prop_exists[PERF_MIN_CORE_IB] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_MIN_CORE_IB, 0) :
-			DEFAULT_MAX_BW_LOW;
-	cfg->perf.min_llcc_ib =
-			prop_exists[PERF_MIN_LLCC_IB] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_MIN_LLCC_IB, 0) :
-			DEFAULT_MAX_BW_LOW;
-	cfg->perf.min_dram_ib =
-			prop_exists[PERF_MIN_DRAM_IB] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_MIN_DRAM_IB, 0) :
-			DEFAULT_MAX_BW_LOW;
-
-	cfg->perf.undersized_prefill_lines =
-			prop_exists[PERF_UNDERSIZED_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_UNDERSIZED_PREFILL_LINES, 0) :
-			DEFAULT_UNDERSIZED_PREFILL_LINES;
-	cfg->perf.xtra_prefill_lines =
-			prop_exists[PERF_XTRA_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_XTRA_PREFILL_LINES, 0) :
-			DEFAULT_XTRA_PREFILL_LINES;
-	cfg->perf.dest_scale_prefill_lines =
-			prop_exists[PERF_DEST_SCALE_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_DEST_SCALE_PREFILL_LINES, 0) :
-			DEFAULT_DEST_SCALE_PREFILL_LINES;
-	cfg->perf.macrotile_prefill_lines =
-			prop_exists[PERF_MACROTILE_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_MACROTILE_PREFILL_LINES, 0) :
-			DEFAULT_MACROTILE_PREFILL_LINES;
-	cfg->perf.yuv_nv12_prefill_lines =
-			prop_exists[PERF_YUV_NV12_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_YUV_NV12_PREFILL_LINES, 0) :
-			DEFAULT_YUV_NV12_PREFILL_LINES;
-	cfg->perf.linear_prefill_lines =
-			prop_exists[PERF_LINEAR_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_LINEAR_PREFILL_LINES, 0) :
-			DEFAULT_LINEAR_PREFILL_LINES;
-	cfg->perf.downscaling_prefill_lines =
-			prop_exists[PERF_DOWNSCALING_PREFILL_LINES] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_DOWNSCALING_PREFILL_LINES, 0) :
-			DEFAULT_DOWNSCALING_PREFILL_LINES;
-	cfg->perf.amortizable_threshold =
-			prop_exists[PERF_AMORTIZABLE_THRESHOLD] ?
-			PROP_VALUE_ACCESS(prop_value,
-					PERF_AMORTIZABLE_THRESHOLD, 0) :
-			DEFAULT_AMORTIZABLE_THRESHOLD;
-}
-
-static int _sde_perf_parse_dt_cfg(struct device_node *np,
-	struct sde_mdss_cfg *cfg, int *prop_count,
-	struct sde_prop_value *prop_value, bool *prop_exists)
-{
-	int rc, j;
-	const char *str = NULL;
-
-	/*
-	 * The following performance parameters (e.g. core_ib_ff) are
-	 * mapped directly as device tree string constants.
-	 */
-	rc = of_property_read_string(np,
-			sde_perf_prop[PERF_CORE_IB_FF].prop_name, &str);
-	cfg->perf.core_ib_ff = rc ? DEFAULT_CORE_IB_FF : str;
-	rc = of_property_read_string(np,
-			sde_perf_prop[PERF_CORE_CLK_FF].prop_name, &str);
-	cfg->perf.core_clk_ff = rc ? DEFAULT_CORE_CLK_FF : str;
-	rc = of_property_read_string(np,
-			sde_perf_prop[PERF_COMP_RATIO_RT].prop_name, &str);
-	cfg->perf.comp_ratio_rt = rc ? DEFAULT_COMP_RATIO_RT : str;
-	rc = of_property_read_string(np,
-			sde_perf_prop[PERF_COMP_RATIO_NRT].prop_name, &str);
-	cfg->perf.comp_ratio_nrt = rc ? DEFAULT_COMP_RATIO_NRT : str;
-	rc = 0;
-
-	_sde_perf_parse_dt_cfg_populate(cfg, prop_count, prop_value,
-			prop_exists);
-
-	rc = _sde_perf_parse_dt_cfg_qos(cfg, prop_count, prop_value,
-			prop_exists);
-	if (rc)
-		return rc;
-
-	if (prop_exists[PERF_CDP_SETTING]) {
-		const u32 prop_size = 2;
-		u32 count = prop_count[PERF_CDP_SETTING] / prop_size;
-
-		count = min_t(u32, count, SDE_PERF_CDP_USAGE_MAX);
-
-		for (j = 0; j < count; j++) {
-			cfg->perf.cdp_cfg[j].rd_enable =
-					PROP_VALUE_ACCESS(prop_value,
-					PERF_CDP_SETTING, j * prop_size);
-			cfg->perf.cdp_cfg[j].wr_enable =
-					PROP_VALUE_ACCESS(prop_value,
-					PERF_CDP_SETTING, j * prop_size + 1);
-			SDE_DEBUG("cdp usage:%d rd:%d wr:%d\n",
-				j, cfg->perf.cdp_cfg[j].rd_enable,
-				cfg->perf.cdp_cfg[j].wr_enable);
-		}
-
-		cfg->has_cdp = true;
-	}
-
-	cfg->perf.cpu_mask =
-			prop_exists[PERF_CPU_MASK] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_CPU_MASK, 0) :
-			DEFAULT_CPU_MASK;
-	cfg->perf.cpu_dma_latency =
-			prop_exists[PERF_CPU_DMA_LATENCY] ?
-			PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :
-			DEFAULT_CPU_DMA_LATENCY;
-
-	return 0;
-}
-
-static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
-{
-	int rc, prop_count[PERF_PROP_MAX];
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[PERF_PROP_MAX];
-
-	if (!cfg) {
-		SDE_ERROR("invalid argument\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	prop_value = kzalloc(PERF_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = _sde_perf_parse_dt_validate(np, prop_count);
-	if (rc)
-		goto freeprop;
-
-	rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
-			prop_count, prop_exists, prop_value);
-	if (rc)
-		goto freeprop;
-
-	rc = _sde_perf_parse_dt_cfg(np, cfg, prop_count, prop_value,
-			prop_exists);
-
-freeprop:
-	kfree(prop_value);
-end:
-	return rc;
-}
-
-static int sde_parse_merge_3d_dt(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	int rc, prop_count[HW_PROP_MAX], off_count, i;
-	struct sde_prop_value *prop_value = NULL;
-	bool prop_exists[HW_PROP_MAX];
-	struct sde_merge_3d_cfg *merge_3d;
-
-	prop_value = kcalloc(HW_PROP_MAX, sizeof(struct sde_prop_value),
-			GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto fail;
-	}
-
-	rc = _validate_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
-		prop_count, &off_count);
-	if (rc)
-		goto error;
-
-	sde_cfg->merge_3d_count = off_count;
-
-	rc = _read_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
-			prop_count,
-			prop_exists, prop_value);
-	if (rc)
-		goto error;
-
-	for (i = 0; i < off_count; i++) {
-		merge_3d = sde_cfg->merge_3d + i;
-		merge_3d->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
-		merge_3d->id = MERGE_3D_0 + i;
-		snprintf(merge_3d->name, SDE_HW_BLK_NAME_LEN, "merge_3d_%u",
-				merge_3d->id -  MERGE_3D_0);
-		merge_3d->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
-	}
-
-	return 0;
-error:
-	sde_cfg->merge_3d_count = 0;
-	kfree(prop_value);
-fail:
-	return rc;
-}
-
-static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
-	uint32_t hw_rev)
-{
-	int rc = 0;
-	uint32_t dma_list_size, vig_list_size, wb2_list_size;
-	uint32_t virt_vig_list_size, in_rot_list_size = 0;
-	uint32_t cursor_list_size = 0;
-	uint32_t index = 0;
-
-
-	if (sde_cfg->has_cursor) {
-		cursor_list_size = ARRAY_SIZE(cursor_formats);
-		sde_cfg->cursor_formats = kcalloc(cursor_list_size,
-			sizeof(struct sde_format_extended), GFP_KERNEL);
-		if (!sde_cfg->cursor_formats) {
-			rc = -ENOMEM;
-			goto end;
-		}
-		index = sde_copy_formats(sde_cfg->cursor_formats,
-			cursor_list_size, 0, cursor_formats,
-			ARRAY_SIZE(cursor_formats));
-	}
-
-	dma_list_size = ARRAY_SIZE(plane_formats);
-	vig_list_size = ARRAY_SIZE(plane_formats_vig);
-	if (sde_cfg->has_vig_p010)
-		vig_list_size += ARRAY_SIZE(p010_ubwc_formats);
-	virt_vig_list_size = ARRAY_SIZE(plane_formats);
-	wb2_list_size = ARRAY_SIZE(wb2_formats);
-
-	if (IS_SDE_INLINE_ROT_REV_100(sde_cfg->true_inline_rot_rev))
-		in_rot_list_size = ARRAY_SIZE(true_inline_rot_v1_fmts);
-
-	sde_cfg->dma_formats = kcalloc(dma_list_size,
-		sizeof(struct sde_format_extended), GFP_KERNEL);
-	if (!sde_cfg->dma_formats) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	sde_cfg->vig_formats = kcalloc(vig_list_size,
-		sizeof(struct sde_format_extended), GFP_KERNEL);
-	if (!sde_cfg->vig_formats) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	sde_cfg->virt_vig_formats = kcalloc(virt_vig_list_size,
-		sizeof(struct sde_format_extended), GFP_KERNEL);
-	if (!sde_cfg->virt_vig_formats) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	sde_cfg->wb_formats = kcalloc(wb2_list_size,
-		sizeof(struct sde_format_extended), GFP_KERNEL);
-	if (!sde_cfg->wb_formats) {
-		SDE_ERROR("failed to allocate wb format list\n");
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	if (in_rot_list_size) {
-		sde_cfg->inline_rot_formats = kcalloc(in_rot_list_size,
-			sizeof(struct sde_format_extended), GFP_KERNEL);
-		if (!sde_cfg->inline_rot_formats) {
-			SDE_ERROR("failed to alloc inline rot format list\n");
-			rc = -ENOMEM;
-			goto end;
-		}
-	}
-
-	index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
-		0, plane_formats, ARRAY_SIZE(plane_formats));
-
-	index = sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
-		0, plane_formats_vig, ARRAY_SIZE(plane_formats_vig));
-	if (sde_cfg->has_vig_p010)
-		index += sde_copy_formats(sde_cfg->vig_formats,
-			vig_list_size, index, p010_ubwc_formats,
-			ARRAY_SIZE(p010_ubwc_formats));
-
-	index = sde_copy_formats(sde_cfg->virt_vig_formats, virt_vig_list_size,
-		0, plane_formats, ARRAY_SIZE(plane_formats));
-
-	index = sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
-		0, wb2_formats, ARRAY_SIZE(wb2_formats));
-	if (in_rot_list_size)
-		index = sde_copy_formats(sde_cfg->inline_rot_formats,
-			in_rot_list_size, 0, true_inline_rot_v1_fmts,
-			ARRAY_SIZE(true_inline_rot_v1_fmts));
-end:
-	return rc;
-}
-
-static void _sde_hw_setup_uidle(struct sde_uidle_cfg *uidle_cfg)
-{
-	if (!uidle_cfg->uidle_rev)
-		return;
-
-	if (IS_SDE_UIDLE_REV_100(uidle_cfg->uidle_rev)) {
-		uidle_cfg->fal10_exit_cnt = SDE_UIDLE_FAL10_EXIT_CNT;
-		uidle_cfg->fal10_exit_danger = SDE_UIDLE_FAL10_EXIT_DANGER;
-		uidle_cfg->fal10_danger = SDE_UIDLE_FAL10_DANGER;
-		uidle_cfg->fal10_target_idle_time = SDE_UIDLE_FAL10_TARGET_IDLE;
-		uidle_cfg->fal1_target_idle_time = SDE_UIDLE_FAL1_TARGET_IDLE;
-		uidle_cfg->fal10_threshold = SDE_UIDLE_FAL10_THRESHOLD;
-		uidle_cfg->max_dwnscale = SDE_UIDLE_MAX_DWNSCALE;
-		uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS;
-		uidle_cfg->debugfs_ctrl = true;
-	} else {
-		pr_err("invalid uidle rev:0x%x, disabling uidle\n",
-			uidle_cfg->uidle_rev);
-		uidle_cfg->uidle_rev = 0;
-	}
-}
-
-static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
-{
-	int i, rc = 0;
-
-	if (!sde_cfg)
-		return -EINVAL;
-
-	for (i = 0; i < MDSS_INTR_MAX; i++)
-		set_bit(i, sde_cfg->mdss_irqs);
-
-	if (IS_MSM8996_TARGET(hw_rev)) {
-		sde_cfg->perf.min_prefill_lines = 21;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-	} else if (IS_MSM8998_TARGET(hw_rev)) {
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->perf.min_prefill_lines = 25;
-		sde_cfg->vbif_qos_nlvl = 4;
-		sde_cfg->ts_prefill_rev = 1;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-		sde_cfg->has_cursor = true;
-		sde_cfg->has_hdr = true;
-	} else if (IS_SDM845_TARGET(hw_rev)) {
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->has_cwb_support = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->sui_misr_supported = true;
-		sde_cfg->sui_block_xin_mask = 0x3F71;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_vig_p010 = true;
-	} else if (IS_SDM670_TARGET(hw_rev)) {
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_vig_p010 = true;
-	} else if (IS_SM8150_TARGET(hw_rev)) {
-		sde_cfg->has_cwb_support = true;
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->has_qsync = true;
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_hdr_plus = true;
-		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
-		sde_cfg->has_vig_p010 = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
-		sde_cfg->delay_prg_fetch_start = true;
-		sde_cfg->sui_ns_allowed = true;
-		sde_cfg->sui_misr_supported = true;
-		sde_cfg->sui_block_xin_mask = 0x3F71;
-		sde_cfg->has_sui_blendstage = true;
-		sde_cfg->has_qos_fl_nocalc = true;
-		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-	} else if (IS_SDMSHRIKE_TARGET(hw_rev)) {
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
-		sde_cfg->delay_prg_fetch_start = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_decimation = true;
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_vig_p010 = true;
-	} else if (IS_SM6150_TARGET(hw_rev)) {
-		sde_cfg->has_cwb_support = true;
-		sde_cfg->has_qsync = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
-		sde_cfg->delay_prg_fetch_start = true;
-		sde_cfg->sui_ns_allowed = true;
-		sde_cfg->sui_misr_supported = true;
-		sde_cfg->has_decimation = true;
-		sde_cfg->sui_block_xin_mask = 0x2EE1;
-		sde_cfg->has_sui_blendstage = true;
-		sde_cfg->has_qos_fl_nocalc = true;
-		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_vig_p010 = true;
-	} else if (IS_SDMMAGPIE_TARGET(hw_rev)) {
-		sde_cfg->has_cwb_support = true;
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->has_qsync = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
-		sde_cfg->delay_prg_fetch_start = true;
-		sde_cfg->sui_ns_allowed = true;
-		sde_cfg->sui_misr_supported = true;
-		sde_cfg->sui_block_xin_mask = 0xE71;
-		sde_cfg->has_sui_blendstage = true;
-		sde_cfg->has_qos_fl_nocalc = true;
-		sde_cfg->has_3d_merge_reset = true;
-	} else if (IS_KONA_TARGET(hw_rev)) {
-		sde_cfg->has_cwb_support = true;
-		sde_cfg->has_wb_ubwc = true;
-		sde_cfg->has_qsync = true;
-		sde_cfg->perf.min_prefill_lines = 24;
-		sde_cfg->vbif_qos_nlvl = 8;
-		sde_cfg->ts_prefill_rev = 2;
-		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
-		sde_cfg->delay_prg_fetch_start = true;
-		sde_cfg->sui_ns_allowed = true;
-		sde_cfg->sui_misr_supported = true;
-		sde_cfg->sui_block_xin_mask = 0x3F71;
-		sde_cfg->has_sui_blendstage = true;
-		sde_cfg->has_qos_fl_nocalc = true;
-		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
-		sde_cfg->has_hdr = true;
-		sde_cfg->has_hdr_plus = true;
-		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
-		sde_cfg->has_vig_p010 = true;
-		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
-		sde_cfg->true_inline_dwnscale_rt_num =
-			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
-		sde_cfg->true_inline_dwnscale_rt_denom =
-			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
-		sde_cfg->true_inline_dwnscale_nrt =
-			MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
-		sde_cfg->true_inline_prefill_fudge_lines = 2;
-		sde_cfg->true_inline_prefill_lines_nv12 = 32;
-		sde_cfg->true_inline_prefill_lines = 48;
-		sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_0;
-	} else {
-		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
-		sde_cfg->perf.min_prefill_lines = 0xffff;
-		rc = -ENODEV;
-	}
-
-	if (!rc)
-		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
-
-	_sde_hw_setup_uidle(&sde_cfg->uidle_cfg);
-
-	return rc;
-}
-
-static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
-	uint32_t hw_rev)
-{
-	int rc = 0, i;
-	u32 max_horz_deci = 0, max_vert_deci = 0;
-
-	if (!sde_cfg)
-		return -EINVAL;
-
-	if (sde_cfg->has_sui_blendstage)
-		sde_cfg->sui_supported_blendstage =
-			sde_cfg->max_mixer_blendstages - SDE_STAGE_0;
-
-	for (i = 0; i < sde_cfg->sspp_count; i++) {
-		if (sde_cfg->sspp[i].sblk) {
-			max_horz_deci = max(max_horz_deci,
-				sde_cfg->sspp[i].sblk->maxhdeciexp);
-			max_vert_deci = max(max_vert_deci,
-				sde_cfg->sspp[i].sblk->maxvdeciexp);
-		}
-
-		if (sde_cfg->has_qos_fl_nocalc)
-			set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
-				&sde_cfg->sspp[i].perf_features);
-
-		/*
-		 * set sec-ui blocked SSPP feature flag based on blocked
-		 * xin-mask if sec-ui-misr feature is enabled;
-		 */
-		if (sde_cfg->sui_misr_supported
-				&& (sde_cfg->sui_block_xin_mask
-					& BIT(sde_cfg->sspp[i].xin_id)))
-			set_bit(SDE_SSPP_BLOCK_SEC_UI,
-					&sde_cfg->sspp[i].features);
-	}
-
-	/* this should be updated based on HW rev in future */
-	sde_cfg->max_lm_per_display = MAX_LM_PER_DISPLAY;
-
-	if (max_horz_deci)
-		sde_cfg->max_display_width = sde_cfg->max_sspp_linewidth *
-			max_horz_deci;
-	else
-		sde_cfg->max_display_width = sde_cfg->max_mixer_width *
-			sde_cfg->max_lm_per_display;
-
-	if (max_vert_deci)
-		sde_cfg->max_display_height =
-			MAX_DISPLAY_HEIGHT_WITH_DECIMATION * max_vert_deci;
-	else
-		sde_cfg->max_display_height = MAX_DISPLAY_HEIGHT;
-
-	sde_cfg->min_display_height = MIN_DISPLAY_HEIGHT;
-	sde_cfg->min_display_width = MIN_DISPLAY_WIDTH;
-
-	return rc;
-}
-
-void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
-{
-	int i, j;
-
-	if (!sde_cfg)
-		return;
-
-	for (i = 0; i < sde_cfg->sspp_count; i++)
-		kfree(sde_cfg->sspp[i].sblk);
-
-	for (i = 0; i < sde_cfg->mixer_count; i++)
-		kfree(sde_cfg->mixer[i].sblk);
-
-	for (i = 0; i < sde_cfg->wb_count; i++)
-		kfree(sde_cfg->wb[i].sblk);
-
-	for (i = 0; i < sde_cfg->dspp_count; i++)
-		kfree(sde_cfg->dspp[i].sblk);
-
-	if (sde_cfg->ds_count)
-		kfree(sde_cfg->ds[0].top);
-
-	for (i = 0; i < sde_cfg->pingpong_count; i++)
-		kfree(sde_cfg->pingpong[i].sblk);
-
-	for (i = 0; i < sde_cfg->vbif_count; i++) {
-		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
-		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
-
-		for (j = VBIF_RT_CLIENT; j < VBIF_MAX_CLIENT; j++)
-			kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
-	}
-
-	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
-		kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
-		kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
-	}
-
-	kfree(sde_cfg->dma_formats);
-	kfree(sde_cfg->cursor_formats);
-	kfree(sde_cfg->vig_formats);
-	kfree(sde_cfg->wb_formats);
-	kfree(sde_cfg->virt_vig_formats);
-	kfree(sde_cfg->inline_rot_formats);
-
-	kfree(sde_cfg);
-}
-
-/*************************************************************
- * hardware catalog init
- *************************************************************/
-struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
-{
-	int rc;
-	struct sde_mdss_cfg *sde_cfg;
-	struct device_node *np = dev->dev->of_node;
-
-	sde_cfg = kzalloc(sizeof(*sde_cfg), GFP_KERNEL);
-	if (!sde_cfg)
-		return ERR_PTR(-ENOMEM);
-
-	sde_cfg->hwversion = hw_rev;
-
-	rc = _sde_hardware_pre_caps(sde_cfg, hw_rev);
-	if (rc)
-		goto end;
-
-	rc = sde_top_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_perf_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_rot_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	/* uidle must be done before sspp and ctl,
-	 * so if something goes wrong, we won't
-	 * enable it in ctl and sspp.
-	 */
-	rc = sde_uidle_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_ctl_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_sspp_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_dspp_top_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_dspp_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_ds_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_dsc_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_pp_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	/* mixer parsing should be done after dspp,
-	 * ds and pp for mapping setup
-	 */
-	rc = sde_mixer_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_intf_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_wb_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	/* cdm parsing should be done after intf and wb for mapping setup */
-	rc = sde_cdm_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_vbif_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_parse_reg_dma_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = sde_parse_merge_3d_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
-	rc = _sde_hardware_post_caps(sde_cfg, hw_rev);
-	if (rc)
-		goto end;
-
-	return sde_cfg;
-
-end:
-	sde_hw_catalog_deinit(sde_cfg);
-	return NULL;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
deleted file mode 100644
index af77a61..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ /dev/null
@@ -1,1413 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_CATALOG_H
-#define _SDE_HW_CATALOG_H
-
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/bitmap.h>
-#include <linux/err.h>
-#include <linux/msm-bus.h>
-#include <linux/of_fdt.h>
-#include <drm/drmP.h>
-
-/**
- * Max hardware block count: For ex: max 12 SSPP pipes or
- * 5 ctl paths. In all cases, it can have max 12 hardware blocks
- * based on current design
- */
-#define MAX_BLOCKS    12
-
-#define SDE_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
-		((MINOR & 0xFFF) << 16)  |\
-		(STEP & 0xFFFF))
-
-#define SDE_HW_MAJOR(rev)		((rev) >> 28)
-#define SDE_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
-#define SDE_HW_STEP(rev)		((rev) & 0xFFFF)
-#define SDE_HW_MAJOR_MINOR(rev)		((rev) >> 16)
-
-#define IS_SDE_MAJOR_SAME(rev1, rev2)   \
-	(SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2)))
-
-#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2)   \
-	(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
-
-#define SDE_HW_VER_170	SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
-#define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
-#define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
-#define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
-#define SDE_HW_VER_301	SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */
-#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
-#define SDE_HW_VER_401	SDE_HW_VER(4, 0, 1) /* sdm845 v2.0 */
-#define SDE_HW_VER_410	SDE_HW_VER(4, 1, 0) /* sdm670 v1.0 */
-#define SDE_HW_VER_500	SDE_HW_VER(5, 0, 0) /* sm8150 v1.0 */
-#define SDE_HW_VER_501	SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */
-#define SDE_HW_VER_510	SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
-#define SDE_HW_VER_520	SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
-#define SDE_HW_VER_530	SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
-#define SDE_HW_VER_600	SDE_HW_VER(6, 0, 0) /* kona */
-
-#define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170)
-#define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300)
-#define IS_SDM845_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
-#define IS_SDM670_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_410)
-#define IS_SM8150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500)
-#define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510)
-#define IS_SDMMAGPIE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_520)
-#define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530)
-#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
-
-#define SDE_HW_BLK_NAME_LEN	16
-
-#define MAX_IMG_WIDTH 0x3fff
-#define MAX_IMG_HEIGHT 0x3fff
-
-#define CRTC_DUAL_MIXERS	2
-
-#define SDE_COLOR_PROCESS_VER(MAJOR, MINOR) \
-		((((MAJOR) & 0xFFFF) << 16) | (((MINOR) & 0xFFFF)))
-#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
-#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
-
-#define MAX_XIN_COUNT 16
-#define SSPP_SUBBLK_COUNT_MAX 2
-
-#define SDE_CTL_CFG_VERSION_1_0_0       0x100
-#define MAX_INTF_PER_CTL_V1                 2
-#define MAX_DSC_PER_CTL_V1                  2
-#define MAX_CWB_PER_CTL_V1                  2
-#define MAX_MERGE_3D_PER_CTL_V1             2
-#define MAX_WB_PER_CTL_V1                   1
-#define MAX_CDM_PER_CTL_V1                  1
-#define IS_SDE_CTL_REV_100(rev) \
-	((rev) == SDE_CTL_CFG_VERSION_1_0_0)
-
-/**
- * True inline rotation supported versions
- */
-#define SDE_INLINE_ROT_VERSION_1_0_0	0x100
-#define IS_SDE_INLINE_ROT_REV_100(rev) \
-	((rev) == SDE_INLINE_ROT_VERSION_1_0_0)
-
-/*
- * UIDLE supported versions
- */
-#define SDE_UIDLE_VERSION_1_0_0	0x100
-#define IS_SDE_UIDLE_REV_100(rev) \
-	((rev) == SDE_UIDLE_VERSION_1_0_0)
-
-#define SDE_HW_UBWC_VER(rev) \
-	SDE_HW_VER((((rev) >> 8) & 0xF), (((rev) >> 4) & 0xF), ((rev) & 0xF))
-
-/**
- * Supported UBWC feature versions
- */
-enum {
-	SDE_HW_UBWC_VER_10 = SDE_HW_UBWC_VER(0x100),
-	SDE_HW_UBWC_VER_20 = SDE_HW_UBWC_VER(0x200),
-	SDE_HW_UBWC_VER_30 = SDE_HW_UBWC_VER(0x300),
-	SDE_HW_UBWC_VER_40 = SDE_HW_UBWC_VER(0x400),
-};
-
-#define IS_UBWC_20_SUPPORTED(rev) \
-		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_20)
-#define IS_UBWC_30_SUPPORTED(rev) \
-		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_30)
-#define IS_UBWC_40_SUPPORTED(rev) \
-		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_40)
-
-/**
- * Supported SSPP system cache settings
- */
-#define SSPP_SYS_CACHE_EN_FLAG	BIT(0)
-#define SSPP_SYS_CACHE_SCID		BIT(1)
-#define SSPP_SYS_CACHE_OP_MODE	BIT(2)
-#define SSPP_SYS_CACHE_OP_TYPE	BIT(3)
-#define SSPP_SYS_CACHE_NO_ALLOC	BIT(4)
-
-/**
- * SDE INTERRUPTS - maintains the possible hw irq's allowed by HW
- * The order in this enum must match the order of the irqs defined
- * by 'sde_irq_map'
- */
-enum sde_intr_enum {
-	MDSS_INTR_SSPP_TOP0_INTR,
-	MDSS_INTR_SSPP_TOP0_INTR2,
-	MDSS_INTF_TEAR_1_INTR,
-	MDSS_INTF_TEAR_2_INTR,
-	MDSS_INTR_SSPP_TOP0_HIST_INTR,
-	MDSS_INTR_INTF_0_INTR,
-	MDSS_INTR_INTF_1_INTR,
-	MDSS_INTR_INTF_2_INTR,
-	MDSS_INTR_INTF_3_INTR,
-	MDSS_INTR_INTF_4_INTR,
-	MDSS_INTR_AD4_0_INTR,
-	MDSS_INTR_AD4_1_INTR,
-	MDSS_INTR_LTM_0_INTR,
-	MDSS_INTR_LTM_1_INTR,
-	MDSS_INTR_MAX
-};
-
-/**
- * MDP TOP BLOCK features
- * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
- * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
- * @SDE_MDP_BWC,           MDSS HW supports Bandwidth compression.
- * @SDE_MDP_UBWC_1_0,      This chipsets supports Universal Bandwidth
- *                         compression initial revision
- * @SDE_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
- * @SDE_MDP_VSYNC_SEL      Vsync selection for command mode panels
- * @SDE_MDP_DHDR_MEMPOOL   Dynamic HDR Metadata mempool present
- * @SDE_MDP_MAX            Maximum value
-
- */
-enum {
-	SDE_MDP_PANIC_PER_PIPE = 0x1,
-	SDE_MDP_10BIT_SUPPORT,
-	SDE_MDP_BWC,
-	SDE_MDP_UBWC_1_0,
-	SDE_MDP_UBWC_1_5,
-	SDE_MDP_VSYNC_SEL,
-	SDE_MDP_DHDR_MEMPOOL,
-	SDE_MDP_MAX
-};
-
-/**
- * SSPP sub-blocks/features
- * @SDE_SSPP_SRC             Src and fetch part of the pipes,
- * @SDE_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
- * @SDE_SSPP_SCALER_QSEED3,  QSEED3 alogorithm support
- * @SDE_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
- * @SDE_SSPP_CSC,            Support of Color space converion
- * @SDE_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
- * @SDE_SSPP_HSIC,           Global HSIC control
- * @SDE_SSPP_MEMCOLOR        Memory Color Support
- * @SDE_SSPP_PCC,            Color correction support
- * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
- * @SDE_SSPP_EXCL_RECT,      SSPP supports exclusion rect
- * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
- * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
- * @SDE_SSPP_SMART_DMA_V2p5, SmartDMA 2.5 support
- * @SDE_SSPP_VIG_IGC,        VIG 1D LUT IGC
- * @SDE_SSPP_VIG_GAMUT,      VIG 3D LUT Gamut
- * @SDE_SSPP_DMA_IGC,        DMA 1D LUT IGC
- * @SDE_SSPP_DMA_GC,         DMA 1D LUT GC
- * @SDE_SSPP_INVERSE_PMA     Alpha unmultiply (PMA) support
- * @SDE_SSPP_DGM_INVERSE_PMA Alpha unmultiply (PMA) support in DGM block
- * @SDE_SSPP_DGM_CSC         Support of color space conversion in DGM block
- * @SDE_SSPP_SEC_UI_ALLOWED   Allows secure-ui layers
- * @SDE_SSPP_BLOCK_SEC_UI    Blocks secure-ui layers
- * @SDE_SSPP_SCALER_QSEED3LITE Qseed3lite algorithm support
- * @SDE_SSPP_TRUE_INLINE_ROT_V1, Support of SSPP true inline rotation v1
- * @SDE_SSPP_MAX             maximum value
- */
-enum {
-	SDE_SSPP_SRC = 0x1,
-	SDE_SSPP_SCALER_QSEED2,
-	SDE_SSPP_SCALER_QSEED3,
-	SDE_SSPP_SCALER_RGB,
-	SDE_SSPP_CSC,
-	SDE_SSPP_CSC_10BIT,
-	SDE_SSPP_HSIC,
-	SDE_SSPP_MEMCOLOR,
-	SDE_SSPP_PCC,
-	SDE_SSPP_CURSOR,
-	SDE_SSPP_EXCL_RECT,
-	SDE_SSPP_SMART_DMA_V1,
-	SDE_SSPP_SMART_DMA_V2,
-	SDE_SSPP_SMART_DMA_V2p5,
-	SDE_SSPP_VIG_IGC,
-	SDE_SSPP_VIG_GAMUT,
-	SDE_SSPP_DMA_IGC,
-	SDE_SSPP_DMA_GC,
-	SDE_SSPP_INVERSE_PMA,
-	SDE_SSPP_DGM_INVERSE_PMA,
-	SDE_SSPP_DGM_CSC,
-	SDE_SSPP_SEC_UI_ALLOWED,
-	SDE_SSPP_BLOCK_SEC_UI,
-	SDE_SSPP_SCALER_QSEED3LITE,
-	SDE_SSPP_TRUE_INLINE_ROT_V1,
-	SDE_SSPP_MAX
-};
-
-/**
- * SDE performance features
- * @SDE_PERF_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
- * @SDE_PERF_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
- * @SDE_PERF_SSPP_TS_PREFILL      Supports prefill with traffic shaper
- * @SDE_PERF_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
- * @SDE_PERF_SSPP_CDP             Supports client driven prefetch
- * @SDE_PERF_SSPP_QOS_FL_NOCALC   Avoid fill level calc for QoS/danger/safe
- * @SDE_PERF_SSPP_SYS_CACHE,      SSPP supports system cache
- * @SDE_PERF_SSPP_UIDLE,          sspp supports uidle
- * @SDE_PERF_SSPP_MAX             Maximum value
- */
-enum {
-	SDE_PERF_SSPP_QOS = 0x1,
-	SDE_PERF_SSPP_QOS_8LVL,
-	SDE_PERF_SSPP_TS_PREFILL,
-	SDE_PERF_SSPP_TS_PREFILL_REC1,
-	SDE_PERF_SSPP_CDP,
-	SDE_PERF_SSPP_QOS_FL_NOCALC,
-	SDE_PERF_SSPP_SYS_CACHE,
-	SDE_PERF_SSPP_UIDLE,
-	SDE_PERF_SSPP_MAX
-};
-
-/*
- * MIXER sub-blocks/features
- * @SDE_MIXER_LAYER           Layer mixer layer blend configuration,
- * @SDE_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
- * @SDE_MIXER_GC              Gamma correction block
- * @SDE_DIM_LAYER             Layer mixer supports dim layer
- * @SDE_DISP_CWB_PREF         Layer mixer preferred for CWB
- * @SDE_DISP_PRIMARY_PREF     Layer mixer preferred for primary display
- * @SDE_MIXER_MAX             maximum value
- */
-enum {
-	SDE_MIXER_LAYER = 0x1,
-	SDE_MIXER_SOURCESPLIT,
-	SDE_MIXER_GC,
-	SDE_DIM_LAYER,
-	SDE_DISP_PRIMARY_PREF,
-	SDE_DISP_CWB_PREF,
-	SDE_MIXER_MAX
-};
-
-/**
- * DSPP sub-blocks
- * @SDE_DSPP_IGC             DSPP Inverse gamma correction block
- * @SDE_DSPP_PCC             Panel color correction block
- * @SDE_DSPP_GC              Gamma correction block
- * @SDE_DSPP_HSIC            Global HSIC block
- * @SDE_DSPP_MEMCOLOR        Memory Color block
- * @SDE_DSPP_SIXZONE         Six zone block
- * @SDE_DSPP_GAMUT           Gamut bloc
- * @SDE_DSPP_DITHER          Dither block
- * @SDE_DSPP_HIST            Histogram block
- * @SDE_DSPP_VLUT            PA VLUT block
- * @SDE_DSPP_AD              AD block
- * @SDE_DSPP_LTM             LTM block
- * @SDE_DSPP_MAX             maximum value
- */
-enum {
-	SDE_DSPP_IGC = 0x1,
-	SDE_DSPP_PCC,
-	SDE_DSPP_GC,
-	SDE_DSPP_HSIC,
-	SDE_DSPP_MEMCOLOR,
-	SDE_DSPP_SIXZONE,
-	SDE_DSPP_GAMUT,
-	SDE_DSPP_DITHER,
-	SDE_DSPP_HIST,
-	SDE_DSPP_VLUT,
-	SDE_DSPP_AD,
-	SDE_DSPP_LTM,
-	SDE_DSPP_MAX
-};
-
-/**
- * LTM sub-features
- * @SDE_LTM_INIT             LTM INIT feature
- * @SDE_LTM_ROI              LTM ROI feature
- * @SDE_LTM_VLUT             LTM VLUT feature
- * @SDE_LTM_MAX              maximum value
- */
-enum {
-	SDE_LTM_INIT = 0x1,
-	SDE_LTM_ROI,
-	SDE_LTM_VLUT,
-	SDE_LTM_MAX
-};
-
-/**
- * PINGPONG sub-blocks
- * @SDE_PINGPONG_TE         Tear check block
- * @SDE_PINGPONG_TE2        Additional tear check block for split pipes
- * @SDE_PINGPONG_SPLIT      PP block supports split fifo
- * @SDE_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
- * @SDE_PINGPONG_DSC,       Display stream compression blocks
- * @SDE_PINGPONG_DITHER,    Dither blocks
- * @SDE_PINGPONG_MERGE_3D,  Separate MERGE_3D block exists
- * @SDE_PINGPONG_MAX
- */
-enum {
-	SDE_PINGPONG_TE = 0x1,
-	SDE_PINGPONG_TE2,
-	SDE_PINGPONG_SPLIT,
-	SDE_PINGPONG_SLAVE,
-	SDE_PINGPONG_DSC,
-	SDE_PINGPONG_DITHER,
-	SDE_PINGPONG_MERGE_3D,
-	SDE_PINGPONG_MAX
-};
-
-/** DSC sub-blocks
- * @SDE_DSC_OUTPUT_CTRL         Supports the control of the pp id which gets
- *                              the pixel output from this DSC.
- * @SDE_DSC_MAX
- */
-enum {
-	SDE_DSC_OUTPUT_CTRL = 0x1,
-	SDE_DSC_MAX
-};
-
-/**
- * CTL sub-blocks
- * @SDE_CTL_SPLIT_DISPLAY       CTL supports video mode split display
- * @SDE_CTL_PINGPONG_SPLIT      CTL supports pingpong split
- * @SDE_CTL_PRIMARY_PREF        CTL preferred for primary display
- * @SDE_CTL_ACTIVE_CFG          CTL configuration is specified using active
- *                              blocks
- * @SDE_CTL_UIDLE               CTL supports uidle
- * @SDE_CTL_MAX
- */
-enum {
-	SDE_CTL_SPLIT_DISPLAY = 0x1,
-	SDE_CTL_PINGPONG_SPLIT,
-	SDE_CTL_PRIMARY_PREF,
-	SDE_CTL_ACTIVE_CFG,
-	SDE_CTL_UIDLE,
-	SDE_CTL_MAX
-};
-
-/**
- * INTF sub-blocks
- * @SDE_INTF_INPUT_CTRL         Supports the setting of pp block from which
- *                              pixel data arrives to this INTF
- * @SDE_INTF_TE                 INTF block has TE configuration support
- * @SDE_INTF_MAX
- */
-enum {
-	SDE_INTF_INPUT_CTRL = 0x1,
-	SDE_INTF_TE,
-	SDE_INTF_MAX
-};
-
-/**
- * WB sub-blocks and features
- * @SDE_WB_LINE_MODE        Writeback module supports line/linear mode
- * @SDE_WB_BLOCK_MODE       Writeback module supports block mode read
- * @SDE_WB_ROTATE           rotation support,this is available if writeback
- *                          supports block mode read
- * @SDE_WB_CSC              Writeback color conversion block support
- * @SDE_WB_CHROMA_DOWN,     Writeback chroma down block,
- * @SDE_WB_DOWNSCALE,       Writeback integer downscaler,
- * @SDE_WB_DITHER,          Dither block
- * @SDE_WB_TRAFFIC_SHAPER,  Writeback traffic shaper bloc
- * @SDE_WB_UBWC,            Writeback Universal bandwidth compression
- * @SDE_WB_YUV_CONFIG       Writeback supports output of YUV colorspace
- * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
- * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of out ROI in
- *                          the destination image
- * @SDE_WB_QOS,             Writeback supports QoS control, danger/safe/creq
- * @SDE_WB_QOS_8LVL,        Writeback supports 8-level QoS control
- * @SDE_WB_CDP              Writeback supports client driven prefetch
- * @SDE_WB_INPUT_CTRL       Writeback supports from which pp block input pixel
- *                          data arrives.
- * @SDE_WB_HAS_CWB          Writeback block supports concurrent writeback
- * @SDE_WB_CWB_CTRL         Separate CWB control is available for configuring
- * @SDE_WB_MAX              maximum value
- */
-enum {
-	SDE_WB_LINE_MODE = 0x1,
-	SDE_WB_BLOCK_MODE,
-	SDE_WB_ROTATE = SDE_WB_BLOCK_MODE,
-	SDE_WB_CSC,
-	SDE_WB_CHROMA_DOWN,
-	SDE_WB_DOWNSCALE,
-	SDE_WB_DITHER,
-	SDE_WB_TRAFFIC_SHAPER,
-	SDE_WB_UBWC,
-	SDE_WB_YUV_CONFIG,
-	SDE_WB_PIPE_ALPHA,
-	SDE_WB_XY_ROI_OFFSET,
-	SDE_WB_QOS,
-	SDE_WB_QOS_8LVL,
-	SDE_WB_CDP,
-	SDE_WB_INPUT_CTRL,
-	SDE_WB_HAS_CWB,
-	SDE_WB_CWB_CTRL,
-	SDE_WB_MAX
-};
-
-/* CDM features
- * @SDE_CDM_INPUT_CTRL     CDM supports from which pp block intput pixel data
- *                         arrives
- * @SDE_CDM_MAX            maximum value
- */
-enum {
-	SDE_CDM_INPUT_CTRL = 0x1,
-	SDE_CDM_MAX
-};
-
-/**
- * VBIF sub-blocks and features
- * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
- * @SDE_VBIF_QOS_REMAP        VBIF supports QoS priority remap
- * @SDE_VBIF_MAX              maximum value
- */
-enum {
-	SDE_VBIF_QOS_OTLIM = 0x1,
-	SDE_VBIF_QOS_REMAP,
-	SDE_VBIF_MAX
-};
-
-/**
- * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
- * @name:              string name for debug purposes
- * @id:                enum identifying this block
- * @base:              register base offset to mdss
- * @len:               length of hardware block
- * @features           bit mask identifying sub-blocks/features
- * @perf_features   bit mask identifying performance sub-blocks/features
- */
-#define SDE_HW_BLK_INFO \
-	char name[SDE_HW_BLK_NAME_LEN]; \
-	u32 id; \
-	u32 base; \
-	u32 len; \
-	unsigned long features; \
-	unsigned long perf_features
-
-/**
- * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
- * @name:              string name for debug purposes
- * @id:                enum identifying this sub-block
- * @base:              offset of this sub-block relative to the block
- *                     offset
- * @len                register block length of this sub-block
- */
-#define SDE_HW_SUBBLK_INFO \
-	char name[SDE_HW_BLK_NAME_LEN]; \
-	u32 id; \
-	u32 base; \
-	u32 len
-
-/**
- * struct sde_src_blk: SSPP part of the source pipes
- * @info:   HW register and features supported by this sub-blk
- */
-struct sde_src_blk {
-	SDE_HW_SUBBLK_INFO;
-};
-
-/**
- * struct sde_scaler_blk: Scaler information
- * @info:   HW register and features supported by this sub-blk
- * @version: qseed block revision
- * @h_preload: horizontal preload
- * @v_preload: vertical preload
- */
-struct sde_scaler_blk {
-	SDE_HW_SUBBLK_INFO;
-	u32 version;
-	u32 h_preload;
-	u32 v_preload;
-};
-
-struct sde_csc_blk {
-	SDE_HW_SUBBLK_INFO;
-};
-
-/**
- * struct sde_pp_blk : Pixel processing sub-blk information
- * @info:   HW register and features supported by this sub-blk
- * @version: HW Algorithm version
- */
-struct sde_pp_blk {
-	SDE_HW_SUBBLK_INFO;
-	u32 version;
-};
-
-/**
- * struct sde_format_extended - define sde specific pixel format+modifier
- * @fourcc_format: Base FOURCC pixel format code
- * @modifier: 64-bit drm format modifier, same modifier must be applied to all
- *            framebuffer planes
- */
-struct sde_format_extended {
-	uint32_t fourcc_format;
-	uint64_t modifier;
-};
-
-/**
- * enum sde_qos_lut_usage - define QoS LUT use cases
- */
-enum sde_qos_lut_usage {
-	SDE_QOS_LUT_USAGE_LINEAR,
-	SDE_QOS_LUT_USAGE_MACROTILE,
-	SDE_QOS_LUT_USAGE_NRT,
-	SDE_QOS_LUT_USAGE_CWB,
-	SDE_QOS_LUT_USAGE_MACROTILE_QSEED,
-	SDE_QOS_LUT_USAGE_MAX,
-};
-
-/**
- * struct sde_qos_lut_entry - define QoS LUT table entry
- * @fl: fill level, or zero on last entry to indicate default lut
- * @lut: lut to use if equal to or less than fill level
- */
-struct sde_qos_lut_entry {
-	u32 fl;
-	u64 lut;
-};
-
-/**
- * struct sde_qos_lut_tbl - define QoS LUT table
- * @nentry: number of entry in this table
- * @entries: Pointer to table entries
- */
-struct sde_qos_lut_tbl {
-	u32 nentry;
-	struct sde_qos_lut_entry *entries;
-};
-
-/**
- * struct sde_sspp_sub_blks : SSPP sub-blocks
- * @maxdwnscale: max downscale ratio supported(without DECIMATION)
- * @maxupscale:  maxupscale ratio supported
- * @maxwidth:    max pixelwidth supported by this pipe
- * @creq_vblank: creq priority during vertical blanking
- * @danger_vblank: danger priority during vertical blanking
- * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
- * @smart_dma_priority: hw priority of rect1 of multirect pipe
- * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
- * @src_blk:
- * @scaler_blk:
- * @csc_blk:
- * @hsic:
- * @memcolor:
- * @pcc_blk:
- * @gamut_blk: 3D LUT gamut block
- * @num_igc_blk: number of IGC block
- * @igc_blk: 1D LUT IGC block
- * @num_gc_blk: number of GC block
- * @gc_blk: 1D LUT GC block
- * @num_dgm_csc_blk: number of DGM CSC blocks
- * @dgm_csc_blk: DGM CSC blocks
- * @format_list: Pointer to list of supported formats
- * @virt_format_list: Pointer to list of supported formats for virtual planes
- * @in_rot_format_list: Pointer to list of supported formats for inline rotation
- * @in_rot_maxdwnscale_rt_num: max downscale ratio for inline rotation
- *                                 rt clients - numerator
- * @in_rot_maxdwnscale_rt_denom: max downscale ratio for inline rotation
- *                                 rt clients - denominator
- * @in_rot_maxdwnscale_nrt: max downscale ratio for inline rotation nrt clients
- * @in_rot_maxheight: max pre rotated height for inline rotation
- * @in_rot_prefill_fudge_lines: prefill fudge lines for inline rotation
- * @in_rot_prefill_lines_mv12: prefill lines for nv12 format inline rotation
- * @in_rot_prefill_lines: prefill lines for inline rotation
- * @llcc_scid: scid for the system cache
- * @llcc_slice size: slice size of the system cache
- */
-struct sde_sspp_sub_blks {
-	u32 maxlinewidth;
-	u32 creq_vblank;
-	u32 danger_vblank;
-	u32 pixel_ram_size;
-	u32 maxdwnscale;
-	u32 maxupscale;
-	u32 maxhdeciexp; /* max decimation is 2^value */
-	u32 maxvdeciexp; /* max decimation is 2^value */
-	u32 smart_dma_priority;
-	u32 max_per_pipe_bw;
-	struct sde_src_blk src_blk;
-	struct sde_scaler_blk scaler_blk;
-	struct sde_pp_blk csc_blk;
-	struct sde_pp_blk hsic_blk;
-	struct sde_pp_blk memcolor_blk;
-	struct sde_pp_blk pcc_blk;
-	struct sde_pp_blk gamut_blk;
-	u32 num_igc_blk;
-	struct sde_pp_blk igc_blk[SSPP_SUBBLK_COUNT_MAX];
-	u32 num_gc_blk;
-	struct sde_pp_blk gc_blk[SSPP_SUBBLK_COUNT_MAX];
-	u32 num_dgm_csc_blk;
-	struct sde_pp_blk dgm_csc_blk[SSPP_SUBBLK_COUNT_MAX];
-
-	const struct sde_format_extended *format_list;
-	const struct sde_format_extended *virt_format_list;
-	const struct sde_format_extended *in_rot_format_list;
-	u32 in_rot_maxdwnscale_rt_num;
-	u32 in_rot_maxdwnscale_rt_denom;
-	u32 in_rot_maxdwnscale_nrt;
-	u32 in_rot_maxheight;
-	u32 in_rot_prefill_fudge_lines;
-	u32 in_rot_prefill_lines_nv12;
-	u32 in_rot_prefill_lines;
-	int llcc_scid;
-	size_t llcc_slice_size;
-};
-
-/**
- * struct sde_lm_sub_blks:      information of mixer block
- * @maxwidth:               Max pixel width supported by this mixer
- * @maxblendstages:         Max number of blend-stages supported
- * @blendstage_base:        Blend-stage register base offset
- * @gc: gamma correction block
- */
-struct sde_lm_sub_blks {
-	u32 maxwidth;
-	u32 maxblendstages;
-	u32 blendstage_base[MAX_BLOCKS];
-	struct sde_pp_blk gc;
-};
-
-struct sde_dspp_sub_blks {
-	struct sde_pp_blk igc;
-	struct sde_pp_blk pcc;
-	struct sde_pp_blk gc;
-	struct sde_pp_blk hsic;
-	struct sde_pp_blk memcolor;
-	struct sde_pp_blk sixzone;
-	struct sde_pp_blk gamut;
-	struct sde_pp_blk dither;
-	struct sde_pp_blk hist;
-	struct sde_pp_blk ad;
-	struct sde_pp_blk ltm;
-	struct sde_pp_blk vlut;
-};
-
-struct sde_pingpong_sub_blks {
-	struct sde_pp_blk te;
-	struct sde_pp_blk te2;
-	struct sde_pp_blk dsc;
-	struct sde_pp_blk dither;
-};
-
-struct sde_wb_sub_blocks {
-	u32 maxlinewidth;
-};
-
-struct sde_mdss_base_cfg {
-	SDE_HW_BLK_INFO;
-};
-
-/**
- * sde_clk_ctrl_type - Defines top level clock control signals
- */
-enum sde_clk_ctrl_type {
-	SDE_CLK_CTRL_NONE,
-	SDE_CLK_CTRL_VIG0,
-	SDE_CLK_CTRL_VIG1,
-	SDE_CLK_CTRL_VIG2,
-	SDE_CLK_CTRL_VIG3,
-	SDE_CLK_CTRL_VIG4,
-	SDE_CLK_CTRL_RGB0,
-	SDE_CLK_CTRL_RGB1,
-	SDE_CLK_CTRL_RGB2,
-	SDE_CLK_CTRL_RGB3,
-	SDE_CLK_CTRL_DMA0,
-	SDE_CLK_CTRL_DMA1,
-	SDE_CLK_CTRL_CURSOR0,
-	SDE_CLK_CTRL_CURSOR1,
-	SDE_CLK_CTRL_WB0,
-	SDE_CLK_CTRL_WB1,
-	SDE_CLK_CTRL_WB2,
-	SDE_CLK_CTRL_LUTDMA,
-	SDE_CLK_CTRL_MAX,
-};
-
-/* struct sde_clk_ctrl_reg : Clock control register
- * @reg_off:           register offset
- * @bit_off:           bit offset
- */
-struct sde_clk_ctrl_reg {
-	u32 reg_off;
-	u32 bit_off;
-};
-
-/* struct sde_mdp_cfg : MDP TOP-BLK instance info
- * @id:                index identifying this block
- * @base:              register base offset to mdss
- * @features           bit mask identifying sub-blocks/features
- * @highest_bank_bit:  UBWC parameter
- * @ubwc_static:       ubwc static configuration
- * @ubwc_swizzle:      ubwc default swizzle setting
- * @has_dest_scaler:   indicates support of destination scaler
- * @smart_panel_align_mode: split display smart panel align modes
- * @clk_ctrls          clock control register definition
- */
-struct sde_mdp_cfg {
-	SDE_HW_BLK_INFO;
-	u32 highest_bank_bit;
-	u32 ubwc_static;
-	u32 ubwc_swizzle;
-	bool has_dest_scaler;
-	u32 smart_panel_align_mode;
-	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
-};
-
-/* struct sde_uidle_cfg : MDP TOP-BLK instance info
- * @id:                     index identifying this block
- * @base:                   register base offset to mdss
- * @features:               bit mask identifying sub-blocks/features
- * @fal10_exit_cnt:         fal10 exit counter
- * @fal10_exit_danger:      fal10 exit danger level
- * @fal10_danger:           fal10 danger level
- * @fal10_target_idle_time: fal10 targeted time in uS
- * @fal1_target_idle_time:  fal1 targeted time in uS
- * @fal10_threshold:        fal10 threshold value
- * @max_downscale:          maximum downscaling ratio x1000.
- *	                    This ratio is multiplied x1000 to allow
- *	                    3 decimal precision digits.
- * @max_fps:                maximum fps to allow micro idle
- * @uidle_rev:              uidle revision supported by the target,
- *                          zero if no support
- * @debugfs_perf:           enable/disable performance counters and status
- *                          logging
- * @debugfs_ctrl:           uidle is enabled/disabled through debugfs
- * @perf_cntr_en:           performance counters are enabled/disabled
- */
-struct sde_uidle_cfg {
-	SDE_HW_BLK_INFO;
-	/* global settings */
-	u32 fal10_exit_cnt;
-	u32 fal10_exit_danger;
-	u32 fal10_danger;
-	/* per-pipe settings */
-	u32 fal10_target_idle_time;
-	u32 fal1_target_idle_time;
-	u32 fal10_threshold;
-	u32 max_dwnscale;
-	u32 max_fps;
-	u32 uidle_rev;
-	u32 debugfs_perf;
-	bool debugfs_ctrl;
-	bool perf_cntr_en;
-};
-
-/* struct sde_mdp_cfg : MDP TOP-BLK instance info
- * @id:                index identifying this block
- * @base:              register base offset to mdss
- * @features           bit mask identifying sub-blocks/features
- */
-struct sde_ctl_cfg {
-	SDE_HW_BLK_INFO;
-};
-
-/**
- * struct sde_sspp_cfg - information of source pipes
- * @id:                index identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @sblk:              SSPP sub-blocks information
- * @xin_id:            bus client identifier
- * @clk_ctrl           clock control identifier
- * @type               sspp type identifier
- */
-struct sde_sspp_cfg {
-	SDE_HW_BLK_INFO;
-	struct sde_sspp_sub_blks *sblk;
-	u32 xin_id;
-	enum sde_clk_ctrl_type clk_ctrl;
-	u32 type;
-};
-
-/**
- * struct sde_lm_cfg - information of layer mixer blocks
- * @id:                index identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @sblk:              LM Sub-blocks information
- * @dspp:              ID of connected DSPP, DSPP_MAX if unsupported
- * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
- * @ds:                ID of connected DS, DS_MAX if unsupported
- * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
- */
-struct sde_lm_cfg {
-	SDE_HW_BLK_INFO;
-	const struct sde_lm_sub_blks *sblk;
-	u32 dspp;
-	u32 pingpong;
-	u32 ds;
-	unsigned long lm_pair_mask;
-};
-
-/**
- * struct sde_dspp_cfg - information of DSPP top block
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- *                     supported by this block
- */
-struct sde_dspp_top_cfg  {
-	SDE_HW_BLK_INFO;
-};
-
-/**
- * struct sde_dspp_cfg - information of DSPP blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- *                     supported by this block
- * @sblk               sub-blocks information
- */
-struct sde_dspp_cfg  {
-	SDE_HW_BLK_INFO;
-	const struct sde_dspp_sub_blks *sblk;
-};
-
-/**
- * struct sde_ds_top_cfg - information of dest scaler top
- * @id               enum identifying this block
- * @base             register offset of this block
- * @features         bit mask identifying features
- * @version          hw version of dest scaler
- * @maxinputwidth    maximum input line width
- * @maxoutputwidth   maximum output line width
- * @maxupscale       maximum upscale ratio
- */
-struct sde_ds_top_cfg {
-	SDE_HW_BLK_INFO;
-	u32 version;
-	u32 maxinputwidth;
-	u32 maxoutputwidth;
-	u32 maxupscale;
-};
-
-/**
- * struct sde_ds_cfg - information of dest scaler blocks
- * @id          enum identifying this block
- * @base        register offset wrt DS top offset
- * @features    bit mask identifying features
- * @version     hw version of the qseed block
- * @top         DS top information
- */
-struct sde_ds_cfg {
-	SDE_HW_BLK_INFO;
-	u32 version;
-	const struct sde_ds_top_cfg *top;
-};
-
-/**
- * struct sde_pingpong_cfg - information of PING-PONG blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @sblk               sub-blocks information
- * @merge_3d_id        merge_3d block id
- */
-struct sde_pingpong_cfg  {
-	SDE_HW_BLK_INFO;
-	const struct sde_pingpong_sub_blks *sblk;
-	int merge_3d_id;
-};
-
-/**
- * struct sde_dsc_cfg - information of DSC blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @len:               length of hardware block
- * @features           bit mask identifying sub-blocks/features
- */
-struct sde_dsc_cfg {
-	SDE_HW_BLK_INFO;
-};
-
-/**
- * struct sde_cdm_cfg - information of chroma down blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @intf_connect       Bitmask of INTF IDs this CDM can connect to
- * @wb_connect:        Bitmask of Writeback IDs this CDM can connect to
- */
-struct sde_cdm_cfg   {
-	SDE_HW_BLK_INFO;
-	unsigned long intf_connect;
-	unsigned long wb_connect;
-};
-
-/**
- * struct sde_intf_cfg - information of timing engine blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @type:              Interface type(DSI, DP, HDMI)
- * @controller_id:     Controller Instance ID in case of multiple of intf type
- * @prog_fetch_lines_worst_case	Worst case latency num lines needed to prefetch
- */
-struct sde_intf_cfg  {
-	SDE_HW_BLK_INFO;
-	u32 type;   /* interface type*/
-	u32 controller_id;
-	u32 prog_fetch_lines_worst_case;
-};
-
-/**
- * struct sde_wb_cfg - information of writeback blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @sblk               sub-block information
- * @format_list: Pointer to list of supported formats
- * @vbif_idx           vbif identifier
- * @xin_id             client interface identifier
- * @clk_ctrl           clock control identifier
- */
-struct sde_wb_cfg {
-	SDE_HW_BLK_INFO;
-	const struct sde_wb_sub_blocks *sblk;
-	const struct sde_format_extended *format_list;
-	u32 vbif_idx;
-	u32 xin_id;
-	enum sde_clk_ctrl_type clk_ctrl;
-};
-
-/**
- * struct sde_merge_3d_cfg - information of merge_3d blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @len:               length of hardware block
- * @features           bit mask identifying sub-blocks/features
- */
-struct sde_merge_3d_cfg {
-	SDE_HW_BLK_INFO;
-};
-
-/**
- * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
- * @pps                pixel per seconds
- * @ot_limit           OT limit to use up to specified pixel per second
- */
-struct sde_vbif_dynamic_ot_cfg {
-	u64 pps;
-	u32 ot_limit;
-};
-
-/**
- * struct sde_vbif_dynamic_ot_tbl - dynamic OT setting table
- * @count              length of cfg
- * @cfg                pointer to array of configuration settings with
- *                     ascending requirements
- */
-struct sde_vbif_dynamic_ot_tbl {
-	u32 count;
-	struct sde_vbif_dynamic_ot_cfg *cfg;
-};
-
-/**
- * struct sde_vbif_qos_tbl - QoS priority table
- * @npriority_lvl      num of priority level
- * @priority_lvl       pointer to array of priority level in ascending order
- */
-struct sde_vbif_qos_tbl {
-	u32 npriority_lvl;
-	u32 *priority_lvl;
-};
-
-/**
- * enum sde_vbif_client_type
- * @VBIF_RT_CLIENT: real time client
- * @VBIF_NRT_CLIENT: non-realtime clients like writeback
- * @VBIF_CWB_CLIENT: concurrent writeback client
- * @VBIF_LUTDMA_CLIENT: LUTDMA client
- * @VBIF_MAX_CLIENT: max number of clients
- */
-enum sde_vbif_client_type {
-	VBIF_RT_CLIENT,
-	VBIF_NRT_CLIENT,
-	VBIF_CWB_CLIENT,
-	VBIF_LUTDMA_CLIENT,
-	VBIF_MAX_CLIENT
-};
-
-/**
- * struct sde_vbif_cfg - information of VBIF blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @ot_rd_limit        default OT read limit
- * @ot_wr_limit        default OT write limit
- * @xin_halt_timeout   maximum time (in usec) for xin to halt
- * @dynamic_ot_rd_tbl  dynamic OT read configuration table
- * @dynamic_ot_wr_tbl  dynamic OT write configuration table
- * @qos_tbl            Array of QoS priority table
- * @memtype_count      number of defined memtypes
- * @memtype            array of xin memtype definitions
- */
-struct sde_vbif_cfg {
-	SDE_HW_BLK_INFO;
-	u32 default_ot_rd_limit;
-	u32 default_ot_wr_limit;
-	u32 xin_halt_timeout;
-	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
-	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
-	struct sde_vbif_qos_tbl qos_tbl[VBIF_MAX_CLIENT];
-	u32 memtype_count;
-	u32 memtype[MAX_XIN_COUNT];
-};
-/**
- * struct sde_reg_dma_cfg - information of lut dma blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @version            version of lutdma hw block
- * @trigger_sel_off    offset to trigger select registers of lutdma
- * @broadcast_disabled flag indicating if broadcast usage should be avoided
- * @xin_id             VBIF xin client-id for LUTDMA
- * @vbif_idx           VBIF id (RT/NRT)
- * @clk_ctrl           VBIF xin client clk-ctrl
- */
-struct sde_reg_dma_cfg {
-	SDE_HW_BLK_INFO;
-	u32 version;
-	u32 trigger_sel_off;
-	u32 broadcast_disabled;
-	u32 xin_id;
-	u32 vbif_idx;
-	enum sde_clk_ctrl_type clk_ctrl;
-};
-
-/**
- * Define CDP use cases
- * @SDE_PERF_CDP_UDAGE_RT: real-time use cases
- * @SDE_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
- */
-enum {
-	SDE_PERF_CDP_USAGE_RT,
-	SDE_PERF_CDP_USAGE_NRT,
-	SDE_PERF_CDP_USAGE_MAX
-};
-
-/**
- * struct sde_perf_cdp_cfg - define CDP use case configuration
- * @rd_enable: true if read pipe CDP is enabled
- * @wr_enable: true if write pipe CDP is enabled
- */
-struct sde_perf_cdp_cfg {
-	bool rd_enable;
-	bool wr_enable;
-};
-
-/**
- * struct sde_sc_cfg - define system cache configuration
- * @has_sys_cache: true if system cache is enabled
- * @llcc_scid: scid for the system cache
- * @llcc_slice_size: slice size of the system cache
- */
-struct sde_sc_cfg {
-	bool has_sys_cache;
-	int llcc_scid;
-	size_t llcc_slice_size;
-};
-
-/**
- * struct sde_perf_cfg - performance control settings
- * @max_bw_low         low threshold of maximum bandwidth (kbps)
- * @max_bw_high        high threshold of maximum bandwidth (kbps)
- * @min_core_ib        minimum bandwidth for core (kbps)
- * @min_core_ib        minimum mnoc ib vote in kbps
- * @min_llcc_ib        minimum llcc ib vote in kbps
- * @min_dram_ib        minimum dram ib vote in kbps
- * @core_ib_ff         core instantaneous bandwidth fudge factor
- * @core_clk_ff        core clock fudge factor
- * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
- * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
- * @undersized_prefill_lines   undersized prefill in lines
- * @xtra_prefill_lines         extra prefill latency in lines
- * @dest_scale_prefill_lines   destination scaler latency in lines
- * @macrotile_perfill_lines    macrotile latency in lines
- * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
- * @linear_prefill_lines       linear latency in lines
- * @downscaling_prefill_lines  downscaling latency in lines
- * @amortizable_theshold minimum y position for traffic shaping prefill
- * @min_prefill_lines  minimum pipeline latency in lines
- * @danger_lut_tbl: LUT tables for danger signals
- * @sfe_lut_tbl: LUT tables for safe signals
- * @qos_lut_tbl: LUT tables for QoS signals
- * @cdp_cfg            cdp use case configurations
- * @cpu_mask:          pm_qos cpu mask value
- * @cpu_dma_latency:   pm_qos cpu dma latency value
- */
-struct sde_perf_cfg {
-	u32 max_bw_low;
-	u32 max_bw_high;
-	u32 min_core_ib;
-	u32 min_llcc_ib;
-	u32 min_dram_ib;
-	const char *core_ib_ff;
-	const char *core_clk_ff;
-	const char *comp_ratio_rt;
-	const char *comp_ratio_nrt;
-	u32 undersized_prefill_lines;
-	u32 xtra_prefill_lines;
-	u32 dest_scale_prefill_lines;
-	u32 macrotile_prefill_lines;
-	u32 yuv_nv12_prefill_lines;
-	u32 linear_prefill_lines;
-	u32 downscaling_prefill_lines;
-	u32 amortizable_threshold;
-	u32 min_prefill_lines;
-	u32 danger_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
-	struct sde_qos_lut_tbl sfe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
-	struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
-	struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-};
-
-/**
- * struct sde_mdss_cfg - information of MDSS HW
- * This is the main catalog data structure representing
- * this HW version. Contains number of instances,
- * register offsets, capabilities of the all MDSS HW sub-blocks.
- *
- * @max_sspp_linewidth max source pipe line width support.
- * @max_mixer_width    max layer mixer line width support.
- * @max_mixer_blendstages max layer mixer blend stages or
- *                       supported z order
- * @max_wb_linewidth   max writeback line width support.
- * @max_display_width   maximum display width support.
- * @max_display_height  maximum display height support.
- * @max_lm_per_display  maximum layer mixer per display
- * @min_display_width   minimum display width support.
- * @min_display_height  minimum display height support.
- * @qseed_type         qseed2 or qseed3 support.
- * @csc_type           csc or csc_10bit support.
- * @smart_dma_rev      Supported version of SmartDMA feature.
- * @ctl_rev            supported version of control path.
- * @has_src_split      source split feature status
- * @has_cdp            Client driven prefetch feature status
- * @has_wb_ubwc        UBWC feature supported on WB
- * @has_cwb_support    indicates if device supports primary capture through CWB
- * @ubwc_version       UBWC feature version (0x0 for not supported)
- * @ubwc_bw_calc_version indicate how UBWC BW has to be calculated
- * @has_idle_pc        indicate if idle power collapse feature is supported
- * @has_hdr            HDR feature support
- * @has_hdr_plus       HDR10+ feature support
- * @dma_formats        Supported formats for dma pipe
- * @cursor_formats     Supported formats for cursor pipe
- * @vig_formats        Supported formats for vig pipe
- * @wb_formats         Supported formats for wb
- * @virt_vig_formats   Supported formats for virtual vig pipe
- * @vbif_qos_nlvl      number of vbif QoS priority level
- * @ts_prefill_rev     prefill traffic shaper feature revision
- * @true_inline_rot_rev	inline rotator feature revision
- * @true_inline_dwnscale_rt_num    true inline rotator downscale ratio for rt
- *                                       - numerator
- * @true_inline_dwnscale_rt_denom    true inline rot downscale ratio for rt
- *                                       - denominator
- * @true_inline_dwnscale_nrt    true inline rotator downscale ratio for nrt
- * @true_inline_prefill_fudge_lines    true inline rotator prefill fudge lines
- * @true_inline_prefill_lines_nv12    true inline prefill lines for nv12 format
- * @true_inline_prefill_lines    true inline prefill lines
- * @macrotile_mode     UBWC parameter for macro tile channel distribution
- * @pipe_order_type    indicate if it is required to specify pipe order
- * @delay_prg_fetch_start indicates if throttling the fetch start is required
- * @has_qsync	       Supports qsync feature
- * @has_3d_merge_reset Supports 3D merge reset
- * @has_decimation     Supports decimation
- * @has_qos_fl_nocalc  flag to indicate QoS fill level needs no calculation
- * @sc_cfg: system cache configuration
- * @uidle_cfg		Settings for uidle feature
- * @sui_misr_supported  indicate if secure-ui-misr is supported
- * @sui_block_xin_mask  mask of all the xin-clients to be blocked during
- *                         secure-ui when secure-ui-misr feature is supported
- * @sec_sid_mask_count  number of SID masks
- * @sec_sid_mask        SID masks used during the scm_call for transition
- *                         between secure/non-secure sessions
- * @sui_ns_allowed      flag to indicate non-secure context banks are allowed
- *                         during secure-ui session
- * @sui_supported_blendstage  secure-ui supported blendstage
- * @has_sui_blendstage  flag to indicate secure-ui has a blendstage restriction
- * @has_cursor    indicates if hardware cursor is supported
- * @has_vig_p010  indicates if vig pipe supports p010 format
- * @inline_rot_formats	formats supported by the inline rotator feature
- * @mdss_irqs	  bitmap with the irqs supported by the target
- */
-struct sde_mdss_cfg {
-	u32 hwversion;
-
-	u32 max_sspp_linewidth;
-	u32 max_mixer_width;
-	u32 max_mixer_blendstages;
-	u32 max_wb_linewidth;
-
-	u32 max_display_width;
-	u32 max_display_height;
-	u32 min_display_width;
-	u32 min_display_height;
-	u32 max_lm_per_display;
-
-	u32 qseed_type;
-	u32 csc_type;
-	u32 smart_dma_rev;
-	u32 ctl_rev;
-	bool has_src_split;
-	bool has_cdp;
-	bool has_dim_layer;
-	bool has_wb_ubwc;
-	bool has_cwb_support;
-	u32 ubwc_version;
-	u32 ubwc_bw_calc_version;
-	bool has_idle_pc;
-	u32 vbif_qos_nlvl;
-	u32 ts_prefill_rev;
-	u32 true_inline_rot_rev;
-	u32 true_inline_dwnscale_rt_num;
-	u32 true_inline_dwnscale_rt_denom;
-	u32 true_inline_dwnscale_nrt;
-	u32 true_inline_prefill_fudge_lines;
-	u32 true_inline_prefill_lines_nv12;
-	u32 true_inline_prefill_lines;
-	u32 macrotile_mode;
-	u32 pipe_order_type;
-	bool delay_prg_fetch_start;
-	bool has_qsync;
-	bool has_3d_merge_reset;
-	bool has_decimation;
-	bool has_qos_fl_nocalc;
-
-	struct sde_sc_cfg sc_cfg;
-
-	bool sui_misr_supported;
-	u32 sui_block_xin_mask;
-
-	u32 sec_sid_mask_count;
-	u32 sec_sid_mask[MAX_BLOCKS];
-	u32 sui_ns_allowed;
-	u32 sui_supported_blendstage;
-	bool has_sui_blendstage;
-
-	bool has_hdr;
-	bool has_hdr_plus;
-	bool has_cursor;
-	bool has_vig_p010;
-	u32 mdss_count;
-	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
-
-	u32 mdp_count;
-	struct sde_mdp_cfg mdp[MAX_BLOCKS];
-
-	/* uidle is a singleton */
-	struct sde_uidle_cfg uidle_cfg;
-
-	u32 ctl_count;
-	struct sde_ctl_cfg ctl[MAX_BLOCKS];
-
-	u32 sspp_count;
-	struct sde_sspp_cfg sspp[MAX_BLOCKS];
-
-	u32 mixer_count;
-	struct sde_lm_cfg mixer[MAX_BLOCKS];
-
-	struct sde_dspp_top_cfg dspp_top;
-
-	u32 dspp_count;
-	struct sde_dspp_cfg dspp[MAX_BLOCKS];
-
-	u32 ds_count;
-	struct sde_ds_cfg ds[MAX_BLOCKS];
-
-	u32 pingpong_count;
-	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
-
-	u32 dsc_count;
-	struct sde_dsc_cfg dsc[MAX_BLOCKS];
-
-	u32 cdm_count;
-	struct sde_cdm_cfg cdm[MAX_BLOCKS];
-
-	u32 intf_count;
-	struct sde_intf_cfg intf[MAX_BLOCKS];
-
-	u32 wb_count;
-	struct sde_wb_cfg wb[MAX_BLOCKS];
-
-	u32 vbif_count;
-	struct sde_vbif_cfg vbif[MAX_BLOCKS];
-
-	u32 reg_dma_count;
-	struct sde_reg_dma_cfg dma_cfg;
-
-	u32 ad_count;
-	u32 ltm_count;
-
-	u32 merge_3d_count;
-	struct sde_merge_3d_cfg merge_3d[MAX_BLOCKS];
-
-	/* Add additional block data structures here */
-
-	struct sde_perf_cfg perf;
-	struct sde_format_extended *dma_formats;
-	struct sde_format_extended *cursor_formats;
-	struct sde_format_extended *vig_formats;
-	struct sde_format_extended *wb_formats;
-	struct sde_format_extended *virt_vig_formats;
-	struct sde_format_extended *inline_rot_formats;
-
-	DECLARE_BITMAP(mdss_irqs, MDSS_INTR_MAX);
-};
-
-struct sde_mdss_hw_cfg_handler {
-	u32 major;
-	u32 minor;
-	struct sde_mdss_cfg* (*cfg_init)(u32 data);
-};
-
-/*
- * Access Macros
- */
-#define BLK_MDP(s) ((s)->mdp)
-#define BLK_CTL(s) ((s)->ctl)
-#define BLK_VIG(s) ((s)->vig)
-#define BLK_RGB(s) ((s)->rgb)
-#define BLK_DMA(s) ((s)->dma)
-#define BLK_CURSOR(s) ((s)->cursor)
-#define BLK_MIXER(s) ((s)->mixer)
-#define BLK_DSPP(s) ((s)->dspp)
-#define BLK_DS(s) ((s)->ds)
-#define BLK_PINGPONG(s) ((s)->pingpong)
-#define BLK_CDM(s) ((s)->cdm)
-#define BLK_INTF(s) ((s)->intf)
-#define BLK_WB(s) ((s)->wb)
-#define BLK_AD(s) ((s)->ad)
-#define BLK_LTM(s) ((s)->ltm)
-
-/**
- * sde_hw_set_preference: populate the individual hw lm preferences,
- *                        overwrite if exists
- * @sde_cfg:              pointer to sspp cfg
- * @num_lm:               num lms to set preference
- */
-void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm);
-
-/**
- * sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
- * and stores all parsed offset, hardware capabilities in config structure.
- * @dev:          drm device node.
- * @hw_rev:       caller needs provide the hardware revision before parsing.
- *
- * Return: parsed sde config structure
- */
-struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
-
-/**
- * sde_hw_catalog_deinit - sde hardware catalog cleanup
- * @sde_cfg:      pointer returned from init function
- */
-void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
-
-/**
- * sde_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg:          pointer to sspp cfg
- */
-static inline bool sde_hw_sspp_multirect_enabled(const struct sde_sspp_cfg *cfg)
-{
-	return test_bit(SDE_SSPP_SMART_DMA_V1, &cfg->features) ||
-			 test_bit(SDE_SSPP_SMART_DMA_V2, &cfg->features) ||
-			 test_bit(SDE_SSPP_SMART_DMA_V2p5, &cfg->features);
-}
-
-static inline bool sde_hw_intf_te_supported(const struct sde_mdss_cfg *sde_cfg)
-{
-	return test_bit(SDE_INTF_TE, &(sde_cfg->intf[0].features));
-}
-#endif /* _SDE_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
deleted file mode 100644
index 4cac323..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hw_mdss.h"
-
-#define RGB_10BIT_FMTS	{DRM_FORMAT_BGRA1010102, 0}, \
-	{DRM_FORMAT_BGRX1010102, 0}, \
-	{DRM_FORMAT_RGBA1010102, 0}, \
-	{DRM_FORMAT_RGBX1010102, 0}, \
-	{DRM_FORMAT_ABGR2101010, 0}, \
-	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, \
-	{DRM_FORMAT_XBGR2101010, 0}, \
-	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, \
-	{DRM_FORMAT_ARGB2101010, 0}, \
-	{DRM_FORMAT_XRGB2101010, 0}
-
-#define RGB_FMTS	{DRM_FORMAT_ARGB8888, 0}, \
-	{DRM_FORMAT_ABGR8888, 0}, \
-	{DRM_FORMAT_RGBA8888, 0}, \
-	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, \
-	{DRM_FORMAT_BGRA8888, 0}, \
-	{DRM_FORMAT_XRGB8888, 0}, \
-	{DRM_FORMAT_RGBX8888, 0}, \
-	{DRM_FORMAT_BGRX8888, 0}, \
-	{DRM_FORMAT_XBGR8888, 0}, \
-	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, \
-	{DRM_FORMAT_RGB888, 0}, \
-	{DRM_FORMAT_BGR888, 0}, \
-	{DRM_FORMAT_RGB565, 0}, \
-	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, \
-	{DRM_FORMAT_BGR565, 0}, \
-	{DRM_FORMAT_ARGB1555, 0}, \
-	{DRM_FORMAT_ABGR1555, 0}, \
-	{DRM_FORMAT_RGBA5551, 0}, \
-	{DRM_FORMAT_BGRA5551, 0}, \
-	{DRM_FORMAT_XRGB1555, 0}, \
-	{DRM_FORMAT_XBGR1555, 0}, \
-	{DRM_FORMAT_RGBX5551, 0}, \
-	{DRM_FORMAT_BGRX5551, 0}, \
-	{DRM_FORMAT_ARGB4444, 0}, \
-	{DRM_FORMAT_ABGR4444, 0}, \
-	{DRM_FORMAT_RGBA4444, 0}, \
-	{DRM_FORMAT_BGRA4444, 0}, \
-	{DRM_FORMAT_XRGB4444, 0}, \
-	{DRM_FORMAT_XBGR4444, 0}, \
-	{DRM_FORMAT_RGBX4444, 0}, \
-	{DRM_FORMAT_BGRX4444, 0}
-
-#define TP10_UBWC_FMTS	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED | \
-		DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT}
-
-#define P010_FMTS	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX}
-
-
-static const struct sde_format_extended plane_formats[] = {
-	RGB_FMTS,
-	RGB_10BIT_FMTS,
-	{0, 0},
-};
-
-static const struct sde_format_extended plane_formats_vig[] = {
-	RGB_FMTS,
-
-	{DRM_FORMAT_NV12, 0},
-	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_NV21, 0},
-	{DRM_FORMAT_NV16, 0},
-	{DRM_FORMAT_NV61, 0},
-	{DRM_FORMAT_VYUY, 0},
-	{DRM_FORMAT_UYVY, 0},
-	{DRM_FORMAT_YUYV, 0},
-	{DRM_FORMAT_YVYU, 0},
-	{DRM_FORMAT_YUV420, 0},
-	{DRM_FORMAT_YVU420, 0},
-
-	RGB_10BIT_FMTS,
-	TP10_UBWC_FMTS,
-	P010_FMTS,
-
-	{0, 0},
-};
-
-static const struct sde_format_extended cursor_formats[] = {
-	{DRM_FORMAT_ARGB8888, 0},
-	{DRM_FORMAT_ABGR8888, 0},
-	{DRM_FORMAT_RGBA8888, 0},
-	{DRM_FORMAT_BGRA8888, 0},
-	{DRM_FORMAT_XRGB8888, 0},
-	{DRM_FORMAT_ARGB1555, 0},
-	{DRM_FORMAT_ABGR1555, 0},
-	{DRM_FORMAT_RGBA5551, 0},
-	{DRM_FORMAT_BGRA5551, 0},
-	{DRM_FORMAT_ARGB4444, 0},
-	{DRM_FORMAT_ABGR4444, 0},
-	{DRM_FORMAT_RGBA4444, 0},
-	{DRM_FORMAT_BGRA4444, 0},
-	{0, 0},
-};
-
-static const struct sde_format_extended wb2_formats[] = {
-	{DRM_FORMAT_RGB565, 0},
-	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_RGB888, 0},
-	{DRM_FORMAT_ARGB8888, 0},
-	{DRM_FORMAT_RGBA8888, 0},
-	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_XRGB8888, 0},
-	{DRM_FORMAT_RGBX8888, 0},
-	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_ARGB1555, 0},
-	{DRM_FORMAT_RGBA5551, 0},
-	{DRM_FORMAT_XRGB1555, 0},
-	{DRM_FORMAT_RGBX5551, 0},
-	{DRM_FORMAT_ARGB4444, 0},
-	{DRM_FORMAT_RGBA4444, 0},
-	{DRM_FORMAT_RGBX4444, 0},
-	{DRM_FORMAT_XRGB4444, 0},
-
-	{DRM_FORMAT_BGR565, 0},
-	{DRM_FORMAT_BGR888, 0},
-	{DRM_FORMAT_ABGR8888, 0},
-	{DRM_FORMAT_BGRA8888, 0},
-	{DRM_FORMAT_BGRX8888, 0},
-	{DRM_FORMAT_XBGR8888, 0},
-	{DRM_FORMAT_ABGR1555, 0},
-	{DRM_FORMAT_BGRA5551, 0},
-	{DRM_FORMAT_XBGR1555, 0},
-	{DRM_FORMAT_BGRX5551, 0},
-	{DRM_FORMAT_ABGR4444, 0},
-	{DRM_FORMAT_BGRA4444, 0},
-	{DRM_FORMAT_BGRX4444, 0},
-	{DRM_FORMAT_XBGR4444, 0},
-
-	{DRM_FORMAT_YUV420, 0},
-	{DRM_FORMAT_NV12, 0},
-	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_NV16, 0},
-	{DRM_FORMAT_YUYV, 0},
-
-	RGB_10BIT_FMTS,
-	TP10_UBWC_FMTS,
-
-	{0, 0},
-};
-
-static const struct sde_format_extended p010_ubwc_formats[] = {
-	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX |
-		DRM_FORMAT_MOD_QCOM_COMPRESSED},
-};
-
-static const struct sde_format_extended true_inline_rot_v1_fmts[] = {
-	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED |
-		DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT}, /* tp10 */
-	{0, 0},
-};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
deleted file mode 100644
index 295e226..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ /dev/null
@@ -1,359 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_cdm.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define CDM_CSC_10_OPMODE                  0x000
-#define CDM_CSC_10_BASE                    0x004
-
-#define CDM_CDWN2_OP_MODE                  0x100
-#define CDM_CDWN2_CLAMP_OUT                0x104
-#define CDM_CDWN2_PARAMS_3D_0              0x108
-#define CDM_CDWN2_PARAMS_3D_1              0x10C
-#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
-#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
-#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
-#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
-#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
-#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
-#define CDM_CDWN2_COEFF_COSITE_V           0x128
-#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
-#define CDM_CDWN2_OUT_SIZE                 0x130
-
-#define CDM_HDMI_PACK_OP_MODE              0x200
-#define CDM_CSC_10_MATRIX_COEFF_0          0x004
-
-#define CDM_MUX                            0x224
-
-/**
- * Horizontal coefficients for cosite chroma downscale
- * s13 representation of coefficients
- */
-static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
-
-/**
- * Horizontal coefficients for offsite chroma downscale
- */
-static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
-
-/**
- * Vertical coefficients for cosite chroma downscale
- */
-static u32 cosite_v_coeff[] = {0x00080004};
-/**
- * Vertical coefficients for offsite chroma downscale
- */
-static u32 offsite_v_coeff[] = {0x00060002};
-
-/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
-static struct sde_csc_cfg rgb2yuv_cfg = {
-	{
-		0x0083, 0x0102, 0x0032,
-		0x1fb5, 0x1f6c, 0x00e1,
-		0x00e1, 0x1f45, 0x1fdc
-	},
-	{ 0x00, 0x00, 0x00 },
-	{ 0x0040, 0x0200, 0x0200 },
-	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
-	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
-static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->cdm_count; i++) {
-		if (cdm == m->cdm[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->cdm[i].base;
-			b->length = m->cdm[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_CDM;
-			return &m->cdm[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static int sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx,
-		struct sde_csc_cfg *data)
-{
-	sde_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
-
-	return 0;
-}
-
-static int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx,
-		struct sde_hw_cdm_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 opmode = 0;
-	u32 out_size = 0;
-
-	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
-		opmode &= ~BIT(7);
-	else
-		opmode |= BIT(7);
-
-	/* ENABLE DWNS_H bit */
-	opmode |= BIT(1);
-
-	switch (cfg->h_cdwn_type) {
-	case CDM_CDWN_DISABLE:
-		/* CLEAR METHOD_H field */
-		opmode &= ~(0x18);
-		/* CLEAR DWNS_H bit */
-		opmode &= ~BIT(1);
-		break;
-	case CDM_CDWN_PIXEL_DROP:
-		/* Clear METHOD_H field (pixel drop is 0) */
-		opmode &= ~(0x18);
-		break;
-	case CDM_CDWN_AVG:
-		/* Clear METHOD_H field (Average is 0x1) */
-		opmode &= ~(0x18);
-		opmode |= (0x1 << 0x3);
-		break;
-	case CDM_CDWN_COSITE:
-		/* Clear METHOD_H field (Average is 0x2) */
-		opmode &= ~(0x18);
-		opmode |= (0x2 << 0x3);
-		/* Co-site horizontal coefficients */
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
-				cosite_h_coeff[0]);
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
-				cosite_h_coeff[1]);
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
-				cosite_h_coeff[2]);
-		break;
-	case CDM_CDWN_OFFSITE:
-		/* Clear METHOD_H field (Average is 0x3) */
-		opmode &= ~(0x18);
-		opmode |= (0x3 << 0x3);
-
-		/* Off-site horizontal coefficients */
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
-				offsite_h_coeff[0]);
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
-				offsite_h_coeff[1]);
-		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
-				offsite_h_coeff[2]);
-		break;
-	default:
-		pr_err("%s invalid horz down sampling type\n", __func__);
-		return -EINVAL;
-	}
-
-	/* ENABLE DWNS_V bit */
-	opmode |= BIT(2);
-
-	switch (cfg->v_cdwn_type) {
-	case CDM_CDWN_DISABLE:
-		/* CLEAR METHOD_V field */
-		opmode &= ~(0x60);
-		/* CLEAR DWNS_V bit */
-		opmode &= ~BIT(2);
-		break;
-	case CDM_CDWN_PIXEL_DROP:
-		/* Clear METHOD_V field (pixel drop is 0) */
-		opmode &= ~(0x60);
-		break;
-	case CDM_CDWN_AVG:
-		/* Clear METHOD_V field (Average is 0x1) */
-		opmode &= ~(0x60);
-		opmode |= (0x1 << 0x5);
-		break;
-	case CDM_CDWN_COSITE:
-		/* Clear METHOD_V field (Average is 0x2) */
-		opmode &= ~(0x60);
-		opmode |= (0x2 << 0x5);
-		/* Co-site vertical coefficients */
-		SDE_REG_WRITE(c,
-				CDM_CDWN2_COEFF_COSITE_V,
-				cosite_v_coeff[0]);
-		break;
-	case CDM_CDWN_OFFSITE:
-		/* Clear METHOD_V field (Average is 0x3) */
-		opmode &= ~(0x60);
-		opmode |= (0x3 << 0x5);
-
-		/* Off-site vertical coefficients */
-		SDE_REG_WRITE(c,
-				CDM_CDWN2_COEFF_OFFSITE_V,
-				offsite_v_coeff[0]);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
-		opmode |= BIT(0); /* EN CDWN module */
-	else
-		opmode &= ~BIT(0);
-
-	out_size = (cfg->output_width & 0xFFFF) |
-		((cfg->output_height & 0xFFFF) << 16);
-	SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
-	SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
-	SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
-			((0x3FF << 16) | 0x0));
-
-	return 0;
-}
-
-int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
-		struct sde_hw_cdm_cfg *cdm)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	const struct sde_format *fmt;
-	struct cdm_output_cfg cdm_cfg = { 0 };
-	u32 opmode = 0;
-	u32 csc = 0;
-
-	if (!ctx || !cdm)
-		return -EINVAL;
-
-	fmt = cdm->output_fmt;
-
-	if (!SDE_FORMAT_IS_YUV(fmt))
-		return -EINVAL;
-
-	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
-		if (fmt->chroma_sample != SDE_CHROMA_H1V2)
-			return -EINVAL; /*unsupported format */
-		opmode = BIT(0);
-		opmode |= (fmt->chroma_sample << 1);
-		cdm_cfg.intf_en = true;
-	} else {
-		opmode = 0;
-		cdm_cfg.wb_en = true;
-	}
-
-	csc |= BIT(2);
-	csc &= ~BIT(1);
-	csc |= BIT(0);
-
-	if (ctx && ctx->ops.bind_pingpong_blk)
-		ctx->ops.bind_pingpong_blk(ctx, true,
-				cdm->pp_id);
-	else if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
-		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-
-	SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
-	SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
-	return 0;
-}
-
-void sde_hw_cdm_disable(struct sde_hw_cdm *ctx)
-{
-	struct cdm_output_cfg cdm_cfg = { 0 };
-
-	if (!ctx)
-		return;
-
-	if (ctx && ctx->ops.bind_pingpong_blk)
-		ctx->ops.bind_pingpong_blk(ctx, false, 0);
-	else if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
-		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-}
-
-static void sde_hw_cdm_bind_pingpong_blk(
-		struct sde_hw_cdm *ctx,
-		bool enable,
-		const enum sde_pingpong pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	int mux_cfg = 0xF;
-
-	if (!ctx || (enable && (pp < PINGPONG_0 || pp >= PINGPONG_MAX)))
-		return;
-
-	c = &ctx->hw;
-
-	if (enable)
-		mux_cfg = (pp - PINGPONG_0) & 0x7;
-
-	SDE_REG_WRITE(c, CDM_MUX, mux_cfg);
-}
-
-
-static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops,
-	unsigned long features)
-{
-	ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit;
-	ops->setup_cdwn = sde_hw_cdm_setup_cdwn;
-	ops->enable = sde_hw_cdm_enable;
-	ops->disable = sde_hw_cdm_disable;
-	if (features & BIT(SDE_CDM_INPUT_CTRL))
-		ops->bind_pingpong_blk = sde_hw_cdm_bind_pingpong_blk;
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m,
-		struct sde_hw_mdp *hw_mdp)
-{
-	struct sde_hw_cdm *c;
-	struct sde_cdm_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _cdm_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	c->idx = idx;
-	c->caps = cfg;
-	_setup_cdm_ops(&c->ops, c->caps->features);
-	c->hw_mdp = hw_mdp;
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CDM, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	/*
-	 * Perform any default initialization for the chroma down module
-	 * @setup default csc coefficients
-	 */
-	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm)
-{
-	if (cdm)
-		sde_hw_blk_destroy(&cdm->base);
-	kfree(cdm);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
deleted file mode 100644
index 563318a..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_CDM_H
-#define _SDE_HW_CDM_H
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_top.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_cdm;
-
-struct sde_hw_cdm_cfg {
-	u32 output_width;
-	u32 output_height;
-	u32 output_bit_depth;
-	u32 h_cdwn_type;
-	u32 v_cdwn_type;
-	const struct sde_format *output_fmt;
-	u32 output_type;
-	int flags;
-	int pp_id;
-};
-
-enum sde_hw_cdwn_type {
-	CDM_CDWN_DISABLE,
-	CDM_CDWN_PIXEL_DROP,
-	CDM_CDWN_AVG,
-	CDM_CDWN_COSITE,
-	CDM_CDWN_OFFSITE,
-};
-
-enum sde_hw_cdwn_output_type {
-	CDM_CDWN_OUTPUT_HDMI,
-	CDM_CDWN_OUTPUT_WB,
-};
-
-enum sde_hw_cdwn_output_bit_depth {
-	CDM_CDWN_OUTPUT_8BIT,
-	CDM_CDWN_OUTPUT_10BIT,
-};
-
-/**
- * struct sde_hw_cdm_ops : Interface to the chroma down Hw driver functions
- *                         Assumption is these functions will be called after
- *                         clocks are enabled
- *  @setup_csc:            Programs the csc matrix
- *  @setup_cdwn:           Sets up the chroma down sub module
- *  @enable:               Enables the output to interface and programs the
- *                         output packer
- *  @disable:              Puts the cdm in bypass mode
- * @bind_pingpong_blk:    enable/disable the connection with pingpong which
- *                        will feed pixels to this cdm
- */
-struct sde_hw_cdm_ops {
-	/**
-	 * Programs the CSC matrix for conversion from RGB space to YUV space,
-	 * it is optional to call this function as this matrix is automatically
-	 * set during initialization, user should call this if it wants
-	 * to program a different matrix than default matrix.
-	 * @cdm:          Pointer to the chroma down context structure
-	 * @data          Pointer to CSC configuration data
-	 * return:        0 if success; error code otherwise
-	 */
-	int (*setup_csc_data)(struct sde_hw_cdm *cdm,
-			struct sde_csc_cfg *data);
-
-	/**
-	 * Programs the Chroma downsample part.
-	 * @cdm         Pointer to chroma down context
-	 */
-	int (*setup_cdwn)(struct sde_hw_cdm *cdm,
-	struct sde_hw_cdm_cfg *cfg);
-
-	/**
-	 * Enable the CDM module
-	 * @cdm         Pointer to chroma down context
-	 */
-	int (*enable)(struct sde_hw_cdm *cdm,
-	struct sde_hw_cdm_cfg *cfg);
-
-	/**
-	 * Disable the CDM module
-	 * @cdm         Pointer to chroma down context
-	 */
-	void (*disable)(struct sde_hw_cdm *cdm);
-
-	/**
-	 * Enable/disable the connection with pingpong
-	 * @cdm         Pointer to chroma down context
-	 * @enable      Enable/disable control
-	 * @pp          pingpong block id.
-	 */
-	void (*bind_pingpong_blk)(struct sde_hw_cdm *cdm,
-			bool enable,
-			const enum sde_pingpong pp);
-};
-
-struct sde_hw_cdm {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* chroma down */
-	const struct sde_cdm_cfg *caps;
-	enum  sde_cdm  idx;
-
-	/* mdp top hw driver */
-	struct sde_hw_mdp *hw_mdp;
-
-	/* ops */
-	struct sde_hw_cdm_ops ops;
-};
-
-/**
- * sde_hw_cdm - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_cdm *to_sde_hw_cdm(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_cdm, base);
-}
-
-/**
- * sde_hw_cdm_init - initializes the cdm hw driver object.
- * should be called once before accessing every cdm.
- * @idx:  cdm index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- * @hw_mdp:  pointer to mdp top hw driver object
- */
-struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m,
-		struct sde_hw_mdp *hw_mdp);
-
-/**
- * sde_hw_cdm_destroy - destroys CDM driver context
- * @cdm:   pointer to CDM driver context
- */
-void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm);
-
-#endif /*_SDE_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
deleted file mode 100644
index acfccab..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-#ifndef _SDE_HW_COLOR_PROC_COMMON_V4_H_
-#define _SDE_HW_COLOR_PROC_COMMON_V4_H_
-
-#include "sde_hw_mdss.h"
-
-#define GAMUT_TABLE_SEL_OFF 0x4
-#define GAMUT_UPPER_COLOR_OFF 0x8
-#define GAMUT_LOWER_COLOR_OFF 0xc
-#define GAMUT_SCALEA_OFFSET_OFF 0x10
-#define GAMUT_SCALEB_OFFSET_OFF 0xe0
-#define GAMUT_TABLE0_SEL BIT(12)
-#define GAMUT_MAP_EN BIT(1)
-#define GAMUT_EN BIT(0)
-#define GAMUT_MODE_13B_OFF 640
-#define GAMUT_MODE_5_OFF 1248
-
-enum {
-	gamut_mode_17 = 0,
-	gamut_mode_5,
-	gamut_mode_13a,
-	gamut_mode_13b,
-	gamut_mode_17b,
-};
-
-#define GC_C0_OFF 0x4
-#define GC_C0_INDEX_OFF 0x8
-#define GC_8B_ROUND_EN BIT(1)
-#define GC_EN BIT(0)
-#define GC_TBL_NUM 3
-#define GC_LUT_SWAP_OFF 0x1c
-
-#define IGC_TBL_NUM 3
-#define IGC_DITHER_OFF 0x7e0
-#define IGC_OPMODE_OFF 0x0
-#define IGC_C0_OFF 0x0
-#define IGC_DATA_MASK (BIT(12) - 1)
-#define IGC_DSPP_SEL_MASK_MAX (BIT(4) - 1)
-#define IGC_DSPP_SEL_MASK(n) \
-	((IGC_DSPP_SEL_MASK_MAX & ~(1 << (n))) << 28)
-#define IGC_INDEX_UPDATE BIT(25)
-#define IGC_EN BIT(0)
-#define IGC_DIS 0
-#define IGC_DITHER_DATA_MASK (BIT(4) - 1)
-
-#define PCC_NUM_PLANES 3
-#define PCC_NUM_COEFF 11
-#define PCC_EN BIT(0)
-#define PCC_DIS 0
-#define PCC_C_OFF 0x4
-#define PCC_R_OFF 0x10
-#define PCC_G_OFF 0x1c
-#define PCC_B_OFF 0x28
-#define PCC_RG_OFF 0x34
-#define PCC_RB_OFF 0x40
-#define PCC_GB_OFF 0x4c
-#define PCC_RGB_OFF 0x58
-#define PCC_RR_OFF 0x64
-#define PCC_GG_OFF 0x70
-#define PCC_BB_OFF 0x7c
-
-#define PA_EN BIT(20)
-#define PA_HUE_EN BIT(25)
-#define PA_SAT_EN BIT(26)
-#define PA_VAL_EN BIT(27)
-#define PA_CONT_EN BIT(28)
-
-#define PA_SIXZONE_HUE_EN BIT(29)
-#define PA_SIXZONE_SAT_EN BIT(30)
-#define PA_SIXZONE_VAL_EN BIT(31)
-
-#define PA_HIST_EN BIT(16)
-
-#define PA_SKIN_EN BIT(5)
-#define PA_FOL_EN BIT(6)
-#define PA_SKY_EN BIT(7)
-
-#define PA_HUE_MASK (BIT(12) - 1)
-#define PA_SAT_MASK (BIT(16) - 1)
-#define PA_VAL_MASK (BIT(8) - 1)
-#define PA_CONT_MASK (BIT(8) - 1)
-
-#define PA_HUE_OFF 0x1c
-#define PA_SAT_OFF 0x20
-#define PA_VAL_OFF 0x24
-#define PA_CONT_OFF 0x28
-#define PA_PWL_HOLD_OFF 0x40
-
-#define PA_DISABLE_REQUIRED(x) \
-	!((x) & (PA_SKIN_EN | PA_SKY_EN | \
-	PA_FOL_EN | PA_HUE_EN | \
-	PA_SAT_EN | PA_VAL_EN | \
-	PA_CONT_EN | PA_HIST_EN | \
-	PA_SIXZONE_HUE_EN | PA_SIXZONE_SAT_EN | \
-	PA_SIXZONE_VAL_EN))
-
-#define SIXZONE_ADJ_CURVE_P1_OFF 0x4
-#define SIXZONE_THRESHOLDS_OFF 0x8
-
-#define MEMCOL_SIZE0 20
-#define MEMCOL_SIZE1 8
-#define MEMCOL_PWL0_OFF 0x0
-#define MEMCOL_PWL2_OFF 0x3C
-#define MEMCOL_HOLD_SIZE 0x4
-
-#define MEMCOL_PROT_VAL_EN BIT(24)
-#define MEMCOL_PROT_SAT_EN BIT(23)
-#define MEMCOL_PROT_HUE_EN BIT(22)
-#define MEMCOL_PROT_CONT_EN BIT(18)
-#define MEMCOL_PROT_SIXZONE_EN BIT(17)
-#define MEMCOL_PROT_BLEND_EN BIT(3)
-
-#define MEMCOL_PROT_MASK \
-	(MEMCOL_PROT_VAL_EN | MEMCOL_PROT_SAT_EN | \
-	MEMCOL_PROT_HUE_EN | MEMCOL_PROT_CONT_EN | \
-	MEMCOL_PROT_SIXZONE_EN | MEMCOL_PROT_BLEND_EN)
-
-#define SSPP 0
-#define DSPP 1
-
-struct sde_ltm_phase_info {
-	u32 init_h[LTM_MAX];
-	u32 init_v;
-	u32 inc_h;
-	u32 inc_v;
-	bool portrait_en;
-	bool merge_en;
-};
-
-static inline void sde_ltm_get_phase_info(struct sde_hw_cp_cfg *hw_cfg,
-		struct sde_ltm_phase_info *info)
-{
-	u32 count_v, count_h, num_mixers;
-
-	if (hw_cfg->displayh < hw_cfg->displayv) {
-		count_h = 4;
-		count_v = 8;
-		info->portrait_en = true;
-	} else {
-		count_h = 8;
-		count_v = 4;
-		info->portrait_en = false;
-	}
-
-	num_mixers = hw_cfg->num_of_mixers;
-	if (num_mixers == 1)
-		info->merge_en = false;
-	else
-		info->merge_en = true;
-
-	info->init_h[LTM_0] = (1 << 23);
-	info->init_v = (1 << 23);
-	info->inc_h = ((count_h - 1) << 24) / (hw_cfg->displayh - 1);
-	info->inc_v = ((count_v - 1) << 24) / (hw_cfg->displayv - 1);
-	if (info->merge_en)
-		info->init_h[LTM_1] = info->init_h[LTM_0] +
-			info->inc_h * (hw_cfg->displayh / 2);
-}
-
-#endif /* _SDE_HW_COLOR_PROC_COMMON_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
deleted file mode 100644
index 80f3cb46..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
+++ /dev/null
@@ -1,391 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-#include <drm/msm_drm_pp.h>
-#include "sde_hw_color_proc_common_v4.h"
-#include "sde_hw_color_proc_v4.h"
-
-static int sde_write_3d_gamut(struct sde_hw_blk_reg_map *hw,
-		struct drm_msm_3d_gamut *payload, u32 base,
-		u32 *opcode, u32 pipe, u32 scale_tbl_a_len,
-		u32 scale_tbl_b_len)
-{
-	u32 reg, tbl_len, tbl_off, scale_off, i, j;
-	u32 scale_tbl_len, scale_tbl_off;
-	u32 *scale_data;
-
-	if (!payload || !opcode || !hw) {
-		DRM_ERROR("invalid payload %pK opcode %pK hw %pK\n",
-			payload, opcode, hw);
-		return -EINVAL;
-	}
-
-	switch (payload->mode) {
-	case GAMUT_3D_MODE_17:
-		tbl_len = GAMUT_3D_MODE17_TBL_SZ;
-		tbl_off = 0;
-		if (pipe == DSPP) {
-			scale_off = GAMUT_SCALEA_OFFSET_OFF;
-			*opcode = gamut_mode_17;
-		} else {
-			*opcode = (*opcode & (BIT(5) - 1)) >> 2;
-			if (*opcode == gamut_mode_17b)
-				*opcode = gamut_mode_17;
-			else
-				*opcode = gamut_mode_17b;
-			scale_off = (*opcode == gamut_mode_17) ?
-				GAMUT_SCALEA_OFFSET_OFF :
-				GAMUT_SCALEB_OFFSET_OFF;
-		}
-		break;
-	case GAMUT_3D_MODE_13:
-		*opcode = (*opcode & (BIT(4) - 1)) >> 2;
-		if (*opcode == gamut_mode_13a)
-			*opcode = gamut_mode_13b;
-		else
-			*opcode = gamut_mode_13a;
-		tbl_len = GAMUT_3D_MODE13_TBL_SZ;
-		tbl_off = (*opcode == gamut_mode_13a) ? 0 :
-			GAMUT_MODE_13B_OFF;
-		scale_off = (*opcode == gamut_mode_13a) ?
-			GAMUT_SCALEA_OFFSET_OFF : GAMUT_SCALEB_OFFSET_OFF;
-		*opcode <<= 2;
-		break;
-	case GAMUT_3D_MODE_5:
-		*opcode = gamut_mode_5 << 2;
-		tbl_len = GAMUT_3D_MODE5_TBL_SZ;
-		tbl_off = GAMUT_MODE_5_OFF;
-		scale_off = GAMUT_SCALEB_OFFSET_OFF;
-		break;
-	default:
-		DRM_ERROR("invalid mode %d\n", payload->mode);
-		return -EINVAL;
-	}
-
-	if (payload->flags & GAMUT_3D_MAP_EN)
-		*opcode |= GAMUT_MAP_EN;
-	*opcode |= GAMUT_EN;
-
-	for (i = 0; i < GAMUT_3D_TBL_NUM; i++) {
-		reg = GAMUT_TABLE0_SEL << i;
-		reg |= ((tbl_off) & (BIT(11) - 1));
-		SDE_REG_WRITE(hw, base + GAMUT_TABLE_SEL_OFF, reg);
-		for (j = 0; j < tbl_len; j++) {
-			SDE_REG_WRITE(hw, base + GAMUT_LOWER_COLOR_OFF,
-					payload->col[i][j].c2_c1);
-			SDE_REG_WRITE(hw, base + GAMUT_UPPER_COLOR_OFF,
-					payload->col[i][j].c0);
-		}
-	}
-
-	if ((*opcode & GAMUT_MAP_EN)) {
-		if (scale_off == GAMUT_SCALEA_OFFSET_OFF)
-			scale_tbl_len = scale_tbl_a_len;
-		else
-			scale_tbl_len = scale_tbl_b_len;
-		for (i = 0; i < GAMUT_3D_SCALE_OFF_TBL_NUM; i++) {
-			scale_tbl_off = base + scale_off +
-					i * scale_tbl_len * sizeof(u32);
-			scale_data = &payload->scale_off[i][0];
-			for (j = 0; j < scale_tbl_len; j++)
-				SDE_REG_WRITE(hw,
-					scale_tbl_off + (j * sizeof(u32)),
-					scale_data[j]);
-		}
-	}
-	SDE_REG_WRITE(hw, base, *opcode);
-	return 0;
-}
-
-void sde_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_3d_gamut *payload;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 op_mode;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut.base);
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable gamut feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gamut.base, 0);
-		return;
-	}
-
-	payload = hw_cfg->payload;
-	sde_write_3d_gamut(&ctx->hw, payload, ctx->cap->sblk->gamut.base,
-		&op_mode, DSPP, GAMUT_3D_SCALE_OFF_SZ, GAMUT_3D_SCALEB_OFF_SZ);
-
-}
-
-void sde_setup_dspp_3d_gamutv41(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_3d_gamut *payload;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 op_mode;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut.base);
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable gamut feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gamut.base, 0);
-		return;
-	}
-
-	payload = hw_cfg->payload;
-	sde_write_3d_gamut(&ctx->hw, payload, ctx->cap->sblk->gamut.base,
-		&op_mode, DSPP, GAMUT_3D_SCALE_OFF_SZ, GAMUT_3D_SCALE_OFF_SZ);
-}
-
-void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_igc_lut *lut_cfg;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	int i = 0, j = 0;
-	u32 *addr = NULL;
-	u32 offset = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable igc feature\n");
-		SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, 0);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_igc_lut));
-		return;
-	}
-
-	lut_cfg = hw_cfg->payload;
-
-	for (i = 0; i < IGC_TBL_NUM; i++) {
-		addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
-		offset = IGC_C0_OFF + (i * sizeof(u32));
-
-		for (j = 0; j < IGC_TBL_LEN; j++) {
-			addr[j] &= IGC_DATA_MASK;
-			addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
-			if (j == 0)
-				addr[j] |= IGC_INDEX_UPDATE;
-			/* IGC lut registers are part of DSPP Top HW block */
-			SDE_REG_WRITE(&ctx->hw_top, offset, addr[j]);
-		}
-	}
-
-	if (lut_cfg->flags & IGC_DITHER_ENABLE) {
-		SDE_REG_WRITE(&ctx->hw, IGC_DITHER_OFF,
-			lut_cfg->strength & IGC_DITHER_DATA_MASK);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, IGC_EN);
-}
-
-void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_pcc *pcc_cfg;
-	struct drm_msm_pcc_coeff *coeffs = NULL;
-	int i = 0;
-	u32 base = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable pcc feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, 0);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pcc));
-		return;
-	}
-
-	pcc_cfg = hw_cfg->payload;
-
-	for (i = 0; i < PCC_NUM_PLANES; i++) {
-		base = ctx->cap->sblk->pcc.base + (i * sizeof(u32));
-		switch (i) {
-		case 0:
-			coeffs = &pcc_cfg->r;
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_RR_OFF, pcc_cfg->r_rr);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_GG_OFF, pcc_cfg->r_gg);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_BB_OFF, pcc_cfg->r_bb);
-			break;
-		case 1:
-			coeffs = &pcc_cfg->g;
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_RR_OFF, pcc_cfg->g_rr);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_GG_OFF, pcc_cfg->g_gg);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_BB_OFF, pcc_cfg->g_bb);
-			break;
-		case 2:
-			coeffs = &pcc_cfg->b;
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_RR_OFF, pcc_cfg->b_rr);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_GG_OFF, pcc_cfg->b_gg);
-			SDE_REG_WRITE(&ctx->hw,
-				base + PCC_BB_OFF, pcc_cfg->b_bb);
-			break;
-		default:
-			DRM_ERROR("invalid pcc plane: %d\n", i);
-			return;
-		}
-
-		SDE_REG_WRITE(&ctx->hw, base + PCC_C_OFF, coeffs->c);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_R_OFF, coeffs->r);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_G_OFF, coeffs->g);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_B_OFF, coeffs->b);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_RG_OFF, coeffs->rg);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_RB_OFF, coeffs->rb);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_GB_OFF, coeffs->gb);
-		SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_OFF, coeffs->rgb);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, PCC_EN);
-}
-
-void sde_setup_dspp_ltm_threshv1(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u64 thresh = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid parameters ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	if (!hw_cfg->payload) {
-		DRM_ERROR("invalid payload parameters for ltm thresh param\n");
-		return;
-	}
-
-	thresh = *((u64 *)hw_cfg->payload);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x60,
-			(thresh & 0x3FF));
-}
-
-void sde_setup_dspp_ltm_hist_bufferv1(struct sde_hw_dspp *ctx, u64 addr)
-{
-	struct drm_msm_ltm_stats_data *hist = NULL;
-	u64 lh_addr, hs_addr;
-
-	if (!ctx || !addr) {
-		DRM_ERROR("invalid parameter ctx %pK addr 0x%llx\n", ctx, addr);
-		return;
-	}
-
-	hist = (struct drm_msm_ltm_stats_data *)addr;
-	lh_addr = (u64)(&hist->stats_02[0]);
-	hs_addr = (u64)(&hist->stats_03[0]);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x70,
-			(addr & 0xFFFFFF00));
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x74,
-			(lh_addr & 0xFFFFFF00));
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x78,
-			(hs_addr & 0xFFFFFF00));
-}
-
-void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
-				    bool enable, u64 addr)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_ltm_phase_info phase;
-	u32 op_mode, offset;
-
-	if (!ctx) {
-		DRM_ERROR("invalid parameters ctx %pK\n", ctx);
-		return;
-	}
-
-	if (enable && (!addr || !cfg)) {
-		DRM_ERROR("invalid addr 0x%llx cfg %pK\n", addr, cfg);
-		return;
-	}
-
-	offset = ctx->cap->sblk->ltm.base + 0x4;
-	op_mode = SDE_REG_READ(&ctx->hw, offset);
-
-	if (!enable) {
-		op_mode &= ~BIT(0);
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
-			(op_mode & 0x1FFFFFF));
-		return;
-	}
-
-	if (ctx->idx >= DSPP_MAX) {
-		DRM_ERROR("Invalid idx %d\n", ctx->idx);
-		return;
-	}
-
-	memset(&phase, 0, sizeof(phase));
-	sde_ltm_get_phase_info(hw_cfg, &phase);
-
-	if (phase.portrait_en)
-		op_mode |= BIT(2);
-	else
-		op_mode &= ~BIT(2);
-
-	if (phase.merge_en)
-		op_mode |= BIT(16);
-	else
-		op_mode &= ~(BIT(16) | BIT(17));
-
-	offset = ctx->cap->sblk->ltm.base + 0x8;
-	SDE_REG_WRITE(&ctx->hw, offset, (phase.init_h[ctx->idx] & 0x7FFFFFF));
-	offset += 4;
-	SDE_REG_WRITE(&ctx->hw, offset, (phase.init_v & 0xFFFFFF));
-	offset += 4;
-	SDE_REG_WRITE(&ctx->hw, offset, (phase.inc_h & 0xFFFFFF));
-	offset += 4;
-	SDE_REG_WRITE(&ctx->hw, offset, (phase.inc_v & 0xFFFFFF));
-
-	op_mode |= BIT(0);
-	sde_setup_dspp_ltm_hist_bufferv1(ctx, addr);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
-			(op_mode & 0x1FFFFFF));
-}
-
-void sde_ltm_read_intr_status(struct sde_hw_dspp *ctx, u32 *status)
-{
-	u32 clear;
-
-	if (!ctx || !status) {
-		DRM_ERROR("invalid parameters ctx %pK status %pK\n", ctx,
-				status);
-		return;
-	}
-
-	*status = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->ltm.base + 0x54);
-	pr_debug("%s(): LTM interrupt status 0x%x\n", __func__, *status);
-	/* clear the hist_sat and hist_merge_sat bits */
-	clear = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->ltm.base + 0x58);
-	clear |= BIT(1) | BIT(2);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x58, clear);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
deleted file mode 100644
index 5a19942..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-#ifndef _SDE_HW_COLOR_PROC_V4_H_
-#define _SDE_HW_COLOR_PROC_V4_H_
-
-#include "sde_hw_util.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_dspp.h"
-/**
- * sde_setup_dspp_3d_gamutv4 - Function for 3d gamut v4 version feature
- *                             programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- */
-void sde_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_3d_gamutv41 - Function for 3d gamut v4_1 version feature
- *                             programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- */
-void sde_setup_dspp_3d_gamutv41(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_igcv3 - Function for igc v3 version feature
- *                             programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- */
-void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_pccv4 - Function for pcc v4 version feature
- *                             programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- */
-void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_ltm_threshv1 - Function for ltm thresh v1 programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- */
-void sde_setup_dspp_ltm_threshv1(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_ltm_hist_ctrlv1 - Function for ltm hist_ctrl v1 programming.
- * @ctx: dspp ctx pointer
- * @cfg: pointer to sde_hw_cp_cfg
- * @enable: feature enable/disable value
- * @addr: aligned iova address
- */
-void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
-				    bool enable, u64 addr);
-/**
- * sde_setup_dspp_ltm_hist_bufferv1 - Function for setting ltm hist buffer v1.
- * @ctx: dspp ctx pointer
- * @addr: aligned iova address
- */
-void sde_setup_dspp_ltm_hist_bufferv1(struct sde_hw_dspp *ctx, u64 addr);
-
-/**
- * sde_ltm_read_intr_status - api to get ltm interrupt status
- * @dspp: pointer to dspp object
- * @status: Pointer to u32 where ltm status value is dumped.
- */
-void sde_ltm_read_intr_status(struct sde_hw_dspp *dspp, u32 *status);
-
-#endif /* _SDE_HW_COLOR_PROC_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
deleted file mode 100644
index ac675e4..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_COLOR_PROCESSING_H
-#define _SDE_HW_COLOR_PROCESSING_H
-
-#include "sde_hw_color_processing_v1_7.h"
-#include "sde_hw_reg_dma_v1_color_proc.h"
-#include "sde_hw_color_proc_v4.h"
-
-#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
deleted file mode 100644
index 9238e85..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ /dev/null
@@ -1,1007 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <drm/msm_drm_pp.h>
-#include "sde_hw_color_processing_v1_7.h"
-#include "sde_hw_ctl.h"
-
-#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
-
-#define PA_HUE_VIG_OFF		0x110
-#define PA_SAT_VIG_OFF		0x114
-#define PA_VAL_VIG_OFF		0x118
-#define PA_CONT_VIG_OFF		0x11C
-
-#define PA_HUE_DSPP_OFF		0x1c
-#define PA_SAT_DSPP_OFF		0x20
-#define PA_VAL_DSPP_OFF		0x24
-#define PA_CONT_DSPP_OFF	0x28
-
-#define PA_HIST_CTRL_DSPP_OFF	0x4
-#define PA_HIST_DATA_DSPP_OFF	0x400
-
-#define PA_LUTV_DSPP_OFF	0x1400
-#define PA_LUT_SWAP_OFF		0x234
-
-#define PA_LUTV_DSPP_CTRL_OFF	0x4c
-#define PA_LUTV_DSPP_SWAP_OFF	0x18
-
-#define PA_DITH_DSPP_MATRIX_OFF	0x4
-
-#define PA_HUE_MASK		0xFFF
-#define PA_SAT_MASK		0xFFFF
-#define PA_VAL_MASK		0xFF
-#define PA_CONT_MASK		0xFF
-
-#define MEMCOL_PWL0_OFF		0x88
-#define MEMCOL_PWL0_MASK	0xFFFF07FF
-#define MEMCOL_PWL1_OFF		0x8C
-#define MEMCOL_PWL1_MASK	0xFFFFFFFF
-#define MEMCOL_HUE_REGION_OFF	0x90
-#define MEMCOL_HUE_REGION_MASK	0x7FF07FF
-#define MEMCOL_SAT_REGION_OFF	0x94
-#define MEMCOL_SAT_REGION_MASK	0xFFFFFF
-#define MEMCOL_VAL_REGION_OFF	0x98
-#define MEMCOL_VAL_REGION_MASK	0xFFFFFF
-#define MEMCOL_P0_LEN		0x14
-#define MEMCOL_P1_LEN		0x8
-#define MEMCOL_PWL2_OFF		0x218
-#define MEMCOL_PWL2_MASK	0xFFFFFFFF
-#define MEMCOL_BLEND_GAIN_OFF	0x21C
-#define MEMCOL_PWL_HOLD_OFF	0x214
-
-#define VIG_OP_PA_EN		BIT(4)
-#define VIG_OP_PA_SKIN_EN	BIT(5)
-#define VIG_OP_PA_FOL_EN	BIT(6)
-#define VIG_OP_PA_SKY_EN	BIT(7)
-#define VIG_OP_PA_HUE_EN	BIT(25)
-#define VIG_OP_PA_SAT_EN	BIT(26)
-#define VIG_OP_PA_VAL_EN	BIT(27)
-#define VIG_OP_PA_CONT_EN	BIT(28)
-
-#define DSPP_OP_SZ_VAL_EN	BIT(31)
-#define DSPP_OP_SZ_SAT_EN	BIT(30)
-#define DSPP_OP_SZ_HUE_EN	BIT(29)
-#define DSPP_OP_PA_HUE_EN	BIT(25)
-#define DSPP_OP_PA_SAT_EN	BIT(26)
-#define DSPP_OP_PA_VAL_EN	BIT(27)
-#define DSPP_OP_PA_CONT_EN	BIT(28)
-#define DSPP_OP_PA_EN		BIT(20)
-#define DSPP_OP_PA_LUTV_EN	BIT(19)
-#define DSPP_OP_PA_HIST_EN	BIT(16)
-#define DSPP_OP_PA_SKIN_EN	BIT(5)
-#define DSPP_OP_PA_FOL_EN	BIT(6)
-#define DSPP_OP_PA_SKY_EN	BIT(7)
-
-#define DSPP_SZ_ADJ_CURVE_P1_OFF	0x4
-#define DSPP_SZ_THRESHOLDS_OFF	0x8
-#define DSPP_PA_PWL_HOLD_OFF	0x40
-
-#define DSPP_MEMCOL_SIZE0	0x14
-#define DSPP_MEMCOL_SIZE1	0x8
-#define DSPP_MEMCOL_PWL0_OFF	0x0
-#define DSPP_MEMCOL_PWL2_OFF	0x3C
-#define DSPP_MEMCOL_HOLD_SIZE	0x4
-
-#define DSPP_MEMCOL_PROT_VAL_EN BIT(24)
-#define DSPP_MEMCOL_PROT_SAT_EN BIT(23)
-#define DSPP_MEMCOL_PROT_HUE_EN BIT(22)
-#define DSPP_MEMCOL_PROT_CONT_EN BIT(18)
-#define DSPP_MEMCOL_PROT_SIXZONE_EN BIT(17)
-#define DSPP_MEMCOL_PROT_BLEND_EN BIT(3)
-
-#define DSPP_MEMCOL_MASK \
-	(DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | DSPP_OP_PA_FOL_EN)
-
-#define DSPP_MEMCOL_PROT_MASK \
-	(DSPP_MEMCOL_PROT_HUE_EN | DSPP_MEMCOL_PROT_SAT_EN | \
-	DSPP_MEMCOL_PROT_VAL_EN | DSPP_MEMCOL_PROT_CONT_EN | \
-	DSPP_MEMCOL_PROT_SIXZONE_EN | DSPP_MEMCOL_PROT_BLEND_EN)
-
-#define PA_VIG_DISABLE_REQUIRED(x) \
-			!((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
-			VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
-			VIG_OP_PA_SAT_EN | VIG_OP_PA_VAL_EN | \
-			VIG_OP_PA_CONT_EN))
-
-#define PA_DSPP_DISABLE_REQUIRED(x) \
-			!((x) & (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | \
-			DSPP_OP_PA_FOL_EN | DSPP_OP_PA_HUE_EN | \
-			DSPP_OP_PA_SAT_EN | DSPP_OP_PA_VAL_EN | \
-			DSPP_OP_PA_CONT_EN | DSPP_OP_PA_HIST_EN | \
-			DSPP_OP_SZ_HUE_EN | DSPP_OP_SZ_SAT_EN | \
-			DSPP_OP_SZ_VAL_EN))
-
-#define DSPP_OP_PCC_ENABLE	BIT(0)
-#define PCC_OP_MODE_OFF		0
-#define PCC_CONST_COEFF_OFF	4
-#define PCC_R_COEFF_OFF		0x10
-#define PCC_G_COEFF_OFF		0x1C
-#define PCC_B_COEFF_OFF		0x28
-#define PCC_RG_COEFF_OFF	0x34
-#define PCC_RB_COEFF_OFF	0x40
-#define PCC_GB_COEFF_OFF	0x4C
-#define PCC_RGB_COEFF_OFF	0x58
-#define PCC_CONST_COEFF_MASK	0xFFFF
-#define PCC_COEFF_MASK		0x3FFFF
-
-#define SSPP	0
-#define DSPP	1
-
-#define PGC_C0_OFF 0x4
-#define PGC_C0_INDEX_OFF 0x8
-#define PGC_8B_ROUND_EN BIT(1)
-#define PGC_EN BIT(0)
-#define PGC_TBL_NUM 3
-#define PGC_LUT_SWAP_OFF 0x1c
-
-
-static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
-		const struct sde_pp_blk *blk, u32 hue, int loc)
-{
-	u32 base = blk->base;
-	u32 offset = (loc == DSPP) ? PA_HUE_DSPP_OFF : PA_HUE_VIG_OFF;
-	u32 op_hue_en = (loc == DSPP) ? DSPP_OP_PA_HUE_EN : VIG_OP_PA_HUE_EN;
-	u32 op_pa_en = (loc == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
-	u32 disable_req;
-	u32 opmode;
-
-	opmode = SDE_REG_READ(hw, base);
-	SDE_REG_WRITE(hw, base + offset, hue & PA_HUE_MASK);
-
-	if (!hue) {
-		opmode &= ~op_hue_en;
-		disable_req = (loc == DSPP) ?
-			PA_DSPP_DISABLE_REQUIRED(opmode) :
-			PA_VIG_DISABLE_REQUIRED(opmode);
-		if (disable_req)
-			opmode &= ~op_pa_en;
-	} else {
-		opmode |= (op_hue_en | op_pa_en);
-	}
-
-	SDE_REG_WRITE(hw, base, opmode);
-}
-
-void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg)
-{
-	uint32_t hue = *((uint32_t *)cfg);
-
-	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic_blk, hue, SSPP);
-}
-
-static void __setup_pa_sat(struct sde_hw_blk_reg_map *hw,
-		const struct sde_pp_blk *blk, u32 sat, int loc)
-{
-	u32 base = blk->base;
-	u32 offset = (loc == DSPP) ? PA_SAT_DSPP_OFF : PA_SAT_VIG_OFF;
-	u32 op_sat_en = (loc == DSPP) ? DSPP_OP_PA_SAT_EN : VIG_OP_PA_SAT_EN;
-	u32 op_pa_en = (loc == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
-	u32 disable_req;
-	u32 opmode;
-
-	opmode = SDE_REG_READ(hw, base);
-	SDE_REG_WRITE(hw, base + offset, sat & PA_SAT_MASK);
-
-	if (!sat) {
-		opmode &= ~op_sat_en;
-		disable_req = (loc == DSPP) ?
-			PA_DSPP_DISABLE_REQUIRED(opmode) :
-			PA_VIG_DISABLE_REQUIRED(opmode);
-		if (disable_req)
-			opmode &= ~op_pa_en;
-	} else {
-		opmode |= (op_sat_en | op_pa_en);
-	}
-
-	SDE_REG_WRITE(hw, base, opmode);
-}
-
-void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg)
-{
-	uint32_t sat = *((uint32_t *)cfg);
-
-	__setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic_blk, sat, SSPP);
-}
-
-static void __setup_pa_val(struct sde_hw_blk_reg_map *hw,
-		const struct sde_pp_blk *blk, u32 value, int loc)
-{
-	u32 base = blk->base;
-	u32 offset = (loc == DSPP) ? PA_VAL_DSPP_OFF : PA_VAL_VIG_OFF;
-	u32 op_val_en = (loc == DSPP) ? DSPP_OP_PA_VAL_EN : VIG_OP_PA_VAL_EN;
-	u32 op_pa_en = (loc == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
-	u32 disable_req;
-	u32 opmode;
-
-	opmode = SDE_REG_READ(hw, base);
-	SDE_REG_WRITE(hw, base + offset, value & PA_VAL_MASK);
-
-	if (!value) {
-		opmode &= ~op_val_en;
-		disable_req = (loc == DSPP) ?
-			PA_DSPP_DISABLE_REQUIRED(opmode) :
-			PA_VIG_DISABLE_REQUIRED(opmode);
-		if (disable_req)
-			opmode &= ~op_pa_en;
-	} else {
-		opmode |= (op_val_en | op_pa_en);
-	}
-
-	SDE_REG_WRITE(hw, base, opmode);
-}
-
-void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg)
-{
-	uint32_t value = *((uint32_t *)cfg);
-
-	__setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic_blk, value, SSPP);
-}
-
-static void __setup_pa_cont(struct sde_hw_blk_reg_map *hw,
-		const struct sde_pp_blk *blk, u32 contrast, int loc)
-{
-	u32 base = blk->base;
-	u32 offset = (loc == DSPP) ? PA_CONT_DSPP_OFF : PA_CONT_VIG_OFF;
-	u32 op_cont_en = (loc == DSPP) ?
-		DSPP_OP_PA_CONT_EN : VIG_OP_PA_CONT_EN;
-	u32 op_pa_en = (loc == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
-	u32 disable_req;
-	u32 opmode;
-
-	opmode = SDE_REG_READ(hw, base);
-	SDE_REG_WRITE(hw, base + offset, contrast & PA_CONT_MASK);
-
-	if (!contrast) {
-		opmode &= ~op_cont_en;
-		disable_req = (loc == DSPP) ?
-			PA_DSPP_DISABLE_REQUIRED(opmode) :
-			PA_VIG_DISABLE_REQUIRED(opmode);
-		if (disable_req)
-			opmode &= ~op_pa_en;
-	} else {
-		opmode |= (op_cont_en | op_pa_en);
-	}
-
-	SDE_REG_WRITE(hw, base, opmode);
-}
-
-void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg)
-{
-	uint32_t contrast = *((uint32_t *)cfg);
-
-	__setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic_blk, contrast, SSPP);
-}
-
-void sde_setup_dspp_pa_hsic_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_pa_hsic *hsic_cfg;
-	u32 hue = 0;
-	u32 sat = 0;
-	u32 val = 0;
-	u32 cont = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	if (hw_cfg->payload &&
-		(hw_cfg->len != sizeof(struct drm_msm_pa_hsic))) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_pa_hsic));
-		return;
-	}
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable pa hsic feature\n");
-	} else {
-		hsic_cfg = hw_cfg->payload;
-		if (hsic_cfg->flags & PA_HSIC_HUE_ENABLE)
-			hue = hsic_cfg->hue;
-		if (hsic_cfg->flags & PA_HSIC_SAT_ENABLE)
-			sat = hsic_cfg->saturation;
-		if (hsic_cfg->flags & PA_HSIC_VAL_ENABLE)
-			val = hsic_cfg->value;
-		if (hsic_cfg->flags & PA_HSIC_CONT_ENABLE)
-			cont = hsic_cfg->contrast;
-	}
-
-	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic, hue, DSPP);
-	__setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic, sat, DSPP);
-	__setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic, val, DSPP);
-	__setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic, cont, DSPP);
-}
-
-void sde_setup_dspp_sixzone_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_sixzone *sixzone;
-	u32 opcode = 0, local_opcode = 0;
-	u32 reg = 0, hold = 0, local_hold = 0;
-	u32 addr = 0;
-	int i = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable sixzone feature\n");
-		opcode &= ~(DSPP_OP_SZ_HUE_EN | DSPP_OP_SZ_SAT_EN |
-			DSPP_OP_SZ_VAL_EN);
-		if (PA_DSPP_DISABLE_REQUIRED(opcode))
-			opcode &= ~DSPP_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_sixzone)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_sixzone));
-		return;
-	}
-
-	sixzone = hw_cfg->payload;
-
-	reg = BIT(26);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->sixzone.base, reg);
-
-	addr = ctx->cap->sblk->sixzone.base + DSPP_SZ_ADJ_CURVE_P1_OFF;
-	for (i = 0; i < SIXZONE_LUT_SIZE; i++) {
-		SDE_REG_WRITE(&ctx->hw, addr, sixzone->curve[i].p1);
-		SDE_REG_WRITE(&ctx->hw, (addr - 4), sixzone->curve[i].p0);
-	}
-
-	addr = ctx->cap->sblk->sixzone.base + DSPP_SZ_THRESHOLDS_OFF;
-	SDE_REG_WRITE(&ctx->hw, addr, sixzone->threshold);
-	SDE_REG_WRITE(&ctx->hw, (addr + 4), sixzone->adjust_p0);
-	SDE_REG_WRITE(&ctx->hw, (addr + 8), sixzone->adjust_p1);
-
-	hold = SDE_REG_READ(&ctx->hw,
-		(ctx->cap->sblk->hsic.base + DSPP_PA_PWL_HOLD_OFF));
-	local_hold = ((sixzone->sat_hold & REG_MASK(2)) << 12);
-	local_hold |= ((sixzone->val_hold & REG_MASK(2)) << 14);
-	hold &= ~REG_MASK_SHIFT(4, 12);
-	hold |= local_hold;
-	SDE_REG_WRITE(&ctx->hw,
-		(ctx->cap->sblk->hsic.base + DSPP_PA_PWL_HOLD_OFF),
-		hold);
-
-	if (sixzone->flags & SIXZONE_HUE_ENABLE)
-		local_opcode |= DSPP_OP_SZ_HUE_EN;
-	if (sixzone->flags & SIXZONE_SAT_ENABLE)
-		local_opcode |= DSPP_OP_SZ_SAT_EN;
-	if (sixzone->flags & SIXZONE_VAL_ENABLE)
-		local_opcode |= DSPP_OP_SZ_VAL_EN;
-
-	if (local_opcode)
-		local_opcode |= DSPP_OP_PA_EN;
-
-	opcode &= ~REG_MASK_SHIFT(3, 29);
-	opcode |= local_opcode;
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-}
-
-void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
-				   enum sde_memcolor_type type,
-				   void *cfg)
-{
-	struct drm_msm_memcol *mc = cfg;
-	u32 base = ctx->cap->sblk->memcolor_blk.base;
-	u32 off, op, mc_en, hold = 0;
-	u32 mc_i = 0;
-
-	switch (type) {
-	case MEMCOLOR_SKIN:
-		mc_en = VIG_OP_PA_SKIN_EN;
-		mc_i = 0;
-		break;
-	case MEMCOLOR_SKY:
-		mc_en = VIG_OP_PA_SKY_EN;
-		mc_i = 1;
-		break;
-	case MEMCOLOR_FOLIAGE:
-		mc_en = VIG_OP_PA_FOL_EN;
-		mc_i = 2;
-		break;
-	default:
-		DRM_ERROR("Invalid memory color type %d\n", type);
-		return;
-	}
-
-	op = SDE_REG_READ(&ctx->hw, base);
-	if (!mc) {
-		op &= ~mc_en;
-		if (PA_VIG_DISABLE_REQUIRED(op))
-			op &= ~VIG_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, base, op);
-		return;
-	}
-
-	off = base + (mc_i * MEMCOL_P0_LEN);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL0_OFF),
-		      mc->color_adjust_p0 & MEMCOL_PWL0_MASK);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL1_OFF),
-		      mc->color_adjust_p1 & MEMCOL_PWL1_MASK);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_HUE_REGION_OFF),
-		      mc->hue_region & MEMCOL_HUE_REGION_MASK);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_SAT_REGION_OFF),
-		      mc->sat_region & MEMCOL_SAT_REGION_MASK);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_VAL_REGION_OFF),
-		      mc->val_region & MEMCOL_VAL_REGION_MASK);
-
-	off = base + (mc_i * MEMCOL_P1_LEN);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL2_OFF),
-		      mc->color_adjust_p2 & MEMCOL_PWL2_MASK);
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_BLEND_GAIN_OFF), mc->blend_gain);
-
-	hold = SDE_REG_READ(&ctx->hw, off + MEMCOL_PWL_HOLD_OFF);
-	hold &= ~(0xF << (mc_i * 4));
-	hold |= ((mc->sat_hold & 0x3) << (mc_i * 4));
-	hold |= ((mc->val_hold & 0x3) << ((mc_i * 4) + 2));
-	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL_HOLD_OFF), hold);
-
-	op |= VIG_OP_PA_EN | mc_en;
-	SDE_REG_WRITE(&ctx->hw, base, op);
-}
-
-static void __setup_dspp_memcol(struct sde_hw_dspp *ctx,
-		enum sde_memcolor_type type,
-		struct drm_msm_memcol *memcolor)
-{
-	u32 addr = 0, offset = 0, idx = 0;
-	u32 hold = 0, local_hold = 0, hold_shift = 0;
-
-	switch (type) {
-	case MEMCOLOR_SKIN:
-		idx = 0;
-		break;
-	case MEMCOLOR_SKY:
-		idx = 1;
-		break;
-	case MEMCOLOR_FOLIAGE:
-		idx = 2;
-		break;
-	default:
-		DRM_ERROR("Invalid memory color type %d\n", type);
-		return;
-	}
-
-	offset = DSPP_MEMCOL_PWL0_OFF + (idx * DSPP_MEMCOL_SIZE0);
-	addr = ctx->cap->sblk->memcolor.base + offset;
-	hold_shift = idx * DSPP_MEMCOL_HOLD_SIZE;
-
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->color_adjust_p0);
-	addr += 4;
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->color_adjust_p1);
-	addr += 4;
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->hue_region);
-	addr += 4;
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->sat_region);
-	addr += 4;
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->val_region);
-
-	offset = DSPP_MEMCOL_PWL2_OFF + (idx * DSPP_MEMCOL_SIZE1);
-	addr = ctx->cap->sblk->memcolor.base + offset;
-
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->color_adjust_p2);
-	addr += 4;
-	SDE_REG_WRITE(&ctx->hw, addr, memcolor->blend_gain);
-
-	addr = ctx->cap->sblk->hsic.base + DSPP_PA_PWL_HOLD_OFF;
-	hold = SDE_REG_READ(&ctx->hw, addr);
-	local_hold = ((memcolor->sat_hold & REG_MASK(2)) << hold_shift);
-	local_hold |=
-		((memcolor->val_hold & REG_MASK(2)) << (hold_shift + 2));
-	hold &= ~REG_MASK_SHIFT(4, hold_shift);
-	hold |= local_hold;
-	SDE_REG_WRITE(&ctx->hw, addr, hold);
-}
-
-void sde_setup_dspp_memcol_skin_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_memcol *memcolor;
-	u32 opcode = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor skin feature\n");
-		opcode &= ~(DSPP_OP_PA_SKIN_EN);
-		if (PA_DSPP_DISABLE_REQUIRED(opcode))
-			opcode &= ~DSPP_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-
-	__setup_dspp_memcol(ctx, MEMCOLOR_SKIN, memcolor);
-
-	opcode |= (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_EN);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-}
-
-void sde_setup_dspp_memcol_sky_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_memcol *memcolor;
-	u32 opcode = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor sky feature\n");
-		opcode &= ~(DSPP_OP_PA_SKY_EN);
-		if (PA_DSPP_DISABLE_REQUIRED(opcode))
-			opcode &= ~DSPP_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-
-	__setup_dspp_memcol(ctx, MEMCOLOR_SKY, memcolor);
-
-	opcode |= (DSPP_OP_PA_SKY_EN | DSPP_OP_PA_EN);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-}
-
-void sde_setup_dspp_memcol_foliage_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_memcol *memcolor;
-	u32 opcode = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor foliage feature\n");
-		opcode &= ~(DSPP_OP_PA_FOL_EN);
-		if (PA_DSPP_DISABLE_REQUIRED(opcode))
-			opcode &= ~DSPP_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-
-	__setup_dspp_memcol(ctx, MEMCOLOR_FOLIAGE, memcolor);
-
-	opcode |= (DSPP_OP_PA_FOL_EN | DSPP_OP_PA_EN);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-}
-
-void sde_setup_dspp_memcol_prot_v17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_memcol *memcolor;
-	u32 opcode = 0, local_opcode = 0;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor prot feature\n");
-		opcode &= ~(DSPP_MEMCOL_PROT_MASK);
-		if (PA_DSPP_DISABLE_REQUIRED(opcode))
-			opcode &= ~DSPP_OP_PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-
-	if (memcolor->prot_flags) {
-		if (memcolor->prot_flags & MEMCOL_PROT_HUE)
-			local_opcode |= DSPP_MEMCOL_PROT_HUE_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_SAT)
-			local_opcode |= DSPP_MEMCOL_PROT_SAT_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_VAL)
-			local_opcode |= DSPP_MEMCOL_PROT_VAL_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_CONT)
-			local_opcode |= DSPP_MEMCOL_PROT_CONT_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_SIXZONE)
-			local_opcode |= DSPP_MEMCOL_PROT_SIXZONE_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_BLEND)
-			local_opcode |= DSPP_MEMCOL_PROT_BLEND_EN;
-	}
-
-	if (local_opcode) {
-		local_opcode |= DSPP_OP_PA_EN;
-		opcode &= ~(DSPP_MEMCOL_PROT_MASK);
-		opcode |= local_opcode;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-	}
-}
-
-void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_pcc *pcc;
-	void  __iomem *base;
-
-	if (!hw_cfg  || (hw_cfg->len != sizeof(*pcc)  && hw_cfg->payload)) {
-		DRM_ERROR(
-			"invalid params hw %pK payload %pK payloadsize %d exp size %zd\n",
-			   hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			   ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
-		return;
-	}
-	base = ctx->hw.base_off + ctx->cap->base;
-
-	/* Turn off feature */
-	if (!hw_cfg->payload) {
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
-			      PCC_OP_MODE_OFF);
-		return;
-	}
-	DRM_DEBUG_DRIVER("Enable PCC feature\n");
-	pcc = hw_cfg->payload;
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
-				  pcc->r.c & PCC_CONST_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
-		      pcc->g.c & PCC_CONST_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
-		      pcc->b.c & PCC_CONST_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
-				  pcc->r.r & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
-				  pcc->g.r & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
-				  pcc->b.r & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
-				  pcc->r.g & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
-				  pcc->g.g & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
-				  pcc->b.g & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
-				  pcc->r.b & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
-				  pcc->g.b & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
-				  pcc->b.b & PCC_COEFF_MASK);
-
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
-				  pcc->r.rg & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
-				  pcc->g.rg & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
-				  pcc->b.rg & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
-				  pcc->r.rb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
-				  pcc->g.rb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
-				  pcc->b.rb & PCC_COEFF_MASK);
-
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
-				  pcc->r.gb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
-				  pcc->g.gb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
-				  pcc->b.gb & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
-				  pcc->r.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
-		      pcc->g.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
-		      pcc->b.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, DSPP_OP_PCC_ENABLE);
-}
-
-void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pa_vlut *payload = NULL;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 base = ctx->cap->sblk->vlut.base;
-	u32 offset = base + PA_LUTV_DSPP_OFF;
-	u32 op_mode, tmp;
-	int i = 0, j = 0;
-
-	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
-			sizeof(struct drm_msm_pa_vlut))) {
-		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
-			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			  ((hw_cfg) ? hw_cfg->len : 0),
-			  sizeof(struct drm_msm_pa_vlut));
-		return;
-	}
-	op_mode = SDE_REG_READ(&ctx->hw, base);
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable vlut feature\n");
-		/**
-		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
-		 * first, then check whether any other PA sub-features are
-		 * enabled or not. If none of the sub-features are enabled,
-		 * remove the PA global enable bit(20).
-		 */
-		op_mode &= ~((u32)DSPP_OP_PA_LUTV_EN);
-		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
-			op_mode &= ~((u32)DSPP_OP_PA_EN);
-		SDE_REG_WRITE(&ctx->hw, base, op_mode);
-		return;
-	}
-	payload = hw_cfg->payload;
-	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
-	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
-		tmp = (payload->val[i] & REG_MASK(10)) |
-			((payload->val[i + 1] & REG_MASK(10)) << 16);
-		SDE_REG_WRITE(&ctx->hw, (offset + j),
-			     tmp);
-	}
-	SDE_REG_WRITE(&ctx->hw, (base + PA_LUT_SWAP_OFF), 1);
-	op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
-	SDE_REG_WRITE(&ctx->hw, base, op_mode);
-}
-
-void sde_setup_dspp_pa_vlut_v1_8(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pa_vlut *payload = NULL;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_ctl *ctl = NULL;
-	u32 vlut_base, pa_hist_base;
-	u32 ctrl_off, swap_off;
-	u32 tmp = 0;
-	int i = 0, j = 0;
-
-	if (!ctx) {
-		DRM_ERROR("invalid input parameter NULL ctx\n");
-		return;
-	}
-
-	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
-			sizeof(struct drm_msm_pa_vlut))) {
-		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
-			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			  ((hw_cfg) ? hw_cfg->len : 0),
-			  sizeof(struct drm_msm_pa_vlut));
-		return;
-	}
-
-	ctl = hw_cfg->ctl;
-	vlut_base = ctx->cap->sblk->vlut.base;
-	pa_hist_base = ctx->cap->sblk->hist.base;
-	ctrl_off = pa_hist_base + PA_LUTV_DSPP_CTRL_OFF;
-	swap_off = pa_hist_base + PA_LUTV_DSPP_SWAP_OFF;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable vlut feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctrl_off, 0);
-		goto exit;
-	}
-
-	payload = hw_cfg->payload;
-	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
-	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
-		tmp = (payload->val[i] & REG_MASK(10)) |
-			((payload->val[i + 1] & REG_MASK(10)) << 16);
-		SDE_REG_WRITE(&ctx->hw, (vlut_base + j), tmp);
-	}
-	SDE_REG_WRITE(&ctx->hw, ctrl_off, 1);
-	SDE_REG_WRITE(&ctx->hw, swap_off, 1);
-
-exit:
-	/* update flush bit */
-	if (ctl && ctl->ops.update_bitmask_dspp_pavlut)
-		ctl->ops.update_bitmask_dspp_pavlut(ctl, ctx->idx, 1);
-}
-
-void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pgc_lut *payload = NULL;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 c0_off, c1_off, c2_off, i;
-
-	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
-			sizeof(struct drm_msm_pgc_lut))) {
-		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
-			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			  ((hw_cfg) ? hw_cfg->len : 0),
-			  sizeof(struct drm_msm_pgc_lut));
-		return;
-	}
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable pgc feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, 0);
-		return;
-	}
-	payload = hw_cfg->payload;
-
-	/* Initialize index offsets */
-	c0_off = ctx->cap->sblk->gc.base + PGC_C0_INDEX_OFF;
-	c1_off = c0_off + (sizeof(u32) * 2);
-	c2_off = c1_off + (sizeof(u32) * 2);
-	SDE_REG_WRITE(&ctx->hw, c0_off, 0);
-	SDE_REG_WRITE(&ctx->hw, c1_off, 0);
-	SDE_REG_WRITE(&ctx->hw, c2_off, 0);
-
-	/* Initialize table offsets */
-	c0_off = ctx->cap->sblk->gc.base + PGC_C0_OFF;
-	c1_off = c0_off + (sizeof(u32) * 2);
-	c2_off = c1_off + (sizeof(u32) * 2);
-
-	for (i = 0; i < PGC_TBL_LEN; i++) {
-		SDE_REG_WRITE(&ctx->hw, c0_off, payload->c0[i]);
-		SDE_REG_WRITE(&ctx->hw, c1_off, payload->c1[i]);
-		SDE_REG_WRITE(&ctx->hw, c2_off, payload->c2[i]);
-	}
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base + PGC_LUT_SWAP_OFF,
-			BIT(0));
-	i = BIT(0) | ((payload->flags & PGC_8B_ROUND) ? BIT(1) : 0);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, i);
-}
-
-void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	u32 base, offset;
-	u32 op_mode;
-	bool feature_enabled;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
-		return;
-	}
-
-	feature_enabled = *(bool *)cfg;
-	base = ctx->cap->sblk->hist.base;
-	offset = base + PA_HIST_CTRL_DSPP_OFF;
-
-	op_mode = SDE_REG_READ(&ctx->hw, base);
-	if (!feature_enabled) {
-		op_mode &= ~DSPP_OP_PA_HIST_EN;
-		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
-			op_mode &= ~DSPP_OP_PA_EN;
-	} else {
-		op_mode |= DSPP_OP_PA_HIST_EN | DSPP_OP_PA_EN;
-	}
-
-	SDE_REG_WRITE(&ctx->hw, offset, 0);
-	SDE_REG_WRITE(&ctx->hw, base, op_mode);
-}
-
-void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_hist *hist_data;
-	u32 offset, offset_ctl;
-	u32 i;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
-		return;
-	}
-
-	hist_data = (struct drm_msm_hist *)cfg;
-	offset = ctx->cap->sblk->hist.base + PA_HIST_DATA_DSPP_OFF;
-	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
-
-	/* collect hist data for given DSPPs */
-	for (i = 0; i < HIST_V_SIZE; i++)
-		hist_data->data[i] += SDE_REG_READ(&ctx->hw, offset + i * 4) &
-					REG_MASK(24);
-
-	/* unlock hist buffer */
-	SDE_REG_WRITE(&ctx->hw, offset_ctl, 0);
-}
-
-void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	u32 offset_ctl;
-
-	if (!ctx) {
-		DRM_ERROR("invalid parameters ctx %pK", ctx);
-		return;
-	}
-
-	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
-
-	/* lock hist buffer */
-	SDE_REG_WRITE(&ctx->hw, offset_ctl, 1);
-}
-
-void sde_setup_dspp_dither_v1_7(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_pa_dither *dither;
-	u32 ctrl_off, matrix_off;
-	u32 opmode, data, i;
-
-	if (!hw_cfg || (hw_cfg->len != sizeof(struct drm_msm_pa_dither) &&
-			hw_cfg->payload)) {
-		DRM_ERROR("hw %pK payload %pK size %d expected sz %zd\n",
-			hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			((hw_cfg) ? hw_cfg->len : 0),
-			sizeof(struct drm_msm_pa_dither));
-		return;
-	}
-
-	ctrl_off = ctx->cap->sblk->dither.base;
-	matrix_off = ctrl_off + PA_DITH_DSPP_MATRIX_OFF;
-
-	/* Turn off feature */
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable DSPP dither feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctrl_off, 0);
-		return;
-	}
-	DRM_DEBUG_DRIVER("Enable DSPP Dither feature\n");
-	dither = hw_cfg->payload;
-
-	for (i = 0; i < DITHER_MATRIX_SZ; i += 4) {
-		data = (dither->matrix[i] & REG_MASK(4)) |
-			((dither->matrix[i + 1] & REG_MASK(4)) << 4) |
-			((dither->matrix[i + 2] & REG_MASK(4)) << 8) |
-			((dither->matrix[i + 3] & REG_MASK(4)) << 12);
-		SDE_REG_WRITE(&ctx->hw, matrix_off + i, data);
-	}
-
-	opmode = BIT(0);
-	opmode |= (dither->offset_en) ? BIT(1) : 0;
-	opmode |= ((dither->strength) & REG_MASK(4)) << 4;
-	SDE_REG_WRITE(&ctx->hw, ctrl_off, opmode);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
deleted file mode 100644
index 515e99b..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_COLOR_PROCESSING_V1_7_H
-#define _SDE_HW_COLOR_PROCESSING_V1_7_H
-
-#include "sde_hw_sspp.h"
-#include "sde_hw_dspp.h"
-
-/**
- * sde_setup_pipe_pa_hue_v1_7 - setup SSPP hue feature in v1.7 hardware
- * @ctx: Pointer to pipe context
- * @cfg: Pointer to hue data
- */
-void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * sde_setup_pipe_pa_sat_v1_7 - setup SSPP saturation feature in v1.7 hardware
- * @ctx: Pointer to pipe context
- * @cfg: Pointer to saturation data
- */
-void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * sde_setup_pipe_pa_val_v1_7 - setup SSPP value feature in v1.7 hardware
- * @ctx: Pointer to pipe context
- * @cfg: Pointer to value data
- */
-void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * sde_setup_pipe_pa_cont_v1_7 - setup SSPP contrast feature in v1.7 hardware
- * @ctx: Pointer to pipe context
- * @cfg: Pointer to contrast data
- */
-void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * sde_setup_pipe_pa_memcol_v1_7 - setup SSPP memory color in v1.7 hardware
- * @ctx: Pointer to pipe context
- * @type: Memory color type (Skin, sky, or foliage)
- * @cfg: Pointer to memory color config data
- */
-void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
-				   enum sde_memcolor_type type,
-				   void *cfg);
-
-/**
- * sde_setup_dspp_pcc_v1_7 - setup DSPP PCC veature in v1.7 hardware
- * @ctx: Pointer to dspp context
- * @cfg: Pointer to PCC data
- */
-void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_pa_hsic_v17 - setup DSPP hsic feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to hsic data
- */
-void sde_setup_dspp_pa_hsic_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_memcol_skin_v17 - setup DSPP memcol skin in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to memcolor config data
- */
-void sde_setup_dspp_memcol_skin_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_memcol_sky_v17 - setup DSPP memcol sky in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to memcolor config data
- */
-void sde_setup_dspp_memcol_sky_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_memcol_foliage_v17 - setup DSPP memcol fol in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to memcolor config data
- */
-void sde_setup_dspp_memcol_foliage_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_memcol_prot_v17 - setup DSPP memcol prot in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to memcolor config data
- */
-void sde_setup_dspp_memcol_prot_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_sixzone_v17 - setup DSPP sixzone feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to sixzone data
- */
-void sde_setup_dspp_sixzone_v17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_pa_vlut_v1_7 - setup DSPP PA vLUT feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to vLUT data
- */
-void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_pa_vlut_v1_8 - setup DSPP PA vLUT feature in v1.8 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to vLUT data
- */
-void sde_setup_dspp_pa_vlut_v1_8(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_gc_v1_7 - setup DSPP gc feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to gc data
- */
-void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_hist_v1_7 - setup DSPP histogram feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to histogram control data
- */
-void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_read_dspp_hist_v1_7 - read DSPP histogram data in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to histogram data
- */
-void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_lock_dspp_hist_v1_7 - lock DSPP histogram buffer in v1.7 hardware
- * @ctx: Pointer to DSPP context
- */
-void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * sde_setup_dspp_dither_v1_7 - setup DSPP dither feature in v1.7 hardware
- * @ctx: Pointer to DSPP context
- * @cfg: Pointer to dither data
- */
-void sde_setup_dspp_dither_v1_7(struct sde_hw_dspp *ctx, void *cfg);
-#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
deleted file mode 100644
index 190335e..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ /dev/null
@@ -1,1327 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/delay.h>
-#include "sde_hwio.h"
-#include "sde_hw_ctl.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-#include "sde_reg_dma.h"
-
-#define   CTL_LAYER(lm)                 \
-	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
-#define   CTL_LAYER_EXT(lm)             \
-	(0x40 + (((lm) - LM_0) * 0x004))
-#define   CTL_LAYER_EXT2(lm)             \
-	(0x70 + (((lm) - LM_0) * 0x004))
-#define   CTL_LAYER_EXT3(lm)             \
-	(0xA0 + (((lm) - LM_0) * 0x004))
-#define   CTL_TOP                       0x014
-#define   CTL_FLUSH                     0x018
-#define   CTL_START                     0x01C
-#define   CTL_PREPARE                   0x0d0
-#define   CTL_SW_RESET                  0x030
-#define   CTL_SW_RESET_OVERRIDE         0x060
-#define   CTL_STATUS                    0x064
-#define   CTL_LAYER_EXTN_OFFSET         0x40
-#define   CTL_ROT_TOP                   0x0C0
-#define   CTL_ROT_FLUSH                 0x0C4
-#define   CTL_ROT_START                 0x0CC
-
-#define   CTL_MERGE_3D_ACTIVE           0x0E4
-#define   CTL_DSC_ACTIVE                0x0E8
-#define   CTL_WB_ACTIVE                 0x0EC
-#define   CTL_CWB_ACTIVE                0x0F0
-#define   CTL_INTF_ACTIVE               0x0F4
-#define   CTL_CDM_ACTIVE                0x0F8
-
-#define   CTL_MERGE_3D_FLUSH           0x100
-#define   CTL_DSC_FLUSH                0x104
-#define   CTL_WB_FLUSH                 0x108
-#define   CTL_CWB_FLUSH                0x10C
-#define   CTL_INTF_FLUSH               0x110
-#define   CTL_CDM_FLUSH                0x114
-#define   CTL_PERIPH_FLUSH             0x128
-
-#define  CTL_INTF_MASTER               0x134
-#define  CTL_UIDLE_ACTIVE              0x138
-
-#define CTL_MIXER_BORDER_OUT            BIT(24)
-#define CTL_FLUSH_MASK_ROT              BIT(27)
-#define CTL_FLUSH_MASK_CTL              BIT(17)
-
-#define CTL_NUM_EXT			4
-#define CTL_SSPP_MAX_RECTS		2
-
-#define SDE_REG_RESET_TIMEOUT_US        2000
-#define SDE_REG_WAIT_RESET_TIMEOUT_US        100000
-
-#define UPDATE_MASK(m, idx, en)           \
-	((m) = (en) ? ((m) | BIT((idx))) : ((m) & ~BIT((idx))))
-
-/**
- * List of SSPP bits in CTL_FLUSH
- */
-static const u32 sspp_tbl[SSPP_MAX] = { SDE_NONE, 0, 1, 2, 18, 3, 4, 5,
-	19, 11, 12, 24, 25, SDE_NONE, SDE_NONE};
-
-/**
- * List of layer mixer bits in CTL_FLUSH
- */
-static const u32 mixer_tbl[LM_MAX] = {SDE_NONE, 6, 7, 8, 9, 10, 20,
-	SDE_NONE};
-
-/**
- * List of DSPP bits in CTL_FLUSH
- */
-static const u32 dspp_tbl[DSPP_MAX] = {SDE_NONE, 13, 14, 15, 21};
-
-/**
- * List of DSPP PA LUT bits in CTL_FLUSH
- */
-static const u32 dspp_pav_tbl[DSPP_MAX] = {SDE_NONE, 3, 4, 5, 19};
-
-/**
- * List of CDM LUT bits in CTL_FLUSH
- */
-static const u32 cdm_tbl[CDM_MAX] = {SDE_NONE, 26};
-
-/**
- * List of WB bits in CTL_FLUSH
- */
-static const u32 wb_tbl[WB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 16};
-
-/**
- * List of ROT bits in CTL_FLUSH
- */
-static const u32 rot_tbl[ROT_MAX] = {SDE_NONE, 27};
-
-/**
- * List of INTF bits in CTL_FLUSH
- */
-static const u32 intf_tbl[INTF_MAX] = {SDE_NONE, 31, 30, 29, 28};
-
-/**
- * Below definitions are for CTL supporting SDE_CTL_ACTIVE_CFG,
- * certain blocks have the individual flush control as well,
- * for such blocks flush is done by flushing individual control and
- * top level control.
- */
-
-/**
- * list of WB bits in CTL_WB_FLUSH
- */
-static const u32 wb_flush_tbl[WB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2};
-
-/**
- * list of INTF bits in CTL_INTF_FLUSH
- */
-static const u32 intf_flush_tbl[INTF_MAX] = {SDE_NONE, 0, 1, 2, 3, 4, 5};
-
-/**
- * list of DSC bits in CTL_DSC_FLUSH
- */
-static const u32 dsc_flush_tbl[DSC_MAX] = {SDE_NONE, 0, 1, 2, 3, 4, 5};
-
-/**
- * list of MERGE_3D bits in CTL_MERGE_3D_FLUSH
- */
-static const u32 merge_3d_tbl[MERGE_3D_MAX] = {SDE_NONE, 0, 1, 2};
-
-/**
- * list of CDM bits in CTL_CDM_FLUSH
- */
-static const u32 cdm_flush_tbl[CDM_MAX] = {SDE_NONE, 0};
-
-/**
- * list of CWB bits in CTL_CWB_FLUSH
- */
-static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2, 3,
-	4, 5};
-
-/**
- * struct ctl_sspp_stage_reg_map: Describes bit layout for a sspp stage cfg
- * @ext: Index to indicate LAYER_x_EXT id for given sspp
- * @start: Start position of blend stage bits for given sspp
- * @bits: Number of bits from @start assigned for given sspp
- * @sec_bit_mask: Bitmask to add to LAYER_x_EXT1 for missing bit of sspp
- */
-struct ctl_sspp_stage_reg_map {
-	u32 ext;
-	u32 start;
-	u32 bits;
-	u32 sec_bit_mask;
-};
-
-/* list of ctl_sspp_stage_reg_map for all the sppp */
-static const struct ctl_sspp_stage_reg_map
-sspp_reg_cfg_tbl[SSPP_MAX][CTL_SSPP_MAX_RECTS] = {
-	/* SSPP_NONE */{ {0, 0, 0, 0}, {0, 0, 0, 0} },
-	/* SSPP_VIG0 */{ {0, 0, 3, BIT(0)}, {3, 0, 4, 0} },
-	/* SSPP_VIG1 */{ {0, 3, 3, BIT(2)}, {3, 4, 4, 0} },
-	/* SSPP_VIG2 */{ {0, 6, 3, BIT(4)}, {3, 8, 4, 0} },
-	/* SSPP_VIG3 */{ {0, 26, 3, BIT(6)}, {3, 12, 4, 0} },
-	/* SSPP_RGB0 */{ {0, 9, 3, BIT(8)}, {0, 0, 0, 0} },
-	/* SSPP_RGB1 */{ {0, 12, 3, BIT(10)}, {0, 0, 0, 0} },
-	/* SSPP_RGB2 */{ {0, 15, 3, BIT(12)}, {0, 0, 0, 0} },
-	/* SSPP_RGB3 */{ {0, 29, 3, BIT(14)}, {0, 0, 0, 0} },
-	/* SSPP_DMA0 */{ {0, 18, 3, BIT(16)}, {2, 8, 4, 0} },
-	/* SSPP_DMA1 */{ {0, 21, 3, BIT(18)}, {2, 12, 4, 0} },
-	/* SSPP_DMA2 */{ {2, 0, 4, 0}, {2, 16, 4, 0} },
-	/* SSPP_DMA3 */{ {2, 4, 4, 0}, {2, 20, 4, 0} },
-	/* SSPP_CURSOR0 */{ {1, 20, 4, 0}, {0, 0, 0, 0} },
-	/* SSPP_CURSOR1 */{ {0, 26, 4, 0}, {0, 0, 0, 0} }
-};
-
-/**
- * Individual flush bit in CTL_FLUSH
- */
-#define  WB_IDX         16
-#define  DSC_IDX        22
-#define  MERGE_3D_IDX   23
-#define  CDM_IDX        26
-#define  CWB_IDX        28
-#define  PERIPH_IDX     30
-#define  INTF_IDX       31
-
-static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->ctl_count; i++) {
-		if (ctl == m->ctl[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->ctl[i].base;
-			b->length = m->ctl[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_CTL;
-			return &m->ctl[i];
-		}
-	}
-	return ERR_PTR(-ENOMEM);
-}
-
-static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
-		enum sde_lm lm)
-{
-	int i;
-	int stages = -EINVAL;
-
-	for (i = 0; i < count; i++) {
-		if (lm == mixer[i].id) {
-			stages = mixer[i].sblk->maxblendstages;
-			break;
-		}
-	}
-
-	return stages;
-}
-
-static inline int sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
-	return 0;
-}
-
-static inline int sde_hw_ctl_get_start_state(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	return SDE_REG_READ(&ctx->hw, CTL_START);
-}
-
-static inline int sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
-	return 0;
-}
-
-static inline int sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	memset(&ctx->flush, 0, sizeof(ctx->flush));
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
-	struct sde_ctl_flush_cfg *cfg)
-{
-	if (!ctx || !cfg)
-		return -EINVAL;
-
-	ctx->flush.pending_flush_mask |= cfg->pending_flush_mask;
-	return 0;
-}
-
-static int sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx,
-		struct sde_ctl_flush_cfg *cfg)
-{
-	if (!ctx || !cfg)
-		return -EINVAL;
-
-	memcpy(cfg, &ctx->flush, sizeof(*cfg));
-	return 0;
-}
-
-static inline int sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
-{
-
-	if (!ctx)
-		return -EINVAL;
-
-	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->flush.pending_flush_mask);
-	return 0;
-}
-
-static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 rot_op_mode;
-
-	if (!ctx)
-		return 0;
-
-	c = &ctx->hw;
-	rot_op_mode = SDE_REG_READ(c, CTL_ROT_TOP) & 0x3;
-
-	/* rotate flush bit is undefined if offline mode, so ignore it */
-	if (rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
-		return SDE_REG_READ(c, CTL_FLUSH) & ~CTL_FLUSH_MASK_ROT;
-
-	return SDE_REG_READ(c, CTL_FLUSH);
-}
-
-static inline void sde_hw_ctl_uidle_enable(struct sde_hw_ctl *ctx, bool enable)
-{
-	u32 val;
-
-	if (!ctx)
-		return;
-
-	val = SDE_REG_READ(&ctx->hw, CTL_UIDLE_ACTIVE);
-	val = (val & ~BIT(0)) | (enable ? BIT(0) : 0);
-
-	SDE_REG_WRITE(&ctx->hw, CTL_UIDLE_ACTIVE, val);
-}
-
-static inline int sde_hw_ctl_update_bitmask_sspp(struct sde_hw_ctl *ctx,
-		enum sde_sspp sspp,
-		bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(sspp > SSPP_NONE) || !(sspp < SSPP_MAX)) {
-		SDE_ERROR("Unsupported pipe %d\n", sspp);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, sspp_tbl[sspp], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_mixer(struct sde_hw_ctl *ctx,
-		enum sde_lm lm,
-		bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(lm > SDE_NONE) || !(lm < LM_MAX)) {
-		SDE_ERROR("Unsupported mixer %d\n", lm);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, mixer_tbl[lm], enable);
-	ctx->flush.pending_flush_mask |= CTL_FLUSH_MASK_CTL;
-
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_dspp(struct sde_hw_ctl *ctx,
-		enum sde_dspp dspp,
-		bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(dspp > SDE_NONE) || !(dspp < DSPP_MAX)) {
-		SDE_ERROR("Unsupported dspp %d\n", dspp);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, dspp_tbl[dspp], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_dspp_pavlut(struct sde_hw_ctl *ctx,
-		enum sde_dspp dspp, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(dspp > SDE_NONE) || !(dspp < DSPP_MAX)) {
-		SDE_ERROR("Unsupported dspp %d\n", dspp);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, dspp_pav_tbl[dspp], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_cdm(struct sde_hw_ctl *ctx,
-		enum sde_cdm cdm,
-		bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(cdm > SDE_NONE) || !(cdm < CDM_MAX) || (cdm == CDM_1)) {
-		SDE_ERROR("Unsupported cdm %d\n", cdm);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, cdm_tbl[cdm], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_wb(struct sde_hw_ctl *ctx,
-		enum sde_wb wb, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(wb > SDE_NONE) || !(wb < WB_MAX) ||
-			(wb == WB_0) || (wb == WB_1)) {
-		SDE_ERROR("Unsupported wb %d\n", wb);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, wb_tbl[wb], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_intf(struct sde_hw_ctl *ctx,
-		enum sde_intf intf, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(intf > SDE_NONE) || !(intf < INTF_MAX) || (intf > INTF_4)) {
-		SDE_ERROR("Unsupported intf %d\n", intf);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_flush_mask, intf_tbl[intf], enable);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_wb_v1(struct sde_hw_ctl *ctx,
-		enum sde_wb wb, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (wb != WB_2) {
-		SDE_ERROR("Unsupported wb %d\n", wb);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_wb_flush_mask, wb_flush_tbl[wb], enable);
-	if (ctx->flush.pending_wb_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, WB_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, WB_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_intf_v1(struct sde_hw_ctl *ctx,
-		enum sde_intf intf, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(intf > SDE_NONE) || !(intf < INTF_MAX)) {
-		SDE_ERROR("Unsupported intf %d\n", intf);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_intf_flush_mask, intf_flush_tbl[intf],
-			enable);
-	if (ctx->flush.pending_intf_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, INTF_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, INTF_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_periph_v1(struct sde_hw_ctl *ctx,
-		enum sde_intf intf, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(intf > SDE_NONE) || !(intf < INTF_MAX)) {
-		SDE_ERROR("Unsupported intf %d\n", intf);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_periph_flush_mask, intf_flush_tbl[intf],
-			enable);
-	if (ctx->flush.pending_periph_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, PERIPH_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, PERIPH_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_dsc_v1(struct sde_hw_ctl *ctx,
-		enum sde_dsc dsc, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(dsc > SDE_NONE) || !(dsc < DSC_MAX)) {
-		SDE_ERROR("Unsupported dsc %d\n", dsc);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_dsc_flush_mask, dsc_flush_tbl[dsc],
-			enable);
-	if (ctx->flush.pending_dsc_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, DSC_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, DSC_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_merge3d_v1(struct sde_hw_ctl *ctx,
-		enum sde_merge_3d merge_3d, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (!(merge_3d > SDE_NONE) || !(merge_3d < MERGE_3D_MAX)) {
-		SDE_ERROR("Unsupported merge_3d %d\n", merge_3d);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_merge_3d_flush_mask,
-			merge_3d_tbl[merge_3d], enable);
-	if (ctx->flush.pending_merge_3d_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, MERGE_3D_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, MERGE_3D_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_cdm_v1(struct sde_hw_ctl *ctx,
-		enum sde_cdm cdm, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (cdm != CDM_0) {
-		SDE_ERROR("Unsupported cdm %d\n", cdm);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_cdm_flush_mask, cdm_flush_tbl[cdm],
-			enable);
-	if (ctx->flush.pending_cdm_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, CDM_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, CDM_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_bitmask_cwb_v1(struct sde_hw_ctl *ctx,
-		enum sde_cwb cwb, bool enable)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if ((cwb < CWB_2) || (cwb >= CWB_MAX)) {
-		SDE_ERROR("Unsupported cwb %d\n", cwb);
-		return -EINVAL;
-	}
-
-	UPDATE_MASK(ctx->flush.pending_cwb_flush_mask, cwb_flush_tbl[cwb],
-			enable);
-	if (ctx->flush.pending_cwb_flush_mask)
-		UPDATE_MASK(ctx->flush.pending_flush_mask, CWB_IDX, 1);
-	else
-		UPDATE_MASK(ctx->flush.pending_flush_mask, CWB_IDX, 0);
-	return 0;
-}
-
-static inline int sde_hw_ctl_update_pending_flush_v1(
-		struct sde_hw_ctl *ctx,
-		struct sde_ctl_flush_cfg *cfg)
-{
-	if (!ctx || !cfg)
-		return -EINVAL;
-
-	ctx->flush.pending_flush_mask |= cfg->pending_flush_mask;
-	ctx->flush.pending_intf_flush_mask |= cfg->pending_intf_flush_mask;
-	ctx->flush.pending_cdm_flush_mask |= cfg->pending_cdm_flush_mask;
-	ctx->flush.pending_wb_flush_mask |= cfg->pending_wb_flush_mask;
-	ctx->flush.pending_dsc_flush_mask |= cfg->pending_dsc_flush_mask;
-	ctx->flush.pending_merge_3d_flush_mask |=
-		cfg->pending_merge_3d_flush_mask;
-	ctx->flush.pending_cwb_flush_mask |= cfg->pending_cwb_flush_mask;
-	ctx->flush.pending_periph_flush_mask |= cfg->pending_periph_flush_mask;
-	return 0;
-}
-
-static inline int sde_hw_ctl_trigger_flush_v1(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return -EINVAL;
-
-	if (ctx->flush.pending_flush_mask & BIT(WB_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
-				ctx->flush.pending_wb_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(DSC_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
-				ctx->flush.pending_dsc_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(MERGE_3D_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
-				ctx->flush.pending_merge_3d_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(CDM_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
-				ctx->flush.pending_cdm_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(CWB_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
-				ctx->flush.pending_cwb_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(INTF_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
-				ctx->flush.pending_intf_flush_mask);
-	if (ctx->flush.pending_flush_mask & BIT(PERIPH_IDX))
-		SDE_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
-				ctx->flush.pending_periph_flush_mask);
-
-	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->flush.pending_flush_mask);
-	return 0;
-}
-
-static inline u32 sde_hw_ctl_get_intf_v1(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 intf_active;
-
-	if (!ctx) {
-		pr_err("Invalid input argument\n");
-		return 0;
-	}
-
-	c = &ctx->hw;
-	intf_active = SDE_REG_READ(c, CTL_INTF_ACTIVE);
-
-	return intf_active;
-}
-
-static inline u32 sde_hw_ctl_get_intf(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 ctl_top;
-	u32 intf_active = 0;
-
-	if (!ctx) {
-		pr_err("Invalid input argument\n");
-		return 0;
-	}
-
-	c = &ctx->hw;
-	ctl_top = SDE_REG_READ(c, CTL_TOP);
-
-	intf_active = (ctl_top > 0) ?
-		BIT(ctl_top - 1) : 0;
-
-	return intf_active;
-}
-
-static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 timeout_us)
-{
-	struct sde_hw_blk_reg_map *c;
-	ktime_t timeout;
-	u32 status;
-
-	if (!ctx)
-		return 0;
-
-	c = &ctx->hw;
-	timeout = ktime_add_us(ktime_get(), timeout_us);
-
-	/*
-	 * it takes around 30us to have mdp finish resetting its ctl path
-	 * poll every 50us so that reset should be completed at 1st poll
-	 */
-	do {
-		status = SDE_REG_READ(c, CTL_SW_RESET);
-		status &= 0x1;
-		if (status)
-			usleep_range(20, 50);
-	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
-
-	return status;
-}
-
-static u32 sde_hw_ctl_get_reset_status(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return 0;
-	return (u32)SDE_REG_READ(&ctx->hw, CTL_SW_RESET);
-}
-
-static u32 sde_hw_ctl_get_scheduler_status(struct sde_hw_ctl *ctx)
-{
-	if (!ctx)
-		return 0;
-	return (u32)SDE_REG_READ(&ctx->hw, CTL_STATUS);
-}
-
-static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!ctx)
-		return 0;
-
-	c = &ctx->hw;
-	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
-	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
-	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US))
-		return -EINVAL;
-
-	return 0;
-}
-
-static void sde_hw_ctl_hard_reset(struct sde_hw_ctl *ctx, bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	pr_debug("hw ctl hard reset for ctl:%d, %d\n",
-			ctx->idx - CTL_0, enable);
-	SDE_REG_WRITE(c, CTL_SW_RESET_OVERRIDE, enable);
-}
-
-static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 status;
-
-	if (!ctx)
-		return 0;
-
-	c = &ctx->hw;
-	status = SDE_REG_READ(c, CTL_SW_RESET);
-	status &= 0x01;
-	if (!status)
-		return 0;
-
-	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
-	if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_WAIT_RESET_TIMEOUT_US)) {
-		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	int i;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	for (i = 0; i < ctx->mixer_count; i++) {
-		int mixer_id = ctx->mixer_hw_caps[i].id;
-
-		SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
-		SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
-		SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
-		SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
-	}
-}
-
-static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
-	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
-	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
-	int i, j;
-	u8 stages;
-	int pipes_per_stage;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
-	if ((int)stages < 0)
-		return;
-
-	if (test_bit(SDE_MIXER_SOURCESPLIT,
-		&ctx->mixer_hw_caps->features))
-		pipes_per_stage = PIPES_PER_STAGE;
-	else
-		pipes_per_stage = 1;
-
-	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
-
-	if (!stage_cfg)
-		goto exit;
-
-	for (i = 0; i <= stages; i++) {
-		/* overflow to ext register if 'i + 1 > 7' */
-		mix = (i + 1) & 0x7;
-		ext = i >= 7;
-
-		for (j = 0 ; j < pipes_per_stage; j++) {
-			enum sde_sspp_multirect_index rect_index =
-				stage_cfg->multirect_index[i][j];
-
-			switch (stage_cfg->stage[i][j]) {
-			case SSPP_VIG0:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
-				} else {
-					mixercfg |= mix << 0;
-					mixercfg_ext |= ext << 0;
-				}
-				break;
-			case SSPP_VIG1:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
-				} else {
-					mixercfg |= mix << 3;
-					mixercfg_ext |= ext << 2;
-				}
-				break;
-			case SSPP_VIG2:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
-				} else {
-					mixercfg |= mix << 6;
-					mixercfg_ext |= ext << 4;
-				}
-				break;
-			case SSPP_VIG3:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
-				} else {
-					mixercfg |= mix << 26;
-					mixercfg_ext |= ext << 6;
-				}
-				break;
-			case SSPP_RGB0:
-				mixercfg |= mix << 9;
-				mixercfg_ext |= ext << 8;
-				break;
-			case SSPP_RGB1:
-				mixercfg |= mix << 12;
-				mixercfg_ext |= ext << 10;
-				break;
-			case SSPP_RGB2:
-				mixercfg |= mix << 15;
-				mixercfg_ext |= ext << 12;
-				break;
-			case SSPP_RGB3:
-				mixercfg |= mix << 29;
-				mixercfg_ext |= ext << 14;
-				break;
-			case SSPP_DMA0:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
-				} else {
-					mixercfg |= mix << 18;
-					mixercfg_ext |= ext << 16;
-				}
-				break;
-			case SSPP_DMA1:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
-				} else {
-					mixercfg |= mix << 21;
-					mixercfg_ext |= ext << 18;
-				}
-				break;
-			case SSPP_DMA2:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
-				} else {
-					mix |= (i + 1) & 0xF;
-					mixercfg_ext2 |= mix << 0;
-				}
-				break;
-			case SSPP_DMA3:
-				if (rect_index == SDE_SSPP_RECT_1) {
-					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
-				} else {
-					mix |= (i + 1) & 0xF;
-					mixercfg_ext2 |= mix << 4;
-				}
-				break;
-			case SSPP_CURSOR0:
-				mixercfg_ext |= ((i + 1) & 0xF) << 20;
-				break;
-			case SSPP_CURSOR1:
-				mixercfg_ext |= ((i + 1) & 0xF) << 26;
-				break;
-			default:
-				break;
-			}
-		}
-	}
-
-exit:
-	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
-	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
-	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
-	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
-}
-
-static u32 sde_hw_ctl_get_staged_sspp(struct sde_hw_ctl *ctx, enum sde_lm lm,
-		struct sde_sspp_index_info *info, u32 info_max_cnt)
-{
-	int i, j;
-	u32 count = 0;
-	u32 mask = 0;
-	bool staged;
-	u32 mixercfg[CTL_NUM_EXT];
-	struct sde_hw_blk_reg_map *c;
-	const struct ctl_sspp_stage_reg_map *sspp_cfg;
-
-	if (!ctx || (lm >= LM_MAX) || !info)
-		return count;
-
-	c = &ctx->hw;
-	mixercfg[0] = SDE_REG_READ(c, CTL_LAYER(lm));
-	mixercfg[1] = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
-	mixercfg[2] = SDE_REG_READ(c, CTL_LAYER_EXT2(lm));
-	mixercfg[3] = SDE_REG_READ(c, CTL_LAYER_EXT3(lm));
-
-	for (i = SSPP_VIG0; i < SSPP_MAX; i++) {
-		for (j = 0; j < CTL_SSPP_MAX_RECTS; j++) {
-			if (count >= info_max_cnt)
-				goto end;
-
-			sspp_cfg = &sspp_reg_cfg_tbl[i][j];
-			if (!sspp_cfg->bits || sspp_cfg->ext >= CTL_NUM_EXT)
-				continue;
-
-			mask = ((0x1 << sspp_cfg->bits) - 1) << sspp_cfg->start;
-			staged = mixercfg[sspp_cfg->ext] & mask;
-			if (!staged)
-				staged = mixercfg[1] & sspp_cfg->sec_bit_mask;
-
-			if (staged) {
-				info[count].sspp = i;
-				info[count].is_virtual = j;
-				count++;
-			}
-		}
-	}
-
-end:
-	return count;
-}
-
-static int sde_hw_ctl_intf_cfg_v1(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 intf_active = 0;
-	u32 wb_active = 0;
-	u32 merge_3d_active = 0;
-	u32 cwb_active = 0;
-	u32 mode_sel = 0;
-	u32 cdm_active = 0;
-	u32 intf_master = 0;
-	u32 i;
-
-	if (!ctx)
-		return -EINVAL;
-
-	c = &ctx->hw;
-	for (i = 0; i < cfg->intf_count; i++) {
-		if (cfg->intf[i])
-			intf_active |= BIT(cfg->intf[i] - INTF_0);
-	}
-
-	if (cfg->intf_count > 1)
-		intf_master = BIT(cfg->intf_master - INTF_0);
-
-	for (i = 0; i < cfg->wb_count; i++) {
-		if (cfg->wb[i])
-			wb_active |= BIT(cfg->wb[i] - WB_0);
-	}
-
-	for (i = 0; i < cfg->merge_3d_count; i++) {
-		if (cfg->merge_3d[i])
-			merge_3d_active |= BIT(cfg->merge_3d[i] - MERGE_3D_0);
-	}
-
-	for (i = 0; i < cfg->cwb_count; i++) {
-		if (cfg->cwb[i])
-			cwb_active |= BIT(cfg->cwb[i] - CWB_0);
-	}
-
-	for (i = 0; i < cfg->cdm_count; i++) {
-		if (cfg->cdm[i])
-			cdm_active |= BIT(cfg->cdm[i] - CDM_0);
-	}
-
-	if (cfg->intf_mode_sel == SDE_CTL_MODE_SEL_CMD)
-		mode_sel |= BIT(17);
-
-	SDE_REG_WRITE(c, CTL_TOP, mode_sel);
-	SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
-	SDE_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
-	SDE_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-	SDE_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
-	SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
-	SDE_REG_WRITE(c, CTL_INTF_MASTER, intf_master);
-	return 0;
-}
-
-static int sde_hw_ctl_reset_post_disable(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg, u32 merge_3d_idx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 intf_active = 0;
-	u32 intf_flush = 0;
-	u32 merge_3d_active = 0;
-	u32 merge_3d_flush = 0;
-	u32 wb_active = 0;
-	u32 wb_flush = 0;
-	u32 i;
-
-	if (!ctx || !cfg) {
-		SDE_ERROR("invalid hw_ctl or hw_intf blk\n");
-		return -EINVAL;
-	}
-
-	c = &ctx->hw;
-	for (i = 0; i < cfg->intf_count; i++) {
-		if (cfg->intf[i]) {
-			intf_active &= ~BIT(cfg->intf[i] - INTF_0);
-			intf_flush |= BIT(cfg->intf[i] - INTF_0);
-		}
-	}
-
-	for (i = 0; i < cfg->wb_count; i++) {
-		if (cfg->wb[i]) {
-			wb_active &= ~BIT(cfg->wb[i] - WB_0);
-			wb_flush |= BIT(cfg->wb[i] - WB_0);
-		}
-	}
-
-	if (merge_3d_idx) {
-		/* disable and flush merge3d_blk */
-		merge_3d_flush = BIT(merge_3d_idx - MERGE_3D_0);
-		merge_3d_active &= ~BIT(merge_3d_idx - MERGE_3D_0);
-		ctx->flush.pending_merge_3d_flush_mask = merge_3d_flush;
-		SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
-	}
-	sde_hw_ctl_clear_all_blendstages(ctx);
-
-	ctx->flush.pending_intf_flush_mask = intf_flush;
-	ctx->flush.pending_wb_flush_mask = wb_flush;
-
-
-	SDE_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-	SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
-
-	return 0;
-}
-
-static int sde_hw_ctl_update_cwb_cfg(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg)
-{
-	int i;
-	u32 cwb_active = 0;
-	u32 merge_3d_active = 0;
-	u32 wb_active = 0;
-	struct sde_hw_blk_reg_map *c;
-
-	if (!ctx)
-		return -EINVAL;
-
-	c = &ctx->hw;
-	cwb_active = SDE_REG_READ(c, CTL_CWB_ACTIVE);
-	for (i = 0; i < cfg->cwb_count; i++) {
-		if (cfg->cwb[i])
-			cwb_active |= BIT(cfg->cwb[i] - CWB_0);
-	}
-
-	merge_3d_active = SDE_REG_READ(c, CTL_MERGE_3D_ACTIVE);
-	for (i = 0; i < cfg->merge_3d_count; i++) {
-		if (cfg->merge_3d[i])
-			merge_3d_active |= BIT(cfg->merge_3d[i] - MERGE_3D_0);
-	}
-
-	wb_active = BIT(2);
-	SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
-	SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
-	SDE_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
-
-	return 0;
-}
-
-static int sde_hw_ctl_dsc_cfg(struct sde_hw_ctl *ctx,
-		struct sde_ctl_dsc_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 dsc_active = 0;
-	int i;
-
-	if (!ctx)
-		return -EINVAL;
-
-	c = &ctx->hw;
-	for (i = 0; i < cfg->dsc_count; i++)
-		if (cfg->dsc[i])
-			dsc_active |= BIT(cfg->dsc[i] - DSC_0);
-
-	SDE_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
-	return 0;
-}
-
-static int sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 intf_cfg = 0;
-
-	if (!ctx)
-		return -EINVAL;
-
-	c = &ctx->hw;
-	intf_cfg |= (cfg->intf & 0xF) << 4;
-
-	if (cfg->wb)
-		intf_cfg |= (cfg->wb & 0x3) + 2;
-
-	if (cfg->mode_3d) {
-		intf_cfg |= BIT(19);
-		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
-	}
-
-	switch (cfg->intf_mode_sel) {
-	case SDE_CTL_MODE_SEL_VID:
-		intf_cfg &= ~BIT(17);
-		intf_cfg &= ~(0x3 << 15);
-		break;
-	case SDE_CTL_MODE_SEL_CMD:
-		intf_cfg |= BIT(17);
-		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
-		break;
-	default:
-		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
-		return -EINVAL;
-	}
-
-	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
-	return 0;
-}
-
-static void sde_hw_ctl_update_wb_cfg(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg *cfg, bool enable)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 intf_cfg = 0;
-
-	if (!cfg->wb)
-		return;
-
-	intf_cfg = SDE_REG_READ(c, CTL_TOP);
-	if (enable)
-		intf_cfg |= (cfg->wb & 0x3) + 2;
-	else
-		intf_cfg &= ~((cfg->wb & 0x3) + 2);
-
-	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
-}
-
-static inline u32 sde_hw_ctl_read_ctl_top(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 ctl_top;
-
-	if (!ctx) {
-		pr_err("Invalid input argument\n");
-		return 0;
-	}
-	c = &ctx->hw;
-	ctl_top = SDE_REG_READ(c, CTL_TOP);
-	return ctl_top;
-}
-
-static inline u32 sde_hw_ctl_read_ctl_layers(struct sde_hw_ctl *ctx, int index)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 ctl_top;
-
-	if (!ctx) {
-		pr_err("Invalid input argument\n");
-		return 0;
-	}
-	c = &ctx->hw;
-	ctl_top = SDE_REG_READ(c, CTL_LAYER(index));
-	pr_debug("Ctl_layer value = 0x%x\n", ctl_top);
-	return ctl_top;
-}
-
-static int sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx, bool blocking)
-{
-	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();
-
-	if (!ctx)
-		return -EINVAL;
-
-	if (ops && ops->last_command)
-		return ops->last_command(ctx, DMA_CTL_QUEUE0,
-		    (blocking ? REG_DMA_WAIT4_COMP : REG_DMA_NOWAIT));
-
-	return 0;
-
-}
-
-static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
-		unsigned long cap)
-{
-	if (cap & BIT(SDE_CTL_ACTIVE_CFG)) {
-		ops->update_pending_flush =
-			sde_hw_ctl_update_pending_flush_v1;
-		ops->trigger_flush = sde_hw_ctl_trigger_flush_v1;
-
-		ops->setup_intf_cfg_v1 = sde_hw_ctl_intf_cfg_v1;
-		ops->update_cwb_cfg = sde_hw_ctl_update_cwb_cfg;
-		ops->setup_dsc_cfg = sde_hw_ctl_dsc_cfg;
-
-		ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm_v1;
-		ops->update_bitmask_wb = sde_hw_ctl_update_bitmask_wb_v1;
-		ops->update_bitmask_intf = sde_hw_ctl_update_bitmask_intf_v1;
-		ops->update_bitmask_dsc = sde_hw_ctl_update_bitmask_dsc_v1;
-		ops->update_bitmask_merge3d =
-			sde_hw_ctl_update_bitmask_merge3d_v1;
-		ops->update_bitmask_cwb = sde_hw_ctl_update_bitmask_cwb_v1;
-		ops->update_bitmask_periph =
-			sde_hw_ctl_update_bitmask_periph_v1;
-		ops->get_ctl_intf = sde_hw_ctl_get_intf_v1;
-		ops->reset_post_disable = sde_hw_ctl_reset_post_disable;
-		ops->get_scheduler_status = sde_hw_ctl_get_scheduler_status;
-	} else {
-		ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
-		ops->trigger_flush = sde_hw_ctl_trigger_flush;
-
-		ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
-
-		ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm;
-		ops->update_bitmask_wb = sde_hw_ctl_update_bitmask_wb;
-		ops->update_bitmask_intf = sde_hw_ctl_update_bitmask_intf;
-		ops->get_ctl_intf = sde_hw_ctl_get_intf;
-	}
-	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
-	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
-	ops->get_flush_register = sde_hw_ctl_get_flush_register;
-	ops->trigger_start = sde_hw_ctl_trigger_start;
-	ops->trigger_pending = sde_hw_ctl_trigger_pending;
-	ops->read_ctl_top = sde_hw_ctl_read_ctl_top;
-	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
-	ops->update_wb_cfg = sde_hw_ctl_update_wb_cfg;
-	ops->reset = sde_hw_ctl_reset_control;
-	ops->get_reset = sde_hw_ctl_get_reset_status;
-	ops->hard_reset = sde_hw_ctl_hard_reset;
-	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
-	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
-	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
-	ops->get_staged_sspp = sde_hw_ctl_get_staged_sspp;
-	ops->update_bitmask_sspp = sde_hw_ctl_update_bitmask_sspp;
-	ops->update_bitmask_mixer = sde_hw_ctl_update_bitmask_mixer;
-	ops->update_bitmask_dspp = sde_hw_ctl_update_bitmask_dspp;
-	ops->update_bitmask_dspp_pavlut = sde_hw_ctl_update_bitmask_dspp_pavlut;
-	ops->reg_dma_flush = sde_hw_reg_dma_flush;
-	ops->get_start_state = sde_hw_ctl_get_start_state;
-
-	if (cap & BIT(SDE_CTL_UIDLE))
-		ops->uidle_enable = sde_hw_ctl_uidle_enable;
-};
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_ctl *c;
-	struct sde_ctl_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _ctl_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		pr_err("failed to create sde_hw_ctl %d\n", idx);
-		return ERR_PTR(-EINVAL);
-	}
-
-	c->caps = cfg;
-	_setup_ctl_ops(&c->ops, c->caps->features);
-	c->idx = idx;
-	c->mixer_count = m->mixer_count;
-	c->mixer_hw_caps = m->mixer;
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CTL, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
-{
-	if (ctx)
-		sde_hw_blk_destroy(&ctx->base);
-	kfree(ctx);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
deleted file mode 100644
index 812d44a..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ /dev/null
@@ -1,531 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_CTL_H
-#define _SDE_HW_CTL_H
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_sspp.h"
-#include "sde_hw_blk.h"
-
-/**
- * sde_ctl_mode_sel: Interface mode selection
- * SDE_CTL_MODE_SEL_VID:    Video mode interface
- * SDE_CTL_MODE_SEL_CMD:    Command mode interface
- */
-enum sde_ctl_mode_sel {
-	SDE_CTL_MODE_SEL_VID = 0,
-	SDE_CTL_MODE_SEL_CMD
-};
-
-/**
- * sde_ctl_rot_op_mode - inline rotation mode
- * SDE_CTL_ROT_OP_MODE_OFFLINE: offline rotation
- * SDE_CTL_ROT_OP_MODE_RESERVED: reserved
- * SDE_CTL_ROT_OP_MODE_INLINE_SYNC: inline rotation synchronous mode
- * SDE_CTL_ROT_OP_MODE_INLINE_ASYNC: inline rotation asynchronous mode
- */
-enum sde_ctl_rot_op_mode {
-	SDE_CTL_ROT_OP_MODE_OFFLINE,
-	SDE_CTL_ROT_OP_MODE_RESERVED,
-	SDE_CTL_ROT_OP_MODE_INLINE_SYNC,
-	SDE_CTL_ROT_OP_MODE_INLINE_ASYNC,
-};
-
-struct sde_hw_ctl;
-/**
- * struct sde_hw_stage_cfg - blending stage cfg
- * @stage : SSPP_ID at each stage
- * @multirect_index: index of the rectangle of SSPP.
- */
-struct sde_hw_stage_cfg {
-	enum sde_sspp stage[SDE_STAGE_MAX][PIPES_PER_STAGE];
-	enum sde_sspp_multirect_index multirect_index
-					[SDE_STAGE_MAX][PIPES_PER_STAGE];
-};
-
-/**
- * struct sde_hw_intf_cfg :Describes how the SDE writes data to output interface
- * @intf :                 Interface id
- * @wb:                    Writeback id
- * @mode_3d:               3d mux configuration
- * @intf_mode_sel:         Interface mode, cmd / vid
- * @stream_sel:            Stream selection for multi-stream interfaces
- */
-struct sde_hw_intf_cfg {
-	enum sde_intf intf;
-	enum sde_wb wb;
-	enum sde_3d_blend_mode mode_3d;
-	enum sde_ctl_mode_sel intf_mode_sel;
-	int stream_sel;
-};
-
-/**
- * struct sde_hw_intf_cfg_v1 :Describes the data strcuture to configure the
- *                            output interfaces for a particular display on a
- *                            platform which supports ctl path version 1.
- * @intf_count:               No. of active interfaces for this display
- * @intf :                    Interface ids of active interfaces
- * @intf_mode_sel:            Interface mode, cmd / vid
- * @intf_master:              Master interface for split display
- * @wb_count:                 No. of active writebacks
- * @wb:                       Writeback ids of active writebacks
- * @merge_3d_count            No. of active merge_3d blocks
- * @merge_3d:                 Id of the active merge 3d blocks
- * @cwb_count:                No. of active concurrent writebacks
- * @cwb:                      Id of active cwb blocks
- * @cdm_count:                No. of active chroma down module
- * @cdm:                      Id of active cdm blocks
- */
-struct sde_hw_intf_cfg_v1 {
-	uint32_t intf_count;
-	enum sde_intf intf[MAX_INTF_PER_CTL_V1];
-	enum sde_ctl_mode_sel intf_mode_sel;
-	enum sde_intf intf_master;
-
-	uint32_t wb_count;
-	enum sde_wb wb[MAX_WB_PER_CTL_V1];
-
-	uint32_t merge_3d_count;
-	enum sde_merge_3d merge_3d[MAX_MERGE_3D_PER_CTL_V1];
-
-	uint32_t cwb_count;
-	enum sde_cwb cwb[MAX_CWB_PER_CTL_V1];
-
-	uint32_t cdm_count;
-	enum sde_cdm cdm[MAX_CDM_PER_CTL_V1];
-};
-
-/**
- * struct sde_hw_ctl_dsc_cfg :Describes the DSC blocks being used for this
- *                            display on a platoform which supports ctl path
- *                            version 1.
- * @dsc_count:                No. of active dsc blocks
- * @dsc:                      Id of active dsc blocks
- */
-struct sde_ctl_dsc_cfg {
-	uint32_t dsc_count;
-	enum sde_dsc dsc[MAX_DSC_PER_CTL_V1];
-};
-
-/**
- * struct sde_ctl_flush_cfg - struct describing flush configuration managed
- * via set, trigger and clear ops.
- * set ops corresponding to the hw_block is called, when the block's
- * configuration is changed and needs to be committed on Hw. Flush mask caches
- * the different bits for the ongoing commit.
- * clear ops clears the bitmask and cancels the update to the corresponding
- * hw block.
- * trigger op will trigger the update on the hw for the blocks cached in the
- * pending flush mask.
- *
- * @pending_flush_mask: pending ctl_flush
- * CTL path version SDE_CTL_CFG_VERSION_1_0_0 has * two level flush mechanism
- * for lower pipe controls. individual control should be flushed before
- * exercising top level flush
- * @pending_intf_flush_mask: pending INTF flush
- * @pending_cdm_flush_mask: pending CDWN block flush
- * @pending_wb_flush_mask: pending writeback flush
- * @pending_dsc_flush_mask: pending dsc flush
- * @pending_merge_3d_flush_mask: pending 3d merge block flush
- * @pending_cwb_flush_mask: pending flush for concurrent writeback
- * @pending_periph_flush_mask: pending flush for peripheral module
- */
-struct sde_ctl_flush_cfg {
-	u32 pending_flush_mask;
-	u32 pending_intf_flush_mask;
-	u32 pending_cdm_flush_mask;
-	u32 pending_wb_flush_mask;
-	u32 pending_dsc_flush_mask;
-	u32 pending_merge_3d_flush_mask;
-	u32 pending_cwb_flush_mask;
-	u32 pending_periph_flush_mask;
-};
-
-/**
- * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions
- * Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_ctl_ops {
-	/**
-	 * kickoff hw operation for Sw controlled interfaces
-	 * DSI cmd mode and WB interface are SW controlled
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*trigger_start)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * kickoff prepare is in progress hw operation for sw
-	 * controlled interfaces: DSI cmd mode and WB interface
-	 * are SW controlled
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*trigger_pending)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * kickoff rotator operation for Sw controlled interfaces
-	 * DSI cmd mode and WB interface are SW controlled
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*trigger_rot_start)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * enable/disable UIDLE feature
-	 * @ctx       : ctl path ctx pointer
-	 * @enable: true to enable the feature
-	 */
-	void (*uidle_enable)(struct sde_hw_ctl *ctx, bool enable);
-
-	/**
-	 * Clear the value of the cached pending_flush_mask
-	 * No effect on hardware
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*clear_pending_flush)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * Query the value of the cached pending_flush_mask
-	 * No effect on hardware
-	 * @ctx       : ctl path ctx pointer
-	 * @cfg       : current flush configuration
-	 * @Return: error code
-	 */
-	int (*get_pending_flush)(struct sde_hw_ctl *ctx,
-			struct sde_ctl_flush_cfg *cfg);
-
-	/**
-	 * OR in the given flushbits to the flush_cfg
-	 * No effect on hardware
-	 * @ctx       : ctl path ctx pointer
-	 * @cfg     : flush configuration pointer
-	 * @Return: error code
-	 */
-	int (*update_pending_flush)(struct sde_hw_ctl *ctx,
-		struct sde_ctl_flush_cfg *cfg);
-
-	/**
-	 * Write the value of the pending_flush_mask to hardware
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*trigger_flush)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * Read the value of the flush register
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: value of the ctl flush register.
-	 */
-	u32 (*get_flush_register)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * Setup ctl_path interface config
-	 * @ctx
-	 * @cfg    : interface config structure pointer
-	 * @Return: error code
-	 */
-	int (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg *cfg);
-
-	/**
-	 * Reset ctl_path interface config
-	 * @ctx   : ctl path ctx pointer
-	 * @cfg    : interface config structure pointer
-	 * @merge_3d_idx	: index of merge3d blk
-	 * @Return: error code
-	 */
-	int (*reset_post_disable)(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg, u32 merge_3d_idx);
-
-	/** update cwb  for ctl_path
-	 * @ctx       : ctl path ctx pointer
-	 * @cfg    : interface config structure pointer
-	 * @Return: error code
-	 */
-	int (*update_cwb_cfg)(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg);
-
-	/**
-	 * Setup ctl_path interface config for SDE_CTL_ACTIVE_CFG
-	 * @ctx   : ctl path ctx pointer
-	 * @cfg    : interface config structure pointer
-	 * @Return: error code
-	 */
-	int (*setup_intf_cfg_v1)(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg_v1 *cfg);
-
-	/**
-	 * Setup ctl_path dsc config for SDE_CTL_ACTIVE_CFG
-	 * @ctx   : ctl path ctx pointer
-	 * @cfg    : dsc config structure pointer
-	 * @Return: error code
-	 */
-	int (*setup_dsc_cfg)(struct sde_hw_ctl *ctx,
-		struct sde_ctl_dsc_cfg *cfg);
-
-	/** Update the interface selection with input WB config
-	 * @ctx       : ctl path ctx pointer
-	 * @cfg       : pointer to input wb config
-	 * @enable    : set if true, clear otherwise
-	 */
-	void (*update_wb_cfg)(struct sde_hw_ctl *ctx,
-		struct sde_hw_intf_cfg *cfg, bool enable);
-
-	int (*reset)(struct sde_hw_ctl *c);
-
-	/**
-	 * get_reset - check ctl reset status bit
-	 * @ctx    : ctl path ctx pointer
-	 * Returns: current value of ctl reset status
-	 */
-	u32 (*get_reset)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * get_scheduler_reset - check ctl scheduler status bit
-	 * @ctx    : ctl path ctx pointer
-	 * Returns: current value of ctl scheduler and idle status
-	 */
-	u32 (*get_scheduler_status)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * hard_reset - force reset on ctl_path
-	 * @ctx    : ctl path ctx pointer
-	 * @enable : whether to enable/disable hard reset
-	 */
-	void (*hard_reset)(struct sde_hw_ctl *c, bool enable);
-
-	/*
-	 * wait_reset_status - checks ctl reset status
-	 * @ctx       : ctl path ctx pointer
-	 *
-	 * This function checks the ctl reset status bit.
-	 * If the reset bit is set, it keeps polling the status till the hw
-	 * reset is complete.
-	 * Returns: 0 on success or -error if reset incomplete within interval
-	 */
-	int (*wait_reset_status)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_sspp)(struct sde_hw_ctl *ctx,
-		enum sde_sspp blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_mixer)(struct sde_hw_ctl *ctx,
-		enum sde_lm blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_dspp)(struct sde_hw_ctl *ctx,
-		enum sde_dspp blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_dspp_pavlut)(struct sde_hw_ctl *ctx,
-		enum sde_dspp blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_intf)(struct sde_hw_ctl *ctx,
-		enum sde_intf blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_cdm)(struct sde_hw_ctl *ctx,
-		enum sde_cdm blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_wb)(struct sde_hw_ctl *ctx,
-		enum sde_wb blk, bool enable);
-
-	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_rot)(struct sde_hw_ctl *ctx,
-		enum sde_rot blk, bool enable);
-
-	/**
-	 * update_bitmask_dsc: updates mask corresponding to dsc
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_dsc)(struct sde_hw_ctl *ctx,
-		enum sde_dsc blk, bool enable);
-
-	/**
-	 * update_bitmask_merge3d: updates mask corresponding to merge_3d
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_merge3d)(struct sde_hw_ctl *ctx,
-		enum sde_merge_3d blk, bool enable);
-
-	/**
-	 * update_bitmask_cwb: updates mask corresponding to cwb
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_cwb)(struct sde_hw_ctl *ctx,
-		enum sde_cwb blk, bool enable);
-
-	/**
-	 * update_bitmask_periph: updates mask corresponding to peripheral
-	 * @blk               : blk id
-	 * @enable            : true to enable, 0 to disable
-	 */
-	int (*update_bitmask_periph)(struct sde_hw_ctl *ctx,
-		enum sde_intf blk, bool enable);
-
-	/**
-	 * read CTL_TOP register value and return
-	 * the data.
-	 * @ctx		: ctl path ctx pointer
-	 * @return	: CTL top register value
-	 */
-	u32 (*read_ctl_top)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * get interfaces for the active CTL .
-	 * @ctx		: ctl path ctx pointer
-	 * @return	: bit mask with the active interfaces for the CTL
-	 */
-	u32 (*get_ctl_intf)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * read CTL layers register value and return
-	 * the data.
-	 * @ctx       : ctl path ctx pointer
-	 * @index       : layer index for this ctl path
-	 * @return	: CTL layers register value
-	 */
-	u32 (*read_ctl_layers)(struct sde_hw_ctl *ctx, int index);
-
-	/**
-	 * Set all blend stages to disabled
-	 * @ctx       : ctl path ctx pointer
-	 */
-	void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
-
-	/**
-	 * Configure layer mixer to pipe configuration
-	 * @ctx       : ctl path ctx pointer
-	 * @lm        : layer mixer enumeration
-	 * @cfg       : blend stage configuration
-	 */
-	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
-		enum sde_lm lm, struct sde_hw_stage_cfg *cfg);
-
-	/**
-	 * Get all the sspp staged on a layer mixer
-	 * @ctx       : ctl path ctx pointer
-	 * @lm        : layer mixer enumeration
-	 * @info      : array address to populate connected sspp index info
-	 * @info_max_cnt : maximum sspp info elements based on array size
-	 * @Return: count of sspps info  elements populated
-	 */
-	u32 (*get_staged_sspp)(struct sde_hw_ctl *ctx, enum sde_lm lm,
-		struct sde_sspp_index_info *info, u32 info_max_cnt);
-
-	/**
-	 * Flush the reg dma by sending last command.
-	 * @ctx       : ctl path ctx pointer
-	 * @blocking  : if set to true api will block until flush is done
-	 * @Return: error code
-	 */
-	int (*reg_dma_flush)(struct sde_hw_ctl *ctx, bool blocking);
-
-	/**
-	 * check if ctl start trigger state to confirm the frame pending
-	 * status
-	 * @ctx       : ctl path ctx pointer
-	 * @Return: error code
-	 */
-	int (*get_start_state)(struct sde_hw_ctl *ctx);
-};
-
-/**
- * struct sde_hw_ctl : CTL PATH driver object
- * @base: hardware block base structure
- * @hw: block register map object
- * @idx: control path index
- * @caps: control path capabilities
- * @mixer_count: number of mixers
- * @mixer_hw_caps: mixer hardware capabilities
- * @flush: storage for pending ctl_flush managed via ops
- * @ops: operation list
- */
-struct sde_hw_ctl {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* ctl path */
-	int idx;
-	const struct sde_ctl_cfg *caps;
-	int mixer_count;
-	const struct sde_lm_cfg *mixer_hw_caps;
-	struct sde_ctl_flush_cfg flush;
-
-	/* ops */
-	struct sde_hw_ctl_ops ops;
-};
-
-/**
- * sde_hw_ctl - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_ctl *to_sde_hw_ctl(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_ctl, base);
-}
-
-/**
- * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
- * should be called before accessing every ctl path registers.
- * @idx:  ctl_path index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- */
-struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_ctl_destroy(): Destroys ctl driver context
- * should be called to free the context
- */
-void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
-
-#endif /*_SDE_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.c b/drivers/gpu/drm/msm/sde/sde_hw_ds.c
deleted file mode 100644
index 2d3b2be..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_ds.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hw_ds.h"
-#include "sde_formats.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-/* Destination scaler TOP registers */
-#define DEST_SCALER_OP_MODE     0x00
-#define DEST_SCALER_HW_VERSION  0x10
-
-static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds,
-				u32 op_mode)
-{
-	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
-
-	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode);
-}
-
-static u32 _sde_hw_ds_get_scaler3_ver(struct sde_hw_ds *ctx)
-{
-	if (!ctx)
-		return 0;
-
-	return sde_hw_get_scaler3_ver(&ctx->hw, ctx->scl->base);
-}
-
-static void sde_hw_ds_setup_scaler3(struct sde_hw_ds *hw_ds,
-			void *scaler_cfg, void *scaler_lut_cfg)
-{
-	struct sde_hw_scaler3_cfg *scl3_cfg = scaler_cfg;
-	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg = scaler_lut_cfg;
-
-	if (!hw_ds || !hw_ds->scl || !scl3_cfg || !scl3_lut_cfg)
-		return;
-
-	/*
-	 * copy LUT values to scaler structure
-	 */
-	if (scl3_lut_cfg->is_configured) {
-		scl3_cfg->dir_lut = scl3_lut_cfg->dir_lut;
-		scl3_cfg->dir_len = scl3_lut_cfg->dir_len;
-		scl3_cfg->cir_lut = scl3_lut_cfg->cir_lut;
-		scl3_cfg->cir_len = scl3_lut_cfg->cir_len;
-		scl3_cfg->sep_lut = scl3_lut_cfg->sep_lut;
-		scl3_cfg->sep_len = scl3_lut_cfg->sep_len;
-	}
-
-	sde_hw_setup_scaler3(&hw_ds->hw, scl3_cfg, hw_ds->scl->version,
-			 hw_ds->scl->base,
-			 sde_get_sde_format(DRM_FORMAT_XBGR2101010));
-}
-
-static void _setup_ds_ops(struct sde_hw_ds_ops *ops, unsigned long features)
-{
-	ops->setup_opmode = sde_hw_ds_setup_opmode;
-
-	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features) ||
-			test_bit(SDE_SSPP_SCALER_QSEED3LITE, &features)) {
-		ops->get_scaler_ver = _sde_hw_ds_get_scaler3_ver;
-		ops->setup_scaler = sde_hw_ds_setup_scaler3;
-	}
-}
-
-static struct sde_ds_cfg *_ds_offset(enum sde_ds ds,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	if (!m || !addr || !b)
-		return ERR_PTR(-EINVAL);
-
-	for (i = 0; i < m->ds_count; i++) {
-		if ((ds == m->ds[i].id) &&
-			 (m->ds[i].top)) {
-			b->base_off = addr;
-			b->blk_off = m->ds[i].top->base;
-			b->length = m->ds[i].top->len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_DS;
-			return &m->ds[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_ds *sde_hw_ds_init(enum sde_ds idx,
-			void __iomem *addr,
-			struct sde_mdss_cfg *m)
-{
-	struct sde_hw_ds *hw_ds;
-	struct sde_ds_cfg *cfg;
-	int rc;
-
-	if (!addr || !m)
-		return ERR_PTR(-EINVAL);
-
-	hw_ds = kzalloc(sizeof(*hw_ds), GFP_KERNEL);
-	if (!hw_ds)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _ds_offset(idx, m, addr, &hw_ds->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		SDE_ERROR("failed to get ds cfg\n");
-		kfree(hw_ds);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Assign ops */
-	hw_ds->idx = idx;
-	hw_ds->scl = cfg;
-	_setup_ds_ops(&hw_ds->ops, hw_ds->scl->features);
-
-	if (hw_ds->ops.get_scaler_ver)
-		hw_ds->scl->version = hw_ds->ops.get_scaler_ver(hw_ds);
-
-
-	rc = sde_hw_blk_init(&hw_ds->base, SDE_HW_BLK_DS, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	if (cfg->len) {
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
-				hw_ds->hw.blk_off + cfg->base,
-				hw_ds->hw.blk_off + cfg->base + cfg->len,
-				hw_ds->hw.xin_id);
-	}
-
-	return hw_ds;
-
-blk_init_error:
-	kzfree(hw_ds);
-
-	return ERR_PTR(rc);
-
-}
-
-void sde_hw_ds_destroy(struct sde_hw_ds *hw_ds)
-{
-	if (hw_ds)
-		sde_hw_blk_destroy(&hw_ds->base);
-	kfree(hw_ds);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.h b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
deleted file mode 100644
index 7950459..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_ds.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_DS_H
-#define _SDE_HW_DS_H
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_ds;
-
-/* Destination Scaler DUAL mode overfetch pixel count */
-#define SDE_DS_OVERFETCH_SIZE 5
-
-/* Destination scaler DUAL mode operation bit */
-#define SDE_DS_OP_MODE_DUAL BIT(16)
-
-/* struct sde_hw_ds_cfg - destination scaler config
- * @idx          : DS selection index
- * @flags        : Flag to switch between mode for DS
- * @lm_width     : Layer mixer width configuration
- * @lm_heigh     : Layer mixer height configuration
- * @scl3_cfg     : Configuration data for scaler
- */
-struct sde_hw_ds_cfg {
-	u32 idx;
-	int flags;
-	u32 lm_width;
-	u32 lm_height;
-	struct sde_hw_scaler3_cfg scl3_cfg;
-};
-
-/**
- * struct sde_hw_ds_ops - interface to the destination scaler
- * hardware driver functions
- * Caller must call the init function to get the ds context for each ds
- * Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_ds_ops {
-	/**
-	 * setup_opmode - destination scaler op mode setup
-	 * @hw_ds   : Pointer to ds context
-	 * @op_mode : Op mode configuration
-	 */
-	void (*setup_opmode)(struct sde_hw_ds *hw_ds,
-				u32 op_mode);
-
-	/**
-	 * setup_scaler - destination scaler block setup
-	 * @hw_ds          : Pointer to ds context
-	 * @scaler_cfg     : Pointer to scaler data
-	 * @scaler_lut_cfg : Pointer to scaler lut
-	 */
-	void (*setup_scaler)(struct sde_hw_ds *hw_ds,
-				void *scaler_cfg,
-				void *scaler_lut_cfg);
-
-	/**
-	 * get_scaler_ver - get scaler h/w version
-	 * @ctx: Pointer to ds structure
-	 */
-	u32 (*get_scaler_ver)(struct sde_hw_ds *ctx);
-
-};
-
-/**
- * struct sde_hw_ds - destination scaler description
- * @base : Hardware block base structure
- * @hw   : Block hardware details
- * @idx  : Destination scaler index
- * @scl  : Pointer to
- *          - scaler offset relative to top offset
- *          - capabilities
- * @ops  : Pointer to operations for this DS
- */
-struct sde_hw_ds {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-	enum sde_ds idx;
-	struct sde_ds_cfg *scl;
-	struct sde_hw_ds_ops ops;
-};
-
-/**
- * sde_hw_ds_init - initializes the destination scaler
- * hw driver object and should be called once before
- * accessing every destination scaler
- * @idx : DS index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @m   : MDSS catalog information
- * @Return: pointer to structure or ERR_PTR
- */
-struct sde_hw_ds *sde_hw_ds_init(enum sde_ds idx,
-			void __iomem *addr,
-			struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_ds_destroy - destroys destination scaler
- * driver context
- * @hw_ds:   Pointer to DS context
- */
-void sde_hw_ds_destroy(struct sde_hw_ds *hw_ds);
-
-#endif /*_SDE_HW_DS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
deleted file mode 100644
index 57de131..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_dsc.h"
-#include "sde_hw_pingpong.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define DSC_COMMON_MODE	                0x000
-#define DSC_ENC                         0X004
-#define DSC_PICTURE                     0x008
-#define DSC_SLICE                       0x00C
-#define DSC_CHUNK_SIZE                  0x010
-#define DSC_DELAY                       0x014
-#define DSC_SCALE_INITIAL               0x018
-#define DSC_SCALE_DEC_INTERVAL          0x01C
-#define DSC_SCALE_INC_INTERVAL          0x020
-#define DSC_FIRST_LINE_BPG_OFFSET       0x024
-#define DSC_BPG_OFFSET                  0x028
-#define DSC_DSC_OFFSET                  0x02C
-#define DSC_FLATNESS                    0x030
-#define DSC_RC_MODEL_SIZE               0x034
-#define DSC_RC                          0x038
-#define DSC_RC_BUF_THRESH               0x03C
-#define DSC_RANGE_MIN_QP                0x074
-#define DSC_RANGE_MAX_QP                0x0B0
-#define DSC_RANGE_BPG_OFFSET            0x0EC
-
-#define DSC_CTL_BLOCK_SIZE              0x300
-#define DSC_CTL(m)     \
-	(((m == DSC_NONE) || (m >= DSC_MAX)) ? 0 : (0x1800 - 0x3FC * (m - 1)))
-
-static void sde_hw_dsc_disable(struct sde_hw_dsc *dsc)
-{
-	struct sde_hw_blk_reg_map *dsc_c = &dsc->hw;
-
-	SDE_REG_WRITE(dsc_c, DSC_COMMON_MODE, 0);
-}
-
-static void sde_hw_dsc_config(struct sde_hw_dsc *hw_dsc,
-		struct msm_display_dsc_info *dsc, u32 mode,
-		bool ich_reset_override)
-{
-	u32 data;
-	int bpp, lsb;
-	u32 initial_lines = dsc->initial_lines;
-	struct sde_hw_blk_reg_map *dsc_c = &hw_dsc->hw;
-
-	SDE_REG_WRITE(dsc_c, DSC_COMMON_MODE, mode);
-
-	data = 0;
-	if (ich_reset_override)
-		data = 3 << 28;
-
-	data |= (initial_lines << 20);
-	data |= (dsc->slice_last_group_size << 18);
-	/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
-	lsb = dsc->bpp % 4;
-	bpp = dsc->bpp / 4;
-	bpp *= 4;	/* either 8 or 12 */
-	bpp <<= 4;
-	bpp |= lsb;
-	data |= (bpp << 8);
-	data |= (dsc->block_pred_enable << 7);
-	data |= (dsc->line_buf_depth << 3);
-	data |= (dsc->enable_422 << 2);
-	data |= (dsc->convert_rgb << 1);
-	data |= dsc->input_10_bits;
-
-	SDE_REG_WRITE(dsc_c, DSC_ENC, data);
-
-	data = dsc->pic_width << 16;
-	data |= dsc->pic_height;
-	SDE_REG_WRITE(dsc_c, DSC_PICTURE, data);
-
-	data = dsc->slice_width << 16;
-	data |= dsc->slice_height;
-	SDE_REG_WRITE(dsc_c, DSC_SLICE, data);
-
-	data = dsc->chunk_size << 16;
-	SDE_REG_WRITE(dsc_c, DSC_CHUNK_SIZE, data);
-
-	data = dsc->initial_dec_delay << 16;
-	data |= dsc->initial_xmit_delay;
-	SDE_REG_WRITE(dsc_c, DSC_DELAY, data);
-
-	data = dsc->initial_scale_value;
-	SDE_REG_WRITE(dsc_c, DSC_SCALE_INITIAL, data);
-
-	data = dsc->scale_decrement_interval;
-	SDE_REG_WRITE(dsc_c, DSC_SCALE_DEC_INTERVAL, data);
-
-	data = dsc->scale_increment_interval;
-	SDE_REG_WRITE(dsc_c, DSC_SCALE_INC_INTERVAL, data);
-
-	data = dsc->first_line_bpg_offset;
-	SDE_REG_WRITE(dsc_c, DSC_FIRST_LINE_BPG_OFFSET, data);
-
-	data = dsc->nfl_bpg_offset << 16;
-	data |= dsc->slice_bpg_offset;
-	SDE_REG_WRITE(dsc_c, DSC_BPG_OFFSET, data);
-
-	data = dsc->initial_offset << 16;
-	data |= dsc->final_offset;
-	SDE_REG_WRITE(dsc_c, DSC_DSC_OFFSET, data);
-
-	data = dsc->det_thresh_flatness << 10;
-	data |= dsc->max_qp_flatness << 5;
-	data |= dsc->min_qp_flatness;
-	SDE_REG_WRITE(dsc_c, DSC_FLATNESS, data);
-
-	data = dsc->rc_model_size;
-	SDE_REG_WRITE(dsc_c, DSC_RC_MODEL_SIZE, data);
-
-	data = dsc->tgt_offset_lo << 18;
-	data |= dsc->tgt_offset_hi << 14;
-	data |= dsc->quant_incr_limit1 << 9;
-	data |= dsc->quant_incr_limit0 << 4;
-	data |= dsc->edge_factor;
-	SDE_REG_WRITE(dsc_c, DSC_RC, data);
-}
-
-static void sde_hw_dsc_config_thresh(struct sde_hw_dsc *hw_dsc,
-		struct msm_display_dsc_info *dsc)
-{
-	u32 *lp;
-	char *cp;
-	int i;
-
-	struct sde_hw_blk_reg_map *dsc_c = &hw_dsc->hw;
-	u32 off = 0x0;
-
-	lp = dsc->buf_thresh;
-	off = DSC_RC_BUF_THRESH;
-	for (i = 0; i < 14; i++) {
-		SDE_REG_WRITE(dsc_c, off, *lp++);
-		off += 4;
-	}
-
-	cp = dsc->range_min_qp;
-	off = DSC_RANGE_MIN_QP;
-	for (i = 0; i < 15; i++) {
-		SDE_REG_WRITE(dsc_c, off, *cp++);
-		off += 4;
-	}
-
-	cp = dsc->range_max_qp;
-	off = DSC_RANGE_MAX_QP;
-	for (i = 0; i < 15; i++) {
-		SDE_REG_WRITE(dsc_c, off, *cp++);
-		off += 4;
-	}
-
-	cp = dsc->range_bpg_offset;
-	off = DSC_RANGE_BPG_OFFSET;
-	for (i = 0; i < 15; i++) {
-		SDE_REG_WRITE(dsc_c, off, *cp++);
-		off += 4;
-	}
-}
-
-static void sde_hw_dsc_bind_pingpong_blk(
-		struct sde_hw_dsc *hw_dsc,
-		bool enable,
-		const enum sde_pingpong pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	int mux_cfg = 0xF;
-	u32 dsc_ctl_offset;
-
-	if (!hw_dsc)
-		return;
-
-	c = &hw_dsc->hw;
-	dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
-
-	if (enable)
-		mux_cfg = (pp - PINGPONG_0) & 0x7;
-
-	if (dsc_ctl_offset)
-		SDE_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
-}
-
-
-static struct sde_dsc_cfg *_dsc_offset(enum sde_dsc dsc,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->dsc_count; i++) {
-		if (dsc == m->dsc[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->dsc[i].base;
-			b->length = m->dsc[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_DSC;
-			return &m->dsc[i];
-		}
-	}
-
-	return NULL;
-}
-
-static void _setup_dsc_ops(struct sde_hw_dsc_ops *ops,
-		unsigned long features)
-{
-	ops->dsc_disable = sde_hw_dsc_disable;
-	ops->dsc_config = sde_hw_dsc_config;
-	ops->dsc_config_thresh = sde_hw_dsc_config_thresh;
-	if (test_bit(SDE_DSC_OUTPUT_CTRL, &features))
-		ops->bind_pingpong_blk = sde_hw_dsc_bind_pingpong_blk;
-};
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_dsc *sde_hw_dsc_init(enum sde_dsc idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_dsc *c;
-	struct sde_dsc_cfg *cfg;
-	u32 dsc_ctl_offset;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _dsc_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	c->idx = idx;
-	c->caps = cfg;
-	_setup_dsc_ops(&c->ops, c->caps->features);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_DSC, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-		c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	if ((c->idx == DSC_0) &&
-			test_bit(SDE_DSC_OUTPUT_CTRL, &cfg->features)) {
-		dsc_ctl_offset = DSC_CTL(c->idx);
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "dsc_ctl",
-			c->hw.blk_off + dsc_ctl_offset,
-			c->hw.blk_off + dsc_ctl_offset + DSC_CTL_BLOCK_SIZE,
-			c->hw.xin_id);
-	}
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_dsc_destroy(struct sde_hw_dsc *dsc)
-{
-	if (dsc)
-		sde_hw_blk_destroy(&dsc->base);
-	kfree(dsc);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.h b/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
deleted file mode 100644
index 2e34c6b..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_DSC_H
-#define _SDE_HW_DSC_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_dsc;
-struct msm_display_dsc_info;
-
-#define DSC_MODE_SPLIT_PANEL            BIT(0)
-#define DSC_MODE_MULTIPLEX              BIT(1)
-#define DSC_MODE_VIDEO                  BIT(2)
-
-/**
- * struct sde_hw_dsc_ops - interface to the dsc hardware driver functions
- * Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_dsc_ops {
-	/**
-	 * dsc_disable - disable dsc
-	 * @hw_dsc: Pointer to dsc context
-	 */
-	void (*dsc_disable)(struct sde_hw_dsc *hw_dsc);
-
-	/**
-	 * dsc_config - configures dsc encoder
-	 * @hw_dsc: Pointer to dsc context
-	 * @dsc: panel dsc parameters
-	 * @mode: dsc topology mode to be set
-	 * @ich_reset_override: option to reset ich
-	 */
-	void (*dsc_config)(struct sde_hw_dsc *hw_dsc,
-			struct msm_display_dsc_info *dsc,
-			u32 mode, bool ich_reset_override);
-
-	/**
-	 * dsc_config_thresh - programs panel thresholds
-	 * @hw_dsc: Pointer to dsc context
-	 * @dsc: panel dsc parameters
-	 */
-	void (*dsc_config_thresh)(struct sde_hw_dsc *hw_dsc,
-			struct msm_display_dsc_info *dsc);
-
-	/**
-	 * bind_pingpong_blk - enable/disable the connection with pp
-	 * @hw_dsc: Pointer to dsc context
-	 * @enable: enable/disable connection
-	 * @pp: pingpong blk id
-	 */
-	void (*bind_pingpong_blk)(struct sde_hw_dsc *hw_dsc,
-			bool enable,
-			const enum sde_pingpong pp);
-};
-
-struct sde_hw_dsc {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* dsc */
-	enum sde_dsc idx;
-	const struct sde_dsc_cfg *caps;
-
-	/* ops */
-	struct sde_hw_dsc_ops ops;
-};
-
-/**
- * sde_hw_dsc - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_dsc *to_sde_hw_dsc(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_dsc, base);
-}
-
-/**
- * sde_hw_dsc_init - initializes the dsc block for the passed
- *                   dsc idx.
- * @idx:  DSC index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @m:    Pointer to mdss catalog data
- * Returns: Error code or allocated sde_hw_dsc context
- */
-struct sde_hw_dsc *sde_hw_dsc_init(enum sde_dsc idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_dsc_destroy - destroys dsc driver context
- *                      should be called to free the context
- * @dsc:   Pointer to dsc driver context returned by sde_hw_dsc_init
- */
-void sde_hw_dsc_destroy(struct sde_hw_dsc *dsc);
-
-#endif /*_SDE_HW_DSC_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
deleted file mode 100644
index 4f4fe59..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ /dev/null
@@ -1,330 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-#include <drm/msm_drm_pp.h>
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_dspp.h"
-#include "sde_hw_color_processing.h"
-#include "sde_dbg.h"
-#include "sde_ad4.h"
-#include "sde_kms.h"
-
-static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	if (!m || !addr || !b)
-		return ERR_PTR(-EINVAL);
-
-	for (i = 0; i < m->dspp_count; i++) {
-		if (dspp == m->dspp[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->dspp[i].base;
-			b->length = m->dspp[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_DSPP;
-			return &m->dspp[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static void dspp_igc(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->igc.version == SDE_COLOR_PROCESS_VER(0x3, 0x1)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_IGC, c->idx);
-		if (!ret)
-			c->ops.setup_igc = reg_dmav1_setup_dspp_igcv31;
-		else
-			c->ops.setup_igc = sde_setup_dspp_igcv3;
-	}
-}
-
-static void dspp_pcc(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->pcc.version == (SDE_COLOR_PROCESS_VER(0x1, 0x7)))
-		c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
-	else if (c->cap->sblk->pcc.version ==
-			(SDE_COLOR_PROCESS_VER(0x4, 0x0))) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_PCC, c->idx);
-		if (!ret)
-			c->ops.setup_pcc = reg_dmav1_setup_dspp_pccv4;
-		else
-			c->ops.setup_pcc = sde_setup_dspp_pccv4;
-	}
-}
-
-static void dspp_gc(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->gc.version == SDE_COLOR_PROCESS_VER(0x1, 8)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_GC, c->idx);
-		if (!ret)
-			c->ops.setup_gc = reg_dmav1_setup_dspp_gcv18;
-		/**
-		 * programming for v18 through ahb is same as v17,
-		 * hence assign v17 function
-		 */
-		else
-			c->ops.setup_gc = sde_setup_dspp_gc_v1_7;
-	}
-}
-
-static void dspp_hsic(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->hsic.version == SDE_COLOR_PROCESS_VER(0x1, 0x7)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_HSIC, c->idx);
-		if (!ret)
-			c->ops.setup_pa_hsic = reg_dmav1_setup_dspp_pa_hsicv17;
-		else
-			c->ops.setup_pa_hsic = sde_setup_dspp_pa_hsic_v17;
-	}
-}
-
-static void dspp_memcolor(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->memcolor.version == SDE_COLOR_PROCESS_VER(0x1, 0x7)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_MEMCOLOR, c->idx);
-		if (!ret) {
-			c->ops.setup_pa_memcol_skin =
-				reg_dmav1_setup_dspp_memcol_skinv17;
-			c->ops.setup_pa_memcol_sky =
-				reg_dmav1_setup_dspp_memcol_skyv17;
-			c->ops.setup_pa_memcol_foliage =
-				reg_dmav1_setup_dspp_memcol_folv17;
-			c->ops.setup_pa_memcol_prot =
-				reg_dmav1_setup_dspp_memcol_protv17;
-		} else {
-			c->ops.setup_pa_memcol_skin =
-				sde_setup_dspp_memcol_skin_v17;
-			c->ops.setup_pa_memcol_sky =
-				sde_setup_dspp_memcol_sky_v17;
-			c->ops.setup_pa_memcol_foliage =
-				sde_setup_dspp_memcol_foliage_v17;
-			c->ops.setup_pa_memcol_prot =
-				sde_setup_dspp_memcol_prot_v17;
-		}
-	}
-}
-
-static void dspp_sixzone(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->sixzone.version == SDE_COLOR_PROCESS_VER(0x1, 0x7)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_SIXZONE, c->idx);
-		if (!ret)
-			c->ops.setup_sixzone = reg_dmav1_setup_dspp_sixzonev17;
-		else
-			c->ops.setup_sixzone = sde_setup_dspp_sixzone_v17;
-	}
-}
-
-static void dspp_gamut(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->gamut.version == SDE_COLOR_PROCESS_VER(0x4, 0)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_GAMUT, c->idx);
-		if (!ret)
-			c->ops.setup_gamut = reg_dmav1_setup_dspp_3d_gamutv4;
-		else
-			c->ops.setup_gamut = sde_setup_dspp_3d_gamutv4;
-	} else if (c->cap->sblk->gamut.version ==
-			SDE_COLOR_PROCESS_VER(0x4, 1)) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_GAMUT, c->idx);
-		if (!ret)
-			c->ops.setup_gamut = reg_dmav1_setup_dspp_3d_gamutv41;
-		else
-			c->ops.setup_gamut = sde_setup_dspp_3d_gamutv41;
-	}
-}
-
-static void dspp_dither(struct sde_hw_dspp *c)
-{
-	if (c->cap->sblk->dither.version == SDE_COLOR_PROCESS_VER(0x1, 0x7))
-		c->ops.setup_pa_dither = sde_setup_dspp_dither_v1_7;
-}
-
-static void dspp_hist(struct sde_hw_dspp *c)
-{
-	if (c->cap->sblk->hist.version == (SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
-		c->ops.setup_histogram = sde_setup_dspp_hist_v1_7;
-		c->ops.read_histogram = sde_read_dspp_hist_v1_7;
-		c->ops.lock_histogram = sde_lock_dspp_hist_v1_7;
-	}
-}
-
-static void dspp_vlut(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->vlut.version == (SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
-		c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_7;
-	} else if (c->cap->sblk->vlut.version ==
-			(SDE_COLOR_PROCESS_VER(0x1, 0x8))) {
-		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_VLUT, c->idx);
-		if (!ret)
-			c->ops.setup_vlut = reg_dmav1_setup_dspp_vlutv18;
-		else
-			c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_8;
-	}
-}
-
-static void dspp_ad(struct sde_hw_dspp *c)
-{
-	if (c->cap->sblk->ad.version == SDE_COLOR_PROCESS_VER(4, 0)) {
-		c->ops.setup_ad = sde_setup_dspp_ad4;
-		c->ops.ad_read_intr_resp = sde_read_intr_resp_ad4;
-		c->ops.validate_ad = sde_validate_dspp_ad4;
-	}
-}
-
-static void dspp_ltm(struct sde_hw_dspp *c)
-{
-	int ret = 0;
-
-	if (c->cap->sblk->ltm.version == SDE_COLOR_PROCESS_VER(0x1, 0x0)) {
-		ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_INIT, c->idx);
-		if (!ret)
-			ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_ROI, c->idx);
-		if (!ret)
-			ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_VLUT, c->idx);
-
-		if (!ret) {
-			c->ops.setup_ltm_init = reg_dmav1_setup_ltm_initv1;
-			c->ops.setup_ltm_roi = reg_dmav1_setup_ltm_roiv1;
-			c->ops.setup_ltm_vlut = reg_dmav1_setup_ltm_vlutv1;
-		} else {
-			c->ops.setup_ltm_init = NULL;
-			c->ops.setup_ltm_roi = NULL;
-			c->ops.setup_ltm_vlut = NULL;
-		}
-		c->ops.setup_ltm_thresh = sde_setup_dspp_ltm_threshv1;
-		c->ops.setup_ltm_hist_ctrl = sde_setup_dspp_ltm_hist_ctrlv1;
-		c->ops.setup_ltm_hist_buffer = sde_setup_dspp_ltm_hist_bufferv1;
-		c->ops.ltm_read_intr_status = sde_ltm_read_intr_status;
-	}
-}
-
-static void (*dspp_blocks[SDE_DSPP_MAX])(struct sde_hw_dspp *c);
-
-static void _init_dspp_ops(void)
-{
-	dspp_blocks[SDE_DSPP_IGC] = dspp_igc;
-	dspp_blocks[SDE_DSPP_PCC] = dspp_pcc;
-	dspp_blocks[SDE_DSPP_GC] = dspp_gc;
-	dspp_blocks[SDE_DSPP_HSIC] = dspp_hsic;
-	dspp_blocks[SDE_DSPP_MEMCOLOR] = dspp_memcolor;
-	dspp_blocks[SDE_DSPP_SIXZONE] = dspp_sixzone;
-	dspp_blocks[SDE_DSPP_GAMUT] = dspp_gamut;
-	dspp_blocks[SDE_DSPP_DITHER] = dspp_dither;
-	dspp_blocks[SDE_DSPP_HIST] = dspp_hist;
-	dspp_blocks[SDE_DSPP_VLUT] = dspp_vlut;
-	dspp_blocks[SDE_DSPP_AD] = dspp_ad;
-	dspp_blocks[SDE_DSPP_LTM] = dspp_ltm;
-}
-
-static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
-{
-	int i = 0;
-
-	if (!c->cap->sblk)
-		return;
-
-	for (i = 0; i < SDE_DSPP_MAX; i++) {
-		if (!test_bit(i, &features))
-			continue;
-		if (dspp_blocks[i])
-			dspp_blocks[i](c);
-	}
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
-			void __iomem *addr,
-			struct sde_mdss_cfg *m)
-{
-	struct sde_hw_dspp *c;
-	struct sde_dspp_cfg *cfg;
-	int rc;
-
-	if (!addr || !m)
-		return ERR_PTR(-EINVAL);
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _dspp_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Populate DSPP Top HW block */
-	c->hw_top.base_off = addr;
-	c->hw_top.blk_off = m->dspp_top.base;
-	c->hw_top.length = m->dspp_top.len;
-	c->hw_top.hwversion = m->hwversion;
-	c->hw_top.log_mask = SDE_DBG_MASK_DSPP;
-
-	/* Assign ops */
-	c->idx = idx;
-	c->cap = cfg;
-	_init_dspp_ops();
-	_setup_dspp_ops(c, c->cap->features);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_DSPP, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	if ((cfg->sblk->ltm.id == SDE_DSPP_LTM) && cfg->sblk->ltm.base) {
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "LTM",
-				c->hw.blk_off + cfg->sblk->ltm.base,
-				c->hw.blk_off + cfg->sblk->ltm.base + 0xC4,
-				c->hw.xin_id);
-	}
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp)
-{
-	if (dspp) {
-		reg_dmav1_deinit_dspp_ops(dspp->idx);
-		reg_dmav1_deinit_ltm_ops(dspp->idx);
-		sde_hw_blk_destroy(&dspp->base);
-	}
-	kfree(dspp);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
deleted file mode 100644
index 65274b3..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_DSPP_H
-#define _SDE_HW_DSPP_H
-
-#include "sde_hw_blk.h"
-
-struct sde_hw_dspp;
-
-/**
- * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions
- * Caller must call the init function to get the dspp context for each dspp
- * Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_dspp_ops {
-	/**
-	 * setup_histogram - setup dspp histogram
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * read_histogram - read dspp histogram
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * lock_histogram - lock dspp histogram buffer
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*lock_histogram)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_igc - update dspp igc
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_igc)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_hsic - setup dspp pa hsic
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_hsic)(struct sde_hw_dspp *dspp, void *cfg);
-
-	/**
-	 * setup_pcc - setup dspp pcc
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_sharpening - setup dspp sharpening
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_memcol_skin - setup dspp memcolor skin
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_memcol_skin)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_memcol_sky - setup dspp memcolor sky
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_memcol_sky)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_memcol_foliage - setup dspp memcolor foliage
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_memcol_foliage)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_memcol_prot - setup dspp memcolor protection
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_memcol_prot)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_sixzone - setup dspp six zone
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_sixzone)(struct sde_hw_dspp *dspp, void *cfg);
-
-	/**
-	 * setup_danger_safe - setup danger safe LUTS
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_pa_dither - setup dspp PA dither
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_pa_dither)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_vlut - setup dspp PA VLUT
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_gc - update dspp gc
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_gc)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_gamut - update dspp gamut
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * validate_ad - check if ad property can be set
-	 * @ctx: Pointer to dspp context
-	 * @prop: Pointer to ad property being validated
-	 */
-	int (*validate_ad)(struct sde_hw_dspp *ctx, u32 *prop);
-
-	/**
-	 * setup_ad - update the ad property
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to ad configuration
-	 */
-	void (*setup_ad)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * ad_read_intr_resp - function to get interrupt response for ad
-	 * @event: Event for which response needs to be read
-	 * @resp_in: Pointer to u32 where resp ad4 input value is dumped.
-	 * @resp_out: Pointer to u32 where resp ad4 output value is dumped.
-	 */
-	void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
-			u32 *resp_in, u32 *resp_out);
-
-	/**
-	 * setup_ltm_init - setup LTM INIT
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_ltm_init)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_ltm_roi - setup LTM ROI
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_ltm_roi)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_ltm_vlut - setup LTM VLUT
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_ltm_vlut)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * setup_ltm_hist_ctrl - setup LTM histogram control
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 * @enable: feature enable/disable value
-	 * @iova: aligned hist buffer address
-	 */
-	void (*setup_ltm_hist_ctrl)(struct sde_hw_dspp *ctx, void *cfg,
-				    bool enable, u64 iova);
-
-	/**
-	 * setup_ltm_hist_buffer - setup LTM histogram buffer
-	 * @ctx: Pointer to dspp context
-	 * @iova: aligned hist buffer address
-	 */
-	void (*setup_ltm_hist_buffer)(struct sde_hw_dspp *ctx, u64 iova);
-
-	/**
-	 * setup_ltm_thresh - setup LTM histogram thresh
-	 * @ctx: Pointer to dspp context
-	 * @cfg: Pointer to configuration
-	 */
-	void (*setup_ltm_thresh)(struct sde_hw_dspp *ctx, void *cfg);
-
-	/**
-	 * ltm_read_intr_status - function to read ltm interrupt status
-	 * @ctx: Pointer to dspp context
-	 * @status: Pointer to u32 where ltm status value is dumped.
-	 */
-	void (*ltm_read_intr_status)(struct sde_hw_dspp *ctx, u32 *status);
-};
-
-/**
- * struct sde_hw_dspp - dspp description
- * @base: Hardware block base structure
- * @hw: Block hardware details
- * @hw_top: Block hardware top details
- * @idx: DSPP index
- * @cap: Pointer to layer_cfg
- * @ops: Pointer to operations possible for this DSPP
- */
-struct sde_hw_dspp {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* dspp top */
-	struct sde_hw_blk_reg_map hw_top;
-
-	/* dspp */
-	enum sde_dspp idx;
-	const struct sde_dspp_cfg *cap;
-
-	/* Ops */
-	struct sde_hw_dspp_ops ops;
-};
-
-/**
- * sde_hw_dspp - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_dspp *to_sde_hw_dspp(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_dspp, base);
-}
-
-/**
- * sde_hw_dspp_init - initializes the dspp hw driver object.
- * should be called once before accessing every dspp.
- * @idx:  DSPP index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @Return: pointer to structure or ERR_PTR
- */
-struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
-			void __iomem *addr,
-			struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_dspp_destroy(): Destroys DSPP driver context
- * @dspp:   Pointer to DSPP driver context
- */
-void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp);
-
-#endif /*_SDE_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
deleted file mode 100644
index c3aff09..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ /dev/null
@@ -1,1497 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/bitops.h>
-#include <linux/slab.h>
-
-#include "sde_kms.h"
-#include "sde_hw_interrupts.h"
-#include "sde_hw_util.h"
-#include "sde_hw_mdss.h"
-
-/**
- * Register offsets in MDSS register file for the interrupt registers
- * w.r.t. to the MDSS base
- */
-#define HW_INTR_STATUS			0x0010
-#define MDP_SSPP_TOP0_OFF		0x1000
-#define MDP_INTF_0_OFF			0x6B000
-#define MDP_INTF_1_OFF			0x6B800
-#define MDP_INTF_2_OFF			0x6C000
-#define MDP_INTF_3_OFF			0x6C800
-#define MDP_INTF_4_OFF			0x6D000
-#define MDP_AD4_0_OFF			0x7D000
-#define MDP_AD4_1_OFF			0x7E000
-#define MDP_AD4_INTR_EN_OFF		0x41c
-#define MDP_AD4_INTR_CLEAR_OFF		0x424
-#define MDP_AD4_INTR_STATUS_OFF		0x420
-#define MDP_INTF_TEAR_INTF_1_IRQ_OFF	0x6E800
-#define MDP_INTF_TEAR_INTF_2_IRQ_OFF	0x6E900
-#define MDP_INTF_TEAR_INTR_EN_OFF	0x0
-#define MDP_INTF_TEAR_INTR_STATUS_OFF   0x4
-#define MDP_INTF_TEAR_INTR_CLEAR_OFF    0x8
-#define MDP_LTM_0_OFF			0x7F000
-#define MDP_LTM_1_OFF			0x7F100
-#define MDP_LTM_INTR_EN_OFF		0x50
-#define MDP_LTM_INTR_STATUS_OFF		0x54
-#define MDP_LTM_INTR_CLEAR_OFF		0x58
-
-/**
- * WB interrupt status bit definitions
- */
-#define SDE_INTR_WB_0_DONE BIT(0)
-#define SDE_INTR_WB_1_DONE BIT(1)
-#define SDE_INTR_WB_2_DONE BIT(4)
-
-/**
- * WDOG timer interrupt status bit definitions
- */
-#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
-#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
-#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
-#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
-#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
-
-/**
- * Pingpong interrupt status bit definitions
- */
-#define SDE_INTR_PING_PONG_0_DONE BIT(8)
-#define SDE_INTR_PING_PONG_1_DONE BIT(9)
-#define SDE_INTR_PING_PONG_2_DONE BIT(10)
-#define SDE_INTR_PING_PONG_3_DONE BIT(11)
-#define SDE_INTR_PING_PONG_4_DONE BIT(30)
-#define SDE_INTR_PING_PONG_5_DONE BIT(31)
-#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
-#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
-#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
-#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
-#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
-#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
-#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
-#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
-#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
-#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
-#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
-#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
-
-/**
- * Interface interrupt status bit definitions
- */
-#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
-#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
-#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
-#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
-#define SDE_INTR_INTF_0_VSYNC BIT(25)
-#define SDE_INTR_INTF_1_VSYNC BIT(27)
-#define SDE_INTR_INTF_2_VSYNC BIT(29)
-#define SDE_INTR_INTF_3_VSYNC BIT(31)
-
-/**
- * Pingpong Secondary interrupt status bit definitions
- */
-#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
-#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
-#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
-#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
-#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
-
-/**
- * Pingpong TEAR detection interrupt status bit definitions
- */
-#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
-#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
-#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
-#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
-
-/**
- * Pingpong TE detection interrupt status bit definitions
- */
-#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
-#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
-#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
-#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
-
-/**
- * Ctl start interrupt status bit definitions
- */
-#define SDE_INTR_CTL_0_START BIT(9)
-#define SDE_INTR_CTL_1_START BIT(10)
-#define SDE_INTR_CTL_2_START BIT(11)
-#define SDE_INTR_CTL_3_START BIT(12)
-#define SDE_INTR_CTL_4_START BIT(13)
-#define SDE_INTR_CTL_5_START BIT(23)
-
-/**
- * Concurrent WB overflow interrupt status bit definitions
- */
-#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
-#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
-#define SDE_INTR_CWB_4_OVERFLOW BIT(20)
-#define SDE_INTR_CWB_5_OVERFLOW BIT(21)
-
-/**
- * Histogram VIG done interrupt status bit definitions
- */
-#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
-#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
-#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
-#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
-
-/**
- * Histogram VIG reset Sequence done interrupt status bit definitions
- */
-#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
-#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
-#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
-#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
-
-/**
- * Histogram DSPP done interrupt status bit definitions
- */
-#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
-#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
-#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
-#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
-
-/**
- * Histogram DSPP reset Sequence done interrupt status bit definitions
- */
-#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
-#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
-#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
-#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
-
-/**
- * INTF interrupt status bit definitions
- */
-#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
-#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
-#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
-#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
-#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
-#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
-#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
-#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
-#define SDE_INTR_PROG_LINE BIT(8)
-
-/**
- * AD4 interrupt status bit definitions
- */
-#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
-#define SDE_INTR_DARKENH_UPDATED BIT(3)
-#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
-#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
-#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
-
-/**
- * INTF Tear IRQ register bit definitions
- */
-#define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
-#define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
-#define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
-#define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
-#define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
-
-/**
- * LTM interrupt status bit definitions
- */
-#define SDE_INTR_LTM_STATS_DONE BIT(0)
-#define SDE_INTR_LTM_STATS_WB_PB BIT(5)
-
-/**
- * struct sde_intr_reg - array of SDE register sets
- * @clr_off:	offset to CLEAR reg
- * @en_off:	offset to ENABLE reg
- * @status_off:	offset to STATUS reg
- * @sde_irq_idx;	global index in the 'sde_irq_map' table,
- *		to know which interrupt type, instance, mask, etc. to use
- * @map_idx_start   first offset in the sde_irq_map table
- * @map_idx_end    last offset in the sde_irq_map table
- */
-struct sde_intr_reg {
-	u32 clr_off;
-	u32 en_off;
-	u32 status_off;
-	int sde_irq_idx;
-	u32 map_idx_start;
-	u32 map_idx_end;
-};
-
-/**
- * struct sde_irq_type - maps each irq with i/f
- * @intr_type:		type of interrupt listed in sde_intr_type
- * @instance_idx:	instance index of the associated HW block in SDE
- * @irq_mask:		corresponding bit in the interrupt status reg
- * @reg_idx:		index in the 'sde_irq_tbl' table, to know which
- *		registers offsets to use. -1 = invalid offset
- */
-struct sde_irq_type {
-	u32 intr_type;
-	u32 instance_idx;
-	u32 irq_mask;
-	int reg_idx;
-};
-
-/**
- * IRQ mapping tables - use for lookup an irq_idx in this table that have
- *                     a matching interface type and instance index.
- * Each of these tables are copied to a dynamically allocated
- * table, that will be used to service each of the irqs
- */
-static struct sde_irq_type sde_irq_intr_map[] = {
-
-	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
-	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
-
-	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_DONE, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_RD_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_RD_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_RD_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_RD_PTR, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_WR_PTR, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
-
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
-
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
-};
-
-static struct sde_irq_type sde_irq_intr2_map[] = {
-
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_RD_PTR, -1},
-
-	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
-		SDE_INTR_CTL_0_START, -1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
-		SDE_INTR_CTL_1_START, -1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
-		SDE_INTR_CTL_2_START, -1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
-		SDE_INTR_CTL_3_START, -1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
-		SDE_INTR_CTL_4_START, -1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
-		SDE_INTR_CTL_5_START, -1},
-
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
-
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
-
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
-		SDE_INTR_PING_PONG_4_DONE, -1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
-		SDE_INTR_PING_PONG_5_DONE, -1},
-};
-
-static struct sde_irq_type sde_irq_hist_map[] = {
-
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
-		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
-
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
-		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
-
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
-		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
-		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
-
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
-		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
-
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
-		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
-
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
-		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
-		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
-};
-
-static struct sde_irq_type sde_irq_intf0_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_inf1_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_intf2_map[] = {
-
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_intf3_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_inf4_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_ad4_0_map[] = {
-
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
-};
-
-static struct sde_irq_type sde_irq_ad4_1_map[] = {
-
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
-};
-
-static struct sde_irq_type sde_irq_intf1_te_map[] = {
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
-		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_RD_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
-		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
-};
-
-static struct sde_irq_type sde_irq_intf2_te_map[] = {
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
-		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
-		SDE_INTR_INTF_TEAR_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
-		SDE_INTR_INTF_TEAR_RD_PTR, -1},
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
-		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
-};
-
-static struct sde_irq_type sde_irq_ltm_0_map[] = {
-
-	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_0, SDE_INTR_LTM_STATS_DONE, -1},
-	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_0, SDE_INTR_LTM_STATS_WB_PB, -1},
-};
-
-static struct sde_irq_type sde_irq_ltm_1_map[] = {
-
-	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_1, SDE_INTR_LTM_STATS_DONE, -1},
-	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_1, SDE_INTR_LTM_STATS_WB_PB, -1},
-};
-
-static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
-	enum sde_intr_type intr_type, u32 instance_idx)
-{
-	int i;
-
-	for (i = 0; i < intr->sde_irq_map_size; i++) {
-		if (intr_type == intr->sde_irq_map[i].intr_type &&
-			instance_idx == intr->sde_irq_map[i].instance_idx)
-			return i;
-	}
-
-	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
-			intr_type, instance_idx);
-	return -EINVAL;
-}
-
-static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
-		uint32_t mask)
-{
-	if (!intr)
-		return;
-
-	SDE_REG_WRITE(&intr->hw, reg_off, mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
-static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
-		void (*cbfunc)(void *, int),
-		void *arg)
-{
-	int reg_idx;
-	int irq_idx;
-	int start_idx;
-	int end_idx;
-	u32 irq_status;
-	unsigned long irq_flags;
-	int sde_irq_idx;
-
-	if (!intr)
-		return;
-
-	/*
-	 * The dispatcher will save the IRQ status before calling here.
-	 * Now need to go through each IRQ status and find matching
-	 * irq lookup index.
-	 */
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
-		irq_status = intr->save_irq_status[reg_idx];
-
-		/* get the global offset in 'sde_irq_map' */
-		sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
-		if (sde_irq_idx < 0)
-			continue;
-
-		/*
-		 * Each Interrupt register has dynamic range of indexes,
-		 * initialized during hw_intr_init when sde_irq_tbl is created.
-		 */
-		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
-		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;
-
-		if (start_idx >= intr->sde_irq_map_size ||
-				end_idx > intr->sde_irq_map_size)
-			continue;
-
-		/*
-		 * Search through matching intr status from irq map.
-		 * start_idx and end_idx defined the search range in
-		 * the sde_irq_map.
-		 */
-		for (irq_idx = start_idx;
-				(irq_idx < end_idx) && irq_status;
-				irq_idx++)
-			if ((irq_status &
-				intr->sde_irq_map[irq_idx].irq_mask) &&
-				(intr->sde_irq_map[irq_idx].reg_idx ==
-				 reg_idx)) {
-				/*
-				 * Once a match on irq mask, perform a callback
-				 * to the given cbfunc. cbfunc will take care
-				 * the interrupt status clearing. If cbfunc is
-				 * not provided, then the interrupt clearing
-				 * is here.
-				 */
-				if (cbfunc)
-					cbfunc(arg, irq_idx);
-				else
-					intr->ops.clear_intr_status_nolock(
-							intr, irq_idx);
-
-				/*
-				 * When callback finish, clear the irq_status
-				 * with the matching mask. Once irq_status
-				 * is all cleared, the search can be stopped.
-				 */
-				irq_status &=
-					~intr->sde_irq_map[irq_idx].irq_mask;
-			}
-	}
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
-{
-	int reg_idx;
-	const struct sde_intr_reg *reg;
-	const struct sde_irq_type *irq;
-	const char *dbgstr = NULL;
-	uint32_t cache_irq_mask;
-
-	if (!intr)
-		return -EINVAL;
-
-	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	irq = &intr->sde_irq_map[irq_idx];
-	reg_idx = irq->reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return -EINVAL;
-	}
-
-	reg = &intr->sde_irq_tbl[reg_idx];
-
-	cache_irq_mask = intr->cache_irq_mask[reg_idx];
-	if (cache_irq_mask & irq->irq_mask) {
-		dbgstr = "SDE IRQ already set:";
-	} else {
-		dbgstr = "SDE IRQ enabled:";
-
-		cache_irq_mask |= irq->irq_mask;
-		/* Cleaning any pending interrupt */
-		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
-		/* Enabling interrupts with the new mask */
-		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
-
-		/* ensure register write goes through */
-		wmb();
-
-		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
-	}
-
-	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
-			irq->irq_mask, cache_irq_mask);
-
-	return 0;
-}
-
-static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
-{
-	int reg_idx;
-	const struct sde_intr_reg *reg;
-	const struct sde_irq_type *irq;
-	const char *dbgstr = NULL;
-	uint32_t cache_irq_mask;
-
-	if (!intr)
-		return -EINVAL;
-
-	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	irq = &intr->sde_irq_map[irq_idx];
-	reg_idx = irq->reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return -EINVAL;
-	}
-
-	reg = &intr->sde_irq_tbl[reg_idx];
-
-	cache_irq_mask = intr->cache_irq_mask[reg_idx];
-	if ((cache_irq_mask & irq->irq_mask) == 0) {
-		dbgstr = "SDE IRQ is already cleared:";
-	} else {
-		dbgstr = "SDE IRQ mask disable:";
-
-		cache_irq_mask &= ~irq->irq_mask;
-		/* Disable interrupts based on the new mask */
-		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
-		/* Cleaning any pending interrupt */
-		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
-
-		/* ensure register write goes through */
-		wmb();
-
-		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
-	}
-
-	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
-			irq->irq_mask, cache_irq_mask);
-
-	return 0;
-}
-
-static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
-{
-	int i;
-
-	if (!intr)
-		return -EINVAL;
-
-	for (i = 0; i < intr->sde_irq_size; i++)
-		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
-				0xffffffff);
-
-	/* ensure register writes go through */
-	wmb();
-
-	return 0;
-}
-
-static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
-{
-	int i;
-
-	if (!intr)
-		return -EINVAL;
-
-	for (i = 0; i < intr->sde_irq_size; i++)
-		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
-				0x00000000);
-
-	/* ensure register writes go through */
-	wmb();
-
-	return 0;
-}
-
-static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
-		uint32_t *mask)
-{
-	if (!intr || !mask)
-		return -EINVAL;
-
-	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
-		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
-
-	return 0;
-}
-
-static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
-		uint32_t *sources)
-{
-	if (!intr || !sources)
-		return -EINVAL;
-
-	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
-
-	return 0;
-}
-
-static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
-{
-	int i;
-	u32 enable_mask;
-	unsigned long irq_flags;
-
-	if (!intr)
-		return;
-
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	for (i = 0; i < intr->sde_irq_size; i++) {
-		/* Read interrupt status */
-		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
-				intr->sde_irq_tbl[i].status_off);
-
-		/* Read enable mask */
-		enable_mask = SDE_REG_READ(&intr->hw,
-				intr->sde_irq_tbl[i].en_off);
-
-		/* and clear the interrupt */
-		if (intr->save_irq_status[i])
-			SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
-					intr->save_irq_status[i]);
-
-		/* Finally update IRQ status based on enable mask */
-		intr->save_irq_status[i] &= enable_mask;
-	}
-
-	/* ensure register writes go through */
-	wmb();
-
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
-						 int irq_idx, u32 irq_mask)
-{
-	int reg_idx;
-
-	if (!intr)
-		return;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return;
-	}
-
-	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
-			irq_mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
-static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
-		int irq_idx)
-{
-	int reg_idx;
-
-	if (!intr)
-		return;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return;
-	}
-
-	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
-			intr->sde_irq_map[irq_idx].irq_mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
-static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
-		int irq_idx)
-{
-	unsigned long irq_flags;
-
-	if (!intr)
-		return;
-
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
-		int irq_idx, bool clear)
-{
-	int reg_idx;
-	u32 intr_status;
-
-	if (!intr)
-		return 0;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return 0;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return 0;
-	}
-
-	intr_status = SDE_REG_READ(&intr->hw,
-			intr->sde_irq_tbl[reg_idx].status_off) &
-					intr->sde_irq_map[irq_idx].irq_mask;
-	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
-				intr_status);
-
-	/* ensure register writes go through */
-	wmb();
-
-	return intr_status;
-}
-
-static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
-		int irq_idx, bool clear)
-{
-	int reg_idx;
-	unsigned long irq_flags;
-	u32 intr_status;
-
-	if (!intr)
-		return 0;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return 0;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return 0;
-	}
-
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-
-	intr_status = SDE_REG_READ(&intr->hw,
-			intr->sde_irq_tbl[reg_idx].status_off) &
-					intr->sde_irq_map[irq_idx].irq_mask;
-	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
-				intr_status);
-
-	/* ensure register writes go through */
-	wmb();
-
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-
-	return intr_status;
-}
-
-static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
-		int irq_idx, bool clear)
-{
-	int reg_idx;
-	unsigned long irq_flags;
-	u32 intr_status = 0;
-
-	if (!intr)
-		return 0;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return 0;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return 0;
-	}
-
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	intr_status = SDE_REG_READ(&intr->hw,
-			intr->sde_irq_tbl[reg_idx].status_off);
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-
-	return intr_status;
-}
-
-static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
-{
-	ops->set_mask = sde_hw_intr_set_mask;
-	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
-	ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
-	ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
-	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
-	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
-	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
-	ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
-	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
-	ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
-	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
-	ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
-	ops->clear_intr_status_force_mask =
-				sde_hw_intr_clear_intr_status_force_mask;
-	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
-	ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
-	ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
-}
-
-static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
-		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
-{
-	if (!m || !addr || !hw || m->mdp_count == 0)
-		return NULL;
-
-	hw->base_off = addr;
-	hw->blk_off = m->mdss[0].base;
-	hw->hwversion = m->hwversion;
-	return &m->mdss[0];
-}
-
-static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
-	struct sde_intr_reg *sde_irq_tbl)
-{
-	int idx;
-	struct sde_intr_reg *sde_irq;
-
-	for (idx = 0; idx < irq_tbl_size; idx++) {
-		sde_irq = &sde_irq_tbl[idx];
-
-		switch (sde_irq->sde_irq_idx) {
-		case MDSS_INTR_SSPP_TOP0_INTR:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+INTR_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+INTR_STATUS;
-			break;
-		case MDSS_INTR_SSPP_TOP0_INTR2:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+INTR2_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+INTR2_STATUS;
-			break;
-		case MDSS_INTR_SSPP_TOP0_HIST_INTR:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_0_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_0_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_0_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_0_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_1_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_1_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_1_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_1_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_2_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_2_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_2_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_2_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_3_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_3_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_3_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_3_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_4_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_4_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_4_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_4_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_AD4_0_INTR:
-			sde_irq->clr_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_AD4_1_INTR:
-			sde_irq->clr_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTF_TEAR_1_INTR:
-			sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_EN_OFF;
-			sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTF_TEAR_2_INTR:
-			sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_CLEAR_OFF;
-			sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_EN_OFF;
-			sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_LTM_0_INTR:
-			sde_irq->clr_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_LTM_1_INTR:
-			sde_irq->clr_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_STATUS_OFF;
-			break;
-		default:
-			pr_err("wrong irq idx %d\n",
-				sde_irq->sde_irq_idx);
-			return -EINVAL;
-		}
-
-		pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
-			idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
-			sde_irq->en_off, sde_irq->status_off);
-	}
-
-	return 0;
-}
-
-void sde_hw_intr_destroy(struct sde_hw_intr *intr)
-{
-	if (intr) {
-		kfree(intr->sde_irq_tbl);
-		kfree(intr->sde_irq_map);
-		kfree(intr->cache_irq_mask);
-		kfree(intr->save_irq_status);
-		kfree(intr);
-	}
-}
-
-static inline u32 _get_irq_map_size(int idx)
-{
-	u32 ret = 0;
-
-	switch (idx) {
-	case MDSS_INTR_SSPP_TOP0_INTR:
-		ret = ARRAY_SIZE(sde_irq_intr_map);
-		break;
-	case MDSS_INTR_SSPP_TOP0_INTR2:
-		ret = ARRAY_SIZE(sde_irq_intr2_map);
-		break;
-	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
-		ret = ARRAY_SIZE(sde_irq_hist_map);
-		break;
-	case MDSS_INTR_INTF_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf0_map);
-		break;
-	case MDSS_INTR_INTF_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_inf1_map);
-		break;
-	case MDSS_INTR_INTF_2_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf2_map);
-		break;
-	case MDSS_INTR_INTF_3_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf3_map);
-		break;
-	case MDSS_INTR_INTF_4_INTR:
-		ret = ARRAY_SIZE(sde_irq_inf4_map);
-		break;
-	case MDSS_INTR_AD4_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_ad4_0_map);
-		break;
-	case MDSS_INTR_AD4_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_ad4_1_map);
-		break;
-	case MDSS_INTF_TEAR_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf1_te_map);
-		break;
-	case MDSS_INTF_TEAR_2_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf2_te_map);
-		break;
-	case MDSS_INTR_LTM_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_ltm_0_map);
-		break;
-	case MDSS_INTR_LTM_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_ltm_1_map);
-		break;
-	default:
-		pr_err("invalid idx:%d\n", idx);
-	}
-
-	return ret;
-}
-
-static inline struct sde_irq_type *_get_irq_map_addr(int idx)
-{
-	struct sde_irq_type *ret = NULL;
-
-	switch (idx) {
-	case MDSS_INTR_SSPP_TOP0_INTR:
-		ret = sde_irq_intr_map;
-		break;
-	case MDSS_INTR_SSPP_TOP0_INTR2:
-		ret = sde_irq_intr2_map;
-		break;
-	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
-		ret = sde_irq_hist_map;
-		break;
-	case MDSS_INTR_INTF_0_INTR:
-		ret = sde_irq_intf0_map;
-		break;
-	case MDSS_INTR_INTF_1_INTR:
-		ret = sde_irq_inf1_map;
-		break;
-	case MDSS_INTR_INTF_2_INTR:
-		ret = sde_irq_intf2_map;
-		break;
-	case MDSS_INTR_INTF_3_INTR:
-		ret = sde_irq_intf3_map;
-		break;
-	case MDSS_INTR_INTF_4_INTR:
-		ret = sde_irq_inf4_map;
-		break;
-	case MDSS_INTR_AD4_0_INTR:
-		ret = sde_irq_ad4_0_map;
-		break;
-	case MDSS_INTR_AD4_1_INTR:
-		ret = sde_irq_ad4_1_map;
-		break;
-	case MDSS_INTF_TEAR_1_INTR:
-		ret = sde_irq_intf1_te_map;
-		break;
-	case MDSS_INTF_TEAR_2_INTR:
-		ret = sde_irq_intf2_te_map;
-		break;
-	case MDSS_INTR_LTM_0_INTR:
-		ret = sde_irq_ltm_0_map;
-		break;
-	case MDSS_INTR_LTM_1_INTR:
-		ret = sde_irq_ltm_1_map;
-		break;
-	default:
-		pr_err("invalid idx:%d\n", idx);
-	}
-
-	return ret;
-}
-
-static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
-	u32 irq_idx, u32 low_idx, u32 high_idx)
-{
-	int i, j = 0;
-	struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
-	u32 src_size = _get_irq_map_size(irq_idx);
-
-	if (!src)
-		return -EINVAL;
-
-	if (low_idx >= size || high_idx > size ||
-		(high_idx - low_idx > src_size)) {
-		pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
-			low_idx, high_idx, size, src_size);
-		return -EINVAL;
-	}
-
-	for (i = low_idx; i < high_idx; i++)
-		sde_irq_map[i] = src[j++];
-
-	return 0;
-}
-
-static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
-	struct sde_mdss_cfg *m)
-{
-	int i, idx, sde_irq_tbl_idx = 0, ret = 0;
-	u32 low_idx, high_idx;
-	u32 sde_irq_map_idx = 0;
-
-	/* Initialize the offset of the irq's in the sde_irq_map table */
-	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
-		if (test_bit(idx, m->mdss_irqs)) {
-			low_idx = sde_irq_map_idx;
-			high_idx = low_idx + _get_irq_map_size(idx);
-
-			pr_debug("init[%d]=%d low:%d high:%d\n",
-				sde_irq_tbl_idx, idx, low_idx, high_idx);
-
-			if (sde_irq_tbl_idx >= intr->sde_irq_size ||
-				sde_irq_tbl_idx < 0) {
-				ret = -EINVAL;
-				goto exit;
-			}
-
-			/* init sde_irq_map with the global irq mapping table */
-			if (_sde_copy_regs(intr->sde_irq_map,
-					intr->sde_irq_map_size,
-					idx, low_idx, high_idx)) {
-				ret = -EINVAL;
-				goto exit;
-			}
-
-			/* init irq map with its reg idx within the irq tbl */
-			for (i = low_idx; i < high_idx; i++) {
-				intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
-				pr_debug("sde_irq_map[%d].reg_idx=%d\n",
-						i, sde_irq_tbl_idx);
-			}
-
-			/* track the idx of the mapping table for this irq in
-			 * sde_irq_map, this to only access the indexes of this
-			 * irq during the irq dispatch
-			 */
-			intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
-			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
-				low_idx;
-			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
-				high_idx;
-
-			/* increment idx for both tables accordingly */
-			sde_irq_tbl_idx++;
-			sde_irq_map_idx = high_idx;
-		}
-	}
-
-	/* do this after 'sde_irq_idx is initialized in sde_irq_tbl */
-	ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
-			intr->sde_irq_tbl);
-
-exit:
-	return ret;
-}
-
-struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_intr *intr = NULL;
-	struct sde_mdss_base_cfg *cfg;
-	u32 irq_regs_count = 0;
-	u32 irq_map_count = 0;
-	u32 size;
-	int idx;
-	int ret = 0;
-
-	if (!addr || !m) {
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
-	if (!intr) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	cfg = __intr_offset(m, addr, &intr->hw);
-	if (!cfg) {
-		ret = -EINVAL;
-		goto exit;
-	}
-	__setup_intr_ops(&intr->ops);
-
-	if (MDSS_INTR_MAX >= UINT_MAX) {
-		pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
-		ret  = -EINVAL;
-		goto exit;
-	}
-
-	/* check how many irq's this target supports */
-	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
-		if (test_bit(idx, m->mdss_irqs)) {
-			irq_regs_count++;
-
-			size = _get_irq_map_size(idx);
-			if (!size || irq_map_count >= UINT_MAX - size) {
-				pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
-					idx, size, irq_map_count);
-				ret = -EINVAL;
-				goto exit;
-			}
-
-			irq_map_count += size;
-		}
-	}
-
-	if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
-		irq_map_count == 0) {
-		pr_err("wrong mapping of supported irqs 0x%lx\n",
-			m->mdss_irqs[0]);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	/* Allocate table for the irq registers */
-	intr->sde_irq_size = irq_regs_count;
-	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
-		GFP_KERNEL);
-	if (intr->sde_irq_tbl == NULL) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	/* Allocate table with the valid interrupts bits */
-	intr->sde_irq_map_size = irq_map_count;
-	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
-		GFP_KERNEL);
-	if (intr->sde_irq_map == NULL) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	/* Initialize IRQs tables */
-	ret = _sde_hw_intr_init_irq_tables(intr, m);
-	if (ret)
-		goto exit;
-
-	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
-			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
-	if (intr->cache_irq_mask == NULL) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	intr->save_irq_status = kcalloc(intr->sde_irq_size,
-			sizeof(*intr->save_irq_status), GFP_KERNEL);
-	if (intr->save_irq_status == NULL) {
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	spin_lock_init(&intr->irq_lock);
-
-exit:
-	if (ret) {
-		sde_hw_intr_destroy(intr);
-		return ERR_PTR(ret);
-	}
-
-	return intr;
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
deleted file mode 100644
index 179166f..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_INTERRUPTS_H
-#define _SDE_HW_INTERRUPTS_H
-
-#include <linux/types.h>
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_util.h"
-#include "sde_hw_mdss.h"
-
-#define IRQ_SOURCE_MDP		BIT(0)
-#define IRQ_SOURCE_DSI0		BIT(4)
-#define IRQ_SOURCE_DSI1		BIT(5)
-#define IRQ_SOURCE_HDMI		BIT(8)
-#define IRQ_SOURCE_EDP		BIT(12)
-#define IRQ_SOURCE_MHL		BIT(16)
-
-/**
- * sde_intr_type - HW Interrupt Type
- * @SDE_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
- * @SDE_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
- * @SDE_IRQ_TYPE_PING_PONG_COMP:	PingPong done
- * @SDE_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
- * @SDE_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
- * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
- * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
- * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
- * @SDE_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
- * @SDE_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
- * @SDE_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
- * @SDE_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
- * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
- * @SDE_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
- * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
- * @SDE_IRQ_TYPE_WD_TIMER:		Watchdog timer
- * @SDE_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
- * @SDE_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
- * @SDE_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
- * @SDE_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
- * @SDE_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
- * @SDE_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
- * @SDE_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
- * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
- * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
- * @SDE_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
- * @SDE_IRQ_TYPE_CTL_START:		Control start
- * @SDE_IRQ_TYPE_INTF_TEAR_RD_PTR:	INTF Tear read pointer
- * @SDE_IRQ_TYPE_INTF_TEAR_WR_PTR:	INTF Tear write pointer
- * @SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF:	INTF Tear auto refresh
- * @SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK:	INTF Tear Tear check
- * @SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK:	INTF Tear TE detection
- * @SDE_IRQ_TYPE_LTM_STATS_DONE:	LTM stats done interrupt
- * @SDE_IRQ_TYPE_LTM_STATS_WB_PB:	LTM stats WB push back interrupt
- * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion
- */
-enum sde_intr_type {
-	SDE_IRQ_TYPE_WB_ROT_COMP,
-	SDE_IRQ_TYPE_WB_WFD_COMP,
-	SDE_IRQ_TYPE_PING_PONG_COMP,
-	SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-	SDE_IRQ_TYPE_PING_PONG_WR_PTR,
-	SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
-	SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
-	SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
-	SDE_IRQ_TYPE_INTF_UNDER_RUN,
-	SDE_IRQ_TYPE_INTF_VSYNC,
-	SDE_IRQ_TYPE_CWB_OVERFLOW,
-	SDE_IRQ_TYPE_HIST_VIG_DONE,
-	SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
-	SDE_IRQ_TYPE_HIST_DSPP_DONE,
-	SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
-	SDE_IRQ_TYPE_WD_TIMER,
-	SDE_IRQ_TYPE_SFI_VIDEO_IN,
-	SDE_IRQ_TYPE_SFI_VIDEO_OUT,
-	SDE_IRQ_TYPE_SFI_CMD_0_IN,
-	SDE_IRQ_TYPE_SFI_CMD_0_OUT,
-	SDE_IRQ_TYPE_SFI_CMD_1_IN,
-	SDE_IRQ_TYPE_SFI_CMD_1_OUT,
-	SDE_IRQ_TYPE_SFI_CMD_2_IN,
-	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
-	SDE_IRQ_TYPE_PROG_LINE,
-	SDE_IRQ_TYPE_AD4_BL_DONE,
-	SDE_IRQ_TYPE_CTL_START,
-	SDE_IRQ_TYPE_INTF_TEAR_RD_PTR,
-	SDE_IRQ_TYPE_INTF_TEAR_WR_PTR,
-	SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF,
-	SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK,
-	SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK,
-	SDE_IRQ_TYPE_LTM_STATS_DONE,
-	SDE_IRQ_TYPE_LTM_STATS_WB_PB,
-	SDE_IRQ_TYPE_RESERVED,
-};
-
-struct sde_hw_intr;
-
-/**
- * Interrupt operations.
- */
-struct sde_hw_intr_ops {
-	/**
-	 * set_mask - Programs the given interrupt register with the
-	 *            given interrupt mask. Register value will get overwritten.
-	 * @intr:	HW interrupt handle
-	 * @reg_off:	MDSS HW register offset
-	 * @irqmask:	IRQ mask value
-	 */
-	void (*set_mask)(
-			struct sde_hw_intr *intr,
-			uint32_t reg,
-			uint32_t irqmask);
-
-	/**
-	 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
-	 *                 Used for all irq related ops
-	 * @intr:	HW interrupt handle
-	 * @intr_type:		Interrupt type defined in sde_intr_type
-	 * @instance_idx:	HW interrupt block instance
-	 * @return:		irq_idx or -EINVAL for lookup fail
-	 */
-	int (*irq_idx_lookup)(
-			struct sde_hw_intr *intr,
-			enum sde_intr_type intr_type,
-			u32 instance_idx);
-
-	/**
-	 * enable_irq_nolock - Enable IRQ based on lookup IRQ index without lock
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*enable_irq_nolock)(
-			struct sde_hw_intr *intr,
-			int irq_idx);
-
-	/**
-	 * disable_irq_nolock - Disable IRQ based on IRQ index without lock
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*disable_irq_nolock)(
-			struct sde_hw_intr *intr,
-			int irq_idx);
-
-	/**
-	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
-	 *                  any asserted IRQs). Useful during reset.
-	 * @intr:	HW interrupt handle
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*clear_all_irqs)(
-			struct sde_hw_intr *intr);
-
-	/**
-	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
-	 * @intr:	HW interrupt handle
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*disable_all_irqs)(
-			struct sde_hw_intr *intr);
-
-	/**
-	 * dispatch_irqs - IRQ dispatcher will call the given callback
-	 *                 function when a matching interrupt status bit is
-	 *                 found in the irq mapping table.
-	 * @intr:	HW interrupt handle
-	 * @cbfunc:	Callback function pointer
-	 * @arg:	Argument to pass back during callback
-	 */
-	void (*dispatch_irqs)(
-			struct sde_hw_intr *intr,
-			void (*cbfunc)(void *arg, int irq_idx),
-			void *arg);
-
-	/**
-	 * get_interrupt_statuses - Gets and store value from all interrupt
-	 *                          status registers that are currently fired.
-	 * @intr:	HW interrupt handle
-	 */
-	void (*get_interrupt_statuses)(
-			struct sde_hw_intr *intr);
-
-	/**
-	 * clear_interrupt_status - Clears HW interrupt status based on given
-	 *                          lookup IRQ index.
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 */
-	void (*clear_interrupt_status)(
-			struct sde_hw_intr *intr,
-			int irq_idx);
-
-	/**
-	 * clear_intr_status_nolock() - clears the HW interrupts without lock
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 */
-	void (*clear_intr_status_nolock)(
-			struct sde_hw_intr *intr,
-			int irq_idx);
-
-	/**
-	 * clear_intr_status_force_mask() - clear the HW interrupts
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @irq_mask:	irq mask to clear
-	 */
-	void (*clear_intr_status_force_mask)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			u32 irq_mask);
-
-	/**
-	 * get_interrupt_status - Gets HW interrupt status, and clear if set,
-	 *                        based on given lookup IRQ index.
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @clear:	True to clear irq after read
-	 */
-	u32 (*get_interrupt_status)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			bool clear);
-
-	/**
-	 * get_intr_status_nolock - nolock version of get_interrupt_status
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @clear:	True to clear irq after read
-	 */
-	u32 (*get_intr_status_nolock)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			bool clear);
-
-	/**
-	 * get_intr_status_nomask - nolock version of get_interrupt_status
-	 * @intr:	HW interrupt handle
-	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
-	 * @clear:	True to clear irq after read
-	 */
-	u32 (*get_intr_status_nomask)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			bool clear);
-
-	/**
-	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
-	 *                        within SDE. These are actually status bits
-	 *                        within interrupt registers that specify the
-	 *                        source of the interrupt in IRQs. For example,
-	 *                        valid interrupt sources can be MDP, DSI,
-	 *                        HDMI etc.
-	 * @intr:	HW interrupt handle
-	 * @mask:	Returning the interrupt source MASK
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*get_valid_interrupts)(
-			struct sde_hw_intr *intr,
-			uint32_t *mask);
-
-	/**
-	 * get_interrupt_sources - Gets the bitmask of the SDE interrupt
-	 *                         source that are currently fired.
-	 * @intr:	HW interrupt handle
-	 * @sources:	Returning the SDE interrupt source status bit mask
-	 * @return:	0 for success, otherwise failure
-	 */
-	int (*get_interrupt_sources)(
-			struct sde_hw_intr *intr,
-			uint32_t *sources);
-};
-
-/**
- * struct sde_hw_intr: hw interrupts handling data structure
- * @hw:               virtual address mapping
- * @ops:              function pointer mapping for IRQ handling
- * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
- * @save_irq_status:  array of IRQ status reg storage created during init
- * @irq_lock:         spinlock for accessing IRQ resources
- * @sde_irq_size:   total number of elements of the sde_irq_tbl
- * @sde_irq_tbl:	table with the registesrs offsets of the sde interrupts
- *		supported by the hw
- * @sde_irq_map_size: total number of elements of the 'sde_irq_map'
- * @sde_irq_map: total number of interrupt bits valid within the irq regs
- */
-struct sde_hw_intr {
-	struct sde_hw_blk_reg_map hw;
-	struct sde_hw_intr_ops ops;
-	u32 *cache_irq_mask;
-	u32 *save_irq_status;
-	u32 sde_irq_size;
-	struct sde_intr_reg *sde_irq_tbl;
-	u32 sde_irq_map_size;
-	struct sde_irq_type *sde_irq_map;
-	spinlock_t irq_lock;
-};
-
-/**
- * sde_hw_intr_init(): Initializes the interrupts hw object
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- */
-struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_intr_destroy(): Cleanup interrutps hw object
- * @intr: pointer to interrupts hw object
- */
-void sde_hw_intr_destroy(struct sde_hw_intr *intr);
-#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
deleted file mode 100644
index 9f3c9ba..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ /dev/null
@@ -1,726 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-#include <linux/iopoll.h>
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_intf.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define INTF_TIMING_ENGINE_EN           0x000
-#define INTF_CONFIG                     0x004
-#define INTF_HSYNC_CTL                  0x008
-#define INTF_VSYNC_PERIOD_F0            0x00C
-#define INTF_VSYNC_PERIOD_F1            0x010
-#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
-#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
-#define INTF_DISPLAY_V_START_F0         0x01C
-#define INTF_DISPLAY_V_START_F1         0x020
-#define INTF_DISPLAY_V_END_F0           0x024
-#define INTF_DISPLAY_V_END_F1           0x028
-#define INTF_ACTIVE_V_START_F0          0x02C
-#define INTF_ACTIVE_V_START_F1          0x030
-#define INTF_ACTIVE_V_END_F0            0x034
-#define INTF_ACTIVE_V_END_F1            0x038
-#define INTF_DISPLAY_HCTL               0x03C
-#define INTF_ACTIVE_HCTL                0x040
-#define INTF_BORDER_COLOR               0x044
-#define INTF_UNDERFLOW_COLOR            0x048
-#define INTF_HSYNC_SKEW                 0x04C
-#define INTF_POLARITY_CTL               0x050
-#define INTF_TEST_CTL                   0x054
-#define INTF_TP_COLOR0                  0x058
-#define INTF_TP_COLOR1                  0x05C
-#define INTF_CONFIG2                    0x060
-#define INTF_DISPLAY_DATA_HCTL          0x064
-#define INTF_ACTIVE_DATA_HCTL           0x068
-#define INTF_FRAME_LINE_COUNT_EN        0x0A8
-#define INTF_FRAME_COUNT                0x0AC
-#define   INTF_LINE_COUNT               0x0B0
-
-#define   INTF_DEFLICKER_CONFIG         0x0F0
-#define   INTF_DEFLICKER_STRNG_COEFF    0x0F4
-#define   INTF_DEFLICKER_WEAK_COEFF     0x0F8
-
-#define   INTF_DSI_CMD_MODE_TRIGGER_EN  0x084
-#define   INTF_PANEL_FORMAT             0x090
-#define   INTF_TPG_ENABLE               0x100
-#define   INTF_TPG_MAIN_CONTROL         0x104
-#define   INTF_TPG_VIDEO_CONFIG         0x108
-#define   INTF_TPG_COMPONENT_LIMITS     0x10C
-#define   INTF_TPG_RECTANGLE            0x110
-#define   INTF_TPG_INITIAL_VALUE        0x114
-#define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
-#define   INTF_TPG_RGB_MAPPING          0x11C
-#define   INTF_PROG_FETCH_START         0x170
-#define   INTF_PROG_ROT_START           0x174
-
-#define INTF_MISR_CTRL			0x180
-#define INTF_MISR_SIGNATURE		0x184
-
-#define INTF_MUX                        0x25C
-#define INTF_STATUS                     0x26C
-#define INTF_AVR_CONTROL                0x270
-#define INTF_AVR_MODE                   0x274
-#define INTF_AVR_TRIGGER                0x278
-#define INTF_AVR_VTOTAL                 0x27C
-#define INTF_TEAR_MDP_VSYNC_SEL         0x280
-#define INTF_TEAR_TEAR_CHECK_EN         0x284
-#define INTF_TEAR_SYNC_CONFIG_VSYNC     0x288
-#define INTF_TEAR_SYNC_CONFIG_HEIGHT    0x28C
-#define INTF_TEAR_SYNC_WRCOUNT          0x290
-#define INTF_TEAR_VSYNC_INIT_VAL        0x294
-#define INTF_TEAR_INT_COUNT_VAL         0x298
-#define INTF_TEAR_SYNC_THRESH           0x29C
-#define INTF_TEAR_START_POS             0x2A0
-#define INTF_TEAR_RD_PTR_IRQ            0x2A4
-#define INTF_TEAR_WR_PTR_IRQ            0x2A8
-#define INTF_TEAR_OUT_LINE_COUNT        0x2AC
-#define INTF_TEAR_LINE_COUNT            0x2B0
-#define INTF_TEAR_AUTOREFRESH_CONFIG    0x2B4
-#define INTF_TEAR_TEAR_DETECT_CTRL      0x2B8
-
-#define AVR_CONTINUOUS_MODE   1
-#define AVR_ONE_SHOT_MODE     2
-
-static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->intf_count; i++) {
-		if ((intf == m->intf[i].id) &&
-		(m->intf[i].type != INTF_NONE)) {
-			b->base_off = addr;
-			b->blk_off = m->intf[i].base;
-			b->length = m->intf[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_INTF;
-			return &m->intf[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static void sde_hw_intf_avr_trigger(struct sde_hw_intf *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	SDE_REG_WRITE(c, INTF_AVR_TRIGGER, 0x1);
-	SDE_DEBUG("AVR Triggered\n");
-}
-
-static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx,
-	const struct intf_timing_params *params,
-	const struct intf_avr_params *avr_params)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 hsync_period, vsync_period;
-	u32 min_fps, default_fps, diff_fps;
-	u32 vsync_period_slow;
-	u32 avr_vtotal;
-	u32 add_porches;
-
-	if (!ctx || !params || !avr_params) {
-		SDE_ERROR("invalid input parameter(s)\n");
-		return -EINVAL;
-	}
-
-	c = &ctx->hw;
-	min_fps = avr_params->min_fps;
-	default_fps = avr_params->default_fps;
-	diff_fps = default_fps - min_fps;
-	hsync_period = params->hsync_pulse_width +
-			params->h_back_porch + params->width +
-			params->h_front_porch;
-	vsync_period = params->vsync_pulse_width +
-			params->v_back_porch + params->height +
-			params->v_front_porch;
-	add_porches = mult_frac(vsync_period, diff_fps, min_fps);
-	vsync_period_slow = vsync_period + add_porches;
-	avr_vtotal = vsync_period_slow * hsync_period;
-
-	SDE_REG_WRITE(c, INTF_AVR_VTOTAL, avr_vtotal);
-
-	return 0;
-}
-
-static void sde_hw_intf_avr_ctrl(struct sde_hw_intf *ctx,
-	const struct intf_avr_params *avr_params)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 avr_mode = 0;
-	u32 avr_ctrl = 0;
-
-	if (!ctx || !avr_params)
-		return;
-
-	c = &ctx->hw;
-	if (avr_params->avr_mode) {
-		avr_ctrl = BIT(0);
-		avr_mode = (avr_params->avr_mode == AVR_ONE_SHOT_MODE) ?
-			(BIT(0) | BIT(8)) : 0x0;
-	}
-
-	SDE_REG_WRITE(c, INTF_AVR_CONTROL, avr_ctrl);
-	SDE_REG_WRITE(c, INTF_AVR_MODE, avr_mode);
-}
-
-
-static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
-		const struct intf_timing_params *p,
-		const struct sde_format *fmt)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 hsync_period, vsync_period;
-	u32 display_v_start, display_v_end;
-	u32 hsync_start_x, hsync_end_x;
-	u32 active_h_start, active_h_end;
-	u32 active_v_start, active_v_end;
-	u32 active_hctl, display_hctl, hsync_ctl;
-	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
-	u32 panel_format;
-	u32 intf_cfg, intf_cfg2;
-	u32 display_data_hctl = 0, active_data_hctl = 0;
-	bool dp_intf = false;
-
-	/* read interface_cfg */
-	intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
-	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
-	p->h_front_porch;
-	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
-	p->v_front_porch;
-
-	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
-	hsync_period) + p->hsync_skew;
-	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
-	p->hsync_skew - 1;
-
-	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
-	hsync_end_x = hsync_period - p->h_front_porch - 1;
-
-	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP)
-		dp_intf = true;
-
-	if (p->width != p->xres) {
-		active_h_start = hsync_start_x;
-		active_h_end = active_h_start + p->xres - 1;
-	} else {
-		active_h_start = 0;
-		active_h_end = 0;
-	}
-
-	if (p->height != p->yres) {
-		active_v_start = display_v_start;
-		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
-	} else {
-		active_v_start = 0;
-		active_v_end = 0;
-	}
-
-	if (active_h_end) {
-		active_hctl = (active_h_end << 16) | active_h_start;
-		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
-	} else {
-		active_hctl = 0;
-	}
-
-	if (active_v_end)
-		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
-
-	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
-	display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
-	if (dp_intf) {
-		active_h_start = hsync_start_x;
-		active_h_end = active_h_start + p->xres - 1;
-		active_v_start = display_v_start;
-		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
-
-		display_v_start += p->hsync_pulse_width + p->h_back_porch;
-
-		active_hctl = (active_h_end << 16) | active_h_start;
-		display_hctl = active_hctl;
-	}
-
-	intf_cfg2 = 0;
-
-	if (dp_intf && p->compression_en) {
-		active_data_hctl = (hsync_start_x + p->extra_dto_cycles) << 16;
-		active_data_hctl += hsync_start_x;
-
-		display_data_hctl = active_data_hctl;
-
-		intf_cfg2 |= BIT(4);
-	}
-
-	den_polarity = 0;
-	if (ctx->cap->type == INTF_HDMI) {
-		hsync_polarity = p->yres >= 720 ? 0 : 1;
-		vsync_polarity = p->yres >= 720 ? 0 : 1;
-	} else if (ctx->cap->type == INTF_DP) {
-		hsync_polarity = p->hsync_polarity;
-		vsync_polarity = p->vsync_polarity;
-	} else {
-		hsync_polarity = 0;
-		vsync_polarity = 0;
-	}
-	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
-		(vsync_polarity << 1) | /* VSYNC Polarity */
-		(hsync_polarity << 0);  /* HSYNC Polarity */
-
-	if (!SDE_FORMAT_IS_YUV(fmt))
-		panel_format = (fmt->bits[C0_G_Y] |
-				(fmt->bits[C1_B_Cb] << 2) |
-				(fmt->bits[C2_R_Cr] << 4) |
-				(0x21 << 8));
-	else
-		/* Interface treats all the pixel data in RGB888 format */
-		panel_format = (COLOR_8BIT |
-				(COLOR_8BIT << 2) |
-				(COLOR_8BIT << 4) |
-				(0x21 << 8));
-
-	if (p->wide_bus_en)
-		intf_cfg2 |= BIT(0);
-
-	SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
-	SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
-	SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
-			p->vsync_pulse_width * hsync_period);
-	SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
-	SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
-	SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
-	SDE_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
-	SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
-	SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
-	SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
-	SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
-	SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
-	SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
-	SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
-	SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
-	SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
-	SDE_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
-	SDE_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
-	SDE_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
-}
-
-static void sde_hw_intf_enable_timing_engine(
-		struct sde_hw_intf *intf,
-		u8 enable)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	/* Note: Display interface select is handled in top block hw layer */
-	SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
-}
-
-static void sde_hw_intf_setup_prg_fetch(
-		struct sde_hw_intf *intf,
-		const struct intf_prog_fetch *fetch)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	int fetch_enable;
-
-	/*
-	 * Fetch should always be outside the active lines. If the fetching
-	 * is programmed within active region, hardware behavior is unknown.
-	 */
-
-	fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
-	if (fetch->enable) {
-		fetch_enable |= BIT(31);
-		SDE_REG_WRITE(c, INTF_PROG_FETCH_START,
-				fetch->fetch_start);
-	} else {
-		fetch_enable &= ~BIT(31);
-	}
-
-	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
-}
-
-static void sde_hw_intf_bind_pingpong_blk(
-		struct sde_hw_intf *intf,
-		bool enable,
-		const enum sde_pingpong pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 mux_cfg;
-
-	if (!intf)
-		return;
-
-	c = &intf->hw;
-
-	mux_cfg = SDE_REG_READ(c, INTF_MUX);
-	mux_cfg &= ~0xf;
-
-	if (enable)
-		mux_cfg |= (pp - PINGPONG_0) & 0x7;
-	else
-		mux_cfg |= 0xf;
-
-	SDE_REG_WRITE(c, INTF_MUX, mux_cfg);
-}
-
-static void sde_hw_intf_get_status(
-		struct sde_hw_intf *intf,
-		struct intf_status *s)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-
-	s->is_en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN);
-	if (s->is_en) {
-		s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT);
-		s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT);
-	} else {
-		s->line_count = 0;
-		s->frame_count = 0;
-	}
-}
-
-static void sde_hw_intf_v1_get_status(
-		struct sde_hw_intf *intf,
-		struct intf_status *s)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-
-	s->is_en = SDE_REG_READ(c, INTF_STATUS) & BIT(0);
-	if (s->is_en) {
-		s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT);
-		s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT);
-	} else {
-		s->line_count = 0;
-		s->frame_count = 0;
-	}
-}
-static void sde_hw_intf_setup_misr(struct sde_hw_intf *intf,
-						bool enable, u32 frame_count)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	u32 config = 0;
-
-	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
-	/* clear misr data */
-	wmb();
-
-	if (enable)
-		config = (frame_count & MISR_FRAME_COUNT_MASK) |
-				MISR_CTRL_ENABLE |
-				INTF_MISR_CTRL_FREE_RUN_MASK |
-				INTF_MISR_CTRL_INPUT_SEL_DATA;
-
-	SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
-}
-
-static int sde_hw_intf_collect_misr(struct sde_hw_intf *intf, bool nonblock,
-		 u32 *misr_value)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	u32 ctrl = 0;
-
-	if (!misr_value)
-		return -EINVAL;
-
-	ctrl = SDE_REG_READ(c, INTF_MISR_CTRL);
-	if (!nonblock) {
-		if (ctrl & MISR_CTRL_ENABLE) {
-			int rc;
-
-			rc = readl_poll_timeout(c->base_off + c->blk_off +
-					INTF_MISR_CTRL, ctrl,
-					(ctrl & MISR_CTRL_STATUS) > 0, 500,
-					84000);
-			if (rc)
-				return rc;
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	*misr_value =  SDE_REG_READ(c, INTF_MISR_SIGNATURE);
-	return 0;
-}
-
-static u32 sde_hw_intf_get_line_count(struct sde_hw_intf *intf)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!intf)
-		return 0;
-
-	c = &intf->hw;
-
-	return SDE_REG_READ(c, INTF_LINE_COUNT);
-}
-
-static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
-		struct sde_hw_tear_check *te)
-{
-	struct sde_hw_blk_reg_map *c;
-	int cfg;
-
-	if (!intf)
-		return -EINVAL;
-
-	c = &intf->hw;
-
-	cfg = BIT(19); /* VSYNC_COUNTER_EN */
-	if (te->hw_vsync_mode)
-		cfg |= BIT(20);
-
-	cfg |= te->vsync_count;
-
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
-	SDE_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
-	SDE_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
-	SDE_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
-			((te->sync_threshold_continue << 16) |
-			 te->sync_threshold_start));
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
-			(te->start_pos + te->sync_threshold_start + 1));
-
-	return 0;
-}
-
-static int sde_hw_intf_setup_autorefresh_config(struct sde_hw_intf *intf,
-		struct sde_hw_autorefresh *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 refresh_cfg;
-
-	if (!intf || !cfg)
-		return -EINVAL;
-
-	c = &intf->hw;
-
-	if (cfg->enable)
-		refresh_cfg = BIT(31) | cfg->frame_count;
-	else
-		refresh_cfg = 0;
-
-	SDE_REG_WRITE(c, INTF_TEAR_AUTOREFRESH_CONFIG, refresh_cfg);
-
-	return 0;
-}
-
-static int sde_hw_intf_get_autorefresh_config(struct sde_hw_intf *intf,
-		struct sde_hw_autorefresh *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 val;
-
-	if (!intf || !cfg)
-		return -EINVAL;
-
-	c = &intf->hw;
-	val = SDE_REG_READ(c, INTF_TEAR_AUTOREFRESH_CONFIG);
-	cfg->enable = (val & BIT(31)) >> 31;
-	cfg->frame_count = val & 0xffff;
-
-	return 0;
-}
-
-static int sde_hw_intf_poll_timeout_wr_ptr(struct sde_hw_intf *intf,
-		u32 timeout_us)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 val;
-	int rc;
-
-	if (!intf)
-		return -EINVAL;
-
-	c = &intf->hw;
-	rc = readl_poll_timeout(c->base_off + c->blk_off + INTF_TEAR_LINE_COUNT,
-			val, (val & 0xffff) >= 1, 10, timeout_us);
-
-	return rc;
-}
-
-static int sde_hw_intf_enable_te(struct sde_hw_intf *intf, bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!intf)
-		return -EINVAL;
-
-	c = &intf->hw;
-	SDE_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, enable);
-	return 0;
-}
-
-static void sde_hw_intf_update_te(struct sde_hw_intf *intf,
-		struct sde_hw_tear_check *te)
-{
-	struct sde_hw_blk_reg_map *c;
-	int cfg;
-
-	if (!intf || !te)
-		return;
-
-	c = &intf->hw;
-	cfg = SDE_REG_READ(c, INTF_TEAR_SYNC_THRESH);
-	cfg &= ~0xFFFF;
-	cfg |= te->sync_threshold_start;
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH, cfg);
-}
-
-static int sde_hw_intf_connect_external_te(struct sde_hw_intf *intf,
-		bool enable_external_te)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	u32 cfg;
-	int orig;
-
-	if (!intf)
-		return -EINVAL;
-
-	c = &intf->hw;
-	cfg = SDE_REG_READ(c, INTF_TEAR_SYNC_CONFIG_VSYNC);
-	orig = (bool)(cfg & BIT(20));
-	if (enable_external_te)
-		cfg |= BIT(20);
-	else
-		cfg &= ~BIT(20);
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
-
-	return orig;
-}
-
-static int sde_hw_intf_get_vsync_info(struct sde_hw_intf *intf,
-		struct sde_hw_pp_vsync_info *info)
-{
-	struct sde_hw_blk_reg_map *c = &intf->hw;
-	u32 val;
-
-	if (!intf || !info)
-		return -EINVAL;
-
-	c = &intf->hw;
-
-	val = SDE_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL);
-	info->rd_ptr_init_val = val & 0xffff;
-
-	val = SDE_REG_READ(c, INTF_TEAR_INT_COUNT_VAL);
-	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
-	info->rd_ptr_line_count = val & 0xffff;
-
-	val = SDE_REG_READ(c, INTF_TEAR_LINE_COUNT);
-	info->wr_ptr_line_count = val & 0xffff;
-
-	return 0;
-}
-
-static void sde_hw_intf_vsync_sel(struct sde_hw_intf *intf,
-		u32 vsync_source)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!intf)
-		return;
-
-	c = &intf->hw;
-
-	SDE_REG_WRITE(c, INTF_TEAR_MDP_VSYNC_SEL, (vsync_source & 0xf));
-}
-
-static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
-		unsigned long cap)
-{
-	ops->setup_timing_gen = sde_hw_intf_setup_timing_engine;
-	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
-	ops->get_status = sde_hw_intf_get_status;
-	ops->enable_timing = sde_hw_intf_enable_timing_engine;
-	ops->setup_misr = sde_hw_intf_setup_misr;
-	ops->collect_misr = sde_hw_intf_collect_misr;
-	ops->get_line_count = sde_hw_intf_get_line_count;
-	ops->avr_setup = sde_hw_intf_avr_setup;
-	ops->avr_trigger = sde_hw_intf_avr_trigger;
-	ops->avr_ctrl = sde_hw_intf_avr_ctrl;
-
-	if (cap & BIT(SDE_INTF_INPUT_CTRL))
-		ops->bind_pingpong_blk = sde_hw_intf_bind_pingpong_blk;
-
-	if (cap & BIT(SDE_INTF_TE)) {
-		ops->setup_tearcheck = sde_hw_intf_setup_te_config;
-		ops->enable_tearcheck = sde_hw_intf_enable_te;
-		ops->update_tearcheck = sde_hw_intf_update_te;
-		ops->connect_external_te = sde_hw_intf_connect_external_te;
-		ops->get_vsync_info = sde_hw_intf_get_vsync_info;
-		ops->setup_autorefresh = sde_hw_intf_setup_autorefresh_config;
-		ops->get_autorefresh = sde_hw_intf_get_autorefresh_config;
-		ops->poll_timeout_wr_ptr = sde_hw_intf_poll_timeout_wr_ptr;
-		ops->vsync_sel = sde_hw_intf_vsync_sel;
-		ops->get_status = sde_hw_intf_v1_get_status;
-	}
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_intf *c;
-	struct sde_intf_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _intf_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		pr_err("failed to create sde_hw_intf %d\n", idx);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/*
-	 * Assign ops
-	 */
-	c->idx = idx;
-	c->cap = cfg;
-	c->mdss = m;
-	_setup_intf_ops(&c->ops, c->cap->features);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_INTF, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_intf_destroy(struct sde_hw_intf *intf)
-{
-	if (intf)
-		sde_hw_blk_destroy(&intf->base);
-	kfree(intf);
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
deleted file mode 100644
index a77b8a3..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_INTF_H
-#define _SDE_HW_INTF_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_intf;
-
-/* intf timing settings */
-struct intf_timing_params {
-	u32 width;		/* active width */
-	u32 height;		/* active height */
-	u32 xres;		/* Display panel width */
-	u32 yres;		/* Display panel height */
-
-	u32 h_back_porch;
-	u32 h_front_porch;
-	u32 v_back_porch;
-	u32 v_front_porch;
-	u32 hsync_pulse_width;
-	u32 vsync_pulse_width;
-	u32 hsync_polarity;
-	u32 vsync_polarity;
-	u32 border_clr;
-	u32 underflow_clr;
-	u32 hsync_skew;
-	u32 v_front_porch_fixed;
-	bool wide_bus_en;	/* for DP only */
-	bool compression_en;	/* for DP only */
-	u32 extra_dto_cycles;	/* for DP only */
-};
-
-struct intf_prog_fetch {
-	u8 enable;
-	/* vsync counter for the front porch pixel line */
-	u32 fetch_start;
-};
-
-struct intf_status {
-	u8 is_en;		/* interface timing engine is enabled or not */
-	u32 frame_count;	/* frame count since timing engine enabled */
-	u32 line_count;		/* current line count including blanking */
-};
-
-struct intf_avr_params {
-	u32 default_fps;
-	u32 min_fps;
-	u32 avr_mode; /* 0 - disable, 1 - continuous, 2 - one-shot */
-};
-
-/**
- * struct sde_hw_intf_ops : Interface to the interface Hw driver functions
- *  Assumption is these functions will be called after clocks are enabled
- * @ setup_timing_gen : programs the timing engine
- * @ setup_prog_fetch : enables/disables the programmable fetch logic
- * @ setup_rot_start  : enables/disables the rotator start trigger
- * @ enable_timing: enable/disable timing engine
- * @ get_status: returns if timing engine is enabled or not
- * @ setup_misr: enables/disables MISR in HW register
- * @ collect_misr: reads and stores MISR data from HW register
- * @ get_line_count: reads current vertical line counter
- * @bind_pingpong_blk: enable/disable the connection with pingpong which will
- *                     feed pixels to this interface
- */
-struct sde_hw_intf_ops {
-	void (*setup_timing_gen)(struct sde_hw_intf *intf,
-			const struct intf_timing_params *p,
-			const struct sde_format *fmt);
-
-	void (*setup_prg_fetch)(struct sde_hw_intf *intf,
-			const struct intf_prog_fetch *fetch);
-
-	void (*setup_rot_start)(struct sde_hw_intf *intf,
-			const struct intf_prog_fetch *fetch);
-
-	void (*enable_timing)(struct sde_hw_intf *intf,
-			u8 enable);
-
-	void (*get_status)(struct sde_hw_intf *intf,
-			struct intf_status *status);
-
-	void (*setup_misr)(struct sde_hw_intf *intf,
-			bool enable, u32 frame_count);
-
-	int (*collect_misr)(struct sde_hw_intf *intf,
-			bool nonblock, u32 *misr_value);
-
-	/**
-	 * returns the current scan line count of the display
-	 * video mode panels use get_line_count whereas get_vsync_info
-	 * is used for command mode panels
-	 */
-	u32 (*get_line_count)(struct sde_hw_intf *intf);
-
-	void (*bind_pingpong_blk)(struct sde_hw_intf *intf,
-			bool enable,
-			const enum sde_pingpong pp);
-
-	/**
-	 * enables vysnc generation and sets up init value of
-	 * read pointer and programs the tear check cofiguration
-	 */
-	int (*setup_tearcheck)(struct sde_hw_intf *intf,
-			struct sde_hw_tear_check *cfg);
-
-	/**
-	 * enables tear check block
-	 */
-	int (*enable_tearcheck)(struct sde_hw_intf *intf,
-			bool enable);
-
-	/**
-	 * updates tearcheck configuration
-	 */
-	void (*update_tearcheck)(struct sde_hw_intf *intf,
-			struct sde_hw_tear_check *cfg);
-
-	/**
-	 * read, modify, write to either set or clear listening to external TE
-	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
-	 */
-	int (*connect_external_te)(struct sde_hw_intf *intf,
-			bool enable_external_te);
-
-	/**
-	 * provides the programmed and current
-	 * line_count
-	 */
-	int (*get_vsync_info)(struct sde_hw_intf *intf,
-			struct sde_hw_pp_vsync_info  *info);
-
-	/**
-	 * configure and enable the autorefresh config
-	 */
-	int (*setup_autorefresh)(struct sde_hw_intf *intf,
-			struct sde_hw_autorefresh *cfg);
-
-	/**
-	 * retrieve autorefresh config from hardware
-	 */
-	int (*get_autorefresh)(struct sde_hw_intf *intf,
-			struct sde_hw_autorefresh *cfg);
-
-	/**
-	 * poll until write pointer transmission starts
-	 * @Return: 0 on success, -ETIMEDOUT on timeout
-	 */
-	int (*poll_timeout_wr_ptr)(struct sde_hw_intf *intf, u32 timeout_us);
-
-	/**
-	 * Select vsync signal for tear-effect configuration
-	 */
-	void (*vsync_sel)(struct sde_hw_intf *intf, u32 vsync_source);
-
-	/**
-	 * Program the AVR_TOTAL for min fps rate
-	 */
-	int (*avr_setup)(struct sde_hw_intf *intf,
-			const struct intf_timing_params *params,
-			const struct intf_avr_params *avr_params);
-
-	/**
-	 * Signal the trigger on each commit for AVR
-	 */
-	void (*avr_trigger)(struct sde_hw_intf *ctx);
-
-	/**
-	 * Enable AVR and select the mode
-	 */
-	void (*avr_ctrl)(struct sde_hw_intf *intf,
-			const struct intf_avr_params *avr_params);
-};
-
-struct sde_hw_intf {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* intf */
-	enum sde_intf idx;
-	const struct sde_intf_cfg *cap;
-	const struct sde_mdss_cfg *mdss;
-
-	/* ops */
-	struct sde_hw_intf_ops ops;
-};
-
-/**
- * to_sde_hw_intf - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_intf *to_sde_hw_intf(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_intf, base);
-}
-
-/**
- * sde_hw_intf_init(): Initializes the intf driver for the passed
- * interface idx.
- * @idx:  interface index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- */
-struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_intf_destroy(): Destroys INTF driver context
- * @intf:   Pointer to INTF driver context
- */
-void sde_hw_intf_destroy(struct sde_hw_intf *intf);
-
-#endif /*_SDE_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
deleted file mode 100644
index 608929e..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/iopoll.h>
-
-#include "sde_kms.h"
-#include "sde_hw_catalog.h"
-#include "sde_hwio.h"
-#include "sde_hw_lm.h"
-#include "sde_hw_mdss.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define LM_OP_MODE                        0x00
-#define LM_OUT_SIZE                       0x04
-#define LM_BORDER_COLOR_0                 0x08
-#define LM_BORDER_COLOR_1                 0x010
-
-/* These register are offset to mixer base + stage base */
-#define LM_BLEND0_OP                     0x00
-#define LM_BLEND0_CONST_ALPHA            0x04
-#define LM_FG_COLOR_FILL_COLOR_0         0x08
-#define LM_FG_COLOR_FILL_COLOR_1         0x0C
-#define LM_FG_COLOR_FILL_SIZE            0x10
-#define LM_FG_COLOR_FILL_XY              0x14
-
-#define LM_BLEND0_FG_ALPHA               0x04
-#define LM_BLEND0_BG_ALPHA               0x08
-
-#define LM_MISR_CTRL			0x310
-#define LM_MISR_SIGNATURE		0x314
-
-static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->mixer_count; i++) {
-		if (mixer == m->mixer[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->mixer[i].base;
-			b->length = m->mixer[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_LM;
-			return &m->mixer[i];
-		}
-	}
-
-	return ERR_PTR(-ENOMEM);
-}
-
-/**
- * _stage_offset(): returns the relative offset of the blend registers
- * for the stage to be setup
- * @c:     mixer ctx contains the mixer to be programmed
- * @stage: stage index to setup
- */
-static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
-{
-	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
-	int rc;
-
-	if (stage == SDE_STAGE_BASE)
-		rc = -EINVAL;
-	else if (stage <= sblk->maxblendstages)
-		rc = sblk->blendstage_base[stage - SDE_STAGE_0];
-	else
-		rc = -EINVAL;
-
-	return rc;
-}
-
-static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
-		struct sde_hw_mixer_cfg *mixer)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 outsize;
-	u32 op_mode;
-
-	op_mode = SDE_REG_READ(c, LM_OP_MODE);
-
-	outsize = mixer->out_height << 16 | mixer->out_width;
-	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
-
-	/* SPLIT_LEFT_RIGHT */
-	if (mixer->right_mixer)
-		op_mode |= BIT(31);
-	else
-		op_mode &= ~BIT(31);
-	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
-}
-
-static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
-		struct sde_mdss_color *color,
-		u8 border_en)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-
-	if (border_en) {
-		SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
-			(color->color_0 & 0xFFF) |
-			((color->color_1 & 0xFFF) << 0x10));
-		SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
-			(color->color_2 & 0xFFF) |
-			((color->color_3 & 0xFFF) << 0x10));
-	}
-}
-
-static void sde_hw_lm_setup_blend_config_sdm845(struct sde_hw_mixer *ctx,
-	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	int stage_off;
-	u32 const_alpha;
-
-	if (stage == SDE_STAGE_BASE)
-		return;
-
-	stage_off = _stage_offset(ctx, stage);
-	if (WARN_ON(stage_off < 0))
-		return;
-
-	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
-	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
-	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
-}
-
-static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
-	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	int stage_off;
-
-	if (stage == SDE_STAGE_BASE)
-		return;
-
-	stage_off = _stage_offset(ctx, stage);
-	if (WARN_ON(stage_off < 0))
-		return;
-
-	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
-	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
-	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
-}
-
-static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
-	uint32_t mixer_op_mode)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	int op_mode;
-
-	/* read the existing op_mode configuration */
-	op_mode = SDE_REG_READ(c, LM_OP_MODE);
-
-	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
-
-	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
-}
-
-static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
-			void *cfg)
-{
-}
-
-static void sde_hw_lm_clear_dim_layer(struct sde_hw_mixer *ctx)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
-	int stage_off, i;
-	u32 reset = BIT(16), val;
-
-	reset = ~reset;
-	for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
-		stage_off = _stage_offset(ctx, i);
-		if (WARN_ON(stage_off < 0))
-			return;
-
-		/*
-		 * read the existing blendn_op register and clear only DIM layer
-		 * bit (color_fill bit)
-		 */
-		val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
-		val &= reset;
-		SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
-	}
-}
-
-static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
-		struct sde_hw_dim_layer *dim_layer)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	int stage_off;
-	u32 val = 0, alpha = 0;
-
-	stage_off = _stage_offset(ctx, dim_layer->stage);
-	if (stage_off < 0) {
-		SDE_ERROR("invalid stage_off:%d for dim layer\n", stage_off);
-		return;
-	}
-
-	alpha = dim_layer->color_fill.color_3 & 0xFF;
-	val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
-			((dim_layer->color_fill.color_0 << 2) & 0xFFF);
-	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);
-
-	val = (alpha << 4) << 16 |
-			((dim_layer->color_fill.color_2 << 2) & 0xFFF);
-	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);
-
-	val = dim_layer->rect.h << 16 | dim_layer->rect.w;
-	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);
-
-	val = dim_layer->rect.y << 16 | dim_layer->rect.x;
-	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);
-
-	val = BIT(16); /* enable dim layer */
-	val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
-	if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
-		val |= BIT(17);
-	else
-		val &= ~BIT(17);
-	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
-	val = (alpha << 16) | (0xff - alpha);
-	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
-}
-
-static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
-				bool enable, u32 frame_count)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 config = 0;
-
-	SDE_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
-	/* clear misr data */
-	wmb();
-
-	if (enable)
-		config = (frame_count & MISR_FRAME_COUNT_MASK) |
-			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
-	SDE_REG_WRITE(c, LM_MISR_CTRL, config);
-}
-
-static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
-		u32 *misr_value)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 ctrl = 0;
-
-	if (!misr_value)
-		return -EINVAL;
-
-	ctrl = SDE_REG_READ(c, LM_MISR_CTRL);
-	if (!nonblock) {
-		if (ctrl & MISR_CTRL_ENABLE) {
-			int rc;
-
-			rc = readl_poll_timeout(c->base_off + c->blk_off +
-					LM_MISR_CTRL, ctrl,
-					(ctrl & MISR_CTRL_STATUS) > 0, 500,
-					84000);
-			if (rc)
-				return rc;
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	*misr_value  = SDE_REG_READ(c, LM_MISR_SIGNATURE);
-
-	return 0;
-}
-
-static void _setup_mixer_ops(struct sde_mdss_cfg *m,
-		struct sde_hw_lm_ops *ops,
-		unsigned long features)
-{
-	ops->setup_mixer_out = sde_hw_lm_setup_out;
-	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) ||
-			IS_SM8150_TARGET(m->hwversion) ||
-			IS_SDMSHRIKE_TARGET(m->hwversion) ||
-			IS_SM6150_TARGET(m->hwversion) ||
-			IS_SDMMAGPIE_TARGET(m->hwversion) ||
-			IS_KONA_TARGET(m->hwversion))
-		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
-	else
-		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
-	ops->setup_alpha_out = sde_hw_lm_setup_color3;
-	ops->setup_border_color = sde_hw_lm_setup_border_color;
-	ops->setup_gc = sde_hw_lm_gc;
-	ops->setup_misr = sde_hw_lm_setup_misr;
-	ops->collect_misr = sde_hw_lm_collect_misr;
-
-	if (test_bit(SDE_DIM_LAYER, &features)) {
-		ops->setup_dim_layer = sde_hw_lm_setup_dim_layer;
-		ops->clear_dim_layer = sde_hw_lm_clear_dim_layer;
-	}
-};
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_mixer *c;
-	struct sde_lm_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _lm_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Assign ops */
-	c->idx = idx;
-	c->cap = cfg;
-	_setup_mixer_ops(m, &c->ops, c->cap->features);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_LM, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_lm_destroy(struct sde_hw_mixer *lm)
-{
-	if (lm)
-		sde_hw_blk_destroy(&lm->base);
-	kfree(lm);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
deleted file mode 100644
index 07574d3..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_LM_H
-#define _SDE_HW_LM_H
-
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_mixer;
-
-struct sde_hw_mixer_cfg {
-	u32 out_width;
-	u32 out_height;
-	bool right_mixer;
-	int flags;
-};
-
-struct sde_hw_color3_cfg {
-	u8 keep_fg[SDE_STAGE_MAX];
-};
-
-/**
- *
- * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions
- *  Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_lm_ops {
-	/*
-	 * Sets up mixer output width and height
-	 * and border color if enabled
-	 */
-	void (*setup_mixer_out)(struct sde_hw_mixer *ctx,
-		struct sde_hw_mixer_cfg *cfg);
-
-	/*
-	 * Alpha blending configuration
-	 * for the specified stage
-	 */
-	void (*setup_blend_config)(struct sde_hw_mixer *ctx, uint32_t stage,
-		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
-
-	/*
-	 * Alpha color component selection from either fg or bg
-	 */
-	void (*setup_alpha_out)(struct sde_hw_mixer *ctx, uint32_t mixer_op);
-
-	/**
-	 * setup_border_color : enable/disable border color
-	 */
-	void (*setup_border_color)(struct sde_hw_mixer *ctx,
-		struct sde_mdss_color *color,
-		u8 border_en);
-	/**
-	 * setup_gc : enable/disable gamma correction feature
-	 */
-	void (*setup_gc)(struct sde_hw_mixer *mixer,
-			void *cfg);
-
-	/**
-	 * setup_dim_layer: configure dim layer settings
-	 * @ctx: Pointer to layer mixer context
-	 * @dim_layer: dim layer configs
-	 */
-	void (*setup_dim_layer)(struct sde_hw_mixer *ctx,
-			struct sde_hw_dim_layer *dim_layer);
-
-	/**
-	 * clear_dim_layer: clear dim layer settings
-	 * @ctx: Pointer to layer mixer context
-	 */
-	void (*clear_dim_layer)(struct sde_hw_mixer *ctx);
-
-	/* setup_misr: enables/disables MISR in HW register */
-	void (*setup_misr)(struct sde_hw_mixer *ctx,
-			bool enable, u32 frame_count);
-
-	/* collect_misr: reads and stores MISR data from HW register */
-	int (*collect_misr)(struct sde_hw_mixer *ctx, bool nonblock,
-			u32 *misr_value);
-};
-
-struct sde_hw_mixer {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* lm */
-	enum sde_lm  idx;
-	const struct sde_lm_cfg   *cap;
-	const struct sde_mdp_cfg  *mdp;
-	const struct sde_ctl_cfg  *ctl;
-
-	/* ops */
-	struct sde_hw_lm_ops ops;
-
-	/* store mixer info specific to display */
-	struct sde_hw_mixer_cfg cfg;
-};
-
-/**
- * to_sde_hw_mixer - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_mixer *to_sde_hw_mixer(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_mixer, base);
-}
-
-/**
- * sde_hw_lm_init(): Initializes the mixer hw driver object.
- * should be called once before accessing every mixer.
- * @idx:  mixer index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- */
-struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_lm_destroy(): Destroys layer mixer driver context
- * @lm:   Pointer to LM driver context
- */
-void sde_hw_lm_destroy(struct sde_hw_mixer *lm);
-
-#endif /*_SDE_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
deleted file mode 100644
index b5d6c1c..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ /dev/null
@@ -1,682 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_MDSS_H
-#define _SDE_HW_MDSS_H
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-
-#include "msm_drv.h"
-
-#define SDE_DBG_NAME			"sde"
-
-#define SDE_NONE                        0
-
-#ifndef SDE_CSC_MATRIX_COEFF_SIZE
-#define SDE_CSC_MATRIX_COEFF_SIZE	9
-#endif
-
-#ifndef SDE_CSC_CLAMP_SIZE
-#define SDE_CSC_CLAMP_SIZE		6
-#endif
-
-#ifndef SDE_CSC_BIAS_SIZE
-#define SDE_CSC_BIAS_SIZE		3
-#endif
-
-#ifndef SDE_MAX_PLANES
-#define SDE_MAX_PLANES			4
-#endif
-
-#define PIPES_PER_STAGE			2
-#ifndef SDE_MAX_DE_CURVES
-#define SDE_MAX_DE_CURVES		3
-#endif
-
-#define MAX_DSI_DISPLAYS		2
-#define MAX_DATA_PATH_PER_DSIPLAY	2
-
-enum sde_format_flags {
-	SDE_FORMAT_FLAG_YUV_BIT,
-	SDE_FORMAT_FLAG_DX_BIT,
-	SDE_FORMAT_FLAG_COMPRESSED_BIT,
-	SDE_FORMAT_FLAG_BIT_MAX,
-};
-
-#define SDE_FORMAT_FLAG_YUV		BIT(SDE_FORMAT_FLAG_YUV_BIT)
-#define SDE_FORMAT_FLAG_DX		BIT(SDE_FORMAT_FLAG_DX_BIT)
-#define SDE_FORMAT_FLAG_COMPRESSED	BIT(SDE_FORMAT_FLAG_COMPRESSED_BIT)
-#define SDE_FORMAT_IS_YUV(X)		\
-	(test_bit(SDE_FORMAT_FLAG_YUV_BIT, (X)->flag))
-#define SDE_FORMAT_IS_DX(X)		\
-	(test_bit(SDE_FORMAT_FLAG_DX_BIT, (X)->flag))
-#define SDE_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == SDE_FETCH_LINEAR)
-#define SDE_FORMAT_IS_TILE(X) \
-	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
-			!test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
-#define SDE_FORMAT_IS_UBWC(X) \
-	(((X)->fetch_mode == SDE_FETCH_UBWC) && \
-			test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
-
-#define SDE_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
-#define SDE_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
-#define SDE_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
-#define SDE_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
-#define SDE_BLEND_FG_INV_ALPHA		(1 << 2)
-#define SDE_BLEND_FG_MOD_ALPHA		(1 << 3)
-#define SDE_BLEND_FG_INV_MOD_ALPHA	(1 << 4)
-#define SDE_BLEND_FG_TRANSP_EN		(1 << 5)
-#define SDE_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
-#define SDE_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
-#define SDE_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
-#define SDE_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
-#define SDE_BLEND_BG_INV_ALPHA		(1 << 10)
-#define SDE_BLEND_BG_MOD_ALPHA		(1 << 11)
-#define SDE_BLEND_BG_INV_MOD_ALPHA	(1 << 12)
-#define SDE_BLEND_BG_TRANSP_EN		(1 << 13)
-
-#define SDE_VSYNC0_SOURCE_GPIO		0
-#define SDE_VSYNC1_SOURCE_GPIO		1
-#define SDE_VSYNC2_SOURCE_GPIO		2
-#define SDE_VSYNC_SOURCE_INTF_0		3
-#define SDE_VSYNC_SOURCE_INTF_1		4
-#define SDE_VSYNC_SOURCE_INTF_2		5
-#define SDE_VSYNC_SOURCE_INTF_3		6
-#define SDE_VSYNC_SOURCE_WD_TIMER_4	11
-#define SDE_VSYNC_SOURCE_WD_TIMER_3	12
-#define SDE_VSYNC_SOURCE_WD_TIMER_2	13
-#define SDE_VSYNC_SOURCE_WD_TIMER_1	14
-#define SDE_VSYNC_SOURCE_WD_TIMER_0	15
-
-enum sde_hw_blk_type {
-	SDE_HW_BLK_TOP = 0,
-	SDE_HW_BLK_SSPP,
-	SDE_HW_BLK_LM,
-	SDE_HW_BLK_DSPP,
-	SDE_HW_BLK_DS,
-	SDE_HW_BLK_CTL,
-	SDE_HW_BLK_CDM,
-	SDE_HW_BLK_PINGPONG,
-	SDE_HW_BLK_INTF,
-	SDE_HW_BLK_WB,
-	SDE_HW_BLK_DSC,
-	SDE_HW_BLK_MERGE_3D,
-	SDE_HW_BLK_MAX,
-};
-
-enum sde_uidle {
-	UIDLE = 0x1,
-	UIDLE_MAX,
-};
-
-enum sde_mdp {
-	MDP_TOP = 0x1,
-	MDP_MAX,
-};
-
-enum sde_sspp {
-	SSPP_NONE,
-	SSPP_VIG0,
-	SSPP_VIG1,
-	SSPP_VIG2,
-	SSPP_VIG3,
-	SSPP_RGB0,
-	SSPP_RGB1,
-	SSPP_RGB2,
-	SSPP_RGB3,
-	SSPP_DMA0,
-	SSPP_DMA1,
-	SSPP_DMA2,
-	SSPP_DMA3,
-	SSPP_CURSOR0,
-	SSPP_CURSOR1,
-	SSPP_MAX
-};
-
-enum sde_sspp_type {
-	SSPP_TYPE_VIG,
-	SSPP_TYPE_RGB,
-	SSPP_TYPE_DMA,
-	SSPP_TYPE_CURSOR,
-	SSPP_TYPE_MAX
-};
-
-enum sde_lm {
-	LM_0 = 1,
-	LM_1,
-	LM_2,
-	LM_3,
-	LM_4,
-	LM_5,
-	LM_6,
-	LM_MAX
-};
-
-enum sde_stage {
-	SDE_STAGE_BASE = 0,
-	SDE_STAGE_0,
-	SDE_STAGE_1,
-	SDE_STAGE_2,
-	SDE_STAGE_3,
-	SDE_STAGE_4,
-	SDE_STAGE_5,
-	SDE_STAGE_6,
-	SDE_STAGE_7,
-	SDE_STAGE_8,
-	SDE_STAGE_9,
-	SDE_STAGE_10,
-	SDE_STAGE_MAX
-};
-enum sde_dspp {
-	DSPP_0 = 1,
-	DSPP_1,
-	DSPP_2,
-	DSPP_3,
-	DSPP_MAX
-};
-
-enum sde_ltm {
-	LTM_0 = DSPP_0,
-	LTM_1,
-	LTM_MAX
-};
-
-enum sde_ds {
-	DS_TOP,
-	DS_0,
-	DS_1,
-	DS_MAX
-};
-
-enum sde_ctl {
-	CTL_0 = 1,
-	CTL_1,
-	CTL_2,
-	CTL_3,
-	CTL_4,
-	CTL_5,
-	CTL_MAX
-};
-
-enum sde_cdm {
-	CDM_0 = 1,
-	CDM_1,
-	CDM_MAX
-};
-
-enum sde_pingpong {
-	PINGPONG_0 = 1,
-	PINGPONG_1,
-	PINGPONG_2,
-	PINGPONG_3,
-	PINGPONG_4,
-	PINGPONG_5,
-	PINGPONG_S0,
-	PINGPONG_MAX
-};
-
-enum sde_dsc {
-	DSC_NONE = 0,
-	DSC_0,
-	DSC_1,
-	DSC_2,
-	DSC_3,
-	DSC_4,
-	DSC_5,
-	DSC_MAX
-};
-
-enum sde_intf {
-	INTF_0 = 1,
-	INTF_1,
-	INTF_2,
-	INTF_3,
-	INTF_4,
-	INTF_5,
-	INTF_6,
-	INTF_MAX
-};
-
-enum sde_intf_type {
-	INTF_NONE = 0x0,
-	INTF_DSI = 0x1,
-	INTF_HDMI = 0x3,
-	INTF_LCDC = 0x5,
-	INTF_EDP = 0x9,
-	INTF_DP = 0xa,
-	INTF_TYPE_MAX,
-
-	/* virtual interfaces */
-	INTF_WB = 0x100,
-};
-
-enum sde_intf_mode {
-	INTF_MODE_NONE = 0,
-	INTF_MODE_CMD,
-	INTF_MODE_VIDEO,
-	INTF_MODE_WB_BLOCK,
-	INTF_MODE_WB_LINE,
-	INTF_MODE_MAX
-};
-
-enum sde_wb {
-	WB_0 = 1,
-	WB_1,
-	WB_2,
-	WB_3,
-	WB_MAX
-};
-
-enum sde_ad {
-	AD_0 = 0x1,
-	AD_1,
-	AD_MAX
-};
-
-enum sde_cwb {
-	CWB_0 = 0x1,
-	CWB_1,
-	CWB_2,
-	CWB_3,
-	CWB_4,
-	CWB_5,
-	CWB_MAX
-};
-
-enum sde_wd_timer {
-	WD_TIMER_0 = 0x1,
-	WD_TIMER_1,
-	WD_TIMER_2,
-	WD_TIMER_3,
-	WD_TIMER_4,
-	WD_TIMER_5,
-	WD_TIMER_MAX
-};
-
-enum sde_vbif {
-	VBIF_0,
-	VBIF_1,
-	VBIF_MAX,
-	VBIF_RT = VBIF_0,
-	VBIF_NRT = VBIF_1
-};
-
-enum sde_iommu_domain {
-	SDE_IOMMU_DOMAIN_UNSECURE,
-	SDE_IOMMU_DOMAIN_SECURE,
-	SDE_IOMMU_DOMAIN_MAX
-};
-
-enum sde_rot {
-	ROT_0 = 1,
-	ROT_MAX
-};
-
-enum sde_merge_3d {
-	MERGE_3D_0 = 1,
-	MERGE_3D_1,
-	MERGE_3D_2,
-	MERGE_3D_MAX
-};
-
-/**
- * SDE HW,Component order color map
- */
-enum {
-	C0_G_Y = 0,
-	C1_B_Cb = 1,
-	C2_R_Cr = 2,
-	C3_ALPHA = 3
-};
-
-/**
- * enum sde_plane_type - defines how the color component pixel packing
- * @SDE_PLANE_INTERLEAVED   : Color components in single plane
- * @SDE_PLANE_PLANAR        : Color component in separate planes
- * @SDE_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
- */
-enum sde_plane_type {
-	SDE_PLANE_INTERLEAVED,
-	SDE_PLANE_PLANAR,
-	SDE_PLANE_PSEUDO_PLANAR,
-};
-
-/**
- * enum sde_chroma_samp_type - chroma sub-samplng type
- * @SDE_CHROMA_RGB   : No chroma subsampling
- * @SDE_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
- * @SDE_CHROMA_H1V2  : Chroma pixels are vertically subsampled
- * @SDE_CHROMA_420   : 420 subsampling
- */
-enum sde_chroma_samp_type {
-	SDE_CHROMA_RGB,
-	SDE_CHROMA_H2V1,
-	SDE_CHROMA_H1V2,
-	SDE_CHROMA_420
-};
-
-/**
- * sde_fetch_type - Defines How SDE HW fetches data
- * @SDE_FETCH_LINEAR   : fetch is line by line
- * @SDE_FETCH_TILE     : fetches data in Z order from a tile
- * @SDE_FETCH_UBWC     : fetch and decompress data
- */
-enum sde_fetch_type {
-	SDE_FETCH_LINEAR,
-	SDE_FETCH_TILE,
-	SDE_FETCH_UBWC
-};
-
-/**
- * Value of enum chosen to fit the number of bits
- * expected by the HW programming.
- */
-enum {
-	COLOR_ALPHA_1BIT = 0,
-	COLOR_ALPHA_4BIT = 1,
-	COLOR_4BIT = 0,
-	COLOR_5BIT = 1, /* No 5-bit Alpha */
-	COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
-	COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
-};
-
-/**
- * enum sde_3d_blend_mode
- * Desribes how the 3d data is blended
- * @BLEND_3D_NONE      : 3d blending not enabled
- * @BLEND_3D_FRAME_INT : Frame interleaving
- * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
- * @BLEND_3D_V_ROW_INT : vertical row interleaving
- * @BLEND_3D_COL_INT   : column interleaving
- * @BLEND_3D_MAX       :
- */
-enum sde_3d_blend_mode {
-	BLEND_3D_NONE = 0,
-	BLEND_3D_FRAME_INT,
-	BLEND_3D_H_ROW_INT,
-	BLEND_3D_V_ROW_INT,
-	BLEND_3D_COL_INT,
-	BLEND_3D_MAX
-};
-
-/** struct sde_format - defines the format configuration which
- * allows SDE HW to correctly fetch and decode the format
- * @base: base msm_format struture containing fourcc code
- * @fetch_planes: how the color components are packed in pixel format
- * @element: element color ordering
- * @bits: element bit widths
- * @chroma_sample: chroma sub-samplng type
- * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
- * @unpack_tight: 0 for loose, 1 for tight
- * @unpack_count: 0 = 1 component, 1 = 2 component
- * @bpp: bytes per pixel
- * @alpha_enable: whether the format has an alpha channel
- * @num_planes: number of planes (including meta data planes)
- * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
- * @is_yuv: is format a yuv variant
- * @flag: usage bit flags
- * @tile_width: format tile width
- * @tile_height: format tile height
- */
-struct sde_format {
-	struct msm_format base;
-	enum sde_plane_type fetch_planes;
-	u8 element[SDE_MAX_PLANES];
-	u8 bits[SDE_MAX_PLANES];
-	enum sde_chroma_samp_type chroma_sample;
-	u8 unpack_align_msb;
-	u8 unpack_tight;
-	u8 unpack_count;
-	u8 bpp;
-	u8 alpha_enable;
-	u8 num_planes;
-	enum sde_fetch_type fetch_mode;
-	DECLARE_BITMAP(flag, SDE_FORMAT_FLAG_BIT_MAX);
-	u16 tile_width;
-	u16 tile_height;
-};
-#define to_sde_format(x) container_of(x, struct sde_format, base)
-
-/**
- * struct sde_hw_fmt_layout - format information of the source pixel data
- * @format: pixel format parameters
- * @num_planes: number of planes (including meta data planes)
- * @width: image width
- * @height: image height
- * @total_size: total size in bytes
- * @plane_addr: address of each plane
- * @plane_size: length of each plane
- * @plane_pitch: pitch of each plane
- */
-struct sde_hw_fmt_layout {
-	const struct sde_format *format;
-	uint32_t num_planes;
-	uint32_t width;
-	uint32_t height;
-	uint32_t total_size;
-	uint32_t plane_addr[SDE_MAX_PLANES];
-	uint32_t plane_size[SDE_MAX_PLANES];
-	uint32_t plane_pitch[SDE_MAX_PLANES];
-};
-
-struct sde_rect {
-	u16 x;
-	u16 y;
-	u16 w;
-	u16 h;
-};
-
-struct sde_csc_cfg {
-	/* matrix coefficients in S15.16 format */
-	uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE];
-	uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE];
-	uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE];
-	uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE];
-	uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE];
-};
-
-/**
- * struct sde_mdss_color - mdss color description
- * color 0 : green
- * color 1 : blue
- * color 2 : red
- * color 3 : alpha
- */
-struct sde_mdss_color {
-	u32 color_0;
-	u32 color_1;
-	u32 color_2;
-	u32 color_3;
-};
-
-/*
- * Define bit masks for h/w logging.
- */
-#define SDE_DBG_MASK_NONE     (1 << 0)
-#define SDE_DBG_MASK_CDM      (1 << 1)
-#define SDE_DBG_MASK_DSPP     (1 << 2)
-#define SDE_DBG_MASK_INTF     (1 << 3)
-#define SDE_DBG_MASK_LM       (1 << 4)
-#define SDE_DBG_MASK_CTL      (1 << 5)
-#define SDE_DBG_MASK_PINGPONG (1 << 6)
-#define SDE_DBG_MASK_SSPP     (1 << 7)
-#define SDE_DBG_MASK_WB       (1 << 8)
-#define SDE_DBG_MASK_TOP      (1 << 9)
-#define SDE_DBG_MASK_VBIF     (1 << 10)
-#define SDE_DBG_MASK_DSC      (1 << 11)
-#define SDE_DBG_MASK_ROT      (1 << 12)
-#define SDE_DBG_MASK_DS       (1 << 13)
-#define SDE_DBG_MASK_REGDMA   (1 << 14)
-#define SDE_DBG_MASK_UIDLE    (1 << 15)
-#define SDE_DBG_MASK_SID      (1 << 15)
-
-/**
- * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
- * @payload: Feature specific payload.
- * @len: Length of the payload.
- * @ctl: control pointer associated with dspp/lm.
- * @last_feature: last feature that will be set.
- * @num_of_mixers: number of layer mixers for the display.
- * @mixer_info: mixer info pointer associated with lm.
- * @displayv: height of the display.
- * @displayh: width of the display.
- * @dspp[DSPP_MAX]: array of hw_dspp pointers associated with crtc.
- * @broadcast_disabled: flag indicating if broadcast should be avoided when
- *			using LUTDMA
- */
-struct sde_hw_cp_cfg {
-	void *payload;
-	u32 len;
-	void *ctl;
-	u32 last_feature;
-	u32 num_of_mixers;
-	void *mixer_info;
-	u32 displayv;
-	u32 displayh;
-	struct sde_hw_dspp *dspp[DSPP_MAX];
-	bool broadcast_disabled;
-};
-
-/**
- * struct sde_hw_dim_layer: dim layer configs
- * @flags: Flag to represent INCLUSIVE/EXCLUSIVE
- * @stage: Blending stage of dim layer
- * @color_fill: Color fill to be used for the layer
- * @rect: Dim layer coordinates
- */
-struct sde_hw_dim_layer {
-	uint32_t flags;
-	uint32_t stage;
-	struct sde_mdss_color color_fill;
-	struct sde_rect rect;
-};
-
-/**
- * struct sde_splash_mem - Struct contains splah memory info
- * @splash_buf_size: Indicates the size of the memory region
- * @splash_buf_base: Address of specific splash memory region
- * @ramdump_size: Size of ramdump buffer region
- * @ramdump_base: Address of ramdump region reserved by bootloader
- * @ref_cnt:	Tracks the map count to help in sharing splash memory
- */
-struct sde_splash_mem {
-	u32 splash_buf_size;
-	unsigned long splash_buf_base;
-	u32 ramdump_size;
-	unsigned long ramdump_base;
-	u32 ref_cnt;
-};
-
-/**
- * struct sde_sspp_index_info - Struct containing sspp identifier info
- * @sspp:	Enum value indicates sspp id
- * @is_virtual: Boolean to identify if virtual or base
- */
-struct sde_sspp_index_info {
-	enum sde_sspp sspp;
-	bool is_virtual;
-};
-
-/**
- * struct sde_splash_data - Struct contains details of resources and hw blocks
- * used in continuous splash on a specific display.
- * @cont_splash_enabled:  Stores the cont_splash status (enabled/disabled)
- * @single_flush_en: Stores if the single flush is enabled
- * @encoder:	Pointer to the drm encoder object used for this display
- * @splash:     Pointer to struct sde_splash_mem used for this display
- * @ctl_ids:	Stores the valid MDSS ctl block ids for the current mode
- * @lm_ids:	Stores the valid MDSS layer mixer block ids for the current mode
- * @dsc_ids:	Stores the valid MDSS DSC block ids for the current mode
- * @pipes:      Array of sspp info detected on this display
- * @ctl_cnt:    Stores the active number of MDSS "top" blks of the current mode
- * @lm_cnt:	Stores the active number of MDSS "LM" blks for the current mode
- * @dsc_cnt:	Stores the active number of MDSS "dsc" blks for the current mode
- * @pipe_cnt:	Stores the active number of "sspp" blks connected
- */
-struct sde_splash_display {
-	bool cont_splash_enabled;
-	bool single_flush_en;
-	struct drm_encoder *encoder;
-	struct sde_splash_mem *splash;
-	u8 ctl_ids[MAX_DATA_PATH_PER_DSIPLAY];
-	u8 lm_ids[MAX_DATA_PATH_PER_DSIPLAY];
-	u8 dsc_ids[MAX_DATA_PATH_PER_DSIPLAY];
-	struct sde_sspp_index_info pipes[MAX_DATA_PATH_PER_DSIPLAY];
-	u8 ctl_cnt;
-	u8 lm_cnt;
-	u8 dsc_cnt;
-	u8 pipe_cnt;
-};
-
-/**
- * struct sde_splash_data - Struct contains details of continuous splash
- *	for all the displays connected by probe time
- * @num_splash_regions:  Indicates number of splash memory regions from dtsi
- * @num_splash_displays: Indicates count of active displays in continuous splash
- * @splash_mem:          Array of all struct sde_splash_mem listed from dtsi
- * @splash_display:      Array of all struct sde_splash_display
- */
-struct sde_splash_data {
-	u32 num_splash_regions;
-	u32 num_splash_displays;
-	struct sde_splash_mem splash_mem[MAX_DSI_DISPLAYS];
-	struct sde_splash_display splash_display[MAX_DSI_DISPLAYS];
-};
-
-/**
- * struct sde_hw_tear_check - Struct contains parameters to configure
- *	tear-effect module. This structure is used to configure tear-check
- *	logic present either in ping-pong or in interface module.
- * @vsync_count:	Ratio of MDP VSYNC clk freq(Hz) to refresh rate divided
- *                      by no of lines
- * @sync_cfg_height:	Total vertical lines (display height - 1)
- * @vsync_init_val:	Init value to which the read pointer gets loaded at
- *                      vsync edge
- * @sync_threshold_start: Read pointer threshold start ROI for write operation
- * @sync_threshold_continue: The minimum number of lines the write pointer
- *                           needs to be above the read pointer
- * @start_pos:	The position from which the start_threshold value is added
- * @rd_ptr_irq:	The read pointer line at which interrupt has to be generated
- * @hw_vsync_mode:	Sync with external frame sync input
- */
-struct sde_hw_tear_check {
-	u32 vsync_count;
-	u32 sync_cfg_height;
-	u32 vsync_init_val;
-	u32 sync_threshold_start;
-	u32 sync_threshold_continue;
-	u32 start_pos;
-	u32 rd_ptr_irq;
-	u8 hw_vsync_mode;
-};
-
-/**
- * struct sde_hw_autorefresh - Struct contains parameters to configure
- *            auto-refresh mode for command mode panels
- * @enable:	Enalbe or disable the auto-refresh mode
- * @frame_count:	Auto-refresh frame counter at which update occurs
- */
-struct sde_hw_autorefresh {
-	bool  enable;
-	u32 frame_count;
-};
-
-/**
- * struct sde_hw_pp_vsync_info - Struct contains parameters to configure
- *        read and write pointers for command mode panels
- * @rd_ptr_init_val:	Value of rd pointer at vsync edge
- * @rd_ptr_frame_count:	num frames sent since enabling interface
- * @rd_ptr_line_count:	current line on panel (rd ptr)
- * @wr_ptr_line_count:	current line within pp fifo (wr ptr)
- */
-struct sde_hw_pp_vsync_info {
-	u32 rd_ptr_init_val;
-	u32 rd_ptr_frame_count;
-	u32 rd_ptr_line_count;
-	u32 wr_ptr_line_count;
-};
-
-#endif  /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
deleted file mode 100644
index 7488b28..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/iopoll.h>
-
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_pingpong.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define PP_TEAR_CHECK_EN                0x000
-#define PP_SYNC_CONFIG_VSYNC            0x004
-#define PP_SYNC_CONFIG_HEIGHT           0x008
-#define PP_SYNC_WRCOUNT                 0x00C
-#define PP_VSYNC_INIT_VAL               0x010
-#define PP_INT_COUNT_VAL                0x014
-#define PP_SYNC_THRESH                  0x018
-#define PP_START_POS                    0x01C
-#define PP_RD_PTR_IRQ                   0x020
-#define PP_WR_PTR_IRQ                   0x024
-#define PP_OUT_LINE_COUNT               0x028
-#define PP_LINE_COUNT                   0x02C
-#define PP_AUTOREFRESH_CONFIG           0x030
-
-#define PP_FBC_MODE                     0x034
-#define PP_FBC_BUDGET_CTL               0x038
-#define PP_FBC_LOSSY_MODE               0x03C
-#define PP_DSC_MODE                     0x0a0
-#define PP_DCE_DATA_IN_SWAP             0x0ac
-#define PP_DCE_DATA_OUT_SWAP            0x0c8
-
-#define DITHER_DEPTH_MAP_INDEX 9
-static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
-	0, 0, 0, 0, 0, 1, 2, 3, 3
-};
-
-#define MERGE_3D_MODE 0x004
-#define MERGE_3D_MUX  0x000
-
-static struct sde_merge_3d_cfg *_merge_3d_offset(enum sde_merge_3d idx,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->merge_3d_count; i++) {
-		if (idx == m->merge_3d[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->merge_3d[i].base;
-			b->length = m->merge_3d[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_PINGPONG;
-			return &m->merge_3d[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static void _sde_hw_merge_3d_setup_blend_mode(struct sde_hw_merge_3d *ctx,
-			enum sde_3d_blend_mode cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 mode = 0;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	if (cfg) {
-		mode = BIT(0);
-		mode |= (cfg - 0x1) << 1;
-	}
-
-	SDE_REG_WRITE(c, MERGE_3D_MODE, mode);
-}
-
-static void sde_hw_merge_3d_reset_blend_mode(struct sde_hw_merge_3d *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	SDE_REG_WRITE(c, MERGE_3D_MODE, 0x0);
-	SDE_REG_WRITE(c, MERGE_3D_MUX, 0x0);
-}
-
-static void _setup_merge_3d_ops(struct sde_hw_merge_3d_ops *ops,
-	const struct sde_merge_3d_cfg *hw_cap)
-{
-	ops->setup_blend_mode = _sde_hw_merge_3d_setup_blend_mode;
-	ops->reset_blend_mode = sde_hw_merge_3d_reset_blend_mode;
-}
-
-static struct sde_hw_merge_3d *_sde_pp_merge_3d_init(enum sde_merge_3d idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_merge_3d *c;
-	struct sde_merge_3d_cfg *cfg;
-	static u32 merge3d_init_mask;
-
-	if (idx < MERGE_3D_0)
-		return NULL;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _merge_3d_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		pr_err("invalid merge_3d cfg%d\n", idx);
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	c->idx = idx;
-	c->caps = cfg;
-	_setup_merge_3d_ops(&c->ops, c->caps);
-
-	if (!(merge3d_init_mask & BIT(idx))) {
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
-				c->hw.blk_off, c->hw.blk_off + c->hw.length,
-				c->hw.xin_id);
-		merge3d_init_mask |= BIT(idx);
-	}
-
-	return c;
-}
-
-static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->pingpong_count; i++) {
-		if (pp == m->pingpong[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->pingpong[i].base;
-			b->length = m->pingpong[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_PINGPONG;
-			return &m->pingpong[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
-		struct sde_hw_tear_check *te)
-{
-	struct sde_hw_blk_reg_map *c;
-	int cfg;
-
-	if (!pp || !te)
-		return -EINVAL;
-	c = &pp->hw;
-
-	cfg = BIT(19); /*VSYNC_COUNTER_EN */
-	if (te->hw_vsync_mode)
-		cfg |= BIT(20);
-
-	cfg |= te->vsync_count;
-
-	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
-	SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
-	SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
-	SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
-	SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
-	SDE_REG_WRITE(c, PP_SYNC_THRESH,
-			((te->sync_threshold_continue << 16) |
-			 te->sync_threshold_start));
-	SDE_REG_WRITE(c, PP_SYNC_WRCOUNT,
-			(te->start_pos + te->sync_threshold_start + 1));
-
-	return 0;
-}
-
-static void sde_hw_pp_update_te(struct sde_hw_pingpong *pp,
-		struct sde_hw_tear_check *te)
-{
-	struct sde_hw_blk_reg_map *c;
-	int cfg;
-
-	if (!pp || !te)
-		return;
-	c = &pp->hw;
-
-	cfg = SDE_REG_READ(c, PP_SYNC_THRESH);
-	cfg &= ~0xFFFF;
-	cfg |= te->sync_threshold_start;
-	SDE_REG_WRITE(c, PP_SYNC_THRESH, cfg);
-}
-
-static int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
-		struct sde_hw_autorefresh *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 refresh_cfg;
-
-	if (!pp || !cfg)
-		return -EINVAL;
-	c = &pp->hw;
-
-	if (cfg->enable)
-		refresh_cfg = BIT(31) | cfg->frame_count;
-	else
-		refresh_cfg = 0;
-
-	SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG, refresh_cfg);
-	SDE_EVT32(pp->idx - PINGPONG_0, refresh_cfg);
-
-	return 0;
-}
-
-static int sde_hw_pp_get_autorefresh_config(struct sde_hw_pingpong *pp,
-		struct sde_hw_autorefresh *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 val;
-
-	if (!pp || !cfg)
-		return -EINVAL;
-
-	c = &pp->hw;
-	val = SDE_REG_READ(c, PP_AUTOREFRESH_CONFIG);
-	cfg->enable = (val & BIT(31)) >> 31;
-	cfg->frame_count = val & 0xffff;
-
-	return 0;
-}
-
-static int sde_hw_pp_poll_timeout_wr_ptr(struct sde_hw_pingpong *pp,
-		u32 timeout_us)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 val;
-	int rc;
-
-	if (!pp)
-		return -EINVAL;
-
-	c = &pp->hw;
-	rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
-			val, (val & 0xffff) >= 1, 10, timeout_us);
-
-	return rc;
-}
-
-static void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!pp)
-		return;
-	c = &pp->hw;
-
-	SDE_REG_WRITE(c, PP_DSC_MODE, 1);
-}
-
-static u32 sde_hw_pp_get_dsc_status(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!pp)
-		return 0;
-
-	c = &pp->hw;
-	return SDE_REG_READ(c, PP_DSC_MODE);
-}
-
-static void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 data;
-
-	if (!pp)
-		return;
-	c = &pp->hw;
-
-	data = SDE_REG_READ(c, PP_DCE_DATA_OUT_SWAP);
-	data &= ~BIT(18); /* disable endian flip */
-	SDE_REG_WRITE(c, PP_DCE_DATA_OUT_SWAP, data);
-
-	SDE_REG_WRITE(c, PP_DSC_MODE, 0);
-}
-
-static int sde_hw_pp_setup_dsc(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	int data;
-
-	if (!pp)
-		return -EINVAL;
-	c = &pp->hw;
-
-	data = SDE_REG_READ(c, PP_DCE_DATA_OUT_SWAP);
-	data |= BIT(18); /* endian flip */
-	SDE_REG_WRITE(c, PP_DCE_DATA_OUT_SWAP, data);
-	return 0;
-}
-
-static int sde_hw_pp_setup_dither_v1(struct sde_hw_pingpong *pp,
-					void *cfg, size_t len)
-{
-	struct sde_hw_blk_reg_map *c;
-	struct drm_msm_dither *dither = (struct drm_msm_dither *)cfg;
-	u32 base = 0, offset = 0, data = 0, i = 0;
-
-	if (!pp)
-		return -EINVAL;
-
-	c = &pp->hw;
-	base = pp->caps->sblk->dither.base;
-	if (!dither) {
-		/* dither property disable case */
-		SDE_REG_WRITE(c, base, 0);
-		return 0;
-	}
-
-	if (len != sizeof(struct drm_msm_dither)) {
-		DRM_ERROR("input len %zu, expected len %zu\n", len,
-			sizeof(struct drm_msm_dither));
-		return -EINVAL;
-	}
-
-	if (dither->c0_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
-		dither->c1_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
-		dither->c2_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
-		dither->c3_bitdepth >= DITHER_DEPTH_MAP_INDEX)
-		return -EINVAL;
-
-	offset += 4;
-	data = dither_depth_map[dither->c0_bitdepth] & REG_MASK(2);
-	data |= (dither_depth_map[dither->c1_bitdepth] & REG_MASK(2)) << 2;
-	data |= (dither_depth_map[dither->c2_bitdepth] & REG_MASK(2)) << 4;
-	data |= (dither_depth_map[dither->c3_bitdepth] & REG_MASK(2)) << 6;
-	data |= (dither->temporal_en) ? (1 << 8) : 0;
-	SDE_REG_WRITE(c, base + offset, data);
-
-	for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
-		offset += 4;
-		data = (dither->matrix[i] & REG_MASK(4)) |
-			((dither->matrix[i + 1] & REG_MASK(4)) << 4) |
-			((dither->matrix[i + 2] & REG_MASK(4)) << 8) |
-			((dither->matrix[i + 3] & REG_MASK(4)) << 12);
-		SDE_REG_WRITE(c, base + offset, data);
-	}
-	SDE_REG_WRITE(c, base, 1);
-
-	return 0;
-}
-
-static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!pp)
-		return -EINVAL;
-	c = &pp->hw;
-
-	SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
-	return 0;
-}
-
-static int sde_hw_pp_connect_external_te(struct sde_hw_pingpong *pp,
-		bool enable_external_te)
-{
-	struct sde_hw_blk_reg_map *c = &pp->hw;
-	u32 cfg;
-	int orig;
-
-	if (!pp)
-		return -EINVAL;
-
-	c = &pp->hw;
-	cfg = SDE_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
-	orig = (bool)(cfg & BIT(20));
-	if (enable_external_te)
-		cfg |= BIT(20);
-	else
-		cfg &= ~BIT(20);
-	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
-	SDE_EVT32(pp->idx - PINGPONG_0, cfg);
-
-	return orig;
-}
-
-static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
-		struct sde_hw_pp_vsync_info *info)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 val;
-
-	if (!pp || !info)
-		return -EINVAL;
-	c = &pp->hw;
-
-	val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL);
-	info->rd_ptr_init_val = val & 0xffff;
-
-	val = SDE_REG_READ(c, PP_INT_COUNT_VAL);
-	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
-	info->rd_ptr_line_count = val & 0xffff;
-
-	val = SDE_REG_READ(c, PP_LINE_COUNT);
-	info->wr_ptr_line_count = val & 0xffff;
-
-	return 0;
-}
-
-static u32 sde_hw_pp_get_line_count(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c = &pp->hw;
-	u32 height, init;
-	u32 line = 0xFFFF;
-
-	if (!pp)
-		return 0;
-	c = &pp->hw;
-
-	init = SDE_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
-	height = SDE_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
-
-	if (height < init)
-		goto line_count_exit;
-
-	line = SDE_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
-
-	if (line < init)
-		line += (0xFFFF - init);
-	else
-		line -= init;
-
-line_count_exit:
-	return line;
-}
-
-static void sde_hw_pp_setup_3d_merge_mode(struct sde_hw_pingpong *pp,
-					enum sde_3d_blend_mode cfg)
-{
-	if (pp->merge_3d && pp->merge_3d->ops.setup_blend_mode)
-		pp->merge_3d->ops.setup_blend_mode(pp->merge_3d, cfg);
-}
-
-static void sde_hw_pp_reset_3d_merge_mode(struct sde_hw_pingpong *pp)
-{
-	if (pp->merge_3d && pp->merge_3d->ops.reset_blend_mode)
-		pp->merge_3d->ops.reset_blend_mode(pp->merge_3d);
-}
-static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
-	const struct sde_pingpong_cfg *hw_cap)
-{
-	u32 version = 0;
-
-	if (hw_cap->features & BIT(SDE_PINGPONG_TE)) {
-		ops->setup_tearcheck = sde_hw_pp_setup_te_config;
-		ops->enable_tearcheck = sde_hw_pp_enable_te;
-		ops->update_tearcheck = sde_hw_pp_update_te;
-		ops->connect_external_te = sde_hw_pp_connect_external_te;
-		ops->get_vsync_info = sde_hw_pp_get_vsync_info;
-		ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
-		ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
-		ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
-		ops->get_line_count = sde_hw_pp_get_line_count;
-	}
-	ops->setup_dsc = sde_hw_pp_setup_dsc;
-	ops->enable_dsc = sde_hw_pp_dsc_enable;
-	ops->disable_dsc = sde_hw_pp_dsc_disable;
-	ops->get_dsc_status = sde_hw_pp_get_dsc_status;
-
-	version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
-	switch (version) {
-	case 1:
-		ops->setup_dither = sde_hw_pp_setup_dither_v1;
-		break;
-	default:
-		ops->setup_dither = NULL;
-		break;
-	}
-	if (test_bit(SDE_PINGPONG_MERGE_3D, &hw_cap->features)) {
-		ops->setup_3d_mode = sde_hw_pp_setup_3d_merge_mode;
-		ops->reset_3d_mode = sde_hw_pp_reset_3d_merge_mode;
-	}
-};
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_pingpong *c;
-	struct sde_pingpong_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _pingpong_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	c->idx = idx;
-	c->caps = cfg;
-	if (test_bit(SDE_PINGPONG_MERGE_3D, &cfg->features)) {
-		c->merge_3d = _sde_pp_merge_3d_init(cfg->merge_3d_id, addr, m);
-			if (IS_ERR(c->merge_3d)) {
-				SDE_ERROR("invalid merge_3d block %d\n", idx);
-				return ERR_PTR(-ENOMEM);
-			}
-	}
-
-	_setup_pingpong_ops(&c->ops, c->caps);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_PINGPONG, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp)
-{
-	if (pp) {
-		sde_hw_blk_destroy(&pp->base);
-		kfree(pp->merge_3d);
-		kfree(pp);
-	}
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
deleted file mode 100644
index 9e6a72d..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_PINGPONG_H
-#define _SDE_HW_PINGPONG_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_blk.h"
-#include <uapi/drm/msm_drm_pp.h>
-
-struct sde_hw_pingpong;
-struct sde_hw_merge_3d;
-
-struct sde_hw_dsc_cfg {
-	u8 enable;
-};
-
-/**
- *
- * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions
- *  Assumption is these functions will be called after clocks are enabled
- *  @setup_tearcheck : program tear check values
- *  @enable_tearcheck : enables tear check
- *  @get_vsync_info : retries timing info of the panel
- *  @setup_autorefresh : program auto refresh
- *  @setup_dsc : program DSC block with encoding details
- *  @enable_dsc : enables DSC encoder
- *  @disable_dsc : disables DSC encoder
- *  @setup_dither : function to program the dither hw block
- *  @get_line_count: obtain current vertical line counter
- */
-struct sde_hw_pingpong_ops {
-	/**
-	 * enables vysnc generation and sets up init value of
-	 * read pointer and programs the tear check cofiguration
-	 */
-	int (*setup_tearcheck)(struct sde_hw_pingpong *pp,
-			struct sde_hw_tear_check *cfg);
-
-	/**
-	 * enables tear check block
-	 */
-	int (*enable_tearcheck)(struct sde_hw_pingpong *pp,
-			bool enable);
-
-	/**
-	 * updates tearcheck configuration
-	 */
-	void (*update_tearcheck)(struct sde_hw_pingpong *pp,
-			struct sde_hw_tear_check *cfg);
-
-	/**
-	 * read, modify, write to either set or clear listening to external TE
-	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
-	 */
-	int (*connect_external_te)(struct sde_hw_pingpong *pp,
-			bool enable_external_te);
-
-	/**
-	 * provides the programmed and current
-	 * line_count
-	 */
-	int (*get_vsync_info)(struct sde_hw_pingpong *pp,
-			struct sde_hw_pp_vsync_info  *info);
-
-	/**
-	 * configure and enable the autorefresh config
-	 */
-	int (*setup_autorefresh)(struct sde_hw_pingpong *pp,
-			struct sde_hw_autorefresh *cfg);
-
-	/**
-	 * retrieve autorefresh config from hardware
-	 */
-	int (*get_autorefresh)(struct sde_hw_pingpong *pp,
-			struct sde_hw_autorefresh *cfg);
-
-	/**
-	 * poll until write pointer transmission starts
-	 * @Return: 0 on success, -ETIMEDOUT on timeout
-	 */
-	int (*poll_timeout_wr_ptr)(struct sde_hw_pingpong *pp, u32 timeout_us);
-
-	/**
-	 * Program the dsc compression block
-	 */
-	int (*setup_dsc)(struct sde_hw_pingpong *pp);
-
-	/**
-	 * Enables DSC encoder
-	 */
-	void (*enable_dsc)(struct sde_hw_pingpong *pp);
-
-	/**
-	 * Disables DSC encoder
-	 */
-	void (*disable_dsc)(struct sde_hw_pingpong *pp);
-
-	/**
-	 * Get DSC status
-	 * @Return: register value of DSC config
-	 */
-	u32 (*get_dsc_status)(struct sde_hw_pingpong *pp);
-
-	/**
-	 * Program the dither hw block
-	 */
-	int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len);
-
-	/**
-	 * Obtain current vertical line counter
-	 */
-	u32 (*get_line_count)(struct sde_hw_pingpong *pp);
-
-	/**
-	 * Programs the 3d blend configuration
-	 */
-	void (*setup_3d_mode)(struct sde_hw_pingpong *pp,
-			enum sde_3d_blend_mode cfg);
-
-	/**
-	 * reset 3d blend configuration
-	 */
-	void (*reset_3d_mode)(struct sde_hw_pingpong *pp);
-};
-
-struct sde_hw_merge_3d_ops {
-	/**
-	 * setup the 3d blend mode configuration
-	 */
-	void (*setup_blend_mode)(struct sde_hw_merge_3d *id,
-			enum sde_3d_blend_mode cfg);
-
-	/**
-	 * reset 3d blend mode configuration
-	 */
-	void (*reset_blend_mode)(struct sde_hw_merge_3d *id);
-};
-
-struct sde_hw_merge_3d {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* merge_3d */
-	enum sde_merge_3d idx;
-	const struct sde_merge_3d_cfg *caps;
-
-	/* ops */
-	struct sde_hw_merge_3d_ops ops;
-};
-
-struct sde_hw_pingpong {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* pingpong */
-	enum sde_pingpong idx;
-	const struct sde_pingpong_cfg *caps;
-
-	/* associated 3d_merge */
-	struct sde_hw_merge_3d *merge_3d;
-
-	/* ops */
-	struct sde_hw_pingpong_ops ops;
-};
-
-/**
- * sde_hw_pingpong - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_pingpong *to_sde_hw_pingpong(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_pingpong, base);
-}
-
-/**
- * sde_hw_pingpong_init - initializes the pingpong driver for the passed
- *	pingpong idx.
- * @idx:  Pingpong index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @m:    Pointer to mdss catalog data
- * Returns: Error code or allocated sde_hw_pingpong context
- */
-struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_pingpong_destroy - destroys pingpong driver context
- *	should be called to free the context
- * @pp:   Pointer to PP driver context returned by sde_hw_pingpong_init
- */
-void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp);
-
-#endif /*_SDE_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
deleted file mode 100644
index e8911cf..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ /dev/null
@@ -1,1022 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-#include <linux/iopoll.h>
-#include "sde_hw_mdss.h"
-#include "sde_hw_ctl.h"
-#include "sde_hw_reg_dma_v1.h"
-#include "msm_drv.h"
-#include "msm_mmu.h"
-#include "sde_dbg.h"
-
-#define GUARD_BYTES (BIT(8) - 1)
-#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
-#define ADDR_ALIGN BIT(8)
-#define MAX_RELATIVE_OFF (BIT(20) - 1)
-
-#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
-#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
-	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
-	(BIT(REG_SINGLE_MODIFY)))
-
-#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
-#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)
-
-#define SET_UP_REG_DMA_REG(hw, reg_dma) \
-	do { \
-		(hw).base_off = (reg_dma)->addr; \
-		(hw).blk_off = (reg_dma)->caps->base; \
-		(hw).hwversion = (reg_dma)->caps->version; \
-		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
-} while (0)
-
-#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
-#define NOT_WORD_ALIGNED(x) ((x) & 0x3)
-
-
-#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
-#define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3)
-#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
-#define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1)
-#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
-			(cfg)->dma_buf->index)
-
-#define REL_ADDR_OPCODE (BIT(27))
-#define SINGLE_REG_WRITE_OPCODE (BIT(28))
-#define SINGLE_REG_MODIFY_OPCODE (BIT(29))
-#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
-#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
-#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
-
-#define WRAP_MIN_SIZE 2
-#define WRAP_MAX_SIZE (BIT(4) - 1)
-#define MAX_DWORDS_SZ (BIT(14) - 1)
-#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
-
-static uint32_t reg_dma_register_count;
-static uint32_t reg_dma_decode_sel;
-static uint32_t reg_dma_opmode_offset;
-static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
-static uint32_t reg_dma_intr_status_offset;
-static uint32_t reg_dma_intr_4_status_offset;
-static uint32_t reg_dma_intr_clear_offset;
-static uint32_t reg_dma_ctl_trigger_offset;
-static uint32_t reg_dma_ctl0_reset_offset;
-static uint32_t reg_dma_error_clear_mask;
-
-typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
-
-static struct sde_hw_reg_dma *reg_dma;
-static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
-	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
-	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
-	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
-	[HW_BLK_SELECT] = sizeof(u32) * 2,
-	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
-	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
-};
-
-static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
-	[DMA_CTL_QUEUE0] = BIT(0),
-	[DMA_CTL_QUEUE1] = BIT(4),
-};
-
-static u32 reg_dma_ctl_queue_off[CTL_MAX];
-static u32 dspp_read_sel[DSPP_HIST_MAX] = {
-	[DSPP0_HIST] = 0,
-	[DSPP1_HIST] = 1,
-	[DSPP2_HIST] = 2,
-	[DSPP3_HIST] = 3,
-};
-
-static u32 v1_supported[REG_DMA_FEATURES_MAX]  = {
-	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
-	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
-	[GC] = GRP_DSPP_HW_BLK_SELECT,
-	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
-	[PCC] = GRP_DSPP_HW_BLK_SELECT,
-};
-
-static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
-	[CTL_0][0] = BIT(16),
-	[CTL_0][1] = BIT(21),
-	[CTL_1][0] = BIT(17),
-	[CTL_1][1] = BIT(22),
-	[CTL_2][0] = BIT(18),
-	[CTL_2][1] = BIT(23),
-	[CTL_3][0] = BIT(19),
-	[CTL_3][1] = BIT(24),
-	[CTL_4][0] = BIT(25),
-	[CTL_4][1] = BIT(27),
-	[CTL_5][0] = BIT(26),
-	[CTL_5][1] = BIT(28),
-};
-
-static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
-static int check_support_v1(enum sde_reg_dma_features feature,
-		enum sde_reg_dma_blk blk, bool *is_supported);
-static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
-static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
-static int reset_v1(struct sde_hw_ctl *ctl);
-static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
-		enum sde_reg_dma_last_cmd_mode mode);
-static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
-static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
-static void dump_regs_v1(void);
-
-static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
-	[HW_BLK_SELECT] = write_decode_sel,
-	[REG_SINGLE_WRITE] = write_single_reg,
-	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
-	[REG_BLK_WRITE_INC] = write_multi_reg_index,
-	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
-	[REG_SINGLE_MODIFY] = write_single_modify,
-};
-
-static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
-	[HW_BLK_SELECT] = validate_write_decode_sel,
-	[REG_SINGLE_WRITE] = validate_write_reg,
-	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
-	[REG_BLK_WRITE_INC] = validate_write_reg,
-	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
-	[REG_SINGLE_MODIFY] = validate_write_reg,
-};
-
-static struct sde_reg_dma_buffer *last_cmd_buf[CTL_MAX];
-
-static void get_decode_sel(unsigned long blk, u32 *decode_sel)
-{
-	int i = 0;
-
-	*decode_sel = 0;
-	for_each_set_bit(i, &blk, 31) {
-		switch (BIT(i)) {
-		case VIG0:
-			*decode_sel |= BIT(0);
-			break;
-		case VIG1:
-			*decode_sel |= BIT(1);
-			break;
-		case VIG2:
-			*decode_sel |= BIT(2);
-			break;
-		case VIG3:
-			*decode_sel |= BIT(3);
-			break;
-		case DMA0:
-			*decode_sel |= BIT(5);
-			break;
-		case DMA1:
-			*decode_sel |= BIT(6);
-			break;
-		case DMA2:
-			*decode_sel |= BIT(7);
-			break;
-		case DMA3:
-			*decode_sel |= BIT(8);
-			break;
-		case DSPP0:
-			*decode_sel |= BIT(17);
-			break;
-		case DSPP1:
-			*decode_sel |= BIT(18);
-			break;
-		case DSPP2:
-			*decode_sel |= BIT(19);
-			break;
-		case DSPP3:
-			*decode_sel |= BIT(20);
-			break;
-		case SSPP_IGC:
-			*decode_sel |= BIT(4);
-			break;
-		case DSPP_IGC:
-			*decode_sel |= BIT(21);
-			break;
-		case LTM0:
-			*decode_sel |= BIT(22);
-			break;
-		case LTM1:
-			*decode_sel |= BIT(23);
-			break;
-		default:
-			DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
-			break;
-		}
-	}
-}
-
-static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u8 *loc = NULL;
-
-	loc =  (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
-	memcpy(loc, cfg->data, cfg->data_size);
-	cfg->dma_buf->index += cfg->data_size;
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
-	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
-
-	return 0;
-}
-
-int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
-	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
-	loc[1] = SIZE_DWORD(cfg->data_size);
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-
-	return write_multi_reg(cfg);
-}
-
-int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
-	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
-	loc[1] = SIZE_DWORD(cfg->data_size);
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-
-	return write_multi_reg(cfg);
-}
-
-static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = BLK_REG_WRITE_OPCODE;
-	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
-	loc[1] = (cfg->inc) ? 0 : BIT(31);
-	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
-	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-
-	return write_multi_reg(cfg);
-}
-
-static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = SINGLE_REG_WRITE_OPCODE;
-	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
-	loc[1] = *cfg->data;
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
-
-	return 0;
-}
-
-static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = SINGLE_REG_MODIFY_OPCODE;
-	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
-	loc[1] = cfg->mask;
-	loc[2] = *cfg->data;
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
-
-	return 0;
-}
-
-static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = reg_dma_decode_sel;
-	get_decode_sel(cfg->blk, &loc[1]);
-	cfg->dma_buf->index += ops_mem_size[cfg->ops];
-	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
-
-	return 0;
-}
-
-static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	int rc;
-
-	rc = validate_write_reg(cfg);
-	if (rc)
-		return rc;
-
-	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
-		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
-			cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 remain_len, write_len;
-
-	remain_len = BUFFER_SPACE_LEFT(cfg);
-	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
-	if (remain_len < write_len) {
-		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
-				remain_len, write_len);
-		return -EINVAL;
-	}
-
-	if (!cfg->data) {
-		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
-			cfg->data_size, write_len);
-		return -EINVAL;
-	}
-	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
-	    NOT_WORD_ALIGNED(cfg->data_size)) {
-		DRM_ERROR("Invalid data size %d max %zd align %x\n",
-			cfg->data_size, (size_t)MAX_DWORDS_SZ,
-			NOT_WORD_ALIGNED(cfg->data_size));
-		return -EINVAL;
-	}
-
-	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
-			NOT_WORD_ALIGNED(cfg->blk_offset)) {
-		DRM_ERROR("invalid offset %d max %zd align %x\n",
-				cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
-				NOT_WORD_ALIGNED(cfg->blk_offset));
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 remain_len;
-
-	remain_len = BUFFER_SPACE_LEFT(cfg);
-	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
-		DRM_ERROR("buffer is full needs %d bytes\n",
-				ops_mem_size[HW_BLK_SELECT]);
-		return -EINVAL;
-	}
-
-	if (!cfg->blk) {
-		DRM_ERROR("blk set as 0\n");
-		return -EINVAL;
-	}
-	/* VIG, DMA and DSPP can't be combined */
-	if (((cfg->blk & GRP_VIG_HW_BLK_SELECT) &&
-		(cfg->blk & GRP_DSPP_HW_BLK_SELECT)) ||
-		((cfg->blk & GRP_DMA_HW_BLK_SELECT) &&
-		(cfg->blk & GRP_DSPP_HW_BLK_SELECT)) ||
-		((cfg->blk & GRP_VIG_HW_BLK_SELECT) &&
-		(cfg->blk & GRP_DMA_HW_BLK_SELECT))) {
-		DRM_ERROR("invalid blk combination %x\n",
-			    cfg->blk);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	int rc = 0;
-	bool supported;
-
-	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
-		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
-			cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
-			((cfg) ? cfg->dma_buf : NULL));
-		return -EINVAL;
-	}
-
-	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
-	if (rc || !supported) {
-		DRM_ERROR("check support failed rc %d supported %d\n",
-				rc, supported);
-		rc = -EINVAL;
-		return rc;
-	}
-
-	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
-	    NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
-		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
-			cfg->dma_buf->index, cfg->dma_buf->buffer_size,
-			NOT_WORD_ALIGNED(cfg->dma_buf->index));
-		return -EINVAL;
-	}
-
-	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
-		DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
-				(size_t)ADDR_ALIGN, cfg->dma_buf->iova,
-				cfg->dma_buf->vaddr);
-		return -EINVAL;
-	}
-	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
-		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
-				cfg->dma_buf->next_op_allowed);
-		return -EINVAL;
-	}
-
-	if (!validate_dma_op_params[cfg->ops] ||
-	    !write_dma_op_params[cfg->ops]) {
-		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
-			validate_dma_op_params[cfg->ops],
-			write_dma_op_params[cfg->ops]);
-		return -EINVAL;
-	}
-	return rc;
-}
-
-static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
-{
-
-	if (!cfg || !cfg->ctl || !cfg->dma_buf) {
-		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK\n",
-			cfg, ((!cfg) ? NULL : cfg->ctl),
-			((!cfg) ? NULL : cfg->dma_buf));
-		return -EINVAL;
-	}
-
-	if (cfg->ctl->idx < CTL_0 && cfg->ctl->idx >= CTL_MAX) {
-		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
-		return -EINVAL;
-	}
-
-	if (cfg->op >= REG_DMA_OP_MAX) {
-		DRM_ERROR("invalid op %d\n", cfg->op);
-		return -EINVAL;
-	}
-
-	if ((cfg->op == REG_DMA_WRITE) &&
-	     (!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
-	     !(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
-		DRM_ERROR("incomplete write ops %x\n",
-				cfg->dma_buf->ops_completed);
-		return -EINVAL;
-	}
-
-	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
-		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
-		return -EINVAL;
-	}
-
-	/* Only immediate triggers are supported now hence hardcode */
-	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
-				(WRITE_TRIGGER);
-
-	if (cfg->dma_buf->iova & GUARD_BYTES) {
-		DRM_ERROR("Address is not aligned to %zx iova %llx",
-				(size_t)ADDR_ALIGN, cfg->dma_buf->iova);
-		return -EINVAL;
-	}
-
-	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
-		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
-		return -EINVAL;
-	}
-
-	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
-			!cfg->dma_buf->index) {
-		DRM_ERROR("invalid dword size %zd max %zd\n",
-			(size_t)SIZE_DWORD(cfg->dma_buf->index),
-				(size_t)MAX_DWORDS_SZ);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
-{
-	u32 cmd1, mask = 0, val = 0;
-	struct sde_hw_blk_reg_map hw;
-
-	memset(&hw, 0, sizeof(hw));
-	msm_gem_sync(cfg->dma_buf->buf);
-	cmd1 = (cfg->op == REG_DMA_READ) ?
-		(dspp_read_sel[cfg->block_select] << 30) : 0;
-	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
-	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
-	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
-	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);
-
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
-	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
-	if (val) {
-		DRM_DEBUG("LUT dma status %x\n", val);
-		mask = reg_dma_error_clear_mask;
-		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
-				mask);
-		SDE_EVT32(val);
-	}
-
-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
-			cfg->dma_buf->iova);
-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
-			cmd1);
-	if (cfg->last_command) {
-		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
-		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
-		SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
-			queue_sel[cfg->queue_select]);
-	}
-
-	return 0;
-}
-
-int init_v1(struct sde_hw_reg_dma *cfg)
-{
-	int i = 0, rc = 0;
-
-	if (!cfg)
-		return -EINVAL;
-
-	reg_dma = cfg;
-	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (!last_cmd_buf[i]) {
-			last_cmd_buf[i] =
-			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
-			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
-				/*
-				 * This will allow reg dma to fall back to
-				 * AHB domain
-				 */
-				pr_info("Failed to allocate reg dma, ret:%lu\n",
-						PTR_ERR(last_cmd_buf[i]));
-				return 0;
-			}
-		}
-	}
-	if (rc) {
-		for (i = 0; i < CTL_MAX; i++) {
-			if (!last_cmd_buf[i])
-				continue;
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-			last_cmd_buf[i] = NULL;
-		}
-		return rc;
-	}
-
-	reg_dma->ops.check_support = check_support_v1;
-	reg_dma->ops.setup_payload = setup_payload_v1;
-	reg_dma->ops.kick_off = kick_off_v1;
-	reg_dma->ops.reset = reset_v1;
-	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
-	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
-	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
-	reg_dma->ops.last_command = last_cmd_v1;
-	reg_dma->ops.dump_regs = dump_regs_v1;
-
-	reg_dma_register_count = 60;
-	reg_dma_decode_sel = 0x180ac060;
-	reg_dma_opmode_offset = 0x4;
-	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
-	reg_dma_intr_status_offset = 0x90;
-	reg_dma_intr_4_status_offset = 0xa0;
-	reg_dma_intr_clear_offset = 0xb0;
-	reg_dma_ctl_trigger_offset = 0xd4;
-	reg_dma_ctl0_reset_offset = 0xe4;
-	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);
-
-	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
-	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
-		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
-			(sizeof(u32) * 4);
-
-	return 0;
-}
-
-int init_v11(struct sde_hw_reg_dma *cfg)
-{
-	int ret = 0, i = 0;
-
-	ret = init_v1(cfg);
-	if (ret) {
-		DRM_ERROR("failed to initialize v1: ret %d\n", ret);
-		return -EINVAL;
-	}
-
-	/* initialize register offsets and v1_supported based on version */
-	reg_dma_register_count = 133;
-	reg_dma_decode_sel = 0x180ac114;
-	reg_dma_opmode_offset = 0x4;
-	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
-	reg_dma_intr_status_offset = 0x160;
-	reg_dma_intr_4_status_offset = 0x170;
-	reg_dma_intr_clear_offset = 0x1a0;
-	reg_dma_ctl_trigger_offset = 0xd4;
-	reg_dma_ctl0_reset_offset = 0x200;
-	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
-		BIT(17) | BIT(18);
-
-	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
-	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
-		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
-			(sizeof(u32) * 4);
-
-	v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
-				GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
-	v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
-	v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;
-
-	return 0;
-}
-
-int init_v12(struct sde_hw_reg_dma *cfg)
-{
-	int ret = 0;
-
-	ret = init_v11(cfg);
-	if (ret) {
-		DRM_ERROR("failed to initialize v11: ret %d\n", ret);
-		return ret;
-	}
-
-	v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
-	v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
-	v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
-
-	return 0;
-}
-
-static int check_support_v1(enum sde_reg_dma_features feature,
-		     enum sde_reg_dma_blk blk,
-		     bool *is_supported)
-{
-	int ret = 0;
-
-	if (!is_supported)
-		return -EINVAL;
-
-	if (feature >= REG_DMA_FEATURES_MAX || blk >= MDSS) {
-		*is_supported = false;
-		return ret;
-	}
-
-	*is_supported = (blk & v1_supported[feature]) ? true : false;
-	return ret;
-}
-
-static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	int rc = 0;
-
-	rc = validate_dma_cfg(cfg);
-
-	if (!rc)
-		rc = validate_dma_op_params[cfg->ops](cfg);
-
-	if (!rc)
-		rc = write_dma_op_params[cfg->ops](cfg);
-
-	return rc;
-}
-
-
-static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
-{
-	int rc = 0;
-
-	rc = validate_kick_off_v1(cfg);
-	if (rc)
-		return rc;
-
-	rc = write_kick_off_v1(cfg);
-	return rc;
-}
-
-int reset_v1(struct sde_hw_ctl *ctl)
-{
-	struct sde_hw_blk_reg_map hw;
-	u32 index, val, i = 0;
-
-	if (!ctl || ctl->idx > CTL_MAX) {
-		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
-			ctl, ((ctl) ? ctl->idx : 0));
-		return -EINVAL;
-	}
-
-	memset(&hw, 0, sizeof(hw));
-	index = ctl->idx - CTL_0;
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
-	SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset + index * sizeof(u32)),
-			BIT(0));
-
-	i = 0;
-	do {
-		udelay(1000);
-		i++;
-		val = SDE_REG_READ(&hw,
-			(reg_dma_ctl0_reset_offset + index * sizeof(u32)));
-	} while (i < 2 && val);
-
-	return 0;
-}
-
-static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
-{
-	struct sde_reg_dma_buffer *dma_buf = NULL;
-	struct msm_gem_address_space *aspace = NULL;
-	u32 iova_aligned, offset;
-	int rc;
-
-	if (!cb_data) {
-		DRM_ERROR("aspace cb called with invalid dma_buf\n");
-		return;
-	}
-
-	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
-	aspace = dma_buf->aspace;
-
-	if (is_detach) {
-		/* invalidate the stored iova */
-		dma_buf->iova = 0;
-
-		/* return the virtual address mapping */
-		msm_gem_put_vaddr(dma_buf->buf);
-		msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
-
-	} else {
-		rc = msm_gem_get_iova(dma_buf->buf, aspace,
-				&dma_buf->iova);
-		if (rc) {
-			DRM_ERROR("failed to get the iova rc %d\n", rc);
-			return;
-		}
-
-		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
-		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
-			DRM_ERROR("failed to get va rc %d\n", rc);
-			return;
-		}
-
-		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
-		offset = iova_aligned - dma_buf->iova;
-		dma_buf->iova = dma_buf->iova + offset;
-		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
-		dma_buf->next_op_allowed = DECODE_SEL_OP;
-	}
-}
-
-static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
-{
-	struct sde_reg_dma_buffer *dma_buf = NULL;
-	u32 iova_aligned, offset;
-	u32 rsize = size + GUARD_BYTES;
-	struct msm_gem_address_space *aspace = NULL;
-	int rc = 0;
-
-	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
-		DRM_ERROR("invalid buffer size %d\n", size);
-		return ERR_PTR(-EINVAL);
-	}
-
-	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
-	if (!dma_buf)
-		return ERR_PTR(-ENOMEM);
-
-	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
-				    rsize, MSM_BO_UNCACHED);
-	if (IS_ERR_OR_NULL(dma_buf->buf)) {
-		rc = -EINVAL;
-		goto fail;
-	}
-
-	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
-			MSM_SMMU_DOMAIN_UNSECURE);
-	if (!aspace) {
-		DRM_ERROR("failed to get aspace\n");
-		rc = -EINVAL;
-		goto free_gem;
-	}
-
-	/* register to aspace */
-	rc = msm_gem_address_space_register_cb(aspace,
-			sde_reg_dma_aspace_cb_locked,
-			(void *)dma_buf);
-	if (rc) {
-		DRM_ERROR("failed to register callback %d", rc);
-		goto free_gem;
-	}
-
-	dma_buf->aspace = aspace;
-	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
-	if (rc) {
-		DRM_ERROR("failed to get the iova rc %d\n", rc);
-		goto free_aspace_cb;
-	}
-
-	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
-	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
-		DRM_ERROR("failed to get va rc %d\n", rc);
-		rc = -EINVAL;
-		goto put_iova;
-	}
-
-	dma_buf->buffer_size = size;
-	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
-	offset = iova_aligned - dma_buf->iova;
-	dma_buf->iova = dma_buf->iova + offset;
-	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
-	dma_buf->next_op_allowed = DECODE_SEL_OP;
-
-	return dma_buf;
-
-put_iova:
-	msm_gem_put_iova(dma_buf->buf, aspace);
-free_aspace_cb:
-	msm_gem_address_space_unregister_cb(aspace,
-			sde_reg_dma_aspace_cb_locked, dma_buf);
-free_gem:
-	mutex_lock(&reg_dma->drm_dev->struct_mutex);
-	msm_gem_free_object(dma_buf->buf);
-	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
-fail:
-	kfree(dma_buf);
-	return ERR_PTR(rc);
-}
-
-static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
-{
-	if (!dma_buf) {
-		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
-		return -EINVAL;
-	}
-
-	if (dma_buf->buf) {
-		msm_gem_put_iova(dma_buf->buf, 0);
-		msm_gem_address_space_unregister_cb(dma_buf->aspace,
-				sde_reg_dma_aspace_cb_locked, dma_buf);
-		mutex_lock(&reg_dma->drm_dev->struct_mutex);
-		msm_gem_free_object(dma_buf->buf);
-		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
-	}
-
-	kfree(dma_buf);
-	return 0;
-}
-
-static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
-{
-	if (!lut_buf)
-		return -EINVAL;
-
-	lut_buf->index = 0;
-	lut_buf->ops_completed = 0;
-	lut_buf->next_op_allowed = DECODE_SEL_OP;
-	return 0;
-}
-
-static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 remain_len, write_len;
-
-	remain_len = BUFFER_SPACE_LEFT(cfg);
-	write_len = sizeof(u32);
-	if (remain_len < write_len) {
-		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
-				remain_len, write_len);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	u32 *loc = NULL;
-
-	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
-			cfg->dma_buf->index);
-	loc[0] = reg_dma_decode_sel;
-	loc[1] = 0;
-	cfg->dma_buf->index = sizeof(u32) * 2;
-	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
-	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
-
-	return 0;
-}
-
-static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
-		enum sde_reg_dma_last_cmd_mode mode)
-{
-	struct sde_reg_dma_setup_ops_cfg cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_blk_reg_map hw;
-	u32 val;
-	int rc;
-
-	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
-		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
-				((ctl) ? ctl->idx : -1));
-		return -EINVAL;
-	}
-
-	if (!last_cmd_buf[ctl->idx] || !last_cmd_buf[ctl->idx]->iova) {
-		DRM_DEBUG("invalid last cmd buf for idx %d\n", ctl->idx);
-		return 0;
-	}
-
-	cfg.dma_buf = last_cmd_buf[ctl->idx];
-	reset_reg_dma_buffer_v1(last_cmd_buf[ctl->idx]);
-	if (validate_last_cmd(&cfg)) {
-		DRM_ERROR("validate buf failed\n");
-		return -EINVAL;
-	}
-
-	if (write_last_cmd(&cfg)) {
-		DRM_ERROR("write buf failed\n");
-		return -EINVAL;
-	}
-
-	kick_off.ctl = ctl;
-	kick_off.queue_select = q;
-	kick_off.trigger_mode = WRITE_IMMEDIATE;
-	kick_off.last_command = 1;
-	kick_off.op = REG_DMA_WRITE;
-	kick_off.dma_buf = last_cmd_buf[ctl->idx];
-	if (kick_off_v1(&kick_off)) {
-		DRM_ERROR("kick off last cmd failed\n");
-		return -EINVAL;
-	}
-
-	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
-	if (mode == REG_DMA_WAIT4_COMP) {
-		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
-			reg_dma_intr_status_offset, val,
-			(val & ctl_trigger_done_mask[ctl->idx][q]),
-			10, 20000);
-		if (rc)
-			DRM_ERROR("poll wait failed %d val %x mask %x\n",
-			    rc, val, ctl_trigger_done_mask[ctl->idx][q]);
-		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
-	}
-
-	return 0;
-}
-
-void deinit_v1(void)
-{
-	int i = 0;
-
-	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (last_cmd_buf[i])
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-		last_cmd_buf[i] = NULL;
-	}
-}
-
-static void dump_regs_v1(void)
-{
-	uint32_t i = 0;
-	u32 val;
-	struct sde_hw_blk_reg_map hw;
-
-	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-
-	for (i = 0; i < reg_dma_register_count; i++) {
-		val = SDE_REG_READ(&hw, i * sizeof(u32));
-		DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)), val);
-	}
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
deleted file mode 100644
index e5760f8..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-#ifndef _SDE_HW_REG_DMA_V1_H
-#define _SDE_HW_REG_DMA_V1_H
-
-#include "sde_reg_dma.h"
-
-/**
- * init_v1() - initialize the reg dma v1 driver by installing v1 ops
- * @reg_dma - reg_dma hw info structure exposing capabilities.
- */
-int init_v1(struct sde_hw_reg_dma *reg_dma);
-
-/**
- * init_v11() - initialize the reg dma v11 driver by installing v11 ops
- * @reg_dma - reg_dma hw info structure exposing capabilities.
- */
-int init_v11(struct sde_hw_reg_dma *reg_dma);
-
-/**
- * init_v12() - initialize the reg dma v12 driver by installing v12 ops
- * @reg_dma - reg_dma hw info structure exposing capabilities.
- */
-int init_v12(struct sde_hw_reg_dma *reg_dma);
-
-/**
- * deinit_v1() - free up any resources allocated during the v1 reg dma init
- */
-void deinit_v1(void);
-#endif /* _SDE_HW_REG_DMA_V1_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
deleted file mode 100644
index 0c2e025c..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ /dev/null
@@ -1,3702 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-#include <drm/msm_drm_pp.h>
-#include "sde_reg_dma.h"
-#include "sde_hw_reg_dma_v1_color_proc.h"
-#include "sde_hw_color_proc_common_v4.h"
-#include "sde_hw_ctl.h"
-#include "sde_hw_sspp.h"
-#include "sde_hwio.h"
-#include "sde_hw_lm.h"
-
-/* Reserve space of 128 words for LUT dma payload set-up */
-#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
-#define REG_DMA_VIG_SWI_DIFF 0x200
-#define REG_DMA_DMA_SWI_DIFF 0x200
-
-#define VLUT_MEM_SIZE ((128 * sizeof(u32)) + REG_DMA_HEADERS_BUFFER_SZ)
-#define VLUT_LEN (128 * sizeof(u32))
-#define PA_OP_MODE_OFF 0x800
-#define PA_LUTV_OPMODE_OFF 0x84c
-#define REG_DMA_PA_MODE_HSIC_MASK 0xE1EFFFFF
-#define REG_DMA_PA_MODE_SZONE_MASK 0x1FEFFFFF
-#define REG_DMA_PA_PWL_HOLD_SZONE_MASK 0x0FFF
-#define PA_LUTV_DSPP_CTRL_OFF 0x4c
-#define PA_LUTV_DSPP_SWAP_OFF 0x18
-/**
- * the diff between LTM_INIT_ENABLE/DISABLE masks are portrait_en and
- * merge_mode bits. When disabling INIT property, we don't want to reset those
- * bits since they are needed for both LTM histogram and VLUT.
- */
-#define REG_DMA_LTM_INIT_ENABLE_OP_MASK 0xFFFC8CAB
-#define REG_DMA_LTM_INIT_DISABLE_OP_MASK 0xFFFF8CAF
-#define REG_DMA_LTM_ROI_OP_MASK 0xFEFFFFFF
-/**
- * the diff between LTM_VLUT_ENABLE/DISABLE masks are dither strength and
- * unsharp_gain bits. When disabling VLUT property, we want to reset these
- * bits since those are only valid if VLUT is enabled.
- */
-#define REG_DMA_LTM_VLUT_ENABLE_OP_MASK 0xFEFFFFAD
-#define REG_DMA_LTM_VLUT_DISABLE_OP_MASK 0xFEFF8CAD
-#define REG_DMA_LTM_UPDATE_REQ_MASK 0xFFFFFFFE
-
-#define GAMUT_LUT_MEM_SIZE ((sizeof(struct drm_msm_3d_gamut)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-#define GAMUT_SCALE_OFF_LEN (GAMUT_3D_SCALE_OFF_SZ * sizeof(u32))
-#define GAMUT_SCALE_OFF_LEN_12 (GAMUT_3D_SCALEB_OFF_SZ * sizeof(u32))
-
-#define GC_LUT_MEM_SIZE ((sizeof(struct drm_msm_pgc_lut)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-
-#define IGC_LUT_MEM_SIZE ((sizeof(struct drm_msm_igc_lut)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-
-#define PCC_LUT_ENTRIES (PCC_NUM_PLANES * PCC_NUM_COEFF)
-#define PCC_LEN (PCC_LUT_ENTRIES * sizeof(u32))
-#define PCC_MEM_SIZE (PCC_LEN + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-
-#define HSIC_MEM_SIZE ((sizeof(struct drm_msm_pa_hsic)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-#define SIXZONE_MEM_SIZE ((sizeof(struct drm_msm_sixzone)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-#define MEMCOLOR_MEM_SIZE ((sizeof(struct drm_msm_memcol)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-
-#define QSEED3_MEM_SIZE (sizeof(struct sde_hw_scaler3_cfg) + \
-			(450 * sizeof(u32)) + \
-			REG_DMA_HEADERS_BUFFER_SZ)
-#define LTM_INIT_MEM_SIZE ((sizeof(struct drm_msm_ltm_init_param)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-#define LTM_ROI_MEM_SIZE ((sizeof(struct drm_msm_ltm_cfg_param)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-#define LTM_VLUT_MEM_SIZE ((sizeof(struct drm_msm_ltm_data)) + \
-		REG_DMA_HEADERS_BUFFER_SZ)
-
-#define REG_MASK(n) ((BIT(n)) - 1)
-#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
-#define REG_DMA_VIG_GAMUT_OP_MASK 0x300
-#define REG_DMA_VIG_IGC_OP_MASK 0x1001F
-#define DMA_DGM_0_OP_MODE_OFF 0x604
-#define DMA_DGM_1_OP_MODE_OFF 0x1604
-#define REG_DMA_DMA_IGC_OP_MASK 0x10005
-#define REG_DMA_DMA_GC_OP_MASK 0x10003
-#define DMA_1D_LUT_IGC_LEN 128
-#define DMA_1D_LUT_GC_LEN 128
-#define DMA_1D_LUT_IGC_DITHER_OFF 0x408
-#define VIG_1D_LUT_IGC_LEN 128
-#define VIG_IGC_DATA_MASK (BIT(10) - 1)
-#define VIG_IGC_DATA_MASK_V6 (BIT(12) - 1)
-
-/* SDE_SCALER_QSEED3 */
-#define QSEED3_DE_OFFSET                       0x24
-#define QSEED3_COEF_LUT_SWAP_BIT           0
-#define QSEED3_COEF_LUT_CTRL_OFF               0x4C
-
-/* SDE_SCALER_QSEED3LITE */
-#define QSEED3L_COEF_LUT_SWAP_BIT          0
-#define QSEED3L_COEF_LUT_Y_SEP_BIT         4
-#define QSEED3L_COEF_LUT_UV_SEP_BIT        5
-#define QSEED3L_COEF_LUT_CTRL_OFF              0x4c
-#define Y_INDEX                            0
-#define UV_INDEX                           1
-
-enum ltm_vlut_ops_bitmask {
-	ltm_unsharp = BIT(0),
-	ltm_dither = BIT(1),
-	ltm_roi = BIT(2),
-	ltm_vlut = BIT(3),
-	ltm_ops_max = BIT(31),
-};
-
-static u32 ltm_vlut_ops_mask[LTM_MAX];
-
-static struct sde_reg_dma_buffer *dspp_buf[REG_DMA_FEATURES_MAX][DSPP_MAX];
-static struct sde_reg_dma_buffer
-	*sspp_buf[SDE_SSPP_RECT_MAX][REG_DMA_FEATURES_MAX][SSPP_MAX];
-static struct sde_reg_dma_buffer *ltm_buf[REG_DMA_FEATURES_MAX][LTM_MAX];
-
-static u32 feature_map[SDE_DSPP_MAX] = {
-	[SDE_DSPP_VLUT] = VLUT,
-	[SDE_DSPP_GAMUT] = GAMUT,
-	[SDE_DSPP_IGC] = IGC,
-	[SDE_DSPP_PCC] = PCC,
-	[SDE_DSPP_GC] = GC,
-	[SDE_DSPP_HSIC] = HSIC,
-	/* MEMCOLOR can be mapped to any MEMC_SKIN/SKY/FOLIAGE/PROT*/
-	[SDE_DSPP_MEMCOLOR] = MEMC_SKIN,
-	[SDE_DSPP_SIXZONE] = SIX_ZONE,
-	[SDE_DSPP_DITHER] = REG_DMA_FEATURES_MAX,
-	[SDE_DSPP_HIST] = REG_DMA_FEATURES_MAX,
-	[SDE_DSPP_AD] = REG_DMA_FEATURES_MAX,
-};
-
-static u32 sspp_feature_map[SDE_SSPP_MAX] = {
-	[SDE_SSPP_VIG_IGC] = IGC,
-	[SDE_SSPP_VIG_GAMUT] = GAMUT,
-	[SDE_SSPP_DMA_IGC] = IGC,
-	[SDE_SSPP_DMA_GC] = GC,
-	[SDE_SSPP_SCALER_QSEED3] = QSEED,
-	[SDE_SSPP_SCALER_QSEED3LITE] = REG_DMA_FEATURES_MAX,
-};
-
-static u32 ltm_feature_map[SDE_LTM_MAX] = {
-	[SDE_LTM_INIT] = LTM_INIT,
-	[SDE_LTM_ROI] = LTM_ROI,
-	[SDE_LTM_VLUT] = LTM_VLUT,
-};
-
-static u32 feature_reg_dma_sz[SDE_DSPP_MAX] = {
-	[SDE_DSPP_VLUT] = VLUT_MEM_SIZE,
-	[SDE_DSPP_GAMUT] = GAMUT_LUT_MEM_SIZE,
-	[SDE_DSPP_GC] = GC_LUT_MEM_SIZE,
-	[SDE_DSPP_IGC] = IGC_LUT_MEM_SIZE,
-	[SDE_DSPP_PCC] = PCC_MEM_SIZE,
-	[SDE_DSPP_HSIC] = HSIC_MEM_SIZE,
-	[SDE_DSPP_SIXZONE] = SIXZONE_MEM_SIZE,
-	[SDE_DSPP_MEMCOLOR] = MEMCOLOR_MEM_SIZE,
-};
-
-static u32 sspp_feature_reg_dma_sz[SDE_SSPP_MAX] = {
-	[SDE_SSPP_VIG_IGC] = IGC_LUT_MEM_SIZE,
-	[SDE_SSPP_VIG_GAMUT] = GAMUT_LUT_MEM_SIZE,
-	[SDE_SSPP_DMA_IGC] = IGC_LUT_MEM_SIZE,
-	[SDE_SSPP_DMA_GC] = GC_LUT_MEM_SIZE,
-	[SDE_SSPP_SCALER_QSEED3] = QSEED3_MEM_SIZE,
-};
-
-static u32 ltm_feature_reg_dma_sz[SDE_LTM_MAX] = {
-	[SDE_LTM_INIT] = LTM_INIT_MEM_SIZE,
-	[SDE_LTM_ROI] = LTM_ROI_MEM_SIZE,
-	[SDE_LTM_VLUT] = LTM_VLUT_MEM_SIZE,
-};
-
-static u32 dspp_mapping[DSPP_MAX] = {
-	[DSPP_0] = DSPP0,
-	[DSPP_1] = DSPP1,
-	[DSPP_2] = DSPP2,
-	[DSPP_3] = DSPP3,
-};
-
-static u32 sspp_mapping[SSPP_MAX] = {
-	[SSPP_VIG0] = VIG0,
-	[SSPP_VIG1] = VIG1,
-	[SSPP_VIG2] = VIG2,
-	[SSPP_VIG3] = VIG3,
-	[SSPP_DMA0] = DMA0,
-	[SSPP_DMA1] = DMA1,
-	[SSPP_DMA2] = DMA2,
-	[SSPP_DMA3] = DMA3,
-};
-
-static u32 ltm_mapping[LTM_MAX] = {
-	[LTM_0] = LTM0,
-	[LTM_1] = LTM1,
-};
-
-#define REG_DMA_INIT_OPS(cfg, block, reg_dma_feature, feature_dma_buf) \
-	do { \
-		memset(&cfg, 0, sizeof(cfg)); \
-		(cfg).blk = block; \
-		(cfg).feature = reg_dma_feature; \
-		(cfg).dma_buf = feature_dma_buf; \
-	} while (0)
-
-#define REG_DMA_SETUP_OPS(cfg, block_off, data_ptr, data_len, op, \
-		wrap_sz, wrap_inc, reg_mask) \
-	do { \
-		(cfg).ops = op; \
-		(cfg).blk_offset = block_off; \
-		(cfg).data_size = data_len; \
-		(cfg).data = data_ptr; \
-		(cfg).inc = wrap_inc; \
-		(cfg).wrap_size = wrap_sz; \
-		(cfg).mask = reg_mask; \
-	} while (0)
-
-#define REG_DMA_SETUP_KICKOFF(cfg, hw_ctl, feature_dma_buf, ops, ctl_q, \
-		mode) \
-	do { \
-		memset(&cfg, 0, sizeof(cfg)); \
-		(cfg).ctl = hw_ctl; \
-		(cfg).dma_buf = feature_dma_buf; \
-		(cfg).op = ops; \
-		(cfg).queue_select = ctl_q; \
-		(cfg).trigger_mode = mode; \
-	} while (0)
-
-static int reg_dma_buf_init(struct sde_reg_dma_buffer **buf, u32 sz);
-static int reg_dma_dspp_check(struct sde_hw_dspp *ctx, void *cfg,
-		enum sde_reg_dma_features feature);
-static int reg_dma_sspp_check(struct sde_hw_pipe *ctx, void *cfg,
-		enum sde_reg_dma_features feature,
-		enum sde_sspp_multirect_index idx);
-static int reg_dma_ltm_check(struct sde_hw_dspp *ctx, void *cfg,
-		enum sde_reg_dma_features feature);
-
-static int reg_dma_buf_init(struct sde_reg_dma_buffer **buf, u32 size)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	if (!buf) {
-		DRM_ERROR("invalid buf\n");
-		return -EINVAL;
-	}
-
-	/* buffer already initialized */
-	if (*buf)
-		return 0;
-
-	*buf = dma_ops->alloc_reg_dma_buf(size);
-	if (IS_ERR_OR_NULL(*buf))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int reg_dma_dspp_check(struct sde_hw_dspp *ctx, void *cfg,
-		enum sde_reg_dma_features feature)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-
-	if (!cfg || !ctx) {
-		DRM_ERROR("invalid cfg %pK ctx %pK\n", cfg, ctx);
-		return -EINVAL;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -EINVAL;
-
-	if (!hw_cfg->ctl || ctx->idx >= DSPP_MAX ||
-		feature >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid ctl %pK dspp idx %d feature %d\n",
-			hw_cfg->ctl, ctx->idx, feature);
-		return -EINVAL;
-	}
-
-	if (!dspp_buf[feature][ctx->idx]) {
-		DRM_ERROR("invalid dma_buf\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int reg_dmav1_init_dspp_op_v4(int feature, enum sde_dspp idx)
-{
-	int rc = -ENOTSUPP;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	bool is_supported = false;
-	u32 blk;
-
-	if (feature >= SDE_DSPP_MAX || idx >= DSPP_MAX) {
-		DRM_ERROR("invalid feature %x max %x dspp idx %x max %xd\n",
-			feature, SDE_DSPP_MAX, idx, DSPP_MAX);
-		return rc;
-	}
-
-	if (feature_map[feature] >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid feature map %d for feature %d\n",
-			feature_map[feature], feature);
-		return -ENOTSUPP;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	blk = (feature_map[feature] == IGC) ? DSPP_IGC : dspp_mapping[idx];
-	rc = dma_ops->check_support(feature_map[feature], blk, &is_supported);
-	if (!rc)
-		rc = (is_supported) ? 0 : -ENOTSUPP;
-
-	if (!rc) {
-		if (feature == SDE_DSPP_MEMCOLOR) {
-			rc = reg_dma_buf_init(
-				&dspp_buf[MEMC_SKIN][idx],
-				feature_reg_dma_sz[feature]);
-			if (rc)
-				return rc;
-			rc = reg_dma_buf_init(
-				&dspp_buf[MEMC_SKY][idx],
-				feature_reg_dma_sz[feature]);
-			if (rc)
-				return rc;
-			rc = reg_dma_buf_init(
-				&dspp_buf[MEMC_FOLIAGE][idx],
-				feature_reg_dma_sz[feature]);
-			if (rc)
-				return rc;
-			rc = reg_dma_buf_init(
-				&dspp_buf[MEMC_PROT][idx],
-				feature_reg_dma_sz[feature]);
-		} else {
-			rc = reg_dma_buf_init(
-				&dspp_buf[feature_map[feature]][idx],
-				feature_reg_dma_sz[feature]);
-		}
-
-	}
-	return rc;
-}
-
-static int reg_dmav1_get_dspp_blk(struct sde_hw_cp_cfg *hw_cfg,
-		enum sde_dspp curr_dspp, u32 *blk, u32 *num_of_mixers)
-{
-	struct sde_hw_dspp *dspp;
-	int rc = 0;
-
-	*num_of_mixers = 0;
-
-	if (hw_cfg == NULL) {
-		DRM_ERROR("Invalid sde_hw_cp_cfg structure provided\n");
-		return -EINVAL;
-	}
-
-	if (hw_cfg->dspp == NULL) {
-		DRM_ERROR("Invalid sde_hw_dspp structure provided in hw_cfg\n");
-		return -EINVAL;
-	}
-
-	if (blk == NULL) {
-		DRM_ERROR("Invalid payload provided\n");
-		return -EINVAL;
-	}
-
-	if (curr_dspp >= DSPP_MAX) {
-		DRM_ERROR("Invalid current dspp idx %d", curr_dspp);
-		return -EINVAL;
-	}
-
-	/* Treat first dspp as master to simplify setup */
-	dspp = hw_cfg->dspp[0];
-	if (hw_cfg->broadcast_disabled) {
-		*blk = dspp_mapping[curr_dspp];
-		(*num_of_mixers)++;
-	} else if (curr_dspp != dspp->idx) {
-		DRM_DEBUG_DRIVER("Slave DSPP instance %d\n", dspp->idx);
-		rc = -EALREADY;
-	} else {
-		u32 i;
-
-		for (i = 0 ; i < hw_cfg->num_of_mixers; i++) {
-			dspp = hw_cfg->dspp[i];
-			if (dspp->idx >= DSPP_MAX) {
-				DRM_ERROR("Invalid dspp idx %d", dspp->idx);
-				rc = -EINVAL;
-				break;
-			}
-			*blk |= dspp_mapping[dspp->idx];
-			(*num_of_mixers)++;
-		}
-	}
-
-	if (!rc && !blk) {
-		rc = -EINVAL;
-		*num_of_mixers = 0;
-	}
-
-	return rc;
-}
-
-void reg_dmav1_setup_dspp_vlutv18(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pa_vlut *payload = NULL;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_hw_ctl *ctl = NULL;
-	struct sde_hw_dspp *dspp_list[DSPP_MAX];
-	u32 *data = NULL;
-	int i, j, rc = 0;
-	u32 index, num_of_mixers, blk = 0;
-
-	rc = reg_dma_dspp_check(ctx, cfg, VLUT);
-	if (rc)
-		return;
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	} else if (num_of_mixers > DSPP_MAX) {
-		DRM_ERROR("unable to process more than %d DSPP blocks\n",
-			DSPP_MAX);
-		return;
-	} else if (num_of_mixers > 1) {
-		memcpy(dspp_list, hw_cfg->dspp,
-			sizeof(struct sde_hw_dspp *) * num_of_mixers);
-	} else {
-		dspp_list[0] = ctx;
-	}
-
-	ctl = hw_cfg->ctl;
-	if (!hw_cfg->payload) {
-		struct sde_hw_dspp *dspp;
-
-		DRM_DEBUG_DRIVER("Disable vlut feature\n");
-		for (index = 0; index < num_of_mixers; index++) {
-			dspp = hw_cfg->dspp[index];
-			SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->hist.base +
-					PA_LUTV_DSPP_CTRL_OFF, 0);
-		}
-		goto exit;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pa_vlut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pa_vlut));
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[VLUT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, VLUT, dspp_buf[VLUT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	data = kzalloc(VLUT_LEN, GFP_KERNEL);
-	if (!data)
-		return;
-
-	payload = hw_cfg->payload;
-	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
-	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j++)
-		data[j] = (payload->val[i] & REG_MASK(10)) |
-		((payload->val[i + 1] & REG_MASK(10)) << 16);
-
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, ctx->cap->sblk->vlut.base, data,
-			VLUT_LEN, REG_BLK_WRITE_SINGLE, 0, 0, 0);
-
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write pa vlut failed ret %d\n", rc);
-		goto exit;
-	}
-
-	i = 1;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->hist.base + PA_LUTV_DSPP_CTRL_OFF, &i,
-		sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
-		goto exit;
-	}
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->hist.base + PA_LUTV_DSPP_SWAP_OFF, &i,
-		sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
-		goto exit;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[VLUT][ctx->idx],
-	    REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		goto exit;
-	}
-
-exit:
-	kfree(data);
-	/* update flush bit */
-	if (!rc && ctl && ctl->ops.update_bitmask_dspp_pavlut) {
-		int dspp_idx;
-
-		for (index = 0; index < num_of_mixers; index++) {
-			dspp_idx = dspp_list[index]->idx;
-			ctl->ops.update_bitmask_dspp_pavlut(ctl, dspp_idx,
-				true);
-		}
-	}
-}
-
-static int sde_gamut_get_mode_info(u32 pipe, struct drm_msm_3d_gamut *payload,
-		u32 *tbl_len, u32 *tbl_off, u32 *opcode, u32 *scale_off)
-{
-	int rc = 0;
-
-	if (payload->mode > GAMUT_3D_MODE_13) {
-		DRM_ERROR("invalid mode %d", payload->mode);
-		return -EINVAL;
-	}
-
-	switch (payload->mode) {
-	case GAMUT_3D_MODE_17:
-		*tbl_len = GAMUT_3D_MODE17_TBL_SZ * sizeof(u32) * 2;
-		*tbl_off = 0;
-		if (pipe == DSPP) {
-			*scale_off = GAMUT_SCALEA_OFFSET_OFF;
-			*opcode = gamut_mode_17;
-		} else {
-			*opcode = (*opcode & (BIT(5) - 1)) >> 2;
-			if (*opcode == gamut_mode_17b)
-				*opcode = gamut_mode_17;
-			else
-				*opcode = gamut_mode_17b;
-			*scale_off = (*opcode == gamut_mode_17) ?
-				GAMUT_SCALEA_OFFSET_OFF :
-				GAMUT_SCALEB_OFFSET_OFF;
-		}
-		*opcode <<= 2;
-		break;
-	case GAMUT_3D_MODE_5:
-		*tbl_len = GAMUT_3D_MODE5_TBL_SZ * sizeof(u32) * 2;
-		*tbl_off = GAMUT_MODE_5_OFF;
-		*scale_off = GAMUT_SCALEB_OFFSET_OFF;
-		*opcode = gamut_mode_5 << 2;
-		break;
-	case GAMUT_3D_MODE_13:
-		*tbl_len = GAMUT_3D_MODE13_TBL_SZ * sizeof(u32) * 2;
-		*opcode = (*opcode & (BIT(4) - 1)) >> 2;
-		if (*opcode == gamut_mode_13a)
-			*opcode = gamut_mode_13b;
-		else
-			*opcode = gamut_mode_13a;
-		*tbl_off = (*opcode == gamut_mode_13a) ? 0 :
-			GAMUT_MODE_13B_OFF;
-		*scale_off = (*opcode == gamut_mode_13a) ?
-			GAMUT_SCALEA_OFFSET_OFF : GAMUT_SCALEB_OFFSET_OFF;
-		*opcode <<= 2;
-		break;
-	default:
-		rc = -EINVAL;
-		break;
-	}
-	if (payload->flags & GAMUT_3D_MAP_EN)
-		*opcode |= GAMUT_MAP_EN;
-	*opcode |= GAMUT_EN;
-
-	return rc;
-}
-
-static void dspp_3d_gamutv4_off(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 op_mode = 0;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	int rc;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[GAMUT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, GAMUT, dspp_buf[GAMUT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->gamut.base,
-		&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-static void reg_dmav1_setup_dspp_3d_gamutv4_common(struct sde_hw_dspp *ctx,
-		void *cfg, u32 scale_tbl_a_len, u32 scale_tbl_b_len)
-{
-	struct drm_msm_3d_gamut *payload;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 op_mode, reg, tbl_len, tbl_off, scale_off, i;
-	u32 scale_tbl_len, scale_tbl_off;
-	u32 *scale_data;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	int rc;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dma_dspp_check(ctx, cfg, GAMUT);
-	if (rc)
-		return;
-
-	op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut.base);
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable gamut feature\n");
-		dspp_3d_gamutv4_off(ctx, cfg);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_3d_gamut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_3d_gamut));
-		return;
-	}
-	payload = hw_cfg->payload;
-	rc = sde_gamut_get_mode_info(DSPP, payload, &tbl_len, &tbl_off,
-			&op_mode, &scale_off);
-	if (rc) {
-		DRM_ERROR("invalid mode info rc %d\n", rc);
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[GAMUT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, GAMUT, dspp_buf[GAMUT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-	for (i = 0; i < GAMUT_3D_TBL_NUM; i++) {
-		reg = GAMUT_TABLE0_SEL << i;
-		reg |= ((tbl_off) & (BIT(11) - 1));
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->gamut.base + GAMUT_TABLE_SEL_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write tbl sel reg failed ret %d\n", rc);
-			return;
-		}
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-		    ctx->cap->sblk->gamut.base + GAMUT_LOWER_COLOR_OFF,
-		    &payload->col[i][0].c2_c1, tbl_len,
-		    REG_BLK_WRITE_MULTIPLE, 2, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write color reg failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	if (op_mode & GAMUT_MAP_EN) {
-		if (scale_off == GAMUT_SCALEA_OFFSET_OFF)
-			scale_tbl_len = scale_tbl_a_len;
-		else
-			scale_tbl_len = scale_tbl_b_len;
-
-		for (i = 0; i < GAMUT_3D_SCALE_OFF_TBL_NUM; i++) {
-			scale_tbl_off = ctx->cap->sblk->gamut.base + scale_off +
-					(i * scale_tbl_len);
-			scale_data = &payload->scale_off[i][0];
-			REG_DMA_SETUP_OPS(dma_write_cfg, scale_tbl_off,
-					scale_data, scale_tbl_len,
-					REG_BLK_WRITE_SINGLE, 0, 0, 0);
-			rc = dma_ops->setup_payload(&dma_write_cfg);
-			if (rc) {
-				DRM_ERROR("write scale/off reg failed ret %d\n",
-						rc);
-				return;
-			}
-		}
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->gamut.base,
-		&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg)
-{
-	reg_dmav1_setup_dspp_3d_gamutv4_common(ctx, cfg, GAMUT_SCALE_OFF_LEN,
-		GAMUT_SCALE_OFF_LEN_12);
-}
-
-void reg_dmav1_setup_dspp_3d_gamutv41(struct sde_hw_dspp *ctx, void *cfg)
-{
-	reg_dmav1_setup_dspp_3d_gamutv4_common(ctx, cfg, GAMUT_SCALE_OFF_LEN,
-		GAMUT_SCALE_OFF_LEN);
-}
-
-void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pgc_lut *lut_cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	int rc, i = 0;
-	u32 reg;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dma_dspp_check(ctx, cfg, GC);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable pgc feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, 0);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pgc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pgc_lut));
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	lut_cfg = hw_cfg->payload;
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[GC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, GC, dspp_buf[GC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	for (i = 0; i < GC_TBL_NUM; i++) {
-		reg = 0;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->gc.base + GC_C0_INDEX_OFF +
-			(i * sizeof(u32) * 2),
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("index init failed ret %d\n", rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->gc.base + GC_C0_OFF +
-			(i * sizeof(u32) * 2),
-			lut_cfg->c0 + (ARRAY_SIZE(lut_cfg->c0) * i),
-			PGC_TBL_LEN * sizeof(u32),
-			REG_BLK_WRITE_INC, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	reg = BIT(0);
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->gc.base + GC_LUT_SWAP_OFF,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting swap offset failed ret %d\n", rc);
-		return;
-	}
-
-	reg = GC_EN | ((lut_cfg->flags & PGC_8B_ROUND) ? GC_8B_ROUND_EN : 0);
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->gc.base,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("enabling gamma correction failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-static void _dspp_igcv31_off(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	int rc;
-	u32 reg;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, IGC, dspp_buf[IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	reg = IGC_DIS;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_igc_lut *lut_cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_hw_dspp *dspp_list[DSPP_MAX];
-	int rc, i = 0, j = 0;
-	u32 *addr = NULL;
-	u32 offset = 0;
-	u32 reg;
-	u32 index, num_of_mixers, dspp_sel, blk = 0;
-
-	rc = reg_dma_dspp_check(ctx, cfg, IGC);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable igc feature\n");
-		_dspp_igcv31_off(ctx, cfg);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_igc_lut));
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	} else if (num_of_mixers > DSPP_MAX) {
-		DRM_ERROR("unable to process more than %d DSPP blocks\n",
-			DSPP_MAX);
-		return;
-	} else if (num_of_mixers > 1) {
-		memcpy(dspp_list, hw_cfg->dspp,
-			sizeof(struct sde_hw_dspp *) * num_of_mixers);
-	} else {
-		dspp_list[0] = ctx;
-	}
-
-	lut_cfg = hw_cfg->payload;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, DSPP_IGC, IGC, dspp_buf[IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	dspp_sel = -1;
-	for (index = 0; index < num_of_mixers; index++)
-		dspp_sel &= IGC_DSPP_SEL_MASK(dspp_list[index]->idx - 1);
-
-	for (i = 0; i < IGC_TBL_NUM; i++) {
-		addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
-		offset = IGC_C0_OFF + (i * sizeof(u32));
-
-		for (j = 0; j < IGC_TBL_LEN; j++) {
-			addr[j] &= IGC_DATA_MASK;
-			addr[j] |= dspp_sel;
-			if (j == 0)
-				addr[j] |= IGC_INDEX_UPDATE;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, offset, addr,
-			IGC_TBL_LEN * sizeof(u32),
-			REG_BLK_WRITE_INC, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, IGC, dspp_buf[IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (lut_cfg->flags & IGC_DITHER_ENABLE) {
-		reg = lut_cfg->strength & IGC_DITHER_DATA_MASK;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->igc.base + IGC_DITHER_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("dither strength failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	reg = IGC_EN;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-static void _dspp_pccv4_off(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	int rc;
-	u32 reg;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, PCC, dspp_buf[PCC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	reg = PCC_DIS;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->pcc.base,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct drm_msm_pcc *pcc_cfg;
-	struct drm_msm_pcc_coeff *coeffs = NULL;
-	u32 *data = NULL;
-	int rc, i = 0;
-	u32 reg = 0;
-	u32 num_of_mixers, blk = 0;
-
-	rc = reg_dma_dspp_check(ctx, cfg, PCC);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable pcc feature\n");
-		_dspp_pccv4_off(ctx, cfg);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pcc));
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	}
-
-	pcc_cfg = hw_cfg->payload;
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, PCC, dspp_buf[PCC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	data = kzalloc(PCC_LEN, GFP_KERNEL);
-	if (!data)
-		return;
-
-	for (i = 0; i < PCC_NUM_PLANES; i++) {
-		switch (i) {
-		case 0:
-			coeffs = &pcc_cfg->r;
-			data[i + 24] = pcc_cfg->r_rr;
-			data[i + 27] = pcc_cfg->r_gg;
-			data[i + 30] = pcc_cfg->r_bb;
-			break;
-		case 1:
-			coeffs = &pcc_cfg->g;
-			data[i + 24] = pcc_cfg->g_rr;
-			data[i + 27] = pcc_cfg->g_gg;
-			data[i + 30] = pcc_cfg->g_bb;
-			break;
-		case 2:
-			coeffs = &pcc_cfg->b;
-			data[i + 24] = pcc_cfg->b_rr;
-			data[i + 27] = pcc_cfg->b_gg;
-			data[i + 30] = pcc_cfg->b_bb;
-			break;
-		default:
-			DRM_ERROR("invalid pcc plane: %d\n", i);
-			goto exit;
-		}
-
-		data[i] = coeffs->c;
-		data[i + 3] = coeffs->r;
-		data[i + 6] = coeffs->g;
-		data[i + 9] = coeffs->b;
-		data[i + 12] = coeffs->rg;
-		data[i + 15] = coeffs->rb;
-		data[i + 18] = coeffs->gb;
-		data[i + 21] = coeffs->rgb;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->pcc.base + PCC_C_OFF,
-		data, PCC_LEN,
-		REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write pcc lut failed ret %d\n", rc);
-		goto exit;
-	}
-
-	reg = PCC_EN;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->pcc.base,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		goto exit;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-
-exit:
-	kfree(data);
-}
-
-void reg_dmav1_setup_dspp_pa_hsicv17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct drm_msm_pa_hsic *hsic_cfg;
-	struct sde_hw_dspp *dspp_list[DSPP_MAX];
-	u32 reg = 0, opcode = 0, local_opcode = 0;
-	int rc, i;
-	u32 num_of_mixers, blk = 0;
-
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	rc = reg_dma_dspp_check(ctx, cfg, HSIC);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable pa hsic feature\n");
-		opcode &= ~(PA_HUE_EN | PA_SAT_EN | PA_VAL_EN | PA_CONT_EN);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pa_hsic)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pa_hsic));
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	} else if (num_of_mixers > DSPP_MAX) {
-		DRM_ERROR("unable to process more than %d DSPP blocks\n",
-			DSPP_MAX);
-		return;
-	} else if (num_of_mixers > 1) {
-		memcpy(dspp_list, hw_cfg->dspp,
-			sizeof(struct sde_hw_dspp *) * num_of_mixers);
-	} else {
-		dspp_list[0] = ctx;
-	}
-
-	hsic_cfg = hw_cfg->payload;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[HSIC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, HSIC, dspp_buf[HSIC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (hsic_cfg->flags & PA_HSIC_HUE_ENABLE) {
-		reg = hsic_cfg->hue & PA_HUE_MASK;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base + PA_HUE_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("hsic hue write failed ret %d\n", rc);
-			return;
-		}
-		local_opcode |= PA_HUE_EN;
-	}
-
-	if (hsic_cfg->flags & PA_HSIC_SAT_ENABLE) {
-		reg = hsic_cfg->saturation & PA_SAT_MASK;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base + PA_SAT_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("hsic saturation write failed ret %d\n", rc);
-			return;
-		}
-		local_opcode |= PA_SAT_EN;
-	}
-
-	if (hsic_cfg->flags & PA_HSIC_VAL_ENABLE) {
-		reg = hsic_cfg->value & PA_VAL_MASK;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base + PA_VAL_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("hsic value write failed ret %d\n", rc);
-			return;
-		}
-		local_opcode |= PA_VAL_EN;
-	}
-
-	if (hsic_cfg->flags & PA_HSIC_CONT_ENABLE) {
-		reg = hsic_cfg->contrast & PA_CONT_MASK;
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base + PA_CONT_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("hsic contrast write failed ret %d\n", rc);
-			return;
-		}
-		local_opcode |= PA_CONT_EN;
-	}
-
-	if (local_opcode) {
-		local_opcode |= PA_EN;
-	} else {
-		DRM_ERROR("Invalid hsic config 0x%x\n", local_opcode);
-		return;
-	}
-
-	for (i = 0; i < num_of_mixers; i++) {
-		blk = dspp_mapping[dspp_list[i]->idx];
-		REG_DMA_INIT_OPS(dma_write_cfg, blk, HSIC,
-			dspp_buf[HSIC][ctx->idx]);
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT,
-			0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base, &local_opcode,
-			sizeof(local_opcode), REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_PA_MODE_HSIC_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("setting opcode failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[HSIC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dspp_sixzonev17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct drm_msm_sixzone *sixzone;
-	struct sde_hw_dspp *dspp_list[DSPP_MAX];
-	u32 reg = 0, local_hold = 0;
-	u32 opcode = 0, local_opcode = 0;
-	u32 num_of_mixers, blk = 0;
-	int rc, i;
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	rc = reg_dma_dspp_check(ctx, cfg, SIX_ZONE);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable sixzone feature\n");
-		opcode &= ~(PA_SIXZONE_HUE_EN | PA_SIXZONE_SAT_EN |
-			PA_SIXZONE_VAL_EN);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_sixzone)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_sixzone));
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	} else if (num_of_mixers > DSPP_MAX) {
-		DRM_ERROR("unable to process more than %d DSPP blocks\n",
-			DSPP_MAX);
-		return;
-	} else if (num_of_mixers > 1) {
-		memcpy(dspp_list, hw_cfg->dspp,
-			sizeof(struct sde_hw_dspp *) * num_of_mixers);
-	} else {
-		dspp_list[0] = ctx;
-	}
-
-	sixzone = hw_cfg->payload;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[SIX_ZONE][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, SIX_ZONE,
-		dspp_buf[SIX_ZONE][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	reg = BIT(26);
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->sixzone.base,
-		&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting lut index failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-	    (ctx->cap->sblk->sixzone.base + SIXZONE_ADJ_CURVE_P1_OFF),
-		&sixzone->curve[0].p1, (SIXZONE_LUT_SIZE * sizeof(u32) * 2),
-		REG_BLK_WRITE_MULTIPLE, 2, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write sixzone lut failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->sixzone.base + SIXZONE_THRESHOLDS_OFF,
-		&sixzone->threshold, 3 * sizeof(u32),
-		REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write sixzone threshold failed ret %d\n", rc);
-		return;
-	}
-
-	local_hold = ((sixzone->sat_hold & REG_MASK(2)) << 12);
-	local_hold |= ((sixzone->val_hold & REG_MASK(2)) << 14);
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->hsic.base + PA_PWL_HOLD_OFF, &local_hold,
-		sizeof(local_hold), REG_SINGLE_MODIFY, 0, 0,
-		REG_DMA_PA_PWL_HOLD_SZONE_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting local_hold failed ret %d\n", rc);
-		return;
-	}
-
-	if (sixzone->flags & SIXZONE_HUE_ENABLE)
-		local_opcode |= PA_SIXZONE_HUE_EN;
-	if (sixzone->flags & SIXZONE_SAT_ENABLE)
-		local_opcode |= PA_SIXZONE_SAT_EN;
-	if (sixzone->flags & SIXZONE_VAL_ENABLE)
-		local_opcode |= PA_SIXZONE_VAL_EN;
-
-	if (local_opcode) {
-		local_opcode |= PA_EN;
-	} else {
-		DRM_ERROR("Invalid six zone config 0x%x\n", local_opcode);
-		return;
-	}
-
-	for (i = 0; i < num_of_mixers; i++) {
-		blk = dspp_mapping[dspp_list[i]->idx];
-		REG_DMA_INIT_OPS(dma_write_cfg, blk, SIX_ZONE,
-			dspp_buf[SIX_ZONE][ctx->idx]);
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT,
-			0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base, &local_opcode,
-			sizeof(local_opcode), REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_PA_MODE_SZONE_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("setting local_opcode failed ret %d\n", rc);
-			return;
-		}
-	}
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-		dspp_buf[SIX_ZONE][ctx->idx],
-		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-int reg_dmav1_deinit_dspp_ops(enum sde_dspp idx)
-{
-	int i;
-	struct sde_hw_reg_dma_ops *dma_ops;
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	if (idx >= DSPP_MAX) {
-		DRM_ERROR("invalid dspp idx %x max %xd\n", idx, DSPP_MAX);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < REG_DMA_FEATURES_MAX; i++) {
-		if (!dspp_buf[i][idx])
-			continue;
-		dma_ops->dealloc_reg_dma(dspp_buf[i][idx]);
-		dspp_buf[i][idx] = NULL;
-	}
-	return 0;
-}
-
-static void __setup_dspp_memcol(struct sde_hw_dspp *ctx,
-		enum sde_reg_dma_features type,
-		struct sde_hw_cp_cfg *hw_cfg)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct drm_msm_memcol *memcolor;
-	struct sde_hw_dspp *dspp_list[DSPP_MAX];
-	int rc, i;
-	u32 addr = 0, idx = 0;
-	u32 hold = 0, hold_shift = 0, mask = 0xFFFF;
-	u32 opcode = 0, opcode_mask = 0xFFFFFFFF;
-	u32 num_of_mixers, blk = 0;
-
-	switch (type) {
-	case MEMC_SKIN:
-		idx = 0;
-		opcode |= PA_SKIN_EN;
-		break;
-	case MEMC_SKY:
-		idx = 1;
-		opcode |= PA_SKY_EN;
-		break;
-	case MEMC_FOLIAGE:
-		idx = 2;
-		opcode |= PA_FOL_EN;
-		break;
-	default:
-		DRM_ERROR("Invalid memory color type %d\n", type);
-		return;
-	}
-
-	rc = reg_dmav1_get_dspp_blk(hw_cfg, ctx->idx, &blk,
-		&num_of_mixers);
-	if (rc == -EINVAL) {
-		DRM_ERROR("unable to determine LUTDMA DSPP blocks\n");
-		return;
-	} else if (rc == -EALREADY) {
-		return;
-	} else if (num_of_mixers > DSPP_MAX) {
-		DRM_ERROR("unable to process more than %d DSPP blocks\n",
-			DSPP_MAX);
-		return;
-	} else if (num_of_mixers > 1) {
-		memcpy(dspp_list, hw_cfg->dspp,
-			sizeof(struct sde_hw_dspp *) * num_of_mixers);
-	} else {
-		dspp_list[0] = ctx;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[type][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, type, dspp_buf[type][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-	addr = ctx->cap->sblk->memcolor.base + MEMCOL_PWL0_OFF +
-		(idx * MEMCOL_SIZE0);
-	/* write color_adjust_p0 and color_adjust_p1 */
-	REG_DMA_SETUP_OPS(dma_write_cfg, addr, &memcolor->color_adjust_p0,
-		sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting color_adjust_p0 failed ret %d\n", rc);
-		return;
-	}
-
-	/* write hue/sat/val region */
-	addr += 8;
-	REG_DMA_SETUP_OPS(dma_write_cfg, addr, &memcolor->hue_region,
-		sizeof(u32) * 3, REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting color_adjust_p0 failed ret %d\n", rc);
-		return;
-	}
-
-	addr = ctx->cap->sblk->memcolor.base + MEMCOL_PWL2_OFF +
-		(idx * MEMCOL_SIZE1);
-	/* write color_adjust_p2 and blend_gain */
-	REG_DMA_SETUP_OPS(dma_write_cfg, addr, &memcolor->color_adjust_p2,
-		sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting color_adjust_p0 failed ret %d\n", rc);
-		return;
-	}
-
-	addr = ctx->cap->sblk->hsic.base + PA_PWL_HOLD_OFF;
-	hold_shift = idx * MEMCOL_HOLD_SIZE;
-	hold = ((memcolor->sat_hold & REG_MASK(2)) << hold_shift);
-	hold |= ((memcolor->val_hold & REG_MASK(2)) << (hold_shift + 2));
-	mask &= ~REG_MASK_SHIFT(4, hold_shift);
-	opcode |= PA_EN;
-	opcode_mask &= ~(opcode);
-
-	/* write sat_hold and val_hold in PA_PWL_HOLD */
-	for (i = 0; i < num_of_mixers; i++) {
-		blk = dspp_mapping[dspp_list[i]->idx];
-		REG_DMA_INIT_OPS(dma_write_cfg, blk, type,
-			dspp_buf[type][ctx->idx]);
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT,
-			0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, addr, &hold, sizeof(hold),
-			REG_SINGLE_MODIFY, 0, 0, mask);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("setting color_adjust_p0 failed ret %d\n",
-				rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			ctx->cap->sblk->hsic.base, &opcode, sizeof(opcode),
-			REG_SINGLE_MODIFY, 0, 0, opcode_mask);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("setting opcode failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-		dspp_buf[type][ctx->idx],
-		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dspp_memcol_skinv17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 opcode = 0;
-	int rc;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	rc = reg_dma_dspp_check(ctx, cfg, MEMC_SKIN);
-	if (rc)
-		return;
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor skin feature\n");
-		opcode &= ~(PA_SKIN_EN);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	__setup_dspp_memcol(ctx, MEMC_SKIN, hw_cfg);
-}
-
-void reg_dmav1_setup_dspp_memcol_skyv17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 opcode = 0;
-	int rc;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	rc = reg_dma_dspp_check(ctx, cfg, MEMC_SKY);
-	if (rc)
-		return;
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor sky feature\n");
-		opcode &= ~(PA_SKY_EN);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	__setup_dspp_memcol(ctx, MEMC_SKY, hw_cfg);
-}
-
-void reg_dmav1_setup_dspp_memcol_folv17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 opcode = 0;
-	int rc;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	rc = reg_dma_dspp_check(ctx, cfg, MEMC_FOLIAGE);
-	if (rc)
-		return;
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor foliage feature\n");
-		opcode &= ~(PA_FOL_EN);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	__setup_dspp_memcol(ctx, MEMC_FOLIAGE, hw_cfg);
-}
-
-void reg_dmav1_setup_dspp_memcol_protv17(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct drm_msm_memcol *memcolor;
-	int rc;
-	u32 opcode = 0, opcode_mask = 0xFFFFFFFF;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
-		return;
-	}
-
-	rc = reg_dma_dspp_check(ctx, cfg, MEMC_PROT);
-	if (rc)
-		return;
-
-	opcode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->hsic.base);
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable memcolor prot feature\n");
-		opcode &= ~(MEMCOL_PROT_MASK);
-		if (PA_DISABLE_REQUIRED(opcode))
-			opcode &= ~PA_EN;
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->hsic.base, opcode);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_memcol)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_memcol));
-		return;
-	}
-
-	memcolor = hw_cfg->payload;
-	opcode = 0;
-	if (memcolor->prot_flags) {
-		if (memcolor->prot_flags & MEMCOL_PROT_HUE)
-			opcode |= MEMCOL_PROT_HUE_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_SAT)
-			opcode |= MEMCOL_PROT_SAT_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_VAL)
-			opcode |= MEMCOL_PROT_VAL_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_CONT)
-			opcode |= MEMCOL_PROT_CONT_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_SIXZONE)
-			opcode |= MEMCOL_PROT_SIXZONE_EN;
-		if (memcolor->prot_flags & MEMCOL_PROT_BLEND)
-			opcode |= MEMCOL_PROT_BLEND_EN;
-	}
-
-	if (!opcode) {
-		DRM_ERROR("Invalid memcolor prot config 0x%x\n", opcode);
-		return;
-	}
-	opcode |= PA_EN;
-	opcode_mask &= ~(MEMCOL_PROT_MASK);
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(dspp_buf[MEMC_PROT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx],
-		MEMC_PROT, dspp_buf[MEMC_PROT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		ctx->cap->sblk->hsic.base, &opcode, sizeof(opcode),
-		REG_SINGLE_MODIFY, 0, 0, opcode_mask);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			dspp_buf[MEMC_PROT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-int reg_dmav1_init_sspp_op_v4(int feature, enum sde_sspp idx)
-{
-	int rc = -ENOTSUPP;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	bool is_supported = false;
-	u32 blk, i = 0;
-
-	if (feature >= SDE_SSPP_MAX || idx >= SSPP_MAX) {
-		DRM_ERROR("invalid feature %x max %x sspp idx %x max %xd\n",
-			feature, SDE_SSPP_MAX, idx, SSPP_MAX);
-		return rc;
-	}
-
-	if (sspp_feature_map[feature] >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid feature map %d for feature %d\n",
-			sspp_feature_map[feature], feature);
-		return -ENOTSUPP;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	blk = sspp_mapping[idx];
-	rc = dma_ops->check_support(sspp_feature_map[feature], blk,
-				    &is_supported);
-	if (!rc)
-		rc = (is_supported) ? 0 : -ENOTSUPP;
-
-	if (!rc) {
-		for (i = SDE_SSPP_RECT_SOLO; i < SDE_SSPP_RECT_MAX; i++) {
-			rc = reg_dma_buf_init(
-				&sspp_buf[i][sspp_feature_map[feature]][idx],
-				sspp_feature_reg_dma_sz[feature]);
-			if (rc) {
-				DRM_ERROR("rect %d buf init failed\n", i);
-				break;
-			}
-		}
-
-	}
-
-	return rc;
-}
-
-static int reg_dma_sspp_check(struct sde_hw_pipe *ctx, void *cfg,
-		enum sde_reg_dma_features feature,
-		enum sde_sspp_multirect_index idx)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-
-	if (!cfg || !ctx) {
-		DRM_ERROR("invalid cfg %pK ctx %pK\n", cfg, ctx);
-		return -EINVAL;
-	}
-
-	if (idx >= SDE_SSPP_RECT_MAX) {
-		DRM_ERROR("invalid multirect idx %d\n", idx);
-		return -EINVAL;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -EINVAL;
-
-	if (!hw_cfg->ctl || ctx->idx > SSPP_DMA3 || ctx->idx <= SSPP_NONE ||
-		feature >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid ctl %pK sspp idx %d feature %d\n",
-			hw_cfg->ctl, ctx->idx, feature);
-		return -EINVAL;
-	}
-
-	if (!sspp_buf[idx][feature][ctx->idx]) {
-		DRM_ERROR("invalid dma_buf for rect idx %d sspp idx %d\n", idx,
-			ctx->idx);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void vig_gamutv5_off(struct sde_hw_pipe *ctx, void *cfg)
-{
-	int rc;
-	u32 op_mode = 0;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 gamut_base = ctx->cap->sblk->gamut_blk.base - REG_DMA_VIG_SWI_DIFF;
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][GAMUT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], GAMUT,
-			sspp_buf[idx][GAMUT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, gamut_base,
-		&op_mode, sizeof(op_mode), REG_SINGLE_MODIFY, 0, 0,
-		REG_DMA_VIG_GAMUT_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode modify single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_vig_gamutv5(struct sde_hw_pipe *ctx, void *cfg)
-{
-	int rc;
-	u32 i, op_mode, reg, tbl_len, tbl_off, scale_off, scale_tbl_off;
-	u32 *scale_data;
-	struct drm_msm_3d_gamut *payload;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 gamut_base = ctx->cap->sblk->gamut_blk.base - REG_DMA_VIG_SWI_DIFF;
-	bool use_2nd_memory = false;
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-
-	rc = reg_dma_sspp_check(ctx, cfg, GAMUT, idx);
-	if (rc)
-		return;
-
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("disable gamut feature\n");
-		/* v5 and v6 call the same off version */
-		vig_gamutv5_off(ctx, cfg);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_3d_gamut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_3d_gamut));
-		return;
-	}
-	op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut_blk.base);
-	payload = hw_cfg->payload;
-	rc = sde_gamut_get_mode_info(SSPP, payload, &tbl_len, &tbl_off,
-			&op_mode, &scale_off);
-	if (rc) {
-		DRM_ERROR("invalid mode info rc %d\n", rc);
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][GAMUT][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], GAMUT,
-			sspp_buf[idx][GAMUT][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-	if ((op_mode & (BIT(5) - 1)) >> 2 == gamut_mode_17b)
-		use_2nd_memory = true;
-	for (i = 0; i < GAMUT_3D_TBL_NUM; i++) {
-		reg = GAMUT_TABLE0_SEL << i;
-		reg |= ((tbl_off) & (BIT(11) - 1));
-		/* when bit 11 equals to 1, 2nd memory will be in use */
-		if (use_2nd_memory)
-			reg |= BIT(11);
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-			gamut_base + GAMUT_TABLE_SEL_OFF,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write tbl sel reg failed ret %d\n", rc);
-			return;
-		}
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-		    gamut_base + GAMUT_LOWER_COLOR_OFF,
-		    &payload->col[i][0].c2_c1, tbl_len,
-		    REG_BLK_WRITE_MULTIPLE, 2, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write color reg failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	if (op_mode & GAMUT_MAP_EN) {
-		for (i = 0; i < GAMUT_3D_SCALE_OFF_TBL_NUM; i++) {
-			scale_tbl_off = gamut_base + scale_off +
-					(i * GAMUT_SCALE_OFF_LEN);
-			scale_data = &payload->scale_off[i][0];
-			REG_DMA_SETUP_OPS(dma_write_cfg, scale_tbl_off,
-					scale_data, GAMUT_SCALE_OFF_LEN,
-					REG_BLK_WRITE_SINGLE, 0, 0, 0);
-			rc = dma_ops->setup_payload(&dma_write_cfg);
-			if (rc) {
-				DRM_ERROR("write scale/off reg failed ret %d\n",
-						rc);
-				return;
-			}
-		}
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, gamut_base,
-		&op_mode, sizeof(op_mode), REG_SINGLE_MODIFY, 0, 0,
-		REG_DMA_VIG_GAMUT_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_vig_gamutv6(struct sde_hw_pipe *ctx, void *cfg)
-{
-	reg_dmav1_setup_vig_gamutv5(ctx, cfg);
-}
-
-static void vig_igcv5_off(struct sde_hw_pipe *ctx, void *cfg)
-{
-	int rc;
-	u32 op_mode = 0;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 igc_base = ctx->cap->sblk->igc_blk[0].base - REG_DMA_VIG_SWI_DIFF;
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], IGC,
-			sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_base, &op_mode, sizeof(op_mode),
-		REG_SINGLE_MODIFY, 0, 0, REG_DMA_VIG_IGC_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode modify single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-static int reg_dmav1_setup_vig_igc_common(struct sde_hw_reg_dma_ops *dma_ops,
-				struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
-				struct sde_hw_pipe *ctx,
-				struct sde_hw_cp_cfg *hw_cfg, u32 mask,
-				struct drm_msm_igc_lut *igc_lut)
-{
-	int rc = 0;
-	u32 i = 0, j = 0, reg = 0, index = 0;
-	u32 offset = 0;
-	u32 lut_sel = 0, lut_enable = 0;
-	u32 *data = NULL, *data_ptr = NULL;
-	u32 igc_base = ctx->cap->sblk->igc_blk[0].base - REG_DMA_VIG_SWI_DIFF;
-
-	if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_igc_lut));
-	}
-
-	data = kzalloc(VIG_1D_LUT_IGC_LEN * sizeof(u32), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	reg = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->igc_blk[0].base);
-	lut_enable = (reg >> 8) & BIT(0);
-	lut_sel = (reg >> 9) & BIT(0);
-	/* select LUT table (0 or 1) when 1D LUT is in active mode */
-	if (lut_enable)
-		lut_sel = (~lut_sel) && BIT(0);
-
-	for (i = 0; i < IGC_TBL_NUM; i++) {
-		/* write 0 to the index register */
-		index = 0;
-		REG_DMA_SETUP_OPS(*dma_write_cfg, igc_base + 0x1B0,
-			&index, sizeof(index), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("VIG IGC index write failed ret %d\n", rc);
-			goto exit;
-		}
-
-		offset = igc_base + 0x1B4 + i * sizeof(u32);
-		data_ptr = igc_lut->c0 + (ARRAY_SIZE(igc_lut->c0) * i);
-		for (j = 0; j < VIG_1D_LUT_IGC_LEN; j++)
-			data[j] = (data_ptr[2 * j] & mask) |
-				(data_ptr[2 * j + 1] & mask) << 16;
-
-		REG_DMA_SETUP_OPS(*dma_write_cfg, offset, data,
-				VIG_1D_LUT_IGC_LEN * sizeof(u32),
-				REG_BLK_WRITE_INC, 0, 0, 0);
-		rc = dma_ops->setup_payload(dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			goto exit;
-		}
-	}
-
-	if (igc_lut->flags & IGC_DITHER_ENABLE) {
-		reg = igc_lut->strength & IGC_DITHER_DATA_MASK;
-		reg |= BIT(4);
-	} else {
-		reg = 0;
-	}
-	REG_DMA_SETUP_OPS(*dma_write_cfg, igc_base + 0x1C0,
-			&reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("dither strength failed ret %d\n", rc);
-		goto exit;
-	}
-
-	reg = BIT(8) | (lut_sel << 9);
-	REG_DMA_SETUP_OPS(*dma_write_cfg, igc_base, &reg, sizeof(reg),
-		REG_SINGLE_MODIFY, 0, 0, REG_DMA_VIG_IGC_OP_MASK);
-	rc = dma_ops->setup_payload(dma_write_cfg);
-	if (rc)
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-exit:
-	kfree(data);
-	return rc;
-}
-
-void reg_dmav1_setup_vig_igcv5(struct sde_hw_pipe *ctx, void *cfg)
-{
-	int rc;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct drm_msm_igc_lut *igc_lut;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-
-	rc = reg_dma_sspp_check(ctx, hw_cfg, IGC, idx);
-	if (rc)
-		return;
-
-	igc_lut = hw_cfg->payload;
-	if (!igc_lut) {
-		DRM_DEBUG_DRIVER("disable igc feature\n");
-		vig_igcv5_off(ctx, hw_cfg);
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], IGC,
-			sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	rc = reg_dmav1_setup_vig_igc_common(dma_ops, &dma_write_cfg,
-			ctx, cfg, VIG_IGC_DATA_MASK, igc_lut);
-	if (rc) {
-		DRM_ERROR("setup_vig_igc_common failed\n");
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_vig_igcv6(struct sde_hw_pipe *ctx, void *cfg)
-{
-	int rc;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 igc_base = ctx->cap->sblk->igc_blk[0].base - REG_DMA_VIG_SWI_DIFF;
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-	struct drm_msm_igc_lut *igc_lut;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-
-	rc = reg_dma_sspp_check(ctx, hw_cfg, IGC, idx);
-	if (rc)
-		return;
-
-	igc_lut = hw_cfg->payload;
-	if (!igc_lut) {
-		DRM_DEBUG_DRIVER("disable igc feature\n");
-		/* Both v5 and v6 call same igcv5_off */
-		vig_igcv5_off(ctx, hw_cfg);
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], IGC,
-			sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	rc = reg_dmav1_setup_vig_igc_common(dma_ops, &dma_write_cfg,
-			ctx, cfg, VIG_IGC_DATA_MASK_V6, igc_lut);
-	if (rc) {
-		DRM_ERROR("setup_vig_igcv6 failed\n");
-		return;
-	}
-
-	/* Perform LAST_LUT required for v6*/
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_base + 0x1C4, &igc_lut->c0_last,
-		sizeof(u32) * 3, REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("c_last failed ret %d", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-static void dma_igcv5_off(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx)
-{
-	int rc;
-	u32 op_mode = 0;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 igc_opmode_off;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], IGC,
-			sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (idx == SDE_SSPP_RECT_SOLO || idx == SDE_SSPP_RECT_0)
-		igc_opmode_off = DMA_DGM_0_OP_MODE_OFF;
-	else
-		igc_opmode_off = DMA_DGM_1_OP_MODE_OFF;
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_opmode_off, &op_mode,
-			sizeof(op_mode), REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_DMA_IGC_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode modify single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dma_igcv5(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx)
-{
-	int rc;
-	u32 i = 0, reg = 0;
-	u32 *data = NULL;
-	struct drm_msm_igc_lut *igc_lut;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 igc_base, igc_dither_off, igc_opmode_off;
-
-	rc = reg_dma_sspp_check(ctx, cfg, IGC, idx);
-	if (rc)
-		return;
-
-	igc_lut = hw_cfg->payload;
-	if (!igc_lut) {
-		DRM_DEBUG_DRIVER("disable igc feature\n");
-		dma_igcv5_off(ctx, cfg, idx);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_igc_lut));
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], IGC,
-			sspp_buf[idx][IGC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	data = kzalloc(DMA_1D_LUT_IGC_LEN * sizeof(u32), GFP_KERNEL);
-	if (!data) {
-		DRM_ERROR("failed to allocate memory for igc\n");
-		return;
-	}
-
-	/* client packs the 1D LUT data in c2 instead of c0 */
-	for (i = 0; i < DMA_1D_LUT_IGC_LEN; i++)
-		data[i] = (igc_lut->c2[2 * i] & IGC_DATA_MASK) |
-			((igc_lut->c2[2 * i + 1] & IGC_DATA_MASK) << 16);
-
-	if (idx == SDE_SSPP_RECT_SOLO || idx == SDE_SSPP_RECT_0) {
-		igc_base = ctx->cap->sblk->igc_blk[0].base -
-				REG_DMA_DMA_SWI_DIFF;
-		igc_dither_off = igc_base + DMA_1D_LUT_IGC_DITHER_OFF;
-		igc_opmode_off = DMA_DGM_0_OP_MODE_OFF;
-	} else {
-		igc_base = ctx->cap->sblk->igc_blk[1].base -
-				REG_DMA_DMA_SWI_DIFF;
-		igc_dither_off = igc_base + DMA_1D_LUT_IGC_DITHER_OFF;
-		igc_opmode_off = DMA_DGM_1_OP_MODE_OFF;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_base, data,
-			DMA_1D_LUT_IGC_LEN * sizeof(u32),
-			REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("lut write failed ret %d\n", rc);
-		goto igc_exit;
-	}
-	if (igc_lut->flags & IGC_DITHER_ENABLE) {
-		reg = igc_lut->strength & IGC_DITHER_DATA_MASK;
-		reg |= BIT(4);
-	} else {
-		reg = 0;
-	}
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_dither_off, &reg,
-			sizeof(reg), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("failed to set dither strength %d\n", rc);
-		goto igc_exit;
-	}
-
-	reg = BIT(1);
-	REG_DMA_SETUP_OPS(dma_write_cfg, igc_opmode_off, &reg, sizeof(reg),
-			REG_SINGLE_MODIFY, 0, 0, REG_DMA_DMA_IGC_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		goto igc_exit;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-igc_exit:
-	kfree(data);
-}
-
-static void dma_gcv5_off(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx)
-{
-	int rc;
-	u32 op_mode = 0;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 gc_opmode_off;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][GC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], GC,
-			sspp_buf[idx][GC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (idx == SDE_SSPP_RECT_SOLO || idx == SDE_SSPP_RECT_0)
-		gc_opmode_off = DMA_DGM_0_OP_MODE_OFF;
-	else
-		gc_opmode_off = DMA_DGM_1_OP_MODE_OFF;
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, gc_opmode_off, &op_mode,
-			sizeof(op_mode), REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_DMA_GC_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("opmode modify single reg failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][GC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-void reg_dmav1_setup_dma_gcv5(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx)
-{
-	int rc;
-	u32 reg = 0;
-	struct drm_msm_pgc_lut *gc_lut;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	u32 gc_base, gc_opmode_off;
-
-	rc = reg_dma_sspp_check(ctx, cfg, GC, idx);
-	if (rc)
-		return;
-
-	gc_lut = hw_cfg->payload;
-	if (!gc_lut) {
-		DRM_DEBUG_DRIVER("disable gc feature\n");
-		dma_gcv5_off(ctx, cfg, idx);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_pgc_lut)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_pgc_lut));
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][GC][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], GC,
-			sspp_buf[idx][GC][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (idx == SDE_SSPP_RECT_SOLO || idx == SDE_SSPP_RECT_0) {
-		gc_base = ctx->cap->sblk->gc_blk[0].base - REG_DMA_DMA_SWI_DIFF;
-		gc_opmode_off = DMA_DGM_0_OP_MODE_OFF;
-	} else {
-		gc_base = ctx->cap->sblk->gc_blk[1].base - REG_DMA_DMA_SWI_DIFF;
-		gc_opmode_off = DMA_DGM_1_OP_MODE_OFF;
-	}
-
-	/* client packs the 1D LUT data in c2 instead of c0,
-	 * and even & odd values are already stacked in register foramt
-	 */
-	REG_DMA_SETUP_OPS(dma_write_cfg, gc_base, gc_lut->c2,
-			DMA_1D_LUT_GC_LEN * sizeof(u32),
-			REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("lut write failed ret %d\n", rc);
-		return;
-	}
-	reg = BIT(2);
-	REG_DMA_SETUP_OPS(dma_write_cfg, gc_opmode_off, &reg,
-			sizeof(reg), REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_DMA_GC_OP_MASK);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opcode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			sspp_buf[idx][GC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-}
-
-int reg_dmav1_deinit_sspp_ops(enum sde_sspp idx)
-{
-	u32 i, j;
-	struct sde_hw_reg_dma_ops *dma_ops;
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	if (idx >= SSPP_MAX) {
-		DRM_ERROR("invalid sspp idx %x max %x\n", idx, SSPP_MAX);
-		return -EINVAL;
-	}
-
-	for (i = SDE_SSPP_RECT_SOLO; i < SDE_SSPP_RECT_MAX; i++) {
-		for (j = 0; j < REG_DMA_FEATURES_MAX; j++) {
-			if (!sspp_buf[i][j][idx])
-				continue;
-			dma_ops->dealloc_reg_dma(sspp_buf[i][j][idx]);
-			sspp_buf[i][j][idx] = NULL;
-		}
-	}
-	return 0;
-}
-
-void reg_dmav1_setup_scaler3_lut(struct sde_reg_dma_setup_ops_cfg *buf,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset)
-{
-	int i, filter, rc;
-	int config_lut = 0x0;
-	unsigned long lut_flags;
-	u32 lut_addr, lut_offset, lut_len;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
-	static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
-		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
-		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
-		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
-		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
-		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
-	};
-
-	dma_ops = sde_reg_dma_get_ops();
-	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
-	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
-		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
-		lut[0] = scaler3_cfg->dir_lut;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[1] = scaler3_cfg->cir_lut +
-			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[2] = scaler3_cfg->cir_lut +
-			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[3] = scaler3_cfg->sep_lut +
-			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[4] = scaler3_cfg->sep_lut +
-			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-
-	for (filter = 0; filter < QSEED3_FILTERS && config_lut; filter++) {
-		if (!lut[filter])
-			continue;
-		lut_offset = 0;
-		for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
-			lut_addr = QSEED3_COEF_LUT_OFF + offset
-				+ off_tbl[filter][i][1];
-			lut_len = off_tbl[filter][i][0] << 2;
-			REG_DMA_SETUP_OPS(*buf, lut_addr,
-				&lut[filter][0], lut_len * sizeof(u32),
-				REG_BLK_WRITE_SINGLE, 0, 0, 0);
-			rc = dma_ops->setup_payload(buf);
-			if (rc) {
-				DRM_ERROR("lut write failed ret %d\n", rc);
-				return;
-			}
-		}
-	}
-
-	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags)) {
-		i = BIT(0);
-		REG_DMA_SETUP_OPS(*buf, QSEED3_COEF_LUT_CTRL_OFF + offset, &i,
-				sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(buf);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-}
-
-void reg_dmav1_setup_scaler3lite_lut(
-		struct sde_reg_dma_setup_ops_cfg *buf,
-			struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset)
-{
-	int i, filter, rc;
-	int config_lut = 0x0;
-	unsigned long lut_flags;
-	u32 lut_addr, lut_offset;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
-	static const uint32_t off_tbl[QSEED3LITE_FILTERS] = {0x000, 0x200};
-
-	/* destination scaler case */
-	if (!scaler3_cfg->sep_lut)
-		return;
-
-	dma_ops = sde_reg_dma_get_ops();
-	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
-	if (test_bit(QSEED3L_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3L_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3L_SEP_LUT_SIZE)) {
-		lut[Y_INDEX] = scaler3_cfg->sep_lut +
-			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3L_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3L_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_sep_lut_idx < QSEED3L_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3L_SEP_LUT_SIZE)) {
-		lut[UV_INDEX] = scaler3_cfg->sep_lut +
-			scaler3_cfg->uv_sep_lut_idx * QSEED3L_LUT_SIZE;
-		config_lut = 1;
-	}
-
-	for (filter = 0; filter < QSEED3LITE_FILTERS && config_lut; filter++) {
-		if (!lut[filter])
-			continue;
-		lut_offset = 0;
-		lut_addr = QSEED3L_COEF_LUT_OFF + offset
-			+ off_tbl[filter];
-		REG_DMA_SETUP_OPS(*buf, lut_addr,
-				&lut[filter][0], QSEED3L_LUT_SIZE * sizeof(u32),
-				REG_BLK_WRITE_SINGLE, 0, 0, 0);
-		rc = dma_ops->setup_payload(buf);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	if (test_bit(QSEED3L_COEF_LUT_SWAP_BIT, &lut_flags)) {
-		i = BIT(0);
-		REG_DMA_SETUP_OPS(*buf, QSEED3L_COEF_LUT_CTRL_OFF + offset, &i,
-				sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(buf);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-}
-
-static int reg_dmav1_setup_scaler3_de(struct sde_reg_dma_setup_ops_cfg *buf,
-	struct sde_hw_scaler3_de_cfg *de_cfg, u32 offset)
-{
-	u32 de_config[7];
-	struct sde_hw_reg_dma_ops *dma_ops;
-	int rc;
-
-	dma_ops = sde_reg_dma_get_ops();
-	de_config[0] = (de_cfg->sharpen_level1 & 0x1FF) |
-		((de_cfg->sharpen_level2 & 0x1FF) << 16);
-
-	de_config[1] = ((de_cfg->limit & 0xF) << 9) |
-		((de_cfg->prec_shift & 0x7) << 13) |
-		((de_cfg->clip & 0x7) << 16) |
-		((de_cfg->blend & 0xF) << 20);
-
-	de_config[2] = (de_cfg->thr_quiet & 0xFF) |
-		((de_cfg->thr_dieout & 0x3FF) << 16);
-
-	de_config[3] = (de_cfg->thr_low & 0x3FF) |
-		((de_cfg->thr_high & 0x3FF) << 16);
-
-	de_config[4] = (de_cfg->adjust_a[0] & 0x3FF) |
-		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_a[2] & 0x3FF) << 20);
-
-	de_config[5] = (de_cfg->adjust_b[0] & 0x3FF) |
-		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_b[2] & 0x3FF) << 20);
-
-	de_config[6] = (de_cfg->adjust_c[0] & 0x3FF) |
-		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_c[2] & 0x3FF) << 20);
-
-	offset += QSEED3_DE_OFFSET;
-	REG_DMA_SETUP_OPS(*buf, offset,
-		de_config, sizeof(de_config), REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(buf);
-	if (rc) {
-		DRM_ERROR("de write failed ret %d\n", rc);
-		return rc;
-	}
-	return 0;
-}
-
-void reg_dmav1_setup_vig_qseed3(struct sde_hw_pipe *ctx,
-	struct sde_hw_pipe_cfg *sspp, struct sde_hw_pixel_ext *pe,
-	void *scaler_cfg)
-{
-	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
-	int rc;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_cp_cfg hw_cfg = {};
-	u32 op_mode = 0, offset;
-	u32 preload, src_y_rgb, src_uv, dst, dir_weight;
-	u32 cache[4];
-	enum sde_sspp_multirect_index idx = SDE_SSPP_RECT_0;
-
-	if (!ctx || !pe || !scaler_cfg) {
-		DRM_ERROR("invalid params ctx %pK pe %pK scaler_cfg %pK",
-			ctx, pe, scaler_cfg);
-		return;
-	}
-
-	hw_cfg.ctl = ctx->ctl;
-	hw_cfg.payload = scaler_cfg;
-	hw_cfg.len = sizeof(*scaler3_cfg);
-	rc = reg_dma_sspp_check(ctx, &hw_cfg, QSEED, idx);
-	if (rc || !sspp) {
-		DRM_ERROR("invalid params rc %d sspp %pK\n", rc, sspp);
-		return;
-	}
-
-	offset = ctx->cap->sblk->scaler_blk.base - REG_DMA_VIG_SWI_DIFF;
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(sspp_buf[idx][QSEED][ctx->idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, sspp_mapping[ctx->idx], QSEED,
-	    sspp_buf[idx][QSEED][ctx->idx]);
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	if (!scaler3_cfg->enable)
-		goto end;
-
-	op_mode = BIT(0);
-	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
-
-	if (sspp->layout.format && SDE_FORMAT_IS_YUV(sspp->layout.format)) {
-		op_mode |= BIT(12);
-		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
-	}
-
-	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
-	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
-	op_mode |= (scaler3_cfg->dyn_exp_disabled) ? BIT(13) : 0;
-
-	preload =
-		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
-		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
-		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
-		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
-
-	src_y_rgb = (scaler3_cfg->src_width[0] & 0xFFFF) |
-		((scaler3_cfg->src_height[0] & 0xFFFF) << 16);
-
-	src_uv = (scaler3_cfg->src_width[1] & 0xFFFF) |
-		((scaler3_cfg->src_height[1] & 0xFFFF) << 16);
-
-	dst = (scaler3_cfg->dst_width & 0xFFFF) |
-		((scaler3_cfg->dst_height & 0xFFFF) << 16);
-
-	if (scaler3_cfg->de.enable) {
-		rc = reg_dmav1_setup_scaler3_de(&dma_write_cfg,
-			&scaler3_cfg->de, offset);
-		if (!rc)
-			op_mode |= BIT(8);
-	}
-
-	ctx->ops.setup_scaler_lut(&dma_write_cfg, scaler3_cfg, offset);
-
-	cache[0] = scaler3_cfg->init_phase_x[0] & 0x1FFFFF;
-	cache[1] = scaler3_cfg->init_phase_y[0] & 0x1FFFFF;
-	cache[2] = scaler3_cfg->init_phase_x[1] & 0x1FFFFF;
-	cache[3] = scaler3_cfg->init_phase_y[1] & 0x1FFFFF;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		offset + 0x90, cache, sizeof(cache),
-		REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting phase failed ret %d\n", rc);
-		return;
-	}
-
-	cache[0] = scaler3_cfg->phase_step_x[0] & 0xFFFFFF;
-	cache[1] = scaler3_cfg->phase_step_y[0] & 0xFFFFFF;
-	cache[2] = scaler3_cfg->phase_step_x[1] & 0xFFFFFF;
-	cache[3] = scaler3_cfg->phase_step_y[1] & 0xFFFFFF;
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		offset + 0x10, cache, sizeof(cache),
-		REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting phase failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		offset + 0x20, &preload, sizeof(u32),
-		REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting preload failed ret %d\n", rc);
-		return;
-	}
-
-	cache[0] = src_y_rgb;
-	cache[1] = src_uv;
-	cache[2] = dst;
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		offset + 0x40, cache, 3 * sizeof(u32),
-		REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting sizes failed ret %d\n", rc);
-		return;
-	}
-
-	if (is_qseed3_rev_qseed3lite(ctx->catalog)) {
-		dir_weight = (scaler3_cfg->dir_weight & 0xFF);
-
-		REG_DMA_SETUP_OPS(dma_write_cfg,
-				offset + 0x60, &dir_weight, sizeof(u32),
-				REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("lut write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-end:
-	if (sspp->layout.format) {
-		if (!SDE_FORMAT_IS_DX(sspp->layout.format))
-			op_mode |= BIT(14);
-		if (sspp->layout.format->alpha_enable) {
-			op_mode |= BIT(10);
-			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
-		}
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg,
-		offset + 0x4,
-		&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("setting opmode failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg.ctl,
-			sspp_buf[idx][QSEED][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc)
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-
-}
-
-int reg_dmav1_init_ltm_op_v6(int feature, enum sde_dspp dspp_idx)
-{
-	int rc = -ENOTSUPP;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	bool is_supported = false;
-	u32 blk;
-	/* LTM blocks are hardwired to DSPP blocks */
-	enum sde_ltm idx = (enum sde_ltm)dspp_idx;
-
-	if (feature >= SDE_LTM_MAX || idx >= LTM_MAX) {
-		DRM_ERROR("invalid feature %x max %x ltm idx %x max %xd\n",
-			feature, SDE_LTM_MAX, idx, LTM_MAX);
-		return rc;
-	}
-
-	if (ltm_feature_map[feature] >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid feature map %d for feature %d\n",
-			ltm_feature_map[feature], feature);
-		return -ENOTSUPP;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	blk = ltm_mapping[idx];
-	rc = dma_ops->check_support(ltm_feature_map[feature], blk,
-			&is_supported);
-	if (!rc)
-		rc = (is_supported) ? 0 : -ENOTSUPP;
-
-	if (!rc)
-		rc = reg_dma_buf_init(&ltm_buf[ltm_feature_map[feature]][idx],
-				ltm_feature_reg_dma_sz[feature]);
-	return rc;
-}
-
-
-int reg_dmav1_deinit_ltm_ops(enum sde_dspp dspp_idx)
-{
-	int i;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	/* LTM blocks are hardwired to DSPP blocks */
-	enum sde_ltm idx = (enum sde_ltm)dspp_idx;
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -ENOTSUPP;
-
-	if (idx >= LTM_MAX) {
-		DRM_DEBUG("invalid ltm idx %x max %xd\n", idx, LTM_MAX);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < REG_DMA_FEATURES_MAX; i++) {
-		if (!ltm_buf[i][idx])
-			continue;
-		dma_ops->dealloc_reg_dma(ltm_buf[i][idx]);
-		ltm_buf[i][idx] = NULL;
-	}
-	return 0;
-}
-
-static int reg_dma_ltm_check(struct sde_hw_dspp *ctx, void *cfg,
-		enum sde_reg_dma_features feature)
-{
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-
-	if (!ctx || !cfg) {
-		DRM_ERROR("invalid ctx %pK cfg %pK\n", ctx, cfg);
-		return -EINVAL;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	if (IS_ERR_OR_NULL(dma_ops))
-		return -EINVAL;
-
-	if (!hw_cfg->ctl || ctx->idx >= DSPP_MAX ||
-		feature >= REG_DMA_FEATURES_MAX) {
-		DRM_ERROR("invalid ctl %pK dspp idx %d feature %d\n",
-			hw_cfg->ctl, ctx->idx, feature);
-		return -EINVAL;
-	}
-
-	if (!ltm_buf[feature][ctx->idx]) {
-		DRM_ERROR("invalid dma_buf\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int reg_dmav1_get_ltm_blk(struct sde_hw_cp_cfg *hw_cfg,
-	enum sde_ltm idx, enum sde_ltm *dspp_idx, u32 *blk)
-{
-	struct sde_hw_mixer *hw_lm = NULL;
-	u32 i = 0, num_mixers = 0;
-
-	if (idx >= LTM_MAX) {
-		DRM_ERROR("invalid ltm idx %d\n", idx);
-		return -EINVAL;
-	}
-
-	num_mixers = hw_cfg->num_of_mixers;
-	hw_lm = hw_cfg->mixer_info;
-	if (num_mixers == 1) {
-		*blk = ltm_mapping[idx];
-		dspp_idx[0] = (enum sde_ltm)(hw_cfg->dspp[0]->idx);
-	} else if (num_mixers == 2) {
-		if (hw_lm->cfg.right_mixer) {
-			DRM_DEBUG_DRIVER("slave LTM instance\n");
-			return -EALREADY;
-		}
-		*blk = 0;
-		for (i = 0; i < num_mixers; i++) {
-			if (hw_cfg->dspp[i] && (i < LTM_MAX)) {
-				dspp_idx[i] =
-					(enum sde_ltm)(hw_cfg->dspp[i]->idx);
-				*blk |= ltm_mapping[dspp_idx[i]];
-			} else {
-				DRM_ERROR("invalid dspp = %pK, i = %d\n",
-					hw_cfg->dspp[i], i);
-				return -EINVAL;
-			}
-		}
-	} else {
-		DRM_ERROR("invalid num_of_mixers %d for LTM\n",
-				hw_cfg->num_of_mixers);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void ltm_initv1_disable(struct sde_hw_dspp *ctx, void *cfg,
-		u32 num_mixers, enum sde_ltm *dspp_idx)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	int rc, i = 0;
-	enum sde_ltm idx = 0;
-	u32 opmode = 0;
-
-	idx = (enum sde_ltm)ctx->idx;
-	if (idx >= LTM_MAX) {
-		DRM_ERROR("invalid ltm idx %d\n", ctx->idx);
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_INIT][idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, ltm_mapping[idx], LTM_INIT,
-			ltm_buf[LTM_INIT][idx]);
-
-	for (i = 0; i < num_mixers; i++) {
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_dither;
-		ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_unsharp;
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-			REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_LTM_INIT_DISABLE_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_INIT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-void reg_dmav1_setup_ltm_initv1(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct drm_msm_ltm_init_param *init_param = NULL;
-	struct sde_ltm_phase_info phase;
-	enum sde_ltm dspp_idx[LTM_MAX] = {0};
-	enum sde_ltm idx = 0;
-	u32 blk = 0, opmode = 0, i = 0, num_mixers = 0;
-	u32 phase_data[3];
-	int rc = 0;
-
-	rc = reg_dma_ltm_check(ctx, cfg, LTM_INIT);
-	if (rc)
-		return;
-
-	idx = (enum sde_ltm)ctx->idx;
-	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
-	if (rc) {
-		if (rc != -EALREADY)
-			DRM_ERROR("failed to get the blk info\n");
-		return;
-	}
-
-	num_mixers = hw_cfg->num_of_mixers;
-	/* disable case */
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable LTM init feature\n");
-		ltm_initv1_disable(ctx, cfg, num_mixers, dspp_idx);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_ltm_init_param)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_ltm_init_param));
-		return;
-	}
-
-	init_param = hw_cfg->payload;
-
-
-	memset(&phase, 0, sizeof(phase));
-	sde_ltm_get_phase_info(hw_cfg, &phase);
-
-	if (phase.portrait_en)
-		opmode |= BIT(2);
-	else
-		opmode &= ~BIT(2);
-
-	if (phase.merge_en)
-		opmode |= BIT(16);
-	else
-		opmode &= ~(BIT(16) | BIT(17));
-
-	phase_data[0] = phase.init_v;
-	phase_data[1] = phase.inc_h;
-	phase_data[2] = phase.inc_v;
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_INIT][idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, LTM_INIT, ltm_buf[LTM_INIT][idx]);
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0x0c, phase_data, sizeof(u32) * 3,
-			REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write phase data failed ret %d\n",
-				rc);
-		return;
-	}
-
-	for (i = 0; i < num_mixers; i++) {
-		/* reset decode select to unicast for phase init_h value*/
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x08,
-				&phase.init_h[dspp_idx[i]], sizeof(u32),
-				REG_SINGLE_WRITE, 0, 0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-
-		if (init_param->init_param_01) {
-			if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_vlut)
-				opmode |= BIT(6);
-			ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_dither;
-			opmode |= ((init_param->init_param_02 & 0x7) << 12);
-		} else {
-			opmode &= ~BIT(6);
-			ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_dither;
-		}
-
-		if (init_param->init_param_03) {
-			if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_vlut)
-				opmode |= BIT(4);
-			ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_unsharp;
-			opmode |= ((init_param->init_param_04 & 0x3) << 8);
-		} else {
-			opmode &= ~BIT(4);
-			ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_unsharp;
-		}
-
-		/* broadcast feature is not supported with REG_SINGLE_MODIFY */
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-				REG_SINGLE_MODIFY, 0, 0,
-				REG_DMA_LTM_INIT_ENABLE_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_INIT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-static void ltm_roiv1_disable(struct sde_hw_dspp *ctx, void *cfg,
-		u32 num_mixers, enum sde_ltm *dspp_idx)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	int rc, i = 0;
-	enum sde_ltm idx = 0;
-	u32 opmode = 0;
-
-	idx = (enum sde_ltm)ctx->idx;
-	if (idx >= LTM_MAX) {
-		DRM_ERROR("invalid ltm idx %d\n", ctx->idx);
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_ROI][idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, ltm_mapping[idx], LTM_ROI,
-			ltm_buf[LTM_ROI][idx]);
-
-	for (i = 0; i < num_mixers; i++) {
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_roi;
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-			REG_SINGLE_MODIFY, 0, 0, REG_DMA_LTM_ROI_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_ROI][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct drm_msm_ltm_cfg_param *cfg_param = NULL;
-	enum sde_ltm dspp_idx[LTM_MAX] = {0};
-	enum sde_ltm idx = 0;
-	u32 blk = 0, opmode = 0, i = 0, num_mixers = 0;
-	u32 roi_data[3];
-	int rc = 0;
-
-	rc = reg_dma_ltm_check(ctx, cfg, LTM_ROI);
-	if (rc)
-		return;
-
-	idx = (enum sde_ltm)ctx->idx;
-	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
-	if (rc) {
-		if (rc != -EALREADY)
-			DRM_ERROR("failed to get the blk info\n");
-		return;
-	}
-
-	num_mixers = hw_cfg->num_of_mixers;
-	/* disable case */
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable LTM roi feature\n");
-		ltm_roiv1_disable(ctx, cfg, num_mixers, dspp_idx);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_ltm_cfg_param)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-			hw_cfg->len, sizeof(struct drm_msm_ltm_cfg_param));
-		return;
-	}
-
-	cfg_param = hw_cfg->payload;
-	/* input param exceeds the display width */
-	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
-			hw_cfg->displayh) {
-		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
-			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
-			hw_cfg->displayh);
-		/* set the roi width to max register value */
-		cfg_param->cfg_param_03 = 0xFFFF;
-	}
-
-	/* input param exceeds the display height */
-	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
-			hw_cfg->displayv) {
-		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
-			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
-			hw_cfg->displayv);
-		/* set the roi height to max register value */
-		cfg_param->cfg_param_04 = 0xFFFF;
-	}
-
-	roi_data[0] = ((cfg_param->cfg_param_02 & 0xFFFF) << 16) |
-			(cfg_param->cfg_param_01 & 0xFFFF);
-	roi_data[1] = ((cfg_param->cfg_param_04 & 0xFFFF) << 16) |
-			(cfg_param->cfg_param_03 & 0xFFFF);
-	roi_data[2] = ((cfg_param->cfg_param_05 & 0x1FF) << 16) |
-			(cfg_param->cfg_param_06 & 0x1FF);
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_ROI][idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, LTM_ROI, ltm_buf[LTM_ROI][idx]);
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0xb0, roi_data, sizeof(u32) * 3,
-			REG_BLK_WRITE_SINGLE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write roi data failed ret %d\n",
-				rc);
-		return;
-	}
-
-	for (i = 0; i < num_mixers; i++) {
-		/* broadcast feature is not supported with REG_SINGLE_MODIFY */
-		/* reset decode select to unicast */
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_vlut)
-			opmode |= BIT(24);
-		ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_roi;
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-			REG_SINGLE_MODIFY, 0, 0, REG_DMA_LTM_ROI_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_ROI][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, void *cfg,
-		u32 num_mixers, enum sde_ltm *dspp_idx)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	int rc, i = 0;
-	enum sde_ltm idx = 0;
-	u32 opmode = 0;
-
-	idx = (enum sde_ltm)ctx->idx;
-	if (idx >= LTM_MAX) {
-		DRM_ERROR("invalid ltm idx %d\n", ctx->idx);
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);
-	REG_DMA_INIT_OPS(dma_write_cfg, ltm_mapping[idx], LTM_VLUT,
-			ltm_buf[LTM_VLUT][idx]);
-
-	for (i = 0; i < num_mixers; i++) {
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_vlut;
-		/* disable VLUT/INIT/ROI */
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-			REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_LTM_VLUT_DISABLE_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
-
-void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_ltm_data *payload = NULL;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	enum sde_ltm dspp_idx[LTM_MAX] = {0};
-	enum sde_ltm idx = 0;
-	u32 offset, crs = 0, index = 0, len = 0, blk = 0, opmode = 0;
-	u32 i = 0, num_mixers = 0;
-	int rc = 0;
-
-	rc = reg_dma_ltm_check(ctx, cfg, LTM_VLUT);
-	if (rc)
-		return;
-
-	idx = (enum sde_ltm)ctx->idx;
-	num_mixers = hw_cfg->num_of_mixers;
-	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
-	if (rc) {
-		if (rc != -EALREADY)
-			DRM_ERROR("failed to get the blk info\n");
-		return;
-	}
-
-	/* disable case */
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
-		ltm_vlutv1_disable(ctx, cfg, num_mixers, dspp_idx);
-		return;
-	}
-
-	if (hw_cfg->len != sizeof(struct drm_msm_ltm_data)) {
-		DRM_ERROR("invalid size of payload len %d exp %zd\n",
-				hw_cfg->len, sizeof(struct drm_msm_ltm_data));
-		return;
-	}
-
-	offset = ctx->cap->sblk->ltm.base + 0x5c;
-	crs = SDE_REG_READ(&ctx->hw, offset);
-	if (!(crs & BIT(3))) {
-		DRM_ERROR("LTM VLUT buffer is not ready: crs = %d\n", crs);
-		return;
-	}
-
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);
-
-	REG_DMA_INIT_OPS(dma_write_cfg, blk, LTM_VLUT, ltm_buf[LTM_VLUT][idx]);
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write decode select failed ret %d\n", rc);
-		return;
-	}
-
-	/* write VLUT index */
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0x38, &index, sizeof(u32),
-				REG_SINGLE_WRITE, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write VLUT index reg failed ret %d\n", rc);
-		return;
-	}
-
-	payload = hw_cfg->payload;
-	len = sizeof(u32) * LTM_DATA_SIZE_0 * LTM_DATA_SIZE_3;
-	REG_DMA_SETUP_OPS(dma_write_cfg, 0x3c, &payload->data[0][0],
-			len, REG_BLK_WRITE_INC, 0, 0, 0);
-	rc = dma_ops->setup_payload(&dma_write_cfg);
-	if (rc) {
-		DRM_ERROR("write VLUT data failed rc %d\n", rc);
-		return;
-	}
-
-	for (i = 0; i < num_mixers; i++) {
-		/* broadcast feature is not supported with REG_SINGLE_MODIFY */
-		/* reset decode select to unicast */
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		/* set the UPDATE_REQ bit */
-		crs = BIT(0);
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x5c, &crs, sizeof(u32),
-				REG_SINGLE_MODIFY, 0, 0,
-				REG_DMA_LTM_UPDATE_REQ_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write UPDATE_REQ failed ret %d\n", rc);
-			return;
-		}
-
-		opmode = BIT(1);
-		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_unsharp)
-			opmode |= BIT(4);
-		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_dither)
-			opmode |= BIT(6);
-		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_roi)
-			opmode |= BIT(24);
-		ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_vlut;
-
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x4, &opmode, sizeof(u32),
-				REG_SINGLE_MODIFY, 0, 0,
-				REG_DMA_LTM_VLUT_ENABLE_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write UPDATE_REQ failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
deleted file mode 100644
index 99607ce5..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-#ifndef _SDE_HW_REG_DMA_V1_COLOR_PROC_H
-#define _SDE_HW_REG_DMA_V1_COLOR_PROC_H
-
-#include "sde_hw_util.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_dspp.h"
-#include "sde_hw_sspp.h"
-
-/**
- * reg_dmav1_init_dspp_op_v4() - initialize the dspp feature op for sde v4
- *                               using reg dma v1.
- * @feature: dspp feature
- * idx: dspp idx
- */
-int reg_dmav1_init_dspp_op_v4(int feature, enum sde_dspp idx);
-
-/**
- * reg_dmav1_setup_dspp_vlutv18() - vlut v18 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_vlutv18(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_3d_gamutv4() - gamut v4 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_3d_gamutv41() - gamut v4_1 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_3d_gamutv41(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_gcv18() - gc v18 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_igcv31() - igc v31 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_pccv4() - pcc v4 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_pa_hsicv17() - pa hsic v17 impl using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_pa_hsicv17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_sixzonev17() - sixzone v17 impl using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_sixzonev17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_memcol_skinv17() - memcol skin v17 impl using
- * reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_memcol_skinv17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_memcol_skyv17() - memcol sky v17 impl using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_memcol_skyv17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_memcol_folv17() - memcol foliage v17 impl using
- * reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_memcol_folv17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dspp_memcol_protv17() - memcol prot v17 impl using
- * reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_dspp_memcol_protv17(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_deinit_dspp_ops() - deinitialize the dspp feature op for sde v4
- *                               which were initialized.
- * @idx: dspp idx
- */
-int reg_dmav1_deinit_dspp_ops(enum sde_dspp idx);
-
-/**
- * reg_dmav1_init_sspp_op_v4() - initialize the sspp feature op for sde v4
- * @feature: sspp feature
- * @idx: sspp idx
- */
-int reg_dmav1_init_sspp_op_v4(int feature, enum sde_sspp idx);
-
-/**
- * reg_dmav1_setup_vig_gamutv5() - VIG 3D lut gamut v5 implementation
- *                                 using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_vig_gamutv5(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_vig_gamutv6() - VIG 3D lut gamut v6 implementation
- *                                 using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_vig_gamutv6(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_vig_igcv5() - VIG 1D lut IGC v5 implementation
- *                               using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_vig_igcv5(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dma_igcv5() - DMA 1D lut IGC v5 implementation
- *                               using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- * @idx: multirect index
- */
-void reg_dmav1_setup_dma_igcv5(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx);
-/**
- * reg_dmav1_setup_vig_igcv6() - VIG ID lut IGC v6 implementation
- *				 using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_vig_igcv6(struct sde_hw_pipe *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_dma_gcv5() - DMA 1D lut GC v5 implementation
- *                              using reg dma v1.
- * @ctx: sspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- * @idx: multirect index
- */
-void reg_dmav1_setup_dma_gcv5(struct sde_hw_pipe *ctx, void *cfg,
-			enum sde_sspp_multirect_index idx);
-
-/**
- * reg_dmav1_setup_vig_qseed3 - Qseed3 implementation using reg dma v1.
- * @ctx: sspp ctx info
- * @sspp: pointer to sspp hw config
- * @pe: pointer to pixel extension config
- * @scaler_cfg: pointer to scaler config
- */
-
-void reg_dmav1_setup_vig_qseed3(struct sde_hw_pipe *ctx,
-	struct sde_hw_pipe_cfg *sspp, struct sde_hw_pixel_ext *pe,
-	void *scaler_cfg);
-
-/**reg_dmav1_setup_scaler3_lut - Qseed3 lut coefficient programming
- * @buf: defines structure for reg dma ops on the reg dma buffer.
- * @scaler3_cfg: QSEEDv3 configuration
- * @offset: Scaler Offest
- */
-
-void reg_dmav1_setup_scaler3_lut(struct sde_reg_dma_setup_ops_cfg *buf,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset);
-
-/**reg_dmav1_setup_scaler3lite_lut - Qseed3lite lut coefficient programming
- * @buf: defines structure for reg dma ops on the reg dma buffer.
- * @scaler3_cfg: QSEEDv3 configuration
- * @offset: Scaler Offest
- */
-
-void reg_dmav1_setup_scaler3lite_lut(struct sde_reg_dma_setup_ops_cfg *buf,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset);
-
-/**
- * reg_dmav1_deinit_sspp_ops() - deinitialize the sspp feature op for sde v4
- *                               which were initialized.
- * @idx: sspp idx
- */
-int reg_dmav1_deinit_sspp_ops(enum sde_sspp idx);
-
-/**
- * reg_dmav1_init_ltm_op_v6() - initialize the ltm feature op for sde v6
- * @feature: ltm feature
- * @idx: dspp idx
- */
-int reg_dmav1_init_ltm_op_v6(int feature, enum sde_dspp idx);
-
-/**
- * reg_dmav1_setup_ltm_initv1() - LTM INIT v1 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_ltm_initv1(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_ltm_roiv1() - LTM ROI v1 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_setup_ltm_vlutv1() - LTM VLUT v1 implementation using reg dma v1.
- * @ctx: dspp ctx info
- * @cfg: pointer to struct sde_hw_cp_cfg
- */
-void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg);
-
-/**
- * reg_dmav1_deinit_ltm_ops() - deinitialize the ltm feature op for sde v4
- *                               which were initialized.
- * @idx: ltm idx
- */
-int reg_dmav1_deinit_ltm_ops(enum sde_dspp idx);
-
-
-#endif /* _SDE_HW_REG_DMA_V1_COLOR_PROC_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
deleted file mode 100644
index 5c1aef5..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ /dev/null
@@ -1,1340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_lm.h"
-#include "sde_hw_sspp.h"
-#include "sde_hw_color_processing.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-#include "sde_hw_reg_dma_v1_color_proc.h"
-
-#define SDE_FETCH_CONFIG_RESET_VALUE   0x00000087
-
-/* SDE_SSPP_SRC */
-#define SSPP_SRC_SIZE                      0x00
-#define SSPP_SRC_XY                        0x08
-#define SSPP_OUT_SIZE                      0x0c
-#define SSPP_OUT_XY                        0x10
-#define SSPP_SRC0_ADDR                     0x14
-#define SSPP_SRC1_ADDR                     0x18
-#define SSPP_SRC2_ADDR                     0x1C
-#define SSPP_SRC3_ADDR                     0x20
-#define SSPP_SRC_YSTRIDE0                  0x24
-#define SSPP_SRC_YSTRIDE1                  0x28
-#define SSPP_SRC_FORMAT                    0x30
-#define SSPP_SRC_UNPACK_PATTERN            0x34
-#define SSPP_SRC_OP_MODE                   0x38
-
-/* SSPP_MULTIRECT*/
-#define SSPP_SRC_SIZE_REC1                 0x16C
-#define SSPP_SRC_XY_REC1                   0x168
-#define SSPP_OUT_SIZE_REC1                 0x160
-#define SSPP_OUT_XY_REC1                   0x164
-#define SSPP_SRC_FORMAT_REC1               0x174
-#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
-#define SSPP_SRC_OP_MODE_REC1              0x17C
-#define SSPP_MULTIRECT_OPMODE              0x170
-#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
-#define SSPP_EXCL_REC_SIZE_REC1            0x184
-#define SSPP_EXCL_REC_XY_REC1              0x188
-
-#define SSPP_UIDLE_CTRL_VALUE              0x1f0
-#define SSPP_UIDLE_CTRL_VALUE_REC1         0x1f4
-
-/* SSPP_DGM */
-#define SSPP_DGM_OP_MODE                   0x804
-#define SSPP_DGM_OP_MODE_REC1              0x1804
-#define SSPP_GAMUT_UNMULT_MODE             0x1EA0
-
-#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
-#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
-#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
-#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
-#define MDSS_MDP_OP_IGC_EN                 BIT(16)
-#define MDSS_MDP_OP_FLIP_UD                BIT(14)
-#define MDSS_MDP_OP_FLIP_LR                BIT(13)
-#define MDSS_MDP_OP_SPLIT_ORDER            BIT(4)
-#define MDSS_MDP_OP_BWC_EN                 BIT(0)
-#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
-#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
-#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
-#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
-
-#define SSPP_SRC_CONSTANT_COLOR            0x3c
-#define SSPP_EXCL_REC_CTL                  0x40
-#define SSPP_UBWC_STATIC_CTRL              0x44
-#define SSPP_FETCH_CONFIG                  0x048
-#define SSPP_DANGER_LUT                    0x60
-#define SSPP_SAFE_LUT                      0x64
-#define SSPP_CREQ_LUT                      0x68
-#define SSPP_QOS_CTRL                      0x6C
-#define SSPP_DECIMATION_CONFIG             0xB4
-#define SSPP_SRC_ADDR_SW_STATUS            0x70
-#define SSPP_CREQ_LUT_0                    0x74
-#define SSPP_CREQ_LUT_1                    0x78
-#define SSPP_SW_PIX_EXT_C0_LR              0x100
-#define SSPP_SW_PIX_EXT_C0_TB              0x104
-#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
-#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
-#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
-#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
-#define SSPP_SW_PIX_EXT_C3_LR              0x120
-#define SSPP_SW_PIX_EXT_C3_TB              0x124
-#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
-#define SSPP_TRAFFIC_SHAPER                0x130
-#define SSPP_CDP_CNTL                      0x134
-#define SSPP_UBWC_ERROR_STATUS             0x138
-#define SSPP_CDP_CNTL_REC1                 0x13c
-#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
-#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
-#define SSPP_TRAFFIC_SHAPER_REC1           0x158
-#define SSPP_EXCL_REC_SIZE                 0x1B4
-#define SSPP_EXCL_REC_XY                   0x1B8
-#define SSPP_VIG_OP_MODE                   0x0
-#define SSPP_VIG_CSC_10_OP_MODE            0x0
-#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
-
-/* SSPP_QOS_CTRL */
-#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
-#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
-#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
-#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
-#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
-#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
-
-#define SSPP_SYS_CACHE_MODE                0x1BC
-#define SSPP_SBUF_STATUS_PLANE0            0x1C0
-#define SSPP_SBUF_STATUS_PLANE1            0x1C4
-#define SSPP_SBUF_STATUS_PLANE_EMPTY       BIT(16)
-
-/* SDE_SSPP_SCALER_QSEED2 */
-#define SCALE_CONFIG                       0x04
-#define COMP0_3_PHASE_STEP_X               0x10
-#define COMP0_3_PHASE_STEP_Y               0x14
-#define COMP1_2_PHASE_STEP_X               0x18
-#define COMP1_2_PHASE_STEP_Y               0x1c
-#define COMP0_3_INIT_PHASE_X               0x20
-#define COMP0_3_INIT_PHASE_Y               0x24
-#define COMP1_2_INIT_PHASE_X               0x28
-#define COMP1_2_INIT_PHASE_Y               0x2C
-#define VIG_0_QSEED2_SHARP                 0x30
-
-/*
- * Definitions for ViG op modes
- */
-#define VIG_OP_CSC_DST_DATAFMT BIT(19)
-#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
-#define VIG_OP_CSC_EN          BIT(17)
-#define VIG_OP_MEM_PROT_CONT   BIT(15)
-#define VIG_OP_MEM_PROT_VAL    BIT(14)
-#define VIG_OP_MEM_PROT_SAT    BIT(13)
-#define VIG_OP_MEM_PROT_HUE    BIT(12)
-#define VIG_OP_HIST            BIT(8)
-#define VIG_OP_SKY_COL         BIT(7)
-#define VIG_OP_FOIL            BIT(6)
-#define VIG_OP_SKIN_COL        BIT(5)
-#define VIG_OP_PA_EN           BIT(4)
-#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
-#define VIG_OP_MEM_PROT_BLEND  BIT(1)
-
-/*
- * Definitions for CSC 10 op modes
- */
-#define VIG_CSC_10_SRC_DATAFMT BIT(1)
-#define VIG_CSC_10_EN          BIT(0)
-#define CSC_10BIT_OFFSET       4
-#define DGM_CSC_MATRIX_SHIFT       0
-
-/* traffic shaper clock in Hz */
-#define TS_CLK			19200000
-
-static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
-		int s_id,
-		u32 *idx)
-{
-	int rc = 0;
-	const struct sde_sspp_sub_blks *sblk = ctx->cap->sblk;
-
-	if (!ctx)
-		return -EINVAL;
-
-	switch (s_id) {
-	case SDE_SSPP_SRC:
-		*idx = sblk->src_blk.base;
-		break;
-	case SDE_SSPP_SCALER_QSEED2:
-	case SDE_SSPP_SCALER_QSEED3:
-	case SDE_SSPP_SCALER_RGB:
-		*idx = sblk->scaler_blk.base;
-		break;
-	case SDE_SSPP_CSC:
-	case SDE_SSPP_CSC_10BIT:
-		*idx = sblk->csc_blk.base;
-		break;
-	case SDE_SSPP_HSIC:
-		*idx = sblk->hsic_blk.base;
-		break;
-	case SDE_SSPP_PCC:
-		*idx = sblk->pcc_blk.base;
-		break;
-	case SDE_SSPP_MEMCOLOR:
-		*idx = sblk->memcolor_blk.base;
-		break;
-	default:
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static void sde_hw_sspp_setup_multirect(struct sde_hw_pipe *ctx,
-		enum sde_sspp_multirect_index index,
-		enum sde_sspp_multirect_mode mode)
-{
-	u32 mode_mask;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (index == SDE_SSPP_RECT_SOLO) {
-		/**
-		 * if rect index is RECT_SOLO, we cannot expect a
-		 * virtual plane sharing the same SSPP id. So we go
-		 * and disable multirect
-		 */
-		mode_mask = 0;
-	} else {
-		mode_mask = SDE_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
-		mode_mask |= index;
-		if (mode == SDE_SSPP_MULTIRECT_TIME_MX)
-			mode_mask |= BIT(2);
-		else
-			mode_mask &= ~BIT(2);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
-}
-
-static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
-		u32 mask, u8 en)
-{
-	u32 idx;
-	u32 opmode;
-
-	if (!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
-		_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) ||
-		!test_bit(SDE_SSPP_CSC, &ctx->cap->features))
-		return;
-
-	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
-
-	if (en)
-		opmode |= mask;
-	else
-		opmode &= ~mask;
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
-}
-
-static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
-		u32 mask, u8 en)
-{
-	u32 idx;
-	u32 opmode;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC_10BIT, &idx))
-		return;
-
-	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
-	if (en)
-		opmode |= mask;
-	else
-		opmode &= ~mask;
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
-}
-
-static void sde_hw_sspp_set_src_split_order(struct sde_hw_pipe *ctx,
-		enum sde_sspp_multirect_index rect_mode, bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 opmode, idx, op_mode_off;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (rect_mode == SDE_SSPP_RECT_SOLO || rect_mode == SDE_SSPP_RECT_0)
-		op_mode_off = SSPP_SRC_OP_MODE;
-	else
-		op_mode_off = SSPP_SRC_OP_MODE_REC1;
-
-	c = &ctx->hw;
-	opmode = SDE_REG_READ(c, op_mode_off + idx);
-
-	if (enable)
-		opmode |= MDSS_MDP_OP_SPLIT_ORDER;
-	else
-		opmode &= ~MDSS_MDP_OP_SPLIT_ORDER;
-
-	SDE_REG_WRITE(c, op_mode_off + idx, opmode);
-}
-
-/**
- * Setup source pixel format, flip,
- */
-static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
-		const struct sde_format *fmt,
-		bool const_alpha_en, u32 flags,
-		enum sde_sspp_multirect_index rect_mode)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 chroma_samp, unpack, src_format;
-	u32 opmode = 0;
-	u32 alpha_en_mask = 0;
-	u32 op_mode_off, unpack_pat_off, format_off;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
-		return;
-
-	if (rect_mode == SDE_SSPP_RECT_SOLO || rect_mode == SDE_SSPP_RECT_0) {
-		op_mode_off = SSPP_SRC_OP_MODE;
-		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
-		format_off = SSPP_SRC_FORMAT;
-	} else {
-		op_mode_off = SSPP_SRC_OP_MODE_REC1;
-		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
-		format_off = SSPP_SRC_FORMAT_REC1;
-	}
-
-	c = &ctx->hw;
-	opmode = SDE_REG_READ(c, op_mode_off + idx);
-	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
-			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
-
-	if (flags & SDE_SSPP_FLIP_LR)
-		opmode |= MDSS_MDP_OP_FLIP_LR;
-	if (flags & SDE_SSPP_FLIP_UD)
-		opmode |= MDSS_MDP_OP_FLIP_UD;
-
-	chroma_samp = fmt->chroma_sample;
-	if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
-		if (chroma_samp == SDE_CHROMA_H2V1)
-			chroma_samp = SDE_CHROMA_H1V2;
-		else if (chroma_samp == SDE_CHROMA_H1V2)
-			chroma_samp = SDE_CHROMA_H2V1;
-	}
-
-	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
-		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
-		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
-
-	if (flags & SDE_SSPP_ROT_90)
-		src_format |= BIT(11); /* ROT90 */
-
-	if (fmt->alpha_enable && fmt->fetch_planes == SDE_PLANE_INTERLEAVED)
-		src_format |= BIT(8); /* SRCC3_EN */
-
-	if (flags & SDE_SSPP_SOLID_FILL)
-		src_format |= BIT(22);
-
-	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
-		(fmt->element[1] << 8) | (fmt->element[0] << 0);
-	src_format |= ((fmt->unpack_count - 1) << 12) |
-		(fmt->unpack_tight << 17) |
-		(fmt->unpack_align_msb << 18) |
-		((fmt->bpp - 1) << 9);
-
-	if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
-		if (SDE_FORMAT_IS_UBWC(fmt))
-			opmode |= MDSS_MDP_OP_BWC_EN;
-		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
-		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
-			SDE_FETCH_CONFIG_RESET_VALUE |
-			ctx->mdp->highest_bank_bit << 18);
-		if (IS_UBWC_40_SUPPORTED(ctx->catalog->ubwc_version)) {
-			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-				SDE_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
-		} else if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
-			alpha_en_mask = const_alpha_en ? BIT(31) : 0;
-			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-				alpha_en_mask | (ctx->mdp->ubwc_swizzle) |
-				(ctx->mdp->highest_bank_bit << 4));
-		} else if (IS_UBWC_30_SUPPORTED(ctx->catalog->ubwc_version)) {
-			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-				BIT(30) | (ctx->mdp->ubwc_swizzle) |
-				(ctx->mdp->highest_bank_bit << 4));
-		}
-	}
-
-	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
-
-	/* if this is YUV pixel format, enable CSC */
-	if (SDE_FORMAT_IS_YUV(fmt))
-		src_format |= BIT(15);
-
-	if (SDE_FORMAT_IS_DX(fmt))
-		src_format |= BIT(14);
-
-	/* update scaler opmode, if appropriate */
-	if (test_bit(SDE_SSPP_CSC, &ctx->cap->features))
-		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
-			SDE_FORMAT_IS_YUV(fmt));
-	else if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
-		_sspp_setup_csc10_opmode(ctx,
-			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
-			SDE_FORMAT_IS_YUV(fmt));
-
-	SDE_REG_WRITE(c, format_off + idx, src_format);
-	SDE_REG_WRITE(c, unpack_pat_off + idx, unpack);
-	SDE_REG_WRITE(c, op_mode_off + idx, opmode);
-
-	/* clear previous UBWC error */
-	SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
-}
-
-static void sde_hw_sspp_clear_ubwc_error(struct sde_hw_pipe *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	c = &ctx->hw;
-	SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS, BIT(31));
-}
-
-static u32 sde_hw_sspp_get_ubwc_error(struct sde_hw_pipe *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_code;
-
-	c = &ctx->hw;
-	reg_code = SDE_REG_READ(c, SSPP_UBWC_ERROR_STATUS);
-
-	return reg_code;
-}
-
-static void sde_hw_sspp_setup_secure(struct sde_hw_pipe *ctx,
-		enum sde_sspp_multirect_index rect_mode,
-		bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 secure = 0, secure_bit_mask;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	c = &ctx->hw;
-
-	if ((rect_mode == SDE_SSPP_RECT_SOLO)
-			|| (rect_mode == SDE_SSPP_RECT_0))
-		secure_bit_mask =
-			(rect_mode == SDE_SSPP_RECT_SOLO) ? 0xF : 0x5;
-	else
-		secure_bit_mask = 0xA;
-
-	secure = SDE_REG_READ(c, SSPP_SRC_ADDR_SW_STATUS + idx);
-
-	if (enable)
-		secure |= secure_bit_mask;
-	else
-		secure &= ~secure_bit_mask;
-
-	SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
-
-	/* multiple planes share same sw_status register */
-	wmb();
-}
-
-
-static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
-		struct sde_hw_pixel_ext *pe_ext)
-{
-	struct sde_hw_blk_reg_map *c;
-	u8 color;
-	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
-	const u32 bytemask = 0xff;
-	const u32 shortmask = 0xffff;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !pe_ext)
-		return;
-
-	c = &ctx->hw;
-
-	/* program SW pixel extension override for all pipes*/
-	for (color = 0; color < SDE_MAX_PLANES; color++) {
-		/* color 2 has the same set of registers as color 1 */
-		if (color == 2)
-			continue;
-
-		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
-			((pe_ext->right_rpt[color] & bytemask) << 16)|
-			((pe_ext->left_ftch[color] & bytemask) << 8)|
-			(pe_ext->left_rpt[color] & bytemask);
-
-		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
-			((pe_ext->btm_rpt[color] & bytemask) << 16)|
-			((pe_ext->top_ftch[color] & bytemask) << 8)|
-			(pe_ext->top_rpt[color] & bytemask);
-
-		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
-			pe_ext->num_ext_pxls_top[color] +
-			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
-			((pe_ext->roi_w[color] +
-			pe_ext->num_ext_pxls_left[color] +
-			pe_ext->num_ext_pxls_right[color]) & shortmask);
-	}
-
-	/* color 0 */
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
-			tot_req_pixels[0]);
-
-	/* color 1 and color 2 */
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
-			tot_req_pixels[1]);
-
-	/* color 3 */
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, lr_pe[3]);
-	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
-			tot_req_pixels[3]);
-}
-
-static void _sde_hw_sspp_setup_scaler(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *sspp,
-		struct sde_hw_pixel_ext *pe,
-		void *scaler_cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	int config_h = 0x0;
-	int config_v = 0x0;
-	u32 idx;
-
-	(void)sspp;
-	(void)scaler_cfg;
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !pe)
-		return;
-
-	c = &ctx->hw;
-
-	/* enable scaler(s) if valid filter set */
-	if (pe->horz_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
-		config_h |= pe->horz_filter[SDE_SSPP_COMP_0] << 8;
-	if (pe->horz_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
-		config_h |= pe->horz_filter[SDE_SSPP_COMP_1_2] << 12;
-	if (pe->horz_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
-		config_h |= pe->horz_filter[SDE_SSPP_COMP_3] << 16;
-
-	if (config_h)
-		config_h |= BIT(0);
-
-	if (pe->vert_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
-		config_v |= pe->vert_filter[SDE_SSPP_COMP_0] << 10;
-	if (pe->vert_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
-		config_v |= pe->vert_filter[SDE_SSPP_COMP_1_2] << 14;
-	if (pe->vert_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
-		config_v |= pe->vert_filter[SDE_SSPP_COMP_3] << 18;
-
-	if (config_v)
-		config_v |= BIT(1);
-
-	SDE_REG_WRITE(c, SCALE_CONFIG + idx,  config_h | config_v);
-	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
-		pe->init_phase_x[SDE_SSPP_COMP_0]);
-	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
-		pe->init_phase_y[SDE_SSPP_COMP_0]);
-	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
-		pe->phase_step_x[SDE_SSPP_COMP_0]);
-	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
-		pe->phase_step_y[SDE_SSPP_COMP_0]);
-
-	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
-		pe->init_phase_x[SDE_SSPP_COMP_1_2]);
-	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
-		pe->init_phase_y[SDE_SSPP_COMP_1_2]);
-	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
-		pe->phase_step_x[SDE_SSPP_COMP_1_2]);
-	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
-		pe->phase_step_y[SDE_SSPP_COMP_1_2]);
-}
-
-static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *sspp,
-		struct sde_hw_pixel_ext *pe,
-		void *scaler_cfg)
-{
-	u32 idx;
-	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
-
-	(void)pe;
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !sspp
-		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
-		return;
-
-	sde_hw_setup_scaler3(&ctx->hw, scaler3_cfg,
-		ctx->cap->sblk->scaler_blk.version, idx, sspp->layout.format);
-}
-
-static u32 _sde_hw_sspp_get_scaler3_ver(struct sde_hw_pipe *ctx)
-{
-	u32 idx;
-
-	if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx))
-		return 0;
-
-	return sde_hw_get_scaler3_ver(&ctx->hw, idx);
-}
-
-/**
- * sde_hw_sspp_setup_rects()
- */
-static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *cfg,
-		enum sde_sspp_multirect_index rect_index)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
-	u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
-	u32 decimation = 0;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !cfg)
-		return;
-
-	c = &ctx->hw;
-
-	if (rect_index == SDE_SSPP_RECT_SOLO || rect_index == SDE_SSPP_RECT_0) {
-		src_size_off = SSPP_SRC_SIZE;
-		src_xy_off = SSPP_SRC_XY;
-		out_size_off = SSPP_OUT_SIZE;
-		out_xy_off = SSPP_OUT_XY;
-	} else {
-		src_size_off = SSPP_SRC_SIZE_REC1;
-		src_xy_off = SSPP_SRC_XY_REC1;
-		out_size_off = SSPP_OUT_SIZE_REC1;
-		out_xy_off = SSPP_OUT_XY_REC1;
-	}
-
-
-	/* src and dest rect programming */
-	src_xy = (cfg->src_rect.y << 16) | (cfg->src_rect.x);
-	src_size = (cfg->src_rect.h << 16) | (cfg->src_rect.w);
-	dst_xy = (cfg->dst_rect.y << 16) | (cfg->dst_rect.x);
-	dst_size = (cfg->dst_rect.h << 16) | (cfg->dst_rect.w);
-
-	if (rect_index == SDE_SSPP_RECT_SOLO) {
-		ystride0 = (cfg->layout.plane_pitch[0]) |
-			(cfg->layout.plane_pitch[1] << 16);
-		ystride1 = (cfg->layout.plane_pitch[2]) |
-			(cfg->layout.plane_pitch[3] << 16);
-	} else {
-		ystride0 = SDE_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
-		ystride1 = SDE_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
-
-		if (rect_index == SDE_SSPP_RECT_0) {
-			ystride0 = (ystride0 & 0xFFFF0000) |
-				(cfg->layout.plane_pitch[0] & 0x0000FFFF);
-			ystride1 = (ystride1 & 0xFFFF0000)|
-				(cfg->layout.plane_pitch[2] & 0x0000FFFF);
-		} else {
-			ystride0 = (ystride0 & 0x0000FFFF) |
-				((cfg->layout.plane_pitch[0] << 16) &
-				 0xFFFF0000);
-			ystride1 = (ystride1 & 0x0000FFFF) |
-				((cfg->layout.plane_pitch[2] << 16) &
-				 0xFFFF0000);
-		}
-	}
-
-	/* program scaler, phase registers, if pipes supporting scaling */
-	if (ctx->cap->features & SDE_SSPP_SCALER) {
-		/* program decimation */
-		decimation = ((1 << cfg->horz_decimation) - 1) << 8;
-		decimation |= ((1 << cfg->vert_decimation) - 1);
-	}
-
-	/* rectangle register programming */
-	SDE_REG_WRITE(c, src_size_off + idx, src_size);
-	SDE_REG_WRITE(c, src_xy_off + idx, src_xy);
-	SDE_REG_WRITE(c, out_size_off + idx, dst_size);
-	SDE_REG_WRITE(c, out_xy_off + idx, dst_xy);
-
-	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
-	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
-	SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
-}
-
-/**
- * _sde_hw_sspp_setup_excl_rect() - set exclusion rect configs
- * @ctx: Pointer to pipe context
- * @excl_rect: Exclusion rect configs
- */
-static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
-		struct sde_rect *excl_rect,
-		enum sde_sspp_multirect_index rect_index)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 size, xy;
-	u32 idx;
-	u32 reg_xy, reg_size;
-	u32 excl_ctrl, enable_bit;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !excl_rect)
-		return;
-
-	if (rect_index == SDE_SSPP_RECT_0 || rect_index == SDE_SSPP_RECT_SOLO) {
-		reg_xy = SSPP_EXCL_REC_XY;
-		reg_size = SSPP_EXCL_REC_SIZE;
-		enable_bit = BIT(0);
-	} else {
-		reg_xy = SSPP_EXCL_REC_XY_REC1;
-		reg_size = SSPP_EXCL_REC_SIZE_REC1;
-		enable_bit = BIT(1);
-	}
-
-	c = &ctx->hw;
-
-	xy = (excl_rect->y << 16) | (excl_rect->x);
-	size = (excl_rect->h << 16) | (excl_rect->w);
-
-	excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);
-	if (!size) {
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
-				excl_ctrl & ~enable_bit);
-	} else {
-		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
-				excl_ctrl | enable_bit);
-		SDE_REG_WRITE(c, reg_size + idx, size);
-		SDE_REG_WRITE(c, reg_xy + idx, xy);
-	}
-}
-
-static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *cfg,
-		enum sde_sspp_multirect_index rect_mode)
-{
-	int i;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (rect_mode == SDE_SSPP_RECT_SOLO) {
-		for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
-			SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
-					cfg->layout.plane_addr[i]);
-	} else if (rect_mode == SDE_SSPP_RECT_0) {
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
-				cfg->layout.plane_addr[0]);
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
-				cfg->layout.plane_addr[2]);
-	} else {
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
-				cfg->layout.plane_addr[0]);
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
-				cfg->layout.plane_addr[2]);
-	}
-}
-
-u32 sde_hw_sspp_get_source_addr(struct sde_hw_pipe *ctx, bool is_virtual)
-{
-	u32 idx;
-	u32 offset = 0;
-
-	if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return 0;
-
-	offset =  is_virtual ? (SSPP_SRC1_ADDR + idx) : (SSPP_SRC0_ADDR + idx);
-
-	return SDE_REG_READ(&ctx->hw, offset);
-}
-
-static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx,
-		struct sde_csc_cfg *data)
-{
-	u32 idx;
-	bool csc10 = false;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC, &idx) || !data)
-		return;
-
-	if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features)) {
-		idx += CSC_10BIT_OFFSET;
-		csc10 = true;
-	}
-
-	sde_hw_csc_setup(&ctx->hw, idx, data, csc10);
-}
-
-static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
-		struct sde_hw_sharp_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !cfg ||
-			!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features))
-		return;
-
-	c = &ctx->hw;
-
-	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, cfg->strength);
-	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr);
-	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr);
-	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
-}
-
-static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color, enum
-		sde_sspp_multirect_index rect_index)
-{
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (rect_index == SDE_SSPP_RECT_SOLO || rect_index == SDE_SSPP_RECT_0)
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
-	else
-		SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
-				color);
-}
-
-static void sde_hw_sspp_setup_danger_safe_lut(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_qos_cfg *cfg)
-{
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
-	SDE_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
-}
-
-static void sde_hw_sspp_setup_creq_lut(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_qos_cfg *cfg)
-{
-	u32 idx;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (ctx->cap && test_bit(SDE_PERF_SSPP_QOS_8LVL,
-				&ctx->cap->perf_features)) {
-		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
-		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
-				cfg->creq_lut >> 32);
-	} else {
-		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
-	}
-}
-
-static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_qos_cfg *cfg)
-{
-	u32 idx;
-	u32 qos_ctrl = 0;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (cfg->vblank_en) {
-		qos_ctrl |= ((cfg->creq_vblank &
-				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
-				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
-		qos_ctrl |= ((cfg->danger_vblank &
-				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
-				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
-		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
-	}
-
-	if (cfg->danger_safe_en)
-		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
-}
-
-static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_ts_cfg *cfg,
-		enum sde_sspp_multirect_index index)
-{
-	u32 idx;
-	u32 ts_offset, ts_prefill_offset;
-	u32 ts_count = 0, ts_bytes = 0;
-	const struct sde_sspp_cfg *cap;
-
-	if (!ctx || !cfg || !ctx->cap)
-		return;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	cap = ctx->cap;
-
-	if ((index == SDE_SSPP_RECT_SOLO || index == SDE_SSPP_RECT_0) &&
-			test_bit(SDE_PERF_SSPP_TS_PREFILL,
-				&cap->perf_features)) {
-		ts_offset = SSPP_TRAFFIC_SHAPER;
-		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_PREFILL;
-	} else if (index == SDE_SSPP_RECT_1 &&
-			test_bit(SDE_PERF_SSPP_TS_PREFILL_REC1,
-				&cap->perf_features)) {
-		ts_offset = SSPP_TRAFFIC_SHAPER_REC1;
-		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_REC1_PREFILL;
-	} else {
-		pr_err("%s: unexpected idx:%d\n", __func__, index);
-		return;
-	}
-
-	if (cfg->time) {
-		u64 temp = DIV_ROUND_UP_ULL(TS_CLK * 1000000ULL, cfg->time);
-
-		ts_bytes = temp * cfg->size;
-		if (ts_bytes > SSPP_TRAFFIC_SHAPER_BPC_MAX)
-			ts_bytes = SSPP_TRAFFIC_SHAPER_BPC_MAX;
-	}
-
-	if (ts_bytes) {
-		ts_count = DIV_ROUND_UP_ULL(cfg->size, ts_bytes);
-		ts_bytes |= BIT(31) | BIT(27);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, ts_offset, ts_bytes);
-	SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
-}
-
-static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cdp_cfg *cfg,
-		enum sde_sspp_multirect_index index)
-{
-	u32 idx;
-	u32 cdp_cntl = 0;
-	u32 cdp_cntl_offset = 0;
-
-	if (!ctx || !cfg)
-		return;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (index == SDE_SSPP_RECT_SOLO || index == SDE_SSPP_RECT_0) {
-		cdp_cntl_offset = SSPP_CDP_CNTL;
-	} else if (index == SDE_SSPP_RECT_1) {
-		cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
-	} else {
-		pr_err("%s: unexpected idx:%d\n", __func__, index);
-		return;
-	}
-
-	if (cfg->enable)
-		cdp_cntl |= BIT(0);
-	if (cfg->ubwc_meta_enable)
-		cdp_cntl |= BIT(1);
-	if (cfg->tile_amortize_enable)
-		cdp_cntl |= BIT(2);
-	if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
-		cdp_cntl |= BIT(3);
-
-	SDE_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
-}
-
-static void sde_hw_sspp_setup_sys_cache(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_sc_cfg *cfg)
-{
-	u32 idx, val;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (!cfg)
-		return;
-
-	val = SDE_REG_READ(&ctx->hw, SSPP_SYS_CACHE_MODE + idx);
-
-	if (cfg->flags & SSPP_SYS_CACHE_EN_FLAG)
-		val = (val & ~BIT(15)) | ((cfg->rd_en & 0x1) << 15);
-
-	if (cfg->flags & SSPP_SYS_CACHE_SCID)
-		val = (val & ~0x1F00) | ((cfg->rd_scid & 0x1f) << 8);
-
-	if (cfg->flags & SSPP_SYS_CACHE_OP_MODE)
-		val = (val & ~0xC0000) | ((cfg->op_mode & 0x3) << 18);
-
-	if (cfg->flags & SSPP_SYS_CACHE_OP_TYPE)
-		val = (val & ~0xF) | ((cfg->rd_op_type & 0xf) << 0);
-
-	if (cfg->flags & SSPP_SYS_CACHE_NO_ALLOC)
-		val = (val & ~0x10) | ((cfg->rd_noallocate & 0x1) << 4);
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_SYS_CACHE_MODE + idx, val);
-}
-
-static void sde_hw_sspp_setup_uidle(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_uidle_cfg *cfg,
-		enum sde_sspp_multirect_index index)
-{
-	u32 idx, val;
-	u32 offset;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
-		return;
-
-	if (index == SDE_SSPP_RECT_1)
-		offset = SSPP_UIDLE_CTRL_VALUE_REC1;
-	else
-		offset = SSPP_UIDLE_CTRL_VALUE;
-
-	val = SDE_REG_READ(&ctx->hw, offset + idx);
-	val = (val & ~BIT(31)) | (cfg->enable ? 0x0 : BIT(31));
-	val = (val & ~0xFF00000) | (cfg->fal_allowed_threshold << 20);
-	val = (val & ~0xF0000) | (cfg->fal10_exit_threshold << 16);
-	val = (val & ~0xF00) | (cfg->fal10_threshold << 8);
-	val = (val & ~0xF) | (cfg->fal1_threshold << 0);
-
-	SDE_REG_WRITE(&ctx->hw, offset + idx, val);
-}
-
-static void _setup_layer_ops_colorproc(struct sde_hw_pipe *c,
-		unsigned long features)
-{
-	int ret = 0;
-
-	if (test_bit(SDE_SSPP_HSIC, &features)) {
-		if (c->cap->sblk->hsic_blk.version ==
-			(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
-			c->ops.setup_pa_hue = sde_setup_pipe_pa_hue_v1_7;
-			c->ops.setup_pa_sat = sde_setup_pipe_pa_sat_v1_7;
-			c->ops.setup_pa_val = sde_setup_pipe_pa_val_v1_7;
-			c->ops.setup_pa_cont = sde_setup_pipe_pa_cont_v1_7;
-		}
-	}
-
-	if (test_bit(SDE_SSPP_MEMCOLOR, &features)) {
-		if (c->cap->sblk->memcolor_blk.version ==
-			(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
-			c->ops.setup_pa_memcolor =
-				sde_setup_pipe_pa_memcol_v1_7;
-	}
-
-	if (test_bit(SDE_SSPP_VIG_GAMUT, &features)) {
-		if (c->cap->sblk->gamut_blk.version ==
-			(SDE_COLOR_PROCESS_VER(0x5, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_VIG_GAMUT,
-							c->idx);
-			if (!ret)
-				c->ops.setup_vig_gamut =
-					reg_dmav1_setup_vig_gamutv5;
-			else
-				c->ops.setup_vig_gamut = NULL;
-		}
-
-		if (c->cap->sblk->gamut_blk.version ==
-			(SDE_COLOR_PROCESS_VER(0x6, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_VIG_GAMUT,
-							c->idx);
-			if (!ret)
-				c->ops.setup_vig_gamut =
-					reg_dmav1_setup_vig_gamutv6;
-			else
-				c->ops.setup_vig_gamut = NULL;
-		}
-	}
-
-	if (test_bit(SDE_SSPP_VIG_IGC, &features)) {
-		if (c->cap->sblk->igc_blk[0].version ==
-			(SDE_COLOR_PROCESS_VER(0x5, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_VIG_IGC,
-							c->idx);
-			if (!ret)
-				c->ops.setup_vig_igc =
-					reg_dmav1_setup_vig_igcv5;
-			else
-				c->ops.setup_vig_igc = NULL;
-		}
-
-		if (c->cap->sblk->igc_blk[0].version ==
-			(SDE_COLOR_PROCESS_VER(0x6, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_VIG_IGC,
-							c->idx);
-			if (!ret)
-				c->ops.setup_vig_igc =
-					reg_dmav1_setup_vig_igcv6;
-			else
-				c->ops.setup_vig_igc = NULL;
-		}
-	}
-
-	if (test_bit(SDE_SSPP_DMA_IGC, &features)) {
-		if (c->cap->sblk->igc_blk[0].version ==
-			(SDE_COLOR_PROCESS_VER(0x5, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_DMA_IGC,
-							c->idx);
-			if (!ret)
-				c->ops.setup_dma_igc =
-					reg_dmav1_setup_dma_igcv5;
-			else
-				c->ops.setup_dma_igc = NULL;
-		}
-	}
-
-	if (test_bit(SDE_SSPP_DMA_GC, &features)) {
-		if (c->cap->sblk->gc_blk[0].version ==
-			(SDE_COLOR_PROCESS_VER(0x5, 0x0))) {
-			ret = reg_dmav1_init_sspp_op_v4(SDE_SSPP_DMA_GC,
-							c->idx);
-			if (!ret)
-				c->ops.setup_dma_gc =
-					reg_dmav1_setup_dma_gcv5;
-			else
-				c->ops.setup_dma_gc = NULL;
-		}
-	}
-}
-
-static void sde_hw_sspp_setup_inverse_pma(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index, u32 enable)
-{
-	u32 op_mode = 0;
-
-	if (!ctx || (index == SDE_SSPP_RECT_1))
-		return;
-
-	if (enable)
-		op_mode |= BIT(0);
-
-	SDE_REG_WRITE(&ctx->hw, SSPP_GAMUT_UNMULT_MODE, op_mode);
-}
-
-static void sde_hw_sspp_setup_dgm_inverse_pma(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index, u32 enable)
-{
-	u32 offset = SSPP_DGM_OP_MODE;
-	u32 op_mode = 0;
-
-	if (!ctx)
-		return;
-
-	if (index == SDE_SSPP_RECT_1)
-		offset = SSPP_DGM_OP_MODE_REC1;
-
-	op_mode = SDE_REG_READ(&ctx->hw, offset);
-
-	if (enable)
-		op_mode |= BIT(0);
-	else
-		op_mode &= ~BIT(0);
-
-	SDE_REG_WRITE(&ctx->hw, offset, op_mode);
-}
-
-static void sde_hw_sspp_setup_dgm_csc(struct sde_hw_pipe *ctx,
-		enum sde_sspp_multirect_index index, struct sde_csc_cfg *data)
-{
-	u32 idx = 0;
-	u32 offset;
-	u32 op_mode = 0;
-	const struct sde_sspp_sub_blks *sblk;
-
-	if (!ctx || !ctx->cap || !ctx->cap->sblk)
-		return;
-
-	sblk = ctx->cap->sblk;
-	if (index == SDE_SSPP_RECT_1)
-		idx = 1;
-
-	offset = sblk->dgm_csc_blk[idx].base;
-	if (data) {
-		op_mode |= BIT(0);
-		sde_hw_csc_matrix_coeff_setup(&ctx->hw,
-			offset + CSC_10BIT_OFFSET, data, DGM_CSC_MATRIX_SHIFT);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, offset, op_mode);
-}
-
-static void _setup_layer_ops(struct sde_hw_pipe *c,
-		unsigned long features, unsigned long perf_features)
-{
-	int ret;
-
-	if (test_bit(SDE_SSPP_SRC, &features)) {
-		c->ops.setup_format = sde_hw_sspp_setup_format;
-		c->ops.setup_rects = sde_hw_sspp_setup_rects;
-		c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
-		c->ops.get_sourceaddress = sde_hw_sspp_get_source_addr;
-		c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
-		c->ops.setup_pe = sde_hw_sspp_setup_pe_config;
-		c->ops.setup_secure_address = sde_hw_sspp_setup_secure;
-		c->ops.set_src_split_order = sde_hw_sspp_set_src_split_order;
-	}
-
-	if (test_bit(SDE_SSPP_EXCL_RECT, &features))
-		c->ops.setup_excl_rect = _sde_hw_sspp_setup_excl_rect;
-
-	if (test_bit(SDE_PERF_SSPP_QOS, &features)) {
-		c->ops.setup_danger_safe_lut =
-			sde_hw_sspp_setup_danger_safe_lut;
-		c->ops.setup_creq_lut = sde_hw_sspp_setup_creq_lut;
-		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
-	}
-
-	if (test_bit(SDE_PERF_SSPP_TS_PREFILL, &perf_features))
-		c->ops.setup_ts_prefill = sde_hw_sspp_setup_ts_prefill;
-
-	if (test_bit(SDE_SSPP_CSC, &features) ||
-		test_bit(SDE_SSPP_CSC_10BIT, &features))
-		c->ops.setup_csc = sde_hw_sspp_setup_csc;
-
-	if (test_bit(SDE_SSPP_DGM_CSC, &features))
-		c->ops.setup_dgm_csc = sde_hw_sspp_setup_dgm_csc;
-
-	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features)) {
-		c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
-		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler;
-	}
-
-	if (sde_hw_sspp_multirect_enabled(c->cap))
-		c->ops.setup_multirect = sde_hw_sspp_setup_multirect;
-
-	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features) ||
-			test_bit(SDE_SSPP_SCALER_QSEED3LITE, &features)) {
-		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
-		c->ops.get_scaler_ver = _sde_hw_sspp_get_scaler3_ver;
-		c->ops.setup_scaler_lut = is_qseed3_rev_qseed3lite(
-				c->catalog) ? reg_dmav1_setup_scaler3lite_lut
-				: reg_dmav1_setup_scaler3_lut;
-		ret = reg_dmav1_init_sspp_op_v4(is_qseed3_rev_qseed3lite(
-					c->catalog) ? SDE_SSPP_SCALER_QSEED3LITE
-					: SDE_SSPP_SCALER_QSEED3, c->idx);
-		if (!ret)
-			c->ops.setup_scaler = reg_dmav1_setup_vig_qseed3;
-	}
-
-	if (test_bit(SDE_PERF_SSPP_SYS_CACHE, &perf_features))
-		c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
-
-	if (test_bit(SDE_PERF_SSPP_CDP, &perf_features))
-		c->ops.setup_cdp = sde_hw_sspp_setup_cdp;
-
-	if (test_bit(SDE_PERF_SSPP_UIDLE, &perf_features))
-		c->ops.setup_uidle = sde_hw_sspp_setup_uidle;
-
-	_setup_layer_ops_colorproc(c, features);
-
-	if (test_bit(SDE_SSPP_DGM_INVERSE_PMA, &features))
-		c->ops.setup_inverse_pma = sde_hw_sspp_setup_dgm_inverse_pma;
-	else if (test_bit(SDE_SSPP_INVERSE_PMA, &features))
-		c->ops.setup_inverse_pma = sde_hw_sspp_setup_inverse_pma;
-
-	c->ops.get_ubwc_error = sde_hw_sspp_get_ubwc_error;
-	c->ops.clear_ubwc_error = sde_hw_sspp_clear_ubwc_error;
-}
-
-static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
-		void __iomem *addr,
-		struct sde_mdss_cfg *catalog,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	if ((sspp < SSPP_MAX) && catalog && addr && b) {
-		for (i = 0; i < catalog->sspp_count; i++) {
-			if (sspp == catalog->sspp[i].id) {
-				b->base_off = addr;
-				b->blk_off = catalog->sspp[i].base;
-				b->length = catalog->sspp[i].len;
-				b->hwversion = catalog->hwversion;
-				b->log_mask = SDE_DBG_MASK_SSPP;
-				return &catalog->sspp[i];
-			}
-		}
-	}
-
-	return ERR_PTR(-ENOMEM);
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
-		void __iomem *addr, struct sde_mdss_cfg *catalog,
-		bool is_virtual_pipe)
-{
-	struct sde_hw_pipe *hw_pipe;
-	struct sde_sspp_cfg *cfg;
-	int rc;
-
-	if (!addr || !catalog)
-		return ERR_PTR(-EINVAL);
-
-	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
-	if (!hw_pipe)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(hw_pipe);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Assign ops */
-	hw_pipe->catalog = catalog;
-	hw_pipe->mdp = &catalog->mdp[0];
-	hw_pipe->idx = idx;
-	hw_pipe->cap = cfg;
-	_setup_layer_ops(hw_pipe, hw_pipe->cap->features,
-		hw_pipe->cap->perf_features);
-
-	if (hw_pipe->ops.get_scaler_ver) {
-		sde_init_scaler_blk(&hw_pipe->cap->sblk->scaler_blk,
-			hw_pipe->ops.get_scaler_ver(hw_pipe));
-	}
-
-	rc = sde_hw_blk_init(&hw_pipe->base, SDE_HW_BLK_SSPP, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	if (!is_virtual_pipe)
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
-			hw_pipe->hw.blk_off,
-			hw_pipe->hw.blk_off + hw_pipe->hw.length,
-			hw_pipe->hw.xin_id);
-
-	if (cfg->sblk->scaler_blk.len && !is_virtual_pipe)
-		sde_dbg_reg_register_dump_range(SDE_DBG_NAME,
-			cfg->sblk->scaler_blk.name,
-			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base,
-			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base +
-				cfg->sblk->scaler_blk.len,
-			hw_pipe->hw.xin_id);
-
-	return hw_pipe;
-
-blk_init_error:
-	kzfree(hw_pipe);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
-{
-	if (ctx) {
-		sde_hw_blk_destroy(&ctx->base);
-		reg_dmav1_deinit_sspp_ops(ctx->idx);
-	}
-	kfree(ctx);
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
deleted file mode 100644
index fab6282..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ /dev/null
@@ -1,668 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_SSPP_H
-#define _SDE_HW_SSPP_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_reg_dma.h"
-#include "sde_hw_blk.h"
-#include "sde_formats.h"
-#include "sde_color_processing.h"
-
-struct sde_hw_pipe;
-
-/**
- * Flags
- */
-#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1
-#define SDE_SSPP_FLIP_LR	 0x2
-#define SDE_SSPP_FLIP_UD	 0x4
-#define SDE_SSPP_SOURCE_ROTATED_90 0x8
-#define SDE_SSPP_ROT_90  0x10
-#define SDE_SSPP_SOLID_FILL 0x20
-#define SDE_SSPP_RIGHT	 0x40
-
-/**
- * Define all scaler feature bits in catalog
- */
-#define SDE_SSPP_SCALER ((1UL << SDE_SSPP_SCALER_RGB) | \
-	(1UL << SDE_SSPP_SCALER_QSEED2) | \
-	(1UL << SDE_SSPP_SCALER_QSEED3) | \
-	(1UL << SDE_SSPP_SCALER_QSEED3LITE))
-
-/**
- * Component indices
- */
-enum {
-	SDE_SSPP_COMP_0,
-	SDE_SSPP_COMP_1_2,
-	SDE_SSPP_COMP_2,
-	SDE_SSPP_COMP_3,
-
-	SDE_SSPP_COMP_MAX
-};
-
-/**
- * SDE_SSPP_RECT_SOLO - multirect disabled
- * SDE_SSPP_RECT_0 - rect0 of a multirect pipe
- * SDE_SSPP_RECT_1 - rect1 of a multirect pipe
- * SDE_SSPP_RECT_MAX - max enum of multirect pipe
- *
- * Note: HW supports multirect with either RECT0 or
- * RECT1. Considering no benefit of such configs over
- * SOLO mode and to keep the plane management simple,
- * we dont support single rect multirect configs.
- */
-enum sde_sspp_multirect_index {
-	SDE_SSPP_RECT_SOLO = 0,
-	SDE_SSPP_RECT_0,
-	SDE_SSPP_RECT_1,
-	SDE_SSPP_RECT_MAX,
-};
-
-enum sde_sspp_multirect_mode {
-	SDE_SSPP_MULTIRECT_NONE = 0,
-	SDE_SSPP_MULTIRECT_PARALLEL,
-	SDE_SSPP_MULTIRECT_TIME_MX,
-};
-
-enum {
-	SDE_FRAME_LINEAR,
-	SDE_FRAME_TILE_A4X,
-	SDE_FRAME_TILE_A5X,
-};
-
-enum sde_hw_filter {
-	SDE_SCALE_FILTER_NEAREST = 0,
-	SDE_SCALE_FILTER_BIL,
-	SDE_SCALE_FILTER_PCMN,
-	SDE_SCALE_FILTER_CA,
-	SDE_SCALE_FILTER_MAX
-};
-
-enum sde_hw_filter_alpa {
-	SDE_SCALE_ALPHA_PIXEL_REP,
-	SDE_SCALE_ALPHA_BIL
-};
-
-enum sde_hw_filter_yuv {
-	SDE_SCALE_2D_4X4,
-	SDE_SCALE_2D_CIR,
-	SDE_SCALE_1D_SEP,
-	SDE_SCALE_BIL
-};
-
-struct sde_hw_sharp_cfg {
-	u32 strength;
-	u32 edge_thr;
-	u32 smooth_thr;
-	u32 noise_thr;
-};
-
-struct sde_hw_pixel_ext {
-	/* scaling factors are enabled for this input layer */
-	uint8_t enable_pxl_ext;
-
-	int init_phase_x[SDE_MAX_PLANES];
-	int phase_step_x[SDE_MAX_PLANES];
-	int init_phase_y[SDE_MAX_PLANES];
-	int phase_step_y[SDE_MAX_PLANES];
-
-	/*
-	 * Number of pixels extension in left, right, top and bottom direction
-	 * for all color components. This pixel value for each color component
-	 * should be sum of fetch + repeat pixels.
-	 */
-	int num_ext_pxls_left[SDE_MAX_PLANES];
-	int num_ext_pxls_right[SDE_MAX_PLANES];
-	int num_ext_pxls_top[SDE_MAX_PLANES];
-	int num_ext_pxls_btm[SDE_MAX_PLANES];
-
-	/*
-	 * Number of pixels needs to be overfetched in left, right, top and
-	 * bottom directions from source image for scaling.
-	 */
-	int left_ftch[SDE_MAX_PLANES];
-	int right_ftch[SDE_MAX_PLANES];
-	int top_ftch[SDE_MAX_PLANES];
-	int btm_ftch[SDE_MAX_PLANES];
-
-	/*
-	 * Number of pixels needs to be repeated in left, right, top and
-	 * bottom directions for scaling.
-	 */
-	int left_rpt[SDE_MAX_PLANES];
-	int right_rpt[SDE_MAX_PLANES];
-	int top_rpt[SDE_MAX_PLANES];
-	int btm_rpt[SDE_MAX_PLANES];
-
-	uint32_t roi_w[SDE_MAX_PLANES];
-	uint32_t roi_h[SDE_MAX_PLANES];
-
-	/*
-	 * Filter type to be used for scaling in horizontal and vertical
-	 * directions
-	 */
-	enum sde_hw_filter horz_filter[SDE_MAX_PLANES];
-	enum sde_hw_filter vert_filter[SDE_MAX_PLANES];
-
-};
-
-/**
- * struct sde_hw_pipe_cfg : Pipe description
- * @layout:    format layout information for programming buffer to hardware
- * @src_rect:  src ROI, caller takes into account the different operations
- *             such as decimation, flip etc to program this field
- * @dest_rect: destination ROI.
- * @ horz_decimation : horizontal decimation factor( 0, 2, 4, 8, 16)
- * @ vert_decimation : vertical decimation factor( 0, 2, 4, 8, 16)
- *              2: Read 1 line/pixel drop 1 line/pixel
- *              4: Read 1 line/pixel drop 3  lines/pixels
- *              8: Read 1 line/pixel drop 7 lines/pixels
- *              16: Read 1 line/pixel drop 15 line/pixels
- * @index:     index of the rectangle of SSPP
- * @mode:      parallel or time multiplex multirect mode
- */
-struct sde_hw_pipe_cfg {
-	struct sde_hw_fmt_layout layout;
-	struct sde_rect src_rect;
-	struct sde_rect dst_rect;
-	u8 horz_decimation;
-	u8 vert_decimation;
-	enum sde_sspp_multirect_index index;
-	enum sde_sspp_multirect_mode mode;
-};
-
-/**
- * struct sde_hw_pipe_qos_cfg : Source pipe QoS configuration
- * @danger_lut: LUT for generate danger level based on fill level
- * @safe_lut: LUT for generate safe level based on fill level
- * @creq_lut: LUT for generate creq level based on fill level
- * @creq_vblank: creq value generated to vbif during vertical blanking
- * @danger_vblank: danger value generated during vertical blanking
- * @vblank_en: enable creq_vblank and danger_vblank during vblank
- * @danger_safe_en: enable danger safe generation
- */
-struct sde_hw_pipe_qos_cfg {
-	u32 danger_lut;
-	u32 safe_lut;
-	u64 creq_lut;
-	u32 creq_vblank;
-	u32 danger_vblank;
-	bool vblank_en;
-	bool danger_safe_en;
-};
-
-/**
- * enum CDP preload ahead address size
- */
-enum {
-	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
-	SDE_SSPP_CDP_PRELOAD_AHEAD_64
-};
-
-/**
- * struct sde_hw_pipe_cdp_cfg : CDP configuration
- * @enable: true to enable CDP
- * @ubwc_meta_enable: true to enable ubwc metadata preload
- * @tile_amortize_enable: true to enable amortization control for tile format
- * @preload_ahead: number of request to preload ahead
- *	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
- *	SDE_SSPP_CDP_PRELOAD_AHEAD_64
- */
-struct sde_hw_pipe_cdp_cfg {
-	bool enable;
-	bool ubwc_meta_enable;
-	bool tile_amortize_enable;
-	u32 preload_ahead;
-};
-
-/**
- * enum system cache rotation operation mode
- */
-enum {
-	SDE_PIPE_SC_OP_MODE_OFFLINE,
-	SDE_PIPE_SC_OP_MODE_INLINE_SINGLE,
-	SDE_PIPE_SC_OP_MODE_INLINE_LEFT,
-	SDE_PIPE_SC_OP_MODE_INLINE_RIGHT,
-};
-
-/**
- * enum system cache read operation type
- */
-enum {
-	SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE,
-	SDE_PIPE_SC_RD_OP_TYPE_INVALIDATE,
-	SDE_PIPE_SC_RD_OP_TYPE_EVICTION,
-};
-
-/**
- * struct sde_hw_pipe_sc_cfg - system cache configuration
- * @op_mode: rotation operating mode
- * @rd_en: system cache read enable
- * @rd_scid: system cache read block id
- * @rd_noallocate: system cache read no allocate attribute
- * @rd_op_type: system cache read operation type
- * @flags: dirty flags to change the configuration
- */
-struct sde_hw_pipe_sc_cfg {
-	u32 op_mode;
-	bool rd_en;
-	u32 rd_scid;
-	bool rd_noallocate;
-	u32 rd_op_type;
-	u32 flags;
-};
-
-/**
- * struct sde_hw_pipe_uidle_cfg - uidle configuration
- * @enable: disables uidle
- * @fal_allowed_threshold: minimum fl to allow uidle
- * @fal10_exit_threshold: number of lines to indicate fal_10_exit is okay
- * @fal10_threshold: number of lines where fal_10_is okay
- * @fal1_threshold: number of lines where fal_1 is okay
- */
-struct sde_hw_pipe_uidle_cfg {
-	u32 enable;
-	u32 fal_allowed_threshold;
-	u32 fal10_exit_threshold;
-	u32 fal10_threshold;
-	u32 fal1_threshold;
-};
-
-/**
- * struct sde_hw_pipe_ts_cfg - traffic shaper configuration
- * @size: size to prefill in bytes, or zero to disable
- * @time: time to prefill in usec, or zero to disable
- */
-struct sde_hw_pipe_ts_cfg {
-	u64 size;
-	u64 time;
-};
-
-/**
- * Maximum number of stream buffer plane
- */
-#define SDE_PIPE_SBUF_PLANE_NUM	2
-
-/**
- * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions
- * Caller must call the init function to get the pipe context for each pipe
- * Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_sspp_ops {
-	/**
-	 * setup_format - setup pixel format cropping rectangle, flip
-	 * @ctx: Pointer to pipe context
-	 * @fmt: Pointer to sde_format structure
-	 * @blend_enabled: flag indicating blend enabled or disabled on plane
-	 * @flags: Extra flags for format config
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_format)(struct sde_hw_pipe *ctx,
-			const struct sde_format *fmt,
-			bool blend_enabled, u32 flags,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_rects - setup pipe ROI rectangles
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe config structure
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_rects)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cfg *cfg,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_pe - setup pipe pixel extension
-	 * @ctx: Pointer to pipe context
-	 * @pe_ext: Pointer to pixel ext settings
-	 */
-	void (*setup_pe)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pixel_ext *pe_ext);
-
-	/**
-	 * setup_excl_rect - setup pipe exclusion rectangle
-	 * @ctx: Pointer to pipe context
-	 * @excl_rect: Pointer to exclclusion rect structure
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_excl_rect)(struct sde_hw_pipe *ctx,
-			struct sde_rect *excl_rect,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_sourceaddress - setup pipe source addresses
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe config structure
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cfg *cfg,
-			enum sde_sspp_multirect_index index);
-
-	/* get_sourceaddress - get pipe current source addresses of a plane
-	 * @ctx: Pointer to pipe context
-	 * @is_virtual: If true get address programmed for R1 in multirect
-	 */
-	u32 (*get_sourceaddress)(struct sde_hw_pipe *ctx, bool is_virtual);
-
-	/**
-	 * setup_csc - setup color space conversion
-	 * @ctx: Pointer to pipe context
-	 * @data: Pointer to config structure
-	 */
-	void (*setup_csc)(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data);
-
-	/**
-	 * setup_solidfill - enable/disable colorfill
-	 * @ctx: Pointer to pipe context
-	 * @const_color: Fill color value
-	 * @flags: Pipe flags
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_multirect - setup multirect configuration
-	 * @ctx: Pointer to pipe context
-	 * @index: rectangle index in multirect
-	 * @mode: parallel fetch / time multiplex multirect mode
-	 */
-
-	void (*setup_multirect)(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index,
-			enum sde_sspp_multirect_mode mode);
-
-	/**
-	 * setup_sharpening - setup sharpening
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to config structure
-	 */
-	void (*setup_sharpening)(struct sde_hw_pipe *ctx,
-			struct sde_hw_sharp_cfg *cfg);
-
-
-	/**
-	 * setup_pa_hue(): Setup source hue adjustment
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to hue data
-	 */
-	void (*setup_pa_hue)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_pa_sat(): Setup source saturation adjustment
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to saturation data
-	 */
-	void (*setup_pa_sat)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_pa_val(): Setup source value adjustment
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to value data
-	 */
-	void (*setup_pa_val)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_pa_cont(): Setup source contrast adjustment
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer contrast data
-	 */
-	void (*setup_pa_cont)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_pa_memcolor - setup source color processing
-	 * @ctx: Pointer to pipe context
-	 * @type: Memcolor type (Skin, sky or foliage)
-	 * @cfg: Pointer to memory color config data
-	 */
-	void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
-			enum sde_memcolor_type type, void *cfg);
-
-	/**
-	 * setup_vig_gamut - setup 3D LUT Gamut in VIG pipes
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to vig gamut data
-	 */
-	void (*setup_vig_gamut)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_vig_igc - setup 1D LUT IGC in VIG pipes
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to vig igc data
-	 */
-	void (*setup_vig_igc)(struct sde_hw_pipe *ctx, void *cfg);
-
-	/**
-	 * setup_dma_igc - setup 1D LUT IGC in DMA pipes
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to dma igc data
-	 * @idx: multirect index
-	 */
-	void (*setup_dma_igc)(struct sde_hw_pipe *ctx, void *cfg,
-				enum sde_sspp_multirect_index idx);
-
-	/**
-	 * setup_dma_gc - setup 1D LUT GC in DMA pipes
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to dma gc data
-	 * @idx: multirect index
-	 */
-	void (*setup_dma_gc)(struct sde_hw_pipe *ctx, void *cfg,
-				enum sde_sspp_multirect_index idx);
-
-	/**
-	 * setup_danger_safe_lut - setup danger safe LUTs
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 *
-	 */
-	void (*setup_danger_safe_lut)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_qos_cfg *cfg);
-
-	/**
-	 * setup_creq_lut - setup CREQ LUT
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 *
-	 */
-	void (*setup_creq_lut)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_qos_cfg *cfg);
-
-	/**
-	 * setup_qos_ctrl - setup QoS control
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 *
-	 */
-	void (*setup_qos_ctrl)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_qos_cfg *cfg);
-
-	/**
-	 * setup_histogram - setup histograms
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to histogram configuration
-	 */
-	void (*setup_histogram)(struct sde_hw_pipe *ctx,
-			void *cfg);
-
-	/**
-	 * setup_scaler - setup scaler
-	 * @ctx: Pointer to pipe context
-	 * @pipe_cfg: Pointer to pipe configuration
-	 * @pe_cfg: Pointer to pixel extension configuration
-	 * @scaler_cfg: Pointer to scaler configuration
-	 */
-	void (*setup_scaler)(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cfg *pipe_cfg,
-		struct sde_hw_pixel_ext *pe_cfg,
-		void *scaler_cfg);
-
-	/**
-	 * setup_scaler_lut - setup scaler lut
-	 * @buf: Defines structure for reg dma ops on the reg dma buffer.
-	 * @scaler3_cfg: QSEEDv3 configuration
-	 * @offset: Scaler Offset
-	 */
-	void (*setup_scaler_lut)(struct sde_reg_dma_setup_ops_cfg *buf,
-			struct sde_hw_scaler3_cfg *scaler3_cfg,
-			u32 offset);
-
-	/**
-	 * get_scaler_ver - get scaler h/w version
-	 * @ctx: Pointer to pipe context
-	 */
-	u32 (*get_scaler_ver)(struct sde_hw_pipe *ctx);
-
-	/**
-	 * setup_sys_cache - setup system cache configuration
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to system cache configuration
-	 */
-	void (*setup_sys_cache)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_sc_cfg *cfg);
-
-	 /**
-	  * setup_uidle - set uidle configuration
-	  * @ctx: Pointer to pipe context
-	  * @cfg: Pointer to uidle configuration
-	  * @index: rectangle index in multirect
-	  */
-	 void (*setup_uidle)(struct sde_hw_pipe *ctx,
-			 struct sde_hw_pipe_uidle_cfg *cfg,
-			 enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_ts_prefill - setup prefill traffic shaper
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to traffic shaper configuration
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_ts_cfg *cfg,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_cdp - setup client driven prefetch
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to cdp configuration
-	 * @index: rectangle index in multirect
-	 */
-	void (*setup_cdp)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cdp_cfg *cfg,
-			enum sde_sspp_multirect_index index);
-
-	/**
-	 * setup_secure_address - setup secureity status of the source address
-	 * @ctx: Pointer to pipe context
-	 * @index: rectangle index in multirect
-	 * @enable: enable content protected buffer state
-	 */
-	void (*setup_secure_address)(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index,
-		bool enable);
-
-	/**
-	 * set_src_split_order - setup source split order priority
-	 * @ctx: Pointer to pipe context
-	 * @index: rectangle index in multirect
-	 * @enable: enable src split order
-	 */
-	void (*set_src_split_order)(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index, bool enable);
-
-	/**
-	 * setup_inverse_pma - enable/disable alpha unmultiply unit (PMA)
-	 * @ctx: Pointer to pipe context
-	 * @index: Rectangle index in multirect
-	 * @enable: PMA enable/disable settings
-	 */
-	void (*setup_inverse_pma)(struct sde_hw_pipe *ctx,
-			enum sde_sspp_multirect_index index, u32 enable);
-
-	/**
-	 * setup_dgm_csc - setup DGM color space conversion block and update lut
-	 * @ctx: Pointer to pipe context
-	 * @index: Rectangle index in multirect
-	 * @data: Pointer to config structure
-	 */
-	void (*setup_dgm_csc)(struct sde_hw_pipe *ctx,
-		enum sde_sspp_multirect_index index, struct sde_csc_cfg *data);
-
-	/**
-	 * clear_ubwc_error - clear the ubwc error-code registers
-	 * @ctx: Pointer to pipe context
-	 */
-	void (*clear_ubwc_error)(struct sde_hw_pipe *ctx);
-
-	/**
-	 * get_ubwc_error - get the ubwc error-code
-	 * @ctx: Pointer to pipe context
-	 */
-	u32 (*get_ubwc_error)(struct sde_hw_pipe *ctx);
-};
-
-/**
- * struct sde_hw_pipe - pipe description
- * @base: hardware block base structure
- * @hw: block hardware details
- * @catalog: back pointer to catalog
- * @mdp: pointer to associated mdp portion of the catalog
- * @idx: pipe index
- * @cap: pointer to layer_cfg
- * @ops: pointer to operations possible for this pipe
- */
-struct sde_hw_pipe {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-	struct sde_mdss_cfg *catalog;
-	struct sde_mdp_cfg *mdp;
-
-	/* Pipe */
-	enum sde_sspp idx;
-	struct sde_sspp_cfg *cap;
-
-	/* Ops */
-	struct sde_hw_sspp_ops ops;
-	struct sde_hw_ctl *ctl;
-};
-
-/**
- * sde_hw_pipe - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_pipe *to_sde_hw_pipe(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_pipe, base);
-}
-
-/**
- * sde_hw_sspp_init - initializes the sspp hw driver object.
- * Should be called once before accessing every pipe.
- * @idx:  Pipe index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @catalog : Pointer to mdss catalog data
- * @is_virtual_pipe: is this pipe virtual pipe
- */
-struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
-		void __iomem *addr, struct sde_mdss_cfg *catalog,
-		bool is_virtual_pipe);
-
-/**
- * sde_hw_sspp_destroy(): Destroys SSPP driver context
- * should be called during Hw pipe cleanup.
- * @ctx:  Pointer to SSPP driver context returned by sde_hw_sspp_init
- */
-void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
-
-#endif /*_SDE_HW_SSPP_H */
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
deleted file mode 100644
index 2785642..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ /dev/null
@@ -1,603 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_top.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define SSPP_SPARE                        0x28
-#define UBWC_DEC_HW_VERSION               0x058
-#define UBWC_STATIC                       0x144
-#define UBWC_CTRL_2                       0x150
-#define UBWC_PREDICTION_MODE              0x154
-
-#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
-#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
-#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
-#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
-#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
-
-#define DANGER_STATUS                     0x360
-#define SAFE_STATUS                       0x364
-
-#define TE_LINE_INTERVAL                  0x3F4
-
-#define TRAFFIC_SHAPER_EN                 BIT(31)
-#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + (num * 4))
-#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
-#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
-
-#define MDP_WD_TIMER_0_CTL                0x380
-#define MDP_WD_TIMER_0_CTL2               0x384
-#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
-#define MDP_WD_TIMER_1_CTL                0x390
-#define MDP_WD_TIMER_1_CTL2               0x394
-#define MDP_WD_TIMER_1_LOAD_VALUE         0x398
-#define MDP_WD_TIMER_2_CTL                0x420
-#define MDP_WD_TIMER_2_CTL2               0x424
-#define MDP_WD_TIMER_2_LOAD_VALUE         0x428
-#define MDP_WD_TIMER_3_CTL                0x430
-#define MDP_WD_TIMER_3_CTL2               0x434
-#define MDP_WD_TIMER_3_LOAD_VALUE         0x438
-#define MDP_WD_TIMER_4_CTL                0x440
-#define MDP_WD_TIMER_4_CTL2               0x444
-#define MDP_WD_TIMER_4_LOAD_VALUE         0x448
-
-#define MDP_TICK_COUNT                    16
-#define XO_CLK_RATE                       19200
-#define MS_TICKS_IN_SEC                   1000
-
-#define CALCULATE_WD_LOAD_VALUE(fps) \
-	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
-
-#define DCE_SEL                           0x450
-
-#define ROT_SID_RD			  0x20
-#define ROT_SID_WR			  0x24
-#define ROT_SID_ID_VAL			  0x1c
-
-static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
-		struct split_pipe_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 upper_pipe = 0;
-	u32 lower_pipe = 0;
-
-	if (!mdp || !cfg)
-		return;
-
-	c = &mdp->hw;
-
-	if (cfg->en) {
-		if (cfg->mode == INTF_MODE_CMD) {
-			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
-			/* interface controlling sw trigger */
-			if (cfg->intf == INTF_2)
-				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
-			else
-				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
-
-			/* free run */
-			if (cfg->pp_split_slave != INTF_MAX)
-				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
-
-			upper_pipe = lower_pipe;
-
-			/* smart panel align mode */
-			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
-		} else {
-			if (cfg->intf == INTF_2) {
-				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
-				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
-			} else {
-				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
-				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
-			}
-		}
-	}
-
-	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
-	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
-	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
-	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
-}
-
-static u32 sde_hw_get_split_flush(struct sde_hw_mdp *mdp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!mdp)
-		return 0;
-
-	c = &mdp->hw;
-
-	return (SDE_REG_READ(c, SSPP_SPARE) & 0x1);
-}
-
-static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
-		struct split_pipe_cfg *cfg)
-{
-	u32 ppb_config = 0x0;
-	u32 ppb_control = 0x0;
-
-	if (!mdp || !cfg)
-		return;
-
-	if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
-		ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
-		ppb_config |= BIT(16); /* split enable */
-		ppb_control = BIT(5); /* horz split*/
-	}
-	if (cfg->pp_split_index) {
-		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
-		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
-		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
-		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
-	} else {
-		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
-		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
-		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
-		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
-	}
-}
-
-static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
-		struct cdm_output_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 out_ctl = 0;
-
-	if (!mdp || !cfg)
-		return;
-
-	c = &mdp->hw;
-
-	if (cfg->wb_en)
-		out_ctl |= BIT(24);
-	else if (cfg->intf_en)
-		out_ctl |= BIT(19);
-
-	SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
-}
-
-static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
-		enum sde_clk_ctrl_type clk_ctrl, bool enable)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_off, bit_off;
-	u32 reg_val, new_val;
-	bool clk_forced_on;
-
-	if (!mdp)
-		return false;
-
-	c = &mdp->hw;
-
-	if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
-		return false;
-
-	reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
-	bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
-
-	reg_val = SDE_REG_READ(c, reg_off);
-
-	if (enable)
-		new_val = reg_val | BIT(bit_off);
-	else
-		new_val = reg_val & ~BIT(bit_off);
-
-	SDE_REG_WRITE(c, reg_off, new_val);
-	wmb(); /* ensure write finished before progressing */
-
-	clk_forced_on = !(reg_val & BIT(bit_off));
-
-	return clk_forced_on;
-}
-
-
-static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
-		struct sde_danger_safe_status *status)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 value;
-
-	if (!mdp || !status)
-		return;
-
-	c = &mdp->hw;
-
-	value = SDE_REG_READ(c, DANGER_STATUS);
-	status->mdp = (value >> 0) & 0x3;
-	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
-	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
-	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
-	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
-	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
-	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
-	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
-	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
-	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
-	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
-	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
-	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
-	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
-	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
-	status->wb[WB_0] = 0;
-	status->wb[WB_1] = 0;
-	status->wb[WB_2] = (value >> 2) & 0x3;
-	status->wb[WB_3] = 0;
-}
-
-static void _update_vsync_source(struct sde_hw_mdp *mdp,
-		struct sde_vsync_source_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg, wd_load_value, wd_ctl, wd_ctl2;
-
-	if (!mdp || !cfg)
-		return;
-
-	c = &mdp->hw;
-
-	if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
-			cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
-		switch (cfg->vsync_source) {
-		case SDE_VSYNC_SOURCE_WD_TIMER_4:
-			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
-			wd_ctl = MDP_WD_TIMER_4_CTL;
-			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
-			break;
-		case SDE_VSYNC_SOURCE_WD_TIMER_3:
-			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
-			wd_ctl = MDP_WD_TIMER_3_CTL;
-			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
-			break;
-		case SDE_VSYNC_SOURCE_WD_TIMER_2:
-			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
-			wd_ctl = MDP_WD_TIMER_2_CTL;
-			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
-			break;
-		case SDE_VSYNC_SOURCE_WD_TIMER_1:
-			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
-			wd_ctl = MDP_WD_TIMER_1_CTL;
-			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
-			break;
-		case SDE_VSYNC_SOURCE_WD_TIMER_0:
-		default:
-			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
-			wd_ctl = MDP_WD_TIMER_0_CTL;
-			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
-			break;
-		}
-
-		if (cfg->is_dummy) {
-			SDE_REG_WRITE(c, wd_ctl2, 0x0);
-		} else {
-			SDE_REG_WRITE(c, wd_load_value,
-				CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
-
-			SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
-			reg = SDE_REG_READ(c, wd_ctl2);
-			reg |= BIT(8);		/* enable heartbeat timer */
-			reg |= BIT(0);		/* enable WD timer */
-			SDE_REG_WRITE(c, wd_ctl2, reg);
-		}
-
-		/* make sure that timers are enabled/disabled for vsync state */
-		wmb();
-	}
-}
-
-static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
-		struct sde_vsync_source_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg, i;
-	static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
-
-	if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
-		return;
-
-	c = &mdp->hw;
-	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
-	for (i = 0; i < cfg->pp_count; i++) {
-		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
-
-		if (pp_idx >= ARRAY_SIZE(pp_offset))
-			continue;
-
-		reg &= ~(0xf << pp_offset[pp_idx]);
-		reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
-	}
-	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
-
-	_update_vsync_source(mdp, cfg);
-}
-
-static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
-		struct sde_vsync_source_cfg *cfg)
-{
-	_update_vsync_source(mdp, cfg);
-}
-
-
-static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
-		struct sde_danger_safe_status *status)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 value;
-
-	if (!mdp || !status)
-		return;
-
-	c = &mdp->hw;
-
-	value = SDE_REG_READ(c, SAFE_STATUS);
-	status->mdp = (value >> 0) & 0x1;
-	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
-	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
-	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
-	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
-	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
-	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
-	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
-	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
-	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
-	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
-	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
-	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
-	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
-	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
-	status->wb[WB_0] = 0;
-	status->wb[WB_1] = 0;
-	status->wb[WB_2] = (value >> 2) & 0x1;
-	status->wb[WB_3] = 0;
-}
-
-static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!mdp)
-		return;
-
-	c = &mdp->hw;
-
-	SDE_REG_WRITE(c, DCE_SEL, dce_sel);
-}
-
-void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
-{
-	struct sde_hw_blk_reg_map c;
-	u32 ubwc_version;
-
-	if (!mdp || !m)
-		return;
-
-	/* force blk offset to zero to access beginning of register region */
-	c = mdp->hw;
-	c.blk_off = 0x0;
-	ubwc_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
-
-	if (IS_UBWC_40_SUPPORTED(ubwc_version)) {
-		u32 ver = 2;
-		u32 mode = 1;
-		u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
-			((m->mdp[0].ubwc_static & 0x1) << 3) |
-			((m->mdp[0].highest_bank_bit & 0x7) << 4) |
-			((m->macrotile_mode & 0x1) << 12);
-
-		if (IS_UBWC_30_SUPPORTED(m->ubwc_version)) {
-			ver = 1;
-			mode = 0;
-		}
-
-		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
-		SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
-		SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
-	} else if (IS_UBWC_20_SUPPORTED(ubwc_version)) {
-		SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
-	} else if (IS_UBWC_30_SUPPORTED(ubwc_version)) {
-		u32 reg = m->mdp[0].ubwc_static |
-			(m->mdp[0].ubwc_swizzle & 0x1) |
-			((m->mdp[0].highest_bank_bit & 0x3) << 4) |
-			((m->macrotile_mode & 0x1) << 12);
-
-		if (IS_UBWC_30_SUPPORTED(m->ubwc_version))
-			reg |= BIT(10);
-
-		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
-	} else {
-		SDE_ERROR("Unsupported UBWC version 0x%08x\n", ubwc_version);
-	}
-}
-
-static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!mdp)
-		return;
-
-	c = &mdp->hw;
-
-	SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
-}
-
-struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
-	u32 sid_len, const struct sde_mdss_cfg *m)
-{
-	struct sde_hw_sid *c;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	c->hw.base_off = addr;
-	c->hw.blk_off = 0;
-	c->hw.length = sid_len;
-	c->hw.hwversion = m->hwversion;
-	c->hw.log_mask = SDE_DBG_MASK_SID;
-
-	return c;
-}
-
-void sde_hw_sid_rotator_set(struct sde_hw_sid *sid)
-{
-	SDE_REG_WRITE(&sid->hw, ROT_SID_RD, ROT_SID_ID_VAL);
-	SDE_REG_WRITE(&sid->hw, ROT_SID_WR, ROT_SID_ID_VAL);
-}
-
-static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
-		bool dual, bool dspp_out)
-{
-	u32 value = dspp_out ? 0x4 : 0x0;
-
-	SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
-	if (dual) {
-		value |= 0x1;
-		SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
-	}
-}
-
-static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
-		u8 *payload, u32 len, u32 stream_id)
-{
-	u32 i;
-	size_t length = len - 1;
-	u32 offset = 0, data = 0, byte_idx = 0;
-	const u32 dword_size = sizeof(u32);
-
-	if (!payload || !len) {
-		SDE_ERROR("invalid payload with length: %d\n", len);
-		return;
-	}
-
-	if (stream_id)
-		offset = DP_DHDR_MEM_POOL_1_DATA - DP_DHDR_MEM_POOL_0_DATA;
-
-	/* payload[0] is set in VSCEXT header byte 1, skip programming here */
-	SDE_REG_WRITE(&mdp->hw, DP_DHDR_MEM_POOL_0_NUM_BYTES + offset, length);
-	for (i = 1; i < len; i++) {
-		if (byte_idx && !(byte_idx % dword_size)) {
-			SDE_REG_WRITE(&mdp->hw, DP_DHDR_MEM_POOL_0_DATA +
-				offset, data);
-			data = 0;
-		}
-
-		data |= payload[i] << (8 * (byte_idx++ % dword_size));
-	}
-
-	SDE_REG_WRITE(&mdp->hw, DP_DHDR_MEM_POOL_0_DATA + offset, data);
-}
-
-static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
-		unsigned long cap)
-{
-	ops->setup_split_pipe = sde_hw_setup_split_pipe;
-	ops->setup_pp_split = sde_hw_setup_pp_split;
-	ops->setup_cdm_output = sde_hw_setup_cdm_output;
-	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
-	ops->get_danger_status = sde_hw_get_danger_status;
-	ops->setup_vsync_source = sde_hw_setup_vsync_source;
-	ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
-	ops->get_safe_status = sde_hw_get_safe_status;
-	ops->get_split_flush_status = sde_hw_get_split_flush;
-	ops->setup_dce = sde_hw_setup_dce;
-	ops->reset_ubwc = sde_hw_reset_ubwc;
-	ops->intf_audio_select = sde_hw_intf_audio_select;
-	if (cap & BIT(SDE_MDP_VSYNC_SEL))
-		ops->setup_vsync_source = sde_hw_setup_vsync_source;
-	else
-		ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
-	if (cap & BIT(SDE_MDP_DHDR_MEMPOOL))
-		ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
-}
-
-static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
-		const struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	if (!m || !addr || !b)
-		return ERR_PTR(-EINVAL);
-
-	for (i = 0; i < m->mdp_count; i++) {
-		if (mdp == m->mdp[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->mdp[i].base;
-			b->length = m->mdp[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_TOP;
-			return &m->mdp[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
-		void __iomem *addr,
-		const struct sde_mdss_cfg *m)
-{
-	struct sde_hw_mdp *mdp;
-	const struct sde_mdp_cfg *cfg;
-	int rc;
-
-	if (!addr || !m)
-		return ERR_PTR(-EINVAL);
-
-	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
-	if (!mdp)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _top_offset(idx, m, addr, &mdp->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(mdp);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/*
-	 * Assign ops
-	 */
-	mdp->idx = idx;
-	mdp->caps = cfg;
-	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
-
-	rc = sde_hw_blk_init(&mdp->base, SDE_HW_BLK_TOP, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
-			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
-			mdp->hw.xin_id);
-	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
-
-	return mdp;
-
-blk_init_error:
-	kzfree(mdp);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
-{
-	if (mdp)
-		sde_hw_blk_destroy(&mdp->base);
-	kfree(mdp);
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
deleted file mode 100644
index 71c2c63..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_TOP_H
-#define _SDE_HW_TOP_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_blk.h"
-
-struct sde_hw_mdp;
-
-/**
- * struct traffic_shaper_cfg: traffic shaper configuration
- * @en        : enable/disable traffic shaper
- * @rd_client : true if read client; false if write client
- * @client_id : client identifier
- * @bpc_denom : denominator of byte per clk
- * @bpc_numer : numerator of byte per clk
- */
-struct traffic_shaper_cfg {
-	bool en;
-	bool rd_client;
-	u32 client_id;
-	u32 bpc_denom;
-	u64 bpc_numer;
-};
-
-/**
- * struct split_pipe_cfg - pipe configuration for dual display panels
- * @en        : Enable/disable dual pipe confguration
- * @mode      : Panel interface mode
- * @intf      : Interface id for main control path
- * @pp_split_slave: Slave interface for ping pong split, INTF_MAX to disable
- * @pp_split_idx:   Ping pong index for ping pong split
- * @split_flush_en: Allows both the paths to be flushed when master path is
- *              flushed
- */
-struct split_pipe_cfg {
-	bool en;
-	enum sde_intf_mode mode;
-	enum sde_intf intf;
-	enum sde_intf pp_split_slave;
-	u32 pp_split_index;
-	bool split_flush_en;
-};
-
-/**
- * struct cdm_output_cfg: output configuration for cdm
- * @wb_en     : enable/disable writeback output
- * @intf_en   : enable/disable interface output
- */
-struct cdm_output_cfg {
-	bool wb_en;
-	bool intf_en;
-};
-
-/**
- * struct sde_danger_safe_status: danger and safe status signals
- * @mdp: top level status
- * @sspp: source pipe status
- * @wb: writebck output status
- */
-struct sde_danger_safe_status {
-	u8 mdp;
-	u8 sspp[SSPP_MAX];
-	u8 wb[WB_MAX];
-};
-
-/**
- * struct sde_vsync_source_cfg - configure vsync source and configure the
- *                                    watchdog timers if required.
- * @pp_count: number of ping pongs active
- * @frame_rate: Display frame rate
- * @ppnumber: ping pong index array
- * @vsync_source: vsync source selection
- * @is_dummy: a dummy source of vsync selection. It must not be selected for
- *           any case other than sde rsc idle request.
- */
-struct sde_vsync_source_cfg {
-	u32 pp_count;
-	u32 frame_rate;
-	u32 ppnumber[PINGPONG_MAX];
-	u32 vsync_source;
-	bool is_dummy;
-};
-
-/**
- * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
- * Assumption is these functions will be called after clocks are enabled.
- * @setup_split_pipe : Programs the pipe control registers
- * @setup_pp_split : Programs the pp split control registers
- * @setup_cdm_output : programs cdm control
- * @setup_traffic_shaper : programs traffic shaper control
- */
-struct sde_hw_mdp_ops {
-	/** setup_split_pipe() : Regsiters are not double buffered, thisk
-	 * function should be called before timing control enable
-	 * @mdp  : mdp top context driver
-	 * @cfg  : upper and lower part of pipe configuration
-	 */
-	void (*setup_split_pipe)(struct sde_hw_mdp *mdp,
-			struct split_pipe_cfg *p);
-
-	/** setup_pp_split() : Configure pp split related registers
-	 * @mdp  : mdp top context driver
-	 * @cfg  : upper and lower part of pipe configuration
-	 */
-	void (*setup_pp_split)(struct sde_hw_mdp *mdp,
-			struct split_pipe_cfg *cfg);
-
-	/**
-	 * setup_cdm_output() : Setup selection control of the cdm data path
-	 * @mdp  : mdp top context driver
-	 * @cfg  : cdm output configuration
-	 */
-	void (*setup_cdm_output)(struct sde_hw_mdp *mdp,
-			struct cdm_output_cfg *cfg);
-
-	/**
-	 * setup_traffic_shaper() : Setup traffic shaper control
-	 * @mdp  : mdp top context driver
-	 * @cfg  : traffic shaper configuration
-	 */
-	void (*setup_traffic_shaper)(struct sde_hw_mdp *mdp,
-			struct traffic_shaper_cfg *cfg);
-
-	/**
-	 * setup_clk_force_ctrl - set clock force control
-	 * @mdp: mdp top context driver
-	 * @clk_ctrl: clock to be controlled
-	 * @enable: force on enable
-	 * @return: if the clock is forced-on by this function
-	 */
-	bool (*setup_clk_force_ctrl)(struct sde_hw_mdp *mdp,
-			enum sde_clk_ctrl_type clk_ctrl, bool enable);
-
-	/**
-	 * setup_dce - set DCE mux for DSC ctrl path
-	 * @mdp: mdp top context driver
-	 * @dce_sel: dce_mux value
-	 */
-	void (*setup_dce)(struct sde_hw_mdp *mdp, u32 dce_sel);
-
-	/**
-	 * get_danger_status - get danger status
-	 * @mdp: mdp top context driver
-	 * @status: Pointer to danger safe status
-	 */
-	void (*get_danger_status)(struct sde_hw_mdp *mdp,
-			struct sde_danger_safe_status *status);
-
-	/**
-	 * setup_vsync_source - setup vsync source configuration details
-	 * @mdp: mdp top context driver
-	 * @cfg: vsync source selection configuration
-	 */
-	void (*setup_vsync_source)(struct sde_hw_mdp *mdp,
-				struct sde_vsync_source_cfg *cfg);
-
-	/**
-	 * get_safe_status - get safe status
-	 * @mdp: mdp top context driver
-	 * @status: Pointer to danger safe status
-	 */
-	void (*get_safe_status)(struct sde_hw_mdp *mdp,
-			struct sde_danger_safe_status *status);
-
-	/**
-	 * get_split_flush_status - get split flush status
-	 * @mdp: mdp top context driver
-	 */
-	u32 (*get_split_flush_status)(struct sde_hw_mdp *mdp);
-
-	/**
-	 * reset_ubwc - reset top level UBWC configuration
-	 * @mdp: mdp top context driver
-	 * @m: pointer to mdss catalog data
-	 */
-	void (*reset_ubwc)(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m);
-
-	/**
-	 * intf_audio_select - select the external interface for audio
-	 * @mdp: mdp top context driver
-	 */
-	void (*intf_audio_select)(struct sde_hw_mdp *mdp);
-
-	/**
-	 * set_cwb_ppb_cntl - select the data point for CWB
-	 * @mdp: mdp top context driver
-	 * @dual: indicates if dual pipe line needs to be programmed
-	 * @dspp_out : true if dspp output required. LM is default tap point
-	 */
-	void (*set_cwb_ppb_cntl)(struct sde_hw_mdp *mdp,
-			bool dual, bool dspp_out);
-
-	/**
-	 * set_hdr_plus_metadata - program the dynamic hdr metadata
-	 * @mdp:     mdp top context driver
-	 * @payload: pointer to payload data
-	 * @len:     size of the valid data within payload
-	 * @stream_id: stream ID for MST (0 or 1)
-	 */
-	void (*set_hdr_plus_metadata)(struct sde_hw_mdp *mdp,
-			u8 *payload, u32 len, u32 stream_id);
-};
-
-struct sde_hw_mdp {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-
-	/* top */
-	enum sde_mdp idx;
-	const struct sde_mdp_cfg *caps;
-
-	/* ops */
-	struct sde_hw_mdp_ops ops;
-};
-
-struct sde_hw_sid {
-	/* rotator base */
-	struct sde_hw_blk_reg_map hw;
-};
-
-/**
- * sde_hw_sid_rotator_set - initialize the sid blk reg map
- * @addr: Mapped register io address
- * @sid_len: Length of block
- * @m: Pointer to mdss catalog data
- */
-struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
-		u32 sid_len, const struct sde_mdss_cfg *m);
-
-/**
- * sde_hw_sid_rotator_set - set sid values for rotator
- * sid: sde_hw_sid passed from kms
- */
-void sde_hw_sid_rotator_set(struct sde_hw_sid *sid);
-
-/**
- * to_sde_hw_mdp - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_mdp *to_sde_hw_mdp(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_mdp, base);
-}
-
-/**
- * sde_hw_mdptop_init - initializes the top driver for the passed idx
- * @idx:  Interface index for which driver object is required
- * @addr: Mapped register io address of MDP
- * @m:    Pointer to mdss catalog data
- */
-struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
-		void __iomem *addr,
-		const struct sde_mdss_cfg *m);
-
-void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp);
-
-#endif /*_SDE_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_uidle.c b/drivers/gpu/drm/msm/sde/sde_hw_uidle.c
deleted file mode 100644
index 5d17f415..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_uidle.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- */
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_top.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define UIDLE_CTL 0x0
-#define UIDLE_STATUS 0x4
-#define UIDLE_FAL10_VETO_OVERRIDE 0x8
-
-#define UIDLE_WD_TIMER_CTL 0x10
-#define UIDLE_WD_TIMER_CTL2 0x14
-#define UIDLE_WD_TIMER_LOAD_VALUE 0x18
-
-#define UIDLE_DANGER_STATUS_0 0x20
-#define UIDLE_DANGER_STATUS_1 0x24
-#define UIDLE_SAFE_STATUS_0 0x30
-#define UIDLE_SAFE_STATUS_1 0x34
-#define UIDLE_IDLE_STATUS_0 0x38
-#define UIDLE_IDLE_STATUS_1 0x3c
-#define UIDLE_FAL_STATUS_0 0x40
-#define UIDLE_FAL_STATUS_1 0x44
-
-#define UIDLE_GATE_CNTR_CTL 0x50
-#define UIDLE_FAL1_GATE_CNTR 0x54
-#define UIDLE_FAL10_GATE_CNTR 0x58
-#define UIDLE_FAL_WAIT_GATE_CNTR 0x5c
-#define UIDLE_FAL1_NUM_TRANSITIONS_CNTR 0x60
-#define UIDLE_FAL10_NUM_TRANSITIONS_CNTR 0x64
-#define UIDLE_MIN_GATE_CNTR 0x68
-#define UIDLE_MAX_GATE_CNTR 0x6c
-
-static const struct sde_uidle_cfg *_top_offset(enum sde_uidle uidle,
-		struct sde_mdss_cfg *m, void __iomem *addr,
-		unsigned long len, struct sde_hw_blk_reg_map *b)
-{
-
-	/* Make sure length of regs offsets is within the mapped memory */
-	if ((uidle == m->uidle_cfg.id) &&
-		(m->uidle_cfg.base + m->uidle_cfg.len) < len) {
-
-		b->base_off = addr;
-		b->blk_off = m->uidle_cfg.base;
-		b->length = m->uidle_cfg.len;
-		b->hwversion = m->hwversion;
-		b->log_mask = SDE_DBG_MASK_UIDLE;
-		SDE_DEBUG("base:0x%p blk_off:0x%x length:%d hwversion:0x%x\n",
-			b->base_off, b->blk_off, b->length, b->hwversion);
-		return &m->uidle_cfg;
-	}
-
-	SDE_ERROR("wrong uidle mapping params, will disable UIDLE!\n");
-	SDE_ERROR("base_off:0x%pK id:%d base:0x%x len:%d mmio_len:%ld\n",
-		addr, m->uidle_cfg.id, m->uidle_cfg.base,
-		m->uidle_cfg.len, len);
-	m->uidle_cfg.uidle_rev = 0;
-
-	return ERR_PTR(-EINVAL);
-}
-
-void sde_hw_uidle_get_status(struct sde_hw_uidle *uidle,
-		struct sde_uidle_status *status)
-{
-	struct sde_hw_blk_reg_map *c = &uidle->hw;
-
-	status->uidle_danger_status_0 =
-		SDE_REG_READ(c, UIDLE_DANGER_STATUS_0);
-	status->uidle_danger_status_1 =
-		SDE_REG_READ(c, UIDLE_DANGER_STATUS_1);
-	status->uidle_safe_status_0 =
-		SDE_REG_READ(c, UIDLE_SAFE_STATUS_0);
-	status->uidle_safe_status_1 =
-		SDE_REG_READ(c, UIDLE_SAFE_STATUS_1);
-	status->uidle_idle_status_0 =
-		SDE_REG_READ(c, UIDLE_IDLE_STATUS_0);
-	status->uidle_idle_status_1 =
-		SDE_REG_READ(c, UIDLE_IDLE_STATUS_1);
-	status->uidle_fal_status_0 =
-		SDE_REG_READ(c, UIDLE_FAL_STATUS_0);
-	status->uidle_fal_status_1 =
-		SDE_REG_READ(c, UIDLE_FAL_STATUS_1);
-}
-
-void sde_hw_uidle_get_cntr(struct sde_hw_uidle *uidle,
-		struct sde_uidle_cntr *cntr)
-{
-	struct sde_hw_blk_reg_map *c = &uidle->hw;
-	u32 reg_val;
-
-	cntr->fal1_gate_cntr =
-		SDE_REG_READ(c, UIDLE_FAL1_GATE_CNTR);
-	cntr->fal10_gate_cntr =
-		SDE_REG_READ(c, UIDLE_FAL10_GATE_CNTR);
-	cntr->fal_wait_gate_cntr =
-		SDE_REG_READ(c, UIDLE_FAL_WAIT_GATE_CNTR);
-	cntr->fal1_num_transitions_cntr =
-		SDE_REG_READ(c, UIDLE_FAL1_NUM_TRANSITIONS_CNTR);
-	cntr->fal10_num_transitions_cntr =
-		SDE_REG_READ(c, UIDLE_FAL10_NUM_TRANSITIONS_CNTR);
-	cntr->min_gate_cntr =
-		SDE_REG_READ(c, UIDLE_MIN_GATE_CNTR);
-	cntr->max_gate_cntr =
-		SDE_REG_READ(c, UIDLE_MAX_GATE_CNTR);
-
-	/* clear counters after read */
-	reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
-	reg_val = reg_val | BIT(31);
-	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
-	reg_val = (reg_val & ~BIT(31));
-	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
-}
-
-void sde_hw_uidle_setup_cntr(struct sde_hw_uidle *uidle, bool enable)
-{
-	struct sde_hw_blk_reg_map *c = &uidle->hw;
-	u32 reg_val;
-
-	reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
-	reg_val = (reg_val & ~BIT(8)) | (enable ? BIT(8) : 0);
-
-	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
-}
-
-void sde_hw_uidle_setup_wd_timer(struct sde_hw_uidle *uidle,
-		struct sde_uidle_wd_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &uidle->hw;
-	u32 val_ctl, val_ctl2, val_ld;
-
-	val_ctl = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL);
-	val_ctl2 = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL2);
-	val_ld = SDE_REG_READ(c, UIDLE_WD_TIMER_LOAD_VALUE);
-
-	val_ctl = (val_ctl & ~BIT(0)) | (cfg->clear ? BIT(0) : 0);
-
-	val_ctl2 = (val_ctl2 & ~BIT(0)) | (cfg->enable ? BIT(0) : 0);
-	val_ctl2 = (val_ctl2 & ~GENMASK(4, 1)) |
-		((cfg->granularity & 0xF) << 1);
-	val_ctl2 = (val_ctl2 & ~BIT(8)) | (cfg->heart_beat ? BIT(8) : 0);
-
-	val_ld = cfg->load_value;
-
-	SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL, val_ctl);
-	SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL2, val_ctl2);
-	SDE_REG_WRITE(c, UIDLE_WD_TIMER_LOAD_VALUE, val_ld);
-}
-
-void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
-		struct sde_uidle_ctl_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &uidle->hw;
-	u32 reg_val;
-
-	reg_val = SDE_REG_READ(c, UIDLE_CTL);
-	reg_val = (reg_val & ~BIT(31)) | (cfg->uidle_enable ? BIT(31) : 0);
-	reg_val = (reg_val & ~FAL10_DANGER_MSK) |
-		((cfg->fal10_danger << FAL10_DANGER_SHFT) &
-		FAL10_DANGER_MSK);
-	reg_val = (reg_val & ~FAL10_EXIT_DANGER_MSK) |
-		((cfg->fal10_exit_danger << FAL10_EXIT_DANGER_SHFT) &
-		FAL10_EXIT_DANGER_MSK);
-	reg_val = (reg_val & ~FAL10_EXIT_CNT_MSK) |
-		((cfg->fal10_exit_cnt << FAL10_EXIT_CNT_SHFT) &
-		FAL10_EXIT_CNT_MSK);
-
-	SDE_REG_WRITE(c, UIDLE_CTL, reg_val);
-}
-
-static inline void _setup_uidle_ops(struct sde_hw_uidle_ops *ops,
-		unsigned long cap)
-{
-	ops->set_uidle_ctl = sde_hw_uidle_setup_ctl;
-	ops->setup_wd_timer = sde_hw_uidle_setup_wd_timer;
-	ops->uidle_setup_cntr = sde_hw_uidle_setup_cntr;
-	ops->uidle_get_cntr = sde_hw_uidle_get_cntr;
-	ops->uidle_get_status = sde_hw_uidle_get_status;
-}
-
-struct sde_hw_uidle *sde_hw_uidle_init(enum sde_uidle idx,
-		void __iomem *addr, unsigned long len,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_uidle *c;
-	const struct sde_uidle_cfg *cfg;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _top_offset(idx, m, addr, len, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/*
-	 * Assign ops
-	 */
-	c->idx = idx;
-	c->cap = cfg;
-	_setup_uidle_ops(&c->ops, c->cap->features);
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "uidle", c->hw.blk_off,
-		c->hw.blk_off + c->hw.length, 0);
-
-	return c;
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_uidle.h b/drivers/gpu/drm/msm/sde/sde_hw_uidle.h
deleted file mode 100644
index 50de041..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_uidle.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- */
-
-#ifndef _SDE_HW_UIDLE_H
-#define _SDE_HW_UIDLE_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-
-struct sde_hw_uidle;
-
-#define FAL10_DANGER_SHFT 0
-#define FAL10_EXIT_DANGER_SHFT 4
-#define FAL10_EXIT_CNT_SHFT 16
-
-#define FAL10_DANGER_MSK GENMASK(2, FAL10_DANGER_SHFT)
-#define FAL10_EXIT_DANGER_MSK GENMASK(6, FAL10_EXIT_DANGER_SHFT)
-#define FAL10_EXIT_CNT_MSK GENMASK(23, FAL10_EXIT_CNT_SHFT)
-
-#define SDE_UIDLE_WD_GRANULARITY 1
-#define SDE_UIDLE_WD_HEART_BEAT 0
-#define SDE_UIDLE_WD_LOAD_VAL 3
-
-struct sde_uidle_ctl_cfg {
-	u32 fal10_exit_cnt;
-	u32 fal10_exit_danger;
-	u32 fal10_danger;
-	bool uidle_enable;
-};
-
-struct sde_uidle_wd_cfg {
-	u32 granularity;
-	u32 heart_beat;
-	u32 load_value;
-	bool clear;
-	bool enable;
-};
-
-struct sde_uidle_cntr {
-	u32 fal1_gate_cntr;
-	u32 fal10_gate_cntr;
-	u32 fal_wait_gate_cntr;
-	u32 fal1_num_transitions_cntr;
-	u32 fal10_num_transitions_cntr;
-	u32 min_gate_cntr;
-	u32 max_gate_cntr;
-};
-
-struct sde_uidle_status {
-	u32 uidle_danger_status_0;
-	u32 uidle_danger_status_1;
-	u32 uidle_safe_status_0;
-	u32 uidle_safe_status_1;
-	u32 uidle_idle_status_0;
-	u32 uidle_idle_status_1;
-	u32 uidle_fal_status_0;
-	u32 uidle_fal_status_1;
-};
-
-struct sde_hw_uidle_ops {
-	/**
-	 * set_uidle_ctl - set uidle global config
-	 * @uidle: uidle context driver
-	 * @cfg: uidle global config
-	 */
-	void (*set_uidle_ctl)(struct sde_hw_uidle *uidle,
-		struct sde_uidle_ctl_cfg *cfg);
-
-	/**
-	 * setup_wd_timer - set uidle watchdog timer
-	 * @uidle: uidle context driver
-	 * @cfg: uidle wd timer config
-	 */
-	void (*setup_wd_timer)(struct sde_hw_uidle *uidle,
-		struct sde_uidle_wd_cfg *cfg);
-
-	/**
-	 * uidle_setup_cntr - set uidle perf counters
-	 * @uidle: uidle context driver
-	 * @enable: true to enable the counters
-	 */
-	void (*uidle_setup_cntr)(struct sde_hw_uidle *uidle,
-		bool enable);
-
-	/**
-	 * uidle_get_cntr - get uidle perf counters
-	 * @uidle: uidle context driver
-	 * @cntr: pointer to return the counters
-	 */
-	void (*uidle_get_cntr)(struct sde_hw_uidle *uidle,
-		struct sde_uidle_cntr *cntr);
-
-	/**
-	 * uidle_get_status - get uidle status
-	 * @uidle: uidle context driver
-	 * @status: pointer to return the status of uidle
-	 */
-	void (*uidle_get_status)(struct sde_hw_uidle *uidle,
-		struct sde_uidle_status *status);
-
-};
-
-struct sde_hw_uidle {
-	/* base */
-	struct sde_hw_blk_reg_map hw;
-
-	/* uidle */
-	const struct sde_uidle_cfg *cap;
-
-	/* ops */
-	struct sde_hw_uidle_ops ops;
-
-	/*
-	 * uidle is common across all displays, lock to serialize access.
-	 * must be taken by client before using any ops
-	 */
-	struct mutex uidle_lock;
-
-	enum sde_uidle idx;
-};
-
-struct sde_hw_uidle *sde_hw_uidle_init(enum sde_uidle idx,
-		void __iomem *addr, unsigned long len,
-		struct sde_mdss_cfg *m);
-
-#endif /*_SDE_HW_UIDLE_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
deleted file mode 100644
index 819af9e..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <uapi/drm/sde_drm.h>
-#include "msm_drv.h"
-#include "sde_kms.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-
-/* using a file static variables for debugfs access */
-static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
-
-/* SDE_SCALER_QSEED3 */
-#define QSEED3_HW_VERSION                  0x00
-#define QSEED3_OP_MODE                     0x04
-#define QSEED3_RGB2Y_COEFF                 0x08
-#define QSEED3_PHASE_INIT                  0x0C
-#define QSEED3_PHASE_STEP_Y_H              0x10
-#define QSEED3_PHASE_STEP_Y_V              0x14
-#define QSEED3_PHASE_STEP_UV_H             0x18
-#define QSEED3_PHASE_STEP_UV_V             0x1C
-#define QSEED3_PRELOAD                     0x20
-#define QSEED3_DE_SHARPEN                  0x24
-#define QSEED3_DE_SHARPEN_CTL              0x28
-#define QSEED3_DE_SHAPE_CTL                0x2C
-#define QSEED3_DE_THRESHOLD                0x30
-#define QSEED3_DE_ADJUST_DATA_0            0x34
-#define QSEED3_DE_ADJUST_DATA_1            0x38
-#define QSEED3_DE_ADJUST_DATA_2            0x3C
-#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
-#define QSEED3_SRC_SIZE_UV                 0x44
-#define QSEED3_DST_SIZE                    0x48
-#define QSEED3_COEF_LUT_CTRL               0x4C
-#define QSEED3_COEF_LUT_SWAP_BIT           0
-#define QSEED3_BUFFER_CTRL                 0x50
-#define QSEED3_CLK_CTRL0                   0x54
-#define QSEED3_CLK_CTRL1                   0x58
-#define QSEED3_CLK_STATUS                  0x5C
-#define QSEED3_MISR_CTRL                   0x70
-#define QSEED3_MISR_SIGNATURE_0            0x74
-#define QSEED3_MISR_SIGNATURE_1            0x78
-#define QSEED3_PHASE_INIT_Y_H              0x90
-#define QSEED3_PHASE_INIT_Y_V              0x94
-#define QSEED3_PHASE_INIT_UV_H             0x98
-#define QSEED3_PHASE_INIT_UV_V             0x9C
-#define QSEED3_ENABLE                      2
-#define CSC_MATRIX_SHIFT                   7
-
-/* SDE_SCALER_QSEED3LITE */
-#define QSEED3L_COEF_LUT_Y_SEP_BIT         4
-#define QSEED3L_COEF_LUT_UV_SEP_BIT        5
-#define QSEED3L_COEF_LUT_CTRL              0x4C
-#define QSEED3L_COEF_LUT_SWAP_BIT          0
-#define QSEED3L_DIR_FILTER_WEIGHT          0x60
-#define QSEED3LITE_SCALER_VERSION          0x2004
-#define QSEED4_SCALER_VERSION              0x3000
-
-#define QSEED3_DEFAULT_PRELOAD_V 0x3
-#define QSEED3_DEFAULT_PRELOAD_H 0x4
-
-#define QSEED4_DEFAULT_PRELOAD_V 0x2
-#define QSEED4_DEFAULT_PRELOAD_H 0x4
-
-typedef void (*scaler_lut_type)(struct sde_hw_blk_reg_map *,
-		struct sde_hw_scaler3_cfg *, u32);
-
-void sde_reg_write(struct sde_hw_blk_reg_map *c,
-		u32 reg_off,
-		u32 val,
-		const char *name)
-{
-	/* don't need to mutex protect this */
-	if (c->log_mask & sde_hw_util_log_mask)
-		SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
-				name, c->blk_off + reg_off, val);
-	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
-}
-
-int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off)
-{
-	return readl_relaxed(c->base_off + c->blk_off + reg_off);
-}
-
-u32 *sde_hw_util_get_log_mask_ptr(void)
-{
-	return &sde_hw_util_log_mask;
-}
-
-void sde_init_scaler_blk(struct sde_scaler_blk *blk, u32 version)
-{
-	if (!blk)
-		return;
-
-	blk->version = version;
-	blk->v_preload = QSEED4_DEFAULT_PRELOAD_V;
-	blk->h_preload = QSEED4_DEFAULT_PRELOAD_H;
-	if (version < QSEED4_SCALER_VERSION) {
-		blk->v_preload = QSEED3_DEFAULT_PRELOAD_V;
-		blk->h_preload = QSEED3_DEFAULT_PRELOAD_H;
-	}
-}
-void sde_set_scaler_v2(struct sde_hw_scaler3_cfg *cfg,
-		const struct sde_drm_scaler_v2 *scale_v2)
-{
-	int i;
-
-	cfg->enable = scale_v2->enable;
-	cfg->dir_en = scale_v2->dir_en;
-
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
-		cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
-		cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
-		cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
-
-		cfg->preload_x[i] = scale_v2->preload_x[i];
-		cfg->preload_y[i] = scale_v2->preload_y[i];
-		cfg->src_width[i] = scale_v2->src_width[i];
-		cfg->src_height[i] = scale_v2->src_height[i];
-	}
-
-	cfg->dst_width = scale_v2->dst_width;
-	cfg->dst_height = scale_v2->dst_height;
-
-	cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
-	cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
-	cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
-	cfg->blend_cfg = scale_v2->blend_cfg;
-
-	cfg->lut_flag = scale_v2->lut_flag;
-	cfg->dir_lut_idx = scale_v2->dir_lut_idx;
-	cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
-	cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
-	cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
-	cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
-	cfg->de.prec_shift = scale_v2->de.prec_shift;
-	cfg->dir_weight = scale_v2->dir_weight;
-	cfg->dyn_exp_disabled = (scale_v2->flags & SDE_DYN_EXP_DISABLE) ? 1 : 0;
-
-	cfg->de.enable = scale_v2->de.enable;
-	cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
-	cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
-	cfg->de.clip = scale_v2->de.clip;
-	cfg->de.limit = scale_v2->de.limit;
-	cfg->de.thr_quiet = scale_v2->de.thr_quiet;
-	cfg->de.thr_dieout = scale_v2->de.thr_dieout;
-	cfg->de.thr_low = scale_v2->de.thr_low;
-	cfg->de.thr_high = scale_v2->de.thr_high;
-	cfg->de.blend = scale_v2->de_blend;
-
-	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
-		cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
-		cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
-		cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
-	}
-}
-
-static void _sde_hw_setup_scaler3_lut(struct sde_hw_blk_reg_map *c,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset)
-{
-	int i, j, filter;
-	int config_lut = 0x0;
-	unsigned long lut_flags;
-	u32 lut_addr, lut_offset, lut_len;
-	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
-	static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
-		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
-		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
-		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
-		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
-		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
-	};
-
-	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
-	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
-		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
-		lut[0] = scaler3_cfg->dir_lut;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[1] = scaler3_cfg->cir_lut +
-			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[2] = scaler3_cfg->cir_lut +
-			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[3] = scaler3_cfg->sep_lut +
-			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[4] = scaler3_cfg->sep_lut +
-			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-
-	if (config_lut) {
-		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
-			if (!lut[filter])
-				continue;
-			lut_offset = 0;
-			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
-				lut_addr = QSEED3_COEF_LUT_OFF + offset
-					+ off_tbl[filter][i][1];
-				lut_len = off_tbl[filter][i][0] << 2;
-				for (j = 0; j < lut_len; j++) {
-					SDE_REG_WRITE(c,
-						lut_addr,
-						(lut[filter])[lut_offset++]);
-					lut_addr += 4;
-				}
-			}
-		}
-	}
-
-	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
-		SDE_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
-
-}
-
-static void _sde_hw_setup_scaler3lite_lut(struct sde_hw_blk_reg_map *c,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset)
-{
-	int i, filter;
-	int config_lut = 0x0;
-	unsigned long lut_flags;
-	u32 lut_addr, lut_offset;
-	u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
-	static const uint32_t off_tbl[QSEED3LITE_FILTERS] = {0x000, 0x200};
-
-	SDE_REG_WRITE(c, QSEED3L_DIR_FILTER_WEIGHT + offset,
-			scaler3_cfg->dir_weight & 0xFF);
-
-	/* destination scaler case */
-	if (!scaler3_cfg->sep_lut)
-		return;
-
-	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
-	if (test_bit(QSEED3L_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3L_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3L_SEP_LUT_SIZE)) {
-		lut[0] = scaler3_cfg->sep_lut +
-			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3L_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3L_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_sep_lut_idx < QSEED3L_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3L_SEP_LUT_SIZE)) {
-		lut[1] = scaler3_cfg->sep_lut +
-			scaler3_cfg->uv_sep_lut_idx * QSEED3L_LUT_SIZE;
-		config_lut = 1;
-	}
-
-	if (config_lut) {
-		for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
-			if (!lut[filter])
-				continue;
-			lut_offset = 0;
-			lut_addr = QSEED3L_COEF_LUT_OFF + offset +
-				off_tbl[filter];
-			for (i = 0; i < QSEED3L_LUT_SIZE; i++) {
-				SDE_REG_WRITE(c, lut_addr,
-						(lut[filter])[lut_offset++]);
-				lut_addr += 4;
-			}
-		}
-	}
-
-	if (test_bit(QSEED3L_COEF_LUT_SWAP_BIT, &lut_flags))
-		SDE_REG_WRITE(c, QSEED3L_COEF_LUT_CTRL + offset, BIT(0));
-}
-
-static void _sde_hw_setup_scaler3_de(struct sde_hw_blk_reg_map *c,
-		struct sde_hw_scaler3_de_cfg *de_cfg, u32 offset)
-{
-	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
-	u32 adjust_a, adjust_b, adjust_c;
-
-	if (!de_cfg->enable)
-		return;
-
-	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
-		((de_cfg->sharpen_level2 & 0x1FF) << 16);
-
-	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
-		((de_cfg->prec_shift & 0x7) << 13) |
-		((de_cfg->clip & 0x7) << 16) |
-		((de_cfg->blend & 0xF) << 20);
-
-	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
-		((de_cfg->thr_dieout & 0x3FF) << 16);
-
-	de_thr = (de_cfg->thr_low & 0x3FF) |
-		((de_cfg->thr_high & 0x3FF) << 16);
-
-	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
-		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_a[2] & 0x3FF) << 20);
-
-	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
-		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_b[2] & 0x3FF) << 20);
-
-	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
-		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_c[2] & 0x3FF) << 20);
-
-	SDE_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
-	SDE_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
-	SDE_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
-	SDE_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
-	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
-	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
-	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
-
-}
-
-static inline scaler_lut_type get_scaler_lut(
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 scaler_version)
-{
-	scaler_lut_type lut_ptr = _sde_hw_setup_scaler3lite_lut;
-
-	if (!(scaler3_cfg->lut_flag))
-		return NULL;
-
-	if (scaler_version < QSEED3LITE_SCALER_VERSION)
-		lut_ptr = _sde_hw_setup_scaler3_lut;
-
-	return lut_ptr;
-}
-
-void sde_hw_setup_scaler3(struct sde_hw_blk_reg_map *c,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 scaler_version,
-		u32 scaler_offset, const struct sde_format *format)
-{
-	u32 op_mode = 0;
-	u32 phase_init, preload, src_y_rgb, src_uv, dst;
-	scaler_lut_type setup_lut = NULL;
-
-	if (!scaler3_cfg->enable)
-		goto end;
-
-	op_mode |= BIT(0);
-	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
-
-	if (format && SDE_FORMAT_IS_YUV(format)) {
-		op_mode |= BIT(12);
-		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
-	}
-
-	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
-	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
-	op_mode |= (scaler3_cfg->dyn_exp_disabled) ? BIT(13) : 0;
-
-	preload =
-		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
-		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
-		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
-		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
-
-	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
-		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
-
-	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
-		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
-
-	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
-		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
-
-	if (scaler3_cfg->de.enable) {
-		_sde_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
-		op_mode |= BIT(8);
-	}
-
-	setup_lut = get_scaler_lut(scaler3_cfg, scaler_version);
-	if (setup_lut)
-		setup_lut(c, scaler3_cfg, scaler_offset);
-
-	if (scaler_version == 0x1002) {
-		phase_init =
-			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
-			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
-			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
-			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
-		SDE_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
-	} else {
-		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
-			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
-		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
-			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
-		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
-			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
-		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
-			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
-	}
-
-	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
-		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
-
-	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
-		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
-
-	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
-		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
-
-	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
-		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
-
-	SDE_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
-
-	SDE_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
-
-	SDE_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
-
-	SDE_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
-
-end:
-	if (format && !SDE_FORMAT_IS_DX(format))
-		op_mode |= BIT(14);
-
-	if (format && format->alpha_enable) {
-		op_mode |= BIT(10);
-		if (scaler_version == 0x1002)
-			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
-		else
-			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
-	}
-
-	SDE_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
-}
-
-u32 sde_hw_get_scaler3_ver(struct sde_hw_blk_reg_map *c,
-			u32 scaler_offset)
-{
-	return SDE_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
-}
-
-void sde_hw_csc_matrix_coeff_setup(struct sde_hw_blk_reg_map *c,
-		u32 csc_reg_off, struct sde_csc_cfg *data,
-		u32 shift_bit)
-{
-	u32 val;
-
-	if (!c || !data)
-		return;
-
-	val = ((data->csc_mv[0] >> shift_bit) & 0x1FFF) |
-		(((data->csc_mv[1] >> shift_bit) & 0x1FFF) << 16);
-	SDE_REG_WRITE(c, csc_reg_off, val);
-	val = ((data->csc_mv[2] >> shift_bit) & 0x1FFF) |
-		(((data->csc_mv[3] >> shift_bit) & 0x1FFF) << 16);
-	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
-	val = ((data->csc_mv[4] >> shift_bit) & 0x1FFF) |
-		(((data->csc_mv[5] >> shift_bit) & 0x1FFF) << 16);
-	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
-	val = ((data->csc_mv[6] >> shift_bit) & 0x1FFF) |
-		(((data->csc_mv[7] >> shift_bit) & 0x1FFF) << 16);
-	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
-	val = (data->csc_mv[8] >> shift_bit) & 0x1FFF;
-	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
-}
-
-void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
-		u32 csc_reg_off,
-		struct sde_csc_cfg *data, bool csc10)
-{
-	u32 clamp_shift = csc10 ? 16 : 8;
-	u32 val;
-
-	if (!c || !data)
-		return;
-
-	/* matrix coeff - convert S15.16 to S4.9 */
-	sde_hw_csc_matrix_coeff_setup(c, csc_reg_off, data, CSC_MATRIX_SHIFT);
-
-	/* Pre clamp */
-	val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
-	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
-	val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
-	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
-	val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
-	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
-
-	/* Post clamp */
-	val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
-	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
-	val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
-	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
-	val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
-	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
-
-	/* Pre-Bias */
-	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
-	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
-	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
-
-	/* Post-Bias */
-	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
-	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
-	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
-}
-
-/**
- * _sde_copy_formats   - copy formats from src_list to dst_list
- * @dst_list:          pointer to destination list where to copy formats
- * @dst_list_size:     size of destination list
- * @dst_list_pos:      starting position on the list where to copy formats
- * @src_list:          pointer to source list where to copy formats from
- * @src_list_size:     size of source list
- * Return: number of elements populated
- */
-uint32_t sde_copy_formats(
-		struct sde_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct sde_format_extended *src_list,
-		uint32_t src_list_size)
-{
-	uint32_t cur_pos, i;
-
-	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
-		return 0;
-
-	for (i = 0, cur_pos = dst_list_pos;
-		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
-		&& src_list[i].fourcc_format; ++i, ++cur_pos)
-		dst_list[cur_pos] = src_list[i];
-
-	dst_list[cur_pos].fourcc_format = 0;
-
-	return i;
-}
-
-/**
- * sde_get_linetime   - returns the line time for a given mode
- * @mode:          pointer to drm mode to calculate the line time
- * Return:         line time of display mode in nS
- */
-uint32_t sde_get_linetime(struct drm_display_mode *mode)
-{
-	u64 pclk_rate;
-	u32 pclk_period;
-	u32 line_time;
-
-	pclk_rate = mode->clock; /* pixel clock in kHz */
-	if (pclk_rate == 0) {
-		SDE_ERROR("pclk is 0, cannot calculate line time\n");
-		return 0;
-	}
-
-	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
-	if (pclk_period == 0) {
-		SDE_ERROR("pclk period is 0\n");
-		return 0;
-	}
-
-	/*
-	 * Line time calculation based on Pixel clock and HTOTAL.
-	 * Final unit is in ns.
-	 */
-	line_time = (pclk_period * mode->htotal) / 1000;
-	if (line_time == 0) {
-		SDE_ERROR("line time calculation is 0\n");
-		return 0;
-	}
-
-	pr_debug("clk_rate=%lldkHz, clk_period=%d, linetime=%dns, htotal=%d\n",
-			pclk_rate, pclk_period, line_time, mode->htotal);
-
-	return line_time;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
deleted file mode 100644
index d89ac70..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_UTIL_H
-#define _SDE_HW_UTIL_H
-
-#include <linux/io.h>
-#include <linux/slab.h>
-#include "sde_hw_mdss.h"
-#include "sde_hw_catalog.h"
-
-#define REG_MASK(n)                     ((BIT(n)) - 1)
-#define LP_DDR4_TYPE			0x7
-
-struct sde_format_extended;
-
-/*
- * This is the common struct maintained by each sub block
- * for mapping the register offsets in this block to the
- * absoulute IO address
- * @base_off:     mdp register mapped offset
- * @blk_off:      pipe offset relative to mdss offset
- * @length        length of register block offset
- * @xin_id        xin id
- * @hwversion     mdss hw version number
- */
-struct sde_hw_blk_reg_map {
-	void __iomem *base_off;
-	u32 blk_off;
-	u32 length;
-	u32 xin_id;
-	u32 hwversion;
-	u32 log_mask;
-};
-
-/**
- * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
- * @enable:         detail enhancer enable/disable
- * @sharpen_level1: sharpening strength for noise
- * @sharpen_level2: sharpening strength for signal
- * @ clip:          clip shift
- * @ limit:         limit value
- * @ thr_quiet:     quiet threshold
- * @ thr_dieout:    dieout threshold
- * @ thr_high:      low threshold
- * @ thr_high:      high threshold
- * @ prec_shift:    precision shift
- * @ adjust_a:      A-coefficients for mapping curve
- * @ adjust_b:      B-coefficients for mapping curve
- * @ adjust_c:      C-coefficients for mapping curve
- * @ blend:      Unsharp Blend Filter Ratio
- */
-struct sde_hw_scaler3_de_cfg {
-	u32 enable;
-	int16_t sharpen_level1;
-	int16_t sharpen_level2;
-	uint16_t clip;
-	uint16_t limit;
-	uint16_t thr_quiet;
-	uint16_t thr_dieout;
-	uint16_t thr_low;
-	uint16_t thr_high;
-	uint16_t prec_shift;
-	int16_t adjust_a[SDE_MAX_DE_CURVES];
-	int16_t adjust_b[SDE_MAX_DE_CURVES];
-	int16_t adjust_c[SDE_MAX_DE_CURVES];
-	uint32_t blend;
-};
-
-
-/**
- * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
- * @enable:        scaler enable
- * @dir_en:        direction detection block enable
- * @ init_phase_x: horizontal initial phase
- * @ phase_step_x: horizontal phase step
- * @ init_phase_y: vertical initial phase
- * @ phase_step_y: vertical phase step
- * @ preload_x:    horizontal preload value
- * @ preload_y:    vertical preload value
- * @ src_width:    source width
- * @ src_height:   source height
- * @ dst_width:    destination width
- * @ dst_height:   destination height
- * @ y_rgb_filter_cfg: y/rgb plane filter configuration
- * @ uv_filter_cfg: uv plane filter configuration
- * @ alpha_filter_cfg: alpha filter configuration
- * @ blend_cfg:    blend coefficients configuration
- * @ lut_flag:     scaler LUT update flags
- *                 0x1 swap LUT bank
- *                 0x2 update 2D filter LUT
- *                 0x4 update y circular filter LUT
- *                 0x8 update uv circular filter LUT
- *                 0x10 update y separable filter LUT
- *                 0x20 update uv separable filter LUT
- * @ dir_lut_idx:  2D filter LUT index
- * @ y_rgb_cir_lut_idx: y circular filter LUT index
- * @ uv_cir_lut_idx: uv circular filter LUT index
- * @ y_rgb_sep_lut_idx: y circular filter LUT index
- * @ uv_sep_lut_idx: uv separable filter LUT index
- * @ dir_lut:      pointer to 2D LUT
- * @ cir_lut:      pointer to circular filter LUT
- * @ sep_lut:      pointer to separable filter LUT
- * @ de: detail enhancer configuration
- * @ dir_weight:   Directional Weight
- * @dyn_exp_disabled:     Dynamic expansion disabled
- */
-struct sde_hw_scaler3_cfg {
-	u32 enable;
-	u32 dir_en;
-	int32_t init_phase_x[SDE_MAX_PLANES];
-	int32_t phase_step_x[SDE_MAX_PLANES];
-	int32_t init_phase_y[SDE_MAX_PLANES];
-	int32_t phase_step_y[SDE_MAX_PLANES];
-
-	u32 preload_x[SDE_MAX_PLANES];
-	u32 preload_y[SDE_MAX_PLANES];
-	u32 src_width[SDE_MAX_PLANES];
-	u32 src_height[SDE_MAX_PLANES];
-
-	u32 dst_width;
-	u32 dst_height;
-
-	u32 y_rgb_filter_cfg;
-	u32 uv_filter_cfg;
-	u32 alpha_filter_cfg;
-	u32 blend_cfg;
-
-	u32 lut_flag;
-	u32 dir_lut_idx;
-
-	u32 y_rgb_cir_lut_idx;
-	u32 uv_cir_lut_idx;
-	u32 y_rgb_sep_lut_idx;
-	u32 uv_sep_lut_idx;
-	u32 *dir_lut;
-	size_t dir_len;
-	u32 *cir_lut;
-	size_t cir_len;
-	u32 *sep_lut;
-	size_t sep_len;
-
-	/*
-	 * Detail enhancer settings
-	 */
-	struct sde_hw_scaler3_de_cfg de;
-	uint32_t dir_weight;
-	uint32_t dyn_exp_disabled;
-};
-
-struct sde_hw_scaler3_lut_cfg {
-	bool is_configured;
-	u32 *dir_lut;
-	size_t dir_len;
-	u32 *cir_lut;
-	size_t cir_len;
-	u32 *sep_lut;
-	size_t sep_len;
-};
-
-u32 *sde_hw_util_get_log_mask_ptr(void);
-
-void sde_reg_write(struct sde_hw_blk_reg_map *c,
-		u32 reg_off,
-		u32 val,
-		const char *name);
-int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off);
-
-#define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
-#define SDE_REG_READ(c, off) sde_reg_read(c, off)
-
-#define MISR_FRAME_COUNT_MASK		0xFF
-#define MISR_CTRL_ENABLE		BIT(8)
-#define MISR_CTRL_STATUS		BIT(9)
-#define MISR_CTRL_STATUS_CLEAR		BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
-#define INTF_MISR_CTRL_INPUT_SEL_DATA   BIT(24)
-
-void *sde_hw_util_get_dir(void);
-
-void sde_init_scaler_blk(struct sde_scaler_blk *blk, u32 version);
-
-void sde_set_scaler_v2(struct sde_hw_scaler3_cfg *cfg,
-		const struct sde_drm_scaler_v2 *scale_v2);
-
-void sde_hw_setup_scaler3(struct sde_hw_blk_reg_map *c,
-		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 scaler_version,
-		u32 scaler_offset, const struct sde_format *format);
-
-u32 sde_hw_get_scaler3_ver(struct sde_hw_blk_reg_map *c,
-		u32 scaler_offset);
-
-void sde_hw_csc_matrix_coeff_setup(struct sde_hw_blk_reg_map *c,
-		u32 csc_reg_off, struct sde_csc_cfg *data,
-		u32 shift_bit);
-
-void sde_hw_csc_setup(struct sde_hw_blk_reg_map  *c,
-		u32 csc_reg_off,
-		struct sde_csc_cfg *data, bool csc10);
-
-uint32_t sde_copy_formats(
-		struct sde_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct sde_format_extended *src_list,
-		uint32_t src_list_size);
-
-uint32_t sde_get_linetime(struct drm_display_mode *mode);
-
-static inline bool is_qseed3_rev_qseed3lite(struct sde_mdss_cfg *sde_cfg)
-{
-	return ((sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3LITE) ?
-			true : false);
-}
-#endif /* _SDE_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
deleted file mode 100644
index f28a1fe..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_vbif.h"
-#include "sde_dbg.h"
-
-#define VBIF_VERSION			0x0000
-#define VBIF_CLK_FORCE_CTRL0		0x0008
-#define VBIF_CLK_FORCE_CTRL1		0x000C
-#define VBIF_QOS_REMAP_00		0x0020
-#define VBIF_QOS_REMAP_01		0x0024
-#define VBIF_QOS_REMAP_10		0x0028
-#define VBIF_QOS_REMAP_11		0x002C
-#define VBIF_WRITE_GATHER_EN		0x00AC
-#define VBIF_IN_RD_LIM_CONF0		0x00B0
-#define VBIF_IN_RD_LIM_CONF1		0x00B4
-#define VBIF_IN_RD_LIM_CONF2		0x00B8
-#define VBIF_IN_WR_LIM_CONF0		0x00C0
-#define VBIF_IN_WR_LIM_CONF1		0x00C4
-#define VBIF_IN_WR_LIM_CONF2		0x00C8
-#define VBIF_OUT_RD_LIM_CONF0		0x00D0
-#define VBIF_OUT_WR_LIM_CONF0		0x00D4
-#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
-#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
-#define VBIF_OUT_AXI_ASHARED		0x0170
-#define VBIF_OUT_AXI_AINNERSHARED	0x0174
-#define VBIF_XIN_PND_ERR		0x0190
-#define VBIF_XIN_SRC_ERR		0x0194
-#define VBIF_XIN_CLR_ERR		0x019C
-#define VBIF_XIN_HALT_CTRL0		0x0200
-#define VBIF_XIN_HALT_CTRL1		0x0204
-#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
-
-static void sde_hw_clear_errors(struct sde_hw_vbif *vbif,
-		u32 *pnd_errors, u32 *src_errors)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 pnd, src;
-
-	if (!vbif)
-		return;
-	c = &vbif->hw;
-	pnd = SDE_REG_READ(c, VBIF_XIN_PND_ERR);
-	src = SDE_REG_READ(c, VBIF_XIN_SRC_ERR);
-
-	if (pnd_errors)
-		*pnd_errors = pnd;
-	if (src_errors)
-		*src_errors = src;
-
-	SDE_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
-}
-
-static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
-		u32 xin_id, u32 value)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_off;
-	u32 bit_off;
-	u32 reg_val;
-
-	/*
-	 * Assume 4 bits per bit field, 8 fields per 32-bit register so
-	 * 16 bit fields maximum across two registers
-	 */
-	if (!vbif || xin_id >= MAX_XIN_COUNT)
-		return;
-
-	c = &vbif->hw;
-
-	/* enable cacheable */
-	if (xin_id >= 8) {
-		xin_id -= 8;
-		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
-	} else {
-		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
-	}
-	bit_off = (xin_id & 0x7) * 4;
-	reg_val = SDE_REG_READ(c, reg_off);
-	reg_val &= ~(0x7 << bit_off);
-	reg_val |= (value & 0x7) << bit_off;
-	SDE_REG_WRITE(c, reg_off, reg_val);
-}
-
-static void sde_hw_set_mem_type_v1(struct sde_hw_vbif *vbif,
-		u32 xin_id, u32 value)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_val;
-
-	if (!vbif || xin_id >= MAX_XIN_COUNT)
-		return;
-
-	sde_hw_set_mem_type(vbif, xin_id, value);
-
-	c = &vbif->hw;
-
-	/* disable outer shareable */
-	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_ASHARED);
-	reg_val &= ~BIT(xin_id);
-	SDE_REG_WRITE(c, VBIF_OUT_AXI_ASHARED, 0);
-
-	/* disable inner shareable */
-	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_AINNERSHARED);
-	reg_val &= ~BIT(xin_id);
-	SDE_REG_WRITE(c, VBIF_OUT_AXI_AINNERSHARED, 0);
-}
-
-static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
-		u32 xin_id, bool rd, u32 limit)
-{
-	struct sde_hw_blk_reg_map *c = &vbif->hw;
-	u32 reg_val;
-	u32 reg_off;
-	u32 bit_off;
-
-	if (rd)
-		reg_off = VBIF_IN_RD_LIM_CONF0;
-	else
-		reg_off = VBIF_IN_WR_LIM_CONF0;
-
-	reg_off += (xin_id / 4) * 4;
-	bit_off = (xin_id % 4) * 8;
-	reg_val = SDE_REG_READ(c, reg_off);
-	reg_val &= ~(0xFF << bit_off);
-	reg_val |= (limit) << bit_off;
-	SDE_REG_WRITE(c, reg_off, reg_val);
-}
-
-static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
-		u32 xin_id, bool rd)
-{
-	struct sde_hw_blk_reg_map *c = &vbif->hw;
-	u32 reg_val;
-	u32 reg_off;
-	u32 bit_off;
-	u32 limit;
-
-	if (rd)
-		reg_off = VBIF_IN_RD_LIM_CONF0;
-	else
-		reg_off = VBIF_IN_WR_LIM_CONF0;
-
-	reg_off += (xin_id / 4) * 4;
-	bit_off = (xin_id % 4) * 8;
-	reg_val = SDE_REG_READ(c, reg_off);
-	limit = (reg_val >> bit_off) & 0xFF;
-
-	return limit;
-}
-
-static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif,
-		u32 xin_id, bool enable)
-{
-	struct sde_hw_blk_reg_map *c = &vbif->hw;
-	u32 reg_val;
-
-	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);
-
-	if (enable)
-		reg_val |= BIT(xin_id);
-	else
-		reg_val &= ~BIT(xin_id);
-
-	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
-}
-
-static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
-		u32 xin_id)
-{
-	struct sde_hw_blk_reg_map *c = &vbif->hw;
-	u32 reg_val;
-
-	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);
-
-	return (reg_val & BIT(xin_id)) ? true : false;
-}
-
-static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
-		u32 xin_id, u32 level, u32 remap_level)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
-
-	if (!vbif)
-		return;
-
-	c = &vbif->hw;
-
-	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
-	reg_shift = (xin_id & 0x7) * 4;
-
-	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
-	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
-
-	mask = 0x7 << reg_shift;
-
-	reg_val &= ~mask;
-	reg_val |= (remap_level << reg_shift) & mask;
-
-	reg_val_lvl &= ~mask;
-	reg_val_lvl |= (remap_level << reg_shift) & mask;
-
-	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
-	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
-}
-
-static void sde_hw_set_write_gather_en(struct sde_hw_vbif *vbif, u32 xin_id)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 reg_val;
-
-	if (!vbif || xin_id >= MAX_XIN_COUNT)
-		return;
-
-	c = &vbif->hw;
-
-	reg_val = SDE_REG_READ(c, VBIF_WRITE_GATHER_EN);
-	reg_val |= BIT(xin_id);
-	SDE_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
-}
-
-static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
-		struct sde_hw_vbif_ops *ops, unsigned long cap)
-{
-	ops->set_limit_conf = sde_hw_set_limit_conf;
-	ops->get_limit_conf = sde_hw_get_limit_conf;
-	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
-	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
-	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
-		ops->set_qos_remap = sde_hw_set_qos_remap;
-	if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
-			IS_SDMMAGPIE_TARGET(m->hwversion))
-		ops->set_mem_type = sde_hw_set_mem_type_v1;
-	else
-		ops->set_mem_type = sde_hw_set_mem_type;
-	ops->clear_errors = sde_hw_clear_errors;
-	ops->set_write_gather_en = sde_hw_set_write_gather_en;
-}
-
-static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
-		const struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->vbif_count; i++) {
-		if (vbif == m->vbif[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->vbif[i].base;
-			b->length = m->vbif[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_VBIF;
-			return &m->vbif[i];
-		}
-	}
-
-	return ERR_PTR(-EINVAL);
-}
-
-struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
-		void __iomem *addr,
-		const struct sde_mdss_cfg *m)
-{
-	struct sde_hw_vbif *c;
-	const struct sde_vbif_cfg *cfg;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _top_offset(idx, m, addr, &c->hw);
-	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/*
-	 * Assign ops
-	 */
-	c->idx = idx;
-	c->cap = cfg;
-	_setup_vbif_ops(m, &c->ops, c->cap->features);
-
-	/* no need to register sub-range in sde dbg, dump entire vbif io base */
-
-	mutex_init(&c->mutex);
-
-	return c;
-}
-
-void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
-{
-	if (vbif)
-		mutex_destroy(&vbif->mutex);
-	kfree(vbif);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
deleted file mode 100644
index 78776df..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_VBIF_H
-#define _SDE_HW_VBIF_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-
-struct sde_hw_vbif;
-
-/**
- * struct sde_hw_vbif_ops : Interface to the VBIF hardware driver functions
- *  Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_vbif_ops {
-	/**
-	 * set_limit_conf - set transaction limit config
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @rd: true for read limit; false for write limit
-	 * @limit: outstanding transaction limit
-	 */
-	void (*set_limit_conf)(struct sde_hw_vbif *vbif,
-			u32 xin_id, bool rd, u32 limit);
-
-	/**
-	 * get_limit_conf - get transaction limit config
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @rd: true for read limit; false for write limit
-	 * @return: outstanding transaction limit
-	 */
-	u32 (*get_limit_conf)(struct sde_hw_vbif *vbif,
-			u32 xin_id, bool rd);
-
-	/**
-	 * set_halt_ctrl - set halt control
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @enable: halt control enable
-	 */
-	void (*set_halt_ctrl)(struct sde_hw_vbif *vbif,
-			u32 xin_id, bool enable);
-
-	/**
-	 * get_halt_ctrl - get halt control
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @return: halt control enable
-	 */
-	bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
-			u32 xin_id);
-
-	/**
-	 * set_qos_remap - set QoS priority remap
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @level: priority level
-	 * @remap_level: remapped level
-	 */
-	void (*set_qos_remap)(struct sde_hw_vbif *vbif,
-			u32 xin_id, u32 level, u32 remap_level);
-
-	/**
-	 * set_mem_type - set memory type
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 * @value: memory type value
-	 */
-	void (*set_mem_type)(struct sde_hw_vbif *vbif,
-			u32 xin_id, u32 value);
-
-	/**
-	 * clear_errors - clear any vbif errors
-	 *	This function clears any detected pending/source errors
-	 *	on the VBIF interface, and optionally returns the detected
-	 *	error mask(s).
-	 * @vbif: vbif context driver
-	 * @pnd_errors: pointer to pending error reporting variable
-	 * @src_errors: pointer to source error reporting variable
-	 */
-	void (*clear_errors)(struct sde_hw_vbif *vbif,
-		u32 *pnd_errors, u32 *src_errors);
-
-	/**
-	 * set_write_gather_en - set write_gather enable
-	 * @vbif: vbif context driver
-	 * @xin_id: client interface identifier
-	 */
-	void (*set_write_gather_en)(struct sde_hw_vbif *vbif, u32 xin_id);
-};
-
-struct sde_hw_vbif {
-	/* base */
-	struct sde_hw_blk_reg_map hw;
-
-	/* vbif */
-	enum sde_vbif idx;
-	const struct sde_vbif_cfg *cap;
-
-	/* ops */
-	struct sde_hw_vbif_ops ops;
-
-	/*
-	 * vbif is common across all displays, lock to serialize access.
-	 * must be take by client before using any ops
-	 */
-	struct mutex mutex;
-};
-
-/**
- * sde_hw_vbif_init - initializes the vbif driver for the passed interface idx
- * @idx:  Interface index for which driver object is required
- * @addr: Mapped register io address of MDSS
- * @m:    Pointer to mdss catalog data
- */
-struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
-		void __iomem *addr,
-		const struct sde_mdss_cfg *m);
-
-void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif);
-
-#endif /*_SDE_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
deleted file mode 100644
index 1b1d06f..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_wb.h"
-#include "sde_formats.h"
-#include "sde_dbg.h"
-#include "sde_kms.h"
-
-#define WB_DST_FORMAT			0x000
-#define WB_DST_OP_MODE			0x004
-#define WB_DST_PACK_PATTERN		0x008
-#define WB_DST0_ADDR			0x00C
-#define WB_DST1_ADDR			0x010
-#define WB_DST2_ADDR			0x014
-#define WB_DST3_ADDR			0x018
-#define WB_DST_YSTRIDE0			0x01C
-#define WB_DST_YSTRIDE1			0x020
-#define WB_DST_YSTRIDE1			0x020
-#define WB_DST_DITHER_BITDEPTH		0x024
-#define WB_DST_MATRIX_ROW0		0x030
-#define WB_DST_MATRIX_ROW1		0x034
-#define WB_DST_MATRIX_ROW2		0x038
-#define WB_DST_MATRIX_ROW3		0x03C
-#define WB_DST_WRITE_CONFIG		0x048
-#define WB_ROTATION_DNSCALER		0x050
-#define WB_ROTATOR_PIPE_DOWNSCALER	0x054
-#define WB_N16_INIT_PHASE_X_C03		0x060
-#define WB_N16_INIT_PHASE_X_C12		0x064
-#define WB_N16_INIT_PHASE_Y_C03		0x068
-#define WB_N16_INIT_PHASE_Y_C12		0x06C
-#define WB_OUT_SIZE			0x074
-#define WB_ALPHA_X_VALUE		0x078
-#define WB_DANGER_LUT			0x084
-#define WB_SAFE_LUT			0x088
-#define WB_QOS_CTRL			0x090
-#define WB_CREQ_LUT_0			0x098
-#define WB_CREQ_LUT_1			0x09C
-#define WB_UBWC_STATIC_CTRL		0x144
-#define WB_MUX				0x150
-#define WB_CSC_BASE			0x260
-#define WB_DST_ADDR_SW_STATUS		0x2B0
-#define WB_CDP_CNTL			0x2B4
-#define WB_OUT_IMAGE_SIZE		0x2C0
-#define WB_OUT_XY			0x2C4
-
-#define CWB_CTRL_SRC_SEL		0x0
-#define CWB_CTRL_MODE			0x4
-#define CWB_CTRL_BLK_SIZE		0x100
-#define CWB_CTRL_BASE_OFFSET		0x83000
-
-/* WB_QOS_CTRL */
-#define WB_QOS_CTRL_DANGER_SAFE_EN	BIT(0)
-
-static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
-		struct sde_mdss_cfg *m,
-		void __iomem *addr,
-		struct sde_hw_blk_reg_map *b)
-{
-	int i;
-
-	for (i = 0; i < m->wb_count; i++) {
-		if (wb == m->wb[i].id) {
-			b->base_off = addr;
-			b->blk_off = m->wb[i].base;
-			b->length = m->wb[i].len;
-			b->hwversion = m->hwversion;
-			b->log_mask = SDE_DBG_MASK_WB;
-			return &m->wb[i];
-		}
-	}
-	return ERR_PTR(-EINVAL);
-}
-
-static void _sde_hw_cwb_ctrl_init(struct sde_mdss_cfg *m,
-		void __iomem *addr, struct sde_hw_blk_reg_map *b)
-{
-	if (b) {
-		b->base_off = addr;
-		b->blk_off = CWB_CTRL_BASE_OFFSET;
-		b->length = CWB_CTRL_BLK_SIZE * m->pingpong_count;
-		b->hwversion = m->hwversion;
-		b->log_mask = SDE_DBG_MASK_WB;
-	}
-}
-
-static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *data)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-
-	SDE_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
-	SDE_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
-	SDE_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
-	SDE_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
-}
-
-static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *data)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	const struct sde_format *fmt = data->dest.format;
-	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
-	u32 write_config = 0;
-	u32 opmode = 0;
-	u32 dst_addr_sw = 0;
-
-	chroma_samp = fmt->chroma_sample;
-
-	dst_format = (chroma_samp << 23) |
-			(fmt->fetch_planes << 19) |
-			(fmt->bits[C3_ALPHA] << 6) |
-			(fmt->bits[C2_R_Cr] << 4) |
-			(fmt->bits[C1_B_Cb] << 2) |
-			(fmt->bits[C0_G_Y] << 0);
-
-	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
-		dst_format |= BIT(8); /* DSTC3_EN */
-		if (!fmt->alpha_enable ||
-				!(ctx->caps->features & BIT(SDE_WB_PIPE_ALPHA)))
-			dst_format |= BIT(14); /* DST_ALPHA_X */
-	}
-
-	if (SDE_FORMAT_IS_YUV(fmt) &&
-			(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
-		dst_format |= BIT(15);
-
-	if (SDE_FORMAT_IS_DX(fmt))
-		dst_format |= BIT(21);
-
-	pattern = (fmt->element[3] << 24) |
-			(fmt->element[2] << 16) |
-			(fmt->element[1] << 8)  |
-			(fmt->element[0] << 0);
-
-	dst_format |= (fmt->unpack_align_msb << 18) |
-			(fmt->unpack_tight << 17) |
-			((fmt->unpack_count - 1) << 12) |
-			((fmt->bpp - 1) << 9);
-
-	ystride0 = data->dest.plane_pitch[0] |
-			(data->dest.plane_pitch[1] << 16);
-	ystride1 = data->dest.plane_pitch[2] |
-			(data->dest.plane_pitch[3] << 16);
-
-	if (data->roi.h && data->roi.w)
-		outsize = (data->roi.h << 16) | data->roi.w;
-	else
-		outsize = (data->dest.height << 16) | data->dest.width;
-
-	if (SDE_FORMAT_IS_UBWC(fmt)) {
-		opmode |= BIT(0);
-		dst_format |= BIT(31);
-		write_config |= (ctx->mdp->highest_bank_bit << 8);
-		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
-			write_config |= 0x8;
-		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
-			SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
-					(ctx->mdp->ubwc_swizzle << 0) |
-					(ctx->mdp->highest_bank_bit << 4));
-	}
-
-	if (data->is_secure)
-		dst_addr_sw |= BIT(0);
-
-	SDE_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
-	SDE_REG_WRITE(c, WB_DST_FORMAT, dst_format);
-	SDE_REG_WRITE(c, WB_DST_OP_MODE, opmode);
-	SDE_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
-	SDE_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
-	SDE_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
-	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
-	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
-	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
-}
-
-static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 image_size, out_size, out_xy;
-
-	image_size = (wb->dest.height << 16) | wb->dest.width;
-	out_xy = (wb->roi.y << 16) | wb->roi.x;
-	out_size = (wb->roi.h << 16) | wb->roi.w;
-
-	SDE_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
-	SDE_REG_WRITE(c, WB_OUT_XY, out_xy);
-	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
-}
-
-static void sde_hw_wb_setup_danger_safe_lut(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_qos_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-
-	if (!ctx || !cfg)
-		return;
-
-	SDE_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
-	SDE_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
-}
-
-static void sde_hw_wb_setup_creq_lut(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_qos_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-
-	if (!ctx || !cfg)
-		return;
-
-	if (ctx->caps && test_bit(SDE_WB_QOS_8LVL, &ctx->caps->features)) {
-		SDE_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
-		SDE_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
-	}
-}
-
-static void sde_hw_wb_setup_qos_ctrl(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_qos_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 qos_ctrl = 0;
-
-	if (!ctx || !cfg)
-		return;
-
-	if (cfg->danger_safe_en)
-		qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
-
-	SDE_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
-}
-
-static void sde_hw_wb_setup_cdp(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cdp_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 cdp_cntl = 0;
-
-	if (!ctx || !cfg)
-		return;
-
-	c = &ctx->hw;
-
-	if (cfg->enable)
-		cdp_cntl |= BIT(0);
-	if (cfg->ubwc_meta_enable)
-		cdp_cntl |= BIT(1);
-	if (cfg->preload_ahead == SDE_WB_CDP_PRELOAD_AHEAD_64)
-		cdp_cntl |= BIT(3);
-
-	SDE_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
-}
-
-static void sde_hw_wb_bind_pingpong_blk(
-		struct sde_hw_wb *ctx,
-		bool enable,
-		const enum sde_pingpong pp)
-{
-	struct sde_hw_blk_reg_map *c;
-	int mux_cfg = 0xF;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->hw;
-	if (enable)
-		mux_cfg = (pp - PINGPONG_0) & 0x7;
-
-	SDE_REG_WRITE(c, WB_MUX, mux_cfg);
-}
-
-static void sde_hw_wb_program_cwb_ctrl(struct sde_hw_wb *ctx,
-		const enum sde_cwb cur_idx,
-		const enum sde_cwb data_src, bool dspp_out)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 blk_base;
-
-	if (!ctx)
-		return;
-
-	c = &ctx->cwb_hw;
-	blk_base = CWB_CTRL_BLK_SIZE * (cur_idx - CWB_0);
-
-	SDE_REG_WRITE(c, blk_base + CWB_CTRL_SRC_SEL, data_src - CWB_0);
-	SDE_REG_WRITE(c, blk_base + CWB_CTRL_MODE, dspp_out);
-}
-
-static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
-	unsigned long features)
-{
-	ops->setup_outaddress = sde_hw_wb_setup_outaddress;
-	ops->setup_outformat = sde_hw_wb_setup_format;
-
-	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
-		ops->setup_roi = sde_hw_wb_roi;
-
-	if (test_bit(SDE_WB_QOS, &features)) {
-		ops->setup_danger_safe_lut =
-			sde_hw_wb_setup_danger_safe_lut;
-		ops->setup_creq_lut = sde_hw_wb_setup_creq_lut;
-		ops->setup_qos_ctrl = sde_hw_wb_setup_qos_ctrl;
-	}
-
-	if (test_bit(SDE_WB_CDP, &features))
-		ops->setup_cdp = sde_hw_wb_setup_cdp;
-
-	if (test_bit(SDE_WB_INPUT_CTRL, &features))
-		ops->bind_pingpong_blk = sde_hw_wb_bind_pingpong_blk;
-
-	if (test_bit(SDE_WB_CWB_CTRL, &features))
-		ops->program_cwb_ctrl = sde_hw_wb_program_cwb_ctrl;
-}
-
-static struct sde_hw_blk_ops sde_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
-
-struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m,
-		struct sde_hw_mdp *hw_mdp)
-{
-	struct sde_hw_wb *c;
-	struct sde_wb_cfg *cfg;
-	int rc;
-
-	if (!addr || !m || !hw_mdp)
-		return ERR_PTR(-EINVAL);
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _wb_offset(idx, m, addr, &c->hw);
-	if (IS_ERR(cfg)) {
-		WARN(1, "Unable to find wb idx=%d\n", idx);
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Assign ops */
-	c->catalog = m;
-	c->mdp = &m->mdp[0];
-	c->idx = idx;
-	c->caps = cfg;
-	_setup_wb_ops(&c->ops, c->caps->features);
-	c->hw_mdp = hw_mdp;
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_WB, idx, &sde_hw_ops);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
-
-	if (test_bit(SDE_WB_CWB_CTRL, &cfg->features))
-		_sde_hw_cwb_ctrl_init(m, addr, &c->cwb_hw);
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb)
-{
-	if (hw_wb)
-		sde_hw_blk_destroy(&hw_wb->base);
-	kfree(hw_wb);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
deleted file mode 100644
index bbf647b7..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HW_WB_H
-#define _SDE_HW_WB_H
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_top.h"
-#include "sde_hw_util.h"
-
-struct sde_hw_wb;
-
-struct sde_hw_wb_cfg {
-	struct sde_hw_fmt_layout dest;
-	enum sde_intf_mode intf_mode;
-	struct traffic_shaper_cfg ts_cfg;
-	struct sde_rect roi;
-	bool is_secure;
-};
-
-/**
- * enum CDP preload ahead address size
- */
-enum {
-	SDE_WB_CDP_PRELOAD_AHEAD_32,
-	SDE_WB_CDP_PRELOAD_AHEAD_64
-};
-
-/**
- * struct sde_hw_wb_cdp_cfg : CDP configuration
- * @enable: true to enable CDP
- * @ubwc_meta_enable: true to enable ubwc metadata preload
- * @tile_amortize_enable: true to enable amortization control for tile format
- * @preload_ahead: number of request to preload ahead
- *	SDE_WB_CDP_PRELOAD_AHEAD_32,
- *	SDE_WB_CDP_PRELOAD_AHEAD_64
- */
-struct sde_hw_wb_cdp_cfg {
-	bool enable;
-	bool ubwc_meta_enable;
-	bool tile_amortize_enable;
-	u32 preload_ahead;
-};
-
-/**
- * struct sde_hw_wb_qos_cfg : Writeback pipe QoS configuration
- * @danger_lut: LUT for generate danger level based on fill level
- * @safe_lut: LUT for generate safe level based on fill level
- * @creq_lut: LUT for generate creq level based on fill level
- * @danger_safe_en: enable danger safe generation
- */
-struct sde_hw_wb_qos_cfg {
-	u32 danger_lut;
-	u32 safe_lut;
-	u64 creq_lut;
-	bool danger_safe_en;
-};
-
-/**
- *
- * struct sde_hw_wb_ops : Interface to the wb Hw driver functions
- *  Assumption is these functions will be called after clocks are enabled
- */
-struct sde_hw_wb_ops {
-	void (*setup_csc_data)(struct sde_hw_wb *ctx,
-			struct sde_csc_cfg *data);
-
-	void (*setup_outaddress)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_outformat)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_rotator)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_dither)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_cdwn)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	void (*setup_roi)(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *wb);
-
-	/**
-	 * setup_danger_safe_lut - setup danger safe LUTs
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 */
-	void (*setup_danger_safe_lut)(struct sde_hw_wb *ctx,
-			struct sde_hw_wb_qos_cfg *cfg);
-
-	/**
-	 * setup_creq_lut - setup CREQ LUT
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 */
-	void (*setup_creq_lut)(struct sde_hw_wb *ctx,
-			struct sde_hw_wb_qos_cfg *cfg);
-
-	/**
-	 * setup_qos_ctrl - setup QoS control
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe QoS configuration
-	 */
-	void (*setup_qos_ctrl)(struct sde_hw_wb *ctx,
-			struct sde_hw_wb_qos_cfg *cfg);
-
-	/**
-	 * setup_cdp - setup CDP
-	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe CDP configuration
-	 */
-	void (*setup_cdp)(struct sde_hw_wb *ctx,
-			struct sde_hw_wb_cdp_cfg *cfg);
-
-	/**
-	 * bind_pingpong_blk - enable/disable the connection with pp
-	 * @ctx: Pointer to wb context
-	 * @enable: enable/disable connection
-	 * @pp: pingpong blk id
-	 */
-	void (*bind_pingpong_blk)(struct sde_hw_wb *ctx,
-			bool enable,
-			const enum sde_pingpong pp);
-
-	/**
-	 * program_cwb_ctrl - program cwb block configp
-	 * @ctx: Pointer to wb context
-	 * @pp_idx: Current CWB block index to poram
-	 * @data_src: Source CWB/PingPong block index
-	 * @dspp_out: Tap dspp output or default LM output
-	 */
-	void (*program_cwb_ctrl)(struct sde_hw_wb *ctx, const enum sde_cwb cwb,
-			const enum sde_cwb data_src, bool dspp_out);
-};
-
-/**
- * struct sde_hw_wb : WB driver object
- * @base: hardware block base structure
- * @hw: block hardware details
- * @catalog: back pointer to catalog
- * @mdp: pointer to associated mdp portion of the catalog
- * @idx: hardware index number within type
- * @wb_hw_caps: hardware capabilities
- * @ops: function pointers
- * @hw_mdp: MDP top level hardware block
- * @cwb_hw: CWB control hwio details
- */
-struct sde_hw_wb {
-	struct sde_hw_blk base;
-	struct sde_hw_blk_reg_map hw;
-	struct sde_mdss_cfg *catalog;
-	struct sde_mdp_cfg *mdp;
-
-	/* wb path */
-	int idx;
-	const struct sde_wb_cfg *caps;
-
-	/* ops */
-	struct sde_hw_wb_ops ops;
-
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_hw_blk_reg_map cwb_hw;
-};
-
-/**
- * sde_hw_wb - convert base object sde_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct sde_hw_wb *to_sde_hw_wb(struct sde_hw_blk *hw)
-{
-	return container_of(hw, struct sde_hw_wb, base);
-}
-
-/**
- * sde_hw_wb_init(): Initializes and return writeback hw driver object.
- * @idx:  wb_path index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m :   pointer to mdss catalog data
- * @hw_mdp: pointer to mdp top hw driver object
- */
-struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m,
-		struct sde_hw_mdp *hw_mdp);
-
-/**
- * sde_hw_wb_destroy(): Destroy writeback hw driver object.
- * @hw_wb:  Pointer to writeback hw driver object
- */
-void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb);
-
-#endif /*_SDE_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
deleted file mode 100644
index c3dbde0..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hwio.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_HWIO_H
-#define _SDE_HWIO_H
-
-#include "sde_hw_util.h"
-
-/**
- * MDP TOP block Register and bit fields and defines
- */
-#define DISP_INTF_SEL                   0x004
-#define INTR_EN                         0x010
-#define INTR_STATUS                     0x014
-#define INTR_CLEAR                      0x018
-#define INTR2_EN                        0x008
-#define INTR2_STATUS                    0x00c
-#define INTR2_CLEAR                     0x02c
-#define HIST_INTR_EN                    0x01c
-#define HIST_INTR_STATUS                0x020
-#define HIST_INTR_CLEAR                 0x024
-#define INTF_INTR_EN                    0x1C0
-#define INTF_INTR_STATUS                0x1C4
-#define INTF_INTR_CLEAR                 0x1C8
-#define SPLIT_DISPLAY_EN                0x2F4
-#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
-#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
-#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
-#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
-#define PPB0_CNTL                       0x330
-#define PPB0_CONFIG                     0x334
-#define PPB1_CNTL                       0x338
-#define PPB1_CONFIG                     0x33C
-#define PPB2_CNTL                       0x370
-#define PPB3_CNTL                       0x374
-#define HW_EVENTS_CTL                   0x37C
-#define CLK_CTRL3                       0x3A8
-#define CLK_STATUS3                     0x3AC
-#define CLK_CTRL4                       0x3B0
-#define CLK_STATUS4                     0x3B4
-#define CLK_CTRL5                       0x3B8
-#define CLK_STATUS5                     0x3BC
-#define CLK_CTRL7                       0x3D0
-#define CLK_STATUS7                     0x3D4
-#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
-#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
-#define INTF_SW_RESET_MASK              0x3FC
-#define HDMI_DP_CORE_SELECT             0x408
-#define MDP_OUT_CTL_0                   0x410
-#define MDP_VSYNC_SEL                   0x414
-#define DCE_SEL                         0x450
-
-#define DP_DHDR_MEM_POOL_0_DATA         0x46c
-#define DP_DHDR_MEM_POOL_1_DATA         0x470
-#define DP_DHDR_MEM_POOL_0_NUM_BYTES    0x47c
-#define DP_DHDR_MEM_POOL_1_NUM_BYTES    0x480
-
-/* SDE_SCALER_QSEED3 */
-#define QSEED3_COEF_LUT_OFF              0x100
-#define QSEED3_FILTERS                     5
-#define QSEED3_LUT_REGIONS                 4
-#define QSEED3_CIRCULAR_LUTS               9
-#define QSEED3_SEPARABLE_LUTS              10
-#define QSEED3_LUT_SIZE                    60
-#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
-#define QSEED3_COEF_LUT_DIR_BIT            1
-#define QSEED3_COEF_LUT_Y_CIR_BIT          2
-#define QSEED3_COEF_LUT_UV_CIR_BIT         3
-#define QSEED3_COEF_LUT_Y_SEP_BIT          4
-#define QSEED3_COEF_LUT_UV_SEP_BIT         5
-#define QSEED3_CIR_LUT_SIZE \
-	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
-#define QSEED3_SEP_LUT_SIZE \
-	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
-
-/* SDE_SCALER_QSEED3LITE */
-#define QSEED3L_COEF_LUT_OFF                   0x100
-#define QSEED3LITE_FILTERS                 2
-#define QSEED3L_SEPARABLE_LUTS             10
-#define QSEED3L_LUT_SIZE                   33
-#define QSEED3L_SEP_LUT_SIZE \
-	(QSEED3L_LUT_SIZE * QSEED3L_SEPARABLE_LUTS * sizeof(u32))
-
-#endif /*_SDE_HWIO_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
deleted file mode 100644
index e857415..0000000
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "sde_irq.h"
-#include "sde_core_irq.h"
-
-uint32_t g_sde_irq_status;
-
-void sde_irq_update(struct msm_kms *msm_kms, bool enable)
-{
-	struct sde_kms *sde_kms = to_sde_kms(msm_kms);
-
-	if (!msm_kms || !sde_kms) {
-		SDE_ERROR("invalid kms arguments\n");
-		return;
-	}
-
-	sde_kms->irq_enabled = enable;
-
-	if (enable)
-		enable_irq(sde_kms->irq_num);
-	else
-		disable_irq(sde_kms->irq_num);
-}
-
-irqreturn_t sde_irq(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	u32 interrupts;
-
-	sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
-			&interrupts);
-
-	/* store irq status in case of irq-storm debugging */
-	g_sde_irq_status = interrupts;
-
-	/*
-	 * Taking care of MDP interrupt
-	 */
-	if (interrupts & IRQ_SOURCE_MDP) {
-		interrupts &= ~IRQ_SOURCE_MDP;
-		sde_core_irq(sde_kms);
-	}
-
-	/*
-	 * Routing all other interrupts to external drivers
-	 */
-	while (interrupts) {
-		irq_hw_number_t hwirq = fls(interrupts) - 1;
-		unsigned int mapping;
-		int rc;
-
-		mapping = irq_find_mapping(sde_kms->irq_controller.domain,
-				hwirq);
-		if (mapping == 0) {
-			SDE_EVT32(hwirq, SDE_EVTLOG_ERROR);
-			goto error;
-		}
-
-		rc = generic_handle_irq(mapping);
-		if (rc < 0) {
-			SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR);
-			goto error;
-		}
-
-		interrupts &= ~(1 << hwirq);
-	}
-
-	return IRQ_HANDLED;
-
-error:
-	/* bad situation, inform irq system, it may disable overall MDSS irq */
-	return IRQ_NONE;
-}
-
-void sde_irq_preinstall(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-
-	if (!sde_kms->dev || !sde_kms->dev->dev) {
-		pr_err("invalid device handles\n");
-		return;
-	}
-
-	sde_core_irq_preinstall(sde_kms);
-
-	sde_kms->irq_num = platform_get_irq(
-				to_platform_device(sde_kms->dev->dev),
-				0);
-	if (sde_kms->irq_num < 0) {
-		SDE_ERROR("invalid irq number %d\n", sde_kms->irq_num);
-		return;
-	}
-
-	/* disable irq until power event enables it */
-	if (!sde_kms->splash_data.num_splash_displays && !sde_kms->irq_enabled)
-		irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
-}
-
-int sde_irq_postinstall(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	int rc;
-
-	if (!kms) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	rc = sde_core_irq_postinstall(sde_kms);
-
-	return rc;
-}
-
-void sde_irq_uninstall(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-
-	if (!kms) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	sde_core_irq_uninstall(sde_kms);
-	sde_core_irq_domain_fini(sde_kms);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.h b/drivers/gpu/drm/msm/sde/sde_irq.h
deleted file mode 100644
index 0417c2c..0000000
--- a/drivers/gpu/drm/msm/sde/sde_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_IRQ_H__
-#define __SDE_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * sde_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask:	enable status of MDSS level interrupt
- * @domain:		interrupt domain of this controller
- */
-struct sde_irq_controller {
-	unsigned long enabled_mask;
-	struct irq_domain *domain;
-};
-
-/**
- * sde_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms:		pointer to kms context
- * @return:		none
- */
-void sde_irq_preinstall(struct msm_kms *kms);
-
-/**
- * sde_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms:		pointer to kms context
- * @return:		0 if success; error code otherwise
- */
-int sde_irq_postinstall(struct msm_kms *kms);
-
-/**
- * sde_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev:		pointer to kms context
- * @return:		none
- */
-void sde_irq_uninstall(struct msm_kms *kms);
-
-/**
- * sde_irq - MDSS level IRQ handler
- * @kms:		pointer to kms context
- * @return:		interrupt handling status
- */
-irqreturn_t sde_irq(struct msm_kms *kms);
-
-/**
- * sde_irq_update - enable/disable IRQ line
- * @kms:		pointer to kms context
- * @enable:		enable:true, disable:false
- */
-void sde_irq_update(struct msm_kms *kms, bool enable);
-
-#endif /* __SDE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
deleted file mode 100644
index 065f090..0000000
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ /dev/null
@@ -1,3470 +0,0 @@
-/*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <drm/drm_crtc.h>
-#include <linux/debugfs.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/dma-buf.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-
-#include "msm_drv.h"
-#include "msm_mmu.h"
-#include "msm_gem.h"
-
-#include "dsi_display.h"
-#include "dsi_drm.h"
-#include "sde_wb.h"
-#include "dp_display.h"
-#include "dp_drm.h"
-
-#include "sde_kms.h"
-#include "sde_core_irq.h"
-#include "sde_formats.h"
-#include "sde_hw_vbif.h"
-#include "sde_vbif.h"
-#include "sde_encoder.h"
-#include "sde_plane.h"
-#include "sde_crtc.h"
-#include "sde_reg_dma.h"
-
-#include <soc/qcom/scm.h>
-#include "soc/qcom/secure_buffer.h"
-
-#define CREATE_TRACE_POINTS
-#include "sde_trace.h"
-
-/* defines for secure channel call */
-#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
-#define MDP_DEVICE_ID            0x1A
-
-static const char * const iommu_ports[] = {
-		"mdp_0",
-};
-
-/**
- * Controls size of event log buffer. Specified as a power of 2.
- */
-#define SDE_EVTLOG_SIZE	1024
-
-/*
- * To enable overall DRM driver logging
- * # echo 0x2 > /sys/module/drm/parameters/debug
- *
- * To enable DRM driver h/w logging
- * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
- *
- * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
- */
-#define SDE_DEBUGFS_DIR "msm_sde"
-#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
-
-#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
-#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20
-
-/**
- * sdecustom - enable certain driver customizations for sde clients
- *	Enabling this modifies the standard DRM behavior slightly and assumes
- *	that the clients have specific knowledge about the modifications that
- *	are involved, so don't enable this unless you know what you're doing.
- *
- *	Parts of the driver that are affected by this setting may be located by
- *	searching for invocations of the 'sde_is_custom_client()' function.
- *
- *	This is disabled by default.
- */
-static bool sdecustom = true;
-module_param(sdecustom, bool, 0400);
-MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
-
-static int sde_kms_hw_init(struct msm_kms *kms);
-static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
-static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
-static int _sde_kms_register_events(struct msm_kms *kms,
-		struct drm_mode_object *obj, u32 event, bool en);
-bool sde_is_custom_client(void)
-{
-	return sdecustom;
-}
-
-#ifdef CONFIG_DEBUG_FS
-void *sde_debugfs_get_root(struct sde_kms *sde_kms)
-{
-	struct msm_drm_private *priv;
-
-	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
-		return NULL;
-
-	priv = sde_kms->dev->dev_private;
-	return priv->debug_root;
-}
-
-static int _sde_debugfs_init(struct sde_kms *sde_kms)
-{
-	void *p;
-	int rc;
-	void *debugfs_root;
-
-	p = sde_hw_util_get_log_mask_ptr();
-
-	if (!sde_kms || !p)
-		return -EINVAL;
-
-	debugfs_root = sde_debugfs_get_root(sde_kms);
-	if (!debugfs_root)
-		return -EINVAL;
-
-	/* allow debugfs_root to be NULL */
-	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);
-
-	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
-	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);
-
-	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
-	if (rc) {
-		SDE_ERROR("failed to init perf %d\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
-{
-	/* don't need to NULL check debugfs_root */
-	if (sde_kms) {
-		sde_debugfs_vbif_destroy(sde_kms);
-		sde_debugfs_core_irq_destroy(sde_kms);
-	}
-}
-#else
-static int _sde_debugfs_init(struct sde_kms *sde_kms)
-{
-	return 0;
-}
-
-static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
-{
-}
-#endif
-
-static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-	int ret = 0;
-
-	SDE_ATRACE_BEGIN("sde_kms_enable_vblank");
-	ret = sde_crtc_vblank(crtc, true);
-	SDE_ATRACE_END("sde_kms_enable_vblank");
-
-	return ret;
-}
-
-static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-	SDE_ATRACE_BEGIN("sde_kms_disable_vblank");
-	sde_crtc_vblank(crtc, false);
-	SDE_ATRACE_END("sde_kms_disable_vblank");
-}
-
-static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
-		struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev;
-	int ret;
-
-	if (!kms || !crtc || !crtc->state || !crtc->dev) {
-		SDE_ERROR("invalid params\n");
-		return;
-	}
-
-	if (!crtc->state->enable) {
-		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
-		return;
-	}
-
-	if (!crtc->state->active) {
-		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
-		return;
-	}
-
-	dev = crtc->dev;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-		/*
-		 * Video Mode - Wait for VSYNC
-		 * Cmd Mode   - Wait for PP_DONE. Will be no-op if transfer is
-		 *              complete
-		 */
-		SDE_EVT32_VERBOSE(DRMID(crtc));
-		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
-		if (ret && ret != -EWOULDBLOCK) {
-			SDE_ERROR(
-			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
-			crtc->base.id, encoder->base.id, ret);
-			break;
-		}
-	}
-}
-
-static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
-			struct drm_crtc *crtc, bool enable)
-{
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	struct sde_mdss_cfg *sde_cfg;
-	struct drm_plane *plane;
-	int i, ret;
-
-	dev = sde_kms->dev;
-	priv = dev->dev_private;
-	sde_cfg = sde_kms->catalog;
-
-	ret = sde_vbif_halt_xin_mask(sde_kms,
-			sde_cfg->sui_block_xin_mask, enable);
-	if (ret) {
-		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
-		return ret;
-	}
-
-	if (enable) {
-		for (i = 0; i < priv->num_planes; i++) {
-			plane = priv->planes[i];
-			sde_plane_secure_ctrl_xin_client(plane, crtc);
-		}
-	}
-
-	return 0;
-}
-
-/**
- * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
- * @sde_kms: Pointer to sde_kms struct
- * @vimd: switch the stage 2 translation to this VMID
- */
-static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
-{
-	struct scm_desc desc = {0};
-	uint32_t num_sids;
-	uint32_t *sec_sid;
-	uint32_t mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
-	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
-	int ret = 0, i;
-
-	num_sids = sde_cfg->sec_sid_mask_count;
-	if (!num_sids) {
-		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
-		return -EINVAL;
-	}
-
-	sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
-	if (!sec_sid)
-		return -ENOMEM;
-
-	for (i = 0; i < num_sids; i++) {
-		sec_sid[i] = sde_cfg->sec_sid_mask[i];
-		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
-	}
-	dmac_flush_range(sec_sid, sec_sid + num_sids);
-
-	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d",
-				vmid, num_sids);
-
-	desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
-	desc.args[0] = MDP_DEVICE_ID;
-	desc.args[1] = SCM_BUFFER_PHYS(sec_sid);
-	desc.args[2] = sizeof(uint32_t) * num_sids;
-	desc.args[3] =  vmid;
-
-	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-				mem_protect_sd_ctrl_id), &desc);
-	if (ret)
-		SDE_ERROR("Error:scm_call2, vmid %lld, ret%d\n",
-				desc.args[3], ret);
-	SDE_EVT32(mem_protect_sd_ctrl_id,
-			desc.args[0], desc.args[3], num_sids, ret);
-
-	kfree(sec_sid);
-	return ret;
-}
-
-static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
-{
-	u32 ret = 0;
-
-	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
-		goto end;
-
-	/* detach_all_contexts */
-	ret = sde_kms_mmu_detach(sde_kms, false);
-	if (ret) {
-		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
-		goto end;
-	}
-
-	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
-
-end:
-	return ret;
-}
-
-static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, int vmid)
-{
-	u32 ret = 0;
-
-	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
-		goto end;
-
-	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
-
-	/* attach_all_contexts */
-	ret = sde_kms_mmu_attach(sde_kms, false);
-	if (ret) {
-		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
-		goto end;
-	}
-
-end:
-	return ret;
-}
-
-static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
-{
-	u32 ret = 0;
-
-	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
-		goto end;
-
-	/* detach secure_context */
-	ret = sde_kms_mmu_detach(sde_kms, true);
-	if (ret) {
-		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
-		goto end;
-	}
-
-	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
-
-end:
-	return ret;
-}
-
-static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, int vmid)
-{
-	u32 ret = 0;
-
-	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
-		goto end;
-
-	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
-
-	ret = sde_kms_mmu_attach(sde_kms, true);
-	if (ret) {
-		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
-		goto end;
-	}
-
-end:
-	return ret;
-}
-
-static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
-		struct drm_crtc *crtc, bool enable)
-{
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	int ret;
-
-	if (enable) {
-		ret = sde_power_resource_enable(&priv->phandle,
-					sde_kms->core_client, true);
-		if (ret) {
-			SDE_ERROR("failed to enable resource, ret:%d\n", ret);
-			return ret;
-		}
-
-		sde_crtc_misr_setup(crtc, true, 1);
-
-		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
-		if (ret) {
-			sde_power_resource_enable(&priv->phandle,
-					sde_kms->core_client, false);
-			return ret;
-		}
-
-	} else {
-		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
-		sde_crtc_misr_setup(crtc, false, 0);
-		sde_power_resource_enable(&priv->phandle,
-					sde_kms->core_client, false);
-	}
-
-	return 0;
-}
-
-static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
-		bool post_commit)
-{
-	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
-	int old_smmu_state = smmu_state->state;
-	int ret = 0;
-	u32 vmid;
-
-	if (!sde_kms || !crtc) {
-		SDE_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
-	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
-			post_commit, smmu_state->sui_misr_state,
-			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);
-
-	if ((!smmu_state->transition_type) ||
-	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
-		/* Bail out */
-		return 0;
-
-	/* enable sui misr if requested, before the transition */
-	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
-		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
-		if (ret)
-			goto end;
-	}
-
-	mutex_lock(&sde_kms->secure_transition_lock);
-	switch (smmu_state->state) {
-	case DETACH_ALL_REQ:
-		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
-		if (!ret)
-			smmu_state->state = DETACHED;
-		break;
-
-	case ATTACH_ALL_REQ:
-		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL);
-		if (!ret) {
-			smmu_state->state = ATTACHED;
-			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
-		}
-		break;
-
-	case DETACH_SEC_REQ:
-		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
-				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
-
-		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
-		if (!ret)
-			smmu_state->state = DETACHED_SEC;
-		break;
-
-	case ATTACH_SEC_REQ:
-		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL);
-		if (!ret) {
-			smmu_state->state = ATTACHED;
-			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
-		}
-		break;
-
-	default:
-		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
-			DRMID(crtc), smmu_state->state,
-			smmu_state->transition_type);
-		ret = -EINVAL;
-		break;
-	}
-	mutex_unlock(&sde_kms->secure_transition_lock);
-
-	/* disable sui misr if requested, after the transition */
-	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
-		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
-		if (ret)
-			goto end;
-	}
-
-end:
-	smmu_state->sui_misr_state = NONE;
-	smmu_state->transition_type = NONE;
-	smmu_state->transition_error = ret ? true : false;
-
-	SDE_DEBUG("crtc %d: old_state %d, new_state %d, sec_lvl %d, ret %d\n",
-			DRMID(crtc), old_smmu_state, smmu_state->state,
-			smmu_state->secure_level, ret);
-	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
-			smmu_state->transition_error, smmu_state->secure_level,
-			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);
-
-	return ret;
-}
-
-static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
-		struct drm_atomic_state *state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-
-	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	int i, ops = 0, ret = 0;
-	bool old_valid_fb = false;
-
-	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-		if (!crtc->state || !crtc->state->active)
-			continue;
-		/*
-		 * It is safe to assume only one active crtc,
-		 * and compatible translation modes on the
-		 * planes staged on this crtc.
-		 * otherwise validation would have failed.
-		 * For this CRTC,
-		 */
-
-		/*
-		 * 1. Check if old state on the CRTC has planes
-		 * staged with valid fbs
-		 */
-		for_each_old_plane_in_state(state, plane, plane_state, i) {
-			if (!plane_state->crtc)
-				continue;
-			if (plane_state->fb) {
-				old_valid_fb = true;
-				break;
-			}
-		}
-
-		/*
-		 * 2.Get the operations needed to be performed before
-		 * secure transition can be initiated.
-		 */
-		ops = sde_crtc_get_secure_transition_ops(crtc,
-				old_crtc_state, old_valid_fb);
-		if (ops < 0) {
-			SDE_ERROR("invalid secure operations %x\n", ops);
-			return ops;
-		}
-
-		if (!ops)
-			goto no_ops;
-
-		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
-				crtc->base.id, ops, crtc->state);
-		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);
-
-		/* 3. Perform operations needed for secure transition */
-		if  (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
-			SDE_DEBUG("wait_for_transfer_done\n");
-			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
-		}
-		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
-			SDE_DEBUG("cleanup planes\n");
-			drm_atomic_helper_cleanup_planes(dev, state);
-		}
-		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
-			SDE_DEBUG("secure ctrl\n");
-			_sde_kms_secure_ctrl(sde_kms, crtc, false);
-		}
-		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
-			SDE_DEBUG("prepare planes %d",
-					crtc->state->plane_mask);
-			drm_atomic_crtc_for_each_plane(plane,
-					crtc) {
-				const struct drm_plane_helper_funcs *funcs;
-
-				plane_state = plane->state;
-				funcs = plane->helper_private;
-
-				SDE_DEBUG("psde:%d FB[%u]\n",
-						plane->base.id,
-						plane->fb->base.id);
-				if (!funcs)
-					continue;
-
-				if (funcs->prepare_fb(plane, plane_state)) {
-					ret = funcs->prepare_fb(plane,
-							plane_state);
-					if (ret)
-						return ret;
-				}
-			}
-		}
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
-		SDE_DEBUG("secure operations completed\n");
-	}
-
-no_ops:
-	return 0;
-}
-
-static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
-					unsigned int splash_buffer_size,
-					unsigned int ramdump_base,
-					unsigned int ramdump_buffer_size)
-{
-	unsigned long pfn_start, pfn_end, pfn_idx;
-	int ret = 0;
-
-	if (!mem_addr || !splash_buffer_size) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	/* leave ramdump memory only if base address matches */
-	if (ramdump_base == mem_addr &&
-			ramdump_buffer_size <= splash_buffer_size) {
-		mem_addr +=  ramdump_buffer_size;
-		splash_buffer_size -= ramdump_buffer_size;
-	}
-
-	pfn_start = mem_addr >> PAGE_SHIFT;
-	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;
-
-	ret = memblock_free(mem_addr, splash_buffer_size);
-	if (ret) {
-		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
-		return ret;
-	}
-	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
-		free_reserved_page(pfn_to_page(pfn_idx));
-
-	return ret;
-
-}
-
-static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
-		struct sde_splash_mem *splash)
-{
-	struct msm_mmu *mmu = NULL;
-	int ret = 0;
-
-	if (!sde_kms->aspace[0]) {
-		SDE_ERROR("aspace not found for sde kms node\n");
-		return -EINVAL;
-	}
-
-	mmu = sde_kms->aspace[0]->mmu;
-	if (!mmu) {
-		SDE_ERROR("mmu not found for aspace\n");
-		return -EINVAL;
-	}
-
-	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
-		SDE_ERROR("invalid input params for map\n");
-		return -EINVAL;
-	}
-
-	if (!splash->ref_cnt) {
-		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
-				splash->splash_buf_base,
-				splash->splash_buf_size,
-				IOMMU_READ | IOMMU_NOEXEC);
-		if (ret)
-			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
-	}
-
-	splash->ref_cnt++;
-	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
-				splash->splash_buf_base,
-				splash->splash_buf_size,
-				splash->ref_cnt);
-
-	return ret;
-}
-
-static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
-{
-	int i = 0;
-	int ret = 0;
-
-	if (!sde_kms)
-		return -EINVAL;
-
-	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
-		ret = _sde_kms_splash_mem_get(sde_kms,
-				sde_kms->splash_data.splash_display[i].splash);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
-		struct sde_splash_mem *splash)
-{
-	struct msm_mmu *mmu = NULL;
-	int rc = 0;
-
-	if (!sde_kms)
-		return -EINVAL;
-
-	if (!sde_kms->aspace[0]) {
-		SDE_ERROR("aspace not found for sde kms node\n");
-		return -EINVAL;
-	}
-
-	mmu = sde_kms->aspace[0]->mmu;
-	if (!mmu) {
-		SDE_ERROR("mmu not found for aspace\n");
-		return -EINVAL;
-	}
-
-	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
-		return -EINVAL;
-
-	splash->ref_cnt--;
-
-	SDE_DEBUG("splash base:%lx refcnt:%d\n",
-			splash->splash_buf_base, splash->ref_cnt);
-
-	if (!splash->ref_cnt) {
-		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
-				splash->splash_buf_size);
-		rc = _sde_kms_release_splash_buffer(splash->splash_buf_base,
-				splash->splash_buf_size, splash->ramdump_base,
-				splash->ramdump_size);
-		splash->splash_buf_base = 0;
-		splash->splash_buf_size = 0;
-	}
-
-	return rc;
-}
-
-static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
-{
-	int i = 0;
-	int ret = 0;
-
-	if (!sde_kms)
-		return -EINVAL;
-
-	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
-		ret = _sde_kms_splash_mem_put(sde_kms,
-				sde_kms->splash_data.splash_display[i].splash);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-static void sde_kms_prepare_commit(struct msm_kms *kms,
-		struct drm_atomic_state *state)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-	struct drm_device *dev;
-	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int i, rc = 0;
-
-	if (!kms)
-		return;
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	if (!dev || !dev->dev_private)
-		return;
-	priv = dev->dev_private;
-
-	SDE_ATRACE_BEGIN("prepare_commit");
-	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-			true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		goto end;
-	}
-
-	if (sde_kms->first_kickoff) {
-		sde_power_scale_reg_bus(&priv->phandle, sde_kms->core_client,
-			VOTE_INDEX_HIGH, false);
-		sde_kms->first_kickoff = false;
-	}
-
-	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
-		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-				head) {
-			if (encoder->crtc != crtc)
-				continue;
-
-			sde_encoder_prepare_commit(encoder);
-		}
-	}
-
-	/*
-	 * NOTE: for secure use cases we want to apply the new HW
-	 * configuration only after completing preparation for secure
-	 * transitions prepare below if any transtions is required.
-	 */
-	sde_kms_prepare_secure_transition(kms, state);
-end:
-	SDE_ATRACE_END("prepare_commit");
-}
-
-static void sde_kms_commit(struct msm_kms *kms,
-		struct drm_atomic_state *old_state)
-{
-	struct sde_kms *sde_kms;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	int i;
-
-	if (!kms || !old_state)
-		return;
-	sde_kms = to_sde_kms(kms);
-
-	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("sde_kms_commit");
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		if (crtc->state->active) {
-			SDE_EVT32(DRMID(crtc));
-			sde_crtc_commit_kickoff(crtc, old_crtc_state);
-		}
-	}
-
-	SDE_ATRACE_END("sde_kms_commit");
-}
-
-static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
-		struct drm_crtc *crtc)
-{
-	struct msm_drm_private *priv;
-	struct sde_splash_display *splash_display;
-	int i;
-
-	if (!sde_kms || !crtc)
-		return;
-
-	priv = sde_kms->dev->dev_private;
-
-	SDE_EVT32(crtc->base.id, crtc->state->active,
-			sde_kms->splash_data.num_splash_displays);
-	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
-		return;
-
-	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
-		splash_display = &sde_kms->splash_data.splash_display[i];
-		if (splash_display->encoder &&
-				crtc == splash_display->encoder->crtc)
-			break;
-	}
-
-	if (i >= MAX_DSI_DISPLAYS)
-		return;
-
-	_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
-
-	if (splash_display->cont_splash_enabled) {
-		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
-				splash_display, false);
-		splash_display->cont_splash_enabled = false;
-		sde_kms->splash_data.num_splash_displays--;
-		SDE_DEBUG("cont_splash handoff done for dpy:%d remaining:%d\n",
-				i, sde_kms->splash_data.num_splash_displays);
-		memset(splash_display, 0x0, sizeof(struct sde_splash_display));
-
-	}
-
-	/* remove the votes if all displays are done with splash */
-	if (!sde_kms->splash_data.num_splash_displays) {
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-			sde_power_data_bus_set_quota(&priv->phandle,
-					sde_kms->core_client,
-					SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
-					SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
-					SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
-
-		sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, false);
-	}
-}
-
-static void sde_kms_complete_commit(struct msm_kms *kms,
-		struct drm_atomic_state *old_state)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *old_conn_state;
-	int i, rc = 0;
-
-	if (!kms || !old_state)
-		return;
-	sde_kms = to_sde_kms(kms);
-
-	if (!sde_kms->dev || !sde_kms->dev->dev_private)
-		return;
-	priv = sde_kms->dev->dev_private;
-
-	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("sde_kms_complete_commit");
-
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		sde_crtc_complete_commit(crtc, old_crtc_state);
-
-		/* complete secure transitions if any */
-		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
-			_sde_kms_secure_ctrl(sde_kms, crtc, true);
-	}
-
-	for_each_old_connector_in_state(old_state, connector,
-			old_conn_state, i) {
-		struct sde_connector *c_conn;
-
-		c_conn = to_sde_connector(connector);
-		if (!c_conn->ops.post_kickoff)
-			continue;
-		rc = c_conn->ops.post_kickoff(connector);
-		if (rc) {
-			pr_err("Connector Post kickoff failed rc=%d\n",
-					 rc);
-		}
-	}
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
-		_sde_kms_release_splash_resource(sde_kms, crtc);
-
-	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
-	SDE_ATRACE_END("sde_kms_complete_commit");
-}
-
-static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
-		struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev;
-	int ret;
-
-	if (!kms || !crtc || !crtc->state) {
-		SDE_ERROR("invalid params\n");
-		return;
-	}
-
-	dev = crtc->dev;
-
-	if (!crtc->state->enable) {
-		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
-		return;
-	}
-
-	if (!crtc->state->active) {
-		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
-		return;
-	}
-
-	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
-		SDE_ERROR("power resource is not enabled\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-		/*
-		 * Wait for post-flush if necessary to delay before
-		 * plane_cleanup. For example, wait for vsync in case of video
-		 * mode panels. This may be a no-op for command mode panels.
-		 */
-		SDE_EVT32_VERBOSE(DRMID(crtc));
-		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
-		if (ret && ret != -EWOULDBLOCK) {
-			SDE_ERROR("wait for commit done returned %d\n", ret);
-			sde_crtc_request_frame_reset(crtc);
-			break;
-		}
-
-		sde_crtc_complete_flip(crtc, NULL);
-	}
-
-	SDE_ATRACE_END("sde_ksm_wait_for_commit_done");
-}
-
-static void sde_kms_prepare_fence(struct msm_kms *kms,
-		struct drm_atomic_state *old_state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	int i, rc;
-
-	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");
-retry:
-	/* attempt to acquire ww mutex for connection */
-	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
-			       old_state->acquire_ctx);
-
-	if (rc == -EDEADLK) {
-		drm_modeset_backoff(old_state->acquire_ctx);
-		goto retry;
-	}
-
-	/* old_state actually contains updated crtc pointers */
-	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		if (crtc->state->active)
-			sde_crtc_prepare_commit(crtc, old_crtc_state);
-	}
-
-	SDE_ATRACE_END("sde_kms_prepare_fence");
-}
-
-/**
- * _sde_kms_get_displays - query for underlying display handles and cache them
- * @sde_kms:    Pointer to sde kms structure
- * Returns:     Zero on success
- */
-static int _sde_kms_get_displays(struct sde_kms *sde_kms)
-{
-	int rc = -ENOMEM;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde kms\n");
-		return -EINVAL;
-	}
-
-	/* dsi */
-	sde_kms->dsi_displays = NULL;
-	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
-	if (sde_kms->dsi_display_count) {
-		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
-				sizeof(void *),
-				GFP_KERNEL);
-		if (!sde_kms->dsi_displays) {
-			SDE_ERROR("failed to allocate dsi displays\n");
-			goto exit_deinit_dsi;
-		}
-		sde_kms->dsi_display_count =
-			dsi_display_get_active_displays(sde_kms->dsi_displays,
-					sde_kms->dsi_display_count);
-	}
-
-	/* wb */
-	sde_kms->wb_displays = NULL;
-	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
-	if (sde_kms->wb_display_count) {
-		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
-				sizeof(void *),
-				GFP_KERNEL);
-		if (!sde_kms->wb_displays) {
-			SDE_ERROR("failed to allocate wb displays\n");
-			goto exit_deinit_wb;
-		}
-		sde_kms->wb_display_count =
-			wb_display_get_displays(sde_kms->wb_displays,
-					sde_kms->wb_display_count);
-	}
-
-	/* dp */
-	sde_kms->dp_displays = NULL;
-	sde_kms->dp_display_count = dp_display_get_num_of_displays();
-	if (sde_kms->dp_display_count) {
-		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
-				sizeof(void *), GFP_KERNEL);
-		if (!sde_kms->dp_displays) {
-			SDE_ERROR("failed to allocate dp displays\n");
-			goto exit_deinit_dp;
-		}
-		sde_kms->dp_display_count =
-			dp_display_get_displays(sde_kms->dp_displays,
-					sde_kms->dp_display_count);
-
-		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
-	}
-	return 0;
-
-exit_deinit_dp:
-	kfree(sde_kms->dp_displays);
-	sde_kms->dp_stream_count = 0;
-	sde_kms->dp_display_count = 0;
-	sde_kms->dp_displays = NULL;
-
-exit_deinit_wb:
-	kfree(sde_kms->wb_displays);
-	sde_kms->wb_display_count = 0;
-	sde_kms->wb_displays = NULL;
-
-exit_deinit_dsi:
-	kfree(sde_kms->dsi_displays);
-	sde_kms->dsi_display_count = 0;
-	sde_kms->dsi_displays = NULL;
-	return rc;
-}
-
-/**
- * _sde_kms_release_displays - release cache of underlying display handles
- * @sde_kms:    Pointer to sde kms structure
- */
-static void _sde_kms_release_displays(struct sde_kms *sde_kms)
-{
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde kms\n");
-		return;
-	}
-
-	kfree(sde_kms->wb_displays);
-	sde_kms->wb_displays = NULL;
-	sde_kms->wb_display_count = 0;
-
-	kfree(sde_kms->dsi_displays);
-	sde_kms->dsi_displays = NULL;
-	sde_kms->dsi_display_count = 0;
-}
-
-/**
- * _sde_kms_setup_displays - create encoders, bridges and connectors
- *                           for underlying displays
- * @dev:        Pointer to drm device structure
- * @priv:       Pointer to private drm device data
- * @sde_kms:    Pointer to sde kms structure
- * Returns:     Zero on success
- */
-static int _sde_kms_setup_displays(struct drm_device *dev,
-		struct msm_drm_private *priv,
-		struct sde_kms *sde_kms)
-{
-	static const struct sde_connector_ops dsi_ops = {
-		.set_info_blob = dsi_conn_set_info_blob,
-		.detect =     dsi_conn_detect,
-		.get_modes =  dsi_connector_get_modes,
-		.pre_destroy =  dsi_connector_put_modes,
-		.mode_valid = dsi_conn_mode_valid,
-		.get_info =   dsi_display_get_info,
-		.set_backlight = dsi_display_set_backlight,
-		.soft_reset   = dsi_display_soft_reset,
-		.pre_kickoff  = dsi_conn_pre_kickoff,
-		.clk_ctrl = dsi_display_clk_ctrl,
-		.set_power = dsi_display_set_power,
-		.get_mode_info = dsi_conn_get_mode_info,
-		.get_dst_format = dsi_display_get_dst_format,
-		.post_kickoff = dsi_conn_post_kickoff,
-		.check_status = dsi_display_check_status,
-		.enable_event = dsi_conn_enable_event,
-		.cmd_transfer = dsi_display_cmd_transfer,
-		.cont_splash_config = dsi_display_cont_splash_config,
-		.get_panel_vfp = dsi_display_get_panel_vfp,
-		.get_default_lms = dsi_display_get_default_lms,
-	};
-	static const struct sde_connector_ops wb_ops = {
-		.post_init =    sde_wb_connector_post_init,
-		.set_info_blob = sde_wb_connector_set_info_blob,
-		.detect =       sde_wb_connector_detect,
-		.get_modes =    sde_wb_connector_get_modes,
-		.set_property = sde_wb_connector_set_property,
-		.get_info =     sde_wb_get_info,
-		.soft_reset =   NULL,
-		.get_mode_info = sde_wb_get_mode_info,
-		.get_dst_format = NULL,
-		.check_status = NULL,
-		.cmd_transfer = NULL,
-		.cont_splash_config = NULL,
-		.get_panel_vfp = NULL,
-	};
-	static const struct sde_connector_ops dp_ops = {
-		.post_init  = dp_connector_post_init,
-		.detect     = dp_connector_detect,
-		.get_modes  = dp_connector_get_modes,
-		.mode_valid = dp_connector_mode_valid,
-		.get_info   = dp_connector_get_info,
-		.get_mode_info  = dp_connector_get_mode_info,
-		.post_open  = dp_connector_post_open,
-		.check_status = NULL,
-		.config_hdr = dp_connector_config_hdr,
-		.cmd_transfer = NULL,
-		.cont_splash_config = NULL,
-		.get_panel_vfp = NULL,
-		.update_pps = dp_connector_update_pps,
-	};
-	struct msm_display_info info;
-	struct drm_encoder *encoder;
-	void *display, *connector;
-	int i, max_encoders;
-	int rc = 0;
-
-	if (!dev || !priv || !sde_kms) {
-		SDE_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
-	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
-				sde_kms->dp_display_count +
-				sde_kms->dp_stream_count;
-	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
-		max_encoders = ARRAY_SIZE(priv->encoders);
-		SDE_ERROR("capping number of displays to %d", max_encoders);
-	}
-
-	/* dsi */
-	for (i = 0; i < sde_kms->dsi_display_count &&
-		priv->num_encoders < max_encoders; ++i) {
-		display = sde_kms->dsi_displays[i];
-		encoder = NULL;
-
-		memset(&info, 0x0, sizeof(info));
-		rc = dsi_display_get_info(NULL, &info, display);
-		if (rc) {
-			SDE_ERROR("dsi get_info %d failed\n", i);
-			continue;
-		}
-
-		encoder = sde_encoder_init(dev, &info);
-		if (IS_ERR_OR_NULL(encoder)) {
-			SDE_ERROR("encoder init failed for dsi %d\n", i);
-			continue;
-		}
-
-		rc = dsi_display_drm_bridge_init(display, encoder);
-		if (rc) {
-			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
-			sde_encoder_destroy(encoder);
-			continue;
-		}
-
-		connector = sde_connector_init(dev,
-					encoder,
-					0,
-					display,
-					&dsi_ops,
-					DRM_CONNECTOR_POLL_HPD,
-					DRM_MODE_CONNECTOR_DSI);
-		if (connector) {
-			priv->encoders[priv->num_encoders++] = encoder;
-			priv->connectors[priv->num_connectors++] = connector;
-		} else {
-			SDE_ERROR("dsi %d connector init failed\n", i);
-			dsi_display_drm_bridge_deinit(display);
-			sde_encoder_destroy(encoder);
-		}
-	}
-
-	/* wb */
-	for (i = 0; i < sde_kms->wb_display_count &&
-		priv->num_encoders < max_encoders; ++i) {
-		display = sde_kms->wb_displays[i];
-		encoder = NULL;
-
-		memset(&info, 0x0, sizeof(info));
-		rc = sde_wb_get_info(NULL, &info, display);
-		if (rc) {
-			SDE_ERROR("wb get_info %d failed\n", i);
-			continue;
-		}
-
-		encoder = sde_encoder_init(dev, &info);
-		if (IS_ERR_OR_NULL(encoder)) {
-			SDE_ERROR("encoder init failed for wb %d\n", i);
-			continue;
-		}
-
-		rc = sde_wb_drm_init(display, encoder);
-		if (rc) {
-			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
-			sde_encoder_destroy(encoder);
-			continue;
-		}
-
-		connector = sde_connector_init(dev,
-				encoder,
-				0,
-				display,
-				&wb_ops,
-				DRM_CONNECTOR_POLL_HPD,
-				DRM_MODE_CONNECTOR_VIRTUAL);
-		if (connector) {
-			priv->encoders[priv->num_encoders++] = encoder;
-			priv->connectors[priv->num_connectors++] = connector;
-		} else {
-			SDE_ERROR("wb %d connector init failed\n", i);
-			sde_wb_drm_deinit(display);
-			sde_encoder_destroy(encoder);
-		}
-	}
-	/* dp */
-	for (i = 0; i < sde_kms->dp_display_count &&
-			priv->num_encoders < max_encoders; ++i) {
-		int idx;
-
-		display = sde_kms->dp_displays[i];
-		encoder = NULL;
-
-		memset(&info, 0x0, sizeof(info));
-		rc = dp_connector_get_info(NULL, &info, display);
-		if (rc) {
-			SDE_ERROR("dp get_info %d failed\n", i);
-			continue;
-		}
-
-		encoder = sde_encoder_init(dev, &info);
-		if (IS_ERR_OR_NULL(encoder)) {
-			SDE_ERROR("dp encoder init failed %d\n", i);
-			continue;
-		}
-
-		rc = dp_drm_bridge_init(display, encoder);
-		if (rc) {
-			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
-			sde_encoder_destroy(encoder);
-			continue;
-		}
-
-		connector = sde_connector_init(dev,
-					encoder,
-					NULL,
-					display,
-					&dp_ops,
-					DRM_CONNECTOR_POLL_HPD,
-					DRM_MODE_CONNECTOR_DisplayPort);
-		if (connector) {
-			priv->encoders[priv->num_encoders++] = encoder;
-			priv->connectors[priv->num_connectors++] = connector;
-		} else {
-			SDE_ERROR("dp %d connector init failed\n", i);
-			dp_drm_bridge_deinit(display);
-			sde_encoder_destroy(encoder);
-		}
-
-		/* update display cap to MST_MODE for DP MST encoders */
-		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;
-		for (idx = 0; idx < sde_kms->dp_stream_count; idx++) {
-			info.h_tile_instance[0] = idx;
-			encoder = sde_encoder_init(dev, &info);
-			if (IS_ERR_OR_NULL(encoder)) {
-				SDE_ERROR("dp mst encoder init failed %d\n", i);
-				continue;
-			}
-
-			rc = dp_mst_drm_bridge_init(display, encoder);
-			if (rc) {
-				SDE_ERROR("dp mst bridge %d init failed, %d\n",
-						i, rc);
-				sde_encoder_destroy(encoder);
-				continue;
-			}
-			priv->encoders[priv->num_encoders++] = encoder;
-		}
-	}
-
-	return 0;
-}
-
-static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
-{
-	struct msm_drm_private *priv;
-	int i;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return;
-	} else if (!sde_kms->dev) {
-		SDE_ERROR("invalid dev\n");
-		return;
-	} else if (!sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid dev_private\n");
-		return;
-	}
-	priv = sde_kms->dev->dev_private;
-
-	for (i = 0; i < priv->num_crtcs; i++)
-		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
-	priv->num_crtcs = 0;
-
-	for (i = 0; i < priv->num_planes; i++)
-		priv->planes[i]->funcs->destroy(priv->planes[i]);
-	priv->num_planes = 0;
-
-	for (i = 0; i < priv->num_connectors; i++)
-		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
-	priv->num_connectors = 0;
-
-	for (i = 0; i < priv->num_encoders; i++)
-		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
-	priv->num_encoders = 0;
-
-	_sde_kms_release_displays(sde_kms);
-}
-
-static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
-{
-	struct drm_device *dev;
-	struct drm_plane *primary_planes[MAX_PLANES], *plane;
-	struct drm_crtc *crtc;
-
-	struct msm_drm_private *priv;
-	struct sde_mdss_cfg *catalog;
-
-	int primary_planes_idx = 0, i, ret;
-	int max_crtc_count;
-
-	u32 sspp_id[MAX_PLANES];
-	u32 master_plane_id[MAX_PLANES];
-	u32 num_virt_planes = 0;
-
-	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
-		SDE_ERROR("invalid sde_kms\n");
-		return -EINVAL;
-	}
-
-	dev = sde_kms->dev;
-	priv = dev->dev_private;
-	catalog = sde_kms->catalog;
-
-	ret = sde_core_irq_domain_add(sde_kms);
-	if (ret)
-		goto fail_irq;
-	/*
-	 * Query for underlying display drivers, and create connectors,
-	 * bridges and encoders for them.
-	 */
-	if (!_sde_kms_get_displays(sde_kms))
-		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
-
-	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
-
-	/* Create the planes */
-	for (i = 0; i < catalog->sspp_count; i++) {
-		bool primary = true;
-
-		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
-			|| primary_planes_idx >= max_crtc_count)
-			primary = false;
-
-		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
-				(1UL << max_crtc_count) - 1, 0);
-		if (IS_ERR(plane)) {
-			SDE_ERROR("sde_plane_init failed\n");
-			ret = PTR_ERR(plane);
-			goto fail;
-		}
-		priv->planes[priv->num_planes++] = plane;
-
-		if (primary)
-			primary_planes[primary_planes_idx++] = plane;
-
-		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
-			sde_is_custom_client()) {
-			int priority =
-				catalog->sspp[i].sblk->smart_dma_priority;
-			sspp_id[priority - 1] = catalog->sspp[i].id;
-			master_plane_id[priority - 1] = plane->base.id;
-			num_virt_planes++;
-		}
-	}
-
-	/* Initialize smart DMA virtual planes */
-	for (i = 0; i < num_virt_planes; i++) {
-		plane = sde_plane_init(dev, sspp_id[i], false,
-			(1UL << max_crtc_count) - 1, master_plane_id[i]);
-		if (IS_ERR(plane)) {
-			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
-			ret = PTR_ERR(plane);
-			goto fail;
-		}
-		priv->planes[priv->num_planes++] = plane;
-	}
-
-	max_crtc_count = min(max_crtc_count, primary_planes_idx);
-
-	/* Create one CRTC per encoder */
-	for (i = 0; i < max_crtc_count; i++) {
-		crtc = sde_crtc_init(dev, primary_planes[i]);
-		if (IS_ERR(crtc)) {
-			ret = PTR_ERR(crtc);
-			goto fail;
-		}
-		priv->crtcs[priv->num_crtcs++] = crtc;
-	}
-
-	if (sde_is_custom_client()) {
-		/* All CRTCs are compatible with all planes */
-		for (i = 0; i < priv->num_planes; i++)
-			priv->planes[i]->possible_crtcs =
-				(1 << priv->num_crtcs) - 1;
-	}
-
-	/* All CRTCs are compatible with all encoders */
-	for (i = 0; i < priv->num_encoders; i++)
-		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
-
-	return 0;
-fail:
-	_sde_kms_drm_obj_destroy(sde_kms);
-fail_irq:
-	sde_core_irq_domain_fini(sde_kms);
-	return ret;
-}
-
-/**
- * sde_kms_timeline_status - provides current timeline status
- *    This API should be called without mode config lock.
- * @dev: Pointer to drm device
- */
-void sde_kms_timeline_status(struct drm_device *dev)
-{
-	struct drm_crtc *crtc;
-	struct drm_connector *conn;
-	struct drm_connector_list_iter conn_iter;
-
-	if (!dev) {
-		SDE_ERROR("invalid drm device node\n");
-		return;
-	}
-
-	drm_for_each_crtc(crtc, dev)
-		sde_crtc_timeline_status(crtc);
-
-	if (mutex_is_locked(&dev->mode_config.mutex)) {
-		/*
-		 *Probably locked from last close dumping status anyway
-		 */
-		SDE_ERROR("dumping conn_timeline without mode_config lock\n");
-		drm_connector_list_iter_begin(dev, &conn_iter);
-		drm_for_each_connector_iter(conn, &conn_iter)
-			sde_conn_timeline_status(conn);
-		drm_connector_list_iter_end(&conn_iter);
-		return;
-	}
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter)
-		sde_conn_timeline_status(conn);
-	drm_connector_list_iter_end(&conn_iter);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-
-static int sde_kms_postinit(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev;
-	struct drm_crtc *crtc;
-	int rc;
-
-	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
-		SDE_ERROR("invalid sde_kms\n");
-		return -EINVAL;
-	}
-
-	dev = sde_kms->dev;
-
-	rc = _sde_debugfs_init(sde_kms);
-	if (rc)
-		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
-
-	drm_for_each_crtc(crtc, dev)
-		sde_crtc_post_init(dev, crtc);
-
-	return rc;
-}
-
-static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
-		struct drm_encoder *encoder)
-{
-	return rate;
-}
-
-static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
-		struct platform_device *pdev)
-{
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	int i;
-
-	if (!sde_kms || !pdev)
-		return;
-
-	dev = sde_kms->dev;
-	if (!dev)
-		return;
-
-	priv = dev->dev_private;
-	if (!priv)
-		return;
-
-	if (sde_kms->genpd_init) {
-		sde_kms->genpd_init = false;
-		pm_genpd_remove(&sde_kms->genpd);
-		of_genpd_del_provider(pdev->dev.of_node);
-	}
-
-	if (sde_kms->hw_intr)
-		sde_hw_intr_destroy(sde_kms->hw_intr);
-	sde_kms->hw_intr = NULL;
-
-	if (sde_kms->power_event)
-		sde_power_handle_unregister_event(
-				&priv->phandle, sde_kms->power_event);
-
-	_sde_kms_release_displays(sde_kms);
-
-	_sde_kms_unmap_all_splash_regions(sde_kms);
-
-	/* safe to call these more than once during shutdown */
-	_sde_debugfs_destroy(sde_kms);
-	_sde_kms_mmu_destroy(sde_kms);
-
-	if (sde_kms->catalog) {
-		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
-			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
-
-			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
-				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
-		}
-	}
-
-	if (sde_kms->rm_init)
-		sde_rm_destroy(&sde_kms->rm);
-	sde_kms->rm_init = false;
-
-	if (sde_kms->catalog)
-		sde_hw_catalog_deinit(sde_kms->catalog);
-	sde_kms->catalog = NULL;
-
-	if (sde_kms->core_client)
-		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
-	sde_kms->core_client = NULL;
-
-	if (sde_kms->sid)
-		msm_iounmap(pdev, sde_kms->sid);
-	sde_kms->sid = NULL;
-
-	if (sde_kms->reg_dma)
-		msm_iounmap(pdev, sde_kms->reg_dma);
-	sde_kms->reg_dma = NULL;
-
-	if (sde_kms->vbif[VBIF_NRT])
-		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
-	sde_kms->vbif[VBIF_NRT] = NULL;
-
-	if (sde_kms->vbif[VBIF_RT])
-		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
-	sde_kms->vbif[VBIF_RT] = NULL;
-
-	if (sde_kms->mmio)
-		msm_iounmap(pdev, sde_kms->mmio);
-	sde_kms->mmio = NULL;
-
-	sde_reg_dma_deinit();
-}
-
-int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
-{
-	int i;
-
-	if (!sde_kms)
-		return -EINVAL;
-
-	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
-		struct msm_mmu *mmu;
-		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
-
-		if (!aspace)
-			continue;
-
-		mmu = sde_kms->aspace[i]->mmu;
-
-		if (secure_only &&
-			!aspace->mmu->funcs->is_domain_secure(mmu))
-			continue;
-
-		/* cleanup aspace before detaching */
-		msm_gem_aspace_domain_attach_detach_update(aspace, true);
-
-		SDE_DEBUG("Detaching domain:%d\n", i);
-		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
-			ARRAY_SIZE(iommu_ports));
-
-		aspace->domain_attached = false;
-	}
-
-	return 0;
-}
-
-int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
-{
-	int i;
-
-	if (!sde_kms)
-		return -EINVAL;
-
-	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
-		struct msm_mmu *mmu;
-		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
-
-		if (!aspace)
-			continue;
-
-		mmu = sde_kms->aspace[i]->mmu;
-
-		if (secure_only &&
-			!aspace->mmu->funcs->is_domain_secure(mmu))
-			continue;
-
-		SDE_DEBUG("Attaching domain:%d\n", i);
-		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
-			ARRAY_SIZE(iommu_ports));
-
-		aspace->domain_attached = true;
-		msm_gem_aspace_domain_attach_detach_update(aspace, false);
-	}
-
-	return 0;
-}
-
-static void sde_kms_destroy(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-	if (!dev || !dev->dev) {
-		SDE_ERROR("invalid device\n");
-		return;
-	}
-
-	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
-	kfree(sde_kms);
-}
-
-static void _sde_kms_plane_force_remove(struct drm_plane *plane,
-			struct drm_atomic_state *state)
-{
-	struct drm_plane_state *plane_state;
-	int ret = 0;
-
-	plane_state = drm_atomic_get_plane_state(state, plane);
-	if (IS_ERR(plane_state)) {
-		ret = PTR_ERR(plane_state);
-		SDE_ERROR("error %d getting plane %d state\n",
-				ret, plane->base.id);
-		return;
-	}
-
-	plane->old_fb = plane->fb;
-
-	SDE_DEBUG("disabling plane %d\n", plane->base.id);
-
-	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
-	if (ret != 0)
-		SDE_ERROR("error %d disabling plane %d\n", ret,
-				plane->base.id);
-}
-
-static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
-		struct drm_atomic_state *state)
-{
-	struct drm_device *dev = sde_kms->dev;
-	struct drm_framebuffer *fb, *tfb;
-	struct list_head fbs;
-	struct drm_plane *plane;
-	int ret = 0;
-	u32 plane_mask = 0;
-
-	INIT_LIST_HEAD(&fbs);
-
-	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
-		if (drm_framebuffer_read_refcount(fb) > 1) {
-			list_move_tail(&fb->filp_head, &fbs);
-
-			drm_for_each_plane(plane, dev) {
-				if (plane->fb == fb) {
-					plane_mask |=
-						1 << drm_plane_index(plane);
-					 _sde_kms_plane_force_remove(
-								plane, state);
-				}
-			}
-		} else {
-			list_del_init(&fb->filp_head);
-			drm_framebuffer_put(fb);
-		}
-	}
-
-	if (list_empty(&fbs)) {
-		SDE_DEBUG("skip commit as no fb(s)\n");
-		drm_atomic_state_put(state);
-		return 0;
-	}
-
-	SDE_DEBUG("committing after removing all the pipes\n");
-	ret = drm_atomic_commit(state);
-
-	if (ret) {
-		/*
-		 * move the fbs back to original list, so it would be
-		 * handled during drm_release
-		 */
-		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
-			list_move_tail(&fb->filp_head, &file->fbs);
-
-		SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
-		goto end;
-	}
-
-	while (!list_empty(&fbs)) {
-		fb = list_first_entry(&fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		drm_framebuffer_put(fb);
-	}
-
-end:
-	return ret;
-}
-
-static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	unsigned int i;
-	struct drm_atomic_state *state = NULL;
-	struct drm_modeset_acquire_ctx ctx;
-	int ret = 0;
-
-	/* cancel pending flip event */
-	for (i = 0; i < priv->num_crtcs; i++)
-		sde_crtc_complete_flip(priv->crtcs[i], file);
-
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (WARN_ON(ret)) {
-		goto end;
-	}
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	state->acquire_ctx = &ctx;
-
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		ret = _sde_kms_remove_fbs(sde_kms, file, state);
-		if (ret != -EDEADLK)
-			break;
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(&ctx);
-	}
-
-end:
-	if (state)
-		drm_atomic_state_put(state);
-
-	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-}
-
-static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
-		struct drm_atomic_state *state)
-{
-	struct drm_device *dev = sde_kms->dev;
-	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_connector *conn;
-	struct drm_connector_state *conn_state;
-	struct drm_connector_list_iter conn_iter;
-	int ret = 0;
-
-	drm_for_each_plane(plane, dev) {
-		plane_state = drm_atomic_get_plane_state(state, plane);
-		if (IS_ERR(plane_state)) {
-			ret = PTR_ERR(plane_state);
-			SDE_ERROR("error %d getting plane %d state\n",
-					ret, DRMID(plane));
-			return ret;
-		}
-
-		ret = sde_plane_helper_reset_custom_properties(plane,
-				plane_state);
-		if (ret) {
-			SDE_ERROR("error %d resetting plane props %d\n",
-					ret, DRMID(plane));
-			return ret;
-		}
-	}
-	drm_for_each_crtc(crtc, dev) {
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state)) {
-			ret = PTR_ERR(crtc_state);
-			SDE_ERROR("error %d getting crtc %d state\n",
-					ret, DRMID(crtc));
-			return ret;
-		}
-
-		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
-		if (ret) {
-			SDE_ERROR("error %d resetting crtc props %d\n",
-					ret, DRMID(crtc));
-			return ret;
-		}
-	}
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		conn_state = drm_atomic_get_connector_state(state, conn);
-		if (IS_ERR(conn_state)) {
-			ret = PTR_ERR(conn_state);
-			SDE_ERROR("error %d getting connector %d state\n",
-					ret, DRMID(conn));
-			return ret;
-		}
-
-		ret = sde_connector_helper_reset_custom_properties(conn,
-				conn_state);
-		if (ret) {
-			SDE_ERROR("error %d resetting connector props %d\n",
-					ret, DRMID(conn));
-			return ret;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return ret;
-}
-
-static void sde_kms_lastclose(struct msm_kms *kms,
-		struct drm_modeset_acquire_ctx *ctx)
-{
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	int ret, i;
-
-	if (!kms) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return;
-
-	state->acquire_ctx = ctx;
-
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		/* add reset of custom properties to the state */
-		ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
-		if (ret)
-			break;
-
-		ret = drm_atomic_commit(state);
-		if (ret != -EDEADLK)
-			break;
-
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(ctx);
-		SDE_DEBUG("deadlock backoff on attempt %d\n", i);
-	}
-
-	if (ret)
-		SDE_ERROR("failed to run last close: %d\n", ret);
-
-	drm_atomic_state_put(state);
-}
-
-static int sde_kms_check_secure_transition(struct msm_kms *kms,
-		struct drm_atomic_state *state)
-{
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	struct drm_crtc *crtc;
-	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
-	struct drm_crtc_state *crtc_state;
-	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
-	bool sec_session = false, global_sec_session = false;
-	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
-	int i;
-
-	if (!kms || !state) {
-		return -EINVAL;
-		SDE_ERROR("invalid arguments\n");
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	/* iterate state object for active secure/non-secure crtc */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->active)
-			continue;
-
-		active_crtc_cnt++;
-		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
-				&fb_sec, &fb_sec_dir);
-		if (fb_sec_dir)
-			sec_session = true;
-		cur_crtc = crtc;
-	}
-
-	/* iterate global list for active and secure/non-secure crtc */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (!crtc->state->active)
-			continue;
-
-		global_active_crtc_cnt++;
-		/* update only when crtc is not the same as current crtc */
-		if (crtc != cur_crtc) {
-			fb_ns = fb_sec = fb_sec_dir = 0;
-			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
-					&fb_sec, &fb_sec_dir);
-			if (fb_sec_dir)
-				global_sec_session = true;
-			global_crtc = crtc;
-		}
-	}
-
-	if (!global_sec_session && !sec_session)
-		return 0;
-
-	/*
-	 * - fail crtc commit, if secure-camera/secure-ui session is
-	 *   in-progress in any other display
-	 * - fail secure-camera/secure-ui crtc commit, if any other display
-	 *   session is in-progress
-	 */
-	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
-		    (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
-		SDE_ERROR(
-		    "crtc%d secure check failed global_active:%d active:%d\n",
-				cur_crtc ? cur_crtc->base.id : -1,
-				global_active_crtc_cnt, active_crtc_cnt);
-		return -EPERM;
-
-	/*
-	 * As only one crtc is allowed during secure session, the crtc
-	 * in this commit should match with the global crtc
-	 */
-	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
-		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
-				cur_crtc->base.id, sec_session,
-				global_crtc->base.id, global_sec_session);
-		return -EPERM;
-	}
-
-	return 0;
-}
-
-static int sde_kms_atomic_check(struct msm_kms *kms,
-		struct drm_atomic_state *state)
-{
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	int ret;
-
-	if (!kms || !state)
-		return -EINVAL;
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	SDE_ATRACE_BEGIN("atomic_check");
-	if (sde_kms_is_suspend_blocked(dev)) {
-		SDE_DEBUG("suspended, skip atomic_check\n");
-		ret = -EBUSY;
-		goto end;
-	}
-
-	ret = drm_atomic_helper_check(dev, state);
-	if (ret)
-		goto end;
-	/*
-	 * Check if any secure transition(moving CRTC between secure and
-	 * non-secure state and vice-versa) is allowed or not. when moving
-	 * to secure state, planes with fb_mode set to dir_translated only can
-	 * be staged on the CRTC, and only one CRTC can be active during
-	 * Secure state
-	 */
-	ret = sde_kms_check_secure_transition(kms, state);
-end:
-	SDE_ATRACE_END("atomic_check");
-	return ret;
-}
-
-static struct msm_gem_address_space*
-_sde_kms_get_address_space(struct msm_kms *kms,
-		unsigned int domain)
-{
-	struct sde_kms *sde_kms;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return  NULL;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return NULL;
-	}
-
-	if (domain >= MSM_SMMU_DOMAIN_MAX)
-		return NULL;
-
-	return (sde_kms->aspace[domain] &&
-			sde_kms->aspace[domain]->domain_attached) ?
-		sde_kms->aspace[domain] : NULL;
-}
-
-static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
-		unsigned int domain)
-{
-	struct msm_gem_address_space *aspace =
-		_sde_kms_get_address_space(kms, domain);
-
-	return (aspace && aspace->domain_attached) ?
-			msm_gem_get_aspace_device(aspace) : NULL;
-}
-
-static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
-{
-	struct drm_device *dev = NULL;
-	struct sde_kms *sde_kms = NULL;
-	struct drm_connector *connector = NULL;
-	struct drm_connector_list_iter conn_iter;
-	struct sde_connector *sde_conn = NULL;
-	int i;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	if (!dev) {
-		SDE_ERROR("invalid device\n");
-		return;
-	}
-
-	if (!dev->mode_config.poll_enabled)
-		return;
-
-	/* init external dsi bridge here to make sure ext bridge is probed*/
-	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
-		struct dsi_display *dsi_display;
-
-		dsi_display = sde_kms->dsi_displays[i];
-		if (dsi_display->bridge) {
-			dsi_display_drm_ext_bridge_init(dsi_display,
-				dsi_display->bridge->base.encoder,
-				dsi_display->drm_conn);
-		}
-	}
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		/* Only handle HPD capable connectors. */
-		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
-			continue;
-
-		sde_conn = to_sde_connector(connector);
-
-		if (sde_conn->ops.post_open)
-			sde_conn->ops.post_open(&sde_conn->base,
-					sde_conn->display);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-	mutex_unlock(&dev->mode_config.mutex);
-
-}
-
-static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
-		struct sde_splash_display *splash_display,
-		struct drm_crtc *crtc)
-{
-	struct msm_drm_private *priv;
-	struct drm_plane *plane;
-	struct sde_splash_mem *splash;
-	enum sde_sspp plane_id;
-	bool is_virtual;
-	int i, j;
-
-	if (!sde_kms || !splash_display || !crtc) {
-		SDE_ERROR("invalid input args\n");
-		return -EINVAL;
-	}
-
-	priv = sde_kms->dev->dev_private;
-	for (i = 0; i < priv->num_planes; i++) {
-		plane = priv->planes[i];
-		plane_id = sde_plane_pipe(plane);
-		is_virtual = is_sde_plane_virtual(plane);
-		splash = splash_display->splash;
-
-		for (j = 0; j < splash_display->pipe_cnt; j++) {
-			if ((plane_id != splash_display->pipes[j].sspp) ||
-					(splash_display->pipes[j].is_virtual
-					 != is_virtual))
-				continue;
-
-			if (splash && sde_plane_validate_src_addr(plane,
-						splash->splash_buf_base,
-						splash->splash_buf_size)) {
-				SDE_ERROR("invalid adr on pipe:%d crtc:%d\n",
-						plane_id, crtc->base.id);
-			}
-
-			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
-					crtc->base.id, plane_id, is_virtual);
-		}
-	}
-
-	return 0;
-}
-
-static int sde_kms_cont_splash_config(struct msm_kms *kms)
-{
-	void *display;
-	struct dsi_display *dsi_display;
-	struct msm_display_info info;
-	struct drm_encoder *encoder = NULL;
-	struct drm_crtc *crtc = NULL;
-	int i, rc = 0;
-	struct drm_display_mode *drm_mode = NULL;
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_connector *connector = NULL;
-	struct sde_connector *sde_conn = NULL;
-	struct sde_splash_display *splash_display;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-	if (!dev) {
-		SDE_ERROR("invalid device\n");
-		return -EINVAL;
-	}
-
-	if (!sde_kms->splash_data.num_splash_regions ||
-			!sde_kms->splash_data.num_splash_displays) {
-		DRM_INFO("cont_splash feature not enabled\n");
-		return rc;
-	}
-
-	if (sde_kms->dsi_display_count !=
-			sde_kms->splash_data.num_splash_displays) {
-		SDE_ERROR("mismatch - displays:%d vs splash-displays:%d\n",
-				sde_kms->dsi_display_count,
-				sde_kms->splash_data.num_splash_displays);
-		return rc;
-	}
-
-	/* dsi */
-	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
-		display = sde_kms->dsi_displays[i];
-		dsi_display = (struct dsi_display *)display;
-		splash_display = &sde_kms->splash_data.splash_display[i];
-
-		if (!splash_display->cont_splash_enabled) {
-			SDE_DEBUG("display->name = %s splash not enabled\n",
-					dsi_display->name);
-			continue;
-		}
-
-		SDE_DEBUG("display->name = %s\n", dsi_display->name);
-
-		if (dsi_display->bridge->base.encoder) {
-			encoder = dsi_display->bridge->base.encoder;
-			SDE_DEBUG("encoder name = %s\n", encoder->name);
-		}
-		memset(&info, 0x0, sizeof(info));
-		rc = dsi_display_get_info(NULL, &info, display);
-		if (rc) {
-			SDE_ERROR("dsi get_info %d failed\n", i);
-			encoder = NULL;
-			continue;
-		}
-		SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
-			((info.is_connected) ? "true" : "false"),
-			((info.is_primary) ? "true" : "false"));
-
-		if (!encoder) {
-			SDE_ERROR("encoder not initialized\n");
-			return -EINVAL;
-		}
-
-		priv = sde_kms->dev->dev_private;
-		encoder->crtc = priv->crtcs[i];
-		crtc = encoder->crtc;
-		splash_display->encoder =  encoder;
-
-		SDE_DEBUG("for dsi-display:%d crtc id = %d enc id =%d\n",
-				i, crtc->base.id, encoder->base.id);
-
-		mutex_lock(&dev->mode_config.mutex);
-		drm_connector_list_iter_begin(dev, &conn_iter);
-		drm_for_each_connector_iter(connector, &conn_iter) {
-			/**
-			 * SDE_KMS doesn't attach more than one encoder to
-			 * a DSI connector. So it is safe to check only with
-			 * the first encoder entry. Revisit this logic if we
-			 * ever have to support continuous splash for
-			 * external displays in MST configuration.
-			 */
-			if (connector->encoder_ids[0] == encoder->base.id)
-				break;
-		}
-		drm_connector_list_iter_end(&conn_iter);
-
-		if (!connector) {
-			SDE_ERROR("connector not initialized\n");
-			mutex_unlock(&dev->mode_config.mutex);
-			return -EINVAL;
-		}
-
-		if (connector->funcs->fill_modes) {
-			connector->funcs->fill_modes(connector,
-					dev->mode_config.max_width,
-					dev->mode_config.max_height);
-		} else {
-			SDE_ERROR("fill_modes api not defined\n");
-			mutex_unlock(&dev->mode_config.mutex);
-			return -EINVAL;
-		}
-		mutex_unlock(&dev->mode_config.mutex);
-
-		crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
-
-		/* currently consider modes[0] as the preferred mode */
-		drm_mode = list_first_entry(&connector->modes,
-				struct drm_display_mode, head);
-		SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
-				drm_mode->name, drm_mode->base.id,
-				drm_mode->type, drm_mode->flags);
-
-		/* Update CRTC drm structure */
-		crtc->state->active = true;
-		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
-		if (rc) {
-			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
-			return rc;
-		}
-		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
-		drm_mode_copy(&crtc->mode, drm_mode);
-
-		/* Update encoder structure */
-		sde_encoder_update_caps_for_cont_splash(encoder,
-				splash_display, true);
-
-		sde_crtc_update_cont_splash_settings(crtc);
-
-		sde_conn = to_sde_connector(connector);
-		if (sde_conn && sde_conn->ops.cont_splash_config)
-			sde_conn->ops.cont_splash_config(sde_conn->display);
-
-		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
-				splash_display, crtc);
-		if (rc) {
-			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
-static bool sde_kms_check_for_splash(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return false;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	return sde_kms->splash_data.num_splash_displays;
-}
-
-static void _sde_kms_null_commit(struct drm_device *dev,
-		struct drm_encoder *enc)
-{
-	struct drm_modeset_acquire_ctx ctx;
-	struct drm_connector *conn = NULL;
-	struct drm_connector *tmp_conn = NULL;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_atomic_state *state = NULL;
-	struct drm_crtc_state *crtc_state = NULL;
-	struct drm_connector_state *conn_state = NULL;
-	int retry_cnt = 0;
-	int ret = 0;
-
-	drm_modeset_acquire_init(&ctx, 0);
-
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
-		drm_modeset_backoff(&ctx);
-		retry_cnt++;
-		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
-		goto retry;
-	} else if (WARN_ON(ret)) {
-		goto end;
-	}
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		DRM_ERROR("failed to allocate atomic state, %d\n", ret);
-		goto end;
-	}
-
-	state->acquire_ctx = &ctx;
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
-		if (enc == tmp_conn->state->best_encoder) {
-			conn = tmp_conn;
-			break;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	if (!conn) {
-		SDE_ERROR("error in finding conn for enc:%d\n", DRMID(enc));
-		goto end;
-	}
-
-	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
-	conn_state = drm_atomic_get_connector_state(state, conn);
-	if (IS_ERR(conn_state)) {
-		SDE_ERROR("error %d getting connector %d state\n",
-				ret, DRMID(conn));
-		goto end;
-	}
-
-	crtc_state->active = true;
-	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
-	if (ret)
-		SDE_ERROR("error %d setting the crtc\n", ret);
-
-	ret = drm_atomic_commit(state);
-	if (ret)
-		SDE_ERROR("Error %d doing the atomic commit\n", ret);
-
-end:
-	if (state)
-		drm_atomic_state_put(state);
-
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-}
-
-static int sde_kms_pm_suspend(struct device *dev)
-{
-	struct drm_device *ddev;
-	struct drm_modeset_acquire_ctx ctx;
-	struct drm_connector *conn;
-	struct drm_encoder *enc;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_atomic_state *state = NULL;
-	struct sde_kms *sde_kms;
-	int ret = 0, num_crtcs = 0;
-
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev_to_msm_kms(ddev))
-		return -EINVAL;
-
-	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
-	SDE_EVT32(0);
-
-	/* disable hot-plug polling */
-	drm_kms_helper_poll_disable(ddev);
-
-	/* if a display stuck in CS trigger a null commit to complete handoff */
-	drm_for_each_encoder(enc, ddev) {
-		if (sde_encoder_in_cont_splash(enc) && enc->crtc)
-			_sde_kms_null_commit(ddev, enc);
-	}
-
-	/* acquire modeset lock(s) */
-	drm_modeset_acquire_init(&ctx, 0);
-
-retry:
-	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
-	if (ret)
-		goto unlock;
-
-	/* save current state for resume */
-	if (sde_kms->suspend_state)
-		drm_atomic_state_put(sde_kms->suspend_state);
-	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
-	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
-		ret = PTR_ERR(sde_kms->suspend_state);
-		DRM_ERROR("failed to back up suspend state, %d\n", ret);
-		sde_kms->suspend_state = NULL;
-		goto unlock;
-	}
-
-	/* create atomic state to disable all CRTCs */
-	state = drm_atomic_state_alloc(ddev);
-	if (!state) {
-		ret = -ENOMEM;
-		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
-		goto unlock;
-	}
-
-	state->acquire_ctx = &ctx;
-	drm_connector_list_iter_begin(ddev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		struct drm_crtc_state *crtc_state;
-		uint64_t lp;
-
-		if (!conn->state || !conn->state->crtc ||
-				conn->dpms != DRM_MODE_DPMS_ON)
-			continue;
-
-		lp = sde_connector_get_lp(conn);
-		if (lp == SDE_MODE_DPMS_LP1) {
-			/* transition LP1->LP2 on pm suspend */
-			ret = sde_connector_set_property_for_commit(conn, state,
-					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
-			if (ret) {
-				DRM_ERROR("failed to set lp2 for conn %d\n",
-						conn->base.id);
-				drm_connector_list_iter_end(&conn_iter);
-				goto unlock;
-			}
-		}
-
-		if (lp != SDE_MODE_DPMS_LP2) {
-			/* force CRTC to be inactive */
-			crtc_state = drm_atomic_get_crtc_state(state,
-					conn->state->crtc);
-			if (IS_ERR_OR_NULL(crtc_state)) {
-				DRM_ERROR("failed to get crtc %d state\n",
-						conn->state->crtc->base.id);
-				drm_connector_list_iter_end(&conn_iter);
-				goto unlock;
-			}
-
-			if (lp != SDE_MODE_DPMS_LP1)
-				crtc_state->active = false;
-			++num_crtcs;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	/* check for nothing to do */
-	if (num_crtcs == 0) {
-		DRM_DEBUG("all crtcs are already in the off state\n");
-		sde_kms->suspend_block = true;
-		goto unlock;
-	}
-
-	/* commit the "disable all" state */
-	ret = drm_atomic_commit(state);
-	if (ret < 0) {
-		DRM_ERROR("failed to disable crtcs, %d\n", ret);
-		goto unlock;
-	}
-
-	sde_kms->suspend_block = true;
-
-	drm_connector_list_iter_begin(ddev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		uint64_t lp;
-
-		lp = sde_connector_get_lp(conn);
-		if (lp != SDE_MODE_DPMS_LP2)
-			continue;
-
-		ret = sde_encoder_wait_for_event(conn->encoder,
-						MSM_ENC_TX_COMPLETE);
-		if (ret && ret != -EWOULDBLOCK)
-			SDE_ERROR(
-				"[enc: %d] wait for commit done returned %d\n",
-				conn->encoder->base.id, ret);
-		else if (!ret)
-			sde_encoder_idle_request(conn->encoder);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-unlock:
-	if (state) {
-		drm_atomic_state_put(state);
-		state = NULL;
-	}
-
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	return ret;
-}
-
-static int sde_kms_pm_resume(struct device *dev)
-{
-	struct drm_device *ddev;
-	struct sde_kms *sde_kms;
-	struct drm_modeset_acquire_ctx ctx;
-	int ret, i;
-
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev_to_msm_kms(ddev))
-		return -EINVAL;
-
-	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
-
-	SDE_EVT32(sde_kms->suspend_state != NULL);
-
-	drm_mode_config_reset(ddev);
-
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (WARN_ON(ret)) {
-		goto end;
-	}
-
-	sde_kms->suspend_block = false;
-
-	if (sde_kms->suspend_state) {
-		sde_kms->suspend_state->acquire_ctx = &ctx;
-		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-			ret = drm_atomic_helper_commit_duplicated_state(
-					sde_kms->suspend_state, &ctx);
-			if (ret != -EDEADLK)
-				break;
-
-			drm_modeset_backoff(&ctx);
-		}
-
-		if (ret < 0)
-			DRM_ERROR("failed to restore state, %d\n", ret);
-
-		drm_atomic_state_put(sde_kms->suspend_state);
-		sde_kms->suspend_state = NULL;
-	}
-
-end:
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	/* enable hot-plug polling */
-	drm_kms_helper_poll_enable(ddev);
-
-	return 0;
-}
-
-static const struct msm_kms_funcs kms_funcs = {
-	.hw_init         = sde_kms_hw_init,
-	.postinit        = sde_kms_postinit,
-	.irq_preinstall  = sde_irq_preinstall,
-	.irq_postinstall = sde_irq_postinstall,
-	.irq_uninstall   = sde_irq_uninstall,
-	.irq             = sde_irq,
-	.preclose        = sde_kms_preclose,
-	.lastclose       = sde_kms_lastclose,
-	.prepare_fence   = sde_kms_prepare_fence,
-	.prepare_commit  = sde_kms_prepare_commit,
-	.commit          = sde_kms_commit,
-	.complete_commit = sde_kms_complete_commit,
-	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
-	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
-	.enable_vblank   = sde_kms_enable_vblank,
-	.disable_vblank  = sde_kms_disable_vblank,
-	.check_modified_format = sde_format_check_modified_format,
-	.atomic_check = sde_kms_atomic_check,
-	.get_format      = sde_get_msm_format,
-	.round_pixclk    = sde_kms_round_pixclk,
-	.pm_suspend      = sde_kms_pm_suspend,
-	.pm_resume       = sde_kms_pm_resume,
-	.destroy         = sde_kms_destroy,
-	.cont_splash_config = sde_kms_cont_splash_config,
-	.register_events = _sde_kms_register_events,
-	.get_address_space = _sde_kms_get_address_space,
-	.get_address_space_device = _sde_kms_get_address_space_device,
-	.postopen = _sde_kms_post_open,
-	.check_for_splash = sde_kms_check_for_splash,
-};
-
-/* the caller api needs to turn on clock before calling it */
-static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
-{
-	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
-}
-
-static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
-{
-	int i;
-
-	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
-		if (!sde_kms->aspace[i])
-			continue;
-
-		msm_gem_address_space_put(sde_kms->aspace[i]);
-		sde_kms->aspace[i] = NULL;
-	}
-
-	return 0;
-}
-
-static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
-{
-	struct msm_mmu *mmu;
-	int i, ret;
-	int early_map = 0;
-
-	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
-		struct msm_gem_address_space *aspace;
-
-		mmu = msm_smmu_new(sde_kms->dev->dev, i);
-		if (IS_ERR(mmu)) {
-			ret = PTR_ERR(mmu);
-			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
-								i, ret);
-			continue;
-		}
-
-		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
-			mmu, "sde");
-		if (IS_ERR(aspace)) {
-			ret = PTR_ERR(aspace);
-			goto fail;
-		}
-
-		sde_kms->aspace[i] = aspace;
-		aspace->domain_attached = true;
-
-		/* Mapping splash memory block */
-		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
-				sde_kms->splash_data.num_splash_regions) {
-			ret = _sde_kms_map_all_splash_regions(sde_kms);
-			if (ret) {
-				SDE_ERROR("failed to map ret:%d\n", ret);
-				goto fail;
-			}
-		}
-
-		/*
-		 * disable early-map which would have been enabled during
-		 * bootup by smmu through the device-tree hint for cont-spash
-		 */
-		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
-				 &early_map);
-		if (ret) {
-			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
-					ret, early_map);
-			goto early_map_fail;
-		}
-	}
-
-	return 0;
-
-early_map_fail:
-	_sde_kms_unmap_all_splash_regions(sde_kms);
-fail:
-	mmu->funcs->destroy(mmu);
-	_sde_kms_mmu_destroy(sde_kms);
-
-	return ret;
-}
-
-static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
-{
-	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
-		return;
-
-	if (sde_kms->hw_mdp->ops.reset_ubwc)
-		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
-						sde_kms->catalog);
-
-	sde_hw_sid_rotator_set(sde_kms->hw_sid);
-}
-
-static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
-{
-	struct sde_vbif_set_qos_params qos_params;
-	struct sde_mdss_cfg *catalog;
-
-	if (!sde_kms->catalog)
-		return;
-
-	catalog = sde_kms->catalog;
-
-	memset(&qos_params, 0, sizeof(qos_params));
-	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
-	qos_params.xin_id = catalog->dma_cfg.xin_id;
-	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
-	qos_params.client_type = VBIF_LUTDMA_CLIENT;
-
-	sde_vbif_set_qos_remap(sde_kms, &qos_params);
-}
-
-static void sde_kms_handle_power_event(u32 event_type, void *usr)
-{
-	struct sde_kms *sde_kms = usr;
-	struct msm_kms *msm_kms;
-
-	msm_kms = &sde_kms->base;
-	if (!sde_kms)
-		return;
-
-	SDE_DEBUG("event_type:%d\n", event_type);
-	SDE_EVT32_VERBOSE(event_type);
-
-	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
-		sde_irq_update(msm_kms, true);
-		sde_vbif_init_memtypes(sde_kms);
-		sde_kms_init_shared_hw(sde_kms);
-		_sde_kms_set_lutdma_vbif_remap(sde_kms);
-		sde_kms->first_kickoff = true;
-	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
-		sde_irq_update(msm_kms, false);
-		sde_kms->first_kickoff = false;
-	}
-}
-
-#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
-
-static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
-{
-	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	int rc;
-
-	SDE_DEBUG("\n");
-
-	dev = sde_kms->dev;
-	if (!dev)
-		return -EINVAL;
-
-	priv = dev->dev_private;
-	if (!priv)
-		return -EINVAL;
-
-	SDE_EVT32(genpd->device_count);
-
-	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
-
-	return rc;
-}
-
-static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
-{
-	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	int rc;
-
-	SDE_DEBUG("\n");
-
-	dev = sde_kms->dev;
-	if (!dev)
-		return -EINVAL;
-
-	priv = dev->dev_private;
-	if (!priv)
-		return -EINVAL;
-
-	SDE_EVT32(genpd->device_count);
-
-	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
-
-	return rc;
-}
-
-static int _sde_kms_get_splash_data(struct sde_splash_data *data)
-{
-	int i = 0;
-	int ret = 0;
-	struct device_node *parent, *node, *node1;
-	struct resource r, r1;
-	const char *node_name = "cont_splash_region";
-	struct sde_splash_mem *mem;
-	bool share_splash_mem = false;
-	int num_displays, num_regions;
-	struct sde_splash_display *splash_display;
-
-	if (!data)
-		return -EINVAL;
-
-	memset(data, 0, sizeof(*data));
-
-	parent = of_find_node_by_path("/reserved-memory");
-	if (!parent) {
-		SDE_ERROR("failed to find reserved-memory node\n");
-		return -EINVAL;
-	}
-
-	node = of_find_node_by_name(parent, node_name);
-	if (!node) {
-		SDE_DEBUG("failed to find node %s\n", node_name);
-		return -EINVAL;
-	}
-
-	node1 = of_find_node_by_name(parent, "disp_rdump_region");
-	if (!node1)
-		SDE_DEBUG("failed to find disp ramdump memory reservation\n");
-
-	/**
-	 * Support sharing a single splash memory for all the built in displays
-	 * and also independent splash region per displays. Incase of
-	 * independent splash region for each connected display, dtsi node of
-	 * cont_splash_region  should be collection of all memory regions
-	 * Ex: <r1.start r1.end r2.start r2.end  ... rn.start, rn.end>
-	 */
-	num_displays = dsi_display_get_num_of_displays();
-	num_regions = of_property_count_u64_elems(node, "reg") / 2;
-
-	data->num_splash_displays = num_displays;
-
-	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
-	if (num_displays > num_regions) {
-		share_splash_mem = true;
-		pr_info(":%d displays share same splash buf\n", num_displays);
-	}
-
-	for (i = 0; i < num_displays; i++) {
-		splash_display = &data->splash_display[i];
-		if (!i || !share_splash_mem) {
-			if (of_address_to_resource(node, i, &r)) {
-				SDE_ERROR("invalid data for:%s\n", node_name);
-				return -EINVAL;
-			}
-
-			mem =  &data->splash_mem[i];
-			if (!node1 || of_address_to_resource(node1, i, &r1)) {
-				SDE_DEBUG("failed to find ramdump memory\n");
-				mem->ramdump_base = 0;
-				mem->ramdump_size = 0;
-			} else {
-				mem->ramdump_base = (unsigned long)r1.start;
-				mem->ramdump_size = (r1.end - r1.start) + 1;
-			}
-
-			mem->splash_buf_base = (unsigned long)r.start;
-			mem->splash_buf_size = (r.end - r.start) + 1;
-			mem->ref_cnt = 0;
-			splash_display->splash = mem;
-			data->num_splash_regions++;
-		} else {
-			data->splash_display[i].splash = &data->splash_mem[0];
-		}
-
-		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
-				splash_display->splash->splash_buf_base,
-				splash_display->splash->splash_buf_size);
-	}
-
-	return ret;
-}
-
-static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
-	struct platform_device *platformdev)
-{
-	int rc = -EINVAL;
-
-	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
-	if (IS_ERR(sde_kms->mmio)) {
-		rc = PTR_ERR(sde_kms->mmio);
-		SDE_ERROR("mdp register memory map failed: %d\n", rc);
-		sde_kms->mmio = NULL;
-		goto error;
-	}
-	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
-	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");
-
-	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
-			sde_kms->mmio_len);
-	if (rc)
-		SDE_ERROR("dbg base register kms failed: %d\n", rc);
-
-	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys",
-								"vbif_phys");
-	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
-		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
-		SDE_ERROR("vbif register memory map failed: %d\n", rc);
-		sde_kms->vbif[VBIF_RT] = NULL;
-		goto error;
-	}
-	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev,
-								"vbif_phys");
-	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
-				sde_kms->vbif_len[VBIF_RT]);
-	if (rc)
-		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
-
-	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys",
-								"vbif_nrt_phys");
-	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
-		sde_kms->vbif[VBIF_NRT] = NULL;
-		SDE_DEBUG("VBIF NRT is not defined");
-	} else {
-		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev,
-							"vbif_nrt_phys");
-		rc = sde_dbg_reg_register_base("vbif_nrt",
-				sde_kms->vbif[VBIF_NRT],
-				sde_kms->vbif_len[VBIF_NRT]);
-		if (rc)
-			SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
-					rc);
-	}
-
-	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys",
-								"regdma_phys");
-	if (IS_ERR(sde_kms->reg_dma)) {
-		sde_kms->reg_dma = NULL;
-		SDE_DEBUG("REG_DMA is not defined");
-	} else {
-		sde_kms->reg_dma_len = msm_iomap_size(platformdev,
-								"regdma_phys");
-		rc =  sde_dbg_reg_register_base("reg_dma",
-				sde_kms->reg_dma,
-				sde_kms->reg_dma_len);
-		if (rc)
-			SDE_ERROR("dbg base register reg_dma failed: %d\n",
-					rc);
-	}
-
-	sde_kms->sid = msm_ioremap(platformdev, "sid_phys",
-							"sid_phys");
-	if (IS_ERR(sde_kms->sid)) {
-		rc = PTR_ERR(sde_kms->sid);
-		SDE_ERROR("sid register memory map failed: %d\n", rc);
-		sde_kms->sid = NULL;
-		goto error;
-	}
-
-	sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
-	rc =  sde_dbg_reg_register_base("sid", sde_kms->sid, sde_kms->sid_len);
-	if (rc)
-		SDE_ERROR("dbg base register sid failed: %d\n", rc);
-
-error:
-	return rc;
-}
-
-static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
-			struct sde_kms *sde_kms)
-{
-	int rc = 0;
-
-	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
-		sde_kms->genpd.name = dev->unique;
-		sde_kms->genpd.power_off = sde_kms_pd_disable;
-		sde_kms->genpd.power_on = sde_kms_pd_enable;
-
-		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
-		if (rc < 0) {
-			SDE_ERROR("failed to init genpd provider %s: %d\n",
-					sde_kms->genpd.name, rc);
-			return rc;
-		}
-
-		rc = of_genpd_add_provider_simple(dev->dev->of_node,
-				&sde_kms->genpd);
-		if (rc < 0) {
-			SDE_ERROR("failed to add genpd provider %s: %d\n",
-					sde_kms->genpd.name, rc);
-			pm_genpd_remove(&sde_kms->genpd);
-			return rc;
-		}
-
-		sde_kms->genpd_init = true;
-		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
-	}
-
-	return rc;
-}
-
-static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
-	struct drm_device *dev,
-	struct msm_drm_private *priv)
-{
-	struct sde_rm *rm = NULL;
-	int i, rc = -EINVAL;
-
-	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-		sde_power_data_bus_set_quota(&priv->phandle,
-			sde_kms->core_client,
-			SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
-			SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA,
-			SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA);
-
-	_sde_kms_core_hw_rev_init(sde_kms);
-
-	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
-
-	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
-	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
-		rc = PTR_ERR(sde_kms->catalog);
-		if (!sde_kms->catalog)
-			rc = -EINVAL;
-		SDE_ERROR("catalog init failed: %d\n", rc);
-		sde_kms->catalog = NULL;
-		goto power_error;
-	}
-
-	/* initialize power domain if defined */
-	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
-	if (rc) {
-		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
-		goto genpd_err;
-	}
-
-	rc = _sde_kms_mmu_init(sde_kms);
-	if (rc) {
-		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
-		goto power_error;
-	}
-
-	/* Initialize reg dma block which is a singleton */
-	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
-			sde_kms->dev);
-	if (rc) {
-		SDE_ERROR("failed: reg dma init failed\n");
-		goto power_error;
-	}
-
-	sde_dbg_init_dbg_buses(sde_kms->core_rev);
-
-	rm = &sde_kms->rm;
-	rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
-			sde_kms->dev);
-	if (rc) {
-		SDE_ERROR("rm init failed: %d\n", rc);
-		goto power_error;
-	}
-
-	sde_kms->rm_init = true;
-
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
-		rc = PTR_ERR(sde_kms->hw_intr);
-		SDE_ERROR("hw_intr init failed: %d\n", rc);
-		sde_kms->hw_intr = NULL;
-		goto hw_intr_init_err;
-	}
-
-	/*
-	 * Attempt continuous splash handoff only if reserved
-	 * splash memory is found & release resources on any error
-	 * in finding display hw config in splash
-	 */
-	if (sde_kms->splash_data.num_splash_regions &&
-			sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
-					&sde_kms->splash_data,
-					sde_kms->catalog)) {
-		SDE_DEBUG("freeing continuous splash resources\n");
-		_sde_kms_unmap_all_splash_regions(sde_kms);
-		memset(&sde_kms->splash_data, 0x0,
-				sizeof(struct sde_splash_data));
-	}
-
-	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
-	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
-		rc = PTR_ERR(sde_kms->hw_mdp);
-		if (!sde_kms->hw_mdp)
-			rc = -EINVAL;
-		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
-		sde_kms->hw_mdp = NULL;
-		goto power_error;
-	}
-
-	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
-		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
-
-		sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
-				sde_kms->vbif[vbif_idx], sde_kms->catalog);
-		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
-			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
-			if (!sde_kms->hw_vbif[vbif_idx])
-				rc = -EINVAL;
-			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
-			sde_kms->hw_vbif[vbif_idx] = NULL;
-			goto power_error;
-		}
-	}
-
-	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
-		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
-			sde_kms->mmio_len, sde_kms->catalog);
-		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
-			rc = PTR_ERR(sde_kms->hw_uidle);
-			if (!sde_kms->hw_uidle)
-				rc = -EINVAL;
-			/* uidle is optional, so do not make it a fatal error */
-			SDE_ERROR("failed to init uidle rc:%d\n", rc);
-			sde_kms->hw_uidle = NULL;
-			rc = 0;
-		}
-	} else {
-		sde_kms->hw_uidle = NULL;
-	}
-
-	sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
-				sde_kms->sid_len, sde_kms->catalog);
-	if (IS_ERR(sde_kms->hw_sid)) {
-		SDE_ERROR("failed to init sid %ld\n", PTR_ERR(sde_kms->hw_sid));
-		sde_kms->hw_sid = NULL;
-		goto power_error;
-	}
-
-	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
-			&priv->phandle, priv->pclient, "core_clk");
-	if (rc) {
-		SDE_ERROR("failed to init perf %d\n", rc);
-		goto perf_err;
-	}
-
-	/*
-	 * _sde_kms_drm_obj_init should create the DRM related objects
-	 * i.e. CRTCs, planes, encoders, connectors and so forth
-	 */
-	rc = _sde_kms_drm_obj_init(sde_kms);
-	if (rc) {
-		SDE_ERROR("modeset init failed: %d\n", rc);
-		goto drm_obj_init_err;
-	}
-
-	return 0;
-
-genpd_err:
-drm_obj_init_err:
-	sde_core_perf_destroy(&sde_kms->perf);
-hw_intr_init_err:
-perf_err:
-power_error:
-	return rc;
-}
-
-static int sde_kms_hw_init(struct msm_kms *kms)
-{
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	struct platform_device *platformdev;
-	int i, rc = -EINVAL;
-
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		goto end;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-	if (!dev || !dev->dev) {
-		SDE_ERROR("invalid device\n");
-		goto end;
-	}
-
-	platformdev = to_platform_device(dev->dev);
-	priv = dev->dev_private;
-	if (!priv) {
-		SDE_ERROR("invalid private data\n");
-		goto end;
-	}
-
-	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
-	if (rc)
-		goto error;
-
-	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
-	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
-		rc = PTR_ERR(sde_kms->core_client);
-		if (!sde_kms->core_client)
-			rc = -EINVAL;
-		SDE_ERROR("sde power client create failed: %d\n", rc);
-		sde_kms->core_client = NULL;
-		goto error;
-	}
-
-	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
-	if (rc)
-		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
-
-	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-		true);
-	if (rc) {
-		SDE_ERROR("resource enable failed: %d\n", rc);
-		goto error;
-	}
-
-	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
-	if (rc)
-		goto hw_init_err;
-
-	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
-	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
-	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
-	dev->mode_config.max_height = sde_kms->catalog->max_display_height;
-
-	mutex_init(&sde_kms->secure_transition_lock);
-	atomic_set(&sde_kms->detach_sec_cb, 0);
-	atomic_set(&sde_kms->detach_all_cb, 0);
-
-	/*
-	 * Support format modifiers for compression etc.
-	 */
-	dev->mode_config.allow_fb_modifiers = true;
-
-	/*
-	 * Handle (re)initializations during power enable
-	 */
-	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
-	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
-			SDE_POWER_EVENT_POST_ENABLE |
-			SDE_POWER_EVENT_PRE_DISABLE,
-			sde_kms_handle_power_event, sde_kms, "kms");
-
-	if (sde_kms->splash_data.num_splash_displays) {
-		SDE_DEBUG("Skipping MDP Resources disable\n");
-	} else {
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-			sde_power_data_bus_set_quota(&priv->phandle,
-				sde_kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
-				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
-				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
-
-		sde_power_resource_enable(&priv->phandle,
-						sde_kms->core_client, false);
-	}
-	return 0;
-
-hw_init_err:
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-error:
-	_sde_kms_hw_destroy(sde_kms, platformdev);
-end:
-	return rc;
-}
-
-struct msm_kms *sde_kms_init(struct drm_device *dev)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!dev || !dev->dev_private) {
-		SDE_ERROR("drm device node invalid\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	priv = dev->dev_private;
-
-	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
-	if (!sde_kms) {
-		SDE_ERROR("failed to allocate sde kms\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	msm_kms_init(&sde_kms->base, &kms_funcs);
-	sde_kms->dev = dev;
-
-	return &sde_kms->base;
-}
-
-static int _sde_kms_register_events(struct msm_kms *kms,
-		struct drm_mode_object *obj, u32 event, bool en)
-{
-	int ret = 0;
-	struct drm_crtc *crtc = NULL;
-	struct drm_connector *conn = NULL;
-	struct sde_kms *sde_kms = NULL;
-
-	if (!kms || !obj) {
-		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	switch (obj->type) {
-	case DRM_MODE_OBJECT_CRTC:
-		crtc = obj_to_crtc(obj);
-		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
-		break;
-	case DRM_MODE_OBJECT_CONNECTOR:
-		conn = obj_to_connector(obj);
-		ret = sde_connector_register_custom_event(sde_kms, conn, event,
-				en);
-		break;
-	}
-
-	return ret;
-}
-
-int sde_kms_handle_recovery(struct drm_encoder *encoder)
-{
-	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
-	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
deleted file mode 100644
index 62da4de..0000000
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SDE_KMS_H__
-#define __SDE_KMS_H__
-
-#include <linux/msm_ion.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_qos.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "msm_mmu.h"
-#include "msm_gem.h"
-#include "sde_dbg.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_ctl.h"
-#include "sde_hw_lm.h"
-#include "sde_hw_pingpong.h"
-#include "sde_hw_interrupts.h"
-#include "sde_hw_wb.h"
-#include "sde_hw_top.h"
-#include "sde_hw_uidle.h"
-#include "sde_rm.h"
-#include "sde_power_handle.h"
-#include "sde_irq.h"
-#include "sde_core_perf.h"
-
-#define DRMID(x) ((x) ? (x)->base.id : -1)
-
-/**
- * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs
- * @fmt: Pointer to format string
- */
-#define SDE_DEBUG(fmt, ...)                                                \
-	do {                                                               \
-		if (unlikely(drm_debug & DRM_UT_KMS))                      \
-			DRM_DEBUG(fmt, ##__VA_ARGS__); \
-		else                                                       \
-			pr_debug(fmt, ##__VA_ARGS__);                      \
-	} while (0)
-
-/**
- * SDE_INFO - macro for kms/plane/crtc/encoder/connector logs
- * @fmt: Pointer to format string
- */
-#define SDE_INFO(fmt, ...)                                                \
-	do {                                                               \
-		if (unlikely(drm_debug & DRM_UT_KMS))                      \
-			DRM_INFO(fmt, ##__VA_ARGS__); \
-		else                                                       \
-			pr_info(fmt, ##__VA_ARGS__);                      \
-	} while (0)
-
-/**
- * SDE_DEBUG_DRIVER - macro for hardware driver logging
- * @fmt: Pointer to format string
- */
-#define SDE_DEBUG_DRIVER(fmt, ...)                                         \
-	do {                                                               \
-		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
-			DRM_ERROR(fmt, ##__VA_ARGS__); \
-		else                                                       \
-			pr_debug(fmt, ##__VA_ARGS__);                      \
-	} while (0)
-
-#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
-
-#define POPULATE_RECT(rect, a, b, c, d, Q16_flag) \
-	do {						\
-		(rect)->x = (Q16_flag) ? (a) >> 16 : (a);    \
-		(rect)->y = (Q16_flag) ? (b) >> 16 : (b);    \
-		(rect)->w = (Q16_flag) ? (c) >> 16 : (c);    \
-		(rect)->h = (Q16_flag) ? (d) >> 16 : (d);    \
-	} while (0)
-
-#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
-	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
-
-/**
- * ktime_compare_safe - compare two ktime structures
- *	This macro is similar to the standard ktime_compare() function, but
- *	attempts to also handle ktime overflows.
- * @A: First ktime value
- * @B: Second ktime value
- * Returns: -1 if A < B, 0 if A == B, 1 if A > B
- */
-#define ktime_compare_safe(A, B) \
-	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
-
-#define SDE_NAME_SIZE  12
-
-/* timeout in frames waiting for frame done */
-#define SDE_FRAME_DONE_TIMEOUT	60
-
-/* max active secure client counts allowed */
-#define MAX_ALLOWED_SECURE_CLIENT_CNT	1
-
-/* max active crtc when secure client is active */
-#define MAX_ALLOWED_CRTC_CNT_DURING_SECURE	1
-
-/* max virtual encoders per secure crtc */
-#define MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC	1
-
-/* defines the operations required for secure state transition */
-#define SDE_KMS_OPS_SECURE_STATE_CHANGE		BIT(0)
-#define SDE_KMS_OPS_WAIT_FOR_TX_DONE		BIT(1)
-#define SDE_KMS_OPS_CLEANUP_PLANE_FB		BIT(2)
-#define SDE_KMS_OPS_PREPARE_PLANE_FB		BIT(3)
-
-/* ESD status check interval in miliseconds */
-#define STATUS_CHECK_INTERVAL_MS 5000
-
-/**
- * enum sde_kms_smmu_state:	smmu state
- * @ATTACHED:	 all the context banks are attached.
- * @DETACHED:	 all the context banks are detached.
- * @DETACHED_SEC:	 secure context bank is detached.
- * @ATTACH_ALL_REQ:	 transient state of attaching context banks.
- * @DETACH_ALL_REQ:	 transient state of detaching context banks.
- * @DETACH_SEC_REQ:	 tranisent state of secure context bank is detached
- * @ATTACH_SEC_REQ:	 transient state of attaching secure context bank.
- */
-enum sde_kms_smmu_state {
-	ATTACHED = 0,
-	DETACHED,
-	DETACHED_SEC,
-	ATTACH_ALL_REQ,
-	DETACH_ALL_REQ,
-	DETACH_SEC_REQ,
-	ATTACH_SEC_REQ,
-};
-
-/**
- * enum sde_kms_smmu_state_transition_type: state transition type
- * @NONE: no pending state transitions
- * @PRE_COMMIT: state transitions should be done before processing the commit
- * @POST_COMMIT: state transitions to be done after processing the commit.
- */
-enum sde_kms_smmu_state_transition_type {
-	NONE,
-	PRE_COMMIT,
-	POST_COMMIT
-};
-
-/**
- * enum sde_kms_sui_misr_state: state request for enabling/disabling MISR
- * @NONE: no request
- * @ENABLE_SUI_MISR_REQ: request to enable sui MISR
- * @DISABLE_SUI_MISR_REQ: request to disable sui MISR
- */
-enum sde_kms_sui_misr_state {
-	SUI_MISR_NONE,
-	SUI_MISR_ENABLE_REQ,
-	SUI_MISR_DISABLE_REQ
-};
-
-/*
- * @FRAME_DONE_WAIT_DEFAULT:	waits for frame N pp_done interrupt before
- *                              triggering frame N+1.
- * @FRAME_DONE_WAIT_SERIALIZE:	serialize pp_done and ctl_start irq for frame
- *                              N without next frame trigger wait.
- * @FRAME_DONE_WAIT_POSTED_START: Do not wait for pp_done interrupt for any
- *                              frame. Wait will trigger only for error case.
- */
-enum frame_trigger_mode_type {
-	FRAME_DONE_WAIT_DEFAULT,
-	FRAME_DONE_WAIT_SERIALIZE,
-	FRAME_DONE_WAIT_POSTED_START,
-};
-
-/**
- * struct sde_kms_smmu_state_data: stores the smmu state and transition type
- * @state: current state of smmu context banks
- * @secure_level: secure level cached from crtc
- * @transition_type: transition request type
- * @transition_error: whether there is error while transitioning the state
- */
-struct sde_kms_smmu_state_data {
-	uint32_t state;
-	uint32_t secure_level;
-	uint32_t transition_type;
-	uint32_t transition_error;
-	uint32_t sui_misr_state;
-};
-
-/*
- * struct sde_irq_callback - IRQ callback handlers
- * @list: list to callback
- * @func: intr handler
- * @arg: argument for the handler
- */
-struct sde_irq_callback {
-	struct list_head list;
-	void (*func)(void *arg, int irq_idx);
-	void *arg;
-};
-
-/**
- * struct sde_irq: IRQ structure contains callback registration info
- * @total_irq:    total number of irq_idx obtained from HW interrupts mapping
- * @irq_cb_tbl:   array of IRQ callbacks setting
- * @enable_counts array of IRQ enable counts
- * @cb_lock:      callback lock
- * @debugfs_file: debugfs file for irq statistics
- */
-struct sde_irq {
-	u32 total_irqs;
-	struct list_head *irq_cb_tbl;
-	atomic_t *enable_counts;
-	atomic_t *irq_counts;
-	spinlock_t cb_lock;
-	struct dentry *debugfs_file;
-};
-
-struct sde_kms {
-	struct msm_kms base;
-	struct drm_device *dev;
-	int core_rev;
-	struct sde_mdss_cfg *catalog;
-
-	struct generic_pm_domain genpd;
-	bool genpd_init;
-
-	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
-	struct sde_power_client *core_client;
-
-	struct sde_power_event *power_event;
-
-	/* directory entry for debugfs */
-	struct dentry *debugfs_vbif;
-
-	/* io/register spaces: */
-	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma, *sid;
-	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len, sid_len;
-
-	struct regulator *vdd;
-	struct regulator *mmagic;
-	struct regulator *venus;
-
-	struct sde_irq_controller irq_controller;
-
-	struct sde_hw_intr *hw_intr;
-	struct sde_irq irq_obj;
-	int irq_num;	/* mdss irq number */
-	bool irq_enabled;
-
-	struct sde_core_perf perf;
-
-	/* saved atomic state during system suspend */
-	struct drm_atomic_state *suspend_state;
-	bool suspend_block;
-
-	struct sde_rm rm;
-	bool rm_init;
-	struct sde_splash_data splash_data;
-	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_hw_uidle *hw_uidle;
-	struct sde_hw_sid *hw_sid;
-	int dsi_display_count;
-	void **dsi_displays;
-	int wb_display_count;
-	void **wb_displays;
-	int dp_display_count;
-	void **dp_displays;
-	int dp_stream_count;
-
-	bool has_danger_ctrl;
-
-	struct sde_kms_smmu_state_data smmu_state;
-	atomic_t detach_sec_cb;
-	atomic_t detach_all_cb;
-	struct mutex secure_transition_lock;
-
-	bool first_kickoff;
-};
-
-struct vsync_info {
-	u32 frame_count;
-	u32 line_count;
-};
-
-#define to_sde_kms(x) container_of(x, struct sde_kms, base)
-
-/**
- * sde_is_custom_client - whether or not to enable non-standard customizations
- *
- * Return: Whether or not the 'sdeclient' module parameter was set on boot up
- */
-bool sde_is_custom_client(void);
-
-/**
- * sde_kms_power_resource_is_enabled - whether or not power resource is enabled
- * @dev: Pointer to drm device
- * Return: true if power resource is enabled; false otherwise
- */
-static inline bool sde_kms_power_resource_is_enabled(struct drm_device *dev)
-{
-	struct msm_drm_private *priv;
-
-	if (!dev || !dev->dev_private)
-		return false;
-
-	priv = dev->dev_private;
-
-	return sde_power_resource_is_enabled(&priv->phandle);
-}
-
-/**
- * sde_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool sde_kms_is_suspend_state(struct drm_device *dev)
-{
-	if (!ddev_to_msm_kms(dev))
-		return false;
-
-	return to_sde_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * sde_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- *				suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool sde_kms_is_suspend_blocked(struct drm_device *dev)
-{
-	if (!sde_kms_is_suspend_state(dev))
-		return false;
-
-	return to_sde_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
- * sde_kms_is_secure_session_inprogress - to indicate if secure-session is in
- * currently in-progress based on the current smmu_state
- *
- * @sde_kms: Pointer to sde_kms
- *
- * return: true if secure-session is in progress; false otherwise
- */
-static inline bool sde_kms_is_secure_session_inprogress(struct sde_kms *sde_kms)
-{
-	bool ret = false;
-
-	if (!sde_kms)
-		return false;
-
-	mutex_lock(&sde_kms->secure_transition_lock);
-	if (((sde_kms->catalog->sui_ns_allowed) &&
-		(sde_kms->smmu_state.secure_level == SDE_DRM_SEC_ONLY) &&
-			((sde_kms->smmu_state.state == DETACHED_SEC) ||
-				(sde_kms->smmu_state.state == DETACH_SEC_REQ) ||
-				(sde_kms->smmu_state.state == ATTACH_SEC_REQ)))
-		|| (((sde_kms->smmu_state.state == DETACHED) ||
-			(sde_kms->smmu_state.state == DETACH_ALL_REQ) ||
-			(sde_kms->smmu_state.state == ATTACH_ALL_REQ))))
-		ret = true;
-	mutex_unlock(&sde_kms->secure_transition_lock);
-
-	return ret;
-}
-
-/**
- * sde_kms_is_vbif_operation_allowed - resticts the VBIF programming
- * during secure-ui, if the sec_ui_misr feature is enabled
- *
- * @sde_kms: Pointer to sde_kms
- *
- * return: false if secure-session is in progress; true otherwise
- */
-static inline bool sde_kms_is_vbif_operation_allowed(struct sde_kms *sde_kms)
-{
-	if (!sde_kms)
-		return false;
-
-	if (!sde_kms->catalog->sui_misr_supported)
-		return true;
-
-	return !sde_kms_is_secure_session_inprogress(sde_kms);
-}
-
-/**
- * sde_kms_is_cp_operation_allowed - resticts the CP programming
- * during secure-ui, if the non-secure context banks are detached
- *
- * @sde_kms: Pointer to sde_kms
- */
-static inline bool sde_kms_is_cp_operation_allowed(struct sde_kms *sde_kms)
-{
-	if (!sde_kms || !sde_kms->catalog)
-		return false;
-
-	if (sde_kms->catalog->sui_ns_allowed)
-		return true;
-
-	return !sde_kms_is_secure_session_inprogress(sde_kms);
-}
-
-/**
- * Debugfs functions - extra helper functions for debugfs support
- *
- * Main debugfs documentation is located at,
- *
- * Documentation/filesystems/debugfs.txt
- *
- * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
- */
-
-/**
- * sde_debugfs_get_root - Return root directory entry for KMS's debugfs
- *
- * The return value should be passed as the 'parent' argument to subsequent
- * debugfs create calls.
- *
- * @sde_kms: Pointer to SDE's KMS structure
- *
- * Return: dentry pointer for SDE's debugfs location
- */
-void *sde_debugfs_get_root(struct sde_kms *sde_kms);
-
-/**
- * SDE info management functions
- * These functions/definitions allow for building up a 'sde_info' structure
- * containing one or more "key=value\n" entries.
- */
-#define SDE_KMS_INFO_MAX_SIZE	4096
-
-/**
- * struct sde_kms_info - connector information structure container
- * @data: Array of information character data
- * @len: Current length of information data
- * @staged_len: Temporary data buffer length, commit to
- *              len using sde_kms_info_stop
- * @start: Whether or not a partial data entry was just started
- */
-struct sde_kms_info {
-	char data[SDE_KMS_INFO_MAX_SIZE];
-	uint32_t len;
-	uint32_t staged_len;
-	bool start;
-};
-
-/**
- * SDE_KMS_INFO_DATA - Macro for accessing sde_kms_info data bytes
- * @S: Pointer to sde_kms_info structure
- * Returns: Pointer to byte data
- */
-#define SDE_KMS_INFO_DATA(S)    ((S) ? ((struct sde_kms_info *)(S))->data \
-							: NULL)
-
-/**
- * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
- *			it adds an extra character length to count null.
- * @S: Pointer to sde_kms_info structure
- * Returns: Size of available byte data
- */
-#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len + 1 \
-							: 0)
-
-/**
- * sde_kms_info_reset - reset sde_kms_info structure
- * @info: Pointer to sde_kms_info structure
- */
-void sde_kms_info_reset(struct sde_kms_info *info);
-
-/**
- * sde_kms_info_add_keyint - add integer value to 'sde_kms_info'
- * @info: Pointer to sde_kms_info structure
- * @key: Pointer to key string
- * @value: Signed 64-bit integer value
- */
-void sde_kms_info_add_keyint(struct sde_kms_info *info,
-		const char *key,
-		int64_t value);
-
-/**
- * sde_kms_info_add_keystr - add string value to 'sde_kms_info'
- * @info: Pointer to sde_kms_info structure
- * @key: Pointer to key string
- * @value: Pointer to string value
- */
-void sde_kms_info_add_keystr(struct sde_kms_info *info,
-		const char *key,
-		const char *value);
-
-/**
- * sde_kms_info_start - begin adding key to 'sde_kms_info'
- * Usage:
- *      sde_kms_info_start(key)
- *      sde_kms_info_append(val_1)
- *      ...
- *      sde_kms_info_append(val_n)
- *      sde_kms_info_stop
- * @info: Pointer to sde_kms_info structure
- * @key: Pointer to key string
- */
-void sde_kms_info_start(struct sde_kms_info *info,
-		const char *key);
-
-/**
- * sde_kms_info_append - append value string to 'sde_kms_info'
- * Usage:
- *      sde_kms_info_start(key)
- *      sde_kms_info_append(val_1)
- *      ...
- *      sde_kms_info_append(val_n)
- *      sde_kms_info_stop
- * @info: Pointer to sde_kms_info structure
- * @str: Pointer to partial value string
- */
-void sde_kms_info_append(struct sde_kms_info *info,
-		const char *str);
-
-/**
- * sde_kms_info_append_format - append format code string to 'sde_kms_info'
- * Usage:
- *      sde_kms_info_start(key)
- *      sde_kms_info_append_format(fourcc, modifier)
- *      ...
- *      sde_kms_info_stop
- * @info: Pointer to sde_kms_info structure
- * @pixel_format: FOURCC format code
- * @modifier: 64-bit drm format modifier
- */
-void sde_kms_info_append_format(struct sde_kms_info *info,
-		uint32_t pixel_format,
-		uint64_t modifier);
-
-/**
- * sde_kms_info_stop - finish adding key to 'sde_kms_info'
- * Usage:
- *      sde_kms_info_start(key)
- *      sde_kms_info_append(val_1)
- *      ...
- *      sde_kms_info_append(val_n)
- *      sde_kms_info_stop
- * @info: Pointer to sde_kms_info structure
- */
-void sde_kms_info_stop(struct sde_kms_info *info);
-
-/**
- * sde_kms_rect_intersect - intersect two rectangles
- * @r1: first rectangle
- * @r2: scissor rectangle
- * @result: result rectangle, all 0's on no intersection found
- */
-void sde_kms_rect_intersect(const struct sde_rect *r1,
-		const struct sde_rect *r2,
-		struct sde_rect *result);
-
-/**
- * sde_kms_rect_merge_rectangles - merge a rectangle list into one rect
- * @rois: pointer to the list of rois
- * @result: output rectangle, all 0 on error
- */
-void sde_kms_rect_merge_rectangles(const struct msm_roi_list *rois,
-		struct sde_rect *result);
-
-/**
- * sde_kms_rect_is_equal - compares two rects
- * @r1: rect value to compare
- * @r2: rect value to compare
- *
- * Returns 1 if the rects are same, 0 otherwise.
- */
-static inline bool sde_kms_rect_is_equal(struct sde_rect *r1,
-		struct sde_rect *r2)
-{
-	if ((!r1 && r2) || (r1 && !r2))
-		return false;
-
-	if (!r1 && !r2)
-		return true;
-
-	return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
-			r1->h == r2->h;
-}
-
-/**
- * sde_kms_rect_is_null - returns true if the width or height of a rect is 0
- * @rect: rectangle to check for zero size
- * @Return: True if width or height of rectangle is 0
- */
-static inline bool sde_kms_rect_is_null(const struct sde_rect *r)
-{
-	if (!r)
-		return true;
-
-	return (!r->w || !r->h);
-}
-
-/**
- * Vblank enable/disable functions
- */
-int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-
-/**
- * smmu attach/detach functions
- * @sde_kms: poiner to sde_kms structure
- * @secure_only: if true only secure contexts are attached/detached, else
- * all contexts are attached/detached/
- */
-int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
-int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
-
-/**
- * sde_kms_timeline_status - provides current timeline status
- * @dev: Pointer to drm device
- */
-void sde_kms_timeline_status(struct drm_device *dev);
-
-/**
- * sde_kms_handle_recovery - handler function for FIFO overflow issue
- * @encoder: pointer to drm encoder structure
- * return: 0 on success; error code otherwise
- */
-int sde_kms_handle_recovery(struct drm_encoder *encoder);
-
-#endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
deleted file mode 100644
index ac1e142..0000000
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ /dev/null
@@ -1,213 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"sde-kms_utils:[%s] " fmt, __func__
-
-#include "sde_kms.h"
-
-void sde_kms_info_reset(struct sde_kms_info *info)
-{
-	if (info) {
-		info->len = 0;
-		info->staged_len = 0;
-	}
-}
-
-void sde_kms_info_add_keyint(struct sde_kms_info *info,
-		const char *key,
-		int64_t value)
-{
-	uint32_t len;
-
-	if (info && key) {
-		len = snprintf(info->data + info->len,
-				SDE_KMS_INFO_MAX_SIZE - info->len,
-				"%s=%lld\n",
-				key,
-				value);
-
-		/* check if snprintf truncated the string */
-		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
-			info->len += len;
-	}
-}
-
-void sde_kms_info_add_keystr(struct sde_kms_info *info,
-		const char *key,
-		const char *value)
-{
-	uint32_t len;
-
-	if (info && key && value) {
-		len = snprintf(info->data + info->len,
-				SDE_KMS_INFO_MAX_SIZE - info->len,
-				"%s=%s\n",
-				key,
-				value);
-
-		/* check if snprintf truncated the string */
-		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
-			info->len += len;
-	}
-}
-
-void sde_kms_info_start(struct sde_kms_info *info,
-		const char *key)
-{
-	uint32_t len;
-
-	if (info && key) {
-		len = snprintf(info->data + info->len,
-				SDE_KMS_INFO_MAX_SIZE - info->len,
-				"%s=",
-				key);
-
-		info->start = true;
-
-		/* check if snprintf truncated the string */
-		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
-			info->staged_len = info->len + len;
-	}
-}
-
-void sde_kms_info_append(struct sde_kms_info *info,
-		const char *str)
-{
-	uint32_t len;
-
-	if (info) {
-		len = snprintf(info->data + info->staged_len,
-				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
-				"%s",
-				str);
-
-		/* check if snprintf truncated the string */
-		if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
-			info->staged_len += len;
-			info->start = false;
-		}
-	}
-}
-
-void sde_kms_info_append_format(struct sde_kms_info *info,
-		uint32_t pixel_format,
-		uint64_t modifier)
-{
-	uint32_t len;
-
-	if (!info)
-		return;
-
-	if (modifier) {
-		len = snprintf(info->data + info->staged_len,
-				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
-				info->start ?
-				"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
-				(pixel_format >> 0) & 0xFF,
-				(pixel_format >> 8) & 0xFF,
-				(pixel_format >> 16) & 0xFF,
-				(pixel_format >> 24) & 0xFF,
-				(modifier >> 56) & 0xFF,
-				modifier & ((1ULL << 56) - 1));
-	} else {
-		len = snprintf(info->data + info->staged_len,
-				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
-				info->start ?
-				"%c%c%c%c" : " %c%c%c%c",
-				(pixel_format >> 0) & 0xFF,
-				(pixel_format >> 8) & 0xFF,
-				(pixel_format >> 16) & 0xFF,
-				(pixel_format >> 24) & 0xFF);
-	}
-
-	/* check if snprintf truncated the string */
-	if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
-		info->staged_len += len;
-		info->start = false;
-	}
-}
-
-void sde_kms_info_stop(struct sde_kms_info *info)
-{
-	uint32_t len;
-
-	if (info) {
-		/* insert final delimiter */
-		len = snprintf(info->data + info->staged_len,
-				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
-				"\n");
-
-		/* check if snprintf truncated the string */
-		if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE)
-			info->len = info->staged_len + len;
-	}
-}
-
-void sde_kms_rect_intersect(const struct sde_rect *r1,
-		const struct sde_rect *r2,
-		struct sde_rect *result)
-{
-	int l, t, r, b;
-
-	if (!r1 || !r2 || !result)
-		return;
-
-	l = max(r1->x, r2->x);
-	t = max(r1->y, r2->y);
-	r = min((r1->x + r1->w), (r2->x + r2->w));
-	b = min((r1->y + r1->h), (r2->y + r2->h));
-
-	if (r <= l || b <= t) {
-		memset(result, 0, sizeof(*result));
-	} else {
-		result->x = l;
-		result->y = t;
-		result->w = r - l;
-		result->h = b - t;
-	}
-}
-
-void sde_kms_rect_merge_rectangles(const struct msm_roi_list *rois,
-		struct sde_rect *result)
-{
-	struct drm_clip_rect clip;
-	const struct drm_clip_rect *roi_rect;
-	int i;
-
-	if (!rois || !result)
-		return;
-
-	memset(result, 0, sizeof(*result));
-
-	/* init to invalid range maxes */
-	clip.x1 = ~0;
-	clip.y1 = ~0;
-	clip.x2 = 0;
-	clip.y2 = 0;
-
-	/* aggregate all clipping rectangles together for overall roi */
-	for (i = 0; i < rois->num_rects; i++) {
-		roi_rect = &rois->roi[i];
-
-		clip.x1 = min(clip.x1, roi_rect->x1);
-		clip.y1 = min(clip.y1, roi_rect->y1);
-		clip.x2 = max(clip.x2, roi_rect->x2);
-		clip.y2 = max(clip.y2, roi_rect->y2);
-
-		SDE_DEBUG("roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n", i,
-				roi_rect->x1, roi_rect->y1,
-				roi_rect->x2, roi_rect->y2,
-				clip.x1, clip.y1,
-				clip.x2, clip.y2);
-	}
-
-	if (clip.x2  && clip.y2) {
-		result->x = clip.x1;
-		result->y = clip.y1;
-		result->w = clip.x2 - clip.x1;
-		result->h = clip.y2 - clip.y1;
-	}
-}
-
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
deleted file mode 100644
index eff1a2d..0000000
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ /dev/null
@@ -1,4538 +0,0 @@
-/*
- * Copyright (C) 2014-2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <uapi/drm/sde_drm.h>
-#include <uapi/drm/msm_drm_pp.h>
-
-#include "msm_prop.h"
-#include "msm_drv.h"
-
-#include "sde_kms.h"
-#include "sde_fence.h"
-#include "sde_formats.h"
-#include "sde_hw_sspp.h"
-#include "sde_hw_catalog_format.h"
-#include "sde_trace.h"
-#include "sde_crtc.h"
-#include "sde_vbif.h"
-#include "sde_plane.h"
-#include "sde_color_processing.h"
-
-#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
-		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
-
-#define SDE_ERROR_PLANE(pl, fmt, ...) SDE_ERROR("plane%d " fmt,\
-		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
-
-#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
-#define PHASE_STEP_SHIFT	21
-#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
-#define PHASE_RESIDUAL		15
-
-#define SHARP_STRENGTH_DEFAULT	32
-#define SHARP_EDGE_THR_DEFAULT	112
-#define SHARP_SMOOTH_THR_DEFAULT	8
-#define SHARP_NOISE_THR_DEFAULT	2
-
-#define SDE_NAME_SIZE  12
-
-#define SDE_PLANE_COLOR_FILL_FLAG	BIT(31)
-
-#define TIME_MULTIPLEX_RECT(r0, r1, buffer_lines) \
-	 ((r0).y >= ((r1).y + (r1).h + buffer_lines))
-
-/* multirect rect index */
-enum {
-	R0,
-	R1,
-	R_MAX
-};
-
-#define SDE_QSEED_DEFAULT_DYN_EXP 0x0
-
-#define DEFAULT_REFRESH_RATE	60
-
-/**
- * enum sde_plane_qos - Different qos configurations for each pipe
- *
- * @SDE_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
- * @SDE_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
- *	this configuration is mutually exclusive from VBLANK_CTRL.
- * @SDE_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
- */
-enum sde_plane_qos {
-	SDE_PLANE_QOS_VBLANK_CTRL = BIT(0),
-	SDE_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
-	SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
-};
-
-/*
- * struct sde_plane - local sde plane structure
- * @aspace: address space pointer
- * @csc_cfg: Decoded user configuration for csc
- * @csc_usr_ptr: Points to csc_cfg if valid user config available
- * @csc_ptr: Points to sde_csc_cfg structure to use for current
- * @mplane_list: List of multirect planes of the same pipe
- * @catalog: Points to sde catalog structure
- * @revalidate: force revalidation of all the plane properties
- * @xin_halt_forced_clk: whether or not clocks were forced on for xin halt
- * @blob_rot_caps: Pointer to rotator capability blob
- */
-struct sde_plane {
-	struct drm_plane base;
-
-	struct mutex lock;
-
-	enum sde_sspp pipe;
-	uint32_t features;      /* capabilities from catalog */
-	uint32_t perf_features; /* perf capabilities from catalog */
-	uint32_t nformats;
-	uint32_t formats[64];
-
-	struct sde_hw_pipe *pipe_hw;
-	struct sde_hw_pipe_cfg pipe_cfg;
-	struct sde_hw_sharp_cfg sharp_cfg;
-	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
-	uint32_t color_fill;
-	bool is_error;
-	bool is_rt_pipe;
-	bool is_virtual;
-	struct list_head mplane_list;
-	struct sde_mdss_cfg *catalog;
-	bool revalidate;
-	bool xin_halt_forced_clk;
-
-	struct sde_csc_cfg csc_cfg;
-	struct sde_csc_cfg *csc_usr_ptr;
-	struct sde_csc_cfg *csc_ptr;
-
-	const struct sde_sspp_sub_blks *pipe_sblk;
-
-	char pipe_name[SDE_NAME_SIZE];
-
-	struct msm_property_info property_info;
-	struct msm_property_data property_data[PLANE_PROP_COUNT];
-	struct drm_property_blob *blob_info;
-	struct drm_property_blob *blob_rot_caps;
-
-	/* debugfs related stuff */
-	struct dentry *debugfs_root;
-	bool debugfs_default_scale;
-};
-
-#define to_sde_plane(x) container_of(x, struct sde_plane, base)
-
-static int plane_prop_array[PLANE_PROP_COUNT] = {SDE_PLANE_DIRTY_ALL};
-
-static struct sde_kms *_sde_plane_get_kms(struct drm_plane *plane)
-{
-	struct msm_drm_private *priv;
-
-	if (!plane || !plane->dev)
-		return NULL;
-	priv = plane->dev->dev_private;
-	if (!priv)
-		return NULL;
-	return to_sde_kms(priv->kms);
-}
-
-static struct sde_hw_ctl *_sde_plane_get_hw_ctl(const struct drm_plane *plane)
-{
-	struct drm_plane_state *pstate = NULL;
-	struct drm_crtc *drm_crtc = NULL;
-	struct sde_crtc *sde_crtc = NULL;
-	struct sde_crtc_mixer *mixer = NULL;
-	struct sde_hw_ctl *ctl = NULL;
-
-	if (!plane) {
-		DRM_ERROR("Invalid plane %pK\n", plane);
-		return NULL;
-	}
-
-	pstate = plane->state;
-	if (!pstate) {
-		DRM_ERROR("Invalid plane state %pK\n", pstate);
-		return NULL;
-	}
-
-	drm_crtc = pstate->crtc;
-	if (!drm_crtc) {
-		DRM_ERROR("Invalid drm_crtc %pK\n", drm_crtc);
-		return NULL;
-	}
-
-	sde_crtc = to_sde_crtc(drm_crtc);
-	if (!sde_crtc) {
-		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
-		return NULL;
-	}
-
-	/* it will always return the first mixer and single CTL */
-	mixer = sde_crtc->mixers;
-	if (!mixer) {
-		DRM_ERROR("invalid mixer %pK\n", mixer);
-		return NULL;
-	}
-
-	ctl = mixer->hw_ctl;
-	if (!mixer) {
-		DRM_ERROR("invalid ctl %pK\n", ctl);
-		return NULL;
-	}
-
-	return ctl;
-}
-
-static bool sde_plane_enabled(const struct drm_plane_state *state)
-{
-	return state && state->fb && state->crtc;
-}
-
-bool sde_plane_is_sec_ui_allowed(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-
-	if (!plane)
-		return false;
-
-	psde = to_sde_plane(plane);
-
-	return !(psde->features & BIT(SDE_SSPP_BLOCK_SEC_UI));
-}
-
-void sde_plane_setup_src_split_order(struct drm_plane *plane,
-		enum sde_sspp_multirect_index rect_mode, bool enable)
-{
-	struct sde_plane *psde;
-
-	if (!plane)
-		return;
-
-	psde = to_sde_plane(plane);
-	if (psde->pipe_hw->ops.set_src_split_order)
-		psde->pipe_hw->ops.set_src_split_order(psde->pipe_hw,
-					rect_mode, enable);
-}
-
-/**
- * _sde_plane_calc_fill_level - calculate fill level of the given source format
- * @plane:		Pointer to drm plane
- * @fmt:		Pointer to source buffer format
- * @src_wdith:		width of source buffer
- * Return: fill level corresponding to the source buffer/format or 0 if error
- */
-static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
-		const struct sde_format *fmt, u32 src_width)
-{
-	struct sde_plane *psde, *tmp;
-	struct sde_plane_state *pstate;
-	u32 fixed_buff_size;
-	u32 total_fl;
-	u32 hflip_bytes;
-	u32 unused_space;
-
-	if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
-		SDE_ERROR("invalid arguments\n");
-		return 0;
-	}
-
-	psde = to_sde_plane(plane);
-	if (psde->perf_features & BIT(SDE_PERF_SSPP_QOS_FL_NOCALC))
-		return 0;
-
-	pstate = to_sde_plane_state(plane->state);
-	fixed_buff_size = psde->pipe_sblk->pixel_ram_size;
-
-	list_for_each_entry(tmp, &psde->mplane_list, mplane_list) {
-		if (!sde_plane_enabled(tmp->base.state))
-			continue;
-		SDE_DEBUG("plane%d/%d src_width:%d/%d\n",
-				psde->base.base.id, tmp->base.base.id,
-				src_width, tmp->pipe_cfg.src_rect.w);
-		src_width = max_t(u32, src_width, tmp->pipe_cfg.src_rect.w);
-	}
-
-	if ((pstate->rotation & DRM_MODE_REFLECT_X) &&
-			SDE_FORMAT_IS_LINEAR(fmt))
-		hflip_bytes = (src_width + 32) * fmt->bpp;
-	else
-		hflip_bytes = 0;
-
-	if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
-
-		unused_space = 23 * 128;
-		if (fmt->chroma_sample == SDE_CHROMA_420) {
-			/* NV12 */
-			total_fl = (fixed_buff_size / 2 - hflip_bytes -
-				unused_space) / ((src_width + 32) * fmt->bpp);
-		} else {
-			/* non NV12 */
-			total_fl = (fixed_buff_size / 2 - hflip_bytes -
-				unused_space) * 2 / ((src_width + 32) *
-				fmt->bpp);
-		}
-	} else {
-
-		unused_space = 6 * 128;
-		if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
-			total_fl = (fixed_buff_size / 2 - hflip_bytes -
-				unused_space) * 2 / ((src_width + 32) *
-				fmt->bpp);
-		} else {
-			total_fl = (fixed_buff_size - hflip_bytes -
-				unused_space) * 2 / ((src_width + 32) *
-				fmt->bpp);
-		}
-	}
-
-	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u hf:%d us:%d fl:%u\n",
-			plane->base.id, psde->pipe - SSPP_VIG0,
-			(char *)&fmt->base.pixel_format,
-			src_width, hflip_bytes, unused_space, total_fl);
-
-	return total_fl;
-}
-
-/**
- * _sde_plane_get_qos_lut - get LUT mapping based on fill level
- * @tbl:		Pointer to LUT table
- * @total_fl:		fill level
- * Return: LUT setting corresponding to the fill level
- */
-static u64 _sde_plane_get_qos_lut(const struct sde_qos_lut_tbl *tbl,
-		u32 total_fl)
-{
-	int i;
-
-	if (!tbl || !tbl->nentry || !tbl->entries)
-		return 0;
-
-	for (i = 0; i < tbl->nentry; i++)
-		if (total_fl <= tbl->entries[i].fl)
-			return tbl->entries[i].lut;
-
-	/* if last fl is zero, use as default */
-	if (!tbl->entries[i-1].fl)
-		return tbl->entries[i-1].lut;
-
-	return 0;
-}
-
-/**
- * _sde_plane_set_qos_lut - set QoS LUT of the given plane
- * @plane:		Pointer to drm plane
- * @fb:			Pointer to framebuffer associated with the given plane
- */
-static void _sde_plane_set_qos_lut(struct drm_plane *plane,
-		struct drm_framebuffer *fb)
-{
-	struct sde_plane *psde;
-	const struct sde_format *fmt = NULL;
-	u64 qos_lut;
-	u32 total_fl = 0, lut_usage;
-
-	if (!plane || !fb) {
-		SDE_ERROR("invalid arguments plane %d fb %d\n",
-				!plane, !fb);
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-
-	if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	} else if (!psde->pipe_hw->ops.setup_creq_lut) {
-		return;
-	}
-
-	if (!psde->is_rt_pipe) {
-		lut_usage = SDE_QOS_LUT_USAGE_NRT;
-	} else {
-		fmt = sde_get_sde_format_ext(
-				fb->format->format,
-				fb->modifier);
-		total_fl = _sde_plane_calc_fill_level(plane, fmt,
-				psde->pipe_cfg.src_rect.w);
-
-		if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
-			lut_usage = SDE_QOS_LUT_USAGE_LINEAR;
-		else if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3))
-			lut_usage = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
-		else
-			lut_usage = SDE_QOS_LUT_USAGE_MACROTILE;
-	}
-
-	qos_lut = _sde_plane_get_qos_lut(
-			&psde->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
-
-	psde->pipe_qos_cfg.creq_lut = qos_lut;
-
-	trace_sde_perf_set_qos_luts(psde->pipe - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
-			psde->is_rt_pipe, total_fl, qos_lut, lut_usage);
-
-	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
-			plane->base.id,
-			psde->pipe - SSPP_VIG0,
-			fmt ? (char *)&fmt->base.pixel_format : NULL,
-			psde->is_rt_pipe, total_fl, qos_lut);
-
-	psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
-}
-
-/**
- * _sde_plane_set_panic_lut - set danger/safe LUT of the given plane
- * @plane:		Pointer to drm plane
- * @fb:			Pointer to framebuffer associated with the given plane
- */
-static void _sde_plane_set_danger_lut(struct drm_plane *plane,
-		struct drm_framebuffer *fb)
-{
-	struct sde_plane *psde;
-	const struct sde_format *fmt = NULL;
-	u32 danger_lut, safe_lut;
-	u32 total_fl = 0, lut_usage;
-
-	if (!plane || !fb) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-
-	if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	} else if (!psde->pipe_hw->ops.setup_danger_safe_lut) {
-		return;
-	}
-
-	if (!psde->is_rt_pipe) {
-		danger_lut = psde->catalog->perf.danger_lut_tbl
-				[SDE_QOS_LUT_USAGE_NRT];
-		lut_usage = SDE_QOS_LUT_USAGE_NRT;
-	} else {
-		fmt = sde_get_sde_format_ext(
-				fb->format->format,
-				fb->modifier);
-		total_fl = _sde_plane_calc_fill_level(plane, fmt,
-				psde->pipe_cfg.src_rect.w);
-
-		if (fmt && SDE_FORMAT_IS_LINEAR(fmt)) {
-			danger_lut = psde->catalog->perf.danger_lut_tbl
-					[SDE_QOS_LUT_USAGE_LINEAR];
-			lut_usage = SDE_QOS_LUT_USAGE_LINEAR;
-		} else if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-			danger_lut = psde->catalog->perf.danger_lut_tbl
-					[SDE_QOS_LUT_USAGE_MACROTILE_QSEED];
-			lut_usage = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
-		} else {
-			danger_lut = psde->catalog->perf.danger_lut_tbl
-					[SDE_QOS_LUT_USAGE_MACROTILE];
-			lut_usage = SDE_QOS_LUT_USAGE_MACROTILE;
-		}
-	}
-
-	safe_lut = (u32) _sde_plane_get_qos_lut(
-			&psde->catalog->perf.sfe_lut_tbl[lut_usage], total_fl);
-
-	psde->pipe_qos_cfg.danger_lut = danger_lut;
-	psde->pipe_qos_cfg.safe_lut = safe_lut;
-
-	trace_sde_perf_set_danger_luts(psde->pipe - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
-			(fmt) ? fmt->fetch_mode : 0,
-			psde->pipe_qos_cfg.danger_lut,
-			psde->pipe_qos_cfg.safe_lut);
-
-	SDE_DEBUG("plane%u: pnum:%d fmt:%4.4s mode:%d fl:%d luts[0x%x,0x%x]\n",
-		plane->base.id,
-		psde->pipe - SSPP_VIG0,
-		fmt ? (char *)&fmt->base.pixel_format : NULL,
-		fmt ? fmt->fetch_mode : -1, total_fl,
-		psde->pipe_qos_cfg.danger_lut,
-		psde->pipe_qos_cfg.safe_lut);
-
-	psde->pipe_hw->ops.setup_danger_safe_lut(psde->pipe_hw,
-			&psde->pipe_qos_cfg);
-}
-
-/**
- * _sde_plane_set_qos_ctrl - set QoS control of the given plane
- * @plane:		Pointer to drm plane
- * @enable:		true to enable QoS control
- * @flags:		QoS control mode (enum sde_plane_qos)
- */
-static void _sde_plane_set_qos_ctrl(struct drm_plane *plane,
-	bool enable, u32 flags)
-{
-	struct sde_plane *psde;
-
-	if (!plane) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-
-	if (!psde->pipe_hw || !psde->pipe_sblk) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	} else if (!psde->pipe_hw->ops.setup_qos_ctrl) {
-		return;
-	}
-
-	if (flags & SDE_PLANE_QOS_VBLANK_CTRL) {
-		psde->pipe_qos_cfg.creq_vblank = psde->pipe_sblk->creq_vblank;
-		psde->pipe_qos_cfg.danger_vblank =
-				psde->pipe_sblk->danger_vblank;
-		psde->pipe_qos_cfg.vblank_en = enable;
-	}
-
-	if (flags & SDE_PLANE_QOS_VBLANK_AMORTIZE) {
-		/* this feature overrules previous VBLANK_CTRL */
-		psde->pipe_qos_cfg.vblank_en = false;
-		psde->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
-	}
-
-	if (flags & SDE_PLANE_QOS_PANIC_CTRL)
-		psde->pipe_qos_cfg.danger_safe_en = enable;
-
-	if (!psde->is_rt_pipe) {
-		psde->pipe_qos_cfg.vblank_en = false;
-		psde->pipe_qos_cfg.danger_safe_en = false;
-	}
-
-	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
-		plane->base.id,
-		psde->pipe - SSPP_VIG0,
-		psde->pipe_qos_cfg.danger_safe_en,
-		psde->pipe_qos_cfg.vblank_en,
-		psde->pipe_qos_cfg.creq_vblank,
-		psde->pipe_qos_cfg.danger_vblank,
-		psde->is_rt_pipe);
-
-	psde->pipe_hw->ops.setup_qos_ctrl(psde->pipe_hw,
-			&psde->pipe_qos_cfg);
-}
-
-void sde_plane_set_revalidate(struct drm_plane *plane, bool enable)
-{
-	struct sde_plane *psde;
-
-	if (!plane)
-		return;
-
-	psde = to_sde_plane(plane);
-	psde->revalidate = enable;
-}
-
-int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
-	struct sde_plane *psde;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	int rc;
-
-	if (!plane || !plane->dev) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	psde = to_sde_plane(plane);
-
-	if (!psde->is_rt_pipe)
-		goto end;
-
-	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-			true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		return rc;
-	}
-
-	_sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-end:
-	return 0;
-}
-
-/**
- * _sde_plane_set_ot_limit - set OT limit for the given plane
- * @plane:		Pointer to drm plane
- * @crtc:		Pointer to drm crtc
- */
-static void _sde_plane_set_ot_limit(struct drm_plane *plane,
-		struct drm_crtc *crtc)
-{
-	struct sde_plane *psde;
-	struct sde_vbif_set_ot_params ot_params;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!plane || !plane->dev || !crtc) {
-		SDE_ERROR("invalid arguments plane %d crtc %d\n",
-				!plane, !crtc);
-		return;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw) {
-		SDE_ERROR("invalid pipe reference\n");
-		return;
-	}
-
-	memset(&ot_params, 0, sizeof(ot_params));
-	ot_params.xin_id = psde->pipe_hw->cap->xin_id;
-	ot_params.num = psde->pipe_hw->idx - SSPP_NONE;
-	ot_params.width = psde->pipe_cfg.src_rect.w;
-	ot_params.height = psde->pipe_cfg.src_rect.h;
-	ot_params.is_wfd = !psde->is_rt_pipe;
-	ot_params.frame_rate = crtc->mode.vrefresh;
-	ot_params.vbif_idx = VBIF_RT;
-	ot_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
-	ot_params.rd = true;
-
-	sde_vbif_set_ot_limit(sde_kms, &ot_params);
-}
-
-/**
- * _sde_plane_set_vbif_qos - set vbif QoS for the given plane
- * @plane:		Pointer to drm plane
- */
-static void _sde_plane_set_qos_remap(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_vbif_set_qos_params qos_params;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!plane || !plane->dev) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw) {
-		SDE_ERROR("invalid pipe reference\n");
-		return;
-	}
-
-	memset(&qos_params, 0, sizeof(qos_params));
-	qos_params.vbif_idx = VBIF_RT;
-	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
-	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
-	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
-	qos_params.client_type = psde->is_rt_pipe ?
-					VBIF_RT_CLIENT : VBIF_NRT_CLIENT;
-
-	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
-			plane->base.id, qos_params.num,
-			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.client_type,
-			qos_params.clk_ctrl);
-
-	sde_vbif_set_qos_remap(sde_kms, &qos_params);
-}
-
-/**
- * _sde_plane_set_ts_prefill - set prefill with traffic shaper
- * @plane:	Pointer to drm plane
- * @pstate:	Pointer to sde plane state
- */
-static void _sde_plane_set_ts_prefill(struct drm_plane *plane,
-		struct sde_plane_state *pstate)
-{
-	struct sde_plane *psde;
-	struct sde_hw_pipe_ts_cfg cfg;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!plane || !plane->dev) {
-		SDE_ERROR("invalid arguments");
-		return;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw) {
-		SDE_ERROR("invalid pipe reference\n");
-		return;
-	}
-
-	if (!psde->pipe_hw || !psde->pipe_hw->ops.setup_ts_prefill)
-		return;
-
-	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_VBLANK_AMORTIZE);
-
-	memset(&cfg, 0, sizeof(cfg));
-	cfg.size = sde_plane_get_property(pstate,
-			PLANE_PROP_PREFILL_SIZE);
-	cfg.time = sde_plane_get_property(pstate,
-			PLANE_PROP_PREFILL_TIME);
-
-	SDE_DEBUG("plane%d size:%llu time:%llu\n",
-			plane->base.id, cfg.size, cfg.time);
-	SDE_EVT32_VERBOSE(DRMID(plane), cfg.size, cfg.time);
-	psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
-			pstate->multirect_index);
-}
-
-/* helper to update a state's input fence pointer from the property */
-static void _sde_plane_set_input_fence(struct sde_plane *psde,
-		struct sde_plane_state *pstate, uint64_t fd)
-{
-	if (!psde || !pstate) {
-		SDE_ERROR("invalid arg(s), plane %d state %d\n",
-				!psde, !pstate);
-		return;
-	}
-
-	/* clear previous reference */
-	if (pstate->input_fence)
-		sde_sync_put(pstate->input_fence);
-
-	/* get fence pointer for later */
-	if (fd == 0)
-		pstate->input_fence = NULL;
-	else
-		pstate->input_fence = sde_sync_get(fd);
-
-	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
-}
-
-int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-	uint32_t prefix;
-	void *input_fence;
-	int ret = -EINVAL;
-	signed long rc;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-	} else if (!plane->state) {
-		SDE_ERROR_PLANE(to_sde_plane(plane), "invalid state\n");
-	} else {
-		psde = to_sde_plane(plane);
-		pstate = to_sde_plane_state(plane->state);
-		input_fence = pstate->input_fence;
-
-		if (input_fence) {
-			prefix = sde_sync_get_name_prefix(input_fence);
-			rc = sde_sync_wait(input_fence, wait_ms);
-
-			switch (rc) {
-			case 0:
-				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
-						wait_ms, prefix);
-				psde->is_error = true;
-				sde_kms_timeline_status(plane->dev);
-				ret = -ETIMEDOUT;
-				break;
-			case -ERESTARTSYS:
-				SDE_ERROR_PLANE(psde,
-					"%ums wait interrupted on %08X\n",
-					wait_ms, prefix);
-				psde->is_error = true;
-				ret = -ERESTARTSYS;
-				break;
-			case -EINVAL:
-				SDE_ERROR_PLANE(psde,
-					"invalid fence param for %08X\n",
-						prefix);
-				psde->is_error = true;
-				ret = -EINVAL;
-				break;
-			default:
-				SDE_DEBUG_PLANE(psde, "signaled\n");
-				ret = 0;
-				break;
-			}
-
-			SDE_EVT32_VERBOSE(DRMID(plane), -ret, prefix);
-		} else {
-			ret = 0;
-		}
-	}
-	return ret;
-}
-
-/**
- * _sde_plane_get_aspace: gets the address space based on the
- *            fb_translation mode property
- */
-static int _sde_plane_get_aspace(
-		struct sde_plane *psde,
-		struct sde_plane_state *pstate,
-		struct msm_gem_address_space **aspace)
-{
-	struct sde_kms *kms;
-	int mode;
-
-	if (!psde || !pstate || !aspace) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	kms = _sde_plane_get_kms(&psde->base);
-	if (!kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	mode = sde_plane_get_property(pstate,
-			PLANE_PROP_FB_TRANSLATION_MODE);
-
-	switch (mode) {
-	case SDE_DRM_FB_NON_SEC:
-		*aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
-		if (!aspace)
-			return -EINVAL;
-		break;
-	case SDE_DRM_FB_SEC:
-		*aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
-		if (!aspace)
-			return -EINVAL;
-		break;
-	case SDE_DRM_FB_SEC_DIR_TRANS:
-		*aspace = NULL;
-		break;
-	default:
-		SDE_ERROR("invalid fb_translation mode:%d\n", mode);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-static inline void _sde_plane_set_scanout(struct drm_plane *plane,
-		struct sde_plane_state *pstate,
-		struct sde_hw_pipe_cfg *pipe_cfg,
-		struct drm_framebuffer *fb)
-{
-	struct sde_plane *psde;
-	struct msm_gem_address_space *aspace = NULL;
-	int ret, mode;
-	bool secure = false;
-
-	if (!plane || !pstate || !pipe_cfg || !fb) {
-		SDE_ERROR(
-			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
-			!plane, !pstate, !pipe_cfg, !fb);
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw) {
-		SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
-		return;
-	}
-
-	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
-	if (ret) {
-		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
-		return;
-	}
-
-	/*
-	 * framebuffer prepare is deferred for prepare_fb calls that
-	 * happen during the transition from secure to non-secure.
-	 * Handle the prepare at this point for such cases. This can be
-	 * expected for one or two frames during the transition.
-	 */
-	if (aspace && pstate->defer_prepare_fb) {
-		SDE_EVT32(DRMID(plane), psde->pipe, aspace->domain_attached);
-		ret = msm_framebuffer_prepare(fb, pstate->aspace);
-		if (ret) {
-			SDE_ERROR_PLANE(psde,
-				"failed to prepare framebuffer %d\n", ret);
-			return;
-		}
-		pstate->defer_prepare_fb = false;
-	}
-	mode = sde_plane_get_property(pstate, PLANE_PROP_FB_TRANSLATION_MODE);
-	if ((mode == SDE_DRM_FB_SEC) || (mode == SDE_DRM_FB_SEC_DIR_TRANS))
-		secure = true;
-
-	ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
-	if (ret == -EAGAIN)
-		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
-	else if (ret) {
-		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
-
-		/*
-		 * Force solid fill color on error. This is to prevent
-		 * smmu faults during secure session transition.
-		 */
-		psde->is_error = true;
-	} else if (psde->pipe_hw->ops.setup_sourceaddress) {
-		SDE_EVT32_VERBOSE(psde->pipe_hw->idx,
-				pipe_cfg->layout.width,
-				pipe_cfg->layout.height,
-				pipe_cfg->layout.plane_addr[0],
-				pipe_cfg->layout.plane_size[0],
-				pipe_cfg->layout.plane_addr[1],
-				pipe_cfg->layout.plane_size[1],
-				pipe_cfg->layout.plane_addr[2],
-				pipe_cfg->layout.plane_size[2],
-				pipe_cfg->layout.plane_addr[3],
-				pipe_cfg->layout.plane_size[3],
-				pstate->multirect_index,
-				secure);
-		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg,
-						pstate->multirect_index);
-	}
-}
-
-static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
-		struct sde_plane_state *pstate)
-{
-	struct sde_hw_scaler3_cfg *cfg;
-	int ret = 0;
-
-	if (!psde || !pstate) {
-		SDE_ERROR("invalid args\n");
-		return -EINVAL;
-	}
-
-	cfg = &pstate->scaler3_cfg;
-
-	cfg->dir_lut = msm_property_get_blob(
-			&psde->property_info,
-			&pstate->property_state, &cfg->dir_len,
-			PLANE_PROP_SCALER_LUT_ED);
-	cfg->cir_lut = msm_property_get_blob(
-			&psde->property_info,
-			&pstate->property_state, &cfg->cir_len,
-			PLANE_PROP_SCALER_LUT_CIR);
-	cfg->sep_lut = msm_property_get_blob(
-			&psde->property_info,
-			&pstate->property_state, &cfg->sep_len,
-			PLANE_PROP_SCALER_LUT_SEP);
-	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
-		ret = -ENODATA;
-	return ret;
-}
-
-static int _sde_plane_setup_scaler3lite_lut(struct sde_plane *psde,
-		struct sde_plane_state *pstate)
-{
-	struct sde_hw_scaler3_cfg *cfg;
-
-	cfg = &pstate->scaler3_cfg;
-
-	cfg->sep_lut = msm_property_get_blob(
-			&psde->property_info,
-			&pstate->property_state, &cfg->sep_len,
-			PLANE_PROP_SCALER_LUT_SEP);
-
-	return cfg->sep_lut ? 0 : -ENODATA;
-}
-
-static void _sde_plane_setup_scaler3(struct sde_plane *psde,
-		struct sde_plane_state *pstate, const struct sde_format *fmt,
-		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
-{
-	uint32_t decimated, i, src_w, src_h, dst_w, dst_h;
-	struct sde_hw_scaler3_cfg *scale_cfg;
-
-	if (!psde || !pstate || !fmt ||
-			!chroma_subsmpl_h || !chroma_subsmpl_v) {
-		SDE_ERROR("psde %d pstate %d fmt %d smp_h %d smp_v %d\n",
-				!!psde, !!pstate, !!fmt, chroma_subsmpl_h,
-				chroma_subsmpl_v);
-		return;
-	}
-
-	scale_cfg = &pstate->scaler3_cfg;
-	src_w = psde->pipe_cfg.src_rect.w;
-	src_h = psde->pipe_cfg.src_rect.h;
-	dst_w = psde->pipe_cfg.dst_rect.w;
-	dst_h = psde->pipe_cfg.dst_rect.h;
-
-	memset(scale_cfg, 0, sizeof(*scale_cfg));
-	memset(&pstate->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
-
-	/*
-	 * For inline rotation cases, scaler config is post-rotation,
-	 * so swap the dimensions here. However, pixel extension will
-	 * need pre-rotation settings, this will be corrected below
-	 * when calculating pixel extension settings.
-	 */
-	if (pstate->rotation & DRM_MODE_ROTATE_90)
-		swap(src_w, src_h);
-
-	decimated = DECIMATED_DIMENSION(src_w,
-			psde->pipe_cfg.horz_decimation);
-	scale_cfg->phase_step_x[SDE_SSPP_COMP_0] =
-		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_w);
-	decimated = DECIMATED_DIMENSION(src_h,
-			psde->pipe_cfg.vert_decimation);
-	scale_cfg->phase_step_y[SDE_SSPP_COMP_0] =
-		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_h);
-
-
-	scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2] =
-		scale_cfg->phase_step_y[SDE_SSPP_COMP_0] / chroma_subsmpl_v;
-	scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2] =
-		scale_cfg->phase_step_x[SDE_SSPP_COMP_0] / chroma_subsmpl_h;
-
-	scale_cfg->phase_step_x[SDE_SSPP_COMP_2] =
-		scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2];
-	scale_cfg->phase_step_y[SDE_SSPP_COMP_2] =
-		scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2];
-
-	scale_cfg->phase_step_x[SDE_SSPP_COMP_3] =
-		scale_cfg->phase_step_x[SDE_SSPP_COMP_0];
-	scale_cfg->phase_step_y[SDE_SSPP_COMP_3] =
-		scale_cfg->phase_step_y[SDE_SSPP_COMP_0];
-
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		scale_cfg->src_width[i] = DECIMATED_DIMENSION(src_w,
-				psde->pipe_cfg.horz_decimation);
-		scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
-				psde->pipe_cfg.vert_decimation);
-		if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
-			scale_cfg->src_width[i] /= chroma_subsmpl_h;
-			scale_cfg->src_height[i] /= chroma_subsmpl_v;
-		}
-		scale_cfg->preload_x[i] = psde->pipe_sblk->scaler_blk.h_preload;
-		scale_cfg->preload_y[i] = psde->pipe_sblk->scaler_blk.v_preload;
-
-		/* For pixel extension we need the pre-rotated orientation */
-		if (pstate->rotation & DRM_MODE_ROTATE_90) {
-			pstate->pixel_ext.num_ext_pxls_top[i] =
-				scale_cfg->src_width[i];
-			pstate->pixel_ext.num_ext_pxls_left[i] =
-				scale_cfg->src_height[i];
-		} else {
-			pstate->pixel_ext.num_ext_pxls_top[i] =
-				scale_cfg->src_height[i];
-			pstate->pixel_ext.num_ext_pxls_left[i] =
-				scale_cfg->src_width[i];
-		}
-	}
-
-	if ((!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
-		&& (src_w == dst_w)) || pstate->multirect_mode)
-		return;
-
-	SDE_DEBUG_PLANE(psde,
-		"setting bilinear: src:%dx%d dst:%dx%d chroma:%dx%d fmt:%x\n",
-			src_w, src_h, dst_w, dst_h,
-			chroma_subsmpl_v, chroma_subsmpl_h,
-			fmt->base.pixel_format);
-
-	scale_cfg->dst_width = dst_w;
-	scale_cfg->dst_height = dst_h;
-	scale_cfg->y_rgb_filter_cfg = SDE_SCALE_BIL;
-	scale_cfg->uv_filter_cfg = SDE_SCALE_BIL;
-	scale_cfg->alpha_filter_cfg = SDE_SCALE_ALPHA_BIL;
-	scale_cfg->lut_flag = 0;
-	scale_cfg->blend_cfg = 1;
-	scale_cfg->enable = 1;
-	scale_cfg->dyn_exp_disabled = SDE_QSEED_DEFAULT_DYN_EXP;
-}
-
-/**
- * _sde_plane_setup_scaler2 - determine default scaler phase steps/filter type
- * @psde: Pointer to SDE plane object
- * @src: Source size
- * @dst: Destination size
- * @phase_steps: Pointer to output array for phase steps
- * @filter: Pointer to output array for filter type
- * @fmt: Pointer to format definition
- * @chroma_subsampling: Subsampling amount for chroma channel
- *
- * Returns: 0 on success
- */
-static int _sde_plane_setup_scaler2(struct sde_plane *psde,
-		uint32_t src, uint32_t dst, uint32_t *phase_steps,
-		enum sde_hw_filter *filter, const struct sde_format *fmt,
-		uint32_t chroma_subsampling)
-{
-	if (!psde || !phase_steps || !filter || !fmt) {
-		SDE_ERROR(
-			"invalid arg(s), plane %d phase %d filter %d fmt %d\n",
-			!psde, !phase_steps, !filter, !fmt);
-		return -EINVAL;
-	}
-
-	/* calculate phase steps, leave init phase as zero */
-	phase_steps[SDE_SSPP_COMP_0] =
-		mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
-	phase_steps[SDE_SSPP_COMP_1_2] =
-		phase_steps[SDE_SSPP_COMP_0] / chroma_subsampling;
-	phase_steps[SDE_SSPP_COMP_2] = phase_steps[SDE_SSPP_COMP_1_2];
-	phase_steps[SDE_SSPP_COMP_3] = phase_steps[SDE_SSPP_COMP_0];
-
-	/* calculate scaler config, if necessary */
-	if (SDE_FORMAT_IS_YUV(fmt) || src != dst) {
-		filter[SDE_SSPP_COMP_3] =
-			(src <= dst) ? SDE_SCALE_FILTER_BIL :
-			SDE_SCALE_FILTER_PCMN;
-
-		if (SDE_FORMAT_IS_YUV(fmt)) {
-			filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_CA;
-			filter[SDE_SSPP_COMP_1_2] = filter[SDE_SSPP_COMP_3];
-		} else {
-			filter[SDE_SSPP_COMP_0] = filter[SDE_SSPP_COMP_3];
-			filter[SDE_SSPP_COMP_1_2] =
-				SDE_SCALE_FILTER_NEAREST;
-		}
-	} else {
-		/* disable scaler */
-		filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_MAX;
-		filter[SDE_SSPP_COMP_1_2] = SDE_SCALE_FILTER_MAX;
-		filter[SDE_SSPP_COMP_3] = SDE_SCALE_FILTER_MAX;
-	}
-	return 0;
-}
-
-/**
- * _sde_plane_setup_pixel_ext - determine default pixel extension values
- * @psde: Pointer to SDE plane object
- * @src: Source size
- * @dst: Destination size
- * @decimated_src: Source size after decimation, if any
- * @phase_steps: Pointer to output array for phase steps
- * @out_src: Output array for pixel extension values
- * @out_edge1: Output array for pixel extension first edge
- * @out_edge2: Output array for pixel extension second edge
- * @filter: Pointer to array for filter type
- * @fmt: Pointer to format definition
- * @chroma_subsampling: Subsampling amount for chroma channel
- * @post_compare: Whether to chroma subsampled source size for comparisions
- */
-static void _sde_plane_setup_pixel_ext(struct sde_plane *psde,
-		uint32_t src, uint32_t dst, uint32_t decimated_src,
-		uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
-		int *out_edge2, enum sde_hw_filter *filter,
-		const struct sde_format *fmt, uint32_t chroma_subsampling,
-		bool post_compare)
-{
-	int64_t edge1, edge2, caf;
-	uint32_t src_work;
-	int i, tmp;
-
-	if (psde && phase_steps && out_src && out_edge1 &&
-			out_edge2 && filter && fmt) {
-		/* handle CAF for YUV formats */
-		if (SDE_FORMAT_IS_YUV(fmt) && *filter == SDE_SCALE_FILTER_CA)
-			caf = PHASE_STEP_UNIT_SCALE;
-		else
-			caf = 0;
-
-		for (i = 0; i < SDE_MAX_PLANES; i++) {
-			src_work = decimated_src;
-			if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2)
-				src_work /= chroma_subsampling;
-			if (post_compare)
-				src = src_work;
-			if (!SDE_FORMAT_IS_YUV(fmt) && (src == dst)) {
-				/* unity */
-				edge1 = 0;
-				edge2 = 0;
-			} else if (dst >= src) {
-				/* upscale */
-				edge1 = (1 << PHASE_RESIDUAL);
-				edge1 -= caf;
-				edge2 = (1 << PHASE_RESIDUAL);
-				edge2 += (dst - 1) * *(phase_steps + i);
-				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
-				edge2 += caf;
-				edge2 = -(edge2);
-			} else {
-				/* downscale */
-				edge1 = 0;
-				edge2 = (dst - 1) * *(phase_steps + i);
-				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
-				edge2 += *(phase_steps + i);
-				edge2 = -(edge2);
-			}
-
-			/* only enable CAF for luma plane */
-			caf = 0;
-
-			/* populate output arrays */
-			*(out_src + i) = src_work;
-
-			/* edge updates taken from __pxl_extn_helper */
-			if (edge1 >= 0) {
-				tmp = (uint32_t)edge1;
-				tmp >>= PHASE_STEP_SHIFT;
-				*(out_edge1 + i) = -tmp;
-			} else {
-				tmp = (uint32_t)(-edge1);
-				*(out_edge1 + i) =
-					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
-					PHASE_STEP_SHIFT;
-			}
-			if (edge2 >= 0) {
-				tmp = (uint32_t)edge2;
-				tmp >>= PHASE_STEP_SHIFT;
-				*(out_edge2 + i) = -tmp;
-			} else {
-				tmp = (uint32_t)(-edge2);
-				*(out_edge2 + i) =
-					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
-					PHASE_STEP_SHIFT;
-			}
-		}
-	}
-}
-
-static inline void _sde_plane_setup_csc(struct sde_plane *psde)
-{
-	static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
-		{
-			/* S15.16 format */
-			0x00012A00, 0x00000000, 0x00019880,
-			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
-			0x00012A00, 0x00020480, 0x00000000,
-		},
-		/* signed bias */
-		{ 0xfff0, 0xff80, 0xff80,},
-		{ 0x0, 0x0, 0x0,},
-		/* unsigned clamp */
-		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
-		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
-	};
-	static const struct sde_csc_cfg sde_csc10_YUV2RGB_601L = {
-		{
-			/* S15.16 format */
-			0x00012A00, 0x00000000, 0x00019880,
-			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
-			0x00012A00, 0x00020480, 0x00000000,
-			},
-		/* signed bias */
-		{ 0xffc0, 0xfe00, 0xfe00,},
-		{ 0x0, 0x0, 0x0,},
-		/* unsigned clamp */
-		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
-		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
-	};
-
-	if (!psde) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	/* revert to kernel default if override not available */
-	if (psde->csc_usr_ptr)
-		psde->csc_ptr = psde->csc_usr_ptr;
-	else if (BIT(SDE_SSPP_CSC_10BIT) & psde->features)
-		psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
-	else
-		psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
-
-	SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
-			psde->csc_ptr->csc_mv[0],
-			psde->csc_ptr->csc_mv[1],
-			psde->csc_ptr->csc_mv[2]);
-}
-
-static void sde_color_process_plane_setup(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-	uint32_t hue, saturation, value, contrast;
-	struct drm_msm_memcol *memcol = NULL;
-	struct drm_msm_3d_gamut *vig_gamut = NULL;
-	struct drm_msm_igc_lut *igc = NULL;
-	struct drm_msm_pgc_lut *gc = NULL;
-	size_t memcol_sz = 0, size = 0;
-	struct sde_hw_cp_cfg hw_cfg = {};
-	struct sde_hw_ctl *ctl = _sde_plane_get_hw_ctl(plane);
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(plane->state);
-
-	hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
-	if (psde->pipe_hw->ops.setup_pa_hue)
-		psde->pipe_hw->ops.setup_pa_hue(psde->pipe_hw, &hue);
-	saturation = (uint32_t) sde_plane_get_property(pstate,
-		PLANE_PROP_SATURATION_ADJUST);
-	if (psde->pipe_hw->ops.setup_pa_sat)
-		psde->pipe_hw->ops.setup_pa_sat(psde->pipe_hw, &saturation);
-	value = (uint32_t) sde_plane_get_property(pstate,
-		PLANE_PROP_VALUE_ADJUST);
-	if (psde->pipe_hw->ops.setup_pa_val)
-		psde->pipe_hw->ops.setup_pa_val(psde->pipe_hw, &value);
-	contrast = (uint32_t) sde_plane_get_property(pstate,
-		PLANE_PROP_CONTRAST_ADJUST);
-	if (psde->pipe_hw->ops.setup_pa_cont)
-		psde->pipe_hw->ops.setup_pa_cont(psde->pipe_hw, &contrast);
-
-	if (psde->pipe_hw->ops.setup_pa_memcolor) {
-		/* Skin memory color setup */
-		memcol = msm_property_get_blob(&psde->property_info,
-					&pstate->property_state,
-					&memcol_sz,
-					PLANE_PROP_SKIN_COLOR);
-		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
-					MEMCOLOR_SKIN, memcol);
-
-		/* Sky memory color setup */
-		memcol = msm_property_get_blob(&psde->property_info,
-					&pstate->property_state,
-					&memcol_sz,
-					PLANE_PROP_SKY_COLOR);
-		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
-					MEMCOLOR_SKY, memcol);
-
-		/* Foliage memory color setup */
-		memcol = msm_property_get_blob(&psde->property_info,
-					&pstate->property_state,
-					&memcol_sz,
-					PLANE_PROP_FOLIAGE_COLOR);
-		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
-					MEMCOLOR_FOLIAGE, memcol);
-	}
-
-	if (pstate->dirty & SDE_PLANE_DIRTY_VIG_GAMUT &&
-			psde->pipe_hw->ops.setup_vig_gamut) {
-		vig_gamut = msm_property_get_blob(&psde->property_info,
-				&pstate->property_state,
-				&size,
-				PLANE_PROP_VIG_GAMUT);
-		hw_cfg.last_feature = 0;
-		hw_cfg.ctl = ctl;
-		hw_cfg.len = sizeof(struct drm_msm_3d_gamut);
-		hw_cfg.payload = vig_gamut;
-		psde->pipe_hw->ops.setup_vig_gamut(psde->pipe_hw, &hw_cfg);
-	}
-
-	if (pstate->dirty & SDE_PLANE_DIRTY_VIG_IGC &&
-			psde->pipe_hw->ops.setup_vig_igc) {
-		igc = msm_property_get_blob(&psde->property_info,
-				&pstate->property_state,
-				&size,
-				PLANE_PROP_VIG_IGC);
-		hw_cfg.last_feature = 0;
-		hw_cfg.ctl = ctl;
-		hw_cfg.len = sizeof(struct drm_msm_igc_lut);
-		hw_cfg.payload = igc;
-		psde->pipe_hw->ops.setup_vig_igc(psde->pipe_hw, &hw_cfg);
-	}
-
-	if (pstate->dirty & SDE_PLANE_DIRTY_DMA_IGC &&
-			psde->pipe_hw->ops.setup_dma_igc) {
-		igc = msm_property_get_blob(&psde->property_info,
-				&pstate->property_state,
-				&size,
-				PLANE_PROP_DMA_IGC);
-		hw_cfg.last_feature = 0;
-		hw_cfg.ctl = ctl;
-		hw_cfg.len = sizeof(struct drm_msm_igc_lut);
-		hw_cfg.payload = igc;
-		psde->pipe_hw->ops.setup_dma_igc(psde->pipe_hw, &hw_cfg,
-				pstate->multirect_index);
-	}
-
-	if (pstate->dirty & SDE_PLANE_DIRTY_DMA_GC &&
-			psde->pipe_hw->ops.setup_dma_gc) {
-		gc = msm_property_get_blob(&psde->property_info,
-				&pstate->property_state,
-				&size,
-				PLANE_PROP_DMA_GC);
-		hw_cfg.last_feature = 0;
-		hw_cfg.ctl = ctl;
-		hw_cfg.len = sizeof(struct drm_msm_pgc_lut);
-		hw_cfg.payload = gc;
-		psde->pipe_hw->ops.setup_dma_gc(psde->pipe_hw, &hw_cfg,
-				pstate->multirect_index);
-	}
-}
-
-static void _sde_plane_setup_scaler(struct sde_plane *psde,
-		struct sde_plane_state *pstate,
-		const struct sde_format *fmt, bool color_fill)
-{
-	struct sde_hw_pixel_ext *pe;
-	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
-
-	if (!psde || !fmt || !pstate) {
-		SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
-				!psde, !fmt, !pstate);
-		return;
-	}
-
-	pe = &pstate->pixel_ext;
-
-	psde->pipe_cfg.horz_decimation =
-		sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
-	psde->pipe_cfg.vert_decimation =
-		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
-
-	/* don't chroma subsample if decimating */
-	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
-		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
-	chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 :
-		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
-
-	/* update scaler */
-	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3) ||
-			(psde->features & BIT(SDE_SSPP_SCALER_QSEED3LITE))) {
-		int rc = -EINVAL;
-
-		if (!color_fill && !psde->debugfs_default_scale)
-			rc = is_qseed3_rev_qseed3lite(psde->pipe_hw->catalog) ?
-			_sde_plane_setup_scaler3lite_lut(psde, pstate) :
-				_sde_plane_setup_scaler3_lut(psde, pstate);
-		if (rc || pstate->scaler_check_state !=
-					SDE_PLANE_SCLCHECK_SCALER_V2) {
-			SDE_EVT32(DRMID(&psde->base), color_fill,
-					pstate->scaler_check_state,
-					psde->debugfs_default_scale, rc,
-					psde->pipe_cfg.src_rect.w,
-					psde->pipe_cfg.src_rect.h,
-					psde->pipe_cfg.dst_rect.w,
-					psde->pipe_cfg.dst_rect.h,
-					pstate->multirect_mode);
-
-			/* calculate default config for QSEED3 */
-			_sde_plane_setup_scaler3(psde, pstate, fmt,
-					chroma_subsmpl_h, chroma_subsmpl_v);
-		}
-	} else if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 ||
-			color_fill || psde->debugfs_default_scale) {
-		uint32_t deci_dim, i;
-
-		/* calculate default configuration for QSEED2 */
-		memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
-
-		SDE_DEBUG_PLANE(psde, "default config\n");
-		deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
-				psde->pipe_cfg.horz_decimation);
-		_sde_plane_setup_scaler2(psde,
-				deci_dim,
-				psde->pipe_cfg.dst_rect.w,
-				pe->phase_step_x,
-				pe->horz_filter, fmt, chroma_subsmpl_h);
-
-		if (SDE_FORMAT_IS_YUV(fmt))
-			deci_dim &= ~0x1;
-		_sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.w,
-				psde->pipe_cfg.dst_rect.w, deci_dim,
-				pe->phase_step_x,
-				pe->roi_w,
-				pe->num_ext_pxls_left,
-				pe->num_ext_pxls_right, pe->horz_filter, fmt,
-				chroma_subsmpl_h, 0);
-
-		deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.h,
-				psde->pipe_cfg.vert_decimation);
-		_sde_plane_setup_scaler2(psde,
-				deci_dim,
-				psde->pipe_cfg.dst_rect.h,
-				pe->phase_step_y,
-				pe->vert_filter, fmt, chroma_subsmpl_v);
-		_sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.h,
-				psde->pipe_cfg.dst_rect.h, deci_dim,
-				pe->phase_step_y,
-				pe->roi_h,
-				pe->num_ext_pxls_top,
-				pe->num_ext_pxls_btm, pe->vert_filter, fmt,
-				chroma_subsmpl_v, 1);
-
-		for (i = 0; i < SDE_MAX_PLANES; i++) {
-			if (pe->num_ext_pxls_left[i] >= 0)
-				pe->left_rpt[i] = pe->num_ext_pxls_left[i];
-			else
-				pe->left_ftch[i] = pe->num_ext_pxls_left[i];
-
-			if (pe->num_ext_pxls_right[i] >= 0)
-				pe->right_rpt[i] = pe->num_ext_pxls_right[i];
-			else
-				pe->right_ftch[i] = pe->num_ext_pxls_right[i];
-
-			if (pe->num_ext_pxls_top[i] >= 0)
-				pe->top_rpt[i] = pe->num_ext_pxls_top[i];
-			else
-				pe->top_ftch[i] = pe->num_ext_pxls_top[i];
-
-			if (pe->num_ext_pxls_btm[i] >= 0)
-				pe->btm_rpt[i] = pe->num_ext_pxls_btm[i];
-			else
-				pe->btm_ftch[i] = pe->num_ext_pxls_btm[i];
-		}
-	}
-}
-
-/**
- * _sde_plane_color_fill - enables color fill on plane
- * @psde:   Pointer to SDE plane object
- * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
- * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
- * Returns: 0 on success
- */
-static int _sde_plane_color_fill(struct sde_plane *psde,
-		uint32_t color, uint32_t alpha)
-{
-	const struct sde_format *fmt;
-	const struct drm_plane *plane;
-	struct sde_plane_state *pstate;
-	bool blend_enable = true;
-
-	if (!psde || !psde->base.state) {
-		SDE_ERROR("invalid plane\n");
-		return -EINVAL;
-	}
-
-	if (!psde->pipe_hw) {
-		SDE_ERROR_PLANE(psde, "invalid plane h/w pointer\n");
-		return -EINVAL;
-	}
-
-	plane = &psde->base;
-	pstate = to_sde_plane_state(plane->state);
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	/*
-	 * select fill format to match user property expectation,
-	 * h/w only supports RGB variants
-	 */
-	fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
-
-	blend_enable = (SDE_DRM_BLEND_OP_OPAQUE !=
-			sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
-
-	/* update sspp */
-	if (fmt && psde->pipe_hw->ops.setup_solidfill) {
-		psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
-				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
-				pstate->multirect_index);
-
-		/* override scaler/decimation if solid fill */
-		psde->pipe_cfg.src_rect.x = 0;
-		psde->pipe_cfg.src_rect.y = 0;
-		psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
-		psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
-		_sde_plane_setup_scaler(psde, pstate, fmt, true);
-
-		if (psde->pipe_hw->ops.setup_format)
-			psde->pipe_hw->ops.setup_format(psde->pipe_hw,
-					fmt, blend_enable,
-					SDE_SSPP_SOLID_FILL,
-					pstate->multirect_index);
-
-		if (psde->pipe_hw->ops.setup_rects)
-			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
-					&psde->pipe_cfg,
-					pstate->multirect_index);
-
-		if (psde->pipe_hw->ops.setup_pe)
-			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
-					&pstate->pixel_ext);
-		if (psde->pipe_hw->ops.setup_scaler &&
-				pstate->multirect_index != SDE_SSPP_RECT_1) {
-			psde->pipe_hw->ctl = _sde_plane_get_hw_ctl(plane);
-			psde->pipe_hw->ops.setup_scaler(psde->pipe_hw,
-					&psde->pipe_cfg, &pstate->pixel_ext,
-					&pstate->scaler3_cfg);
-		}
-	}
-
-	return 0;
-}
-
-/**
-* sde_plane_rot_atomic_check - verify rotator update of the given state
-* @plane: Pointer to drm plane
-* @state: Pointer to drm plane state to be validated
-* return: 0 if success; error code otherwise
-*/
-static int sde_plane_rot_atomic_check(struct drm_plane *plane,
-	struct drm_plane_state *state)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate, *old_pstate;
-	int ret = 0;
-	u32 rotation;
-
-	if (!plane || !state) {
-		SDE_ERROR("invalid plane/state\n");
-		return -EINVAL;
-	}
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(state);
-	old_pstate = to_sde_plane_state(plane->state);
-
-	/* check inline rotation and simplify the transform */
-	rotation = drm_rotation_simplify(
-			state->rotation,
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
-
-	if ((rotation & DRM_MODE_ROTATE_180) ||
-		(rotation & DRM_MODE_ROTATE_270)) {
-		SDE_ERROR_PLANE(psde,
-			"invalid rotation transform must be simplified 0x%x\n",
-			rotation);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	if (rotation & DRM_MODE_ROTATE_90) {
-		struct msm_drm_private *priv = plane->dev->dev_private;
-		struct sde_kms *sde_kms;
-		const struct msm_format *msm_fmt;
-		const struct sde_format *fmt;
-		struct sde_rect src;
-		bool q16_data = true;
-
-		POPULATE_RECT(&src, state->src_x, state->src_y,
-			state->src_w, state->src_h, q16_data);
-		/*
-		 * DRM framework expects rotation flag in counter-clockwise
-		 * direction and the HW expects in clockwise direction.
-		 * Flip the flags to match with HW.
-		 */
-		rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
-
-		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt_num ||
-			!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom ||
-			!psde->pipe_sblk->in_rot_maxdwnscale_nrt ||
-			!psde->pipe_sblk->in_rot_maxheight ||
-			!psde->pipe_sblk->in_rot_format_list ||
-			!(psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1))) {
-			SDE_ERROR_PLANE(psde,
-			    "wrong config rt:%d/%d nrt:%d fmt:%d h:%d 0x%x\n",
-				!psde->pipe_sblk->in_rot_maxdwnscale_rt_num,
-				!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom,
-				!psde->pipe_sblk->in_rot_maxdwnscale_nrt,
-				!psde->pipe_sblk->in_rot_format_list,
-				!psde->pipe_sblk->in_rot_maxheight,
-				psde->features);
-			ret = -EINVAL;
-			goto exit;
-		}
-
-		/* check for valid height */
-		if (src.h > psde->pipe_sblk->in_rot_maxheight) {
-			SDE_ERROR_PLANE(psde,
-				"invalid height for inline rot:%d max:%d\n",
-				src.h, psde->pipe_sblk->in_rot_maxheight);
-			ret = -EINVAL;
-			goto exit;
-		}
-
-		if (!sde_plane_enabled(state))
-			goto exit;
-
-		/* check for valid formats supported by inline rot */
-		sde_kms = to_sde_kms(priv->kms);
-		msm_fmt = msm_framebuffer_format(state->fb);
-		fmt = to_sde_format(msm_fmt);
-		ret = sde_format_validate_fmt(&sde_kms->base, fmt,
-			psde->pipe_sblk->in_rot_format_list);
-	}
-
-exit:
-	pstate->rotation = rotation;
-	return ret;
-}
-
-static bool _sde_plane_halt_requests(struct drm_plane *plane,
-		uint32_t xin_id, bool halt_forced_clk, bool enable)
-{
-	struct sde_plane *psde;
-	struct msm_drm_private *priv;
-	struct sde_vbif_set_xin_halt_params halt_params;
-
-	if (!plane || !plane->dev) {
-		SDE_ERROR("invalid arguments\n");
-		return false;
-	}
-
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw || !psde->pipe_hw->cap) {
-		SDE_ERROR("invalid pipe reference\n");
-		return false;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return false;
-	}
-
-	memset(&halt_params, 0, sizeof(halt_params));
-	halt_params.vbif_idx = VBIF_RT;
-	halt_params.xin_id = xin_id;
-	halt_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
-	halt_params.forced_on = halt_forced_clk;
-	halt_params.enable = enable;
-
-	return sde_vbif_set_xin_halt(to_sde_kms(priv->kms), &halt_params);
-}
-
-void sde_plane_halt_requests(struct drm_plane *plane, bool enable)
-{
-	struct sde_plane *psde;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	if (!psde->pipe_hw || !psde->pipe_hw->cap) {
-		SDE_ERROR("invalid pipe reference\n");
-		return;
-	}
-
-	SDE_EVT32(DRMID(plane), psde->xin_halt_forced_clk, enable);
-
-	psde->xin_halt_forced_clk =
-		_sde_plane_halt_requests(plane, psde->pipe_hw->cap->xin_id,
-				psde->xin_halt_forced_clk, enable);
-}
-
-void sde_plane_secure_ctrl_xin_client(struct drm_plane *plane,
-				struct drm_crtc *crtc)
-{
-	struct sde_plane *psde;
-
-	if (!plane || !crtc) {
-		SDE_ERROR("invalid plane/crtc\n");
-		return;
-	}
-	psde = to_sde_plane(plane);
-
-	if (psde->features & BIT(SDE_SSPP_BLOCK_SEC_UI))
-		return;
-
-	/* do all VBIF programming for the sec-ui allowed SSPP */
-	_sde_plane_set_qos_remap(plane);
-	_sde_plane_set_ot_limit(plane, crtc);
-}
-
-/**
- * sde_plane_rot_install_properties - install plane rotator properties
- * @plane: Pointer to drm plane
- * @catalog: Pointer to mdss configuration
- * return: none
- */
-static void sde_plane_rot_install_properties(struct drm_plane *plane,
-	struct sde_mdss_cfg *catalog)
-{
-	struct sde_plane *psde = to_sde_plane(plane);
-	unsigned long supported_rotations = DRM_MODE_ROTATE_0 |
-			DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
-	int ret = 0;
-
-	if (!plane || !psde) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	} else if (!catalog) {
-		SDE_ERROR("invalid catalog\n");
-		return;
-	}
-
-	if (psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1))
-		supported_rotations |= DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-
-	ret = drm_plane_create_rotation_property(plane,
-			DRM_MODE_ROTATE_0, supported_rotations);
-	if (ret) {
-		DRM_ERROR("create rotation property failed: %d\n", ret);
-		return;
-	}
-}
-
-void sde_plane_clear_multirect(const struct drm_plane_state *drm_state)
-{
-	struct sde_plane_state *pstate;
-
-	if (!drm_state)
-		return;
-
-	pstate = to_sde_plane_state(drm_state);
-
-	pstate->multirect_index = SDE_SSPP_RECT_SOLO;
-	pstate->multirect_mode = SDE_SSPP_MULTIRECT_NONE;
-}
-
-/**
- * multi_rect validate API allows to validate only R0 and R1 RECT
- * passing for each plane. Client of this API must not pass multiple
- * plane which are not sharing same XIN client. Such calls will fail
- * even though kernel client is passing valid multirect configuration.
- */
-int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
-{
-	struct sde_plane_state *pstate[R_MAX];
-	const struct drm_plane_state *drm_state[R_MAX];
-	struct sde_rect src[R_MAX], dst[R_MAX];
-	struct sde_plane *sde_plane[R_MAX];
-	const struct sde_format *fmt[R_MAX];
-	int xin_id[R_MAX];
-	bool q16_data = true;
-	int i, j, buffer_lines, width_threshold[R_MAX];
-	unsigned int max_tile_height = 1;
-	bool parallel_fetch_qualified = true;
-	enum sde_sspp_multirect_mode mode = SDE_SSPP_MULTIRECT_NONE;
-	const struct msm_format *msm_fmt;
-	bool const_alpha_enable = true;
-
-	for (i = 0; i < R_MAX; i++) {
-		drm_state[i] = i ? plane->r1 : plane->r0;
-		if (!drm_state[i]) {
-			SDE_ERROR("drm plane state is NULL\n");
-			return -EINVAL;
-		}
-
-		pstate[i] = to_sde_plane_state(drm_state[i]);
-		sde_plane[i] = to_sde_plane(drm_state[i]->plane);
-		xin_id[i] = sde_plane[i]->pipe_hw->cap->xin_id;
-
-		for (j = 0; j < i; j++) {
-			if (xin_id[i] != xin_id[j]) {
-				SDE_ERROR_PLANE(sde_plane[i],
-					"invalid multirect validate call base:%d xin_id:%d curr:%d xin:%d\n",
-					j, xin_id[j], i, xin_id[i]);
-				return -EINVAL;
-			}
-		}
-
-		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
-		if (!msm_fmt) {
-			SDE_ERROR_PLANE(sde_plane[i], "null fb\n");
-			return -EINVAL;
-		}
-		fmt[i] = to_sde_format(msm_fmt);
-
-		if (SDE_FORMAT_IS_UBWC(fmt[i]) &&
-		    (fmt[i]->tile_height > max_tile_height))
-			max_tile_height = fmt[i]->tile_height;
-
-		POPULATE_RECT(&src[i], drm_state[i]->src_x, drm_state[i]->src_y,
-			drm_state[i]->src_w, drm_state[i]->src_h, q16_data);
-		POPULATE_RECT(&dst[i], drm_state[i]->crtc_x,
-				drm_state[i]->crtc_y, drm_state[i]->crtc_w,
-				drm_state[i]->crtc_h, !q16_data);
-
-		if (src[i].w != dst[i].w || src[i].h != dst[i].h) {
-			SDE_ERROR_PLANE(sde_plane[i],
-				"scaling is not supported in multirect mode\n");
-			return -EINVAL;
-		}
-
-		if (SDE_FORMAT_IS_YUV(fmt[i])) {
-			SDE_ERROR_PLANE(sde_plane[i],
-				"Unsupported format for multirect mode\n");
-			return -EINVAL;
-		}
-
-		/**
-		 * SSPP PD_MEM is split half - one for each RECT.
-		 * Tiled formats need 5 lines of buffering while fetching
-		 * whereas linear formats need only 2 lines.
-		 * So we cannot support more than half of the supported SSPP
-		 * width for tiled formats.
-		 */
-		width_threshold[i] = sde_plane[i]->pipe_sblk->maxlinewidth;
-		if (SDE_FORMAT_IS_UBWC(fmt[i]))
-			width_threshold[i] /= 2;
-
-		if (parallel_fetch_qualified && src[i].w > width_threshold[i])
-			parallel_fetch_qualified = false;
-
-		if (sde_plane[i]->is_virtual)
-			mode = sde_plane_get_property(pstate[i],
-					PLANE_PROP_MULTIRECT_MODE);
-
-		if (pstate[i]->const_alpha_en != const_alpha_enable)
-			const_alpha_enable = false;
-
-	}
-
-	buffer_lines = 2 * max_tile_height;
-
-	/**
-	 * fallback to driver mode selection logic if client is using
-	 * multirect plane without setting property.
-	 *
-	 * validate multirect mode configuration based on rectangle
-	 */
-	switch (mode) {
-	case SDE_SSPP_MULTIRECT_NONE:
-		if (parallel_fetch_qualified)
-			mode = SDE_SSPP_MULTIRECT_PARALLEL;
-		else if (TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) ||
-			 TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines))
-			mode = SDE_SSPP_MULTIRECT_TIME_MX;
-		else
-			SDE_ERROR(
-				"planes(%d - %d) multirect mode selection fail\n",
-				drm_state[R0]->plane->base.id,
-				drm_state[R1]->plane->base.id);
-		break;
-
-	case SDE_SSPP_MULTIRECT_PARALLEL:
-		if (!parallel_fetch_qualified) {
-			SDE_ERROR("R0 plane:%d width_threshold:%d src_w:%d\n",
-				drm_state[R0]->plane->base.id,
-				width_threshold[R0],  src[R0].w);
-			SDE_ERROR("R1 plane:%d width_threshold:%d src_w:%d\n",
-				drm_state[R1]->plane->base.id,
-				width_threshold[R1],  src[R1].w);
-			SDE_ERROR("parallel fetch not qualified\n");
-			mode = SDE_SSPP_MULTIRECT_NONE;
-		}
-		break;
-
-	case SDE_SSPP_MULTIRECT_TIME_MX:
-		if (!TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) &&
-		    !TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines)) {
-			SDE_ERROR(
-				"buffer_lines:%d R0 plane:%d dst_y:%d dst_h:%d\n",
-				buffer_lines, drm_state[R0]->plane->base.id,
-				dst[R0].y, dst[R0].h);
-			SDE_ERROR(
-				"buffer_lines:%d R1 plane:%d dst_y:%d dst_h:%d\n",
-				buffer_lines, drm_state[R1]->plane->base.id,
-				dst[R1].y, dst[R1].h);
-			SDE_ERROR("time multiplexed fetch not qualified\n");
-			mode = SDE_SSPP_MULTIRECT_NONE;
-		}
-		break;
-
-	default:
-		SDE_ERROR("bad mode:%d selection\n", mode);
-		mode = SDE_SSPP_MULTIRECT_NONE;
-		break;
-	}
-
-	for (i = 0; i < R_MAX; i++) {
-		pstate[i]->multirect_mode = mode;
-		pstate[i]->const_alpha_en = const_alpha_enable;
-	}
-
-	if (mode == SDE_SSPP_MULTIRECT_NONE)
-		return -EINVAL;
-
-	if (sde_plane[R0]->is_virtual) {
-		pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
-		pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
-	} else {
-		pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
-		pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
-	}
-
-	SDE_DEBUG_PLANE(sde_plane[R0], "R0: %d - %d\n",
-		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
-	SDE_DEBUG_PLANE(sde_plane[R1], "R1: %d - %d\n",
-		pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
-
-	return 0;
-}
-
-/**
- * sde_plane_ctl_flush - set/clear control flush bitmask for the given plane
- * @plane: Pointer to drm plane structure
- * @ctl: Pointer to hardware control driver
- * @set: set if true else clear
- */
-void sde_plane_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		bool set)
-{
-	if (!plane || !ctl) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-
-	if (!ctl->ops.update_bitmask_sspp) {
-		SDE_ERROR("invalid ops\n");
-		return;
-	}
-
-	ctl->ops.update_bitmask_sspp(ctl, sde_plane_pipe(plane), set);
-}
-
-static int sde_plane_prepare_fb(struct drm_plane *plane,
-		struct drm_plane_state *new_state)
-{
-	struct drm_framebuffer *fb = new_state->fb;
-	struct sde_plane *psde = to_sde_plane(plane);
-	struct sde_plane_state *pstate = to_sde_plane_state(new_state);
-	struct sde_hw_fmt_layout layout;
-	struct msm_gem_address_space *aspace;
-	int ret;
-
-	if (!fb)
-		return 0;
-
-	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
-
-	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
-	if (ret) {
-		SDE_ERROR_PLANE(psde, "Failed to get aspace\n");
-		return ret;
-	}
-
-	/* cache aspace */
-	pstate->aspace = aspace;
-
-	/*
-	 * when transitioning from secure to non-secure,
-	 * plane->prepare_fb happens before the commit. In such case,
-	 * defer the prepare_fb and handled it late, during the commit
-	 * after attaching the domains as part of the transition
-	 */
-	pstate->defer_prepare_fb = (aspace && !aspace->domain_attached) ?
-							true : false;
-
-	if (pstate->defer_prepare_fb) {
-		SDE_EVT32(DRMID(plane), psde->pipe);
-		SDE_DEBUG_PLANE(psde,
-		    "domain not attached, prepare_fb handled later\n");
-		return 0;
-	}
-
-	if (pstate->aspace && fb) {
-		ret = msm_framebuffer_prepare(fb,
-				pstate->aspace);
-		if (ret) {
-			SDE_ERROR("failed to prepare framebuffer\n");
-			return ret;
-		}
-	}
-
-	/* validate framebuffer layout before commit */
-	ret = sde_format_populate_layout(pstate->aspace,
-			fb, &layout);
-	if (ret) {
-		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-/**
- * _sde_plane_fetch_halt - halts vbif transactions for a plane
- * @plane: Pointer to plane
- * Returns: 0 on success
- */
-static int _sde_plane_fetch_halt(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	int xin_id;
-	enum sde_clk_ctrl_type clk_ctrl;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	psde = to_sde_plane(plane);
-	if (!plane || !plane->dev || !psde->pipe_hw) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-	clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
-	xin_id = psde->pipe_hw->cap->xin_id;
-	SDE_DEBUG_PLANE(psde, "pipe:%d xin_id:%d clk_ctrl:%d\n",
-			psde->pipe - SSPP_VIG0, xin_id, clk_ctrl);
-	SDE_EVT32_VERBOSE(psde, psde->pipe - SSPP_VIG0, xin_id, clk_ctrl);
-
-	return sde_vbif_halt_plane_xin(sde_kms, xin_id, clk_ctrl);
-}
-
-
-static inline int _sde_plane_power_enable(struct drm_plane *plane, bool enable)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-
-	if (!plane->dev || !plane->dev->dev_private) {
-		SDE_ERROR("invalid drm device\n");
-		return -EINVAL;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	sde_kms = to_sde_kms(priv->kms);
-
-	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
-									enable);
-}
-
-static void sde_plane_cleanup_fb(struct drm_plane *plane,
-		struct drm_plane_state *old_state)
-{
-	struct sde_plane *psde = to_sde_plane(plane);
-	struct sde_plane_state *old_pstate;
-	int ret;
-
-	if (!old_state || !old_state->fb || !plane || !plane->state)
-		return;
-
-	old_pstate = to_sde_plane_state(old_state);
-
-	SDE_DEBUG_PLANE(psde, "FB[%u]\n", old_state->fb->base.id);
-
-	/*
-	 * plane->state gets populated for next frame after swap_state. If
-	 * plane->state->crtc pointer is not populated then it is not used in
-	 * the next frame, hence making it an unused plane.
-	 */
-	if ((plane->state->crtc == NULL) && !psde->is_virtual) {
-		SDE_DEBUG_PLANE(psde, "unused pipe:%u\n",
-			       psde->pipe - SSPP_VIG0);
-
-		/* halt this plane now */
-		ret = _sde_plane_power_enable(plane, true);
-		if (ret) {
-			SDE_ERROR("power resource enable failed with %d", ret);
-			SDE_EVT32(ret);
-			return;
-		}
-
-		ret = _sde_plane_fetch_halt(plane);
-		if (ret) {
-			SDE_ERROR_PLANE(psde,
-				       "unused pipe %u halt failed\n",
-				       psde->pipe - SSPP_VIG0);
-			SDE_EVT32(DRMID(plane), psde->pipe - SSPP_VIG0,
-				       ret, SDE_EVTLOG_ERROR);
-		}
-		_sde_plane_power_enable(plane, false);
-	}
-
-	msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
-
-}
-
-static void _sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde,
-		struct drm_plane_state *state,
-		struct drm_plane_state *old_state)
-{
-	struct sde_plane_state *pstate = to_sde_plane_state(state);
-	struct sde_plane_state *old_pstate = to_sde_plane_state(old_state);
-	struct drm_framebuffer *fb, *old_fb;
-
-	/* no need to check it again */
-	if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
-		return;
-
-	if (!sde_plane_enabled(state) || !sde_plane_enabled(old_state)
-			|| psde->is_error) {
-		SDE_DEBUG_PLANE(psde,
-			"enabling/disabling full modeset required\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
-	} else if (to_sde_plane_state(old_state)->pending) {
-		SDE_DEBUG_PLANE(psde, "still pending\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
-	} else if (pstate->multirect_index != old_pstate->multirect_index ||
-			pstate->multirect_mode != old_pstate->multirect_mode) {
-		SDE_DEBUG_PLANE(psde, "multirect config updated\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
-	} else if (state->src_w != old_state->src_w ||
-		   state->src_h != old_state->src_h ||
-		   state->src_x != old_state->src_x ||
-		   state->src_y != old_state->src_y) {
-		SDE_DEBUG_PLANE(psde, "src rect updated\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
-	} else if (state->crtc_w != old_state->crtc_w ||
-		   state->crtc_h != old_state->crtc_h ||
-		   state->crtc_x != old_state->crtc_x ||
-		   state->crtc_y != old_state->crtc_y) {
-		SDE_DEBUG_PLANE(psde, "crtc rect updated\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
-	} else if (pstate->excl_rect.w != old_pstate->excl_rect.w ||
-		   pstate->excl_rect.h != old_pstate->excl_rect.h ||
-		   pstate->excl_rect.x != old_pstate->excl_rect.x ||
-		   pstate->excl_rect.y != old_pstate->excl_rect.y) {
-		SDE_DEBUG_PLANE(psde, "excl_rect updated\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
-	} else if (pstate->rotation != old_pstate->rotation) {
-		SDE_DEBUG_PLANE(psde, "rotation updated 0x%x->0x%x\n",
-			pstate->rotation, old_pstate->rotation);
-		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
-	}
-
-	fb = state->fb;
-	old_fb = old_state->fb;
-
-	if (!fb || !old_fb) {
-		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
-	} else if ((fb->format->format != old_fb->format->format) ||
-			pstate->const_alpha_en != old_pstate->const_alpha_en) {
-		SDE_DEBUG_PLANE(psde, "format change\n");
-		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
-	} else {
-		uint64_t new_mod = fb->modifier;
-		uint64_t old_mod = old_fb->modifier;
-		uint32_t *new_pitches = fb->pitches;
-		uint32_t *old_pitches = old_fb->pitches;
-		uint32_t *new_offset = fb->offsets;
-		uint32_t *old_offset = old_fb->offsets;
-		int i;
-
-		if (new_mod != old_mod) {
-			SDE_DEBUG_PLANE(psde,
-				"format modifiers change new_mode:%llu old_mode:%llu\n",
-				new_mod, old_mod);
-			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
-				SDE_PLANE_DIRTY_RECTS;
-		}
-
-		for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) {
-			if (new_pitches[i] != old_pitches[i]) {
-				SDE_DEBUG_PLANE(psde,
-					"pitches change plane:%d old_pitches:%u new_pitches:%u\n",
-					i, old_pitches[i], new_pitches[i]);
-				pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
-				break;
-			}
-		}
-		for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) {
-			if (new_offset[i] != old_offset[i]) {
-				SDE_DEBUG_PLANE(psde,
-					"offset change plane:%d old_offset:%u new_offset:%u\n",
-					i, old_offset[i], new_offset[i]);
-				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
-					SDE_PLANE_DIRTY_RECTS;
-				break;
-			}
-		}
-	}
-}
-
-int sde_plane_validate_src_addr(struct drm_plane *plane,
-		unsigned long base_addr, u32 size)
-{
-	int ret =  -EINVAL;
-	u32 addr;
-	struct sde_plane *psde = to_sde_plane(plane);
-
-	if (!psde || !base_addr || !size) {
-		SDE_ERROR_PLANE(psde, "invalid arguments\n");
-		return ret;
-	}
-
-	if (psde->pipe_hw && psde->pipe_hw->ops.get_sourceaddress) {
-		addr = psde->pipe_hw->ops.get_sourceaddress(psde->pipe_hw,
-				is_sde_plane_virtual(plane));
-		if ((addr >= base_addr) && (addr < (base_addr + size)))
-			ret = 0;
-	}
-
-	return ret;
-}
-
-static int _sde_plane_validate_scaler_v2(struct sde_plane *psde,
-		struct sde_plane_state *pstate,
-		const struct sde_format *fmt,
-		uint32_t img_w, uint32_t img_h,
-		uint32_t src_w, uint32_t src_h,
-		uint32_t deci_w, uint32_t deci_h)
-{
-	int i;
-
-	if (!psde || !pstate || !fmt) {
-		SDE_ERROR_PLANE(psde, "invalid arguments\n");
-		return -EINVAL;
-	}
-
-	if (psde->debugfs_default_scale ||
-	   (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2 &&
-	    pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK))
-		return 0;
-
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID;
-
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		uint32_t hor_req_pixels, hor_fetch_pixels;
-		uint32_t vert_req_pixels, vert_fetch_pixels;
-		uint32_t src_w_tmp, src_h_tmp;
-		uint32_t scaler_w, scaler_h;
-		bool rot;
-
-		/* re-use color plane 1's config for plane 2 */
-		if (i == 2)
-			continue;
-
-		src_w_tmp = src_w;
-		src_h_tmp = src_h;
-
-		/*
-		 * For chroma plane, width is half for the following sub sampled
-		 * formats. Except in case of decimation, where hardware avoids
-		 * 1 line of decimation instead of downsampling.
-		 */
-		if (i == 1) {
-			if (!deci_w &&
-					(fmt->chroma_sample == SDE_CHROMA_420 ||
-					 fmt->chroma_sample == SDE_CHROMA_H2V1))
-				src_w_tmp >>= 1;
-			if (!deci_h &&
-					(fmt->chroma_sample == SDE_CHROMA_420 ||
-					 fmt->chroma_sample == SDE_CHROMA_H1V2))
-				src_h_tmp >>= 1;
-		}
-
-		hor_req_pixels = pstate->pixel_ext.roi_w[i];
-		vert_req_pixels = pstate->pixel_ext.roi_h[i];
-
-		hor_fetch_pixels = DECIMATED_DIMENSION(src_w_tmp +
-			(int8_t)(pstate->pixel_ext.left_ftch[i] & 0xFF) +
-			(int8_t)(pstate->pixel_ext.right_ftch[i] & 0xFF),
-			deci_w);
-		vert_fetch_pixels = DECIMATED_DIMENSION(src_h_tmp +
-			(int8_t)(pstate->pixel_ext.top_ftch[i] & 0xFF) +
-			(int8_t)(pstate->pixel_ext.btm_ftch[i] & 0xFF),
-			deci_h);
-
-		if ((hor_req_pixels != hor_fetch_pixels) ||
-			(hor_fetch_pixels > img_w) ||
-			(vert_req_pixels != vert_fetch_pixels) ||
-			(vert_fetch_pixels > img_h)) {
-			SDE_ERROR_PLANE(psde,
-					"req %d/%d, fetch %d/%d, src %dx%d\n",
-					hor_req_pixels, vert_req_pixels,
-					hor_fetch_pixels, vert_fetch_pixels,
-					img_w, img_h);
-			return -EINVAL;
-		}
-
-		/*
-		 * swap the scaler src width & height for inline-rotation 90
-		 * comparison with Pixel-Extension, as PE is based on
-		 * pre-rotation and QSEED is based on post-rotation
-		 */
-		rot = pstate->rotation & DRM_MODE_ROTATE_90;
-		scaler_w = rot ? pstate->scaler3_cfg.src_height[i]
-				    : pstate->scaler3_cfg.src_width[i];
-		scaler_h = rot ? pstate->scaler3_cfg.src_width[i]
-				    : pstate->scaler3_cfg.src_height[i];
-		/*
-		 * Alpha plane can only be scaled using bilinear or pixel
-		 * repeat/drop, src_width and src_height are only specified
-		 * for Y and UV plane
-		 */
-		if (i != 3 && (hor_req_pixels != scaler_w ||
-					vert_req_pixels != scaler_h)) {
-			SDE_ERROR_PLANE(psde,
-			    "roi[%d] roi:%dx%d scaler:%dx%d src:%dx%d rot:%d\n",
-				i, pstate->pixel_ext.roi_w[i],
-				pstate->pixel_ext.roi_h[i],
-				scaler_w, scaler_h, src_w, src_h, rot);
-			return -EINVAL;
-		}
-
-		/*
-		 * SSPP fetch , unpack output and QSEED3 input lines need
-		 * to match for Y plane
-		 */
-		if (i == 0 &&
-			(sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
-			BIT(SDE_DRM_DEINTERLACE)) &&
-			((pstate->scaler3_cfg.src_height[i] != (src_h/2)) ||
-			(pstate->pixel_ext.roi_h[i] != (src_h/2)))) {
-			SDE_ERROR_PLANE(psde,
-				"de-interlace fail roi[%d] %d/%d, src %dx%d, src %dx%d\n",
-				i, pstate->pixel_ext.roi_w[i],
-				pstate->pixel_ext.roi_h[i],
-				pstate->scaler3_cfg.src_width[i],
-				pstate->scaler3_cfg.src_height[i],
-				src_w, src_h);
-			return -EINVAL;
-		}
-	}
-
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
-	return 0;
-}
-
-static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
-	struct sde_plane *psde, const struct sde_format *fmt,
-	struct sde_plane_state *pstate, struct sde_rect *src,
-	struct sde_rect *dst, u32 width, u32 height)
-{
-	int ret = 0;
-	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
-	uint32_t scaler_src_w, scaler_src_h;
-	uint32_t max_downscale_num, max_downscale_denom;
-	uint32_t max_upscale, max_linewidth;
-	bool inline_rotation, rt_client;
-	struct drm_crtc *crtc;
-
-	deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
-	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
-
-	src_deci_w = DECIMATED_DIMENSION(src->w, deci_w);
-	src_deci_h = DECIMATED_DIMENSION(src->h, deci_h);
-
-	/* with inline rotator, the source of the scaler is post-rotated */
-	inline_rotation = pstate->rotation & DRM_MODE_ROTATE_90 ? true : false;
-	if (inline_rotation) {
-		scaler_src_w = src_deci_h;
-		scaler_src_h = src_deci_w;
-	} else {
-		scaler_src_w = src_deci_w;
-		scaler_src_h = src_deci_h;
-	}
-
-	max_upscale = psde->pipe_sblk->maxupscale;
-	max_linewidth = psde->pipe_sblk->maxlinewidth;
-
-	crtc = state->crtc;
-	if (crtc)
-		rt_client = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
-	else
-		rt_client = true;
-
-	max_downscale_denom = 1;
-	/* inline rotation RT clients have a different max downscaling limit */
-	if (inline_rotation) {
-		if (rt_client) {
-			max_downscale_num =
-				psde->pipe_sblk->in_rot_maxdwnscale_rt_num;
-			max_downscale_denom =
-				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom;
-		} else {
-			max_downscale_num =
-				psde->pipe_sblk->in_rot_maxdwnscale_nrt;
-		}
-	} else {
-		max_downscale_num = psde->pipe_sblk->maxdwnscale;
-	}
-
-	/* decimation validation */
-	if ((deci_w || deci_h)
-			&& ((deci_w > psde->pipe_sblk->maxhdeciexp)
-				|| (deci_h > psde->pipe_sblk->maxvdeciexp))) {
-		SDE_ERROR_PLANE(psde, "too much decimation requested\n");
-		ret = -EINVAL;
-
-	} else if ((deci_w || deci_h)
-			&& (fmt->fetch_mode != SDE_FETCH_LINEAR)) {
-		SDE_ERROR_PLANE(psde, "decimation requires linear fetch\n");
-		ret = -EINVAL;
-
-	} else if (!(psde->features & SDE_SSPP_SCALER) &&
-		((src->w != dst->w) || (src->h != dst->h))) {
-		SDE_ERROR_PLANE(psde,
-			"pipe doesn't support scaling %ux%u->%ux%u\n",
-			src->w, src->h, dst->w, dst->h);
-		ret = -EINVAL;
-
-	/* check decimated source width */
-	} else if (src_deci_w > max_linewidth) {
-		SDE_ERROR_PLANE(psde,
-				"invalid src w:%u, deci w:%u, line w:%u\n",
-				src->w, src_deci_w, max_linewidth);
-		ret = -E2BIG;
-	}
-
-	/* check max scaler capability */
-	else if (((scaler_src_w * max_upscale) < dst->w) ||
-		((scaler_src_h * max_upscale) < dst->h) ||
-		(((dst->w * max_downscale_num) / max_downscale_denom)
-			< scaler_src_w) ||
-		(((dst->h * max_downscale_num) / max_downscale_denom)
-			< scaler_src_h)) {
-		SDE_ERROR_PLANE(psde,
-			"too much scaling requested %ux%u->%ux%u rot:%d\n",
-			scaler_src_w, scaler_src_h, dst->w, dst->h,
-			inline_rotation);
-		ret = -E2BIG;
-	} else if (_sde_plane_validate_scaler_v2(psde, pstate, fmt,
-				width, height,
-				src->w, src->h, deci_w, deci_h)) {
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int _sde_atomic_check_excl_rect(struct sde_plane *psde,
-	struct sde_plane_state *pstate, struct sde_rect *src,
-	const struct sde_format *fmt, int ret)
-{
-
-	/* check excl rect configs */
-	if (!ret && pstate->excl_rect.w && pstate->excl_rect.h) {
-		struct sde_rect intersect;
-
-		/*
-		 * Check exclusion rect against src rect.
-		 * it must intersect with source rect.
-		 */
-		sde_kms_rect_intersect(src, &pstate->excl_rect, &intersect);
-		if (intersect.w != pstate->excl_rect.w ||
-				intersect.h != pstate->excl_rect.h ||
-				SDE_FORMAT_IS_YUV(fmt)) {
-			SDE_ERROR_PLANE(psde,
-				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt: %4.4s\n",
-				pstate->excl_rect.x, pstate->excl_rect.y,
-				pstate->excl_rect.w, pstate->excl_rect.h,
-				src->x, src->y, src->w, src->h,
-				(char *)&fmt->base.pixel_format);
-			ret = -EINVAL;
-		}
-		SDE_DEBUG_PLANE(psde, "excl_rect: {%d,%d,%d,%d}\n",
-				pstate->excl_rect.x, pstate->excl_rect.y,
-				pstate->excl_rect.w, pstate->excl_rect.h);
-	}
-
-	return ret;
-}
-
-static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
-		struct drm_plane_state *state)
-{
-	int ret = 0;
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-	const struct msm_format *msm_fmt;
-	const struct sde_format *fmt;
-	struct sde_rect src, dst;
-	uint32_t min_src_size;
-	bool q16_data = true;
-	struct drm_framebuffer *fb;
-	u32 width;
-	u32 height;
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(state);
-
-	if (!psde->pipe_sblk) {
-		SDE_ERROR_PLANE(psde, "invalid catalog\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	/* src values are in Q16 fixed point, convert to integer */
-	POPULATE_RECT(&src, state->src_x, state->src_y,
-			state->src_w, state->src_h, q16_data);
-	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y, state->crtc_w,
-		state->crtc_h, !q16_data);
-
-	SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
-		sde_plane_enabled(plane->state), sde_plane_enabled(state));
-
-	if (!sde_plane_enabled(state))
-		goto modeset_update;
-
-	fb = state->fb;
-	width = fb ? state->fb->width : 0x0;
-	height = fb ? state->fb->height : 0x0;
-
-	SDE_DEBUG("plane%d sspp:%x/%dx%d/%4.4s/%llx\n",
-			plane->base.id,
-			pstate->rotation,
-			width, height,
-			fb ? (char *) &state->fb->format->format : 0x0,
-			fb ? state->fb->modifier : 0x0);
-	SDE_DEBUG("src:%dx%d %d,%d crtc:%dx%d+%d+%d\n",
-			state->src_w >> 16, state->src_h >> 16,
-			state->src_x >> 16, state->src_y >> 16,
-			state->crtc_w, state->crtc_h,
-			state->crtc_x, state->crtc_y);
-
-	msm_fmt = msm_framebuffer_format(fb);
-	fmt = to_sde_format(msm_fmt);
-
-	min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
-
-	if (SDE_FORMAT_IS_YUV(fmt) &&
-		(!(psde->features & SDE_SSPP_SCALER) ||
-		 !(psde->features & (BIT(SDE_SSPP_CSC)
-		 | BIT(SDE_SSPP_CSC_10BIT))))) {
-		SDE_ERROR_PLANE(psde,
-				"plane doesn't have scaler/csc for yuv\n");
-		ret = -EINVAL;
-
-	/* check src bounds */
-	} else if (width > MAX_IMG_WIDTH ||
-		height > MAX_IMG_HEIGHT ||
-		src.w < min_src_size || src.h < min_src_size ||
-		CHECK_LAYER_BOUNDS(src.x, src.w, width) ||
-		CHECK_LAYER_BOUNDS(src.y, src.h, height)) {
-		SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
-			src.x, src.y, src.w, src.h);
-		ret = -E2BIG;
-
-	/* valid yuv image */
-	} else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1) || (src.y & 0x1) ||
-			 (src.w & 0x1) || (src.h & 0x1))) {
-		SDE_ERROR_PLANE(psde, "invalid yuv source %u, %u, %ux%u\n",
-				src.x, src.y, src.w, src.h);
-		ret = -EINVAL;
-
-	/* min dst support */
-	} else if (dst.w < 0x1 || dst.h < 0x1) {
-		SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u, %ux%u\n",
-				dst.x, dst.y, dst.w, dst.h);
-		ret = -EINVAL;
-	}
-
-	ret = _sde_atomic_check_decimation_scaler(state, psde, fmt, pstate,
-		&src, &dst, width, height);
-
-	if (ret)
-		return ret;
-
-	ret = _sde_atomic_check_excl_rect(psde, pstate,
-		&src, fmt, ret);
-
-	if (ret)
-		return ret;
-
-	pstate->const_alpha_en = fmt->alpha_enable &&
-		(SDE_DRM_BLEND_OP_OPAQUE !=
-		 sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP)) &&
-		(pstate->stage != SDE_STAGE_0);
-
-modeset_update:
-	if (!ret)
-		_sde_plane_sspp_atomic_check_mode_changed(psde,
-				state, plane->state);
-exit:
-	return ret;
-}
-
-static int sde_plane_atomic_check(struct drm_plane *plane,
-		struct drm_plane_state *state)
-{
-	int ret = 0;
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-
-	if (!plane || !state) {
-		SDE_ERROR("invalid arg(s), plane %d state %d\n",
-				!plane, !state);
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(state);
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	ret = sde_plane_rot_atomic_check(plane, state);
-	if (ret)
-		goto exit;
-
-	ret = sde_plane_sspp_atomic_check(plane, state);
-
-exit:
-	return ret;
-}
-
-void sde_plane_flush(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-
-	if (!plane || !plane->state) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(plane->state);
-
-	/*
-	 * These updates have to be done immediately before the plane flush
-	 * timing, and may not be moved to the atomic_update/mode_set functions.
-	 */
-	if (psde->is_error)
-		/* force white frame with 100% alpha pipe output on error */
-		_sde_plane_color_fill(psde, 0xFFFFFF, 0xFF);
-	else if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
-		/* force 100% alpha */
-		_sde_plane_color_fill(psde, psde->color_fill, 0xFF);
-	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
-		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
-
-	/* flag h/w flush complete */
-	if (plane->state)
-		pstate->pending = false;
-}
-
-/**
- * sde_plane_set_error: enable/disable error condition
- * @plane: pointer to drm_plane structure
- */
-void sde_plane_set_error(struct drm_plane *plane, bool error)
-{
-	struct sde_plane *psde;
-
-	if (!plane)
-		return;
-
-	psde = to_sde_plane(plane);
-	psde->is_error = error;
-}
-
-static void _sde_plane_sspp_setup_sys_cache(struct sde_plane *psde,
-	struct sde_plane_state *pstate, const struct sde_format *fmt)
-{
-	if (!psde->pipe_hw->ops.setup_sys_cache ||
-	    !(psde->perf_features & BIT(SDE_PERF_SSPP_SYS_CACHE)))
-		return;
-
-	SDE_DEBUG("features:0x%x rotation:0x%x\n",
-		psde->features, pstate->rotation);
-
-	if ((pstate->rotation & DRM_MODE_ROTATE_90) &&
-			sde_format_is_tp10_ubwc(fmt)) {
-		pstate->sc_cfg.rd_en = true;
-		pstate->sc_cfg.rd_scid =
-			psde->pipe_sblk->llcc_scid;
-		pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
-			SSPP_SYS_CACHE_SCID;
-	} else {
-		pstate->sc_cfg.rd_en = false;
-		pstate->sc_cfg.rd_scid = 0x0;
-		pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
-			SSPP_SYS_CACHE_SCID;
-	}
-
-	psde->pipe_hw->ops.setup_sys_cache(
-		psde->pipe_hw, &pstate->sc_cfg);
-}
-
-static void _sde_plane_map_prop_to_dirty_bits(void)
-{
-	plane_prop_array[PLANE_PROP_SCALER_V1] =
-	plane_prop_array[PLANE_PROP_SCALER_V2] =
-	plane_prop_array[PLANE_PROP_SCALER_LUT_ED] =
-	plane_prop_array[PLANE_PROP_SCALER_LUT_CIR] =
-	plane_prop_array[PLANE_PROP_SCALER_LUT_SEP] =
-	plane_prop_array[PLANE_PROP_H_DECIMATE] =
-	plane_prop_array[PLANE_PROP_V_DECIMATE] =
-	plane_prop_array[PLANE_PROP_SRC_CONFIG] =
-	plane_prop_array[PLANE_PROP_ZPOS] =
-	plane_prop_array[PLANE_PROP_EXCL_RECT_V1] =
-		SDE_PLANE_DIRTY_RECTS;
-
-	plane_prop_array[PLANE_PROP_CSC_V1] =
-	plane_prop_array[PLANE_PROP_CSC_DMA_V1] =
-	plane_prop_array[PLANE_PROP_INVERSE_PMA] =
-		SDE_PLANE_DIRTY_FORMAT;
-
-	plane_prop_array[PLANE_PROP_MULTIRECT_MODE] =
-	plane_prop_array[PLANE_PROP_COLOR_FILL] =
-		SDE_PLANE_DIRTY_ALL;
-
-	/* no special action required */
-	plane_prop_array[PLANE_PROP_INFO] =
-	plane_prop_array[PLANE_PROP_ALPHA] =
-	plane_prop_array[PLANE_PROP_INPUT_FENCE] =
-	plane_prop_array[PLANE_PROP_BLEND_OP] = 0;
-
-	plane_prop_array[PLANE_PROP_FB_TRANSLATION_MODE] =
-		SDE_PLANE_DIRTY_FB_TRANSLATION_MODE;
-	plane_prop_array[PLANE_PROP_PREFILL_SIZE] =
-	plane_prop_array[PLANE_PROP_PREFILL_TIME] =
-		SDE_PLANE_DIRTY_PERF;
-
-	plane_prop_array[PLANE_PROP_VIG_GAMUT] = SDE_PLANE_DIRTY_VIG_GAMUT;
-	plane_prop_array[PLANE_PROP_VIG_IGC] = SDE_PLANE_DIRTY_VIG_IGC;
-	plane_prop_array[PLANE_PROP_DMA_IGC] = SDE_PLANE_DIRTY_DMA_IGC;
-	plane_prop_array[PLANE_PROP_DMA_GC] = SDE_PLANE_DIRTY_DMA_GC;
-
-	plane_prop_array[PLANE_PROP_SKIN_COLOR] =
-	plane_prop_array[PLANE_PROP_SKY_COLOR] =
-	plane_prop_array[PLANE_PROP_FOLIAGE_COLOR] =
-	plane_prop_array[PLANE_PROP_HUE_ADJUST] =
-	plane_prop_array[PLANE_PROP_SATURATION_ADJUST] =
-	plane_prop_array[PLANE_PROP_VALUE_ADJUST] =
-	plane_prop_array[PLANE_PROP_CONTRAST_ADJUST] =
-		SDE_PLANE_DIRTY_ALL;
-}
-
-static inline bool _sde_plane_allow_uidle(struct sde_plane *psde,
-	struct sde_rect *src, struct sde_rect *dst)
-{
-	u32 max_downscale = psde->catalog->uidle_cfg.max_dwnscale;
-	u32 downscale = (src->h * 1000)/dst->h;
-
-	return (downscale > max_downscale) ? false : true;
-}
-
-static void _sde_plane_setup_uidle(struct drm_crtc *crtc,
-	struct sde_plane *psde, struct sde_plane_state *pstate,
-	struct sde_rect *src, struct sde_rect *dst)
-{
-	struct sde_hw_pipe_uidle_cfg cfg;
-	u32 line_time = sde_get_linetime(&crtc->mode); /* nS */
-	u32 fal1_target_idle_time_ns =
-		psde->catalog->uidle_cfg.fal1_target_idle_time * 1000; /* nS */
-	u32 fal10_target_idle_time_ns =
-		psde->catalog->uidle_cfg.fal10_target_idle_time * 1000; /* nS */
-	u32 fal10_threshold =
-		psde->catalog->uidle_cfg.fal10_threshold; /* uS */
-
-	if (line_time && fal10_threshold && fal10_target_idle_time_ns &&
-		fal1_target_idle_time_ns) {
-		cfg.enable = _sde_plane_allow_uidle(psde, src, dst);
-		cfg.fal10_threshold = fal10_threshold;
-		cfg.fal10_exit_threshold = fal10_threshold + 2;
-		cfg.fal1_threshold = 1 +
-			(fal1_target_idle_time_ns*1000/line_time*2)/1000;
-		cfg.fal_allowed_threshold = fal10_threshold +
-			(fal10_target_idle_time_ns*1000/line_time*2)/1000;
-	} else {
-		SDE_ERROR("invalid settings, will disable UIDLE %d %d %d %d\n",
-			line_time, fal10_threshold, fal10_target_idle_time_ns,
-			fal1_target_idle_time_ns);
-		cfg.enable = false;
-		cfg.fal10_threshold = 0;
-		cfg.fal1_threshold = 0;
-		cfg.fal_allowed_threshold = 0;
-	}
-
-	SDE_DEBUG_PLANE(psde,
-		"tholds: fal10=%d fal10_exit=%d fal1=%d fal_allowed=%d\n",
-			cfg.fal10_threshold, cfg.fal10_exit_threshold,
-			cfg.fal1_threshold, cfg.fal_allowed_threshold);
-	SDE_DEBUG_PLANE(psde,
-		"times: line:%d fal1_idle:%d fal10_idle:%d dwnscale:%d\n",
-			line_time, fal1_target_idle_time_ns,
-			fal10_target_idle_time_ns,
-			psde->catalog->uidle_cfg.max_dwnscale);
-	SDE_EVT32(cfg.enable, cfg.fal10_threshold, cfg.fal10_exit_threshold,
-		cfg.fal1_threshold, cfg.fal_allowed_threshold,
-		psde->catalog->uidle_cfg.max_dwnscale);
-
-	psde->pipe_hw->ops.setup_uidle(
-		psde->pipe_hw, &cfg,
-		pstate->multirect_index);
-}
-
-static void _sde_plane_update_secure_session(struct sde_plane *psde,
-	struct sde_plane_state *pstate)
-{
-	bool enable = false;
-	int mode = sde_plane_get_property(pstate,
-			PLANE_PROP_FB_TRANSLATION_MODE);
-
-	if ((mode == SDE_DRM_FB_SEC) ||
-			(mode == SDE_DRM_FB_SEC_DIR_TRANS))
-		enable = true;
-
-	/* update secure session flag */
-	psde->pipe_hw->ops.setup_secure_address(psde->pipe_hw,
-			pstate->multirect_index,
-			enable);
-}
-
-static void _sde_plane_update_roi_config(struct drm_plane *plane,
-	struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-	const struct sde_format *fmt;
-	struct sde_plane *psde;
-	struct drm_plane_state *state;
-	struct sde_plane_state *pstate;
-	struct sde_rect src, dst;
-	const struct sde_rect *crtc_roi;
-	bool q16_data = true;
-	int idx;
-
-	psde = to_sde_plane(plane);
-	state = plane->state;
-
-	pstate = to_sde_plane_state(state);
-	fmt = to_sde_format(msm_framebuffer_format(fb));
-
-	POPULATE_RECT(&src, state->src_x, state->src_y,
-		state->src_w, state->src_h, q16_data);
-	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
-		state->crtc_w, state->crtc_h, !q16_data);
-
-	SDE_DEBUG_PLANE(psde,
-		"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %4.4s ubwc %d\n",
-			fb->base.id, src.x, src.y, src.w, src.h,
-			crtc->base.id, dst.x, dst.y, dst.w, dst.h,
-			(char *)&fmt->base.pixel_format,
-			SDE_FORMAT_IS_UBWC(fmt));
-
-	if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
-		BIT(SDE_DRM_DEINTERLACE)) {
-		SDE_DEBUG_PLANE(psde, "deinterlace\n");
-		for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
-			psde->pipe_cfg.layout.plane_pitch[idx] <<= 1;
-		src.h /= 2;
-		src.y  = DIV_ROUND_UP(src.y, 2);
-		src.y &= ~0x1;
-	}
-
-	/*
-	 * adjust layer mixer position of the sspp in the presence
-	 * of a partial update to the active lm origin
-	 */
-	sde_crtc_get_crtc_roi(crtc->state, &crtc_roi);
-	dst.x -= crtc_roi->x;
-	dst.y -= crtc_roi->y;
-
-	/* check for UIDLE */
-	if (psde->pipe_hw->ops.setup_uidle)
-		_sde_plane_setup_uidle(crtc, psde, pstate, &src, &dst);
-
-	psde->pipe_cfg.src_rect = src;
-	psde->pipe_cfg.dst_rect = dst;
-
-	_sde_plane_setup_scaler(psde, pstate, fmt, false);
-
-	/* check for color fill */
-	psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
-			PLANE_PROP_COLOR_FILL);
-	if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
-		/* skip remaining processing on color fill */
-		pstate->dirty = 0x0;
-	} else if (psde->pipe_hw->ops.setup_rects) {
-		psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
-				&psde->pipe_cfg,
-				pstate->multirect_index);
-	}
-
-	if (psde->pipe_hw->ops.setup_pe &&
-			(pstate->multirect_index != SDE_SSPP_RECT_1))
-		psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
-				&pstate->pixel_ext);
-
-	/**
-	 * when programmed in multirect mode, scalar block will be
-	 * bypassed. Still we need to update alpha and bitwidth
-	 * ONLY for RECT0
-	 */
-	if (psde->pipe_hw->ops.setup_scaler &&
-			pstate->multirect_index != SDE_SSPP_RECT_1) {
-		psde->pipe_hw->ctl = _sde_plane_get_hw_ctl(plane);
-		psde->pipe_hw->ops.setup_scaler(psde->pipe_hw,
-				&psde->pipe_cfg, &pstate->pixel_ext,
-				&pstate->scaler3_cfg);
-	}
-
-	/* update excl rect */
-	if (psde->pipe_hw->ops.setup_excl_rect)
-		psde->pipe_hw->ops.setup_excl_rect(psde->pipe_hw,
-				&pstate->excl_rect,
-				pstate->multirect_index);
-
-	if (psde->pipe_hw->ops.setup_multirect)
-		psde->pipe_hw->ops.setup_multirect(
-				psde->pipe_hw,
-				pstate->multirect_index,
-				pstate->multirect_mode);
-}
-
-static void _sde_plane_update_format_and_rects(struct sde_plane *psde,
-	struct sde_plane_state *pstate, const struct sde_format *fmt)
-{
-	uint32_t src_flags = 0;
-
-	SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", pstate->rotation);
-	if (pstate->rotation & DRM_MODE_REFLECT_X)
-		src_flags |= SDE_SSPP_FLIP_LR;
-	if (pstate->rotation & DRM_MODE_REFLECT_Y)
-		src_flags |= SDE_SSPP_FLIP_UD;
-	if (pstate->rotation & DRM_MODE_ROTATE_90)
-		src_flags |= SDE_SSPP_ROT_90;
-
-	/* update format */
-	psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt,
-	   pstate->const_alpha_en, src_flags,
-	   pstate->multirect_index);
-
-	if (psde->pipe_hw->ops.setup_cdp) {
-		struct sde_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
-
-		memset(cdp_cfg, 0, sizeof(struct sde_hw_pipe_cdp_cfg));
-
-		cdp_cfg->enable = psde->catalog->perf.cdp_cfg
-			   [SDE_PERF_CDP_USAGE_RT].rd_enable;
-		cdp_cfg->ubwc_meta_enable =
-			   SDE_FORMAT_IS_UBWC(fmt);
-		cdp_cfg->tile_amortize_enable =
-			   SDE_FORMAT_IS_UBWC(fmt) ||
-			   SDE_FORMAT_IS_TILE(fmt);
-		cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
-
-		psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg,
-			   pstate->multirect_index);
-	}
-
-	_sde_plane_sspp_setup_sys_cache(psde, pstate, fmt);
-
-	/* update csc */
-	if (SDE_FORMAT_IS_YUV(fmt))
-		_sde_plane_setup_csc(psde);
-	else
-		psde->csc_ptr = 0;
-
-	if (psde->pipe_hw->ops.setup_inverse_pma) {
-		uint32_t pma_mode = 0;
-
-		if (fmt->alpha_enable)
-			pma_mode = (uint32_t) sde_plane_get_property(
-				pstate, PLANE_PROP_INVERSE_PMA);
-		psde->pipe_hw->ops.setup_inverse_pma(psde->pipe_hw,
-			pstate->multirect_index, pma_mode);
-	}
-
-	if (psde->pipe_hw->ops.setup_dgm_csc)
-		psde->pipe_hw->ops.setup_dgm_csc(psde->pipe_hw,
-			pstate->multirect_index, psde->csc_usr_ptr);
-}
-
-static void _sde_plane_update_sharpening(struct sde_plane *psde)
-{
-	psde->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
-	psde->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
-	psde->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
-	psde->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
-
-	psde->pipe_hw->ops.setup_sharpening(psde->pipe_hw,
-			&psde->sharp_cfg);
-}
-
-static void _sde_plane_update_properties(struct drm_plane *plane,
-	struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-	uint32_t nplanes;
-	const struct sde_format *fmt;
-	struct sde_plane *psde;
-	struct drm_plane_state *state;
-	struct sde_plane_state *pstate;
-
-	psde = to_sde_plane(plane);
-	state = plane->state;
-
-	pstate = to_sde_plane_state(state);
-	fmt = to_sde_format(msm_framebuffer_format(fb));
-	nplanes = fmt->num_planes;
-
-	/* update secure session flag */
-	if (pstate->dirty & SDE_PLANE_DIRTY_FB_TRANSLATION_MODE)
-		_sde_plane_update_secure_session(psde, pstate);
-
-	/* update roi config */
-	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
-		_sde_plane_update_roi_config(plane, crtc, fb);
-
-	if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT ||
-			pstate->dirty & SDE_PLANE_DIRTY_RECTS) &&
-			psde->pipe_hw->ops.setup_format)
-		_sde_plane_update_format_and_rects(psde, pstate, fmt);
-
-	sde_color_process_plane_setup(plane);
-
-	/* update sharpening */
-	if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
-		psde->pipe_hw->ops.setup_sharpening)
-		_sde_plane_update_sharpening(psde);
-
-	_sde_plane_set_qos_lut(plane, fb);
-	_sde_plane_set_danger_lut(plane, fb);
-
-	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
-		_sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
-		_sde_plane_set_ot_limit(plane, crtc);
-		if (pstate->dirty & SDE_PLANE_DIRTY_PERF)
-			_sde_plane_set_ts_prefill(plane, pstate);
-	}
-
-	_sde_plane_set_qos_remap(plane);
-
-	/* clear dirty */
-	pstate->dirty = 0x0;
-}
-
-static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
-				struct drm_plane_state *old_state)
-{
-	struct sde_plane *psde;
-	struct drm_plane_state *state;
-	struct sde_plane_state *pstate;
-	struct sde_plane_state *old_pstate;
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb;
-	int idx;
-	int dirty_prop_flag;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return -EINVAL;
-	} else if (!plane->state) {
-		SDE_ERROR("invalid plane state\n");
-		return -EINVAL;
-	} else if (!old_state) {
-		SDE_ERROR("invalid old state\n");
-		return -EINVAL;
-	}
-
-	psde = to_sde_plane(plane);
-	state = plane->state;
-
-	pstate = to_sde_plane_state(state);
-
-	old_pstate = to_sde_plane_state(old_state);
-
-	crtc = state->crtc;
-	fb = state->fb;
-	if (!crtc || !fb) {
-		SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
-				!crtc, !fb);
-		return -EINVAL;
-	}
-
-	SDE_DEBUG(
-		"plane%d sspp:%dx%d/%4.4s/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
-			plane->base.id,
-			state->fb->width, state->fb->height,
-			(char *) &state->fb->format->format,
-			state->fb->modifier,
-			state->src_w >> 16, state->src_h >> 16,
-			state->src_x >> 16, state->src_y >> 16,
-			pstate->rotation,
-			state->crtc_w, state->crtc_h,
-			state->crtc_x, state->crtc_y);
-
-	/* force reprogramming of all the parameters, if the flag is set */
-	if (psde->revalidate) {
-		SDE_DEBUG("plane:%d - reconfigure all the parameters\n",
-				plane->base.id);
-		pstate->dirty = SDE_PLANE_DIRTY_ALL;
-		psde->revalidate = false;
-	}
-
-	/* determine what needs to be refreshed */
-	while ((idx = msm_property_pop_dirty(&psde->property_info,
-				&pstate->property_state)) >= 0) {
-		dirty_prop_flag = plane_prop_array[idx];
-		pstate->dirty |= dirty_prop_flag;
-
-		if (dirty_prop_flag == SDE_PLANE_DIRTY_ALL)
-			break;
-	}
-
-	/**
-	 * since plane_atomic_check is invoked before crtc_atomic_check
-	 * in the commit sequence, all the parameters for updating the
-	 * plane dirty flag will not be available during
-	 * plane_atomic_check as some features params are updated
-	 * in crtc_atomic_check (eg.:sDMA). So check for mode_change
-	 * before sspp update.
-	 */
-	_sde_plane_sspp_atomic_check_mode_changed(psde, state,
-								old_state);
-
-	/* re-program the output rects always if partial update roi changed */
-	if (sde_crtc_is_crtc_roi_dirty(crtc->state))
-		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
-
-	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
-		memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
-
-	_sde_plane_set_scanout(plane, pstate, &psde->pipe_cfg, fb);
-
-	/* early out if nothing dirty */
-	if (!pstate->dirty)
-		return 0;
-	pstate->pending = true;
-
-	psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
-	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
-
-	_sde_plane_update_properties(plane, crtc, fb);
-
-	return 0;
-}
-
-static void _sde_plane_atomic_disable(struct drm_plane *plane,
-				struct drm_plane_state *old_state)
-{
-	struct sde_plane *psde;
-	struct drm_plane_state *state;
-	struct sde_plane_state *pstate;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	} else if (!plane->state) {
-		SDE_ERROR("invalid plane state\n");
-		return;
-	} else if (!old_state) {
-		SDE_ERROR("invalid old state\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	state = plane->state;
-	pstate = to_sde_plane_state(state);
-
-	SDE_EVT32(DRMID(plane), is_sde_plane_virtual(plane),
-			pstate->multirect_mode);
-
-	pstate->pending = true;
-
-	if (is_sde_plane_virtual(plane) &&
-			psde->pipe_hw && psde->pipe_hw->ops.setup_multirect)
-		psde->pipe_hw->ops.setup_multirect(psde->pipe_hw,
-				SDE_SSPP_RECT_SOLO, SDE_SSPP_MULTIRECT_NONE);
-}
-
-static void sde_plane_atomic_update(struct drm_plane *plane,
-				struct drm_plane_state *old_state)
-{
-	struct sde_plane *psde;
-	struct drm_plane_state *state;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	} else if (!plane->state) {
-		SDE_ERROR("invalid plane state\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	psde->is_error = false;
-	state = plane->state;
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	if (!sde_plane_enabled(state)) {
-		_sde_plane_atomic_disable(plane, old_state);
-	} else {
-		int ret;
-
-		ret = sde_plane_sspp_atomic_update(plane, old_state);
-		/* atomic_check should have ensured that this doesn't fail */
-		WARN_ON(ret < 0);
-	}
-}
-
-void sde_plane_restore(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-
-	if (!plane || !plane->state) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-
-	/*
-	 * Revalidate is only true here if idle PC occurred and
-	 * there is no plane state update in current commit cycle.
-	 */
-	if (!psde->revalidate)
-		return;
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	/* last plane state is same as current state */
-	sde_plane_atomic_update(plane, plane->state);
-}
-
-bool sde_plane_is_cache_required(struct drm_plane *plane)
-{
-	struct sde_plane_state *pstate;
-
-	if (!plane || !plane->state) {
-		SDE_ERROR("invalid plane\n");
-		return false;
-	}
-
-	pstate = to_sde_plane_state(plane->state);
-
-	/* check if llcc is required for the plane */
-	if (pstate->sc_cfg.rd_en)
-		return true;
-	else
-		return false;
-}
-
-static void _sde_plane_install_non_master_properties(struct sde_plane *psde)
-{
-	char feature_name[256];
-
-	if (psde->pipe_sblk->maxhdeciexp) {
-		msm_property_install_range(&psde->property_info,
-				"h_decimate", 0x0, 0,
-				psde->pipe_sblk->maxhdeciexp, 0,
-				PLANE_PROP_H_DECIMATE);
-	}
-
-	if (psde->pipe_sblk->maxvdeciexp) {
-		msm_property_install_range(&psde->property_info,
-				"v_decimate", 0x0, 0,
-				psde->pipe_sblk->maxvdeciexp, 0,
-				PLANE_PROP_V_DECIMATE);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-		msm_property_install_range(
-				&psde->property_info, "scaler_v2",
-				0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
-		msm_property_install_blob(&psde->property_info,
-				"lut_ed", 0, PLANE_PROP_SCALER_LUT_ED);
-		msm_property_install_blob(&psde->property_info,
-				"lut_cir", 0,
-				PLANE_PROP_SCALER_LUT_CIR);
-		msm_property_install_blob(&psde->property_info,
-				"lut_sep", 0,
-				PLANE_PROP_SCALER_LUT_SEP);
-	} else if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3LITE)) {
-		msm_property_install_range(
-				&psde->property_info, "scaler_v2",
-				0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
-		msm_property_install_blob(&psde->property_info,
-				"lut_sep", 0,
-				PLANE_PROP_SCALER_LUT_SEP);
-	} else if (psde->features & SDE_SSPP_SCALER) {
-		msm_property_install_range(
-				&psde->property_info, "scaler_v1", 0x0,
-				0, ~0, 0, PLANE_PROP_SCALER_V1);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_CSC) ||
-		psde->features & BIT(SDE_SSPP_CSC_10BIT))
-		msm_property_install_volatile_range(
-				&psde->property_info, "csc_v1", 0x0,
-				0, ~0, 0, PLANE_PROP_CSC_V1);
-
-	if (psde->features & BIT(SDE_SSPP_HSIC)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_HUE_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_HUE_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_SATURATION_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_SATURATION_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_VALUE_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_VALUE_ADJUST);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_CONTRAST_V",
-			psde->pipe_sblk->hsic_blk.version >> 16);
-		msm_property_install_range(&psde->property_info,
-			feature_name, 0, 0, 0xFFFFFFFF, 0,
-			PLANE_PROP_CONTRAST_ADJUST);
-	}
-
-}
-
-/* helper to install properties which are common to planes and crtcs */
-static void _sde_plane_install_properties(struct drm_plane *plane,
-	struct sde_mdss_cfg *catalog, u32 master_plane_id)
-{
-	static const struct drm_prop_enum_list e_blend_op[] = {
-		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
-		{SDE_DRM_BLEND_OP_OPAQUE,         "opaque"},
-		{SDE_DRM_BLEND_OP_PREMULTIPLIED,  "premultiplied"},
-		{SDE_DRM_BLEND_OP_COVERAGE,       "coverage"}
-	};
-	static const struct drm_prop_enum_list e_src_config[] = {
-		{SDE_DRM_DEINTERLACE, "deinterlace"}
-	};
-	static const struct drm_prop_enum_list e_fb_translation_mode[] = {
-		{SDE_DRM_FB_NON_SEC, "non_sec"},
-		{SDE_DRM_FB_SEC, "sec"},
-		{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
-		{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
-	};
-	static const struct drm_prop_enum_list e_multirect_mode[] = {
-		{SDE_SSPP_MULTIRECT_NONE, "none"},
-		{SDE_SSPP_MULTIRECT_PARALLEL, "parallel"},
-		{SDE_SSPP_MULTIRECT_TIME_MX,  "serial"},
-	};
-	const struct sde_format_extended *format_list;
-	struct sde_kms_info *info;
-	struct sde_plane *psde = to_sde_plane(plane);
-	int zpos_max = 255;
-	int zpos_def = 0;
-	char feature_name[256];
-
-	if (!plane || !psde) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	} else if (!psde->pipe_hw || !psde->pipe_sblk) {
-		SDE_ERROR("invalid plane, pipe_hw %d pipe_sblk %d\n",
-				!psde->pipe_hw, !psde->pipe_sblk);
-		return;
-	} else if (!catalog) {
-		SDE_ERROR("invalid catalog\n");
-		return;
-	}
-
-	psde->catalog = catalog;
-
-	if (sde_is_custom_client()) {
-		if (catalog->mixer_count &&
-				catalog->mixer[0].sblk->maxblendstages) {
-			zpos_max = catalog->mixer[0].sblk->maxblendstages - 1;
-			if (zpos_max > SDE_STAGE_MAX - SDE_STAGE_0 - 1)
-				zpos_max = SDE_STAGE_MAX - SDE_STAGE_0 - 1;
-		}
-	} else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
-		/* reserve zpos == 0 for primary planes */
-		zpos_def = drm_plane_index(plane) + 1;
-	}
-
-	msm_property_install_range(&psde->property_info, "zpos",
-		0x0, 0, zpos_max, zpos_def, PLANE_PROP_ZPOS);
-
-	msm_property_install_range(&psde->property_info, "alpha",
-		0x0, 0, 255, 255, PLANE_PROP_ALPHA);
-
-	/* linux default file descriptor range on each process */
-	msm_property_install_range(&psde->property_info, "input_fence",
-		0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
-
-	if (!master_plane_id)
-		_sde_plane_install_non_master_properties(psde);
-
-	if (psde->features & BIT(SDE_SSPP_EXCL_RECT))
-		msm_property_install_volatile_range(&psde->property_info,
-			"excl_rect_v1", 0x0, 0, ~0, 0, PLANE_PROP_EXCL_RECT_V1);
-
-	sde_plane_rot_install_properties(plane, catalog);
-
-	msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0,
-		e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP);
-
-	msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1,
-		e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG);
-
-	if (psde->pipe_hw->ops.setup_solidfill)
-		msm_property_install_range(&psde->property_info, "color_fill",
-				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
-
-	msm_property_install_range(&psde->property_info,
-			"prefill_size", 0x0, 0, ~0, 0,
-			PLANE_PROP_PREFILL_SIZE);
-	msm_property_install_range(&psde->property_info,
-			"prefill_time", 0x0, 0, ~0, 0,
-			PLANE_PROP_PREFILL_TIME);
-
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
-	if (!info) {
-		SDE_ERROR("failed to allocate info memory\n");
-		return;
-	}
-
-	msm_property_install_blob(&psde->property_info, "capabilities",
-		DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
-	sde_kms_info_reset(info);
-
-	if (!master_plane_id) {
-		format_list = psde->pipe_sblk->format_list;
-	} else {
-		format_list = psde->pipe_sblk->virt_format_list;
-		sde_kms_info_add_keyint(info, "primary_smart_plane_id",
-						master_plane_id);
-		msm_property_install_enum(&psde->property_info,
-			"multirect_mode", 0x0, 0, e_multirect_mode,
-			ARRAY_SIZE(e_multirect_mode),
-			PLANE_PROP_MULTIRECT_MODE);
-	}
-
-	if (format_list) {
-		sde_kms_info_start(info, "pixel_formats");
-		while (format_list->fourcc_format) {
-			sde_kms_info_append_format(info,
-					format_list->fourcc_format,
-					format_list->modifier);
-			++format_list;
-		}
-		sde_kms_info_stop(info);
-	}
-
-	if (psde->pipe_hw && psde->pipe_hw->ops.get_scaler_ver)
-		sde_kms_info_add_keyint(info, "scaler_step_ver",
-			psde->pipe_hw->ops.get_scaler_ver(psde->pipe_hw));
-
-	sde_kms_info_add_keyint(info, "max_linewidth",
-			psde->pipe_sblk->maxlinewidth);
-	sde_kms_info_add_keyint(info, "max_upscale",
-			psde->pipe_sblk->maxupscale);
-	sde_kms_info_add_keyint(info, "max_downscale",
-			psde->pipe_sblk->maxdwnscale);
-	sde_kms_info_add_keyint(info, "max_horizontal_deci",
-			psde->pipe_sblk->maxhdeciexp);
-	sde_kms_info_add_keyint(info, "max_vertical_deci",
-			psde->pipe_sblk->maxvdeciexp);
-	sde_kms_info_add_keyint(info, "max_per_pipe_bw",
-			psde->pipe_sblk->max_per_pipe_bw * 1000LL);
-
-	if ((!master_plane_id &&
-		(psde->features & BIT(SDE_SSPP_INVERSE_PMA))) ||
-		(psde->features & BIT(SDE_SSPP_DGM_INVERSE_PMA))) {
-		msm_property_install_range(&psde->property_info,
-			"inverse_pma", 0x0, 0, 1, 0, PLANE_PROP_INVERSE_PMA);
-		sde_kms_info_add_keyint(info, "inverse_pma", 1);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_DGM_CSC)) {
-		msm_property_install_volatile_range(
-			&psde->property_info, "csc_dma_v1", 0x0,
-			0, ~0, 0, PLANE_PROP_CSC_DMA_V1);
-		sde_kms_info_add_keyint(info, "csc_dma_v1", 1);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_SEC_UI_ALLOWED))
-		sde_kms_info_add_keyint(info, "sec_ui_allowed", 1);
-	if (psde->features & BIT(SDE_SSPP_BLOCK_SEC_UI))
-		sde_kms_info_add_keyint(info, "block_sec_ui", 1);
-
-	if (psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1)) {
-		const struct sde_format_extended *inline_rot_fmt_list;
-
-		sde_kms_info_add_keyint(info, "true_inline_rot_rev", 1);
-		sde_kms_info_add_keyint(info,
-			"true_inline_dwnscale_rt",
-			(int) (psde->pipe_sblk->in_rot_maxdwnscale_rt_num /
-				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom));
-		sde_kms_info_add_keyint(info,
-				"true_inline_dwnscale_rt_numerator",
-				psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
-		sde_kms_info_add_keyint(info,
-				"true_inline_dwnscale_rt_denominator",
-				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
-		sde_kms_info_add_keyint(info, "true_inline_dwnscale_nrt",
-			psde->pipe_sblk->in_rot_maxdwnscale_nrt);
-		sde_kms_info_add_keyint(info, "true_inline_max_height",
-			psde->pipe_sblk->in_rot_maxheight);
-		sde_kms_info_add_keyint(info, "true_inline_prefill_fudge_lines",
-			psde->pipe_sblk->in_rot_prefill_fudge_lines);
-		sde_kms_info_add_keyint(info, "true_inline_prefill_lines_nv12",
-			psde->pipe_sblk->in_rot_prefill_lines_nv12);
-		sde_kms_info_add_keyint(info, "true_inline_prefill_lines",
-			psde->pipe_sblk->in_rot_prefill_lines);
-
-		inline_rot_fmt_list = psde->pipe_sblk->in_rot_format_list;
-
-		if (inline_rot_fmt_list) {
-			sde_kms_info_start(info, "inline_rot_pixel_formats");
-			while (inline_rot_fmt_list->fourcc_format) {
-				sde_kms_info_append_format(info,
-					inline_rot_fmt_list->fourcc_format,
-					inline_rot_fmt_list->modifier);
-				++inline_rot_fmt_list;
-			}
-			sde_kms_info_stop(info);
-		}
-
-	}
-
-	msm_property_set_blob(&psde->property_info, &psde->blob_info,
-			info->data, SDE_KMS_INFO_DATALEN(info),
-			PLANE_PROP_INFO);
-
-	kfree(info);
-
-	if (psde->features & BIT(SDE_SSPP_MEMCOLOR)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_SKIN_COLOR_V",
-			psde->pipe_sblk->memcolor_blk.version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_SKIN_COLOR);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_SKY_COLOR_V",
-			psde->pipe_sblk->memcolor_blk.version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_SKY_COLOR);
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_SSPP_FOLIAGE_COLOR_V",
-			psde->pipe_sblk->memcolor_blk.version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_FOLIAGE_COLOR);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_VIG_GAMUT)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_VIG_3D_LUT_GAMUT_V",
-			psde->pipe_sblk->gamut_blk.version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_VIG_GAMUT);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_VIG_IGC)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_VIG_1D_LUT_IGC_V",
-			psde->pipe_sblk->igc_blk[0].version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_VIG_IGC);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_DMA_IGC)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_DGM_1D_LUT_IGC_V",
-			psde->pipe_sblk->igc_blk[0].version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_DMA_IGC);
-	}
-
-	if (psde->features & BIT(SDE_SSPP_DMA_GC)) {
-		snprintf(feature_name, sizeof(feature_name), "%s%d",
-			"SDE_DGM_1D_LUT_GC_V",
-			psde->pipe_sblk->gc_blk[0].version >> 16);
-		msm_property_install_blob(&psde->property_info, feature_name, 0,
-			PLANE_PROP_DMA_GC);
-	}
-
-	msm_property_install_enum(&psde->property_info, "fb_translation_mode",
-			0x0,
-			0, e_fb_translation_mode,
-			ARRAY_SIZE(e_fb_translation_mode),
-			PLANE_PROP_FB_TRANSLATION_MODE);
-}
-
-static inline void _sde_plane_set_csc_v1(struct sde_plane *psde,
-		void __user *usr_ptr)
-{
-	struct sde_drm_csc_v1 csc_v1;
-	int i;
-
-	if (!psde) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	psde->csc_usr_ptr = NULL;
-	if (!usr_ptr) {
-		SDE_DEBUG_PLANE(psde, "csc data removed\n");
-		return;
-	}
-
-	if (copy_from_user(&csc_v1, usr_ptr, sizeof(csc_v1))) {
-		SDE_ERROR_PLANE(psde, "failed to copy csc data\n");
-		return;
-	}
-
-	/* populate from user space */
-	for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
-		psde->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
-	for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
-		psde->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
-		psde->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
-	}
-	for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
-		psde->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
-		psde->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
-	}
-	psde->csc_usr_ptr = &psde->csc_cfg;
-}
-
-static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde,
-		struct sde_plane_state *pstate, void __user *usr)
-{
-	struct sde_drm_scaler_v1 scale_v1;
-	struct sde_hw_pixel_ext *pe;
-	int i;
-
-	if (!psde || !pstate) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
-	if (!usr) {
-		SDE_DEBUG_PLANE(psde, "scale data removed\n");
-		return;
-	}
-
-	if (copy_from_user(&scale_v1, usr, sizeof(scale_v1))) {
-		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
-		return;
-	}
-
-	/* force property to be dirty, even if the pointer didn't change */
-	msm_property_set_dirty(&psde->property_info,
-			&pstate->property_state, PLANE_PROP_SCALER_V1);
-
-	/* populate from user space */
-	pe = &pstate->pixel_ext;
-	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		pe->init_phase_x[i] = scale_v1.init_phase_x[i];
-		pe->phase_step_x[i] = scale_v1.phase_step_x[i];
-		pe->init_phase_y[i] = scale_v1.init_phase_y[i];
-		pe->phase_step_y[i] = scale_v1.phase_step_y[i];
-
-		pe->horz_filter[i] = scale_v1.horz_filter[i];
-		pe->vert_filter[i] = scale_v1.vert_filter[i];
-	}
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		pe->left_ftch[i] = scale_v1.pe.left_ftch[i];
-		pe->right_ftch[i] = scale_v1.pe.right_ftch[i];
-		pe->left_rpt[i] = scale_v1.pe.left_rpt[i];
-		pe->right_rpt[i] = scale_v1.pe.right_rpt[i];
-		pe->roi_w[i] = scale_v1.pe.num_ext_pxls_lr[i];
-
-		pe->top_ftch[i] = scale_v1.pe.top_ftch[i];
-		pe->btm_ftch[i] = scale_v1.pe.btm_ftch[i];
-		pe->top_rpt[i] = scale_v1.pe.top_rpt[i];
-		pe->btm_rpt[i] = scale_v1.pe.btm_rpt[i];
-		pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
-	}
-
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1;
-
-	SDE_EVT32_VERBOSE(DRMID(&psde->base));
-	SDE_DEBUG_PLANE(psde, "user property data copied\n");
-}
-
-static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
-		struct sde_plane_state *pstate, void __user *usr)
-{
-	struct sde_drm_scaler_v2 scale_v2;
-	struct sde_hw_pixel_ext *pe;
-	int i;
-	struct sde_hw_scaler3_cfg *cfg;
-
-	if (!psde || !pstate) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	cfg = &pstate->scaler3_cfg;
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
-	if (!usr) {
-		SDE_DEBUG_PLANE(psde, "scale data removed\n");
-		cfg->enable = 0;
-		goto end;
-	}
-
-	if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) {
-		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
-		return;
-	}
-
-	/* detach/ignore user data if 'disabled' */
-	if (!scale_v2.enable) {
-		SDE_DEBUG_PLANE(psde, "scale data removed\n");
-		cfg->enable = 0;
-		goto end;
-	}
-
-	/* populate from user space */
-	sde_set_scaler_v2(cfg, &scale_v2);
-
-	pe = &pstate->pixel_ext;
-	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
-
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
-		pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
-		pe->left_rpt[i] = scale_v2.pe.left_rpt[i];
-		pe->right_rpt[i] = scale_v2.pe.right_rpt[i];
-		pe->roi_w[i] = scale_v2.pe.num_ext_pxls_lr[i];
-
-		pe->top_ftch[i] = scale_v2.pe.top_ftch[i];
-		pe->btm_ftch[i] = scale_v2.pe.btm_ftch[i];
-		pe->top_rpt[i] = scale_v2.pe.top_rpt[i];
-		pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
-		pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
-	}
-	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK;
-
-end:
-	/* force property to be dirty, even if the pointer didn't change */
-	msm_property_set_dirty(&psde->property_info,
-			&pstate->property_state, PLANE_PROP_SCALER_V2);
-
-	SDE_EVT32_VERBOSE(DRMID(&psde->base), cfg->enable, cfg->de.enable,
-			cfg->src_width[0], cfg->src_height[0],
-			cfg->dst_width, cfg->dst_height);
-	SDE_DEBUG_PLANE(psde, "user property data copied\n");
-}
-
-static void _sde_plane_set_excl_rect_v1(struct sde_plane *psde,
-		struct sde_plane_state *pstate, void __user *usr_ptr)
-{
-	struct drm_clip_rect excl_rect_v1;
-
-	if (!psde || !pstate) {
-		SDE_ERROR("invalid argument(s)\n");
-		return;
-	}
-
-	if (!usr_ptr) {
-		memset(&pstate->excl_rect, 0, sizeof(pstate->excl_rect));
-		SDE_DEBUG_PLANE(psde, "excl_rect data cleared\n");
-		return;
-	}
-
-	if (copy_from_user(&excl_rect_v1, usr_ptr, sizeof(excl_rect_v1))) {
-		SDE_ERROR_PLANE(psde, "failed to copy excl_rect data\n");
-		return;
-	}
-
-	/* populate from user space */
-	pstate->excl_rect.x = excl_rect_v1.x1;
-	pstate->excl_rect.y = excl_rect_v1.y1;
-	pstate->excl_rect.w = excl_rect_v1.x2 - excl_rect_v1.x1;
-	pstate->excl_rect.h = excl_rect_v1.y2 - excl_rect_v1.y1;
-
-	SDE_DEBUG_PLANE(psde, "excl_rect: {%d,%d,%d,%d}\n",
-			pstate->excl_rect.x, pstate->excl_rect.y,
-			pstate->excl_rect.w, pstate->excl_rect.h);
-}
-
-static int sde_plane_atomic_set_property(struct drm_plane *plane,
-		struct drm_plane_state *state, struct drm_property *property,
-		uint64_t val)
-{
-	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
-	struct sde_plane_state *pstate;
-	int idx, ret = -EINVAL;
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-	} else if (!state) {
-		SDE_ERROR_PLANE(psde, "invalid state\n");
-	} else {
-		pstate = to_sde_plane_state(state);
-		ret = msm_property_atomic_set(&psde->property_info,
-				&pstate->property_state, property, val);
-		if (!ret) {
-			idx = msm_property_index(&psde->property_info,
-					property);
-			switch (idx) {
-			case PLANE_PROP_INPUT_FENCE:
-				_sde_plane_set_input_fence(psde, pstate, val);
-				break;
-			case PLANE_PROP_CSC_V1:
-			case PLANE_PROP_CSC_DMA_V1:
-				_sde_plane_set_csc_v1(psde, (void __user *)val);
-				break;
-			case PLANE_PROP_SCALER_V1:
-				_sde_plane_set_scaler_v1(psde, pstate,
-						(void *)(uintptr_t)val);
-				break;
-			case PLANE_PROP_SCALER_V2:
-				_sde_plane_set_scaler_v2(psde, pstate,
-						(void *)(uintptr_t)val);
-				break;
-			case PLANE_PROP_EXCL_RECT_V1:
-				_sde_plane_set_excl_rect_v1(psde, pstate,
-						(void *)(uintptr_t)val);
-				break;
-			default:
-				/* nothing to do */
-				break;
-			}
-		}
-	}
-
-	SDE_DEBUG_PLANE(psde, "%s[%d] <= 0x%llx ret=%d\n",
-			property->name, property->base.id, val, ret);
-
-	return ret;
-}
-
-static int sde_plane_atomic_get_property(struct drm_plane *plane,
-		const struct drm_plane_state *state,
-		struct drm_property *property, uint64_t *val)
-{
-	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
-	struct sde_plane_state *pstate;
-	int ret = -EINVAL;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-	} else if (!state) {
-		SDE_ERROR("invalid state\n");
-	} else {
-		SDE_DEBUG_PLANE(psde, "\n");
-		pstate = to_sde_plane_state(state);
-		ret = msm_property_atomic_get(&psde->property_info,
-				&pstate->property_state, property, val);
-	}
-
-	return ret;
-}
-
-int sde_plane_helper_reset_custom_properties(struct drm_plane *plane,
-		struct drm_plane_state *plane_state)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-	struct drm_property *drm_prop;
-	enum msm_mdp_plane_property prop_idx;
-
-	if (!plane || !plane_state) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(plane_state);
-
-	for (prop_idx = 0; prop_idx < PLANE_PROP_COUNT; prop_idx++) {
-		uint64_t val = pstate->property_values[prop_idx].value;
-		uint64_t def;
-		int ret;
-
-		drm_prop = msm_property_index_to_drm_property(
-				&psde->property_info, prop_idx);
-		if (!drm_prop) {
-			/* not all props will be installed, based on caps */
-			SDE_DEBUG_PLANE(psde, "invalid property index %d\n",
-					prop_idx);
-			continue;
-		}
-
-		def = msm_property_get_default(&psde->property_info, prop_idx);
-		if (val == def)
-			continue;
-
-		SDE_DEBUG_PLANE(psde, "set prop %s idx %d from %llu to %llu\n",
-				drm_prop->name, prop_idx, val, def);
-
-		ret = sde_plane_atomic_set_property(plane, plane_state,
-				drm_prop, def);
-		if (ret) {
-			SDE_ERROR_PLANE(psde,
-					"set property failed, idx %d ret %d\n",
-					prop_idx, ret);
-			continue;
-		}
-	}
-
-	return 0;
-}
-
-static void sde_plane_destroy(struct drm_plane *plane)
-{
-	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	if (psde) {
-		_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
-
-		if (psde->blob_info)
-			drm_property_blob_put(psde->blob_info);
-		msm_property_destroy(&psde->property_info);
-		mutex_destroy(&psde->lock);
-
-		drm_plane_helper_disable(plane, NULL);
-
-		/* this will destroy the states as well */
-		drm_plane_cleanup(plane);
-
-		if (psde->pipe_hw)
-			sde_hw_sspp_destroy(psde->pipe_hw);
-
-		kfree(psde);
-	}
-}
-
-static void sde_plane_destroy_state(struct drm_plane *plane,
-		struct drm_plane_state *state)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-
-	if (!plane || !state) {
-		SDE_ERROR("invalid arg(s), plane %d state %d\n",
-				!plane, !state);
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	pstate = to_sde_plane_state(state);
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	/* remove ref count for frame buffers */
-	if (state->fb)
-		drm_framebuffer_put(state->fb);
-
-	/* remove ref count for fence */
-	if (pstate->input_fence)
-		sde_sync_put(pstate->input_fence);
-
-	/* destroy value helper */
-	msm_property_destroy_state(&psde->property_info, pstate,
-			&pstate->property_state);
-}
-
-static struct drm_plane_state *
-sde_plane_duplicate_state(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-	struct sde_plane_state *old_state;
-	struct drm_property *drm_prop;
-	uint64_t input_fence_default;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return NULL;
-	} else if (!plane->state) {
-		SDE_ERROR("invalid plane state\n");
-		return NULL;
-	}
-
-	old_state = to_sde_plane_state(plane->state);
-	psde = to_sde_plane(plane);
-	pstate = msm_property_alloc_state(&psde->property_info);
-	if (!pstate) {
-		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
-		return NULL;
-	}
-
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	/* duplicate value helper */
-	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
-			&pstate->property_state, pstate->property_values);
-
-	/* clear out any input fence */
-	pstate->input_fence = 0;
-	input_fence_default = msm_property_get_default(
-			&psde->property_info, PLANE_PROP_INPUT_FENCE);
-	drm_prop = msm_property_index_to_drm_property(
-				&psde->property_info, PLANE_PROP_INPUT_FENCE);
-	if (msm_property_atomic_set(&psde->property_info,
-				&pstate->property_state, drm_prop,
-				input_fence_default))
-		SDE_DEBUG_PLANE(psde,
-				"error clearing duplicated input fence\n");
-
-	pstate->dirty = 0x0;
-	pstate->pending = false;
-
-	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
-
-	return &pstate->base;
-}
-
-static void sde_plane_reset(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_plane_state *pstate;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-
-	psde = to_sde_plane(plane);
-	SDE_DEBUG_PLANE(psde, "\n");
-
-	if (plane->state && !sde_crtc_is_reset_required(plane->state->crtc)) {
-		SDE_DEBUG_PLANE(psde, "avoid reset for plane\n");
-		return;
-	}
-
-	/* remove previous state, if present */
-	if (plane->state) {
-		sde_plane_destroy_state(plane, plane->state);
-		plane->state = 0;
-	}
-
-	pstate = msm_property_alloc_state(&psde->property_info);
-	if (!pstate) {
-		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
-		return;
-	}
-
-	/* reset value helper */
-	msm_property_reset_state(&psde->property_info, pstate,
-			&pstate->property_state,
-			pstate->property_values);
-
-	pstate->base.plane = plane;
-
-	plane->state = &pstate->base;
-}
-
-u32 sde_plane_get_ubwc_error(struct drm_plane *plane)
-{
-	u32 ubwc_error = 0;
-	struct sde_plane *psde;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return 0;
-	}
-	psde = to_sde_plane(plane);
-
-	if (!psde->is_virtual && psde->pipe_hw->ops.get_ubwc_error)
-		ubwc_error = psde->pipe_hw->ops.get_ubwc_error(psde->pipe_hw);
-
-	return ubwc_error;
-}
-
-void sde_plane_clear_ubwc_error(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-
-	if (!plane) {
-		SDE_ERROR("invalid plane\n");
-		return;
-	}
-	psde = to_sde_plane(plane);
-
-	if (psde->pipe_hw->ops.clear_ubwc_error)
-		psde->pipe_hw->ops.clear_ubwc_error(psde->pipe_hw);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static ssize_t _sde_plane_danger_read(struct file *file,
-			char __user *buff, size_t count, loff_t *ppos)
-{
-	struct sde_kms *kms = file->private_data;
-	struct sde_mdss_cfg *cfg = kms->catalog;
-	int len = 0;
-	char buf[40] = {'\0'};
-
-	if (!cfg)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0; /* the end */
-
-	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;   /* increase offset */
-
-	return len;
-}
-
-static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
-{
-	struct drm_plane *plane;
-
-	drm_for_each_plane(plane, kms->dev) {
-		if (plane->fb && plane->state) {
-			sde_plane_danger_signal_ctrl(plane, enable);
-			SDE_DEBUG("plane:%d img:%dx%d ",
-				plane->base.id, plane->fb->width,
-				plane->fb->height);
-			SDE_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
-				plane->state->src_x >> 16,
-				plane->state->src_y >> 16,
-				plane->state->src_w >> 16,
-				plane->state->src_h >> 16,
-				plane->state->crtc_x, plane->state->crtc_y,
-				plane->state->crtc_w, plane->state->crtc_h);
-		} else {
-			SDE_DEBUG("Inactive plane:%d\n", plane->base.id);
-		}
-	}
-}
-
-static ssize_t _sde_plane_danger_write(struct file *file,
-		    const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_kms *kms = file->private_data;
-	struct sde_mdss_cfg *cfg = kms->catalog;
-	int disable_panic;
-	char buf[10];
-
-	if (!cfg)
-		return -EFAULT;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (kstrtoint(buf, 0, &disable_panic))
-		return -EFAULT;
-
-	if (disable_panic) {
-		/* Disable panic signal for all active pipes */
-		SDE_DEBUG("Disabling danger:\n");
-		_sde_plane_set_danger_state(kms, false);
-		kms->has_danger_ctrl = false;
-	} else {
-		/* Enable panic signal for all active pipes */
-		SDE_DEBUG("Enabling danger:\n");
-		kms->has_danger_ctrl = true;
-		_sde_plane_set_danger_state(kms, true);
-	}
-
-	return count;
-}
-
-static const struct file_operations sde_plane_danger_enable = {
-	.open = simple_open,
-	.read = _sde_plane_danger_read,
-	.write = _sde_plane_danger_write,
-};
-
-static int _sde_plane_init_debugfs(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-	struct sde_kms *kms;
-	struct msm_drm_private *priv;
-	const struct sde_sspp_sub_blks *sblk = 0;
-	const struct sde_sspp_cfg *cfg = 0;
-
-	if (!plane || !plane->dev) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		SDE_ERROR("invalid KMS reference\n");
-		return -EINVAL;
-	}
-
-	kms = to_sde_kms(priv->kms);
-	psde = to_sde_plane(plane);
-
-	if (psde && psde->pipe_hw)
-		cfg = psde->pipe_hw->cap;
-	if (cfg)
-		sblk = cfg->sblk;
-
-	if (!sblk)
-		return 0;
-
-	/* create overall sub-directory for the pipe */
-	psde->debugfs_root =
-		debugfs_create_dir(psde->pipe_name,
-				plane->dev->primary->debugfs_root);
-
-	if (!psde->debugfs_root)
-		return -ENOMEM;
-
-	/* don't error check these */
-	debugfs_create_x32("features", 0400,
-		psde->debugfs_root, &psde->features);
-
-	if (cfg->features & BIT(SDE_SSPP_SCALER_QSEED3) ||
-			cfg->features & BIT(SDE_SSPP_SCALER_QSEED3LITE) ||
-			cfg->features & BIT(SDE_SSPP_SCALER_QSEED2))
-		debugfs_create_bool("default_scaling",
-				0600,
-				psde->debugfs_root,
-				&psde->debugfs_default_scale);
-
-	if (cfg->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1)) {
-		debugfs_create_u32("in_rot_max_downscale_rt_num",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
-		debugfs_create_u32("in_rot_max_downscale_rt_denom",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
-		debugfs_create_u32("in_rot_max_downscale_nrt",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_nrt);
-		debugfs_create_u32("in_rot_max_height",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxheight);
-	}
-
-	debugfs_create_u32("xin_id",
-			0400,
-			psde->debugfs_root,
-			(u32 *) &cfg->xin_id);
-	debugfs_create_x32("creq_vblank",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &sblk->creq_vblank);
-	debugfs_create_x32("danger_vblank",
-			0600,
-			psde->debugfs_root,
-			(u32 *) &sblk->danger_vblank);
-
-	debugfs_create_file("disable_danger",
-			0600,
-			psde->debugfs_root,
-			kms, &sde_plane_danger_enable);
-
-	return 0;
-}
-
-static void _sde_plane_destroy_debugfs(struct drm_plane *plane)
-{
-	struct sde_plane *psde;
-
-	if (!plane)
-		return;
-	psde = to_sde_plane(plane);
-
-	debugfs_remove_recursive(psde->debugfs_root);
-}
-#else
-static int _sde_plane_init_debugfs(struct drm_plane *plane)
-{
-	return 0;
-}
-static void _sde_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
-#endif
-
-static int sde_plane_late_register(struct drm_plane *plane)
-{
-	return _sde_plane_init_debugfs(plane);
-}
-
-static void sde_plane_early_unregister(struct drm_plane *plane)
-{
-	_sde_plane_destroy_debugfs(plane);
-}
-
-static const struct drm_plane_funcs sde_plane_funcs = {
-		.update_plane = drm_atomic_helper_update_plane,
-		.disable_plane = drm_atomic_helper_disable_plane,
-		.destroy = sde_plane_destroy,
-		.atomic_set_property = sde_plane_atomic_set_property,
-		.atomic_get_property = sde_plane_atomic_get_property,
-		.reset = sde_plane_reset,
-		.atomic_duplicate_state = sde_plane_duplicate_state,
-		.atomic_destroy_state = sde_plane_destroy_state,
-		.late_register = sde_plane_late_register,
-		.early_unregister = sde_plane_early_unregister,
-};
-
-static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
-		.prepare_fb = sde_plane_prepare_fb,
-		.cleanup_fb = sde_plane_cleanup_fb,
-		.atomic_check = sde_plane_atomic_check,
-		.atomic_update = sde_plane_atomic_update,
-};
-
-enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
-{
-	return plane ? to_sde_plane(plane)->pipe : SSPP_NONE;
-}
-
-bool is_sde_plane_virtual(struct drm_plane *plane)
-{
-	return plane ? to_sde_plane(plane)->is_virtual : false;
-}
-
-/* initialize plane */
-struct drm_plane *sde_plane_init(struct drm_device *dev,
-		uint32_t pipe, bool primary_plane,
-		unsigned long possible_crtcs, u32 master_plane_id)
-{
-	struct drm_plane *plane = NULL, *master_plane = NULL;
-	const struct sde_format_extended *format_list;
-	struct sde_plane *psde;
-	struct msm_drm_private *priv;
-	struct sde_kms *kms;
-	enum drm_plane_type type;
-	int ret = -EINVAL;
-
-	if (!dev) {
-		SDE_ERROR("[%u]device is NULL\n", pipe);
-		goto exit;
-	}
-
-	priv = dev->dev_private;
-	if (!priv) {
-		SDE_ERROR("[%u]private data is NULL\n", pipe);
-		goto exit;
-	}
-
-	if (!priv->kms) {
-		SDE_ERROR("[%u]invalid KMS reference\n", pipe);
-		goto exit;
-	}
-	kms = to_sde_kms(priv->kms);
-
-	if (!kms->catalog) {
-		SDE_ERROR("[%u]invalid catalog reference\n", pipe);
-		goto exit;
-	}
-
-	/* create and zero local structure */
-	psde = kzalloc(sizeof(*psde), GFP_KERNEL);
-	if (!psde) {
-		SDE_ERROR("[%u]failed to allocate local plane struct\n", pipe);
-		ret = -ENOMEM;
-		goto exit;
-	}
-
-	/* cache local stuff for later */
-	plane = &psde->base;
-	psde->pipe = pipe;
-	psde->is_virtual = (master_plane_id != 0);
-	INIT_LIST_HEAD(&psde->mplane_list);
-	master_plane = drm_plane_find(dev, NULL, master_plane_id);
-	if (master_plane) {
-		struct sde_plane *mpsde = to_sde_plane(master_plane);
-
-		list_add_tail(&psde->mplane_list, &mpsde->mplane_list);
-	}
-
-	/* initialize underlying h/w driver */
-	psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog,
-							master_plane_id != 0);
-	if (IS_ERR(psde->pipe_hw)) {
-		SDE_ERROR("[%u]SSPP init failed\n", pipe);
-		ret = PTR_ERR(psde->pipe_hw);
-		goto clean_plane;
-	} else if (!psde->pipe_hw->cap || !psde->pipe_hw->cap->sblk) {
-		SDE_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
-		goto clean_sspp;
-	}
-
-	/* cache features mask for later */
-	psde->features = psde->pipe_hw->cap->features;
-	psde->perf_features = psde->pipe_hw->cap->perf_features;
-	psde->pipe_sblk = psde->pipe_hw->cap->sblk;
-	if (!psde->pipe_sblk) {
-		SDE_ERROR("[%u]invalid sblk\n", pipe);
-		goto clean_sspp;
-	}
-
-	if (!master_plane_id)
-		format_list = psde->pipe_sblk->format_list;
-	else
-		format_list = psde->pipe_sblk->virt_format_list;
-
-	psde->nformats = sde_populate_formats(format_list,
-				psde->formats,
-				0,
-				ARRAY_SIZE(psde->formats));
-
-	if (!psde->nformats) {
-		SDE_ERROR("[%u]no valid formats for plane\n", pipe);
-		goto clean_sspp;
-	}
-
-	if (psde->features & BIT(SDE_SSPP_CURSOR))
-		type = DRM_PLANE_TYPE_CURSOR;
-	else if (primary_plane)
-		type = DRM_PLANE_TYPE_PRIMARY;
-	else
-		type = DRM_PLANE_TYPE_OVERLAY;
-	ret = drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs,
-				psde->formats, psde->nformats,
-				NULL, type, NULL);
-	if (ret)
-		goto clean_sspp;
-
-	/* Populate static array of plane property flags */
-	_sde_plane_map_prop_to_dirty_bits();
-
-	/* success! finalize initialization */
-	drm_plane_helper_add(plane, &sde_plane_helper_funcs);
-
-	msm_property_init(&psde->property_info, &plane->base, dev,
-			priv->plane_property, psde->property_data,
-			PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
-			sizeof(struct sde_plane_state));
-
-	_sde_plane_install_properties(plane, kms->catalog, master_plane_id);
-
-	/* save user friendly pipe name for later */
-	snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
-
-	mutex_init(&psde->lock);
-
-	SDE_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", psde->pipe_name,
-					pipe, plane->base.id, master_plane_id);
-	return plane;
-
-clean_sspp:
-	if (psde && psde->pipe_hw)
-		sde_hw_sspp_destroy(psde->pipe_hw);
-clean_plane:
-	kfree(psde);
-exit:
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
deleted file mode 100644
index e4a45bfe..0000000
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _SDE_PLANE_H_
-#define _SDE_PLANE_H_
-
-#include <drm/drm_crtc.h>
-
-#include "msm_prop.h"
-#include "sde_kms.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_sspp.h"
-
-/* dirty bits for update function */
-#define SDE_PLANE_DIRTY_RECTS	0x1
-#define SDE_PLANE_DIRTY_FORMAT	0x2
-#define SDE_PLANE_DIRTY_SHARPEN	0x4
-#define SDE_PLANE_DIRTY_PERF	0x8
-#define SDE_PLANE_DIRTY_FB_TRANSLATION_MODE	0x10
-#define SDE_PLANE_DIRTY_VIG_GAMUT 0x20
-#define SDE_PLANE_DIRTY_VIG_IGC 0x40
-#define SDE_PLANE_DIRTY_DMA_IGC 0x80
-#define SDE_PLANE_DIRTY_DMA_GC 0x100
-#define SDE_PLANE_DIRTY_CP (SDE_PLANE_DIRTY_VIG_GAMUT |\
-		SDE_PLANE_DIRTY_VIG_IGC | SDE_PLANE_DIRTY_DMA_IGC |\
-		SDE_PLANE_DIRTY_DMA_GC)
-#define SDE_PLANE_DIRTY_ALL	(0xFFFFFFFF & ~(SDE_PLANE_DIRTY_CP))
-
-/**
- * enum sde_plane_sclcheck_state - User scaler data status
- *
- * @SDE_PLANE_SCLCHECK_NONE: No user data provided
- * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided
- * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data
- * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data
- * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data
- * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data
- */
-enum sde_plane_sclcheck_state {
-	SDE_PLANE_SCLCHECK_NONE,
-	SDE_PLANE_SCLCHECK_INVALID,
-	SDE_PLANE_SCLCHECK_SCALER_V1,
-	SDE_PLANE_SCLCHECK_SCALER_V1_CHECK,
-	SDE_PLANE_SCLCHECK_SCALER_V2,
-	SDE_PLANE_SCLCHECK_SCALER_V2_CHECK,
-};
-
-/**
- * struct sde_plane_state: Define sde extension of drm plane state object
- * @base:	base drm plane state object
- * @property_state: Local storage for msm_prop properties
- * @property_values:	cached plane property values
- * @aspace:	pointer to address space for input/output buffers
- * @input_fence:	dereferenced input fence pointer
- * @stage:	assigned by crtc blender
- * @excl_rect:	exclusion rect values
- * @dirty:	bitmask for which pipe h/w config functions need to be updated
- * @multirect_index: index of the rectangle of SSPP
- * @multirect_mode: parallel or time multiplex multirect mode
- * @const_alpha_en: const alpha channel is enabled for this HW pipe
- * @pending:	whether the current update is still pending
- * @defer_prepare_fb:	indicate if prepare_fb call was deferred
- * @pipe_order_flags: contains pipe order flags:
- *			SDE_SSPP_RIGHT - right pipe in source split pair
- * @scaler3_cfg: configuration data for scaler3
- * @pixel_ext: configuration data for pixel extensions
- * @scaler_check_state: indicates status of user provided pixel extension data
- * @cdp_cfg:	CDP configuration
- */
-struct sde_plane_state {
-	struct drm_plane_state base;
-	struct msm_property_state property_state;
-	struct msm_property_value property_values[PLANE_PROP_COUNT];
-	struct msm_gem_address_space *aspace;
-	void *input_fence;
-	enum sde_stage stage;
-	struct sde_rect excl_rect;
-	uint32_t dirty;
-	uint32_t multirect_index;
-	uint32_t multirect_mode;
-	bool const_alpha_en;
-	bool pending;
-	bool defer_prepare_fb;
-	uint32_t pipe_order_flags;
-
-	/* scaler configuration */
-	struct sde_hw_scaler3_cfg scaler3_cfg;
-	struct sde_hw_pixel_ext pixel_ext;
-	enum sde_plane_sclcheck_state scaler_check_state;
-
-	/* @sc_cfg: system_cache configuration */
-	struct sde_hw_pipe_sc_cfg sc_cfg;
-	uint32_t rotation;
-
-	struct sde_hw_pipe_cdp_cfg cdp_cfg;
-};
-
-/**
- * struct sde_multirect_plane_states: Defines multirect pair of drm plane states
- * @r0: drm plane configured on rect 0
- * @r1: drm plane configured on rect 1
- */
-struct sde_multirect_plane_states {
-	const struct drm_plane_state *r0;
-	const struct drm_plane_state *r1;
-};
-
-#define to_sde_plane_state(x) \
-	container_of(x, struct sde_plane_state, base)
-
-/**
- * sde_plane_get_property - Query integer value of plane property
- * @S: Pointer to plane state
- * @X: Property index, from enum msm_mdp_plane_property
- * Returns: Integer value of requested property
- */
-#define sde_plane_get_property(S, X) ((S) && ((X) < PLANE_PROP_COUNT) ? \
-	((S)->property_values[(X)].value) : 0)
-
-/**
- * sde_plane_pipe - return sspp identifier for the given plane
- * @plane:   Pointer to DRM plane object
- * Returns: sspp identifier of the given plane
- */
-enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
-
-/**
- * is_sde_plane_virtual - check for virtual plane
- * @plane: Pointer to DRM plane object
- * returns: true - if the plane is virtual
- *          false - if the plane is primary
- */
-bool is_sde_plane_virtual(struct drm_plane *plane);
-
-/**
- * sde_plane_ctl_flush - set/clear control flush mask
- * @plane:   Pointer to DRM plane object
- * @ctl: Pointer to control hardware
- * @set: set if true else clear
- */
-void sde_plane_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		bool set);
-
-/**
- * sde_plane_restore - restore hw state if previously power collapsed
- * @plane: Pointer to drm plane structure
- */
-void sde_plane_restore(struct drm_plane *plane);
-
-/**
- * sde_plane_flush - final plane operations before commit flush
- * @plane: Pointer to drm plane structure
- */
-void sde_plane_flush(struct drm_plane *plane);
-
-/**
- * sde_plane_halt_requests - control halting of vbif transactions for this plane
- *	This function isn't thread safe. Plane halt enable/disable requests
- *	should always be made from the same commit cycle.
- * @plane: Pointer to drm plane structure
- * @enable: Whether to enable/disable halting of vbif transactions
- */
-void sde_plane_halt_requests(struct drm_plane *plane, bool enable);
-
-/**
- * sde_plane_set_error: enable/disable error condition
- * @plane: pointer to drm_plane structure
- */
-void sde_plane_set_error(struct drm_plane *plane, bool error);
-
-/**
- * sde_plane_init - create new sde plane for the given pipe
- * @dev:   Pointer to DRM device
- * @pipe:  sde hardware pipe identifier
- * @primary_plane: true if this pipe is primary plane for crtc
- * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
- *                   a regular plane initialization. A non-zero primary plane
- *                   id will be passed for a virtual pipe initialization.
- *
- */
-struct drm_plane *sde_plane_init(struct drm_device *dev,
-		uint32_t pipe, bool primary_plane,
-		unsigned long possible_crtcs, u32 master_plane_id);
-
-/**
- * sde_plane_validate_multirecti_v2 - validate the multirect planes
- *				      against hw limitations
- * @plane: drm plate states of the multirect pair
- */
-int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane);
-
-/**
- * sde_plane_clear_multirect - clear multirect bits for the given pipe
- * @drm_state: Pointer to DRM plane state
- */
-void sde_plane_clear_multirect(const struct drm_plane_state *drm_state);
-
-/**
- * sde_plane_validate_src_addr - validate if current sspp addr of given
- * plane is within the input address range
- * @drm_plane:	Pointer to DRM plane object
- * @base_addr:	Start address of the input address range
- * @size:	Size of the input address range
- * @Return:	Non-zero if source pipe current address is not in input range
- */
-int sde_plane_validate_src_addr(struct drm_plane *plane,
-		unsigned long base_addr, u32 size);
-
-/**
- * sde_plane_wait_input_fence - wait for input fence object
- * @plane:   Pointer to DRM plane object
- * @wait_ms: Wait timeout value
- * Returns: Zero on success
- */
-int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
-
-/**
- * sde_plane_color_fill - enables color fill on plane
- * @plane:  Pointer to DRM plane object
- * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
- * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
- * Returns: 0 on success
- */
-int sde_plane_color_fill(struct drm_plane *plane,
-		uint32_t color, uint32_t alpha);
-
-/**
- * sde_plane_set_revalidate - sets revalidate flag which forces a full
- *	validation of the plane properties in the next atomic check
- * @plane: Pointer to DRM plane object
- * @enable: Boolean to set/unset the flag
- */
-void sde_plane_set_revalidate(struct drm_plane *plane, bool enable);
-
-/**
- * sde_plane_helper_reset_properties - reset properties to default values in the
- *	given DRM plane state object
- * @plane: Pointer to DRM plane object
- * @plane_state: Pointer to DRM plane state object
- * Returns: 0 on success, negative errno on failure
- */
-int sde_plane_helper_reset_custom_properties(struct drm_plane *plane,
-		struct drm_plane_state *plane_state);
-
-/* sde_plane_is_sec_ui_allowed - indicates if the sspp allows secure-ui layers
- * @plane: Pointer to DRM plane object
- * Returns: true if allowed; false otherwise
- */
-bool sde_plane_is_sec_ui_allowed(struct drm_plane *plane);
-
-/* sde_plane_secure_ctrl_xin_client - controls the VBIF programming of
- *	the xin-client before the secure-ui session. Programs the QOS
- *	and OT limits in VBIF for the sec-ui allowed xins
- * @plane: Pointer to DRM plane object
- * @crtc: Pointer to DRM CRTC state object
- */
-void sde_plane_secure_ctrl_xin_client(struct drm_plane *plane,
-		struct drm_crtc *crtc);
-
-/*
- * sde_plane_get_ubwc_error - gets the ubwc error code
- * @plane: Pointer to DRM plane object
- */
-u32 sde_plane_get_ubwc_error(struct drm_plane *plane);
-
-/*
- * sde_plane_clear_ubwc_error - clears the ubwc error code
- * @plane: Pointer to DRM plane object
- */
-void sde_plane_clear_ubwc_error(struct drm_plane *plane);
-
-/*
- * sde_plane_setup_src_split_order - enable/disable pipe's src_split_order
- * @plane: Pointer to DRM plane object
- * @rect_mode: multirect mode
- * @enable: enable/disable flag
- */
-void sde_plane_setup_src_split_order(struct drm_plane *plane,
-		enum sde_sspp_multirect_index rect_mode, bool enable);
-
-/* sde_plane_is_cache_required - indicates if the system cache is
- *	required for the plane.
- * @plane: Pointer to DRM plane object
- * Returns: true if sys cache is required, otherwise false.
- */
-bool sde_plane_is_cache_required(struct drm_plane *plane);
-
-#endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
deleted file mode 100644
index c52a96b..0000000
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include "sde_reg_dma.h"
-#include "sde_hw_reg_dma_v1.h"
-#include "sde_dbg.h"
-
-#define REG_DMA_VER_1_0 0x00010000
-#define REG_DMA_VER_1_1 0x00010001
-#define REG_DMA_VER_1_2 0x00010002
-
-static int default_check_support(enum sde_reg_dma_features feature,
-		     enum sde_reg_dma_blk blk,
-		     bool *is_supported)
-{
-
-	if (!is_supported)
-		return -EINVAL;
-
-	*is_supported = false;
-	return 0;
-}
-
-static int default_setup_payload(struct sde_reg_dma_setup_ops_cfg *cfg)
-{
-	DRM_ERROR("not implemented\n");
-	return -EINVAL;
-}
-
-static int default_kick_off(struct sde_reg_dma_kickoff_cfg *cfg)
-{
-	DRM_ERROR("not implemented\n");
-	return -EINVAL;
-
-}
-
-static int default_reset(struct sde_hw_ctl *ctl)
-{
-	DRM_ERROR("not implemented\n");
-	return -EINVAL;
-}
-
-struct sde_reg_dma_buffer *default_alloc_reg_dma_buf(u32 size)
-{
-	DRM_ERROR("not implemented\n");
-	return ERR_PTR(-EINVAL);
-}
-
-int default_dealloc_reg_dma(struct sde_reg_dma_buffer *lut_buf)
-{
-	DRM_ERROR("not implemented\n");
-	return -EINVAL;
-}
-
-static int default_buf_reset_reg_dma(struct sde_reg_dma_buffer *lut_buf)
-{
-	DRM_ERROR("not implemented\n");
-	return -EINVAL;
-}
-
-static int default_last_command(struct sde_hw_ctl *ctl,
-		enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
-{
-	return 0;
-}
-
-static void default_dump_reg(void)
-{
-}
-
-static struct sde_hw_reg_dma reg_dma = {
-	.ops = {default_check_support, default_setup_payload,
-		default_kick_off, default_reset, default_alloc_reg_dma_buf,
-		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command, default_dump_reg},
-};
-
-int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
-		struct drm_device *dev)
-{
-	int rc = 0;
-
-	if (!addr || !m || !dev) {
-		DRM_DEBUG("invalid addr %pK catalog %pK dev %pK\n", addr, m,
-				dev);
-		return 0;
-	}
-
-	reg_dma.drm_dev = dev;
-	reg_dma.caps = &m->dma_cfg;
-	reg_dma.addr = addr;
-
-	if (!m->reg_dma_count)
-		return 0;
-
-	switch (reg_dma.caps->version) {
-	case REG_DMA_VER_1_0:
-		rc = init_v1(&reg_dma);
-		if (rc)
-			DRM_DEBUG("init v1 dma ops failed\n");
-		break;
-	case REG_DMA_VER_1_1:
-		rc = init_v11(&reg_dma);
-		if (rc)
-			DRM_DEBUG("init v11 dma ops failed\n");
-		break;
-	case REG_DMA_VER_1_2:
-		rc = init_v12(&reg_dma);
-		if (rc)
-			DRM_DEBUG("init v11 dma ops failed\n");
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void)
-{
-	return &reg_dma.ops;
-}
-
-void sde_reg_dma_deinit(void)
-{
-	struct sde_hw_reg_dma op = {
-	.ops = {default_check_support, default_setup_payload,
-		default_kick_off, default_reset, default_alloc_reg_dma_buf,
-		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command, default_dump_reg},
-	};
-
-	if (!reg_dma.drm_dev || !reg_dma.caps)
-		return;
-
-	switch (reg_dma.caps->version) {
-	case REG_DMA_VER_1_0:
-		deinit_v1();
-		break;
-	case REG_DMA_VER_1_1:
-	case REG_DMA_VER_1_2:
-		deinit_v1();
-		break;
-	default:
-		break;
-	}
-	memset(&reg_dma, 0, sizeof(reg_dma));
-	memcpy(&reg_dma.ops, &op.ops, sizeof(op.ops));
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.h b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
deleted file mode 100644
index 48e8dfd..0000000
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.h
+++ /dev/null
@@ -1,338 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_REG_DMA_H
-#define _SDE_REG_DMA_H
-
-#include "msm_drv.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_top.h"
-#include "sde_hw_util.h"
-
-/**
- * enum sde_reg_dma_op - defines operations supported by reg dma
- * @REG_DMA_READ: Read the histogram into buffer provided
- * @REG_DMA_WRITE: Write the reg dma configuration into MDP block
- * @REG_DMA_OP_MAX: Max operation which indicates that op is invalid
- */
-enum sde_reg_dma_op {
-	REG_DMA_READ,
-	REG_DMA_WRITE,
-	REG_DMA_OP_MAX
-};
-
-/**
- * enum sde_reg_dma_read_sel - defines the blocks for histogram read
- * @DSPP0_HIST: select dspp0
- * @DSPP1_HIST: select dspp1
- * @DSPP2_HIST: select dspp2
- * @DSPP3_HIST: select dspp3
- * @DSPP_HIST_MAX: invalid selection
- */
-enum sde_reg_dma_read_sel {
-	DSPP0_HIST,
-	DSPP1_HIST,
-	DSPP2_HIST,
-	DSPP3_HIST,
-	DSPP_HIST_MAX,
-};
-
-/**
- * enum sde_reg_dma_features - defines features supported by reg dma
- * @QSEED: qseed feature
- * @GAMUT: gamut feature
- * @IGC: inverse gamma correction
- * @PCC: polynomical color correction
- * @VLUT: PA vlut
- * @MEMC_SKIN: memory color skin
- * @MEMC_SKY: memory color sky
- * @MEMC_FOLIAGE: memory color foliage
- * @MEMC_PROT: memory color protect
- * @SIX_ZONE: six zone
- * @HSIC: Hue, saturation and contrast
- * @GC: gamma correction
- * @LTM_INIT: LTM INIT
- * @LTM_ROI: LTM ROI
- * @LTM_VLUT: LTM VLUT
- * @REG_DMA_FEATURES_MAX: invalid selection
- */
-enum sde_reg_dma_features {
-	QSEED,
-	GAMUT,
-	IGC,
-	PCC,
-	VLUT,
-	MEMC_SKIN,
-	MEMC_SKY,
-	MEMC_FOLIAGE,
-	MEMC_PROT,
-	SIX_ZONE,
-	HSIC,
-	GC,
-	LTM_INIT,
-	LTM_ROI,
-	LTM_VLUT,
-	REG_DMA_FEATURES_MAX,
-};
-
-/**
- * enum sde_reg_dma_queue - defines reg dma write queue values
- * @DMA_CTL_QUEUE0: select queue0
- * @DMA_CTL_QUEUE1: select queue1
- * @DMA_CTL_QUEUE_MAX: invalid selection
- */
-enum sde_reg_dma_queue {
-	DMA_CTL_QUEUE0,
-	DMA_CTL_QUEUE1,
-	DMA_CTL_QUEUE_MAX,
-};
-
-/**
- * enum sde_reg_dma_trigger_mode - defines reg dma ops trigger mode
- * @WRITE_IMMEDIATE: trigger write op immediately
- * @WRITE_TRIGGER: trigger write op when sw trigger is issued
- * @READ_IMMEDIATE: trigger read op immediately
- * @READ_TRIGGER: trigger read op when sw trigger is issued
- * @TIGGER_MAX: invalid trigger selection
- */
-enum sde_reg_dma_trigger_mode {
-	WRITE_IMMEDIATE,
-	WRITE_TRIGGER,
-	READ_IMMEDIATE,
-	READ_TRIGGER,
-	TIGGER_MAX,
-};
-
-/**
- * enum sde_reg_dma_setup_ops - defines reg dma write configuration
- * @HW_BLK_SELECT: op for selecting the hardware block
- * @REG_SINGLE_WRITE: op for writing single register value
- *                    at the address provided
- * @REG_BLK_WRITE_SINGLE: op for writing multiple registers using auto address
- *                     increment
- * @REG_BLK_WRITE_INC: op for writing multiple registers using hw index
- *                        register
- * @REG_BLK_WRITE_MULTIPLE: op for writing hw index based registers at
- *                         non-consecutive location
- * @REG_SINGLE_MODIFY: op for modifying single register value
- *                    with bitmask at the address provided
- * @REG_DMA_SETUP_OPS_MAX: invalid operation
- */
-enum sde_reg_dma_setup_ops {
-	HW_BLK_SELECT,
-	REG_SINGLE_WRITE,
-	REG_BLK_WRITE_SINGLE,
-	REG_BLK_WRITE_INC,
-	REG_BLK_WRITE_MULTIPLE,
-	REG_SINGLE_MODIFY,
-	REG_DMA_SETUP_OPS_MAX,
-};
-
-/**
- * enum sde_reg_dma_blk - defines blocks for which reg dma op should be
- *                        performed
- * @VIG0: select vig0 block
- * @VIG1: select vig1 block
- * @VIG2: select vig2 block
- * @VIG3: select vig3 block
- * @LM0: select lm0 block
- * @LM1: select lm1 block
- * @LM2: select lm2 block
- * @LM3: select lm3 block
- * @DSPP0: select dspp0 block
- * @DSPP1: select dspp1 block
- * @DSPP2: select dspp2 block
- * @DSPP3: select dspp3 block
- * @DMA0: select dma0 block
- * @DMA1: select dma1 block
- * @DMA2: select dma2 block
- * @DMA3: select dma3 block
- * @SSPP_IGC: select sspp igc block
- * @DSPP_IGC: select dspp igc block
- * @LTM0: select LTM0 block
- * @LTM1: select LTM1 block
- * @MDSS: select mdss block
- */
-enum sde_reg_dma_blk {
-	VIG0  = BIT(0),
-	VIG1  = BIT(1),
-	VIG2  = BIT(2),
-	VIG3  = BIT(3),
-	LM0   = BIT(4),
-	LM1   = BIT(5),
-	LM2   = BIT(6),
-	LM3   = BIT(7),
-	DSPP0 = BIT(8),
-	DSPP1 = BIT(9),
-	DSPP2 = BIT(10),
-	DSPP3 = BIT(11),
-	DMA0  = BIT(12),
-	DMA1  = BIT(13),
-	DMA2  = BIT(14),
-	DMA3  = BIT(15),
-	SSPP_IGC = BIT(16),
-	DSPP_IGC = BIT(17),
-	LTM0 = BIT(18),
-	LTM1 = BIT(19),
-	MDSS  = BIT(31)
-};
-
-/**
- * enum sde_reg_dma_last_cmd_mode - defines enums for kick off mode.
- * @REG_DMA_WAIT4_COMP: last_command api will wait for max of 1 msec allowing
- *			reg dma trigger to complete.
- * @REG_DMA_NOWAIT: last_command api will not wait for reg dma trigger
- *		    completion.
- */
-enum sde_reg_dma_last_cmd_mode {
-	REG_DMA_WAIT4_COMP,
-	REG_DMA_NOWAIT,
-};
-
-/**
- * struct sde_reg_dma_buffer - defines reg dma buffer structure.
- * @drm_gem_object *buf: drm gem handle for the buffer
- * @asapce : pointer to address space
- * @buffer_size: buffer size
- * @index: write pointer index
- * @iova: device address
- * @vaddr: cpu address
- * @next_op_allowed: operation allowed on the buffer
- * @ops_completed: operations completed on buffer
- */
-struct sde_reg_dma_buffer {
-	struct drm_gem_object *buf;
-	struct msm_gem_address_space *aspace;
-	u32 buffer_size;
-	u32 index;
-	u64 iova;
-	void *vaddr;
-	u32 next_op_allowed;
-	u32 ops_completed;
-};
-
-/**
- * struct sde_reg_dma_setup_ops_cfg - defines structure for reg dma ops on the
- *                                    reg dma buffer.
- * @sde_reg_dma_setup_ops ops: ops to be performed
- * @sde_reg_dma_blk blk: block on which op needs to be performed
- * @sde_reg_dma_features feature: feature on which op needs to be done
- * @wrap_size: valid for REG_BLK_WRITE_MULTIPLE, indicates reg index location
- *             size
- * @inc: valid for REG_BLK_WRITE_MULTIPLE indicates whether reg index location
- *       needs an increment or decrement.
- *       0 - decrement
- *       1 - increment
- * @blk_offset: offset for blk, valid for HW_BLK_SELECT op only
- * @sde_reg_dma_buffer *dma_buf: reg dma buffer on which op needs to be
- *                                performed
- * @data: pointer to payload which has to be written into reg dma buffer for
- *        selected op.
- * @data_size: size of payload in data
- */
-struct sde_reg_dma_setup_ops_cfg {
-	enum sde_reg_dma_setup_ops ops;
-	enum sde_reg_dma_blk blk;
-	enum sde_reg_dma_features feature;
-	u32 wrap_size;
-	u32 inc;
-	u32 blk_offset;
-	struct sde_reg_dma_buffer *dma_buf;
-	u32 *data;
-	u32 mask;
-	u32 data_size;
-};
-
-/**
- * struct sde_reg_dma_kickoff_cfg - commit reg dma buffer to hw engine
- * @ctl: ctl for which reg dma buffer needs to be committed.
- * @dma_buf: reg dma buffer with iova address and size info
- * @block_select: histogram read select
- * @trigger_mode: reg dma ops trigger mode
- * @queue_select: queue on which reg dma buffer will be submitted
- * @last_command: last command for this vsync
- */
-struct sde_reg_dma_kickoff_cfg {
-	struct sde_hw_ctl *ctl;
-	enum sde_reg_dma_op op;
-	struct sde_reg_dma_buffer *dma_buf;
-	enum sde_reg_dma_read_sel block_select;
-	enum sde_reg_dma_trigger_mode trigger_mode;
-	enum sde_reg_dma_queue queue_select;
-	u32 last_command;
-};
-
-/**
- * struct sde_hw_reg_dma_ops - ops supported by reg dma frame work, based on
- *                             version of reg dma appropriate ops will be
- *                             installed during driver probe.
- * @check_support: checks if reg dma is supported on this platform for a
- *                 feature
- * @setup_payload: setup reg dma buffer based on ops and payload provided by
- *                 client
- * @kick_off: submit the reg dma buffer to hw enginge
- * @reset: reset the reg dma hw enginge for a ctl
- * @alloc_reg_dma_buf: allocate reg dma buffer
- * @dealloc_reg_dma: de-allocate reg dma buffer
- * @reset_reg_dma_buf: reset the buffer to init state
- * @last_command: notify control that last command is queued
- * @dump_regs: dump reg dma registers
- */
-struct sde_hw_reg_dma_ops {
-	int (*check_support)(enum sde_reg_dma_features feature,
-			     enum sde_reg_dma_blk blk,
-			     bool *is_supported);
-	int (*setup_payload)(struct sde_reg_dma_setup_ops_cfg *cfg);
-	int (*kick_off)(struct sde_reg_dma_kickoff_cfg *cfg);
-	int (*reset)(struct sde_hw_ctl *ctl);
-	struct sde_reg_dma_buffer* (*alloc_reg_dma_buf)(u32 size);
-	int (*dealloc_reg_dma)(struct sde_reg_dma_buffer *lut_buf);
-	int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf);
-	int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
-			enum sde_reg_dma_last_cmd_mode mode);
-	void (*dump_regs)(void);
-};
-
-/**
- * struct sde_hw_reg_dma - structure to hold reg dma hw info
- * @drm_dev: drm driver dev handle
- * @caps: reg dma hw caps on the platform
- * @ops: reg dma ops supported on the platform
- * @addr: reg dma hw block base address
- */
-struct sde_hw_reg_dma {
-	struct drm_device *drm_dev;
-	const struct sde_reg_dma_cfg *caps;
-	struct sde_hw_reg_dma_ops ops;
-	void __iomem *addr;
-};
-
-/**
- * sde_reg_dma_init() - function called to initialize reg dma during sde
- *                         drm driver probe. If reg dma is supported by sde
- *                         ops for reg dma version will be installed.
- *                         if reg dma is not supported by sde default ops will
- *                         be installed. check_support of default ops will
- *                         return false, hence the clients should fall back to
- *                         AHB programming.
- * @addr: reg dma block base address
- * @m: catalog which contains sde hw capabilities and offsets
- * @dev: drm driver device handle
- */
-int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
-		struct drm_device *dev);
-
-/**
- * sde_reg_dma_get_ops() - singleton module, ops is returned to the clients
- *                            who call this api.
- */
-struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void);
-
-/**
- * sde_reg_dma_deinit() - de-initialize the reg dma
- */
-void sde_reg_dma_deinit(void);
-#endif /* _SDE_REG_DMA_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
deleted file mode 100644
index 8a29c00..0000000
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ /dev/null
@@ -1,1892 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
-#include "sde_kms.h"
-#include "sde_hw_lm.h"
-#include "sde_hw_ctl.h"
-#include "sde_hw_cdm.h"
-#include "sde_hw_dspp.h"
-#include "sde_hw_ds.h"
-#include "sde_hw_pingpong.h"
-#include "sde_hw_intf.h"
-#include "sde_hw_wb.h"
-#include "sde_encoder.h"
-#include "sde_connector.h"
-#include "sde_hw_dsc.h"
-
-#define RESERVED_BY_OTHER(h, r) \
-	(((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ||\
-		((h)->rsvp_nxt && ((h)->rsvp_nxt->enc_id != (r)->enc_id)))
-
-#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
-#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
-#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
-#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DS))
-#define RM_RQ_CWB(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_CWB))
-#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
-				(t).num_comp_enc == (r).num_enc && \
-				(t).num_intf == (r).num_intf)
-
-/**
- * toplogy information to be used when ctl path version does not
- * support driving more than one interface per ctl_path
- */
-static const struct sde_rm_topology_def g_top_table[] = {
-	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
-	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 2, true  },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
-};
-
-/**
- * topology information to be used when the ctl path version
- * is SDE_CTL_CFG_VERSION_1_0_0
- */
-static const struct sde_rm_topology_def g_ctl_ver_1_top_table[] = {
-	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
-	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 1, true  },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 1, true  },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
-};
-
-
-/**
- * struct sde_rm_requirements - Reservation requirements parameter bundle
- * @top_ctrl:  topology control preference from kernel client
- * @top:       selected topology for the display
- * @hw_res:	   Hardware resources required as reported by the encoders
- */
-struct sde_rm_requirements {
-	uint64_t top_ctrl;
-	const struct sde_rm_topology_def *topology;
-	struct sde_encoder_hw_resources hw_res;
-};
-
-/**
- * struct sde_rm_rsvp - Use Case Reservation tagging structure
- *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
- *	By using as a tag, rather than lists of pointers to HW blocks used
- *	we can avoid some list management since we don't know how many blocks
- *	of each type a given use case may require.
- * @list:	List head for list of all reservations
- * @seq:	Global RSVP sequence number for debugging, especially for
- *		differentiating differenct allocations for same encoder.
- * @enc_id:	Reservations are tracked by Encoder DRM object ID.
- *		CRTCs may be connected to multiple Encoders.
- *		An encoder or connector id identifies the display path.
- * @topology	DRM<->HW topology use case
- */
-struct sde_rm_rsvp {
-	struct list_head list;
-	uint32_t seq;
-	uint32_t enc_id;
-	enum sde_rm_topology_name topology;
-};
-
-/**
- * struct sde_rm_hw_blk - hardware block tracking list member
- * @list:	List head for list of all hardware blocks tracking items
- * @rsvp:	Pointer to use case reservation if reserved by a client
- * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
- *		request. Will be swapped into rsvp if proposal is accepted
- * @type:	Type of hardware block this structure tracks
- * @id:		Hardware ID number, within it's own space, ie. LM_X
- * @catalog:	Pointer to the hardware catalog entry for this block
- * @hw:		Pointer to the hardware register access object for this block
- */
-struct sde_rm_hw_blk {
-	struct list_head list;
-	struct sde_rm_rsvp *rsvp;
-	struct sde_rm_rsvp *rsvp_nxt;
-	enum sde_hw_blk_type type;
-	uint32_t id;
-	struct sde_hw_blk *hw;
-};
-
-/**
- * sde_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
- */
-enum sde_rm_dbg_rsvp_stage {
-	SDE_RM_STAGE_BEGIN,
-	SDE_RM_STAGE_AFTER_CLEAR,
-	SDE_RM_STAGE_AFTER_RSVPNEXT,
-	SDE_RM_STAGE_FINAL
-};
-
-static void _sde_rm_print_rsvps(
-		struct sde_rm *rm,
-		enum sde_rm_dbg_rsvp_stage stage)
-{
-	struct sde_rm_rsvp *rsvp;
-	struct sde_rm_hw_blk *blk;
-	enum sde_hw_blk_type type;
-
-	SDE_DEBUG("%d\n", stage);
-
-	list_for_each_entry(rsvp, &rm->rsvps, list) {
-		SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
-				rsvp->enc_id, rsvp->topology);
-		SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
-	}
-
-	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
-		list_for_each_entry(blk, &rm->hw_blks[type], list) {
-			if (!blk->rsvp && !blk->rsvp_nxt)
-				continue;
-
-			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
-				(blk->rsvp) ? blk->rsvp->seq : 0,
-				(blk->rsvp) ? blk->rsvp->enc_id : 0,
-				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
-				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
-				blk->type, blk->id);
-
-			SDE_EVT32(stage,
-				(blk->rsvp) ? blk->rsvp->seq : 0,
-				(blk->rsvp) ? blk->rsvp->enc_id : 0,
-				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
-				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
-				blk->type, blk->id);
-		}
-	}
-}
-
-struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
-{
-	return rm->hw_mdp;
-}
-
-void sde_rm_init_hw_iter(
-		struct sde_rm_hw_iter *iter,
-		uint32_t enc_id,
-		enum sde_hw_blk_type type)
-{
-	memset(iter, 0, sizeof(*iter));
-	iter->enc_id = enc_id;
-	iter->type = type;
-}
-
-enum sde_rm_topology_name sde_rm_get_topology_name(
-	struct msm_display_topology topology)
-{
-	int i;
-
-	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
-		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
-			return g_top_table[i].top_name;
-
-	return SDE_RM_TOPOLOGY_NONE;
-}
-
-static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
-{
-	struct list_head *blk_list;
-
-	if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
-		SDE_ERROR("invalid rm\n");
-		return false;
-	}
-
-	i->hw = NULL;
-	blk_list = &rm->hw_blks[i->type];
-
-	if (i->blk && (&i->blk->list == blk_list)) {
-		SDE_DEBUG("attempt resume iteration past last\n");
-		return false;
-	}
-
-	i->blk = list_prepare_entry(i->blk, blk_list, list);
-
-	list_for_each_entry_continue(i->blk, blk_list, list) {
-		struct sde_rm_rsvp *rsvp = i->blk->rsvp;
-
-		if (i->blk->type != i->type) {
-			SDE_ERROR("found incorrect block type %d on %d list\n",
-					i->blk->type, i->type);
-			return false;
-		}
-
-		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
-			i->hw = i->blk->hw;
-			SDE_DEBUG("found type %d id %d for enc %d\n",
-					i->type, i->blk->id, i->enc_id);
-			return true;
-		}
-	}
-
-	SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
-
-	return false;
-}
-
-static bool _sde_rm_request_hw_blk_locked(struct sde_rm *rm,
-		struct sde_rm_hw_request *hw_blk_info)
-{
-	struct list_head *blk_list;
-	struct sde_rm_hw_blk *blk = NULL;
-
-	if (!rm || !hw_blk_info || hw_blk_info->type >= SDE_HW_BLK_MAX) {
-		SDE_ERROR("invalid rm\n");
-		return false;
-	}
-
-	hw_blk_info->hw = NULL;
-	blk_list = &rm->hw_blks[hw_blk_info->type];
-
-	blk = list_prepare_entry(blk, blk_list, list);
-
-	list_for_each_entry_continue(blk, blk_list, list) {
-		if (blk->type != hw_blk_info->type) {
-			SDE_ERROR("found incorrect block type %d on %d list\n",
-					blk->type, hw_blk_info->type);
-			return false;
-		}
-
-		if (blk->hw->id == hw_blk_info->id) {
-			hw_blk_info->hw = blk->hw;
-			SDE_DEBUG("found type %d id %d\n",
-					blk->type, blk->id);
-			return true;
-		}
-	}
-
-	SDE_DEBUG("no match, type %d id %d\n", hw_blk_info->type,
-			hw_blk_info->id);
-
-	return false;
-}
-
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
-{
-	bool ret;
-
-	mutex_lock(&rm->rm_lock);
-	ret = _sde_rm_get_hw_locked(rm, i);
-	mutex_unlock(&rm->rm_lock);
-
-	return ret;
-}
-
-bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw)
-{
-	bool ret;
-
-	mutex_lock(&rm->rm_lock);
-	ret = _sde_rm_request_hw_blk_locked(rm, hw);
-	mutex_unlock(&rm->rm_lock);
-
-	return ret;
-}
-
-static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
-{
-	switch (type) {
-	case SDE_HW_BLK_LM:
-		sde_hw_lm_destroy(hw);
-		break;
-	case SDE_HW_BLK_DSPP:
-		sde_hw_dspp_destroy(hw);
-		break;
-	case SDE_HW_BLK_DS:
-		sde_hw_ds_destroy(hw);
-		break;
-	case SDE_HW_BLK_CTL:
-		sde_hw_ctl_destroy(hw);
-		break;
-	case SDE_HW_BLK_CDM:
-		sde_hw_cdm_destroy(hw);
-		break;
-	case SDE_HW_BLK_PINGPONG:
-		sde_hw_pingpong_destroy(hw);
-		break;
-	case SDE_HW_BLK_INTF:
-		sde_hw_intf_destroy(hw);
-		break;
-	case SDE_HW_BLK_WB:
-		sde_hw_wb_destroy(hw);
-		break;
-	case SDE_HW_BLK_DSC:
-		sde_hw_dsc_destroy(hw);
-		break;
-	case SDE_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
-	case SDE_HW_BLK_TOP:
-		/* Top is a singleton, not managed in hw_blks list */
-	case SDE_HW_BLK_MAX:
-	default:
-		SDE_ERROR("unsupported block type %d\n", type);
-		break;
-	}
-}
-
-int sde_rm_destroy(struct sde_rm *rm)
-{
-
-	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
-	struct sde_rm_hw_blk *hw_cur, *hw_nxt;
-	enum sde_hw_blk_type type;
-
-	if (!rm) {
-		SDE_ERROR("invalid rm\n");
-		return -EINVAL;
-	}
-
-	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
-		list_del(&rsvp_cur->list);
-		kfree(rsvp_cur);
-	}
-
-
-	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
-		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
-				list) {
-			list_del(&hw_cur->list);
-			_sde_rm_hw_destroy(hw_cur->type, hw_cur->hw);
-			kfree(hw_cur);
-		}
-	}
-
-	sde_hw_mdp_destroy(rm->hw_mdp);
-	rm->hw_mdp = NULL;
-
-	mutex_destroy(&rm->rm_lock);
-
-	return 0;
-}
-
-static int _sde_rm_hw_blk_create(
-		struct sde_rm *rm,
-		struct sde_mdss_cfg *cat,
-		void __iomem *mmio,
-		enum sde_hw_blk_type type,
-		uint32_t id,
-		void *hw_catalog_info)
-{
-	struct sde_rm_hw_blk *blk;
-	struct sde_hw_mdp *hw_mdp;
-	void *hw;
-
-	hw_mdp = rm->hw_mdp;
-
-	switch (type) {
-	case SDE_HW_BLK_LM:
-		hw = sde_hw_lm_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_DSPP:
-		hw = sde_hw_dspp_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_DS:
-		hw = sde_hw_ds_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_CTL:
-		hw = sde_hw_ctl_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_CDM:
-		hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
-		break;
-	case SDE_HW_BLK_PINGPONG:
-		hw = sde_hw_pingpong_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_INTF:
-		hw = sde_hw_intf_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_WB:
-		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
-		break;
-	case SDE_HW_BLK_DSC:
-		hw = sde_hw_dsc_init(id, mmio, cat);
-		break;
-	case SDE_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
-	case SDE_HW_BLK_TOP:
-		/* Top is a singleton, not managed in hw_blks list */
-	case SDE_HW_BLK_MAX:
-	default:
-		SDE_ERROR("unsupported block type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (IS_ERR_OR_NULL(hw)) {
-		SDE_ERROR("failed hw object creation: type %d, err %ld\n",
-				type, PTR_ERR(hw));
-		return -EFAULT;
-	}
-
-	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
-	if (!blk) {
-		_sde_rm_hw_destroy(type, hw);
-		return -ENOMEM;
-	}
-
-	blk->type = type;
-	blk->id = id;
-	blk->hw = hw;
-	list_add_tail(&blk->list, &rm->hw_blks[type]);
-
-	return 0;
-}
-
-static int _sde_rm_hw_blk_create_new(struct sde_rm *rm,
-			struct sde_mdss_cfg *cat,
-			void __iomem *mmio)
-{
-	int i, rc = 0;
-
-	for (i = 0; i < cat->dspp_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
-				cat->dspp[i].id, &cat->dspp[i]);
-		if (rc) {
-			SDE_ERROR("failed: dspp hw not available\n");
-			goto fail;
-		}
-	}
-
-	if (cat->mdp[0].has_dest_scaler) {
-		for (i = 0; i < cat->ds_count; i++) {
-			rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DS,
-					cat->ds[i].id, &cat->ds[i]);
-			if (rc) {
-				SDE_ERROR("failed: ds hw not available\n");
-				goto fail;
-			}
-		}
-	}
-
-	for (i = 0; i < cat->pingpong_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
-				cat->pingpong[i].id, &cat->pingpong[i]);
-		if (rc) {
-			SDE_ERROR("failed: pp hw not available\n");
-			goto fail;
-		}
-	}
-
-	for (i = 0; i < cat->dsc_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSC,
-			cat->dsc[i].id, &cat->dsc[i]);
-		if (rc) {
-			SDE_ERROR("failed: dsc hw not available\n");
-			goto fail;
-		}
-	}
-
-	for (i = 0; i < cat->intf_count; i++) {
-		if (cat->intf[i].type == INTF_NONE) {
-			SDE_DEBUG("skip intf %d with type none\n", i);
-			continue;
-		}
-
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
-				cat->intf[i].id, &cat->intf[i]);
-		if (rc) {
-			SDE_ERROR("failed: intf hw not available\n");
-			goto fail;
-		}
-	}
-
-	for (i = 0; i < cat->wb_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
-				cat->wb[i].id, &cat->wb[i]);
-		if (rc) {
-			SDE_ERROR("failed: wb hw not available\n");
-			goto fail;
-		}
-	}
-
-	for (i = 0; i < cat->ctl_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
-				cat->ctl[i].id, &cat->ctl[i]);
-		if (rc) {
-			SDE_ERROR("failed: ctl hw not available\n");
-			goto fail;
-		}
-	}
-
-	for (i = 0; i < cat->cdm_count; i++) {
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
-				cat->cdm[i].id, &cat->cdm[i]);
-		if (rc) {
-			SDE_ERROR("failed: cdm hw not available\n");
-			goto fail;
-		}
-	}
-
-fail:
-	return rc;
-}
-
-int sde_rm_init(struct sde_rm *rm,
-		struct sde_mdss_cfg *cat,
-		void __iomem *mmio,
-		struct drm_device *dev)
-{
-	int i, rc = 0;
-	enum sde_hw_blk_type type;
-
-	if (!rm || !cat || !mmio || !dev) {
-		SDE_ERROR("invalid input params\n");
-		return -EINVAL;
-	}
-
-	/* Clear, setup lists */
-	memset(rm, 0, sizeof(*rm));
-
-	mutex_init(&rm->rm_lock);
-
-	INIT_LIST_HEAD(&rm->rsvps);
-	for (type = 0; type < SDE_HW_BLK_MAX; type++)
-		INIT_LIST_HEAD(&rm->hw_blks[type]);
-
-	rm->dev = dev;
-
-	if (IS_SDE_CTL_REV_100(cat->ctl_rev))
-		rm->topology_tbl = g_ctl_ver_1_top_table;
-	else
-		rm->topology_tbl = g_top_table;
-
-	/* Some of the sub-blocks require an mdptop to be created */
-	rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
-	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
-		rc = PTR_ERR(rm->hw_mdp);
-		rm->hw_mdp = NULL;
-		SDE_ERROR("failed: mdp hw not available\n");
-		goto fail;
-	}
-
-	/* Interrogate HW catalog and create tracking items for hw blocks */
-	for (i = 0; i < cat->mixer_count; i++) {
-		struct sde_lm_cfg *lm = &cat->mixer[i];
-
-		if (lm->pingpong == PINGPONG_MAX) {
-			SDE_ERROR("mixer %d without pingpong\n", lm->id);
-			goto fail;
-		}
-
-		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
-				cat->mixer[i].id, &cat->mixer[i]);
-		if (rc) {
-			SDE_ERROR("failed: lm hw not available\n");
-			goto fail;
-		}
-
-		if (!rm->lm_max_width) {
-			rm->lm_max_width = lm->sblk->maxwidth;
-		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
-			/*
-			 * Don't expect to have hw where lm max widths differ.
-			 * If found, take the min.
-			 */
-			SDE_ERROR("unsupported: lm maxwidth differs\n");
-			if (rm->lm_max_width > lm->sblk->maxwidth)
-				rm->lm_max_width = lm->sblk->maxwidth;
-		}
-	}
-
-	rc = _sde_rm_hw_blk_create_new(rm, cat, mmio);
-	if (!rc)
-		return 0;
-
-fail:
-	sde_rm_destroy(rm);
-
-	return rc;
-}
-
-static bool _sde_rm_check_lm(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		const struct sde_lm_cfg *lm_cfg,
-		struct sde_rm_hw_blk *lm,
-		struct sde_rm_hw_blk **dspp,
-		struct sde_rm_hw_blk **ds,
-		struct sde_rm_hw_blk **pp)
-{
-	bool is_valid_dspp, is_valid_ds, ret;
-
-	is_valid_dspp = (lm_cfg->dspp != DSPP_MAX) ? true : false;
-	is_valid_ds = (lm_cfg->ds != DS_MAX) ? true : false;
-
-	/**
-	 * RM_RQ_X: specification of which LMs to choose
-	 * is_valid_X: indicates whether LM is tied with block X
-	 * ret: true if given LM matches the user requirement,
-	 *      false otherwise
-	 */
-	if (RM_RQ_DSPP(reqs) && RM_RQ_DS(reqs))
-		ret = (is_valid_dspp && is_valid_ds);
-	else if (RM_RQ_DSPP(reqs))
-		ret = is_valid_dspp;
-	else if (RM_RQ_DS(reqs))
-		ret = is_valid_ds;
-	else
-		ret = !(is_valid_dspp || is_valid_ds);
-
-	if (!ret) {
-		SDE_DEBUG(
-			"fail:lm(%d)req_dspp(%d)dspp(%d)req_ds(%d)ds(%d)\n",
-			lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
-			lm_cfg->dspp, (bool)(RM_RQ_DS(reqs)),
-			lm_cfg->ds);
-
-		return ret;
-	}
-	return true;
-}
-
-static bool _sde_rm_reserve_dspp(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		const struct sde_lm_cfg *lm_cfg,
-		struct sde_rm_hw_blk *lm,
-		struct sde_rm_hw_blk **dspp)
-{
-	struct sde_rm_hw_iter iter;
-
-	if (lm_cfg->dspp != DSPP_MAX) {
-		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
-		while (_sde_rm_get_hw_locked(rm, &iter)) {
-			if (iter.blk->id == lm_cfg->dspp) {
-				*dspp = iter.blk;
-				break;
-			}
-		}
-
-		if (!*dspp) {
-			SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
-					lm_cfg->dspp);
-			return false;
-		}
-
-		if (RESERVED_BY_OTHER(*dspp, rsvp)) {
-			SDE_DEBUG("lm %d dspp %d already reserved\n",
-					lm->id, (*dspp)->id);
-			return false;
-		}
-	}
-
-	return true;
-}
-
-
-static bool _sde_rm_reserve_ds(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		const struct sde_lm_cfg *lm_cfg,
-		struct sde_rm_hw_blk *lm,
-		struct sde_rm_hw_blk **ds)
-{
-	struct sde_rm_hw_iter iter;
-
-	if (lm_cfg->ds != DS_MAX) {
-		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DS);
-		while (_sde_rm_get_hw_locked(rm, &iter)) {
-			if (iter.blk->id == lm_cfg->ds) {
-				*ds = iter.blk;
-				break;
-			}
-		}
-
-		if (!*ds) {
-			SDE_DEBUG("lm %d failed to retrieve ds %d\n", lm->id,
-					lm_cfg->ds);
-			return false;
-		}
-
-		if (RESERVED_BY_OTHER(*ds, rsvp)) {
-			SDE_DEBUG("lm %d ds %d already reserved\n",
-					lm->id, (*ds)->id);
-			return false;
-		}
-	}
-
-	return true;
-}
-
-static bool _sde_rm_reserve_pp(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		const struct sde_lm_cfg *lm_cfg,
-		const struct sde_pingpong_cfg *pp_cfg,
-		struct sde_rm_hw_blk *lm,
-		struct sde_rm_hw_blk **dspp,
-		struct sde_rm_hw_blk **ds,
-		struct sde_rm_hw_blk **pp)
-{
-	struct sde_rm_hw_iter iter;
-
-	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
-	while (_sde_rm_get_hw_locked(rm, &iter)) {
-		if (iter.blk->id == lm_cfg->pingpong) {
-			*pp = iter.blk;
-			break;
-		}
-	}
-
-	if (!*pp) {
-		SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
-		return false;
-	}
-
-	if (RESERVED_BY_OTHER(*pp, rsvp)) {
-		SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
-				(*pp)->id);
-		*dspp = NULL;
-		*ds = NULL;
-		return false;
-	}
-
-	pp_cfg = to_sde_hw_pingpong((*pp)->hw)->caps;
-	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
-			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
-		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
-		*dspp = NULL;
-		*ds = NULL;
-		return false;
-	}
-	return true;
-}
-
-/**
- * _sde_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
- *	proposed use case requirements, incl. hardwired dependent blocks like
- *	pingpong, and dspp.
- * @rm: sde resource manager handle
- * @rsvp: reservation currently being created
- * @reqs: proposed use case requirements
- * @lm: proposed layer mixer, function checks if lm, and all other hardwired
- *      blocks connected to the lm (pp, dspp) are available and appropriate
- * @dspp: output parameter, dspp block attached to the layer mixer.
- *        NULL if dspp was not available, or not matching requirements.
- * @pp: output parameter, pingpong block attached to the layer mixer.
- *      NULL if dspp was not available, or not matching requirements.
- * @primary_lm: if non-null, this function check if lm is compatible primary_lm
- *              as well as satisfying all other requirements
- * @Return: true if lm matches all requirements, false otherwise
- */
-static bool _sde_rm_check_lm_and_get_connected_blks(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		struct sde_rm_hw_blk *lm,
-		struct sde_rm_hw_blk **dspp,
-		struct sde_rm_hw_blk **ds,
-		struct sde_rm_hw_blk **pp,
-		struct sde_rm_hw_blk *primary_lm)
-{
-	const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
-	const struct sde_pingpong_cfg *pp_cfg;
-	bool ret;
-	u32 display_pref, cwb_pref;
-
-	*dspp = NULL;
-	*ds = NULL;
-	*pp = NULL;
-	display_pref = lm_cfg->features & BIT(SDE_DISP_PRIMARY_PREF);
-	cwb_pref = lm_cfg->features & BIT(SDE_DISP_CWB_PREF);
-
-	SDE_DEBUG("check lm %d: dspp %d ds %d pp %d disp_pref: %d cwb_pref%d\n",
-		lm_cfg->id, lm_cfg->dspp, lm_cfg->ds,
-		lm_cfg->pingpong, display_pref, cwb_pref);
-
-	/* Check if this layer mixer is a peer of the proposed primary LM */
-	if (primary_lm) {
-		const struct sde_lm_cfg *prim_lm_cfg =
-				to_sde_hw_mixer(primary_lm->hw)->cap;
-
-		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
-			SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
-					prim_lm_cfg->id);
-			return false;
-		}
-	}
-
-	/* bypass rest of the checks if LM for primary display is found */
-	if (!display_pref) {
-		/* Check lm for valid requirements */
-		ret = _sde_rm_check_lm(rm, rsvp, reqs, lm_cfg, lm,
-				dspp, ds, pp);
-		if (!ret)
-			return ret;
-
-		/**
-		 * If CWB is enabled and LM is not CWB supported
-		 * then return false.
-		 */
-		if (RM_RQ_CWB(reqs) && !cwb_pref) {
-			SDE_DEBUG("fail: cwb supported lm not allocated\n");
-			return false;
-		}
-	} else if (!(reqs->hw_res.is_primary && display_pref)) {
-		SDE_DEBUG(
-			"display preference is not met. is_primary: %d display_pref: %d\n",
-			(int)reqs->hw_res.is_primary, (int)display_pref);
-		return false;
-	}
-
-	/* Already reserved? */
-	if (RESERVED_BY_OTHER(lm, rsvp)) {
-		SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
-		return false;
-	}
-
-	/* Reserve dspp */
-	ret = _sde_rm_reserve_dspp(rm, rsvp, lm_cfg, lm, dspp);
-	if (!ret)
-		return ret;
-
-	/* Reserve ds */
-	ret = _sde_rm_reserve_ds(rm, rsvp, lm_cfg, lm, ds);
-	if (!ret)
-		return ret;
-
-	/* Reserve pp */
-	ret = _sde_rm_reserve_pp(rm, rsvp, reqs, lm_cfg, pp_cfg, lm,
-			dspp, ds, pp);
-	if (!ret)
-		return ret;
-
-	return true;
-}
-
-static int _sde_rm_reserve_lms(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		u8 *_lm_ids)
-
-{
-	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
-	struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
-	struct sde_rm_hw_blk *ds[MAX_BLOCKS];
-	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
-	struct sde_rm_hw_iter iter_i, iter_j;
-	int lm_count = 0;
-	int i, rc = 0;
-
-	if (!reqs->topology->num_lm) {
-		SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
-		return -EINVAL;
-	}
-
-	/* Find a primary mixer */
-	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
-	while (lm_count != reqs->topology->num_lm &&
-			_sde_rm_get_hw_locked(rm, &iter_i)) {
-		memset(&lm, 0, sizeof(lm));
-		memset(&dspp, 0, sizeof(dspp));
-		memset(&ds, 0, sizeof(ds));
-		memset(&pp, 0, sizeof(pp));
-
-		lm_count = 0;
-		lm[lm_count] = iter_i.blk;
-
-		SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
-			iter_i.blk->id,
-			lm_count,
-			_lm_ids ? _lm_ids[lm_count] : -1);
-
-		if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
-			continue;
-
-		if (!_sde_rm_check_lm_and_get_connected_blks(
-				rm, rsvp, reqs, lm[lm_count],
-				&dspp[lm_count], &ds[lm_count],
-				&pp[lm_count], NULL))
-			continue;
-
-		++lm_count;
-
-		/* Valid primary mixer found, find matching peers */
-		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
-
-		while (lm_count != reqs->topology->num_lm &&
-				_sde_rm_get_hw_locked(rm, &iter_j)) {
-			if (iter_i.blk == iter_j.blk)
-				continue;
-
-			if (!_sde_rm_check_lm_and_get_connected_blks(
-					rm, rsvp, reqs, iter_j.blk,
-					&dspp[lm_count], &ds[lm_count],
-					&pp[lm_count], iter_i.blk))
-				continue;
-
-			lm[lm_count] = iter_j.blk;
-			SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
-				iter_i.blk->id,
-				lm_count,
-				_lm_ids ? _lm_ids[lm_count] : -1);
-
-			if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
-				continue;
-
-			++lm_count;
-		}
-	}
-
-	if (lm_count != reqs->topology->num_lm) {
-		SDE_DEBUG("unable to find appropriate mixers\n");
-		return -ENAVAIL;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(lm); i++) {
-		if (!lm[i])
-			break;
-
-		lm[i]->rsvp_nxt = rsvp;
-		pp[i]->rsvp_nxt = rsvp;
-		if (dspp[i])
-			dspp[i]->rsvp_nxt = rsvp;
-
-		if (ds[i])
-			ds[i]->rsvp_nxt = rsvp;
-
-		SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
-				dspp[i] ? dspp[i]->id : 0,
-				ds[i] ? ds[i]->id : 0);
-	}
-
-	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
-		/* reserve a free PINGPONG_SLAVE block */
-		rc = -ENAVAIL;
-		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
-		while (_sde_rm_get_hw_locked(rm, &iter_i)) {
-			const struct sde_hw_pingpong *pp =
-					to_sde_hw_pingpong(iter_i.blk->hw);
-			const struct sde_pingpong_cfg *pp_cfg = pp->caps;
-
-			if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
-				continue;
-			if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
-				continue;
-
-			iter_i.blk->rsvp_nxt = rsvp;
-			rc = 0;
-			break;
-		}
-	}
-
-	return rc;
-}
-
-static int _sde_rm_reserve_ctls(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		const struct sde_rm_topology_def *top,
-		u8 *_ctl_ids)
-{
-	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
-	struct sde_rm_hw_iter iter;
-	int i = 0;
-
-	memset(&ctls, 0, sizeof(ctls));
-
-	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
-	while (_sde_rm_get_hw_locked(rm, &iter)) {
-		const struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.blk->hw);
-		unsigned long features = ctl->caps->features;
-		bool has_split_display, has_ppsplit, primary_pref;
-
-		if (RESERVED_BY_OTHER(iter.blk, rsvp))
-			continue;
-
-		has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & features;
-		has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & features;
-		primary_pref = BIT(SDE_CTL_PRIMARY_PREF) & features;
-
-		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
-
-		/*
-		 * bypass rest feature checks on finding CTL preferred
-		 * for primary displays.
-		 */
-		if (!primary_pref && !_ctl_ids) {
-			if (top->needs_split_display != has_split_display)
-				continue;
-
-			if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT &&
-					!has_ppsplit)
-				continue;
-		} else if (!(reqs->hw_res.is_primary && primary_pref) &&
-				!_ctl_ids) {
-			SDE_DEBUG(
-				"display pref not met. is_primary: %d primary_pref: %d\n",
-				reqs->hw_res.is_primary, primary_pref);
-			continue;
-		}
-
-		ctls[i] = iter.blk;
-
-		SDE_DEBUG("blk id = %d, _ctl_ids[%d] = %d\n",
-			iter.blk->id, i,
-			_ctl_ids ? _ctl_ids[i] : -1);
-
-		if (_ctl_ids && (ctls[i]->id != _ctl_ids[i]))
-			continue;
-
-		SDE_DEBUG("ctl %d match\n", iter.blk->id);
-
-		if (++i == top->num_ctl)
-			break;
-	}
-
-	if (i != top->num_ctl)
-		return -ENAVAIL;
-
-	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
-		ctls[i]->rsvp_nxt = rsvp;
-		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
-	}
-
-	return 0;
-}
-
-static int _sde_rm_reserve_dsc(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		const struct sde_rm_topology_def *top,
-		u8 *_dsc_ids)
-{
-	struct sde_rm_hw_iter iter;
-	int alloc_count = 0;
-	int num_dsc_enc = top->num_lm;
-
-	if (!top->num_comp_enc)
-		return 0;
-
-	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
-
-	while (_sde_rm_get_hw_locked(rm, &iter)) {
-		if (RESERVED_BY_OTHER(iter.blk, rsvp))
-			continue;
-
-		SDE_DEBUG("blk id = %d, _dsc_ids[%d] = %d\n",
-			iter.blk->id,
-			alloc_count,
-			_dsc_ids ? _dsc_ids[alloc_count] : -1);
-
-		if (_dsc_ids && (iter.blk->id != _dsc_ids[alloc_count]))
-			continue;
-
-		iter.blk->rsvp_nxt = rsvp;
-		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
-
-		if (++alloc_count == num_dsc_enc)
-			return 0;
-	}
-
-	SDE_ERROR("couldn't reserve %d dsc blocks for enc id %d\n",
-		num_dsc_enc, rsvp->enc_id);
-
-	return -ENAVAIL;
-}
-
-static int _sde_rm_reserve_cdm(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		uint32_t id,
-		enum sde_hw_blk_type type)
-{
-	struct sde_rm_hw_iter iter;
-
-	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
-	while (_sde_rm_get_hw_locked(rm, &iter)) {
-		const struct sde_hw_cdm *cdm = to_sde_hw_cdm(iter.blk->hw);
-		const struct sde_cdm_cfg *caps = cdm->caps;
-		bool match = false;
-
-		if (RESERVED_BY_OTHER(iter.blk, rsvp))
-			continue;
-
-		if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
-			match = test_bit(id, &caps->intf_connect);
-		else if (type == SDE_HW_BLK_WB && id != WB_MAX)
-			match = test_bit(id, &caps->wb_connect);
-
-		SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
-				type, id, caps->intf_connect, caps->wb_connect,
-				match);
-
-		if (!match)
-			continue;
-
-		iter.blk->rsvp_nxt = rsvp;
-		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
-		break;
-	}
-
-	if (!iter.hw) {
-		SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
-		return -ENAVAIL;
-	}
-
-	return 0;
-}
-
-static int _sde_rm_reserve_intf_or_wb(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		uint32_t id,
-		enum sde_hw_blk_type type,
-		bool needs_cdm)
-{
-	struct sde_rm_hw_iter iter;
-	int ret = 0;
-
-	/* Find the block entry in the rm, and note the reservation */
-	sde_rm_init_hw_iter(&iter, 0, type);
-	while (_sde_rm_get_hw_locked(rm, &iter)) {
-		if (iter.blk->id != id)
-			continue;
-
-		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
-			SDE_ERROR("type %d id %d already reserved\n", type, id);
-			return -ENAVAIL;
-		}
-
-		iter.blk->rsvp_nxt = rsvp;
-		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
-		break;
-	}
-
-	/* Shouldn't happen since wbs / intfs are fixed at probe */
-	if (!iter.hw) {
-		SDE_ERROR("couldn't find type %d id %d\n", type, id);
-		return -EINVAL;
-	}
-
-	/* Expected only one intf or wb will request cdm */
-	if (needs_cdm)
-		ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);
-
-	return ret;
-}
-
-static int _sde_rm_reserve_intf_related_hw(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_encoder_hw_resources *hw_res)
-{
-	int i, ret = 0;
-	u32 id;
-
-	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
-		if (hw_res->intfs[i] == INTF_MODE_NONE)
-			continue;
-		id = i + INTF_0;
-		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
-				SDE_HW_BLK_INTF, hw_res->needs_cdm);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
-		if (hw_res->wbs[i] == INTF_MODE_NONE)
-			continue;
-		id = i + WB_0;
-		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
-				SDE_HW_BLK_WB, hw_res->needs_cdm);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-static bool _sde_rm_is_display_in_cont_splash(struct sde_kms *sde_kms,
-		struct drm_encoder *enc)
-{
-	int i;
-	struct sde_splash_display *splash_dpy;
-
-	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
-		splash_dpy = &sde_kms->splash_data.splash_display[i];
-		if (splash_dpy->encoder ==  enc)
-			return splash_dpy->cont_splash_enabled;
-	}
-
-	return false;
-}
-
-static int _sde_rm_make_lm_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		struct sde_splash_display *splash_display)
-{
-	int ret, i;
-	u8 *hw_ids = NULL;
-
-	/* Check if splash data provided lm_ids */
-	if (splash_display) {
-		hw_ids = splash_display->lm_ids;
-		for (i = 0; i < splash_display->lm_cnt; i++)
-			SDE_DEBUG("splash_display->lm_ids[%d] = %d\n",
-				i, splash_display->lm_ids[i]);
-
-		if (splash_display->lm_cnt != reqs->topology->num_lm)
-			SDE_DEBUG("Configured splash LMs != needed LM cnt\n");
-	}
-	/*
-	 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
-	 * Do assignment preferring to give away low-resource mixers first:
-	 * - Check mixers without DSPPs
-	 * - Only then allow to grab from mixers with DSPP capability
-	 */
-	ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
-	if (ret && !RM_RQ_DSPP(reqs)) {
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-		ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
-	}
-
-	return ret;
-}
-
-static int _sde_rm_make_ctl_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		struct sde_splash_display *splash_display)
-{
-	int ret, i;
-	u8 *hw_ids = NULL;
-	struct sde_rm_topology_def topology;
-
-	/* Check if splash data provided ctl_ids */
-	if (splash_display) {
-		hw_ids = splash_display->ctl_ids;
-		for (i = 0; i < splash_display->ctl_cnt; i++)
-			SDE_DEBUG("splash_display->ctl_ids[%d] = %d\n",
-				i, splash_display->ctl_ids[i]);
-	}
-
-	/*
-	 * Do assignment preferring to give away low-resource CTLs first:
-	 * - Check mixers without Split Display
-	 * - Only then allow to grab from CTLs with split display capability
-	 */
-	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology, hw_ids);
-	if (ret && !reqs->topology->needs_split_display &&
-			reqs->topology->num_ctl > SINGLE_CTL) {
-		memcpy(&topology, reqs->topology, sizeof(topology));
-		topology.needs_split_display = true;
-		ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology, hw_ids);
-	}
-
-	return ret;
-}
-
-static int _sde_rm_make_dsc_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs,
-		struct sde_splash_display *splash_display)
-{
-	int ret, i;
-	u8 *hw_ids = NULL;
-
-	/* Check if splash data provided dsc_ids */
-	if (splash_display) {
-		hw_ids = splash_display->dsc_ids;
-		for (i = 0; i < splash_display->dsc_cnt; i++)
-			SDE_DEBUG("splash_data.dsc_ids[%d] = %d\n",
-				i, splash_display->dsc_ids[i]);
-	}
-
-	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology, hw_ids);
-
-	return ret;
-}
-
-static int _sde_rm_make_next_rsvp(struct sde_rm *rm, struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state,
-		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct sde_splash_display *splash_display = NULL;
-	struct sde_splash_data *splash_data;
-	int i, ret;
-
-	priv = enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	splash_data = &sde_kms->splash_data;
-
-	if (_sde_rm_is_display_in_cont_splash(sde_kms, enc)) {
-		for (i = 0; i < ARRAY_SIZE(splash_data->splash_display); i++) {
-			if (enc == splash_data->splash_display[i].encoder)
-				splash_display =
-					&splash_data->splash_display[i];
-		}
-		if (!splash_display) {
-			SDE_ERROR("rm is in cont_splash but data not found\n");
-			return -EINVAL;
-		}
-	}
-
-	/* Create reservation info, tag reserved blocks with it as we go */
-	rsvp->seq = ++rm->rsvp_next_seq;
-	rsvp->enc_id = enc->base.id;
-	rsvp->topology = reqs->topology->top_name;
-	list_add_tail(&rsvp->list, &rm->rsvps);
-
-	ret = _sde_rm_make_lm_rsvp(rm, rsvp, reqs, splash_display);
-	if (ret) {
-		SDE_ERROR("unable to find appropriate mixers\n");
-		return ret;
-	}
-
-	ret = _sde_rm_make_ctl_rsvp(rm, rsvp, reqs, splash_display);
-	if (ret) {
-		SDE_ERROR("unable to find appropriate CTL\n");
-		return ret;
-	}
-
-	/* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
-	ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
-	if (ret)
-		return ret;
-
-	ret = _sde_rm_make_dsc_rsvp(rm, rsvp, reqs, splash_display);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-/**
- * _sde_rm_get_hw_blk_for_cont_splash - retrieve the LM blocks on given CTL
- * and populate the connected HW blk ids in sde_splash_display
- * @rm:	Pointer to resource manager structure
- * @ctl: Pointer to CTL hardware block
- * @splash_display: Pointer to struct sde_splash_display
- * return: number of active LM blocks for this CTL block
- */
-static int _sde_rm_get_hw_blk_for_cont_splash(struct sde_rm *rm,
-		struct sde_hw_ctl *ctl,
-		struct sde_splash_display *splash_display)
-{
-	u32 lm_reg;
-	struct sde_rm_hw_iter iter_lm, iter_pp;
-	struct sde_hw_pingpong *pp;
-
-	if (!rm || !ctl || !splash_display) {
-		SDE_ERROR("invalid input parameters\n");
-		return 0;
-	}
-
-	sde_rm_init_hw_iter(&iter_lm, 0, SDE_HW_BLK_LM);
-	sde_rm_init_hw_iter(&iter_pp, 0, SDE_HW_BLK_PINGPONG);
-	while (_sde_rm_get_hw_locked(rm, &iter_lm)) {
-		_sde_rm_get_hw_locked(rm, &iter_pp);
-
-		if (splash_display->lm_cnt >= MAX_DATA_PATH_PER_DSIPLAY)
-			break;
-
-		lm_reg = ctl->ops.read_ctl_layers(ctl, iter_lm.blk->id);
-		if (!lm_reg)
-			continue;
-
-		splash_display->lm_ids[splash_display->lm_cnt++] =
-			iter_lm.blk->id;
-		SDE_DEBUG("lm_cnt=%d lm_reg[%d]=0x%x\n", splash_display->lm_cnt,
-				iter_lm.blk->id - LM_0, lm_reg);
-
-		if (ctl->ops.get_staged_sspp &&
-				ctl->ops.get_staged_sspp(ctl, iter_lm.blk->id,
-					&splash_display->pipes[
-					splash_display->pipe_cnt], 1)) {
-			splash_display->pipe_cnt++;
-		} else {
-			SDE_ERROR("no pipe detected on LM-%d\n",
-					iter_lm.blk->id - LM_0);
-			return 0;
-		}
-
-		pp = to_sde_hw_pingpong(iter_pp.blk->hw);
-		if (pp && pp->ops.get_dsc_status &&
-				pp->ops.get_dsc_status(pp)) {
-			splash_display->dsc_ids[splash_display->dsc_cnt++] =
-				iter_pp.blk->id;
-			SDE_DEBUG("lm/pp[%d] path, using dsc[%d]\n",
-					iter_lm.blk->id - LM_0,
-					iter_pp.blk->id - DSC_0);
-		}
-	}
-
-	return splash_display->lm_cnt;
-}
-
-int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
-				struct sde_rm *rm,
-				struct sde_splash_data *splash_data,
-				struct sde_mdss_cfg *cat)
-{
-	struct sde_rm_hw_iter iter_c;
-	int index = 0, ctl_top_cnt;
-	struct sde_kms *sde_kms = NULL;
-	struct sde_hw_mdp *hw_mdp;
-	struct sde_splash_display *splash_display;
-	u8 intf_sel;
-
-	if (!priv || !rm || !cat || !splash_data) {
-		SDE_ERROR("invalid input parameters\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("mixer_count=%d, ctl_count=%d, dsc_count=%d\n",
-			cat->mixer_count,
-			cat->ctl_count,
-			cat->dsc_count);
-
-	ctl_top_cnt = cat->ctl_count;
-
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-	sde_kms = to_sde_kms(priv->kms);
-
-	hw_mdp = sde_rm_get_mdp(rm);
-
-	sde_rm_init_hw_iter(&iter_c, 0, SDE_HW_BLK_CTL);
-	while (_sde_rm_get_hw_locked(rm, &iter_c)) {
-		struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter_c.blk->hw);
-
-		if (!ctl->ops.get_ctl_intf) {
-			SDE_ERROR("get_ctl_intf not initialized\n");
-			return -EINVAL;
-		}
-
-		intf_sel = ctl->ops.get_ctl_intf(ctl);
-		if (intf_sel) {
-			splash_display =  &splash_data->splash_display[index];
-			SDE_DEBUG("finding resources for display=%d ctl=%d\n",
-					index, iter_c.blk->id - CTL_0);
-
-			_sde_rm_get_hw_blk_for_cont_splash(rm,
-					ctl, splash_display);
-			splash_display->cont_splash_enabled = true;
-			splash_display->ctl_ids[splash_display->ctl_cnt++] =
-				iter_c.blk->id;
-
-			if (hw_mdp && hw_mdp->ops.get_split_flush_status) {
-				splash_display->single_flush_en =
-					hw_mdp->ops.get_split_flush_status(
-							hw_mdp);
-			}
-
-			if (!splash_display->single_flush_en ||
-					(iter_c.blk->id != CTL_0))
-				index++;
-
-			if (index >= ARRAY_SIZE(splash_data->splash_display))
-				break;
-		}
-	}
-
-	if (index != splash_data->num_splash_displays) {
-		SDE_DEBUG("mismatch active displays vs actually enabled :%d/%d",
-				splash_data->num_splash_displays, index);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int _sde_rm_populate_requirements(
-		struct sde_rm *rm,
-		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state,
-		struct sde_rm_requirements *reqs)
-{
-	const struct drm_display_mode *mode = &crtc_state->mode;
-	int i;
-
-	memset(reqs, 0, sizeof(*reqs));
-
-	reqs->top_ctrl = sde_connector_get_property(conn_state,
-			CONNECTOR_PROP_TOPOLOGY_CONTROL);
-	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
-
-	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++) {
-		if (RM_IS_TOPOLOGY_MATCH(rm->topology_tbl[i],
-					reqs->hw_res.topology)) {
-			reqs->topology = &rm->topology_tbl[i];
-			break;
-		}
-	}
-
-	if (!reqs->topology) {
-		SDE_ERROR("invalid topology for the display\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * select dspp HW block for all dsi displays and ds for only
-	 * primary dsi display.
-	 */
-	if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) {
-		if (!RM_RQ_DSPP(reqs))
-			reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-
-		if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
-		    sde_encoder_is_primary_display(enc))
-			reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS);
-	}
-
-	/**
-	 * Set the requirement for LM which has CWB support if CWB is
-	 * found enabled.
-	 */
-	if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc))
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_CWB);
-
-	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
-			reqs->hw_res.display_num_of_h_tiles);
-	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
-			reqs->topology->num_lm, reqs->topology->num_ctl,
-			reqs->topology->top_name,
-			reqs->topology->needs_split_display);
-	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->topology->num_lm,
-			reqs->top_ctrl, reqs->topology->top_name,
-			reqs->topology->num_ctl);
-
-	return 0;
-}
-
-static struct sde_rm_rsvp *_sde_rm_get_rsvp(
-		struct sde_rm *rm,
-		struct drm_encoder *enc)
-{
-	struct sde_rm_rsvp *i;
-
-	if (!rm || !enc) {
-		SDE_ERROR("invalid params\n");
-		return NULL;
-	}
-
-	if (list_empty(&rm->rsvps))
-		return NULL;
-
-	list_for_each_entry(i, &rm->rsvps, list)
-		if (i->enc_id == enc->base.id)
-			return i;
-
-	return NULL;
-}
-
-static struct sde_rm_rsvp *_sde_rm_get_rsvp_nxt(
-		struct sde_rm *rm,
-		struct drm_encoder *enc)
-{
-	struct sde_rm_rsvp *i;
-
-	if (list_empty(&rm->rsvps))
-		return NULL;
-
-	list_for_each_entry(i, &rm->rsvps, list)
-		if (i->enc_id == enc->base.id)
-			break;
-
-	list_for_each_entry_continue(i, &rm->rsvps, list)
-		if (i->enc_id == enc->base.id)
-			return i;
-
-	return NULL;
-}
-
-static struct drm_connector *_sde_rm_get_connector(
-		struct drm_encoder *enc)
-{
-	struct drm_connector *conn = NULL;
-	struct list_head *connector_list =
-			&enc->dev->mode_config.connector_list;
-
-	list_for_each_entry(conn, connector_list, head)
-		if (conn->encoder == enc)
-			return conn;
-
-	return NULL;
-}
-
-int sde_rm_update_topology(struct drm_connector_state *conn_state,
-	struct msm_display_topology *topology)
-{
-	int i, ret = 0;
-	struct msm_display_topology top;
-	enum sde_rm_topology_name top_name = SDE_RM_TOPOLOGY_NONE;
-
-	if (!conn_state)
-		return -EINVAL;
-
-	if (topology) {
-		top = *topology;
-		for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
-			if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], top)) {
-				top_name = g_top_table[i].top_name;
-				break;
-			}
-	}
-
-	ret = msm_property_set_property(
-			sde_connector_get_propinfo(conn_state->connector),
-			sde_connector_get_property_state(conn_state),
-			CONNECTOR_PROP_TOPOLOGY_NAME, top_name);
-
-	return ret;
-}
-
-/**
- * _sde_rm_release_rsvp - release resources and release a reservation
- * @rm:	KMS handle
- * @rsvp:	RSVP pointer to release and release resources for
- */
-static void _sde_rm_release_rsvp(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct drm_connector *conn)
-{
-	struct sde_rm_rsvp *rsvp_c, *rsvp_n;
-	struct sde_rm_hw_blk *blk;
-	enum sde_hw_blk_type type;
-
-	if (!rsvp)
-		return;
-
-	SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
-
-	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
-		if (rsvp == rsvp_c) {
-			list_del(&rsvp_c->list);
-			break;
-		}
-	}
-
-	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
-		list_for_each_entry(blk, &rm->hw_blks[type], list) {
-			if (blk->rsvp == rsvp) {
-				blk->rsvp = NULL;
-				SDE_DEBUG("rel rsvp %d enc %d %d %d\n",
-						rsvp->seq, rsvp->enc_id,
-						blk->type, blk->id);
-			}
-			if (blk->rsvp_nxt == rsvp) {
-				blk->rsvp_nxt = NULL;
-				SDE_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
-						rsvp->seq, rsvp->enc_id,
-						blk->type, blk->id);
-			}
-		}
-	}
-
-	kfree(rsvp);
-}
-
-void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt)
-{
-	struct sde_rm_rsvp *rsvp;
-	struct drm_connector *conn;
-	uint64_t top_ctrl;
-
-	if (!rm || !enc) {
-		SDE_ERROR("invalid params\n");
-		return;
-	}
-
-	mutex_lock(&rm->rm_lock);
-
-	if (nxt)
-		rsvp = _sde_rm_get_rsvp_nxt(rm, enc);
-	else
-		rsvp = _sde_rm_get_rsvp(rm, enc);
-	if (!rsvp) {
-		SDE_DEBUG("failed to find rsvp for enc %d, nxt %d",
-				enc->base.id, nxt);
-		goto end;
-	}
-
-	conn = _sde_rm_get_connector(enc);
-	if (!conn) {
-		SDE_ERROR("failed to get connector for enc %d, nxt %d",
-				enc->base.id, nxt);
-		goto end;
-	}
-
-	top_ctrl = sde_connector_get_property(conn->state,
-			CONNECTOR_PROP_TOPOLOGY_CONTROL);
-
-	if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
-		SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
-				rsvp->seq, rsvp->enc_id);
-	} else {
-		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
-				rsvp->enc_id);
-		_sde_rm_release_rsvp(rm, rsvp, conn);
-	}
-
-end:
-	mutex_unlock(&rm->rm_lock);
-}
-
-static int _sde_rm_commit_rsvp(
-		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp,
-		struct drm_connector_state *conn_state)
-{
-	struct sde_rm_hw_blk *blk;
-	enum sde_hw_blk_type type;
-	int ret = 0;
-
-	/* Swap next rsvp to be the active */
-	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
-		list_for_each_entry(blk, &rm->hw_blks[type], list) {
-			if (blk->rsvp_nxt) {
-				blk->rsvp = blk->rsvp_nxt;
-				blk->rsvp_nxt = NULL;
-			}
-		}
-	}
-
-	if (!ret) {
-		SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
-				rsvp->topology);
-		SDE_EVT32(rsvp->enc_id, rsvp->topology);
-	}
-
-	return ret;
-}
-
-int sde_rm_reserve(
-		struct sde_rm *rm,
-		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state,
-		bool test_only)
-{
-	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
-	struct sde_rm_requirements reqs;
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	int ret;
-
-	if (!rm || !enc || !crtc_state || !conn_state) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	if (!enc->dev || !enc->dev->dev_private) {
-		SDE_ERROR("drm device invalid\n");
-		return -EINVAL;
-	}
-	priv = enc->dev->dev_private;
-	if (!priv->kms) {
-		SDE_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-	sde_kms = to_sde_kms(priv->kms);
-
-	/* Check if this is just a page-flip */
-	if (!_sde_rm_is_display_in_cont_splash(sde_kms, enc) &&
-			!drm_atomic_crtc_needs_modeset(crtc_state))
-		return 0;
-
-	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
-			conn_state->connector->base.id, enc->base.id,
-			crtc_state->crtc->base.id, test_only);
-	SDE_EVT32(enc->base.id, conn_state->connector->base.id);
-
-	mutex_lock(&rm->rm_lock);
-
-	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
-
-	rsvp_cur = _sde_rm_get_rsvp(rm, enc);
-	rsvp_nxt = _sde_rm_get_rsvp_nxt(rm, enc);
-
-	if (!test_only && rsvp_nxt)
-		goto commit_rsvp;
-
-	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
-			conn_state, &reqs);
-	if (ret) {
-		SDE_ERROR("failed to populate hw requirements\n");
-		goto end;
-	}
-
-	/*
-	 * We only support one active reservation per-hw-block. But to implement
-	 * transactional semantics for test-only, and for allowing failure while
-	 * modifying your existing reservation, over the course of this
-	 * function we can have two reservations:
-	 * Current: Existing reservation
-	 * Next: Proposed reservation. The proposed reservation may fail, or may
-	 *       be discarded if in test-only mode.
-	 * If reservation is successful, and we're not in test-only, then we
-	 * replace the current with the next.
-	 */
-	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
-	if (!rsvp_nxt) {
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	/*
-	 * User can request that we clear out any reservation during the
-	 * atomic_check phase by using this CLEAR bit
-	 */
-	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
-		SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
-				rsvp_cur->seq, rsvp_cur->enc_id);
-		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
-		rsvp_cur = NULL;
-		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
-	}
-
-	/* Check the proposed reservation, store it in hw's "next" field */
-	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
-			rsvp_nxt, &reqs);
-
-	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
-
-	if (ret) {
-		SDE_ERROR("failed to reserve hw resources: %d, test_only %d\n",
-				ret, test_only);
-		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
-		goto end;
-	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
-		/*
-		 * Normally, if test_only, test the reservation and then undo
-		 * However, if the user requests LOCK, then keep the reservation
-		 * made during the atomic_check phase.
-		 */
-		SDE_DEBUG("test_only: rsvp[s%de%d]\n",
-				rsvp_nxt->seq, rsvp_nxt->enc_id);
-		goto end;
-	} else {
-		if (test_only && RM_RQ_LOCK(&reqs))
-			SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
-					rsvp_nxt->seq, rsvp_nxt->enc_id);
-	}
-
-commit_rsvp:
-	_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
-	ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
-
-end:
-	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
-	mutex_unlock(&rm->rm_lock);
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
deleted file mode 100644
index 6f1ab29..0000000
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_RM_H__
-#define __SDE_RM_H__
-
-#include <linux/list.h>
-
-#include "msm_kms.h"
-#include "sde_hw_top.h"
-
-#define SINGLE_CTL	1
-#define DUAL_CTL	2
-
-/**
- * enum sde_rm_topology_name - HW resource use case in use by connector
- * @SDE_RM_TOPOLOGY_NONE:                 No topology in use currently
- * @SDE_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:       1 LM, 1 DSC, 1 PP, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE_DSC:         2 LM, 2 DSC, 2 PP, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC: 2 LM, 2 PP, 3DMux, 1 DSC, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:    2 LM, 2 PP, 2 DSC Merge, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_PPSPLIT:              1 LM, 2 PPs, 2 INTF/WB
- */
-enum sde_rm_topology_name {
-	SDE_RM_TOPOLOGY_NONE = 0,
-	SDE_RM_TOPOLOGY_SINGLEPIPE,
-	SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,
-	SDE_RM_TOPOLOGY_DUALPIPE,
-	SDE_RM_TOPOLOGY_DUALPIPE_DSC,
-	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,
-	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,
-	SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,
-	SDE_RM_TOPOLOGY_PPSPLIT,
-	SDE_RM_TOPOLOGY_MAX,
-};
-
-/**
- * enum sde_rm_topology_control - HW resource use case in use by connector
- * @SDE_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
- *                              test, reserve the resources for this display.
- *                              Normal behavior would not impact the reservation
- *                              list during the AtomicTest phase.
- * @SDE_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
- *                               release any reservation held by this display.
- *                               Normal behavior would not impact the
- *                               reservation list during the AtomicTest phase.
- * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
- * @SDE_RM_TOPCTL_DS  : Require layer mixers with DS capabilities
- * @SDE_RM_TOPCTL_CWB  : Require layer mixers with CWB capabilities
- */
-enum sde_rm_topology_control {
-	SDE_RM_TOPCTL_RESERVE_LOCK,
-	SDE_RM_TOPCTL_RESERVE_CLEAR,
-	SDE_RM_TOPCTL_DSPP,
-	SDE_RM_TOPCTL_DS,
-	SDE_RM_TOPCTL_CWB,
-};
-
-/**
- * enum sde_rm_topology_control - HW resource use case in use by connector
- * @SDE_RM_QSYNC_DISABLED: If set, Qsync feature is supported and in
- *                              disable state.
- * @SDE_RM_QSYNC_CONTINUOUS_MODE: If set, Qsync is enabled in continuous
- *                              mode.
- */
-enum sde_rm_qsync_modes {
-	SDE_RM_QSYNC_DISABLED,
-	SDE_RM_QSYNC_CONTINUOUS_MODE,
-};
-
-/**
- * struct sde_rm_topology_def - Topology table definition
- * @top_name: name identifying this topology
- * @num_lm:   number of layer mixers used
- * @num_comp_enc: number of encoders used
- * @num_intf: number of interface used
- * @num_ctl: number of control path used
- * @needs_split_display: If set split display is enabled
- */
-struct sde_rm_topology_def {
-	enum sde_rm_topology_name top_name;
-	int num_lm;
-	int num_comp_enc;
-	int num_intf;
-	int num_ctl;
-	int needs_split_display;
-};
-
-/**
- * struct sde_rm - SDE dynamic hardware resource manager
- * @dev: device handle for event logging purposes
- * @rsvps: list of hardware reservations by each crtc->encoder->connector
- * @hw_blks: array of lists of hardware resources present in the system, one
- *	list per type of hardware block
- * @hw_mdp: hardware object for mdp_top
- * @lm_max_width: cached layer mixer maximum width
- * @rsvp_next_seq: sequence number for next reservation for debugging purposes
- * @rm_lock: resource manager mutex
- */
-struct sde_rm {
-	struct drm_device *dev;
-	struct list_head rsvps;
-	struct list_head hw_blks[SDE_HW_BLK_MAX];
-	struct sde_hw_mdp *hw_mdp;
-	uint32_t lm_max_width;
-	uint32_t rsvp_next_seq;
-	struct mutex rm_lock;
-	const struct sde_rm_topology_def *topology_tbl;
-};
-
-/**
- *  struct sde_rm_hw_blk - resource manager internal structure
- *	forward declaration for single iterator definition without void pointer
- */
-struct sde_rm_hw_blk;
-
-/**
- * struct sde_rm_hw_iter - iterator for use with sde_rm
- * @hw: sde_hw object requested, or NULL on failure
- * @blk: sde_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct sde_rm_hw_iter {
-	void *hw;
-	struct sde_rm_hw_blk *blk;
-	uint32_t enc_id;
-	enum sde_hw_blk_type type;
-};
-
-/**
- * struct sde_rm_hw_request - data for requesting hw blk
- * @hw: sde_hw object requested, or NULL on failure
- * @type: Hardware Block Type client wishes to search for
- * @id: Hardware block id
- */
-struct sde_rm_hw_request {
-	void *hw;
-	enum sde_hw_blk_type type;
-	int id;
-};
-
-/**
- * sde_rm_get_topology_name - get the name of the given topology config
- * @topology: msm_display_topology topology config
- * @Return: name of the given topology
- */
-enum sde_rm_topology_name sde_rm_get_topology_name(
-	struct msm_display_topology topology);
-
-
-/**
- * sde_rm_init - Read hardware catalog and create reservation tracking objects
- *	for all HW blocks.
- * @rm: SDE Resource Manager handle
- * @cat: Pointer to hardware catalog
- * @mmio: mapped register io address of MDP
- * @dev: device handle for event logging purposes
- * @Return: 0 on Success otherwise -ERROR
- */
-int sde_rm_init(struct sde_rm *rm,
-		struct sde_mdss_cfg *cat,
-		void __iomem *mmio,
-		struct drm_device *dev);
-
-/**
- * sde_rm_destroy - Free all memory allocated by sde_rm_init
- * @rm: SDE Resource Manager handle
- * @Return: 0 on Success otherwise -ERROR
- */
-int sde_rm_destroy(struct sde_rm *rm);
-
-/**
- * sde_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
- *	the use connections and user requirements, specified through related
- *	topology control properties, and reserve hardware blocks to that
- *	display chain.
- *	HW blocks can then be accessed through sde_rm_get_* functions.
- *	HW Reservations should be released via sde_rm_release_hw.
- * @rm: SDE Resource Manager handle
- * @drm_enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
- * @conn_state: Proposed Atomic DRM Connector State handle
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
- * @Return: 0 on Success otherwise -ERROR
- */
-int sde_rm_reserve(struct sde_rm *rm,
-		struct drm_encoder *drm_enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state,
-		bool test_only);
-
-/**
- * sde_rm_release - Given the encoder for the display chain, release any
- *	HW blocks previously reserved for that use case.
- * @rm: SDE Resource Manager handle
- * @enc: DRM Encoder handle
- * @nxt: Choose option to release rsvp_nxt
- * @Return: 0 on Success otherwise -ERROR
- */
-void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt);
-
-/**
- * sde_rm_get_mdp - Retrieve HW block for MDP TOP.
- *	This is never reserved, and is usable by any display.
- * @rm: SDE Resource Manager handle
- * @Return: Pointer to hw block or NULL
- */
-struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm);
-
-/**
- * sde_rm_init_hw_iter - setup given iterator for new iteration over hw list
- *	using sde_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-void sde_rm_init_hw_iter(
-		struct sde_rm_hw_iter *iter,
-		uint32_t enc_id,
-		enum sde_hw_blk_type type);
-/**
- * sde_rm_get_hw - retrieve reserved hw object given encoder and hw type
- *	Meant to do a single pass through the hardware list to iteratively
- *	retrieve hardware blocks of a given type for a given encoder.
- *	Initialize an iterator object.
- *	Set hw block type of interest. Set encoder id of interest, 0 for any.
- *	Function returns first hw of type for that encoder.
- *	Subsequent calls will return the next reserved hw of that type in-order.
- *	Iterator HW pointer will be null on failure to find hw.
- * @rm: SDE Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
- */
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
-
-/**
- * sde_rm_request_hw_blk - retrieve the requested hardware block
- * @rm: SDE Resource Manager handle
- * @hw: holds the input and output information of the requested hw block
- * @Return: true on match found, false on no match found
- */
-bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw);
-
-/**
- * sde_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int sde_rm_check_property_topctl(uint64_t val);
-
-/**
- * sde_rm_cont_splash_res_init - Read the current MDSS configuration
- *	to update the splash data structure with the topology
- *	configured by the bootloader.
- * @priv: DRM private structure handle
- * @rm: SDE Resource Manager handle
- * @splash_data: Pointer to the splash_data structure to be updated.
- * @cat: Pointer to the SDE catalog
- * @Return: 0 on success or error
- */
-int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
-				struct sde_rm *rm,
-				struct sde_splash_data *splash_data,
-				struct sde_mdss_cfg *cat);
-
-/**
- * sde_rm_update_topology - sets topology property of the connector
- * @conn_state: drm state of the connector
- * @topology: topology selected for the display
- * @return: 0 on success or error
- */
-int sde_rm_update_topology(struct drm_connector_state *conn_state,
-	struct msm_display_topology *topology);
-
-/**
- * sde_rm_topology_is_dual_ctl - checks if topoloy requires two control paths
- * @rm: SDE Resource Manager handle
- * @topology: topology selected for the display
- * @return: true if two control paths are required or false
- */
-static inline bool sde_rm_topology_is_dual_ctl(struct sde_rm *rm,
-		enum sde_rm_topology_name topology)
-{
-	if ((!rm) || (topology <= SDE_RM_TOPOLOGY_NONE) ||
-			(topology >= SDE_RM_TOPOLOGY_MAX)) {
-		pr_err("invalid arguments: rm:%d topology:%d\n",
-				rm == NULL, topology);
-
-		return false;
-	}
-
-	return rm->topology_tbl[topology].num_ctl == DUAL_CTL;
-}
-#endif /* __SDE_RM_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
deleted file mode 100644
index 73498d1..0000000
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ /dev/null
@@ -1,433 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- */
-
-#if !defined(_SDE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
-#define _SDE_TRACE_H_
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM sde
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE sde_trace
-
-TRACE_EVENT(sde_perf_set_qos_luts,
-	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
-		u32 lut, u32 lut_usage),
-	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, fmt)
-			__field(bool, rt)
-			__field(u32, fl)
-			__field(u64, lut)
-			__field(u32, lut_usage)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->fmt = fmt;
-			__entry->rt = rt;
-			__entry->fl = fl;
-			__entry->lut = lut;
-			__entry->lut_usage = lut_usage;
-	),
-	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
-			__entry->pnum, __entry->fmt,
-			__entry->rt, __entry->fl,
-			__entry->lut, __entry->lut_usage)
-);
-
-TRACE_EVENT(sde_perf_set_danger_luts,
-	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
-		u32 safe_lut),
-	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, fmt)
-			__field(u32, mode)
-			__field(u32, danger_lut)
-			__field(u32, safe_lut)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->fmt = fmt;
-			__entry->mode = mode;
-			__entry->danger_lut = danger_lut;
-			__entry->safe_lut = safe_lut;
-	),
-	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
-			__entry->pnum, __entry->fmt,
-			__entry->mode, __entry->danger_lut,
-			__entry->safe_lut)
-);
-
-TRACE_EVENT(sde_perf_set_ot,
-	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
-	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, xin_id)
-			__field(u32, rd_lim)
-			__field(u32, vbif_idx)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->xin_id = xin_id;
-			__entry->rd_lim = rd_lim;
-			__entry->vbif_idx = vbif_idx;
-	),
-	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
-			__entry->pnum, __entry->xin_id, __entry->rd_lim,
-			__entry->vbif_idx)
-)
-
-TRACE_EVENT(sde_perf_update_bus,
-	TP_PROTO(int client, u32 bus_id, unsigned long long ab_quota,
-	unsigned long long ib_quota),
-	TP_ARGS(client, bus_id, ab_quota, ib_quota),
-	TP_STRUCT__entry(
-			__field(int, client)
-			__field(u32, bus_id);
-			__field(u64, ab_quota)
-			__field(u64, ib_quota)
-	),
-	TP_fast_assign(
-			__entry->client = client;
-			__entry->bus_id = bus_id;
-			__entry->ab_quota = ab_quota;
-			__entry->ib_quota = ib_quota;
-	),
-	TP_printk("Request client:%d bus_id:%d ab=%llu ib=%llu",
-			__entry->client,
-			__entry->bus_id,
-			__entry->ab_quota,
-			__entry->ib_quota)
-)
-
-
-TRACE_EVENT(sde_cmd_release_bw,
-	TP_PROTO(u32 crtc_id),
-	TP_ARGS(crtc_id),
-	TP_STRUCT__entry(
-			__field(u32, crtc_id)
-	),
-	TP_fast_assign(
-			__entry->crtc_id = crtc_id;
-	),
-	TP_printk("crtc:%d", __entry->crtc_id)
-);
-
-TRACE_EVENT(sde_encoder_underrun,
-	TP_PROTO(u32 enc_id, u32 underrun_cnt),
-	TP_ARGS(enc_id, underrun_cnt),
-	TP_STRUCT__entry(
-			__field(u32, enc_id)
-			__field(u32, underrun_cnt)
-	),
-	TP_fast_assign(
-			__entry->enc_id = enc_id;
-			__entry->underrun_cnt = underrun_cnt;
-
-	),
-	TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id,
-		__entry->underrun_cnt)
-);
-
-TRACE_EVENT(tracing_mark_write,
-	TP_PROTO(int pid, const char *name, bool trace_begin),
-	TP_ARGS(pid, name, trace_begin),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(trace_name, name)
-			__field(bool, trace_begin)
-	),
-	TP_fast_assign(
-			__entry->pid = pid;
-			__assign_str(trace_name, name);
-			__entry->trace_begin = trace_begin;
-	),
-	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
-		__entry->pid, __get_str(trace_name))
-)
-
-TRACE_EVENT(sde_trace_counter,
-	TP_PROTO(int pid, char *name, int value),
-	TP_ARGS(pid, name, value),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(counter_name, name)
-			__field(int, value)
-	),
-	TP_fast_assign(
-			__entry->pid = current->tgid;
-			__assign_str(counter_name, name);
-			__entry->value = value;
-	),
-	TP_printk("%d|%s|%d", __entry->pid,
-			__get_str(counter_name), __entry->value)
-)
-
-#define SDE_TRACE_EVTLOG_SIZE	15
-TRACE_EVENT(sde_evtlog,
-	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 *data),
-	TP_ARGS(tag, tag_id, cnt, data),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(evtlog_tag, tag)
-			__field(u32, tag_id)
-			__array(u32, data, SDE_TRACE_EVTLOG_SIZE)
-	),
-	TP_fast_assign(
-			__entry->pid = current->tgid;
-			__assign_str(evtlog_tag, tag);
-			__entry->tag_id = tag_id;
-			if (cnt > SDE_TRACE_EVTLOG_SIZE)
-				cnt = SDE_TRACE_EVTLOG_SIZE;
-			memcpy(__entry->data, data, cnt * sizeof(u32));
-			memset(&__entry->data[cnt], 0,
-				(SDE_TRACE_EVTLOG_SIZE - cnt) * sizeof(u32));
-	),
-	TP_printk("%d|%s:%d|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x",
-			__entry->pid, __get_str(evtlog_tag),
-			__entry->tag_id,
-			__entry->data[0], __entry->data[1],
-			__entry->data[2], __entry->data[3],
-			__entry->data[4], __entry->data[5],
-			__entry->data[6], __entry->data[7],
-			__entry->data[8], __entry->data[9],
-			__entry->data[10], __entry->data[11],
-			__entry->data[12], __entry->data[13],
-			__entry->data[14])
-)
-
-TRACE_EVENT(sde_perf_crtc_update,
-	TP_PROTO(u32 crtc,
-			u64 bw_ctl_mnoc, u64 per_pipe_ib_mnoc,
-			u64 bw_ctl_llcc, u64 per_pipe_ib_llcc,
-			u64 bw_ctl_ebi, u64 per_pipe_ib_ebi,
-			u32 core_clk_rate, bool stop_req,
-			u32 update_bus, u32 update_clk, int params),
-	TP_ARGS(crtc,
-		bw_ctl_mnoc, per_pipe_ib_mnoc,
-		bw_ctl_llcc, per_pipe_ib_llcc,
-		bw_ctl_ebi, per_pipe_ib_ebi,
-		core_clk_rate, stop_req,
-		update_bus, update_clk, params),
-	TP_STRUCT__entry(
-			__field(u32, crtc)
-			__field(u64, bw_ctl_mnoc)
-			__field(u64, per_pipe_ib_mnoc)
-			__field(u64, bw_ctl_llcc)
-			__field(u64, per_pipe_ib_llcc)
-			__field(u64, bw_ctl_ebi)
-			__field(u64, per_pipe_ib_ebi)
-			__field(u32, core_clk_rate)
-			__field(bool, stop_req)
-			__field(u32, update_bus)
-			__field(u32, update_clk)
-			__field(int, params)
-	),
-	TP_fast_assign(
-			__entry->crtc = crtc;
-			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
-			__entry->per_pipe_ib_mnoc = per_pipe_ib_mnoc;
-			__entry->bw_ctl_llcc = bw_ctl_llcc;
-			__entry->per_pipe_ib_llcc = per_pipe_ib_llcc;
-			__entry->bw_ctl_ebi = bw_ctl_ebi;
-			__entry->per_pipe_ib_ebi = per_pipe_ib_ebi;
-			__entry->core_clk_rate = core_clk_rate;
-			__entry->stop_req = stop_req;
-			__entry->update_bus = update_bus;
-			__entry->update_clk = update_clk;
-			__entry->params = params;
-	),
-	 TP_printk(
-		"crtc=%d mnoc=[%llu %llu] llcc=[%llu %llu] ebi=[%llu %llu] clk=%u stop=%d ubus=%d uclk=%d %d",
-			__entry->crtc,
-			__entry->bw_ctl_mnoc,
-			__entry->per_pipe_ib_mnoc,
-			__entry->bw_ctl_llcc,
-			__entry->per_pipe_ib_llcc,
-			__entry->bw_ctl_ebi,
-			__entry->per_pipe_ib_ebi,
-			__entry->core_clk_rate,
-			__entry->stop_req,
-			__entry->update_bus,
-			__entry->update_clk,
-			__entry->params)
-);
-
-TRACE_EVENT(sde_perf_calc_crtc,
-	TP_PROTO(u32 crtc,
-			u64 bw_ctl_mnoc,
-			u64 bw_ctl_llcc,
-			u64 bw_ctl_ebi,
-			u64 ib_mnoc,
-			u64 ib_llcc,
-			u64 ib_ebi,
-			u32 core_clk_rate
-			),
-	TP_ARGS(crtc,
-			bw_ctl_mnoc,
-			bw_ctl_llcc,
-			bw_ctl_ebi,
-			ib_mnoc,
-			ib_llcc,
-			ib_ebi,
-			core_clk_rate),
-	TP_STRUCT__entry(
-			__field(u32, crtc)
-			__field(u64, bw_ctl_mnoc)
-			__field(u64, bw_ctl_llcc)
-			__field(u64, bw_ctl_ebi)
-			__field(u64, ib_mnoc)
-			__field(u64, ib_llcc)
-			__field(u64, ib_ebi)
-			__field(u32, core_clk_rate)
-
-	),
-	TP_fast_assign(
-			__entry->crtc = crtc;
-			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
-			__entry->bw_ctl_llcc = bw_ctl_llcc;
-			__entry->bw_ctl_ebi = bw_ctl_ebi;
-			__entry->ib_mnoc = ib_mnoc;
-			__entry->ib_llcc = ib_llcc;
-			__entry->ib_ebi = ib_ebi;
-			__entry->core_clk_rate = core_clk_rate;
-	),
-	 TP_printk(
-		"crtc=%d mnoc=[%llu, %llu] llcc=[%llu %llu] ebi=[%llu, %llu] clk_rate=%u",
-			__entry->crtc,
-			__entry->bw_ctl_mnoc,
-			__entry->ib_mnoc,
-			__entry->bw_ctl_llcc,
-			__entry->ib_llcc,
-			__entry->bw_ctl_ebi,
-			__entry->ib_ebi,
-			__entry->core_clk_rate)
-);
-
-TRACE_EVENT(sde_perf_uidle_cntr,
-	TP_PROTO(u32 crtc,
-			u32 fal1_gate_cntr,
-			u32 fal10_gate_cntr,
-			u32 fal_wait_gate_cntr,
-			u32 fal1_num_transitions_cntr,
-			u32 fal10_num_transitions_cntr,
-			u32 min_gate_cntr,
-			u32 max_gate_cntr
-			),
-	TP_ARGS(crtc,
-			fal1_gate_cntr,
-			fal10_gate_cntr,
-			fal_wait_gate_cntr,
-			fal1_num_transitions_cntr,
-			fal10_num_transitions_cntr,
-			min_gate_cntr,
-			max_gate_cntr),
-	TP_STRUCT__entry(
-			__field(u32, crtc)
-			__field(u32, fal1_gate_cntr)
-			__field(u32, fal10_gate_cntr)
-			__field(u32, fal_wait_gate_cntr)
-			__field(u32, fal1_num_transitions_cntr)
-			__field(u32, fal10_num_transitions_cntr)
-			__field(u32, min_gate_cntr)
-			__field(u32, max_gate_cntr)
-	),
-	TP_fast_assign(
-			__entry->crtc = crtc;
-			__entry->fal1_gate_cntr = fal1_gate_cntr;
-			__entry->fal10_gate_cntr = fal10_gate_cntr;
-			__entry->fal_wait_gate_cntr = fal_wait_gate_cntr;
-			__entry->fal1_num_transitions_cntr =
-				fal1_num_transitions_cntr;
-			__entry->fal10_num_transitions_cntr =
-				fal10_num_transitions_cntr;
-			__entry->min_gate_cntr = min_gate_cntr;
-			__entry->max_gate_cntr = max_gate_cntr;
-	),
-	 TP_printk(
-		"crtc:%d gate:fal1=%d fal10=%d wait=%d min=%d max=%d trns:fal1=%d fal10=%d",
-			__entry->crtc,
-			__entry->fal1_gate_cntr,
-			__entry->fal10_gate_cntr,
-			__entry->fal_wait_gate_cntr,
-			__entry->min_gate_cntr,
-			__entry->max_gate_cntr,
-			__entry->fal1_num_transitions_cntr,
-			__entry->fal10_num_transitions_cntr
-			)
-);
-
-TRACE_EVENT(sde_perf_uidle_status,
-	TP_PROTO(u32 crtc,
-			u32 uidle_danger_status_0,
-			u32 uidle_danger_status_1,
-			u32 uidle_safe_status_0,
-			u32 uidle_safe_status_1,
-			u32 uidle_idle_status_0,
-			u32 uidle_idle_status_1,
-			u32 uidle_fal_status_0,
-			u32 uidle_fal_status_1),
-	TP_ARGS(crtc,
-			uidle_danger_status_0,
-			uidle_danger_status_1,
-			uidle_safe_status_0,
-			uidle_safe_status_1,
-			uidle_idle_status_0,
-			uidle_idle_status_1,
-			uidle_fal_status_0,
-			uidle_fal_status_1),
-	TP_STRUCT__entry(
-			__field(u32, crtc)
-			__field(u32, uidle_danger_status_0)
-			__field(u32, uidle_danger_status_1)
-			__field(u32, uidle_safe_status_0)
-			__field(u32, uidle_safe_status_1)
-			__field(u32, uidle_idle_status_0)
-			__field(u32, uidle_idle_status_1)
-			__field(u32, uidle_fal_status_0)
-			__field(u32, uidle_fal_status_1)),
-	TP_fast_assign(
-			__entry->crtc = crtc;
-			__entry->uidle_danger_status_0 = uidle_danger_status_0;
-			__entry->uidle_danger_status_1 = uidle_danger_status_1;
-			__entry->uidle_safe_status_0 = uidle_safe_status_0;
-			__entry->uidle_safe_status_1 = uidle_safe_status_1;
-			__entry->uidle_idle_status_0 = uidle_idle_status_0;
-			__entry->uidle_idle_status_1 = uidle_idle_status_1;
-			__entry->uidle_fal_status_0 = uidle_fal_status_0;
-			__entry->uidle_fal_status_1 = uidle_fal_status_1;),
-	 TP_printk(
-		"crtc:%d danger[%d, %d] safe[%d, %d] idle[%d, %d] fal[%d, %d]",
-			__entry->crtc,
-			__entry->uidle_danger_status_0,
-			__entry->uidle_danger_status_1,
-			__entry->uidle_safe_status_0,
-			__entry->uidle_safe_status_1,
-			__entry->uidle_idle_status_0,
-			__entry->uidle_idle_status_1,
-			__entry->uidle_fal_status_0,
-			__entry->uidle_fal_status_1
-			)
-);
-
-#define SDE_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
-#define SDE_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
-#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
-
-#define SDE_ATRACE_INT(name, value) \
-	trace_sde_trace_counter(current->tgid, name, value)
-
-#endif /* _SDE_TRACE_H_ */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
deleted file mode 100644
index 44e3a84..0000000
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ /dev/null
@@ -1,594 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-
-#include "sde_vbif.h"
-#include "sde_hw_vbif.h"
-#include "sde_trace.h"
-
-#define MAX_XIN_CLIENT	16
-
-/**
- * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
- * @vbif:	Pointer to hardware vbif driver
- * @xin_id:	Client interface identifier
- * @return:	0 if success; error code otherwise
- */
-static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
-{
-	ktime_t timeout;
-	bool status;
-	int rc;
-
-	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
-		SDE_ERROR("invalid arguments vbif %d\n", !vbif);
-		return -EINVAL;
-	}
-
-	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
-	for (;;) {
-		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
-		if (status)
-			break;
-		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
-			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
-			break;
-		}
-		usleep_range(501, 1000);
-	}
-
-	if (!status) {
-		rc = -ETIMEDOUT;
-		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
-				vbif->idx - VBIF_0, xin_id);
-	} else {
-		rc = 0;
-		SDE_DEBUG("VBIF %d client %d is halted\n",
-				vbif->idx - VBIF_0, xin_id);
-	}
-
-	return rc;
-}
-
-int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id, u32 clk_ctrl)
-{
-	struct sde_hw_vbif *vbif = NULL;
-	struct sde_hw_mdp *mdp;
-	bool forced_on = false;
-	bool status;
-	int rc = 0;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return 0;
-	}
-
-	vbif = sde_kms->hw_vbif[VBIF_RT];
-	mdp = sde_kms->hw_mdp;
-	if (!vbif || !mdp || !vbif->ops.get_halt_ctrl ||
-		       !vbif->ops.set_halt_ctrl ||
-		       !mdp->ops.setup_clk_force_ctrl) {
-		SDE_ERROR("invalid vbif or mdp arguments\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&vbif->mutex);
-
-	SDE_EVT32_VERBOSE(vbif->idx, xin_id);
-
-	/*
-	 * If status is 0, then make sure client clock is not gated
-	 * while halting by forcing it ON only if it was not previously
-	 * forced on. If status is 1 then its already halted.
-	 */
-	status = vbif->ops.get_halt_ctrl(vbif, xin_id);
-	if (status) {
-		mutex_unlock(&vbif->mutex);
-		return 0;
-	}
-
-	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, true);
-
-	/* send halt request for unused plane's xin client */
-	vbif->ops.set_halt_ctrl(vbif, xin_id, true);
-
-	rc = _sde_vbif_wait_for_xin_halt(vbif, xin_id);
-	if (rc) {
-		SDE_ERROR(
-		"wait failed for pipe halt:xin_id %u, clk_ctrl %u, rc %u\n",
-			xin_id, clk_ctrl, rc);
-		SDE_EVT32(xin_id, clk_ctrl, rc, SDE_EVTLOG_ERROR);
-	}
-
-	/* open xin client to enable transactions */
-	vbif->ops.set_halt_ctrl(vbif, xin_id, false);
-	if (forced_on)
-		mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, false);
-
-	mutex_unlock(&vbif->mutex);
-
-	return rc;
-}
-
-/**
- * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
- * @vbif:	Pointer to hardware vbif driver
- * @ot_lim:	Pointer to OT limit to be modified
- * @params:	Pointer to usecase parameters
- */
-static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
-		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
-{
-	u64 pps;
-	const struct sde_vbif_dynamic_ot_tbl *tbl;
-	u32 i;
-
-	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
-		return;
-
-	/* Dynamic OT setting done only for WFD */
-	if (!params->is_wfd)
-		return;
-
-	pps = params->frame_rate;
-	pps *= params->width;
-	pps *= params->height;
-
-	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
-			&vbif->cap->dynamic_ot_wr_tbl;
-
-	for (i = 0; i < tbl->count; i++) {
-		if (pps <= tbl->cfg[i].pps) {
-			*ot_lim = tbl->cfg[i].ot_limit;
-			break;
-		}
-	}
-
-	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
-			vbif->idx - VBIF_0, params->xin_id,
-			params->width, params->height, params->frame_rate,
-			pps, *ot_lim);
-}
-
-/**
- * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
- * @vbif:	Pointer to hardware vbif driver
- * @params:	Pointer to usecase parameters
- * @return:	OT limit
- */
-static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
-	struct sde_vbif_set_ot_params *params)
-{
-	u32 ot_lim = 0;
-	u32 val;
-
-	if (!vbif || !vbif->cap) {
-		SDE_ERROR("invalid arguments vbif %d\n", !vbif);
-		return -EINVAL;
-	}
-
-	if (vbif->cap->default_ot_wr_limit && !params->rd)
-		ot_lim = vbif->cap->default_ot_wr_limit;
-	else if (vbif->cap->default_ot_rd_limit && params->rd)
-		ot_lim = vbif->cap->default_ot_rd_limit;
-
-	/*
-	 * If default ot is not set from dt/catalog,
-	 * then do not configure it.
-	 */
-	if (ot_lim == 0)
-		goto exit;
-
-	/* Modify the limits if the target and the use case requires it */
-	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
-
-	if (vbif && vbif->ops.get_limit_conf) {
-		val = vbif->ops.get_limit_conf(vbif,
-				params->xin_id, params->rd);
-		if (val == ot_lim)
-			ot_lim = 0;
-	}
-
-exit:
-	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
-			vbif->idx - VBIF_0, params->xin_id, ot_lim);
-	return ot_lim;
-}
-
-/**
- * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
- * @vbif:	Pointer to hardware vbif driver
- * @params:	Pointer to usecase parameters
- *
- * Note this function would block waiting for bus halt.
- */
-void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
-		struct sde_vbif_set_ot_params *params)
-{
-	struct sde_hw_vbif *vbif = NULL;
-	struct sde_hw_mdp *mdp;
-	bool forced_on = false;
-	u32 ot_lim;
-	int ret, i;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return;
-	}
-
-	mdp = sde_kms->hw_mdp;
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		if (sde_kms->hw_vbif[i] &&
-				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
-			vbif = sde_kms->hw_vbif[i];
-			break;
-		}
-	}
-
-	if (!vbif || !mdp) {
-		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
-				vbif != NULL, mdp != NULL);
-		return;
-	}
-
-	if (!mdp->ops.setup_clk_force_ctrl ||
-			!vbif->ops.set_limit_conf ||
-			!vbif->ops.set_halt_ctrl)
-		return;
-
-	mutex_lock(&vbif->mutex);
-
-	SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);
-
-	/* set write_gather_en for all write clients */
-	if (vbif->ops.set_write_gather_en && !params->rd)
-		vbif->ops.set_write_gather_en(vbif, params->xin_id);
-
-	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
-
-	if (ot_lim == 0)
-		goto exit;
-
-	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
-		params->vbif_idx);
-
-	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
-
-	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
-
-	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
-
-	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
-	if (ret)
-		SDE_EVT32(vbif->idx, params->xin_id);
-
-	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
-
-	if (forced_on)
-		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
-	mutex_unlock(&vbif->mutex);
-}
-
-bool sde_vbif_set_xin_halt(struct sde_kms *sde_kms,
-		struct sde_vbif_set_xin_halt_params *params)
-{
-	struct sde_hw_vbif *vbif = NULL;
-	struct sde_hw_mdp *mdp;
-	bool forced_on = false;
-	int ret, i;
-
-	if (!sde_kms || !params) {
-		SDE_ERROR("invalid arguments\n");
-		return false;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return true;
-	}
-
-	mdp = sde_kms->hw_mdp;
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		if (sde_kms->hw_vbif[i] &&
-				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
-			vbif = sde_kms->hw_vbif[i];
-			break;
-		}
-	}
-
-	if (!vbif || !mdp) {
-		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
-				vbif != NULL, mdp != NULL);
-		return false;
-	}
-
-	if (!mdp->ops.setup_clk_force_ctrl ||
-			!vbif->ops.set_halt_ctrl)
-		return false;
-
-	mutex_lock(&vbif->mutex);
-
-	SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);
-
-	if (params->enable) {
-		forced_on = mdp->ops.setup_clk_force_ctrl(mdp,
-				params->clk_ctrl, true);
-
-		vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
-
-		ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
-		if (ret)
-			SDE_EVT32(vbif->idx, params->xin_id, SDE_EVTLOG_ERROR);
-	} else {
-		vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
-
-		if (params->forced_on)
-			mdp->ops.setup_clk_force_ctrl(mdp,
-					params->clk_ctrl, false);
-	}
-
-	mutex_unlock(&vbif->mutex);
-
-	return forced_on;
-}
-
-void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
-		struct sde_vbif_set_qos_params *params)
-{
-	struct sde_hw_vbif *vbif = NULL;
-	struct sde_hw_mdp *mdp;
-	bool forced_on = false;
-	const struct sde_vbif_qos_tbl *qos_tbl;
-	int i;
-
-	if (!sde_kms || !params || !sde_kms->hw_mdp) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return;
-	}
-
-	mdp = sde_kms->hw_mdp;
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		if (sde_kms->hw_vbif[i] &&
-				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
-			vbif = sde_kms->hw_vbif[i];
-			break;
-		}
-	}
-
-	if (!vbif || !vbif->cap) {
-		SDE_ERROR("invalid vbif %d\n", params->vbif_idx);
-		return;
-	}
-
-	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
-		SDE_DEBUG("qos remap not supported\n");
-		return;
-	}
-
-	if (params->client_type > VBIF_MAX_CLIENT) {
-		SDE_ERROR("invalid client type:%d\n", params->client_type);
-		return;
-	}
-
-	qos_tbl = &vbif->cap->qos_tbl[params->client_type];
-	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
-		SDE_DEBUG("qos tbl not defined\n");
-		return;
-	}
-
-	mutex_lock(&vbif->mutex);
-
-	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
-
-	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
-		SDE_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
-				params->vbif_idx, params->xin_id, i,
-				qos_tbl->priority_lvl[i]);
-		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
-				qos_tbl->priority_lvl[i]);
-	}
-
-	if (forced_on)
-		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-
-	mutex_unlock(&vbif->mutex);
-}
-
-void sde_vbif_clear_errors(struct sde_kms *sde_kms)
-{
-	struct sde_hw_vbif *vbif;
-	u32 i, pnd, src;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		vbif = sde_kms->hw_vbif[i];
-		if (vbif && vbif->ops.clear_errors) {
-			mutex_lock(&vbif->mutex);
-			vbif->ops.clear_errors(vbif, &pnd, &src);
-			if (pnd || src) {
-				SDE_EVT32(i, pnd, src);
-				SDE_DEBUG("VBIF %d: pnd 0x%X, src 0x%X\n",
-						vbif->idx - VBIF_0, pnd, src);
-			}
-			mutex_unlock(&vbif->mutex);
-		}
-	}
-}
-
-void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
-{
-	struct sde_hw_vbif *vbif;
-	int i, j;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
-		SDE_DEBUG("vbif operations not permitted\n");
-		return;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		vbif = sde_kms->hw_vbif[i];
-		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
-			mutex_lock(&vbif->mutex);
-			for (j = 0; j < vbif->cap->memtype_count; j++)
-				vbif->ops.set_mem_type(
-						vbif, j, vbif->cap->memtype[j]);
-			mutex_unlock(&vbif->mutex);
-		}
-	}
-}
-
-int sde_vbif_halt_xin_mask(struct sde_kms *sde_kms, u32 xin_id_mask,
-				bool halt)
-{
-	struct sde_hw_vbif *vbif;
-	int i = 0, status, rc;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid argument\n");
-		return -EINVAL;
-	}
-
-	vbif = sde_kms->hw_vbif[VBIF_RT];
-
-	if (!vbif->ops.get_halt_ctrl || !vbif->ops.set_halt_ctrl)
-		return 0;
-
-	SDE_EVT32(xin_id_mask, halt);
-
-	for (i = 0; i < MAX_XIN_CLIENT; i++) {
-		if (xin_id_mask & BIT(i)) {
-			/* unhalt the xin-clients */
-			if (!halt) {
-				vbif->ops.set_halt_ctrl(vbif, i, false);
-				continue;
-			}
-
-			status = vbif->ops.get_halt_ctrl(vbif, i);
-			if (status)
-				continue;
-
-			/* halt xin-clients and wait for ack */
-			vbif->ops.set_halt_ctrl(vbif, i, true);
-
-			rc = _sde_vbif_wait_for_xin_halt(vbif, i);
-			if (rc) {
-				SDE_ERROR("xin_halt failed for xin:%d, rc:%d\n",
-					i, rc);
-				SDE_EVT32(xin_id_mask, i, rc, SDE_EVTLOG_ERROR);
-				return rc;
-			}
-		}
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
-{
-	debugfs_remove_recursive(sde_kms->debugfs_vbif);
-	sde_kms->debugfs_vbif = NULL;
-}
-
-int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
-{
-	char vbif_name[32];
-	struct dentry *debugfs_vbif;
-	int i, j;
-
-	sde_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
-	if (!sde_kms->debugfs_vbif) {
-		SDE_ERROR("failed to create vbif debugfs\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
-		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];
-
-		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
-
-		debugfs_vbif = debugfs_create_dir(vbif_name,
-				sde_kms->debugfs_vbif);
-
-		debugfs_create_u32("features", 0400, debugfs_vbif,
-			(u32 *)&vbif->features);
-
-		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
-			(u32 *)&vbif->xin_halt_timeout);
-
-		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
-			(u32 *)&vbif->default_ot_rd_limit);
-
-		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
-			(u32 *)&vbif->default_ot_wr_limit);
-
-		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
-			struct sde_vbif_dynamic_ot_cfg *cfg =
-					&vbif->dynamic_ot_rd_tbl.cfg[j];
-
-			snprintf(vbif_name, sizeof(vbif_name),
-					"dynamic_ot_rd_%d_pps", j);
-			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
-					(u64 *)&cfg->pps);
-			snprintf(vbif_name, sizeof(vbif_name),
-					"dynamic_ot_rd_%d_ot_limit", j);
-			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
-					(u32 *)&cfg->ot_limit);
-		}
-
-		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
-			struct sde_vbif_dynamic_ot_cfg *cfg =
-					&vbif->dynamic_ot_wr_tbl.cfg[j];
-
-			snprintf(vbif_name, sizeof(vbif_name),
-					"dynamic_ot_wr_%d_pps", j);
-			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
-					(u64 *)&cfg->pps);
-			snprintf(vbif_name, sizeof(vbif_name),
-					"dynamic_ot_wr_%d_ot_limit", j);
-			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
-					(u32 *)&cfg->ot_limit);
-		}
-	}
-
-	return 0;
-}
-#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
deleted file mode 100644
index b16e0c7..0000000
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_VBIF_H__
-#define __SDE_VBIF_H__
-
-#include "sde_kms.h"
-
-struct sde_vbif_set_ot_params {
-	u32 xin_id;
-	u32 num;
-	u32 width;
-	u32 height;
-	u32 frame_rate;
-	bool rd;
-	bool is_wfd;
-	u32 vbif_idx;
-	u32 clk_ctrl;
-};
-
-struct sde_vbif_set_memtype_params {
-	u32 xin_id;
-	u32 vbif_idx;
-	u32 clk_ctrl;
-	bool is_cacheable;
-};
-
-/**
- * struct sde_vbif_set_xin_halt_params - xin halt parameters
- * @vbif_idx: vbif identifier
- * @xin_id: client interface identifier
- * @clk_ctrl: clock control identifier of the xin
- * @forced_on: whether or not previous call to xin halt forced the clocks on,
- *	only applicable to xin halt disable calls
- * @enable: whether to enable/disable xin halts
- */
-struct sde_vbif_set_xin_halt_params {
-	u32 vbif_idx;
-	u32 xin_id;
-	u32 clk_ctrl;
-	bool forced_on;
-	bool enable;
-};
-
-/**
- * struct sde_vbif_set_qos_params - QoS remapper parameter
- * @vbif_idx: vbif identifier
- * @xin_id: client interface identifier
- * @clk_ctrl: clock control identifier of the xin
- * @num: pipe identifier (debug only)
- * @client_type: client type enumerated by sde_vbif_client_type
- */
-struct sde_vbif_set_qos_params {
-	u32 vbif_idx;
-	u32 xin_id;
-	u32 clk_ctrl;
-	u32 num;
-	enum sde_vbif_client_type client_type;
-};
-
-/**
- * sde_vbif_set_ot_limit - set OT limit for vbif client
- * @sde_kms:	SDE handler
- * @params:	Pointer to OT configuration parameters
- */
-void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
-		struct sde_vbif_set_ot_params *params);
-
-/**
- * sde_vbif_set_xin_halt - halt one of the xin ports
- *	This function isn't thread safe.
- * @sde_kms:	SDE handler
- * @params:	Pointer to halt configuration parameters
- * Returns:	Whether or not VBIF clocks were forced on
- */
-bool sde_vbif_set_xin_halt(struct sde_kms *sde_kms,
-		struct sde_vbif_set_xin_halt_params *params);
-
-/**
- * sde_vbif_set_qos_remap - set QoS priority level remap
- * @sde_kms:	SDE handler
- * @params:	Pointer to QoS configuration parameters
- */
-void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
-		struct sde_vbif_set_qos_params *params);
-
-/**
- * sde_vbif_clear_errors - clear any vbif errors
- * @sde_kms:	SDE handler
- */
-void sde_vbif_clear_errors(struct sde_kms *sde_kms);
-
-/**
- * sde_vbif_init_memtypes - initialize xin memory types for vbif
- * @sde_kms:	SDE handler
- */
-void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
-
-/**
- * sde_vbif_halt_plane_xin - halts the xin client for the unused plane
- * On unused plane, check if the vbif for this plane is idle or not.
- * If not then first force_on the planes clock and then send the
- * halt request. Wait for some time then check for the vbif idle
- * or not again.
- * @sde_kms:	SDE handler
- * @xin_id:	xin id of the unused plane
- * @clk_ctrl:	clk ctrl type for the unused plane
- * Returns:	0 on success, error code otherwise
- */
-int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id,
-	       u32 clk_ctrl);
-
-/**
- * sde_vbif_halt_xin_mask - halts/unhalts all the xin clients present in
- * the mask.
- * @sde_kms:	SDE handler
- * @xin_id_mask: Mask of all the xin-ids to be halted/unhalted
- * halt:	boolen to indicate halt/unhalt
- */
-int sde_vbif_halt_xin_mask(struct sde_kms *sde_kms, u32 xin_id_mask, bool halt);
-
-#ifdef CONFIG_DEBUG_FS
-int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
-void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
-#else
-static inline int sde_debugfs_vbif_init(struct sde_kms *sde_kms,
-		struct dentry *debugfs_root)
-{
-	return 0;
-}
-static inline void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
-{
-}
-#endif
-#endif /* __SDE_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
deleted file mode 100644
index 5c955bc..0000000
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ /dev/null
@@ -1,830 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <uapi/drm/sde_drm.h>
-
-#include "msm_kms.h"
-#include "sde_kms.h"
-#include "sde_wb.h"
-#include "sde_formats.h"
-
-/* maximum display mode resolution if not available from catalog */
-#define SDE_WB_MODE_MAX_WIDTH	4096
-#define SDE_WB_MODE_MAX_HEIGHT	4096
-
-/* Serialization lock for sde_wb_list */
-static DEFINE_MUTEX(sde_wb_list_lock);
-
-/* List of all writeback devices installed */
-static LIST_HEAD(sde_wb_list);
-
-/**
- * sde_wb_is_format_valid - check if given format/modifier is supported
- * @wb_dev:	Pointer to writeback device
- * @pixel_format:	Fourcc pixel format
- * @format_modifier:	Format modifier
- * Returns:		true if valid; false otherwise
- */
-static int sde_wb_is_format_valid(struct sde_wb_device *wb_dev,
-		u32 pixel_format, u64 format_modifier)
-{
-	const struct sde_format_extended *fmts = wb_dev->wb_cfg->format_list;
-	int i;
-
-	if (!fmts)
-		return false;
-
-	for (i = 0; fmts[i].fourcc_format; i++)
-		if ((fmts[i].modifier == format_modifier) &&
-				(fmts[i].fourcc_format == pixel_format))
-			return true;
-
-	return false;
-}
-
-enum drm_connector_status
-sde_wb_connector_detect(struct drm_connector *connector,
-		bool force,
-		void *display)
-{
-	enum drm_connector_status rc = connector_status_unknown;
-
-	SDE_DEBUG("\n");
-
-	if (display)
-		rc = ((struct sde_wb_device *)display)->detect_status;
-
-	return rc;
-}
-
-int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
-{
-	struct sde_wb_device *wb_dev;
-	int num_modes = 0;
-
-	if (!connector || !display)
-		return 0;
-
-	wb_dev = display;
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-	if (wb_dev->count_modes && wb_dev->modes) {
-		struct drm_display_mode *mode;
-		int i, ret;
-
-		for (i = 0; i < wb_dev->count_modes; i++) {
-			mode = drm_mode_create(connector->dev);
-			if (!mode) {
-				SDE_ERROR("failed to create mode\n");
-				break;
-			}
-			ret = drm_mode_convert_umode(wb_dev->drm_dev, mode,
-					&wb_dev->modes[i]);
-			if (ret) {
-				SDE_ERROR("failed to convert mode %d\n", ret);
-				break;
-			}
-
-			drm_mode_probed_add(connector, mode);
-			num_modes++;
-		}
-	} else {
-		u32 max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
-				wb_dev->wb_cfg->sblk->maxlinewidth :
-				SDE_WB_MODE_MAX_WIDTH;
-
-		num_modes = drm_add_modes_noedid(connector, max_width,
-				SDE_WB_MODE_MAX_HEIGHT);
-	}
-	mutex_unlock(&wb_dev->wb_lock);
-	return num_modes;
-}
-
-struct drm_framebuffer *
-sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
-{
-	if (!state || !state->connector ||
-		(state->connector->connector_type !=
-				DRM_MODE_CONNECTOR_VIRTUAL)) {
-		SDE_ERROR("invalid params\n");
-		return NULL;
-	}
-
-	SDE_DEBUG("\n");
-
-	return sde_connector_get_out_fb(state);
-}
-
-int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
-		struct sde_rect *roi)
-{
-	if (!state || !roi || !state->connector ||
-		(state->connector->connector_type !=
-				DRM_MODE_CONNECTOR_VIRTUAL)) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	roi->x = sde_connector_get_property(state, CONNECTOR_PROP_DST_X);
-	roi->y = sde_connector_get_property(state, CONNECTOR_PROP_DST_Y);
-	roi->w = sde_connector_get_property(state, CONNECTOR_PROP_DST_W);
-	roi->h = sde_connector_get_property(state, CONNECTOR_PROP_DST_H);
-
-	return 0;
-}
-
-/**
- * sde_wb_connector_set_modes - set writeback modes and connection status
- * @wb_dev:	Pointer to write back device
- * @count_modes:	Count of modes
- * @modes:	Pointer to writeback mode requested
- * @connected:	Connection status requested
- * Returns:	0 if success; error code otherwise
- */
-static
-int sde_wb_connector_set_modes(struct sde_wb_device *wb_dev,
-		u32 count_modes, struct drm_mode_modeinfo __user *modes,
-		bool connected)
-{
-	struct drm_mode_modeinfo *modeinfo = NULL;
-	int ret = 0;
-	int i;
-
-	if (!wb_dev || !wb_dev->connector ||
-			(wb_dev->connector->connector_type !=
-			 DRM_MODE_CONNECTOR_VIRTUAL)) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	if (connected) {
-		SDE_DEBUG("connect\n");
-
-		if (count_modes && modes) {
-			modeinfo = kcalloc(count_modes,
-					sizeof(struct drm_mode_modeinfo),
-					GFP_KERNEL);
-			if (!modeinfo) {
-				SDE_ERROR("invalid params\n");
-				ret = -ENOMEM;
-				goto error;
-			}
-
-			if (copy_from_user(modeinfo, modes,
-					count_modes *
-					sizeof(struct drm_mode_modeinfo))) {
-				SDE_ERROR("failed to copy modes\n");
-				kfree(modeinfo);
-				ret = -EFAULT;
-				goto error;
-			}
-
-			for (i = 0; i < count_modes; i++) {
-				struct drm_display_mode dispmode;
-
-				memset(&dispmode, 0, sizeof(dispmode));
-				ret = drm_mode_convert_umode(wb_dev->drm_dev,
-						&dispmode, &modeinfo[i]);
-				if (ret) {
-					SDE_ERROR(
-						"failed to convert mode %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x status:%d rc:%d\n",
-						i,
-						modeinfo[i].name,
-						modeinfo[i].vrefresh,
-						modeinfo[i].clock,
-						modeinfo[i].hdisplay,
-						modeinfo[i].hsync_start,
-						modeinfo[i].hsync_end,
-						modeinfo[i].htotal,
-						modeinfo[i].vdisplay,
-						modeinfo[i].vsync_start,
-						modeinfo[i].vsync_end,
-						modeinfo[i].vtotal,
-						modeinfo[i].type,
-						modeinfo[i].flags,
-						dispmode.status,
-						ret);
-					kfree(modeinfo);
-					goto error;
-				}
-			}
-		}
-
-		if (wb_dev->modes) {
-			wb_dev->count_modes = 0;
-
-			kfree(wb_dev->modes);
-			wb_dev->modes = NULL;
-		}
-
-		wb_dev->count_modes = count_modes;
-		wb_dev->modes = modeinfo;
-		wb_dev->detect_status = connector_status_connected;
-	} else {
-		SDE_DEBUG("disconnect\n");
-
-		if (wb_dev->modes) {
-			wb_dev->count_modes = 0;
-
-			kfree(wb_dev->modes);
-			wb_dev->modes = NULL;
-		}
-
-		wb_dev->detect_status = connector_status_disconnected;
-	}
-
-error:
-	return ret;
-}
-
-int sde_wb_connector_set_property(struct drm_connector *connector,
-		struct drm_connector_state *state,
-		int property_index,
-		uint64_t value,
-		void *display)
-{
-	struct sde_wb_device *wb_dev = display;
-	struct drm_framebuffer *out_fb;
-	int rc = 0;
-
-	SDE_DEBUG("\n");
-
-	if (state && (property_index == CONNECTOR_PROP_OUT_FB)) {
-		const struct sde_format *sde_format;
-
-		out_fb = sde_connector_get_out_fb(state);
-		if (!out_fb)
-			goto done;
-
-		sde_format = sde_get_sde_format_ext(out_fb->format->format,
-				out_fb->modifier);
-		if (!sde_format) {
-			SDE_ERROR("failed to get sde format\n");
-			rc = -EINVAL;
-			goto done;
-		}
-
-		if (!sde_wb_is_format_valid(wb_dev, out_fb->format->format,
-				out_fb->modifier)) {
-			SDE_ERROR("unsupported writeback format 0x%x/0x%llx\n",
-					out_fb->format->format,
-					out_fb->modifier);
-			rc = -EINVAL;
-			goto done;
-		}
-	}
-
-done:
-	return rc;
-}
-
-int sde_wb_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *display)
-{
-	struct sde_wb_device *wb_dev = display;
-
-	if (!info || !wb_dev) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	memset(info, 0, sizeof(struct msm_display_info));
-	info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
-	info->num_of_h_tiles = 1;
-	info->h_tile_instance[0] = sde_wb_get_index(display);
-	info->is_connected = true;
-	info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG | MSM_DISPLAY_CAP_EDID;
-	info->max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
-			wb_dev->wb_cfg->sblk->maxlinewidth :
-			SDE_WB_MODE_MAX_WIDTH;
-	info->max_height = SDE_WB_MODE_MAX_HEIGHT;
-	return 0;
-}
-
-int sde_wb_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info,
-		u32 max_mixer_width, void *display)
-{
-	const u32 dual_lm = 2;
-	const u32 single_lm = 1;
-	const u32 single_intf = 1;
-	const u32 no_enc = 0;
-	struct msm_display_topology *topology;
-	struct sde_wb_device *wb_dev = display;
-	u16 hdisplay;
-	int i;
-
-	if (!drm_mode || !mode_info || !max_mixer_width || !display) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	hdisplay = drm_mode->hdisplay;
-
-	/* find maximum display width to support */
-	for (i = 0; i < wb_dev->count_modes; i++)
-		hdisplay = max(hdisplay, wb_dev->modes[i].hdisplay);
-
-	topology = &mode_info->topology;
-	topology->num_lm = (max_mixer_width <= hdisplay) ? dual_lm : single_lm;
-	topology->num_enc = no_enc;
-	topology->num_intf = single_intf;
-
-	mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
-	mode_info->wide_bus_en = false;
-	mode_info->comp_info.comp_ratio = MSM_DISPLAY_COMPRESSION_RATIO_NONE;
-
-	return 0;
-}
-
-int sde_wb_connector_set_info_blob(struct drm_connector *connector,
-		void *info, void *display, struct msm_mode_info *mode_info)
-{
-	struct sde_wb_device *wb_dev = display;
-	const struct sde_format_extended *format_list;
-
-	if (!connector || !info || !display || !wb_dev->wb_cfg) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	format_list = wb_dev->wb_cfg->format_list;
-
-	/*
-	 * Populate info buffer
-	 */
-	if (format_list) {
-		sde_kms_info_start(info, "pixel_formats");
-		while (format_list->fourcc_format) {
-			sde_kms_info_append_format(info,
-					format_list->fourcc_format,
-					format_list->modifier);
-			++format_list;
-		}
-		sde_kms_info_stop(info);
-	}
-
-	sde_kms_info_add_keyint(info,
-			"wb_intf_index",
-			wb_dev->wb_idx - WB_0);
-
-	sde_kms_info_add_keyint(info,
-			"maxlinewidth",
-			wb_dev->wb_cfg->sblk->maxlinewidth);
-
-	sde_kms_info_start(info, "features");
-	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & BIT(SDE_WB_UBWC)))
-		sde_kms_info_append(info, "wb_ubwc");
-	sde_kms_info_stop(info);
-
-	return 0;
-}
-
-int sde_wb_connector_post_init(struct drm_connector *connector, void *display)
-{
-	struct sde_connector *c_conn;
-	struct sde_wb_device *wb_dev = display;
-	static const struct drm_prop_enum_list e_fb_translation_mode[] = {
-		{SDE_DRM_FB_NON_SEC, "non_sec"},
-		{SDE_DRM_FB_SEC, "sec"},
-	};
-
-	if (!connector || !display || !wb_dev->wb_cfg) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	c_conn = to_sde_connector(connector);
-	wb_dev->connector = connector;
-	wb_dev->detect_status = connector_status_connected;
-
-	/*
-	 * Add extra connector properties
-	 */
-	msm_property_install_range(&c_conn->property_info, "FB_ID",
-			0x0, 0, ~0, 0, CONNECTOR_PROP_OUT_FB);
-	msm_property_install_range(&c_conn->property_info, "DST_X",
-			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
-	msm_property_install_range(&c_conn->property_info, "DST_Y",
-			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
-	msm_property_install_range(&c_conn->property_info, "DST_W",
-			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
-	msm_property_install_range(&c_conn->property_info, "DST_H",
-			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
-	msm_property_install_enum(&c_conn->property_info,
-			"fb_translation_mode",
-			0x0,
-			0, e_fb_translation_mode,
-			ARRAY_SIZE(e_fb_translation_mode),
-			CONNECTOR_PROP_FB_TRANSLATION_MODE);
-
-	return 0;
-}
-
-struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
-{
-	struct drm_framebuffer *fb;
-
-	if (!wb_dev || !wb_dev->connector) {
-		SDE_ERROR("invalid params\n");
-		return NULL;
-	}
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-	fb = sde_wb_connector_state_get_output_fb(wb_dev->connector->state);
-	mutex_unlock(&wb_dev->wb_lock);
-
-	return fb;
-}
-
-int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
-{
-	int rc;
-
-	if (!wb_dev || !wb_dev->connector || !roi) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-	rc = sde_wb_connector_state_get_output_roi(
-			wb_dev->connector->state, roi);
-	mutex_unlock(&wb_dev->wb_lock);
-
-	return rc;
-}
-
-u32 sde_wb_get_num_of_displays(void)
-{
-	u32 count = 0;
-	struct sde_wb_device *wb_dev;
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&sde_wb_list_lock);
-	list_for_each_entry(wb_dev, &sde_wb_list, wb_list) {
-		count++;
-	}
-	mutex_unlock(&sde_wb_list_lock);
-
-	return count;
-}
-
-int wb_display_get_displays(void **display_array, u32 max_display_count)
-{
-	struct sde_wb_device *curr;
-	int i = 0;
-
-	SDE_DEBUG("\n");
-
-	if (!display_array || !max_display_count) {
-		if (!display_array)
-			SDE_ERROR("invalid param\n");
-		return 0;
-	}
-
-	mutex_lock(&sde_wb_list_lock);
-	list_for_each_entry(curr, &sde_wb_list, wb_list) {
-		if (i >= max_display_count)
-			break;
-		display_array[i++] = curr;
-	}
-	mutex_unlock(&sde_wb_list_lock);
-
-	return i;
-}
-
-int sde_wb_config(struct drm_device *drm_dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct sde_drm_wb_cfg *config = data;
-	struct msm_drm_private *priv;
-	struct sde_wb_device *wb_dev = NULL;
-	struct sde_wb_device *curr;
-	struct drm_connector *connector;
-	uint32_t flags;
-	uint32_t connector_id;
-	uint32_t count_modes;
-	uint64_t modes;
-	int rc;
-
-	if (!drm_dev || !data) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	flags = config->flags;
-	connector_id = config->connector_id;
-	count_modes = config->count_modes;
-	modes = config->modes;
-
-	priv = drm_dev->dev_private;
-
-	connector = drm_connector_lookup(drm_dev, file_priv, connector_id);
-	if (!connector) {
-		SDE_ERROR("failed to find connector\n");
-		rc = -ENOENT;
-		goto fail;
-	}
-
-	mutex_lock(&sde_wb_list_lock);
-	list_for_each_entry(curr, &sde_wb_list, wb_list) {
-		if (curr->connector == connector) {
-			wb_dev = curr;
-			break;
-		}
-	}
-	mutex_unlock(&sde_wb_list_lock);
-
-	if (!wb_dev) {
-		SDE_ERROR("failed to find wb device\n");
-		rc = -ENOENT;
-		goto fail;
-	}
-
-	mutex_lock(&wb_dev->wb_lock);
-
-	rc = sde_wb_connector_set_modes(wb_dev, count_modes,
-		(struct drm_mode_modeinfo __user *) (uintptr_t) modes,
-		(flags & SDE_DRM_WB_CFG_FLAGS_CONNECTED) ? true : false);
-
-	mutex_unlock(&wb_dev->wb_lock);
-	drm_helper_hpd_irq_event(drm_dev);
-fail:
-	return rc;
-}
-
-/**
- * _sde_wb_dev_init - perform device initialization
- * @wb_dev:	Pointer to writeback device
- */
-static int _sde_wb_dev_init(struct sde_wb_device *wb_dev)
-{
-	int rc = 0;
-
-	if (!wb_dev) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	return rc;
-}
-
-/**
- * _sde_wb_dev_deinit - perform device de-initialization
- * @wb_dev:	Pointer to writeback device
- */
-static int _sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
-{
-	int rc = 0;
-
-	if (!wb_dev) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	return rc;
-}
-
-/**
- * sde_wb_bind - bind writeback device with controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- * Returns:     Zero on success
- */
-static int sde_wb_bind(struct device *dev, struct device *master, void *data)
-{
-	struct sde_wb_device *wb_dev;
-
-	if (!dev || !master) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	wb_dev = platform_get_drvdata(to_platform_device(dev));
-	if (!wb_dev) {
-		SDE_ERROR("invalid wb device\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-	wb_dev->drm_dev = dev_get_drvdata(master);
-	mutex_unlock(&wb_dev->wb_lock);
-
-	return 0;
-}
-
-/**
- * sde_wb_unbind - unbind writeback from controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- */
-static void sde_wb_unbind(struct device *dev,
-		struct device *master, void *data)
-{
-	struct sde_wb_device *wb_dev;
-
-	if (!dev) {
-		SDE_ERROR("invalid params\n");
-		return;
-	}
-
-	wb_dev = platform_get_drvdata(to_platform_device(dev));
-	if (!wb_dev) {
-		SDE_ERROR("invalid wb device\n");
-		return;
-	}
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-	wb_dev->drm_dev = NULL;
-	mutex_unlock(&wb_dev->wb_lock);
-}
-
-static const struct component_ops sde_wb_comp_ops = {
-	.bind = sde_wb_bind,
-	.unbind = sde_wb_unbind,
-};
-
-/**
- * sde_wb_drm_init - perform DRM initialization
- * @wb_dev:	Pointer to writeback device
- * @encoder:	Pointer to associated encoder
- */
-int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
-{
-	int rc = 0;
-
-	if (!wb_dev || !wb_dev->drm_dev || !encoder) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	mutex_lock(&wb_dev->wb_lock);
-
-	if (wb_dev->drm_dev->dev_private) {
-		struct msm_drm_private *priv = wb_dev->drm_dev->dev_private;
-		struct sde_kms *sde_kms = to_sde_kms(priv->kms);
-
-		if (wb_dev->index < sde_kms->catalog->wb_count) {
-			wb_dev->wb_idx = sde_kms->catalog->wb[wb_dev->index].id;
-			wb_dev->wb_cfg = &sde_kms->catalog->wb[wb_dev->index];
-		}
-	}
-
-	wb_dev->drm_dev = encoder->dev;
-	wb_dev->encoder = encoder;
-	mutex_unlock(&wb_dev->wb_lock);
-	return rc;
-}
-
-int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
-{
-	int rc = 0;
-
-	if (!wb_dev) {
-		SDE_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	SDE_DEBUG("\n");
-
-	return rc;
-}
-
-/**
- * sde_wb_probe - load writeback module
- * @pdev:	Pointer to platform device
- */
-static int sde_wb_probe(struct platform_device *pdev)
-{
-	struct sde_wb_device *wb_dev;
-	int ret;
-
-	wb_dev = devm_kzalloc(&pdev->dev, sizeof(*wb_dev), GFP_KERNEL);
-	if (!wb_dev)
-		return -ENOMEM;
-
-	SDE_DEBUG("\n");
-
-	ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
-			&wb_dev->index);
-	if (ret) {
-		SDE_DEBUG("cell index not set, default to 0\n");
-		wb_dev->index = 0;
-	}
-
-	wb_dev->name = of_get_property(pdev->dev.of_node, "label", NULL);
-	if (!wb_dev->name) {
-		SDE_DEBUG("label not set, default to unknown\n");
-		wb_dev->name = "unknown";
-	}
-
-	wb_dev->wb_idx = SDE_NONE;
-
-	mutex_init(&wb_dev->wb_lock);
-	platform_set_drvdata(pdev, wb_dev);
-
-	mutex_lock(&sde_wb_list_lock);
-	list_add(&wb_dev->wb_list, &sde_wb_list);
-	mutex_unlock(&sde_wb_list_lock);
-
-	if (!_sde_wb_dev_init(wb_dev)) {
-		ret = component_add(&pdev->dev, &sde_wb_comp_ops);
-		if (ret)
-			pr_err("component add failed\n");
-	}
-
-	return ret;
-}
-
-/**
- * sde_wb_remove - unload writeback module
- * @pdev:	Pointer to platform device
- */
-static int sde_wb_remove(struct platform_device *pdev)
-{
-	struct sde_wb_device *wb_dev;
-	struct sde_wb_device *curr, *next;
-
-	wb_dev = platform_get_drvdata(pdev);
-	if (!wb_dev)
-		return 0;
-
-	SDE_DEBUG("\n");
-
-	(void)_sde_wb_dev_deinit(wb_dev);
-
-	mutex_lock(&sde_wb_list_lock);
-	list_for_each_entry_safe(curr, next, &sde_wb_list, wb_list) {
-		if (curr == wb_dev) {
-			list_del(&wb_dev->wb_list);
-			break;
-		}
-	}
-	mutex_unlock(&sde_wb_list_lock);
-
-	kfree(wb_dev->modes);
-	mutex_destroy(&wb_dev->wb_lock);
-
-	platform_set_drvdata(pdev, NULL);
-	devm_kfree(&pdev->dev, wb_dev);
-
-	return 0;
-}
-
-static const struct of_device_id dt_match[] = {
-	{ .compatible = "qcom,wb-display"},
-	{}
-};
-
-static struct platform_driver sde_wb_driver = {
-	.probe = sde_wb_probe,
-	.remove = sde_wb_remove,
-	.driver = {
-		.name = "sde_wb",
-		.of_match_table = dt_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-static int __init sde_wb_register(void)
-{
-	return platform_driver_register(&sde_wb_driver);
-}
-
-static void __exit sde_wb_unregister(void)
-{
-	platform_driver_unregister(&sde_wb_driver);
-}
-
-module_init(sde_wb_register);
-module_exit(sde_wb_unregister);
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
deleted file mode 100644
index 4d1248a..0000000
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_WB_H__
-#define __SDE_WB_H__
-
-#include <linux/platform_device.h>
-
-#include "msm_kms.h"
-#include "sde_kms.h"
-#include "sde_connector.h"
-
-/**
- * struct sde_wb_device - Writeback device context
- * @drm_dev:		Pointer to controlling DRM device
- * @index:		Index of hardware instance from device tree
- * @wb_idx:		Writeback identifier of enum sde_wb
- * @wb_cfg:		Writeback configuration catalog
- * @name:		Name of writeback device from device tree
- * @display_type:	Display type from device tree
- * @wb_list		List of all writeback devices
- * @wb_lock		Serialization lock for writeback context structure
- * @connector:		Connector associated with writeback device
- * @encoder:		Encoder associated with writeback device
- * @max_mixer_width:    Max width supported by SDE LM HW block
- * @count_modes:	Length of writeback connector modes array
- * @modes:		Writeback connector modes array
- */
-struct sde_wb_device {
-	struct drm_device *drm_dev;
-
-	u32 index;
-	u32 wb_idx;
-	struct sde_wb_cfg *wb_cfg;
-	const char *name;
-
-	struct list_head wb_list;
-	struct mutex wb_lock;
-
-	struct drm_connector *connector;
-	struct drm_encoder *encoder;
-
-	enum drm_connector_status detect_status;
-	u32 max_mixer_width;
-
-	u32 count_modes;
-	struct drm_mode_modeinfo *modes;
-};
-
-/**
- * sde_wb_get_index - get device index of the given writeback device
- * @wb_dev:	Pointer to writeback device
- * Returns:	Index of hardware instance
- */
-static inline
-int sde_wb_get_index(struct sde_wb_device *wb_dev)
-{
-	return wb_dev ? wb_dev->index : -1;
-}
-
-#ifdef CONFIG_DRM_SDE_WB
-/**
- * sde_wb_get_output_fb - get framebuffer in current atomic state
- * @wb_dev:	Pointer to writeback device
- * Returns:	Pointer to framebuffer
- */
-struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev);
-
-/**
- * sde_wb_get_output_roi - get region-of-interest in current atomic state
- * @wb_dev:	Pointer to writeback device
- * @roi:	Pointer to region of interest
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi);
-
-/**
- * sde_wb_get_num_of_displays - get total number of writeback devices
- * Returns:	Number of writeback devices
- */
-u32 sde_wb_get_num_of_displays(void);
-
-/**
- * wb_display_get_displays - returns pointers for supported display devices
- * @display_array: Pointer to display array to be filled
- * @max_display_count: Size of display_array
- * @Returns: Number of display entries filled
- */
-int wb_display_get_displays(void **display_array, u32 max_display_count);
-
-void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active);
-bool sde_wb_is_active(struct sde_wb_device *wb_dev);
-
-/**
- * sde_wb_drm_init - perform DRM initialization
- * @wb_dev:	Pointer to writeback device
- * @encoder:	Pointer to associated encoder
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder);
-
-/**
- * sde_wb_drm_deinit - perform DRM de-initialization
- * @wb_dev:	Pointer to writeback device
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_drm_deinit(struct sde_wb_device *wb_dev);
-
-/**
- * sde_wb_config - setup connection status and available drm modes of the
- *			given writeback connector
- * @drm_dev:	Pointer to DRM device
- * @data:	Pointer to writeback configuration
- * @file_priv:	Pointer file private data
- * Returns:	0 if success; error code otherwise
- *
- * This function will initiate hot-plug detection event.
- */
-int sde_wb_config(struct drm_device *drm_dev, void *data,
-				struct drm_file *file_priv);
-
-/**
- * sde_wb_connector_post_init - perform writeback specific initialization
- * @connector: Pointer to drm connector structure
- * @display: Pointer to private display structure
- * Returns: Zero on success
- */
-int sde_wb_connector_post_init(struct drm_connector *connector, void *display);
-
-/**
- * sde_wb_connector_set_info_blob - perform writeback info blob initialization
- * @connector: Pointer to drm connector structure
- * @info: Pointer to connector info
- * @display: Pointer to private display structure
- * @mode_info: Pointer to the mode info structure
- * Returns: Zero on success
- */
-int sde_wb_connector_set_info_blob(struct drm_connector *connector,
-		void *info,
-		void *display,
-		struct msm_mode_info *mode_info);
-
-/**
- * sde_wb_connector_detect - perform writeback connection status detection
- * @connector:	Pointer to connector
- * @force:	Indicate force detection
- * @display:	Pointer to writeback device
- * Returns:	connector status
- */
-enum drm_connector_status
-sde_wb_connector_detect(struct drm_connector *connector,
-		bool force,
-		void *display);
-
-/**
- * sde_wb_connector_get_modes - get display modes of connector
- * @connector:	Pointer to connector
- * @display:	Pointer to writeback device
- * Returns:	Number of modes
- *
- * If display modes are not specified in writeback configuration IOCTL, this
- * function will install default EDID modes up to maximum resolution support.
- */
-int sde_wb_connector_get_modes(struct drm_connector *connector, void *display);
-
-/**
- * sde_wb_connector_set_property - set atomic connector property
- * @connector: Pointer to drm connector structure
- * @state: Pointer to drm connector state structure
- * @property_index: DRM property index
- * @value: Incoming property value
- * @display: Pointer to private display structure
- * Returns: Zero on success
- */
-int sde_wb_connector_set_property(struct drm_connector *connector,
-		struct drm_connector_state *state,
-		int property_index,
-		uint64_t value,
-		void *display);
-
-/**
- * sde_wb_get_info - retrieve writeback 'display' information
- * @connector: Pointer to drm connector structure
- * @info: Pointer to display info structure
- * @display: Pointer to private display structure
- * Returns: Zero on success
- */
-int sde_wb_get_info(struct drm_connector *connector,
-		struct msm_display_info *info, void *display);
-
-/**
- * sde_wb_get_mode_info - retrieve information of the mode selected
- * @connector: Pointer to drm connector structure
- * @drm_mode: Display mode set for the display
- * @mode_info: Out parameter. information of the mode.
- * @max_mixer_width: max width supported by HW layer mixer
- * @display: Pointer to private display structure
- * Returns: zero on success
- */
-int sde_wb_get_mode_info(struct drm_connector *connector,
-		const struct drm_display_mode *drm_mode,
-		struct msm_mode_info *mode_info, u32 max_mixer_width,
-		void *display);
-
-/**
- * sde_wb_connector_get_wb - retrieve writeback device of the given connector
- * @connector: Pointer to drm connector
- * Returns: Pointer to writeback device on success; NULL otherwise
- */
-static inline
-struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
-{
-	if (!connector ||
-		(connector->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)) {
-		SDE_ERROR("invalid params\n");
-		return NULL;
-	}
-
-	return sde_connector_get_display(connector);
-}
-
-/**
- * sde_wb_connector_state_get_output_fb - get framebuffer of given state
- * @state:	Pointer to connector state
- * Returns:	Pointer to framebuffer
- */
-struct drm_framebuffer *
-sde_wb_connector_state_get_output_fb(struct drm_connector_state *state);
-
-/**
- * sde_wb_connector_state_get_output_roi - get roi from given atomic state
- * @state:	Pointer to atomic state
- * @roi:	Pointer to region of interest
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
-		struct sde_rect *roi);
-
-#else
-static inline
-struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
-{
-	return NULL;
-}
-static inline
-int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
-{
-	return 0;
-}
-static inline
-u32 sde_wb_get_num_of_displays(void)
-{
-	return 0;
-}
-static inline
-int wb_display_get_displays(void **display_array, u32 max_display_count)
-{
-	return 0;
-}
-static inline
-void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active)
-{
-}
-static inline
-bool sde_wb_is_active(struct sde_wb_device *wb_dev)
-{
-	return false;
-}
-static inline
-int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
-{
-	return 0;
-}
-static inline
-int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
-{
-	return 0;
-}
-static inline
-int sde_wb_config(struct drm_device *drm_dev, void *data,
-				struct drm_file *file_priv)
-{
-	return 0;
-}
-static inline
-int sde_wb_connector_post_init(struct drm_connector *connector,
-		void *info,
-		void *display,
-		struct msm_mode_info *mode_info)
-{
-	return 0;
-}
-static inline
-enum drm_connector_status
-sde_wb_connector_detect(struct drm_connector *connector,
-		bool force,
-		void *display)
-{
-	return connector_status_disconnected;
-}
-static inline
-int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
-{
-	return -EINVAL;
-}
-static inline
-int sde_wb_connector_set_property(struct drm_connector *connector,
-		struct drm_connector_state *state,
-		int property_index,
-		uint64_t value,
-		void *display)
-{
-	return 0;
-}
-static inline
-int sde_wb_get_info(struct msm_display_info *info, void *display)
-{
-	return 0;
-}
-static inline
-struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
-{
-	return NULL;
-}
-
-static inline
-struct drm_framebuffer *
-sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
-{
-	return NULL;
-}
-
-static inline
-int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
-		struct sde_rect *roi)
-{
-	return 0;
-}
-
-#endif
-#endif /* __SDE_WB_H__ */
-
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
deleted file mode 100644
index ec019fb..0000000
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ /dev/null
@@ -1,4660 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-
-#include "sde_dbg.h"
-#include "sde/sde_hw_catalog.h"
-
-#define SDE_DBG_BASE_MAX		10
-
-#define DEFAULT_PANIC		1
-#define DEFAULT_REGDUMP		SDE_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_SDE	SDE_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT	SDE_DBG_DUMP_IN_MEM
-#define DEFAULT_BASE_REG_CNT	0x100
-#define GROUP_BYTES		4
-#define ROW_BYTES		16
-#define RANGE_NAME_LEN		40
-#define REG_BASE_NAME_LEN	80
-
-#define DBGBUS_FLAGS_DSPP	BIT(0)
-#define DBGBUS_DSPP_STATUS	0x34C
-
-#define DBGBUS_NAME_SDE		"sde"
-#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
-
-/* offsets from sde top address for the debug buses */
-#define DBGBUS_SSPP0	0x188
-#define DBGBUS_AXI_INTF	0x194
-#define DBGBUS_SSPP1	0x298
-#define DBGBUS_DSPP	0x348
-#define DBGBUS_PERIPH	0x418
-
-#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON			0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
-#define MMSS_VBIF_TEST_BUS_OUT		0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR		0x190
-#define MMSS_VBIF_SRC_ERR		0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
-#define MMSS_VBIF_ERR_INFO		0X1a0
-#define MMSS_VBIF_ERR_INFO_1		0x1a4
-#define MMSS_VBIF_CLIENT_NUM		14
-
-/* print debug ranges in groups of 4 u32s */
-#define REG_DUMP_ALIGN		16
-
-#define RSC_DEBUG_MUX_SEL_SDM845 9
-
-#define DBG_CTRL_STOP_FTRACE	BIT(0)
-#define DBG_CTRL_PANIC_UNDERRUN	BIT(1)
-#define DBG_CTRL_RESET_HW_PANIC	BIT(2)
-#define DBG_CTRL_MAX			BIT(3)
-
-#define DUMP_BUF_SIZE			(4096 * 512)
-#define DUMP_CLMN_COUNT			4
-#define DUMP_LINE_SIZE			256
-#define DUMP_MAX_LINES_PER_BLK		512
-
-/**
- * struct sde_dbg_reg_offset - tracking for start and end of region
- * @start: start offset
- * @start: end offset
- */
-struct sde_dbg_reg_offset {
-	u32 start;
-	u32 end;
-};
-
-/**
- * struct sde_dbg_reg_range - register dumping named sub-range
- * @head: head of this node
- * @reg_dump: address for the mem dump
- * @range_name: name of this range
- * @offset: offsets for range to dump
- * @xin_id: client xin id
- */
-struct sde_dbg_reg_range {
-	struct list_head head;
-	u32 *reg_dump;
-	char range_name[RANGE_NAME_LEN];
-	struct sde_dbg_reg_offset offset;
-	uint32_t xin_id;
-};
-
-/**
- * struct sde_dbg_reg_base - register region base.
- *	may sub-ranges: sub-ranges are used for dumping
- *	or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @sub_range_list: head to the list with dump ranges
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len:  buffer length used for manual register dumping
- * @reg_dump: address for the mem dump if no ranges used
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct sde_dbg_reg_base {
-	struct list_head reg_base_head;
-	struct list_head sub_range_list;
-	char name[REG_BASE_NAME_LEN];
-	void __iomem *base;
-	size_t off;
-	size_t cnt;
-	size_t max_offset;
-	char *buf;
-	size_t buf_len;
-	u32 *reg_dump;
-	void (*cb)(void *ptr);
-	void *cb_ptr;
-};
-
-struct sde_debug_bus_entry {
-	u32 wr_addr;
-	u32 block_id;
-	u32 test_id;
-	void (*analyzer)(void __iomem *mem_base,
-				struct sde_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
-	u32 disable_bus_addr;
-	u32 block_bus_addr;
-	u32 bit_offset;
-	u32 block_cnt;
-	u32 test_pnt_start;
-	u32 test_pnt_cnt;
-};
-
-struct sde_dbg_debug_bus_common {
-	char *name;
-	u32 enable_mask;
-	bool include_in_deferred_work;
-	u32 flags;
-	u32 entries_size;
-	u32 *dumped_content;
-};
-
-struct sde_dbg_sde_debug_bus {
-	struct sde_dbg_debug_bus_common cmn;
-	struct sde_debug_bus_entry *entries;
-	u32 top_blk_off;
-};
-
-struct sde_dbg_vbif_debug_bus {
-	struct sde_dbg_debug_bus_common cmn;
-	struct vbif_debug_bus_entry *entries;
-};
-
-struct sde_dbg_dsi_debug_bus {
-	u32 *entries;
-	u32 size;
-};
-
-/**
- * struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps
- * @buf: pointer to allocated memory for storing register dumps in hw recovery
- * @buf_size: size of the memory allocated
- * @len: size of the dump data valid in the buffer
- * @rpos: cursor points to the buffer position read by client
- * @dump_done: to indicate if dumping to user memory is complete
- * @cur_blk: points to the current sde_dbg_reg_base block
- */
-struct sde_dbg_regbuf {
-	char *buf;
-	int buf_size;
-	int len;
-	int rpos;
-	int dump_done;
-	struct sde_dbg_reg_base *cur_blk;
-};
-
-/**
- * struct sde_dbg_base - global sde debug base structure
- * @evtlog: event log instance
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @mutex: mutex to serialize access to serialze dumps, debugfs access
- * @power_ctrl: callback structure for enabling power for reading hw registers
- * @req_dump_blks: list of blocks requested for dumping
- * @panic_on_err: whether to kernel panic after triggering dump via debugfs
- * @dump_work: work struct for deferring register dump work to separate thread
- * @work_panic: panic after dump if internal user passed "panic" special region
- * @enable_reg_dump: whether to dump registers into memory, kernel log, or both
- * @dbgbus_sde: debug bus structure for the sde
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- * @dump_all: dump all entries in register dump
- * @dsi_dbg_bus: dump dsi debug bus register
- * @regbuf: buffer data to track the register dumping in hw recovery
- * @cur_evt_index: index used for tracking event logs dump in hw recovery
- * @dbgbus_dump_idx: index used for tracking dbg-bus dump in hw recovery
- * @vbif_dbgbus_dump_idx: index for tracking vbif dumps in hw recovery
- */
-static struct sde_dbg_base {
-	struct sde_dbg_evtlog *evtlog;
-	struct list_head reg_base_list;
-	struct device *dev;
-	struct mutex mutex;
-	struct sde_dbg_power_ctrl power_ctrl;
-
-	struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX];
-
-	u32 panic_on_err;
-	struct work_struct dump_work;
-	bool work_panic;
-	u32 enable_reg_dump;
-
-	struct sde_dbg_sde_debug_bus dbgbus_sde;
-	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
-	struct sde_dbg_dsi_debug_bus dbgbus_dsi;
-	bool dump_all;
-	bool dsi_dbg_bus;
-	u32 debugfs_ctrl;
-
-	struct sde_dbg_regbuf regbuf;
-	u32 cur_evt_index;
-	u32 dbgbus_dump_idx;
-	u32 vbif_dbgbus_dump_idx;
-} sde_dbg_base;
-
-/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
-struct sde_dbg_evtlog *sde_dbg_base_evtlog;
-
-static void _sde_debug_bus_xbar_dump(void __iomem *mem_base,
-		struct sde_debug_bus_entry *entry, u32 val)
-{
-	dev_err(sde_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _sde_debug_bus_lm_dump(void __iomem *mem_base,
-		struct sde_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & 0xFFF000))
-		return;
-
-	dev_err(sde_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _sde_debug_bus_ppb0_dump(void __iomem *mem_base,
-		struct sde_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & BIT(15)))
-		return;
-
-	dev_err(sde_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _sde_debug_bus_ppb1_dump(void __iomem *mem_base,
-		struct sde_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & BIT(15)))
-		return;
-
-	dev_err(sde_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct sde_debug_bus_entry dbg_bus_sde_sm8150[] = {
-
-	/* Unpack 0 sspp 0*/
-	{ DBGBUS_SSPP0, 35, 2 },
-	{ DBGBUS_SSPP0, 50, 2 },
-	{ DBGBUS_SSPP0, 60, 2 },
-	{ DBGBUS_SSPP0, 70, 2 },
-
-	/* Unpack 1 sspp 0*/
-	{ DBGBUS_SSPP0, 36, 2 },
-	{ DBGBUS_SSPP0, 51, 2 },
-	{ DBGBUS_SSPP0, 61, 2 },
-	{ DBGBUS_SSPP0, 71, 2 },
-
-	/* Unpack 2 sspp 0*/
-	{ DBGBUS_SSPP0, 37, 2 },
-	{ DBGBUS_SSPP0, 52, 2 },
-	{ DBGBUS_SSPP0, 62, 2 },
-	{ DBGBUS_SSPP0, 72, 2 },
-
-
-	/* Unpack 3 sspp 0*/
-	{ DBGBUS_SSPP0, 38, 2 },
-	{ DBGBUS_SSPP0, 53, 2 },
-	{ DBGBUS_SSPP0, 63, 2 },
-	{ DBGBUS_SSPP0, 73, 2 },
-
-	/* Unpack 0 sspp 1*/
-	{ DBGBUS_SSPP1, 35, 2 },
-	{ DBGBUS_SSPP1, 50, 2 },
-	{ DBGBUS_SSPP1, 60, 2 },
-	{ DBGBUS_SSPP1, 70, 2 },
-
-	/* Unpack 1 sspp 1*/
-	{ DBGBUS_SSPP1, 36, 2 },
-	{ DBGBUS_SSPP1, 51, 2 },
-	{ DBGBUS_SSPP1, 61, 2 },
-	{ DBGBUS_SSPP1, 71, 2 },
-
-	/* Unpack 2 sspp 1*/
-	{ DBGBUS_SSPP1, 37, 2 },
-	{ DBGBUS_SSPP1, 52, 2 },
-	{ DBGBUS_SSPP1, 62, 2 },
-	{ DBGBUS_SSPP1, 72, 2 },
-
-
-	/* Unpack 3 sspp 1*/
-	{ DBGBUS_SSPP1, 38, 2 },
-	{ DBGBUS_SSPP1, 53, 2 },
-	{ DBGBUS_SSPP1, 63, 2 },
-	{ DBGBUS_SSPP1, 73, 2 },
-
-	/* scheduler */
-	{ DBGBUS_DSPP, 130, 0 },
-	{ DBGBUS_DSPP, 130, 1 },
-	{ DBGBUS_DSPP, 130, 2 },
-	{ DBGBUS_DSPP, 130, 3 },
-	{ DBGBUS_DSPP, 130, 4 },
-	{ DBGBUS_DSPP, 130, 5 },
-
-
-	/* fetch sspp0 */
-
-	/* vig 0 */
-	{ DBGBUS_SSPP0, 0, 0 },
-	{ DBGBUS_SSPP0, 0, 1 },
-	{ DBGBUS_SSPP0, 0, 2 },
-	{ DBGBUS_SSPP0, 0, 3 },
-	{ DBGBUS_SSPP0, 0, 4 },
-	{ DBGBUS_SSPP0, 0, 5 },
-	{ DBGBUS_SSPP0, 0, 6 },
-	{ DBGBUS_SSPP0, 0, 7 },
-
-	{ DBGBUS_SSPP0, 1, 0 },
-	{ DBGBUS_SSPP0, 1, 1 },
-	{ DBGBUS_SSPP0, 1, 2 },
-	{ DBGBUS_SSPP0, 1, 3 },
-	{ DBGBUS_SSPP0, 1, 4 },
-	{ DBGBUS_SSPP0, 1, 5 },
-	{ DBGBUS_SSPP0, 1, 6 },
-	{ DBGBUS_SSPP0, 1, 7 },
-
-	{ DBGBUS_SSPP0, 2, 0 },
-	{ DBGBUS_SSPP0, 2, 1 },
-	{ DBGBUS_SSPP0, 2, 2 },
-	{ DBGBUS_SSPP0, 2, 3 },
-	{ DBGBUS_SSPP0, 2, 4 },
-	{ DBGBUS_SSPP0, 2, 5 },
-	{ DBGBUS_SSPP0, 2, 6 },
-	{ DBGBUS_SSPP0, 2, 7 },
-
-	{ DBGBUS_SSPP0, 4, 0 },
-	{ DBGBUS_SSPP0, 4, 1 },
-	{ DBGBUS_SSPP0, 4, 2 },
-	{ DBGBUS_SSPP0, 4, 3 },
-	{ DBGBUS_SSPP0, 4, 4 },
-	{ DBGBUS_SSPP0, 4, 5 },
-	{ DBGBUS_SSPP0, 4, 6 },
-	{ DBGBUS_SSPP0, 4, 7 },
-
-	{ DBGBUS_SSPP0, 5, 0 },
-	{ DBGBUS_SSPP0, 5, 1 },
-	{ DBGBUS_SSPP0, 5, 2 },
-	{ DBGBUS_SSPP0, 5, 3 },
-	{ DBGBUS_SSPP0, 5, 4 },
-	{ DBGBUS_SSPP0, 5, 5 },
-	{ DBGBUS_SSPP0, 5, 6 },
-	{ DBGBUS_SSPP0, 5, 7 },
-
-	/* vig 2 */
-	{ DBGBUS_SSPP0, 20, 0 },
-	{ DBGBUS_SSPP0, 20, 1 },
-	{ DBGBUS_SSPP0, 20, 2 },
-	{ DBGBUS_SSPP0, 20, 3 },
-	{ DBGBUS_SSPP0, 20, 4 },
-	{ DBGBUS_SSPP0, 20, 5 },
-	{ DBGBUS_SSPP0, 20, 6 },
-	{ DBGBUS_SSPP0, 20, 7 },
-
-	{ DBGBUS_SSPP0, 21, 0 },
-	{ DBGBUS_SSPP0, 21, 1 },
-	{ DBGBUS_SSPP0, 21, 2 },
-	{ DBGBUS_SSPP0, 21, 3 },
-	{ DBGBUS_SSPP0, 21, 4 },
-	{ DBGBUS_SSPP0, 21, 5 },
-	{ DBGBUS_SSPP0, 21, 6 },
-	{ DBGBUS_SSPP0, 21, 7 },
-
-	{ DBGBUS_SSPP0, 22, 0 },
-	{ DBGBUS_SSPP0, 22, 1 },
-	{ DBGBUS_SSPP0, 22, 2 },
-	{ DBGBUS_SSPP0, 22, 3 },
-	{ DBGBUS_SSPP0, 22, 4 },
-	{ DBGBUS_SSPP0, 22, 5 },
-	{ DBGBUS_SSPP0, 22, 6 },
-	{ DBGBUS_SSPP0, 22, 7 },
-
-	{ DBGBUS_SSPP0, 24, 0 },
-	{ DBGBUS_SSPP0, 24, 1 },
-	{ DBGBUS_SSPP0, 24, 2 },
-	{ DBGBUS_SSPP0, 24, 3 },
-	{ DBGBUS_SSPP0, 24, 4 },
-	{ DBGBUS_SSPP0, 24, 5 },
-	{ DBGBUS_SSPP0, 24, 6 },
-	{ DBGBUS_SSPP0, 24, 7 },
-
-	{ DBGBUS_SSPP0, 25, 0 },
-	{ DBGBUS_SSPP0, 25, 1 },
-	{ DBGBUS_SSPP0, 25, 2 },
-	{ DBGBUS_SSPP0, 25, 3 },
-	{ DBGBUS_SSPP0, 25, 4 },
-	{ DBGBUS_SSPP0, 25, 5 },
-	{ DBGBUS_SSPP0, 25, 6 },
-	{ DBGBUS_SSPP0, 25, 7 },
-
-	/* dma 2 */
-	{ DBGBUS_SSPP0, 30, 0 },
-	{ DBGBUS_SSPP0, 30, 1 },
-	{ DBGBUS_SSPP0, 30, 2 },
-	{ DBGBUS_SSPP0, 30, 3 },
-	{ DBGBUS_SSPP0, 30, 4 },
-	{ DBGBUS_SSPP0, 30, 5 },
-	{ DBGBUS_SSPP0, 30, 6 },
-	{ DBGBUS_SSPP0, 30, 7 },
-
-	{ DBGBUS_SSPP0, 31, 0 },
-	{ DBGBUS_SSPP0, 31, 1 },
-	{ DBGBUS_SSPP0, 31, 2 },
-	{ DBGBUS_SSPP0, 31, 3 },
-	{ DBGBUS_SSPP0, 31, 4 },
-	{ DBGBUS_SSPP0, 31, 5 },
-	{ DBGBUS_SSPP0, 31, 6 },
-	{ DBGBUS_SSPP0, 31, 7 },
-
-	{ DBGBUS_SSPP0, 32, 0 },
-	{ DBGBUS_SSPP0, 32, 1 },
-	{ DBGBUS_SSPP0, 32, 2 },
-	{ DBGBUS_SSPP0, 32, 3 },
-	{ DBGBUS_SSPP0, 32, 4 },
-	{ DBGBUS_SSPP0, 32, 5 },
-	{ DBGBUS_SSPP0, 32, 6 },
-	{ DBGBUS_SSPP0, 32, 7 },
-
-	{ DBGBUS_SSPP0, 33, 0 },
-	{ DBGBUS_SSPP0, 33, 1 },
-	{ DBGBUS_SSPP0, 33, 2 },
-	{ DBGBUS_SSPP0, 33, 3 },
-	{ DBGBUS_SSPP0, 33, 4 },
-	{ DBGBUS_SSPP0, 33, 5 },
-	{ DBGBUS_SSPP0, 33, 6 },
-	{ DBGBUS_SSPP0, 33, 7 },
-
-	{ DBGBUS_SSPP0, 34, 0 },
-	{ DBGBUS_SSPP0, 34, 1 },
-	{ DBGBUS_SSPP0, 34, 2 },
-	{ DBGBUS_SSPP0, 34, 3 },
-	{ DBGBUS_SSPP0, 34, 4 },
-	{ DBGBUS_SSPP0, 34, 5 },
-	{ DBGBUS_SSPP0, 34, 6 },
-	{ DBGBUS_SSPP0, 34, 7 },
-
-	/* dma 0 */
-	{ DBGBUS_SSPP0, 40, 0 },
-	{ DBGBUS_SSPP0, 40, 1 },
-	{ DBGBUS_SSPP0, 40, 2 },
-	{ DBGBUS_SSPP0, 40, 3 },
-	{ DBGBUS_SSPP0, 40, 4 },
-	{ DBGBUS_SSPP0, 40, 5 },
-	{ DBGBUS_SSPP0, 40, 6 },
-	{ DBGBUS_SSPP0, 40, 7 },
-
-	{ DBGBUS_SSPP0, 41, 0 },
-	{ DBGBUS_SSPP0, 41, 1 },
-	{ DBGBUS_SSPP0, 41, 2 },
-	{ DBGBUS_SSPP0, 41, 3 },
-	{ DBGBUS_SSPP0, 41, 4 },
-	{ DBGBUS_SSPP0, 41, 5 },
-	{ DBGBUS_SSPP0, 41, 6 },
-	{ DBGBUS_SSPP0, 41, 7 },
-
-	{ DBGBUS_SSPP0, 42, 0 },
-	{ DBGBUS_SSPP0, 42, 1 },
-	{ DBGBUS_SSPP0, 42, 2 },
-	{ DBGBUS_SSPP0, 42, 3 },
-	{ DBGBUS_SSPP0, 42, 4 },
-	{ DBGBUS_SSPP0, 42, 5 },
-	{ DBGBUS_SSPP0, 42, 6 },
-	{ DBGBUS_SSPP0, 42, 7 },
-
-	{ DBGBUS_SSPP0, 44, 0 },
-	{ DBGBUS_SSPP0, 44, 1 },
-	{ DBGBUS_SSPP0, 44, 2 },
-	{ DBGBUS_SSPP0, 44, 3 },
-	{ DBGBUS_SSPP0, 44, 4 },
-	{ DBGBUS_SSPP0, 44, 5 },
-	{ DBGBUS_SSPP0, 44, 6 },
-	{ DBGBUS_SSPP0, 44, 7 },
-
-	{ DBGBUS_SSPP0, 45, 0 },
-	{ DBGBUS_SSPP0, 45, 1 },
-	{ DBGBUS_SSPP0, 45, 2 },
-	{ DBGBUS_SSPP0, 45, 3 },
-	{ DBGBUS_SSPP0, 45, 4 },
-	{ DBGBUS_SSPP0, 45, 5 },
-	{ DBGBUS_SSPP0, 45, 6 },
-	{ DBGBUS_SSPP0, 45, 7 },
-
-	/* fetch sspp1 */
-	/* vig 1 */
-	{ DBGBUS_SSPP1, 0, 0 },
-	{ DBGBUS_SSPP1, 0, 1 },
-	{ DBGBUS_SSPP1, 0, 2 },
-	{ DBGBUS_SSPP1, 0, 3 },
-	{ DBGBUS_SSPP1, 0, 4 },
-	{ DBGBUS_SSPP1, 0, 5 },
-	{ DBGBUS_SSPP1, 0, 6 },
-	{ DBGBUS_SSPP1, 0, 7 },
-
-	{ DBGBUS_SSPP1, 1, 0 },
-	{ DBGBUS_SSPP1, 1, 1 },
-	{ DBGBUS_SSPP1, 1, 2 },
-	{ DBGBUS_SSPP1, 1, 3 },
-	{ DBGBUS_SSPP1, 1, 4 },
-	{ DBGBUS_SSPP1, 1, 5 },
-	{ DBGBUS_SSPP1, 1, 6 },
-	{ DBGBUS_SSPP1, 1, 7 },
-
-	{ DBGBUS_SSPP1, 2, 0 },
-	{ DBGBUS_SSPP1, 2, 1 },
-	{ DBGBUS_SSPP1, 2, 2 },
-	{ DBGBUS_SSPP1, 2, 3 },
-	{ DBGBUS_SSPP1, 2, 4 },
-	{ DBGBUS_SSPP1, 2, 5 },
-	{ DBGBUS_SSPP1, 2, 6 },
-	{ DBGBUS_SSPP1, 2, 7 },
-
-	{ DBGBUS_SSPP1, 4, 0 },
-	{ DBGBUS_SSPP1, 4, 1 },
-	{ DBGBUS_SSPP1, 4, 2 },
-	{ DBGBUS_SSPP1, 4, 3 },
-	{ DBGBUS_SSPP1, 4, 4 },
-	{ DBGBUS_SSPP1, 4, 5 },
-	{ DBGBUS_SSPP1, 4, 6 },
-	{ DBGBUS_SSPP1, 4, 7 },
-
-	{ DBGBUS_SSPP1, 5, 0 },
-	{ DBGBUS_SSPP1, 5, 1 },
-	{ DBGBUS_SSPP1, 5, 2 },
-	{ DBGBUS_SSPP1, 5, 3 },
-	{ DBGBUS_SSPP1, 5, 4 },
-	{ DBGBUS_SSPP1, 5, 5 },
-	{ DBGBUS_SSPP1, 5, 6 },
-	{ DBGBUS_SSPP1, 5, 7 },
-
-	/* vig 3 */
-	{ DBGBUS_SSPP1, 20, 0 },
-	{ DBGBUS_SSPP1, 20, 1 },
-	{ DBGBUS_SSPP1, 20, 2 },
-	{ DBGBUS_SSPP1, 20, 3 },
-	{ DBGBUS_SSPP1, 20, 4 },
-	{ DBGBUS_SSPP1, 20, 5 },
-	{ DBGBUS_SSPP1, 20, 6 },
-	{ DBGBUS_SSPP1, 20, 7 },
-
-	{ DBGBUS_SSPP1, 21, 0 },
-	{ DBGBUS_SSPP1, 21, 1 },
-	{ DBGBUS_SSPP1, 21, 2 },
-	{ DBGBUS_SSPP1, 21, 3 },
-	{ DBGBUS_SSPP1, 21, 4 },
-	{ DBGBUS_SSPP1, 21, 5 },
-	{ DBGBUS_SSPP1, 21, 6 },
-	{ DBGBUS_SSPP1, 21, 7 },
-
-	{ DBGBUS_SSPP1, 22, 0 },
-	{ DBGBUS_SSPP1, 22, 1 },
-	{ DBGBUS_SSPP1, 22, 2 },
-	{ DBGBUS_SSPP1, 22, 3 },
-	{ DBGBUS_SSPP1, 22, 4 },
-	{ DBGBUS_SSPP1, 22, 5 },
-	{ DBGBUS_SSPP1, 22, 6 },
-	{ DBGBUS_SSPP1, 22, 7 },
-
-	{ DBGBUS_SSPP1, 24, 0 },
-	{ DBGBUS_SSPP1, 24, 1 },
-	{ DBGBUS_SSPP1, 24, 2 },
-	{ DBGBUS_SSPP1, 24, 3 },
-	{ DBGBUS_SSPP1, 24, 4 },
-	{ DBGBUS_SSPP1, 24, 5 },
-	{ DBGBUS_SSPP1, 24, 6 },
-	{ DBGBUS_SSPP1, 24, 7 },
-
-	{ DBGBUS_SSPP1, 25, 0 },
-	{ DBGBUS_SSPP1, 25, 1 },
-	{ DBGBUS_SSPP1, 25, 2 },
-	{ DBGBUS_SSPP1, 25, 3 },
-	{ DBGBUS_SSPP1, 25, 4 },
-	{ DBGBUS_SSPP1, 25, 5 },
-	{ DBGBUS_SSPP1, 25, 6 },
-	{ DBGBUS_SSPP1, 25, 7 },
-
-	/* dma 3 */
-	{ DBGBUS_SSPP1, 30, 0 },
-	{ DBGBUS_SSPP1, 30, 1 },
-	{ DBGBUS_SSPP1, 30, 2 },
-	{ DBGBUS_SSPP1, 30, 3 },
-	{ DBGBUS_SSPP1, 30, 4 },
-	{ DBGBUS_SSPP1, 30, 5 },
-	{ DBGBUS_SSPP1, 30, 6 },
-	{ DBGBUS_SSPP1, 30, 7 },
-
-	{ DBGBUS_SSPP1, 31, 0 },
-	{ DBGBUS_SSPP1, 31, 1 },
-	{ DBGBUS_SSPP1, 31, 2 },
-	{ DBGBUS_SSPP1, 31, 3 },
-	{ DBGBUS_SSPP1, 31, 4 },
-	{ DBGBUS_SSPP1, 31, 5 },
-	{ DBGBUS_SSPP1, 31, 6 },
-	{ DBGBUS_SSPP1, 31, 7 },
-
-	{ DBGBUS_SSPP1, 32, 0 },
-	{ DBGBUS_SSPP1, 32, 1 },
-	{ DBGBUS_SSPP1, 32, 2 },
-	{ DBGBUS_SSPP1, 32, 3 },
-	{ DBGBUS_SSPP1, 32, 4 },
-	{ DBGBUS_SSPP1, 32, 5 },
-	{ DBGBUS_SSPP1, 32, 6 },
-	{ DBGBUS_SSPP1, 32, 7 },
-
-	{ DBGBUS_SSPP1, 33, 0 },
-	{ DBGBUS_SSPP1, 33, 1 },
-	{ DBGBUS_SSPP1, 33, 2 },
-	{ DBGBUS_SSPP1, 33, 3 },
-	{ DBGBUS_SSPP1, 33, 4 },
-	{ DBGBUS_SSPP1, 33, 5 },
-	{ DBGBUS_SSPP1, 33, 6 },
-	{ DBGBUS_SSPP1, 33, 7 },
-
-	{ DBGBUS_SSPP1, 34, 0 },
-	{ DBGBUS_SSPP1, 34, 1 },
-	{ DBGBUS_SSPP1, 34, 2 },
-	{ DBGBUS_SSPP1, 34, 3 },
-	{ DBGBUS_SSPP1, 34, 4 },
-	{ DBGBUS_SSPP1, 34, 5 },
-	{ DBGBUS_SSPP1, 34, 6 },
-	{ DBGBUS_SSPP1, 34, 7 },
-
-	/* dma 1 */
-	{ DBGBUS_SSPP1, 40, 0 },
-	{ DBGBUS_SSPP1, 40, 1 },
-	{ DBGBUS_SSPP1, 40, 2 },
-	{ DBGBUS_SSPP1, 40, 3 },
-	{ DBGBUS_SSPP1, 40, 4 },
-	{ DBGBUS_SSPP1, 40, 5 },
-	{ DBGBUS_SSPP1, 40, 6 },
-	{ DBGBUS_SSPP1, 40, 7 },
-
-	{ DBGBUS_SSPP1, 41, 0 },
-	{ DBGBUS_SSPP1, 41, 1 },
-	{ DBGBUS_SSPP1, 41, 2 },
-	{ DBGBUS_SSPP1, 41, 3 },
-	{ DBGBUS_SSPP1, 41, 4 },
-	{ DBGBUS_SSPP1, 41, 5 },
-	{ DBGBUS_SSPP1, 41, 6 },
-	{ DBGBUS_SSPP1, 41, 7 },
-
-	{ DBGBUS_SSPP1, 42, 0 },
-	{ DBGBUS_SSPP1, 42, 1 },
-	{ DBGBUS_SSPP1, 42, 2 },
-	{ DBGBUS_SSPP1, 42, 3 },
-	{ DBGBUS_SSPP1, 42, 4 },
-	{ DBGBUS_SSPP1, 42, 5 },
-	{ DBGBUS_SSPP1, 42, 6 },
-	{ DBGBUS_SSPP1, 42, 7 },
-
-	{ DBGBUS_SSPP1, 44, 0 },
-	{ DBGBUS_SSPP1, 44, 1 },
-	{ DBGBUS_SSPP1, 44, 2 },
-	{ DBGBUS_SSPP1, 44, 3 },
-	{ DBGBUS_SSPP1, 44, 4 },
-	{ DBGBUS_SSPP1, 44, 5 },
-	{ DBGBUS_SSPP1, 44, 6 },
-	{ DBGBUS_SSPP1, 44, 7 },
-
-	{ DBGBUS_SSPP1, 45, 0 },
-	{ DBGBUS_SSPP1, 45, 1 },
-	{ DBGBUS_SSPP1, 45, 2 },
-	{ DBGBUS_SSPP1, 45, 3 },
-	{ DBGBUS_SSPP1, 45, 4 },
-	{ DBGBUS_SSPP1, 45, 5 },
-	{ DBGBUS_SSPP1, 45, 6 },
-	{ DBGBUS_SSPP1, 45, 7 },
-
-	/* ppb_0 */
-	{ DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 47, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 49, 0, _sde_debug_bus_ppb0_dump },
-
-	/* ppb_1 */
-	{ DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 48, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 50, 0, _sde_debug_bus_ppb1_dump },
-
-	/* crossbar */
-	{ DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump },
-
-	/* rotator */
-	{ DBGBUS_DSPP, 9, 0},
-
-	/* blend */
-	/* LM0 */
-	{ DBGBUS_DSPP, 63, 1},
-	{ DBGBUS_DSPP, 63, 2},
-	{ DBGBUS_DSPP, 63, 3},
-	{ DBGBUS_DSPP, 63, 4},
-	{ DBGBUS_DSPP, 63, 5},
-	{ DBGBUS_DSPP, 63, 6},
-	{ DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 64, 1},
-	{ DBGBUS_DSPP, 64, 2},
-	{ DBGBUS_DSPP, 64, 3},
-	{ DBGBUS_DSPP, 64, 4},
-	{ DBGBUS_DSPP, 64, 5},
-	{ DBGBUS_DSPP, 64, 6},
-	{ DBGBUS_DSPP, 64, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 65, 1},
-	{ DBGBUS_DSPP, 65, 2},
-	{ DBGBUS_DSPP, 65, 3},
-	{ DBGBUS_DSPP, 65, 4},
-	{ DBGBUS_DSPP, 65, 5},
-	{ DBGBUS_DSPP, 65, 6},
-	{ DBGBUS_DSPP, 65, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 66, 1},
-	{ DBGBUS_DSPP, 66, 2},
-	{ DBGBUS_DSPP, 66, 3},
-	{ DBGBUS_DSPP, 66, 4},
-	{ DBGBUS_DSPP, 66, 5},
-	{ DBGBUS_DSPP, 66, 6},
-	{ DBGBUS_DSPP, 66, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 67, 1},
-	{ DBGBUS_DSPP, 67, 2},
-	{ DBGBUS_DSPP, 67, 3},
-	{ DBGBUS_DSPP, 67, 4},
-	{ DBGBUS_DSPP, 67, 5},
-	{ DBGBUS_DSPP, 67, 6},
-	{ DBGBUS_DSPP, 67, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 68, 1},
-	{ DBGBUS_DSPP, 68, 2},
-	{ DBGBUS_DSPP, 68, 3},
-	{ DBGBUS_DSPP, 68, 4},
-	{ DBGBUS_DSPP, 68, 5},
-	{ DBGBUS_DSPP, 68, 6},
-	{ DBGBUS_DSPP, 68, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 69, 1},
-	{ DBGBUS_DSPP, 69, 2},
-	{ DBGBUS_DSPP, 69, 3},
-	{ DBGBUS_DSPP, 69, 4},
-	{ DBGBUS_DSPP, 69, 5},
-	{ DBGBUS_DSPP, 69, 6},
-	{ DBGBUS_DSPP, 69, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 84, 1},
-	{ DBGBUS_DSPP, 84, 2},
-	{ DBGBUS_DSPP, 84, 3},
-	{ DBGBUS_DSPP, 84, 4},
-	{ DBGBUS_DSPP, 84, 5},
-	{ DBGBUS_DSPP, 84, 6},
-	{ DBGBUS_DSPP, 84, 7, _sde_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 85, 1},
-	{ DBGBUS_DSPP, 85, 2},
-	{ DBGBUS_DSPP, 85, 3},
-	{ DBGBUS_DSPP, 85, 4},
-	{ DBGBUS_DSPP, 85, 5},
-	{ DBGBUS_DSPP, 85, 6},
-	{ DBGBUS_DSPP, 85, 7, _sde_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 86, 1},
-	{ DBGBUS_DSPP, 86, 2},
-	{ DBGBUS_DSPP, 86, 3},
-	{ DBGBUS_DSPP, 86, 4},
-	{ DBGBUS_DSPP, 86, 5},
-	{ DBGBUS_DSPP, 86, 6},
-	{ DBGBUS_DSPP, 86, 7, _sde_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 87, 1},
-	{ DBGBUS_DSPP, 87, 2},
-	{ DBGBUS_DSPP, 87, 3},
-	{ DBGBUS_DSPP, 87, 4},
-	{ DBGBUS_DSPP, 87, 5},
-	{ DBGBUS_DSPP, 87, 6},
-	{ DBGBUS_DSPP, 87, 7, _sde_debug_bus_lm_dump },
-
-	/* LM1 */
-	{ DBGBUS_DSPP, 70, 1},
-	{ DBGBUS_DSPP, 70, 2},
-	{ DBGBUS_DSPP, 70, 3},
-	{ DBGBUS_DSPP, 70, 4},
-	{ DBGBUS_DSPP, 70, 5},
-	{ DBGBUS_DSPP, 70, 6},
-	{ DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 71, 1},
-	{ DBGBUS_DSPP, 71, 2},
-	{ DBGBUS_DSPP, 71, 3},
-	{ DBGBUS_DSPP, 71, 4},
-	{ DBGBUS_DSPP, 71, 5},
-	{ DBGBUS_DSPP, 71, 6},
-	{ DBGBUS_DSPP, 71, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 72, 1},
-	{ DBGBUS_DSPP, 72, 2},
-	{ DBGBUS_DSPP, 72, 3},
-	{ DBGBUS_DSPP, 72, 4},
-	{ DBGBUS_DSPP, 72, 5},
-	{ DBGBUS_DSPP, 72, 6},
-	{ DBGBUS_DSPP, 72, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 73, 1},
-	{ DBGBUS_DSPP, 73, 2},
-	{ DBGBUS_DSPP, 73, 3},
-	{ DBGBUS_DSPP, 73, 4},
-	{ DBGBUS_DSPP, 73, 5},
-	{ DBGBUS_DSPP, 73, 6},
-	{ DBGBUS_DSPP, 73, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 74, 1},
-	{ DBGBUS_DSPP, 74, 2},
-	{ DBGBUS_DSPP, 74, 3},
-	{ DBGBUS_DSPP, 74, 4},
-	{ DBGBUS_DSPP, 74, 5},
-	{ DBGBUS_DSPP, 74, 6},
-	{ DBGBUS_DSPP, 74, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 75, 1},
-	{ DBGBUS_DSPP, 75, 2},
-	{ DBGBUS_DSPP, 75, 3},
-	{ DBGBUS_DSPP, 75, 4},
-	{ DBGBUS_DSPP, 75, 5},
-	{ DBGBUS_DSPP, 75, 6},
-	{ DBGBUS_DSPP, 75, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 76, 1},
-	{ DBGBUS_DSPP, 76, 2},
-	{ DBGBUS_DSPP, 76, 3},
-	{ DBGBUS_DSPP, 76, 4},
-	{ DBGBUS_DSPP, 76, 5},
-	{ DBGBUS_DSPP, 76, 6},
-	{ DBGBUS_DSPP, 76, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 88, 1},
-	{ DBGBUS_DSPP, 88, 2},
-	{ DBGBUS_DSPP, 88, 3},
-	{ DBGBUS_DSPP, 88, 4},
-	{ DBGBUS_DSPP, 88, 5},
-	{ DBGBUS_DSPP, 88, 6},
-	{ DBGBUS_DSPP, 88, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 89, 1},
-	{ DBGBUS_DSPP, 89, 2},
-	{ DBGBUS_DSPP, 89, 3},
-	{ DBGBUS_DSPP, 89, 4},
-	{ DBGBUS_DSPP, 89, 5},
-	{ DBGBUS_DSPP, 89, 6},
-	{ DBGBUS_DSPP, 89, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 90, 1},
-	{ DBGBUS_DSPP, 90, 2},
-	{ DBGBUS_DSPP, 90, 3},
-	{ DBGBUS_DSPP, 90, 4},
-	{ DBGBUS_DSPP, 90, 5},
-	{ DBGBUS_DSPP, 90, 6},
-	{ DBGBUS_DSPP, 90, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 91, 1},
-	{ DBGBUS_DSPP, 91, 2},
-	{ DBGBUS_DSPP, 91, 3},
-	{ DBGBUS_DSPP, 91, 4},
-	{ DBGBUS_DSPP, 91, 5},
-	{ DBGBUS_DSPP, 91, 6},
-	{ DBGBUS_DSPP, 91, 7, _sde_debug_bus_lm_dump },
-
-	/* LM2 */
-	{ DBGBUS_DSPP, 77, 0},
-	{ DBGBUS_DSPP, 77, 1},
-	{ DBGBUS_DSPP, 77, 2},
-	{ DBGBUS_DSPP, 77, 3},
-	{ DBGBUS_DSPP, 77, 4},
-	{ DBGBUS_DSPP, 77, 5},
-	{ DBGBUS_DSPP, 77, 6},
-	{ DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 78, 0},
-	{ DBGBUS_DSPP, 78, 1},
-	{ DBGBUS_DSPP, 78, 2},
-	{ DBGBUS_DSPP, 78, 3},
-	{ DBGBUS_DSPP, 78, 4},
-	{ DBGBUS_DSPP, 78, 5},
-	{ DBGBUS_DSPP, 78, 6},
-	{ DBGBUS_DSPP, 78, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 79, 0},
-	{ DBGBUS_DSPP, 79, 1},
-	{ DBGBUS_DSPP, 79, 2},
-	{ DBGBUS_DSPP, 79, 3},
-	{ DBGBUS_DSPP, 79, 4},
-	{ DBGBUS_DSPP, 79, 5},
-	{ DBGBUS_DSPP, 79, 6},
-	{ DBGBUS_DSPP, 79, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 80, 0},
-	{ DBGBUS_DSPP, 80, 1},
-	{ DBGBUS_DSPP, 80, 2},
-	{ DBGBUS_DSPP, 80, 3},
-	{ DBGBUS_DSPP, 80, 4},
-	{ DBGBUS_DSPP, 80, 5},
-	{ DBGBUS_DSPP, 80, 6},
-	{ DBGBUS_DSPP, 80, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 81, 0},
-	{ DBGBUS_DSPP, 81, 1},
-	{ DBGBUS_DSPP, 81, 2},
-	{ DBGBUS_DSPP, 81, 3},
-	{ DBGBUS_DSPP, 81, 4},
-	{ DBGBUS_DSPP, 81, 5},
-	{ DBGBUS_DSPP, 81, 6},
-	{ DBGBUS_DSPP, 81, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 82, 0},
-	{ DBGBUS_DSPP, 82, 1},
-	{ DBGBUS_DSPP, 82, 2},
-	{ DBGBUS_DSPP, 82, 3},
-	{ DBGBUS_DSPP, 82, 4},
-	{ DBGBUS_DSPP, 82, 5},
-	{ DBGBUS_DSPP, 82, 6},
-	{ DBGBUS_DSPP, 82, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 83, 0},
-	{ DBGBUS_DSPP, 83, 1},
-	{ DBGBUS_DSPP, 83, 2},
-	{ DBGBUS_DSPP, 83, 3},
-	{ DBGBUS_DSPP, 83, 4},
-	{ DBGBUS_DSPP, 83, 5},
-	{ DBGBUS_DSPP, 83, 6},
-	{ DBGBUS_DSPP, 83, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 92, 1},
-	{ DBGBUS_DSPP, 92, 2},
-	{ DBGBUS_DSPP, 92, 3},
-	{ DBGBUS_DSPP, 92, 4},
-	{ DBGBUS_DSPP, 92, 5},
-	{ DBGBUS_DSPP, 92, 6},
-	{ DBGBUS_DSPP, 92, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 93, 1},
-	{ DBGBUS_DSPP, 93, 2},
-	{ DBGBUS_DSPP, 93, 3},
-	{ DBGBUS_DSPP, 93, 4},
-	{ DBGBUS_DSPP, 93, 5},
-	{ DBGBUS_DSPP, 93, 6},
-	{ DBGBUS_DSPP, 93, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 94, 1},
-	{ DBGBUS_DSPP, 94, 2},
-	{ DBGBUS_DSPP, 94, 3},
-	{ DBGBUS_DSPP, 94, 4},
-	{ DBGBUS_DSPP, 94, 5},
-	{ DBGBUS_DSPP, 94, 6},
-	{ DBGBUS_DSPP, 94, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 95, 1},
-	{ DBGBUS_DSPP, 95, 2},
-	{ DBGBUS_DSPP, 95, 3},
-	{ DBGBUS_DSPP, 95, 4},
-	{ DBGBUS_DSPP, 95, 5},
-	{ DBGBUS_DSPP, 95, 6},
-	{ DBGBUS_DSPP, 95, 7, _sde_debug_bus_lm_dump },
-
-
-	/* LM3 */
-	{ DBGBUS_DSPP, 110, 1},
-	{ DBGBUS_DSPP, 110, 2},
-	{ DBGBUS_DSPP, 110, 3},
-	{ DBGBUS_DSPP, 110, 4},
-	{ DBGBUS_DSPP, 110, 5},
-	{ DBGBUS_DSPP, 110, 6},
-	{ DBGBUS_DSPP, 110, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 111, 1},
-	{ DBGBUS_DSPP, 111, 2},
-	{ DBGBUS_DSPP, 111, 3},
-	{ DBGBUS_DSPP, 111, 4},
-	{ DBGBUS_DSPP, 111, 5},
-	{ DBGBUS_DSPP, 111, 6},
-	{ DBGBUS_DSPP, 111, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 112, 1},
-	{ DBGBUS_DSPP, 112, 2},
-	{ DBGBUS_DSPP, 112, 3},
-	{ DBGBUS_DSPP, 112, 4},
-	{ DBGBUS_DSPP, 112, 5},
-	{ DBGBUS_DSPP, 112, 6},
-	{ DBGBUS_DSPP, 112, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 113, 1},
-	{ DBGBUS_DSPP, 113, 2},
-	{ DBGBUS_DSPP, 113, 3},
-	{ DBGBUS_DSPP, 113, 4},
-	{ DBGBUS_DSPP, 113, 5},
-	{ DBGBUS_DSPP, 113, 6},
-	{ DBGBUS_DSPP, 113, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 114, 1},
-	{ DBGBUS_DSPP, 114, 2},
-	{ DBGBUS_DSPP, 114, 3},
-	{ DBGBUS_DSPP, 114, 4},
-	{ DBGBUS_DSPP, 114, 5},
-	{ DBGBUS_DSPP, 114, 6},
-	{ DBGBUS_DSPP, 114, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 115, 1},
-	{ DBGBUS_DSPP, 115, 2},
-	{ DBGBUS_DSPP, 115, 3},
-	{ DBGBUS_DSPP, 115, 4},
-	{ DBGBUS_DSPP, 115, 5},
-	{ DBGBUS_DSPP, 115, 6},
-	{ DBGBUS_DSPP, 115, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 116, 1},
-	{ DBGBUS_DSPP, 116, 2},
-	{ DBGBUS_DSPP, 116, 3},
-	{ DBGBUS_DSPP, 116, 4},
-	{ DBGBUS_DSPP, 116, 5},
-	{ DBGBUS_DSPP, 116, 6},
-	{ DBGBUS_DSPP, 116, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 117, 1},
-	{ DBGBUS_DSPP, 117, 2},
-	{ DBGBUS_DSPP, 117, 3},
-	{ DBGBUS_DSPP, 117, 4},
-	{ DBGBUS_DSPP, 117, 5},
-	{ DBGBUS_DSPP, 117, 6},
-	{ DBGBUS_DSPP, 117, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 118, 1},
-	{ DBGBUS_DSPP, 118, 2},
-	{ DBGBUS_DSPP, 118, 3},
-	{ DBGBUS_DSPP, 118, 4},
-	{ DBGBUS_DSPP, 118, 5},
-	{ DBGBUS_DSPP, 118, 6},
-	{ DBGBUS_DSPP, 118, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 119, 1},
-	{ DBGBUS_DSPP, 119, 2},
-	{ DBGBUS_DSPP, 119, 3},
-	{ DBGBUS_DSPP, 119, 4},
-	{ DBGBUS_DSPP, 119, 5},
-	{ DBGBUS_DSPP, 119, 6},
-	{ DBGBUS_DSPP, 119, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 120, 1},
-	{ DBGBUS_DSPP, 120, 2},
-	{ DBGBUS_DSPP, 120, 3},
-	{ DBGBUS_DSPP, 120, 4},
-	{ DBGBUS_DSPP, 120, 5},
-	{ DBGBUS_DSPP, 120, 6},
-	{ DBGBUS_DSPP, 120, 7, _sde_debug_bus_lm_dump },
-
-	/* LM4 */
-	{ DBGBUS_DSPP, 96, 1},
-	{ DBGBUS_DSPP, 96, 2},
-	{ DBGBUS_DSPP, 96, 3},
-	{ DBGBUS_DSPP, 96, 4},
-	{ DBGBUS_DSPP, 96, 5},
-	{ DBGBUS_DSPP, 96, 6},
-	{ DBGBUS_DSPP, 96, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 97, 1},
-	{ DBGBUS_DSPP, 97, 2},
-	{ DBGBUS_DSPP, 97, 3},
-	{ DBGBUS_DSPP, 97, 4},
-	{ DBGBUS_DSPP, 97, 5},
-	{ DBGBUS_DSPP, 97, 6},
-	{ DBGBUS_DSPP, 97, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 98, 1},
-	{ DBGBUS_DSPP, 98, 2},
-	{ DBGBUS_DSPP, 98, 3},
-	{ DBGBUS_DSPP, 98, 4},
-	{ DBGBUS_DSPP, 98, 5},
-	{ DBGBUS_DSPP, 98, 6},
-	{ DBGBUS_DSPP, 98, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 99, 1},
-	{ DBGBUS_DSPP, 99, 2},
-	{ DBGBUS_DSPP, 99, 3},
-	{ DBGBUS_DSPP, 99, 4},
-	{ DBGBUS_DSPP, 99, 5},
-	{ DBGBUS_DSPP, 99, 6},
-	{ DBGBUS_DSPP, 99, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 100, 1},
-	{ DBGBUS_DSPP, 100, 2},
-	{ DBGBUS_DSPP, 100, 3},
-	{ DBGBUS_DSPP, 100, 4},
-	{ DBGBUS_DSPP, 100, 5},
-	{ DBGBUS_DSPP, 100, 6},
-	{ DBGBUS_DSPP, 100, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 101, 1},
-	{ DBGBUS_DSPP, 101, 2},
-	{ DBGBUS_DSPP, 101, 3},
-	{ DBGBUS_DSPP, 101, 4},
-	{ DBGBUS_DSPP, 101, 5},
-	{ DBGBUS_DSPP, 101, 6},
-	{ DBGBUS_DSPP, 101, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 103, 1},
-	{ DBGBUS_DSPP, 103, 2},
-	{ DBGBUS_DSPP, 103, 3},
-	{ DBGBUS_DSPP, 103, 4},
-	{ DBGBUS_DSPP, 103, 5},
-	{ DBGBUS_DSPP, 103, 6},
-	{ DBGBUS_DSPP, 103, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 105, 1},
-	{ DBGBUS_DSPP, 105, 2},
-	{ DBGBUS_DSPP, 105, 3},
-	{ DBGBUS_DSPP, 105, 4},
-	{ DBGBUS_DSPP, 105, 5},
-	{ DBGBUS_DSPP, 105, 6},
-	{ DBGBUS_DSPP, 105, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 106, 1},
-	{ DBGBUS_DSPP, 106, 2},
-	{ DBGBUS_DSPP, 106, 3},
-	{ DBGBUS_DSPP, 106, 4},
-	{ DBGBUS_DSPP, 106, 5},
-	{ DBGBUS_DSPP, 106, 6},
-	{ DBGBUS_DSPP, 106, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 109, 1},
-	{ DBGBUS_DSPP, 109, 2},
-	{ DBGBUS_DSPP, 109, 3},
-	{ DBGBUS_DSPP, 109, 4},
-	{ DBGBUS_DSPP, 109, 5},
-	{ DBGBUS_DSPP, 109, 6},
-	{ DBGBUS_DSPP, 109, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 122, 1},
-	{ DBGBUS_DSPP, 122, 2},
-	{ DBGBUS_DSPP, 122, 3},
-	{ DBGBUS_DSPP, 122, 4},
-	{ DBGBUS_DSPP, 122, 5},
-	{ DBGBUS_DSPP, 122, 6},
-	{ DBGBUS_DSPP, 122, 7, _sde_debug_bus_lm_dump },
-
-	/* LM5 */
-	{ DBGBUS_DSPP, 124, 1},
-	{ DBGBUS_DSPP, 124, 2},
-	{ DBGBUS_DSPP, 124, 3},
-	{ DBGBUS_DSPP, 124, 4},
-	{ DBGBUS_DSPP, 124, 5},
-	{ DBGBUS_DSPP, 124, 6},
-	{ DBGBUS_DSPP, 124, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 125, 1},
-	{ DBGBUS_DSPP, 125, 2},
-	{ DBGBUS_DSPP, 125, 3},
-	{ DBGBUS_DSPP, 125, 4},
-	{ DBGBUS_DSPP, 125, 5},
-	{ DBGBUS_DSPP, 125, 6},
-	{ DBGBUS_DSPP, 125, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 126, 1},
-	{ DBGBUS_DSPP, 126, 2},
-	{ DBGBUS_DSPP, 126, 3},
-	{ DBGBUS_DSPP, 126, 4},
-	{ DBGBUS_DSPP, 126, 5},
-	{ DBGBUS_DSPP, 126, 6},
-	{ DBGBUS_DSPP, 126, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 127, 1},
-	{ DBGBUS_DSPP, 127, 2},
-	{ DBGBUS_DSPP, 127, 3},
-	{ DBGBUS_DSPP, 127, 4},
-	{ DBGBUS_DSPP, 127, 5},
-	{ DBGBUS_DSPP, 127, 6},
-	{ DBGBUS_DSPP, 127, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 128, 1},
-	{ DBGBUS_DSPP, 128, 2},
-	{ DBGBUS_DSPP, 128, 3},
-	{ DBGBUS_DSPP, 128, 4},
-	{ DBGBUS_DSPP, 128, 5},
-	{ DBGBUS_DSPP, 128, 6},
-	{ DBGBUS_DSPP, 128, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 129, 1},
-	{ DBGBUS_DSPP, 129, 2},
-	{ DBGBUS_DSPP, 129, 3},
-	{ DBGBUS_DSPP, 129, 4},
-	{ DBGBUS_DSPP, 129, 5},
-	{ DBGBUS_DSPP, 129, 6},
-	{ DBGBUS_DSPP, 129, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 131, 1},
-	{ DBGBUS_DSPP, 131, 2},
-	{ DBGBUS_DSPP, 131, 3},
-	{ DBGBUS_DSPP, 131, 4},
-	{ DBGBUS_DSPP, 131, 5},
-	{ DBGBUS_DSPP, 131, 6},
-	{ DBGBUS_DSPP, 131, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 132, 1},
-	{ DBGBUS_DSPP, 132, 2},
-	{ DBGBUS_DSPP, 132, 3},
-	{ DBGBUS_DSPP, 132, 4},
-	{ DBGBUS_DSPP, 132, 5},
-	{ DBGBUS_DSPP, 132, 6},
-	{ DBGBUS_DSPP, 132, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 133, 1},
-	{ DBGBUS_DSPP, 133, 2},
-	{ DBGBUS_DSPP, 133, 3},
-	{ DBGBUS_DSPP, 133, 4},
-	{ DBGBUS_DSPP, 133, 5},
-	{ DBGBUS_DSPP, 133, 6},
-	{ DBGBUS_DSPP, 133, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 134, 1},
-	{ DBGBUS_DSPP, 134, 2},
-	{ DBGBUS_DSPP, 134, 3},
-	{ DBGBUS_DSPP, 134, 4},
-	{ DBGBUS_DSPP, 134, 5},
-	{ DBGBUS_DSPP, 134, 6},
-	{ DBGBUS_DSPP, 134, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 135, 1},
-	{ DBGBUS_DSPP, 135, 2},
-	{ DBGBUS_DSPP, 135, 3},
-	{ DBGBUS_DSPP, 135, 4},
-	{ DBGBUS_DSPP, 135, 5},
-	{ DBGBUS_DSPP, 135, 6},
-	{ DBGBUS_DSPP, 135, 7, _sde_debug_bus_lm_dump },
-
-	/* csc */
-	{ DBGBUS_SSPP0, 7, 0},
-	{ DBGBUS_SSPP0, 7, 1},
-	{ DBGBUS_SSPP0, 7, 2},
-	{ DBGBUS_SSPP0, 27, 0},
-	{ DBGBUS_SSPP0, 27, 1},
-	{ DBGBUS_SSPP0, 27, 2},
-	{ DBGBUS_SSPP1, 7, 0},
-	{ DBGBUS_SSPP1, 7, 1},
-	{ DBGBUS_SSPP1, 7, 2},
-	{ DBGBUS_SSPP1, 27, 0},
-	{ DBGBUS_SSPP1, 27, 1},
-	{ DBGBUS_SSPP1, 27, 2},
-
-	/* pcc */
-	{ DBGBUS_SSPP0, 43, 3},
-	{ DBGBUS_SSPP0, 47, 3},
-	{ DBGBUS_SSPP1, 43, 3},
-	{ DBGBUS_SSPP1, 47, 3},
-
-	/* spa */
-	{ DBGBUS_SSPP0, 8,  0},
-	{ DBGBUS_SSPP0, 28, 0},
-	{ DBGBUS_SSPP1, 8,  0},
-	{ DBGBUS_SSPP1, 28, 0},
-
-	/* dspp pa */
-	{ DBGBUS_DSPP, 13, 0},
-	{ DBGBUS_DSPP, 19, 0},
-	{ DBGBUS_DSPP, 24, 0},
-	{ DBGBUS_DSPP, 37, 0},
-
-	/* igc */
-	{ DBGBUS_SSPP0, 39, 0},
-	{ DBGBUS_SSPP0, 39, 1},
-	{ DBGBUS_SSPP0, 39, 2},
-
-	{ DBGBUS_SSPP1, 39, 0},
-	{ DBGBUS_SSPP1, 39, 1},
-	{ DBGBUS_SSPP1, 39, 2},
-
-	{ DBGBUS_SSPP0, 46, 0},
-	{ DBGBUS_SSPP0, 46, 1},
-	{ DBGBUS_SSPP0, 46, 2},
-
-	{ DBGBUS_SSPP1, 46, 0},
-	{ DBGBUS_SSPP1, 46, 1},
-	{ DBGBUS_SSPP1, 46, 2},
-
-	{ DBGBUS_DSPP, 14, 0},
-	{ DBGBUS_DSPP, 14, 1},
-	{ DBGBUS_DSPP, 14, 2},
-	{ DBGBUS_DSPP, 20, 0},
-	{ DBGBUS_DSPP, 20, 1},
-	{ DBGBUS_DSPP, 20, 2},
-	{ DBGBUS_DSPP, 25, 0},
-	{ DBGBUS_DSPP, 25, 1},
-	{ DBGBUS_DSPP, 25, 2},
-	{ DBGBUS_DSPP, 38, 0},
-	{ DBGBUS_DSPP, 38, 1},
-	{ DBGBUS_DSPP, 38, 2},
-
-	/* intf0-3 */
-	{ DBGBUS_PERIPH, 0, 0},
-	{ DBGBUS_PERIPH, 1, 0},
-	{ DBGBUS_PERIPH, 2, 0},
-	{ DBGBUS_PERIPH, 3, 0},
-	{ DBGBUS_PERIPH, 4, 0},
-	{ DBGBUS_PERIPH, 5, 0},
-
-	/* te counter wrapper */
-	{ DBGBUS_PERIPH, 60, 0},
-	{ DBGBUS_PERIPH, 60, 1},
-	{ DBGBUS_PERIPH, 60, 2},
-	{ DBGBUS_PERIPH, 60, 3},
-	{ DBGBUS_PERIPH, 60, 4},
-	{ DBGBUS_PERIPH, 60, 5},
-
-	/* dsc0 */
-	{ DBGBUS_PERIPH, 47, 0},
-	{ DBGBUS_PERIPH, 47, 1},
-	{ DBGBUS_PERIPH, 47, 2},
-	{ DBGBUS_PERIPH, 47, 3},
-	{ DBGBUS_PERIPH, 47, 4},
-	{ DBGBUS_PERIPH, 47, 5},
-	{ DBGBUS_PERIPH, 47, 6},
-	{ DBGBUS_PERIPH, 47, 7},
-
-	/* dsc1 */
-	{ DBGBUS_PERIPH, 48, 0},
-	{ DBGBUS_PERIPH, 48, 1},
-	{ DBGBUS_PERIPH, 48, 2},
-	{ DBGBUS_PERIPH, 48, 3},
-	{ DBGBUS_PERIPH, 48, 4},
-	{ DBGBUS_PERIPH, 48, 5},
-	{ DBGBUS_PERIPH, 48, 6},
-	{ DBGBUS_PERIPH, 48, 7},
-
-	/* dsc2 */
-	{ DBGBUS_PERIPH, 50, 0},
-	{ DBGBUS_PERIPH, 50, 1},
-	{ DBGBUS_PERIPH, 50, 2},
-	{ DBGBUS_PERIPH, 50, 3},
-	{ DBGBUS_PERIPH, 50, 4},
-	{ DBGBUS_PERIPH, 50, 5},
-	{ DBGBUS_PERIPH, 50, 6},
-	{ DBGBUS_PERIPH, 50, 7},
-
-	/* dsc3 */
-	{ DBGBUS_PERIPH, 51, 0},
-	{ DBGBUS_PERIPH, 51, 1},
-	{ DBGBUS_PERIPH, 51, 2},
-	{ DBGBUS_PERIPH, 51, 3},
-	{ DBGBUS_PERIPH, 51, 4},
-	{ DBGBUS_PERIPH, 51, 5},
-	{ DBGBUS_PERIPH, 51, 6},
-	{ DBGBUS_PERIPH, 51, 7},
-
-	/* dsc4 */
-	{ DBGBUS_PERIPH, 52, 0},
-	{ DBGBUS_PERIPH, 52, 1},
-	{ DBGBUS_PERIPH, 52, 2},
-	{ DBGBUS_PERIPH, 52, 3},
-	{ DBGBUS_PERIPH, 52, 4},
-	{ DBGBUS_PERIPH, 52, 5},
-	{ DBGBUS_PERIPH, 52, 6},
-	{ DBGBUS_PERIPH, 52, 7},
-
-	/* dsc5 */
-	{ DBGBUS_PERIPH, 53, 0},
-	{ DBGBUS_PERIPH, 53, 1},
-	{ DBGBUS_PERIPH, 53, 2},
-	{ DBGBUS_PERIPH, 53, 3},
-	{ DBGBUS_PERIPH, 53, 4},
-	{ DBGBUS_PERIPH, 53, 5},
-	{ DBGBUS_PERIPH, 53, 6},
-	{ DBGBUS_PERIPH, 53, 7},
-
-	/* tear-check */
-	/* INTF_0 */
-	{ DBGBUS_PERIPH, 63, 0 },
-	{ DBGBUS_PERIPH, 63, 1 },
-	{ DBGBUS_PERIPH, 63, 2 },
-	{ DBGBUS_PERIPH, 63, 3 },
-	{ DBGBUS_PERIPH, 63, 4 },
-	{ DBGBUS_PERIPH, 63, 5 },
-	{ DBGBUS_PERIPH, 63, 6 },
-	{ DBGBUS_PERIPH, 63, 7 },
-
-	/* INTF_1 */
-	{ DBGBUS_PERIPH, 64, 0 },
-	{ DBGBUS_PERIPH, 64, 1 },
-	{ DBGBUS_PERIPH, 64, 2 },
-	{ DBGBUS_PERIPH, 64, 3 },
-	{ DBGBUS_PERIPH, 64, 4 },
-	{ DBGBUS_PERIPH, 64, 5 },
-	{ DBGBUS_PERIPH, 64, 6 },
-	{ DBGBUS_PERIPH, 64, 7 },
-
-	/* INTF_2 */
-	{ DBGBUS_PERIPH, 65, 0 },
-	{ DBGBUS_PERIPH, 65, 1 },
-	{ DBGBUS_PERIPH, 65, 2 },
-	{ DBGBUS_PERIPH, 65, 3 },
-	{ DBGBUS_PERIPH, 65, 4 },
-	{ DBGBUS_PERIPH, 65, 5 },
-	{ DBGBUS_PERIPH, 65, 6 },
-	{ DBGBUS_PERIPH, 65, 7 },
-
-	/* INTF_4 */
-	{ DBGBUS_PERIPH, 66, 0 },
-	{ DBGBUS_PERIPH, 66, 1 },
-	{ DBGBUS_PERIPH, 66, 2 },
-	{ DBGBUS_PERIPH, 66, 3 },
-	{ DBGBUS_PERIPH, 66, 4 },
-	{ DBGBUS_PERIPH, 66, 5 },
-	{ DBGBUS_PERIPH, 66, 6 },
-	{ DBGBUS_PERIPH, 66, 7 },
-
-	/* INTF_5 */
-	{ DBGBUS_PERIPH, 67, 0 },
-	{ DBGBUS_PERIPH, 67, 1 },
-	{ DBGBUS_PERIPH, 67, 2 },
-	{ DBGBUS_PERIPH, 67, 3 },
-	{ DBGBUS_PERIPH, 67, 4 },
-	{ DBGBUS_PERIPH, 67, 5 },
-	{ DBGBUS_PERIPH, 67, 6 },
-	{ DBGBUS_PERIPH, 67, 7 },
-
-	/* INTF_3 */
-	{ DBGBUS_PERIPH, 73, 0 },
-	{ DBGBUS_PERIPH, 73, 1 },
-	{ DBGBUS_PERIPH, 73, 2 },
-	{ DBGBUS_PERIPH, 73, 3 },
-	{ DBGBUS_PERIPH, 73, 4 },
-	{ DBGBUS_PERIPH, 73, 5 },
-	{ DBGBUS_PERIPH, 73, 6 },
-	{ DBGBUS_PERIPH, 73, 7 },
-
-	/* cdwn */
-	{ DBGBUS_PERIPH, 80, 0},
-	{ DBGBUS_PERIPH, 80, 1},
-	{ DBGBUS_PERIPH, 80, 2},
-
-	{ DBGBUS_PERIPH, 81, 0},
-	{ DBGBUS_PERIPH, 81, 1},
-	{ DBGBUS_PERIPH, 81, 2},
-
-	{ DBGBUS_PERIPH, 82, 0},
-	{ DBGBUS_PERIPH, 82, 1},
-	{ DBGBUS_PERIPH, 82, 2},
-	{ DBGBUS_PERIPH, 82, 3},
-	{ DBGBUS_PERIPH, 82, 4},
-	{ DBGBUS_PERIPH, 82, 5},
-	{ DBGBUS_PERIPH, 82, 6},
-	{ DBGBUS_PERIPH, 82, 7},
-
-	/* DPTX1 */
-	{ DBGBUS_PERIPH, 68, 0},
-	{ DBGBUS_PERIPH, 68, 1},
-	{ DBGBUS_PERIPH, 68, 2},
-	{ DBGBUS_PERIPH, 68, 3},
-	{ DBGBUS_PERIPH, 68, 4},
-	{ DBGBUS_PERIPH, 68, 5},
-	{ DBGBUS_PERIPH, 68, 6},
-	{ DBGBUS_PERIPH, 68, 7},
-
-	/* DP */
-	{ DBGBUS_PERIPH, 69, 0},
-	{ DBGBUS_PERIPH, 69, 1},
-	{ DBGBUS_PERIPH, 69, 2},
-	{ DBGBUS_PERIPH, 69, 3},
-	{ DBGBUS_PERIPH, 69, 4},
-	{ DBGBUS_PERIPH, 69, 5},
-
-	/* dsi0 */
-	{ DBGBUS_PERIPH, 70, 0},
-	{ DBGBUS_PERIPH, 70, 1},
-	{ DBGBUS_PERIPH, 70, 2},
-	{ DBGBUS_PERIPH, 70, 3},
-	{ DBGBUS_PERIPH, 70, 4},
-	{ DBGBUS_PERIPH, 70, 5},
-
-	/* dsi1 */
-	{ DBGBUS_PERIPH, 71, 0},
-	{ DBGBUS_PERIPH, 71, 1},
-	{ DBGBUS_PERIPH, 71, 2},
-	{ DBGBUS_PERIPH, 71, 3},
-	{ DBGBUS_PERIPH, 71, 4},
-	{ DBGBUS_PERIPH, 71, 5},
-
-	/* eDP */
-	{ DBGBUS_PERIPH, 72, 0},
-	{ DBGBUS_PERIPH, 72, 1},
-	{ DBGBUS_PERIPH, 72, 2},
-	{ DBGBUS_PERIPH, 72, 3},
-	{ DBGBUS_PERIPH, 72, 4},
-	{ DBGBUS_PERIPH, 72, 5},
-
-};
-
-static struct sde_debug_bus_entry dbg_bus_sde_kona[] = {
-
-	/* Unpack 0 sspp 0*/
-	{ DBGBUS_SSPP0, 35, 2 },
-	{ DBGBUS_SSPP0, 50, 2 },
-	{ DBGBUS_SSPP0, 60, 2 },
-	{ DBGBUS_SSPP0, 70, 2 },
-
-	/* Unpack 1 sspp 0*/
-	{ DBGBUS_SSPP0, 36, 2 },
-	{ DBGBUS_SSPP0, 51, 2 },
-	{ DBGBUS_SSPP0, 61, 2 },
-	{ DBGBUS_SSPP0, 71, 2 },
-
-	/* Unpack 2 sspp 0*/
-	{ DBGBUS_SSPP0, 37, 2 },
-	{ DBGBUS_SSPP0, 52, 2 },
-	{ DBGBUS_SSPP0, 62, 2 },
-	{ DBGBUS_SSPP0, 72, 2 },
-
-
-	/* Unpack 3 sspp 0*/
-	{ DBGBUS_SSPP0, 38, 2 },
-	{ DBGBUS_SSPP0, 53, 2 },
-	{ DBGBUS_SSPP0, 63, 2 },
-	{ DBGBUS_SSPP0, 73, 2 },
-
-	/* Unpack 0 sspp 1*/
-	{ DBGBUS_SSPP1, 35, 2 },
-	{ DBGBUS_SSPP1, 50, 2 },
-	{ DBGBUS_SSPP1, 60, 2 },
-	{ DBGBUS_SSPP1, 70, 2 },
-
-	/* Unpack 1 sspp 1*/
-	{ DBGBUS_SSPP1, 36, 2 },
-	{ DBGBUS_SSPP1, 51, 2 },
-	{ DBGBUS_SSPP1, 61, 2 },
-	{ DBGBUS_SSPP1, 71, 2 },
-
-	/* Unpack 2 sspp 1*/
-	{ DBGBUS_SSPP1, 37, 2 },
-	{ DBGBUS_SSPP1, 52, 2 },
-	{ DBGBUS_SSPP1, 62, 2 },
-	{ DBGBUS_SSPP1, 72, 2 },
-
-
-	/* Unpack 3 sspp 1*/
-	{ DBGBUS_SSPP1, 38, 2 },
-	{ DBGBUS_SSPP1, 53, 2 },
-	{ DBGBUS_SSPP1, 63, 2 },
-	{ DBGBUS_SSPP1, 73, 2 },
-
-	/* scheduler */
-	{ DBGBUS_DSPP, 130, 0 },
-	{ DBGBUS_DSPP, 130, 1 },
-	{ DBGBUS_DSPP, 130, 2 },
-	{ DBGBUS_DSPP, 130, 3 },
-	{ DBGBUS_DSPP, 130, 4 },
-	{ DBGBUS_DSPP, 130, 5 },
-
-
-	/* fetch sspp0 */
-
-	/* vig 0 */
-	{ DBGBUS_SSPP0, 0, 0 },
-	{ DBGBUS_SSPP0, 0, 1 },
-	{ DBGBUS_SSPP0, 0, 2 },
-	{ DBGBUS_SSPP0, 0, 3 },
-	{ DBGBUS_SSPP0, 0, 4 },
-	{ DBGBUS_SSPP0, 0, 5 },
-	{ DBGBUS_SSPP0, 0, 6 },
-	{ DBGBUS_SSPP0, 0, 7 },
-
-	{ DBGBUS_SSPP0, 1, 0 },
-	{ DBGBUS_SSPP0, 1, 1 },
-	{ DBGBUS_SSPP0, 1, 2 },
-	{ DBGBUS_SSPP0, 1, 3 },
-	{ DBGBUS_SSPP0, 1, 4 },
-	{ DBGBUS_SSPP0, 1, 5 },
-	{ DBGBUS_SSPP0, 1, 6 },
-	{ DBGBUS_SSPP0, 1, 7 },
-
-	{ DBGBUS_SSPP0, 2, 0 },
-	{ DBGBUS_SSPP0, 2, 1 },
-	{ DBGBUS_SSPP0, 2, 2 },
-	{ DBGBUS_SSPP0, 2, 3 },
-	{ DBGBUS_SSPP0, 2, 4 },
-	{ DBGBUS_SSPP0, 2, 5 },
-	{ DBGBUS_SSPP0, 2, 6 },
-	{ DBGBUS_SSPP0, 2, 7 },
-
-	{ DBGBUS_SSPP0, 4, 0 },
-	{ DBGBUS_SSPP0, 4, 1 },
-	{ DBGBUS_SSPP0, 4, 2 },
-	{ DBGBUS_SSPP0, 4, 3 },
-	{ DBGBUS_SSPP0, 4, 4 },
-	{ DBGBUS_SSPP0, 4, 5 },
-	{ DBGBUS_SSPP0, 4, 6 },
-	{ DBGBUS_SSPP0, 4, 7 },
-
-	{ DBGBUS_SSPP0, 5, 0 },
-	{ DBGBUS_SSPP0, 5, 1 },
-	{ DBGBUS_SSPP0, 5, 2 },
-	{ DBGBUS_SSPP0, 5, 3 },
-	{ DBGBUS_SSPP0, 5, 4 },
-	{ DBGBUS_SSPP0, 5, 5 },
-	{ DBGBUS_SSPP0, 5, 6 },
-	{ DBGBUS_SSPP0, 5, 7 },
-
-	/* vig 2 */
-	{ DBGBUS_SSPP0, 20, 0 },
-	{ DBGBUS_SSPP0, 20, 1 },
-	{ DBGBUS_SSPP0, 20, 2 },
-	{ DBGBUS_SSPP0, 20, 3 },
-	{ DBGBUS_SSPP0, 20, 4 },
-	{ DBGBUS_SSPP0, 20, 5 },
-	{ DBGBUS_SSPP0, 20, 6 },
-	{ DBGBUS_SSPP0, 20, 7 },
-
-	{ DBGBUS_SSPP0, 21, 0 },
-	{ DBGBUS_SSPP0, 21, 1 },
-	{ DBGBUS_SSPP0, 21, 2 },
-	{ DBGBUS_SSPP0, 21, 3 },
-	{ DBGBUS_SSPP0, 21, 4 },
-	{ DBGBUS_SSPP0, 21, 5 },
-	{ DBGBUS_SSPP0, 21, 6 },
-	{ DBGBUS_SSPP0, 21, 7 },
-
-	{ DBGBUS_SSPP0, 22, 0 },
-	{ DBGBUS_SSPP0, 22, 1 },
-	{ DBGBUS_SSPP0, 22, 2 },
-	{ DBGBUS_SSPP0, 22, 3 },
-	{ DBGBUS_SSPP0, 22, 4 },
-	{ DBGBUS_SSPP0, 22, 5 },
-	{ DBGBUS_SSPP0, 22, 6 },
-	{ DBGBUS_SSPP0, 22, 7 },
-
-	{ DBGBUS_SSPP0, 24, 0 },
-	{ DBGBUS_SSPP0, 24, 1 },
-	{ DBGBUS_SSPP0, 24, 2 },
-	{ DBGBUS_SSPP0, 24, 3 },
-	{ DBGBUS_SSPP0, 24, 4 },
-	{ DBGBUS_SSPP0, 24, 5 },
-	{ DBGBUS_SSPP0, 24, 6 },
-	{ DBGBUS_SSPP0, 24, 7 },
-
-	{ DBGBUS_SSPP0, 25, 0 },
-	{ DBGBUS_SSPP0, 25, 1 },
-	{ DBGBUS_SSPP0, 25, 2 },
-	{ DBGBUS_SSPP0, 25, 3 },
-	{ DBGBUS_SSPP0, 25, 4 },
-	{ DBGBUS_SSPP0, 25, 5 },
-	{ DBGBUS_SSPP0, 25, 6 },
-	{ DBGBUS_SSPP0, 25, 7 },
-
-	/* dma 2 */
-	{ DBGBUS_SSPP0, 30, 0 },
-	{ DBGBUS_SSPP0, 30, 1 },
-	{ DBGBUS_SSPP0, 30, 2 },
-	{ DBGBUS_SSPP0, 30, 3 },
-	{ DBGBUS_SSPP0, 30, 4 },
-	{ DBGBUS_SSPP0, 30, 5 },
-	{ DBGBUS_SSPP0, 30, 6 },
-	{ DBGBUS_SSPP0, 30, 7 },
-
-	{ DBGBUS_SSPP0, 31, 0 },
-	{ DBGBUS_SSPP0, 31, 1 },
-	{ DBGBUS_SSPP0, 31, 2 },
-	{ DBGBUS_SSPP0, 31, 3 },
-	{ DBGBUS_SSPP0, 31, 4 },
-	{ DBGBUS_SSPP0, 31, 5 },
-	{ DBGBUS_SSPP0, 31, 6 },
-	{ DBGBUS_SSPP0, 31, 7 },
-
-	{ DBGBUS_SSPP0, 32, 0 },
-	{ DBGBUS_SSPP0, 32, 1 },
-	{ DBGBUS_SSPP0, 32, 2 },
-	{ DBGBUS_SSPP0, 32, 3 },
-	{ DBGBUS_SSPP0, 32, 4 },
-	{ DBGBUS_SSPP0, 32, 5 },
-	{ DBGBUS_SSPP0, 32, 6 },
-	{ DBGBUS_SSPP0, 32, 7 },
-
-	{ DBGBUS_SSPP0, 33, 0 },
-	{ DBGBUS_SSPP0, 33, 1 },
-	{ DBGBUS_SSPP0, 33, 2 },
-	{ DBGBUS_SSPP0, 33, 3 },
-	{ DBGBUS_SSPP0, 33, 4 },
-	{ DBGBUS_SSPP0, 33, 5 },
-	{ DBGBUS_SSPP0, 33, 6 },
-	{ DBGBUS_SSPP0, 33, 7 },
-
-	{ DBGBUS_SSPP0, 34, 0 },
-	{ DBGBUS_SSPP0, 34, 1 },
-	{ DBGBUS_SSPP0, 34, 2 },
-	{ DBGBUS_SSPP0, 34, 3 },
-	{ DBGBUS_SSPP0, 34, 4 },
-	{ DBGBUS_SSPP0, 34, 5 },
-	{ DBGBUS_SSPP0, 34, 6 },
-	{ DBGBUS_SSPP0, 34, 7 },
-
-	/* dma 0 */
-	{ DBGBUS_SSPP0, 40, 0 },
-	{ DBGBUS_SSPP0, 40, 1 },
-	{ DBGBUS_SSPP0, 40, 2 },
-	{ DBGBUS_SSPP0, 40, 3 },
-	{ DBGBUS_SSPP0, 40, 4 },
-	{ DBGBUS_SSPP0, 40, 5 },
-	{ DBGBUS_SSPP0, 40, 6 },
-	{ DBGBUS_SSPP0, 40, 7 },
-
-	{ DBGBUS_SSPP0, 41, 0 },
-	{ DBGBUS_SSPP0, 41, 1 },
-	{ DBGBUS_SSPP0, 41, 2 },
-	{ DBGBUS_SSPP0, 41, 3 },
-	{ DBGBUS_SSPP0, 41, 4 },
-	{ DBGBUS_SSPP0, 41, 5 },
-	{ DBGBUS_SSPP0, 41, 6 },
-	{ DBGBUS_SSPP0, 41, 7 },
-
-	{ DBGBUS_SSPP0, 42, 0 },
-	{ DBGBUS_SSPP0, 42, 1 },
-	{ DBGBUS_SSPP0, 42, 2 },
-	{ DBGBUS_SSPP0, 42, 3 },
-	{ DBGBUS_SSPP0, 42, 4 },
-	{ DBGBUS_SSPP0, 42, 5 },
-	{ DBGBUS_SSPP0, 42, 6 },
-	{ DBGBUS_SSPP0, 42, 7 },
-
-	{ DBGBUS_SSPP0, 44, 0 },
-	{ DBGBUS_SSPP0, 44, 1 },
-	{ DBGBUS_SSPP0, 44, 2 },
-	{ DBGBUS_SSPP0, 44, 3 },
-	{ DBGBUS_SSPP0, 44, 4 },
-	{ DBGBUS_SSPP0, 44, 5 },
-	{ DBGBUS_SSPP0, 44, 6 },
-	{ DBGBUS_SSPP0, 44, 7 },
-
-	{ DBGBUS_SSPP0, 45, 0 },
-	{ DBGBUS_SSPP0, 45, 1 },
-	{ DBGBUS_SSPP0, 45, 2 },
-	{ DBGBUS_SSPP0, 45, 3 },
-	{ DBGBUS_SSPP0, 45, 4 },
-	{ DBGBUS_SSPP0, 45, 5 },
-	{ DBGBUS_SSPP0, 45, 6 },
-	{ DBGBUS_SSPP0, 45, 7 },
-
-	/* fetch sspp1 */
-	/* vig 1 */
-	{ DBGBUS_SSPP1, 0, 0 },
-	{ DBGBUS_SSPP1, 0, 1 },
-	{ DBGBUS_SSPP1, 0, 2 },
-	{ DBGBUS_SSPP1, 0, 3 },
-	{ DBGBUS_SSPP1, 0, 4 },
-	{ DBGBUS_SSPP1, 0, 5 },
-	{ DBGBUS_SSPP1, 0, 6 },
-	{ DBGBUS_SSPP1, 0, 7 },
-
-	{ DBGBUS_SSPP1, 1, 0 },
-	{ DBGBUS_SSPP1, 1, 1 },
-	{ DBGBUS_SSPP1, 1, 2 },
-	{ DBGBUS_SSPP1, 1, 3 },
-	{ DBGBUS_SSPP1, 1, 4 },
-	{ DBGBUS_SSPP1, 1, 5 },
-	{ DBGBUS_SSPP1, 1, 6 },
-	{ DBGBUS_SSPP1, 1, 7 },
-
-	{ DBGBUS_SSPP1, 2, 0 },
-	{ DBGBUS_SSPP1, 2, 1 },
-	{ DBGBUS_SSPP1, 2, 2 },
-	{ DBGBUS_SSPP1, 2, 3 },
-	{ DBGBUS_SSPP1, 2, 4 },
-	{ DBGBUS_SSPP1, 2, 5 },
-	{ DBGBUS_SSPP1, 2, 6 },
-	{ DBGBUS_SSPP1, 2, 7 },
-
-	{ DBGBUS_SSPP1, 4, 0 },
-	{ DBGBUS_SSPP1, 4, 1 },
-	{ DBGBUS_SSPP1, 4, 2 },
-	{ DBGBUS_SSPP1, 4, 3 },
-	{ DBGBUS_SSPP1, 4, 4 },
-	{ DBGBUS_SSPP1, 4, 5 },
-	{ DBGBUS_SSPP1, 4, 6 },
-	{ DBGBUS_SSPP1, 4, 7 },
-
-	{ DBGBUS_SSPP1, 5, 0 },
-	{ DBGBUS_SSPP1, 5, 1 },
-	{ DBGBUS_SSPP1, 5, 2 },
-	{ DBGBUS_SSPP1, 5, 3 },
-	{ DBGBUS_SSPP1, 5, 4 },
-	{ DBGBUS_SSPP1, 5, 5 },
-	{ DBGBUS_SSPP1, 5, 6 },
-	{ DBGBUS_SSPP1, 5, 7 },
-
-	/* vig 3 */
-	{ DBGBUS_SSPP1, 20, 0 },
-	{ DBGBUS_SSPP1, 20, 1 },
-	{ DBGBUS_SSPP1, 20, 2 },
-	{ DBGBUS_SSPP1, 20, 3 },
-	{ DBGBUS_SSPP1, 20, 4 },
-	{ DBGBUS_SSPP1, 20, 5 },
-	{ DBGBUS_SSPP1, 20, 6 },
-	{ DBGBUS_SSPP1, 20, 7 },
-
-	{ DBGBUS_SSPP1, 21, 0 },
-	{ DBGBUS_SSPP1, 21, 1 },
-	{ DBGBUS_SSPP1, 21, 2 },
-	{ DBGBUS_SSPP1, 21, 3 },
-	{ DBGBUS_SSPP1, 21, 4 },
-	{ DBGBUS_SSPP1, 21, 5 },
-	{ DBGBUS_SSPP1, 21, 6 },
-	{ DBGBUS_SSPP1, 21, 7 },
-
-	{ DBGBUS_SSPP1, 22, 0 },
-	{ DBGBUS_SSPP1, 22, 1 },
-	{ DBGBUS_SSPP1, 22, 2 },
-	{ DBGBUS_SSPP1, 22, 3 },
-	{ DBGBUS_SSPP1, 22, 4 },
-	{ DBGBUS_SSPP1, 22, 5 },
-	{ DBGBUS_SSPP1, 22, 6 },
-	{ DBGBUS_SSPP1, 22, 7 },
-
-	{ DBGBUS_SSPP1, 24, 0 },
-	{ DBGBUS_SSPP1, 24, 1 },
-	{ DBGBUS_SSPP1, 24, 2 },
-	{ DBGBUS_SSPP1, 24, 3 },
-	{ DBGBUS_SSPP1, 24, 4 },
-	{ DBGBUS_SSPP1, 24, 5 },
-	{ DBGBUS_SSPP1, 24, 6 },
-	{ DBGBUS_SSPP1, 24, 7 },
-
-	{ DBGBUS_SSPP1, 25, 0 },
-	{ DBGBUS_SSPP1, 25, 1 },
-	{ DBGBUS_SSPP1, 25, 2 },
-	{ DBGBUS_SSPP1, 25, 3 },
-	{ DBGBUS_SSPP1, 25, 4 },
-	{ DBGBUS_SSPP1, 25, 5 },
-	{ DBGBUS_SSPP1, 25, 6 },
-	{ DBGBUS_SSPP1, 25, 7 },
-
-	/* dma 3 */
-	{ DBGBUS_SSPP1, 30, 0 },
-	{ DBGBUS_SSPP1, 30, 1 },
-	{ DBGBUS_SSPP1, 30, 2 },
-	{ DBGBUS_SSPP1, 30, 3 },
-	{ DBGBUS_SSPP1, 30, 4 },
-	{ DBGBUS_SSPP1, 30, 5 },
-	{ DBGBUS_SSPP1, 30, 6 },
-	{ DBGBUS_SSPP1, 30, 7 },
-
-	{ DBGBUS_SSPP1, 31, 0 },
-	{ DBGBUS_SSPP1, 31, 1 },
-	{ DBGBUS_SSPP1, 31, 2 },
-	{ DBGBUS_SSPP1, 31, 3 },
-	{ DBGBUS_SSPP1, 31, 4 },
-	{ DBGBUS_SSPP1, 31, 5 },
-	{ DBGBUS_SSPP1, 31, 6 },
-	{ DBGBUS_SSPP1, 31, 7 },
-
-	{ DBGBUS_SSPP1, 32, 0 },
-	{ DBGBUS_SSPP1, 32, 1 },
-	{ DBGBUS_SSPP1, 32, 2 },
-	{ DBGBUS_SSPP1, 32, 3 },
-	{ DBGBUS_SSPP1, 32, 4 },
-	{ DBGBUS_SSPP1, 32, 5 },
-	{ DBGBUS_SSPP1, 32, 6 },
-	{ DBGBUS_SSPP1, 32, 7 },
-
-	{ DBGBUS_SSPP1, 33, 0 },
-	{ DBGBUS_SSPP1, 33, 1 },
-	{ DBGBUS_SSPP1, 33, 2 },
-	{ DBGBUS_SSPP1, 33, 3 },
-	{ DBGBUS_SSPP1, 33, 4 },
-	{ DBGBUS_SSPP1, 33, 5 },
-	{ DBGBUS_SSPP1, 33, 6 },
-	{ DBGBUS_SSPP1, 33, 7 },
-
-	{ DBGBUS_SSPP1, 34, 0 },
-	{ DBGBUS_SSPP1, 34, 1 },
-	{ DBGBUS_SSPP1, 34, 2 },
-	{ DBGBUS_SSPP1, 34, 3 },
-	{ DBGBUS_SSPP1, 34, 4 },
-	{ DBGBUS_SSPP1, 34, 5 },
-	{ DBGBUS_SSPP1, 34, 6 },
-	{ DBGBUS_SSPP1, 34, 7 },
-
-	/* dma 1 */
-	{ DBGBUS_SSPP1, 40, 0 },
-	{ DBGBUS_SSPP1, 40, 1 },
-	{ DBGBUS_SSPP1, 40, 2 },
-	{ DBGBUS_SSPP1, 40, 3 },
-	{ DBGBUS_SSPP1, 40, 4 },
-	{ DBGBUS_SSPP1, 40, 5 },
-	{ DBGBUS_SSPP1, 40, 6 },
-	{ DBGBUS_SSPP1, 40, 7 },
-
-	{ DBGBUS_SSPP1, 41, 0 },
-	{ DBGBUS_SSPP1, 41, 1 },
-	{ DBGBUS_SSPP1, 41, 2 },
-	{ DBGBUS_SSPP1, 41, 3 },
-	{ DBGBUS_SSPP1, 41, 4 },
-	{ DBGBUS_SSPP1, 41, 5 },
-	{ DBGBUS_SSPP1, 41, 6 },
-	{ DBGBUS_SSPP1, 41, 7 },
-
-	{ DBGBUS_SSPP1, 42, 0 },
-	{ DBGBUS_SSPP1, 42, 1 },
-	{ DBGBUS_SSPP1, 42, 2 },
-	{ DBGBUS_SSPP1, 42, 3 },
-	{ DBGBUS_SSPP1, 42, 4 },
-	{ DBGBUS_SSPP1, 42, 5 },
-	{ DBGBUS_SSPP1, 42, 6 },
-	{ DBGBUS_SSPP1, 42, 7 },
-
-	{ DBGBUS_SSPP1, 44, 0 },
-	{ DBGBUS_SSPP1, 44, 1 },
-	{ DBGBUS_SSPP1, 44, 2 },
-	{ DBGBUS_SSPP1, 44, 3 },
-	{ DBGBUS_SSPP1, 44, 4 },
-	{ DBGBUS_SSPP1, 44, 5 },
-	{ DBGBUS_SSPP1, 44, 6 },
-	{ DBGBUS_SSPP1, 44, 7 },
-
-	{ DBGBUS_SSPP1, 45, 0 },
-	{ DBGBUS_SSPP1, 45, 1 },
-	{ DBGBUS_SSPP1, 45, 2 },
-	{ DBGBUS_SSPP1, 45, 3 },
-	{ DBGBUS_SSPP1, 45, 4 },
-	{ DBGBUS_SSPP1, 45, 5 },
-	{ DBGBUS_SSPP1, 45, 6 },
-	{ DBGBUS_SSPP1, 45, 7 },
-
-	/* ppb_0 */
-	{ DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 47, 0, _sde_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 49, 0, _sde_debug_bus_ppb0_dump },
-
-	/* ppb_1 */
-	{ DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 48, 0, _sde_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 50, 0, _sde_debug_bus_ppb1_dump },
-
-	/* crossbar */
-	{ DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump },
-
-	/* rotator */
-	{ DBGBUS_DSPP, 9, 0},
-
-	/* ltm */
-	{ DBGBUS_DSPP, 45, 0},
-	{ DBGBUS_DSPP, 45, 1},
-	{ DBGBUS_DSPP, 45, 2},
-	{ DBGBUS_DSPP, 45, 3},
-	{ DBGBUS_DSPP, 45, 4},
-	{ DBGBUS_DSPP, 45, 5},
-	{ DBGBUS_DSPP, 45, 6},
-	{ DBGBUS_DSPP, 45, 7},
-
-	{ DBGBUS_DSPP, 46, 0},
-	{ DBGBUS_DSPP, 46, 1},
-	{ DBGBUS_DSPP, 46, 2},
-	{ DBGBUS_DSPP, 46, 3},
-	{ DBGBUS_DSPP, 46, 4},
-	{ DBGBUS_DSPP, 46, 5},
-	{ DBGBUS_DSPP, 46, 6},
-	{ DBGBUS_DSPP, 46, 7},
-
-	/* blend */
-	/* LM0 */
-	{ DBGBUS_DSPP, 63, 1},
-	{ DBGBUS_DSPP, 63, 2},
-	{ DBGBUS_DSPP, 63, 3},
-	{ DBGBUS_DSPP, 63, 4},
-	{ DBGBUS_DSPP, 63, 5},
-	{ DBGBUS_DSPP, 63, 6},
-	{ DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 64, 1},
-	{ DBGBUS_DSPP, 64, 2},
-	{ DBGBUS_DSPP, 64, 3},
-	{ DBGBUS_DSPP, 64, 4},
-	{ DBGBUS_DSPP, 64, 5},
-	{ DBGBUS_DSPP, 64, 6},
-	{ DBGBUS_DSPP, 64, 7},
-
-	{ DBGBUS_DSPP, 65, 1},
-	{ DBGBUS_DSPP, 65, 2},
-	{ DBGBUS_DSPP, 65, 3},
-	{ DBGBUS_DSPP, 65, 4},
-	{ DBGBUS_DSPP, 65, 5},
-	{ DBGBUS_DSPP, 65, 6},
-	{ DBGBUS_DSPP, 65, 7},
-
-	{ DBGBUS_DSPP, 66, 1},
-	{ DBGBUS_DSPP, 66, 2},
-	{ DBGBUS_DSPP, 66, 3},
-	{ DBGBUS_DSPP, 66, 4},
-	{ DBGBUS_DSPP, 66, 5},
-	{ DBGBUS_DSPP, 66, 6},
-	{ DBGBUS_DSPP, 66, 7},
-
-	{ DBGBUS_DSPP, 67, 1},
-	{ DBGBUS_DSPP, 67, 2},
-	{ DBGBUS_DSPP, 67, 3},
-	{ DBGBUS_DSPP, 67, 4},
-	{ DBGBUS_DSPP, 67, 5},
-	{ DBGBUS_DSPP, 67, 6},
-	{ DBGBUS_DSPP, 67, 7},
-
-	{ DBGBUS_DSPP, 68, 1},
-	{ DBGBUS_DSPP, 68, 2},
-	{ DBGBUS_DSPP, 68, 3},
-	{ DBGBUS_DSPP, 68, 4},
-	{ DBGBUS_DSPP, 68, 5},
-	{ DBGBUS_DSPP, 68, 6},
-	{ DBGBUS_DSPP, 68, 7},
-
-	{ DBGBUS_DSPP, 69, 1},
-	{ DBGBUS_DSPP, 69, 2},
-	{ DBGBUS_DSPP, 69, 3},
-	{ DBGBUS_DSPP, 69, 4},
-	{ DBGBUS_DSPP, 69, 5},
-	{ DBGBUS_DSPP, 69, 6},
-	{ DBGBUS_DSPP, 69, 7},
-
-	{ DBGBUS_DSPP, 84, 1},
-	{ DBGBUS_DSPP, 84, 2},
-	{ DBGBUS_DSPP, 84, 3},
-	{ DBGBUS_DSPP, 84, 4},
-	{ DBGBUS_DSPP, 84, 5},
-	{ DBGBUS_DSPP, 84, 6},
-	{ DBGBUS_DSPP, 84, 7},
-
-	{ DBGBUS_DSPP, 85, 1},
-	{ DBGBUS_DSPP, 85, 2},
-	{ DBGBUS_DSPP, 85, 3},
-	{ DBGBUS_DSPP, 85, 4},
-	{ DBGBUS_DSPP, 85, 5},
-	{ DBGBUS_DSPP, 85, 6},
-	{ DBGBUS_DSPP, 85, 7},
-
-	{ DBGBUS_DSPP, 86, 1},
-	{ DBGBUS_DSPP, 86, 2},
-	{ DBGBUS_DSPP, 86, 3},
-	{ DBGBUS_DSPP, 86, 4},
-	{ DBGBUS_DSPP, 86, 5},
-	{ DBGBUS_DSPP, 86, 6},
-	{ DBGBUS_DSPP, 86, 7},
-
-	{ DBGBUS_DSPP, 87, 1},
-	{ DBGBUS_DSPP, 87, 2},
-	{ DBGBUS_DSPP, 87, 3},
-	{ DBGBUS_DSPP, 87, 4},
-	{ DBGBUS_DSPP, 87, 5},
-	{ DBGBUS_DSPP, 87, 6},
-	{ DBGBUS_DSPP, 87, 7},
-
-	/* LM1 */
-	{ DBGBUS_DSPP, 70, 1},
-	{ DBGBUS_DSPP, 70, 2},
-	{ DBGBUS_DSPP, 70, 3},
-	{ DBGBUS_DSPP, 70, 4},
-	{ DBGBUS_DSPP, 70, 5},
-	{ DBGBUS_DSPP, 70, 6},
-	{ DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 71, 1},
-	{ DBGBUS_DSPP, 71, 2},
-	{ DBGBUS_DSPP, 71, 3},
-	{ DBGBUS_DSPP, 71, 4},
-	{ DBGBUS_DSPP, 71, 5},
-	{ DBGBUS_DSPP, 71, 6},
-	{ DBGBUS_DSPP, 71, 7},
-
-	{ DBGBUS_DSPP, 72, 1},
-	{ DBGBUS_DSPP, 72, 2},
-	{ DBGBUS_DSPP, 72, 3},
-	{ DBGBUS_DSPP, 72, 4},
-	{ DBGBUS_DSPP, 72, 5},
-	{ DBGBUS_DSPP, 72, 6},
-	{ DBGBUS_DSPP, 72, 7},
-
-	{ DBGBUS_DSPP, 73, 1},
-	{ DBGBUS_DSPP, 73, 2},
-	{ DBGBUS_DSPP, 73, 3},
-	{ DBGBUS_DSPP, 73, 4},
-	{ DBGBUS_DSPP, 73, 5},
-	{ DBGBUS_DSPP, 73, 6},
-	{ DBGBUS_DSPP, 73, 7},
-
-	{ DBGBUS_DSPP, 74, 1},
-	{ DBGBUS_DSPP, 74, 2},
-	{ DBGBUS_DSPP, 74, 3},
-	{ DBGBUS_DSPP, 74, 4},
-	{ DBGBUS_DSPP, 74, 5},
-	{ DBGBUS_DSPP, 74, 6},
-	{ DBGBUS_DSPP, 74, 7},
-
-	{ DBGBUS_DSPP, 75, 1},
-	{ DBGBUS_DSPP, 75, 2},
-	{ DBGBUS_DSPP, 75, 3},
-	{ DBGBUS_DSPP, 75, 4},
-	{ DBGBUS_DSPP, 75, 5},
-	{ DBGBUS_DSPP, 75, 6},
-	{ DBGBUS_DSPP, 75, 7},
-
-	{ DBGBUS_DSPP, 76, 1},
-	{ DBGBUS_DSPP, 76, 2},
-	{ DBGBUS_DSPP, 76, 3},
-	{ DBGBUS_DSPP, 76, 4},
-	{ DBGBUS_DSPP, 76, 5},
-	{ DBGBUS_DSPP, 76, 6},
-	{ DBGBUS_DSPP, 76, 7},
-
-	{ DBGBUS_DSPP, 88, 1},
-	{ DBGBUS_DSPP, 88, 2},
-	{ DBGBUS_DSPP, 88, 3},
-	{ DBGBUS_DSPP, 88, 4},
-	{ DBGBUS_DSPP, 88, 5},
-	{ DBGBUS_DSPP, 88, 6},
-	{ DBGBUS_DSPP, 88, 7},
-
-	{ DBGBUS_DSPP, 89, 1},
-	{ DBGBUS_DSPP, 89, 2},
-	{ DBGBUS_DSPP, 89, 3},
-	{ DBGBUS_DSPP, 89, 4},
-	{ DBGBUS_DSPP, 89, 5},
-	{ DBGBUS_DSPP, 89, 6},
-	{ DBGBUS_DSPP, 89, 7},
-
-	{ DBGBUS_DSPP, 90, 1},
-	{ DBGBUS_DSPP, 90, 2},
-	{ DBGBUS_DSPP, 90, 3},
-	{ DBGBUS_DSPP, 90, 4},
-	{ DBGBUS_DSPP, 90, 5},
-	{ DBGBUS_DSPP, 90, 6},
-	{ DBGBUS_DSPP, 90, 7},
-
-	{ DBGBUS_DSPP, 91, 1},
-	{ DBGBUS_DSPP, 91, 2},
-	{ DBGBUS_DSPP, 91, 3},
-	{ DBGBUS_DSPP, 91, 4},
-	{ DBGBUS_DSPP, 91, 5},
-	{ DBGBUS_DSPP, 91, 6},
-	{ DBGBUS_DSPP, 91, 7},
-
-	/* LM2 */
-	{ DBGBUS_DSPP, 77, 0},
-	{ DBGBUS_DSPP, 77, 1},
-	{ DBGBUS_DSPP, 77, 2},
-	{ DBGBUS_DSPP, 77, 3},
-	{ DBGBUS_DSPP, 77, 4},
-	{ DBGBUS_DSPP, 77, 5},
-	{ DBGBUS_DSPP, 77, 6},
-	{ DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 78, 0},
-	{ DBGBUS_DSPP, 78, 1},
-	{ DBGBUS_DSPP, 78, 2},
-	{ DBGBUS_DSPP, 78, 3},
-	{ DBGBUS_DSPP, 78, 4},
-	{ DBGBUS_DSPP, 78, 5},
-	{ DBGBUS_DSPP, 78, 6},
-	{ DBGBUS_DSPP, 78, 7},
-
-	{ DBGBUS_DSPP, 79, 0},
-	{ DBGBUS_DSPP, 79, 1},
-	{ DBGBUS_DSPP, 79, 2},
-	{ DBGBUS_DSPP, 79, 3},
-	{ DBGBUS_DSPP, 79, 4},
-	{ DBGBUS_DSPP, 79, 5},
-	{ DBGBUS_DSPP, 79, 6},
-	{ DBGBUS_DSPP, 79, 7},
-
-	{ DBGBUS_DSPP, 80, 0},
-	{ DBGBUS_DSPP, 80, 1},
-	{ DBGBUS_DSPP, 80, 2},
-	{ DBGBUS_DSPP, 80, 3},
-	{ DBGBUS_DSPP, 80, 4},
-	{ DBGBUS_DSPP, 80, 5},
-	{ DBGBUS_DSPP, 80, 6},
-	{ DBGBUS_DSPP, 80, 7},
-
-	{ DBGBUS_DSPP, 81, 0},
-	{ DBGBUS_DSPP, 81, 1},
-	{ DBGBUS_DSPP, 81, 2},
-	{ DBGBUS_DSPP, 81, 3},
-	{ DBGBUS_DSPP, 81, 4},
-	{ DBGBUS_DSPP, 81, 5},
-	{ DBGBUS_DSPP, 81, 6},
-	{ DBGBUS_DSPP, 81, 7},
-
-	{ DBGBUS_DSPP, 82, 0},
-	{ DBGBUS_DSPP, 82, 1},
-	{ DBGBUS_DSPP, 82, 2},
-	{ DBGBUS_DSPP, 82, 3},
-	{ DBGBUS_DSPP, 82, 4},
-	{ DBGBUS_DSPP, 82, 5},
-	{ DBGBUS_DSPP, 82, 6},
-	{ DBGBUS_DSPP, 82, 7},
-
-	{ DBGBUS_DSPP, 83, 0},
-	{ DBGBUS_DSPP, 83, 1},
-	{ DBGBUS_DSPP, 83, 2},
-	{ DBGBUS_DSPP, 83, 3},
-	{ DBGBUS_DSPP, 83, 4},
-	{ DBGBUS_DSPP, 83, 5},
-	{ DBGBUS_DSPP, 83, 6},
-	{ DBGBUS_DSPP, 83, 7},
-
-	{ DBGBUS_DSPP, 92, 1},
-	{ DBGBUS_DSPP, 92, 2},
-	{ DBGBUS_DSPP, 92, 3},
-	{ DBGBUS_DSPP, 92, 4},
-	{ DBGBUS_DSPP, 92, 5},
-	{ DBGBUS_DSPP, 92, 6},
-	{ DBGBUS_DSPP, 92, 7},
-
-	{ DBGBUS_DSPP, 93, 1},
-	{ DBGBUS_DSPP, 93, 2},
-	{ DBGBUS_DSPP, 93, 3},
-	{ DBGBUS_DSPP, 93, 4},
-	{ DBGBUS_DSPP, 93, 5},
-	{ DBGBUS_DSPP, 93, 6},
-	{ DBGBUS_DSPP, 93, 7},
-
-	{ DBGBUS_DSPP, 94, 1},
-	{ DBGBUS_DSPP, 94, 2},
-	{ DBGBUS_DSPP, 94, 3},
-	{ DBGBUS_DSPP, 94, 4},
-	{ DBGBUS_DSPP, 94, 5},
-	{ DBGBUS_DSPP, 94, 6},
-	{ DBGBUS_DSPP, 94, 7},
-
-	{ DBGBUS_DSPP, 95, 1},
-	{ DBGBUS_DSPP, 95, 2},
-	{ DBGBUS_DSPP, 95, 3},
-	{ DBGBUS_DSPP, 95, 4},
-	{ DBGBUS_DSPP, 95, 5},
-	{ DBGBUS_DSPP, 95, 6},
-	{ DBGBUS_DSPP, 95, 7},
-
-	/* LM3 */
-	{ DBGBUS_DSPP, 110, 1},
-	{ DBGBUS_DSPP, 110, 2},
-	{ DBGBUS_DSPP, 110, 3},
-	{ DBGBUS_DSPP, 110, 4},
-	{ DBGBUS_DSPP, 110, 5},
-	{ DBGBUS_DSPP, 110, 6},
-	{ DBGBUS_DSPP, 110, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 111, 1},
-	{ DBGBUS_DSPP, 111, 2},
-	{ DBGBUS_DSPP, 111, 3},
-	{ DBGBUS_DSPP, 111, 4},
-	{ DBGBUS_DSPP, 111, 5},
-	{ DBGBUS_DSPP, 111, 6},
-	{ DBGBUS_DSPP, 111, 7},
-
-	{ DBGBUS_DSPP, 112, 1},
-	{ DBGBUS_DSPP, 112, 2},
-	{ DBGBUS_DSPP, 112, 3},
-	{ DBGBUS_DSPP, 112, 4},
-	{ DBGBUS_DSPP, 112, 5},
-	{ DBGBUS_DSPP, 112, 6},
-	{ DBGBUS_DSPP, 112, 7},
-
-	{ DBGBUS_DSPP, 113, 1},
-	{ DBGBUS_DSPP, 113, 2},
-	{ DBGBUS_DSPP, 113, 3},
-	{ DBGBUS_DSPP, 113, 4},
-	{ DBGBUS_DSPP, 113, 5},
-	{ DBGBUS_DSPP, 113, 6},
-	{ DBGBUS_DSPP, 113, 7},
-
-	{ DBGBUS_DSPP, 114, 1},
-	{ DBGBUS_DSPP, 114, 2},
-	{ DBGBUS_DSPP, 114, 3},
-	{ DBGBUS_DSPP, 114, 4},
-	{ DBGBUS_DSPP, 114, 5},
-	{ DBGBUS_DSPP, 114, 6},
-	{ DBGBUS_DSPP, 114, 7},
-
-	{ DBGBUS_DSPP, 115, 1},
-	{ DBGBUS_DSPP, 115, 2},
-	{ DBGBUS_DSPP, 115, 3},
-	{ DBGBUS_DSPP, 115, 4},
-	{ DBGBUS_DSPP, 115, 5},
-	{ DBGBUS_DSPP, 115, 6},
-	{ DBGBUS_DSPP, 115, 7},
-
-	{ DBGBUS_DSPP, 116, 1},
-	{ DBGBUS_DSPP, 116, 2},
-	{ DBGBUS_DSPP, 116, 3},
-	{ DBGBUS_DSPP, 116, 4},
-	{ DBGBUS_DSPP, 116, 5},
-	{ DBGBUS_DSPP, 116, 6},
-	{ DBGBUS_DSPP, 116, 7},
-
-	{ DBGBUS_DSPP, 117, 1},
-	{ DBGBUS_DSPP, 117, 2},
-	{ DBGBUS_DSPP, 117, 3},
-	{ DBGBUS_DSPP, 117, 4},
-	{ DBGBUS_DSPP, 117, 5},
-	{ DBGBUS_DSPP, 117, 6},
-	{ DBGBUS_DSPP, 117, 7},
-
-	{ DBGBUS_DSPP, 118, 1},
-	{ DBGBUS_DSPP, 118, 2},
-	{ DBGBUS_DSPP, 118, 3},
-	{ DBGBUS_DSPP, 118, 4},
-	{ DBGBUS_DSPP, 118, 5},
-	{ DBGBUS_DSPP, 118, 6},
-	{ DBGBUS_DSPP, 118, 7},
-
-	{ DBGBUS_DSPP, 119, 1},
-	{ DBGBUS_DSPP, 119, 2},
-	{ DBGBUS_DSPP, 119, 3},
-	{ DBGBUS_DSPP, 119, 4},
-	{ DBGBUS_DSPP, 119, 5},
-	{ DBGBUS_DSPP, 119, 6},
-	{ DBGBUS_DSPP, 119, 7},
-
-	{ DBGBUS_DSPP, 120, 1},
-	{ DBGBUS_DSPP, 120, 2},
-	{ DBGBUS_DSPP, 120, 3},
-	{ DBGBUS_DSPP, 120, 4},
-	{ DBGBUS_DSPP, 120, 5},
-	{ DBGBUS_DSPP, 120, 6},
-	{ DBGBUS_DSPP, 120, 7},
-
-	/* LM4 */
-	{ DBGBUS_DSPP, 96, 1},
-	{ DBGBUS_DSPP, 96, 2},
-	{ DBGBUS_DSPP, 96, 3},
-	{ DBGBUS_DSPP, 96, 4},
-	{ DBGBUS_DSPP, 96, 5},
-	{ DBGBUS_DSPP, 96, 6},
-	{ DBGBUS_DSPP, 96, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 97, 1},
-	{ DBGBUS_DSPP, 97, 2},
-	{ DBGBUS_DSPP, 97, 3},
-	{ DBGBUS_DSPP, 97, 4},
-	{ DBGBUS_DSPP, 97, 5},
-	{ DBGBUS_DSPP, 97, 6},
-	{ DBGBUS_DSPP, 97, 7},
-
-	{ DBGBUS_DSPP, 98, 1},
-	{ DBGBUS_DSPP, 98, 2},
-	{ DBGBUS_DSPP, 98, 3},
-	{ DBGBUS_DSPP, 98, 4},
-	{ DBGBUS_DSPP, 98, 5},
-	{ DBGBUS_DSPP, 98, 6},
-	{ DBGBUS_DSPP, 98, 7},
-
-	{ DBGBUS_DSPP, 99, 1},
-	{ DBGBUS_DSPP, 99, 2},
-	{ DBGBUS_DSPP, 99, 3},
-	{ DBGBUS_DSPP, 99, 4},
-	{ DBGBUS_DSPP, 99, 5},
-	{ DBGBUS_DSPP, 99, 6},
-	{ DBGBUS_DSPP, 99, 7},
-
-	{ DBGBUS_DSPP, 100, 1},
-	{ DBGBUS_DSPP, 100, 2},
-	{ DBGBUS_DSPP, 100, 3},
-	{ DBGBUS_DSPP, 100, 4},
-	{ DBGBUS_DSPP, 100, 5},
-	{ DBGBUS_DSPP, 100, 6},
-	{ DBGBUS_DSPP, 100, 7},
-
-	{ DBGBUS_DSPP, 101, 1},
-	{ DBGBUS_DSPP, 101, 2},
-	{ DBGBUS_DSPP, 101, 3},
-	{ DBGBUS_DSPP, 101, 4},
-	{ DBGBUS_DSPP, 101, 5},
-	{ DBGBUS_DSPP, 101, 6},
-	{ DBGBUS_DSPP, 101, 7},
-
-	{ DBGBUS_DSPP, 103, 1},
-	{ DBGBUS_DSPP, 103, 2},
-	{ DBGBUS_DSPP, 103, 3},
-	{ DBGBUS_DSPP, 103, 4},
-	{ DBGBUS_DSPP, 103, 5},
-	{ DBGBUS_DSPP, 103, 6},
-	{ DBGBUS_DSPP, 103, 7},
-
-	{ DBGBUS_DSPP, 105, 1},
-	{ DBGBUS_DSPP, 105, 2},
-	{ DBGBUS_DSPP, 105, 3},
-	{ DBGBUS_DSPP, 105, 4},
-	{ DBGBUS_DSPP, 105, 5},
-	{ DBGBUS_DSPP, 105, 6},
-	{ DBGBUS_DSPP, 105, 7},
-
-	{ DBGBUS_DSPP, 106, 1},
-	{ DBGBUS_DSPP, 106, 2},
-	{ DBGBUS_DSPP, 106, 3},
-	{ DBGBUS_DSPP, 106, 4},
-	{ DBGBUS_DSPP, 106, 5},
-	{ DBGBUS_DSPP, 106, 6},
-	{ DBGBUS_DSPP, 106, 7},
-
-	{ DBGBUS_DSPP, 109, 1},
-	{ DBGBUS_DSPP, 109, 2},
-	{ DBGBUS_DSPP, 109, 3},
-	{ DBGBUS_DSPP, 109, 4},
-	{ DBGBUS_DSPP, 109, 5},
-	{ DBGBUS_DSPP, 109, 6},
-	{ DBGBUS_DSPP, 109, 7},
-
-	{ DBGBUS_DSPP, 122, 1},
-	{ DBGBUS_DSPP, 122, 2},
-	{ DBGBUS_DSPP, 122, 3},
-	{ DBGBUS_DSPP, 122, 4},
-	{ DBGBUS_DSPP, 122, 5},
-	{ DBGBUS_DSPP, 122, 6},
-	{ DBGBUS_DSPP, 122, 7},
-
-	/* LM5 */
-	{ DBGBUS_DSPP, 124, 1},
-	{ DBGBUS_DSPP, 124, 2},
-	{ DBGBUS_DSPP, 124, 3},
-	{ DBGBUS_DSPP, 124, 4},
-	{ DBGBUS_DSPP, 124, 5},
-	{ DBGBUS_DSPP, 124, 6},
-	{ DBGBUS_DSPP, 124, 7, _sde_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 125, 1},
-	{ DBGBUS_DSPP, 125, 2},
-	{ DBGBUS_DSPP, 125, 3},
-	{ DBGBUS_DSPP, 125, 4},
-	{ DBGBUS_DSPP, 125, 5},
-	{ DBGBUS_DSPP, 125, 6},
-	{ DBGBUS_DSPP, 125, 7},
-
-	{ DBGBUS_DSPP, 126, 1},
-	{ DBGBUS_DSPP, 126, 2},
-	{ DBGBUS_DSPP, 126, 3},
-	{ DBGBUS_DSPP, 126, 4},
-	{ DBGBUS_DSPP, 126, 5},
-	{ DBGBUS_DSPP, 126, 6},
-	{ DBGBUS_DSPP, 126, 7},
-
-	{ DBGBUS_DSPP, 127, 1},
-	{ DBGBUS_DSPP, 127, 2},
-	{ DBGBUS_DSPP, 127, 3},
-	{ DBGBUS_DSPP, 127, 4},
-	{ DBGBUS_DSPP, 127, 5},
-	{ DBGBUS_DSPP, 127, 6},
-	{ DBGBUS_DSPP, 127, 7},
-
-	{ DBGBUS_DSPP, 128, 1},
-	{ DBGBUS_DSPP, 128, 2},
-	{ DBGBUS_DSPP, 128, 3},
-	{ DBGBUS_DSPP, 128, 4},
-	{ DBGBUS_DSPP, 128, 5},
-	{ DBGBUS_DSPP, 128, 6},
-	{ DBGBUS_DSPP, 128, 7},
-
-	{ DBGBUS_DSPP, 129, 1},
-	{ DBGBUS_DSPP, 129, 2},
-	{ DBGBUS_DSPP, 129, 3},
-	{ DBGBUS_DSPP, 129, 4},
-	{ DBGBUS_DSPP, 129, 5},
-	{ DBGBUS_DSPP, 129, 6},
-	{ DBGBUS_DSPP, 129, 7},
-
-	{ DBGBUS_DSPP, 131, 1},
-	{ DBGBUS_DSPP, 131, 2},
-	{ DBGBUS_DSPP, 131, 3},
-	{ DBGBUS_DSPP, 131, 4},
-	{ DBGBUS_DSPP, 131, 5},
-	{ DBGBUS_DSPP, 131, 6},
-	{ DBGBUS_DSPP, 131, 7},
-
-	{ DBGBUS_DSPP, 132, 1},
-	{ DBGBUS_DSPP, 132, 2},
-	{ DBGBUS_DSPP, 132, 3},
-	{ DBGBUS_DSPP, 132, 4},
-	{ DBGBUS_DSPP, 132, 5},
-	{ DBGBUS_DSPP, 132, 6},
-	{ DBGBUS_DSPP, 132, 7},
-
-	{ DBGBUS_DSPP, 133, 1},
-	{ DBGBUS_DSPP, 133, 2},
-	{ DBGBUS_DSPP, 133, 3},
-	{ DBGBUS_DSPP, 133, 4},
-	{ DBGBUS_DSPP, 133, 5},
-	{ DBGBUS_DSPP, 133, 6},
-	{ DBGBUS_DSPP, 133, 7},
-
-	{ DBGBUS_DSPP, 134, 1},
-	{ DBGBUS_DSPP, 134, 2},
-	{ DBGBUS_DSPP, 134, 3},
-	{ DBGBUS_DSPP, 134, 4},
-	{ DBGBUS_DSPP, 134, 5},
-	{ DBGBUS_DSPP, 134, 6},
-	{ DBGBUS_DSPP, 134, 7},
-
-	{ DBGBUS_DSPP, 135, 1},
-	{ DBGBUS_DSPP, 135, 2},
-	{ DBGBUS_DSPP, 135, 3},
-	{ DBGBUS_DSPP, 135, 4},
-	{ DBGBUS_DSPP, 135, 5},
-	{ DBGBUS_DSPP, 135, 6},
-	{ DBGBUS_DSPP, 135, 7},
-
-	/* csc */
-	{ DBGBUS_SSPP0, 7, 0},
-	{ DBGBUS_SSPP0, 7, 1},
-	{ DBGBUS_SSPP0, 7, 2},
-	{ DBGBUS_SSPP0, 27, 0},
-	{ DBGBUS_SSPP0, 27, 1},
-	{ DBGBUS_SSPP0, 27, 2},
-	{ DBGBUS_SSPP1, 7, 0},
-	{ DBGBUS_SSPP1, 7, 1},
-	{ DBGBUS_SSPP1, 7, 2},
-	{ DBGBUS_SSPP1, 27, 0},
-	{ DBGBUS_SSPP1, 27, 1},
-	{ DBGBUS_SSPP1, 27, 2},
-
-	/* pcc */
-	{ DBGBUS_SSPP0, 43, 3},
-	{ DBGBUS_SSPP0, 47, 3},
-	{ DBGBUS_SSPP1, 43, 3},
-	{ DBGBUS_SSPP1, 47, 3},
-
-	/* spa */
-	{ DBGBUS_SSPP0, 8,  0},
-	{ DBGBUS_SSPP0, 28, 0},
-	{ DBGBUS_SSPP1, 8,  0},
-	{ DBGBUS_SSPP1, 28, 0},
-
-	/* dspp pa */
-	{ DBGBUS_DSPP, 13, 0},
-	{ DBGBUS_DSPP, 19, 0},
-	{ DBGBUS_DSPP, 24, 0},
-	{ DBGBUS_DSPP, 37, 0},
-
-	/* igc */
-	{ DBGBUS_SSPP0, 39, 0},
-	{ DBGBUS_SSPP0, 39, 1},
-	{ DBGBUS_SSPP0, 39, 2},
-
-	{ DBGBUS_SSPP1, 39, 0},
-	{ DBGBUS_SSPP1, 39, 1},
-	{ DBGBUS_SSPP1, 39, 2},
-
-	{ DBGBUS_SSPP0, 46, 0},
-	{ DBGBUS_SSPP0, 46, 1},
-	{ DBGBUS_SSPP0, 46, 2},
-
-	{ DBGBUS_SSPP1, 46, 0},
-	{ DBGBUS_SSPP1, 46, 1},
-	{ DBGBUS_SSPP1, 46, 2},
-
-	{ DBGBUS_DSPP, 14, 0},
-	{ DBGBUS_DSPP, 14, 1},
-	{ DBGBUS_DSPP, 14, 2},
-	{ DBGBUS_DSPP, 20, 0},
-	{ DBGBUS_DSPP, 20, 1},
-	{ DBGBUS_DSPP, 20, 2},
-	{ DBGBUS_DSPP, 25, 0},
-	{ DBGBUS_DSPP, 25, 1},
-	{ DBGBUS_DSPP, 25, 2},
-	{ DBGBUS_DSPP, 38, 0},
-	{ DBGBUS_DSPP, 38, 1},
-	{ DBGBUS_DSPP, 38, 2},
-
-	/* intf0-3 */
-	{ DBGBUS_PERIPH, 0, 0},
-	{ DBGBUS_PERIPH, 1, 0},
-	{ DBGBUS_PERIPH, 2, 0},
-	{ DBGBUS_PERIPH, 3, 0},
-	{ DBGBUS_PERIPH, 4, 0},
-	{ DBGBUS_PERIPH, 5, 0},
-
-	/* te counter wrapper */
-	{ DBGBUS_PERIPH, 60, 0},
-	{ DBGBUS_PERIPH, 60, 1},
-	{ DBGBUS_PERIPH, 60, 2},
-	{ DBGBUS_PERIPH, 60, 3},
-	{ DBGBUS_PERIPH, 60, 4},
-	{ DBGBUS_PERIPH, 60, 5},
-
-	/* dsc0 */
-	{ DBGBUS_PERIPH, 47, 0},
-	{ DBGBUS_PERIPH, 47, 1},
-	{ DBGBUS_PERIPH, 47, 2},
-	{ DBGBUS_PERIPH, 47, 3},
-	{ DBGBUS_PERIPH, 47, 4},
-	{ DBGBUS_PERIPH, 47, 5},
-	{ DBGBUS_PERIPH, 47, 6},
-	{ DBGBUS_PERIPH, 47, 7},
-
-	/* dsc1 */
-	{ DBGBUS_PERIPH, 48, 0},
-	{ DBGBUS_PERIPH, 48, 1},
-	{ DBGBUS_PERIPH, 48, 2},
-	{ DBGBUS_PERIPH, 48, 3},
-	{ DBGBUS_PERIPH, 48, 4},
-	{ DBGBUS_PERIPH, 48, 5},
-	{ DBGBUS_PERIPH, 48, 6},
-	{ DBGBUS_PERIPH, 48, 7},
-
-	/* dsc2 */
-	{ DBGBUS_PERIPH, 50, 0},
-	{ DBGBUS_PERIPH, 50, 1},
-	{ DBGBUS_PERIPH, 50, 2},
-	{ DBGBUS_PERIPH, 50, 3},
-	{ DBGBUS_PERIPH, 50, 4},
-	{ DBGBUS_PERIPH, 50, 5},
-	{ DBGBUS_PERIPH, 50, 6},
-	{ DBGBUS_PERIPH, 50, 7},
-
-	/* dsc3 */
-	{ DBGBUS_PERIPH, 51, 0},
-	{ DBGBUS_PERIPH, 51, 1},
-	{ DBGBUS_PERIPH, 51, 2},
-	{ DBGBUS_PERIPH, 51, 3},
-	{ DBGBUS_PERIPH, 51, 4},
-	{ DBGBUS_PERIPH, 51, 5},
-	{ DBGBUS_PERIPH, 51, 6},
-	{ DBGBUS_PERIPH, 51, 7},
-
-	/* dsc4 */
-	{ DBGBUS_PERIPH, 52, 0},
-	{ DBGBUS_PERIPH, 52, 1},
-	{ DBGBUS_PERIPH, 52, 2},
-	{ DBGBUS_PERIPH, 52, 3},
-	{ DBGBUS_PERIPH, 52, 4},
-	{ DBGBUS_PERIPH, 52, 5},
-	{ DBGBUS_PERIPH, 52, 6},
-	{ DBGBUS_PERIPH, 52, 7},
-
-	/* dsc5 */
-	{ DBGBUS_PERIPH, 53, 0},
-	{ DBGBUS_PERIPH, 53, 1},
-	{ DBGBUS_PERIPH, 53, 2},
-	{ DBGBUS_PERIPH, 53, 3},
-	{ DBGBUS_PERIPH, 53, 4},
-	{ DBGBUS_PERIPH, 53, 5},
-	{ DBGBUS_PERIPH, 53, 6},
-	{ DBGBUS_PERIPH, 53, 7},
-
-	/* tear-check */
-	/* INTF_0 */
-	{ DBGBUS_PERIPH, 63, 0 },
-	{ DBGBUS_PERIPH, 63, 1 },
-	{ DBGBUS_PERIPH, 63, 2 },
-	{ DBGBUS_PERIPH, 63, 3 },
-	{ DBGBUS_PERIPH, 63, 4 },
-	{ DBGBUS_PERIPH, 63, 5 },
-	{ DBGBUS_PERIPH, 63, 6 },
-	{ DBGBUS_PERIPH, 63, 7 },
-
-	/* INTF_1 */
-	{ DBGBUS_PERIPH, 64, 0 },
-	{ DBGBUS_PERIPH, 64, 1 },
-	{ DBGBUS_PERIPH, 64, 2 },
-	{ DBGBUS_PERIPH, 64, 3 },
-	{ DBGBUS_PERIPH, 64, 4 },
-	{ DBGBUS_PERIPH, 64, 5 },
-	{ DBGBUS_PERIPH, 64, 6 },
-	{ DBGBUS_PERIPH, 64, 7 },
-
-	/* INTF_2 */
-	{ DBGBUS_PERIPH, 65, 0 },
-	{ DBGBUS_PERIPH, 65, 1 },
-	{ DBGBUS_PERIPH, 65, 2 },
-	{ DBGBUS_PERIPH, 65, 3 },
-	{ DBGBUS_PERIPH, 65, 4 },
-	{ DBGBUS_PERIPH, 65, 5 },
-	{ DBGBUS_PERIPH, 65, 6 },
-	{ DBGBUS_PERIPH, 65, 7 },
-
-	/* INTF_4 */
-	{ DBGBUS_PERIPH, 66, 0 },
-	{ DBGBUS_PERIPH, 66, 1 },
-	{ DBGBUS_PERIPH, 66, 2 },
-	{ DBGBUS_PERIPH, 66, 3 },
-	{ DBGBUS_PERIPH, 66, 4 },
-	{ DBGBUS_PERIPH, 66, 5 },
-	{ DBGBUS_PERIPH, 66, 6 },
-	{ DBGBUS_PERIPH, 66, 7 },
-
-	/* INTF_5 */
-	{ DBGBUS_PERIPH, 67, 0 },
-	{ DBGBUS_PERIPH, 67, 1 },
-	{ DBGBUS_PERIPH, 67, 2 },
-	{ DBGBUS_PERIPH, 67, 3 },
-	{ DBGBUS_PERIPH, 67, 4 },
-	{ DBGBUS_PERIPH, 67, 5 },
-	{ DBGBUS_PERIPH, 67, 6 },
-	{ DBGBUS_PERIPH, 67, 7 },
-
-	/* INTF_3 */
-	{ DBGBUS_PERIPH, 73, 0 },
-	{ DBGBUS_PERIPH, 73, 1 },
-	{ DBGBUS_PERIPH, 73, 2 },
-	{ DBGBUS_PERIPH, 73, 3 },
-	{ DBGBUS_PERIPH, 73, 4 },
-	{ DBGBUS_PERIPH, 73, 5 },
-	{ DBGBUS_PERIPH, 73, 6 },
-	{ DBGBUS_PERIPH, 73, 7 },
-
-	/* cdwn */
-	{ DBGBUS_PERIPH, 80, 0},
-	{ DBGBUS_PERIPH, 80, 1},
-	{ DBGBUS_PERIPH, 80, 2},
-
-	{ DBGBUS_PERIPH, 81, 0},
-	{ DBGBUS_PERIPH, 81, 1},
-	{ DBGBUS_PERIPH, 81, 2},
-
-	{ DBGBUS_PERIPH, 82, 0},
-	{ DBGBUS_PERIPH, 82, 1},
-	{ DBGBUS_PERIPH, 82, 2},
-	{ DBGBUS_PERIPH, 82, 3},
-	{ DBGBUS_PERIPH, 82, 4},
-	{ DBGBUS_PERIPH, 82, 5},
-	{ DBGBUS_PERIPH, 82, 6},
-	{ DBGBUS_PERIPH, 82, 7},
-
-	/* DPTX1 */
-	{ DBGBUS_PERIPH, 68, 0},
-	{ DBGBUS_PERIPH, 68, 1},
-	{ DBGBUS_PERIPH, 68, 2},
-	{ DBGBUS_PERIPH, 68, 3},
-	{ DBGBUS_PERIPH, 68, 4},
-	{ DBGBUS_PERIPH, 68, 5},
-	{ DBGBUS_PERIPH, 68, 6},
-	{ DBGBUS_PERIPH, 68, 7},
-
-	/* DP */
-	{ DBGBUS_PERIPH, 69, 0},
-	{ DBGBUS_PERIPH, 69, 1},
-	{ DBGBUS_PERIPH, 69, 2},
-	{ DBGBUS_PERIPH, 69, 3},
-	{ DBGBUS_PERIPH, 69, 4},
-	{ DBGBUS_PERIPH, 69, 5},
-
-	/* dsi0 */
-	{ DBGBUS_PERIPH, 70, 0},
-	{ DBGBUS_PERIPH, 70, 1},
-	{ DBGBUS_PERIPH, 70, 2},
-	{ DBGBUS_PERIPH, 70, 3},
-	{ DBGBUS_PERIPH, 70, 4},
-	{ DBGBUS_PERIPH, 70, 5},
-
-	/* dsi1 */
-	{ DBGBUS_PERIPH, 71, 0},
-	{ DBGBUS_PERIPH, 71, 1},
-	{ DBGBUS_PERIPH, 71, 2},
-	{ DBGBUS_PERIPH, 71, 3},
-	{ DBGBUS_PERIPH, 71, 4},
-	{ DBGBUS_PERIPH, 71, 5},
-
-	/* eDP */
-	{ DBGBUS_PERIPH, 72, 0},
-	{ DBGBUS_PERIPH, 72, 1},
-	{ DBGBUS_PERIPH, 72, 2},
-	{ DBGBUS_PERIPH, 72, 3},
-	{ DBGBUS_PERIPH, 72, 4},
-	{ DBGBUS_PERIPH, 72, 5},
-
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
-	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
-	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
-	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
-	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
-	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
-	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
-	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _sde_dbg_enable_power - use callback to turn power on for hw register access
- * @enable: whether to turn power on or off
- * Return: zero if success; error code otherwise
- */
-static inline int _sde_dbg_enable_power(int enable)
-{
-	if (!sde_dbg_base.power_ctrl.enable_fn)
-		return -EINVAL;
-	return sde_dbg_base.power_ctrl.enable_fn(
-			sde_dbg_base.power_ctrl.handle,
-			sde_dbg_base.power_ctrl.client,
-			enable);
-}
-
-/**
- * _sde_dump_reg - helper function for dumping rotator register set content
- * @dump_name: register set name
- * @reg_dump_flag: dumping flag controlling in-log/memory dump location
- * @base_addr: starting address of io region for calculating offsets to print
- * @addr: starting address offset for dumping
- * @len_bytes: range of the register set
- * @dump_mem: output buffer for memory dump location option
- * @from_isr: whether being called from isr context
- */
-static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
-		char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem,
-		bool from_isr)
-{
-	u32 in_log, in_mem, len_align, len_padded;
-	u32 *dump_addr = NULL;
-	char *end_addr;
-	int i;
-	int rc;
-
-	if (!len_bytes)
-		return;
-
-	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
-	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
-
-	pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
-		dump_name, reg_dump_flag, in_log, in_mem);
-
-	if (!in_log && !in_mem)
-		return;
-
-	if (in_log)
-		dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
-				dump_name, (unsigned long)(addr - base_addr),
-					len_bytes);
-
-	len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
-	len_padded = len_align * REG_DUMP_ALIGN;
-	end_addr = addr + len_bytes;
-
-	if (in_mem) {
-		if (dump_mem && !(*dump_mem)) {
-			phys_addr_t phys = 0;
-			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
-					len_padded, &phys, GFP_KERNEL);
-		}
-
-		if (dump_mem && *dump_mem) {
-			dump_addr = *dump_mem;
-			dev_info(sde_dbg_base.dev,
-				"%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
-				dump_name, dump_addr, len_padded,
-				(unsigned long)(addr - base_addr));
-		} else {
-			in_mem = 0;
-			pr_err("dump_mem: kzalloc fails!\n");
-		}
-	}
-
-	if (!from_isr) {
-		rc = _sde_dbg_enable_power(true);
-		if (rc) {
-			pr_err("failed to enable power %d\n", rc);
-			return;
-		}
-	}
-
-	for (i = 0; i < len_align; i++) {
-		u32 x0, x4, x8, xc;
-
-		x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
-		x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
-		x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
-		xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
-
-		if (in_log)
-			dev_info(sde_dbg_base.dev,
-					"0x%lx : %08x %08x %08x %08x\n",
-					(unsigned long)(addr - base_addr),
-					x0, x4, x8, xc);
-
-		if (dump_addr) {
-			dump_addr[i * 4] = x0;
-			dump_addr[i * 4 + 1] = x4;
-			dump_addr[i * 4 + 2] = x8;
-			dump_addr[i * 4 + 3] = xc;
-		}
-
-		addr += REG_DUMP_ALIGN;
-	}
-
-	if (!from_isr)
-		_sde_dbg_enable_power(false);
-}
-
-/**
- * _sde_dbg_get_dump_range - helper to retrieve dump length for a range node
- * @range_node: range node to dump
- * @max_offset: max offset of the register base
- * @Return: length
- */
-static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node,
-		size_t max_offset)
-{
-	u32 length = 0;
-
-	if (range_node->start == 0 && range_node->end == 0) {
-		length = max_offset;
-	} else if (range_node->start < max_offset) {
-		if (range_node->end > max_offset)
-			length = max_offset - range_node->start;
-		else if (range_node->start < range_node->end)
-			length = range_node->end - range_node->start;
-	}
-
-	return length;
-}
-
-static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
-		struct list_head *b)
-{
-	struct sde_dbg_reg_range *ar, *br;
-
-	if (!a || !b)
-		return 0;
-
-	ar = container_of(a, struct sde_dbg_reg_range, head);
-	br = container_of(b, struct sde_dbg_reg_range, head);
-
-	return ar->offset.start - br->offset.start;
-}
-
-/**
- * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
- * @dbg: register blk base structure
- * @reg_dump_flag: dump target, memory, kernel log, or both
- */
-static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
-	u32 reg_dump_flag)
-{
-	char *addr;
-	size_t len;
-	struct sde_dbg_reg_range *range_node;
-
-	if (!dbg || !(dbg->base || dbg->cb)) {
-		pr_err("dbg base is null!\n");
-		return;
-	}
-
-	dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
-			dbg->name);
-	if (dbg->cb) {
-		dbg->cb(dbg->cb_ptr);
-	/* If there is a list to dump the registers by ranges, use the ranges */
-	} else if (!list_empty(&dbg->sub_range_list)) {
-		/* sort the list by start address first */
-		list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
-		list_for_each_entry(range_node, &dbg->sub_range_list, head) {
-			len = _sde_dbg_get_dump_range(&range_node->offset,
-				dbg->max_offset);
-			addr = dbg->base + range_node->offset.start;
-			pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
-				range_node->range_name,
-				addr, range_node->offset.start,
-				range_node->offset.end);
-
-			_sde_dump_reg(range_node->range_name, reg_dump_flag,
-					dbg->base, addr, len,
-					&range_node->reg_dump, false);
-		}
-	} else {
-		/* If there is no list to dump ranges, dump all registers */
-		dev_info(sde_dbg_base.dev,
-				"Ranges not found, will dump full registers\n");
-		dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
-				dbg->max_offset);
-		addr = dbg->base;
-		len = dbg->max_offset;
-		_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
-				&dbg->reg_dump, false);
-	}
-}
-
-/**
- * _sde_dump_reg_by_blk - dump a named register base region
- * @blk_name: register blk name
- */
-static void _sde_dump_reg_by_blk(const char *blk_name)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *blk_base;
-
-	if (!dbg_base)
-		return;
-
-	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
-		if (strlen(blk_base->name) &&
-			!strcmp(blk_base->name, blk_name)) {
-			_sde_dump_reg_by_ranges(blk_base,
-				dbg_base->enable_reg_dump);
-			break;
-		}
-	}
-}
-
-/**
- * _sde_dump_reg_all - dump all register regions
- */
-static void _sde_dump_reg_all(void)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *blk_base;
-
-	if (!dbg_base)
-		return;
-
-	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
-		if (strlen(blk_base->name))
-			_sde_dump_reg_by_blk(blk_base->name);
-}
-
-/**
- * _sde_dump_get_blk_addr - retrieve register block address by name
- * @blk_name: register blk name
- * @Return: register blk base, or NULL
- */
-static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *blk_base;
-
-	list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
-		if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name))
-			return blk_base;
-
-	return NULL;
-}
-
-static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
-{
-	bool in_log, in_mem;
-	u32 **dump_mem = NULL;
-	u32 *dump_addr = NULL;
-	u32 status = 0;
-	struct sde_debug_bus_entry *head;
-	phys_addr_t phys = 0;
-	int list_size;
-	int i;
-	u32 offset;
-	void __iomem *mem_base = NULL;
-	struct sde_dbg_reg_base *reg_base;
-	int rc;
-
-	if (!bus || !bus->cmn.entries_size)
-		return;
-
-	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
-			reg_base_head)
-		if (strlen(reg_base->name) &&
-			!strcmp(reg_base->name, bus->cmn.name))
-			mem_base = reg_base->base + bus->top_blk_off;
-
-	if (!mem_base) {
-		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-		return;
-	}
-
-	dump_mem = &bus->cmn.dumped_content;
-
-	/* will keep in memory 4 entries of 4 bytes each */
-	list_size = (bus->cmn.entries_size * 4 * 4);
-
-	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
-	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
-
-	if (!in_log && !in_mem)
-		return;
-
-	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
-			bus->cmn.name);
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
-				list_size, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			dev_info(sde_dbg_base.dev,
-				"%s: start_addr:0x%pK len:0x%x\n",
-				__func__, dump_addr, list_size);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	rc = _sde_dbg_enable_power(true);
-	if (rc) {
-		pr_err("failed to enable power %d\n", rc);
-		return;
-	}
-
-	for (i = 0; i < bus->cmn.entries_size; i++) {
-		head = bus->entries + i;
-		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
-				mem_base + head->wr_addr);
-		wmb(); /* make sure test bits were written */
-
-		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
-			offset = DBGBUS_DSPP_STATUS;
-			/* keep DSPP test point enabled */
-			if (head->wr_addr != DBGBUS_DSPP)
-				writel_relaxed(0x7001, mem_base + DBGBUS_DSPP);
-		} else {
-			offset = head->wr_addr + 0x4;
-		}
-
-		status = readl_relaxed(mem_base + offset);
-
-		if (in_log)
-			dev_info(sde_dbg_base.dev,
-					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
-					head->wr_addr, head->block_id,
-					head->test_id, status);
-
-		if (dump_addr && in_mem) {
-			dump_addr[i*4]     = head->wr_addr;
-			dump_addr[i*4 + 1] = head->block_id;
-			dump_addr[i*4 + 2] = head->test_id;
-			dump_addr[i*4 + 3] = status;
-		}
-
-		if (head->analyzer)
-			head->analyzer(mem_base, head, status);
-
-		/* Disable debug bus once we are done */
-		writel_relaxed(0, mem_base + head->wr_addr);
-		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
-						head->wr_addr != DBGBUS_DSPP)
-			writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
-	}
-	_sde_dbg_enable_power(false);
-
-	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
-			bus->cmn.name);
-}
-
-static void _sde_dbg_dump_vbif_debug_bus_entry(
-		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
-		u32 *dump_addr, bool in_log)
-{
-	int i, j;
-	u32 val;
-
-	if (!dump_addr && !in_log)
-		return;
-
-	for (i = 0; i < head->block_cnt; i++) {
-		writel_relaxed(1 << (i + head->bit_offset),
-				mem_base + head->block_bus_addr);
-		/* make sure that current bus blcok enable */
-		wmb();
-		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
-			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
-			/* make sure that test point is enabled */
-			wmb();
-			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
-			if (dump_addr) {
-				*dump_addr++ = head->block_bus_addr;
-				*dump_addr++ = i;
-				*dump_addr++ = j;
-				*dump_addr++ = val;
-			}
-			if (in_log)
-				dev_info(sde_dbg_base.dev,
-					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
-					head->block_bus_addr, i, j, val);
-		}
-	}
-}
-
-static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
-{
-	bool in_log, in_mem;
-	u32 **dump_mem = NULL;
-	u32 *dump_addr = NULL;
-	u32 value, d0, d1;
-	unsigned long reg, reg1, reg2;
-	struct vbif_debug_bus_entry *head;
-	phys_addr_t phys = 0;
-	int i, list_size = 0;
-	void __iomem *mem_base = NULL;
-	struct vbif_debug_bus_entry *dbg_bus;
-	u32 bus_size;
-	struct sde_dbg_reg_base *reg_base;
-	int rc;
-
-	if (!bus || !bus->cmn.entries_size)
-		return;
-
-	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
-			reg_base_head)
-		if (strlen(reg_base->name) &&
-			!strcmp(reg_base->name, bus->cmn.name))
-			mem_base = reg_base->base;
-
-	if (!mem_base) {
-		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-		return;
-	}
-
-	dbg_bus = bus->entries;
-	bus_size = bus->cmn.entries_size;
-	list_size = bus->cmn.entries_size;
-	dump_mem = &bus->cmn.dumped_content;
-
-	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
-			bus->cmn.name);
-
-	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
-		return;
-
-	/* allocate memory for each test point */
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-		list_size += (head->block_cnt * head->test_pnt_cnt);
-	}
-
-	/* 4 bytes * 4 entries for each test point*/
-	list_size *= 16;
-
-	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
-	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
-
-	if (!in_log && !in_mem)
-		return;
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
-				list_size, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			dev_info(sde_dbg_base.dev,
-				"%s: start_addr:0x%pK len:0x%x\n",
-				__func__, dump_addr, list_size);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	rc = _sde_dbg_enable_power(true);
-	if (rc) {
-		pr_err("failed to enable power %d\n", rc);
-		return;
-	}
-
-	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
-	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
-	/* make sure that vbif core is on */
-	wmb();
-
-	/**
-	 * Extract VBIF error info based on XIN halt and error status.
-	 * If the XIN client is not in HALT state, or an error is detected,
-	 * then retrieve the VBIF error info for it.
-	 */
-	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
-	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
-	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
-	dev_err(sde_dbg_base.dev,
-			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
-			reg, reg1, reg2);
-	reg >>= 16;
-	reg &= ~(reg1 | reg2);
-	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
-		if (!test_bit(0, &reg)) {
-			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
-			/* make sure reg write goes through */
-			wmb();
-
-			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
-			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
-			dev_err(sde_dbg_base.dev,
-					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
-					i, d0, d1);
-		}
-		reg >>= 1;
-	}
-
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-
-		writel_relaxed(0, mem_base + head->disable_bus_addr);
-		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
-		/* make sure that other bus is off */
-		wmb();
-
-		_sde_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
-				in_log);
-		if (dump_addr)
-			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
-	}
-
-	_sde_dbg_enable_power(false);
-
-	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
-			bus->cmn.name);
-}
-
-/**
- * _sde_dump_array - dump array of register bases
- * @blk_arr: array of register base pointers
- * @len: length of blk_arr
- * @do_panic: whether to trigger a panic after dumping
- * @name: string indicating origin of dump
- * @dump_dbgbus_sde: whether to dump the sde debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
-	u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde,
-	bool dump_dbgbus_vbif_rt, bool dump_all)
-{
-	int i;
-
-	mutex_lock(&sde_dbg_base.mutex);
-
-	if (dump_all)
-		sde_evtlog_dump_all(sde_dbg_base.evtlog);
-
-	if (dump_all || !blk_arr || !len) {
-		_sde_dump_reg_all();
-	} else {
-		for (i = 0; i < len; i++) {
-			if (blk_arr[i] != NULL)
-				_sde_dump_reg_by_ranges(blk_arr[i],
-					sde_dbg_base.enable_reg_dump);
-		}
-	}
-
-	if (dump_dbgbus_sde)
-		_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
-
-	if (dump_dbgbus_vbif_rt)
-		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
-
-	if (sde_dbg_base.dsi_dbg_bus || dump_all)
-		dsi_ctrl_debug_dump(sde_dbg_base.dbgbus_dsi.entries,
-				    sde_dbg_base.dbgbus_dsi.size);
-
-	if (do_panic && sde_dbg_base.panic_on_err)
-		panic(name);
-
-	mutex_unlock(&sde_dbg_base.mutex);
-}
-
-/**
- * _sde_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _sde_dump_work(struct work_struct *work)
-{
-	_sde_dump_array(sde_dbg_base.req_dump_blks,
-		ARRAY_SIZE(sde_dbg_base.req_dump_blks),
-		sde_dbg_base.work_panic, "evtlog_workitem",
-		sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work,
-		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work,
-		sde_dbg_base.dump_all);
-}
-
-void sde_dbg_dump(bool queue_work, const char *name, ...)
-{
-	int i, index = 0;
-	bool do_panic = false;
-	bool dump_dbgbus_sde = false;
-	bool dump_dbgbus_vbif_rt = false;
-	bool dump_all = false;
-	va_list args;
-	char *blk_name = NULL;
-	struct sde_dbg_reg_base *blk_base = NULL;
-	struct sde_dbg_reg_base **blk_arr;
-	u32 blk_len;
-
-	if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_ALWAYS))
-		return;
-
-	if (queue_work && work_pending(&sde_dbg_base.dump_work))
-		return;
-
-	blk_arr = &sde_dbg_base.req_dump_blks[0];
-	blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks);
-
-	memset(sde_dbg_base.req_dump_blks, 0,
-			sizeof(sde_dbg_base.req_dump_blks));
-	sde_dbg_base.dump_all = false;
-
-	va_start(args, name);
-	i = 0;
-	while ((blk_name = va_arg(args, char*))) {
-		if (i++ >= SDE_EVTLOG_MAX_DATA) {
-			pr_err("could not parse all dump arguments\n");
-			break;
-		}
-		if (IS_ERR_OR_NULL(blk_name))
-			break;
-
-		blk_base = _sde_dump_get_blk_addr(blk_name);
-		if (blk_base) {
-			if (index < blk_len) {
-				blk_arr[index] = blk_base;
-				index++;
-			} else {
-				pr_err("insufficient space to to dump %s\n",
-						blk_name);
-			}
-		}
-
-		if (!strcmp(blk_name, "all"))
-			dump_all = true;
-
-		if (!strcmp(blk_name, "dbg_bus"))
-			dump_dbgbus_sde = true;
-
-		if (!strcmp(blk_name, "vbif_dbg_bus"))
-			dump_dbgbus_vbif_rt = true;
-
-		if (!strcmp(blk_name, "dsi_dbg_bus"))
-			sde_dbg_base.dsi_dbg_bus = true;
-
-		if (!strcmp(blk_name, "panic"))
-			do_panic = true;
-	}
-	va_end(args);
-
-	if (queue_work) {
-		/* schedule work to dump later */
-		sde_dbg_base.work_panic = do_panic;
-		sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =
-				dump_dbgbus_sde;
-		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
-				dump_dbgbus_vbif_rt;
-		sde_dbg_base.dump_all = dump_all;
-		schedule_work(&sde_dbg_base.dump_work);
-	} else {
-		_sde_dump_array(blk_arr, blk_len, do_panic, name,
-				dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all);
-	}
-}
-
-void sde_dbg_ctrl(const char *name, ...)
-{
-	int i = 0;
-	va_list args;
-	char *blk_name = NULL;
-
-	/* no debugfs controlled events are enabled, just return */
-	if (!sde_dbg_base.debugfs_ctrl)
-		return;
-
-	va_start(args, name);
-
-	while ((blk_name = va_arg(args, char*))) {
-		if (i++ >= SDE_EVTLOG_MAX_DATA) {
-			pr_err("could not parse all dbg arguments\n");
-			break;
-		}
-
-		if (IS_ERR_OR_NULL(blk_name))
-			break;
-
-		if (!strcmp(blk_name, "stop_ftrace") &&
-				sde_dbg_base.debugfs_ctrl &
-				DBG_CTRL_STOP_FTRACE) {
-			pr_debug("tracing off\n");
-			tracing_off();
-		}
-
-		if (!strcmp(blk_name, "panic_underrun") &&
-				sde_dbg_base.debugfs_ctrl &
-				DBG_CTRL_PANIC_UNDERRUN) {
-			pr_err("panic underrun\n");
-			SDE_DBG_DUMP_WQ("all", "dbg_bus", "vbif_dbg_bus",
-					"panic");
-		}
-
-		if (!strcmp(blk_name, "reset_hw_panic") &&
-				sde_dbg_base.debugfs_ctrl &
-				DBG_CTRL_RESET_HW_PANIC) {
-			pr_debug("reset hw panic\n");
-			panic("reset_hw");
-		}
-	}
-
-	va_end(args);
-}
-
-
-/*
- * sde_dbg_debugfs_open - debugfs open handler for evtlog dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int sde_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
-	if (!inode || !file)
-		return -EINVAL;
-
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-	mutex_lock(&sde_dbg_base.mutex);
-	sde_dbg_base.cur_evt_index = 0;
-	sde_dbg_base.evtlog->first = sde_dbg_base.evtlog->curr + 1;
-	sde_dbg_base.evtlog->last =
-		sde_dbg_base.evtlog->first + SDE_EVTLOG_ENTRY;
-	mutex_unlock(&sde_dbg_base.mutex);
-	return 0;
-}
-
-/**
- * sde_evtlog_dump_read - debugfs read handler for evtlog dump
- * @file: file handler
- * @buff: user buffer content for debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
-
-	if (!buff || !ppos)
-		return -EINVAL;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog,
-			evtlog_buf, SDE_EVTLOG_BUF_MAX,
-			!sde_dbg_base.cur_evt_index, true);
-	sde_dbg_base.cur_evt_index++;
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	if (len < 0 || len > count) {
-		pr_err("len is more than user buffer size\n");
-		return 0;
-	}
-
-	if (copy_to_user(buff, evtlog_buf, len))
-		return -EFAULT;
-	*ppos += len;
-
-	return len;
-}
-
-/**
- * sde_evtlog_dump_write - debugfs write handler for evtlog dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_evtlog_dump_write(struct file *file,
-	const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	_sde_dump_array(NULL, 0, sde_dbg_base.panic_on_err, "dump_debugfs",
-		true, true, true);
-
-	return count;
-}
-
-static const struct file_operations sde_evtlog_fops = {
-	.open = sde_dbg_debugfs_open,
-	.read = sde_evtlog_dump_read,
-	.write = sde_evtlog_dump_write,
-};
-
-/**
- * sde_dbg_ctrl_read - debugfs read handler for debug ctrl read
- * @file: file handler
- * @buff: user buffer content for debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	char buf[24] = {'\0'};
-
-	if (!buff || !ppos)
-		return -EINVAL;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
-	pr_debug("%s: ctrl:0x%x len:0x%zx\n",
-		__func__, sde_dbg_base.debugfs_ctrl, len);
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
-		pr_err("error copying the buffer! count:0x%zx\n", count);
-		return -EFAULT;
-	}
-
-	*ppos += len;	/* increase offset */
-	return len;
-}
-
-/**
- * sde_dbg_ctrl_write - debugfs read handler for debug ctrl write
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_ctrl_write(struct file *file,
-	const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	u32 dbg_ctrl = 0;
-	char buf[24];
-
-	if (!file) {
-		pr_err("DbgDbg: %s: error no file --\n", __func__);
-		return -EINVAL;
-	}
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0; /* end of string */
-
-	if (kstrtouint(buf, 0, &dbg_ctrl)) {
-		pr_err("%s: error in the number of bytes\n", __func__);
-		return -EFAULT;
-	}
-
-	pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl);
-	sde_dbg_base.debugfs_ctrl = dbg_ctrl;
-
-	return count;
-}
-
-static const struct file_operations sde_dbg_ctrl_fops = {
-	.open = sde_dbg_debugfs_open,
-	.read = sde_dbg_ctrl_read,
-	.write = sde_dbg_ctrl_write,
-};
-
-static int sde_recovery_regdump_open(struct inode *inode, struct file *file)
-{
-	if (!inode || !file)
-		return -EINVAL;
-
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-
-	/* initialize to start position */
-	sde_dbg_base.regbuf.rpos = 0;
-	sde_dbg_base.regbuf.cur_blk = NULL;
-	sde_dbg_base.regbuf.dump_done = false;
-
-	return 0;
-}
-
-static ssize_t _sde_dbg_dump_reg_rows(u32 reg_start,
-		void *start, int count, char *buf, int buflen)
-{
-	int i;
-	int len = 0;
-	u32 *addr;
-	u32  reg_offset = 0;
-	int rows = min(count / DUMP_CLMN_COUNT, DUMP_MAX_LINES_PER_BLK);
-
-	if (!start || !buf) {
-		pr_err("invalid address for dump\n");
-		return len;
-	}
-
-	if (buflen < PAGE_SIZE) {
-		pr_err("buffer too small for dump\n");
-		return len;
-	}
-
-	for (i = 0; i < rows; i++) {
-		addr = start + (i * DUMP_CLMN_COUNT * sizeof(u32));
-		reg_offset = reg_start + (i * DUMP_CLMN_COUNT * sizeof(u32));
-		if (buflen < (len + DUMP_LINE_SIZE))
-			break;
-
-		len += snprintf(buf + len, DUMP_LINE_SIZE,
-				"0x%.8X | %.8X %.8X %.8X %.8X\n",
-				reg_offset, addr[0], addr[1], addr[2], addr[3]);
-	}
-
-	return len;
-}
-
-static int  _sde_dbg_recovery_dump_sub_blk(struct sde_dbg_reg_range *sub_blk,
-		char  *buf, int buflen)
-{
-	int count = 0;
-	int len = 0;
-
-	if (!sub_blk || (buflen < PAGE_SIZE)) {
-		pr_err("invalid params buflen:%d subblk valid:%d\n",
-				buflen, sub_blk != NULL);
-		return len;
-	}
-
-	count = (sub_blk->offset.end - sub_blk->offset.start) / (sizeof(u32));
-	if (count < DUMP_CLMN_COUNT) {
-		pr_err("invalid count for register dumps :%d\n", count);
-		return len;
-	}
-
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"------------------------------------------\n");
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"**** sub block [%s] - size:%d ****\n",
-			sub_blk->range_name, count);
-	len += _sde_dbg_dump_reg_rows(sub_blk->offset.start, sub_blk->reg_dump,
-			count, buf + len, buflen - len);
-
-	return len;
-}
-
-static int  _sde_dbg_recovery_dump_reg_blk(struct sde_dbg_reg_base *blk,
-		char *buf, int buf_size, int *out_len)
-{
-	int ret = 0;
-	int len = 0;
-	struct sde_dbg_reg_range *sub_blk;
-
-	if (buf_size < PAGE_SIZE) {
-		pr_err("buffer too small for dump\n");
-		return len;
-	}
-
-	if (!blk || !strlen(blk->name)) {
-		len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"Found one invalid block - skip dump\n");
-		*out_len = len;
-		return len;
-	}
-
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"******************************************\n");
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"==========================================\n");
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"*********** DUMP of %s block *************\n",
-			blk->name);
-	len += snprintf(buf + len, DUMP_LINE_SIZE,
-			"count:%ld max-off:0x%lx has_sub_blk:%d\n",
-			blk->cnt, blk->max_offset,
-			!list_empty(&blk->sub_range_list));
-
-	if (list_empty(&blk->sub_range_list)) {
-		len += _sde_dbg_dump_reg_rows(0, blk->reg_dump,
-				blk->max_offset / sizeof(u32), buf + len,
-				buf_size - len);
-	} else {
-		list_for_each_entry(sub_blk, &blk->sub_range_list, head)
-			len += _sde_dbg_recovery_dump_sub_blk(sub_blk,
-					buf + len, buf_size - len);
-	}
-	*out_len = len;
-
-	return ret;
-}
-
-static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	int usize = 0;
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_regbuf *rbuf = &dbg_base->regbuf;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	if (!rbuf->dump_done && !rbuf->cur_blk) {
-		if (!rbuf->buf)
-			rbuf->buf = kzalloc(DUMP_BUF_SIZE, GFP_KERNEL);
-		if (!rbuf->buf) {
-			len =  -ENOMEM;
-			goto err;
-		}
-		rbuf->rpos = 0;
-		rbuf->len = 0;
-		rbuf->buf_size = DUMP_BUF_SIZE;
-
-		rbuf->cur_blk = list_first_entry(&dbg_base->reg_base_list,
-				struct sde_dbg_reg_base, reg_base_head);
-		if (rbuf->cur_blk)
-			_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
-					rbuf->buf,
-					rbuf->buf_size,
-					&rbuf->len);
-		pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
-				rbuf->cur_blk->name : "unknown", rbuf->len);
-	} else if (rbuf->len == rbuf->rpos && rbuf->cur_blk) {
-		rbuf->rpos = 0;
-		rbuf->len = 0;
-		rbuf->buf_size = DUMP_BUF_SIZE;
-
-		if (rbuf->cur_blk == list_last_entry(&dbg_base->reg_base_list,
-					struct sde_dbg_reg_base, reg_base_head))
-			rbuf->cur_blk = NULL;
-		else
-			rbuf->cur_blk = list_next_entry(rbuf->cur_blk,
-					reg_base_head);
-
-		if (rbuf->cur_blk)
-			_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
-					rbuf->buf,
-					rbuf->buf_size,
-					&rbuf->len);
-		pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
-				rbuf->cur_blk->name : "unknown", rbuf->len);
-	}
-
-	if ((rbuf->len - rbuf->rpos) > 0) {
-		usize = ((rbuf->len - rbuf->rpos) > count) ?
-			count  : rbuf->len - rbuf->rpos;
-		if (copy_to_user(ubuf, rbuf->buf + rbuf->rpos, usize)) {
-			len =  -EFAULT;
-			goto err;
-		}
-
-		len = usize;
-		rbuf->rpos += usize;
-		*ppos += usize;
-	}
-
-	if (!len && rbuf->buf)
-		rbuf->dump_done = true;
-err:
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return len;
-}
-
-static const struct file_operations sde_recovery_reg_fops = {
-	.open = sde_recovery_regdump_open,
-	.read = sde_recovery_regdump_read,
-};
-
-static int sde_recovery_dbgbus_dump_open(struct inode *inode, struct file *file)
-{
-	if (!inode || !file)
-		return -EINVAL;
-
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	sde_dbg_base.dbgbus_dump_idx = 0;
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return 0;
-}
-
-static ssize_t sde_recovery_dbgbus_dump_read(struct file *file,
-		char __user *buff,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
-	u32 *data;
-	struct sde_dbg_sde_debug_bus *bus;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	bus = &sde_dbg_base.dbgbus_sde;
-	if (!bus->cmn.dumped_content || !bus->cmn.entries_size)
-		goto dump_done;
-
-	if (sde_dbg_base.dbgbus_dump_idx <=
-			((bus->cmn.entries_size - 1) * DUMP_CLMN_COUNT)) {
-		data = &bus->cmn.dumped_content[
-			sde_dbg_base.dbgbus_dump_idx];
-		len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX,
-				"0x%.8X | %.8X %.8X %.8X %.8X\n",
-				sde_dbg_base.dbgbus_dump_idx,
-				data[0], data[1], data[2], data[3]);
-		sde_dbg_base.dbgbus_dump_idx += DUMP_CLMN_COUNT;
-		if ((count < len) || copy_to_user(buff, evtlog_buf, len)) {
-			len = -EFAULT;
-			goto dump_done;
-		}
-		*ppos += len;
-	}
-dump_done:
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return len;
-}
-
-static const struct file_operations sde_recovery_dbgbus_fops = {
-	.open = sde_recovery_dbgbus_dump_open,
-	.read = sde_recovery_dbgbus_dump_read,
-};
-
-static int sde_recovery_vbif_dbgbus_dump_open(struct inode *inode,
-		struct file *file)
-{
-	if (!inode || !file)
-		return -EINVAL;
-
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	sde_dbg_base.vbif_dbgbus_dump_idx = 0;
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return 0;
-}
-
-static ssize_t sde_recovery_vbif_dbgbus_dump_read(struct file *file,
-		char __user *buff,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
-	int i;
-	u32 *data;
-	u32 list_size = 0;
-	struct vbif_debug_bus_entry *head;
-	struct sde_dbg_vbif_debug_bus *bus;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	bus = &sde_dbg_base.dbgbus_vbif_rt;
-	if (!bus->cmn.dumped_content || !bus->cmn.entries_size)
-		goto dump_done;
-
-	/* calculate total number of test point */
-	for (i = 0; i < bus->cmn.entries_size; i++) {
-		head = bus->entries + i;
-		list_size += (head->block_cnt * head->test_pnt_cnt);
-	}
-
-	/* 4 entries for each test point*/
-	list_size *= DUMP_CLMN_COUNT;
-	if (sde_dbg_base.vbif_dbgbus_dump_idx < list_size) {
-		data = &bus->cmn.dumped_content[
-			sde_dbg_base.vbif_dbgbus_dump_idx];
-		len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX,
-				"0x%.8X | %.8X %.8X %.8X %.8X\n",
-				sde_dbg_base.vbif_dbgbus_dump_idx,
-				data[0], data[1], data[2], data[3]);
-		sde_dbg_base.vbif_dbgbus_dump_idx += DUMP_CLMN_COUNT;
-		if ((count < len) || copy_to_user(buff, evtlog_buf, len)) {
-			len = -EFAULT;
-			goto dump_done;
-		}
-		*ppos += len;
-	}
-dump_done:
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return len;
-}
-
-static const struct file_operations sde_recovery_vbif_dbgbus_fops = {
-	.open = sde_recovery_vbif_dbgbus_dump_open,
-	.read = sde_recovery_vbif_dbgbus_dump_read,
-};
-
-/**
- * sde_dbg_reg_base_release - release allocated reg dump file private data
- * @inode: debugfs inode
- * @file: file handle
- * @Return: 0 on success
- */
-static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
-{
-	struct sde_dbg_reg_base *dbg;
-
-	if (!file)
-		return -EINVAL;
-
-	dbg = file->private_data;
-	if (!dbg)
-		return -ENODEV;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	if (dbg && dbg->buf) {
-		kfree(dbg->buf);
-		dbg->buf_len = 0;
-		dbg->buf = NULL;
-	}
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return 0;
-}
-
-/**
- * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
- * @off: address offset in bytes
- * @cnt: memory size in bytes
- * Return: true if valid; false otherwise
- */
-static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
-{
-	static struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_range *node;
-	struct sde_dbg_reg_base *base;
-
-	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
-
-	list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
-		list_for_each_entry(node, &base->sub_range_list, head) {
-			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
-					node->offset.start, node->offset.end);
-
-			if (node->offset.start <= off
-					&& off <= node->offset.end
-					&& off + cnt <= node->offset.end) {
-				pr_debug("valid range requested\n");
-				return true;
-			}
-		}
-	}
-
-	pr_err("invalid range requested\n");
-	return false;
-}
-
-/**
- * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_dbg_reg_base *dbg;
-	u32 off = 0;
-	u32 cnt = DEFAULT_BASE_REG_CNT;
-	char buf[24];
-
-	if (!file)
-		return -EINVAL;
-
-	dbg = file->private_data;
-	if (!dbg)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (sscanf(buf, "%5x %x", &off, &cnt) != 2)
-		return -EFAULT;
-
-	if (off > dbg->max_offset)
-		return -EINVAL;
-
-	if (off % sizeof(u32))
-		return -EINVAL;
-
-	if (cnt > (dbg->max_offset - off))
-		cnt = dbg->max_offset - off;
-
-	if (cnt == 0)
-		return -EINVAL;
-
-	if (!sde_dbg_reg_base_is_valid_range(off, cnt))
-		return -EINVAL;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	dbg->off = off;
-	dbg->cnt = cnt;
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	pr_debug("offset=%x cnt=%x\n", off, cnt);
-
-	return count;
-}
-
-/**
- * sde_dbg_reg_base_offset_read - read current offset and len of register base
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
-			char __user *buff, size_t count, loff_t *ppos)
-{
-	struct sde_dbg_reg_base *dbg;
-	int len = 0;
-	char buf[24] = {'\0'};
-
-	if (!file)
-		return -EINVAL;
-
-	dbg = file->private_data;
-	if (!dbg)
-		return -ENODEV;
-
-	if (!ppos)
-		return -EINVAL;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	mutex_lock(&sde_dbg_base.mutex);
-	if (dbg->off % sizeof(u32)) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		return -EFAULT;
-	}
-
-	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
-	if (len < 0 || len >= sizeof(buf)) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		return 0;
-	}
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		return -EFAULT;
-	}
-
-	*ppos += len;	/* increase offset */
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return len;
-}
-
-/**
- * sde_dbg_reg_base_reg_write - write to reg base hw at offset a given value
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_dbg_reg_base *dbg;
-	size_t off;
-	u32 data, cnt;
-	char buf[24];
-	int rc;
-
-	if (!file)
-		return -EINVAL;
-
-	dbg = file->private_data;
-	if (!dbg)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	cnt = sscanf(buf, "%zx %x", &off, &data);
-
-	if (cnt < 2)
-		return -EFAULT;
-
-	if (off % sizeof(u32))
-		return -EFAULT;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	if (off >= dbg->max_offset) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		return -EFAULT;
-	}
-
-	rc = _sde_dbg_enable_power(true);
-	if (rc) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		pr_err("failed to enable power %d\n", rc);
-		return rc;
-	}
-
-	writel_relaxed(data, dbg->base + off);
-
-	_sde_dbg_enable_power(false);
-
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	pr_debug("addr=%zx data=%x\n", off, data);
-
-	return count;
-}
-
-/**
- * sde_dbg_reg_base_reg_read - read len from reg base hw at current offset
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_dbg_reg_base *dbg;
-	size_t len;
-	int rc;
-
-	if (!file)
-		return -EINVAL;
-
-	dbg = file->private_data;
-	if (!dbg) {
-		pr_err("invalid handle\n");
-		return -ENODEV;
-	}
-
-	if (!ppos)
-		return -EINVAL;
-
-	mutex_lock(&sde_dbg_base.mutex);
-	if (!dbg->buf) {
-		char dump_buf[64];
-		char *ptr;
-		int cnt, tot;
-
-		dbg->buf_len = sizeof(dump_buf) *
-			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
-		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
-
-		if (!dbg->buf) {
-			mutex_unlock(&sde_dbg_base.mutex);
-			return -ENOMEM;
-		}
-
-		if (dbg->off % sizeof(u32)) {
-			mutex_unlock(&sde_dbg_base.mutex);
-			return -EFAULT;
-		}
-
-		ptr = dbg->base + dbg->off;
-		tot = 0;
-
-		rc = _sde_dbg_enable_power(true);
-		if (rc) {
-			mutex_unlock(&sde_dbg_base.mutex);
-			pr_err("failed to enable power %d\n", rc);
-			return rc;
-		}
-
-		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
-			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
-					   ROW_BYTES, GROUP_BYTES, dump_buf,
-					   sizeof(dump_buf), false);
-			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
-					"0x%08x: %s\n",
-					((int) (unsigned long) ptr) -
-					((int) (unsigned long) dbg->base),
-					dump_buf);
-
-			ptr += ROW_BYTES;
-			tot += len;
-			if (tot >= dbg->buf_len)
-				break;
-		}
-
-		_sde_dbg_enable_power(false);
-
-		dbg->buf_len = tot;
-	}
-
-	if (*ppos >= dbg->buf_len) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		return 0; /* done reading */
-	}
-
-	len = min(count, dbg->buf_len - (size_t) *ppos);
-	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
-		mutex_unlock(&sde_dbg_base.mutex);
-		pr_err("failed to copy to user\n");
-		return -EFAULT;
-	}
-
-	*ppos += len; /* increase offset */
-	mutex_unlock(&sde_dbg_base.mutex);
-
-	return len;
-}
-
-static const struct file_operations sde_off_fops = {
-	.open = sde_dbg_debugfs_open,
-	.release = sde_dbg_reg_base_release,
-	.read = sde_dbg_reg_base_offset_read,
-	.write = sde_dbg_reg_base_offset_write,
-};
-
-static const struct file_operations sde_reg_fops = {
-	.open = sde_dbg_debugfs_open,
-	.release = sde_dbg_reg_base_release,
-	.read = sde_dbg_reg_base_reg_read,
-	.write = sde_dbg_reg_base_reg_write,
-};
-
-int sde_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-	static struct sde_dbg_base *dbg = &sde_dbg_base;
-	struct sde_dbg_reg_base *blk_base;
-	char debug_name[80] = "";
-
-	if (!debugfs_root)
-		return -EINVAL;
-
-	debugfs_create_file("dbg_ctrl", 0600, debugfs_root, NULL,
-			&sde_dbg_ctrl_fops);
-	debugfs_create_file("dump", 0600, debugfs_root, NULL,
-			&sde_evtlog_fops);
-	debugfs_create_u32("enable", 0600, debugfs_root,
-			&(sde_dbg_base.evtlog->enable));
-	debugfs_create_u32("panic", 0600, debugfs_root,
-			&sde_dbg_base.panic_on_err);
-	debugfs_create_u32("reg_dump", 0600, debugfs_root,
-			&sde_dbg_base.enable_reg_dump);
-	debugfs_create_file("recovery_reg", 0400, debugfs_root, NULL,
-			&sde_recovery_reg_fops);
-	debugfs_create_file("recovery_dbgbus", 0400, debugfs_root, NULL,
-			&sde_recovery_dbgbus_fops);
-	debugfs_create_file("recovery_vbif_dbgbus", 0400, debugfs_root, NULL,
-			&sde_recovery_vbif_dbgbus_fops);
-
-	if (dbg->dbgbus_sde.entries) {
-		dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
-		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-				dbg->dbgbus_sde.cmn.name);
-		dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
-		debugfs_create_u32(debug_name, 0600, debugfs_root,
-				&dbg->dbgbus_sde.cmn.enable_mask);
-	}
-
-	if (dbg->dbgbus_vbif_rt.entries) {
-		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
-		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-				dbg->dbgbus_vbif_rt.cmn.name);
-		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-		debugfs_create_u32(debug_name, 0600, debugfs_root,
-				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
-	}
-
-	list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
-		snprintf(debug_name, sizeof(debug_name), "%s_off",
-				blk_base->name);
-		debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
-				&sde_off_fops);
-
-		snprintf(debug_name, sizeof(debug_name), "%s_reg",
-				blk_base->name);
-		debugfs_create_file(debug_name, 0400, debugfs_root, blk_base,
-				&sde_reg_fops);
-	}
-
-	return 0;
-}
-
-static void _sde_dbg_debugfs_destroy(void)
-{
-}
-
-void sde_dbg_init_dbg_buses(u32 hwversion)
-{
-	static struct sde_dbg_base *dbg = &sde_dbg_base;
-
-	memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde));
-	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
-	if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion) ||
-				IS_SDMMAGPIE_TARGET(hwversion)) {
-		dbg->dbgbus_sde.entries = dbg_bus_sde_sm8150;
-		dbg->dbgbus_sde.cmn.entries_size =
-				ARRAY_SIZE(dbg_bus_sde_sm8150);
-		dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-		dbg->dbgbus_vbif_rt.cmn.entries_size =
-				ARRAY_SIZE(vbif_dbg_bus_msm8998);
-		dbg->dbgbus_dsi.entries = NULL;
-		dbg->dbgbus_dsi.size = 0;
-	} else if (IS_KONA_TARGET(hwversion)) {
-		dbg->dbgbus_sde.entries = dbg_bus_sde_kona;
-		dbg->dbgbus_sde.cmn.entries_size =
-				ARRAY_SIZE(dbg_bus_sde_kona);
-		dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-		dbg->dbgbus_vbif_rt.cmn.entries_size =
-				ARRAY_SIZE(vbif_dbg_bus_msm8998);
-	} else {
-		pr_err("unsupported chipset id %X\n", hwversion);
-	}
-}
-
-int sde_dbg_init(struct device *dev, struct sde_dbg_power_ctrl *power_ctrl)
-{
-	if (!dev || !power_ctrl) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_init(&sde_dbg_base.mutex);
-	INIT_LIST_HEAD(&sde_dbg_base.reg_base_list);
-	sde_dbg_base.dev = dev;
-	sde_dbg_base.power_ctrl = *power_ctrl;
-
-	sde_dbg_base.evtlog = sde_evtlog_init();
-	if (IS_ERR_OR_NULL(sde_dbg_base.evtlog))
-		return PTR_ERR(sde_dbg_base.evtlog);
-
-	sde_dbg_base_evtlog = sde_dbg_base.evtlog;
-
-	INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work);
-	sde_dbg_base.work_panic = false;
-	sde_dbg_base.panic_on_err = DEFAULT_PANIC;
-	sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP;
-	memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
-
-	pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
-		sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err,
-		sde_dbg_base.enable_reg_dump);
-
-	return 0;
-}
-
-static void sde_dbg_reg_base_destroy(void)
-{
-	struct sde_dbg_reg_range *range_node, *range_tmp;
-	struct sde_dbg_reg_base *blk_base, *blk_tmp;
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-
-	if (!dbg_base)
-		return;
-
-	list_for_each_entry_safe(blk_base, blk_tmp, &dbg_base->reg_base_list,
-							reg_base_head) {
-		list_for_each_entry_safe(range_node, range_tmp,
-				&blk_base->sub_range_list, head) {
-			list_del(&range_node->head);
-			kfree(range_node);
-		}
-		list_del(&blk_base->reg_base_head);
-		kfree(blk_base);
-	}
-}
-/**
- * sde_dbg_destroy - destroy sde debug facilities
- */
-void sde_dbg_destroy(void)
-{
-	kfree(sde_dbg_base.regbuf.buf);
-	memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
-	_sde_dbg_debugfs_destroy();
-	sde_dbg_base_evtlog = NULL;
-	sde_evtlog_destroy(sde_dbg_base.evtlog);
-	sde_dbg_base.evtlog = NULL;
-	sde_dbg_reg_base_destroy();
-	mutex_destroy(&sde_dbg_base.mutex);
-}
-
-int sde_dbg_reg_register_base(const char *name, void __iomem *base,
-		size_t max_offset)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *reg_base;
-
-	if (!name || !strlen(name)) {
-		pr_err("no debug name provided\n");
-		return -EINVAL;
-	}
-
-	reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
-	if (!reg_base)
-		return -ENOMEM;
-
-	strlcpy(reg_base->name, name, sizeof(reg_base->name));
-	reg_base->base = base;
-	reg_base->max_offset = max_offset;
-	reg_base->off = 0;
-	reg_base->cnt = DEFAULT_BASE_REG_CNT;
-	reg_base->reg_dump = NULL;
-
-	/* Initialize list to make sure check for null list will be valid */
-	INIT_LIST_HEAD(&reg_base->sub_range_list);
-
-	pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name,
-			reg_base->base, reg_base->max_offset);
-
-	list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
-
-	return 0;
-}
-
-int sde_dbg_reg_register_cb(const char *name, void (*cb)(void *), void *ptr)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *reg_base;
-
-	if (!name || !strlen(name)) {
-		pr_err("no debug name provided\n");
-		return -EINVAL;
-	}
-
-	reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
-	if (!reg_base)
-		return -ENOMEM;
-
-	strlcpy(reg_base->name, name, sizeof(reg_base->name));
-	reg_base->base = NULL;
-	reg_base->max_offset = 0;
-	reg_base->off = 0;
-	reg_base->cnt = DEFAULT_BASE_REG_CNT;
-	reg_base->reg_dump = NULL;
-	reg_base->cb = cb;
-	reg_base->cb_ptr = ptr;
-
-	/* Initialize list to make sure check for null list will be valid */
-	INIT_LIST_HEAD(&reg_base->sub_range_list);
-
-	pr_debug("%s cb: %pK cb_ptr: %pK\n", reg_base->name,
-			reg_base->cb, reg_base->cb_ptr);
-
-	list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
-
-	return 0;
-}
-
-void sde_dbg_reg_unregister_cb(const char *name, void (*cb)(void *), void *ptr)
-{
-	struct sde_dbg_base *dbg_base = &sde_dbg_base;
-	struct sde_dbg_reg_base *reg_base;
-
-	if (!dbg_base)
-		return;
-
-	list_for_each_entry(reg_base, &dbg_base->reg_base_list, reg_base_head) {
-		if (strlen(reg_base->name) &&
-			!strcmp(reg_base->name, name)) {
-			pr_debug("%s cb: %pK cb_ptr: %pK\n", reg_base->name,
-					reg_base->cb, reg_base->cb_ptr);
-			list_del(&reg_base->reg_base_head);
-			kfree(reg_base);
-			break;
-		}
-	}
-}
-
-void sde_dbg_reg_register_dump_range(const char *base_name,
-		const char *range_name, u32 offset_start, u32 offset_end,
-		uint32_t xin_id)
-{
-	struct sde_dbg_reg_base *reg_base;
-	struct sde_dbg_reg_range *range;
-
-	reg_base = _sde_dump_get_blk_addr(base_name);
-	if (!reg_base) {
-		pr_err("error: for range %s unable to locate base %s\n",
-				range_name, base_name);
-		return;
-	}
-
-	if (!range_name || strlen(range_name) == 0) {
-		pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
-				__builtin_return_address(0), base_name,
-				offset_start, offset_end);
-		return;
-	}
-
-	if (offset_end - offset_start < REG_DUMP_ALIGN ||
-			offset_start > offset_end) {
-		pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
-				__builtin_return_address(0), base_name,
-				range_name, offset_start, offset_end);
-		return;
-	}
-
-	range = kzalloc(sizeof(*range), GFP_KERNEL);
-	if (!range)
-		return;
-
-	strlcpy(range->range_name, range_name, sizeof(range->range_name));
-	range->offset.start = offset_start;
-	range->offset.end = offset_end;
-	range->xin_id = xin_id;
-	list_add_tail(&range->head, &reg_base->sub_range_list);
-
-	pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
-			base_name, range->range_name,
-			range->offset.start, range->offset.end);
-}
-
-void sde_dbg_set_sde_top_offset(u32 blk_off)
-{
-	sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
deleted file mode 100644
index 0d67e4c..0000000
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_DBG_H_
-#define SDE_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-/* select an uncommon hex value for the limiter */
-#define SDE_EVTLOG_DATA_LIMITER	(0xC0DEBEEF)
-#define SDE_EVTLOG_FUNC_ENTRY	0x1111
-#define SDE_EVTLOG_FUNC_EXIT	0x2222
-#define SDE_EVTLOG_FUNC_CASE1	0x3333
-#define SDE_EVTLOG_FUNC_CASE2	0x4444
-#define SDE_EVTLOG_FUNC_CASE3	0x5555
-#define SDE_EVTLOG_FUNC_CASE4	0x6666
-#define SDE_EVTLOG_FUNC_CASE5	0x7777
-#define SDE_EVTLOG_FUNC_CASE6	0x8888
-#define SDE_EVTLOG_FUNC_CASE7	0x9999
-#define SDE_EVTLOG_FUNC_CASE8	0xaaaa
-#define SDE_EVTLOG_FUNC_CASE9	0xbbbb
-#define SDE_EVTLOG_FUNC_CASE10	0xcccc
-#define SDE_EVTLOG_PANIC	0xdead
-#define SDE_EVTLOG_FATAL	0xbad
-#define SDE_EVTLOG_ERROR	0xebad
-
-#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
-
-enum sde_dbg_evtlog_flag {
-	SDE_EVTLOG_CRITICAL = BIT(0),
-	SDE_EVTLOG_IRQ = BIT(1),
-	SDE_EVTLOG_VERBOSE = BIT(2),
-	SDE_EVTLOG_ALWAYS = -1
-};
-
-enum sde_dbg_dump_flag {
-	SDE_DBG_DUMP_IN_LOG = BIT(0),
-	SDE_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)
-
-/*
- * evtlog will print this number of entries when it is called through
- * sysfs node or panic. This prevents kernel log from evtlog message
- * flood.
- */
-#define SDE_EVTLOG_PRINT_ENTRY	256
-
-/*
- * evtlog keeps this number of entries in memory for debug purpose. This
- * number must be greater than print entry to prevent out of bound evtlog
- * entry array access.
- */
-#define SDE_EVTLOG_ENTRY	(SDE_EVTLOG_PRINT_ENTRY * 8)
-#define SDE_EVTLOG_MAX_DATA 15
-#define SDE_EVTLOG_BUF_MAX 512
-#define SDE_EVTLOG_BUF_ALIGN 32
-
-struct sde_dbg_power_ctrl {
-	void *handle;
-	void *client;
-	int (*enable_fn)(void *handle, void *client, bool enable);
-};
-
-struct sde_dbg_evtlog_log {
-	s64 time;
-	const char *name;
-	int line;
-	u32 data[SDE_EVTLOG_MAX_DATA];
-	u32 data_cnt;
-	int pid;
-};
-
-/**
- * @last_dump: Index of last entry to be output during evtlog dumps
- * @filter_list: Linked list of currently active filter strings
- */
-struct sde_dbg_evtlog {
-	struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
-	u32 first;
-	u32 last;
-	u32 last_dump;
-	u32 curr;
-	u32 next;
-	u32 enable;
-	spinlock_t spin_lock;
-	struct list_head filter_list;
-};
-
-extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
-
-/**
- * SDE_EVT32 - Write a list of 32bit values to the event log, default area
- * ... - variable arguments
- */
-#define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
-		__LINE__, SDE_EVTLOG_ALWAYS, ##__VA_ARGS__, \
-		SDE_EVTLOG_DATA_LIMITER)
-
-/**
- * SDE_EVT32_VERBOSE - Write a list of 32bit values for verbose event logging
- * ... - variable arguments
- */
-#define SDE_EVT32_VERBOSE(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
-		__LINE__, SDE_EVTLOG_VERBOSE, ##__VA_ARGS__, \
-		SDE_EVTLOG_DATA_LIMITER)
-
-/**
- * SDE_EVT32_IRQ - Write a list of 32bit values to the event log, IRQ area
- * ... - variable arguments
- */
-#define SDE_EVT32_IRQ(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
-		__LINE__, SDE_EVTLOG_IRQ, ##__VA_ARGS__, \
-		SDE_EVTLOG_DATA_LIMITER)
-
-/**
- * SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities
- * @va_args:	list of named register dump ranges and regions to dump, as
- *		registered previously through sde_dbg_reg_register_base and
- *		sde_dbg_reg_register_dump_range.
- *		Including the special name "panic" will trigger a panic after
- *		the dumping work has completed.
- */
-#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
-		SDE_DBG_DUMP_DATA_LIMITER)
-
-/**
- * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work
- * @va_args:	list of named register dump ranges and regions to dump, as
- *		registered previously through sde_dbg_reg_register_base and
- *		sde_dbg_reg_register_dump_range.
- *		Including the special name "panic" will trigger a panic after
- *		the dumping work has completed.
- */
-#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
-		SDE_DBG_DUMP_DATA_LIMITER)
-
-/**
- * SDE_DBG_EVT_CTRL - trigger a different driver events
- *  event: event that trigger different behavior in the driver
- */
-#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
-		SDE_DBG_DUMP_DATA_LIMITER)
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * sde_evtlog_init - allocate a new event log object
- * Returns:	evtlog or -ERROR
- */
-struct sde_dbg_evtlog *sde_evtlog_init(void);
-
-/**
- * sde_evtlog_destroy - destroy previously allocated event log
- * @evtlog:	pointer to evtlog
- * Returns:	none
- */
-void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog);
-
-/**
- * sde_evtlog_log - log an entry into the event log.
- *	log collection may be enabled/disabled entirely via debugfs
- *	log area collection may be filtered by user provided flags via debugfs.
- * @evtlog:	pointer to evtlog
- * @name:	function name of call site
- * @line:	line number of call site
- * @flag:	log area filter flag checked against user's debugfs request
- * Returns:	none
- */
-void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
-		int flag, ...);
-
-/**
- * sde_evtlog_dump_all - print all entries in event log to kernel log
- * @evtlog:	pointer to evtlog
- * Returns:	none
- */
-void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog);
-
-/**
- * sde_evtlog_is_enabled - check whether log collection is enabled for given
- *	event log and log area flag
- * @evtlog:	pointer to evtlog
- * @flag:	log area filter flag
- * Returns:	none
- */
-bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag);
-
-/**
- * sde_evtlog_dump_to_buffer - print content of event log to the given buffer
- * @evtlog:		pointer to evtlog
- * @evtlog_buf:		target buffer to print into
- * @evtlog_buf_size:	size of target buffer
- * @update_last_entry:	whether or not to stop at most recent entry
- * @full_dump:          whether to dump full or to limit print entries
- * Returns:		number of bytes written to buffer
- */
-ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size,
-		bool update_last_entry, bool full_dump);
-
-/**
- * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion:		Chipset revision
- */
-void sde_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * sde_dbg_init - initialize global sde debug facilities: evtlog, regdump
- * @dev:		device handle
- * @power_ctrl:		power control callback structure for enabling clocks
- *			during register dumping
- * Returns:		0 or -ERROR
- */
-int sde_dbg_init(struct device *dev, struct sde_dbg_power_ctrl *power_ctrl);
-
-/**
- * sde_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root:	debugfs root in which to create sde debug entries
- * Returns:	0 or -ERROR
- */
-int sde_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * sde_dbg_destroy - destroy the global sde debug facilities
- * Returns:	none
- */
-void sde_dbg_destroy(void);
-
-/**
- * sde_dbg_dump - trigger dumping of all sde_dbg facilities
- * @queue_work:	whether to queue the dumping work to the work_struct
- * @name:	string indicating origin of dump
- * @va_args:	list of named register dump ranges and regions to dump, as
- *		registered previously through sde_dbg_reg_register_base and
- *		sde_dbg_reg_register_dump_range.
- *		Including the special name "panic" will trigger a panic after
- *		the dumping work has completed.
- * Returns:	none
- */
-void sde_dbg_dump(bool queue_work, const char *name, ...);
-
-/**
- * sde_dbg_ctrl - trigger specific actions for the driver with debugging
- *		purposes. Those actions need to be enabled by the debugfs entry
- *		so the driver executes those actions in the corresponding calls.
- * @va_args:	list of actions to trigger
- * Returns:	none
- */
-void sde_dbg_ctrl(const char *name, ...);
-
-/**
- * sde_dbg_reg_register_base - register a hw register address section for later
- *	dumping. call this before calling sde_dbg_reg_register_dump_range
- *	to be able to specify sub-ranges within the base hw range.
- * @name:	name of base region
- * @base:	base pointer of region
- * @max_offset:	length of region
- * Returns:	0 or -ERROR
- */
-int sde_dbg_reg_register_base(const char *name, void __iomem *base,
-		size_t max_offset);
-
-/**
- * sde_dbg_reg_register_cb - register a hw register callback for later
- *	dumping.
- * @name:	name of base region
- * @cb:		callback of external region
- * @cb_ptr:	private pointer of external region
- * Returns:	0 or -ERROR
- */
-int sde_dbg_reg_register_cb(const char *name, void (*cb)(void *), void *ptr);
-
-/**
- * sde_dbg_reg_unregister_cb - register a hw unregister callback for later
- *	dumping.
- * @name:	name of base region
- * @cb:		callback of external region
- * @cb_ptr:	private pointer of external region
- * Returns:	None
- */
-void sde_dbg_reg_unregister_cb(const char *name, void (*cb)(void *), void *ptr);
-
-/**
- * sde_dbg_reg_register_dump_range - register a hw register sub-region for
- *	later register dumping associated with base specified by
- *	sde_dbg_reg_register_base
- * @base_name:		name of base region
- * @range_name:		name of sub-range within base region
- * @offset_start:	sub-range's start offset from base's base pointer
- * @offset_end:		sub-range's end offset from base's base pointer
- * @xin_id:		xin id
- * Returns:		none
- */
-void sde_dbg_reg_register_dump_range(const char *base_name,
-		const char *range_name, u32 offset_start, u32 offset_end,
-		uint32_t xin_id);
-
-/**
- * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base
- *	address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void sde_dbg_set_sde_top_offset(u32 blk_off);
-
-/**
- * sde_evtlog_set_filter - update evtlog filtering
- * @evtlog:	pointer to evtlog
- * @filter:     pointer to optional function name filter, set to NULL to disable
- */
-void sde_evtlog_set_filter(struct sde_dbg_evtlog *evtlog, char *filter);
-
-/**
- * sde_evtlog_get_filter - query configured evtlog filters
- * @evtlog:	pointer to evtlog
- * @index:	filter index to retrieve
- * @buf:	pointer to output filter buffer
- * @bufsz:	size of output filter buffer
- * Returns:	zero if a filter string was returned
- */
-int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
-		char *buf, size_t bufsz);
-
-#ifndef CONFIG_DRM_SDE_RSC
-static inline void sde_rsc_debug_dump(u32 mux_sel)
-{
-}
-#else
-/**
- * sde_rsc_debug_dump - sde rsc debug dump status
- * @mux_sel:»       select mux on rsc debug bus
- */
-void sde_rsc_debug_dump(u32 mux_sel);
-#endif
-
-/**
- * dsi_ctrl_debug_dump - dump dsi debug dump status
- * @entries:	array of debug bus control values
- * @size:	size of the debug bus control array
- */
-void dsi_ctrl_debug_dump(u32 *entries, u32 size);
-
-#else
-static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
-{
-	return NULL;
-}
-
-static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
-{
-}
-
-static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog,
-		const char *name, int line, int flag, ...)
-{
-}
-
-static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
-{
-}
-
-static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
-		u32 flag)
-{
-	return false;
-}
-
-static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size,
-		bool update_last_entry)
-{
-	return 0;
-}
-
-static inline void sde_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int sde_dbg_init(struct device *dev,
-		struct sde_dbg_power_ctrl *power_ctrl)
-{
-	return 0;
-}
-
-static inline int sde_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-	return 0;
-}
-
-static inline void sde_dbg_destroy(void)
-{
-}
-
-static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
-{
-}
-
-static inline void sde_dbg_ctrl(const char *name, ...)
-{
-}
-
-static inline int sde_dbg_reg_register_base(const char *name,
-		void __iomem *base, size_t max_offset)
-{
-	return 0;
-}
-
-static inline void sde_dbg_reg_register_dump_range(const char *base_name,
-		const char *range_name, u32 offset_start, u32 offset_end,
-		uint32_t xin_id)
-{
-}
-
-void sde_dbg_set_sde_top_offset(u32 blk_off)
-{
-}
-
-static inline void sde_evtlog_set_filter(
-		struct sde_dbg_evtlog *evtlog, char *filter)
-{
-}
-
-static inline int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog,
-		int index, char *buf, size_t bufsz)
-{
-	return -EINVAL;
-}
-
-static inline void sde_rsc_debug_dump(u32 mux_sel)
-{
-}
-
-static inline void dsi_ctrl_debug_dump(u32 entries, u32 size)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
-#endif /* SDE_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
deleted file mode 100644
index 17f3ccd..0000000
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"sde_dbg:[%s] " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-
-#include "sde_dbg.h"
-#include "sde_trace.h"
-
-#define SDE_EVTLOG_FILTER_STRSIZE	64
-
-struct sde_evtlog_filter {
-	struct list_head list;
-	char filter[SDE_EVTLOG_FILTER_STRSIZE];
-};
-
-static bool _sde_evtlog_is_filtered_no_lock(
-		struct sde_dbg_evtlog *evtlog, const char *str)
-{
-	struct sde_evtlog_filter *filter_node;
-	size_t len;
-	bool rc;
-
-	if (!str)
-		return true;
-
-	len = strlen(str);
-
-	/*
-	 * Filter the incoming string IFF the list is not empty AND
-	 * a matching entry is not in the list.
-	 */
-	rc = !list_empty(&evtlog->filter_list);
-	list_for_each_entry(filter_node, &evtlog->filter_list, list)
-		if (strnstr(str, filter_node->filter, len)) {
-			rc = false;
-			break;
-		}
-
-	return rc;
-}
-
-bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag)
-{
-	return evtlog && (evtlog->enable & flag);
-}
-
-void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
-		int flag, ...)
-{
-	unsigned long flags;
-	int i, val = 0;
-	va_list args;
-	struct sde_dbg_evtlog_log *log;
-
-	if (!evtlog)
-		return;
-
-	if (!sde_evtlog_is_enabled(evtlog, flag))
-		return;
-
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
-
-	if (_sde_evtlog_is_filtered_no_lock(evtlog, name))
-		goto exit;
-
-	log = &evtlog->logs[evtlog->curr];
-	log->time = ktime_to_us(ktime_get());
-	log->name = name;
-	log->line = line;
-	log->data_cnt = 0;
-	log->pid = current->pid;
-
-	va_start(args, flag);
-	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
-
-		val = va_arg(args, int);
-		if (val == SDE_EVTLOG_DATA_LIMITER)
-			break;
-
-		log->data[i] = val;
-	}
-	va_end(args);
-	log->data_cnt = i;
-	evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
-	evtlog->last++;
-
-	trace_sde_evtlog(name, line, log->data_cnt, log->data);
-exit:
-	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-}
-
-/* always dump the last entries which are not dumped yet */
-static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
-		bool update_last_entry, bool full_dump)
-{
-	int max_entries = full_dump ? SDE_EVTLOG_ENTRY : SDE_EVTLOG_PRINT_ENTRY;
-
-	if (!evtlog)
-		return false;
-
-	evtlog->first = evtlog->next;
-
-	if (update_last_entry)
-		evtlog->last_dump = evtlog->last;
-
-	if (evtlog->last_dump == evtlog->first)
-		return false;
-
-	if (evtlog->last_dump < evtlog->first) {
-		evtlog->first %= SDE_EVTLOG_ENTRY;
-		if (evtlog->last_dump < evtlog->first)
-			evtlog->last_dump += SDE_EVTLOG_ENTRY;
-	}
-
-	if ((evtlog->last_dump - evtlog->first) > max_entries) {
-		pr_info("evtlog skipping %d entries, last=%d\n",
-			evtlog->last_dump - evtlog->first -
-			max_entries, evtlog->last_dump - 1);
-		evtlog->first = evtlog->last_dump - max_entries;
-	}
-	evtlog->next = evtlog->first + 1;
-
-	return true;
-}
-
-ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size,
-		bool update_last_entry, bool full_dump)
-{
-	int i;
-	ssize_t off = 0;
-	struct sde_dbg_evtlog_log *log, *prev_log;
-	unsigned long flags;
-
-	if (!evtlog || !evtlog_buf)
-		return 0;
-
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
-
-	/* update markers, exit if nothing to print */
-	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry, full_dump))
-		goto exit;
-
-	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
-
-	prev_log = &evtlog->logs[(evtlog->first - 1) % SDE_EVTLOG_ENTRY];
-
-	off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
-		log->name, log->line);
-
-	if (off < SDE_EVTLOG_BUF_ALIGN) {
-		memset((evtlog_buf + off), 0x20, (SDE_EVTLOG_BUF_ALIGN - off));
-		off = SDE_EVTLOG_BUF_ALIGN;
-	}
-
-	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
-		"=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first,
-		log->time, (log->time - prev_log->time), log->pid);
-
-	for (i = 0; i < log->data_cnt; i++)
-		off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
-			"%x ", log->data[i]);
-
-	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
-exit:
-	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-
-	return off;
-}
-
-void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
-{
-	char buf[SDE_EVTLOG_BUF_MAX];
-	bool update_last_entry = true;
-
-	if (!evtlog)
-		return;
-
-	while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf),
-				update_last_entry, false)) {
-		pr_info("%s\n", buf);
-		update_last_entry = false;
-	}
-}
-
-struct sde_dbg_evtlog *sde_evtlog_init(void)
-{
-	struct sde_dbg_evtlog *evtlog;
-
-	evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL);
-	if (!evtlog)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&evtlog->spin_lock);
-	evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE;
-
-	INIT_LIST_HEAD(&evtlog->filter_list);
-
-	return evtlog;
-}
-
-int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
-		char *buf, size_t bufsz)
-{
-	struct sde_evtlog_filter *filter_node;
-	unsigned long flags;
-	int rc = -EFAULT;
-
-	if (!evtlog || !buf || !bufsz || index < 0)
-		return -EINVAL;
-
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
-	list_for_each_entry(filter_node, &evtlog->filter_list, list) {
-		if (index--)
-			continue;
-
-		/* don't care about return value */
-		(void)strlcpy(buf, filter_node->filter, bufsz);
-		rc = 0;
-		break;
-	}
-	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-
-	return rc;
-}
-
-void sde_evtlog_set_filter(struct sde_dbg_evtlog *evtlog, char *filter)
-{
-	struct sde_evtlog_filter *filter_node, *tmp;
-	struct list_head free_list;
-	unsigned long flags;
-	char *flt;
-
-	if (!evtlog)
-		return;
-
-	INIT_LIST_HEAD(&free_list);
-
-	/*
-	 * Clear active filter list and cache filter_nodes locally
-	 * to reduce memory fragmentation.
-	 */
-	spin_lock_irqsave(&evtlog->spin_lock, flags);
-	list_for_each_entry_safe(filter_node, tmp, &evtlog->filter_list, list) {
-		list_del_init(&filter_node->list);
-		list_add_tail(&filter_node->list, &free_list);
-	}
-	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-
-	/*
-	 * Parse incoming filter request string and build up a new
-	 * filter list. New filter nodes are taken from the local
-	 * free list, if available, and allocated from the system
-	 * heap once the free list is empty.
-	 */
-	while (filter && (flt = strsep(&filter, "|\r\n\t ")) != NULL) {
-		if (!*flt)
-			continue;
-
-		if (list_empty(&free_list)) {
-			filter_node = kzalloc(sizeof(*filter_node), GFP_KERNEL);
-			if (!filter_node)
-				break;
-
-			INIT_LIST_HEAD(&filter_node->list);
-		} else {
-			filter_node = list_first_entry(&free_list,
-					struct sde_evtlog_filter, list);
-			list_del_init(&filter_node->list);
-		}
-
-		/* don't care if copy truncated */
-		(void)strlcpy(filter_node->filter, flt,
-				SDE_EVTLOG_FILTER_STRSIZE);
-
-		spin_lock_irqsave(&evtlog->spin_lock, flags);
-		list_add_tail(&filter_node->list, &evtlog->filter_list);
-		spin_unlock_irqrestore(&evtlog->spin_lock, flags);
-	}
-
-	/*
-	 * Free any unused filter_nodes back to the system.
-	 */
-	list_for_each_entry_safe(filter_node, tmp, &free_list, list) {
-		list_del(&filter_node->list);
-		kfree(filter_node);
-	}
-}
-
-void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
-{
-	struct sde_evtlog_filter *filter_node, *tmp;
-
-	if (!evtlog)
-		return;
-
-	list_for_each_entry_safe(filter_node, tmp, &evtlog->filter_list, list) {
-		list_del(&filter_node->list);
-		kfree(filter_node);
-	}
-	kfree(evtlog);
-}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
deleted file mode 100644
index 9a2ee1a..0000000
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ /dev/null
@@ -1,630 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#include <drm/drm_edid.h>
-#include <linux/hdmi.h>
-
-#include "sde_kms.h"
-#include "sde_edid_parser.h"
-
-#define DBC_START_OFFSET 4
-#define EDID_DTD_LEN 18
-
-enum data_block_types {
-	RESERVED,
-	AUDIO_DATA_BLOCK,
-	VIDEO_DATA_BLOCK,
-	VENDOR_SPECIFIC_DATA_BLOCK,
-	SPEAKER_ALLOCATION_DATA_BLOCK,
-	VESA_DTC_DATA_BLOCK,
-	RESERVED2,
-	USE_EXTENDED_TAG
-};
-
-static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
-{
-	u8 *edid_ext = NULL;
-	int i;
-
-	/* No EDID or EDID extensions */
-	if (edid == NULL || edid->extensions == 0)
-		return NULL;
-
-	/* Find CEA extension */
-	for (i = 0; i < edid->extensions; i++) {
-		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
-		if (edid_ext[0] == ext_id)
-			break;
-	}
-
-	if (i == edid->extensions)
-		return NULL;
-
-	return edid_ext;
-}
-
-static u8 *sde_find_cea_extension(struct edid *edid)
-{
-	return sde_find_edid_extension(edid, SDE_CEA_EXT);
-}
-
-static int
-sde_cea_db_payload_len(const u8 *db)
-{
-	return db[0] & 0x1f;
-}
-
-static int
-sde_cea_db_tag(const u8 *db)
-{
-	return db[0] >> 5;
-}
-
-static int
-sde_cea_revision(const u8 *cea)
-{
-	return cea[1];
-}
-
-static int
-sde_cea_db_offsets(const u8 *cea, int *start, int *end)
-{
-	/* Data block offset in CEA extension block */
-	*start = 4;
-	*end = cea[2];
-	if (*end == 0)
-		*end = 127;
-	if (*end < 4 || *end > 127)
-		return -ERANGE;
-	return 0;
-}
-
-#define sde_for_each_cea_db(cea, i, start, end) \
-for ((i) = (start); \
-(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
-(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
-
-static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db)
-{
-	int hdmi_id;
-
-	if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK)
-		return false;
-
-	if (sde_cea_db_payload_len(db) < 7)
-		return false;
-
-	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
-	return hdmi_id == HDMI_FORUM_IEEE_OUI;
-}
-
-static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
-{
-	u8 *db = NULL;
-	u8 *cea = NULL;
-
-	if (!edid) {
-		SDE_ERROR("%s: invalid input\n", __func__);
-		return NULL;
-	}
-
-	cea = sde_find_cea_extension(edid);
-
-	if (cea && sde_cea_revision(cea) >= 3) {
-		int i, start, end;
-
-		if (sde_cea_db_offsets(cea, &start, &end))
-			return NULL;
-
-		sde_for_each_cea_db(cea, i, start, end) {
-			db = &cea[i];
-			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
-				(db[1] == blk_id))
-				return db;
-		}
-	}
-	return NULL;
-}
-
-static u8 *
-sde_edid_find_block(struct edid *edid, int blk_id)
-{
-	u8 *db = NULL;
-	u8 *cea = NULL;
-
-	if (!edid) {
-		SDE_ERROR("%s: invalid input\n", __func__);
-		return NULL;
-	}
-
-	cea = sde_find_cea_extension(edid);
-
-	if (cea && sde_cea_revision(cea) >= 3) {
-		int i, start, end;
-
-		if (sde_cea_db_offsets(cea, &start, &end))
-			return NULL;
-
-		sde_for_each_cea_db(cea, i, start, end) {
-			db = &cea[i];
-			if (sde_cea_db_tag(db) == blk_id)
-				return db;
-		}
-	}
-	return NULL;
-}
-
-
-static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
-	u8 type, u8 *len)
-{
-	/* the start of data block collection, start of Video Data Block */
-	u32 offset = start_offset;
-	u32 dbc_offset = in_buf[2];
-
-	SDE_EDID_DEBUG("%s +", __func__);
-	/*
-	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
-	 *   collection present.
-	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
-	 *   collection present and no DTD data present.
-	 */
-
-	if ((dbc_offset == 0) || (dbc_offset == 4)) {
-		SDE_EDID_DEBUG("EDID: no DTD or non-DTD data present\n");
-		return NULL;
-	}
-
-	while (offset < dbc_offset) {
-		u8 block_len = in_buf[offset] & 0x1F;
-
-		if ((offset + block_len <= dbc_offset) &&
-		    (in_buf[offset] >> 5) == type) {
-			*len = block_len;
-			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
-				type, offset, block_len);
-
-			return in_buf + offset;
-		}
-		offset += 1 + block_len;
-	}
-
-	return NULL;
-}
-
-static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
-{
-	char *vendor_id;
-	u32 id_codes;
-
-	SDE_EDID_DEBUG("%s +", __func__);
-	if (!edid_ctrl) {
-		SDE_ERROR("%s: invalid input\n", __func__);
-		return;
-	}
-
-	vendor_id = edid_ctrl->vendor_id;
-	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
-		edid_ctrl->edid->mfg_id[1];
-
-	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
-	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
-	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
-	vendor_id[3] = 0;
-	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
-	SDE_EDID_DEBUG("%s -", __func__);
-}
-
-static void sde_edid_set_y420_support(struct drm_connector *connector,
-u32 video_format)
-{
-	u8 cea_mode = 0;
-	struct drm_display_mode *mode;
-	u32 mode_fmt_flags = 0;
-
-	/* Need to add Y420 support flag to the modes */
-	list_for_each_entry(mode, &connector->probed_modes, head) {
-		/* Cache the format flags before clearing */
-		mode_fmt_flags = mode->flags;
-		/* Clear the RGB/YUV format flags before calling upstream API */
-		mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
-		cea_mode = drm_match_cea_mode(mode);
-		/* Restore the format flags */
-		mode->flags = mode_fmt_flags;
-		if ((cea_mode != 0) && (cea_mode == video_format)) {
-			SDE_EDID_DEBUG("%s found match for %d ", __func__,
-			video_format);
-			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
-		}
-	}
-}
-
-static void sde_edid_parse_Y420CMDB(
-struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
-const u8 *db)
-{
-	u32 offset = 0;
-	u8 cmdb_len = 0;
-	u8 svd_len = 0;
-	const u8 *svd = NULL;
-	u32 i = 0, j = 0;
-	u32 video_format = 0;
-
-	if (!edid_ctrl) {
-		SDE_ERROR("%s: edid_ctrl is NULL\n", __func__);
-		return;
-	}
-
-	if (!db) {
-		SDE_ERROR("%s: invalid input\n", __func__);
-		return;
-	}
-	SDE_EDID_DEBUG("%s +\n", __func__);
-	cmdb_len = db[0] & 0x1f;
-
-	/* Byte 3 to L+1 contain SVDs */
-	offset += 2;
-
-	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
-
-	if (svd) {
-		/*moving to the next byte as vic info begins there*/
-		svd_len = svd[0] & 0x1f;
-		++svd;
-	}
-
-	for (i = 0; i < svd_len; i++, j++) {
-		video_format = *(svd + i) & 0x7F;
-		if (cmdb_len == 1) {
-			/* If cmdb_len is 1, it means all SVDs support YUV */
-			sde_edid_set_y420_support(connector, video_format);
-		} else if (db[offset] & (1 << j)) {
-			sde_edid_set_y420_support(connector, video_format);
-
-			if (j & 0x80) {
-				j = j/8;
-				offset++;
-				if (offset >= cmdb_len)
-					break;
-			}
-		}
-	}
-
-	SDE_EDID_DEBUG("%s -\n", __func__);
-
-}
-
-static void sde_edid_parse_Y420VDB(
-struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
-const u8 *db)
-{
-	u8 len = db[0] & 0x1f;
-	u32 i = 0;
-	u32 video_format = 0;
-
-	if (!edid_ctrl) {
-		SDE_ERROR("%s: invalid input\n", __func__);
-		return;
-	}
-
-	SDE_EDID_DEBUG("%s +\n", __func__);
-
-	/* Offset to byte 3 */
-	db += 2;
-	for (i = 0; i < len - 1; i++) {
-		video_format = *(db + i) & 0x7F;
-		/*
-		 * mode was already added in get_modes()
-		 * only need to set the Y420 support flag
-		 */
-		sde_edid_set_y420_support(connector, video_format);
-	}
-	SDE_EDID_DEBUG("%s -", __func__);
-}
-
-static void sde_edid_set_mode_format(
-struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
-{
-	const u8 *db = NULL;
-	struct drm_display_mode *mode;
-
-	SDE_EDID_DEBUG("%s +\n", __func__);
-	/* Set YUV mode support flags for YCbcr420VDB */
-	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
-			Y420_VIDEO_DATA_BLOCK);
-	if (db)
-		sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
-	else
-		SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
-
-	/* Set RGB supported on all modes where YUV is not set */
-	list_for_each_entry(mode, &connector->probed_modes, head) {
-		if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
-			mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
-	}
-
-
-	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
-			Y420_CAPABILITY_MAP_DATA_BLOCK);
-	if (db)
-		sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
-	else
-		SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
-
-	SDE_EDID_DEBUG("%s -\n", __func__);
-}
-
-static void _sde_edid_update_dc_modes(
-struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
-{
-	int i, start, end;
-	u8 *edid_ext, *hdmi;
-	struct drm_display_info *disp_info;
-	u32 hdmi_dc_yuv_modes = 0;
-
-	SDE_EDID_DEBUG("%s +\n", __func__);
-
-	if (!connector || !edid_ctrl) {
-		SDE_ERROR("invalid input\n");
-		return;
-	}
-
-	disp_info = &connector->display_info;
-
-	edid_ext = sde_find_cea_extension(edid_ctrl->edid);
-
-	if (!edid_ext) {
-		SDE_DEBUG("no cea extension\n");
-		return;
-	}
-
-	if (sde_cea_db_offsets(edid_ext, &start, &end))
-		return;
-
-	sde_for_each_cea_db(edid_ext, i, start, end) {
-		if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) {
-
-			hdmi = &edid_ext[i];
-
-			if (sde_cea_db_payload_len(hdmi) < 7)
-				continue;
-
-			if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) {
-				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30;
-				SDE_EDID_DEBUG("Y420 30-bit supported\n");
-			}
-
-			if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) {
-				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
-				SDE_EDID_DEBUG("Y420 36-bit supported\n");
-			}
-
-			if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) {
-				hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
-				SDE_EDID_DEBUG("Y420 48-bit supported\n");
-			}
-		}
-	}
-
-	disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes;
-
-	SDE_EDID_DEBUG("%s -\n", __func__);
-}
-
-static void _sde_edid_extract_audio_data_blocks(
-	struct sde_edid_ctrl *edid_ctrl)
-{
-	u8 len = 0;
-	u8 adb_max = 0;
-	const u8 *adb = NULL;
-	u32 offset = DBC_START_OFFSET;
-	u8 *cea = NULL;
-
-	if (!edid_ctrl) {
-		SDE_ERROR("invalid edid_ctrl\n");
-		return;
-	}
-	SDE_EDID_DEBUG("%s +", __func__);
-	cea = sde_find_cea_extension(edid_ctrl->edid);
-	if (!cea) {
-		SDE_DEBUG("CEA extension not found\n");
-		return;
-	}
-
-	edid_ctrl->adb_size = 0;
-
-	memset(edid_ctrl->audio_data_block, 0,
-		sizeof(edid_ctrl->audio_data_block));
-
-	do {
-		len = 0;
-		adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
-			&len);
-
-		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
-			adb_max >= MAX_NUMBER_ADB)) {
-			if (!edid_ctrl->adb_size) {
-				SDE_DEBUG("No/Invalid Audio Data Block\n");
-				return;
-			}
-
-			continue;
-		}
-
-		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
-			adb + 1, len);
-		offset = (adb - cea) + 1 + len;
-
-		edid_ctrl->adb_size += len;
-		adb_max++;
-	} while (adb);
-	SDE_EDID_DEBUG("%s -", __func__);
-}
-
-static void _sde_edid_extract_speaker_allocation_data(
-	struct sde_edid_ctrl *edid_ctrl)
-{
-	u8 len;
-	const u8 *sadb = NULL;
-	u8 *cea = NULL;
-
-	if (!edid_ctrl) {
-		SDE_ERROR("invalid edid_ctrl\n");
-		return;
-	}
-	SDE_EDID_DEBUG("%s +", __func__);
-	cea = sde_find_cea_extension(edid_ctrl->edid);
-	if (!cea) {
-		SDE_DEBUG("CEA extension not found\n");
-		return;
-	}
-
-	sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
-		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
-	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
-		SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
-		return;
-	}
-
-	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
-	edid_ctrl->sadb_size = len;
-
-	SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
-		sadb[1],
-		(sadb[1] & BIT(0)) ? "FL/FR," : "",
-		(sadb[1] & BIT(1)) ? "LFE," : "",
-		(sadb[1] & BIT(2)) ? "FC," : "",
-		(sadb[1] & BIT(3)) ? "RL/RR," : "",
-		(sadb[1] & BIT(4)) ? "RC," : "",
-		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
-		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
-	SDE_EDID_DEBUG("%s -", __func__);
-}
-
-struct sde_edid_ctrl *sde_edid_init(void)
-{
-	struct sde_edid_ctrl *edid_ctrl = NULL;
-
-	SDE_EDID_DEBUG("%s +\n", __func__);
-	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
-	if (!edid_ctrl) {
-		SDE_ERROR("edid_ctrl alloc failed\n");
-		return NULL;
-	}
-	memset((edid_ctrl), 0, sizeof(*edid_ctrl));
-	SDE_EDID_DEBUG("%s -\n", __func__);
-	return edid_ctrl;
-}
-
-void sde_free_edid(void **input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
-
-	SDE_EDID_DEBUG("%s +", __func__);
-	kfree(edid_ctrl->edid);
-	edid_ctrl->edid = NULL;
-}
-
-void sde_edid_deinit(void **input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
-
-	SDE_EDID_DEBUG("%s +", __func__);
-	sde_free_edid((void *)&edid_ctrl);
-	kfree(edid_ctrl);
-	SDE_EDID_DEBUG("%s -", __func__);
-}
-
-int _sde_edid_update_modes(struct drm_connector *connector,
-	void *input)
-{
-	int rc = 0;
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
-
-	SDE_EDID_DEBUG("%s +", __func__);
-	if (edid_ctrl->edid) {
-		drm_connector_update_edid_property(connector,
-			edid_ctrl->edid);
-
-		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
-		sde_edid_set_mode_format(connector, edid_ctrl);
-		_sde_edid_update_dc_modes(connector, edid_ctrl);
-		SDE_EDID_DEBUG("%s -", __func__);
-		return rc;
-	}
-
-	drm_connector_update_edid_property(connector, NULL);
-	SDE_EDID_DEBUG("%s null edid -", __func__);
-	return rc;
-}
-
-u8 sde_get_edid_checksum(void *input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
-	struct edid *edid = NULL, *last_block = NULL;
-	u8 *raw_edid = NULL;
-
-	if (!edid_ctrl || !edid_ctrl->edid) {
-		SDE_ERROR("invalid edid input\n");
-		return 0;
-	}
-
-	edid = edid_ctrl->edid;
-
-	raw_edid = (u8 *)edid;
-	raw_edid += (edid->extensions * EDID_LENGTH);
-	last_block = (struct edid *)raw_edid;
-
-	if (last_block)
-		return last_block->checksum;
-
-	SDE_ERROR("Invalid block, no checksum\n");
-	return 0;
-}
-
-bool sde_detect_hdmi_monitor(void *input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
-
-	return drm_detect_hdmi_monitor(edid_ctrl->edid);
-}
-
-void sde_parse_edid(void *input)
-{
-	struct sde_edid_ctrl *edid_ctrl;
-
-	if (!input) {
-		SDE_ERROR("Invalid input\n");
-		return;
-	}
-
-	edid_ctrl = (struct sde_edid_ctrl *)(input);
-
-	if (edid_ctrl->edid) {
-		sde_edid_extract_vendor_id(edid_ctrl);
-		_sde_edid_extract_audio_data_blocks(edid_ctrl);
-		_sde_edid_extract_speaker_allocation_data(edid_ctrl);
-	} else {
-		SDE_ERROR("edid not present\n");
-	}
-}
-
-void sde_get_edid(struct drm_connector *connector,
-				  struct i2c_adapter *adapter, void **input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
-
-	edid_ctrl->edid = drm_get_edid(connector, adapter);
-	SDE_EDID_DEBUG("%s +\n", __func__);
-
-	if (!edid_ctrl->edid)
-		SDE_ERROR("EDID read failed\n");
-
-	if (edid_ctrl->edid)
-		sde_parse_edid(edid_ctrl);
-
-	SDE_EDID_DEBUG("%s -\n", __func__);
-};
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
deleted file mode 100644
index a89ea48..0000000
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_EDID_PARSER_H_
-#define _SDE_EDID_PARSER_H_
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/debugfs.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-
-#define MAX_NUMBER_ADB 5
-#define MAX_AUDIO_DATA_BLOCK_SIZE 30
-#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
-#define EDID_VENDOR_ID_SIZE     4
-
-#define SDE_CEA_EXT    0x02
-#define SDE_EXTENDED_TAG 0x07
-
-#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
-
-enum extended_data_block_types {
-	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
-	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
-	HDMI_VIDEO_DATA_BLOCK = 0x04,
-	HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
-	Y420_VIDEO_DATA_BLOCK = 0x0E,
-	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
-	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
-	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
-	INFOFRAME_DATA_BLOCK = 0x20,
-};
-
-#ifdef SDE_EDID_DEBUG_ENABLE
-#define SDE_EDID_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
-#else
-#define SDE_EDID_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
-#endif
-
-/*
- * struct hdmi_edid_hdr_data - HDR Static Metadata
- * @eotf: Electro-Optical Transfer Function
- * @metadata_type_one: Static Metadata Type 1 support
- * @max_luminance: Desired Content Maximum Luminance
- * @avg_luminance: Desired Content Frame-average Luminance
- * @min_luminance: Desired Content Minimum Luminance
- */
-struct sde_edid_hdr_data {
-	u32 eotf;
-	bool metadata_type_one;
-	u32 max_luminance;
-	u32 avg_luminance;
-	u32 min_luminance;
-};
-
-struct sde_edid_sink_caps {
-	u32 max_pclk_in_hz;
-	bool scdc_present;
-	bool scramble_support; /* scramble support for less than 340Mcsc */
-	bool read_req_support;
-	bool osd_disparity;
-	bool dual_view_support;
-	bool ind_view_support;
-};
-
-struct sde_edid_ctrl {
-	struct edid *edid;
-	u8 pt_scan_info;
-	u8 it_scan_info;
-	u8 ce_scan_info;
-	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
-	int adb_size;
-	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
-	int sadb_size;
-	bool hdr_supported;
-	char vendor_id[EDID_VENDOR_ID_SIZE];
-	struct sde_edid_sink_caps sink_caps;
-	struct sde_edid_hdr_data hdr_data;
-};
-
-/**
- * sde_edid_init() - init edid structure.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- * Return: handle to sde_edid_ctrl for the client.
- */
-struct sde_edid_ctrl *sde_edid_init(void);
-
-/**
- * sde_edid_deinit() - deinit edid structure.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- *
- * Return: void.
- */
-void sde_edid_deinit(void **edid_ctrl);
-
-/**
- * sde_get_edid() - get edid info.
- * @connector:   Handle to the drm_connector.
- * @adapter:     handle to i2c adapter for DDC read
- * @edid_ctrl:   Handle to the edid_ctrl structure.
- *
- * Return: void.
- */
-void sde_get_edid(struct drm_connector *connector,
-struct i2c_adapter *adapter,
-void **edid_ctrl);
-
-/**
- * sde_parse_edid() - parses edid info.
- * @edid_ctrl:   Handle to the edid_ctrl structure.
- *
- * Return: void.
- */
-void sde_parse_edid(void *edid_ctrl);
-
-/**
- * sde_free_edid() - free edid structure.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- *
- * Return: void.
- */
-void sde_free_edid(void **edid_ctrl);
-
-/**
- * sde_detect_hdmi_monitor() - detect HDMI mode.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- *
- * Return: error code.
- */
-bool sde_detect_hdmi_monitor(void *edid_ctrl);
-
-/**
- * sde_get_edid_checksum() - return the checksum of last block of EDID.
- * @input:     Handle to the edid_ctrl structure.
- *
- * Return: checksum of the last EDID block.
- */
-u8 sde_get_edid_checksum(void *input);
-
-/**
- * _sde_edid_update_modes() - populate EDID modes.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- *
- * Return: error code.
- */
-int _sde_edid_update_modes(struct drm_connector *connector,
-							void *edid_ctrl);
-
-#endif /* _SDE_EDID_PARSER_H_ */
-
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
deleted file mode 100644
index 5c1dc4a..0000000
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012, 2014-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_HDCP_H__
-#define __SDE_HDCP_H__
-
-#include <soc/qcom/scm.h>
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/debugfs.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
-#include <linux/list.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-#include <linux/hdcp_qseecom.h>
-#include "sde_kms.h"
-
-#define MAX_STREAM_COUNT 2
-
-enum sde_hdcp_client_id {
-	HDCP_CLIENT_HDMI,
-	HDCP_CLIENT_DP,
-};
-
-enum sde_hdcp_state {
-	HDCP_STATE_INACTIVE,
-	HDCP_STATE_AUTHENTICATING,
-	HDCP_STATE_AUTHENTICATED,
-	HDCP_STATE_AUTH_FAIL,
-};
-
-enum sde_hdcp_version {
-	HDCP_VERSION_NONE,
-	HDCP_VERSION_1X = BIT(0),
-	HDCP_VERSION_2P2 = BIT(1),
-	HDCP_VERSION_MAX = BIT(2),
-};
-
-struct stream_info {
-	u8 stream_id;
-	u8 virtual_channel;
-};
-
-struct sde_hdcp_stream {
-	struct list_head list;
-	u8 stream_id;
-	u8 virtual_channel;
-	u32 stream_handle;
-};
-
-struct sde_hdcp_init_data {
-	struct device *msm_hdcp_dev;
-	struct dss_io_data *core_io;
-	struct dss_io_data *dp_ahb;
-	struct dss_io_data *dp_aux;
-	struct dss_io_data *dp_link;
-	struct dss_io_data *dp_p0;
-	struct dss_io_data *qfprom_io;
-	struct dss_io_data *hdcp_io;
-	struct drm_dp_aux *drm_aux;
-	struct mutex *mutex;
-	struct workqueue_struct *workq;
-	void *cb_data;
-	void (*notify_status)(void *cb_data, enum sde_hdcp_state state);
-	u8 sink_rx_status;
-	unsigned char *revision;
-	u32 phy_addr;
-	bool sec_access;
-	enum sde_hdcp_client_id client_id;
-};
-
-struct sde_hdcp_ops {
-	int (*isr)(void *ptr);
-	int (*cp_irq)(void *ptr);
-	int (*reauthenticate)(void *input);
-	int (*authenticate)(void *hdcp_ctrl);
-	bool (*feature_supported)(void *input);
-	void (*force_encryption)(void *input, bool enable);
-	bool (*sink_support)(void *input);
-	int (*set_mode)(void *input, bool mst_enabled);
-	int (*on)(void *input);
-	void (*off)(void *hdcp_ctrl);
-	int (*register_streams)(void *input, u8 num_streams,
-			struct stream_info *streams);
-	int (*deregister_streams)(void *input, u8 num_streams,
-			struct stream_info *streams);
-};
-
-static inline const char *sde_hdcp_state_name(enum sde_hdcp_state hdcp_state)
-{
-	switch (hdcp_state) {
-	case HDCP_STATE_INACTIVE:	return "HDCP_STATE_INACTIVE";
-	case HDCP_STATE_AUTHENTICATING:	return "HDCP_STATE_AUTHENTICATING";
-	case HDCP_STATE_AUTHENTICATED:	return "HDCP_STATE_AUTHENTICATED";
-	case HDCP_STATE_AUTH_FAIL:	return "HDCP_STATE_AUTH_FAIL";
-	default:			return "???";
-	}
-}
-
-static inline const char *sde_hdcp_version(enum sde_hdcp_version hdcp_version)
-{
-	switch (hdcp_version) {
-	case HDCP_VERSION_NONE:		return "HDCP_VERSION_NONE";
-	case HDCP_VERSION_1X:		return "HDCP_VERSION_1X";
-	case HDCP_VERSION_2P2:		return "HDCP_VERSION_2P2";
-	default:			return "???";
-	}
-}
-
-void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data);
-void sde_hdcp_1x_deinit(void *input);
-struct sde_hdcp_ops *sde_hdcp_1x_get(void *input);
-void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data);
-void sde_dp_hdcp2p2_deinit(void *input);
-struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input);
-#endif /* __SDE_HDCP_H__ */
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
deleted file mode 100644
index aa37def..0000000
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ /dev/null
@@ -1,1546 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[sde-hdcp1x] %s: " fmt, __func__
-
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/iopoll.h>
-#include <linux/msm_hdcp.h>
-#include <drm/drm_dp_helper.h>
-#include "sde_hdcp.h"
-#include "hdmi/hdmi.xml.h"
-#include "video/msm_hdmi_hdcp_mgr.h"
-#include "dp/dp_reg.h"
-
-#define SDE_HDCP_STATE_NAME (sde_hdcp_state_name(hdcp->hdcp_state))
-
-/* QFPROM Registers for HDMI/HDCP */
-#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  (0x000000F8)
-#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  (0x000000FC)
-#define QFPROM_RAW_VERSION_4             (0x000000A8)
-#define SEC_CTRL_HW_VERSION              (0x00006000)
-#define HDCP_KSV_LSB                     (0x000060D8)
-#define HDCP_KSV_MSB                     (0x000060DC)
-#define HDCP_KSV_VERSION_4_OFFSET        (0x00000014)
-
-/* SEC_CTRL version that supports HDCP SEL */
-#define HDCP_SEL_MIN_SEC_VERSION         (0x50010000)
-
-/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
-#define HDCP_KEYS_STATE_NO_KEYS		0
-#define HDCP_KEYS_STATE_NOT_CHECKED	1
-#define HDCP_KEYS_STATE_CHECKING	2
-#define HDCP_KEYS_STATE_VALID		3
-#define HDCP_KEYS_STATE_AKSV_NOT_VALID	4
-#define HDCP_KEYS_STATE_CHKSUM_MISMATCH	5
-#define HDCP_KEYS_STATE_PROD_AKSV	6
-#define HDCP_KEYS_STATE_RESERVED	7
-
-#define TZ_HDCP_CMD_ID 0x00004401
-
-#define HDCP_INT_CLR (isr->auth_success_ack | isr->auth_fail_ack | \
-			isr->auth_fail_info_ack | isr->tx_req_ack | \
-			isr->encryption_ready_ack | \
-			isr->encryption_not_ready_ack | isr->tx_req_done_ack)
-
-#define HDCP_INT_EN (isr->auth_success_mask | isr->auth_fail_mask | \
-			isr->encryption_ready_mask | \
-			isr->encryption_not_ready_mask)
-
-#define HDCP_POLL_SLEEP_US   (20 * 1000)
-#define HDCP_POLL_TIMEOUT_US (HDCP_POLL_SLEEP_US * 100)
-
-#define sde_hdcp_1x_state(x) (hdcp->hdcp_state == x)
-
-struct sde_hdcp_sink_addr {
-	char *name;
-	u32 addr;
-	u32 len;
-};
-
-struct sde_hdcp_1x_reg_data {
-	u32 reg_id;
-	struct sde_hdcp_sink_addr *sink;
-};
-
-struct sde_hdcp_sink_addr_map {
-	/* addresses to read from sink */
-	struct sde_hdcp_sink_addr bcaps;
-	struct sde_hdcp_sink_addr bksv;
-	struct sde_hdcp_sink_addr r0;
-	struct sde_hdcp_sink_addr bstatus;
-	struct sde_hdcp_sink_addr cp_irq_status;
-	struct sde_hdcp_sink_addr ksv_fifo;
-	struct sde_hdcp_sink_addr v_h0;
-	struct sde_hdcp_sink_addr v_h1;
-	struct sde_hdcp_sink_addr v_h2;
-	struct sde_hdcp_sink_addr v_h3;
-	struct sde_hdcp_sink_addr v_h4;
-
-	/* addresses to write to sink */
-	struct sde_hdcp_sink_addr an;
-	struct sde_hdcp_sink_addr aksv;
-	struct sde_hdcp_sink_addr ainfo;
-};
-
-struct sde_hdcp_int_set {
-	/* interrupt register */
-	u32 int_reg;
-
-	/* interrupt enable/disable masks */
-	u32 auth_success_mask;
-	u32 auth_fail_mask;
-	u32 encryption_ready_mask;
-	u32 encryption_not_ready_mask;
-	u32 tx_req_mask;
-	u32 tx_req_done_mask;
-
-	/* interrupt acknowledgment */
-	u32 auth_success_ack;
-	u32 auth_fail_ack;
-	u32 auth_fail_info_ack;
-	u32 encryption_ready_ack;
-	u32 encryption_not_ready_ack;
-	u32 tx_req_ack;
-	u32 tx_req_done_ack;
-
-	/* interrupt status */
-	u32 auth_success_int;
-	u32 auth_fail_int;
-	u32 encryption_ready;
-	u32 encryption_not_ready;
-	u32 tx_req_int;
-	u32 tx_req_done_int;
-};
-
-struct sde_hdcp_reg_set {
-	u32 status;
-	u32 keys_offset;
-	u32 r0_offset;
-	u32 v_offset;
-	u32 ctrl;
-	u32 aksv_lsb;
-	u32 aksv_msb;
-	u32 entropy_ctrl0;
-	u32 entropy_ctrl1;
-	u32 sec_sha_ctrl;
-	u32 sec_sha_data;
-	u32 sha_status;
-
-	u32 data2_0;
-	u32 data3;
-	u32 data4;
-	u32 data5;
-	u32 data6;
-
-	u32 sec_data0;
-	u32 sec_data1;
-	u32 sec_data7;
-	u32 sec_data8;
-	u32 sec_data9;
-	u32 sec_data10;
-	u32 sec_data11;
-	u32 sec_data12;
-
-	u32 reset;
-	u32 reset_bit;
-
-	u32 repeater;
-};
-
-#define HDCP_REG_SET_CLIENT_HDMI \
-	{0}
-
-#define HDCP_REG_SET_CLIENT_DP \
-{DP_HDCP_STATUS, 16, 14, 13, DP_HDCP_CTRL, \
-	DP_HDCP_SW_LOWER_AKSV, DP_HDCP_SW_UPPER_AKSV, \
-	DP_HDCP_ENTROPY_CTRL0, DP_HDCP_ENTROPY_CTRL1, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA, \
-	DP_HDCP_SHA_STATUS, DP_HDCP_RCVPORT_DATA2_0, \
-	DP_HDCP_RCVPORT_DATA3, DP_HDCP_RCVPORT_DATA4, \
-	DP_HDCP_RCVPORT_DATA5, DP_HDCP_RCVPORT_DATA6, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \
-	HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \
-	DP_SW_RESET, BIT(1), BIT(1)}
-
-#define HDCP_HDMI_SINK_ADDR_MAP \
-	{{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \
-	 {"bstatus", 0x41, 2}, {"??", 0x0, 0}, {"ksv-fifo", 0x43, 0}, \
-	 {"v_h0", 0x20, 4}, {"v_h1", 0x24, 4}, {"v_h2", 0x28, 4}, \
-	 {"v_h3", 0x2c, 4}, {"v_h4", 0x30, 4}, {"an", 0x18, 8}, \
-	 {"aksv", 0x10, 5}, {"ainfo", 0x00, 0},}
-
-#define HDCP_DP_SINK_ADDR_MAP \
-	{{"bcaps", 0x68028, 1}, {"bksv", 0x68000, 5}, {"r0'", 0x68005, 2}, \
-	 {"binfo", 0x6802A, 2}, {"cp_irq_status", 0x68029, 1}, \
-	 {"ksv-fifo", 0x6802C, 0}, {"v_h0", 0x68014, 4}, {"v_h1", 0x68018, 4}, \
-	 {"v_h2", 0x6801C, 4}, {"v_h3", 0x68020, 4}, {"v_h4", 0x68024, 4}, \
-	 {"an", 0x6800C, 8}, {"aksv", 0x68007, 5}, {"ainfo", 0x6803B, 1} }
-
-#define HDCP_HDMI_INT_SET \
-	{0}
-
-#define HDCP_DP_INT_SET \
-	{DP_INTR_STATUS2, \
-	 BIT(17), BIT(20), BIT(24), BIT(27), 0, 0, \
-	 BIT(16), BIT(19), BIT(21), BIT(23), BIT(26), 0, 0, \
-	 BIT(15), BIT(18), BIT(22), BIT(25), 0, 0}
-
-struct sde_hdcp_1x {
-	u8 bcaps;
-	u32 tp_msgid;
-	u32 an_0, an_1, aksv_0, aksv_1;
-	u32 aksv_msb, aksv_lsb;
-	bool sink_r0_ready;
-	bool reauth;
-	bool ksv_ready;
-	enum sde_hdcp_state hdcp_state;
-	struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
-	struct delayed_work hdcp_auth_work;
-	struct completion r0_checked;
-	struct completion sink_r0_available;
-	struct sde_hdcp_init_data init_data;
-	struct sde_hdcp_ops *ops;
-	struct sde_hdcp_reg_set reg_set;
-	struct sde_hdcp_int_set int_set;
-	struct sde_hdcp_sink_addr_map sink_addr;
-	struct workqueue_struct *workq;
-	void *hdcp1_handle;
-};
-
-static int sde_hdcp_1x_count_one(u8 *array, u8 len)
-{
-	int i, j, count = 0;
-
-	for (i = 0; i < len; i++)
-		for (j = 0; j < 8; j++)
-			count += (((array[i] >> j) & 0x1) ? 1 : 0);
-	return count;
-}
-
-static int sde_hdcp_1x_enable_hdcp_engine(void *input)
-{
-	int rc = 0;
-	struct dss_io_data *dp_ahb;
-	struct dss_io_data *dp_aux;
-	struct dss_io_data *dp_link;
-	struct sde_hdcp_1x *hdcp = input;
-	struct sde_hdcp_reg_set *reg_set;
-
-	if (!hdcp || !hdcp->init_data.dp_ahb ||
-		!hdcp->init_data.dp_aux ||
-		!hdcp->init_data.dp_link) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE) &&
-	    !sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
-		pr_err("%s: invalid state. returning\n",
-			SDE_HDCP_STATE_NAME);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp_ahb = hdcp->init_data.dp_ahb;
-	dp_aux = hdcp->init_data.dp_aux;
-	dp_link = hdcp->init_data.dp_link;
-	reg_set = &hdcp->reg_set;
-
-	DSS_REG_W(dp_aux, reg_set->aksv_lsb, hdcp->aksv_lsb);
-	DSS_REG_W(dp_aux, reg_set->aksv_msb, hdcp->aksv_msb);
-
-	/* Setup seed values for random number An */
-	DSS_REG_W(dp_link, reg_set->entropy_ctrl0, 0xB1FFB0FF);
-	DSS_REG_W(dp_link, reg_set->entropy_ctrl1, 0xF00DFACE);
-
-	/* make sure hw is programmed */
-	wmb();
-
-	/* enable hdcp engine */
-	DSS_REG_W(dp_ahb, reg_set->ctrl, 0x1);
-
-	hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
-end:
-	return rc;
-}
-
-static int sde_hdcp_1x_read(struct sde_hdcp_1x *hdcp,
-			  struct sde_hdcp_sink_addr *sink,
-			  u8 *buf, bool realign)
-{
-	int const max_size = 15;
-	int rc = 0, read_size = 0, bytes_read = 0;
-
-	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		int size = sink->len, offset = sink->addr;
-
-		do {
-			read_size = min(size, max_size);
-
-			bytes_read = drm_dp_dpcd_read(hdcp->init_data.drm_aux,
-					offset, buf, read_size);
-			if (bytes_read != read_size) {
-				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
-					offset, read_size, bytes_read);
-				rc = -EIO;
-				break;
-			}
-
-			buf += read_size;
-			size -= read_size;
-
-			if (!realign)
-				offset += read_size;
-		} while (size > 0);
-	}
-
-	return rc;
-}
-
-static int sde_hdcp_1x_write(struct sde_hdcp_1x *hdcp,
-			   struct sde_hdcp_sink_addr *sink, u8 *buf)
-{
-	int const max_size = 16;
-	int rc = 0, write_size = 0, bytes_written = 0;
-
-	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		int size = sink->len, offset = sink->addr;
-
-		do {
-			write_size = min(size, max_size);
-
-			bytes_written =
-				drm_dp_dpcd_write(hdcp->init_data.drm_aux,
-						offset, buf, write_size);
-			if (bytes_written != write_size) {
-				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
-					offset, write_size, bytes_written);
-				rc = -EIO;
-				break;
-			}
-
-			buf += write_size;
-			offset += write_size;
-			size -= write_size;
-		} while (size > 0);
-	}
-
-	return rc;
-}
-
-static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp)
-{
-	u32 intr_reg;
-	struct dss_io_data *io;
-	struct sde_hdcp_int_set *isr;
-
-	io = hdcp->init_data.dp_ahb;
-	isr = &hdcp->int_set;
-
-	intr_reg = DSS_REG_R(io, isr->int_reg);
-
-	intr_reg |= HDCP_INT_CLR | HDCP_INT_EN;
-
-	DSS_REG_W(io, isr->int_reg, intr_reg);
-}
-
-static int sde_hdcp_1x_read_bcaps(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
-		&hdcp->bcaps, false);
-	if (rc) {
-		pr_err("error reading bcaps\n");
-		goto error;
-	}
-
-	pr_debug("bcaps read: 0x%x\n", hdcp->bcaps);
-
-	hdcp->current_tp.ds_type = hdcp->bcaps & reg_set->repeater ?
-			DS_REPEATER : DS_RECEIVER;
-
-	pr_debug("ds: %s\n", hdcp->current_tp.ds_type == DS_REPEATER ?
-			"repeater" : "receiver");
-
-	/* Write BCAPS to the hardware */
-	DSS_REG_W(hdcp_io, reg_set->sec_data12, hdcp->bcaps);
-error:
-	return rc;
-}
-
-static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	u32 link0_status;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
-	struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	/* Wait for HDCP keys to be checked and validated */
-	rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
-				((link0_status >> reg_set->keys_offset) & 0x7)
-					== HDCP_KEYS_STATE_VALID ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-	if (rc) {
-		pr_err("key not ready\n");
-		goto error;
-	}
-
-	/*
-	 * 1.1_Features turned off by default.
-	 * No need to write AInfo since 1.1_Features is disabled.
-	 */
-	DSS_REG_W(dp_aux, reg_set->data4, 0);
-
-	/* Wait for An0 and An1 bit to be ready */
-	rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
-				(link0_status & (BIT(8) | BIT(9))) ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-	if (rc) {
-		pr_err("An not ready\n");
-		goto error;
-	}
-
-	/* As per hardware recommendations, wait before reading An */
-	msleep(20);
-error:
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
-		rc = -EINVAL;
-
-	return rc;
-}
-
-static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	u8 an[8], aksv[5];
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	an[0] =  hdcp->an_0        & 0xFF;
-	an[1] = (hdcp->an_0 >> 8)  & 0xFF;
-	an[2] = (hdcp->an_0 >> 16) & 0xFF;
-	an[3] = (hdcp->an_0 >> 24) & 0xFF;
-	an[4] =  hdcp->an_1        & 0xFF;
-	an[5] = (hdcp->an_1 >> 8)  & 0xFF;
-	an[6] = (hdcp->an_1 >> 16) & 0xFF;
-	an[7] = (hdcp->an_1 >> 24) & 0xFF;
-
-	pr_debug("an read: 0x%2x%2x%2x%2x%2x%2x%2x%2x\n",
-		an[7], an[6], an[5], an[4], an[3], an[2], an[1], an[0]);
-
-	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.an, an);
-	if (rc) {
-		pr_err("error writing an to sink\n");
-		goto error;
-	}
-
-	/* Copy An and AKSV to byte arrays for transmission */
-	aksv[0] =  hdcp->aksv_0        & 0xFF;
-	aksv[1] = (hdcp->aksv_0 >> 8)  & 0xFF;
-	aksv[2] = (hdcp->aksv_0 >> 16) & 0xFF;
-	aksv[3] = (hdcp->aksv_0 >> 24) & 0xFF;
-	aksv[4] =  hdcp->aksv_1        & 0xFF;
-
-	pr_debug("aksv read: 0x%2x%2x%2x%2x%2x\n",
-		aksv[4], aksv[3], aksv[2], aksv[1], aksv[0]);
-
-	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.aksv, aksv);
-	if (rc) {
-		pr_err("error writing aksv to sink\n");
-		goto error;
-	}
-error:
-	return rc;
-}
-
-static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
-{
-	struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
-	struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
-	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		udelay(1);
-		hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
-	}
-
-	hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
-	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		udelay(1);
-		hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
-	}
-
-	/* Read AKSV */
-	hdcp->aksv_0 = DSS_REG_R(dp_aux, reg_set->data3);
-	hdcp->aksv_1 = DSS_REG_R(dp_aux, reg_set->data4);
-
-	return 0;
-}
-
-static int sde_hdcp_1x_get_bksv_from_sink(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	u8 *bksv = hdcp->current_tp.bksv;
-	u32 link0_bksv_0, link0_bksv_1;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *hdcp_io  = hdcp->init_data.hdcp_io;
-
-	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bksv, bksv, false);
-	if (rc) {
-		pr_err("error reading bksv from sink\n");
-		goto error;
-	}
-
-	pr_debug("bksv read: 0x%2x%2x%2x%2x%2x\n",
-		bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
-
-	/* check there are 20 ones in BKSV */
-	if (sde_hdcp_1x_count_one(bksv, 5) != 20) {
-		pr_err("%s: BKSV doesn't have 20 1's and 20 0's\n",
-			SDE_HDCP_STATE_NAME);
-		rc = -EINVAL;
-		goto error;
-	}
-
-	link0_bksv_0 = bksv[3];
-	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
-	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
-	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
-	link0_bksv_1 = bksv[4];
-
-	DSS_REG_W(hdcp_io, reg_set->sec_data0, link0_bksv_0);
-	DSS_REG_W(hdcp_io, reg_set->sec_data1, link0_bksv_1);
-error:
-	return rc;
-}
-
-static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp)
-{
-	u8 const required_major = 1, required_minor = 2;
-	u8 sink_major = 0, sink_minor = 0;
-	u8 enable_hpd_irq = 0x1;
-	int rc;
-	unsigned char revision = *hdcp->init_data.revision;
-
-	sink_major = (revision >> 4) & 0x0f;
-	sink_minor = revision & 0x0f;
-	pr_debug("revision: %d.%d\n", sink_major, sink_minor);
-
-	if ((sink_minor < required_minor) || (sink_major < required_major) ||
-	  (hdcp->current_tp.ds_type != DS_REPEATER)) {
-		pr_debug("sink irq hpd not enabled\n");
-		return;
-	}
-
-	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq);
-	if (rc)
-		pr_debug("error writing ainfo to sink\n");
-}
-
-static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp)
-{
-	int rc, r0_retry = 3;
-	u8 buf[2];
-	u32 link0_status, timeout_count;
-	u32 const r0_read_delay_us = 1;
-	u32 const r0_read_timeout_us = r0_read_delay_us * 10;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *io = hdcp->init_data.dp_ahb;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	/* Wait for HDCP R0 computation to be completed */
-	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
-				(link0_status & BIT(reg_set->r0_offset)) ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-	if (rc) {
-		pr_err("R0 not ready\n");
-		goto error;
-	}
-
-	/*
-	 * HDCP Compliace Test case 1A-01:
-	 * Wait here at least 100ms before reading R0'
-	 */
-	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
-		msleep(100);
-	} else {
-		if (!hdcp->sink_r0_ready) {
-			reinit_completion(&hdcp->sink_r0_available);
-			timeout_count = wait_for_completion_timeout(
-				&hdcp->sink_r0_available, HZ / 2);
-
-			if (hdcp->reauth) {
-				pr_err("sink R0 not ready\n");
-				rc = -EINVAL;
-				goto error;
-			}
-		}
-	}
-
-	do {
-		memset(buf, 0, sizeof(buf));
-
-		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.r0,
-			buf, false);
-		if (rc) {
-			pr_err("error reading R0' from sink\n");
-			goto error;
-		}
-
-		pr_debug("sink R0'read: %2x%2x\n", buf[1], buf[0]);
-
-		DSS_REG_W(io, reg_set->data2_0, (((u32)buf[1]) << 8) | buf[0]);
-
-		rc = readl_poll_timeout(io->base + reg_set->status,
-			link0_status, (link0_status & BIT(12)) ||
-			!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-			r0_read_delay_us, r0_read_timeout_us);
-	} while (rc && --r0_retry);
-error:
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
-		rc = -EINVAL;
-
-	return rc;
-}
-
-static int sde_hdcp_1x_authentication_part1(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	sde_hdcp_1x_enable_interrupts(hdcp);
-
-	rc = sde_hdcp_1x_read_bcaps(hdcp);
-	if (rc)
-		goto error;
-
-	rc = sde_hdcp_1x_wait_for_hw_ready(hdcp);
-	if (rc)
-		goto error;
-
-	rc = sde_hdcp_1x_read_an_aksv_from_hw(hdcp);
-	if (rc)
-		goto error;
-
-	rc = sde_hdcp_1x_get_bksv_from_sink(hdcp);
-	if (rc)
-		goto error;
-
-	rc = sde_hdcp_1x_send_an_aksv_to_sink(hdcp);
-	if (rc)
-		goto error;
-
-	sde_hdcp_1x_enable_sink_irq_hpd(hdcp);
-
-	rc = sde_hdcp_1x_verify_r0(hdcp);
-	if (rc)
-		goto error;
-
-	pr_info("SUCCESSFUL\n");
-
-	return 0;
-error:
-	pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
-
-	return rc;
-}
-
-static int sde_hdcp_1x_transfer_v_h(struct sde_hdcp_1x *hdcp)
-{
-	int rc = 0;
-	struct dss_io_data *io = hdcp->init_data.hdcp_io;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct sde_hdcp_1x_reg_data reg_data[]  = {
-		{reg_set->sec_data7,  &hdcp->sink_addr.v_h0},
-		{reg_set->sec_data8,  &hdcp->sink_addr.v_h1},
-		{reg_set->sec_data9,  &hdcp->sink_addr.v_h2},
-		{reg_set->sec_data10, &hdcp->sink_addr.v_h3},
-		{reg_set->sec_data11, &hdcp->sink_addr.v_h4},
-	};
-	struct sde_hdcp_sink_addr sink = {"V", reg_data->sink->addr};
-	u32 size = ARRAY_SIZE(reg_data);
-	u8 buf[0xFF] = {0};
-	u32 i = 0, len = 0;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < size; i++) {
-		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
-
-		len += rd->sink->len;
-	}
-
-	sink.len = len;
-
-	rc = sde_hdcp_1x_read(hdcp, &sink, buf, false);
-	if (rc) {
-		pr_err("error reading %s\n", sink.name);
-		goto end;
-	}
-
-	for (i = 0; i < size; i++) {
-		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
-		u32 reg_data;
-
-		memcpy(&reg_data, buf + (sizeof(u32) * i), sizeof(u32));
-		DSS_REG_W(io, rd->reg_id, reg_data);
-	}
-end:
-	return rc;
-}
-
-static int sde_hdcp_1x_validate_downstream(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	u8 buf[2] = {0, 0};
-	u8 device_count, depth;
-	u8 max_cascade_exceeded, max_devs_exceeded;
-	u16 bstatus;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bstatus,
-			buf, false);
-	if (rc) {
-		pr_err("error reading bstatus\n");
-		goto end;
-	}
-
-	bstatus = buf[1];
-	bstatus = (bstatus << 8) | buf[0];
-
-	device_count = bstatus & 0x7F;
-
-	pr_debug("device count %d\n", device_count);
-
-	/* Cascaded repeater depth */
-	depth = (bstatus >> 8) & 0x7;
-	pr_debug("depth %d\n", depth);
-
-	/*
-	 * HDCP Compliance 1B-05:
-	 * Check if no. of devices connected to repeater
-	 * exceed max_devices_connected from bit 7 of Bstatus.
-	 */
-	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
-	if (max_devs_exceeded == 0x01) {
-		pr_err("no. of devs connected exceed max allowed\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	/*
-	 * HDCP Compliance 1B-06:
-	 * Check if no. of cascade connected to repeater
-	 * exceed max_cascade_connected from bit 11 of Bstatus.
-	 */
-	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
-	if (max_cascade_exceeded == 0x01) {
-		pr_err("no. of cascade connections exceed max allowed\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	/* Update topology information */
-	hdcp->current_tp.dev_count = device_count;
-	hdcp->current_tp.max_cascade_exceeded = max_cascade_exceeded;
-	hdcp->current_tp.max_dev_exceeded = max_devs_exceeded;
-	hdcp->current_tp.depth = depth;
-
-	DSS_REG_W(hdcp->init_data.hdcp_io,
-		  reg_set->sec_data12, hdcp->bcaps | (bstatus << 8));
-end:
-	return rc;
-}
-
-static int sde_hdcp_1x_read_ksv_fifo(struct sde_hdcp_1x *hdcp)
-{
-	u32 ksv_read_retry = 20, ksv_bytes, rc = 0;
-	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	memset(ksv_fifo, 0, sizeof(hdcp->current_tp.ksv_list));
-
-	/* each KSV is 5 bytes long */
-	ksv_bytes = 5 * hdcp->current_tp.dev_count;
-	hdcp->sink_addr.ksv_fifo.len = ksv_bytes;
-
-	while (ksv_bytes && --ksv_read_retry) {
-		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.ksv_fifo,
-				ksv_fifo, true);
-		if (rc)
-			pr_err("could not read ksv fifo (%d)\n",
-				ksv_read_retry);
-		else
-			break;
-	}
-
-	if (rc)
-		pr_err("error reading ksv_fifo\n");
-
-	return rc;
-}
-
-static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp)
-{
-	int i, rc = 0;
-	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
-	u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
-	struct dss_io_data *io = hdcp->init_data.dp_ahb;
-	struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
-	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	u32 sha_status = 0, status;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	/* reset SHA Controller */
-	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x1);
-	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x0);
-
-	for (i = 0; i < ksv_bytes - 1; i++) {
-		/* Write KSV byte and do not set DONE bit[0] */
-		DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, ksv_fifo[i] << 16);
-
-		/*
-		 * Once 64 bytes have been written, we need to poll for
-		 * HDCP_SHA_BLOCK_DONE before writing any further
-		 */
-		if (i && !((i + 1) % 64)) {
-			rc = readl_poll_timeout(io->base + reg_set->sha_status,
-				sha_status, (sha_status & BIT(0)) ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-			if (rc) {
-				pr_err("block not done\n");
-				goto error;
-			}
-		}
-	}
-
-	/* Write l to DONE bit[0] */
-	DSS_REG_W_ND(sec_io, reg_set->sec_sha_data,
-		(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
-
-	/* Now wait for HDCP_SHA_COMP_DONE */
-	rc = readl_poll_timeout(io->base + reg_set->sha_status, sha_status,
-				(sha_status & BIT(4)) ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-	if (rc) {
-		pr_err("V computation not done\n");
-		goto error;
-	}
-
-	/* Wait for V_MATCHES */
-	rc = readl_poll_timeout(io->base + reg_set->status, status,
-				(status & BIT(reg_set->v_offset)) ||
-				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
-				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
-	if (rc) {
-		pr_err("V mismatch\n");
-		rc = -EINVAL;
-	}
-error:
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
-		rc = -EINVAL;
-
-	return rc;
-}
-
-static int sde_hdcp_1x_wait_for_ksv_ready(struct sde_hdcp_1x *hdcp)
-{
-	int rc, timeout;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
-	 * maximum permitted time to check for READY bit is five seconds.
-	 */
-	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
-		&hdcp->bcaps, false);
-	if (rc) {
-		pr_err("error reading bcaps\n");
-		goto error;
-	}
-
-	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
-		timeout = 50;
-
-		while (!(hdcp->bcaps & BIT(5)) && --timeout) {
-			rc = sde_hdcp_1x_read(hdcp,
-				&hdcp->sink_addr.bcaps,
-				&hdcp->bcaps, false);
-			if (rc ||
-			   !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-				pr_err("error reading bcaps\n");
-				goto error;
-			}
-			msleep(100);
-		}
-	} else {
-		u8 cp_buf = 0;
-		struct sde_hdcp_sink_addr *sink =
-			&hdcp->sink_addr.cp_irq_status;
-
-		timeout = jiffies_to_msecs(jiffies);
-
-		while (1) {
-			rc = sde_hdcp_1x_read(hdcp, sink, &cp_buf, false);
-			if (rc)
-				goto error;
-
-			if (cp_buf & BIT(0))
-				break;
-
-			/* max timeout of 5 sec as per hdcp 1.x spec */
-			if (abs(timeout - jiffies_to_msecs(jiffies)) > 5000) {
-				timeout = 0;
-				break;
-			}
-
-			if (hdcp->ksv_ready || hdcp->reauth ||
-			    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
-				break;
-
-			/* re-read after a minimum delay */
-			msleep(20);
-		}
-	}
-
-	if (!timeout || hdcp->reauth ||
-	    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("DS KSV not ready\n");
-		rc = -EINVAL;
-	} else {
-		hdcp->ksv_ready = true;
-	}
-error:
-	return rc;
-}
-
-static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
-{
-	int rc;
-	int v_retry = 3;
-
-	rc = sde_hdcp_1x_validate_downstream(hdcp);
-	if (rc)
-		goto error;
-
-	rc = sde_hdcp_1x_read_ksv_fifo(hdcp);
-	if (rc)
-		goto error;
-
-	do {
-		rc = sde_hdcp_1x_transfer_v_h(hdcp);
-		if (rc)
-			goto error;
-
-		/* do not proceed further if no device connected */
-		if (!hdcp->current_tp.dev_count)
-			goto error;
-
-		rc = sde_hdcp_1x_write_ksv_fifo(hdcp);
-	} while (--v_retry && rc);
-error:
-	if (rc) {
-		pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
-	} else {
-		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
-
-		pr_info("SUCCESSFUL\n");
-	}
-
-	return rc;
-}
-
-static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
-{
-	if (IS_ENABLED(CONFIG_HDCP_QSEECOM) &&
-			sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
-		msm_hdcp_cache_repeater_topology(hdcp->init_data.msm_hdcp_dev,
-						&hdcp->current_tp);
-		msm_hdcp_notify_topology(hdcp->init_data.msm_hdcp_dev);
-	}
-
-	if (hdcp->init_data.notify_status &&
-	    !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
-		hdcp->init_data.notify_status(
-			hdcp->init_data.cb_data,
-			hdcp->hdcp_state);
-	}
-}
-
-static void sde_hdcp_1x_auth_work(struct work_struct *work)
-{
-	int rc;
-	struct delayed_work *dw = to_delayed_work(work);
-	struct sde_hdcp_1x *hdcp = container_of(dw,
-		struct sde_hdcp_1x, hdcp_auth_work);
-	struct dss_io_data *io;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-		pr_err("invalid state\n");
-		return;
-	}
-
-	hdcp->sink_r0_ready = false;
-	hdcp->reauth = false;
-	hdcp->ksv_ready = false;
-
-	io = hdcp->init_data.core_io;
-	/* Enabling Software DDC for HDMI and REF timer for DP */
-	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
-		DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
-				REG_HDMI_DDC_ARBITRATION) & ~(BIT(4)));
-	else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		io = hdcp->init_data.dp_aux;
-		DSS_REG_W(io, DP_DP_HPD_REFTIMER, 0x10013);
-	}
-
-	/*
-	 * program hw to enable encryption as soon as
-	 * authentication is successful.
-	 */
-	hdcp1_set_enc(hdcp->hdcp1_handle, true);
-
-	rc = sde_hdcp_1x_authentication_part1(hdcp);
-	if (rc)
-		goto end;
-
-	if (hdcp->current_tp.ds_type == DS_REPEATER) {
-		rc = sde_hdcp_1x_wait_for_ksv_ready(hdcp);
-		if (rc)
-			goto end;
-	} else {
-		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
-		goto end;
-	}
-
-	hdcp->ksv_ready = false;
-
-	rc = sde_hdcp_1x_authentication_part2(hdcp);
-	if (rc)
-		goto end;
-
-	/*
-	 * Disabling software DDC before going into part3 to make sure
-	 * there is no Arbitration between software and hardware for DDC
-	 */
-	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
-		DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
-				REG_HDMI_DDC_ARBITRATION) | (BIT(4)));
-end:
-	if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
-		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
-
-	sde_hdcp_1x_update_auth_status(hdcp);
-}
-
-static int sde_hdcp_1x_authenticate(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	int rc = 0;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	flush_delayed_work(&hdcp->hdcp_auth_work);
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
-		pr_err("invalid state\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	rc = hdcp1_start(hdcp->hdcp1_handle, &hdcp->aksv_msb, &hdcp->aksv_lsb);
-	if (rc) {
-		pr_err("hdcp1_start failed (%d)\n", rc);
-		goto error;
-	}
-
-	if (!sde_hdcp_1x_enable_hdcp_engine(input)) {
-
-		queue_delayed_work(hdcp->workq,
-			&hdcp->hdcp_auth_work, HZ/2);
-	} else {
-		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
-		sde_hdcp_1x_update_auth_status(hdcp);
-	}
-
-error:
-	return rc;
-} /* hdcp_1x_authenticate */
-
-static int sde_hdcp_1x_reauthenticate(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	struct dss_io_data *io;
-	struct sde_hdcp_reg_set *reg_set;
-	struct sde_hdcp_int_set *isr;
-	u32 reg;
-
-	if (!hdcp || !hdcp->init_data.dp_ahb) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	io = hdcp->init_data.dp_ahb;
-	reg_set = &hdcp->reg_set;
-	isr = &hdcp->int_set;
-
-	if (!sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
-		pr_err("invalid state\n");
-		return -EINVAL;
-	}
-
-	/* Disable HDCP interrupts */
-	DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
-
-	reg = DSS_REG_R(io, reg_set->reset);
-	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
-
-	/* Disable encryption and disable the HDCP block */
-	DSS_REG_W(io, reg_set->ctrl, 0);
-
-	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
-
-	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
-
-	return sde_hdcp_1x_authenticate(hdcp);
-} /* hdcp_1x_reauthenticate */
-
-static void sde_hdcp_1x_off(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	struct dss_io_data *io;
-	struct sde_hdcp_reg_set *reg_set;
-	struct sde_hdcp_int_set *isr;
-	int rc = 0;
-	u32 reg;
-
-	if (!hdcp || !hdcp->init_data.dp_ahb) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	io = hdcp->init_data.dp_ahb;
-	reg_set = &hdcp->reg_set;
-	isr = &hdcp->int_set;
-
-	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
-		pr_err("invalid state\n");
-		return;
-	}
-
-	/*
-	 * Disable HDCP interrupts.
-	 * Also, need to set the state to inactive here so that any ongoing
-	 * reauth works will know that the HDCP session has been turned off.
-	 */
-	DSS_REG_W(io, isr->int_reg,
-		DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
-	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
-
-	/* complete any wait pending */
-	complete_all(&hdcp->sink_r0_available);
-	complete_all(&hdcp->r0_checked);
-	/*
-	 * Cancel any pending auth/reauth attempts.
-	 * If one is ongoing, this will wait for it to finish.
-	 * No more reauthentiaction attempts will be scheduled since we
-	 * set the currect state to inactive.
-	 */
-	rc = cancel_delayed_work_sync(&hdcp->hdcp_auth_work);
-	if (rc)
-		pr_debug("%s: Deleted hdcp auth work\n",
-			SDE_HDCP_STATE_NAME);
-
-	hdcp1_set_enc(hdcp->hdcp1_handle, false);
-
-	reg = DSS_REG_R(io, reg_set->reset);
-	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
-
-	/* Disable encryption and disable the HDCP block */
-	DSS_REG_W(io, reg_set->ctrl, 0);
-
-	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
-
-	hdcp->sink_r0_ready = false;
-
-	hdcp1_stop(hdcp->hdcp1_handle);
-
-	pr_debug("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME);
-} /* hdcp_1x_off */
-
-static int sde_hdcp_1x_isr(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	int rc = 0;
-	struct dss_io_data *io;
-	u32 hdcp_int_val;
-	struct sde_hdcp_reg_set *reg_set;
-	struct sde_hdcp_int_set *isr;
-
-	if (!hdcp || !hdcp->init_data.dp_ahb) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	io = hdcp->init_data.dp_ahb;
-	reg_set = &hdcp->reg_set;
-	isr = &hdcp->int_set;
-
-	hdcp_int_val = DSS_REG_R(io, isr->int_reg);
-
-	/* Ignore HDCP interrupts if HDCP is disabled */
-	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
-		DSS_REG_W(io, isr->int_reg, hdcp_int_val | HDCP_INT_CLR);
-		return 0;
-	}
-
-	if (hdcp_int_val & isr->auth_success_int) {
-		/* AUTH_SUCCESS_INT */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->auth_success_ack));
-		pr_debug("%s: AUTH SUCCESS\n", SDE_HDCP_STATE_NAME);
-
-		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
-			complete_all(&hdcp->r0_checked);
-	}
-
-	if (hdcp_int_val & isr->auth_fail_int) {
-		/* AUTH_FAIL_INT */
-		u32 link_status = DSS_REG_R(io, reg_set->status);
-
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->auth_fail_ack));
-
-		pr_debug("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n",
-			SDE_HDCP_STATE_NAME, link_status);
-
-		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
-			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
-			sde_hdcp_1x_update_auth_status(hdcp);
-		} else if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
-			complete_all(&hdcp->r0_checked);
-		}
-
-		/* Clear AUTH_FAIL_INFO as well */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->auth_fail_info_ack));
-	}
-
-	if (hdcp_int_val & isr->tx_req_int) {
-		/* DDC_XFER_REQ_INT */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->tx_req_ack));
-		pr_debug("%s: DDC_XFER_REQ_INT received\n",
-			SDE_HDCP_STATE_NAME);
-	}
-
-	if (hdcp_int_val & isr->tx_req_done_int) {
-		/* DDC_XFER_DONE_INT */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->tx_req_done_ack));
-		pr_debug("%s: DDC_XFER_DONE received\n",
-			SDE_HDCP_STATE_NAME);
-	}
-
-	if (hdcp_int_val & isr->encryption_ready) {
-		/* Encryption enabled */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->encryption_ready_ack));
-		pr_debug("%s: encryption ready received\n",
-			SDE_HDCP_STATE_NAME);
-	}
-
-	if (hdcp_int_val & isr->encryption_not_ready) {
-		/* Encryption enabled */
-		DSS_REG_W(io, isr->int_reg,
-			(hdcp_int_val | isr->encryption_not_ready_ack));
-		pr_debug("%s: encryption not ready received\n",
-			SDE_HDCP_STATE_NAME);
-	}
-
-error:
-	return rc;
-}
-
-static bool sde_hdcp_1x_feature_supported(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	bool feature_supported = false;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	feature_supported = hdcp1_feature_supported(hdcp->hdcp1_handle);
-
-	pr_debug("feature_supported = %d\n", feature_supported);
-
-	return feature_supported;
-}
-
-static bool sde_hdcp_1x_sink_support(void *input)
-{
-	return true;
-}
-
-void sde_hdcp_1x_deinit(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	if (hdcp->workq)
-		destroy_workqueue(hdcp->workq);
-
-	hdcp1_deinit(hdcp->hdcp1_handle);
-
-	kfree(hdcp);
-} /* hdcp_1x_deinit */
-
-static void sde_hdcp_1x_update_client_reg_set(struct sde_hdcp_1x *hdcp)
-{
-	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
-		struct sde_hdcp_reg_set reg_set = HDCP_REG_SET_CLIENT_DP;
-		struct sde_hdcp_sink_addr_map sink_addr = HDCP_DP_SINK_ADDR_MAP;
-		struct sde_hdcp_int_set isr = HDCP_DP_INT_SET;
-
-		hdcp->reg_set = reg_set;
-		hdcp->sink_addr = sink_addr;
-		hdcp->int_set = isr;
-	}
-}
-
-static bool sde_hdcp_1x_is_cp_irq_raised(struct sde_hdcp_1x *hdcp)
-{
-	int ret;
-	u8 buf = 0;
-	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
-
-	ret = sde_hdcp_1x_read(hdcp, &sink, &buf, false);
-	if (ret)
-		pr_err("error reading irq_vector\n");
-
-	return buf & BIT(2) ? true : false;
-}
-
-static void sde_hdcp_1x_clear_cp_irq(struct sde_hdcp_1x *hdcp)
-{
-	int ret;
-	u8 buf = BIT(2);
-	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
-
-	ret = sde_hdcp_1x_write(hdcp, &sink, &buf);
-	if (ret)
-		pr_err("error clearing irq_vector\n");
-}
-
-static int sde_hdcp_1x_cp_irq(void *input)
-{
-	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
-	u8 buf = 0;
-	int ret;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		goto irq_not_handled;
-	}
-
-	if (!sde_hdcp_1x_is_cp_irq_raised(hdcp)) {
-		pr_debug("cp_irq not raised\n");
-		goto irq_not_handled;
-	}
-
-	ret = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.cp_irq_status,
-			&buf, false);
-	if (ret) {
-		pr_err("error reading cp_irq_status\n");
-		goto irq_not_handled;
-	}
-
-	if ((buf & BIT(2)) || (buf & BIT(3))) {
-		pr_err("%s\n",
-			buf & BIT(2) ? "LINK_INTEGRITY_FAILURE" :
-				"REAUTHENTICATION_REQUEST");
-
-		hdcp->reauth = true;
-
-		if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
-			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
-
-		complete_all(&hdcp->sink_r0_available);
-		sde_hdcp_1x_update_auth_status(hdcp);
-	} else if (buf & BIT(1)) {
-		pr_debug("R0' AVAILABLE\n");
-		hdcp->sink_r0_ready = true;
-		complete_all(&hdcp->sink_r0_available);
-	} else if ((buf & BIT(0))) {
-		pr_debug("KSVs READY\n");
-
-		hdcp->ksv_ready = true;
-	} else {
-		pr_debug("spurious interrupt\n");
-	}
-
-	sde_hdcp_1x_clear_cp_irq(hdcp);
-	return 0;
-
-irq_not_handled:
-	return -EINVAL;
-}
-
-void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
-{
-	struct sde_hdcp_1x *hdcp = NULL;
-	char name[20];
-	static struct sde_hdcp_ops ops = {
-		.isr = sde_hdcp_1x_isr,
-		.cp_irq = sde_hdcp_1x_cp_irq,
-		.reauthenticate = sde_hdcp_1x_reauthenticate,
-		.authenticate = sde_hdcp_1x_authenticate,
-		.feature_supported = sde_hdcp_1x_feature_supported,
-		.sink_support = sde_hdcp_1x_sink_support,
-		.off = sde_hdcp_1x_off
-	};
-
-	if (!init_data || !init_data->notify_status ||
-		!init_data->workq || !init_data->cb_data) {
-		pr_err("invalid input\n");
-		goto error;
-	}
-
-	if (init_data->sec_access && !init_data->hdcp_io) {
-		pr_err("hdcp_io required\n");
-		goto error;
-	}
-
-	hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
-	if (!hdcp)
-		goto error;
-
-	hdcp->init_data = *init_data;
-	hdcp->ops = &ops;
-
-	snprintf(name, sizeof(name), "hdcp_1x_%d",
-		hdcp->init_data.client_id);
-
-	hdcp->workq = create_workqueue(name);
-	if (!hdcp->workq) {
-		pr_err("Error creating workqueue\n");
-		goto workqueue_error;
-	}
-
-	hdcp->hdcp1_handle = hdcp1_init();
-	if (!hdcp->hdcp1_handle) {
-		pr_err("Error creating HDCP 1.x handle\n");
-		goto hdcp1_handle_error;
-	}
-
-	sde_hdcp_1x_update_client_reg_set(hdcp);
-
-	INIT_DELAYED_WORK(&hdcp->hdcp_auth_work, sde_hdcp_1x_auth_work);
-
-	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
-	init_completion(&hdcp->r0_checked);
-	init_completion(&hdcp->sink_r0_available);
-
-	pr_debug("HDCP module initialized. HDCP_STATE=%s\n",
-		SDE_HDCP_STATE_NAME);
-
-	return (void *)hdcp;
-hdcp1_handle_error:
-	destroy_workqueue(hdcp->workq);
-workqueue_error:
-	kfree(hdcp);
-error:
-	return NULL;
-} /* hdcp_1x_init */
-
-struct sde_hdcp_ops *sde_hdcp_1x_get(void *input)
-{
-	return ((struct sde_hdcp_1x *)input)->ops;
-}
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
deleted file mode 100644
index f578e09..0000000
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[sde-hdcp-2x] %s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/kthread.h>
-#include <linux/kfifo.h>
-
-#include "sde_hdcp_2x.h"
-
-/* all message IDs */
-#define INVALID_MESSAGE        0
-#define AKE_INIT               2
-#define AKE_SEND_CERT          3
-#define AKE_NO_STORED_KM       4
-#define AKE_STORED_KM          5
-#define AKE_SEND_H_PRIME       7
-#define AKE_SEND_PAIRING_INFO  8
-#define LC_INIT                9
-#define LC_SEND_L_PRIME       10
-#define SKE_SEND_EKS          11
-#define REP_SEND_RECV_ID_LIST 12
-#define REP_SEND_ACK          15
-#define REP_STREAM_MANAGE     16
-#define REP_STREAM_READY      17
-#define SKE_SEND_TYPE_ID      18
-#define HDCP2P2_MAX_MESSAGES  19
-
-#define REAUTH_REQ BIT(3)
-#define LINK_INTEGRITY_FAILURE BIT(4)
-
-struct sde_hdcp_2x_ctrl {
-	DECLARE_KFIFO(cmd_q, enum sde_hdcp_2x_wakeup_cmd, 8);
-	wait_queue_head_t wait_q;
-	struct hdcp2_app_data app_data;
-	u32 timeout_left;
-	u32 wait_timeout_ms;
-	u32 total_message_length;
-	bool no_stored_km;
-	bool feature_supported;
-	bool force_encryption;
-	bool authenticated;
-	bool resend_lc_init;
-	bool resend_stream_manage;
-	void *client_data;
-	void *hdcp2_ctx;
-	struct hdcp_transport_ops *client_ops;
-	bool repeater_flag;
-	bool update_stream;
-	int last_msg;
-	atomic_t hdcp_off;
-	enum sde_hdcp_2x_device_type device_type;
-	u8 min_enc_level;
-	struct list_head stream_handles;
-	u8 stream_count;
-	struct stream_info *streams;
-	u8 num_streams;
-
-	struct task_struct *thread;
-	struct completion response_completion;
-};
-
-static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp);
-
-static const char *sde_hdcp_2x_message_name(int msg_id)
-{
-	switch (msg_id) {
-	case INVALID_MESSAGE:       return TO_STR(INVALID_MESSAGE);
-	case AKE_INIT:              return TO_STR(AKE_INIT);
-	case AKE_SEND_CERT:         return TO_STR(AKE_SEND_CERT);
-	case AKE_NO_STORED_KM:      return TO_STR(AKE_NO_STORED_KM);
-	case AKE_STORED_KM:         return TO_STR(AKE_STORED_KM);
-	case AKE_SEND_H_PRIME:      return TO_STR(AKE_SEND_H_PRIME);
-	case AKE_SEND_PAIRING_INFO: return TO_STR(AKE_SEND_PAIRING_INFO);
-	case LC_INIT:               return TO_STR(LC_INIT);
-	case LC_SEND_L_PRIME:       return TO_STR(LC_SEND_L_PRIME);
-	case SKE_SEND_EKS:          return TO_STR(SKE_SEND_EKS);
-	case REP_SEND_RECV_ID_LIST: return TO_STR(REP_SEND_RECV_ID_LIST);
-	case REP_STREAM_MANAGE:     return TO_STR(REP_STREAM_MANAGE);
-	case REP_STREAM_READY:      return TO_STR(REP_STREAM_READY);
-	case SKE_SEND_TYPE_ID:      return TO_STR(SKE_SEND_TYPE_ID);
-	default:
-		return "UNKNOWN";
-	}
-}
-
-static const struct sde_hdcp_2x_msg_data
-				hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = {
-	[AKE_INIT] = { 2,
-		{ {"rtx", 0x69000, 8}, {"TxCaps", 0x69008, 3} },
-		0 },
-	[AKE_SEND_CERT] = { 3,
-		{ {"cert-rx", 0x6900B, 522}, {"rrx", 0x69215, 8},
-			{"RxCaps", 0x6921D, 3} },
-		0 },
-	[AKE_NO_STORED_KM] = { 1,
-		{ {"Ekpub_km", 0x69220, 128} },
-		0 },
-	[AKE_STORED_KM] = { 2,
-		{ {"Ekh_km", 0x692A0, 16}, {"m", 0x692B0, 16} },
-		0 },
-	[AKE_SEND_H_PRIME] = { 1,
-		{ {"H'", 0x692C0, 32} },
-		(1 << 1) },
-	[AKE_SEND_PAIRING_INFO] =  { 1,
-		{ {"Ekh_km", 0x692E0, 16} },
-		(1 << 2) },
-	[LC_INIT] = { 1,
-		{ {"rn", 0x692F0, 8} },
-		0 },
-	[LC_SEND_L_PRIME] = { 1,
-		{ {"L'", 0x692F8, 32} },
-		0 },
-	[SKE_SEND_EKS] = { 2,
-		{ {"Edkey_ks", 0x69318, 16}, {"riv", 0x69328, 8} },
-		0 },
-	[SKE_SEND_TYPE_ID] = { 1,
-		{ {"type", 0x69494, 1} },
-		0 },
-	[REP_SEND_RECV_ID_LIST] = { 4,
-		{ {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3},
-			{"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} },
-		(1 << 0) },
-	[REP_SEND_ACK] = { 1,
-		{ {"V", 0x693E0, 16} },
-		0 },
-	[REP_STREAM_MANAGE] = { 3,
-		{ {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2},
-			{"streamID_Type", 0x693F5, 126} },
-		0 },
-	[REP_STREAM_READY] = { 1,
-		{ {"M'", 0x69473, 32} },
-		0 },
-};
-
-static int sde_hdcp_2x_get_next_message(struct sde_hdcp_2x_ctrl *hdcp,
-				     struct hdcp_transport_wakeup_data *data)
-{
-	switch (hdcp->last_msg) {
-	case INVALID_MESSAGE:
-		return AKE_INIT;
-	case AKE_INIT:
-		return AKE_SEND_CERT;
-	case AKE_SEND_CERT:
-		if (hdcp->no_stored_km)
-			return AKE_NO_STORED_KM;
-		else
-			return AKE_STORED_KM;
-	case AKE_STORED_KM:
-	case AKE_NO_STORED_KM:
-		return AKE_SEND_H_PRIME;
-	case AKE_SEND_H_PRIME:
-		if (hdcp->no_stored_km)
-			return AKE_SEND_PAIRING_INFO;
-		else
-			return LC_INIT;
-	case AKE_SEND_PAIRING_INFO:
-		return LC_INIT;
-	case LC_INIT:
-		return LC_SEND_L_PRIME;
-	case LC_SEND_L_PRIME:
-		if (hdcp->resend_lc_init)
-			return LC_INIT;
-		else
-			return SKE_SEND_EKS;
-	case SKE_SEND_EKS:
-		if (!hdcp->repeater_flag)
-			return SKE_SEND_TYPE_ID;
-	case SKE_SEND_TYPE_ID:
-		if (!hdcp->repeater_flag)
-			return SKE_SEND_TYPE_ID;
-	case REP_STREAM_READY:
-	case REP_SEND_ACK:
-		if (!hdcp->repeater_flag)
-			return INVALID_MESSAGE;
-
-		if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE)
-			return REP_STREAM_MANAGE;
-		else
-			return REP_SEND_RECV_ID_LIST;
-	case REP_SEND_RECV_ID_LIST:
-		return REP_SEND_ACK;
-	case REP_STREAM_MANAGE:
-		if (hdcp->resend_stream_manage)
-			return REP_STREAM_MANAGE;
-		else
-			return REP_STREAM_READY;
-	default:
-		pr_err("Unknown message ID (%d)\n", hdcp->last_msg);
-		return -EINVAL;
-	}
-}
-
-static void sde_hdcp_2x_wait_for_response(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	u32 timeout;
-
-	switch (hdcp->last_msg) {
-	case AKE_SEND_H_PRIME:
-		if (hdcp->no_stored_km)
-			hdcp->wait_timeout_ms = HZ;
-		else
-			hdcp->wait_timeout_ms = HZ / 4;
-		break;
-	case AKE_SEND_PAIRING_INFO:
-		hdcp->wait_timeout_ms = HZ / 4;
-		break;
-	case REP_SEND_RECV_ID_LIST:
-		if (!hdcp->authenticated)
-			hdcp->wait_timeout_ms = HZ * 3;
-		else
-			hdcp->wait_timeout_ms = 0;
-		break;
-	default:
-		hdcp->wait_timeout_ms = 0;
-	}
-
-	if (!hdcp->wait_timeout_ms)
-		return;
-
-	if (atomic_read(&hdcp->hdcp_off)) {
-		pr_debug("invalid state: hdcp off\n");
-		return;
-	}
-
-	reinit_completion(&hdcp->response_completion);
-	timeout = wait_for_completion_timeout(&hdcp->response_completion,
-			hdcp->wait_timeout_ms);
-	if (!timeout) {
-		pr_err("completion expired, last message = %s\n",
-				sde_hdcp_2x_message_name(hdcp->last_msg));
-
-		if (!atomic_read(&hdcp->hdcp_off))
-			sde_hdcp_2x_clean(hdcp);
-	}
-
-	hdcp->wait_timeout_ms = 0;
-}
-
-static void sde_hdcp_2x_wakeup_client(struct sde_hdcp_2x_ctrl *hdcp,
-				struct hdcp_transport_wakeup_data *data)
-{
-	int rc = 0;
-
-	if (!hdcp || !hdcp->client_ops || !hdcp->client_ops->wakeup ||
-			!data || (data->cmd == HDCP_TRANSPORT_CMD_INVALID))
-		return;
-
-	data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
-
-	if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE ||
-			data->cmd == HDCP_TRANSPORT_CMD_RECV_MESSAGE ||
-			data->cmd == HDCP_TRANSPORT_CMD_LINK_POLL) {
-		hdcp->last_msg =
-			sde_hdcp_2x_get_next_message(hdcp, data);
-		if (hdcp->last_msg <= INVALID_MESSAGE) {
-			hdcp->last_msg = INVALID_MESSAGE;
-			return;
-		}
-
-		data->message_data = &hdcp_msg_lookup[hdcp->last_msg];
-	}
-
-	rc = hdcp->client_ops->wakeup(data);
-	if (rc)
-		pr_err("error sending %s to client\n",
-				hdcp_transport_cmd_to_str(data->cmd));
-
-	sde_hdcp_2x_wait_for_response(hdcp);
-}
-
-static inline void sde_hdcp_2x_send_message(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	struct hdcp_transport_wakeup_data cdata = {
-					HDCP_TRANSPORT_CMD_SEND_MESSAGE };
-
-	cdata.context = hdcp->client_data;
-	cdata.timeout = hdcp->app_data.timeout;
-	cdata.buf_len = hdcp->app_data.response.length;
-
-	/* ignore the first byte as it contains the message id */
-	cdata.buf = hdcp->app_data.response.data + 1;
-
-	sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-}
-
-static bool sde_hdcp_2x_client_feature_supported(void *data)
-{
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-
-	return hdcp2_feature_supported(hdcp->hdcp2_ctx);
-}
-
-static void sde_hdcp_2x_force_encryption(void *data, bool enable)
-{
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-
-	if (!hdcp) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	hdcp->force_encryption = enable;
-	pr_info("force_encryption=%d\n", hdcp->force_encryption);
-}
-
-static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	struct list_head *element;
-	struct sde_hdcp_stream *stream_entry;
-	struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
-
-	hdcp->authenticated = false;
-
-	cdata.context = hdcp->client_data;
-	cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_FAILED;
-
-	while (!list_empty(&hdcp->stream_handles)) {
-		element = hdcp->stream_handles.next;
-		list_del(element);
-
-		stream_entry = list_entry(element, struct sde_hdcp_stream,
-			list);
-		hdcp2_close_stream(hdcp->hdcp2_ctx,
-			stream_entry->stream_handle);
-		kzfree(stream_entry);
-		hdcp->stream_count--;
-	}
-
-	if (!atomic_xchg(&hdcp->hdcp_off, 1))
-		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-
-	hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_STOP, &hdcp->app_data);
-}
-
-static u8 sde_hdcp_2x_stream_type(u8 min_enc_level)
-{
-	u8 stream_type = 0;
-
-	switch (min_enc_level) {
-	case 0:
-	case 1:
-		stream_type = 0;
-		break;
-	case 2:
-		stream_type = 1;
-		break;
-	default:
-		stream_type = 0;
-		break;
-	}
-
-	pr_debug("min_enc_level = %u, type = %u\n", min_enc_level, stream_type);
-
-	return stream_type;
-}
-
-static void sde_hdcp_2x_send_type(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	if (atomic_read(&hdcp->hdcp_off)) {
-		pr_debug("invalid state, hdcp off\n");
-		return;
-	}
-
-	if (hdcp->repeater_flag) {
-		pr_debug("invalid state, not receiver\n");
-		return;
-	}
-
-	hdcp->app_data.response.data[0] = SKE_SEND_TYPE_ID;
-	hdcp->app_data.response.data[1] =
-		sde_hdcp_2x_stream_type(hdcp->min_enc_level);
-	hdcp->app_data.response.length = 1;
-	hdcp->app_data.timeout = 100;
-
-	if (!atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_send_message(hdcp);
-}
-
-static void sde_hdcp_2x_query_stream(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc = 0;
-
-	if (atomic_read(&hdcp->hdcp_off)) {
-		pr_debug("invalid state, hdcp off\n");
-		return;
-	}
-
-	if (!hdcp->repeater_flag) {
-		pr_debug("invalid state, not a repeater\n");
-		return;
-	}
-
-	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_QUERY_STREAM,
-			&hdcp->app_data);
-	if (rc)
-		goto exit;
-
-	if (!hdcp->app_data.response.data || !hdcp->app_data.request.data) {
-		pr_err("invalid response/request buffers\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(
-		hdcp->app_data.response.data[0]));
-exit:
-	if (!rc && !atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_send_message(hdcp);
-}
-
-static void sde_hdcp_2x_initialize_command(struct sde_hdcp_2x_ctrl *hdcp,
-		enum hdcp_transport_wakeup_cmd cmd,
-		struct hdcp_transport_wakeup_data *cdata)
-{
-		cdata->cmd = cmd;
-		cdata->timeout = hdcp->timeout_left;
-		cdata->buf = hdcp->app_data.request.data + 1;
-}
-
-static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	struct hdcp_transport_wakeup_data cdata = {
-		HDCP_TRANSPORT_CMD_INVALID,
-		hdcp->client_data};
-
-	switch (hdcp->app_data.response.data[0]) {
-	case SKE_SEND_TYPE_ID:
-		if (!hdcp2_app_comm(hdcp->hdcp2_ctx,
-				HDCP2_CMD_EN_ENCRYPTION, &hdcp->app_data)) {
-			hdcp->authenticated = true;
-
-			if (hdcp->force_encryption)
-				hdcp2_force_encryption(hdcp->hdcp2_ctx, 1);
-
-			cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS;
-			sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-		}
-
-		/* poll for link check */
-		sde_hdcp_2x_initialize_command(hdcp,
-				HDCP_TRANSPORT_CMD_LINK_POLL, &cdata);
-		break;
-	case SKE_SEND_EKS:
-		if (hdcp->repeater_flag && !atomic_read(&hdcp->hdcp_off)) {
-			/* poll for link check */
-			sde_hdcp_2x_initialize_command(hdcp,
-					HDCP_TRANSPORT_CMD_LINK_POLL, &cdata);
-		} else {
-			hdcp->app_data.response.data[0] = SKE_SEND_TYPE_ID;
-			hdcp->app_data.response.data[1] =
-				sde_hdcp_2x_stream_type(hdcp->min_enc_level);
-			hdcp->app_data.response.length = 1;
-			hdcp->app_data.timeout = 100;
-
-			sde_hdcp_2x_send_message(hdcp);
-		}
-		break;
-	case REP_SEND_ACK:
-		pr_debug("Repeater authentication successful. update_stream=%d\n",
-				hdcp->update_stream);
-
-		if (hdcp->update_stream) {
-			sde_hdcp_2x_query_stream(hdcp);
-			hdcp->update_stream = false;
-		} else {
-			sde_hdcp_2x_initialize_command(hdcp,
-					HDCP_TRANSPORT_CMD_LINK_POLL, &cdata);
-		}
-		break;
-	default:
-		cdata.cmd = HDCP_TRANSPORT_CMD_RECV_MESSAGE;
-		cdata.timeout = hdcp->timeout_left;
-		cdata.buf = hdcp->app_data.request.data + 1;
-	}
-
-	sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-}
-
-static void sde_hdcp_2x_init(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc;
-	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START, &hdcp->app_data);
-	if (rc)
-		sde_hdcp_2x_clean(hdcp);
-}
-
-static void sde_hdcp_2x_start_auth(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc;
-
-	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START_AUTH,
-		&hdcp->app_data);
-	if (rc) {
-		sde_hdcp_2x_clean(hdcp);
-		return;
-	}
-
-	pr_debug("message received from TZ: %s\n",
-		 sde_hdcp_2x_message_name(hdcp->app_data.response.data[0]));
-
-	sde_hdcp_2x_send_message(hdcp);
-}
-
-static void sde_hdcp_2x_timeout(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc = 0;
-	int message_id;
-
-	if (atomic_read(&hdcp->hdcp_off)) {
-		pr_debug("invalid state, hdcp off\n");
-		return;
-	}
-
-	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_TIMEOUT,
-			&hdcp->app_data);
-	if (rc)
-		goto error;
-
-	message_id = (int)hdcp->app_data.response.data[0];
-	if (message_id == LC_INIT && !atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_send_message(hdcp);
-	return;
-error:
-	if (!atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_clean(hdcp);
-}
-
-static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc = 0;
-	char *msg = NULL;
-	u32 message_id_bytes = 0;
-	u32 request_length, out_msg;
-	struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
-
-	if (atomic_read(&hdcp->hdcp_off)) {
-		pr_debug("invalid state, hdcp off\n");
-		return;
-	}
-
-	cdata.context = hdcp->client_data;
-
-	request_length = hdcp->total_message_length;
-	msg = hdcp->app_data.request.data;
-
-	if (request_length == 0) {
-		pr_err("invalid message length\n");
-		goto exit;
-	}
-
-	if (hdcp->device_type == HDCP_TXMTR_DP ||
-			hdcp->device_type == HDCP_TXMTR_DP_MST) {
-		msg[0] = hdcp->last_msg;
-		message_id_bytes = 1;
-	}
-
-	request_length += message_id_bytes;
-
-	pr_debug("[sink]: %s\n", sde_hdcp_2x_message_name(msg[0]));
-
-	hdcp->app_data.request.length = request_length;
-	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_PROCESS_MSG,
-			&hdcp->app_data);
-	if (rc) {
-		pr_err("failed to process sink's response to %s (%d)\n",
-				sde_hdcp_2x_message_name(msg[0]), rc);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	if (msg[0] == AKE_SEND_H_PRIME && hdcp->no_stored_km) {
-		cdata.cmd = HDCP_TRANSPORT_CMD_RECV_MESSAGE;
-		cdata.timeout = hdcp->app_data.timeout;
-		cdata.buf = hdcp->app_data.request.data + 1;
-		goto exit;
-	}
-
-	out_msg = (u32)hdcp->app_data.response.data[0];
-
-	pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(out_msg));
-
-	if (msg[0] == REP_STREAM_READY && out_msg != REP_STREAM_MANAGE) {
-		if (!hdcp->authenticated) {
-			rc = hdcp2_app_comm(hdcp->hdcp2_ctx,
-					HDCP2_CMD_EN_ENCRYPTION,
-					&hdcp->app_data);
-			if (!rc) {
-				hdcp->authenticated = true;
-
-				if (hdcp->force_encryption)
-					hdcp2_force_encryption(
-							hdcp->hdcp2_ctx, 1);
-
-				cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS;
-				sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-			} else {
-				pr_err("failed to enable encryption (%d)\n",
-						rc);
-			}
-		}
-
-		sde_hdcp_2x_initialize_command(hdcp,
-				HDCP_TRANSPORT_CMD_LINK_POLL, &cdata);
-		goto exit;
-	}
-
-	hdcp->resend_lc_init = false;
-	if (msg[0] == LC_SEND_L_PRIME && out_msg == LC_INIT)
-		hdcp->resend_lc_init = true;
-
-	hdcp->resend_stream_manage = false;
-	if (msg[0] == REP_STREAM_READY && out_msg == REP_STREAM_MANAGE)
-		hdcp->resend_stream_manage = true;
-
-	if (out_msg == AKE_NO_STORED_KM)
-		hdcp->no_stored_km = true;
-	else
-		hdcp->no_stored_km = false;
-
-	if (out_msg == SKE_SEND_EKS) {
-		hdcp->repeater_flag = hdcp->app_data.repeater_flag;
-		hdcp->update_stream = true;
-	}
-
-	if (!atomic_read(&hdcp->hdcp_off)) {
-		cdata.cmd = HDCP_TRANSPORT_CMD_SEND_MESSAGE;
-		cdata.buf = hdcp->app_data.response.data + 1;
-		cdata.buf_len = hdcp->app_data.response.length;
-		cdata.timeout = hdcp->app_data.timeout;
-	}
-exit:
-	sde_hdcp_2x_wakeup_client(hdcp, &cdata);
-
-	if (rc && !atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_clean(hdcp);
-}
-
-static struct list_head *sde_hdcp_2x_stream_present(
-		struct sde_hdcp_2x_ctrl *hdcp, u8 stream_id, u8 virtual_channel)
-{
-	struct sde_hdcp_stream *stream_entry;
-	struct list_head *entry;
-	bool present = false;
-
-	list_for_each(entry, &hdcp->stream_handles) {
-		stream_entry = list_entry(entry,
-			struct sde_hdcp_stream, list);
-		if (stream_entry->virtual_channel == virtual_channel &&
-				stream_entry->stream_id == stream_id) {
-			present = true;
-			break;
-		}
-	}
-
-	if (!present)
-		entry = NULL;
-	return entry;
-}
-
-static void sde_hdcp_2x_open_stream(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc;
-	size_t iterations, i;
-	u8 stream_id;
-	u8 virtual_channel;
-	u32 stream_handle = 0;
-	bool query_streams = false;
-
-	if (!hdcp->streams) {
-		pr_err("Array of streams to register is NULL\n");
-		return;
-	}
-
-	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
-
-	for (i  = 0; i < iterations; i++) {
-		if (hdcp->stream_count == MAX_STREAM_COUNT) {
-			pr_debug("Registered the maximum amount of streams\n");
-			break;
-		}
-
-		stream_id = hdcp->streams[i].stream_id;
-		virtual_channel = hdcp->streams[i].virtual_channel;
-
-		pr_debug("Opening stream %d, virtual channel %d\n",
-			stream_id, virtual_channel);
-
-		if (sde_hdcp_2x_stream_present(hdcp, stream_id,
-				virtual_channel)) {
-			pr_debug("Stream %d, virtual channel %d already open\n",
-				stream_id, virtual_channel);
-			continue;
-		}
-
-		rc = hdcp2_open_stream(hdcp->hdcp2_ctx, virtual_channel,
-				stream_id, &stream_handle);
-		if (rc) {
-			pr_err("Unable to open stream %d, virtual channel %d\n",
-				stream_id, virtual_channel);
-		} else {
-			struct sde_hdcp_stream *stream =
-				kzalloc(sizeof(struct sde_hdcp_stream),
-					GFP_KERNEL);
-			if (!stream)
-				break;
-
-			INIT_LIST_HEAD(&stream->list);
-			stream->stream_handle = stream_handle;
-			stream->stream_id = stream_id;
-			stream->virtual_channel = virtual_channel;
-
-			list_add(&stream->list, &hdcp->stream_handles);
-			hdcp->stream_count++;
-
-			query_streams = true;
-		}
-	}
-
-	if (query_streams && hdcp->authenticated)
-		sde_hdcp_2x_query_stream(hdcp);
-}
-
-static void sde_hdcp_2x_close_stream(struct sde_hdcp_2x_ctrl *hdcp)
-{
-	int rc;
-	size_t iterations, i;
-	u8 stream_id;
-	u8 virtual_channel;
-	struct list_head *entry;
-	struct sde_hdcp_stream *stream_entry;
-	bool query_streams = false;
-
-	if (!hdcp->streams) {
-		pr_err("Array of streams to register is NULL\n");
-		return;
-	}
-
-	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
-
-	for (i = 0; i < iterations; i++) {
-		if (hdcp->stream_count == 0) {
-			pr_debug("No streams are currently registered\n");
-			return;
-		}
-
-		stream_id = hdcp->streams[i].stream_id;
-		virtual_channel = hdcp->streams[i].virtual_channel;
-
-		pr_debug("Closing stream %d, virtual channel %d\n",
-			stream_id, virtual_channel);
-
-		entry = sde_hdcp_2x_stream_present(hdcp, stream_id,
-			virtual_channel);
-
-		if (!entry) {
-			pr_err("Unable to find stream %d, virtual channel %d\n"
-				, stream_id, virtual_channel);
-			continue;
-		}
-
-		stream_entry = list_entry(entry, struct sde_hdcp_stream,
-			list);
-
-		rc = hdcp2_close_stream(hdcp->hdcp2_ctx,
-			stream_entry->stream_handle);
-		if (rc)
-			pr_err("Unable to close stream %d, virtual channel %d\n"
-				, stream_id, virtual_channel);
-		hdcp->stream_count--;
-		list_del(entry);
-		kzfree(stream_entry);
-		query_streams = true;
-	}
-
-	if (query_streams && hdcp->authenticated)
-		sde_hdcp_2x_query_stream(hdcp);
-}
-
-/** sde_hdcp_2x_wakeup() - wakeup the module to execute a requested command
- * @data: data required for executing corresponding command.
- *
- * This function is executed on caller's thread. Update the local data
- * and wakeup the local thread to execute the command. Once the local
- * thread is activated, caller's thread is returned and this function
- * is ready to receive next command.
- */
-static int sde_hdcp_2x_wakeup(struct sde_hdcp_2x_wakeup_data *data)
-{
-	struct sde_hdcp_2x_ctrl *hdcp;
-	int rc = 0;
-
-	if (!data)
-		return -EINVAL;
-
-	hdcp = data->context;
-	if (!hdcp)
-		return -EINVAL;
-
-	hdcp->timeout_left = data->timeout;
-	hdcp->total_message_length = data->total_message_length;
-	hdcp->min_enc_level = data->min_enc_level;
-	hdcp->streams = data->streams;
-	hdcp->num_streams = data->num_streams;
-
-	if (!completion_done(&hdcp->response_completion))
-		complete_all(&hdcp->response_completion);
-
-	kfifo_put(&hdcp->cmd_q, data->cmd);
-
-	switch (data->cmd) {
-	case HDCP_2X_CMD_STOP:
-		atomic_set(&hdcp->hdcp_off, 1);
-
-		kthread_park(hdcp->thread);
-		break;
-	case HDCP_2X_CMD_START:
-		hdcp->no_stored_km = false;
-		hdcp->repeater_flag = false;
-		hdcp->update_stream = false;
-		hdcp->authenticated = false;
-		hdcp->last_msg = INVALID_MESSAGE;
-		hdcp->timeout_left = 0;
-		atomic_set(&hdcp->hdcp_off, 0);
-
-		kthread_unpark(hdcp->thread);
-		wake_up(&hdcp->wait_q);
-		break;
-	default:
-		wake_up(&hdcp->wait_q);
-		break;
-	}
-
-	return rc;
-}
-
-static int sde_hdcp_2x_main(void *data)
-{
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-	enum sde_hdcp_2x_wakeup_cmd cmd;
-
-	while (1) {
-		wait_event(hdcp->wait_q,
-			!kfifo_is_empty(&hdcp->cmd_q) ||
-			kthread_should_stop() ||
-			kthread_should_park());
-
-		if (kthread_should_stop())
-			break;
-
-		if (kfifo_is_empty(&hdcp->cmd_q) && kthread_should_park()) {
-			kthread_parkme();
-			continue;
-		}
-
-		if (!kfifo_get(&hdcp->cmd_q, &cmd))
-			continue;
-
-		switch (cmd) {
-		case HDCP_2X_CMD_START:
-			sde_hdcp_2x_init(hdcp);
-			break;
-		case HDCP_2X_CMD_STOP:
-			sde_hdcp_2x_clean(hdcp);
-			break;
-		case HDCP_2X_CMD_START_AUTH:
-			sde_hdcp_2x_start_auth(hdcp);
-			break;
-		case HDCP_2X_CMD_MSG_SEND_SUCCESS:
-			sde_hdcp_2x_msg_sent(hdcp);
-			break;
-		case HDCP_2X_CMD_MSG_SEND_FAILED:
-		case HDCP_2X_CMD_MSG_RECV_FAILED:
-		case HDCP_2X_CMD_LINK_FAILED:
-			sde_hdcp_2x_clean(hdcp);
-			break;
-		case HDCP_2X_CMD_MSG_RECV_SUCCESS:
-			sde_hdcp_2x_msg_recvd(hdcp);
-			break;
-		case HDCP_2X_CMD_MSG_RECV_TIMEOUT:
-			sde_hdcp_2x_timeout(hdcp);
-			break;
-		case HDCP_2X_CMD_QUERY_STREAM_TYPE:
-			sde_hdcp_2x_query_stream(hdcp);
-			break;
-		case HDCP_2X_CMD_MIN_ENC_LEVEL:
-			if (!hdcp->repeater_flag) {
-				sde_hdcp_2x_send_type(hdcp);
-				break;
-			}
-			sde_hdcp_2x_query_stream(hdcp);
-			break;
-		case HDCP_2X_CMD_OPEN_STREAMS:
-			sde_hdcp_2x_open_stream(hdcp);
-			break;
-		case HDCP_2X_CMD_CLOSE_STREAMS:
-			sde_hdcp_2x_close_stream(hdcp);
-			break;
-		default:
-			break;
-		}
-	}
-
-	return 0;
-}
-
-int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data)
-{
-	int rc = 0;
-	struct sde_hdcp_2x_ctrl *hdcp = NULL;
-
-	if (!data) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (!data->ops) {
-		pr_err("invalid input: txmtr context\n");
-		return -EINVAL;
-	}
-
-	if (!data->client_ops) {
-		pr_err("invalid input: client_ops\n");
-		return -EINVAL;
-	}
-
-	if (!data->hdcp_data) {
-		pr_err("invalid input: hdcp_data\n");
-		return -EINVAL;
-	}
-
-	/* populate ops to be called by client */
-	data->ops->feature_supported = sde_hdcp_2x_client_feature_supported;
-	data->ops->wakeup = sde_hdcp_2x_wakeup;
-	data->ops->force_encryption = sde_hdcp_2x_force_encryption;
-
-	hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
-	if (!hdcp) {
-		rc = -ENOMEM;
-		goto unlock;
-	}
-
-	INIT_LIST_HEAD(&hdcp->stream_handles);
-	hdcp->client_data = data->client_data;
-	hdcp->client_ops = data->client_ops;
-
-	INIT_KFIFO(hdcp->cmd_q);
-
-	init_waitqueue_head(&hdcp->wait_q);
-	atomic_set(&hdcp->hdcp_off, 1);
-
-	init_completion(&hdcp->response_completion);
-
-	*data->hdcp_data = hdcp;
-
-	hdcp->thread = kthread_run(sde_hdcp_2x_main, hdcp, "hdcp_2x");
-
-	if (IS_ERR(hdcp->thread)) {
-		pr_err("unable to start lib thread\n");
-		rc = PTR_ERR(hdcp->thread);
-		hdcp->thread = NULL;
-		goto error;
-	}
-
-	hdcp->force_encryption = false;
-
-	return 0;
-error:
-	kzfree(hdcp);
-	hdcp = NULL;
-unlock:
-	return rc;
-}
-
-int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type)
-{
-	int rc =  0;
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-
-	if (!hdcp)
-		return  -EINVAL;
-
-	if (hdcp->hdcp2_ctx) {
-		pr_debug("HDCP library context already acquired\n");
-		return 0;
-	}
-
-	hdcp->device_type = device_type;
-	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
-	if (!hdcp->hdcp2_ctx) {
-		pr_err("Unable to acquire HDCP library handle\n");
-		return -ENOMEM;
-	}
-
-	return rc;
-}
-
-void sde_hdcp_2x_disable(void *data)
-{
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-
-	if (!hdcp->hdcp2_ctx)
-		return;
-
-	hdcp2_deinit(hdcp->hdcp2_ctx);
-	hdcp->hdcp2_ctx = NULL;
-}
-
-void sde_hdcp_2x_deregister(void *data)
-{
-	struct sde_hdcp_2x_ctrl *hdcp = data;
-
-	if (!hdcp)
-		return;
-
-	sde_hdcp_2x_disable(data);
-	kthread_stop(hdcp->thread);
-	kzfree(hdcp);
-}
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
deleted file mode 100644
index cfcd7ce..0000000
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_HDCP_2X_H__
-#define __SDE_HDCP_2X_H__
-
-#include "sde_hdcp.h"
-
-#define TO_STR(x) #x
-
-#define HDCP_MAX_MESSAGE_PARTS 4
-
-/**
- * enum sde_hdcp_2x_wakeup_cmd - commands for interacting with HDCP driver
- * @HDCP_2X_CMD_INVALID:           initialization value
- * @HDCP_2X_CMD_START:             start HDCP driver
- * @HDCP_2X_CMD_START_AUTH:        start authentication
- * @HDCP_2X_CMD_STOP:              stop HDCP driver
- * @HDCP_2X_CMD_MSG_SEND_SUCCESS:  sending message to sink succeeded
- * @HDCP_2X_CMD_MSG_SEND_FAILED:   sending message to sink failed
- * @HDCP_2X_CMD_MSG_SEND_TIMEOUT:  sending message to sink timed out
- * @HDCP_2X_CMD_MSG_RECV_SUCCESS:  receiving message from sink succeeded
- * @HDCP_2X_CMD_MSG_RECV_FAILED:   receiving message from sink failed
- * @HDCP_2X_CMD_MSG_RECV_TIMEOUT:  receiving message from sink timed out
- * @HDCP_2X_CMD_QUERY_STREAM_TYPE: start content stream processing
- * @HDCP_2X_CMD_LINK_FAILED:       link failure notification
- * @HDCP_2X_CMD_MIN_ENC_LEVEL:     trigger minimum encryption level change
- * @HDCP_2X_CMD_OPEN_STREAMS:       open a virtual channel
- * @HDCP_2X_CMD_CLOSE_STREAMS:      close a virtual channel
- */
-enum sde_hdcp_2x_wakeup_cmd {
-	HDCP_2X_CMD_INVALID,
-	HDCP_2X_CMD_START,
-	HDCP_2X_CMD_START_AUTH,
-	HDCP_2X_CMD_STOP,
-	HDCP_2X_CMD_MSG_SEND_SUCCESS,
-	HDCP_2X_CMD_MSG_SEND_FAILED,
-	HDCP_2X_CMD_MSG_SEND_TIMEOUT,
-	HDCP_2X_CMD_MSG_RECV_SUCCESS,
-	HDCP_2X_CMD_MSG_RECV_FAILED,
-	HDCP_2X_CMD_MSG_RECV_TIMEOUT,
-	HDCP_2X_CMD_QUERY_STREAM_TYPE,
-	HDCP_2X_CMD_LINK_FAILED,
-	HDCP_2X_CMD_MIN_ENC_LEVEL,
-	HDCP_2X_CMD_OPEN_STREAMS,
-	HDCP_2X_CMD_CLOSE_STREAMS,
-};
-
-/**
- * enum hdcp_transport_wakeup_cmd - commands to instruct display transport layer
- * @HDCP_TRANSPORT_CMD_INVALID:        initialization value
- * @HDCP_TRANSPORT_CMD_SEND_MESSAGE:   send message to sink
- * @HDCP_TRANSPORT_CMD_RECV_MESSAGE:   receive message from sink
- * @HDCP_TRANSPORT_CMD_STATUS_SUCCESS: successfully communicated with TrustZone
- * @HDCP_TRANSPORT_CMD_STATUS_FAILED:  failed to communicate with TrustZone
- * @HDCP_TRANSPORT_CMD_LINK_POLL:      poll the HDCP link
- * @HDCP_TRANSPORT_CMD_LINK_CHECK:     check link status in response to cp_irq
- * @HDCP_TRANSPORT_CMD_AUTHENTICATE:   start authentication
- */
-enum hdcp_transport_wakeup_cmd {
-	HDCP_TRANSPORT_CMD_INVALID,
-	HDCP_TRANSPORT_CMD_SEND_MESSAGE,
-	HDCP_TRANSPORT_CMD_RECV_MESSAGE,
-	HDCP_TRANSPORT_CMD_STATUS_SUCCESS,
-	HDCP_TRANSPORT_CMD_STATUS_FAILED,
-	HDCP_TRANSPORT_CMD_LINK_POLL,
-	HDCP_TRANSPORT_CMD_LINK_CHECK,
-	HDCP_TRANSPORT_CMD_AUTHENTICATE,
-};
-
-enum sde_hdcp_2x_device_type {
-	HDCP_TXMTR_HDMI = 0x8001,
-	HDCP_TXMTR_DP = 0x8002,
-	HDCP_TXMTR_DP_MST = 0x8003
-};
-
-/**
- * struct sde_hdcp_2x_lib_wakeup_data - command and data send to HDCP driver
- * @cmd:                       command type
- * @context:                   void pointer to the HDCP driver instance
- * @buf:                       message received from the sink
- * @buf_len:                   length of message received from the sink
- * @timeout:                   time out value for timed transactions
- * @streams:                   list indicating which streams need adjustment
- * @num_streams:               number of entries in streams
- */
-struct sde_hdcp_2x_wakeup_data {
-	enum sde_hdcp_2x_wakeup_cmd cmd;
-	void *context;
-	uint32_t total_message_length;
-	uint32_t timeout;
-	u8 min_enc_level;
-	struct stream_info *streams;
-	u8 num_streams;
-};
-
-/**
- * struct sde_hdcp_2x_msg_part - a single part of an HDCP 2.2 message
- * @name:       user readable message name
- * @offset:     message part offset
- * @length      message part length
- */
-struct sde_hdcp_2x_msg_part {
-	char *name;
-	uint32_t offset;
-	uint32_t length;
-};
-
-/**
- * struct sde_hdcp_2x_msg_data - HDCP 2.2 message containing one or more parts
- * @num_messages:   total number of parts in a full message
- * @messages:       array containing num_messages parts
- * @rx_status:      value of rx_status register
- */
-struct sde_hdcp_2x_msg_data {
-	uint32_t num_messages;
-	struct sde_hdcp_2x_msg_part messages[HDCP_MAX_MESSAGE_PARTS];
-	uint8_t rx_status;
-};
-
-/**
- * struct hdcp_transport_wakeup_data - data sent to display transport layer
- * @cmd:            command type
- * @context:        void pointer to the display transport layer
- * @send_msg_buf:   buffer containing message to be sent to sink
- * @send_msg_len:   length of the message to be sent to sink
- * @timeout:        timeout value for timed transactions
- * @abort_mask:     mask used to determine whether HDCP link is valid
- * @message_data:   a pointer to the message description
- */
-struct hdcp_transport_wakeup_data {
-	enum hdcp_transport_wakeup_cmd cmd;
-	void *context;
-	unsigned char *buf;
-	u32 buf_len;
-	u32 timeout;
-	u8 abort_mask;
-	const struct sde_hdcp_2x_msg_data *message_data;
-};
-
-static inline const char *sde_hdcp_2x_cmd_to_str(
-		enum sde_hdcp_2x_wakeup_cmd cmd)
-{
-	switch (cmd) {
-	case HDCP_2X_CMD_START:
-		return TO_STR(HDCP_2X_CMD_START);
-	case HDCP_2X_CMD_STOP:
-		return TO_STR(HDCP_2X_CMD_STOP);
-	case HDCP_2X_CMD_MSG_SEND_SUCCESS:
-		return TO_STR(HDCP_2X_CMD_MSG_SEND_SUCCESS);
-	case HDCP_2X_CMD_MSG_SEND_FAILED:
-		return TO_STR(HDCP_2X_CMD_MSG_SEND_FAILED);
-	case HDCP_2X_CMD_MSG_SEND_TIMEOUT:
-		return TO_STR(HDCP_2X_CMD_MSG_SEND_TIMEOUT);
-	case HDCP_2X_CMD_MSG_RECV_SUCCESS:
-		return TO_STR(HDCP_2X_CMD_MSG_RECV_SUCCESS);
-	case HDCP_2X_CMD_MSG_RECV_FAILED:
-		return TO_STR(HDCP_2X_CMD_MSG_RECV_FAILED);
-	case HDCP_2X_CMD_MSG_RECV_TIMEOUT:
-		return TO_STR(HDCP_2X_CMD_MSG_RECV_TIMEOUT);
-	case HDCP_2X_CMD_QUERY_STREAM_TYPE:
-		return TO_STR(HDCP_2X_CMD_QUERY_STREAM_TYPE);
-	case HDCP_2X_CMD_OPEN_STREAMS:
-		return TO_STR(HDCP_2X_CMD_OPEN_STREAMS);
-	case HDCP_2X_CMD_CLOSE_STREAMS:
-		return TO_STR(HDCP_2X_CMD_CLOSE_STREAMS);
-	default:
-		return "UNKNOWN";
-	}
-}
-
-static inline const char *hdcp_transport_cmd_to_str(
-		enum hdcp_transport_wakeup_cmd cmd)
-{
-	switch (cmd) {
-	case HDCP_TRANSPORT_CMD_SEND_MESSAGE:
-		return TO_STR(HDCP_TRANSPORT_CMD_SEND_MESSAGE);
-	case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
-		return TO_STR(HDCP_TRANSPORT_CMD_RECV_MESSAGE);
-	case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
-		return TO_STR(HDCP_TRANSPORT_CMD_STATUS_SUCCESS);
-	case HDCP_TRANSPORT_CMD_STATUS_FAILED:
-		return TO_STR(HDCP_TRANSPORT_CMD_STATUS_FAILED);
-	case HDCP_TRANSPORT_CMD_LINK_POLL:
-		return TO_STR(HDCP_TRANSPORT_CMD_LINK_POLL);
-	case HDCP_TRANSPORT_CMD_AUTHENTICATE:
-		return TO_STR(HDCP_TRANSPORT_CMD_AUTHENTICATE);
-	default:
-		return "UNKNOWN";
-	}
-}
-
-struct sde_hdcp_2x_ops {
-	int (*wakeup)(struct sde_hdcp_2x_wakeup_data *data);
-	bool (*feature_supported)(void *data);
-	void (*force_encryption)(void *data, bool enable);
-};
-
-struct hdcp_transport_ops {
-	int (*wakeup)(struct hdcp_transport_wakeup_data *data);
-};
-
-struct sde_hdcp_2x_register_data {
-	struct hdcp_transport_ops *client_ops;
-	struct sde_hdcp_2x_ops *ops;
-	void *client_data;
-	void **hdcp_data;
-};
-
-/* functions for the HDCP 2.2 state machine module */
-int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data);
-int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type);
-void sde_hdcp_2x_disable(void *data);
-void sde_hdcp_2x_deregister(void *data);
-#endif
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
deleted file mode 100644
index b8c2ccc..0000000
--- a/drivers/gpu/drm/msm/sde_io_util.c
+++ /dev/null
@@ -1,540 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, 2017-2019 The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/regulator/consumer.h>
-#include <linux/delay.h>
-#include <linux/sde_io_util.h>
-
-#define MAX_I2C_CMDS  16
-void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
-{
-	u32 in_val;
-
-	if (!io || !io->base) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	if (offset > io->len) {
-		DEV_ERR("%pS->%s: offset out of range\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	writel_relaxed(value, io->base + offset);
-	if (debug) {
-		in_val = readl_relaxed(io->base + offset);
-		DEV_DBG("[%08x] => %08x [%08x]\n",
-			(u32)(unsigned long)(io->base + offset),
-			value, in_val);
-	}
-} /* dss_reg_w */
-EXPORT_SYMBOL(dss_reg_w);
-
-u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
-{
-	u32 value;
-
-	if (!io || !io->base) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	if (offset > io->len) {
-		DEV_ERR("%pS->%s: offset out of range\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	value = readl_relaxed(io->base + offset);
-	if (debug)
-		DEV_DBG("[%08x] <= %08x\n",
-			(u32)(unsigned long)(io->base + offset), value);
-
-	return value;
-} /* dss_reg_r */
-EXPORT_SYMBOL(dss_reg_r);
-
-void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
-	u32 debug)
-{
-	if (debug)
-		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
-			(void *)base, length, false);
-} /* dss_reg_dump */
-EXPORT_SYMBOL(dss_reg_dump);
-
-static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
-	unsigned int type, const char *name)
-{
-	struct resource *res = NULL;
-
-	res = platform_get_resource_byname(pdev, type, name);
-	if (!res)
-		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
-
-	return res;
-} /* msm_dss_get_res_byname */
-EXPORT_SYMBOL(msm_dss_get_res_byname);
-
-int msm_dss_ioremap_byname(struct platform_device *pdev,
-	struct dss_io_data *io_data, const char *name)
-{
-	struct resource *res = NULL;
-
-	if (!pdev || !io_data) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
-	if (!res) {
-		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
-			__builtin_return_address(0), __func__, name);
-		return -ENODEV;
-	}
-
-	io_data->len = (u32)resource_size(res);
-	io_data->base = ioremap(res->start, io_data->len);
-	if (!io_data->base) {
-		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
-			__builtin_return_address(0), __func__, name);
-		return -EIO;
-	}
-
-	return 0;
-} /* msm_dss_ioremap_byname */
-EXPORT_SYMBOL(msm_dss_ioremap_byname);
-
-void msm_dss_iounmap(struct dss_io_data *io_data)
-{
-	if (!io_data) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	if (io_data->base) {
-		iounmap(io_data->base);
-		io_data->base = NULL;
-	}
-	io_data->len = 0;
-} /* msm_dss_iounmap */
-EXPORT_SYMBOL(msm_dss_iounmap);
-
-int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
-	int num_vreg, int config)
-{
-	int i = 0, rc = 0;
-	struct dss_vreg *curr_vreg = NULL;
-	enum dss_vreg_type type;
-
-	if (!in_vreg || !num_vreg)
-		return rc;
-
-	if (config) {
-		for (i = 0; i < num_vreg; i++) {
-			curr_vreg = &in_vreg[i];
-			curr_vreg->vreg = regulator_get(dev,
-				curr_vreg->vreg_name);
-			rc = PTR_RET(curr_vreg->vreg);
-			if (rc) {
-				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
-					 __builtin_return_address(0), __func__,
-					 curr_vreg->vreg_name, rc);
-				curr_vreg->vreg = NULL;
-				goto vreg_get_fail;
-			}
-			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
-					? DSS_REG_LDO : DSS_REG_VS;
-			if (type == DSS_REG_LDO) {
-				rc = regulator_set_voltage(
-					curr_vreg->vreg,
-					curr_vreg->min_voltage,
-					curr_vreg->max_voltage);
-				if (rc < 0) {
-					DEV_ERR("%pS->%s: %s set vltg fail\n",
-						__builtin_return_address(0),
-						__func__,
-						curr_vreg->vreg_name);
-					goto vreg_set_voltage_fail;
-				}
-			}
-		}
-	} else {
-		for (i = num_vreg-1; i >= 0; i--) {
-			curr_vreg = &in_vreg[i];
-			if (curr_vreg->vreg) {
-				type = (regulator_count_voltages(
-					curr_vreg->vreg) > 0)
-					? DSS_REG_LDO : DSS_REG_VS;
-				if (type == DSS_REG_LDO) {
-					regulator_set_voltage(curr_vreg->vreg,
-						0, curr_vreg->max_voltage);
-				}
-				regulator_put(curr_vreg->vreg);
-				curr_vreg->vreg = NULL;
-			}
-		}
-	}
-	return 0;
-
-vreg_unconfig:
-if (type == DSS_REG_LDO)
-	regulator_set_load(curr_vreg->vreg, 0);
-
-vreg_set_voltage_fail:
-	regulator_put(curr_vreg->vreg);
-	curr_vreg->vreg = NULL;
-
-vreg_get_fail:
-	for (i--; i >= 0; i--) {
-		curr_vreg = &in_vreg[i];
-		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
-			? DSS_REG_LDO : DSS_REG_VS;
-		goto vreg_unconfig;
-	}
-	return rc;
-} /* msm_dss_config_vreg */
-EXPORT_SYMBOL(msm_dss_config_vreg);
-
-static bool msm_dss_is_hw_controlled(struct dss_vreg in_vreg)
-{
-	u32 mode = 0;
-	char const *regulator_gdsc = "gdsc";
-
-	/*
-	 * For gdsc-regulator devices only, REGULATOR_MODE_FAST specifies that
-	 * the GDSC is in HW controlled mode.
-	 */
-	mode = regulator_get_mode(in_vreg.vreg);
-	if (!strcmp(regulator_gdsc, in_vreg.vreg_name) &&
-			mode == REGULATOR_MODE_FAST) {
-		DEV_DBG("%pS->%s: %s is HW controlled\n",
-				__builtin_return_address(0), __func__,
-				in_vreg.vreg_name);
-		return true;
-	}
-
-	return false;
-}
-
-int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
-{
-	int i = 0, rc = 0;
-	bool need_sleep;
-
-	if (enable) {
-		for (i = 0; i < num_vreg; i++) {
-			rc = PTR_RET(in_vreg[i].vreg);
-			if (rc) {
-				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name, rc);
-				goto vreg_set_opt_mode_fail;
-			}
-			if (msm_dss_is_hw_controlled(in_vreg[i]))
-				continue;
-
-			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
-			if (in_vreg[i].pre_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].pre_on_sleep * 1000,
-					(in_vreg[i].pre_on_sleep * 1000) + 10);
-			rc = regulator_set_load(in_vreg[i].vreg,
-				in_vreg[i].enable_load);
-			if (rc < 0) {
-				DEV_ERR("%pS->%s: %s set opt m fail\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name);
-				goto vreg_set_opt_mode_fail;
-			}
-			rc = regulator_enable(in_vreg[i].vreg);
-			if (in_vreg[i].post_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].post_on_sleep * 1000,
-					(in_vreg[i].post_on_sleep * 1000) + 10);
-			if (rc < 0) {
-				DEV_ERR("%pS->%s: %s enable failed\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name);
-				goto disable_vreg;
-			}
-		}
-	} else {
-		for (i = num_vreg-1; i >= 0; i--) {
-			if (msm_dss_is_hw_controlled(in_vreg[i]))
-				continue;
-
-			if (in_vreg[i].pre_off_sleep)
-				usleep_range(in_vreg[i].pre_off_sleep * 1000,
-					(in_vreg[i].pre_off_sleep * 1000) + 10);
-			regulator_set_load(in_vreg[i].vreg,
-				in_vreg[i].disable_load);
-			regulator_disable(in_vreg[i].vreg);
-			if (in_vreg[i].post_off_sleep)
-				usleep_range(in_vreg[i].post_off_sleep * 1000,
-				(in_vreg[i].post_off_sleep * 1000) + 10);
-		}
-	}
-	return rc;
-
-disable_vreg:
-	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
-
-vreg_set_opt_mode_fail:
-	for (i--; i >= 0; i--) {
-		if (in_vreg[i].pre_off_sleep)
-			usleep_range(in_vreg[i].pre_off_sleep * 1000,
-				(in_vreg[i].pre_off_sleep * 1000) + 10);
-		regulator_set_load(in_vreg[i].vreg,
-			in_vreg[i].disable_load);
-		regulator_disable(in_vreg[i].vreg);
-		if (in_vreg[i].post_off_sleep)
-			usleep_range(in_vreg[i].post_off_sleep * 1000,
-				(in_vreg[i].post_off_sleep * 1000) + 10);
-	}
-
-	return rc;
-} /* msm_dss_enable_vreg */
-EXPORT_SYMBOL(msm_dss_enable_vreg);
-
-int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
-{
-	int i = 0, rc = 0;
-
-	if (enable) {
-		for (i = 0; i < num_gpio; i++) {
-			DEV_DBG("%pS->%s: %s enable\n",
-				__builtin_return_address(0), __func__,
-				in_gpio[i].gpio_name);
-
-			rc = gpio_request(in_gpio[i].gpio,
-				in_gpio[i].gpio_name);
-			if (rc < 0) {
-				DEV_ERR("%pS->%s: %s enable failed\n",
-					__builtin_return_address(0), __func__,
-					in_gpio[i].gpio_name);
-				goto disable_gpio;
-			}
-			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
-		}
-	} else {
-		for (i = num_gpio-1; i >= 0; i--) {
-			DEV_DBG("%pS->%s: %s disable\n",
-				__builtin_return_address(0), __func__,
-				in_gpio[i].gpio_name);
-			if (in_gpio[i].gpio)
-				gpio_free(in_gpio[i].gpio);
-		}
-	}
-	return rc;
-
-disable_gpio:
-	for (i--; i >= 0; i--)
-		if (in_gpio[i].gpio)
-			gpio_free(in_gpio[i].gpio);
-
-	return rc;
-} /* msm_dss_enable_gpio */
-EXPORT_SYMBOL(msm_dss_enable_gpio);
-
-void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
-{
-	int i;
-
-	for (i = num_clk - 1; i >= 0; i--) {
-		if (clk_arry[i].clk)
-			clk_put(clk_arry[i].clk);
-		clk_arry[i].clk = NULL;
-	}
-} /* msm_dss_put_clk */
-EXPORT_SYMBOL(msm_dss_put_clk);
-
-int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
-{
-	int i, rc = 0;
-
-	for (i = 0; i < num_clk; i++) {
-		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
-		rc = PTR_RET(clk_arry[i].clk);
-		if (rc) {
-			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name, rc);
-			goto error;
-		}
-	}
-
-	return rc;
-
-error:
-	for (i--; i >= 0; i--) {
-		if (clk_arry[i].clk)
-			clk_put(clk_arry[i].clk);
-		clk_arry[i].clk = NULL;
-	}
-
-	return rc;
-} /* msm_dss_get_clk */
-EXPORT_SYMBOL(msm_dss_get_clk);
-
-int msm_dss_single_clk_set_rate(struct dss_clk *clk)
-{
-	int rc = 0;
-
-	if (!clk) {
-		DEV_ERR("invalid clk struct\n");
-		return -EINVAL;
-	}
-
-	DEV_DBG("%pS->%s: set_rate '%s'\n",
-			__builtin_return_address(0), __func__,
-			clk->clk_name);
-
-	if (clk->type != DSS_CLK_AHB) {
-		rc = clk_set_rate(clk->clk, clk->rate);
-		if (rc)
-			DEV_ERR("%pS->%s: %s failed. rc=%d\n",
-					__builtin_return_address(0),
-					__func__,
-					clk->clk_name, rc);
-	}
-
-	return rc;
-} /* msm_dss_single_clk_set_rate */
-EXPORT_SYMBOL(msm_dss_single_clk_set_rate);
-
-int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
-{
-	int i, rc = 0;
-
-	for (i = 0; i < num_clk; i++) {
-		if (clk_arry[i].clk) {
-			rc = msm_dss_single_clk_set_rate(&clk_arry[i]);
-			if (rc)
-				break;
-		} else {
-			DEV_ERR("%pS->%s: '%s' is not available\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-			rc = -EPERM;
-			break;
-		}
-	}
-
-	return rc;
-} /* msm_dss_clk_set_rate */
-EXPORT_SYMBOL(msm_dss_clk_set_rate);
-
-int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
-{
-	int i, rc = 0;
-
-	if (enable) {
-		for (i = 0; i < num_clk; i++) {
-			DEV_DBG("%pS->%s: enable '%s'\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-			if (clk_arry[i].clk) {
-				rc = clk_prepare_enable(clk_arry[i].clk);
-				if (rc)
-					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
-						__builtin_return_address(0),
-						__func__,
-						clk_arry[i].clk_name, rc);
-			} else {
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
-				rc = -EPERM;
-			}
-
-			if (rc) {
-				msm_dss_enable_clk(&clk_arry[i],
-					i, false);
-				break;
-			}
-		}
-	} else {
-		for (i = num_clk - 1; i >= 0; i--) {
-			DEV_DBG("%pS->%s: disable '%s'\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-
-			if (clk_arry[i].clk)
-				clk_disable_unprepare(clk_arry[i].clk);
-			else
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
-		}
-	}
-
-	return rc;
-} /* msm_dss_enable_clk */
-EXPORT_SYMBOL(msm_dss_enable_clk);
-
-
-int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
-			uint8_t reg_offset, uint8_t *read_buf)
-{
-	struct i2c_msg msgs[2];
-	int ret = -1;
-
-	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
-		 __func__, slave_addr, reg_offset);
-
-	msgs[0].addr = slave_addr >> 1;
-	msgs[0].flags = 0;
-	msgs[0].buf = &reg_offset;
-	msgs[0].len = 1;
-
-	msgs[1].addr = slave_addr >> 1;
-	msgs[1].flags = I2C_M_RD;
-	msgs[1].buf = read_buf;
-	msgs[1].len = 1;
-
-	ret = i2c_transfer(client->adapter, msgs, 2);
-	if (ret < 1) {
-		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
-		return -EACCES;
-	}
-	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
-	return 0;
-}
-EXPORT_SYMBOL(sde_i2c_byte_read);
-
-int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
-			uint8_t reg_offset, uint8_t *value)
-{
-	struct i2c_msg msgs[1];
-	uint8_t data[2];
-	int status = -EACCES;
-
-	pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
-		 __func__, slave_addr, reg_offset);
-
-	data[0] = reg_offset;
-	data[1] = *value;
-
-	msgs[0].addr = slave_addr >> 1;
-	msgs[0].flags = 0;
-	msgs[0].len = 2;
-	msgs[0].buf = data;
-
-	status = i2c_transfer(client->adapter, msgs, 1);
-	if (status < 1) {
-		pr_err("I2C WRITE FAILED=[%d]\n", status);
-		return -EACCES;
-	}
-	pr_debug("%s: I2C write status=%x\n", __func__, status);
-	return status;
-}
-EXPORT_SYMBOL(sde_i2c_byte_write);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
deleted file mode 100644
index 1fb48de..0000000
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ /dev/null
@@ -1,1282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#include <linux/sde_io_util.h>
-#include <linux/sde_rsc.h>
-
-#include "sde_power_handle.h"
-#include "sde_trace.h"
-#include "sde_dbg.h"
-
-static const char *data_bus_name[SDE_POWER_HANDLE_DBUS_ID_MAX] = {
-	[SDE_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,sde-data-bus",
-	[SDE_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,sde-llcc-bus",
-	[SDE_POWER_HANDLE_DBUS_ID_EBI] = "qcom,sde-ebi-bus",
-};
-
-const char *sde_power_handle_get_dbus_name(u32 bus_id)
-{
-	if (bus_id < SDE_POWER_HANDLE_DBUS_ID_MAX)
-		return data_bus_name[bus_id];
-
-	return NULL;
-}
-
-static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
-		u32 event_type)
-{
-	struct sde_power_event *event;
-
-	list_for_each_entry(event, &phandle->event_list, list) {
-		if (event->event_type & event_type)
-			event->cb_fnc(event_type, event->usr);
-	}
-}
-
-static inline void sde_power_rsc_client_init(struct sde_power_handle *phandle)
-{
-	/* creates the rsc client */
-	if (!phandle->rsc_client_init) {
-		phandle->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX,
-				"sde_power_handle", SDE_RSC_CLK_CLIENT, 0);
-		if (IS_ERR_OR_NULL(phandle->rsc_client)) {
-			pr_debug("sde rsc client create failed :%ld\n",
-						PTR_ERR(phandle->rsc_client));
-			phandle->rsc_client = NULL;
-		}
-		phandle->rsc_client_init = true;
-	}
-}
-
-static int sde_power_rsc_update(struct sde_power_handle *phandle, bool enable)
-{
-	u32 rsc_state;
-	int ret = 0;
-
-	rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
-
-	if (phandle->rsc_client)
-		ret = sde_rsc_client_state_update(phandle->rsc_client,
-			rsc_state, NULL, SDE_RSC_INVALID_CRTC_ID, NULL);
-
-	return ret;
-}
-
-struct sde_power_client *sde_power_client_create(
-	struct sde_power_handle *phandle, char *client_name)
-{
-	struct sde_power_client *client;
-	static u32 id;
-
-	if (!client_name || !phandle) {
-		pr_err("client name is null or invalid power data\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	client = kzalloc(sizeof(struct sde_power_client), GFP_KERNEL);
-	if (!client)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&phandle->phandle_lock);
-	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
-	client->usecase_ndx = VOTE_INDEX_DISABLE;
-	client->id = id;
-	client->active = true;
-	pr_debug("client %s created:%pK id :%d\n", client_name,
-		client, id);
-	id++;
-	list_add(&client->list, &phandle->power_client_clist);
-	mutex_unlock(&phandle->phandle_lock);
-
-	return client;
-}
-
-void sde_power_client_destroy(struct sde_power_handle *phandle,
-	struct sde_power_client *client)
-{
-	if (!client  || !phandle) {
-		pr_err("reg bus vote: invalid client handle\n");
-	} else if (!client->active) {
-		pr_err("sde power deinit already done\n");
-		kfree(client);
-	} else {
-		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
-			client->name, client, client->id);
-		mutex_lock(&phandle->phandle_lock);
-		list_del_init(&client->list);
-		mutex_unlock(&phandle->phandle_lock);
-		kfree(client);
-	}
-}
-
-static int sde_power_parse_dt_supply(struct platform_device *pdev,
-				struct dss_module_power *mp)
-{
-	int i = 0, rc = 0;
-	u32 tmp = 0;
-	struct device_node *of_node = NULL, *supply_root_node = NULL;
-	struct device_node *supply_node = NULL;
-
-	if (!pdev || !mp) {
-		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
-		return -EINVAL;
-	}
-
-	of_node = pdev->dev.of_node;
-
-	mp->num_vreg = 0;
-	supply_root_node = of_get_child_by_name(of_node,
-						"qcom,platform-supply-entries");
-	if (!supply_root_node) {
-		pr_debug("no supply entry present\n");
-		return rc;
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node)
-		mp->num_vreg++;
-
-	if (mp->num_vreg == 0) {
-		pr_debug("no vreg\n");
-		return rc;
-	}
-
-	pr_debug("vreg found. count=%d\n", mp->num_vreg);
-	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
-						mp->num_vreg, GFP_KERNEL);
-	if (!mp->vreg_config) {
-		rc = -ENOMEM;
-		return rc;
-	}
-
-	for_each_child_of_node(supply_root_node, supply_node) {
-
-		const char *st = NULL;
-
-		rc = of_property_read_string(supply_node,
-						"qcom,supply-name", &st);
-		if (rc) {
-			pr_err("error reading name. rc=%d\n", rc);
-			goto error;
-		}
-
-		strlcpy(mp->vreg_config[i].vreg_name, st,
-					sizeof(mp->vreg_config[i].vreg_name));
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-min-voltage", &tmp);
-		if (rc) {
-			pr_err("error reading min volt. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].min_voltage = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-max-voltage", &tmp);
-		if (rc) {
-			pr_err("error reading max volt. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].max_voltage = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-enable-load", &tmp);
-		if (rc) {
-			pr_err("error reading enable load. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].enable_load = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-disable-load", &tmp);
-		if (rc) {
-			pr_err("error reading disable load. rc=%d\n", rc);
-			goto error;
-		}
-		mp->vreg_config[i].disable_load = tmp;
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-pre-on-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply pre sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-pre-off-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply pre sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-post-on-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply post sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
-
-		rc = of_property_read_u32(supply_node,
-					"qcom,supply-post-off-sleep", &tmp);
-		if (rc)
-			pr_debug("error reading supply post sleep value. rc=%d\n",
-							rc);
-
-		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
-
-		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
-					mp->vreg_config[i].vreg_name,
-					mp->vreg_config[i].min_voltage,
-					mp->vreg_config[i].max_voltage,
-					mp->vreg_config[i].enable_load,
-					mp->vreg_config[i].disable_load,
-					mp->vreg_config[i].pre_on_sleep,
-					mp->vreg_config[i].post_on_sleep,
-					mp->vreg_config[i].pre_off_sleep,
-					mp->vreg_config[i].post_off_sleep);
-		++i;
-
-		rc = 0;
-	}
-
-	return rc;
-
-error:
-	if (mp->vreg_config) {
-		devm_kfree(&pdev->dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-		mp->num_vreg = 0;
-	}
-
-	return rc;
-}
-
-static int sde_power_parse_dt_clock(struct platform_device *pdev,
-					struct dss_module_power *mp)
-{
-	u32 i = 0, rc = 0;
-	const char *clock_name;
-	u32 clock_rate = 0;
-	u32 clock_max_rate = 0;
-	int num_clk = 0;
-
-	if (!pdev || !mp) {
-		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
-		return -EINVAL;
-	}
-
-	mp->num_clk = 0;
-	num_clk = of_property_count_strings(pdev->dev.of_node,
-							"clock-names");
-	if (num_clk <= 0) {
-		pr_debug("clocks are not defined\n");
-		goto clk_err;
-	}
-
-	mp->num_clk = num_clk;
-	mp->clk_config = devm_kzalloc(&pdev->dev,
-			sizeof(struct dss_clk) * num_clk, GFP_KERNEL);
-	if (!mp->clk_config) {
-		rc = -ENOMEM;
-		mp->num_clk = 0;
-		goto clk_err;
-	}
-
-	for (i = 0; i < num_clk; i++) {
-		of_property_read_string_index(pdev->dev.of_node, "clock-names",
-							i, &clock_name);
-		strlcpy(mp->clk_config[i].clk_name, clock_name,
-				sizeof(mp->clk_config[i].clk_name));
-
-		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
-							i, &clock_rate);
-		mp->clk_config[i].rate = clock_rate;
-
-		if (!clock_rate)
-			mp->clk_config[i].type = DSS_CLK_AHB;
-		else
-			mp->clk_config[i].type = DSS_CLK_PCLK;
-
-		clock_max_rate = 0;
-		of_property_read_u32_index(pdev->dev.of_node, "clock-max-rate",
-							i, &clock_max_rate);
-		mp->clk_config[i].max_rate = clock_max_rate;
-	}
-
-clk_err:
-	return rc;
-}
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-
-#define MAX_AXI_PORT_COUNT 3
-
-static int _sde_power_data_bus_set_quota(
-		struct sde_power_data_bus_handle *pdbus,
-		u64 ab_quota_rt, u64 ab_quota_nrt,
-		u64 ib_quota_rt, u64 ib_quota_nrt)
-{
-	int new_uc_idx;
-	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
-	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
-	int rc;
-
-	if (pdbus->data_bus_hdl < 1) {
-		pr_err("invalid bus handle %d\n", pdbus->data_bus_hdl);
-		return -EINVAL;
-	}
-
-	pdbus->ab_rt = ab_quota_rt;
-	pdbus->ib_rt = ib_quota_rt;
-	pdbus->ab_nrt = ab_quota_nrt;
-	pdbus->ib_nrt = ib_quota_nrt;
-
-	if (pdbus->enable) {
-		ab_quota_rt = max_t(u64, ab_quota_rt,
-				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
-		ib_quota_rt = max_t(u64, ib_quota_rt,
-				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
-		ab_quota_nrt = max_t(u64, ab_quota_nrt,
-				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
-		ib_quota_nrt = max_t(u64, ib_quota_nrt,
-				SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA);
-	} else {
-		ab_quota_rt = min_t(u64, ab_quota_rt,
-				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
-		ib_quota_rt = min_t(u64, ib_quota_rt,
-				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
-		ab_quota_nrt = min_t(u64, ab_quota_nrt,
-				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
-		ib_quota_nrt = min_t(u64, ib_quota_nrt,
-				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
-	}
-
-	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
-		new_uc_idx = 0;
-	} else {
-		int i;
-		struct msm_bus_vectors *vect = NULL;
-		struct msm_bus_scale_pdata *bw_table =
-			pdbus->data_bus_scale_table;
-		u32 nrt_data_paths_cnt = pdbus->nrt_data_paths_cnt;
-		u32 total_data_paths_cnt = pdbus->data_paths_cnt;
-		u32 rt_data_paths_cnt = total_data_paths_cnt -
-			nrt_data_paths_cnt;
-
-		if (!bw_table || !total_data_paths_cnt ||
-		    total_data_paths_cnt > MAX_AXI_PORT_COUNT) {
-			pr_err("invalid input\n");
-			return -EINVAL;
-		}
-
-		if (nrt_data_paths_cnt) {
-
-			ab_quota_rt = div_u64(ab_quota_rt, rt_data_paths_cnt);
-			ab_quota_nrt = div_u64(ab_quota_nrt,
-						nrt_data_paths_cnt);
-
-			ib_quota_rt = div_u64(ib_quota_rt,
-						rt_data_paths_cnt);
-			ib_quota_nrt = div_u64(ib_quota_nrt,
-						nrt_data_paths_cnt);
-
-			for (i = 0; i < total_data_paths_cnt; i++) {
-				if (i < rt_data_paths_cnt) {
-					ab_quota[i] = ab_quota_rt;
-					ib_quota[i] = ib_quota_rt;
-				} else {
-					ab_quota[i] = ab_quota_nrt;
-					ib_quota[i] = ib_quota_nrt;
-				}
-			}
-		} else {
-			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
-					total_data_paths_cnt);
-			ib_quota[0] = div_u64(ib_quota_rt + ib_quota_nrt,
-					total_data_paths_cnt);
-
-			for (i = 1; i < total_data_paths_cnt; i++) {
-				ab_quota[i] = ab_quota[0];
-				ib_quota[i] = ib_quota[0];
-			}
-		}
-
-		new_uc_idx = (pdbus->curr_bw_uc_idx %
-			(bw_table->num_usecases - 1)) + 1;
-
-		for (i = 0; i < total_data_paths_cnt; i++) {
-			vect = &bw_table->usecase[new_uc_idx].vectors[i];
-			vect->ab = ab_quota[i];
-			vect->ib = ib_quota[i];
-
-			pr_debug(
-				"%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
-				bw_table->name,
-				new_uc_idx, (i < rt_data_paths_cnt) ?
-				"rt" : "nrt", i, vect->ab, vect->ib);
-		}
-	}
-	pdbus->curr_bw_uc_idx = new_uc_idx;
-	pdbus->ao_bw_uc_idx = new_uc_idx;
-
-	SDE_ATRACE_BEGIN("msm_bus_scale_req");
-	rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
-			new_uc_idx);
-	SDE_ATRACE_END("msm_bus_scale_req");
-
-	return rc;
-}
-
-int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
-		struct sde_power_client *pclient,
-		int bus_client, u32 bus_id,
-		u64 ab_quota, u64 ib_quota)
-{
-	int rc = 0;
-	int i;
-	u64 total_ab_rt = 0, total_ib_rt = 0;
-	u64 total_ab_nrt = 0, total_ib_nrt = 0;
-	struct sde_power_client *client;
-
-	if (!phandle || !pclient ||
-			bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX ||
-			bus_id >= SDE_POWER_HANDLE_DBUS_ID_MAX) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&phandle->phandle_lock);
-
-	pclient->ab[bus_client] = ab_quota;
-	pclient->ib[bus_client] = ib_quota;
-	trace_sde_perf_update_bus(bus_client, bus_id, ab_quota, ib_quota);
-
-	list_for_each_entry(client, &phandle->power_client_clist, list) {
-		for (i = 0; i < SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX; i++) {
-			if (i == SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT) {
-				total_ab_nrt += client->ab[i];
-				total_ib_nrt += client->ib[i];
-			} else {
-				total_ab_rt += client->ab[i];
-				total_ib_rt = max(total_ib_rt, client->ib[i]);
-			}
-		}
-	}
-
-	if (phandle->data_bus_handle[bus_id].data_bus_hdl)
-		rc = _sde_power_data_bus_set_quota(
-			&phandle->data_bus_handle[bus_id],
-			total_ab_rt, total_ab_nrt,
-			total_ib_rt, total_ib_nrt);
-
-	mutex_unlock(&phandle->phandle_lock);
-
-	return rc;
-}
-
-static void sde_power_data_bus_unregister(
-		struct sde_power_data_bus_handle *pdbus)
-{
-	if (pdbus->data_bus_hdl) {
-		msm_bus_scale_unregister_client(pdbus->data_bus_hdl);
-		pdbus->data_bus_hdl = 0;
-	}
-}
-
-static int sde_power_data_bus_parse(struct platform_device *pdev,
-	struct sde_power_data_bus_handle *pdbus, const char *name)
-{
-	struct device_node *node;
-	int rc = 0;
-	int paths;
-
-	pdbus->bus_channels = 1;
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,sde-dram-channels", &pdbus->bus_channels);
-	if (rc) {
-		pr_debug("number of channels property not specified\n");
-		rc = 0;
-	}
-
-	pdbus->nrt_data_paths_cnt = 0;
-	rc = of_property_read_u32(pdev->dev.of_node,
-			"qcom,sde-num-nrt-paths",
-			&pdbus->nrt_data_paths_cnt);
-	if (rc) {
-		pr_debug("number of axi port property not specified\n");
-		rc = 0;
-	}
-
-	node = of_get_child_by_name(pdev->dev.of_node, name);
-	if (node) {
-		rc = of_property_read_u32(node,
-				"qcom,msm-bus,num-paths", &paths);
-		if (rc) {
-			pr_err("Error. qcom,msm-bus,num-paths not found\n");
-			return rc;
-		}
-		pdbus->data_paths_cnt = paths;
-
-		pdbus->data_bus_scale_table =
-				msm_bus_pdata_from_node(pdev, node);
-		if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
-			pr_err("reg bus handle parsing failed\n");
-			rc = PTR_ERR(pdbus->data_bus_scale_table);
-			if (!pdbus->data_bus_scale_table)
-				rc = -EINVAL;
-			goto end;
-		}
-		pdbus->data_bus_hdl = msm_bus_scale_register_client(
-				pdbus->data_bus_scale_table);
-		if (!pdbus->data_bus_hdl) {
-			pr_err("data_bus_client register failed\n");
-			rc = -EINVAL;
-			goto end;
-		}
-		pr_debug("register %s data_bus_hdl=%x\n", name,
-				pdbus->data_bus_hdl);
-	}
-
-end:
-	return rc;
-}
-
-static int sde_power_reg_bus_parse(struct platform_device *pdev,
-	struct sde_power_handle *phandle)
-{
-	struct device_node *node;
-	struct msm_bus_scale_pdata *bus_scale_table;
-	int rc = 0;
-
-	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-reg-bus");
-	if (node) {
-		bus_scale_table = msm_bus_pdata_from_node(pdev, node);
-		if (IS_ERR_OR_NULL(bus_scale_table)) {
-			pr_err("reg bus handle parsing failed\n");
-			rc = PTR_ERR(bus_scale_table);
-			if (!bus_scale_table)
-				rc = -EINVAL;
-			goto end;
-		}
-		phandle->reg_bus_hdl = msm_bus_scale_register_client(
-			      bus_scale_table);
-		if (!phandle->reg_bus_hdl) {
-			pr_err("reg_bus_client register failed\n");
-			rc = -EINVAL;
-			goto end;
-		}
-		pr_debug("register reg_bus_hdl=%x\n", phandle->reg_bus_hdl);
-	}
-
-end:
-	return rc;
-}
-
-static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
-{
-	if (reg_bus_hdl)
-		msm_bus_scale_unregister_client(reg_bus_hdl);
-}
-
-int sde_power_data_bus_state_update(struct sde_power_handle *phandle,
-							bool enable)
-{
-	int i;
-
-	if (!phandle) {
-		pr_err("invalid param\n");
-		return -EINVAL;
-	}
-
-	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
-			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-		phandle->data_bus_handle[i].enable = enable;
-
-	return 0;
-}
-
-static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus,
-							bool enable)
-{
-	int rc = 0;
-
-	pdbus->enable = enable;
-
-	if (pdbus->data_bus_hdl)
-		rc = _sde_power_data_bus_set_quota(pdbus, pdbus->ab_rt,
-				pdbus->ab_nrt, pdbus->ib_rt, pdbus->ib_nrt);
-
-	if (rc)
-		pr_err("failed to set data bus vote rc=%d enable:%d\n",
-							rc, enable);
-
-	return rc;
-}
-
-static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
-{
-	int rc = 0;
-
-	if (reg_bus_hdl) {
-		SDE_ATRACE_BEGIN("msm_bus_scale_req");
-		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
-								usecase_ndx);
-		SDE_ATRACE_END("msm_bus_scale_req");
-	}
-
-	if (rc)
-		pr_err("failed to set reg bus vote rc=%d\n", rc);
-
-	return rc;
-}
-#else
-static int sde_power_data_bus_parse(struct platform_device *pdev,
-		struct sde_power_data_bus_handle *pdbus, const char *name)
-{
-	return 0;
-}
-
-static void sde_power_data_bus_unregister(
-		struct sde_power_data_bus_handle *pdbus)
-{
-}
-
-int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
-		struct sde_power_client *pclient,
-		int bus_client, u32 bus_id,
-		u64 ab_quota, u64 ib_quota)
-{
-	return 0;
-}
-
-static int sde_power_reg_bus_parse(struct platform_device *pdev,
-	struct sde_power_handle *phandle)
-{
-	return 0;
-}
-
-static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
-{
-}
-
-static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
-{
-	return 0;
-}
-
-static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus,
-							bool enable)
-{
-	return 0;
-}
-
-int sde_power_data_bus_state_update(struct sde_power_handle *phandle,
-							bool enable)
-{
-	return 0;
-}
-#endif
-
-int sde_power_resource_init(struct platform_device *pdev,
-	struct sde_power_handle *phandle)
-{
-	int rc = 0, i;
-	struct dss_module_power *mp;
-
-	if (!phandle || !pdev) {
-		pr_err("invalid input param\n");
-		rc = -EINVAL;
-		goto end;
-	}
-	mp = &phandle->mp;
-	phandle->dev = &pdev->dev;
-
-	rc = sde_power_parse_dt_clock(pdev, mp);
-	if (rc) {
-		pr_err("device clock parsing failed\n");
-		goto end;
-	}
-
-	rc = sde_power_parse_dt_supply(pdev, mp);
-	if (rc) {
-		pr_err("device vreg supply parsing failed\n");
-		goto parse_vreg_err;
-	}
-
-	rc = msm_dss_config_vreg(&pdev->dev,
-				mp->vreg_config, mp->num_vreg, 1);
-	if (rc) {
-		pr_err("vreg config failed rc=%d\n", rc);
-		goto vreg_err;
-	}
-
-	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
-	if (rc) {
-		pr_err("clock get failed rc=%d\n", rc);
-		goto clk_err;
-	}
-
-	rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
-	if (rc) {
-		pr_err("clock set rate failed rc=%d\n", rc);
-		goto bus_err;
-	}
-
-	rc = sde_power_reg_bus_parse(pdev, phandle);
-	if (rc) {
-		pr_err("register bus parse failed rc=%d\n", rc);
-		goto bus_err;
-	}
-
-	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
-			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		rc = sde_power_data_bus_parse(pdev,
-				&phandle->data_bus_handle[i],
-				data_bus_name[i]);
-		if (rc) {
-			pr_err("register data bus parse failed id=%d rc=%d\n",
-					i, rc);
-			goto data_bus_err;
-		}
-	}
-
-	if (of_find_property(pdev->dev.of_node, "qcom,dss-cx-ipeak", NULL))
-		phandle->dss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node,
-						"qcom,dss-cx-ipeak");
-	else
-		pr_debug("cx ipeak client parse failed\n");
-
-	INIT_LIST_HEAD(&phandle->power_client_clist);
-	INIT_LIST_HEAD(&phandle->event_list);
-
-	phandle->rsc_client = NULL;
-	phandle->rsc_client_init = false;
-
-	mutex_init(&phandle->phandle_lock);
-
-	return rc;
-
-data_bus_err:
-	for (i--; i >= 0; i--)
-		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
-	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
-bus_err:
-	msm_dss_put_clk(mp->clk_config, mp->num_clk);
-clk_err:
-	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
-vreg_err:
-	if (mp->vreg_config)
-		devm_kfree(&pdev->dev, mp->vreg_config);
-	mp->num_vreg = 0;
-parse_vreg_err:
-	if (mp->clk_config)
-		devm_kfree(&pdev->dev, mp->clk_config);
-	mp->num_clk = 0;
-end:
-	return rc;
-}
-
-void sde_power_resource_deinit(struct platform_device *pdev,
-	struct sde_power_handle *phandle)
-{
-	struct dss_module_power *mp;
-	struct sde_power_client *curr_client, *next_client;
-	struct sde_power_event *curr_event, *next_event;
-	int i;
-
-	if (!phandle || !pdev) {
-		pr_err("invalid input param\n");
-		return;
-	}
-	mp = &phandle->mp;
-
-	mutex_lock(&phandle->phandle_lock);
-	list_for_each_entry_safe(curr_client, next_client,
-			&phandle->power_client_clist, list) {
-		pr_err("cliend:%s-%d still registered with refcount:%d\n",
-				curr_client->name, curr_client->id,
-				curr_client->refcount);
-		curr_client->active = false;
-		list_del(&curr_client->list);
-	}
-
-	list_for_each_entry_safe(curr_event, next_event,
-			&phandle->event_list, list) {
-		pr_err("event:%d, client:%s still registered\n",
-				curr_event->event_type,
-				curr_event->client_name);
-		curr_event->active = false;
-		list_del(&curr_event->list);
-	}
-	mutex_unlock(&phandle->phandle_lock);
-
-	if (phandle->dss_cx_ipeak)
-		cx_ipeak_unregister(phandle->dss_cx_ipeak);
-
-	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
-
-	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
-
-	msm_dss_put_clk(mp->clk_config, mp->num_clk);
-
-	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
-
-	if (mp->clk_config)
-		devm_kfree(&pdev->dev, mp->clk_config);
-
-	if (mp->vreg_config)
-		devm_kfree(&pdev->dev, mp->vreg_config);
-
-	mp->num_vreg = 0;
-	mp->num_clk = 0;
-
-	if (phandle->rsc_client)
-		sde_rsc_client_destroy(phandle->rsc_client);
-}
-
-
-int sde_power_scale_reg_bus(struct sde_power_handle *phandle,
-	struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock)
-{
-	struct sde_power_client *client;
-	int rc = 0;
-	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
-
-	if (!skip_lock) {
-		mutex_lock(&phandle->phandle_lock);
-
-		if (WARN_ON(pclient->refcount == 0)) {
-			/*
-			 * This is not expected, clients calling without skip
-			 * lock are outside the power resource enable, which
-			 * means that they should have enabled the power
-			 * resource before trying to scale.
-			 */
-			rc = -EINVAL;
-			goto exit;
-		}
-	}
-
-	pr_debug("%pS: current idx:%d requested:%d client:%d\n",
-		__builtin_return_address(0), pclient->usecase_ndx,
-		usecase_ndx, pclient->id);
-
-	pclient->usecase_ndx = usecase_ndx;
-
-	list_for_each_entry(client, &phandle->power_client_clist, list) {
-		if (client->usecase_ndx < VOTE_INDEX_MAX &&
-		    client->usecase_ndx > max_usecase_ndx)
-			max_usecase_ndx = client->usecase_ndx;
-	}
-
-	rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
-						max_usecase_ndx);
-	if (rc)
-		pr_err("failed to set reg bus vote rc=%d\n", rc);
-
-exit:
-	if (!skip_lock)
-		mutex_unlock(&phandle->phandle_lock);
-
-	return rc;
-}
-
-static inline bool _resource_changed(u32 current_usecase_ndx,
-		u32 max_usecase_ndx)
-{
-	WARN_ON((current_usecase_ndx >= VOTE_INDEX_MAX)
-		|| (max_usecase_ndx >= VOTE_INDEX_MAX));
-
-	if (((current_usecase_ndx >= VOTE_INDEX_LOW) && /* enabled */
-		(max_usecase_ndx == VOTE_INDEX_DISABLE)) || /* max disabled */
-		((current_usecase_ndx == VOTE_INDEX_DISABLE) && /* disabled */
-		(max_usecase_ndx >= VOTE_INDEX_LOW))) /* max enabled */
-		return true;
-
-	return false;
-}
-
-int sde_power_resource_enable(struct sde_power_handle *phandle,
-	struct sde_power_client *pclient, bool enable)
-{
-	int rc = 0, i;
-	bool changed = false;
-	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
-	struct sde_power_client *client;
-	struct dss_module_power *mp;
-
-	if (!phandle || !pclient) {
-		pr_err("invalid input argument\n");
-		return -EINVAL;
-	}
-
-	mp = &phandle->mp;
-
-	mutex_lock(&phandle->phandle_lock);
-	if (enable)
-		pclient->refcount++;
-	else if (pclient->refcount)
-		pclient->refcount--;
-
-	if (pclient->refcount)
-		pclient->usecase_ndx = VOTE_INDEX_LOW;
-	else
-		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
-	list_for_each_entry(client, &phandle->power_client_clist, list) {
-		if (client->usecase_ndx < VOTE_INDEX_MAX &&
-		    client->usecase_ndx > max_usecase_ndx)
-			max_usecase_ndx = client->usecase_ndx;
-	}
-
-	/*
-	 * Check if we need to enable/disable the power resource, we won't
-	 * only-scale up/down the AHB vote in this API; if a client wants to
-	 * bump up the AHB clock above the LOW (default) level, it needs to
-	 * call 'sde_power_scale_reg_bus' with the desired vote after the power
-	 * resource was enabled.
-	 */
-	if (_resource_changed(phandle->current_usecase_ndx,
-			max_usecase_ndx)) {
-		changed = true;
-		prev_usecase_ndx = phandle->current_usecase_ndx;
-		phandle->current_usecase_ndx = max_usecase_ndx;
-	}
-
-	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
-		__builtin_return_address(0), changed, max_usecase_ndx,
-		pclient->name, pclient->id, enable, pclient->refcount);
-
-	if (!changed)
-		goto end;
-
-	SDE_ATRACE_BEGIN("sde_power_resource_enable");
-
-	/* RSC client init */
-	sde_power_rsc_client_init(phandle);
-
-	if (enable) {
-		sde_power_event_trigger_locked(phandle,
-				SDE_POWER_EVENT_PRE_ENABLE);
-
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			rc = sde_power_data_bus_update(
-					&phandle->data_bus_handle[i], enable);
-			if (rc) {
-				pr_err("failed to set data bus vote id=%d rc=%d\n",
-						i, rc);
-				goto data_bus_hdl_err;
-			}
-		}
-		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
-				enable);
-		if (rc) {
-			pr_err("failed to enable vregs rc=%d\n", rc);
-			goto vreg_err;
-		}
-
-		rc = sde_power_scale_reg_bus(phandle, pclient,
-				max_usecase_ndx, true);
-		if (rc) {
-			pr_err("failed to set reg bus vote rc=%d\n", rc);
-			goto reg_bus_hdl_err;
-		}
-
-		SDE_EVT32_VERBOSE(enable, SDE_EVTLOG_FUNC_CASE1);
-		rc = sde_power_rsc_update(phandle, true);
-		if (rc) {
-			pr_err("failed to update rsc\n");
-			goto rsc_err;
-		}
-
-		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-		if (rc) {
-			pr_err("clock enable failed rc:%d\n", rc);
-			goto clk_err;
-		}
-
-		sde_power_event_trigger_locked(phandle,
-				SDE_POWER_EVENT_POST_ENABLE);
-
-	} else {
-		sde_power_event_trigger_locked(phandle,
-				SDE_POWER_EVENT_PRE_DISABLE);
-
-		SDE_EVT32_VERBOSE(enable, SDE_EVTLOG_FUNC_CASE2);
-		sde_power_rsc_update(phandle, false);
-
-		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-
-		sde_power_scale_reg_bus(phandle, pclient,
-				max_usecase_ndx, true);
-
-		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
-
-		for (i = 0 ; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-			sde_power_data_bus_update(&phandle->data_bus_handle[i],
-					enable);
-
-		sde_power_event_trigger_locked(phandle,
-				SDE_POWER_EVENT_POST_DISABLE);
-	}
-
-end:
-	SDE_EVT32_VERBOSE(enable, SDE_EVTLOG_FUNC_EXIT);
-	mutex_unlock(&phandle->phandle_lock);
-	SDE_ATRACE_END("sde_power_resource_enable");
-	return rc;
-
-clk_err:
-	sde_power_rsc_update(phandle, false);
-rsc_err:
-	sde_power_scale_reg_bus(phandle, pclient, max_usecase_ndx, true);
-reg_bus_hdl_err:
-	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
-vreg_err:
-	for (i = 0 ; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-		sde_power_data_bus_update(&phandle->data_bus_handle[i], 0);
-data_bus_hdl_err:
-	phandle->current_usecase_ndx = prev_usecase_ndx;
-	mutex_unlock(&phandle->phandle_lock);
-	SDE_ATRACE_END("sde_power_resource_enable");
-	return rc;
-}
-
-int sde_power_resource_is_enabled(struct sde_power_handle *phandle)
-{
-	if (!phandle) {
-		pr_err("invalid input argument\n");
-		return false;
-	}
-
-	return phandle->current_usecase_ndx != VOTE_INDEX_DISABLE;
-}
-
-int sde_cx_ipeak_vote(struct sde_power_handle *phandle, struct dss_clk *clock,
-		u64 requested_clk_rate, u64 prev_clk_rate, bool enable_vote)
-{
-	int ret = 0;
-	u64 curr_core_clk_rate, max_core_clk_rate, prev_core_clk_rate;
-
-	if (!phandle->dss_cx_ipeak) {
-		pr_debug("%pS->%s: Invalid input\n",
-				__builtin_return_address(0), __func__);
-		return -EOPNOTSUPP;
-	}
-
-	if (strcmp("core_clk", clock->clk_name)) {
-		pr_debug("Not a core clk , cx_ipeak vote not needed\n");
-		return -EOPNOTSUPP;
-	}
-
-	curr_core_clk_rate = clock->rate;
-	max_core_clk_rate = clock->max_rate;
-	prev_core_clk_rate = prev_clk_rate;
-
-	if (enable_vote && requested_clk_rate == max_core_clk_rate &&
-				curr_core_clk_rate != requested_clk_rate)
-		ret = cx_ipeak_update(phandle->dss_cx_ipeak, true);
-	else if (!enable_vote && requested_clk_rate != max_core_clk_rate &&
-				prev_core_clk_rate == max_core_clk_rate)
-		ret = cx_ipeak_update(phandle->dss_cx_ipeak, false);
-
-	if (ret)
-		SDE_EVT32(ret, enable_vote, requested_clk_rate,
-					curr_core_clk_rate, prev_core_clk_rate);
-
-	return ret;
-}
-
-int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
-	u64 rate)
-{
-	int i, rc = -EINVAL;
-	struct dss_module_power *mp;
-	u64 prev_clk_rate, requested_clk_rate;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return -EINVAL;
-	}
-	mp = &phandle->mp;
-
-	for (i = 0; i < mp->num_clk; i++) {
-		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
-			if (mp->clk_config[i].max_rate &&
-					(rate > mp->clk_config[i].max_rate))
-				rate = mp->clk_config[i].max_rate;
-
-			prev_clk_rate = mp->clk_config[i].rate;
-			requested_clk_rate = rate;
-			sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
-				requested_clk_rate, prev_clk_rate, true);
-			mp->clk_config[i].rate = rate;
-			rc = msm_dss_single_clk_set_rate(&mp->clk_config[i]);
-			if (!rc)
-				sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
-				   requested_clk_rate, prev_clk_rate, false);
-			break;
-		}
-	}
-
-	return rc;
-}
-
-u64 sde_power_clk_get_rate(struct sde_power_handle *phandle, char *clock_name)
-{
-	int i;
-	struct dss_module_power *mp;
-	u64 rate = -EINVAL;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return -EINVAL;
-	}
-	mp = &phandle->mp;
-
-	for (i = 0; i < mp->num_clk; i++) {
-		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
-			rate = clk_get_rate(mp->clk_config[i].clk);
-			break;
-		}
-	}
-
-	return rate;
-}
-
-u64 sde_power_clk_get_max_rate(struct sde_power_handle *phandle,
-		char *clock_name)
-{
-	int i;
-	struct dss_module_power *mp;
-	u64 rate = 0;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return 0;
-	}
-	mp = &phandle->mp;
-
-	for (i = 0; i < mp->num_clk; i++) {
-		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
-			rate = mp->clk_config[i].max_rate;
-			break;
-		}
-	}
-
-	return rate;
-}
-
-struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
-		char *clock_name)
-{
-	int i;
-	struct dss_module_power *mp;
-	struct clk *clk = NULL;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return 0;
-	}
-	mp = &phandle->mp;
-
-	for (i = 0; i < mp->num_clk; i++) {
-		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
-			clk = mp->clk_config[i].clk;
-			break;
-		}
-	}
-
-	return clk;
-}
-
-int sde_power_clk_set_flags(struct sde_power_handle *phandle,
-		char *clock_name, unsigned long flags)
-{
-	struct clk *clk;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return -EINVAL;
-	}
-
-	if (!clock_name) {
-		pr_err("invalid input clock name\n");
-		return -EINVAL;
-	}
-
-	clk = sde_power_clk_get_clk(phandle, clock_name);
-	if (!clk) {
-		pr_err("get_clk failed for clk: %s\n", clock_name);
-		return -EINVAL;
-	}
-
-	return clk_set_flags(clk, flags);
-}
-
-struct sde_power_event *sde_power_handle_register_event(
-		struct sde_power_handle *phandle,
-		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-		void *usr, char *client_name)
-{
-	struct sde_power_event *event;
-
-	if (!phandle) {
-		pr_err("invalid power handle\n");
-		return ERR_PTR(-EINVAL);
-	} else if (!cb_fnc || !event_type) {
-		pr_err("no callback fnc or event type\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	event = kzalloc(sizeof(struct sde_power_event), GFP_KERNEL);
-	if (!event)
-		return ERR_PTR(-ENOMEM);
-
-	event->event_type = event_type;
-	event->cb_fnc = cb_fnc;
-	event->usr = usr;
-	strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
-	event->active = true;
-
-	mutex_lock(&phandle->phandle_lock);
-	list_add(&event->list, &phandle->event_list);
-	mutex_unlock(&phandle->phandle_lock);
-
-	return event;
-}
-
-void sde_power_handle_unregister_event(
-		struct sde_power_handle *phandle,
-		struct sde_power_event *event)
-{
-	if (!phandle || !event) {
-		pr_err("invalid phandle or event\n");
-	} else if (!event->active) {
-		pr_err("power handle deinit already done\n");
-		kfree(event);
-	} else {
-		mutex_lock(&phandle->phandle_lock);
-		list_del_init(&event->list);
-		mutex_unlock(&phandle->phandle_lock);
-		kfree(event);
-	}
-}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
deleted file mode 100644
index d14441c..0000000
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ /dev/null
@@ -1,369 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_POWER_HANDLE_H_
-#define _SDE_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA		400000000
-#define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
-#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
-
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	3000000000
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000
-
-#include <linux/sde_io_util.h>
-#include <soc/qcom/cx_ipeak.h>
-
-/* event will be triggered before power handler disable */
-#define SDE_POWER_EVENT_PRE_DISABLE	0x1
-
-/* event will be triggered after power handler disable */
-#define SDE_POWER_EVENT_POST_DISABLE	0x2
-
-/* event will be triggered before power handler enable */
-#define SDE_POWER_EVENT_PRE_ENABLE	0x4
-
-/* event will be triggered after power handler enable */
-#define SDE_POWER_EVENT_POST_ENABLE	0x8
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MEDIUM: keeps medium vote for register bus
- * VOTE_INDEX_HIGH: keeps the highest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
-	VOTE_INDEX_DISABLE,
-	VOTE_INDEX_LOW,
-	VOTE_INDEX_MEDIUM,
-	VOTE_INDEX_HIGH,
-	VOTE_INDEX_MAX,
-};
-
-/**
- * enum sde_power_handle_data_bus_client - type of axi bus clients
- * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum sde_power_handle_data_bus_client {
-	SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-	SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-	SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum SDE_POWER_HANDLE_DBUS_ID - data bus identifier
- * @SDE_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @SDE_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @SDE_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum SDE_POWER_HANDLE_DBUS_ID {
-	SDE_POWER_HANDLE_DBUS_ID_MNOC,
-	SDE_POWER_HANDLE_DBUS_ID_LLCC,
-	SDE_POWER_HANDLE_DBUS_ID_EBI,
-	SDE_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct sde_power_client: stores the power client for sde driver
- * @name:	name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount:	current refcount if multiple modules are using same
- *              same client for enable/disable. Power module will
- *              aggregate the refcount and vote accordingly for this
- *              client.
- * @id:		assigned during create. helps for debugging.
- * @list:	list to attach power handle master list
- * @ab:         arbitrated bandwidth for each bus client
- * @ib:         instantaneous bandwidth for each bus client
- * @active:	inidcates the state of sde power handle
- */
-struct sde_power_client {
-	char name[MAX_CLIENT_NAME_LEN];
-	short usecase_ndx;
-	short refcount;
-	u32 id;
-	struct list_head list;
-	u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-	u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-	bool active;
-};
-
-/**
- * struct sde_power_data_handle: power handle struct for data bus
- * @data_bus_scale_table: pointer to bus scaling table
- * @data_bus_hdl: current data bus handle
- * @data_paths_cnt: number of rt data path ports
- * @nrt_data_paths_cnt: number of nrt data path ports
- * @bus_channels: number of memory bus channels
- * @curr_bw_uc_idx: current use case index of data bus
- * @ao_bw_uc_idx: active only use case index of data bus
- * @ab_rt: realtime ab quota
- * @ib_rt: realtime ib quota
- * @ab_nrt: non-realtime ab quota
- * @ib_nrt: non-realtime ib quota
- * @enable: true if bus is enabled
- */
-struct sde_power_data_bus_handle {
-	struct msm_bus_scale_pdata *data_bus_scale_table;
-	u32 data_bus_hdl;
-	u32 data_paths_cnt;
-	u32 nrt_data_paths_cnt;
-	u32 bus_channels;
-	u32 curr_bw_uc_idx;
-	u32 ao_bw_uc_idx;
-	u64 ab_rt;
-	u64 ib_rt;
-	u64 ab_nrt;
-	u64 ib_nrt;
-	bool enable;
-};
-
-/*
- * struct sde_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to SDE_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of sde power handle
- */
-struct sde_power_event {
-	char client_name[MAX_CLIENT_NAME_LEN];
-	void (*cb_fnc)(u32 event_type, void *usr);
-	void *usr;
-	u32 event_type;
-	struct list_head list;
-	bool active;
-};
-
-/**
- * struct sde_power_handle: power handle main struct
- * @mp:		module power for clock and regulator
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @reg_bus_hdl: current register bus handle
- * @data_bus_handle: context structure for data bus control
- * @event_list: current power handle event list
- * @rsc_client: sde rsc client pointer
- * @rsc_client_init: boolean to control rsc client create
- * @dss_cx_ipeak: client pointer for cx ipeak driver
- */
-struct sde_power_handle {
-	struct dss_module_power mp;
-	struct list_head power_client_clist;
-	struct mutex phandle_lock;
-	struct device *dev;
-	u32 current_usecase_ndx;
-	u32 reg_bus_hdl;
-	struct sde_power_data_bus_handle data_bus_handle
-		[SDE_POWER_HANDLE_DBUS_ID_MAX];
-	struct list_head event_list;
-	struct sde_rsc_client *rsc_client;
-	bool rsc_client_init;
-	struct cx_ipeak_client *dss_cx_ipeak;
-};
-
-/**
- * sde_power_resource_init() - initializes the sde power handle
- * @pdev:   platform device to search the power resources
- * @pdata:  power handle to store the power resources
- *
- * Return: error code.
- */
-int sde_power_resource_init(struct platform_device *pdev,
-	struct sde_power_handle *pdata);
-
-/**
- * sde_power_resource_deinit() - release the sde power handle
- * @pdev:   platform device for power resources
- * @pdata:  power handle containing the resources
- *
- * Return: error code.
- */
-void sde_power_resource_deinit(struct platform_device *pdev,
-	struct sde_power_handle *pdata);
-
-/**
- * sde_power_client_create() - create the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct sde_power_client *sde_power_client_create(struct sde_power_handle *pdata,
-	char *client_name);
-
-/**
- * sde_power_client_destroy() - destroy the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void sde_power_client_destroy(struct sde_power_handle *phandle,
-	struct sde_power_client *client);
-
-/**
- * sde_power_resource_enable() - enable/disable the power resources
- * @pdata:  power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int sde_power_resource_enable(struct sde_power_handle *pdata,
-	struct sde_power_client *pclient, bool enable);
-
-/**
- * sde_power_scale_reg_bus() - Scale the registers bus for the specified client
- * @phandle:  power handle containing the resources
- * @pclient: client information to scale its vote
- * @usecase_ndx: new use case to scale the reg bus
- * @skip_lock: will skip holding the power rsrc mutex during the call, this is
- *		for internal callers that already hold this required lock.
- *
- * Return: error code.
- */
-int sde_power_scale_reg_bus(struct sde_power_handle *phandle,
-	struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock);
-
-/**
- * sde_power_resource_is_enabled() - return true if power resource is enabled
- * @pdata:  power handle containing the resources
- *
- * Return: true if enabled; false otherwise
- */
-int sde_power_resource_is_enabled(struct sde_power_handle *pdata);
-
-/**
- * sde_power_data_bus_state_update() - update data bus state
- * @pdata:  power handle containing the resources
- * @enable: take enable vs disable path
- *
- * Return: error code.
- */
-int sde_power_data_bus_state_update(struct sde_power_handle *phandle,
-							bool enable);
-
-/**
- * sde_power_clk_set_rate() - set the clock rate
- * @pdata:  power handle containing the resources
- * @clock_name: clock name which needs rate update.
- * @rate:       Requested rate.
- *
- * Return: error code.
- */
-int sde_power_clk_set_rate(struct sde_power_handle *pdata, char *clock_name,
-	u64 rate);
-
-/**
- * sde_power_clk_get_rate() - get the clock rate
- * @pdata:  power handle containing the resources
- * @clock_name: clock name to get the rate
- *
- * Return: current clock rate
- */
-u64 sde_power_clk_get_rate(struct sde_power_handle *pdata, char *clock_name);
-
-/**
- * sde_power_clk_get_max_rate() - get the maximum clock rate
- * @pdata:  power handle containing the resources
- * @clock_name: clock name to get the max rate.
- *
- * Return: maximum clock rate or 0 if not found.
- */
-u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata,
-		char *clock_name);
-
-/**
- * sde_power_clk_get_clk() - get the clock
- * @pdata:  power handle containing the resources
- * @clock_name: clock name to get the clk pointer.
- *
- * Return: Pointer to clock
- */
-struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
-		char *clock_name);
-
-/**
- * sde_power_clk_set_flags() - set the clock flags
- * @pdata:  power handle containing the resources
- * @clock_name: clock name to get the clk pointer.
- * @flags: flags to set
- *
- * Return: error code.
- */
-int sde_power_clk_set_flags(struct sde_power_handle *pdata,
-		char *clock_name, unsigned long flags);
-
-/**
- * sde_power_data_bus_set_quota() - set data bus quota for power client
- * @phandle:  power handle containing the resources
- * @client: client information to set quota
- * @bus_client: real-time or non-real-time bus client
- * @bus_id: identifier of data bus, see SDE_POWER_HANDLE_DBUS_ID
- * @ab_quota: arbitrated bus bandwidth
- * @ib_quota: instantaneous bus bandwidth
- *
- * Return: zero if success, or error code otherwise
- */
-int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
-		struct sde_power_client *pclient,
-		int bus_client, u32 bus_id,
-		u64 ab_quota, u64 ib_quota);
-
-/**
- * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle:  power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data base
- *
- * Return: none
- */
-void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
-		struct sde_power_client *pclient, int enable);
-
-/**
- * sde_power_handle_register_event - register a callback function for an event.
- *	Clients can register for multiple events with a single register.
- *	Any block with access to phandle can register for the event
- *	notification.
- * @phandle:	power handle containing the resources
- * @event_type:	event type to register; refer SDE_POWER_HANDLE_EVENT_*
- * @cb_fnc:	pointer to desired callback function
- * @usr:	user pointer to pass to callback on event trigger
- *
- * Return:	event pointer if success, or error code otherwise
- */
-struct sde_power_event *sde_power_handle_register_event(
-		struct sde_power_handle *phandle,
-		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-		void *usr, char *client_name);
-/**
- * sde_power_handle_unregister_event - unregister callback for event(s)
- * @phandle:	power handle containing the resources
- * @event:	event pointer returned after power handle register
- */
-void sde_power_handle_unregister_event(struct sde_power_handle *phandle,
-		struct sde_power_event *event);
-
-/**
- * sde_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id:	data bus identifier
- * Return:	Pointer to name string if success; NULL otherwise
- */
-const char *sde_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _SDE_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
deleted file mode 100644
index cf218c6..0000000
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ /dev/null
@@ -1,1671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[sde_rsc:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/component.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/module.h>
-
-#include <soc/qcom/rpmh.h>
-#include <drm/drmP.h>
-#include <drm/drm_irq.h>
-#include "sde_rsc_priv.h"
-#include "sde_dbg.h"
-
-#define SDE_RSC_DRV_DBG_NAME		"sde_rsc_drv"
-#define SDE_RSC_WRAPPER_DBG_NAME	"sde_rsc_wrapper"
-
-#define SINGLE_TCS_EXECUTION_TIME_V1	1064000
-#define SINGLE_TCS_EXECUTION_TIME_V2	930000
-
-#define RSC_MODE_INSTRUCTION_TIME	100
-#define RSC_MODE_THRESHOLD_OVERHEAD	2700
-
-/**
- * rsc_min_threshold will be set to MIN_THRESHOLD_OVERHEAD_TIME which
- * takes into account back off time + overhead from RSC/RSC_WRAPPER. The
- * overhead buffer time is required to be greater than 14. For measure,
- * this value assumes 18.
- */
-#define MIN_THRESHOLD_OVERHEAD_TIME	18
-
-#define DEFAULT_PANEL_FPS		60
-#define DEFAULT_PANEL_JITTER_NUMERATOR	2
-#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
-#define DEFAULT_PANEL_PREFILL_LINES	25
-#define DEFAULT_PANEL_VTOTAL		(480 + DEFAULT_PANEL_PREFILL_LINES)
-#define TICKS_IN_NANO_SECOND		1000000000
-
-#define MAX_BUFFER_SIZE 256
-
-#define CMD_MODE_SWITCH_SUCCESS		0xFFFF
-#define VID_MODE_SWITCH_SUCCESS		0xFFFE
-#define CLK_MODE_SWITCH_SUCCESS		0xFFFD
-#define STATE_UPDATE_NOT_ALLOWED	0xFFFC
-
-/* Primary panel worst case VSYNC expected to be no less than 30fps */
-#define PRIMARY_VBLANK_WORST_CASE_MS 34
-
-#define DEFAULT_PANEL_MIN_V_PREFILL	35
-#define DEFAULT_PANEL_MAX_V_PREFILL	108
-
-static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
-static struct device *rpmh_dev[MAX_RSC_COUNT];
-
-/**
- * sde_rsc_client_create() - create the client for sde rsc.
- * Different displays like DSI, HDMI, DP, WB, etc should call this
- * api to register their vote for rpmh. They still need to vote for
- * power handle to get the clocks.
-
- * @rsc_index:   A client will be created on this RSC. As of now only
- *               SDE_RSC_INDEX is valid rsc index.
- * @name:	 Caller needs to provide some valid string to identify
- *               the client. "primary", "dp", "hdmi" are suggested name.
- * @is_primary:	 Caller needs to provide information if client is primary
- *               or not. Primary client votes will be redirected to
- *               display rsc.
- * @vsync_source: This parameter is only valid for primary display. It provides
- *               vsync source information
- *
- * Return: client node pointer.
- */
-struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *client_name,
-	enum sde_rsc_client_type client_type, u32 vsync_source)
-{
-	struct sde_rsc_client *client;
-	struct sde_rsc_priv *rsc;
-	static int id;
-
-	if (!client_name) {
-		pr_err("client name is null- not supported\n");
-		return ERR_PTR(-EINVAL);
-	} else if (rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return ERR_PTR(-EINVAL);
-	} else if (!rsc_prv_list[rsc_index]) {
-		pr_debug("rsc not probed yet or not available\n");
-		return NULL;
-	}
-
-	rsc = rsc_prv_list[rsc_index];
-	client = kzalloc(sizeof(struct sde_rsc_client), GFP_KERNEL);
-	if (!client)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&rsc->client_lock);
-	strlcpy(client->name, client_name, MAX_RSC_CLIENT_NAME_LEN);
-	client->current_state = SDE_RSC_IDLE_STATE;
-	client->rsc_index = rsc_index;
-	client->id = id;
-	client->client_type = client_type;
-	if (client->client_type == SDE_RSC_PRIMARY_DISP_CLIENT) {
-		rsc->primary_client = client;
-		rsc->vsync_source = vsync_source;
-	}
-	pr_debug("client %s rsc index:%d client_type:%d\n", client_name,
-						rsc_index, client->client_type);
-
-	list_add(&client->list, &rsc->client_list);
-	id++;
-	mutex_unlock(&rsc->client_lock);
-
-	return client;
-}
-EXPORT_SYMBOL(sde_rsc_client_create);
-
-/**
- * sde_rsc_client_destroy() - Destroy the sde rsc client.
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: none
- */
-void sde_rsc_client_destroy(struct sde_rsc_client *client)
-{
-	struct sde_rsc_priv *rsc;
-	enum sde_rsc_state state;
-
-	if (!client) {
-		pr_debug("invalid client\n");
-		goto end;
-	} else if (client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		goto end;
-	}
-
-	pr_debug("client %s destroyed\n", client->name);
-	rsc = rsc_prv_list[client->rsc_index];
-	if (!rsc)
-		goto end;
-
-	mutex_lock(&rsc->client_lock);
-	state = client->current_state;
-	mutex_unlock(&rsc->client_lock);
-
-	if (state != SDE_RSC_IDLE_STATE) {
-		int wait_vblank_crtc_id;
-
-		sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE, NULL,
-				SDE_RSC_INVALID_CRTC_ID, &wait_vblank_crtc_id);
-
-		/* if vblank wait required at shutdown, use a simple sleep */
-		if (wait_vblank_crtc_id != SDE_RSC_INVALID_CRTC_ID) {
-			pr_err("unexpected sleep required on crtc %d at rsc client destroy\n",
-					wait_vblank_crtc_id);
-			SDE_EVT32(client->id, state, rsc->current_state,
-					client->crtc_id, wait_vblank_crtc_id,
-					SDE_EVTLOG_ERROR);
-			msleep(PRIMARY_VBLANK_WORST_CASE_MS);
-		}
-	}
-	mutex_lock(&rsc->client_lock);
-	list_del_init(&client->list);
-	mutex_unlock(&rsc->client_lock);
-
-	kfree(client);
-end:
-	return;
-}
-EXPORT_SYMBOL(sde_rsc_client_destroy);
-
-struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
-		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
-{
-	struct sde_rsc_event *evt;
-	struct sde_rsc_priv *rsc;
-
-	if (rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index:%d\n", rsc_index);
-		return ERR_PTR(-EINVAL);
-	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc idx:%d not probed yet or not available\n",
-								rsc_index);
-		return ERR_PTR(-EINVAL);
-	} else if (!cb_func || !event_type) {
-		pr_err("no event or cb func\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	rsc = rsc_prv_list[rsc_index];
-	evt = kzalloc(sizeof(struct sde_rsc_event), GFP_KERNEL);
-	if (!evt)
-		return ERR_PTR(-ENOMEM);
-
-	evt->event_type = event_type;
-	evt->rsc_index = rsc_index;
-	evt->usr = usr;
-	evt->cb_func = cb_func;
-	pr_debug("event register type:%d rsc index:%d\n",
-						event_type, rsc_index);
-
-	mutex_lock(&rsc->client_lock);
-	list_add(&evt->list, &rsc->event_list);
-	mutex_unlock(&rsc->client_lock);
-
-	return evt;
-}
-EXPORT_SYMBOL(sde_rsc_register_event);
-
-void sde_rsc_unregister_event(struct sde_rsc_event *event)
-{
-	struct sde_rsc_priv *rsc;
-
-	if (!event) {
-		pr_debug("invalid event client\n");
-		goto end;
-	} else if (event->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		goto end;
-	}
-
-	pr_debug("event client destroyed\n");
-	rsc = rsc_prv_list[event->rsc_index];
-	if (!rsc)
-		goto end;
-
-	mutex_lock(&rsc->client_lock);
-	list_del_init(&event->list);
-	mutex_unlock(&rsc->client_lock);
-
-	kfree(event);
-end:
-	return;
-}
-EXPORT_SYMBOL(sde_rsc_unregister_event);
-
-bool is_sde_rsc_available(int rsc_index)
-{
-	if (rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index:%d\n", rsc_index);
-		return false;
-	} else if (!rsc_prv_list[rsc_index]) {
-		pr_debug("rsc idx:%d not probed yet or not available\n",
-								rsc_index);
-		return false;
-	}
-
-	return true;
-}
-EXPORT_SYMBOL(is_sde_rsc_available);
-
-enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
-{
-	struct sde_rsc_priv *rsc;
-
-	if (rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index:%d\n", rsc_index);
-		return SDE_RSC_IDLE_STATE;
-	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc idx:%d not probed yet or not available\n",
-								rsc_index);
-		return SDE_RSC_IDLE_STATE;
-	}
-
-	rsc = rsc_prv_list[rsc_index];
-	return rsc->current_state;
-}
-EXPORT_SYMBOL(get_sde_rsc_current_state);
-
-static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
-	struct sde_power_client *pclient, bool enable)
-{
-	int rc = 0;
-	struct dss_module_power *mp;
-
-	if (!phandle || !pclient) {
-		pr_err("invalid input argument\n");
-		return -EINVAL;
-	}
-
-	mp = &phandle->mp;
-
-	if (enable)
-		pclient->refcount++;
-	else if (pclient->refcount)
-		pclient->refcount--;
-
-	if (pclient->refcount)
-		pclient->usecase_ndx = VOTE_INDEX_LOW;
-	else
-		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
-	if (phandle->current_usecase_ndx == pclient->usecase_ndx)
-		goto end;
-
-	if (enable) {
-		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
-				enable);
-		if (rc) {
-			pr_err("failed to enable vregs rc=%d\n", rc);
-			goto end;
-		}
-
-		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-		if (rc) {
-			pr_err("clock enable failed rc:%d\n", rc);
-			msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
-					!enable);
-			goto end;
-		}
-	} else {
-		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
-
-		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
-				enable);
-		if (rc) {
-			pr_err("failed to disable vregs rc=%d\n", rc);
-			goto end;
-		}
-	}
-
-	phandle->current_usecase_ndx = pclient->usecase_ndx;
-
-end:
-	return rc;
-}
-
-static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
-	struct sde_rsc_cmd_config *cmd_config, enum sde_rsc_state state)
-{
-	const u32 cxo_period_ns = 52;
-	u64 rsc_backoff_time_ns = rsc->backoff_time_ns;
-	u64 rsc_mode_threshold_time_ns = rsc->mode_threshold_time_ns;
-	u64 rsc_time_slot_0_ns = rsc->time_slot_0_ns;
-	u64 rsc_time_slot_1_ns;
-	const u64 pdc_jitter = 20; /* 20% more */
-
-	u64 frame_time_ns, frame_jitter;
-	u64 line_time_ns, prefill_time_ns;
-	u64 pdc_backoff_time_ns;
-	s64 total;
-	int ret = 0;
-
-	if (cmd_config)
-		memcpy(&rsc->cmd_config, cmd_config, sizeof(*cmd_config));
-
-	/* calculate for 640x480 60 fps resolution by default */
-	if (!rsc->cmd_config.fps)
-		rsc->cmd_config.fps = DEFAULT_PANEL_FPS;
-	if (!rsc->cmd_config.jitter_numer)
-		rsc->cmd_config.jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
-	if (!rsc->cmd_config.jitter_denom)
-		rsc->cmd_config.jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
-	if (!rsc->cmd_config.vtotal)
-		rsc->cmd_config.vtotal = DEFAULT_PANEL_VTOTAL;
-	if (!rsc->cmd_config.prefill_lines)
-		rsc->cmd_config.prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
-	if (rsc->cmd_config.prefill_lines > DEFAULT_PANEL_MAX_V_PREFILL)
-		rsc->cmd_config.prefill_lines = DEFAULT_PANEL_MAX_V_PREFILL;
-	if (rsc->cmd_config.prefill_lines < DEFAULT_PANEL_MIN_V_PREFILL)
-		rsc->cmd_config.prefill_lines = DEFAULT_PANEL_MIN_V_PREFILL;
-	pr_debug("frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
-		rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
-		rsc->cmd_config.jitter_denom, rsc->cmd_config.vtotal,
-		rsc->cmd_config.prefill_lines);
-
-	/* 1 nano second */
-	frame_time_ns = TICKS_IN_NANO_SECOND;
-	frame_time_ns = div_u64(frame_time_ns, rsc->cmd_config.fps);
-
-	frame_jitter = frame_time_ns * rsc->cmd_config.jitter_numer;
-	frame_jitter = div_u64(frame_jitter, rsc->cmd_config.jitter_denom);
-	/* convert it to percentage */
-	frame_jitter = div_u64(frame_jitter, 100);
-
-	line_time_ns = frame_time_ns;
-	line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
-	prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
-
-	/* only take jitter into account for CMD mode */
-	if (state == SDE_RSC_CMD_STATE)
-		total = frame_time_ns - frame_jitter - prefill_time_ns;
-	else
-		total = frame_time_ns - prefill_time_ns;
-
-	if (total < 0) {
-		pr_err("invalid total time period time:%llu jiter_time:%llu blanking time:%llu\n",
-			frame_time_ns, frame_jitter, prefill_time_ns);
-		total = 0;
-	}
-
-	total = div_u64(total, cxo_period_ns);
-	rsc->timer_config.static_wakeup_time_ns = total;
-
-	pr_debug("frame time:%llu frame jiter_time:%llu\n",
-			frame_time_ns, frame_jitter);
-	pr_debug("line time:%llu prefill time ps:%llu\n",
-			line_time_ns, prefill_time_ns);
-	pr_debug("static wakeup time:%lld cxo:%u\n", total, cxo_period_ns);
-
-	pdc_backoff_time_ns = rsc_backoff_time_ns;
-	rsc_backoff_time_ns = div_u64(rsc_backoff_time_ns, cxo_period_ns);
-	rsc->timer_config.rsc_backoff_time_ns = (u32) rsc_backoff_time_ns;
-
-	pdc_backoff_time_ns *= pdc_jitter;
-	pdc_backoff_time_ns = div_u64(pdc_backoff_time_ns, 100);
-	rsc->timer_config.pdc_backoff_time_ns = (u32) pdc_backoff_time_ns;
-
-	rsc_mode_threshold_time_ns =
-			div_u64(rsc_mode_threshold_time_ns, cxo_period_ns);
-	rsc->timer_config.rsc_mode_threshold_time_ns
-					= (u32) rsc_mode_threshold_time_ns;
-
-	/* time_slot_0 for mode0 latency */
-	rsc_time_slot_0_ns = div_u64(rsc_time_slot_0_ns, cxo_period_ns);
-	rsc->timer_config.rsc_time_slot_0_ns = (u32) rsc_time_slot_0_ns;
-
-	/* time_slot_1 for mode1 latency */
-	rsc_time_slot_1_ns = frame_time_ns;
-	rsc_time_slot_1_ns = div_u64(rsc_time_slot_1_ns, cxo_period_ns);
-	rsc->timer_config.rsc_time_slot_1_ns = (u32) rsc_time_slot_1_ns;
-
-	/* mode 2 is infinite */
-	rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
-
-	rsc->timer_config.min_threshold_time_ns = MIN_THRESHOLD_OVERHEAD_TIME;
-	rsc->timer_config.bwi_threshold_time_ns =
-		rsc->timer_config.rsc_time_slot_0_ns;
-
-	/* timer update should be called with client call */
-	if (cmd_config && rsc->hw_ops.timer_update) {
-		ret = rsc->hw_ops.timer_update(rsc);
-		if (ret)
-			pr_err("sde rsc: hw timer update failed ret:%d\n", ret);
-	/* rsc init should be called during rsc probe - one time only */
-	} else if (rsc->hw_ops.init) {
-		ret = rsc->hw_ops.init(rsc);
-		if (ret)
-			pr_err("sde rsc: hw init failed ret:%d\n", ret);
-	}
-
-	return ret;
-}
-
-static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
-	struct sde_rsc_cmd_config *config,
-	struct sde_rsc_client *caller_client,
-	int *wait_vblank_crtc_id)
-{
-	struct sde_rsc_client *client;
-	int rc = STATE_UPDATE_NOT_ALLOWED;
-
-	if (!rsc->primary_client) {
-		pr_err("primary client not available for cmd state switch\n");
-		rc = -EINVAL;
-		goto end;
-	} else if (caller_client != rsc->primary_client) {
-		pr_err("primary client state:%d not cmd state request\n",
-			rsc->primary_client->current_state);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	/* update timers - might not be available at next switch */
-	if (config)
-		sde_rsc_timer_calculate(rsc, config, SDE_RSC_CMD_STATE);
-
-	/**
-	 * rsc clients can still send config at any time. If a config is
-	 * received during cmd_state then vsync_wait will execute with the logic
-	 * below. If a config is received when rsc is in AMC mode; A mode
-	 * switch will do the vsync wait. updated checks still support all cases
-	 * for dynamic mode switch and inline rotation.
-	 */
-	if (rsc->current_state == SDE_RSC_CMD_STATE) {
-		rc = 0;
-		if (config)
-			goto vsync_wait;
-		else
-			goto end;
-	}
-
-	/* any non-primary clk state client blocks the cmd state switch */
-	list_for_each_entry(client, &rsc->client_list, list)
-		if (client->current_state == SDE_RSC_CLK_STATE &&
-		    client->client_type == SDE_RSC_EXTERNAL_DISP_CLIENT)
-			goto end;
-
-	if (rsc->hw_ops.state_update) {
-		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
-		if (!rc)
-			rpmh_mode_solver_set(rsc->rpmh_dev, true);
-	}
-
-vsync_wait:
-	/* indicate wait for vsync for vid to cmd state switch & cfg update */
-	if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
-			rsc->current_state == SDE_RSC_CMD_STATE)) {
-		/* clear VSYNC timestamp for indication when update completes */
-		if (rsc->hw_ops.hw_vsync)
-			rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
-		if (!wait_vblank_crtc_id) {
-			pr_err("invalid crtc id wait pointer, client %d\n",
-					caller_client->id);
-			SDE_EVT32(caller_client->id, rsc->current_state,
-					caller_client->crtc_id,
-					wait_vblank_crtc_id, SDE_EVTLOG_ERROR);
-			msleep(PRIMARY_VBLANK_WORST_CASE_MS);
-		} else {
-			*wait_vblank_crtc_id = rsc->primary_client->crtc_id;
-		}
-	}
-end:
-	return rc;
-}
-
-static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
-		int *wait_vblank_crtc_id)
-{
-	struct sde_rsc_client *client;
-	int rc = STATE_UPDATE_NOT_ALLOWED;
-	bool multi_display_active = false;
-	bool vid_display_active = false, cmd_display_active = false;
-
-	list_for_each_entry(client, &rsc->client_list, list) {
-		if (client->current_state == SDE_RSC_CLK_STATE &&
-		    client->client_type == SDE_RSC_EXTERNAL_DISP_CLIENT)
-			multi_display_active = true;
-		else if (client->current_state == SDE_RSC_VID_STATE)
-			vid_display_active = true;
-		else if (client->current_state == SDE_RSC_CMD_STATE)
-			cmd_display_active = true;
-	}
-
-	pr_debug("multi_display:%d vid_display:%d cmd_display:%d\n",
-		multi_display_active, vid_display_active, cmd_display_active);
-	if (!multi_display_active && (vid_display_active || cmd_display_active))
-		goto end;
-
-	if (rsc->hw_ops.state_update) {
-		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
-		if (!rc)
-			rpmh_mode_solver_set(rsc->rpmh_dev, false);
-	}
-
-	/* indicate wait for vsync for cmd/vid to clk state switch */
-	if (!rc && rsc->primary_client &&
-		(rsc->current_state == SDE_RSC_CMD_STATE ||
-			rsc->current_state == SDE_RSC_VID_STATE)) {
-		/* clear VSYNC timestamp for indication when update completes */
-		if (rsc->hw_ops.hw_vsync)
-			rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
-		if (!wait_vblank_crtc_id) {
-			pr_err("invalid crtc id wait pointer provided\n");
-			msleep(PRIMARY_VBLANK_WORST_CASE_MS);
-		} else {
-			*wait_vblank_crtc_id = rsc->primary_client->crtc_id;
-
-			/* increase refcount, so we wait for the next vsync */
-			atomic_inc(&rsc->rsc_vsync_wait);
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait));
-		}
-	} else if (atomic_read(&rsc->rsc_vsync_wait)) {
-		SDE_EVT32(rsc->primary_client, rsc->current_state,
-			atomic_read(&rsc->rsc_vsync_wait));
-
-		/* Wait for the vsync, if the refcount is set */
-		rc = wait_event_timeout(rsc->rsc_vsync_waitq,
-			atomic_read(&rsc->rsc_vsync_wait) == 0,
-			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
-		if (!rc) {
-			pr_err("Timeout waiting for vsync\n");
-			rc = -ETIMEDOUT;
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
-				SDE_EVTLOG_ERROR);
-		} else {
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
-			rc = 0;
-		}
-	}
-end:
-	return rc;
-}
-
-static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
-	struct sde_rsc_cmd_config *config,
-	struct sde_rsc_client *caller_client,
-	int *wait_vblank_crtc_id)
-{
-	struct sde_rsc_client *client;
-	int rc = STATE_UPDATE_NOT_ALLOWED;
-
-	if (!rsc->primary_client) {
-		pr_err("primary client not available for vid state switch\n");
-		rc = -EINVAL;
-		goto end;
-	} else if (caller_client != rsc->primary_client) {
-		pr_err("primary client state:%d not vid state request\n",
-			rsc->primary_client->current_state);
-		rc = -EINVAL;
-		goto end;
-	}
-
-	/* update timers - might not be available at next switch */
-	if (config)
-		sde_rsc_timer_calculate(rsc, config, SDE_RSC_VID_STATE);
-
-	/**
-	 * rsc clients can still send config at any time. If a config is
-	 * received during vid_state then vsync_wait will execute with the logic
-	 * below.
-	 */
-	if (rsc->current_state == SDE_RSC_VID_STATE) {
-		rc = 0;
-		if (config)
-			goto vsync_wait;
-		else
-			goto end;
-	}
-
-	/* any non-primary clk state client blocks the vid state switch */
-	list_for_each_entry(client, &rsc->client_list, list)
-		if (client->current_state == SDE_RSC_CLK_STATE &&
-		    client->client_type == SDE_RSC_EXTERNAL_DISP_CLIENT)
-			goto end;
-
-	if (rsc->hw_ops.state_update) {
-		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
-		if (!rc)
-			rpmh_mode_solver_set(rsc->rpmh_dev,
-				rsc->version == SDE_RSC_REV_3 ? true : false);
-	}
-
-vsync_wait:
-	/* indicate wait for vsync for vid to cmd state switch & cfg update */
-	if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
-			rsc->current_state == SDE_RSC_CMD_STATE)) {
-		/* clear VSYNC timestamp for indication when update completes */
-		if (rsc->hw_ops.hw_vsync)
-			rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
-		if (!wait_vblank_crtc_id) {
-			pr_err("invalid crtc id wait pointer, client %d\n",
-					caller_client->id);
-			SDE_EVT32(caller_client->id, rsc->current_state,
-					caller_client->crtc_id,
-					wait_vblank_crtc_id, SDE_EVTLOG_ERROR);
-			msleep(PRIMARY_VBLANK_WORST_CASE_MS);
-		} else {
-			*wait_vblank_crtc_id = rsc->primary_client->crtc_id;
-		}
-	}
-end:
-	return rc;
-}
-
-static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc,
-	struct sde_rsc_cmd_config *config,
-	struct sde_rsc_client *caller_client,
-	int *wait_vblank_crtc_id)
-{
-	struct sde_rsc_client *client;
-	int rc = STATE_UPDATE_NOT_ALLOWED;
-	bool clk_client_active = false, multi_display_active = false;
-	bool vid_display_active = false, cmd_display_active = false;
-
-	/*
-	 * following code needs to run the loop through each
-	 * client because they might be in different order
-	 * sorting is not possible; only preference is available
-	 */
-	list_for_each_entry(client, &rsc->client_list, list) {
-		if (client->current_state == SDE_RSC_CLK_STATE &&
-		    client->client_type == SDE_RSC_EXTERNAL_DISP_CLIENT)
-			multi_display_active = true;
-		else if (client->current_state == SDE_RSC_CLK_STATE &&
-				client->client_type == SDE_RSC_CLK_CLIENT)
-			clk_client_active = true;
-		else if (client->current_state == SDE_RSC_VID_STATE)
-			vid_display_active = true;
-		else if (client->current_state == SDE_RSC_CMD_STATE)
-			cmd_display_active = true;
-		pr_debug("client state:%d type:%d\n",
-			client->current_state, client->client_type);
-	}
-
-	pr_debug("multi_display:%d clk_client:%d vid_display:%d cmd_display:%d\n",
-		multi_display_active, clk_client_active, vid_display_active,
-		cmd_display_active);
-	if (vid_display_active && !multi_display_active) {
-		rc = sde_rsc_switch_to_vid(rsc, NULL, rsc->primary_client,
-				wait_vblank_crtc_id);
-		if (!rc)
-			rc = VID_MODE_SWITCH_SUCCESS;
-	} else if (cmd_display_active && !multi_display_active) {
-		rc = sde_rsc_switch_to_cmd(rsc, NULL, rsc->primary_client,
-				wait_vblank_crtc_id);
-		if (!rc)
-			rc = CMD_MODE_SWITCH_SUCCESS;
-	} else if (clk_client_active) {
-		rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id);
-		if (!rc)
-			rc = CLK_MODE_SWITCH_SUCCESS;
-	} else if (rsc->hw_ops.state_update) {
-		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
-		if (!rc)
-			rpmh_mode_solver_set(rsc->rpmh_dev, true);
-	}
-
-	return rc;
-}
-
-/**
- * sde_rsc_client_get_vsync_refcount() - returns the status of the vsync
- * refcount, to signal if the client needs to reset the refcounting logic
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: value of the vsync refcount.
- */
-int sde_rsc_client_get_vsync_refcount(
-		struct sde_rsc_client *caller_client)
-{
-	struct sde_rsc_priv *rsc;
-
-	if (!caller_client) {
-		pr_err("invalid client for rsc state update\n");
-		return -EINVAL;
-	} else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return -EINVAL;
-	}
-
-	rsc = rsc_prv_list[caller_client->rsc_index];
-	if (!rsc)
-		return 0;
-
-	return atomic_read(&rsc->rsc_vsync_wait);
-}
-
-/**
- * sde_rsc_client_reset_vsync_refcount() - reduces the refcounting
- * logic that waits for the vsync.
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: zero if refcount was already zero.
- */
-int sde_rsc_client_reset_vsync_refcount(
-		struct sde_rsc_client *caller_client)
-{
-	struct sde_rsc_priv *rsc;
-	int ret;
-
-	if (!caller_client) {
-		pr_err("invalid client for rsc state update\n");
-		return -EINVAL;
-	} else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return -EINVAL;
-	}
-
-	rsc = rsc_prv_list[caller_client->rsc_index];
-	if (!rsc)
-		return 0;
-
-	ret = atomic_add_unless(&rsc->rsc_vsync_wait, -1, 0);
-	wake_up_all(&rsc->rsc_vsync_waitq);
-	SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait));
-
-	return ret;
-}
-
-/**
- * sde_rsc_client_is_state_update_complete() - check if state update is complete
- * RSC state transition is not complete until HW receives VBLANK signal. This
- * function checks RSC HW to determine whether that signal has been received.
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: true if the state update has completed.
- */
-bool sde_rsc_client_is_state_update_complete(
-		struct sde_rsc_client *caller_client)
-{
-	struct sde_rsc_priv *rsc;
-	u32 vsync_timestamp0 = 0;
-
-	if (!caller_client) {
-		pr_err("invalid client for rsc state update\n");
-		return false;
-	} else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return false;
-	}
-
-	rsc = rsc_prv_list[caller_client->rsc_index];
-	if (!rsc)
-		return false;
-
-	/**
-	 * state updates clear VSYNC timestamp, check if a new one arrived.
-	 * use VSYNC mode 0 (CMD TE) always for this, per HW recommendation.
-	 */
-	if (rsc->hw_ops.hw_vsync)
-		vsync_timestamp0 = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ_VSYNC0,
-				NULL, 0, 0);
-
-	return vsync_timestamp0 != 0;
-}
-
-/**
- * sde_rsc_client_state_update() - rsc client state update
- * Video mode, cmd mode and clk state are suppoed as modes. A client need to
- * set this property during panel config time. A switching client can set the
- * property to change the state
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @state:	 Client state - video/cmd
- * @config:	 fps, vtotal, porches, etc configuration for command mode
- *               panel
- * @crtc_id:	 current client's crtc id
- * @wait_vblank_crtc_id:	Output parameter. If set to non-zero, rsc hw
- *				state update requires a wait for one vblank on
- *				the primary crtc. In that case, this output
- *				param will be set to the crtc on which to wait.
- *				If SDE_RSC_INVALID_CRTC_ID, no wait necessary
- *
- * Return: error code.
- */
-int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
-	enum sde_rsc_state state,
-	struct sde_rsc_cmd_config *config, int crtc_id,
-	int *wait_vblank_crtc_id)
-{
-	int rc = 0;
-	struct sde_rsc_priv *rsc;
-
-	if (!caller_client) {
-		pr_err("invalid client for rsc state update\n");
-		return -EINVAL;
-	} else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return -EINVAL;
-	}
-
-	rsc = rsc_prv_list[caller_client->rsc_index];
-	if (!rsc)
-		return -EINVAL;
-
-	if (wait_vblank_crtc_id)
-		*wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
-
-	mutex_lock(&rsc->client_lock);
-	SDE_EVT32_VERBOSE(caller_client->id, caller_client->current_state,
-			state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
-	caller_client->crtc_id = crtc_id;
-	caller_client->current_state = state;
-
-	if (rsc->master_drm == NULL) {
-		pr_err("invalid master component binding\n");
-		rc = -EINVAL;
-		goto end;
-	} else if ((rsc->current_state == state) && !config) {
-		pr_debug("no state change: %d\n", state);
-		goto end;
-	}
-
-	pr_debug("%pS: rsc state:%d request client:%s state:%d\n",
-		__builtin_return_address(0), rsc->current_state,
-		caller_client->name, state);
-
-	if (rsc->current_state == SDE_RSC_IDLE_STATE)
-		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-
-	switch (state) {
-	case SDE_RSC_IDLE_STATE:
-		rc = sde_rsc_switch_to_idle(rsc, NULL, rsc->primary_client,
-			wait_vblank_crtc_id);
-
-		if (rc == CMD_MODE_SWITCH_SUCCESS) {
-			state = SDE_RSC_CMD_STATE;
-			rc = 0;
-		} else if (rc == VID_MODE_SWITCH_SUCCESS) {
-			state = SDE_RSC_VID_STATE;
-			rc = 0;
-		} else if (rc == CLK_MODE_SWITCH_SUCCESS) {
-			state = SDE_RSC_CLK_STATE;
-			rc = 0;
-		}
-		break;
-
-	case SDE_RSC_CMD_STATE:
-		rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
-				wait_vblank_crtc_id);
-		break;
-
-	case SDE_RSC_VID_STATE:
-		rc = sde_rsc_switch_to_vid(rsc, config, caller_client,
-				wait_vblank_crtc_id);
-		break;
-
-	case SDE_RSC_CLK_STATE:
-		rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id);
-		break;
-
-	default:
-		pr_err("invalid state handling %d\n", state);
-		break;
-	}
-
-	if (rc == STATE_UPDATE_NOT_ALLOWED) {
-		rc = 0;
-		SDE_EVT32(caller_client->id, caller_client->current_state,
-			state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE1);
-		goto clk_disable;
-	} else if (rc) {
-		pr_debug("state:%d update failed rc:%d\n", state, rc);
-		SDE_EVT32(caller_client->id, caller_client->current_state,
-			state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE2);
-		goto clk_disable;
-	}
-
-	pr_debug("state switch successfully complete: %d\n", state);
-	rsc->current_state = state;
-	SDE_EVT32(caller_client->id, caller_client->current_state,
-			state, rsc->current_state, SDE_EVTLOG_FUNC_EXIT);
-
-clk_disable:
-	if (rsc->current_state == SDE_RSC_IDLE_STATE)
-		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-end:
-	mutex_unlock(&rsc->client_lock);
-	return rc;
-}
-EXPORT_SYMBOL(sde_rsc_client_state_update);
-
-/**
- * sde_rsc_client_vote() - ab/ib vote from rsc client
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @bus_id: data bus for which to be voted
- * @ab:		 aggregated bandwidth vote from client.
- * @ib:		 instant bandwidth vote from client.
- *
- * Return: error code.
- */
-int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-		u32 bus_id, u64 ab_vote, u64 ib_vote)
-{
-	int rsc_index;
-	struct sde_rsc_priv *rsc;
-
-	if (caller_client && caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc client or client index\n");
-		return -EINVAL;
-	}
-
-	rsc_index = caller_client ? caller_client->rsc_index : SDE_RSC_INDEX;
-	rsc = rsc_prv_list[rsc_index];
-	if (!rsc || bus_id >= SDE_POWER_HANDLE_DBUS_ID_MAX)
-		return -EINVAL;
-
-	pr_debug("client:%s ab:%llu ib:%llu\n",
-			caller_client ? caller_client->name : "unknown",
-			ab_vote, ib_vote);
-
-	mutex_lock(&rsc->client_lock);
-	rsc->bw_config.new_ab_vote[bus_id] = ab_vote;
-	rsc->bw_config.new_ib_vote[bus_id] = ib_vote;
-	mutex_unlock(&rsc->client_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(sde_rsc_client_vote);
-
-int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client,
-	bool delta_vote)
-{
-	int rc = 0, rsc_index, i;
-	struct sde_rsc_priv *rsc;
-	bool bw_increase = false;
-
-	if (caller_client && caller_client->rsc_index >= MAX_RSC_COUNT) {
-		pr_err("invalid rsc index\n");
-		return -EINVAL;
-	}
-
-	rsc_index = caller_client ? caller_client->rsc_index : SDE_RSC_INDEX;
-	rsc = rsc_prv_list[rsc_index];
-	if (!rsc)
-		return -EINVAL;
-
-	pr_debug("client:%s trigger bw delta vote:%d\n",
-		caller_client ? caller_client->name : "unknown", delta_vote);
-
-	mutex_lock(&rsc->client_lock);
-
-	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX && delta_vote; i++) {
-		if (rsc->bw_config.new_ab_vote[i] > rsc->bw_config.ab_vote[i] ||
-		    rsc->bw_config.new_ib_vote[i] > rsc->bw_config.ib_vote[i])
-			bw_increase = true;
-
-		rsc->bw_config.ab_vote[i] = rsc->bw_config.new_ab_vote[i];
-		rsc->bw_config.ib_vote[i] = rsc->bw_config.new_ib_vote[i];
-	}
-
-	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (rc)
-		goto clk_enable_fail;
-
-	if (delta_vote) {
-		if (rsc->hw_ops.tcs_wait) {
-			rc = rsc->hw_ops.tcs_wait(rsc);
-			if (rc) {
-				pr_err("tcs is still busy; can't send command\n");
-				if (rsc->hw_ops.tcs_use_ok)
-					rsc->hw_ops.tcs_use_ok(rsc);
-				goto end;
-			}
-		}
-
-		rpmh_invalidate(rsc->rpmh_dev);
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
-			sde_power_data_bus_set_quota(&rsc->phandle,
-				rsc->pclient,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-				i, rsc->bw_config.ab_vote[i],
-				rsc->bw_config.ib_vote[i]);
-		rpmh_flush(rsc->rpmh_dev);
-	}
-
-	if (rsc->hw_ops.bwi_status &&
-	    (rsc->current_state == SDE_RSC_CMD_STATE ||
-	     rsc->current_state == SDE_RSC_VID_STATE))
-		rsc->hw_ops.bwi_status(rsc, bw_increase);
-	else if (rsc->hw_ops.tcs_use_ok)
-		rsc->hw_ops.tcs_use_ok(rsc);
-
-end:
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-clk_enable_fail:
-	mutex_unlock(&rsc->client_lock);
-
-	return rc;
-}
-EXPORT_SYMBOL(sde_rsc_client_trigger_vote);
-
-#if defined(CONFIG_DEBUG_FS)
-void sde_rsc_debug_dump(u32 mux_sel)
-{
-	struct sde_rsc_priv *rsc;
-
-	rsc = rsc_prv_list[SDE_RSC_INDEX];
-	if (!rsc)
-		return;
-
-	/* this must be called with rsc clocks enabled */
-	if (rsc->hw_ops.debug_dump)
-		rsc->hw_ops.debug_dump(rsc, mux_sel);
-}
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-static int _sde_debugfs_status_show(struct seq_file *s, void *data)
-{
-	struct sde_rsc_priv *rsc;
-	struct sde_rsc_client *client;
-	int ret;
-
-	if (!s || !s->private)
-		return -EINVAL;
-
-	rsc = s->private;
-
-	mutex_lock(&rsc->client_lock);
-
-	seq_printf(s, "rsc current state:%d\n", rsc->current_state);
-	seq_printf(s, "wraper backoff time(ns):%d\n",
-				rsc->timer_config.static_wakeup_time_ns);
-	seq_printf(s, "rsc backoff time(ns):%d\n",
-				rsc->timer_config.rsc_backoff_time_ns);
-	seq_printf(s, "pdc backoff time(ns):%d\n",
-				rsc->timer_config.pdc_backoff_time_ns);
-	seq_printf(s, "rsc mode threshold time(ns):%d\n",
-				rsc->timer_config.rsc_mode_threshold_time_ns);
-	seq_printf(s, "rsc time slot 0(ns):%d\n",
-				rsc->timer_config.rsc_time_slot_0_ns);
-	seq_printf(s, "rsc time slot 1(ns):%d\n",
-				rsc->timer_config.rsc_time_slot_1_ns);
-	seq_printf(s, "frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
-			rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
-			rsc->cmd_config.jitter_denom,
-			rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
-
-	seq_puts(s, "\n");
-
-	list_for_each_entry(client, &rsc->client_list, list)
-		seq_printf(s, "\t client:%s state:%d\n",
-				client->name, client->current_state);
-
-	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
-		pr_debug("debug node is not supported during idle state\n");
-		seq_puts(s, "hw state is not supported during idle pc\n");
-		goto end;
-	}
-
-	if (rsc->hw_ops.debug_show) {
-		ret = rsc->hw_ops.debug_show(s, rsc);
-		if (ret)
-			pr_err("sde rsc: hw debug failed ret:%d\n", ret);
-	}
-
-end:
-	mutex_unlock(&rsc->client_lock);
-	return 0;
-}
-
-static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, _sde_debugfs_status_show, inode->i_private);
-}
-
-static int _sde_debugfs_mode_ctrl_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->private_data = inode->i_private;
-	return nonseekable_open(inode, file);
-}
-
-static ssize_t _sde_debugfs_mode_ctrl_read(struct file *file, char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	struct sde_rsc_priv *rsc = file->private_data;
-	char buffer[MAX_BUFFER_SIZE];
-	int blen = 0;
-
-	if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
-		return 0;
-
-	mutex_lock(&rsc->client_lock);
-	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
-		pr_debug("debug node is not supported during idle state\n");
-		blen = snprintf(buffer, MAX_BUFFER_SIZE,
-				"hw state is not supported during idle pc\n");
-		goto end;
-	}
-
-	blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
-							MAX_BUFFER_SIZE, 0);
-
-end:
-	mutex_unlock(&rsc->client_lock);
-	if (blen <= 0)
-		return 0;
-
-	if (copy_to_user(buf, buffer, blen))
-		return -EFAULT;
-
-	*ppos += blen;
-	return blen;
-}
-
-static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
-			const char __user *p, size_t count, loff_t *ppos)
-{
-	struct sde_rsc_priv *rsc = file->private_data;
-	char *input;
-	u32 mode_state = 0;
-	int rc;
-
-	if (!rsc || !rsc->hw_ops.mode_ctrl || !count ||
-					count > MAX_COUNT_SIZE_SUPPORTED)
-		return 0;
-
-	input = kmalloc(count + 1, GFP_KERNEL);
-	if (!input)
-		return -ENOMEM;
-
-	if (copy_from_user(input, p, count)) {
-		kfree(input);
-		return -EFAULT;
-	}
-	input[count] = '\0';
-
-	rc = kstrtoint(input, 0, &mode_state);
-	if (rc) {
-		pr_err("mode_state: int conversion failed rc:%d\n", rc);
-		goto end;
-	}
-
-	pr_debug("mode_state: %d\n", mode_state);
-	mode_state &= 0x7;
-	if (mode_state != ALL_MODES_DISABLED &&
-			mode_state != ALL_MODES_ENABLED &&
-			mode_state != ONLY_MODE_0_ENABLED &&
-			mode_state != ONLY_MODE_0_1_ENABLED) {
-		pr_err("invalid mode:%d combination\n", mode_state);
-		goto end;
-	}
-
-	mutex_lock(&rsc->client_lock);
-	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
-		pr_debug("debug node is not supported during idle state\n");
-		goto state_check;
-	}
-
-	rsc->hw_ops.mode_ctrl(rsc, MODE_UPDATE, NULL, 0, mode_state);
-
-state_check:
-	mutex_unlock(&rsc->client_lock);
-end:
-	kfree(input);
-	return count;
-}
-
-static int _sde_debugfs_vsync_mode_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->private_data = inode->i_private;
-	return nonseekable_open(inode, file);
-}
-
-static ssize_t _sde_debugfs_vsync_mode_read(struct file *file, char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	struct sde_rsc_priv *rsc = file->private_data;
-	char buffer[MAX_BUFFER_SIZE];
-	int blen = 0;
-
-	if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
-		return 0;
-
-	mutex_lock(&rsc->client_lock);
-	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
-		pr_debug("debug node is not supported during idle state\n");
-		blen = snprintf(buffer, MAX_BUFFER_SIZE,
-				"hw state is not supported during idle pc\n");
-		goto end;
-	}
-
-	blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
-						MAX_BUFFER_SIZE, 0);
-
-end:
-	mutex_unlock(&rsc->client_lock);
-	if (blen <= 0)
-		return 0;
-
-	if (copy_to_user(buf, buffer, blen))
-		return -EFAULT;
-
-	*ppos += blen;
-	return blen;
-}
-
-static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
-			const char __user *p, size_t count, loff_t *ppos)
-{
-	struct sde_rsc_priv *rsc = file->private_data;
-	char *input;
-	u32 vsync_state = 0;
-	int rc;
-
-	if (!rsc || !rsc->hw_ops.hw_vsync || !count ||
-				count > MAX_COUNT_SIZE_SUPPORTED)
-		return 0;
-
-	input = kmalloc(count + 1, GFP_KERNEL);
-	if (!input)
-		return -ENOMEM;
-
-	if (copy_from_user(input, p, count)) {
-		kfree(input);
-		return -EFAULT;
-	}
-	input[count] = '\0';
-
-	rc = kstrtoint(input, 0, &vsync_state);
-	if (rc) {
-		pr_err("vsync_state: int conversion failed rc:%d\n", rc);
-		goto end;
-	}
-
-	pr_debug("vsync_state: %d\n", vsync_state);
-	vsync_state &= 0x7;
-
-	mutex_lock(&rsc->client_lock);
-	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
-		pr_debug("debug node is not supported during idle state\n");
-		goto state_check;
-	}
-
-	if (vsync_state)
-		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
-							0, vsync_state - 1);
-	else
-		rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
-
-state_check:
-	mutex_unlock(&rsc->client_lock);
-end:
-	kfree(input);
-	return count;
-}
-
-static const struct file_operations debugfs_status_fops = {
-	.open =		_sde_debugfs_status_open,
-	.read =		seq_read,
-	.llseek =	seq_lseek,
-	.release =	single_release,
-};
-
-static const struct file_operations mode_control_fops = {
-	.open =		_sde_debugfs_mode_ctrl_open,
-	.read =		_sde_debugfs_mode_ctrl_read,
-	.write =	_sde_debugfs_mode_ctrl_write,
-};
-
-static const struct file_operations vsync_status_fops = {
-	.open =		_sde_debugfs_vsync_mode_open,
-	.read =		_sde_debugfs_vsync_mode_read,
-	.write =	_sde_debugfs_vsync_mode_write,
-};
-
-static void _sde_rsc_init_debugfs(struct sde_rsc_priv *rsc, char *name)
-{
-	rsc->debugfs_root = debugfs_create_dir(name, NULL);
-	if (!rsc->debugfs_root)
-		return;
-
-	/* don't error check these */
-	debugfs_create_file("status", 0400, rsc->debugfs_root, rsc,
-							&debugfs_status_fops);
-	debugfs_create_file("mode_control", 0600, rsc->debugfs_root, rsc,
-							&mode_control_fops);
-	debugfs_create_file("vsync_mode", 0600, rsc->debugfs_root, rsc,
-							&vsync_status_fops);
-	debugfs_create_x32("debug_mode", 0600, rsc->debugfs_root,
-							&rsc->debug_mode);
-}
-
-static void sde_rsc_deinit(struct platform_device *pdev,
-					struct sde_rsc_priv *rsc)
-{
-	if (!rsc)
-		return;
-
-	if (rsc->pclient)
-		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-	if (rsc->sw_fs_enabled)
-		regulator_disable(rsc->fs);
-	if (rsc->fs)
-		devm_regulator_put(rsc->fs);
-	if (rsc->wrapper_io.base)
-		msm_dss_iounmap(&rsc->wrapper_io);
-	if (rsc->drv_io.base)
-		msm_dss_iounmap(&rsc->drv_io);
-	if (rsc->pclient)
-		sde_power_client_destroy(&rsc->phandle, rsc->pclient);
-
-	sde_power_resource_deinit(pdev, &rsc->phandle);
-	debugfs_remove_recursive(rsc->debugfs_root);
-	kfree(rsc);
-}
-
-/**
- * sde_rsc_bind - bind rsc device with controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- * Returns:     Zero on success
- */
-static int sde_rsc_bind(struct device *dev,
-		struct device *master,
-		void *data)
-{
-	struct sde_rsc_priv *rsc;
-	struct drm_device *drm;
-	struct platform_device *pdev = to_platform_device(dev);
-
-	if (!dev || !pdev || !master) {
-		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
-				dev, pdev, master);
-		return -EINVAL;
-	}
-
-	drm = dev_get_drvdata(master);
-	rsc = platform_get_drvdata(pdev);
-	if (!drm || !rsc) {
-		pr_err("invalid param(s), drm %pK, rsc %pK\n",
-				drm, rsc);
-		return -EINVAL;
-	}
-
-	mutex_lock(&rsc->client_lock);
-	rsc->master_drm = drm;
-	mutex_unlock(&rsc->client_lock);
-
-	sde_dbg_reg_register_base(SDE_RSC_DRV_DBG_NAME, rsc->drv_io.base,
-							rsc->drv_io.len);
-	sde_dbg_reg_register_base(SDE_RSC_WRAPPER_DBG_NAME,
-				rsc->wrapper_io.base, rsc->wrapper_io.len);
-	return 0;
-}
-
-/**
- * sde_rsc_unbind - unbind rsc from controlling device
- * @dev:        Pointer to base of platform device
- * @master:     Pointer to container of drm device
- * @data:       Pointer to private data
- */
-static void sde_rsc_unbind(struct device *dev,
-		struct device *master, void *data)
-{
-	struct sde_rsc_priv *rsc;
-	struct platform_device *pdev = to_platform_device(dev);
-
-	if (!dev || !pdev) {
-		pr_err("invalid param(s)\n");
-		return;
-	}
-
-	rsc = platform_get_drvdata(pdev);
-	if (!rsc) {
-		pr_err("invalid display rsc\n");
-		return;
-	}
-
-	mutex_lock(&rsc->client_lock);
-	rsc->master_drm = NULL;
-	mutex_unlock(&rsc->client_lock);
-}
-
-static const struct component_ops sde_rsc_comp_ops = {
-	.bind = sde_rsc_bind,
-	.unbind = sde_rsc_unbind,
-};
-
-static int sde_rsc_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct sde_rsc_priv *rsc;
-	static int counter;
-	char  name[MAX_RSC_CLIENT_NAME_LEN];
-
-	if (counter >= MAX_RSC_COUNT) {
-		pr_err("sde rsc supports probe till MAX_RSC_COUNT=%d devices\n",
-			MAX_RSC_COUNT);
-		return -EINVAL;
-	}
-
-	rsc = kzalloc(sizeof(*rsc), GFP_KERNEL);
-	if (!rsc) {
-		ret = -ENOMEM;
-		goto rsc_alloc_fail;
-	}
-
-	platform_set_drvdata(pdev, rsc);
-	of_property_read_u32(pdev->dev.of_node, "qcom,sde-rsc-version",
-								&rsc->version);
-
-	if (rsc->version == SDE_RSC_REV_2)
-		rsc->single_tcs_execution_time = SINGLE_TCS_EXECUTION_TIME_V2;
-	else
-		rsc->single_tcs_execution_time = SINGLE_TCS_EXECUTION_TIME_V1;
-
-	if (rsc->version == SDE_RSC_REV_3) {
-		rsc->time_slot_0_ns = rsc->single_tcs_execution_time
-					+ RSC_MODE_INSTRUCTION_TIME;
-		rsc->backoff_time_ns = RSC_MODE_INSTRUCTION_TIME;
-		rsc->mode_threshold_time_ns = rsc->time_slot_0_ns;
-	} else {
-		rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
-					+ RSC_MODE_INSTRUCTION_TIME;
-		rsc->backoff_time_ns = rsc->single_tcs_execution_time
-						+ RSC_MODE_INSTRUCTION_TIME;
-		rsc->mode_threshold_time_ns = rsc->backoff_time_ns
-						+ RSC_MODE_THRESHOLD_OVERHEAD;
-	}
-
-	ret = sde_power_resource_init(pdev, &rsc->phandle);
-	if (ret) {
-		pr_err("sde rsc:power resource init failed ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	rsc->pclient = sde_power_client_create(&rsc->phandle, "rsc");
-	if (IS_ERR_OR_NULL(rsc->pclient)) {
-		ret = PTR_ERR(rsc->pclient);
-		rsc->pclient = NULL;
-		pr_err("sde rsc:power client create failed ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	/**
-	 * sde rsc should always vote through enable path, sleep vote is
-	 * set to "0" by default.
-	 */
-	sde_power_data_bus_state_update(&rsc->phandle, true);
-
-	rsc->rpmh_dev = rpmh_dev[SDE_RSC_INDEX + counter];
-	if (IS_ERR_OR_NULL(rsc->rpmh_dev)) {
-		ret = !rsc->rpmh_dev ? -EINVAL : PTR_ERR(rsc->rpmh_dev);
-		rsc->rpmh_dev = NULL;
-		pr_err("rpmh device node is not available ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
-	if (ret) {
-		pr_err("sde rsc: wrapper io data mapping failed ret=%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	ret = msm_dss_ioremap_byname(pdev, &rsc->drv_io, "drv");
-	if (ret) {
-		pr_err("sde rsc: drv io data mapping failed ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	rsc->fs = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR_OR_NULL(rsc->fs)) {
-		rsc->fs = NULL;
-		pr_err("unable to get regulator\n");
-		goto sde_rsc_fail;
-	}
-
-	if (rsc->version >= SDE_RSC_REV_3)
-		ret = sde_rsc_hw_register_v3(rsc);
-	else
-		ret = sde_rsc_hw_register(rsc);
-	if (ret) {
-		pr_err("sde rsc: hw register failed ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	ret = regulator_enable(rsc->fs);
-	if (ret) {
-		pr_err("sde rsc: fs on failed ret:%d\n", ret);
-		goto sde_rsc_fail;
-	}
-
-	rsc->sw_fs_enabled = true;
-
-	if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) {
-		pr_err("failed to enable sde rsc power resources\n");
-		goto sde_rsc_fail;
-	}
-
-	if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
-		goto sde_rsc_fail;
-
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-
-	INIT_LIST_HEAD(&rsc->client_list);
-	INIT_LIST_HEAD(&rsc->event_list);
-	mutex_init(&rsc->client_lock);
-	init_waitqueue_head(&rsc->rsc_vsync_waitq);
-
-	pr_info("sde rsc index:%d probed successfully\n",
-				SDE_RSC_INDEX + counter);
-
-	rsc_prv_list[SDE_RSC_INDEX + counter] = rsc;
-	snprintf(name, MAX_RSC_CLIENT_NAME_LEN, "%s%d", "sde_rsc", counter);
-	_sde_rsc_init_debugfs(rsc, name);
-	counter++;
-
-	ret = component_add(&pdev->dev, &sde_rsc_comp_ops);
-	if (ret)
-		pr_debug("component add failed, ret=%d\n", ret);
-	ret = 0;
-
-	return ret;
-
-sde_rsc_fail:
-	sde_rsc_deinit(pdev, rsc);
-rsc_alloc_fail:
-	return ret;
-}
-
-static int sde_rsc_remove(struct platform_device *pdev)
-{
-	struct sde_rsc_priv *rsc = platform_get_drvdata(pdev);
-
-	sde_rsc_deinit(pdev, rsc);
-	return 0;
-}
-
-static int sde_rsc_rpmh_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	uint32_t index = 0;
-
-	ret = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
-	if (ret) {
-		pr_err("unable to find sde rsc cell index\n");
-		return ret;
-	} else if (index >= MAX_RSC_COUNT) {
-		pr_err("invalid cell index for sde rsc:%d\n", index);
-		return -EINVAL;
-	}
-
-	rpmh_dev[index] = &pdev->dev;
-	return 0;
-}
-
-int sde_rsc_rpmh_remove(struct platform_device *pdev)
-{
-	int i;
-
-	for (i = 0; i < MAX_RSC_COUNT; i++)
-		rpmh_dev[i] = NULL;
-
-	return 0;
-}
-
-static const struct of_device_id dt_match[] = {
-	{ .compatible = "qcom,sde-rsc"},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, dt_match);
-
-static struct platform_driver sde_rsc_platform_driver = {
-	.probe      = sde_rsc_probe,
-	.remove     = sde_rsc_remove,
-	.driver     = {
-		.name   = "sde_rsc",
-		.of_match_table = dt_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-static const struct of_device_id sde_rsc_rpmh_match[] = {
-	{.compatible = "qcom,sde-rsc-rpmh"},
-	{},
-};
-
-static struct platform_driver sde_rsc_rpmh_driver = {
-	.probe = sde_rsc_rpmh_probe,
-	.remove = sde_rsc_rpmh_remove,
-	.driver = {
-		.name = "sde_rsc_rpmh",
-		.of_match_table = sde_rsc_rpmh_match,
-	},
-};
-
-static int __init sde_rsc_register(void)
-{
-	return platform_driver_register(&sde_rsc_platform_driver);
-}
-
-static void __exit sde_rsc_unregister(void)
-{
-	platform_driver_unregister(&sde_rsc_platform_driver);
-}
-
-static int __init sde_rsc_rpmh_register(void)
-{
-	return platform_driver_register(&sde_rsc_rpmh_driver);
-}
-
-subsys_initcall(sde_rsc_rpmh_register);
-module_init(sde_rsc_register);
-module_exit(sde_rsc_unregister);
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
deleted file mode 100644
index e2c3e9e..0000000
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ /dev/null
@@ -1,913 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-
-#include "sde_rsc_priv.h"
-#include "sde_dbg.h"
-#include "sde_rsc_hw.h"
-
-static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
-{
-	struct sde_rsc_event *event;
-
-	list_for_each_entry(event, &rsc->event_list, list)
-		if (event->event_type & event_type)
-			event->cb_func(event_type, event->usr);
-}
-
-static int rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware qtimer init\n");
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_1,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_2,
-						0xffffffff, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR0_FG0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR1_FG0,
-						0x1, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CTL,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CTL,
-						0x1, rsc->debug_mode);
-
-	return 0;
-}
-
-static int rsc_hw_pdc_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware pdc init\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0,
-						0x4520, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0,
-						0x4510, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0,
-						0x4514, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SLAVE_ID_DRV0,
-						0x1, rsc->debug_mode);
-
-	return 0;
-}
-
-static int rsc_hw_wrapper_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware wrapper init\n");
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
-		rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
-		rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-						BIT(8), rsc->debug_mode);
-	return 0;
-}
-
-static int rsc_hw_seq_memory_init_v2(struct sde_rsc_priv *rsc)
-{
-	const u32 mode_0_start_addr = 0x0;
-	const u32 mode_1_start_addr = 0xc;
-	const u32 mode_2_start_addr = 0x18;
-
-	pr_debug("rsc sequencer memory init v2\n");
-
-	/* Mode - 0 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x0,
-						0xe0bb9ebe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x4,
-						0x9ebeff39, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x8,
-						0x2020209b, rsc->debug_mode);
-
-	/* Mode - 1 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0xc,
-						0x38bb9ebe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
-						0xbeff39e0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0x20209b9e, rsc->debug_mode);
-
-	/* Mode - 2 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xb9bae5a0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xbdbbf9fa, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x38999afe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xac81e1a1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x82e2a2e0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0x8cfd9d39, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0xbc20209b, rsc->debug_mode);
-
-	/* tcs sleep & wake sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0xe601a6fc, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x38,
-						0xbc20209c, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x3c,
-						0xe701a7fc, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x40,
-						0x0000209c, rsc->debug_mode);
-
-	/* branch address */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x33, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x3b, rsc->debug_mode);
-
-	/* start address */
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
-					mode_1_start_addr,
-					rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
-					mode_2_start_addr,
-					rsc->debug_mode);
-	return 0;
-
-}
-static int rsc_hw_seq_memory_init(struct sde_rsc_priv *rsc)
-{
-	const u32 mode_0_start_addr = 0x0;
-	const u32 mode_1_start_addr = 0xa;
-	const u32 mode_2_start_addr = 0x15;
-
-	pr_debug("rsc sequencer memory init\n");
-
-	/* Mode - 0 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x0,
-						0xe0a88bab, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x4,
-						0x8babec39, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x8,
-						0x8bab2088, rsc->debug_mode);
-
-	/* Mode - 1 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0xc,
-						0x39e038a8, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
-						0x888babec, rsc->debug_mode);
-
-	/* Mode - 2 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0xaaa8a020, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xe1a138eb, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xe0aca581, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x82e2a2ed, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0x8cea8a39, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0xe9a92088, rsc->debug_mode);
-
-	/* tcs sleep & wake sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0x89e686a6, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0xa7e9a920, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x2089e787, rsc->debug_mode);
-
-	/* branch address */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x2a, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x31, rsc->debug_mode);
-
-	/* start address */
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
-					mode_1_start_addr,
-					rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
-					mode_2_start_addr,
-					rsc->debug_mode);
-
-	return 0;
-}
-
-static int rsc_hw_solver_init(struct sde_rsc_priv *rsc)
-{
-
-	pr_debug("rsc solver init\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0,
-					0xFFFFFFFF, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0,
-					0xFFFFFFFF, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_MAX_IDLE_DURATION_DRV0,
-					0xEFFFFFFF, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
-		rsc->timer_config.rsc_time_slot_0_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-		rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
-		rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
-						0x7, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT0_PRI0_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI0_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI3_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI0_DRV0,
-						0x2, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI3_DRV0,
-						0x2, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_MODE_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_TIMERS_CONSIDERED_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_IDLE_TIME_DRV0,
-						0x01000010, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2,
-					0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	return 0;
-}
-
-static int rsc_hw_timer_update(struct sde_rsc_priv *rsc)
-{
-	if (!rsc) {
-		pr_debug("invalid input param\n");
-		return -EINVAL;
-	}
-
-	pr_debug("rsc hw timer update\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
-		rsc->timer_config.rsc_time_slot_0_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-		rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
-		rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
-		rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
-		rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
-
-	/* make sure that hw timers are updated */
-	wmb();
-
-	return 0;
-}
-
-int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
-{
-	int rc = -EBUSY;
-	int count, reg;
-	unsigned long power_status;
-
-	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
-
-	/**
-	 * force busy and idle during clk & video mode state because it
-	 * is trying to entry in mode-2 without turning on the vysnc.
-	 */
-	if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) {
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg &= ~(BIT(8) | BIT(0));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-	}
-
-	// needs review with HPG sequence
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-					0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-					0x0, rsc->debug_mode);
-
-	reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-				rsc->debug_mode);
-	reg &= ~BIT(3);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-					reg, rsc->debug_mode);
-
-	if (rsc->version < SDE_RSC_REV_2) {
-		reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
-							rsc->debug_mode);
-		reg |= BIT(13);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
-							reg, rsc->debug_mode);
-	}
-
-	/* make sure that mode-2 exit before wait*/
-	wmb();
-
-	/* this wait is required to make sure that gdsc is powered on */
-	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
-		power_status = dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_PWR_CTRL, rsc->debug_mode);
-		if (!test_bit(POWER_CTRL_BIT_12, &power_status)) {
-			reg = dss_reg_r(&rsc->drv_io,
-				SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
-			SDE_EVT32_VERBOSE(count, reg, power_status);
-			rc = 0;
-			break;
-		}
-		usleep_range(10, 100);
-	}
-
-	if (rsc->version < SDE_RSC_REV_2) {
-		reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
-							rsc->debug_mode);
-		reg &= ~BIT(13);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
-							reg, rsc->debug_mode);
-	}
-
-	if (rc)
-		pr_err("vdd reg is not enabled yet\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
-						0x3, rsc->debug_mode);
-
-	reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-	reg &= ~(BIT(0) | BIT(8));
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-						reg, rsc->debug_mode);
-	wmb(); /* make sure to disable rsc solver state */
-
-	reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-	reg |= (BIT(0) | BIT(8));
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-						reg, rsc->debug_mode);
-	wmb(); /* make sure to enable rsc solver state */
-
-	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
-
-	return rc;
-}
-
-static int sde_rsc_mode2_entry_trigger(struct sde_rsc_priv *rsc)
-{
-	int rc;
-	int count, wrapper_status;
-	unsigned long reg;
-
-	/* update qtimers to high during clk & video mode state */
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	}
-
-	wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-				rsc->debug_mode);
-	wrapper_status |= BIT(3);
-	wrapper_status |= BIT(0);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-					wrapper_status, rsc->debug_mode);
-
-	/**
-	 * force busy and idle during clk & video mode state because it
-	 * is trying to entry in mode-2 without turning on the vysnc.
-	 */
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-				BIT(0) | BIT(1), rsc->debug_mode);
-		wmb(); /* force busy gurantee */
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-				BIT(0) | BIT(9), rsc->debug_mode);
-	}
-
-	/* make sure that mode-2 is triggered before wait*/
-	wmb();
-
-	rc = -EBUSY;
-	/* this wait is required to turn off the rscc clocks */
-	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
-		reg = dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_PWR_CTRL, rsc->debug_mode);
-		if (test_bit(POWER_CTRL_BIT_12, &reg)) {
-			rc = 0;
-			break;
-		}
-		usleep_range(10, 100);
-	}
-
-	return rc;
-}
-
-static void sde_rsc_reset_mode_0_1(struct sde_rsc_priv *rsc)
-{
-	u32 seq_busy, current_mode, curr_inst_addr;
-
-	seq_busy = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_BUSY_DRV0,
-			rsc->debug_mode);
-	current_mode = dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
-			rsc->debug_mode);
-	curr_inst_addr = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_PROGRAM_COUNTER,
-			rsc->debug_mode);
-	SDE_EVT32(seq_busy, current_mode, curr_inst_addr);
-
-	if (seq_busy && (current_mode == SDE_RSC_MODE_0_VAL ||
-			current_mode == SDE_RSC_MODE_1_VAL)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0xffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-		/* unstick f1 qtimer */
-		wmb();
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0x0, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0x0, rsc->debug_mode);
-		/* manually trigger f1 qtimer interrupt */
-		wmb();
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-		/* unstick f0 qtimer */
-		wmb();
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0x0, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0x0, rsc->debug_mode);
-		/* manually trigger f0 qtimer interrupt */
-		wmb();
-	}
-}
-
-static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
-{
-	int rc = 0, i;
-	u32 reg;
-
-	if (rsc->power_collapse_block)
-		return -EINVAL;
-
-	if (rsc->sw_fs_enabled) {
-		rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
-		if (rc) {
-			pr_err("vdd reg fast mode set failed rc:%d\n", rc);
-			return rc;
-		}
-	}
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
-						0x7, rsc->debug_mode);
-	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
-
-	for (i = 0; i <= MAX_MODE2_ENTRY_TRY; i++) {
-		rc = sde_rsc_mode2_entry_trigger(rsc);
-		if (!rc)
-			break;
-
-		reg = dss_reg_r(&rsc->drv_io,
-				SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
-		pr_err("mdss gdsc power down failed, instruction:0x%x, rc:%d\n",
-				reg, rc);
-		SDE_EVT32(rc, reg, SDE_EVTLOG_ERROR);
-
-		/* avoid touching f1 qtimer for last try */
-		if (i != MAX_MODE2_ENTRY_TRY)
-			sde_rsc_reset_mode_0_1(rsc);
-	}
-
-	if (rc)
-		goto end;
-
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-					BIT(0) | BIT(8), rsc->debug_mode);
-		wmb(); /* force busy on vsync */
-	}
-
-	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
-
-	if (rsc->sw_fs_enabled) {
-		regulator_disable(rsc->fs);
-		rsc->sw_fs_enabled = false;
-	}
-
-	return 0;
-
-end:
-	sde_rsc_mode2_exit(rsc, rsc->current_state);
-
-	return rc;
-}
-
-static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
-						enum sde_rsc_state state)
-{
-	int rc = 0;
-	int reg;
-
-	if (rsc->power_collapse) {
-		rc = sde_rsc_mode2_exit(rsc, state);
-		if (rc)
-			pr_err("power collapse: mode2 exit failed\n");
-		else
-			rsc->power_collapse = false;
-	}
-
-	switch (state) {
-	case SDE_RSC_CMD_STATE:
-		pr_debug("command mode handling\n");
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
-							0x0, rsc->debug_mode);
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg |= (BIT(0) | BIT(8));
-		reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7) | BIT(9));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		/* make sure that solver is enabled */
-		wmb();
-
-		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_ENABLED);
-		break;
-
-	case SDE_RSC_VID_STATE:
-		pr_debug("video mode handling\n");
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg |= BIT(8);
-		reg &= ~(BIT(1) | BIT(0));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		/* make sure that solver mode is override */
-		wmb();
-
-		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
-		break;
-
-	case SDE_RSC_CLK_STATE:
-		pr_debug("clk state handling\n");
-
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg &= ~BIT(0);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		/* make sure that solver mode is disabled */
-		wmb();
-		break;
-
-	case SDE_RSC_IDLE_STATE:
-		rc = sde_rsc_mode2_entry(rsc);
-		if (rc)
-			pr_err("power collapse - mode 2 entry failed\n");
-		else
-			rsc->power_collapse = true;
-		break;
-
-	default:
-		pr_err("state:%d handling is not supported\n", state);
-		break;
-	}
-
-	return rc;
-}
-
-int rsc_hw_init(struct sde_rsc_priv *rsc)
-{
-	int rc = 0;
-
-	rc = rsc_hw_qtimer_init(rsc);
-	if (rc) {
-		pr_err("rsc hw qtimer init failed\n");
-		goto end;
-	}
-
-	rc = rsc_hw_wrapper_init(rsc);
-	if (rc) {
-		pr_err("rsc hw wrapper init failed\n");
-		goto end;
-	}
-
-	if (rsc->version == SDE_RSC_REV_2)
-		rc = rsc_hw_seq_memory_init_v2(rsc);
-	else
-		rc = rsc_hw_seq_memory_init(rsc);
-	if (rc) {
-		pr_err("rsc sequencer memory init failed\n");
-		goto end;
-	}
-
-	rc = rsc_hw_solver_init(rsc);
-	if (rc) {
-		pr_err("rsc solver init failed\n");
-		goto end;
-	}
-
-	rc = rsc_hw_pdc_init(rsc);
-	if (rc) {
-		pr_err("rsc hw pdc init failed\n");
-		goto end;
-	}
-
-	/* make sure that hw is initialized */
-	wmb();
-
-	pr_info("sde rsc init successfully done\n");
-end:
-	return rc;
-}
-
-int rsc_hw_mode_ctrl(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, u32 mode)
-{
-	u32 blen = 0;
-	u32 slot_time;
-
-	switch (request) {
-	case MODE_READ:
-		if (!buffer || !buffer_size)
-			return blen;
-
-		blen = snprintf(buffer, buffer_size - blen,
-			"mode_status:0x%x\n",
-			dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
-			rsc->debug_mode));
-		break;
-
-	case MODE_UPDATE:
-		slot_time = mode & BIT(0) ? 0x0 :
-					rsc->timer_config.rsc_time_slot_2_ns;
-		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
-						slot_time, rsc->debug_mode);
-
-		slot_time = mode & BIT(1) ?
-			rsc->timer_config.rsc_time_slot_0_ns :
-				rsc->timer_config.rsc_time_slot_2_ns;
-		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
-						slot_time, rsc->debug_mode);
-
-		rsc->power_collapse_block = !(mode & BIT(2));
-		break;
-
-	default:
-		break;
-	}
-
-	return blen;
-}
-
-int sde_rsc_debug_show(struct seq_file *s, struct sde_rsc_priv *rsc)
-{
-	seq_printf(s, "override ctrl:0x%x\n",
-		 dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-				rsc->debug_mode));
-	seq_printf(s, "power ctrl:0x%x\n",
-		 dss_reg_r(&rsc->wrapper_io, SDE_RSCC_PWR_CTRL,
-				rsc->debug_mode));
-	seq_printf(s, "vsycn timestamp0:0x%x\n",
-		 dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
-				rsc->debug_mode));
-	seq_printf(s, "vsycn timestamp1:0x%x\n",
-		 dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
-				rsc->debug_mode));
-
-	seq_printf(s, "error irq status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_ERROR_IRQ_STATUS_DRV0,
-				rsc->debug_mode));
-
-	seq_printf(s, "seq busy status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_BUSY_DRV0,
-				rsc->debug_mode));
-
-	seq_printf(s, "solver override ctrl status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
-				rsc->debug_mode));
-	seq_printf(s, "solver override status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS0_DRV0,
-				rsc->debug_mode));
-	seq_printf(s, "solver timeslot status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS1_DRV0,
-				rsc->debug_mode));
-	seq_printf(s, "solver mode status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
-				rsc->debug_mode));
-
-	seq_printf(s, "amc status:0x%x\n",
-		 dss_reg_r(&rsc->drv_io, SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0,
-				rsc->debug_mode));
-
-	return 0;
-}
-
-int rsc_hw_vsync(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
-		char *buffer, int buffer_size, u32 mode)
-{
-	u32 blen = 0, reg;
-
-	switch (request) {
-	case VSYNC_READ:
-		if (!buffer || !buffer_size)
-			return blen;
-
-		blen = snprintf(buffer, buffer_size - blen, "vsync0:0x%x\n",
-			 dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
-				rsc->debug_mode));
-		if (blen >= buffer_size)
-			return blen;
-
-		blen += snprintf(buffer + blen, buffer_size - blen,
-			"vsync1:0x%x\n",
-			 dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
-				rsc->debug_mode));
-		break;
-
-	case VSYNC_READ_VSYNC0:
-		return dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
-				rsc->debug_mode);
-
-	case VSYNC_ENABLE:
-		/* clear the current VSYNC value */
-		reg = BIT(9) | ((mode & 0x7) << 10);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
-					reg, rsc->debug_mode);
-
-		/* enable the VSYNC logging */
-		reg = BIT(8) | ((mode & 0x7) << 10);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
-				reg, rsc->debug_mode);
-
-		/* ensure vsync config has been written before waiting on it */
-		wmb();
-		break;
-
-	case VSYNC_DISABLE:
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
-						0x0, rsc->debug_mode);
-		break;
-	}
-
-	return blen;
-}
-
-void rsc_hw_debug_dump(struct sde_rsc_priv *rsc, u32 mux_sel)
-{
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
-		((mux_sel & 0xf) << 1) | BIT(0), rsc->debug_mode);
-}
-
-bool rsc_hw_is_amc_mode(struct sde_rsc_priv *rsc)
-{
-	return dss_reg_r(&rsc->drv_io, SDE_RSCC_TCS_DRV0_CONTROL,
-			rsc->debug_mode) & BIT(16);
-}
-
-int rsc_hw_tcs_wait(struct sde_rsc_priv *rsc)
-{
-	int rc = -EBUSY;
-	int count, seq_status;
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x0, rsc->debug_mode);
-	seq_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-			rsc->debug_mode) & BIT(1);
-	/* if seq busy - set TCS use OK to high and wait for 200us */
-	if (seq_status) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-		usleep_range(100, 200);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x0, rsc->debug_mode);
-	}
-
-	/* check for sequence running status before exiting */
-	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
-		seq_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-				rsc->debug_mode) & BIT(1);
-		if (!seq_status) {
-			rc = 0;
-			break;
-		}
-		usleep_range(1, 2);
-	}
-
-	return rc;
-}
-
-int rsc_hw_tcs_use_ok(struct sde_rsc_priv *rsc)
-{
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-	return 0;
-}
-
-int sde_rsc_hw_register(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware register\n");
-
-	rsc->hw_ops.init = rsc_hw_init;
-	rsc->hw_ops.timer_update = rsc_hw_timer_update;
-
-	rsc->hw_ops.tcs_wait = rsc_hw_tcs_wait;
-	rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
-	rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
-
-	rsc->hw_ops.hw_vsync = rsc_hw_vsync;
-	rsc->hw_ops.state_update = sde_rsc_state_update;
-	rsc->hw_ops.debug_show = sde_rsc_debug_show;
-	rsc->hw_ops.mode_ctrl = rsc_hw_mode_ctrl;
-	rsc->hw_ops.debug_dump = rsc_hw_debug_dump;
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.h b/drivers/gpu/drm/msm/sde_rsc_hw.h
deleted file mode 100644
index 9540fc5..0000000
--- a/drivers/gpu/drm/msm/sde_rsc_hw.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_RSC_HW_H_
-#define _SDE_RSC_HW_H_
-
-#include <linux/kernel.h>
-#include <linux/sde_io_util.h>
-#include <linux/sde_rsc.h>
-
-/* display rsc offset */
-#define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0	0x020
-#define SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0	0x024
-#define SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0	0x028
-#define SDE_RSCC_PDC_SLAVE_ID_DRV0			0x02c
-#define SDE_RSCC_SEQ_PROGRAM_COUNTER			0x408
-#define SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0			0x410
-#define SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0			0x414
-#define SDE_RSCC_SEQ_MEM_0_DRV0				0x600
-#define SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0		0xc14
-#define SDE_RSCC_ERROR_IRQ_STATUS_DRV0			0x0d0
-#define SDE_RSCC_SEQ_BUSY_DRV0				0x404
-#define SDE_RSCC_SOLVER_STATUS0_DRV0			0xc24
-#define SDE_RSCC_SOLVER_STATUS1_DRV0			0xc28
-#define SDE_RSCC_SOLVER_STATUS2_DRV0			0xc2c
-#define SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0		0x1c00
-
-#define SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0		0xc04
-#define SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0		0xc08
-#define SDE_RSCC_MAX_IDLE_DURATION_DRV0			0xc0c
-#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0		0x1000
-#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0		0x1004
-#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0		0x1008
-#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0		0x100c
-
-#define SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0	0xc20
-#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT0_PRI0_DRV0	0x1080
-#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI0_DRV0	0x1100
-#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI3_DRV0	0x110c
-#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI0_DRV0	0x1180
-#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI3_DRV0	0x118c
-
-#define SDE_RSC_SOLVER_OVERRIDE_MODE_DRV0		0xc18
-#define SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0		0xc14
-#define SDE_RSC_TIMERS_CONSIDERED_DRV0			0xc00
-#define SDE_RSC_SOLVER_OVERRIDE_IDLE_TIME_DRV0		0xc1c
-
-#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0		0xc30
-#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0		0xc34
-#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0		0xc38
-#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0		0xc40
-
-#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1		0xc4c
-#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1		0xc50
-#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1		0xc54
-#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1		0xc5c
-
-#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2		0xc68
-#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2		0xc6c
-#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2		0xc70
-#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2		0xc78
-
-#define SDE_RSCC_TCS_DRV0_CONTROL			0x1c14
-
-#define SDE_RSCC_WRAPPER_CTRL				0x000
-#define SDE_RSCC_WRAPPER_OVERRIDE_CTRL			0x004
-#define SDE_RSCC_WRAPPER_STATIC_WAKEUP_0		0x008
-#define SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD		0x00c
-#define SDE_RSCC_WRAPPER_DEBUG_BUS			0x010
-#define SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0		0x018
-#define SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1		0x01c
-#define SDE_RSCC_SPARE_PWR_EVENT			0x020
-#define SDE_RSCC_PWR_CTRL				0x024
-#define SDE_RSCC_WRAPPER_OVERRIDE_CTRL2			0x040
-#define SDE_RSCC_WRAPPER_MODE_MIN_THRESHOLD		0x044
-#define SDE_RSCC_WRAPPER_BW_INDICATION			0x048
-#define SDE_RSCC_WRAPPER_DEBUG_CTRL2			0x050
-
-/* qtimer offset */
-#define SDE_RSCC_QTMR_AC_HW_FRAME_SEL_1			0x1FE0
-#define SDE_RSCC_QTMR_AC_HW_FRAME_SEL_2			0x1FF0
-#define SDE_RSCC_QTMR_AC_CNTACR0_FG0			0x1040
-#define SDE_RSCC_QTMR_AC_CNTACR1_FG0			0x1044
-#define SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO		0x2020
-#define SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI		0x2024
-#define SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO		0x3020
-#define SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI		0x3024
-#define SDE_RSCC_F0_QTMR_V1_CNTP_CTL			0x202C
-#define SDE_RSCC_F1_QTMR_V1_CNTP_CTL			0x302C
-
-#define MAX_CHECK_LOOPS			500
-#define POWER_CTRL_BIT_12		12
-
-#define SDE_RSC_MODE_0_VAL		0
-#define SDE_RSC_MODE_1_VAL		1
-#define MAX_MODE2_ENTRY_TRY		3
-
-int rsc_hw_vsync(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
-		char *buffer, int buffer_size, u32 mode);
-
-bool rsc_hw_is_amc_mode(struct sde_rsc_priv *rsc);
-
-void rsc_hw_debug_dump(struct sde_rsc_priv *rsc, u32 mux_sel);
-
-int sde_rsc_debug_show(struct seq_file *s, struct sde_rsc_priv *rsc);
-
-int rsc_hw_mode_ctrl(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, u32 mode);
-
-int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
-
-int rsc_hw_tcs_use_ok(struct sde_rsc_priv *rsc);
-
-int rsc_hw_tcs_wait(struct sde_rsc_priv *rsc);
-
-#endif /* _SDE_RSC_HW_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c b/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
deleted file mode 100644
index 77b931e..0000000
--- a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
+++ /dev/null
@@ -1,607 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-
-#include "sde_rsc_priv.h"
-#include "sde_rsc_hw.h"
-#include "sde_dbg.h"
-
-static int _rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware qtimer init\n");
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_1,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_2,
-						0xffffffff, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR0_FG0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR1_FG0,
-						0x1, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CTL,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CTL,
-						0x1, rsc->debug_mode);
-
-	return 0;
-}
-
-static int _rsc_hw_pdc_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware pdc init\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0,
-						0x4520, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0,
-						0x4510, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0,
-						0x4514, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SLAVE_ID_DRV0,
-						0x1, rsc->debug_mode);
-
-	return 0;
-}
-
-static int _rsc_hw_wrapper_init(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware wrapper init\n");
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
-		rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
-		rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-						BIT(8), rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_MODE_MIN_THRESHOLD,
-		rsc->timer_config.min_threshold_time_ns, rsc->debug_mode);
-
-	return 0;
-}
-
-static int _rsc_hw_seq_memory_init_v3(struct sde_rsc_priv *rsc)
-{
-	const u32 mode_0_start_addr = 0x0;
-	const u32 mode_1_start_addr = 0xc;
-	const u32 mode_2_start_addr = 0x18;
-
-	pr_debug("rsc sequencer memory init v2\n");
-
-	/* Mode - 0 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x0,
-						0xff399ebe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x4,
-						0x20209ebe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x8,
-						0x20202020, rsc->debug_mode);
-
-	/* Mode - 1 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0xc,
-						0xe0389ebe, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
-						0x9ebeff39, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0x20202020, rsc->debug_mode);
-
-	/* Mode - 2 sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xfab9baa0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0x9afebdf9, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0xe1a13899, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xa2e0ac81, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x9d3982e2, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0x20208cfd, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0x20202020, rsc->debug_mode);
-
-	/* tcs sleep & wake sequence */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x01a6fcbc, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x38,
-						0x20209ce6, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x3c,
-						0x01a7fcbc, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x40,
-						0x00209ce7, rsc->debug_mode);
-
-	/* branch address */
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x34, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x3c, rsc->debug_mode);
-
-	/* start address */
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
-					mode_0_start_addr,
-					rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
-					mode_1_start_addr,
-					rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
-					mode_2_start_addr,
-					rsc->debug_mode);
-	return 0;
-}
-
-static int _rsc_hw_solver_init(struct sde_rsc_priv *rsc)
-{
-
-	pr_debug("rsc solver init\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0,
-					0xFFFFFFFF, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0,
-					0xFFFFFFFF, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_MAX_IDLE_DURATION_DRV0,
-					0xEFFFFFFF, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
-		rsc->timer_config.bwi_threshold_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-		rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
-		rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
-						0x7, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT0_PRI0_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI0_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI3_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI0_DRV0,
-						0x2, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI3_DRV0,
-						0x2, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_MODE_DRV0,
-						0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_TIMERS_CONSIDERED_DRV0,
-						0x1, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_IDLE_TIME_DRV0,
-						0x01000010, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
-			rsc->timer_config.rsc_backoff_time_ns * 2,
-			rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2,
-					0x80000000, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2,
-					0x0, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	return 0;
-}
-
-static int sde_rsc_mode2_entry_trigger(struct sde_rsc_priv *rsc)
-{
-	int rc;
-	int count, wrapper_status, ctrl2_status;
-	unsigned long reg;
-
-	/* update qtimers to high during clk & video mode state */
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-	}
-
-	wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-				rsc->debug_mode);
-	wrapper_status |= BIT(3);
-	wrapper_status |= BIT(0);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-					wrapper_status, rsc->debug_mode);
-
-	ctrl2_status = dss_reg_r(&rsc->wrapper_io,
-		SDE_RSCC_WRAPPER_OVERRIDE_CTRL2, rsc->debug_mode);
-	ctrl2_status &= ~BIT(3);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
-		ctrl2_status, rsc->debug_mode);
-	wmb(); /* make sure that vsync source is disabled */
-
-
-	/**
-	 * force busy and idle during clk & video mode state because it
-	 * is trying to entry in mode-2 without turning on the vysnc.
-	 */
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-				BIT(0) | BIT(1), rsc->debug_mode);
-		wmb(); /* force busy gurantee */
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-				BIT(0) | BIT(9), rsc->debug_mode);
-	}
-
-	wmb(); /* make sure that mode-2 is triggered before wait*/
-
-	rc = -EBUSY;
-	/* this wait is required to turn off the rscc clocks */
-	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
-		reg = dss_reg_r(&rsc->wrapper_io,
-				SDE_RSCC_PWR_CTRL, rsc->debug_mode);
-		if (test_bit(POWER_CTRL_BIT_12, &reg)) {
-			rc = 0;
-			break;
-		}
-		usleep_range(10, 100);
-	}
-
-	return rc;
-}
-
-static void sde_rsc_reset_mode_0_1(struct sde_rsc_priv *rsc)
-{
-	u32 seq_busy, current_mode, curr_inst_addr;
-
-	seq_busy = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_BUSY_DRV0,
-			rsc->debug_mode);
-	current_mode = dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
-			rsc->debug_mode);
-	curr_inst_addr = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_PROGRAM_COUNTER,
-			rsc->debug_mode);
-	SDE_EVT32(seq_busy, current_mode, curr_inst_addr);
-
-	if (seq_busy && (current_mode == SDE_RSC_MODE_0_VAL ||
-			current_mode == SDE_RSC_MODE_1_VAL)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0xffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-		wmb(); /* unstick f1 qtimer */
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
-						0x0, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
-						0x0, rsc->debug_mode);
-		wmb(); /* manually trigger f1 qtimer interrupt */
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0xffffff, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0xffffffff, rsc->debug_mode);
-		wmb(); /* unstick f0 qtimer */
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
-						0x0, rsc->debug_mode);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
-						0x0, rsc->debug_mode);
-		wmb(); /* manually trigger f0 qtimer interrupt */
-	}
-}
-
-static int sde_rsc_mode2_entry_v3(struct sde_rsc_priv *rsc)
-{
-	int rc = 0, i;
-	u32 reg;
-
-	if (rsc->power_collapse_block)
-		return -EINVAL;
-
-	if (rsc->sw_fs_enabled) {
-		rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
-		if (rc) {
-			pr_err("vdd reg fast mode set failed rc:%d\n", rc);
-			return rc;
-		}
-	}
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
-						0x7, rsc->debug_mode);
-
-	for (i = 0; i <= MAX_MODE2_ENTRY_TRY; i++) {
-		rc = sde_rsc_mode2_entry_trigger(rsc);
-		if (!rc)
-			break;
-
-		reg = dss_reg_r(&rsc->drv_io,
-				SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
-		pr_err("mdss gdsc power down failed, instruction:0x%x, rc:%d\n",
-				reg, rc);
-		SDE_EVT32(rc, reg, SDE_EVTLOG_ERROR);
-
-		/* avoid touching f1 qtimer for last try */
-		if (i != MAX_MODE2_ENTRY_TRY)
-			sde_rsc_reset_mode_0_1(rsc);
-	}
-
-	if (rc)
-		goto end;
-
-	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
-			(rsc->current_state == SDE_RSC_CLK_STATE)) {
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-					BIT(0) | BIT(8), rsc->debug_mode);
-		wmb(); /* force busy on vsync */
-	}
-
-	if (rsc->sw_fs_enabled) {
-		regulator_disable(rsc->fs);
-		rsc->sw_fs_enabled = false;
-	}
-
-	return 0;
-
-end:
-	sde_rsc_mode2_exit(rsc, rsc->current_state);
-
-	return rc;
-}
-
-static int sde_rsc_state_update_v3(struct sde_rsc_priv *rsc,
-						enum sde_rsc_state state)
-{
-	int rc = 0;
-	int reg, ctrl2_config;
-
-	if (rsc->power_collapse) {
-		rc = sde_rsc_mode2_exit(rsc, state);
-		if (rc)
-			pr_err("power collapse: mode2 exit failed\n");
-		else
-			rsc->power_collapse = false;
-	}
-
-	switch (state) {
-	case SDE_RSC_CMD_STATE:
-		pr_debug("command mode handling\n");
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
-				BIT(1) | BIT(2) | BIT(3), rsc->debug_mode);
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
-							0x0, rsc->debug_mode);
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg |= (BIT(0) | BIT(8));
-		reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7) | BIT(9));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		wmb(); /* make sure that solver is enabled */
-
-		break;
-
-	case SDE_RSC_VID_STATE:
-		pr_debug("video mode handling\n");
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							0x0, rsc->debug_mode);
-		wmb(); /* disable double buffer config before vsync select */
-
-		ctrl2_config = (rsc->vsync_source & 0x7) << 4;
-		ctrl2_config |= (BIT(0) | BIT(1) | BIT(3));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
-				ctrl2_config, rsc->debug_mode);
-		wmb(); /* select vsync before double buffer config enabled */
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
-							0x0, rsc->debug_mode);
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg |= (BIT(0) | BIT(8));
-		reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7) | BIT(9));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		wmb(); /* make sure that solver is enabled */
-
-		break;
-
-	case SDE_RSC_CLK_STATE:
-		pr_debug("clk state handling\n");
-
-		ctrl2_config = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL2, rsc->debug_mode);
-		ctrl2_config &= ~(BIT(0) | BIT(1) | BIT(2));
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
-			ctrl2_config, rsc->debug_mode);
-
-		reg = dss_reg_r(&rsc->wrapper_io,
-			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
-		reg &= ~BIT(0);
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
-							reg, rsc->debug_mode);
-		wmb(); /* make sure that solver mode is disabled */
-		break;
-
-	case SDE_RSC_IDLE_STATE:
-		rc = sde_rsc_mode2_entry_v3(rsc);
-		if (rc)
-			pr_err("power collapse - mode 2 entry failed\n");
-		else
-			rsc->power_collapse = true;
-		break;
-
-	default:
-		pr_err("state:%d handling is not supported\n", state);
-		break;
-	}
-
-	return rc;
-}
-
-int rsc_hw_init_v3(struct sde_rsc_priv *rsc)
-{
-	int rc = 0;
-
-	rc = _rsc_hw_qtimer_init(rsc);
-	if (rc) {
-		pr_err("rsc hw qtimer init failed\n");
-		goto end;
-	}
-
-	rc = _rsc_hw_wrapper_init(rsc);
-	if (rc) {
-		pr_err("rsc hw wrapper init failed\n");
-		goto end;
-	}
-
-	rc = _rsc_hw_seq_memory_init_v3(rsc);
-	if (rc) {
-		pr_err("rsc sequencer memory init failed\n");
-		goto end;
-	}
-
-	rc = _rsc_hw_solver_init(rsc);
-	if (rc) {
-		pr_err("rsc solver init failed\n");
-		goto end;
-	}
-
-	rc = _rsc_hw_pdc_init(rsc);
-	if (rc) {
-		pr_err("rsc hw pdc init failed\n");
-		goto end;
-	}
-
-	wmb(); /* make sure that hw is initialized */
-
-	pr_info("sde rsc init successfully done\n");
-end:
-	return rc;
-}
-
-int rsc_hw_bwi_status_v3(struct sde_rsc_priv *rsc, bool bw_indication)
-{
-	int count, bw_ack;
-	int rc = 0;
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_BW_INDICATION,
-						bw_indication, rsc->debug_mode);
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
-						0x1, rsc->debug_mode);
-
-	bw_ack = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_CTRL2,
-			rsc->debug_mode) & BIT(14);
-
-	/* check for sequence running status before exiting */
-	for (count = MAX_CHECK_LOOPS; count > 0 && !bw_ack; count--) {
-		usleep_range(8, 10);
-
-		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_BW_INDICATION,
-						bw_indication, rsc->debug_mode);
-		bw_ack = dss_reg_r(&rsc->wrapper_io,
-		       SDE_RSCC_WRAPPER_DEBUG_CTRL2, rsc->debug_mode) & BIT(14);
-	}
-
-	if (!bw_ack)
-		rc = -EINVAL;
-
-	return rc;
-}
-
-static int rsc_hw_timer_update_v3(struct sde_rsc_priv *rsc)
-{
-	if (!rsc) {
-		pr_debug("invalid input param\n");
-		return -EINVAL;
-	}
-
-	pr_debug("rsc hw timer update\n");
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
-		rsc->timer_config.rsc_time_slot_0_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-		rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
-		rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
-			rsc->timer_config.rsc_backoff_time_ns * 2,
-			rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
-			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
-		rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
-
-	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
-		rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
-
-	/* make sure that hw timers are updated */
-	wmb();
-
-	return 0;
-}
-
-int sde_rsc_hw_register_v3(struct sde_rsc_priv *rsc)
-{
-	pr_debug("rsc hardware register v3\n");
-
-	rsc->hw_ops.init = rsc_hw_init_v3;
-	rsc->hw_ops.state_update = sde_rsc_state_update_v3;
-	rsc->hw_ops.bwi_status = rsc_hw_bwi_status_v3;
-	rsc->hw_ops.timer_update = rsc_hw_timer_update_v3;
-
-	rsc->hw_ops.tcs_wait = rsc_hw_tcs_wait;
-	rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
-	rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
-	rsc->hw_ops.hw_vsync = rsc_hw_vsync;
-	rsc->hw_ops.debug_show = sde_rsc_debug_show;
-	rsc->hw_ops.mode_ctrl = rsc_hw_mode_ctrl;
-	rsc->hw_ops.debug_dump = rsc_hw_debug_dump;
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
deleted file mode 100644
index 9bf36af..0000000
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_RSC_PRIV_H_
-#define _SDE_RSC_PRIV_H_
-
-#include <linux/kernel.h>
-#include <linux/sde_io_util.h>
-#include <linux/sde_rsc.h>
-
-#include <soc/qcom/tcs.h>
-#include "sde_power_handle.h"
-
-#define SDE_RSC_COMPATIBLE "disp_rscc"
-
-#define MAX_RSC_COUNT		5
-
-#define ALL_MODES_DISABLED	0x0
-#define ONLY_MODE_0_ENABLED	0x1
-#define ONLY_MODE_0_1_ENABLED	0x3
-#define ALL_MODES_ENABLED	0x7
-
-#define MAX_COUNT_SIZE_SUPPORTED	128
-
-#define SDE_RSC_REV_1			0x1
-#define SDE_RSC_REV_2			0x2
-#define SDE_RSC_REV_3			0x3
-
-struct sde_rsc_priv;
-
-/**
- * rsc_mode_req: sde rsc mode request information
- * MODE_READ: read vsync status
- * MODE_UPDATE: mode timeslot update
- *            0x0: all modes are disabled.
- *            0x1: Mode-0 is enabled and other two modes are disabled.
- *            0x3: Mode-0 & Mode-1 are enabled and mode-2 is disabled.
- *            0x7: all modes are enabled.
- */
-enum rsc_mode_req {
-	MODE_READ,
-	MODE_UPDATE = 0x1,
-};
-
-/**
- * rsc_vsync_req: sde rsc vsync request information
- * VSYNC_READ: read vsync status
- * VSYNC_READ_VSYNC0: read value vsync0 timestamp (cast to int from u32)
- * VSYNC_ENABLE: enable rsc wrapper vsync status
- * VSYNC_DISABLE: disable rsc wrapper vsync status
- */
-enum rsc_vsync_req {
-	VSYNC_READ,
-	VSYNC_READ_VSYNC0,
-	VSYNC_ENABLE,
-	VSYNC_DISABLE,
-};
-
-/**
- * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
- * @init:			Initialize the sequencer, solver, qtimer,
-				etc. hardware blocks on RSC.
- * @timer_update:		update the static wrapper time and pdc/rsc
-				backoff time.
- * @tcs_wait:			Waits for TCS block OK to allow sending a
- *				TCS command.
- * @hw_vsync:			Enables the vsync on RSC block.
- * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
- * @bwi_status:			It updates the BW increase/decrease status.
- * @is_amc_mode:		Check current amc mode status
- * @debug_dump:			dump debug bus registers or enable debug bus
- * @state_update:		Enable/override the solver based on rsc state
- *                              status (command/video)
- * @mode_show:			shows current mode status, mode0/1/2
- * @debug_show:			Show current debug status.
- */
-
-struct sde_rsc_hw_ops {
-	int (*init)(struct sde_rsc_priv *rsc);
-	int (*timer_update)(struct sde_rsc_priv *rsc);
-	int (*tcs_wait)(struct sde_rsc_priv *rsc);
-	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
-		char *buffer, int buffer_size, u32 mode);
-	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
-	int (*bwi_status)(struct sde_rsc_priv *rsc, bool bw_indication);
-	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
-	void (*debug_dump)(struct sde_rsc_priv *rsc, u32 mux_sel);
-	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
-	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
-	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, u32 mode);
-};
-
-/**
- * struct sde_rsc_timer_config: this is internal configuration between
- * rsc and rsc_hw API.
- *
- * @static_wakeup_time_ns:	wrapper backoff time in nano seconds
- * @rsc_backoff_time_ns:	rsc backoff time in nano seconds
- * @pdc_backoff_time_ns:	pdc backoff time in nano seconds
- * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nano seconds
- * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nano seconds
- * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nano seconds
- * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nano seconds
- *
- * @min_threshold_time_ns:	minimum time required to enter & exit mode0
- * @bwi_threshold_time_ns:	worst case time to increase the BW vote
- */
-struct sde_rsc_timer_config {
-	u32 static_wakeup_time_ns;
-
-	u32 rsc_backoff_time_ns;
-	u32 pdc_backoff_time_ns;
-	u32 rsc_mode_threshold_time_ns;
-	u32 rsc_time_slot_0_ns;
-	u32 rsc_time_slot_1_ns;
-	u32 rsc_time_slot_2_ns;
-
-	u32 min_threshold_time_ns;
-	u32 bwi_threshold_time_ns;
-};
-
-/**
- * struct sde_rsc_bw_config: bandwidth configuration
- *
- * @ab_vote:	Stored ab_vote for SDE_POWER_HANDLE_DBUS_ID_MAX
- * @ib_vote:	Stored ib_vote for SDE_POWER_HANDLE_DBUS_ID_MAX
- * @new_ab_vote:	ab_vote for incoming frame.
- * @new_ib_vote:	ib_vote for incoming frame.
- */
-struct sde_rsc_bw_config {
-	u64	ab_vote[SDE_POWER_HANDLE_DBUS_ID_MAX];
-	u64	ib_vote[SDE_POWER_HANDLE_DBUS_ID_MAX];
-
-	u64	new_ab_vote[SDE_POWER_HANDLE_DBUS_ID_MAX];
-	u64	new_ib_vote[SDE_POWER_HANDLE_DBUS_ID_MAX];
-};
-/**
- * struct sde_rsc_priv: sde resource state coordinator(rsc) private handle
- * @version:		rsc sequence version
- * @phandle:		module power handle for clocks
- * @pclient:		module power client of phandle
- * @fs:			"MDSS GDSC" handle
- * @sw_fs_enabled:	track "MDSS GDSC" sw vote during probe
- *
- * @rpmh_dev:		rpmh device node
- * @drv_io:		sde drv io data mapping
- * @wrapper_io:		wrapper io data mapping
- *
- * @client_list:	current rsc client list handle
- * @event_list:		current rsc event list handle
- * @client_lock:	current rsc client synchronization lock
- *
- * timer_config:	current rsc timer configuration
- * cmd_config:		current panel config
- * current_state:	current rsc state (video/command), solver
- *                      override/enabled.
- * vsync_source:	Interface index to provide the vsync ticks
- * debug_mode:		enables the logging for each register read/write
- * debugfs_root:	debugfs file system root node
- *
- * hw_ops:		sde rsc hardware operations
- * power_collapse:	if all clients are in IDLE state then it enters in
- *			mode2 state and enable the power collapse state
- * power_collapse_block:By default, rsc move to mode-2 if all clients are in
- *			invalid state. It can be blocked by this boolean entry.
- * primary_client:	A client which is allowed to make command state request
- *			and ab/ib vote on display rsc
- * single_tcs_execution_time: worst case time to execute one tcs vote
- *			(sleep/wake)
- * backoff_time_ns:	time to only wake tcs in any mode
- * mode_threshold_time_ns: time to wake TCS in mode-0, must be greater than
- *			backoff time
- * time_slot_0_ns:	time for sleep & wake TCS in mode-1
- * master_drm:		Primary client waits for vsync on this drm object based
- *			on crtc id
- * rsc_vsync_wait:   Refcount to indicate if we have to wait for the vsync.
- * rsc_vsync_waitq:   Queue to wait for the vsync.
- * bw_config:		check sde_rsc_bw_config structure description.
- */
-struct sde_rsc_priv {
-	u32 version;
-	struct sde_power_handle phandle;
-	struct sde_power_client *pclient;
-	struct regulator *fs;
-	bool sw_fs_enabled;
-
-	struct device *rpmh_dev;
-	struct dss_io_data drv_io;
-	struct dss_io_data wrapper_io;
-
-	struct list_head client_list;
-	struct list_head event_list;
-	struct mutex client_lock;
-
-	struct sde_rsc_timer_config timer_config;
-	struct sde_rsc_cmd_config cmd_config;
-	u32	current_state;
-	u32	vsync_source;
-
-	u32 debug_mode;
-	struct dentry *debugfs_root;
-
-	struct sde_rsc_hw_ops hw_ops;
-	bool power_collapse;
-	bool power_collapse_block;
-	struct sde_rsc_client *primary_client;
-
-	u32 single_tcs_execution_time;
-	u32 backoff_time_ns;
-	u32 mode_threshold_time_ns;
-	u32 time_slot_0_ns;
-
-	struct drm_device *master_drm;
-	atomic_t rsc_vsync_wait;
-	wait_queue_head_t rsc_vsync_waitq;
-
-	struct sde_rsc_bw_config bw_config;
-};
-
-/**
- * sde_rsc_hw_register() - register hardware API. It manages V1 and V2 support.
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: error code.
- */
-int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
-
-/**
- * sde_rsc_hw_register_v3() - register hardware API. It manages V3 support.
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: error code.
- */
-int sde_rsc_hw_register_v3(struct sde_rsc_priv *rsc);
-
-#endif /* _SDE_RSC_PRIV_H_ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 157b076..38c9c08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	struct nvkm_device *device = bar->base.subdev.device;
 	static struct lock_class_key bar1_lock;
 	static struct lock_class_key bar2_lock;
-	u64 start, limit;
+	u64 start, limit, size;
 	int ret;
 
 	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 
 	/* BAR2 */
 	start = 0x0100000000ULL;
-	limit = start + device->func->resource_size(device, 3);
+	size = device->func->resource_size(device, 3);
+	if (!size)
+		return -ENOMEM;
+	limit = start + size;
 
 	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
 			   &bar2_lock, "bar2", &bar->bar2_vmm);
@@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	limit = start + device->func->resource_size(device, 1);
+	size = device->func->resource_size(device, 1);
+	if (!size)
+		return -ENOMEM;
+	limit = start + size;
 
 	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
 			   &bar1_lock, "bar1", &bar->bar1_vmm);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 74467b3..8160954 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1386,12 +1386,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
 	 */
 	dsi_enable_scp_clk(dsi);
 
-	if (!dsi->vdds_dsi_enabled) {
-		r = regulator_enable(dsi->vdds_dsi_reg);
-		if (r)
-			goto err0;
-		dsi->vdds_dsi_enabled = true;
-	}
+	r = regulator_enable(dsi->vdds_dsi_reg);
+	if (r)
+		goto err0;
 
 	/* XXX PLL does not come out of reset without this... */
 	dispc_pck_free_enable(dsi->dss->dispc, 1);
@@ -1416,36 +1413,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
 
 	return 0;
 err1:
-	if (dsi->vdds_dsi_enabled) {
-		regulator_disable(dsi->vdds_dsi_reg);
-		dsi->vdds_dsi_enabled = false;
-	}
+	regulator_disable(dsi->vdds_dsi_reg);
 err0:
 	dsi_disable_scp_clk(dsi);
 	dsi_runtime_put(dsi);
 	return r;
 }
 
-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
-{
-	dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
-	if (disconnect_lanes) {
-		WARN_ON(!dsi->vdds_dsi_enabled);
-		regulator_disable(dsi->vdds_dsi_reg);
-		dsi->vdds_dsi_enabled = false;
-	}
-
-	dsi_disable_scp_clk(dsi);
-	dsi_runtime_put(dsi);
-
-	DSSDBG("PLL uninit done\n");
-}
-
 static void dsi_pll_disable(struct dss_pll *pll)
 {
 	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
 
-	dsi_pll_uninit(dsi, true);
+	dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
+
+	regulator_disable(dsi->vdds_dsi_reg);
+
+	dsi_disable_scp_clk(dsi);
+	dsi_runtime_put(dsi);
+
+	DSSDBG("PLL disable done\n");
 }
 
 static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
@@ -4195,11 +4181,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
 
 	r = dss_pll_enable(&dsi->pll);
 	if (r)
-		goto err0;
+		return r;
 
 	r = dsi_configure_dsi_clocks(dsi);
 	if (r)
-		goto err1;
+		goto err0;
 
 	dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
 				  dsi->module_id == 0 ?
@@ -4207,6 +4193,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
 
 	DSSDBG("PLL OK\n");
 
+	if (!dsi->vdds_dsi_enabled) {
+		r = regulator_enable(dsi->vdds_dsi_reg);
+		if (r)
+			goto err1;
+
+		dsi->vdds_dsi_enabled = true;
+	}
+
 	r = dsi_cio_init(dsi);
 	if (r)
 		goto err2;
@@ -4235,10 +4229,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
 err3:
 	dsi_cio_uninit(dsi);
 err2:
-	dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+	regulator_disable(dsi->vdds_dsi_reg);
+	dsi->vdds_dsi_enabled = false;
 err1:
-	dss_pll_disable(&dsi->pll);
+	dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
 err0:
+	dss_pll_disable(&dsi->pll);
+
 	return r;
 }
 
@@ -4257,7 +4254,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
 
 	dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
 	dsi_cio_uninit(dsi);
-	dsi_pll_uninit(dsi, disconnect_lanes);
+	dss_pll_disable(&dsi->pll);
+
+	if (disconnect_lanes) {
+		regulator_disable(dsi->vdds_dsi_reg);
+		dsi->vdds_dsi_enabled = false;
+	}
 }
 
 static int dsi_display_enable(struct omap_dss_device *dssdev)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 3403831..ebf9c96 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 		REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
 		hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
 		hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+		REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 		hdmi4_core_disable(core);
 		return 0;
 	}
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 	if (err)
 		return err;
 
+	/*
+	 * Initialize CEC clock divider: CEC needs 2MHz clock hence
+	 * set the divider to 24 to get 48/24=2MHz clock
+	 */
+	REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
 	/* Clear TX FIFO */
 	if (!hdmi_cec_clear_tx_fifo(adap)) {
 		pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
-		return -EIO;
+		err = -EIO;
+		goto err_disable_clk;
 	}
 
 	/* Clear RX FIFO */
 	if (!hdmi_cec_clear_rx_fifo(adap)) {
 		pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
-		return -EIO;
+		err = -EIO;
+		goto err_disable_clk;
 	}
 
 	/* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 		hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
 	}
 	return 0;
+
+err_disable_clk:
+	REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+	hdmi4_core_disable(core);
+
+	return err;
 }
 
 static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
 		return ret;
 	core->wp = wp;
 
-	/*
-	 * Initialize CEC clock divider: CEC needs 2MHz clock hence
-	 * set the devider to 24 to get 48/24=2MHz clock
-	 */
-	REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+	/* Disable clock initially, hdmi_cec_adap_enable() manages it */
+	REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 
 	ret = cec_register_adapter(core->adap, &pdev->dev);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87fa316..58ccf64 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
 	/* Send Command GRAM memory write (no parameters) */
 	dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
 
+	/* Wait a short while to let the panel be ready before the 1st frame */
+	mdelay(10);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index b9baefd..1c318ad 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 		ret = vexpress_muxfpga_init();
 		if (ret) {
 			dev_err(dev, "unable to initialize muxfpga driver\n");
+			of_node_put(np);
 			return ret;
 		}
 
@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 		pdev = of_find_device_by_node(np);
 		if (!pdev) {
 			dev_err(dev, "can't find the sysreg device, deferring\n");
+			of_node_put(np);
 			return -EPROBE_DEFER;
 		}
 		map = dev_get_drvdata(&pdev->dev);
 		if (!map) {
 			dev_err(dev, "sysreg has not yet probed\n");
 			platform_device_put(pdev);
+			of_node_put(np);
 			return -EPROBE_DEFER;
 		}
 	} else {
 		map = syscon_node_to_regmap(np);
 	}
+	of_node_put(np);
 
 	if (IS_ERR(map)) {
 		dev_err(dev, "no Versatile syscon regmap\n");
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 5a48548..6c8b14f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -113,7 +113,7 @@ static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
 
 static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
 					   u8 module_id, u8 opcode,
-					   u8 req_size)
+					   u16 req_size)
 {
 	u32 mbox_size, i;
 	u8 header[4];
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 8b0cd08..57f61ec 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -92,6 +92,8 @@ static int sun4i_drv_bind(struct device *dev)
 		ret = -ENOMEM;
 		goto free_drm;
 	}
+
+	dev_set_drvdata(dev, drm);
 	drm->dev_private = drv;
 	INIT_LIST_HEAD(&drv->frontend_list);
 	INIT_LIST_HEAD(&drv->engine_list);
@@ -156,7 +158,10 @@ static void sun4i_drv_unbind(struct device *dev)
 	drm_kms_helper_poll_fini(drm);
 	sun4i_framebuffer_free(drm);
 	drm_mode_config_cleanup(drm);
+
+	component_unbind_all(dev, NULL);
 	of_reserved_mem_device_release(dev);
+
 	drm_dev_put(drm);
 }
 
@@ -405,6 +410,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
 static int sun4i_drv_remove(struct platform_device *pdev)
 {
+	component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index e3b34a3..97a0573 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -357,7 +357,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
 static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
 					   struct drm_display_mode *mode)
 {
-	return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
+	u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
+	u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+
+	if (delay > mode->vtotal)
+		delay = delay % mode->vtotal;
+
+	return max_t(u16, delay, 1);
 }
 
 static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index adcdf94..dfbcd1a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -217,7 +217,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 
 err_unregister_gates:
 	for (i = 0; i < CLK_NUM; i++)
-		if (clk_data->hws[i])
+		if (!IS_ERR_OR_NULL(clk_data->hws[i]))
 			clk_hw_unregister_gate(clk_data->hws[i]);
 	clk_disable_unprepare(tcon_top->bus);
 err_assert_reset:
@@ -235,7 +235,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
 
 	of_clk_del_provider(dev->of_node);
 	for (i = 0; i < CLK_NUM; i++)
-		clk_hw_unregister_gate(clk_data->hws[i]);
+		if (clk_data->hws[i])
+			clk_hw_unregister_gate(clk_data->hws[i]);
 
 	clk_disable_unprepare(tcon_top->bus);
 	reset_control_assert(tcon_top->rst);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8f4fcbb..bb97cad 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 					      struct drm_plane_state *old_state)
 {
-	struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
 	struct tegra_plane *p = to_tegra_plane(plane);
+	struct tegra_dc *dc;
 	u32 value;
 
 	/* rien ne va plus */
 	if (!old_state || !old_state->crtc)
 		return;
 
+	dc = to_tegra_dc(old_state->crtc);
+
 	/*
 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 	 * on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe..6044a01 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -278,7 +278,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
 	mipi->enabled = false;
 }
 
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
 			       size_t num)
 {
 	struct spi_device *spi = mipi->spi;
@@ -288,11 +288,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
 
 	gpiod_set_value_cansleep(mipi->dc, 0);
 	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
-	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
 	if (ret || !num)
 		return ret;
 
-	if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+	if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
 		bpw = 16;
 
 	gpiod_set_value_cansleep(mipi->dc, 1);
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index cb3441e..e772a8a 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -144,16 +144,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
  */
 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
 {
+	u8 *cmdbuf;
 	int ret;
 
+	/* SPI requires dma-safe buffers */
+	cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
+	if (!cmdbuf)
+		return -ENOMEM;
+
 	mutex_lock(&mipi->cmdlock);
-	ret = mipi->command(mipi, cmd, data, len);
+	ret = mipi->command(mipi, cmdbuf, data, len);
 	mutex_unlock(&mipi->cmdlock);
 
+	kfree(cmdbuf);
+
 	return ret;
 }
 EXPORT_SYMBOL(mipi_dbi_command_buf);
 
+/* This should only be used by mipi_dbi_command() */
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+	u8 *buf;
+	int ret;
+
+	buf = kmemdup(data, len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+
+	kfree(buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
+
 /**
  * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
  * @dst: The destination buffer
@@ -741,18 +767,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
 	return 0;
 }
 
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
 				   u8 *parameters, size_t num)
 {
-	unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+	unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
 	int ret;
 
-	if (mipi_dbi_command_is_read(mipi, cmd))
+	if (mipi_dbi_command_is_read(mipi, *cmd))
 		return -ENOTSUPP;
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
 
-	ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+	ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
 	if (ret || !num)
 		return ret;
 
@@ -761,7 +787,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
 
 /* MIPI DBI Type C Option 3 */
 
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
 					u8 *data, size_t len)
 {
 	struct spi_device *spi = mipi->spi;
@@ -770,7 +796,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 	struct spi_transfer tr[2] = {
 		{
 			.speed_hz = speed_hz,
-			.tx_buf = &cmd,
+			.tx_buf = cmd,
 			.len = 1,
 		}, {
 			.speed_hz = speed_hz,
@@ -788,8 +814,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 	 * Support non-standard 24-bit and 32-bit Nokia read commands which
 	 * start with a dummy clock, so we need to read an extra byte.
 	 */
-	if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
-	    cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+	if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
+	    *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
 		if (!(len == 3 || len == 4))
 			return -EINVAL;
 
@@ -819,7 +845,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 			data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
 	}
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
 
 err_free:
 	kfree(buf);
@@ -827,7 +853,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 	return ret;
 }
 
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
 				   u8 *par, size_t num)
 {
 	struct spi_device *spi = mipi->spi;
@@ -835,18 +861,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
 	u32 speed_hz;
 	int ret;
 
-	if (mipi_dbi_command_is_read(mipi, cmd))
+	if (mipi_dbi_command_is_read(mipi, *cmd))
 		return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
 
 	gpiod_set_value_cansleep(mipi->dc, 0);
 	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
-	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
 	if (ret || !num)
 		return ret;
 
-	if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+	if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
 		bpw = 16;
 
 	gpiod_set_value_cansleep(mipi->dc, 1);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f841acc..f77c81db 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -730,7 +730,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 			}
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+			if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+			    (npages - i) >= HPAGE_PMD_NR) {
 				for (j = 0; j < HPAGE_PMD_NR; ++j)
 					if (p++ != pages[i + j])
 					    break;
@@ -759,7 +760,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		unsigned max_size, n2free;
 
 		spin_lock_irqsave(&huge->lock, irq_flags);
-		while (i < npages) {
+		while ((npages - i) >= HPAGE_PMD_NR) {
 			struct page *p = pages[i];
 			unsigned j;
 
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68f..2a4c618 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -305,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	if (ret)
 		goto dev_destroy;
 
-	v3d_irq_init(v3d);
-
-	ret = drm_dev_register(drm, 0);
+	ret = v3d_irq_init(v3d);
 	if (ret)
 		goto gem_destroy;
 
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto irq_disable;
+
 	return 0;
 
+irq_disable:
+	v3d_irq_disable(v3d);
 gem_destroy:
 	v3d_gem_destroy(drm);
 dev_destroy:
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index e6fed69..0ad73f4 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -284,7 +284,7 @@ void v3d_invalidate_caches(struct v3d_dev *v3d);
 void v3d_flush_caches(struct v3d_dev *v3d);
 
 /* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
 void v3d_irq_enable(struct v3d_dev *v3d);
 void v3d_irq_disable(struct v3d_dev *v3d);
 void v3d_irq_reset(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e07514e..22be0f2 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -137,7 +137,7 @@ v3d_hub_irq(int irq, void *arg)
 	return status;
 }
 
-void
+int
 v3d_irq_init(struct v3d_dev *v3d)
 {
 	int ret, core;
@@ -154,13 +154,22 @@ v3d_irq_init(struct v3d_dev *v3d)
 	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
 			       v3d_hub_irq, IRQF_SHARED,
 			       "v3d_hub", v3d);
+	if (ret)
+		goto fail;
+
 	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
 			       v3d_irq, IRQF_SHARED,
 			       "v3d_core0", v3d);
 	if (ret)
-		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+		goto fail;
 
 	v3d_irq_enable(v3d);
+	return 0;
+
+fail:
+	if (ret != -EPROBE_DEFER)
+		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+	return ret;
 }
 
 void
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0e6a121..5615ceb 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -998,7 +998,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		vc4_crtc_destroy_state(crtc, crtc->state);
 
 	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 	if (crtc->state)
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 9b2b3fa..5e44ff1 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
 		ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
 				DP_COM_CONF_CSC_DEF_BOTH);
 	} else {
-		if (flow->foreground.in_cs == flow->out_cs)
+		if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+		    flow->foreground.in_cs == flow->out_cs)
 			/*
 			 * foreground identical to output, apply color
 			 * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 	struct ipu_dp_priv *priv = flow->priv;
 	u32 reg, csc;
 
+	dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
 	if (!dp->foreground)
 		return;
 
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 
 	reg = readl(flow->base + DP_COM_CONF);
 	csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-	if (csc == DP_COM_CONF_CSC_DEF_FG)
-		reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+		reg |= DP_COM_CONF_CSC_DEF_BG;
 
 	reg &= ~DP_COM_CONF_FG_EN;
 	writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
 	mutex_init(&priv->mutex);
 
 	for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+		priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+		priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
 		priv->flow[i].foreground.foreground = true;
 		priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
 		priv->flow[i].priv = priv;
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 58c12d4..2b0d75115 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -6,6 +6,7 @@
 	select FW_LOADER
 	select PM_DEVFREQ
 	select QCOM_SCM
+	select NVMEM
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	select DEVFREQ_GOV_PERFORMANCE
 	select DEVFREQ_GOV_QCOM_ADRENO_TZ
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 774aaa0..7ad8e8c 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1016,6 +1016,8 @@
 /* LM registers */
 #define A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD       0x1F94D
 
+/* FAL10 veto register */
+#define A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF         0x1F8F0
 
 #define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
 #define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
@@ -1029,6 +1031,7 @@
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK	0x23B0E
 #define A6XX_GMU_AO_AHB_FENCE_CTRL		0x23B10
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
+#define A6XX_GMU_AHB_FENCE_STATUS_CLR           0x23B14
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
 
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index ef54463..1e3439c 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -5,462 +5,1292 @@
 
 #define ANY_ID (~0)
 
-static const struct adreno_gpu_core adreno_gpulist[] = {
-	{
-		.gpurev = ADRENO_REV_A306,
-		.core = 3,
-		.major = 0,
-		.minor = 6,
-		.patchid = 0x00,
+#define DEFINE_ADRENO_REV(_rev, _core, _major, _minor, _patchid) \
+	.gpurev = _rev, .core = _core, .major = _major, .minor = _minor, \
+	.patchid = _patchid
+
+#define DEFINE_DEPRECATED_CORE(_name, _rev, _core, _major, _minor, _patchid) \
+static const struct adreno_gpu_core adreno_gpu_core_##_name = { \
+	DEFINE_ADRENO_REV(_rev, _core, _major, _minor, _patchid), \
+	.features = ADRENO_DEPRECATED, \
+}
+
+static const struct adreno_reglist a306_vbif_regs[] = {
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000A },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000A },
+};
+
+static const struct adreno_a3xx_core adreno_gpu_core_a306 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A306, 3, 0, 6, 0),
 		.features = ADRENO_SOFT_FAULT_DETECT,
-		.pm4fw_name = "a300_pm4.fw",
-		.pfpfw_name = "a300_pfp.fw",
 		.gpudev = &adreno_a3xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_128K,
-		.busy_mask = 0x7FFFFFFE,
+		.busy_mask = 0x7ffffffe,
+		.bus_width = 0,
 	},
-	{
-		.gpurev = ADRENO_REV_A306A,
-		.core = 3,
-		.major = 0,
-		.minor = 6,
-		.patchid = 0x20,
+	.pm4fw_name = "a300_pm4.fw",
+	.pfpfw_name = "a300_pfp.fw",
+	.vbif = a306_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a306_vbif_regs),
+};
+
+static const struct adreno_reglist a306a_vbif_regs[] = {
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010 },
+};
+
+static const struct adreno_a3xx_core adreno_gpu_core_a306a = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A306A, 3, 0, 6, 0x20),
 		.features = ADRENO_SOFT_FAULT_DETECT,
-		.pm4fw_name = "a300_pm4.fw",
-		.pfpfw_name = "a300_pfp.fw",
 		.gpudev = &adreno_a3xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_128K,
-		.busy_mask = 0x7FFFFFFE,
+		.busy_mask = 0x7ffffffe,
+		.bus_width = 16,
 	},
-	{
-		.gpurev = ADRENO_REV_A304,
-		.core = 3,
-		.major = 0,
-		.minor = 4,
-		.patchid = 0x00,
+	.pm4fw_name = "a300_pm4.fw",
+	.pfpfw_name = "a300_pfp.fw",
+	.vbif = a306a_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a306a_vbif_regs),
+};
+
+static const struct adreno_reglist a304_vbif_regs[] = {
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+};
+
+static const struct adreno_a3xx_core adreno_gpu_core_a304 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A304, 3, 0, 4, 0),
 		.features = ADRENO_SOFT_FAULT_DETECT,
-		.pm4fw_name = "a300_pm4.fw",
-		.pfpfw_name = "a300_pfp.fw",
 		.gpudev = &adreno_a3xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = (SZ_64K + SZ_32K),
-		.busy_mask = 0x7FFFFFFE,
+		.busy_mask = 0x7ffffffe,
+		.bus_width = 0,
 	},
-	{
-		.gpurev = ADRENO_REV_A405,
-		.core = 4,
-		.major = 0,
-		.minor = 5,
-		.patchid = ANY_ID,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A420,
-		.core = 4,
-		.major = 2,
-		.minor = 0,
-		.patchid = ANY_ID,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A430,
-		.core = 4,
-		.major = 3,
-		.minor = 0,
-		.patchid = ANY_ID,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A418,
-		.core = 4,
-		.major = 1,
-		.minor = 8,
-		.patchid = ANY_ID,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A530,
-		.core = 5,
-		.major = 3,
-		.minor = 0,
-		.patchid = 0,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A530,
-		.core = 5,
-		.major = 3,
-		.minor = 0,
-		.patchid = 1,
+	.pm4fw_name = "a300_pm4.fw",
+	.pfpfw_name = "a300_pfp.fw",
+	.vbif = a304_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a304_vbif_regs),
+};
+
+DEFINE_DEPRECATED_CORE(a405, ADRENO_REV_A405, 4, 0, 5, ANY_ID);
+DEFINE_DEPRECATED_CORE(a418, ADRENO_REV_A418, 4, 1, 8, ANY_ID);
+DEFINE_DEPRECATED_CORE(a420, ADRENO_REV_A420, 4, 2, 0, ANY_ID);
+DEFINE_DEPRECATED_CORE(a430, ADRENO_REV_A430, 4, 3, 0, ANY_ID);
+DEFINE_DEPRECATED_CORE(a530v1, ADRENO_REV_A530, 5, 3, 0, 0);
+
+static const struct adreno_reglist a530_hwcg_regs[] = {
+	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+};
+
+/* VBIF control registers for a530, a510, a508, a505 and a506 */
+static const struct adreno_reglist a530_vbif_regs[] = {
+	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A530, 5, 3, 0, 1),
 		.features = ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM |
 			ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a530_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
-		.gpmufw_name = "a530_gpmu.fw2",
-		.gpmu_major = 1,
-		.gpmu_minor = 0,
-		.busy_mask = 0xFFFFFFFE,
-		.lm_major = 3,
-		.lm_minor = 0,
-		.gpmu_tsens = 0x00060007,
-		.max_power = 5448,
-		.regfw_name = "a530v2_seq.fw2",
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A530,
-		.core = 5,
-		.major = 3,
-		.minor = 0,
-		.patchid = ANY_ID,
+	.gpmu_tsens = 0x00060007,
+	.max_power = 5448,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.gpmufw_name = "a530_gpmu.fw2",
+	.regfw_name = "a530v2_seq.fw2",
+	.zap_name = "a530_zap",
+	.hwcg = a530_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a530_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A530, 5, 3, 0, ANY_ID),
 		.features = ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM |
 			ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a530_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
-		.gpmufw_name = "a530v3_gpmu.fw2",
-		.gpmu_major = 1,
-		.gpmu_minor = 0,
-		.busy_mask = 0xFFFFFFFE,
-		.lm_major = 1,
-		.lm_minor = 0,
-		.gpmu_tsens = 0x00060007,
-		.max_power = 5448,
-		.regfw_name = "a530v3_seq.fw2",
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A505,
-		.core = 5,
-		.major = 0,
-		.minor = 5,
-		.patchid = ANY_ID,
+	.gpmu_tsens = 0x00060007,
+	.max_power = 5448,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.gpmufw_name = "a530v3_gpmu.fw2",
+	.regfw_name = "a530v3_seq.fw2",
+	.zap_name = "a530_zap",
+	.hwcg = a530_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a530_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+/* For a505, a506 and a508 */
+static const struct adreno_reglist a50x_hwcg_regs[] = {
+	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A505, 5, 0, 5, ANY_ID),
 		.features = ADRENO_PREEMPTION | ADRENO_64BIT,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 16,
 	},
-	{
-		.gpurev = ADRENO_REV_A506,
-		.core = 5,
-		.major = 0,
-		.minor = 6,
-		.patchid = ANY_ID,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.hwcg = a50x_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a50x_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A506, 5, 0, 6, ANY_ID),
 		.features = ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a506_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 16,
 	},
-	{
-		.gpurev = ADRENO_REV_A510,
-		.core = 5,
-		.major = 1,
-		.minor = 0,
-		.patchid = ANY_ID,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.zap_name = "a506_zap",
+	.hwcg = a50x_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a50x_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+static const struct adreno_reglist a510_hwcg_regs[] = {
+	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A510, 5, 1, 0, ANY_ID),
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_256K,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 16,
 	},
-	{
-		.gpurev = ADRENO_REV_A540,
-		.core = 5,
-		.major = 4,
-		.minor = 0,
-		.patchid = 0,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A540,
-		.core = 5,
-		.major = 4,
-		.minor = 0,
-		.patchid = ANY_ID,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.hwcg = a510_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a510_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+DEFINE_DEPRECATED_CORE(a540v1, ADRENO_REV_A540, 5, 4, 0, 0);
+
+static const struct adreno_reglist a540_hwcg_regs[] = {
+	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000222},
+	{A5XX_RBBM_CLOCK_DELAY_GPMU, 0x00000770},
+	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000004},
+};
+
+static const struct adreno_reglist a540_vbif_regs[] = {
+	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
+	{A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A540, 5, 4, 0, ANY_ID),
 		.features = ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION |
 			ADRENO_GPMU | ADRENO_SPTP_PC,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a540_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a540_gpmu.fw2",
-		.gpmu_major = 3,
-		.gpmu_minor = 0,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A512,
-		.core = 5,
-		.major = 1,
-		.minor = 2,
-		.patchid = ANY_ID,
+	.gpmu_tsens = 0x000c000d,
+	.max_power = 5448,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.gpmufw_name = "a540_gpmu.fw2",
+	.zap_name = "a540_zap",
+	.hwcg = a540_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a540_hwcg_regs),
+	.vbif = a540_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a540_vbif_regs),
+};
+
+static const struct adreno_reglist a512_hwcg_regs[] = {
+	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A512, 5, 1, 2, ANY_ID),
 		.features = ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a512_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_256K + SZ_16K),
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A508,
-		.core = 5,
-		.major = 0,
-		.minor = 8,
-		.patchid = ANY_ID,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.zap_name = "a512_zap",
+	.hwcg = a512_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a512_hwcg_regs),
+};
+
+static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A508, 5, 0, 8, ANY_ID),
 		.features = ADRENO_PREEMPTION | ADRENO_64BIT |
 			ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION,
-		.pm4fw_name = "a530_pm4.fw",
-		.pfpfw_name = "a530_pfp.fw",
-		.zap_name = "a508_zap",
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A630,
-		.core = 6,
-		.major = 3,
-		.minor = 0,
-		.patchid = 0,
-		.features = ADRENO_DEPRECATED,
-	},
-	{
-		.gpurev = ADRENO_REV_A630,
-		.core = 6,
-		.major = 3,
-		.minor = 0,
-		.patchid = ANY_ID,
+	.pm4fw_name = "a530_pm4.fw",
+	.pfpfw_name = "a530_pfp.fw",
+	.zap_name = "a508_zap",
+	.hwcg = a50x_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a50x_hwcg_regs),
+	.vbif = a530_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a530_vbif_regs),
+};
+
+DEFINE_DEPRECATED_CORE(a630v1, ADRENO_REV_A630, 6, 3, 0, 0);
+
+static const struct adreno_reglist a630_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+};
+
+static const struct adreno_reglist a630_vbif_regs[] = {
+	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
+	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT | ADRENO_PREEMPTION,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x1,
-		.gpmu_minor = 0x003,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
-		.prim_fifo_threshold = 0x0018000,
-		.pdc_address_offset = 0x00030080,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A615,
-		.core = 6,
-		.major = 1,
-		.minor = 5,
-		.patchid = ANY_ID,
+	.prim_fifo_threshold = 0x0018000,
+	.pdc_address_offset = 0x00030080,
+	.gmu_major = 1,
+	.gmu_minor = 3,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a630_gmu.bin",
+	.zap_name = "a630_zap",
+	.hwcg = a630_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a630_hwcg_regs),
+	.vbif = a630_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
+};
+
+/* For a615, a616 and a618 */
+static const struct adreno_reglist a615_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0,  0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP0,  0x0000F3CF},
+	{A6XX_RBBM_CLOCK_CNTL_TP0,  0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP1,  0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+	{A6XX_RBBM_CLOCK_HYST_TP0,  0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP1,  0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE,  0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE,  0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+/* For a615, a616 and a618 */
+static const struct adreno_reglist a615_gbif_regs[] = {
+	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A615, 6, 1, 5, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
 			ADRENO_IOCOHERENT,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a615_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x1,
-		.gpmu_minor = 0x003,
-		.prim_fifo_threshold = 0x0018000,
-		.pdc_address_offset = 0x00030080,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A618,
-		.core = 6,
-		.major = 1,
-		.minor = 8,
-		.patchid = ANY_ID,
+	.prim_fifo_threshold = 0x0018000,
+	.pdc_address_offset = 0x00030080,
+	.gmu_major = 1,
+	.gmu_minor = 3,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a630_gmu.bin",
+	.zap_name = "a615_zap",
+	.hwcg = a615_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
+	.vbif = a615_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A618, 6, 1, 8, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
 			ADRENO_IOCOHERENT,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a615_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x1,
-		.gpmu_minor = 0x007,
-		.prim_fifo_threshold = 0x0018000,
-		.pdc_address_offset = 0x00030090,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A620,
-		.core = 6,
-		.major = 2,
-		.minor = 0,
-		.patchid = 0,
+	.prim_fifo_threshold = 0x0018000,
+	.pdc_address_offset = 0x00030090,
+	.gmu_major = 1,
+	.gmu_minor = 7,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a630_gmu.bin",
+	.zap_name = "a615_zap",
+	.hwcg = a615_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
+	.vbif = a615_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+};
+
+static const struct adreno_reglist a620_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+};
+
+/* a620 and a650 */
+static const struct adreno_reglist a650_gbif_regs[] = {
+	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE2, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE3, 0x00071620},
+	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
 			ADRENO_IFPC,
-		.sqefw_name = "a650_sqe.fw",
-		.zap_name = "a620_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_512K,
 		.num_protected_regs = 0x30,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a650_gmu.bin",
-		.gpmu_major = 0x2,
-		.gpmu_minor = 0x000,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
-		.prim_fifo_threshold = 0x100000,
-		.pdc_address_offset = 0x000300A0,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A640,
-		.core = 6,
-		.major = 4,
-		.minor = 0,
-		.patchid = ANY_ID,
+	.prim_fifo_threshold = 0x0010000,
+	.pdc_address_offset = 0x000300a0,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a650_sqe.fw",
+	.gmufw_name = "a650_gmu.bin",
+	.zap_name = "a620_zap",
+	.hwcg = a620_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a620_hwcg_regs),
+	.vbif = a650_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
+	.veto_fal10 = true,
+};
+
+static const struct adreno_reglist a640_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+};
+
+/* These apply to a640, a680 and a612 */
+static const struct adreno_reglist a640_vbif_regs[] = {
+	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE2, 0x00071620},
+	{A6XX_GBIF_QSB_SIDE3, 0x00071620},
+	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A640, 6, 4, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
 			ADRENO_IFPC | ADRENO_PREEMPTION,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a640_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M, //Verified 1MB
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a640_gmu.bin",
-		.gpmu_major = 0x2,
-		.gpmu_minor = 0x000,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
-		.prim_fifo_threshold = 0x00200000,
-		.pdc_address_offset = 0x00030090,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
+	.prim_fifo_threshold = 0x00200000,
+	.pdc_address_offset = 0x00030090,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a640_gmu.bin",
+	.zap_name = "a640_zap",
+	.hwcg = a640_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
+	.vbif = a640_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+};
+
+static const struct adreno_reglist a650_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	{
-		.gpurev = ADRENO_REV_A650,
-		.core = 6,
-		.major = 5,
-		.minor = 0,
-		.patchid = 0,
+		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IFPC,
-		.sqefw_name = "a650_sqe.fw",
-		.zap_name = "a650_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
 		.num_protected_regs = 0x30,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a650_gmu.bin",
-		.gpmu_major = 0x2,
-		.gpmu_minor = 0x000,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
-		.prim_fifo_threshold = 0x00300000,
-		.pdc_address_offset = 0x000300A0,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
-	{
-		.gpurev = ADRENO_REV_A680,
-		.core = 6,
-		.major = 8,
-		.minor = 0,
-		.patchid = ANY_ID,
+	.prim_fifo_threshold = 0x00300000,
+	.pdc_address_offset = 0x000300A0,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a650_sqe.fw",
+	.gmufw_name = "a650_gmu.bin",
+	.zap_name = "a650_zap",
+	.vbif = a650_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
+	.veto_fal10 = true,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
+	.base = {
+		DEFINE_ADRENO_REV(ADRENO_REV_A680, 6, 8, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a640_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_2M,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a640_gmu.bin",
-		.gpmu_major = 0x2,
-		.gpmu_minor = 0x000,
-		.gpmu_tsens = 0x000C000D,
-		.max_power = 5448,
-		.prim_fifo_threshold = 0x00400000,
-		.pdc_address_offset = 0x00030090,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
+	.prim_fifo_threshold = 0x00400000,
+	.pdc_address_offset = 0x00030090,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a640_gmu.bin",
+	.zap_name = "a640_zap",
+	.hwcg = a640_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
+	.vbif = a640_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+};
+
+static const struct adreno_reglist a612_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x00000081},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 	{
-		.gpurev = ADRENO_REV_A612,
-		.core = 6,
-		.major = 1,
-		.minor = 2,
-		.patchid = ANY_ID,
+		DEFINE_ADRENO_REV(ADRENO_REV_A612, 6, 1, 2, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
 			ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a612_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_4K),
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a612_rgmu.bin",
-		.prim_fifo_threshold = 0x00080000,
-		.pdc_address_offset = 0x00030080,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
+	.prim_fifo_threshold = 0x00080000,
+	.pdc_address_offset = 0x00030080,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a612_rgmu.bin",
+	.zap_name = "a612_zap",
+	.hwcg = a612_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
+	.vbif = a640_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 	{
-		.gpurev = ADRENO_REV_A616,
-		.core = 6,
-		.major = 1,
-		.minor = 6,
-		.patchid = ANY_ID,
+		DEFINE_ADRENO_REV(ADRENO_REV_A616, 6, 1, 6, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
 			ADRENO_IOCOHERENT,
-		.sqefw_name = "a630_sqe.fw",
-		.zap_name = "a615_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
 		.num_protected_regs = 0x20,
-		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x1,
-		.gpmu_minor = 0x003,
-		.prim_fifo_threshold = 0x0018000,
-		.pdc_address_offset = 0x00030080,
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
 	},
+	.prim_fifo_threshold = 0x0018000,
+	.pdc_address_offset = 0x00030080,
+	.gmu_major = 1,
+	.gmu_minor = 3,
+	.sqefw_name = "a630_sqe.fw",
+	.gmufw_name = "a630_gmu.bin",
+	.zap_name = "a615_zap",
+	.hwcg = a615_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
+	.vbif = a615_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+};
+
+static const struct adreno_gpu_core *adreno_gpulist[] = {
+	&adreno_gpu_core_a306.base,
+	&adreno_gpu_core_a306a.base,
+	&adreno_gpu_core_a304.base,
+	&adreno_gpu_core_a405,		/* Deprecated */
+	&adreno_gpu_core_a418,		/* Deprecated */
+	&adreno_gpu_core_a420,		/* Deprecated */
+	&adreno_gpu_core_a430,		/* Deprecated */
+	&adreno_gpu_core_a530v1,	/* Deprecated */
+	&adreno_gpu_core_a530v2.base,
+	&adreno_gpu_core_a530v3.base,
+	&adreno_gpu_core_a505.base,
+	&adreno_gpu_core_a506.base,
+	&adreno_gpu_core_a510.base,
+	&adreno_gpu_core_a540v1,	/* Deprecated */
+	&adreno_gpu_core_a540v2.base,
+	&adreno_gpu_core_a512.base,
+	&adreno_gpu_core_a508.base,
+	&adreno_gpu_core_a630v1,	/* Deprecated */
+	&adreno_gpu_core_a630v2.base,
+	&adreno_gpu_core_a615.base,
+	&adreno_gpu_core_a618.base,
+	&adreno_gpu_core_a620.base,
+	&adreno_gpu_core_a640.base,
+	&adreno_gpu_core_a650.base,
+	&adreno_gpu_core_a680.base,
+	&adreno_gpu_core_a612.base,
+	&adreno_gpu_core_a616.base,
 };
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 08d715d..08e3b18 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2,41 +2,30 @@
 /*
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_fdt.h>
 #include <linux/delay.h>
 #include <linux/input.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_fdt.h>
+#include <linux/module.h>
+#include <linux/msm_kgsl.h>
+#include <linux/regulator/consumer.h>
+#include <linux/nvmem-consumer.h>
 #include <soc/qcom/scm.h>
 
-#include <linux/msm-bus-board.h>
-#include <linux/msm-bus.h>
-
-#include "kgsl.h"
-#include "kgsl_gmu_core.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_iommu.h"
-#include "kgsl_trace.h"
-#include "adreno_llc.h"
-
 #include "adreno.h"
-#include "adreno_iommu.h"
+#include "adreno_a3xx.h"
+#include "adreno_a5xx.h"
+#include "adreno_a6xx.h"
 #include "adreno_compat.h"
-#include "adreno_pm4types.h"
+#include "adreno_iommu.h"
+#include "adreno_llc.h"
 #include "adreno_trace.h"
-
-#include "a3xx_reg.h"
-#include "a6xx_reg.h"
-#include "adreno_snapshot.h"
+#include "kgsl_trace.h"
 
 /* Include the master list of GPU cores that are supported */
 #include "adreno-gpulist.h"
-#include "adreno_dispatch.h"
 
 static void adreno_input_work(struct work_struct *work);
 static unsigned int counter_delta(struct kgsl_device *device,
@@ -106,6 +95,16 @@ int adreno_wake_nice = -7;
 /* Number of milliseconds to stay active active after a wake on touch */
 unsigned int adreno_wake_timeout = 100;
 
+void adreno_reglist_write(struct adreno_device *adreno_dev,
+		const struct adreno_reglist *list, u32 count)
+{
+	int i;
+
+	for (i = 0; list && i < count; i++)
+		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
+			list[i].offset, list[i].value);
+}
+
 /**
  * adreno_readreg64() - Read a 64bit register by getting its offset from the
  * offset array defined in gpudev node
@@ -684,11 +683,11 @@ static inline const struct adreno_gpu_core *_get_gpu_core(unsigned int chipid)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
-		if (core == adreno_gpulist[i].core &&
-		    _rev_match(major, adreno_gpulist[i].major) &&
-		    _rev_match(minor, adreno_gpulist[i].minor) &&
-		    _rev_match(patchid, adreno_gpulist[i].patchid))
-			return &adreno_gpulist[i];
+		if (core == adreno_gpulist[i]->core &&
+		    _rev_match(major, adreno_gpulist[i]->major) &&
+		    _rev_match(minor, adreno_gpulist[i]->minor) &&
+		    _rev_match(patchid, adreno_gpulist[i]->patchid))
+			return adreno_gpulist[i];
 	}
 
 	return NULL;
@@ -1285,14 +1284,51 @@ static bool adreno_is_gpu_disabled(struct adreno_device *adreno_dev)
 			pte_row0_msb[1] ? true : false;
 }
 
+static int adreno_read_speed_bin(struct platform_device *pdev,
+		struct adreno_device *adreno_dev)
+{
+	struct nvmem_cell *cell = nvmem_cell_get(&pdev->dev, "speed_bin");
+	int ret = PTR_ERR_OR_ZERO(cell);
+	void *buf;
+	size_t len;
+
+	if (ret) {
+		/*
+		 * If the cell isn't defined, then revert to
+		 * using the default bin
+		 */
+		if (ret == -ENOENT)
+			return 0;
+
+		return ret;
+	}
+
+	buf = nvmem_cell_read(cell, &len);
+	if (!IS_ERR(buf)) {
+		memcpy(&adreno_dev->speed_bin, buf,
+				min(len, sizeof(adreno_dev->speed_bin)));
+		kfree(buf);
+	}
+
+	nvmem_cell_put(cell);
+
+	return 0;
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-		of_match_device(adreno_match_table, &pdev->dev);
-	struct adreno_device *adreno_dev = (struct adreno_device *) of_id->data;
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct of_device_id *of_id;
+	struct adreno_device *adreno_dev;
+	struct kgsl_device *device;
 	int status;
 
+	of_id = of_match_device(adreno_match_table, &pdev->dev);
+	if (!of_id)
+		return -EINVAL;
+
+	adreno_dev = (struct adreno_device *) of_id->data;
+	device = KGSL_DEVICE(adreno_dev);
+
 	device->pdev = pdev;
 
 	if (adreno_is_gpu_disabled(adreno_dev)) {
@@ -1305,6 +1341,10 @@ static int adreno_probe(struct platform_device *pdev)
 
 	adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);
 
+	status = adreno_read_speed_bin(pdev, adreno_dev);
+	if (status)
+		return status;
+
 	/* Get the chip ID from the DT and set up target specific parameters */
 	if (adreno_identify_gpu(adreno_dev))
 		return -ENODEV;
@@ -1334,6 +1374,8 @@ static int adreno_probe(struct platform_device *pdev)
 	if (adreno_support_64bit(adreno_dev))
 		device->mmu.features |= KGSL_MMU_64BIT;
 
+	device->pwrctrl.bus_width = adreno_dev->gpucore->bus_width;
+
 	status = kgsl_device_platform_probe(device);
 	if (status) {
 		device->pdev = NULL;
@@ -1446,11 +1488,18 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
 
 static int adreno_remove(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-		of_match_device(adreno_match_table, &pdev->dev);
-	struct adreno_device *adreno_dev = (struct adreno_device *) of_id->data;
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	const struct of_device_id *of_id;
+	struct adreno_device *adreno_dev;
+	struct kgsl_device *device;
+	struct adreno_gpudev *gpudev;
+
+	of_id = of_match_device(adreno_match_table, &pdev->dev);
+	if (!of_id)
+		return -EINVAL;
+
+	adreno_dev = (struct adreno_device *) of_id->data;
+	device = KGSL_DEVICE(adreno_dev);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (gpudev->remove != NULL)
 		gpudev->remove(adreno_dev);
@@ -1629,23 +1678,6 @@ static int adreno_init(struct kgsl_device *device)
 
 	set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
 
-	/* Use shader offset and length defined in gpudev */
-	if (adreno_dev->gpucore->shader_offset &&
-					adreno_dev->gpucore->shader_size) {
-
-		if (device->shader_mem_phys || device->shader_mem_virt)
-			dev_err(device->dev,
-				     "Shader memory already specified in device tree\n");
-		else {
-			device->shader_mem_phys = device->reg_phys +
-					adreno_dev->gpucore->shader_offset;
-			device->shader_mem_virt = device->reg_virt +
-					adreno_dev->gpucore->shader_offset;
-			device->shader_mem_len =
-					adreno_dev->gpucore->shader_size;
-		}
-	}
-
 	/*
 	 * Allocate a small chunk of memory for precise drawobj profiling for
 	 * those targets that have the always on timer
@@ -1668,16 +1700,6 @@ static int adreno_init(struct kgsl_device *device)
 
 	}
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
-		int r = 0;
-
-		if (gpudev->preemption_init)
-			r = gpudev->preemption_init(adreno_dev);
-
-		if (!WARN(r, "adreno: GPU preemption is disabled"))
-			set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
-	}
-
 	return 0;
 }
 
@@ -2211,8 +2233,9 @@ int adreno_reset(struct kgsl_device *device, int fault)
 		if (ret == 0) {
 			ret = adreno_soft_reset(device);
 			if (ret)
-				dev_err_once(device->dev,
-					"Device soft reset failed\n");
+				dev_err(device->dev,
+					"Device soft reset failed: ret=%d\n",
+					ret);
 		}
 	}
 	if (ret) {
@@ -2365,23 +2388,29 @@ static int adreno_prop_ucode_version(struct kgsl_device *device,
 	return copy_prop(param, &ucode, sizeof(ucode));
 }
 
-static int adreno_prop_gpmu_version(struct kgsl_device *device,
+static int adreno_prop_gaming_bin(struct kgsl_device *device,
 		struct kgsl_device_getproperty *param)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct kgsl_gpmu_version gpmu = { 0 };
+	void *buf;
+	size_t len;
+	int ret;
+	struct nvmem_cell *cell;
 
-	if (!adreno_dev->gpucore)
+	cell = nvmem_cell_get(&device->pdev->dev, "gaming_bin");
+	if (IS_ERR(cell))
 		return -EINVAL;
 
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
-		return -EOPNOTSUPP;
+	buf = nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
 
-	gpmu.major = adreno_dev->gpucore->gpmu_major;
-	gpmu.minor = adreno_dev->gpucore->gpmu_minor;
-	gpmu.features = adreno_dev->gpucore->gpmu_features;
+	if (!IS_ERR(buf)) {
+		ret = copy_prop(param, buf, len);
+		kfree(buf);
+		return ret;
+	}
 
-	return copy_prop(param, &gpmu, sizeof(gpmu));
+	dev_err(device->dev, "failed to read gaming_bin nvmem cell\n");
+	return -EINVAL;
 }
 
 static int adreno_prop_u32(struct kgsl_device *device,
@@ -2421,12 +2450,12 @@ static const struct {
 	{ KGSL_PROP_INTERRUPT_WAITS, adreno_prop_s32 },
 	{ KGSL_PROP_UCHE_GMEM_VADDR, adreno_prop_uche_gmem_addr },
 	{ KGSL_PROP_UCODE_VERSION, adreno_prop_ucode_version },
-	{ KGSL_PROP_GPMU_VERSION, adreno_prop_gpmu_version },
 	{ KGSL_PROP_HIGHEST_BANK_BIT, adreno_prop_u32 },
 	{ KGSL_PROP_MIN_ACCESS_LENGTH, adreno_prop_u32 },
 	{ KGSL_PROP_UBWC_MODE, adreno_prop_u32 },
 	{ KGSL_PROP_DEVICE_BITNESS, adreno_prop_u32 },
 	{ KGSL_PROP_SPEED_BIN, adreno_prop_u32 },
+	{ KGSL_PROP_GAMING_BIN, adreno_prop_gaming_bin },
 };
 
 static int adreno_getproperty(struct kgsl_device *device,
@@ -2442,6 +2471,20 @@ static int adreno_getproperty(struct kgsl_device *device,
 	return -ENODEV;
 }
 
+static int adreno_query_property_list(struct kgsl_device *device, u32 *list,
+		u32 count)
+{
+	int i;
+
+	if (!list)
+		return ARRAY_SIZE(adreno_property_funcs);
+
+	for (i = 0; i < count && i < ARRAY_SIZE(adreno_property_funcs); i++)
+		list[i] = adreno_property_funcs[i].type;
+
+	return i;
+}
+
 int adreno_set_constraint(struct kgsl_device *device,
 				struct kgsl_context *context,
 				struct kgsl_device_constraint *constraint)
@@ -2966,6 +3009,53 @@ static void adreno_read(struct kgsl_device *device, void __iomem *base,
 	rmb();
 }
 
+static void adreno_retry_rbbm_read(struct kgsl_device *device,
+		void __iomem *base, unsigned int offsetwords,
+		unsigned int *value, unsigned int mem_len)
+{
+	int i;
+	void __iomem *reg;
+
+	/* Make sure we're not reading from invalid memory */
+	if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
+		"Out of bounds register read: 0x%x/0x%x\n",
+		offsetwords, mem_len >> 2))
+		return;
+
+	reg = (base + (offsetwords << 2));
+
+	/*
+	 * If 0xdeafbead was transient, second read is expected to return the
+	 * actual register value. However, if a register value is indeed
+	 * 0xdeafbead, read it enough times to guarantee that.
+	 */
+	for (i = 0; i < 16; i++) {
+		*value = readl_relaxed(reg);
+		/*
+		 * Read barrier needed so that register is read from hardware
+		 * every iteration
+		 */
+		rmb();
+
+		if (*value != 0xdeafbead)
+			return;
+	}
+}
+
+static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
+	unsigned int offsetwords)
+{
+	if (adreno_is_a650(ADRENO_DEVICE(device)) ||
+		adreno_is_a620v1(ADRENO_DEVICE(device))) {
+		if (((offsetwords > 0x0) && (offsetwords < 0x3FF)) ||
+			((offsetwords > 0x4FA) && (offsetwords < 0x53F)) ||
+			((offsetwords > 0x556) && (offsetwords < 0x5FF)) ||
+			((offsetwords > 0xF400) && (offsetwords < 0xFFFF)))
+			return  true;
+	}
+	return false;
+}
+
 /**
  * adreno_regread - Used to read adreno device registers
  * @offsetwords - Word (4 Bytes) offset to the register to be read
@@ -2976,6 +3066,11 @@ static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
 {
 	adreno_read(device, device->reg_virt, offsetwords, value,
 						device->reg_len);
+
+	if ((*value == 0xdeafbead) &&
+		adreno_is_rbbm_batch_reg(device, offsetwords))
+		adreno_retry_rbbm_read(device, device->reg_virt, offsetwords,
+			value, device->reg_len);
 }
 
 /**
@@ -3470,7 +3565,7 @@ static void adreno_power_stats(struct kgsl_device *device,
 		if (gpudev->read_throttling_counters) {
 			adj = gpudev->read_throttling_counters(adreno_dev);
 			if (adj < 0 && -adj > gpu_busy)
-				adj = -gpu_busy;
+				adj = 0;
 
 			gpu_busy += adj;
 		}
@@ -3707,6 +3802,13 @@ static void adreno_gpu_model(struct kgsl_device *device, char *str,
 			 ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
 }
 
+static bool adreno_is_hwcg_on(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	return test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag);
+}
+
 static const struct kgsl_functable adreno_functable = {
 	/* Mandatory functions */
 	.regread = adreno_regread,
@@ -3750,6 +3852,8 @@ static const struct kgsl_functable adreno_functable = {
 	.stop_fault_timer = adreno_dispatcher_stop_fault_timer,
 	.dispatcher_halt = adreno_dispatcher_halt,
 	.dispatcher_unhalt = adreno_dispatcher_unhalt,
+	.query_property_list = adreno_query_property_list,
+	.is_hwcg_on = adreno_is_hwcg_on,
 };
 
 static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a8ffc0f..84f95b7 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -5,17 +5,12 @@
 #ifndef __ADRENO_H
 #define __ADRENO_H
 
-#include "kgsl_device.h"
-#include "kgsl_sharedmem.h"
-#include "adreno_drawctxt.h"
-#include "adreno_ringbuffer.h"
-#include "adreno_profile.h"
 #include "adreno_dispatch.h"
-#include "kgsl_iommu.h"
+#include "adreno_drawctxt.h"
 #include "adreno_perfcounter.h"
-#include <linux/stat.h>
-#include <linux/delay.h>
-#include "kgsl_gmu_core.h"
+#include "adreno_profile.h"
+#include "adreno_ringbuffer.h"
+#include "kgsl_sharedmem.h"
 
 #define DEVICE_3D_NAME "kgsl-3d"
 #define DEVICE_3D0_NAME "kgsl-3d0"
@@ -337,6 +332,16 @@ struct adreno_device_private {
 };
 
 /**
+ * struct adreno_reglist - simple container for register offsets / values
+ */
+struct adreno_reglist {
+	/** @offset: Offset of the register */
+	u32 offset;
+	/** @value: Default value of the register to write */
+	u32 value;
+};
+
+/**
  * struct adreno_gpu_core - A specific GPU core definition
  * @gpurev: Unique GPU revision identifier
  * @core: Match for the core version of the GPU
@@ -344,54 +349,25 @@ struct adreno_device_private {
  * @minor: Match for the minor version of the GPU
  * @patchid: Match for the patch revision of the GPU
  * @features: Common adreno features supported by this core
- * @pm4fw_name: Filename for th PM4 firmware
- * @pfpfw_name: Filename for the PFP firmware
- * @zap_name: Filename for the Zap Shader ucode
  * @gpudev: Pointer to the GPU family specific functions for this core
  * @gmem_base: Base address of binning memory (GMEM/OCMEM)
  * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
- * @shader_offset: Offset of shader from gpu reg base
- * @shader_size: Shader size
  * @num_protected_regs: number of protected registers
- * @gpmufw_name: Filename for the GPMU firmware
- * @gpmu_major: Match for the GPMU & firmware, major revision
- * @gpmu_minor: Match for the GPMU & firmware, minor revision
- * @gpmu_features: Supported features for any given GPMU version
  * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
- * @lm_major: Limits Management register sequence, major revision
- * @lm_minor: LM register sequence, minor revision
- * @regfw_name: Filename for the register sequence firmware
- * @gpmu_tsens: ID for the temporature sensor used by the GPMU
- * @max_power: Max possible power draw of a core, units elephant tail hairs
+ * @bus_width: Bytes transferred in 1 cycle
  */
 struct adreno_gpu_core {
 	enum adreno_gpurev gpurev;
 	unsigned int core, major, minor, patchid;
 	unsigned long features;
-	const char *pm4fw_name;
-	const char *pfpfw_name;
-	const char *sqefw_name;
-	const char *zap_name;
 	struct adreno_gpudev *gpudev;
 	unsigned long gmem_base;
 	size_t gmem_size;
-	unsigned long shader_offset;
-	unsigned int shader_size;
 	unsigned int num_protected_regs;
-	const char *gpmufw_name;
-	unsigned int gpmu_major;
-	unsigned int gpmu_minor;
-	unsigned int gpmu_features;
 	unsigned int busy_mask;
-	unsigned int lm_major, lm_minor;
-	const char *regfw_name;
-	unsigned int gpmu_tsens;
-	unsigned int max_power;
-	unsigned int prim_fifo_threshold;
-	unsigned int pdc_address_offset;
+	u32 bus_width;
 };
 
-
 enum gpu_coresight_sources {
 	GPU_CORESIGHT_GX = 0,
 	GPU_CORESIGHT_CX = 1,
@@ -763,27 +739,6 @@ struct adreno_reg_offsets {
 #define ADRENO_INT_DEFINE(_offset, _val) ADRENO_REG_DEFINE(_offset, _val)
 
 /*
- * struct adreno_vbif_data - Describes vbif register value pair
- * @reg: Offset to vbif register
- * @val: The value that should be programmed in the register at reg
- */
-struct adreno_vbif_data {
-	unsigned int reg;
-	unsigned int val;
-};
-
-/*
- * struct adreno_vbif_platform - Holds an array of vbif reg value pairs
- * for a particular core
- * @devfunc: Pointer to platform/core identification function
- * @vbif: Array of reg value pairs for vbif registers
- */
-struct adreno_vbif_platform {
-	int (*devfunc)(struct adreno_device *adreno_dev);
-	const struct adreno_vbif_data *vbif;
-};
-
-/*
  * struct adreno_vbif_snapshot_registers - Holds an array of vbif registers
  * listed for snapshot dump for a particular core
  * @version: vbif version
@@ -958,6 +913,7 @@ struct adreno_gpudev {
 				struct adreno_device *adreno_dev,
 				unsigned int *cmds);
 	int (*preemption_init)(struct adreno_device *adreno_dev);
+	void (*preemption_close)(struct adreno_device *adreno_dev);
 	void (*preemption_schedule)(struct adreno_device *adreno_dev);
 	int (*preemption_context_init)(struct kgsl_context *context);
 	void (*preemption_context_destroy)(struct kgsl_context *context);
@@ -1253,6 +1209,12 @@ static inline int adreno_is_a650_family(struct adreno_device *adreno_dev)
 	return (rev == ADRENO_REV_A650 || rev == ADRENO_REV_A620);
 }
 
+static inline int adreno_is_a620v1(struct adreno_device *adreno_dev)
+{
+	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A620) &&
+		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
+}
+
 static inline int adreno_is_a640v2(struct adreno_device *adreno_dev)
 {
 	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A640) &&
@@ -1488,32 +1450,16 @@ static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
 }
 
 
-/*
- * adreno_vbif_start() - Program VBIF registers, called in device start
- * @adreno_dev: Pointer to device whose vbif data is to be programmed
- * @vbif_platforms: list register value pair of vbif for a family
- * of adreno cores
- * @num_platforms: Number of platforms contained in vbif_platforms
+/**
+ * adreno_reglist_write - Write each register in a reglist
+ * @adreno_dev: An Adreno GPU device handle
+ * @reglist: A list of &struct adreno_reglist items
+ * @count: Number of items in @reglist
+ *
+ * Write each register listed in @reglist.
  */
-static inline void adreno_vbif_start(struct adreno_device *adreno_dev,
-			const struct adreno_vbif_platform *vbif_platforms,
-			int num_platforms)
-{
-	int i;
-	const struct adreno_vbif_data *vbif = NULL;
-
-	for (i = 0; i < num_platforms; i++) {
-		if (vbif_platforms[i].devfunc(adreno_dev)) {
-			vbif = vbif_platforms[i].vbif;
-			break;
-		}
-	}
-
-	while ((vbif != NULL) && (vbif->reg != 0)) {
-		kgsl_regwrite(KGSL_DEVICE(adreno_dev), vbif->reg, vbif->val);
-		vbif++;
-	}
-}
+void adreno_reglist_write(struct adreno_device *adreno_dev,
+		const struct adreno_reglist *list, u32 count);
 
 /**
  * adreno_set_protected_registers() - Protect the specified range of registers
@@ -1878,7 +1824,8 @@ static inline int adreno_wait_for_halt_ack(struct kgsl_device *device,
 			break;
 		if (time_after(jiffies, wait_for_vbif)) {
 			dev_err(device->dev,
-				"Wait limit reached for GBIF/VBIF Halt\n");
+				"GBIF/VBIF Halt ack timeout: reg=%08X mask=%08X status=%08X\n",
+				ack_reg, mask, val);
 			ret = -ETIMEDOUT;
 			break;
 		}
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index eff3c01..3456c14 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -3,22 +3,17 @@
  * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/firmware.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/msm_kgsl.h>
 #include <linux/clk/qcom.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/slab.h>
 
-#include "kgsl.h"
 #include "adreno.h"
-#include "kgsl_sharedmem.h"
-#include "a3xx_reg.h"
-#include "adreno_a3xx.h"
 #include "adreno_cp_parser.h"
-#include "adreno_trace.h"
+#include "adreno_a3xx.h"
 #include "adreno_pm4types.h"
-#include "adreno_perfcounter.h"
 #include "adreno_snapshot.h"
+#include "adreno_trace.h"
 
 /*
  * Define registers for a3xx that contain addresses used by the
@@ -831,32 +826,6 @@ static struct adreno_irq a3xx_irq = {
 	.mask = A3XX_INT_MASK,
 };
 
-/* VBIF registers start after 0x3000 so use 0x0 as end of list marker */
-static const struct adreno_vbif_data a304_vbif[] = {
-	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a306_vbif[] = {
-	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
-	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000A },
-	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000A },
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a306a_vbif[] = {
-	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
-	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010 },
-	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010 },
-	{0, 0},
-};
-
-static const struct adreno_vbif_platform a3xx_vbif_platforms[] = {
-	{ adreno_is_a304, a304_vbif },
-	{ adreno_is_a306, a306_vbif },
-	{ adreno_is_a306a, a306a_vbif },
-};
-
 /*
  * Define the available perfcounter groups - these get used by
  * adreno_perfcounter_get and adreno_perfcounter_put
@@ -1157,9 +1126,11 @@ static void a3xx_protect_init(struct adreno_device *adreno_dev)
 static void a3xx_start(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct adreno_a3xx_core *a3xx_core = to_a3xx_core(adreno_dev);
 
-	adreno_vbif_start(adreno_dev, a3xx_vbif_platforms,
-			ARRAY_SIZE(a3xx_vbif_platforms));
+	/* Set up VBIF registers from the GPU core definition */
+	adreno_reglist_write(adreno_dev, a3xx_core->vbif,
+		a3xx_core->vbif_count);
 
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	kgsl_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
@@ -1394,17 +1365,18 @@ static int a3xx_microcode_read(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
 	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
+	const struct adreno_a3xx_core *a3xx_core = to_a3xx_core(adreno_dev);
 
 	if (pm4_fw->fwvirt == NULL) {
 		int len;
 		void *ptr;
 
 		int ret = _load_firmware(device,
-			adreno_dev->gpucore->pm4fw_name, &ptr, &len);
+			a3xx_core->pm4fw_name, &ptr, &len);
 
 		if (ret) {
 			dev_err(device->dev,  "Failed to read pm4 ucode %s\n",
-				adreno_dev->gpucore->pm4fw_name);
+				a3xx_core->pm4fw_name);
 			return ret;
 		}
 
@@ -1427,10 +1399,10 @@ static int a3xx_microcode_read(struct adreno_device *adreno_dev)
 		void *ptr;
 
 		int ret = _load_firmware(device,
-			adreno_dev->gpucore->pfpfw_name, &ptr, &len);
+			a3xx_core->pfpfw_name, &ptr, &len);
 		if (ret) {
 			dev_err(device->dev, "Failed to read pfp ucode %s\n",
-					   adreno_dev->gpucore->pfpfw_name);
+					   a3xx_core->pfpfw_name);
 			return ret;
 		}
 
diff --git a/drivers/gpu/msm/adreno_a3xx.h b/drivers/gpu/msm/adreno_a3xx.h
index 4fc1cd0..29240b5 100644
--- a/drivers/gpu/msm/adreno_a3xx.h
+++ b/drivers/gpu/msm/adreno_a3xx.h
@@ -6,6 +6,21 @@
 #define __A3XX_H
 
 #include "a3xx_reg.h"
+/**
+ * struct adreno_a3xx_core - a3xx specific GPU core definitions
+ */
+struct adreno_a3xx_core {
+	/** @base: Container for the generic &struct adreno_gpu_core */
+	struct adreno_gpu_core base;
+	/** @pm4fw_name: Name of the PM4 microcode file */
+	const char *pm4fw_name;
+	/** @pfpfw_name: Name of the PFP microcode file */
+	const char *pfpfw_name;
+	/** @vbif: List of registers and values to write for VBIF */
+	const struct adreno_reglist *vbif;
+	/** @vbif_count: Number of registers in @vbif */
+	u32 vbif_count;
+};
 
 #define A3XX_IRQ_FLAGS \
 	{ BIT(A3XX_INT_RBBM_GPU_IDLE), "RBBM_GPU_IDLE" }, \
@@ -33,6 +48,21 @@
 	{ BIT(A3XX_INT_MISC_HANG_DETECT), "MISC_HANG_DETECT" }, \
 	{ BIT(A3XX_INT_UCHE_OOB_ACCESS), "UCHE_OOB_ACCESS" }
 
+/**
+ * to_a3xx_core - return the a3xx specific GPU core struct
+ * @adreno_dev: An Adreno GPU device handle
+ *
+ * Returns:
+ * A pointer to the a3xx specific GPU core struct
+ */
+static inline const struct adreno_a3xx_core *
+to_a3xx_core(struct adreno_device *adreno_dev)
+{
+	const struct adreno_gpu_core *core = adreno_dev->gpucore;
+
+	return container_of(core, struct adreno_a3xx_core, base);
+}
+
 unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev);
 
 void a3xx_snapshot(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 5da3020..e34be4c 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -4,12 +4,11 @@
  */
 
 #include <linux/io.h>
-#include "kgsl.h"
+
 #include "adreno.h"
-#include "kgsl_snapshot.h"
-#include "a3xx_reg.h"
-#include "adreno_snapshot.h"
 #include "adreno_a3xx.h"
+#include "adreno_snapshot.h"
+#include "kgsl_device.h"
 
 /*
  * Set of registers to dump for A3XX on snapshot.
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 3f297ca..a0a23d7 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -3,24 +3,20 @@
  * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/firmware.h>
-#include <soc/qcom/subsystem_restart.h>
-#include <soc/qcom/scm.h>
-#include <linux/pm_opp.h>
 #include <linux/clk/qcom.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/subsystem_restart.h>
 
 #include "adreno.h"
-#include "a5xx_reg.h"
 #include "adreno_a5xx.h"
-#include "adreno_cp_parser.h"
-#include "adreno_trace.h"
-#include "adreno_pm4types.h"
-#include "adreno_perfcounter.h"
-#include "adreno_ringbuffer.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl.h"
-#include "kgsl_trace.h"
 #include "adreno_a5xx_packets.h"
+#include "adreno_pm4types.h"
+#include "adreno_trace.h"
+#include "kgsl_trace.h"
 
 static int critical_packet_constructed;
 
@@ -31,27 +27,6 @@ static struct kgsl_memdesc crit_pkts_refbuf1;
 static struct kgsl_memdesc crit_pkts_refbuf2;
 static struct kgsl_memdesc crit_pkts_refbuf3;
 
-static const struct adreno_vbif_data a530_vbif[] = {
-	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a540_vbif[] = {
-	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
-	{A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
-	{0, 0},
-};
-
-static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
-	{ adreno_is_a540, a540_vbif },
-	{ adreno_is_a530, a530_vbif },
-	{ adreno_is_a512, a540_vbif },
-	{ adreno_is_a510, a530_vbif },
-	{ adreno_is_a508, a530_vbif },
-	{ adreno_is_a505, a530_vbif },
-	{ adreno_is_a506, a530_vbif },
-};
-
 static void a5xx_irq_storm_worker(struct work_struct *work);
 static int _read_fw2_block_header(struct kgsl_device *device,
 		uint32_t *header, uint32_t remain,
@@ -643,24 +618,29 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
 	uint32_t *data;
 	const struct firmware *fw = NULL;
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 	uint32_t *cmds, cmd_size;
 	int ret =  -EINVAL;
+	u32 gmu_major = 1;
 
 	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
 		return 0;
 
+	/* a530 used GPMU major 1 and a540 used GPMU major 3 */
+	if (adreno_is_a540(adreno_dev))
+		gmu_major = 3;
+
 	/* gpmu fw already saved and verified so do nothing new */
 	if (adreno_dev->gpmu_cmds_size != 0)
 		return 0;
 
-	if (gpucore->gpmufw_name == NULL)
+	if (a5xx_core->gpmufw_name == NULL)
 		return 0;
 
-	ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
+	ret = request_firmware(&fw, a5xx_core->gpmufw_name, device->dev);
 	if (ret || fw == NULL) {
 		dev_err(device->dev, "request_firmware (%s) failed: %d\n",
-				gpucore->gpmufw_name, ret);
+				a5xx_core->gpmufw_name, ret);
 		return ret;
 	}
 
@@ -672,10 +652,7 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
 	if (data[1] != GPMU_FIRMWARE_ID)
 		goto err;
 	ret = _read_fw2_block_header(device, &data[2],
-		data[0] - 2,
-		GPMU_FIRMWARE_ID,
-		adreno_dev->gpucore->gpmu_major,
-		adreno_dev->gpucore->gpmu_minor);
+		data[0] - 2, GPMU_FIRMWARE_ID, gmu_major, 0);
 	if (ret)
 		goto err;
 
@@ -797,399 +774,18 @@ static int a5xx_gpmu_start(struct adreno_device *adreno_dev)
 	return ret;
 }
 
-struct kgsl_hwcg_reg {
-	unsigned int off;
-	unsigned int val;
-};
-
-static const struct kgsl_hwcg_reg a50x_hwcg_regs[] = {
-	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
-	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
-	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
-	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
-	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
-	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
-};
-
-static const struct kgsl_hwcg_reg a510_hwcg_regs[] = {
-	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
-	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
-	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
-	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
-	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
-	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
-	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
-	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
-};
-
-static const struct kgsl_hwcg_reg a530_hwcg_regs[] = {
-	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
-	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
-	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
-	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
-	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
-	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
-	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
-	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
-};
-
-
-static const struct kgsl_hwcg_reg a540_hwcg_regs[] = {
-	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
-	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
-	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
-	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
-	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
-	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
-	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
-	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000222},
-	{A5XX_RBBM_CLOCK_DELAY_GPMU, 0x00000770},
-	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000004}
-};
-
-static const struct kgsl_hwcg_reg a512_hwcg_regs[] = {
-	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
-	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
-	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
-	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
-	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
-	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
-	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
-	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
-	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
-	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
-	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
-	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
-	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
-	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
-	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
-	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
-	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-};
-
-static const struct {
-	int (*devfunc)(struct adreno_device *adreno_dev);
-	const struct kgsl_hwcg_reg *regs;
-	unsigned int count;
-} a5xx_hwcg_registers[] = {
-	{ adreno_is_a540, a540_hwcg_regs, ARRAY_SIZE(a540_hwcg_regs) },
-	{ adreno_is_a530, a530_hwcg_regs, ARRAY_SIZE(a530_hwcg_regs) },
-	{ adreno_is_a512, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) },
-	{ adreno_is_a510, a510_hwcg_regs, ARRAY_SIZE(a510_hwcg_regs) },
-	{ adreno_is_a505, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
-	{ adreno_is_a506, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
-	{ adreno_is_a508, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
-};
-
 void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	const struct kgsl_hwcg_reg *regs;
-	int i, j;
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
+	int i;
 
 	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_registers); i++) {
-		if (a5xx_hwcg_registers[i].devfunc(adreno_dev))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(a5xx_hwcg_registers))
-		return;
-
-	regs = a5xx_hwcg_registers[i].regs;
-
-	for (j = 0; j < a5xx_hwcg_registers[i].count; j++)
-		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
+	for (i = 0; i < a5xx_core->hwcg_count; i++)
+		kgsl_regwrite(device, a5xx_core->hwcg[i].offset,
+			on ? a5xx_core->hwcg[i].value : 0);
 
 	/* enable top level HWCG */
 	kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, on ? 0xAAA8AA00 : 0);
@@ -1272,22 +868,27 @@ static int _read_fw2_block_header(struct kgsl_device *device,
 static void _load_regfile(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 	const struct firmware *fw;
 	uint64_t block_size = 0, block_total = 0;
 	uint32_t fw_size, *block;
 	int ret = -EINVAL;
+	u32 lm_major = 1;
 
-	if (!adreno_dev->gpucore->regfw_name)
+	if (!a5xx_core->regfw_name)
 		return;
 
-	ret = request_firmware(&fw, adreno_dev->gpucore->regfw_name,
-			device->dev);
+	ret = request_firmware(&fw, a5xx_core->regfw_name, device->dev);
 	if (ret) {
 		dev_err(device->dev, "request firmware failed %d, %s\n",
-				ret, adreno_dev->gpucore->regfw_name);
+				ret, a5xx_core->regfw_name);
 		return;
 	}
 
+	/* a530v2 lm_major was 3. a530v3 lm_major was 1 */
+	if (adreno_is_a530v2(adreno_dev))
+		lm_major = 3;
+
 	fw_size = fw->size / sizeof(uint32_t);
 	/* Min valid file of size 6, see file description */
 	if (fw_size < 6)
@@ -1305,10 +906,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
 		/* For now ignore blocks other than the LM sequence */
 		if (block[4] == LM_SEQUENCE_ID) {
 			ret = _read_fw2_block_header(device, &block[2],
-				block_size - 2,
-				GPMU_SEQUENCE_ID,
-				adreno_dev->gpucore->lm_major,
-				adreno_dev->gpucore->lm_minor);
+				block_size - 2, GPMU_SEQUENCE_ID,
+				lm_major, 0);
 			if (ret)
 				goto err;
 
@@ -1369,12 +968,13 @@ static uint32_t _write_voltage_table(struct adreno_device *adreno_dev,
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 	int i;
 	struct dev_pm_opp *opp;
 	int levels = pwr->num_pwrlevels - 1;
 	unsigned int mvolt = 0;
 
-	kgsl_regwrite(device, addr++, adreno_dev->gpucore->max_power);
+	kgsl_regwrite(device, addr++, a5xx_core->max_power);
 	kgsl_regwrite(device, addr++, levels);
 
 	/* Write voltage in mV and frequency in MHz */
@@ -1414,6 +1014,7 @@ static void a530_lm_init(struct adreno_device *adreno_dev)
 {
 	uint32_t length;
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 
 	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
 		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
@@ -1433,8 +1034,7 @@ static void a530_lm_init(struct adreno_device *adreno_dev)
 		return;
 	}
 
-	kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_ID,
-			adreno_dev->gpucore->gpmu_tsens);
+	kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_ID, a5xx_core->gpmu_tsens);
 	kgsl_regwrite(device, A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x1);
 	kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x1);
 
@@ -1805,6 +1405,7 @@ static void a5xx_start(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 	unsigned int bit;
 	int ret;
 
@@ -1822,8 +1423,9 @@ static void a5xx_start(struct adreno_device *adreno_dev)
 
 	_setup_throttling_counters(adreno_dev);
 
-	adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
-			ARRAY_SIZE(a5xx_vbif_platforms));
+	/* Set up VBIF registers from the GPU core definition */
+	adreno_reglist_write(adreno_dev, a5xx_core->vbif,
+		a5xx_core->vbif_count);
 
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
@@ -2152,6 +1754,7 @@ static int a5xx_microcode_load(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
 	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 	uint64_t gpuaddr;
 
 	gpuaddr = pm4_fw->memdesc.gpuaddr;
@@ -2197,8 +1800,8 @@ static int a5xx_microcode_load(struct adreno_device *adreno_dev)
 	}
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
-		ptr = subsystem_get(adreno_dev->gpucore->zap_name);
+	if (a5xx_core->zap_name && !adreno_dev->zap_loaded) {
+		ptr = subsystem_get(a5xx_core->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(ptr))
@@ -2440,17 +2043,18 @@ static int a5xx_microcode_read(struct adreno_device *adreno_dev)
 	int ret;
 	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
 	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
+	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
 
 	if (pm4_fw->memdesc.hostptr == NULL) {
 		ret = _load_firmware(KGSL_DEVICE(adreno_dev),
-				 adreno_dev->gpucore->pm4fw_name, pm4_fw);
+				 a5xx_core->pm4fw_name, pm4_fw);
 		if (ret)
 			return ret;
 	}
 
 	if (pfp_fw->memdesc.hostptr == NULL) {
 		ret = _load_firmware(KGSL_DEVICE(adreno_dev),
-				 adreno_dev->gpucore->pfpfw_name, pfp_fw);
+				 a5xx_core->pfpfw_name, pfp_fw);
 		if (ret)
 			return ret;
 	}
@@ -3581,6 +3185,7 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
 	.preemption_post_ibsubmit =
 			a5xx_preemption_post_ibsubmit,
 	.preemption_init = a5xx_preemption_init,
+	.preemption_close = a5xx_preemption_close,
 	.preemption_schedule = a5xx_preemption_schedule,
 	.enable_64bit = a5xx_enable_64bit,
 	.clk_set_options = a5xx_clk_set_options,
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index b2f4e4e..ddfc74a 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017,2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ADRENO_A5XX_H_
@@ -8,6 +8,36 @@
 
 #include "a5xx_reg.h"
 
+/**
+ * struct adreno_a5xx_core - a5xx specific GPU core definitions
+ */
+struct adreno_a5xx_core {
+	/** @base: Container for the generic &struct adreno_gpu_core */
+	struct adreno_gpu_core base;
+	/** @gpmu_tsens: ID for the temperature sensor used by the GPMU */
+	unsigned int gpmu_tsens;
+	/** @max_power: Max possible power draw of a core */
+	unsigned int max_power;
+	/** pm4fw_name: Name of the PM4 microcode file */
+	const char *pm4fw_name;
+	/** pfpfw_name: Name of the PFP microcode file */
+	const char *pfpfw_name;
+	/** gpmufw_name: Name of the GPMU microcode file */
+	const char *gpmufw_name;
+	/** @regfw_name: Filename for the LM registers if applicable */
+	const char *regfw_name;
+	/** @zap_name: Name of the CPZ zap file */
+	const char *zap_name;
+	/** @hwcg: List of registers and values to write for HWCG */
+	const struct adreno_reglist *hwcg;
+	/** @hwcg_count: Number of registers in @hwcg */
+	u32 hwcg_count;
+	/** @vbif: List of registers and values to write for VBIF */
+	const struct adreno_reglist *vbif;
+	/** @vbif_count: Number of registers in @vbif */
+	u32 vbif_count;
+};
+
 #define A5XX_IRQ_FLAGS \
 	{ BIT(A5XX_INT_RBBM_GPU_IDLE), "RBBM_GPU_IDLE" }, \
 	{ BIT(A5XX_INT_RBBM_AHB_ERROR), "RBBM_AHB_ERR" }, \
@@ -213,11 +243,27 @@ static inline bool lm_on(struct adreno_device *adreno_dev)
 		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
 }
 
+/**
+ * to_a5xx_core - return the a5xx specific GPU core struct
+ * @adreno_dev: An Adreno GPU device handle
+ *
+ * Returns:
+ * A pointer to the a5xx specific GPU core struct
+ */
+static inline const struct adreno_a5xx_core *
+to_a5xx_core(struct adreno_device *adreno_dev)
+{
+	const struct adreno_gpu_core *core = adreno_dev->gpucore;
+
+	return container_of(core, struct adreno_a5xx_core, base);
+}
+
 /* Preemption functions */
 void a5xx_preemption_trigger(struct adreno_device *adreno_dev);
 void a5xx_preemption_schedule(struct adreno_device *adreno_dev);
 void a5xx_preemption_start(struct adreno_device *adreno_dev);
 int a5xx_preemption_init(struct adreno_device *adreno_dev);
+void a5xx_preemption_close(struct adreno_device *adreno_dev);
 int a5xx_preemption_yield_enable(unsigned int *cmds);
 
 unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 05634eb..c3f87aa 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -1,13 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017,2019 The Linux Foundation. All rights reserved.
  */
 
 #include "adreno.h"
 #include "adreno_a5xx.h"
-#include "a5xx_reg.h"
-#include "adreno_trace.h"
 #include "adreno_pm4types.h"
+#include "adreno_trace.h"
 
 #define PREEMPT_RECORD(_field) \
 		offsetof(struct a5xx_cp_preemption_record, _field)
@@ -563,9 +562,9 @@ static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
 }
 #endif
 
-static void a5xx_preemption_close(struct kgsl_device *device)
+void a5xx_preemption_close(struct adreno_device *adreno_dev)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_preemption *preempt = &adreno_dev->preempt;
 	struct adreno_ringbuffer *rb;
 	unsigned int i;
@@ -619,7 +618,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
 
 err:
 	if (ret)
-		a5xx_preemption_close(device);
+		a5xx_preemption_close(adreno_dev);
 
 	return ret;
 }
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 32642cd..4ef6bf8 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -3,13 +3,9 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/io.h>
-#include "kgsl.h"
 #include "adreno.h"
-#include "kgsl_snapshot.h"
-#include "adreno_snapshot.h"
-#include "a5xx_reg.h"
 #include "adreno_a5xx.h"
+#include "adreno_snapshot.h"
 
 enum a5xx_rbbm_debbus_id {
 	A5XX_RBBM_DBGBUS_CP          = 0x1,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 95a2bd01..9a6f0c9 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -4,465 +4,21 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/of.h>
 #include <soc/qcom/subsystem_restart.h>
-#include <linux/pm_opp.h>
-#include <linux/jiffies.h>
 
 #include "adreno.h"
-#include "a6xx_reg.h"
 #include "adreno_a6xx.h"
-#include "adreno_cp_parser.h"
-#include "adreno_trace.h"
-#include "adreno_pm4types.h"
-#include "adreno_perfcounter.h"
-#include "adreno_ringbuffer.h"
 #include "adreno_llc.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl.h"
+#include "adreno_pm4types.h"
+#include "adreno_trace.h"
 #include "kgsl_gmu.h"
-#include "kgsl_hfi.h"
 #include "kgsl_trace.h"
 
 #define MIN_HBB		13
 #define HBB_LOWER_MASK	0x3
 #define HBB_UPPER_SHIFT	0x2
 
-static const struct adreno_vbif_data a630_vbif[] = {
-	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
-	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a615_gbif[] = {
-	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a640_gbif[] = {
-	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE2, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE3, 0x00071620},
-	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
-	{0, 0},
-};
-
-static const struct adreno_vbif_data a650_gbif[] = {
-	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE2, 0x00071620},
-	{A6XX_GBIF_QSB_SIDE3, 0x00071620},
-	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
-	{0, 0},
-};
-
-static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
-	{ adreno_is_a630, a630_vbif },
-	{ adreno_is_a615_family, a615_gbif },
-	{ adreno_is_a640, a640_gbif },
-	{ adreno_is_a650, a650_gbif },
-	{ adreno_is_a680, a640_gbif },
-	{ adreno_is_a612, a640_gbif },
-	{ adreno_is_a620, a650_gbif },
-};
-
-struct kgsl_hwcg_reg {
-	unsigned int off;
-	unsigned int val;
-};
-static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
-	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
-	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
-	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
-	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
-	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
-};
-
-static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0,  0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A6XX_RBBM_CLOCK_HYST_SP0,  0x0000F3CF},
-	{A6XX_RBBM_CLOCK_CNTL_TP0,  0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL_TP1,  0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
-	{A6XX_RBBM_CLOCK_HYST_TP0,  0x77777777},
-	{A6XX_RBBM_CLOCK_HYST_TP1,  0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE,  0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE,  0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
-	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
-};
-
-static const struct kgsl_hwcg_reg a640_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_ISDB_CNT, 0x00000182},
-	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
-	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
-};
-
-static const struct kgsl_hwcg_reg a650_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_ISDB_CNT, 0x00000182},
-	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
-	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
-};
-
-static const struct kgsl_hwcg_reg a620_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_ISDB_CNT, 0x00000182},
-	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
-	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
-};
-
-static const struct kgsl_hwcg_reg a612_hwcg_regs[] = {
-	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x00000081},
-	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
-	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
-	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
-	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
-	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
-	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
-	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
-	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
-	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
-	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
-	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
-	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
-	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
-	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
-	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
-	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
-	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
-	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
-	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
-	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
-	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
-	{A6XX_RBBM_ISDB_CNT, 0x00000182},
-	{A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
-	{A6XX_RBBM_SP_HYST_CNT, 0x00000000},
-	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
-	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
-};
-
-static const struct {
-	int (*devfunc)(struct adreno_device *adreno_dev);
-	const struct kgsl_hwcg_reg *regs;
-	unsigned int count;
-} a6xx_hwcg_registers[] = {
-	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
-	{adreno_is_a615_family, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)},
-	{adreno_is_a640, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)},
-	{adreno_is_a650, a650_hwcg_regs, ARRAY_SIZE(a650_hwcg_regs)},
-	{adreno_is_a680, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)},
-	{adreno_is_a612, a612_hwcg_regs, ARRAY_SIZE(a612_hwcg_regs)},
-	{adreno_is_a620, a620_hwcg_regs, ARRAY_SIZE(a620_hwcg_regs)},
-};
-
 static struct a6xx_protected_regs {
 	unsigned int base;
 	unsigned int count;
@@ -751,9 +307,9 @@ static unsigned int __get_gmu_wfi_config(struct adreno_device *adreno_dev)
 static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	const struct kgsl_hwcg_reg *regs;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	unsigned int value;
-	int i, j;
+	int i;
 
 	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
 		on = false;
@@ -777,16 +333,6 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 	if (value == 0 && !on)
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
-		if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(a6xx_hwcg_registers))
-		return;
-
-	regs = a6xx_hwcg_registers[i].regs;
-
 	/*
 	 * Disable SP clock before programming HWCG registers.
 	 * A612 GPU is not having the GX power domain. Hence
@@ -797,8 +343,9 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 		gmu_core_regrmw(device,
 			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
-	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
-		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
+	for (i = 0; i < a6xx_core->hwcg_count; i++)
+		kgsl_regwrite(device, a6xx_core->hwcg[i].offset,
+			on ? a6xx_core->hwcg[i].value : 0);
 
 	/*
 	 * Enable SP clock after programming HWCG registers.
@@ -899,6 +446,7 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
 static void a6xx_start(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	unsigned int bit, lower_bit, mal, mode, upper_bit;
 	unsigned int uavflagprd_inv;
 	unsigned int amsbc = 0;
@@ -912,11 +460,9 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	/* enable hardware clockgating */
 	a6xx_hwcg_set(adreno_dev, true);
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
-		adreno_dev->lm_threshold_count = A6XX_GMU_GENERAL_1;
-
-	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
-			ARRAY_SIZE(a6xx_vbif_platforms));
+	/* Set up VBIF registers from the GPU core definition */
+	adreno_reglist_write(adreno_dev, a6xx_core->vbif,
+		a6xx_core->vbif_count);
 
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
 		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);
@@ -975,7 +521,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 
 	/* Setting the primFifo thresholds values */
 	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL,
-			adreno_dev->gpucore->prim_fifo_threshold);
+		a6xx_core->prim_fifo_threshold);
 
 	/* Set the AHB default slave response to "ERROR" */
 	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);
@@ -1072,6 +618,9 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
+	/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
+	kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
+
 	/* Enable the GMEM save/restore feature for preemption */
 	if (adreno_is_preemption_enabled(adreno_dev))
 		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
@@ -1104,6 +653,7 @@ static int a6xx_microcode_load(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	uint64_t gpuaddr;
 	void *zap;
 	int ret = 0;
@@ -1115,8 +665,8 @@ static int a6xx_microcode_load(struct adreno_device *adreno_dev)
 				upper_32_bits(gpuaddr));
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
-		zap = subsystem_get(adreno_dev->gpucore->zap_name);
+	if (a6xx_core->zap_name && !adreno_dev->zap_loaded) {
+		zap = subsystem_get(a6xx_core->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(zap)) {
@@ -1194,13 +744,6 @@ static void _set_ordinals(struct adreno_device *adreno_dev,
 		*cmds++ = 0x00000000;
 	}
 
-	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
-		/* Start range */
-		*cmds++ = 0x00000000;
-		/* End range (inclusive) */
-		*cmds++ = 0x00000000;
-	}
-
 	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
 		*cmds++ = 0x00000000;
 
@@ -1213,15 +756,6 @@ static void _set_ordinals(struct adreno_device *adreno_dev,
 		*cmds++ = lower_32_bits(gpuaddr);
 		*cmds++ = upper_32_bits(gpuaddr);
 		*cmds++ =  0;
-
-	} else if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
-		uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
-
-		*cmds++ = lower_32_bits(gpuaddr);
-		*cmds++ = upper_32_bits(gpuaddr);
-		/* Size is in dwords */
-		*cmds++ = (sizeof(a6xx_ifpc_pwrup_reglist) +
-			sizeof(a6xx_pwrup_reglist)) >> 2;
 	}
 
 	/* Pad rest of the cmds with 0's */
@@ -1520,10 +1054,10 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 
 	if (sqe_fw->memdesc.hostptr == NULL) {
-		ret = _load_firmware(device, adreno_dev->gpucore->sqefw_name,
-				sqe_fw);
+		ret = _load_firmware(device, a6xx_core->sqefw_name, sqe_fw);
 		if (ret)
 			return ret;
 	}
@@ -1545,8 +1079,6 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	unsigned int reg;
-	unsigned long time;
-	bool vbif_acked = false;
 
 	/*
 	 * For the soft reset case with GMU enabled this part is done
@@ -1565,21 +1097,6 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
 	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
 
-	/* Wait for the VBIF reset ack to complete */
-	time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
-
-	do {
-		kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
-		if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
-			vbif_acked = true;
-			break;
-		}
-		cpu_relax();
-	} while (!time_after(jiffies, time));
-
-	if (!vbif_acked)
-		return -ETIMEDOUT;
-
 	/* Clear GBIF client halt and CX arbiter halt */
 	adreno_deassert_gbif_halt(adreno_dev);
 
@@ -1588,13 +1105,19 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 	return 0;
 }
 
+/* Number of throttling counters for A6xx */
+#define A6XX_GMU_THROTTLE_COUNTERS 3
+
 static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 {
 	int i;
 	int64_t adj = -1;
-	uint32_t counts[ADRENO_GPMU_THROTTLE_COUNTERS];
+	uint32_t counts[A6XX_GMU_THROTTLE_COUNTERS];
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return 0;
+
 	for (i = 0; i < ARRAY_SIZE(counts); i++) {
 		if (!adreno_dev->gpmu_throttle_counters[i])
 			counts[i] = 0;
@@ -1619,18 +1142,6 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	return adj;
 }
 
-static void a6xx_count_throttles(struct adreno_device *adreno_dev,
-	uint64_t adj)
-{
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
-		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
-		return;
-
-	gmu_core_regread(KGSL_DEVICE(adreno_dev),
-		adreno_dev->lm_threshold_count,
-		&adreno_dev->lm_threshold_cross);
-}
-
 /**
  * a6xx_reset() - Helper function to reset the GPU
  * @device: Pointer to the KGSL device structure for the GPU
@@ -1773,22 +1284,21 @@ static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
 	uint32_t gpu_scid;
 	uint32_t gpu_cntl1_val = 0;
 	int i;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct kgsl_mmu *mmu = &device->mmu;
 
 	gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
 	for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
 		gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
 			| gpu_scid;
 
-	if (adreno_is_a640_family(adreno_dev) ||
-			adreno_is_a612(adreno_dev) ||
-			adreno_is_a650_family(adreno_dev)) {
-		kgsl_regrmw(KGSL_DEVICE(adreno_dev), A6XX_GBIF_SCACHE_CNTL1,
+	if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
+		kgsl_regrmw(device, A6XX_GBIF_SCACHE_CNTL1,
 			A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
-	} else {
+	else
 		adreno_cx_misc_regrmw(adreno_dev,
 				A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
 				A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
-	}
 }
 
 /*
@@ -1798,12 +1308,14 @@ static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
 static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
 {
 	uint32_t gpuhtw_scid;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct kgsl_mmu *mmu = &device->mmu;
 
 	/*
-	 * On A640, the GPUHTW SCID is configured via a NoC override in the
-	 * XBL image.
+	 * On SMMU-v500, the GPUHTW SCID is configured via a NoC override in
+	 * the XBL image.
 	 */
-	if (adreno_is_a640_family(adreno_dev) || adreno_is_a612(adreno_dev))
+	if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
 		return;
 
 	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
@@ -1820,11 +1332,14 @@ static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
  */
 static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
 {
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct kgsl_mmu *mmu = &device->mmu;
+
 	/*
 	 * Attributes override through GBIF is not supported with MMU-500.
 	 * Attributes are used as configured through SMMU pagetable entries.
 	 */
-	if (adreno_is_a640_family(adreno_dev) || adreno_is_a612(adreno_dev))
+	if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
 		return;
 
 	/*
@@ -3153,7 +2668,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.perfcounters = &a6xx_perfcounters,
 	.enable_pwr_counters = a6xx_enable_pwr_counters,
 	.read_throttling_counters = a6xx_read_throttling_counters,
-	.count_throttles = a6xx_count_throttles,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
@@ -3167,6 +2681,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
 	.preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
 	.preemption_init = a6xx_preemption_init,
+	.preemption_close = a6xx_preemption_close,
 	.preemption_schedule = a6xx_preemption_schedule,
 	.set_marker = a6xx_set_marker,
 	.preemption_context_init = a6xx_preemption_context_init,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index 8c1cce6..03156fa 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
@@ -6,8 +6,42 @@
 #ifndef _ADRENO_A6XX_H_
 #define _ADRENO_A6XX_H_
 
+#include <linux/delay.h>
+
 #include "a6xx_reg.h"
 
+/**
+ * struct adreno_a6xx_core - a6xx specific GPU core definitions
+ */
+struct adreno_a6xx_core {
+	/** @base: Container for the generic GPU definitions */
+	struct adreno_gpu_core base;
+	/** @gmu_major: The maximum GMU version supported by the core */
+	u32 gmu_major;
+	/** @gmu_minor: The minimum GMU version supported by the core */
+	u32 gmu_minor;
+	/** @prim_fifo_threshold: target specific value for PC_DBG_ECO_CNTL */
+	unsigned int prim_fifo_threshold;
+	/** @pdc_address_offset: Offset for the PDC region for the target */
+	unsigned int pdc_address_offset;
+	/** @sqefw_name: Name of the SQE microcode file */
+	const char *sqefw_name;
+	/** @gmufw_name: Name of the GMU firmware file */
+	const char *gmufw_name;
+	/** @zap_name: Name of the CPZ zap file */
+	const char *zap_name;
+	/** @hwcg: List of registers and values to write for HWCG */
+	const struct adreno_reglist *hwcg;
+	/** @hwcg_count: Number of registers in @hwcg */
+	u32 hwcg_count;
+	/** @vbif: List of registers and values to write for VBIF */
+	const struct adreno_reglist *vbif;
+	/** @vbif_count: Number of registers in @vbif */
+	u32 vbif_count;
+	/** @veto_fal10: veto status for fal10 feature */
+	bool veto_fal10;
+};
+
 #define CP_CLUSTER_FE		0x0
 #define CP_CLUSTER_SP_VS	0x1
 #define CP_CLUSTER_PC_VS	0x2
@@ -99,6 +133,21 @@ struct cpu_gpu_lock {
 #define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
 		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
 
+/**
+ * to_a6xx_core - return the a6xx specific GPU core struct
+ * @adreno_dev: An Adreno GPU device handle
+ *
+ * Returns:
+ * A pointer to the a6xx specific GPU core struct
+ */
+static inline const struct adreno_a6xx_core *
+to_a6xx_core(struct adreno_device *adreno_dev)
+{
+	const struct adreno_gpu_core *core = adreno_dev->gpucore;
+
+	return container_of(core, struct adreno_a6xx_core, base);
+}
+
 /*
  * timed_poll_check() - polling *gmu* register at given offset until
  * its value changed to match expected value. The function times
@@ -199,6 +248,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
 void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
 void a6xx_preemption_start(struct adreno_device *adreno_dev);
 int a6xx_preemption_init(struct adreno_device *adreno_dev);
+void a6xx_preemption_close(struct adreno_device *adreno_dev);
 
 unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
 		unsigned int *cmds);
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index ab2d0ef..83139db 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -3,23 +3,18 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+/* soc/qcom/cmd-db.h needs types.h */
 #include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
 #include <soc/qcom/cmd-db.h>
 
-#include "kgsl_gmu_core.h"
-#include "kgsl_gmu.h"
-#include "kgsl_trace.h"
-#include "kgsl_snapshot.h"
-
 #include "adreno.h"
-#include "a6xx_reg.h"
 #include "adreno_a6xx.h"
 #include "adreno_snapshot.h"
-#include "adreno_trace.h"
+#include "kgsl_gmu.h"
+#include "kgsl_trace.h"
 
 static const unsigned int a6xx_gmu_gx_registers[] = {
 	/* GMU GX */
@@ -84,6 +79,7 @@ static int _load_gmu_rpmh_ucode(struct kgsl_device *device)
 	struct resource *res_pdc, *res_cfg, *res_seq;
 	void __iomem *cfg = NULL, *seq = NULL, *rscc;
 	unsigned int cfg_offset, seq_offset;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	u32 vrm_resource_addr = cmd_db_read_addr("vrm.soc");
 
 	/* Offsets from the base PDC (if no PDC subsections in the DTSI) */
@@ -199,7 +195,7 @@ static int _load_gmu_rpmh_ucode(struct kgsl_device *device)
 	_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
 
 	_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2,
-			adreno_dev->gpucore->pdc_address_offset);
+			a6xx_core->pdc_address_offset);
 
 	_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
 
@@ -221,7 +217,7 @@ static int _load_gmu_rpmh_ucode(struct kgsl_device *device)
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
 
-	if (adreno_is_a618(adreno_dev))
+	if (adreno_is_a618(adreno_dev) || adreno_is_a650(adreno_dev))
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x2);
 	else
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
@@ -229,7 +225,7 @@ static int _load_gmu_rpmh_ucode(struct kgsl_device *device)
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
 
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 2,
-			adreno_dev->gpucore->pdc_address_offset);
+			a6xx_core->pdc_address_offset);
 
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);
 
@@ -560,9 +556,6 @@ static int a6xx_gmu_oob_set(struct kgsl_device *device,
 	int ret = 0;
 	int set, check;
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) {
 		set = BIT(30 - req * 2);
 		check = BIT(31 - req);
@@ -607,9 +600,6 @@ static inline void a6xx_gmu_oob_clear(struct kgsl_device *device,
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	int clear;
 
-	if (!gmu_core_isenabled(device))
-		return;
-
 	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) {
 		clear = BIT(31 - req * 2);
 		if (req >= 6) {
@@ -655,7 +645,12 @@ static int a6xx_gmu_hfi_start_msg(struct kgsl_device *device)
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct hfi_start_cmd req;
 
-	if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
+	/*
+	 * This HFI was not supported in legacy firmware and this quirk
+	 * serves as a better means to identify targets that depend on
+	 * legacy firmware.
+	 */
+	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 		return hfi_send_req(KGSL_GMU_DEVICE(device),
 					 H2F_MSG_START, &req);
 
@@ -698,9 +693,6 @@ static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
 {
 	int ret = 0;
 
-	if (!gmu_core_isenabled(device))
-		return ret;
-
 	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS0_DRV0_STATUS,
 			BIT(0), GPU_RESET_TIMEOUT, BIT(0));
 	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS1_DRV0_STATUS,
@@ -855,6 +847,22 @@ static bool idle_trandition_complete(unsigned int idle_level,
 	return true;
 }
 
+static const char *idle_level_name(int level)
+{
+	if (level == GPU_HW_ACTIVE)
+		return "GPU_HW_ACTIVE";
+	else if (level == GPU_HW_SPTP_PC)
+		return "GPU_HW_SPTP_PC";
+	else if (level == GPU_HW_IFPC)
+		return "GPU_HW_IFPC";
+	else if (level == GPU_HW_NAP)
+		return "GPU_HW_NAP";
+	else if (level == GPU_HW_MIN_VOLT)
+		return "GPU_HW_MIN_VOLT";
+
+	return "";
+}
+
 static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
@@ -862,9 +870,6 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 	unsigned long t;
 	uint64_t ts1, ts2, ts3;
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	ts1 = read_AO_counter(device);
 
 	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
@@ -904,7 +909,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 		"----------------------[ GMU error ]----------------------\n");
 	dev_err(&gmu->pdev->dev,
 		"Timeout waiting for lowest idle level %s\n",
-		gpu_idle_level_names[gmu->idle_level]);
+		idle_level_name(gmu->idle_level));
 	dev_err(&gmu->pdev->dev, "Start: %llx (absolute ticks)\n", ts1);
 	dev_err(&gmu->pdev->dev, "Poll: %llx (ticks relative to start)\n",
 		ts2-ts1);
@@ -960,10 +965,15 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	uint32_t gmu_log_info;
 	int ret;
 	unsigned int chipid = 0;
 
+	/* Vote veto for FAL10 feature if supported */
+	if (a6xx_core->veto_fal10)
+		gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 0x1);
+
 	switch (boot_state) {
 	case GMU_COLD_BOOT:
 		/* Turn on TCM retention */
@@ -1068,28 +1078,24 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
  */
 static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 {
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	struct gmu_block_header *blk;
 	int ret, offset = 0;
 
-	/* there is no GMU */
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	/* GMU fw already saved and verified so do nothing new */
 	if (gmu->fw_image)
 		return 0;
 
-	if (gpucore->gpmufw_name == NULL)
+	if (a6xx_core->gmufw_name == NULL)
 		return -EINVAL;
 
-	ret = request_firmware(&gmu->fw_image, gpucore->gpmufw_name,
+	ret = request_firmware(&gmu->fw_image, a6xx_core->gmufw_name,
 			device->dev);
 	if (ret) {
 		dev_err(device->dev, "request_firmware (%s) failed: %d\n",
-				gpucore->gpmufw_name, ret);
+				a6xx_core->gmufw_name, ret);
 		return ret;
 	}
 
@@ -1222,6 +1228,13 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 	/* Check no outstanding RPMh voting */
 	a6xx_complete_rpmh_votes(device);
 
+	/* Clear the WRITEDROPPED fields and set fence to allow mode */
+	gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
+	gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+	/* Make sure above writes are committed before we proceed to recovery */
+	wmb();
+
 	/*
 	 * This is based on the assumption that GMU is the only one controlling
 	 * the GX HS. This code path is the only client voting for GX through
@@ -1350,99 +1363,51 @@ static int a6xx_gmu_rpmh_gpu_pwrctrl(struct kgsl_device *device,
 	return ret;
 }
 
-#define LM_DEFAULT_LIMIT	6000
-#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
-
-static uint32_t lm_limit(struct adreno_device *adreno_dev)
+static int _setup_throttling_counter(struct adreno_device *adreno_dev,
+						int countable, u32 *offset)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	if (*offset)
+		return 0;
 
-	if (adreno_dev->lm_limit)
-		return adreno_dev->lm_limit;
-
-	if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
-		&adreno_dev->lm_limit))
-		adreno_dev->lm_limit = LM_DEFAULT_LIMIT;
-
-	return adreno_dev->lm_limit;
+	return adreno_perfcounter_get(adreno_dev,
+			KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
+			countable, offset, NULL,
+			PERFCOUNTER_FLAG_KERNEL);
 }
 
-static int a640_throttling_counters[ADRENO_GPMU_THROTTLE_COUNTERS] = {
-	0x11, 0x15, 0x19
-};
-
 static void _setup_throttling_counters(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	int i, ret;
+	int ret;
 
-	for (i = 0; i < ARRAY_SIZE(a640_throttling_counters); i++) {
-		adreno_dev->busy_data.throttle_cycles[i] = 0;
+	ret = _setup_throttling_counter(adreno_dev, 0x10,
+				&adreno_dev->gpmu_throttle_counters[0]);
+	ret |= _setup_throttling_counter(adreno_dev, 0x15,
+				&adreno_dev->gpmu_throttle_counters[1]);
+	ret |= _setup_throttling_counter(adreno_dev, 0x19,
+				&adreno_dev->gpmu_throttle_counters[2]);
 
-		if (!a640_throttling_counters[i])
-			continue;
-		if (adreno_dev->gpmu_throttle_counters[i])
-			continue;
+	if (ret)
+		dev_err_once(&gmu->pdev->dev,
+			"Could not get all the throttling counters for LM\n");
 
-		ret = adreno_perfcounter_get(adreno_dev,
-				KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
-				a640_throttling_counters[i],
-				&adreno_dev->gpmu_throttle_counters[i],
-				NULL,
-				PERFCOUNTER_FLAG_KERNEL);
-		if (ret)
-			dev_err_once(&gmu->pdev->dev,
-				"Unable to get counter for LM: GPMU_PWR %d\n",
-				a640_throttling_counters[i]);
-	}
 }
 
-#define LIMITS_CONFIG(t, s, c, i, a) ( \
-		(t & 0xF) | \
-		((s & 0xF) << 4) | \
-		((c & 0xF) << 8) | \
-		((i & 0xF) << 12) | \
-		((a & 0xF) << 16))
-
 void a6xx_gmu_enable_lm(struct kgsl_device *device)
 {
-	int result;
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct device *dev = &gmu->pdev->dev;
-	struct hfi_lmconfig_cmd cmd;
+
+	memset(adreno_dev->busy_data.throttle_cycles, 0,
+		sizeof(adreno_dev->busy_data.throttle_cycles));
 
 	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
 			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		return;
 
-	/* a640 only needs to set up throttling counters for DCVS */
-	if (adreno_is_a640(adreno_dev)) {
-		_setup_throttling_counters(adreno_dev);
-		return;
-	}
+	_setup_throttling_counters(adreno_dev);
 
-	gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
-		GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
 	gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
-	gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
-
-	gmu->lm_config = LIMITS_CONFIG(1, 1, 1, 0, 0);
-	gmu->bcl_config = 0;
-	gmu->lm_dcvs_level = 0;
-
-	cmd.limit_conf = gmu->lm_config;
-	cmd.bcl_conf = gmu->bcl_config;
-	cmd.lm_enable_bitmask = 0;
-
-	if (gmu->lm_dcvs_level <= MAX_GX_LEVELS)
-		cmd.lm_enable_bitmask =
-			(1 << (gmu->lm_dcvs_level + 1)) - 1;
-
-	result = hfi_send_req(gmu, H2F_MSG_LM_CFG, &cmd);
-	if (result)
-		dev_err(dev, "Failure enabling limits management:%d\n", result);
 }
 
 static int a6xx_gmu_ifpc_store(struct kgsl_device *device,
@@ -1452,8 +1417,7 @@ static int a6xx_gmu_ifpc_store(struct kgsl_device *device,
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	unsigned int requested_idle_level;
 
-	if (!gmu_core_isenabled(device) ||
-			!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
 		return -EINVAL;
 
 	if ((val && gmu->idle_level >= GPU_HW_IFPC) ||
@@ -1485,7 +1449,7 @@ static unsigned int a6xx_gmu_ifpc_show(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 
-	return gmu_core_isenabled(device) && gmu->idle_level  >= GPU_HW_IFPC;
+	return gmu->idle_level >= GPU_HW_IFPC;
 }
 
 struct gmu_mem_type_desc {
@@ -1674,9 +1638,6 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
 {
 	unsigned int val;
 
-	if (!gmu_core_isenabled(device))
-		return;
-
 	a6xx_gmu_snapshot_versions(device, snapshot);
 
 	a6xx_gmu_snapshot_memories(device, snapshot);
@@ -1709,9 +1670,6 @@ static int a6xx_gmu_wait_for_active_transition(
 	unsigned int reg, num_retries;
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	gmu_core_regread(device,
 		A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
 
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 68a1e34b..3f628fb 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -1,14 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "adreno.h"
 #include "adreno_a6xx.h"
-#include "a6xx_reg.h"
-#include "adreno_trace.h"
 #include "adreno_pm4types.h"
-#include "kgsl_gmu_core.h"
+#include "adreno_trace.h"
 
 #define PREEMPT_RECORD(_field) \
 		offsetof(struct a6xx_cp_preemption_record, _field)
@@ -673,9 +671,9 @@ static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
 }
 #endif
 
-static void a6xx_preemption_close(struct kgsl_device *device)
+void a6xx_preemption_close(struct adreno_device *adreno_dev)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_preemption *preempt = &adreno_dev->preempt;
 	struct adreno_ringbuffer *rb;
 	unsigned int i;
@@ -733,7 +731,7 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
 
 err:
 	if (ret)
-		a6xx_preemption_close(device);
+		a6xx_preemption_close(adreno_dev);
 
 	return ret;
 }
diff --git a/drivers/gpu/msm/adreno_a6xx_rgmu.c b/drivers/gpu/msm/adreno_a6xx_rgmu.c
index 00f655b..5919f87 100644
--- a/drivers/gpu/msm/adreno_a6xx_rgmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_rgmu.c
@@ -1,20 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
 
-#include "kgsl_gmu_core.h"
-#include "kgsl_rgmu.h"
-#include "kgsl_trace.h"
+#include <linux/firmware.h>
+#include <linux/regulator/consumer.h>
 
 #include "adreno.h"
-#include "a6xx_reg.h"
 #include "adreno_a6xx.h"
-#include "adreno_trace.h"
 #include "adreno_snapshot.h"
+#include "kgsl_rgmu.h"
+#include "kgsl_trace.h"
 
 /* RGMU timeouts */
 #define RGMU_IDLE_TIMEOUT		100	/* ms */
@@ -103,9 +99,6 @@ static int a6xx_rgmu_oob_set(struct kgsl_device *device,
 	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
 	int ret, set, check;
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	set = BIT(req + 16);
 	check = BIT(req + 16);
 
@@ -140,9 +133,6 @@ static int a6xx_rgmu_oob_set(struct kgsl_device *device,
 static inline void a6xx_rgmu_oob_clear(struct kgsl_device *device,
 		enum oob_request req)
 {
-	if (!gmu_core_isenabled(device))
-		return;
-
 	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(req + 24));
 	trace_kgsl_gmu_oob_clear(BIT(req + 24));
 }
@@ -205,8 +195,7 @@ static int a6xx_rgmu_ifpc_store(struct kgsl_device *device,
 	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
 	unsigned int requested_idle_level;
 
-	if (!gmu_core_isenabled(device) ||
-		!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
 		return -EINVAL;
 
 	if (val)
@@ -233,7 +222,7 @@ static unsigned int a6xx_rgmu_ifpc_show(struct kgsl_device *device)
 {
 	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
 
-	return gmu_core_isenabled(device) && rgmu->idle_level == GPU_HW_IFPC;
+	return rgmu->idle_level == GPU_HW_IFPC;
 }
 
 
@@ -267,8 +256,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct kgsl_device *device)
 	unsigned long t;
 	uint64_t ts1, ts2, ts3;
 
-	if (!gmu_core_isenabled(device) ||
-			rgmu->idle_level != GPU_HW_IFPC)
+	if (rgmu->idle_level != GPU_HW_IFPC)
 		return 0;
 
 	ts1 = read_AO_counter(device);
@@ -466,9 +454,6 @@ static int a6xx_rgmu_gpu_pwrctrl(struct kgsl_device *device,
 {
 	int ret = 0;
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	switch (mode) {
 	case GMU_FW_START:
 		ret = a6xx_rgmu_fw_start(device, arg1);
@@ -501,22 +486,19 @@ static int a6xx_rgmu_gpu_pwrctrl(struct kgsl_device *device,
 static int a6xx_rgmu_load_firmware(struct kgsl_device *device)
 {
 	const struct firmware *fw = NULL;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device);
-	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	int ret;
 
-	if (!gmu_core_isenabled(device))
-		return 0;
-
 	/* RGMU fw already saved and verified so do nothing new */
 	if (rgmu->fw_hostptr)
 		return 0;
 
-	ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
+	ret = request_firmware(&fw, a6xx_core->gmufw_name, device->dev);
 	if (ret < 0) {
 		pr_err("request_firmware (%s) failed: %d\n",
-				gpucore->gpmufw_name, ret);
+				a6xx_core->gmufw_name, ret);
 		return ret;
 	}
 
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 6ca841a..dcd01b2 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -3,14 +3,9 @@
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/io.h>
-#include "kgsl.h"
 #include "adreno.h"
-#include "kgsl_snapshot.h"
-#include "adreno_snapshot.h"
-#include "a6xx_reg.h"
 #include "adreno_a6xx.h"
-#include "kgsl_gmu_core.h"
+#include "adreno_snapshot.h"
 
 #define A6XX_NUM_CTXTS 2
 #define A6XX_NUM_AXI_ARB_BLOCKS 2
@@ -585,7 +580,7 @@ static struct a6xx_shader_block a6xx_shader_blocks[] = {
 	{A6XX_SP_LB_3_DATA,               0x800},
 	{A6XX_SP_LB_4_DATA,               0x800},
 	{A6XX_SP_LB_5_DATA,               0x200},
-	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
+	{A6XX_SP_CB_BINDLESS_DATA,        0x800},
 	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
 	{A6XX_SP_UAV_DATA,                0x80,},
 	{A6XX_SP_INST_TAG,                0x80,},
@@ -708,6 +703,49 @@ static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
 	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
 }
 
+static size_t a6xx_legacy_snapshot_shader(struct kgsl_device *device,
+				u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_shader *header =
+		(struct kgsl_snapshot_shader *) buf;
+	struct a6xx_shader_block_info *info =
+		(struct a6xx_shader_block_info *) priv;
+	struct a6xx_shader_block *block = info->block;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int read_sel, val;
+	int i;
+
+	if (!device->snapshot_legacy)
+		return 0;
+
+	if (remain < SHADER_SECTION_SZ(block->sz)) {
+		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
+		return 0;
+	}
+
+	header->type = block->statetype;
+	header->index = info->bank;
+	header->size = block->sz;
+
+	read_sel = (block->statetype << A6XX_SHADER_STATETYPE_SHIFT) |
+		info->bank;
+	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
+
+	/*
+	 * An explicit barrier is needed so that reads do not happen before
+	 * the register write.
+	 */
+	mb();
+
+	for (i = 0; i < block->sz; i++) {
+		kgsl_regread(device, (A6XX_HLSQ_DBG_AHB_READ_APERTURE + i),
+			&val);
+		*data++ = val;
+	}
+
+	return SHADER_SECTION_SZ(block->sz);
+}
+
 static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
 		u8 *buf, size_t remain, void *priv)
 {
@@ -718,6 +756,9 @@ static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
 	struct a6xx_shader_block *block = info->block;
 	unsigned int *data = (unsigned int *) (buf + sizeof(*header));
 
+	if (!crash_dump_valid)
+		return a6xx_legacy_snapshot_shader(device, buf, remain, priv);
+
 	if (remain < SHADER_SECTION_SZ(block->sz)) {
 		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
 		return 0;
@@ -739,10 +780,6 @@ static void a6xx_snapshot_shader(struct kgsl_device *device,
 	unsigned int i, j;
 	struct a6xx_shader_block_info info;
 
-	/* Shader blocks can only be read by the crash dumper */
-	if (!crash_dump_valid)
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
 		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
 			info.block = &a6xx_shader_blocks[i];
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index 48b7077..d0be270 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -2,14 +2,10 @@
 /*
  * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/uaccess.h>
-#include <linux/ioctl.h>
-
-#include "kgsl.h"
-#include "kgsl_compat.h"
 
 #include "adreno.h"
 #include "adreno_compat.h"
+#include "kgsl_compat.h"
 
 int adreno_getproperty_compat(struct kgsl_device *device,
 		struct kgsl_device_getproperty *param)
diff --git a/drivers/gpu/msm/adreno_compat.h b/drivers/gpu/msm/adreno_compat.h
index eb6e2ea6..ba4f00f 100644
--- a/drivers/gpu/msm/adreno_compat.h
+++ b/drivers/gpu/msm/adreno_compat.h
@@ -6,9 +6,9 @@
 #define __ADRENO_COMPAT_H
 
 #ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-#include "kgsl.h"
-#include "kgsl_device.h"
+
+struct kgsl_device;
+struct kgsl_device_private;
 
 int adreno_getproperty_compat(struct kgsl_device *device,
 		struct kgsl_device_getproperty *param);
diff --git a/drivers/gpu/msm/adreno_coresight.c b/drivers/gpu/msm/adreno_coresight.c
index dbad93d..5a4d935 100644
--- a/drivers/gpu/msm/adreno_coresight.c
+++ b/drivers/gpu/msm/adreno_coresight.c
@@ -3,10 +3,11 @@
  * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/platform_device.h>
 #include <linux/coresight.h>
+#include <linux/of.h>
 
 #include "adreno.h"
+
 #define TO_ADRENO_CORESIGHT_ATTR(_attr) \
 	container_of(_attr, struct adreno_coresight_attr, attr)
 
diff --git a/drivers/gpu/msm/adreno_cp_parser.c b/drivers/gpu/msm/adreno_cp_parser.c
index f7951c1..d47bca3 100644
--- a/drivers/gpu/msm/adreno_cp_parser.c
+++ b/drivers/gpu/msm/adreno_cp_parser.c
@@ -3,14 +3,12 @@
  * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_snapshot.h"
+#include <linux/slab.h>
 
 #include "adreno.h"
-#include "adreno_pm4types.h"
-#include "a3xx_reg.h"
 #include "adreno_cp_parser.h"
+#include "adreno_pm4types.h"
+#include "adreno_snapshot.h"
 
 #define MAX_IB_OBJS 1000
 #define NUM_SET_DRAW_GROUPS 32
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 32ae641..bc32438 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,17 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2008-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/export.h>
-#include <linux/delay.h>
 #include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
+#include <linux/msm_kgsl.h>
 
-#include "kgsl.h"
 #include "adreno.h"
-#include "kgsl_sync.h"
 
 static int _isdb_set(void *data, u64 val)
 {
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index a7da945..43b39cf 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,20 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/jiffies.h>
-#include <linux/err.h>
+#include <linux/slab.h>
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
 #include "adreno.h"
-#include "adreno_ringbuffer.h"
 #include "adreno_trace.h"
-#include "kgsl_sharedmem.h"
 
 #define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
 
@@ -632,6 +624,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
 	secs = time.ktime;
 	nsecs = do_div(secs, 1000000000);
 
+	/*
+	 * For the first submission in any given command queue update the
+	 * expected expire time - this won't actually be used / updated until
+	 * the command queue in question goes current, but universally setting
+	 * it here avoids the possibility of some race conditions with preempt
+	 */
+
+	if (dispatch_q->inflight == 1)
+		dispatch_q->expires = jiffies +
+			msecs_to_jiffies(adreno_drawobj_timeout);
+
 	trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
 		time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
 		adreno_get_rptr(drawctxt->rb));
@@ -645,17 +648,6 @@ static int sendcmd(struct adreno_device *adreno_dev,
 		ADRENO_DISPATCH_DRAWQUEUE_SIZE;
 
 	/*
-	 * For the first submission in any given command queue update the
-	 * expected expire time - this won't actually be used / updated until
-	 * the command queue in question goes current, but universally setting
-	 * it here avoids the possibilty of some race conditions with preempt
-	 */
-
-	if (dispatch_q->inflight == 1)
-		dispatch_q->expires = jiffies +
-			msecs_to_jiffies(adreno_drawobj_timeout);
-
-	/*
 	 * If we believe ourselves to be current and preemption isn't a thing,
 	 * then set up the timer.  If this misses, then preemption is indeed a
 	 * thing and the timer will be set up in due time
@@ -1686,6 +1678,9 @@ static void adreno_fault_header(struct kgsl_device *device,
 		struct adreno_context *drawctxt =
 			ADRENO_CONTEXT(drawobj->context);
 
+		drawctxt->base.total_fault_count++;
+		drawctxt->base.last_faulted_cmd_ts = drawobj->timestamp;
+
 		trace_adreno_gpu_fault(drawobj->context->id,
 			drawobj->timestamp,
 			status, rptr, wptr, ib1base, ib1sz,
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index de2e538..fca4a15 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -1,11 +1,14 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef ____ADRENO_DISPATCHER_H
 #define ____ADRENO_DISPATCHER_H
 
+#include <linux/kobject.h>
+#include <linux/kthread.h>
+
 extern unsigned int adreno_drawobj_timeout;
 
 /*
@@ -68,6 +71,12 @@ enum adreno_dispatcher_flags {
 	ADRENO_DISPATCHER_ACTIVE = 1,
 };
 
+struct adreno_device;
+struct adreno_context;
+struct kgsl_context;
+struct kgsl_device;
+struct kgsl_device_private;
+
 void adreno_dispatcher_start(struct kgsl_device *device);
 void adreno_dispatcher_halt(struct kgsl_device *device);
 void adreno_dispatcher_unhalt(struct kgsl_device *device);
@@ -78,6 +87,8 @@ void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
 void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
 void adreno_dispatcher_stop_fault_timer(struct kgsl_device *device);
 
+struct kgsl_drawobj;
+
 int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
 		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
 		uint32_t count, uint32_t *timestamp);
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 40395bf..1132a6a 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -3,14 +3,10 @@
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/slab.h>
-#include <linux/msm_kgsl.h>
-#include <linux/sched.h>
 #include <linux/debugfs.h>
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
 #include "adreno.h"
+#include "adreno_iommu.h"
 #include "adreno_trace.h"
 
 static void wait_callback(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 50df872..436c5a2 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,10 +1,14 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __ADRENO_DRAWCTXT_H
 #define __ADRENO_DRAWCTXT_H
 
+#include <linux/types.h>
+
+#include "kgsl_device.h"
+
 struct adreno_context_type {
 	unsigned int type;
 	const char *str;
@@ -16,7 +20,6 @@ struct adreno_context_type {
 struct kgsl_device;
 struct adreno_device;
 struct kgsl_device_private;
-struct kgsl_context;
 
 /**
  * struct adreno_context - Adreno GPU draw context
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 116f0f9..1264244 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ioctl.h>
-#include "kgsl_device.h"
+#include <linux/slab.h>
+
 #include "adreno.h"
 #include "adreno_a5xx.h"
 
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 523de9b..6463881 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -2,9 +2,12 @@
 /*
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
-#include "adreno.h"
-#include "kgsl_sharedmem.h"
+
+#include <linux/slab.h>
+
 #include "a3xx_reg.h"
+#include "adreno.h"
+#include "adreno_iommu.h"
 #include "adreno_pm4types.h"
 
 /*
diff --git a/drivers/gpu/msm/adreno_llc.h b/drivers/gpu/msm/adreno_llc.h
index 5c8279a..cac4d17 100644
--- a/drivers/gpu/msm/adreno_llc.h
+++ b/drivers/gpu/msm/adreno_llc.h
@@ -1,13 +1,14 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __ADRENO_LLC_H
 #define __ADRENO_LLC_H
 
-#include "adreno.h"
 #include <linux/soc/qcom/llcc-qcom.h>
 
+#include "adreno.h"
+
 #ifdef CONFIG_QCOM_LLCC
 
 static inline bool adreno_llc_supported(void)
@@ -72,7 +73,7 @@ static inline bool adreno_llc_supported(void)
 
 static inline void *adreno_llc_getd(u32 uid)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline void adreno_llc_putd(void *desc)
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 9dab3b6..7842dd9 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -2,14 +2,13 @@
 /*
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/module.h>
-#include <linux/uaccess.h>
 
-#include "kgsl.h"
+#include <linux/slab.h>
+
+#include "a5xx_reg.h"
 #include "adreno.h"
 #include "adreno_perfcounter.h"
 #include "adreno_pm4types.h"
-#include "a5xx_reg.h"
 
 /* Bit flag for RBMM_PERFCTR_CTL */
 #define RBBM_PERFCTR_CTL_ENABLE		0x00000001
diff --git a/drivers/gpu/msm/adreno_perfcounter.h b/drivers/gpu/msm/adreno_perfcounter.h
index 2fa556e..273c5aa 100644
--- a/drivers/gpu/msm/adreno_perfcounter.h
+++ b/drivers/gpu/msm/adreno_perfcounter.h
@@ -1,12 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2015,2017,2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __ADRENO_PERFCOUNTER_H
 #define __ADRENO_PERFCOUNTER_H
 
-#include "adreno.h"
-
 struct adreno_device;
 
 /* ADRENO_PERFCOUNTERS - Given an adreno device, return the perfcounters list */
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index ccb20c0..9e3740d 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1,20 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/fs.h>
-#include <linux/kernel.h>
+
 #include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
 #include <linux/debugfs.h>
 #include <linux/sched/signal.h>
 
 #include "adreno.h"
 #include "adreno_profile.h"
-#include "kgsl_sharedmem.h"
 #include "adreno_pm4types.h"
 
 #define ASSIGNS_STR_FORMAT "%.8s:%u "
@@ -529,7 +523,7 @@ static ssize_t profile_assignments_read(struct file *filep,
 		return 0;
 	}
 
-	buf = kmalloc(max_size, GFP_KERNEL);
+	buf = kzalloc(max_size, GFP_KERNEL);
 	if (!buf) {
 		mutex_unlock(&device->mutex);
 		return -ENOMEM;
@@ -547,7 +541,7 @@ static ssize_t profile_assignments_read(struct file *filep,
 	}
 
 	size = simple_read_from_buffer(ubuf, max, ppos, buf,
-			strlen(buf));
+			pos - buf);
 
 	kfree(buf);
 
diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h
index 92340e6..8994a43 100644
--- a/drivers/gpu/msm/adreno_profile.h
+++ b/drivers/gpu/msm/adreno_profile.h
@@ -1,10 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014,2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __ADRENO_PROFILE_H
 #define __ADRENO_PROFILE_H
-#include <linux/seq_file.h>
 
 /**
  * struct adreno_profile_assigns_list: linked list for assigned perf counters
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d0ad7f0..3b49324 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -2,26 +2,16 @@
 /*
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/slab.h>
-#include <linux/sched.h>
+
 #include <linux/sched/clock.h>
-#include <linux/log2.h>
-#include <linux/time.h>
-#include <linux/delay.h>
+#include <linux/slab.h>
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_trace.h"
-#include "kgsl_pwrctrl.h"
-
+#include "a3xx_reg.h"
 #include "adreno.h"
-#include "adreno_iommu.h"
 #include "adreno_pm4types.h"
 #include "adreno_ringbuffer.h"
 #include "adreno_trace.h"
-
-#include "a3xx_reg.h"
-#include "adreno_a5xx.h"
+#include "kgsl_trace.h"
 
 #define RB_HOSTPTR(_rb, _pos) \
 	((unsigned int *) ((_rb)->buffer_desc.hostptr + \
@@ -326,6 +316,16 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev)
 			break;
 	}
 
+	if (!status && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
+		int r = 0;
+
+		if (gpudev->preemption_init)
+			r = gpudev->preemption_init(adreno_dev);
+
+		if (!WARN(r, "adreno: GPU preemption is disabled\n"))
+			set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
+	}
+
 	if (status)
 		adreno_ringbuffer_close(adreno_dev);
 	else
@@ -340,7 +340,6 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
 	kgsl_free_global(device, &rb->pagetable_desc);
-	kgsl_free_global(device, &rb->preemption_desc);
 
 	kgsl_free_global(device, &rb->buffer_desc);
 	kgsl_del_event_group(&rb->events);
@@ -350,6 +349,7 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
 void adreno_ringbuffer_close(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_ringbuffer *rb;
 	int i;
 
@@ -358,6 +358,10 @@ void adreno_ringbuffer_close(struct adreno_device *adreno_dev)
 
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
 		_adreno_ringbuffer_close(adreno_dev, rb);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
+		if (gpudev->preemption_close)
+			gpudev->preemption_close(adreno_dev);
 }
 
 /*
@@ -396,6 +400,12 @@ static inline int cp_mem_write(struct adreno_device *adreno_dev,
 	return dwords;
 }
 
+static bool _check_secured(struct adreno_context *drawctxt, unsigned int flags)
+{
+	return ((drawctxt->base.flags & KGSL_CONTEXT_SECURE) &&
+		!is_internal_cmds(flags));
+}
+
 static int
 adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 				unsigned int flags, unsigned int *cmds,
@@ -441,8 +451,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	 */
 	if (drawctxt) {
 		drawctxt->internal_timestamp = rb->timestamp;
-		if (drawctxt->base.flags & KGSL_CONTEXT_SECURE)
-			secured_ctxt = true;
+		secured_ctxt = _check_secured(drawctxt, flags);
 	}
 
 	/*
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 865edea..32186c1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -5,10 +5,6 @@
 #ifndef __ADRENO_RINGBUFFER_H
 #define __ADRENO_RINGBUFFER_H
 
-#include "kgsl_iommu.h"
-#include "adreno_iommu.h"
-#include "adreno_dispatch.h"
-
 /* Given a ringbuffer, return the adreno device that owns it */
 
 #define _RB_OFFSET(_id) (offsetof(struct adreno_device, ringbuffers) + \
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 0ba5606..8289f52 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1,18 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_snapshot.h"
-
 #include "adreno.h"
-#include "adreno_pm4types.h"
-#include "a3xx_reg.h"
 #include "adreno_cp_parser.h"
+#include "adreno_pm4types.h"
 #include "adreno_snapshot.h"
-#include "adreno_a5xx.h"
 
 #define VPC_MEMORY_BANKS 4
 
@@ -64,6 +58,19 @@ void kgsl_snapshot_push_object(struct kgsl_device *device,
 	for (index = 0; index < objbufptr; index++) {
 		if (objbuf[index].gpuaddr == gpuaddr &&
 			objbuf[index].entry->priv == process) {
+			/*
+			 * Check if newly requested size is within the
+			 * allocated range or not, otherwise continue
+			 * with previous size.
+			 */
+			if (!kgsl_gpuaddr_in_memdesc(
+				&objbuf[index].entry->memdesc,
+				gpuaddr, dwords << 2)) {
+				dev_err(device->dev,
+					"snapshot: gpuaddr 0x%016llX size is less than requested\n",
+					gpuaddr);
+				return;
+			}
 
 			objbuf[index].size = max_t(uint64_t,
 						objbuf[index].size,
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index 5943008..0e6f1f8 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -4,9 +4,7 @@
  */
 
 #include <linux/sysfs.h>
-#include <linux/device.h>
 
-#include "kgsl_device.h"
 #include "adreno.h"
 
 struct adreno_sysfs_attribute {
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 6423495..6f9209f4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2,42 +2,29 @@
 /*
  * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/module.h>
-#include <linux/fb.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/fdtable.h>
-#include <linux/list.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/dma-buf.h>
-#include <linux/pm_runtime.h>
-#include <linux/rbtree.h>
-#include <linux/major.h>
-#include <linux/io.h>
-#include <linux/mman.h>
-#include <linux/sort.h>
-#include <linux/security.h>
-#include <linux/compat.h>
-#include <linux/ctype.h>
-#include <linux/mm.h>
-#include <linux/ion.h>
-#include <asm/cacheflush.h>
-#include <uapi/linux/sched/types.h>
-#include <linux/of_fdt.h>
-#include <linux/msm-bus.h>
 
-#include "kgsl.h"
-#include "kgsl_debugfs.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_drawobj.h"
-#include "kgsl_device.h"
-#include "kgsl_trace.h"
-#include "kgsl_sync.h"
+#include <uapi/linux/sched/types.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/fdtable.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/pm_runtime.h>
+#include <linux/security.h>
+#include <linux/sort.h>
+
 #include "kgsl_compat.h"
-#include "kgsl_pool.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_device.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sync.h"
+#include "kgsl_trace.h"
 
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
@@ -344,6 +331,10 @@ kgsl_mem_entry_destroy(struct kref *kref)
 	/* pull out the memtype before the flags get cleared */
 	memtype = kgsl_memdesc_usermem_type(&entry->memdesc);
 
+	if (!(entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT))
+		kgsl_process_sub_stats(entry->priv, memtype,
+			entry->memdesc.size);
+
 	/* Detach from process list */
 	kgsl_mem_entry_detach_process(entry);
 
@@ -502,7 +493,6 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
 	entry->id = 0;
 
 	type = kgsl_memdesc_usermem_type(&entry->memdesc);
-	entry->priv->stats[type].cur -= entry->memdesc.size;
 
 	if (type != KGSL_MEM_ENTRY_ION)
 		entry->priv->gpumem_mapped -= entry->memdesc.mapsize;
@@ -1409,6 +1399,81 @@ static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
 	return ret;
 }
 
+static int kgsl_get_ctxt_fault_stats(struct kgsl_context *context,
+		struct kgsl_context_property *ctxt_property)
+{
+	struct kgsl_context_property_fault fault_stats;
+	size_t copy;
+
+	/* Return the size of the subtype struct */
+	if (ctxt_property->size == 0) {
+		ctxt_property->size = sizeof(fault_stats);
+		return 0;
+	}
+
+	memset(&fault_stats, 0, sizeof(fault_stats));
+
+	copy = min_t(size_t, ctxt_property->size, sizeof(fault_stats));
+
+	fault_stats.faults = context->total_fault_count;
+	fault_stats.timestamp = context->last_faulted_cmd_ts;
+
+	/*
+	 * Copy the context fault stats to data which also serves as
+	 * the out parameter.
+	 */
+	if (copy_to_user(u64_to_user_ptr(ctxt_property->data),
+				&fault_stats, copy))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long kgsl_get_ctxt_properties(struct kgsl_device_private *dev_priv,
+		struct kgsl_device_getproperty *param)
+{
+	/* Return fault stats of given context */
+	struct kgsl_context_property ctxt_property;
+	struct kgsl_context *context;
+	size_t copy;
+	long ret;
+
+	/*
+	 * If sizebytes is zero, tell the user how big the
+	 * ctxt_property struct should be.
+	 */
+	if (param->sizebytes == 0) {
+		param->sizebytes = sizeof(ctxt_property);
+		return 0;
+	}
+
+	memset(&ctxt_property, 0, sizeof(ctxt_property));
+
+	copy = min_t(size_t, param->sizebytes, sizeof(ctxt_property));
+
+	/* We expect the value passed in to contain the context id */
+	if (copy_from_user(&ctxt_property, param->value, copy))
+		return -EFAULT;
+
+	/* ctxt type zero is not valid, as we consider it uninitialized. */
+	if (ctxt_property.type == 0)
+		return -EINVAL;
+
+	context = kgsl_context_get_owner(dev_priv,
+			ctxt_property.contextid);
+	if (!context)
+		return -EINVAL;
+
+	if (ctxt_property.type == KGSL_CONTEXT_PROP_FAULTS)
+		ret = kgsl_get_ctxt_fault_stats(context, &ctxt_property);
+	else
+		ret = -EOPNOTSUPP;
+
+	kgsl_context_put(context);
+
+	return ret;
+}
+
 static long kgsl_prop_version(struct kgsl_device_private *dev_priv,
 		struct kgsl_device_getproperty *param)
 {
@@ -1500,6 +1565,106 @@ static long kgsl_prop_secure_ctxt_support(struct kgsl_device_private *dev_priv,
 	return 0;
 }
 
+static int kgsl_query_caps_properties(struct kgsl_device *device,
+		struct kgsl_capabilities *caps)
+{
+	struct kgsl_capabilities_properties props;
+	size_t copy;
+	u32 count, *local;
+	int ret;
+
+	/* Return the size of the subtype struct */
+	if (caps->size == 0) {
+		caps->size = sizeof(props);
+		return 0;
+	}
+
+	memset(&props, 0, sizeof(props));
+
+	copy = min_t(size_t, caps->size, sizeof(props));
+
+	if (copy_from_user(&props, u64_to_user_ptr(caps->data), copy))
+		return -EFAULT;
+
+	/* Get the number of properties */
+	count = kgsl_query_property_list(device, NULL, 0);
+
+	/*
+	 * If the incoming user count is zero, they are querying the number of
+	 * available properties. Set it and return.
+	 */
+	if (props.count == 0) {
+		props.count = count;
+		goto done;
+	}
+
+	/* Copy the lesser of the user or kernel property count */
+	if (props.count < count)
+		count = props.count;
+
+	/* Create a local buffer to store the property list */
+	local = kcalloc(count, sizeof(u32), GFP_KERNEL);
+	if (!local)
+		return -ENOMEM;
+
+	/* Get the properties */
+	props.count = kgsl_query_property_list(device, local, count);
+
+	ret = copy_to_user(u64_to_user_ptr(props.list), local,
+		props.count * sizeof(u32));
+
+	kfree(local);
+
+	if (ret)
+		return -EFAULT;
+
+done:
+	if (copy_to_user(u64_to_user_ptr(caps->data), &props, copy))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long kgsl_prop_query_capabilities(struct kgsl_device_private *dev_priv,
+		struct kgsl_device_getproperty *param)
+{
+	struct kgsl_capabilities caps;
+	long ret;
+	size_t copy;
+
+	/*
+	 * If sizebytes is zero, tell the user how big the capabilities struct
+	 * should be
+	 */
+	if (param->sizebytes == 0) {
+		param->sizebytes = sizeof(caps);
+		return 0;
+	}
+
+	memset(&caps, 0, sizeof(caps));
+
+	copy = min_t(size_t, param->sizebytes, sizeof(caps));
+
+	if (copy_from_user(&caps, param->value, copy))
+		return -EFAULT;
+
+	/* querytype must be non zero */
+	if (caps.querytype == 0)
+		return -EINVAL;
+
+	if (caps.querytype == KGSL_QUERY_CAPS_PROPERTIES)
+		ret = kgsl_query_caps_properties(dev_priv->device, &caps);
+	else {
+		/* Unsupported querytypes should return a unique return value */
+		return -EOPNOTSUPP;
+	}
+
+	if (copy_to_user(param->value, &caps, copy))
+		return -EFAULT;
+
+	return ret;
+}
+
 static const struct {
 	int type;
 	long (*func)(struct kgsl_device_private *dev_priv,
@@ -1509,6 +1674,8 @@ static const struct {
 	{ KGSL_PROP_GPU_RESET_STAT, kgsl_prop_gpu_reset_stat},
 	{ KGSL_PROP_SECURE_BUFFER_ALIGNMENT, kgsl_prop_secure_buf_alignment },
 	{ KGSL_PROP_SECURE_CTXT_SUPPORT, kgsl_prop_secure_ctxt_support },
+	{ KGSL_PROP_QUERY_CAPABILITIES, kgsl_prop_query_capabilities },
+	{ KGSL_PROP_CONTEXT_PROPERTY, kgsl_get_ctxt_properties },
 };
 
 /*call all ioctl sub functions with driver locked*/
@@ -1530,6 +1697,30 @@ long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
 	return device->ftbl->getproperty(device, param);
 }
 
+int kgsl_query_property_list(struct kgsl_device *device, u32 *list, u32 count)
+{
+	int num = 0;
+
+	if (!list) {
+		num = ARRAY_SIZE(kgsl_property_funcs);
+
+		if (device->ftbl->query_property_list)
+			num += device->ftbl->query_property_list(device, list,
+				count);
+
+		return num;
+	}
+
+	for (; num < count && num < ARRAY_SIZE(kgsl_property_funcs); num++)
+		list[num] = kgsl_property_funcs[num].type;
+
+	if (device->ftbl->query_property_list)
+		num += device->ftbl->query_property_list(device, &list[num],
+			count - num);
+
+	return num;
+}
+
 long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
 					  unsigned int cmd, void *data)
 {
@@ -2219,11 +2410,11 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
 	if (sglen == 0 || sglen >= LONG_MAX)
 		return -EINVAL;
 
-	pages = kgsl_malloc(sglen * sizeof(struct page *));
+	pages = kvcalloc(sglen, sizeof(*pages), GFP_KERNEL);
 	if (pages == NULL)
 		return -ENOMEM;
 
-	memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	memdesc->sgt = kmalloc(sizeof(*memdesc->sgt), GFP_KERNEL);
 	if (memdesc->sgt == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -2260,7 +2451,7 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
 		kfree(memdesc->sgt);
 		memdesc->sgt = NULL;
 	}
-	kgsl_free(pages);
+	kvfree(pages);
 	return ret;
 }
 
@@ -3487,6 +3678,10 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
 	param->id = entry->id;
 	param->flags = entry->memdesc.flags;
 
+	kgsl_process_add_stats(process,
+			kgsl_memdesc_usermem_type(&entry->memdesc),
+			entry->memdesc.size);
+
 	trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
 	kgsl_mem_entry_commit_process(entry);
 
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index cef4d93..aeb0ecb 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -5,23 +5,15 @@
 #ifndef __KGSL_H
 #define __KGSL_H
 
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/msm_kgsl.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
 #include <linux/cdev.h>
-#include <linux/regulator/consumer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
-#include <linux/kthread.h>
-#include <asm/cacheflush.h>
-#include <linux/compat.h>
 #include <uapi/linux/msm_kgsl.h>
 
+#include "kgsl_gmu_core.h"
+#include "kgsl_pwrscale.h"
+
 /*
  * --- kgsl drawobj flags ---
  * These flags are same as --- drawobj flags ---
@@ -457,6 +449,12 @@ struct kgsl_mem_entry *gpumem_alloc_entry(struct kgsl_device_private *dev_priv,
 				uint64_t size, uint64_t flags);
 long gpumem_free_entry(struct kgsl_mem_entry *entry);
 
+enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device);
+void kgsl_mmu_add_global(struct kgsl_device *device,
+	struct kgsl_memdesc *memdesc, const char *name);
+void kgsl_mmu_remove_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc);
+
 /* Helper functions */
 int kgsl_request_irq(struct platform_device *pdev, const  char *name,
 		irq_handler_t handler, void *data);
@@ -568,35 +566,6 @@ static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
 		(gpuaddr1 >= (gpuaddr2 + size2)));
 }
 
-/**
- * kgsl_malloc() - Use either kzalloc or vmalloc to allocate memory
- * @size: Size of the desired allocation
- *
- * Allocate a block of memory for the driver - if it is small try to allocate it
- * from kmalloc (fast!) otherwise we need to go with vmalloc (safe!)
- */
-static inline void *kgsl_malloc(size_t size)
-{
-	if (size <= PAGE_SIZE)
-		return kzalloc(size, GFP_KERNEL);
-
-	return vmalloc(size);
-}
-
-/**
- * kgsl_free() - Free memory allocated by kgsl_malloc()
- * @ptr: Pointer to the memory to free
- *
- * Free the memory be it in vmalloc or kmalloc space
- */
-static inline void kgsl_free(void *ptr)
-{
-	if (ptr != NULL && is_vmalloc_addr(ptr))
-		return vfree(ptr);
-
-	kfree(ptr);
-}
-
 static inline int kgsl_copy_from_user(void *dest, void __user *src,
 		unsigned int ksize, unsigned int usize)
 {
diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c
index 1ec626e..d38ceb035 100644
--- a/drivers/gpu/msm/kgsl_compat.c
+++ b/drivers/gpu/msm/kgsl_compat.c
@@ -1,16 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/uaccess.h>
-#include <asm/ioctl.h>
-
-#include "kgsl.h"
-#include "kgsl_compat.h"
 #include "kgsl_device.h"
+#include "kgsl_compat.h"
 #include "kgsl_sync.h"
 
 static long
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index b33b90a..8664e1c 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -1,14 +1,14 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017,2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_COMPAT_H
 #define __KGSL_COMPAT_H
 
-#ifdef CONFIG_COMPAT
 #include <linux/compat.h>
-#include "kgsl.h"
-#include "kgsl_device.h"
+#include <uapi/linux/msm_kgsl.h>
+
+#ifdef CONFIG_COMPAT
 
 struct kgsl_ibdesc_compat {
 	compat_ulong_t gpuaddr;
@@ -228,6 +228,9 @@ static inline compat_size_t sizet_to_compat(size_t size)
 	return (compat_size_t)size;
 }
 
+struct kgsl_device;
+struct kgsl_drawobj;
+
 int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
 			struct kgsl_drawobj *drawobj, void __user *cmdlist,
 			unsigned int numcmds, void __user *synclist,
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index cde0f427..43ba370 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -1,15 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2008-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/module.h>
 #include <linux/debugfs.h>
+#include <linux/io.h>
 
-#include "kgsl.h"
+#include "kgsl_debugfs.h"
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
-#include "kgsl_debugfs.h"
 
 /*default log levels is error for everything*/
 #define KGSL_LOG_LEVEL_MAX     7
@@ -31,11 +30,56 @@ static int _strict_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(_strict_fops, _strict_get, _strict_set, "%llu\n");
 
+static void kgsl_qdss_gfx_register_probe(struct kgsl_device *device)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+							"qdss_gfx");
+
+	if (res == NULL)
+		return;
+
+	device->qdss_gfx_virt = devm_ioremap(device->dev, res->start,
+							resource_size(res));
+
+	if (device->qdss_gfx_virt == NULL)
+		dev_warn(device->dev, "qdss_gfx ioremap failed\n");
+}
+
+static int _isdb_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+
+	if (device->qdss_gfx_virt == NULL)
+		kgsl_qdss_gfx_register_probe(device);
+
+	device->set_isdb_breakpoint = val ? true : false;
+	return 0;
+}
+
+static int _isdb_get(void *data, u64 *val)
+{
+	struct kgsl_device *device = data;
+
+	*val = device->set_isdb_breakpoint ? 1 : 0;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(_isdb_fops, _isdb_get, _isdb_set, "%llu\n");
+
 void kgsl_device_debugfs_init(struct kgsl_device *device)
 {
-	if (!IS_ERR_OR_NULL(kgsl_debugfs_dir))
-		device->d_debugfs = debugfs_create_dir(device->name,
+	struct dentry *snapshot_dir;
+
+	if (IS_ERR_OR_NULL(kgsl_debugfs_dir))
+		return;
+
+	device->d_debugfs = debugfs_create_dir(device->name,
 						       kgsl_debugfs_dir);
+	snapshot_dir = debugfs_create_dir("snapshot", kgsl_debugfs_dir);
+	debugfs_create_file("break_isdb", 0644, snapshot_dir, device,
+		&_isdb_fops);
 }
 
 void kgsl_device_debugfs_close(struct kgsl_device *device)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 9b7e288..36ad144 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -5,19 +5,12 @@
 #ifndef __KGSL_DEVICE_H
 #define __KGSL_DEVICE_H
 
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
 
 #include "kgsl.h"
-#include "kgsl_mmu.h"
-#include "kgsl_pwrctrl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_snapshot.h"
-#include "kgsl_sharedmem.h"
 #include "kgsl_drawobj.h"
-#include "kgsl_gmu_core.h"
+#include "kgsl_mmu.h"
 
 #define KGSL_IOCTL_FUNC(_cmd, _func) \
 	[_IOC_NR((_cmd))] = \
@@ -180,6 +173,15 @@ struct kgsl_functable {
 	void (*stop_fault_timer)(struct kgsl_device *device);
 	void (*dispatcher_halt)(struct kgsl_device *device);
 	void (*dispatcher_unhalt)(struct kgsl_device *device);
+	/**
+	 * @query_property_list: query the list of properties
+	 * supported by the device. If 'list' is NULL just return the total
+	 * number of properties available otherwise copy up to 'count' items
+	 * into the list and return the total number of items copied.
+	 */
+	int (*query_property_list)(struct kgsl_device *device, u32 *list,
+		u32 count);
+	bool (*is_hwcg_on)(struct kgsl_device *device);
 };
 
 struct kgsl_ioctl {
@@ -248,6 +250,9 @@ struct kgsl_device {
 	/* Starting physical address for GPU shader memory */
 	unsigned long shader_mem_phys;
 
+	/* Starting kernel virtual address for QDSS GFX DBG register block */
+	void __iomem *qdss_gfx_virt;
+
 	/* GPU shader memory size */
 	unsigned int shader_mem_len;
 	struct kgsl_memdesc memstore;
@@ -293,6 +298,7 @@ struct kgsl_device {
 	u32 snapshot_faultcount;	/* Total number of faults since boot */
 	bool force_panic;		/* Force panic after snapshot dump */
 	bool prioritize_unrecoverable;	/* Overwrite with new GMU snapshots */
+	bool set_isdb_breakpoint;	/* Set isdb registers before snapshot */
 
 	/* Use CP Crash dumper to get GPU snapshot*/
 	bool snapshot_crashdumper;
@@ -379,6 +385,8 @@ struct kgsl_process_private;
  * @fault_time: time of the first gpu hang in last _context_throttle_time ms
  * @user_ctxt_record: memory descriptor used by CP to save/restore VPC data
  * across preemption
+ * @total_fault_count: number of times gpu faulted in this context
+ * @last_faulted_cmd_ts: last faulted command batch timestamp
  */
 struct kgsl_context {
 	struct kref refcount;
@@ -398,6 +406,8 @@ struct kgsl_context {
 	unsigned int fault_count;
 	unsigned long fault_time;
 	struct kgsl_mem_entry *user_ctxt_record;
+	unsigned int total_fault_count;
+	unsigned int last_faulted_cmd_ts;
 };
 
 #define _context_comm(_c) \
@@ -546,6 +556,31 @@ static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
 	priv->stats[type].cur += size;
 	if (priv->stats[type].max < priv->stats[type].cur)
 		priv->stats[type].max = priv->stats[type].cur;
+	add_mm_counter(current->mm, MM_UNRECLAIMABLE, (size >> PAGE_SHIFT));
+}
+
+static inline void kgsl_process_sub_stats(struct kgsl_process_private *priv,
+	unsigned int type, uint64_t size)
+{
+	struct pid *pid_struct;
+	struct task_struct *task;
+	struct mm_struct *mm;
+
+	priv->stats[type].cur -= size;
+	pid_struct = find_get_pid(priv->pid);
+	if (pid_struct) {
+		task = get_pid_task(pid_struct, PIDTYPE_PID);
+		if (task) {
+			mm = get_task_mm(task);
+			if (mm) {
+				add_mm_counter(mm, MM_UNRECLAIMABLE,
+					-(size >> PAGE_SHIFT));
+				mmput(mm);
+			}
+			put_task_struct(task);
+		}
+		put_pid(pid_struct);
+	}
 }
 
 static inline bool kgsl_is_register_offset(struct kgsl_device *device,
@@ -890,6 +925,21 @@ void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
 	void *priv);
 
 /**
+ * kgsl_query_property_list - Get a list of valid properties
+ * @device: A KGSL device handle
+ * @list: Pointer to a list of u32s
+ * @count: Number of items in @list
+ *
+ * Populate a list with the IDs for supported properties. If @list is NULL,
+ * just return the number of properties available, otherwise fill up to @count
+ * items in the list with property identifiers.
+ *
+ * Returns the number of total properties if @list is NULL or the number of
+ * properties copied to @list.
+ */
+int kgsl_query_property_list(struct kgsl_device *device, u32 *list, u32 count);
+
+/**
  * kgsl_get_bus_scale_table() - Get the bus scaling table from devicetree
  * @device: kgsl device handle
  *
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index f515979..1c4a907 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 /*
@@ -18,16 +18,14 @@
  * goes to zero indicating no more pending events.
  */
 
-#include <linux/uaccess.h>
-#include <linux/list.h>
-#include <linux/compat.h>
+#include <linux/slab.h>
 
-#include "kgsl.h"
+#include "adreno_drawctxt.h"
+#include "kgsl_compat.h"
 #include "kgsl_device.h"
 #include "kgsl_drawobj.h"
 #include "kgsl_sync.h"
 #include "kgsl_trace.h"
-#include "kgsl_compat.h"
 
 /*
  * Define an kmem cache for the memobj & sparseobj structures since we
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index b58dff9..7f3dd26 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
@@ -6,6 +6,8 @@
 #ifndef __KGSL_DRAWOBJ_H
 #define __KGSL_DRAWOBJ_H
 
+#include <linux/kref.h>
+
 #define DRAWOBJ(obj) (&obj->base)
 #define SYNCOBJ(obj) \
 	container_of(obj, struct kgsl_drawobj_sync, base)
@@ -172,6 +174,9 @@ enum kgsl_drawobj_cmd_priv {
 	CMDOBJ_PROFILE,
 };
 
+struct kgsl_ibdesc;
+struct kgsl_cmd_syncpoint;
+
 struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
 		struct kgsl_context *context, unsigned int flags,
 		unsigned int type);
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index d3e36ad..7ee8e4fa 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -1,15 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
 #include <linux/debugfs.h>
 
-#include "kgsl_device.h"
 #include "kgsl_debugfs.h"
+#include "kgsl_device.h"
 #include "kgsl_trace.h"
 
 /*
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 5b2136c..e70b8ca0 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -2,22 +2,22 @@
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of_platform.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#include <linux/pm_opp.h>
-#include <soc/qcom/cmd-db.h>
-#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mailbox_client.h>
+#include <linux/msm-bus.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <soc/qcom/cmd-db.h>
+
+#include "adreno.h"
 #include "kgsl_device.h"
 #include "kgsl_gmu.h"
-#include "kgsl_hfi.h"
-#include "adreno.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
@@ -559,11 +559,6 @@ enum rpmh_vote_type {
 	INVALID_ARC_VOTE,
 };
 
-static const char debug_strs[][8] = {
-	[GPU_ARC_VOTE] = "gpu",
-	[GMU_ARC_VOTE] = "gmu",
-};
-
 /*
  * rpmh_arc_cmds() - query RPMh command database for GX/CX/MX rail
  * VLVL tables. The index of table will be used by GMU to vote rail
@@ -622,7 +617,7 @@ static int rpmh_arc_cmds(struct gmu_device *gmu,
  */
 static int setup_volt_dependency_tbl(uint32_t *votes,
 		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
-		unsigned int *vlvl, unsigned int num_entries)
+		u16 *vlvl, unsigned int num_entries)
 {
 	int i, j, k;
 	uint16_t cur_vlvl;
@@ -668,9 +663,20 @@ static int setup_volt_dependency_tbl(uint32_t *votes,
 	return 0;
 }
 
+
+static int rpmh_gmu_arc_votes_init(struct gmu_device *gmu,
+		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail)
+{
+	/* Hardcoded values of GMU CX voltage levels */
+	u16 gmu_cx_vlvl[] = { 0, RPMH_REGULATOR_LEVEL_MIN_SVS };
+
+	return setup_volt_dependency_tbl(gmu->rpmh_votes.cx_votes, pri_rail,
+						sec_rail, gmu_cx_vlvl, 2);
+}
+
 /*
- * rpmh_arc_votes_init() - initialized RPMh votes needed for rails voltage
- * scaling by GMU.
+ * rpmh_arc_votes_init() - initialize GX RPMh votes needed for rails
+ * voltage scaling by GMU.
  * @device: Pointer to KGSL device
  * @gmu: Pointer to GMU device
  * @pri_rail: Pointer to primary power rail VLVL table
@@ -682,74 +688,50 @@ static int rpmh_arc_votes_init(struct kgsl_device *device,
 		struct gmu_device *gmu, struct rpmh_arc_vals *pri_rail,
 		struct rpmh_arc_vals *sec_rail, unsigned int type)
 {
-	struct device *dev;
 	unsigned int num_freqs;
-	uint32_t *votes;
-	unsigned int vlvl_tbl[MAX_GX_LEVELS];
+	u16 vlvl_tbl[MAX_GX_LEVELS];
 	unsigned int *freq_tbl;
-	int i, ret;
+	int i;
 	struct dev_pm_opp *opp;
 
-	uint16_t cx_vlvl[MAX_GX_LEVELS] = { 64, 128, 192, 256, 384, 416 };
+	if (type == GMU_ARC_VOTE)
+		return rpmh_gmu_arc_votes_init(gmu, pri_rail, sec_rail);
 
-	if (type == GPU_ARC_VOTE) {
-		num_freqs = gmu->num_gpupwrlevels;
-		votes = gmu->rpmh_votes.gx_votes;
-		freq_tbl = gmu->gpu_freqs;
-		dev = &device->pdev->dev;
-	} else if (type == GMU_ARC_VOTE) {
-		num_freqs = gmu->num_gmupwrlevels;
-		votes = gmu->rpmh_votes.cx_votes;
-		freq_tbl = gmu->gmu_freqs;
-		dev = &gmu->pdev->dev;
-	} else {
-		return -EINVAL;
-	}
+	num_freqs = gmu->num_gpupwrlevels;
+	freq_tbl = gmu->gpu_freqs;
 
-	if (num_freqs > pri_rail->num) {
+	if (num_freqs > pri_rail->num || num_freqs > MAX_GX_LEVELS) {
 		dev_err(&gmu->pdev->dev,
-			"%s defined more DCVS levels than RPMh can support\n",
-			debug_strs[type]);
+			"Defined more GPU DCVS levels than RPMh can support\n");
 		return -EINVAL;
 	}
 
 	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
+
+	/* Get the values from OPP API */
 	for (i = 0; i < num_freqs; i++) {
-		/* Hardcode VLVL for 0 because it is not registered in OPP */
+		/* Hardcode VLVL 0 because it is not present in OPP */
 		if (freq_tbl[i] == 0) {
 			vlvl_tbl[i] = 0;
 			continue;
 		}
 
-		/* Hardcode GMU ARC Vote levels for A650 */
-		if (adreno_is_a650_family(ADRENO_DEVICE(device)) &&
-				type == GMU_ARC_VOTE) {
-			vlvl_tbl[i] = cx_vlvl[i];
-			continue;
-		}
+		opp = dev_pm_opp_find_freq_exact(&device->pdev->dev,
+			freq_tbl[i], true);
 
-		/* Otherwise get the value from the OPP API */
-		opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
 		if (IS_ERR(opp)) {
 			dev_err(&gmu->pdev->dev,
-				"Failed to find opp freq %d of %s\n",
-				freq_tbl[i], debug_strs[type]);
+				"Failed to find opp freq %d for GPU\n",
+				freq_tbl[i]);
 			return PTR_ERR(opp);
 		}
 
-		/* Values from OPP framework are offset by 1 */
 		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp);
 		dev_pm_opp_put(opp);
 	}
 
-	ret = setup_volt_dependency_tbl(votes,
-			pri_rail, sec_rail, vlvl_tbl, num_freqs);
-
-	if (ret)
-		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
-				debug_strs[type]);
-
-	return ret;
+	return setup_volt_dependency_tbl(gmu->rpmh_votes.gx_votes, pri_rail,
+						sec_rail, vlvl_tbl, num_freqs);
 }
 
 /*
@@ -1001,54 +983,6 @@ static irqreturn_t gmu_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
-{
-	int ret;
-	struct device_node *pwrlevel_node, *child;
-
-	/* Add the GMU OPP table if we define it */
-	if (of_find_property(gmu->pdev->dev.of_node,
-			"operating-points-v2", NULL)) {
-		ret = dev_pm_opp_of_add_table(&gmu->pdev->dev);
-		if (ret) {
-			dev_err(&gmu->pdev->dev,
-					"Unable to set the GMU OPP table: %d\n",
-					ret);
-			return ret;
-		}
-	}
-
-	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");
-	if (pwrlevel_node == NULL) {
-		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
-		return -EINVAL;
-	}
-
-	gmu->num_gmupwrlevels = 0;
-
-	for_each_child_of_node(pwrlevel_node, child) {
-		unsigned int index;
-
-		if (of_property_read_u32(child, "reg", &index))
-			return -EINVAL;
-
-		if (index >= MAX_CX_LEVELS) {
-			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
-				index);
-			continue;
-		}
-
-		if (index >= gmu->num_gmupwrlevels)
-			gmu->num_gmupwrlevels = index + 1;
-
-		if (of_property_read_u32(child, "qcom,gmu-freq",
-					&gmu->gmu_freqs[index]))
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int gmu_reg_probe(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
@@ -1355,11 +1289,16 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 	if (gmu == NULL)
 		return -ENOMEM;
 
+	gmu->pdev = of_find_device_by_node(node);
+	if (!gmu->pdev) {
+		kfree(gmu);
+		return -EINVAL;
+	}
+
 	device->gmu_core.ptr = (void *)gmu;
 	hfi = &gmu->hfi;
 	gmu->load_mode = TCM_BOOT;
 
-	gmu->pdev = of_find_device_by_node(node);
 	of_dma_configure(&gmu->pdev->dev, node, true);
 
 	/* Set up GMU regulators */
@@ -1413,11 +1352,6 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu);
 	hfi->kgsldev = device;
 
-	/* Retrieves GMU/GPU power level configurations*/
-	ret = gmu_pwrlevel_probe(gmu, node);
-	if (ret)
-		goto error;
-
 	gmu->num_gpupwrlevels = pwr->num_pwrlevels;
 
 	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
@@ -1487,10 +1421,10 @@ static int gmu_enable_clks(struct kgsl_device *device)
 	if (IS_ERR_OR_NULL(gmu->clks[0]))
 		return -EINVAL;
 
-	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
+	ret = clk_set_rate(gmu->clks[0], GMU_FREQUENCY);
 	if (ret) {
 		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
-				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
+				GMU_FREQUENCY);
 		return ret;
 	}
 
@@ -1649,9 +1583,9 @@ static int gmu_start(struct kgsl_device *device)
 		gmu_enable_clks(device);
 		gmu_dev_ops->irq_enable(device);
 
-		/* Vote for 300MHz DDR for GMU to init */
+		/* Vote for minimal DDR BW for GMU to init */
 		ret = msm_bus_scale_client_update_request(gmu->pcl,
-				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
+				pwr->pwrlevels[pwr->default_pwrlevel].bus_min);
 		if (ret)
 			dev_err(&gmu->pdev->dev,
 				"Failed to allocate gmu b/w: %d\n", ret);
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e57a844..f1dd0aa 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -1,15 +1,15 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_GMU_H
 #define __KGSL_GMU_H
 
-#include <linux/mailbox_client.h>
 #include "kgsl_gmu_core.h"
-#include <linux/firmware.h>
 #include "kgsl_hfi.h"
 
+#define GMU_PWR_LEVELS  2
+#define GMU_FREQUENCY   200000000
 #define MAX_GMUFW_SIZE	0x8000	/* in bytes */
 
 #define BWMEM_SIZE	(12 + (4 * NUM_BW_LEVELS))	/*in bytes*/
@@ -154,13 +154,7 @@ struct kgsl_mailbox {
  * @dump_mem: pointer to GMU debug dump memory
  * @gmu_log: gmu event log memory
  * @hfi: HFI controller
- * @lm_config: GPU LM configuration data
- * @lm_dcvs_level: Minimal DCVS level that enable LM. LM disable in
- *		lower levels
- * @bcl_config: Battery Current Limit configuration data
- * @gmu_freqs: GMU frequency table with lowest freq at index 0
  * @gpu_freqs: GPU frequency table with lowest freq at index 0
- * @num_gmupwrlevels: number GMU frequencies in GMU freq table
  * @num_gpupwrlevels: number GPU frequencies in GPU freq table
  * @num_bwlevel: number of GPU BW levels
  * @num_cnocbwlevel: number CNOC BW levels
@@ -196,12 +190,7 @@ struct gmu_device {
 	struct gmu_memdesc *dump_mem;
 	struct gmu_memdesc *gmu_log;
 	struct kgsl_hfi hfi;
-	unsigned int lm_config;
-	unsigned int lm_dcvs_level;
-	unsigned int bcl_config;
-	unsigned int gmu_freqs[MAX_CX_LEVELS];
 	unsigned int gpu_freqs[MAX_GX_LEVELS];
-	unsigned int num_gmupwrlevels;
 	unsigned int num_gpupwrlevels;
 	unsigned int num_bwlevels;
 	unsigned int num_cnocbwlevels;
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index fed8a5b..bf526c0 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -1,14 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/module.h>
-#include <linux/types.h>
 
+#include <linux/of.h>
+
+#include "adreno.h"
 #include "kgsl_device.h"
 #include "kgsl_gmu_core.h"
 #include "kgsl_trace.h"
-#include "adreno.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl_gmu."
diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h
index 97e8952..f702bff9 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.h
+++ b/drivers/gpu/msm/kgsl_gmu_core.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_GMU_CORE_H
 #define __KGSL_GMU_CORE_H
@@ -22,7 +22,6 @@
 #endif
 
 #define MAX_GMU_CLKS 6
-#define DEFAULT_GMU_FREQ_IDX 1
 
 /*
  * These are the different ways the GMU can boot. GMU_WARM_BOOT is waking up
@@ -87,16 +86,6 @@ enum gpu_idle_level {
 	GPU_HW_SLUMBER = 0xF
 };
 
-static const char * const gpu_idle_level_names[] = {
-	[GPU_HW_ACTIVE] = "GPU_HW_ACTIVE",
-	[GPU_HW_SPTP_PC] = "GPU_HW_SPTP_PC",
-	[GPU_HW_IFPC] = "GPU_HW_IFPC",
-	[GPU_HW_NAP] = "GPU_HW_NAP",
-	[GPU_HW_MIN_VOLT] = "GPU_HW_MIN_VOLT",
-	[GPU_HW_MIN_DDR] = "GPU_HW_MIN_DDR",
-	[GPU_HW_SLUMBER] = "GPU_HW_SLUMBER"
-};
-
 /*
  * Wait time before trying to write the register again.
  * Hopefully the GMU has finished waking up during this delay.
@@ -117,6 +106,7 @@ static const char * const gpu_idle_level_names[] = {
 #define FENCE_STATUS_WRITEDROPPED0_MASK 0x1
 #define FENCE_STATUS_WRITEDROPPED1_MASK 0x2
 
+struct device_node;
 struct kgsl_device;
 struct kgsl_snapshot;
 
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index efbaa05..00500ad 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -3,12 +3,14 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#include "kgsl_device.h"
-#include "kgsl_hfi.h"
-#include "kgsl_gmu.h"
+#include <linux/delay.h>
+
 #include "adreno.h"
+#include "adreno_a6xx.h"
+#include "kgsl_device.h"
+#include "kgsl_gmu.h"
+#include "kgsl_hfi.h"
 #include "kgsl_trace.h"
-#include "kgsl_pwrctrl.h"
 
 #define HFI_QUEUE_OFFSET(i)		\
 		(ALIGN(sizeof(struct hfi_queue_table), SZ_16) + \
@@ -428,7 +430,7 @@ static int hfi_send_dcvstbl_v1(struct gmu_device *gmu)
 	struct hfi_dcvstable_v1_cmd cmd = {
 		.hdr = CMD_MSG_HDR(H2F_MSG_PERF_TBL, sizeof(cmd)),
 		.gpu_level_num = gmu->num_gpupwrlevels,
-		.gmu_level_num = gmu->num_gmupwrlevels,
+		.gmu_level_num = GMU_PWR_LEVELS,
 	};
 	int i;
 
@@ -438,10 +440,10 @@ static int hfi_send_dcvstbl_v1(struct gmu_device *gmu)
 		cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
 	}
 
-	for (i = 0; i < gmu->num_gmupwrlevels; i++) {
-		cmd.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i];
-		cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
-	}
+	cmd.cx_votes[0].vote = gmu->rpmh_votes.cx_votes[0];
+	cmd.cx_votes[0].freq = 0;
+	cmd.cx_votes[1].vote = gmu->rpmh_votes.cx_votes[1];
+	cmd.cx_votes[1].freq = GMU_FREQUENCY / 1000;
 
 	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
 }
@@ -472,7 +474,7 @@ static int hfi_send_dcvstbl(struct gmu_device *gmu)
 	struct hfi_dcvstable_cmd cmd = {
 		.hdr = CMD_MSG_HDR(H2F_MSG_PERF_TBL, sizeof(cmd)),
 		.gpu_level_num = gmu->num_gpupwrlevels,
-		.gmu_level_num = gmu->num_gmupwrlevels,
+		.gmu_level_num = GMU_PWR_LEVELS,
 	};
 	int i;
 
@@ -484,10 +486,10 @@ static int hfi_send_dcvstbl(struct gmu_device *gmu)
 		cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
 	}
 
-	for (i = 0; i < gmu->num_gmupwrlevels; i++) {
-		cmd.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i];
-		cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
-	}
+	cmd.cx_votes[0].vote = gmu->rpmh_votes.cx_votes[0];
+	cmd.cx_votes[0].freq = 0;
+	cmd.cx_votes[1].vote = gmu->rpmh_votes.cx_votes[1];
+	cmd.cx_votes[1].freq = GMU_FREQUENCY / 1000;
 
 	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
 }
@@ -607,6 +609,7 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 		struct gmu_device *gmu)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 	int result;
 	unsigned int ver, major, minor;
 
@@ -614,8 +617,8 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 	if (gmu->ver.core != 0)
 		return 0;
 
-	major = adreno_dev->gpucore->gpmu_major;
-	minor = adreno_dev->gpucore->gpmu_minor;
+	major = a6xx_core->gmu_major;
+	minor = a6xx_core->gmu_minor;
 
 	result = hfi_get_fw_version(gmu, GMU_VERSION(major, minor), &ver);
 	if (result) {
@@ -773,13 +776,6 @@ void hfi_stop(struct gmu_device *gmu)
 int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data)
 {
 	switch (id) {
-	case H2F_MSG_LM_CFG: {
-		struct hfi_lmconfig_cmd *cmd = data;
-
-		cmd->hdr = CMD_MSG_HDR(H2F_MSG_LM_CFG, sizeof(*cmd));
-
-		return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
-	}
 	case H2F_MSG_GX_BW_PERF_VOTE: {
 		struct hfi_gx_bw_perf_vote_cmd *cmd = data;
 
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index cdc874a..8a6175f 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -1,12 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_HFI_H
 #define __KGSL_HFI_H
 
-#include <linux/types.h>
-
 #define HFI_QUEUE_SIZE			SZ_4K /* bytes, must be base 4dw */
 #define MAX_RCVD_PAYLOAD_SIZE		16		/* dwords */
 #define MAX_RCVD_SIZE			(MAX_RCVD_PAYLOAD_SIZE + 3) /* dwords */
@@ -204,14 +202,6 @@ struct hfi_fw_version_cmd {
 	uint32_t supported_ver;
 };
 
-/* H2F */
-struct hfi_lmconfig_cmd {
-	uint32_t hdr;
-	uint32_t limit_conf;
-	uint32_t bcl_conf;
-	uint32_t lm_enable_bitmask;
-};
-
 #define ARC_VOTE_GET_PRI(_v) ((_v) & 0xFF)
 #define ARC_VOTE_GET_SEC(_v) (((_v) >> 8) & 0xFF)
 #define ARC_VOTE_GET_VLVL(_v) (((_v) >> 16) & 0xFFFF)
diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c
index acacfa9..8df9166 100644
--- a/drivers/gpu/msm/kgsl_ioctl.c
+++ b/drivers/gpu/msm/kgsl_ioctl.c
@@ -1,12 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ioctl.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
 #include "kgsl_device.h"
 #include "kgsl_sync.h"
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index edd75d8..8638287 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -2,29 +2,21 @@
 /*
  * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
-#include <linux/slab.h>
+
+#include <linux/compat.h>
 #include <linux/iommu.h>
-#include <linux/msm_kgsl.h>
-#include <linux/ratelimit.h>
 #include <linux/of_platform.h>
+#include <linux/seq_file.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
-#include <linux/compat.h>
 
-#include "kgsl.h"
-#include "kgsl_device.h"
-#include "kgsl_mmu.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_iommu.h"
-#include "adreno_pm4types.h"
 #include "adreno.h"
-#include "kgsl_trace.h"
+#include "kgsl_device.h"
+#include "kgsl_iommu.h"
+#include "kgsl_mmu.h"
 #include "kgsl_pwrctrl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
 
 #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
 
@@ -259,7 +251,7 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device)
 	gpu_qdss_desc.ops = NULL;
 	gpu_qdss_desc.hostptr = NULL;
 
-	result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
+	result = kgsl_memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
 			gpu_qdss_desc.size);
 	if (result) {
 		dev_err(device->dev, "memdesc_sg_dma failed: %d\n", result);
@@ -303,7 +295,7 @@ static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
 	gpu_qtimer_desc.ops = NULL;
 	gpu_qtimer_desc.hostptr = NULL;
 
-	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
+	result = kgsl_memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
 			gpu_qtimer_desc.size);
 	if (result) {
 		dev_err(device->dev, "memdesc_sg_dma failed: %d\n", result);
@@ -744,7 +736,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct kgsl_mmu *mmu = pt->mmu;
 	struct kgsl_iommu *iommu;
 	struct kgsl_iommu_context *ctx;
-	u64 ptbase;
+	u64 ptbase, proc_ptbase;
 	u32 contextidr;
 	pid_t pid = 0;
 	pid_t ptname;
@@ -856,6 +848,17 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 			"GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr,
 			ptname,
 			context != NULL ? context->proc_priv->comm : "unknown");
+
+		if (context != NULL) {
+			proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(
+					context->proc_priv->pagetable);
+
+			if (ptbase != proc_ptbase)
+				dev_crit(ctx->kgsldev->dev,
+				"Pagetable address mismatch: HW address is 0x%llx but SW expected 0x%llx\n",
+				ptbase, proc_ptbase);
+		}
+
 		dev_crit(ctx->kgsldev->dev,
 			"context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
 			ctx->name, api_str, ptbase, contextidr,
@@ -1176,8 +1179,7 @@ void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
 		return;
 
 	/* Domain attribute to enable system cache for GPU pagetable walks */
-	if (adreno_is_a650(adreno_dev) || adreno_is_a640(adreno_dev) ||
-		adreno_is_a612(adreno_dev))
+	if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
 		ret = iommu_domain_set_attr(iommu_pt->domain,
 			DOMAIN_ATTR_USE_LLC_NWA, &gpuhtw_llc_enable);
 	else
@@ -1788,15 +1790,19 @@ static unsigned int _get_protection_flags(struct kgsl_pagetable *pt,
 {
 	unsigned int flags = IOMMU_READ | IOMMU_WRITE |
 		IOMMU_NOEXEC;
-	int ret, llc_nwa = 0;
+	int ret, llc_nwa = 0, upstream_hint = 0;
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 
 	ret = iommu_domain_get_attr(iommu_pt->domain,
+				DOMAIN_ATTR_USE_UPSTREAM_HINT, &upstream_hint);
+
+	if (!ret && upstream_hint)
+		flags |= IOMMU_USE_UPSTREAM_HINT;
+
+	ret = iommu_domain_get_attr(iommu_pt->domain,
 				DOMAIN_ATTR_USE_LLC_NWA, &llc_nwa);
 
-	if (ret || (llc_nwa == 0))
-		flags |= IOMMU_USE_UPSTREAM_HINT;
-	else
+	if (!ret && llc_nwa)
 		flags |= IOMMU_USE_LLC_NWA;
 
 	if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
@@ -2632,7 +2638,7 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
 	u32 reg_val[2];
 	int i = 0;
 	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
-	struct device_node *child;
+	struct device_node *child, *iommu_node = NULL;
 	struct platform_device *pdev = of_find_device_by_node(node);
 
 	memset(iommu, 0, sizeof(*iommu));
@@ -2692,8 +2698,15 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
 		ret = _kgsl_iommu_cb_probe(device, iommu, child);
 		if (ret)
 			return ret;
+
+		if (!iommu_node)
+			iommu_node = of_parse_phandle(child, "iommus", 0);
 	}
 
+	if (iommu_node &&
+		of_device_is_compatible(iommu_node, "qcom,qsmmu-v500"))
+		device->mmu.subtype = KGSL_IOMMU_SMMU_V500;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 3928cab..e02df39 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -5,8 +5,7 @@
 #ifndef __KGSL_IOMMU_H
 #define __KGSL_IOMMU_H
 
-#include <linux/of.h>
-#include "kgsl.h"
+#include "kgsl_mmu.h"
 
 /*
  * These defines control the address range for allocations that
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index b68844a0..df9fe39 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,19 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/types.h>
 
-#include "kgsl.h"
-#include "kgsl_mmu.h"
+#include <linux/slab.h>
+
 #include "kgsl_device.h"
+#include "kgsl_mmu.h"
 #include "kgsl_sharedmem.h"
 
 static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 47f6663..637e57d 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -5,6 +5,8 @@
 #ifndef __KGSL_MMU_H
 #define __KGSL_MMU_H
 
+#include <linux/platform_device.h>
+
 #include "kgsl_iommu.h"
 
 /* Identifier for the global page table */
@@ -28,6 +30,8 @@ enum kgsl_mmutype {
 	KGSL_MMU_TYPE_NONE
 };
 
+#define KGSL_IOMMU_SMMU_V500 1
+
 struct kgsl_pagetable {
 	spinlock_t lock;
 	struct kref refcount;
@@ -138,6 +142,7 @@ struct kgsl_mmu_pt_ops {
  * struct kgsl_mmu - Master definition for KGSL MMU devices
  * @flags: MMU device flags
  * @type: Type of MMU that is attached
+ * @subtype: Sub Type of MMU that is attached
  * @defaultpagetable: Default pagetable object for the MMU
  * @securepagetable: Default secure pagetable object for the MMU
  * @mmu_ops: Function pointers for the MMU sub-type
@@ -149,6 +154,7 @@ struct kgsl_mmu_pt_ops {
 struct kgsl_mmu {
 	unsigned long flags;
 	enum kgsl_mmutype type;
+	u32 subtype;
 	struct kgsl_pagetable *defaultpagetable;
 	struct kgsl_pagetable *securepagetable;
 	const struct kgsl_mmu_ops *mmu_ops;
@@ -186,7 +192,6 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
 unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
 		u64 ttbr0, uint64_t addr);
-enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device);
 bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, uint64_t gpuaddr);
 
 int kgsl_mmu_get_region(struct kgsl_pagetable *pagetable,
@@ -196,11 +201,6 @@ int kgsl_mmu_find_region(struct kgsl_pagetable *pagetable,
 		uint64_t region_start, uint64_t region_end,
 		uint64_t *gpuaddr, uint64_t size, unsigned int align);
 
-void kgsl_mmu_add_global(struct kgsl_device *device,
-	struct kgsl_memdesc *memdesc, const char *name);
-void kgsl_mmu_remove_global(struct kgsl_device *device,
-		struct kgsl_memdesc *memdesc);
-
 struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
 							int ptname);
 void kgsl_mmu_close(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 441fa33..14c1c58 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -1,17 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
-#include <linux/slab.h>
 #include <linux/highmem.h>
-#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/scatterlist.h>
 
-#include "kgsl.h"
 #include "kgsl_device.h"
 #include "kgsl_pool.h"
+#include "kgsl_sharedmem.h"
 
 #define KGSL_MAX_POOLS 4
 #define KGSL_MAX_POOL_ORDER 8
@@ -81,6 +80,8 @@ _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
 	list_add_tail(&p->lru, &pool->page_list);
 	pool->page_count++;
 	spin_unlock(&pool->list_lock);
+	mod_node_page_state(page_pgdat(p), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+				(PAGE_SIZE << pool->pool_order));
 }
 
 /* Returns a page from specified pool */
@@ -96,7 +97,8 @@ _kgsl_pool_get_page(struct kgsl_page_pool *pool)
 		list_del(&p->lru);
 	}
 	spin_unlock(&pool->list_lock);
-
+	mod_node_page_state(page_pgdat(p), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+				-(PAGE_SIZE << pool->pool_order));
 	return p;
 }
 
@@ -283,6 +285,22 @@ static int kgsl_pool_get_retry_order(unsigned int order)
 	return 0;
 }
 
+static unsigned int kgsl_gfp_mask(unsigned int page_order)
+{
+	unsigned int gfp_mask = __GFP_HIGHMEM;
+
+	if (page_order > 0) {
+		gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
+		gfp_mask &= ~__GFP_RECLAIM;
+	} else
+		gfp_mask |= GFP_KERNEL;
+
+	if (kgsl_sharedmem_get_noretry())
+		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
+
+	return gfp_mask;
+}
+
 /**
  * kgsl_pool_alloc_page() - Allocate a page of requested size
  * @page_size: Size of the page to be allocated
@@ -382,6 +400,8 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
 		pcount++;
 	}
 
+	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
+					(1 << order));
 	return pcount;
 
 eagain:
@@ -401,6 +421,9 @@ void kgsl_pool_free_page(struct page *page)
 
 	page_order = compound_order(page);
 
+	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
+					-(1 << page_order));
+
 	if (!kgsl_pool_max_pages ||
 			(kgsl_pool_size_total() < kgsl_pool_max_pages)) {
 		pool = _kgsl_get_pool_from_order(page_order);
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index 393a4d8..1bfa323 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -1,30 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017,2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_POOL_H
 #define __KGSL_POOL_H
 
-#include <linux/mm_types.h>
-#include "kgsl_sharedmem.h"
-
-static inline unsigned int
-kgsl_gfp_mask(unsigned int page_order)
-{
-	unsigned int gfp_mask = __GFP_HIGHMEM;
-
-	if (page_order > 0) {
-		gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
-		gfp_mask &= ~__GFP_RECLAIM;
-	} else
-		gfp_mask |= GFP_KERNEL;
-
-	if (kgsl_sharedmem_get_noretry())
-		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
-
-	return gfp_mask;
-}
-
 void kgsl_pool_free_sgt(struct sg_table *sgt);
 void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
 void kgsl_init_page_pools(struct platform_device *pdev);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 23586d0..701c669 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -3,23 +3,18 @@
  * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <asm/page.h>
-#include <linux/pm_runtime.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/msm_adreno_devfreq.h>
+#include <linux/msm_kgsl.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/thermal.h>
 
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
+#include "kgsl_pwrscale.h"
 #include "kgsl_trace.h"
-#include "kgsl_gmu_core.h"
 
 #define KGSL_PWRFLAGS_POWER_ON 0
 #define KGSL_PWRFLAGS_CLK_ON   1
@@ -53,12 +48,14 @@ static const char * const clocks[] = {
 	"rbcpr_clk",
 	"iref_clk",
 	"gmu_clk",
-	"ahb_clk"
+	"ahb_clk",
+	"smmu_vote",
 };
 
-static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
+static unsigned long ib_votes[KGSL_MAX_BUSLEVELS];
 static int last_vote_buslevel;
 static int max_vote_buslevel;
+static unsigned long last_ab;
 
 static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 					int requested_state);
@@ -120,9 +117,16 @@ static void _record_pwrevent(struct kgsl_device *device,
 /**
  * kgsl_get_bw() - Return latest msm bus IB vote
  */
-static unsigned int kgsl_get_bw(void)
+static void kgsl_get_bw(unsigned long *ib, unsigned long *ab, void *data)
 {
-	return ib_votes[last_vote_buslevel];
+	struct kgsl_device *device = (struct kgsl_device *)data;
+
+	if (gmu_core_scales_bandwidth(device))
+		*ib = 0;
+	else
+		*ib = ib_votes[last_vote_buslevel];
+
+	*ab = last_ab;
 }
 #endif
 
@@ -134,8 +138,8 @@ static unsigned int kgsl_get_bw(void)
 static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr,
 				unsigned long *ab)
 {
-	unsigned int ib = ib_votes[last_vote_buslevel];
-	unsigned int max_bw = ib_votes[max_vote_buslevel];
+	unsigned long ib = ib_votes[last_vote_buslevel];
+	unsigned long max_bw = ib_votes[max_vote_buslevel];
 
 	if (!ab)
 		return;
@@ -169,6 +173,12 @@ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level,
 					pwr->thermal_pwrlevel_floor,
 					pwr->min_pwrlevel);
 
+	/* Ensure that max/min pwrlevels are within thermal max/min limits */
+	max_pwrlevel = min_t(unsigned int, max_pwrlevel,
+					pwr->thermal_pwrlevel_floor);
+	min_pwrlevel = max_t(unsigned int, min_pwrlevel,
+					pwr->thermal_pwrlevel);
+
 	switch (pwrc->type) {
 	case KGSL_CONSTRAINT_PWRLEVEL: {
 		switch (pwrc->sub_type) {
@@ -195,13 +205,13 @@ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level,
 }
 
 #ifdef CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON
-static void kgsl_pwrctrl_vbif_update(unsigned long ab)
+static void kgsl_pwrctrl_vbif_update(void)
 {
 	/* ask a governor to vote on behalf of us */
-	devfreq_vbif_update_bw(ib_votes[last_vote_buslevel], ab);
+	devfreq_vbif_update_bw();
 }
 #else
-static void kgsl_pwrctrl_vbif_update(unsigned long ab)
+static void kgsl_pwrctrl_vbif_update(void)
 {
 }
 #endif
@@ -293,9 +303,11 @@ void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
 	/* buslevel is the IB vote, update the AB */
 	_ab_buslevel_update(pwr, &ab);
 
+	last_ab = ab;
+
 	kgsl_bus_scale_request(device, buslevel);
 
-	kgsl_pwrctrl_vbif_update(ab);
+	kgsl_pwrctrl_vbif_update();
 }
 EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
 
@@ -1768,15 +1780,13 @@ static void kgsl_thermal_timer(struct timer_list *t)
 }
 
 #ifdef CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON
-static int kgsl_pwrctrl_vbif_init(void)
+static void kgsl_pwrctrl_vbif_init(struct kgsl_device *device)
 {
-	devfreq_vbif_register_callback(kgsl_get_bw);
-	return 0;
+	devfreq_vbif_register_callback(kgsl_get_bw, device);
 }
 #else
-static int kgsl_pwrctrl_vbif_init(void)
+static void kgsl_pwrctrl_vbif_init(struct kgsl_device *device)
 {
-	return 0;
 }
 #endif
 
@@ -2099,11 +2109,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 
 	pm_runtime_enable(&pdev->dev);
 
-	/* Bus width in bytes, set it to zero if not found */
-	if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-width",
-		&pwr->bus_width))
-		pwr->bus_width = 0;
-
 	/* Check if gpu bandwidth vote device is defined in dts */
 	if (pwr->bus_control)
 		/* Check if gpu bandwidth vote device is defined in dts */
@@ -2186,7 +2191,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 	spin_lock_init(&pwr->limits_lock);
 	pwr->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
 
-	kgsl_pwrctrl_vbif_init();
+	kgsl_pwrctrl_vbif_init(device);
 
 	/* temperature sensor name */
 	of_property_read_string(pdev->dev.of_node, "qcom,tzone-name",
@@ -2677,7 +2682,6 @@ _slumber(struct kgsl_device *device)
 		kgsl_pwrctrl_clk_set_options(device, false);
 		kgsl_pwrctrl_disable(device);
 		kgsl_pwrscale_sleep(device);
-		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
 		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 						PM_QOS_DEFAULT_VALUE);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 6b6a52f..f3a5648 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -5,6 +5,7 @@
 #ifndef __KGSL_PWRCTRL_H
 #define __KGSL_PWRCTRL_H
 
+#include <linux/clk.h>
 #include <linux/pm_qos.h>
 
 /*****************************************************************************
@@ -17,7 +18,7 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
-#define KGSL_MAX_CLKS 16
+#define KGSL_MAX_CLKS 17
 #define KGSL_MAX_REGULATORS 2
 
 #define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 7cddb37f..89b227b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -3,15 +3,11 @@
  * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/hrtimer.h>
 #include <linux/devfreq_cooling.h>
-#include <linux/pm_opp.h>
+#include <linux/slab.h>
 
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
+#include "kgsl_pwrscale.h"
 #include "kgsl_trace.h"
 
 /*
@@ -928,7 +924,7 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
 	struct kgsl_pwrscale *pwrscale;
 	struct kgsl_pwrctrl *pwr;
 	struct devfreq *devfreq;
-	struct devfreq *bus_devfreq;
+	struct devfreq *bus_devfreq = NULL;
 	struct msm_adreno_extended_profile *gpu_profile;
 	struct devfreq_dev_profile *profile;
 	struct devfreq_msm_adreno_tz_data *data;
@@ -1039,9 +1035,20 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
 		pwrscale->bus_profile.profile.freq_table
 					= pwrscale->freq_table;
 
-		bus_devfreq = devfreq_add_device(device->busmondev,
-			&pwrscale->bus_profile.profile, "gpubw_mon", NULL);
-		if (!IS_ERR(bus_devfreq))
+		/*
+		 * This is needed because devfreq expects the device
+		 * to have an opp table handle to calculate the min/max
+		 * frequency.
+		 */
+		ret = dev_pm_opp_of_add_table(device->busmondev);
+		if (!ret)
+			bus_devfreq = devfreq_add_device(device->busmondev,
+				&pwrscale->bus_profile.profile, "gpubw_mon",
+				NULL);
+
+		if (IS_ERR_OR_NULL(bus_devfreq))
+			dev_err(device->dev, "Bus scaling not enabled\n");
+		else
 			pwrscale->gpu_profile.bus_devfreq = bus_devfreq;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index e44b6d0..af07d5b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -1,12 +1,11 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __KGSL_PWRSCALE_H
 #define __KGSL_PWRSCALE_H
 
-#include <linux/devfreq.h>
 #include <linux/msm_adreno_devfreq.h>
 #include "kgsl_pwrctrl.h"
 
diff --git a/drivers/gpu/msm/kgsl_rgmu.c b/drivers/gpu/msm/kgsl_rgmu.c
index 3cff6ae..9982c631 100644
--- a/drivers/gpu/msm/kgsl_rgmu.c
+++ b/drivers/gpu/msm/kgsl_rgmu.c
@@ -1,17 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/of_platform.h>
-#include <linux/clk-provider.h>
 
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+
+#include "adreno.h"
 #include "kgsl_device.h"
 #include "kgsl_rgmu.h"
-#include "kgsl_gmu_core.h"
-#include "kgsl_trace.h"
-#include "adreno.h"
 
 #define RGMU_CLK_FREQ 200000000
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index ed9fdca..a7c1471 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -3,21 +3,14 @@
  * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/export.h>
-#include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
-#include <linux/slab.h>
-#include <linux/kmemleak.h>
 #include <linux/highmem.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
-#include <linux/ratelimit.h>
 
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
 #include "kgsl_device.h"
-#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
 
 /*
  * The user can set this from debugfs to force failed memory allocations to
@@ -45,6 +38,43 @@ struct cp2_lock_req {
 #define MEM_PROTECT_LOCK_ID2		0x0A
 #define MEM_PROTECT_LOCK_ID2_FLAT	0x11
 
+int kgsl_allocate_global(struct kgsl_device *device,
+	struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
+	unsigned int priv, const char *name)
+{
+	int ret;
+
+	kgsl_memdesc_init(device, memdesc, flags);
+	memdesc->priv |= priv;
+
+	if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
+		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
+		ret = kgsl_sharedmem_alloc_contig(device, memdesc,
+						(size_t) size);
+	else {
+		ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
+		if (ret == 0) {
+			if (kgsl_memdesc_map(memdesc) == NULL) {
+				kgsl_sharedmem_free(memdesc);
+				ret = -ENOMEM;
+			}
+		}
+	}
+
+	if (ret == 0)
+		kgsl_mmu_add_global(device, memdesc, name);
+
+	return ret;
+}
+
+void kgsl_free_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc)
+{
+	kgsl_mmu_remove_global(device, memdesc);
+	kgsl_sharedmem_free(memdesc);
+}
+
+
 /* An attribute for showing per-process memory statistics */
 struct kgsl_mem_entry_attribute {
 	struct attribute attr;
@@ -620,6 +650,9 @@ static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
 			atomic_long_sub(memdesc->size,
 				&kgsl_driver.stats.coherent);
 
+		mod_node_page_state(page_pgdat(phys_to_page(memdesc->physaddr)),
+			NR_UNRECLAIMABLE_PAGES, -(memdesc->size >> PAGE_SHIFT));
+
 		dma_free_attrs(memdesc->dev, (size_t) memdesc->size,
 			memdesc->hostptr, memdesc->physaddr, attrs);
 	}
@@ -887,7 +920,8 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 	 * routine by finding the faulted page in constant time.
 	 */
 
-	memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+	memdesc->pages = kvcalloc(len_alloc, sizeof(*memdesc->pages),
+		GFP_KERNEL);
 	memdesc->page_count = 0;
 	memdesc->size = 0;
 
@@ -956,7 +990,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			memdesc->sgt = NULL;
 
 			if (ret == -EADDRNOTAVAIL) {
-				kgsl_free(memdesc->pages);
+				kvfree(memdesc->pages);
 				memset(memdesc, 0, sizeof(*memdesc));
 				return ret;
 			}
@@ -974,7 +1008,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 		 * We don't need the array for secure buffers because they are
 		 * not mapped to CPU
 		 */
-		kgsl_free(memdesc->pages);
+		kvfree(memdesc->pages);
 		memdesc->pages = NULL;
 		memdesc->page_count = 0;
 
@@ -996,7 +1030,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			}
 		}
 
-		kgsl_free(memdesc->pages);
+		kvfree(memdesc->pages);
 		memset(memdesc, 0, sizeof(*memdesc));
 	}
 
@@ -1016,11 +1050,10 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
 
 	if (memdesc->sgt) {
 		sg_free_table(memdesc->sgt);
-		kfree(memdesc->sgt);
+		kvfree(memdesc->sgt);
 	}
 
-	if (memdesc->pages)
-		kgsl_free(memdesc->pages);
+	kvfree(memdesc->pages);
 }
 EXPORT_SYMBOL(kgsl_sharedmem_free);
 
@@ -1248,6 +1281,27 @@ void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
 }
 EXPORT_SYMBOL(kgsl_get_memory_usage);
 
+int kgsl_memdesc_sg_dma(struct kgsl_memdesc *memdesc,
+		phys_addr_t addr, u64 size)
+{
+	int ret;
+	struct page *page = phys_to_page(addr);
+
+	memdesc->sgt = kmalloc(sizeof(*memdesc->sgt), GFP_KERNEL);
+	if (memdesc->sgt == NULL)
+		return -ENOMEM;
+
+	ret = sg_alloc_table(memdesc->sgt, 1, GFP_KERNEL);
+	if (ret) {
+		kfree(memdesc->sgt);
+		memdesc->sgt = NULL;
+		return ret;
+	}
+
+	sg_set_page(memdesc->sgt->sgl, page, (size_t) size, 0);
+	return 0;
+}
+
 int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
 			struct kgsl_memdesc *memdesc, uint64_t size)
 {
@@ -1269,7 +1323,7 @@ int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
 		goto err;
 	}
 
-	result = memdesc_sg_dma(memdesc, memdesc->physaddr, size);
+	result = kgsl_memdesc_sg_dma(memdesc, memdesc->physaddr, size);
 	if (result)
 		goto err;
 
@@ -1281,6 +1335,8 @@ int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
 	KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
 		&kgsl_driver.stats.coherent_max);
 
+	mod_node_page_state(page_pgdat(phys_to_page(memdesc->physaddr)),
+			NR_UNRECLAIMABLE_PAGES, (size >> PAGE_SHIFT));
 err:
 	if (result)
 		kgsl_sharedmem_free(memdesc);
@@ -1379,7 +1435,7 @@ static int kgsl_cma_alloc_secure(struct kgsl_device *device,
 		goto err;
 	}
 
-	result = memdesc_sg_dma(memdesc, memdesc->physaddr, aligned);
+	result = kgsl_memdesc_sg_dma(memdesc, memdesc->physaddr, aligned);
 	if (result)
 		goto err;
 
@@ -1396,6 +1452,9 @@ static int kgsl_cma_alloc_secure(struct kgsl_device *device,
 	/* Record statistics */
 	KGSL_STATS_ADD(aligned, &kgsl_driver.stats.secure,
 	       &kgsl_driver.stats.secure_max);
+
+	mod_node_page_state(page_pgdat(phys_to_page(memdesc->physaddr)),
+			NR_UNRECLAIMABLE_PAGES, (aligned >> PAGE_SHIFT));
 err:
 	if (result)
 		kgsl_sharedmem_free(memdesc);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index c17176a..d7bc8be 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -6,7 +6,10 @@
 #define __KGSL_SHAREDMEM_H
 
 #include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 
+#include "kgsl.h"
 #include "kgsl_mmu.h"
 
 struct kgsl_device;
@@ -152,34 +155,17 @@ kgsl_memdesc_usermem_type(const struct kgsl_memdesc *memdesc)
 }
 
 /**
- * memdesg_sg_dma() - Turn a dma_addr (from CMA) into a sg table
- * @memdesc: Pointer to the memdesc structure
+ * kgsl_memdesc_sg_dma - Turn a dma_addr (from CMA) into a sg table
+ * @memdesc: Pointer to a memory descriptor
  * @addr: Physical address from the dma_alloc function
  * @size: Size of the chunk
  *
- * Create a sg table for the contigious chunk specified by addr and size.
+ * Create a sg table for the contiguous chunk specified by addr and size.
+ *
+ * Return: 0 on success or negative on failure.
  */
-static inline int
-memdesc_sg_dma(struct kgsl_memdesc *memdesc,
-		phys_addr_t addr, uint64_t size)
-{
-	int ret;
-	struct page *page = phys_to_page(addr);
-
-	memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (memdesc->sgt == NULL)
-		return -ENOMEM;
-
-	ret = sg_alloc_table(memdesc->sgt, 1, GFP_KERNEL);
-	if (ret) {
-		kfree(memdesc->sgt);
-		memdesc->sgt = NULL;
-		return ret;
-	}
-
-	sg_set_page(memdesc->sgt->sgl, page, (size_t) size, 0);
-	return 0;
-}
+int kgsl_memdesc_sg_dma(struct kgsl_memdesc *memdesc,
+		phys_addr_t addr, u64 size);
 
 /*
  * kgsl_memdesc_is_global - is this a globally mapped buffer?
@@ -276,34 +262,9 @@ kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
  * all pagetables.  This is for use for device wide GPU allocations such as
  * ringbuffers.
  */
-static inline int kgsl_allocate_global(struct kgsl_device *device,
+int kgsl_allocate_global(struct kgsl_device *device,
 	struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
-	unsigned int priv, const char *name)
-{
-	int ret;
-
-	kgsl_memdesc_init(device, memdesc, flags);
-	memdesc->priv |= priv;
-
-	if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
-		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
-		ret = kgsl_sharedmem_alloc_contig(device, memdesc,
-						(size_t) size);
-	else {
-		ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
-		if (ret == 0) {
-			if (kgsl_memdesc_map(memdesc) == NULL) {
-				kgsl_sharedmem_free(memdesc);
-				ret = -ENOMEM;
-			}
-		}
-	}
-
-	if (ret == 0)
-		kgsl_mmu_add_global(device, memdesc, name);
-
-	return ret;
-}
+	unsigned int priv, const char *name);
 
 /**
  * kgsl_free_global() - Free a device wide GPU allocation and remove it from the
@@ -315,12 +276,7 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
  * Remove the specific memory descriptor from the global pagetable entry list
  * and free it
  */
-static inline void kgsl_free_global(struct kgsl_device *device,
-		struct kgsl_memdesc *memdesc)
-{
-	kgsl_mmu_remove_global(device, memdesc);
-	kgsl_sharedmem_free(memdesc);
-}
+void kgsl_free_global(struct kgsl_device *device, struct kgsl_memdesc *memdesc);
 
 void kgsl_sharedmem_set_noretry(bool val);
 bool kgsl_sharedmem_get_noretry(void);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index e309423..b3a56a8 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -3,18 +3,14 @@
  * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/export.h>
-#include <linux/time.h>
-#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/slab.h>
 #include <linux/utsname.h>
-#include <linux/sched.h>
-#include <linux/idr.h>
 
-#include "kgsl.h"
+#include "adreno_cp_parser.h"
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_snapshot.h"
-#include "adreno_cp_parser.h"
 
 static void kgsl_snapshot_save_frozen_objs(struct work_struct *work);
 
@@ -598,6 +594,47 @@ static void kgsl_free_snapshot(struct kgsl_snapshot *snapshot)
 	dev_err(device->dev, "snapshot: objects released\n");
 }
 
+#define SP0_ISDB_ISDB_BRKPT_CFG 0x40014
+#define SP0_ISDB_ISDB_EN 0x40004
+#define SP0_ISDB_ISDB_CMD 0x4000C
+
+static void isdb_write(void __iomem *base, u32 offset)
+{
+	/* To set the SCHBREAKTYPE bit */
+	__raw_writel(0x800, base + SP0_ISDB_ISDB_BRKPT_CFG + offset);
+
+	/*
+	 * ensure the configurations are set before
+	 * enabling ISDB
+	 */
+	wmb();
+	/* To set the ISDBCLKON and ISDB_EN bits*/
+	__raw_writel(0x03, base + SP0_ISDB_ISDB_EN + offset);
+
+	/*
+	 * ensure previous write to enable isdb posts
+	 * before issuing the break command
+	 */
+	wmb();
+	/*To issue ISDB_0_ISDB_CMD_BREAK*/
+	__raw_writel(0x1, base + SP0_ISDB_ISDB_CMD + offset);
+}
+
+static void set_isdb_breakpoint_registers(struct kgsl_device *device)
+{
+	if (!device->set_isdb_breakpoint || device->ftbl->is_hwcg_on(device)
+					|| device->qdss_gfx_virt == NULL)
+		return;
+
+	/* Issue break command for all six SPs */
+	isdb_write(device->qdss_gfx_virt, 0x0000);
+	isdb_write(device->qdss_gfx_virt, 0x1000);
+	isdb_write(device->qdss_gfx_virt, 0x2000);
+	isdb_write(device->qdss_gfx_virt, 0x3000);
+	isdb_write(device->qdss_gfx_virt, 0x4000);
+	isdb_write(device->qdss_gfx_virt, 0x5000);
+}
+
 /**
  * kgsl_snapshot() - construct a device snapshot
  * @device: device to snapshot
@@ -615,6 +652,8 @@ void kgsl_device_snapshot(struct kgsl_device *device,
 	struct timespec boot;
 	phys_addr_t pa;
 
+	set_isdb_breakpoint_registers(device);
+
 	if (device->snapshot_memory.ptr == NULL) {
 		dev_err(device->dev,
 			     "snapshot: no snapshot memory available\n");
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 477f3aa..1fa5122 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
@@ -279,6 +279,9 @@ struct kgsl_snapshot_gpu_object_v2 {
 	__u64 size;    /* Size of the object (in dwords) */
 } __packed;
 
+struct kgsl_device;
+struct kgsl_process_private;
+
 void kgsl_snapshot_push_object(struct kgsl_device *device,
 		struct kgsl_process_private *process,
 		uint64_t gpuaddr, uint64_t dwords);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 5c1f71e..38b5c88 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,16 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/err.h>
 #include <linux/file.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/uaccess.h>
+#include <linux/sync_file.h>
 
-#include <asm/current.h>
-
+#include "kgsl_device.h"
 #include "kgsl_sync.h"
 
 static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *timeline,
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 8a234ef..989658d 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,12 +1,11 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2014, 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2018-2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_SYNC_H
 #define __KGSL_SYNC_H
 
-#include <linux/sync_file.h>
-#include "kgsl_device.h"
+#include <linux/dma-fence.h>
 
 /**
  * struct kgsl_sync_timeline - A sync timeline associated with a kgsl context
@@ -68,6 +67,10 @@ struct kgsl_sync_fence_cb {
 	bool (*func)(void *priv);
 };
 
+struct kgsl_device_private;
+struct kgsl_drawobj_sync_event;
+struct event_fence_info;
+struct kgsl_process_private;
 struct kgsl_syncsource;
 
 #if defined(CONFIG_SYNC_FILE)
@@ -101,9 +104,6 @@ void kgsl_syncsource_put(struct kgsl_syncsource *syncsource);
 void kgsl_syncsource_process_release_syncsources(
 		struct kgsl_process_private *private);
 
-void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
-					char *fence_str, int len);
-
 #else
 static inline int kgsl_add_fence_event(struct kgsl_device *device,
 	u32 context_id, u32 timestamp, void __user *data, int len,
@@ -178,11 +178,6 @@ static inline void kgsl_syncsource_process_release_syncsources(
 
 }
 
-static inline void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
-					char *fence_str, int len)
-{
-}
-
 #endif /* CONFIG_SYNC_FILE */
 
 #endif /* __KGSL_SYNC_H */
diff --git a/drivers/gpu/msm/kgsl_trace.c b/drivers/gpu/msm/kgsl_trace.c
index d22b7e3..30a7bcb 100644
--- a/drivers/gpu/msm/kgsl_trace.c
+++ b/drivers/gpu/msm/kgsl_trace.c
@@ -1,11 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011, 2013, 2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011,2013,2015,2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
 
-#include "kgsl.h"
 #include "kgsl_device.h"
 
 /* Instantiate tracepoints */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 966f791..3106e88 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
@@ -14,10 +14,11 @@
 #define TRACE_INCLUDE_FILE kgsl_trace
 
 #include <linux/tracepoint.h>
-#include "kgsl_device.h"
-#include "adreno_drawctxt.h"
 
-struct kgsl_device;
+#include "kgsl.h"
+#include "kgsl_drawobj.h"
+#include "kgsl_sharedmem.h"
+
 struct kgsl_ringbuffer_issueibcmds;
 struct kgsl_device_waittimestamp;
 
@@ -1215,7 +1216,7 @@ TRACE_EVENT(kgsl_clock_throttling,
 		__entry->crc_less50pct = crc_less50pct;
 		__entry->adj = adj;
 	),
-	TP_printk("idle_10=%d crc_50=%d crc_more50=%d crc_less50=%d adj=%llx",
+	TP_printk("idle_10=%d crc_50=%d crc_more50=%d crc_less50=%d adj=%lld",
 		__entry->idle_10pct, __entry->crc_50pct, __entry->crc_more50pct,
 		__entry->crc_less50pct, __entry->adj
 	)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 44564f6..8613755 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -215,13 +215,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
  * Add a usage to the temporary parser table.
  */
 
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
 {
 	if (parser->local.usage_index >= HID_MAX_USAGES) {
 		hid_err(parser->device, "usage index exceeded\n");
 		return -1;
 	}
 	parser->local.usage[parser->local.usage_index] = usage;
+	parser->local.usage_size[parser->local.usage_index] = size;
 	parser->local.collection_index[parser->local.usage_index] =
 		parser->collection_stack_ptr ?
 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -482,10 +483,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
-		if (item->size <= 2)
-			data = (parser->global.usage_page << 16) + data;
-
-		return hid_add_usage(parser, data);
+		return hid_add_usage(parser, data, item->size);
 
 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 
@@ -494,9 +492,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
-		if (item->size <= 2)
-			data = (parser->global.usage_page << 16) + data;
-
 		parser->local.usage_minimum = data;
 		return 0;
 
@@ -507,9 +502,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
-		if (item->size <= 2)
-			data = (parser->global.usage_page << 16) + data;
-
 		count = data - parser->local.usage_minimum;
 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
 			/*
@@ -529,7 +521,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 		}
 
 		for (n = parser->local.usage_minimum; n <= data; n++)
-			if (hid_add_usage(parser, n)) {
+			if (hid_add_usage(parser, n, item->size)) {
 				dbg_hid("hid_add_usage failed\n");
 				return -1;
 			}
@@ -544,6 +536,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 }
 
 /*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_usage_page(struct hid_parser *parser)
+{
+	int i;
+
+	for (i = 0; i < parser->local.usage_index; i++)
+		if (parser->local.usage_size[i] <= 2)
+			parser->local.usage[i] += parser->global.usage_page << 16;
+}
+
+/*
  * Process a main item.
  */
 
@@ -552,6 +560,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
 	__u32 data;
 	int ret;
 
+	hid_concatenate_usage_page(parser);
+
 	data = item_udata(item);
 
 	switch (item->tag) {
@@ -761,6 +771,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
 	__u32 data;
 	int i;
 
+	hid_concatenate_usage_page(parser);
+
 	data = item_udata(item);
 
 	switch (item->tag) {
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ebc9ffd..a353a01 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
 	seq_printf(f, "\n\n");
 
 	/* dump parsed data and input mappings */
+	if (down_interruptible(&hdev->driver_input_lock))
+		return 0;
+
 	hid_dump_device(hdev, f);
 	seq_printf(f, "\n");
 	hid_dump_input_mapping(hdev, f);
 
+	up(&hdev->driver_input_lock);
+
 	return 0;
 }
 
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a3916e58..d988b92 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -677,6 +677,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 			break;
 		}
 
+		if ((usage->hid & 0xf0) == 0xb0) {	/* SC - Display */
+			switch (usage->hid & 0xf) {
+			case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+			default: goto ignore;
+			}
+			break;
+		}
+
 		/*
 		 * Some lazy vendors declare 255 usages for System Control,
 		 * leading to the creation of ABS_X|Y axis and too many others.
@@ -895,6 +903,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 		case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);		break;
 		case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);		break;
 
+		case 0x079: map_key_clear(KEY_KBDILLUMUP);	break;
+		case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);	break;
+		case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);	break;
+
 		case 0x082: map_key_clear(KEY_VIDEO_NEXT);	break;
 		case 0x083: map_key_clear(KEY_LAST);		break;
 		case 0x084: map_key_clear(KEY_ENTER);		break;
@@ -982,6 +994,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 		case 0x1b8: map_key_clear(KEY_VIDEO);		break;
 		case 0x1bc: map_key_clear(KEY_MESSENGER);	break;
 		case 0x1bd: map_key_clear(KEY_INFO);		break;
+		case 0x1cb: map_key_clear(KEY_ASSISTANT);	break;
 		case 0x201: map_key_clear(KEY_NEW);		break;
 		case 0x202: map_key_clear(KEY_OPEN);		break;
 		case 0x203: map_key_clear(KEY_CLOSE);		break;
@@ -1025,6 +1038,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 		case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);	break;
 		case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);	break;
 
+		case 0x29f: map_key_clear(KEY_SCALE);		break;
+
 		default: map_key_clear(KEY_UNKNOWN);
 		}
 		break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 19cc980..e642cfa 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -725,13 +725,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
 
 static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
 {
+	const u8 ping_byte = 0x5a;
+	u8 ping_data[3] = { 0, 0, ping_byte };
 	struct hidpp_report response;
 	int ret;
 
-	ret = hidpp_send_fap_command_sync(hidpp,
+	ret = hidpp_send_rap_command_sync(hidpp,
+			REPORT_ID_HIDPP_SHORT,
 			HIDPP_PAGE_ROOT_IDX,
 			CMD_ROOT_GET_PROTOCOL_VERSION,
-			NULL, 0, &response);
+			ping_data, sizeof(ping_data), &response);
 
 	if (ret == HIDPP_ERROR_INVALID_SUBID) {
 		hidpp->protocol_major = 1;
@@ -751,8 +754,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
 	if (ret)
 		return ret;
 
-	hidpp->protocol_major = response.fap.params[0];
-	hidpp->protocol_minor = response.fap.params[1];
+	if (response.rap.params[2] != ping_byte) {
+		hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
+			__func__, response.rap.params[2], ping_byte);
+		return -EPROTO;
+	}
+
+	hidpp->protocol_major = response.rap.params[0];
+	hidpp->protocol_minor = response.rap.params[1];
 
 	return ret;
 }
@@ -901,7 +910,11 @@ static int hidpp_map_battery_level(int capacity)
 {
 	if (capacity < 11)
 		return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
-	else if (capacity < 31)
+	/*
+	 * The spec says this should be < 31 but some devices report 30
+	 * with brand new batteries and Windows reports 30 as "Good".
+	 */
+	else if (capacity < 30)
 		return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
 	else if (capacity < 81)
 		return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
@@ -1907,6 +1920,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
 		kfree(data);
 		return -ENOMEM;
 	}
+	data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+	if (!data->wq) {
+		kfree(data->effect_ids);
+		kfree(data);
+		return -ENOMEM;
+	}
+
 	data->hidpp = hidpp;
 	data->feature_index = feature_index;
 	data->version = version;
@@ -1951,7 +1971,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
 	/* ignore boost value at response.fap.params[2] */
 
 	/* init the hardware command queue */
-	data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
 	atomic_set(&data->workqueue_size, 0);
 
 	/* initialize with zero autocenter to get wheel in usable state */
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 72ee77b..a9c103c 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -752,7 +752,6 @@ static const struct hid_device_id hid_ignore_list[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -1033,6 +1032,10 @@ bool hid_ignore(struct hid_device *hdev)
 		if (hdev->product == 0x0401 &&
 		    strncmp(hdev->name, "ELAN0800", 8) != 0)
 			return true;
+		/* Same with product id 0x0400 */
+		if (hdev->product == 0x0400 &&
+		    strncmp(hdev->name, "QTEC0001", 8) != 0)
+			return true;
 		break;
 	}
 
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index 1d645c9..cac262a 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -337,7 +337,8 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
 		},
 		.driver_data = (void *)&sipodev_desc
-	}
+	},
+	{ }	/* Terminate list */
 };
 
 
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 748a1c41..8e923e7 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -402,7 +402,6 @@ int hv_synic_cleanup(unsigned int cpu)
 
 		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
 		hv_ce_shutdown(hv_cpu->clk_evt);
-		put_cpu_ptr(hv_cpu);
 	}
 
 	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 73c6811..623736d 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -96,17 +96,23 @@ superio_select(int base, int ld)
 	outb(ld, base + 1);
 }
 
-static inline void
+static inline int
 superio_enter(int base)
 {
+	if (!request_muxed_region(base, 2, DRVNAME))
+		return -EBUSY;
+
 	outb(0x87, base);
 	outb(0x87, base);
+
+	return 0;
 }
 
 static inline void
 superio_exit(int base)
 {
 	outb(0xaa, base);
+	release_region(base, 2);
 }
 
 /*
@@ -1561,7 +1567,7 @@ static int __init f71805f_device_add(unsigned short address,
 static int __init f71805f_find(int sioaddr, unsigned short *address,
 			       struct f71805f_sio_data *sio_data)
 {
-	int err = -ENODEV;
+	int err;
 	u16 devid;
 
 	static const char * const names[] = {
@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
 		"F71872F/FG or F71806F/FG",
 	};
 
-	superio_enter(sioaddr);
+	err = superio_enter(sioaddr);
+	if (err)
+		return err;
 
+	err = -ENODEV;
 	devid = superio_inw(sioaddr, SIO_REG_MANID);
 	if (devid != SIO_FINTEK_ID)
 		goto exit;
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index dc5a9d5..81a05cd 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
 #define LD_IN		1
 #define LD_TEMP		1
 
+static inline int superio_enter(int sioaddr)
+{
+	if (!request_muxed_region(sioaddr, 2, DRVNAME))
+		return -EBUSY;
+	return 0;
+}
+
 static inline void superio_outb(int sioaddr, int reg, int val)
 {
 	outb(reg, sioaddr);
@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
 {
 	outb(0x02, sioaddr);
 	outb(0x02, sioaddr + 1);
+	release_region(sioaddr, 2);
 }
 
 /*
@@ -1220,7 +1228,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
 {
 	u16 val;
 	u8 cfg, cfg_b;
-	int i, err = 0;
+	int i, err;
+
+	err = superio_enter(sioaddr);
+	if (err)
+		return err;
 
 	/* Identify device */
 	val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 9d611dd..7f01fad 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -250,7 +250,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
 
 	ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
 	if (ret)
-		return ret;
+		goto err_pwm_disable;
 
 	ctx->pwm_fan_state = ctx->pwm_fan_max_state;
 	if (IS_ENABLED(CONFIG_THERMAL)) {
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 6bd2007..cbdb5c4 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
 	superio_outb(0x07, ld);
 }
 
-static inline void superio_enter(void)
+static inline int superio_enter(void)
 {
+	if (!request_muxed_region(REG, 2, DRVNAME))
+		return -EBUSY;
+
 	outb(0x55, REG);
+	return 0;
 }
 
 static inline void superio_exit(void)
 {
 	outb(0xAA, REG);
+	release_region(REG, 2);
 }
 
 #define SUPERIO_REG_DEVID	0x20
@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
 	u8 id, rev;
 	char *name;
 	unsigned short addr;
+	int err;
 
-	superio_enter();
+	err = superio_enter();
+	if (err)
+		return err;
+
 	id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
 
 	switch (id) {
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c7b6a42..5eeac98 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -73,16 +73,21 @@ superio_inb(int reg)
 /* logical device for fans is 0x0A */
 #define superio_select() superio_outb(0x07, 0x0A)
 
-static inline void
+static inline int
 superio_enter(void)
 {
+	if (!request_muxed_region(REG, 2, DRVNAME))
+		return -EBUSY;
+
 	outb(0x55, REG);
+	return 0;
 }
 
 static inline void
 superio_exit(void)
 {
 	outb(0xAA, REG);
+	release_region(REG, 2);
 }
 
 #define SUPERIO_REG_ACT		0x30
@@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
 {
 	u8 val;
 	unsigned short addr;
+	int err;
 
-	superio_enter();
+	err = superio_enter();
+	if (err)
+		return err;
+
 	val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
 
 	/*
@@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
 static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
 {
 	if ((sio_data->activate & 0x01) == 0) {
-		superio_enter();
-		superio_select();
-
-		pr_info("Disabling device\n");
-		superio_outb(SUPERIO_REG_ACT, sio_data->activate);
-
-		superio_exit();
+		if (!superio_enter()) {
+			superio_select();
+			pr_info("Disabling device\n");
+			superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+			superio_exit();
+		} else {
+			pr_warn("Failed to disable device\n");
+		}
 	}
 }
 
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3a6bfa5..95d5e8e 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
 	outb(ldn, sio_cip + 1);
 }
 
-static inline void superio_enter(int sio_cip)
+static inline int superio_enter(int sio_cip)
 {
+	if (!request_muxed_region(sio_cip, 2, DRVNAME))
+		return -EBUSY;
+
 	outb(0x87, sio_cip);
 	outb(0x87, sio_cip);
+
+	return 0;
 }
 
 static inline void superio_exit(int sio_cip)
 {
 	outb(0xaa, sio_cip);
+	release_region(sio_cip, 2);
 }
 
 /* ---------------------------------------------------------------------
@@ -1282,11 +1288,14 @@ static int __init vt1211_device_add(unsigned short address)
 
 static int __init vt1211_find(int sio_cip, unsigned short *address)
 {
-	int err = -ENODEV;
+	int err;
 	int devid;
 
-	superio_enter(sio_cip);
+	err = superio_enter(sio_cip);
+	if (err)
+		return err;
 
+	err = -ENODEV;
 	devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
 	if (devid != SIO_VT1211_ID)
 		goto EXIT;
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 38e58a0..637299b 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight Trace Memory Controller driver
  */
@@ -10,11 +10,15 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
+#include <linux/usb/usb_qdss.h>
 
 #include "coresight-byte-cntr.h"
 #include "coresight-priv.h"
 #include "coresight-tmc.h"
 
+#define USB_BLK_SIZE 65536
+#define USB_BUF_NUM 255
+
 static struct tmc_drvdata *tmcdrvdata;
 
 static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
@@ -39,10 +43,14 @@ static irqreturn_t etr_handler(int irq, void *data)
 {
 	struct byte_cntr *byte_cntr_data = data;
 
-	atomic_inc(&byte_cntr_data->irq_cnt);
-
-	wake_up(&byte_cntr_data->wq);
-
+	if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB
+	    && byte_cntr_data->sw_usb) {
+		atomic_inc(&byte_cntr_data->irq_cnt);
+		wake_up(&byte_cntr_data->usb_wait_wq);
+	} else if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+		atomic_inc(&byte_cntr_data->irq_cnt);
+		wake_up(&byte_cntr_data->wq);
+	}
 	return IRQ_HANDLED;
 }
 
@@ -64,23 +72,29 @@ static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
 {
 	struct byte_cntr *byte_cntr_data = fp->private_data;
 	char *bufp;
-
+	int ret = 0;
 	if (!data)
 		return -EINVAL;
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
-	if (!byte_cntr_data->read_active)
+	if (!byte_cntr_data->read_active) {
+		ret = -EINVAL;
 		goto err0;
+	}
 
 	if (byte_cntr_data->enable) {
 		if (!atomic_read(&byte_cntr_data->irq_cnt)) {
 			mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 			if (wait_event_interruptible(byte_cntr_data->wq,
-				atomic_read(&byte_cntr_data->irq_cnt) > 0))
+				atomic_read(&byte_cntr_data->irq_cnt) > 0
+				|| !byte_cntr_data->enable))
 				return -ERESTARTSYS;
 			mutex_lock(&byte_cntr_data->byte_cntr_lock);
-			if (!byte_cntr_data->read_active)
+			if (!byte_cntr_data->read_active) {
+				ret = -EINVAL;
 				goto err0;
+			}
+
 		}
 
 		tmc_etr_read_bytes(byte_cntr_data, ppos,
@@ -90,8 +104,10 @@ static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
 		if (!atomic_read(&byte_cntr_data->irq_cnt)) {
 			tmc_etr_flush_bytes(ppos, byte_cntr_data->block_size,
 						  &len);
-			if (!len)
+			if (!len) {
+				ret = -EINVAL;
 				goto err0;
+			}
 		} else {
 			tmc_etr_read_bytes(byte_cntr_data, ppos,
 						   byte_cntr_data->block_size,
@@ -109,9 +125,14 @@ static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
 		*ppos = 0;
 	else
 		*ppos += len;
+
+	goto out;
+
 err0:
 	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
-
+	return ret;
+out:
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 	return len;
 }
 
@@ -122,7 +143,8 @@ void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data)
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
 
-	if (byte_cntr_data->block_size == 0) {
+	if (byte_cntr_data->block_size == 0
+		|| byte_cntr_data->read_active) {
 		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 		return;
 	}
@@ -140,6 +162,8 @@ void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
 	byte_cntr_data->enable = false;
+	byte_cntr_data->read_active = false;
+	wake_up(&byte_cntr_data->wq);
 	coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
 	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 
@@ -160,6 +184,45 @@ static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
 	return 0;
 }
 
+int usb_bypass_start(struct byte_cntr *byte_cntr_data)
+{
+	if (!byte_cntr_data)
+		return -ENOMEM;
+
+	mutex_lock(&byte_cntr_data->usb_bypass_lock);
+
+	if (!tmcdrvdata->enable) {
+		mutex_unlock(&byte_cntr_data->usb_bypass_lock);
+		return -EINVAL;
+	}
+
+	atomic_set(&byte_cntr_data->usb_free_buf, USB_BUF_NUM);
+	byte_cntr_data->offset = tmcdrvdata->etr_buf->offset;
+	/*
+	 * IRQ is a '8- byte' counter and to observe interrupt at
+	 * 'block_size' bytes of data
+	 */
+	coresight_csr_set_byte_cntr(byte_cntr_data->csr, USB_BLK_SIZE / 8);
+
+	atomic_set(&byte_cntr_data->irq_cnt, 0);
+	mutex_unlock(&byte_cntr_data->usb_bypass_lock);
+
+	return 0;
+}
+
+void usb_bypass_stop(struct byte_cntr *byte_cntr_data)
+{
+	if (!byte_cntr_data)
+		return;
+
+	mutex_lock(&byte_cntr_data->usb_bypass_lock);
+	wake_up(&byte_cntr_data->usb_wait_wq);
+	coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
+	mutex_unlock(&byte_cntr_data->usb_bypass_lock);
+
+}
+EXPORT_SYMBOL(usb_bypass_stop);
+
 static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
 {
 	struct byte_cntr *byte_cntr_data =
@@ -167,7 +230,7 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
 
-	if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
+	if (!byte_cntr_data->enable || !byte_cntr_data->block_size) {
 		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 		return -EINVAL;
 	}
@@ -180,10 +243,8 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
 
 	fp->private_data = byte_cntr_data;
 	nonseekable_open(in, fp);
-	byte_cntr_data->enable = true;
 	byte_cntr_data->read_active = true;
 	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
-
 	return 0;
 }
 
@@ -244,6 +305,131 @@ static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
 	return ret;
 }
 
+static void usb_read_work_fn(struct work_struct *work)
+{
+	int ret, seq = 0;
+	struct qdss_request *usb_req = NULL;
+	struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
+	size_t actual, req_size;
+	struct byte_cntr *drvdata =
+		container_of(work, struct byte_cntr, read_work);
+
+	while (tmcdrvdata->enable
+		&& tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+		if (!atomic_read(&drvdata->irq_cnt)) {
+			ret = wait_event_interruptible(drvdata->usb_wait_wq,
+				atomic_read(&drvdata->irq_cnt) > 0
+				|| !tmcdrvdata->enable || tmcdrvdata->out_mode
+				!= TMC_ETR_OUT_MODE_USB);
+			if (ret == -ERESTARTSYS || !tmcdrvdata->enable
+			|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB)
+				break;
+		}
+
+		req_size = USB_BLK_SIZE;
+		while (req_size > 0) {
+			seq++;
+			usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
+			if (!usb_req)
+				return;
+			actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
+					req_size, &usb_req->buf);
+			if (actual <= 0) {
+				kfree(usb_req);
+				usb_req = NULL;
+				dev_err(tmcdrvdata->dev, "No data in ETR\n");
+				break;
+			}
+			usb_req->length = actual;
+			drvdata->usb_req = usb_req;
+			req_size -= actual;
+			if ((drvdata->offset + usb_req->length)
+					>= tmcdrvdata->size)
+				drvdata->offset = 0;
+			else
+				drvdata->offset += usb_req->length;
+			if (atomic_read(&drvdata->usb_free_buf) > 0) {
+				ret = usb_qdss_write(tmcdrvdata->usbch,
+						drvdata->usb_req);
+				if (ret) {
+					kfree(usb_req);
+					usb_req = NULL;
+					drvdata->usb_req = NULL;
+					dev_err(tmcdrvdata->dev,
+						"Write data failed\n");
+					continue;
+				}
+				atomic_dec(&drvdata->usb_free_buf);
+
+			} else {
+				dev_dbg(tmcdrvdata->dev,
+				"Drop data, offset = %d, seq = %d, irq = %d\n",
+					drvdata->offset, seq,
+					atomic_read(&drvdata->irq_cnt));
+				kfree(usb_req);
+				drvdata->usb_req = NULL;
+			}
+		}
+		if (atomic_read(&drvdata->irq_cnt) > 0)
+			atomic_dec(&drvdata->irq_cnt);
+	}
+	dev_err(tmcdrvdata->dev, "TMC has been stopped.\n");
+}
+
+static void usb_write_done(struct byte_cntr *drvdata,
+				   struct qdss_request *d_req)
+{
+	atomic_inc(&drvdata->usb_free_buf);
+	if (d_req->status)
+		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
+	kfree(d_req);
+}
+
+void usb_bypass_notifier(void *priv, unsigned int event,
+			struct qdss_request *d_req, struct usb_qdss_ch *ch)
+{
+	struct byte_cntr *drvdata = priv;
+
+	if (!drvdata)
+		return;
+
+	switch (event) {
+	case USB_QDSS_CONNECT:
+		usb_qdss_alloc_req(ch, USB_BUF_NUM, 0);
+		usb_bypass_start(drvdata);
+		queue_work(drvdata->usb_wq, &(drvdata->read_work));
+		break;
+
+	case USB_QDSS_DISCONNECT:
+		usb_bypass_stop(drvdata);
+		break;
+
+	case USB_QDSS_DATA_WRITE_DONE:
+		usb_write_done(drvdata, d_req);
+		break;
+
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(usb_bypass_notifier);
+
+
+static int usb_bypass_init(struct byte_cntr *byte_cntr_data)
+{
+	byte_cntr_data->usb_wq = create_singlethread_workqueue("byte-cntr");
+	if (!byte_cntr_data->usb_wq)
+		return -ENOMEM;
+
+	byte_cntr_data->offset = 0;
+	mutex_init(&byte_cntr_data->usb_bypass_lock);
+	init_waitqueue_head(&byte_cntr_data->usb_wait_wq);
+	atomic_set(&byte_cntr_data->usb_free_buf, USB_BUF_NUM);
+	INIT_WORK(&(byte_cntr_data->read_work), usb_read_work_fn);
+
+	return 0;
+}
+
 struct byte_cntr *byte_cntr_init(struct amba_device *adev,
 				 struct tmc_drvdata *drvdata)
 {
@@ -261,6 +447,12 @@ struct byte_cntr *byte_cntr_init(struct amba_device *adev,
 	if (!byte_cntr_data)
 		return NULL;
 
+	byte_cntr_data->sw_usb = of_property_read_bool(np, "qcom,sw-usb");
+	if (byte_cntr_data->sw_usb) {
+		ret = usb_bypass_init(byte_cntr_data);
+		if (ret)
+			return NULL;
+	}
 	ret = devm_request_irq(dev, byte_cntr_irq, etr_handler,
 			       IRQF_TRIGGER_RISING | IRQF_SHARED,
 			       "tmc-etr", byte_cntr_data);
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.h b/drivers/hwtracing/coresight/coresight-byte-cntr.h
index 63e4bad..0721c50 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.h
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved.
  */
 #ifndef _CORESIGHT_BYTE_CNTR_H
 #define _CORESIGHT_BYTE_CNTR_H
@@ -14,16 +14,27 @@ struct byte_cntr {
 	struct class		*driver_class;
 	bool			enable;
 	bool			read_active;
+	bool			sw_usb;
 	uint32_t		byte_cntr_value;
 	uint32_t		block_size;
 	int			byte_cntr_irq;
 	atomic_t		irq_cnt;
+	atomic_t		usb_free_buf;
 	wait_queue_head_t	wq;
+	wait_queue_head_t	usb_wait_wq;
+	struct workqueue_struct *usb_wq;
+	struct qdss_request	*usb_req;
+	struct work_struct	read_work;
+	struct mutex		usb_bypass_lock;
 	struct mutex		byte_cntr_lock;
-	struct coresight_csr		*csr;
+	struct coresight_csr	*csr;
+	unsigned long		offset;
 };
 
+
+extern void usb_bypass_notifier(void *priv, unsigned int event,
+		struct qdss_request *d_req, struct usb_qdss_ch *ch);
 extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
 extern void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data);
-
+extern void usb_bypass_stop(struct byte_cntr *byte_cntr_data);
 #endif
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 41651ae..309e5a0 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -423,6 +423,7 @@ static struct platform_driver csr_driver = {
 	.driver         = {
 		.name   = "coresight-csr",
 		.of_match_table = csr_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index c0d5064..4217742 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -358,6 +358,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
 	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
 	if (len > 0)
 		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
+
 	return len;
 }
 
@@ -1342,9 +1343,29 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 			}
 			coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
 			coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+		} else if (drvdata->byte_cntr->sw_usb) {
+			if (!drvdata->etr_buf) {
+				free_buf = new_buf =
+				tmc_etr_setup_sysfs_buf(drvdata);
+				if (IS_ERR(new_buf)) {
+					mutex_unlock(&drvdata->mem_lock);
+					return -ENOMEM;
+				}
+			}
+			coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+			coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
+			drvdata->usbch = usb_qdss_open("qdss_mdm",
+						drvdata->byte_cntr,
+						usb_bypass_notifier);
+			if (IS_ERR_OR_NULL(drvdata->usbch)) {
+				dev_err(drvdata->dev, "usb_qdss_open failed\n");
+				mutex_unlock(&drvdata->mem_lock);
+				return -ENODEV;
+			}
 		} else {
 			drvdata->usbch = usb_qdss_open("qdss", drvdata,
-								usb_notifier);
+							usb_notifier);
 			if (IS_ERR_OR_NULL(drvdata->usbch)) {
 				dev_err(drvdata->dev, "usb_qdss_open failed\n");
 				mutex_unlock(&drvdata->mem_lock);
@@ -1379,7 +1400,9 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 
 	drvdata->mode = CS_MODE_SYSFS;
 
-	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM ||
+	    (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
+	     && drvdata->byte_cntr->sw_usb))
 		tmc_etr_enable_hw(drvdata);
 
 	drvdata->enable = true;
@@ -1435,12 +1458,18 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 	/* Disable the TMC only if it needs to */
 	if (drvdata->mode != CS_MODE_DISABLED) {
 		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
-			__tmc_etr_disable_to_bam(drvdata);
-			spin_unlock_irqrestore(&drvdata->spinlock, flags);
-			tmc_etr_bam_disable(drvdata);
-			usb_qdss_close(drvdata->usbch);
-			drvdata->mode = CS_MODE_DISABLED;
-			goto out;
+			if (!drvdata->byte_cntr->sw_usb) {
+				__tmc_etr_disable_to_bam(drvdata);
+				spin_unlock_irqrestore(&drvdata->spinlock,
+								flags);
+				tmc_etr_bam_disable(drvdata);
+				usb_qdss_close(drvdata->usbch);
+				drvdata->mode = CS_MODE_DISABLED;
+				goto out;
+			} else {
+				usb_qdss_close(drvdata->usbch);
+				tmc_etr_disable_hw(drvdata);
+			}
 		} else {
 			tmc_etr_disable_hw(drvdata);
 		}
@@ -1449,6 +1478,19 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
+		&& drvdata->byte_cntr->sw_usb) {
+		usb_bypass_stop(drvdata->byte_cntr);
+		flush_workqueue(drvdata->byte_cntr->usb_wq);
+		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		/* Free memory outside the spinlock if need be */
+		if (drvdata->etr_buf) {
+			tmc_etr_free_sysfs_buf(drvdata->etr_buf);
+			drvdata->etr_buf = NULL;
+		}
+	}
+
 	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
 		tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
 		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index cc287cf..edc52d7 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
 	othdev->output.port = -1;
 	othdev->output.active = false;
 	gth->output[port].output = NULL;
-	for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
+	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
 		if (gth->master[master] == port)
 			gth->master[master] = -1;
 	spin_unlock(&gth->gth_lock);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ba7aaf4..8ff326c 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -84,6 +84,7 @@ struct msc_iter {
  * @reg_base:		register window base address
  * @thdev:		intel_th_device pointer
  * @win_list:		list of windows in multiblock mode
+ * @single_sgt:		single mode buffer
  * @nr_pages:		total number of pages allocated for this buffer
  * @single_sz:		amount of data in single mode
  * @single_wrap:	single mode wrap occurred
@@ -104,6 +105,7 @@ struct msc {
 	struct intel_th_device	*thdev;
 
 	struct list_head	win_list;
+	struct sg_table		single_sgt;
 	unsigned long		nr_pages;
 	unsigned long		single_sz;
 	unsigned int		single_wrap : 1;
@@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
  */
 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
 {
+	unsigned long nr_pages = size >> PAGE_SHIFT;
 	unsigned int order = get_order(size);
 	struct page *page;
+	int ret;
 
 	if (!size)
 		return 0;
 
+	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto err_out;
+
+	ret = -ENOMEM;
 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
-		return -ENOMEM;
+		goto err_free_sgt;
 
 	split_page(page, order);
-	msc->nr_pages = size >> PAGE_SHIFT;
+	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+			 DMA_FROM_DEVICE);
+	if (ret < 0)
+		goto err_free_pages;
+
+	msc->nr_pages = nr_pages;
 	msc->base = page_address(page);
-	msc->base_addr = page_to_phys(page);
+	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
 
 	return 0;
+
+err_free_pages:
+	__free_pages(page, order);
+
+err_free_sgt:
+	sg_free_table(&msc->single_sgt);
+
+err_out:
+	return ret;
 }
 
 /**
@@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
 {
 	unsigned long off;
 
+	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+		     1, DMA_FROM_DEVICE);
+	sg_free_table(&msc->single_sgt);
+
 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
 		struct page *page = virt_to_page(msc->base + off);
 
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 1cf6290..70f2cb9 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Comet Lake */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index ce518de..5d40275 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -166,12 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
 static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
 {
 	struct stp_master *master;
-	size_t size;
-	unsigned long align = sizeof(unsigned long);
 
-	size = ALIGN(stm->data->sw_nchannels, align) / align;
-	size += sizeof(struct stp_master);
-	master = kzalloc(size, GFP_ATOMIC);
+	master = kzalloc(struct_size(master, chan_map,
+				     BITS_TO_LONGS(stm->data->sw_nchannels)),
+			 GFP_ATOMIC);
 	if (!master)
 		return -ENOMEM;
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ac4b096..8f80381 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@
 	    Cannon Lake (PCH)
 	    Cedar Fork (PCH)
 	    Ice Lake (PCH)
+	    Comet Lake (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145..679c6c4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)		0x9da3	32	hard	yes	yes	yes
  * Cedar Fork (PCH)		0x18df	32	hard	yes	yes	yes
  * Ice Lake-LP (PCH)		0x34a3	32	hard	yes	yes	yes
+ * Comet Lake (PCH)		0x02a3	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC				no
@@ -240,6 +241,7 @@
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS	0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS		0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS		0x02a3
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
 	{ 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
 	case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
 	case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+	case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
 		priv->features |= FEATURE_I2C_BLOCK_READ;
 		priv->features |= FEATURE_IRQ;
 		priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index c4067007..7bd409e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -510,9 +510,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
 				     unsigned long action, void *data)
 {
 	struct clk_notifier_data *ndata = data;
-	struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
+	struct imx_i2c_struct *i2c_imx = container_of(nb,
 						      struct imx_i2c_struct,
-						      clk);
+						      clk_change_nb);
 
 	if (action & POST_RATE_CHANGE)
 		i2c_imx_set_clk(i2c_imx, ndata->new_rate);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 07b2d16..d449e60 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -3,7 +3,6 @@
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
-
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -492,6 +491,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		u8 op = (msgs[i].flags & I2C_M_RD) ? 2 : 1;
 		int segs = 3 - op;
 		int index = 0;
+		u8 *dma_buf = NULL;
 		int stretch = (i < (num - 1));
 		dma_cookie_t tx_cookie, rx_cookie;
 		struct msm_gpi_tre *go_t = &gi2c->go_t;
@@ -499,6 +499,13 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		struct device *tx_dev = gi2c->wrapper_dev;
 
 		gi2c->cur = &msgs[i];
+
+		dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+		if (!dma_buf) {
+			ret = -ENOMEM;
+			goto geni_i2c_gsi_xfer_out;
+		}
+
 		qcom_geni_i2c_calc_timeout(gi2c);
 		if (!gi2c->cfg_sent) {
 			segs++;
@@ -531,12 +538,14 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		if (msgs[i].flags & I2C_M_RD) {
 			sg_init_table(&gi2c->rx_sg, 1);
 			ret = geni_se_iommu_map_buf(rx_dev, &gi2c->rx_ph,
-						msgs[i].buf, msgs[i].len,
+						dma_buf, msgs[i].len,
 						DMA_FROM_DEVICE);
 			if (ret) {
 				GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 					    "geni_se_iommu_map_buf for rx failed :%d\n",
 					    ret);
+				i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i],
+								false);
 				goto geni_i2c_gsi_xfer_out;
 
 			}
@@ -570,12 +579,14 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			dma_async_issue_pending(gi2c->rx_c);
 		} else {
 			ret = geni_se_iommu_map_buf(tx_dev, &gi2c->tx_ph,
-						msgs[i].buf, msgs[i].len,
+						dma_buf, msgs[i].len,
 						DMA_TO_DEVICE);
 			if (ret) {
 				GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 					    "geni_se_iommu_map_buf for tx failed :%d\n",
 					    ret);
+				i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i],
+								false);
 				goto geni_i2c_gsi_xfer_out;
 
 			}
@@ -611,7 +622,6 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 		timeout = wait_for_completion_timeout(&gi2c->xfer,
 						gi2c->xfer_timeout);
-
 		if (!timeout) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "GSI Txn timed out: %u len: %d\n",
@@ -625,6 +635,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		else
 			geni_se_iommu_unmap_buf(tx_dev, &gi2c->tx_ph,
 				msgs[i].len, DMA_TO_DEVICE);
+		i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
 
 		if (gi2c->err) {
 			dmaengine_terminate_all(gi2c->tx_c);
@@ -672,6 +683,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		int stretch = (i < (num - 1));
 		u32 m_param = 0;
 		u32 m_cmd = 0;
+		u8 *dma_buf = NULL;
 		dma_addr_t tx_dma = 0;
 		dma_addr_t rx_dma = 0;
 		enum se_xfer_mode mode = FIFO_MODE;
@@ -683,12 +695,22 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		gi2c->cur = &msgs[i];
 		qcom_geni_i2c_calc_timeout(gi2c);
 		mode = msgs[i].len > 32 ? SE_DMA : FIFO_MODE;
+
 		ret = geni_se_select_mode(gi2c->base, mode);
 		if (ret) {
 			dev_err(gi2c->dev, "%s: Error mode init %d:%d:%d\n",
 				__func__, mode, i, msgs[i].len);
 			break;
 		}
+
+		if (mode == SE_DMA) {
+			dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+			if (!dma_buf) {
+				ret = -ENOMEM;
+				goto geni_i2c_txn_ret;
+			}
+		}
+
 		if (msgs[i].flags & I2C_M_RD) {
 			dev_dbg(gi2c->dev,
 				"READ,n:%d,i:%d len:%d, stretch:%d\n",
@@ -699,9 +721,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 			geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
 			if (mode == SE_DMA) {
 				ret = geni_se_rx_dma_prep(gi2c->wrapper_dev,
-							gi2c->base, msgs[i].buf,
+							gi2c->base, dma_buf,
 							msgs[i].len, &rx_dma);
 				if (ret) {
+					i2c_put_dma_safe_msg_buf(dma_buf,
+							&msgs[i], false);
 					mode = FIFO_MODE;
 					ret = geni_se_select_mode(gi2c->base,
 								  mode);
@@ -717,9 +741,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 			geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
 			if (mode == SE_DMA) {
 				ret = geni_se_tx_dma_prep(gi2c->wrapper_dev,
-							gi2c->base, msgs[i].buf,
+							gi2c->base, dma_buf,
 							msgs[i].len, &tx_dma);
 				if (ret) {
+					i2c_put_dma_safe_msg_buf(dma_buf,
+							&msgs[i], false);
 					mode = FIFO_MODE;
 					ret = geni_se_select_mode(gi2c->base,
 								  mode);
@@ -744,7 +770,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 			if (!timeout)
 				geni_abort_m_cmd(gi2c->base);
 		}
-
 		gi2c->cur_wr = 0;
 		gi2c->cur_rd = 0;
 		if (mode == SE_DMA) {
@@ -762,6 +787,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 					      msgs[i].len);
 			geni_se_tx_dma_unprep(gi2c->wrapper_dev, tx_dma,
 					      msgs[i].len);
+			i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
 		}
 		ret = gi2c->err;
 		if (gi2c->err) {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 62d023e..a492da9 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -424,7 +424,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
 		 STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
 	dnf_delay = setup->dnf * i2cclk;
 
-	sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
+	sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
 		af_delay_min - (setup->dnf + 3) * i2cclk;
 
 	sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 915f5ed..e6c554e 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -602,6 +602,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
 	i2c->adapter = synquacer_i2c_ops;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	i2c->adapter.dev.parent = &pdev->dev;
+	i2c->adapter.dev.of_node = pdev->dev.of_node;
+	ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
 	i2c->adapter.nr = pdev->id;
 	init_completion(&i2c->completion);
 
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9200e34..5b0e1d9 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -306,10 +306,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
 	if (client->flags & I2C_CLIENT_TEN)
 		return -EINVAL;
 
-	irq = irq_find_mapping(adap->host_notify_domain, client->addr);
-	if (!irq)
-		irq = irq_create_mapping(adap->host_notify_domain,
-					 client->addr);
+	irq = irq_create_mapping(adap->host_notify_domain, client->addr);
 
 	return irq > 0 ? irq : -ENXIO;
 }
@@ -330,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
 
 		if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
 			dev_dbg(dev, "Using Host Notify IRQ\n");
+			/* Keep adapter active when Host Notify is required */
+			pm_runtime_get_sync(&client->adapter->dev);
 			irq = i2c_smbus_host_notify_to_irq(client);
 		} else if (dev->of_node) {
 			irq = of_irq_get_byname(dev->of_node, "irq");
@@ -433,6 +432,10 @@ static int i2c_device_remove(struct device *dev)
 	dev_pm_clear_wake_irq(&client->dev);
 	device_init_wakeup(&client->dev, false);
 
+	client->irq = client->init_irq;
+	if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+		pm_runtime_put(&client->adapter->dev);
+
 	return status;
 }
 
@@ -742,10 +745,11 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
 	client->flags = info->flags;
 	client->addr = info->addr;
 
-	client->irq = info->irq;
-	if (!client->irq)
-		client->irq = i2c_dev_irq_from_resources(info->resources,
+	client->init_irq = info->irq;
+	if (!client->init_irq)
+		client->init_irq = i2c_dev_irq_from_resources(info->resources,
 							 info->num_resources);
+	client->irq = client->init_irq;
 
 	strlcpy(client->name, info->type, sizeof(client->name));
 
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2dc628d..752256d 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -385,8 +385,9 @@ static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
 		return;
 
 	ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
-	*ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
-	*ptr |= status << (bitpos % BITS_PER_LONG);
+	*ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+		  (bitpos % BITS_PER_LONG));
+	*ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
 }
 
 static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
@@ -1980,7 +1981,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 {
 	struct i3c_dev_boardinfo *boardinfo;
 	struct device *dev = &master->dev;
-	struct i3c_device_info info = { };
 	enum i3c_addr_slot_status addrstatus;
 	u32 init_dyn_addr = 0;
 
@@ -2012,8 +2012,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 
 	boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
 
-	if ((info.pid & GENMASK_ULL(63, 48)) ||
-	    I3C_PID_RND_LOWER_32BITS(info.pid))
+	if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+	    I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
 		return -EINVAL;
 
 	boardinfo->init_dyn_addr = init_dyn_addr;
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index 443ff43..b88425f 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -19,6 +19,7 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/ipc_logging.h>
 #include <linux/pinctrl/qcom-pinctrl.h>
+#include <linux/delay.h>
 
 #define SE_I3C_SCL_HIGH			0x268
 #define SE_I3C_TX_TRANS_LEN		0x26C
@@ -29,6 +30,39 @@
 #define SE_GENI_HW_IRQ_EN		0x920
 #define SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE	0x924
 #define SE_GENI_HW_IRQ_CMD_PARAM_0	0x930
+/* IBI_C registers */
+#define IBI_GEN_CONFIG			0x0000
+#define IBI_SCL_OD_TYPE			0x0004
+#define IBI_SCL_PP_TIMING_CONFIG	0x0008
+#define IBI_GPII_IBI_EN			0x000c
+#define IBI_GEN_IRQ_STATUS		0x0010
+#define IBI_GEN_IRQ_EN			0x0014
+#define IBI_GEN_IRQ_CLR			0x0018
+#define IBI_HW_PARAM			0x001c
+#define IBI_HW_VERSION			0x0020
+#define IBI_RX_DATA_DELAY		0x0024
+#define IBI_UNEXPECT_IBI_INFO		0x0028
+#define IBI_LEGACY_MODE			0x002c
+#define IBI_SW_RESET			0x0030
+#define IBI_TEST_BUS_SEL		0x0100
+#define IBI_TEST_BUS_EN			0x0104
+#define IBI_TEST_BUS_REG		0x0108
+#define IBI_HW_EVENTS_MUX_CFG		0x010c
+#define IBI_CHAR_CFG			0x0180
+#define IBI_CHAR_DATA			0x0184
+#define IBI_CHAR_OE			0x0188
+#define IBI_CMD(n)			(0x1000 + (0x1000*n))
+#define IBI_IRQ_STATUS(n)		(0x1004 + (0x1000*n))
+#define IBI_IRQ_EN(n)			(0x1008 + (0x1000*n))
+#define IBI_IRQ_CLR(n)			(0x100C + (0x1000*n))
+#define IBI_RCVD_IBI_STATUS(n)		(0x1010 + (0x1000*n))
+#define IBI_RCVD_IBI_CLR(n)		(0x1014 + (0x1000*n))
+#define IBI_ALLOCATED_ENTRIES_GPII(n)	(0x1018 + (0x1000*n))
+#define IBI_CONFIG_ENTRY(n, k)		(0x1800 + (0x1000*n) + (0x40*k))
+#define IBI_RCVD_IBI_INFO_ENTRY(n, k)	(0x1804 + (0x1000*n) + (0x40*k))
+#define IBI_RCVD_IBI_DATA_ENTRY(n, k)	(0x1808 + (0x1000*n) + (0x40*k))
+#define IBI_RCVD_IBI_TS_LSB_ENTRY(n, k)	(0x180C + (0x1000*n) + (0x40*k))
+#define IBI_RCVD_IBI_TS_MSB_ENTRY(n, k)	(0x1810 + (0x1000*n) + (0x40*k))
 
 /* SE_GENI_M_CLK_CFG field shifts */
 #define CLK_DEV_VALUE_SHFT	4
@@ -83,6 +117,58 @@
 #define USE_7E			BIT(25)
 #define BYPASS_ADDR_PHASE	BIT(26)
 
+/* IBI_HW_PARAM fields */
+#define I3C_IBI_NUM_GPII_MSK	(GENMASK(11, 8))
+#define I3C_IBI_NUM_GPII_SHFT	(8)
+#define I3C_IBI_TABLE_DEPTH_MSK	(GENMASK(4, 0))
+
+/* IBI_IRQ_STATUS(n) fields */
+#define COMMAND_DONE			BIT(0)
+#define CFG_TABLE_FULL			BIT(2)
+#define IBI_RECEIVED			BIT(8)
+#define ADDR_ASSOCIATED_W_OTHER_GPII	BIT(21)
+
+/* IBI_GEN_IRQ_EN fields */
+#define ENABLE_CHANGE_IRQ_EN		BIT(0)
+#define UNEXPECT_IBI_ADDR_IRQ_EN	BIT(1)
+#define HOT_JOIN_IRQ_EN			BIT(2)
+#define SW_RESET_DONE_EN		BIT(3)
+#define BUS_ERROR_EN			BIT(4)
+
+/* IBI_IRQ_EN fields */
+#define COMMAND_DONE_IRQ_EN		BIT(0)
+#define INVALID_I3C_SLAVE_ADDR_IRQ_EN	BIT(1)
+#define CFG_TABLE_FULL_IRQ_EN		BIT(2)
+#define CFG_FAIL_IRQ_EN			BIT(3)
+#define CFG_W_IBI_DIS_IRQ_EN		BIT(4)
+#define IBI_RECEIVED_IRQ_EN		BIT(8)
+#define CFG_FAIL_ZERO_NUM_MDB_EN	BIT(16)
+#define CFG_FAIL_MASK_EN_DIFF_EN	BIT(17)
+#define CFG_FAIL_NUM_MDB_DIFF_EN	BIT(18)
+#define CFG_FAIL_NACK_DIFF_EN		BIT(19)
+#define CFG_FAIL_STALL_DIFF_EN		BIT(20)
+#define ADDR_ASSOCIATED_W_OTHER_GPII_EN	BIT(21)
+
+/* IBI_CMD fields */
+#define IBI_CMD_OPCODE        BIT(0)
+#define I3C_SLAVE_RW          BIT(15)
+#define STALL                 BIT(21)
+#define I3C_SLAVE_ADDR_SHIFT  8
+#define I3C_SLAVE_MASK        0x7f
+
+/* IBI_GEN_CONFIG fields */
+#define IBI_C_ENABLE	BIT(0)
+
+/* IBI_CONFIG_ENTRY fields */
+#define IBI_VALID	BIT(0)
+
+#define SE_I3C_IBI_ERR  (INVALID_I3C_SLAVE_ADDR_IRQ_EN |\
+			CFG_TABLE_FULL_IRQ_EN | CFG_FAIL_IRQ_EN |\
+			CFG_W_IBI_DIS_IRQ_EN | CFG_FAIL_ZERO_NUM_MDB_EN |\
+			CFG_FAIL_MASK_EN_DIFF_EN | CFG_FAIL_NUM_MDB_DIFF_EN |\
+			CFG_FAIL_NACK_DIFF_EN | CFG_FAIL_STALL_DIFF_EN |\
+			ADDR_ASSOCIATED_W_OTHER_GPII_EN)
+
 enum geni_i3c_err_code {
 	RD_TERM,
 	NACK,
@@ -109,6 +195,10 @@ enum geni_i3c_err_code {
 
 #define I3C_DDR_READ_CMD BIT(7)
 #define I3C_ADDR_MASK	0x7f
+#define I3C_MAX_GPII_NUM 12
+#define TLMM_I3C_MODE	0x24
+#define IBI_SW_RESET_MIN_SLEEP 1000
+#define IBI_SW_RESET_MAX_SLEEP 2000
 
 enum i3c_trans_dir {
 	WRITE_TRANSACTION = 0,
@@ -122,6 +212,38 @@ struct geni_se {
 	struct se_geni_rsc i3c_rsc;
 };
 
+struct rcvd_ibi_data {
+	union {
+		struct {
+			u32 slave_add   : 7;
+			u32 rw          : 1;
+			u32 num_bytes   : 3;
+			u32 resvd1      : 1;
+			u32 nack        : 1;
+			u32 resvd2      : 18;
+			u32 valid       : 1;
+		} fields;
+		u32 info;
+	} info;
+	u32 ts;
+	u32 payload;
+};
+
+struct geni_ibi {
+	bool hw_support;
+	bool is_init;
+	unsigned int num_slots;
+	unsigned int num_gpi;
+	struct i3c_dev_desc **slots;
+	spinlock_t lock;
+	int mngr_irq;
+	struct completion done;
+	int gpii_irq[I3C_MAX_GPII_NUM];
+	int err;
+	u8 ctrl_id;
+	struct rcvd_ibi_data data;
+};
+
 struct geni_i3c_dev {
 	struct geni_se se;
 	unsigned int tx_wm;
@@ -140,10 +262,13 @@ struct geni_i3c_dev {
 	int cur_idx;
 	unsigned long newaddrslots[(I3C_ADDR_MASK + 1) / BITS_PER_LONG];
 	const struct geni_i3c_clk_fld *clk_fld;
+	struct geni_ibi ibi;
 };
 
 struct geni_i3c_i2c_dev_data {
-	u32 dummy;  /* placeholder for now, later will hold IBI information */
+	u16 id;
+	s16 ibi;
+	struct i3c_generic_ibi_pool *ibi_pool;
 };
 
 struct i3c_xfer_params {
@@ -210,6 +335,7 @@ static const struct geni_i3c_clk_fld geni_i3c_clk_map[] = {
 	{ KHZ(100),    19200, 7, 10, 11, 26, 0, 0 },
 	{ KHZ(400),    19200, 2,  5, 12, 24, 0, 0 },
 	{ KHZ(1000),   19200, 1,  3,  9, 18, 7, 0 },
+	{ KHZ(1920),   19200, 1,  4,  9, 19, 7, 8 },
 	{ KHZ(12500), 100000, 1, 60, 140, 250, 8, 16 },
 };
 
@@ -293,29 +419,122 @@ static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c)
 	writel_relaxed(itr->i3c_t_cycle_cnt, gi3c->se.base + SE_I3C_SCL_CYCLE);
 	writel_relaxed(itr->i3c_t_high_cnt, gi3c->se.base + SE_I3C_SCL_HIGH);
 
-	writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE);
-
-	val = 1 << M_IBI_IRQ_PARAM_STOP_STALL_SHFT;
-	val |= 1 << M_IBI_IRQ_PARAM_7E_SHFT;
-	writel_relaxed(val, gi3c->se.base + SE_GENI_HW_IRQ_CMD_PARAM_0);
-
-	writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_EN);
-	geni_write_reg(1, gi3c->se.ibi_base, 0x2C);
 }
 
 static void geni_i3c_err(struct geni_i3c_dev *gi3c, int err)
 {
 	if (gi3c->cur_rnw == WRITE_TRANSACTION)
-		dev_dbg(gi3c->se.dev, "len:%d, write\n", gi3c->cur_len);
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, write\n",
+			gi3c->cur_len);
 	else
-		dev_dbg(gi3c->se.dev, "len:%d, read\n", gi3c->cur_len);
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, read\n",
+			gi3c->cur_len);
 
-	dev_dbg(gi3c->se.dev, "%s\n", gi3c_log[err].msg);
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "%s\n", gi3c_log[err].msg);
 	gi3c->err = gi3c_log[err].err;
 
 	geni_se_dump_dbg_regs(&gi3c->se.i3c_rsc, gi3c->se.base, gi3c->ipcl);
 }
 
+static void geni_i3c_handle_received_ibi(struct geni_i3c_dev *gi3c)
+{
+	struct geni_i3c_i2c_dev_data *data;
+	struct i3c_ibi_slot *slot;
+	struct i3c_dev_desc *dev = gi3c->ibi.slots[0];
+	u32 val, i;
+
+	val = readl_relaxed(gi3c->se.ibi_base + IBI_RCVD_IBI_STATUS(0));
+
+	data = i3c_dev_get_master_data(dev);
+	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+	if (!slot) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "no free slot\n");
+		goto no_free_slot;
+	}
+
+	for (i = 0; i < gi3c->ibi.num_slots; i++) {
+		if (!(val & (1u << i)))
+			continue;
+
+		gi3c->ibi.data.info.info =
+			readl_relaxed(gi3c->se.ibi_base
+				+ IBI_RCVD_IBI_INFO_ENTRY(0, i));
+		gi3c->ibi.data.ts =
+			readl_relaxed(gi3c->se.ibi_base
+				+ IBI_RCVD_IBI_TS_LSB_ENTRY(0, i));
+		gi3c->ibi.data.payload =
+			readl_relaxed(gi3c->se.ibi_base
+				+ IBI_RCVD_IBI_DATA_ENTRY(0, i));
+
+		if (slot->data)
+			memcpy(slot->data, &gi3c->ibi.data.payload,
+				dev->ibi->max_payload_len);
+
+		slot->len = min_t(unsigned int,
+				gi3c->ibi.data.info.fields.num_bytes,
+				dev->ibi->max_payload_len);
+
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"IBI: info: 0x%x, ts: 0x%x, Data: 0x%x\n",
+			gi3c->ibi.data.info.info, gi3c->ibi.data.ts,
+			gi3c->ibi.data.payload);
+	}
+
+	i3c_master_queue_ibi(dev, slot);
+no_free_slot:
+	writel_relaxed(val, gi3c->se.ibi_base + IBI_RCVD_IBI_CLR(0));
+}
+
+static irqreturn_t geni_i3c_ibi_irq(int irq, void *dev)
+{
+	struct geni_i3c_dev *gi3c = dev;
+	unsigned long flags;
+	u32 m_stat = 0, m_stat_mask = 0;
+	bool cmd_done = false;
+
+	spin_lock_irqsave(&gi3c->ibi.lock, flags);
+
+	if (irq == gi3c->ibi.mngr_irq) {
+		m_stat_mask = readl_relaxed(gi3c->se.ibi_base + IBI_GEN_IRQ_EN);
+		m_stat = readl_relaxed(gi3c->se.ibi_base
+				+ IBI_GEN_IRQ_STATUS) & m_stat_mask;
+
+		if ((m_stat & UNEXPECT_IBI_ADDR_IRQ_EN) ||
+			(m_stat & BUS_ERROR_EN))
+			gi3c->ibi.err = m_stat;
+
+		if ((m_stat & ENABLE_CHANGE_IRQ_EN) ||
+			(m_stat & SW_RESET_DONE_EN))
+			cmd_done = true;
+
+		/* clear interrupts */
+		if (m_stat)
+			writel_relaxed(m_stat, gi3c->se.ibi_base
+				+ IBI_GEN_IRQ_CLR);
+	} else if (irq == gi3c->ibi.gpii_irq[0]) {
+		m_stat = readl_relaxed(gi3c->se.ibi_base + IBI_IRQ_STATUS(0));
+
+		if (m_stat & SE_I3C_IBI_ERR)
+			gi3c->ibi.err = m_stat;
+
+		if (m_stat & IBI_RECEIVED)
+			geni_i3c_handle_received_ibi(gi3c);
+
+		if (m_stat & COMMAND_DONE)
+			cmd_done = true;
+
+		/* clear interrupts */
+		if (m_stat)
+			writel_relaxed(m_stat, gi3c->se.ibi_base
+				+ IBI_IRQ_CLR(0));
+	}
+
+	if (cmd_done)
+		complete(&gi3c->ibi.done);
+	spin_unlock_irqrestore(&gi3c->ibi.lock, flags);
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t geni_i3c_irq(int irq, void *dev)
 {
 	struct geni_i3c_dev *gi3c = dev;
@@ -360,8 +579,8 @@ static irqreturn_t geni_i3c_irq(int irq, void *dev)
 	}
 
 	if (dma) {
-		dev_dbg(gi3c->se.dev, "i3c dma tx:0x%x, dma rx:0x%x\n",
-			dm_tx_st, dm_rx_st);
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"i3c dma tx:0x%x, dma rx:0x%x\n", dm_tx_st, dm_rx_st);
 		goto irqret;
 	}
 
@@ -440,7 +659,7 @@ static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
 	reinit_completion(&gi3c->done);
 	ret = pm_runtime_get_sync(gi3c->se.dev);
 	if (ret < 0) {
-		dev_err(gi3c->se.dev,
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
 			"error turning on SE resources:%d\n", ret);
 		pm_runtime_put_noidle(gi3c->se.dev);
 		/* Set device in suspended since resume failed */
@@ -480,10 +699,14 @@ static int _i3c_geni_execute_command
 	gi3c->cur_idx = 0;
 
 	if (rnw == READ_TRANSACTION) {
-		dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x READ len:%d\n",
-			xfer->m_cmd, xfer->m_param, len);
 		writel_relaxed(len, gi3c->se.base + SE_I3C_RX_TRANS_LEN);
 		geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"I3C cmd:0x%x param:0x%x READ len:%d, m_cmd: 0x%x\n",
+			xfer->m_cmd, xfer->m_param, len,
+			geni_read_reg(gi3c->se.base, SE_GENI_M_CMD0));
+
 		if (xfer->mode == SE_DMA) {
 			ret = geni_se_rx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
 					gi3c->se.base, gi3c->cur_buf,
@@ -494,10 +717,14 @@ static int _i3c_geni_execute_command
 			}
 		}
 	} else {
-		dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x WRITE len:%d\n",
-			xfer->m_cmd, xfer->m_param, len);
 		writel_relaxed(len, gi3c->se.base + SE_I3C_TX_TRANS_LEN);
 		geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"I3C cmd:0x%x param:0x%x WRITE len:%d, m_cmd: 0x%x\n",
+			xfer->m_cmd, xfer->m_param, len,
+			geni_read_reg(gi3c->se.base, SE_GENI_M_CMD0));
+
 		if (xfer->mode == SE_DMA) {
 			ret = geni_se_tx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
 					gi3c->se.base, gi3c->cur_buf,
@@ -516,6 +743,8 @@ static int _i3c_geni_execute_command
 	if (!time_remaining) {
 		unsigned long flags;
 
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"got wait_for_completion timeout\n");
 		spin_lock_irqsave(&gi3c->spinlock, flags);
 		geni_i3c_err(gi3c, GENI_TIMEOUT);
 		gi3c->cur_buf = NULL;
@@ -543,7 +772,8 @@ static int _i3c_geni_execute_command
 	}
 	ret = gi3c->err;
 	if (gi3c->err)
-		dev_err(gi3c->se.dev, "I3C transaction error :%d\n", gi3c->err);
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"I3C transaction error :%d\n", gi3c->err);
 
 	gi3c->cur_buf = NULL;
 	gi3c->cur_len = gi3c->cur_idx = 0;
@@ -597,7 +827,8 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
 		u64 pid;
 		u8 bcr, dcr, addr;
 
-		dev_dbg(gi3c->se.dev, "i3c entdaa read\n");
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"i3c entdaa read\n");
 
 		xfer.m_cmd = I2C_READ;
 		xfer.m_param = STOP_STRETCH | CONTINUOUS_MODE_DAA | USE_7E |
@@ -644,10 +875,24 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
 		tx_buf[0] = (addr & I3C_ADDR_MASK) << 1;
 		tx_buf[0] |= ~(hweight8(addr & I3C_ADDR_MASK) & 1);
 
-		dev_dbg(gi3c->se.dev, "i3c entdaa write\n");
+		/* calculate crc */
+		if (tx_buf[0]) {
+			u32 slaveid = addr;
+			u32 ret = slaveid & 1u;
+			u32 final = 0;
+
+			while (slaveid) {
+				slaveid >>= 1;
+				ret = ret ^ (slaveid & 1u);
+			}
+
+			ret = ret ^ 1u;
+			final = (addr << 1) | ret;
+			tx_buf[0] = final;
+		}
 
 		xfer.m_cmd = I2C_WRITE;
-		xfer.m_param = STOP_STRETCH | BYPASS_ADDR_PHASE | USE_7E |
+		xfer.m_param = STOP_STRETCH | BYPASS_ADDR_PHASE |
 				IBI_NACK_TBL_CTRL;
 
 		ret = i3c_geni_execute_write_command(gi3c, &xfer, tx_buf, 1);
@@ -724,7 +969,8 @@ static int geni_i3c_master_send_ccc_cmd
 			geni_i3c_perform_daa(gi3c);
 	}
 
-	dev_dbg(gi3c->se.dev, "i3c ccc: txn ret:%d\n", ret);
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+		"i3c ccc: txn ret:%d\n", ret);
 
 	i3c_geni_runtime_put_mutex_unlock(gi3c);
 
@@ -778,7 +1024,8 @@ static int geni_i3c_master_priv_xfers
 			break;
 	}
 
-	dev_dbg(gi3c->se.dev, "i3c priv: txn ret:%d\n", ret);
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+		"i3c priv: txn ret:%d\n", ret);
 
 	i3c_geni_runtime_put_mutex_unlock(gi3c);
 
@@ -800,8 +1047,10 @@ static int geni_i3c_master_i2c_xfers
 	if (ret)
 		return ret;
 
-	dev_dbg(gi3c->se.dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+		"i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
 		num, msgs[0].len, msgs[0].flags);
+
 	for (i = 0; i < num; i++) {
 		struct i3c_xfer_params xfer;
 
@@ -821,7 +1070,7 @@ static int geni_i3c_master_i2c_xfers
 			break;
 	}
 
-	dev_dbg(gi3c->se.dev, "i2c: txn ret:%d\n", ret);
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "i2c: txn ret:%d\n", ret);
 
 	i3c_geni_runtime_put_mutex_unlock(gi3c);
 
@@ -866,6 +1115,7 @@ static int geni_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
 	if (!data)
 		return -ENOMEM;
 
+	data->ibi = -1;
 	i3c_dev_set_master_data(dev, data);
 
 	return 0;
@@ -882,10 +1132,8 @@ static int geni_i3c_master_reattach_i3c_dev
 
 static void geni_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
 {
-	struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
 
 	i3c_dev_set_master_data(dev, NULL);
-	kfree(data);
 }
 
 static int geni_i3c_master_entdaa_locked(struct geni_i3c_dev *gi3c)
@@ -924,8 +1172,8 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
 
 	ret = pm_runtime_get_sync(gi3c->se.dev);
 	if (ret < 0) {
-		dev_err(gi3c->se.dev, "%s: error turning SE resources:%d\n",
-			__func__, ret);
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"%s: error turning SE resources:%d\n", __func__, ret);
 		pm_runtime_put_noidle(gi3c->se.dev);
 		/* Set device in suspended since resume failed */
 		pm_runtime_set_suspended(gi3c->se.dev);
@@ -934,7 +1182,7 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
 
 	ret = geni_i3c_clk_map_idx(gi3c);
 	if (ret) {
-		dev_err(gi3c->se.dev,
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
 			"Invalid clk frequency %d Hz src or %ld Hz bus: %d\n",
 			gi3c->clk_src_freq, bus->scl_rate.i3c,
 			ret);
@@ -1034,22 +1282,298 @@ static bool geni_i3c_master_supports_ccc_cmd
 
 static int geni_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
 {
-	return -ENOTSUPP;
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	int ret = 0;
+
+	if (!gi3c->ibi.hw_support && !gi3c->ibi.is_init)
+		return -EPERM;
+
+	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
+				     I3C_CCC_EVENT_SIR);
+	if (ret)
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"%s: error while i3c_master_enec_locked\n", __func__);
+
+	return ret;
 }
 
 static int geni_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
 {
-	return -ENOTSUPP;
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	int ret = 0;
+
+	if (!gi3c->ibi.hw_support && !gi3c->ibi.is_init)
+		return -EPERM;
+
+	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+	if (ret)
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"%s: error while i3c_master_disec_locked\n", __func__);
+
+	return ret;
+}
+
+static void qcom_geni_i3c_ibi_conf(struct geni_i3c_dev *gi3c)
+{
+	u32 val, timeout;
+
+	gi3c->ibi.err = 0;
+	reinit_completion(&gi3c->ibi.done);
+
+	/* set the configuration for 100Khz OD speed */
+	geni_write_reg(0, gi3c->se.ibi_base, IBI_SCL_OD_TYPE);
+	geni_write_reg(0x5FD74322, gi3c->se.ibi_base, IBI_SCL_PP_TIMING_CONFIG);
+
+	/* Enable I3C IBI controller */
+	val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG);
+	val |= IBI_C_ENABLE;
+	geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG);
+
+	/* enable ENABLE_CHANGE */
+	val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+	val |= IBI_C_ENABLE;
+	geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+	/* wait for ENABLE_CHANGE */
+	timeout = wait_for_completion_timeout(&gi3c->ibi.done, XFER_TIMEOUT);
+	if (!timeout) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"timeout while ENABLE_CHANGE bit\n");
+		return;
+	}
+
+	/* enable manager interrupts */
+	geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+	/* Enable GPII0 interrupts */
+	geni_write_reg(0x1, gi3c->se.ibi_base, IBI_GPII_IBI_EN);
+	geni_write_reg(~0u, gi3c->se.ibi_base, IBI_IRQ_EN(0));
+	gi3c->ibi.is_init = true;
 }
 
 static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev,
 	const struct i3c_ibi_setup *req)
 {
-	return -ENOTSUPP;
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+	unsigned long i, flags;
+
+	if (!gi3c->ibi.hw_support)
+		return -EPERM;
+
+	if (!gi3c->ibi.is_init)
+		qcom_geni_i3c_ibi_conf(gi3c);
+
+	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+	if (IS_ERR(data->ibi_pool)) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"Error creating a generic IBI pool %d\n",
+			PTR_ERR(data->ibi_pool));
+		return PTR_ERR(data->ibi_pool);
+	}
+
+	spin_lock_irqsave(&gi3c->ibi.lock, flags);
+	for (i = 0; i < gi3c->ibi.num_slots; i++) {
+		if (!gi3c->ibi.slots[i]) {
+			data->ibi = i;
+			gi3c->ibi.slots[i] = dev;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gi3c->ibi.lock, flags);
+
+	if (i < gi3c->ibi.num_slots) {
+		u32 cmd, timeout;
+
+		gi3c->ibi.err = 0;
+		reinit_completion(&gi3c->ibi.done);
+
+		cmd = ((dev->info.dyn_addr & I3C_SLAVE_MASK)
+			<< I3C_SLAVE_ADDR_SHIFT) | I3C_SLAVE_RW | STALL;
+		geni_write_reg(cmd, gi3c->se.ibi_base, IBI_CMD(0));
+
+		/* wait for adding slave IBI */
+		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
+				XFER_TIMEOUT);
+		if (!timeout) {
+			gi3c->ibi.err = -ETIMEDOUT;
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"timeout while adding slave IBI\n");
+		}
+
+		if (!gi3c->ibi.err)
+			return 0;
+
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"error while adding slave IBI 0x%x\n", gi3c->ibi.err);
+	}
+
+	GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+		"ibi.num_slots ran out %d: %d\n", i, gi3c->ibi.num_slots);
+
+	i3c_generic_ibi_free_pool(data->ibi_pool);
+	data->ibi_pool = NULL;
+
+	return -ENOSPC;
+}
+
+static int qcom_deallocate_ibi_table_entry(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	u32 i, timeout;
+
+	for (i = 0; i < gi3c->ibi.num_slots; i++) {
+		u32 entry;
+
+		gi3c->ibi.err = 0;
+		reinit_completion(&gi3c->ibi.done);
+
+		entry = geni_read_reg(gi3c->se.ibi_base,
+				IBI_CONFIG_ENTRY(0, i));
+
+		/* if valid entry */
+		if (entry & IBI_VALID) {
+			/* send remove command */
+			entry &= ~IBI_CMD_OPCODE;
+			geni_write_reg(entry, gi3c->se.ibi_base, IBI_CMD(0));
+
+			/* wait for removing slave IBI */
+			timeout = wait_for_completion_timeout(&gi3c->ibi.done,
+					XFER_TIMEOUT);
+			if (!timeout) {
+				GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+					"timeout while adding slave IBI\n");
+				return -ETIMEDOUT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	u32 val, timeout;
+	int ret = 0;
+
+	val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0));
+	if (val) {
+		ret = qcom_deallocate_ibi_table_entry(dev);
+		if (ret)
+			return;
+	}
+
+	/* disable interrupts */
+	geni_write_reg(0, gi3c->se.ibi_base, IBI_GPII_IBI_EN);
+	geni_write_reg(0, gi3c->se.ibi_base, IBI_IRQ_EN(0));
+
+	/* check if any IBI is enabled, if not then reset HW */
+	val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN);
+	if (!val) {
+		u32 wait = 100;
+
+		gi3c->ibi.err = 0;
+		reinit_completion(&gi3c->ibi.done);
+
+		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG);
+		val |= ~IBI_C_ENABLE;
+		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG);
+
+		/* enable ENABLE_CHANGE */
+		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+		val |= ENABLE_CHANGE_IRQ_EN;
+		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+		/* wait for ENABLE change */
+		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
+				XFER_TIMEOUT);
+		if (!timeout) {
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"timeout while disabling  IBI controller\n");
+				return;
+		}
+
+		if (gi3c->ibi.err) {
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"error while disabling IBI controller 0x%x\n",
+				gi3c->ibi.err);
+			return;
+		}
+
+		/* IBI_C reset */
+		geni_write_reg(1, gi3c->se.ibi_base, IBI_SW_RESET);
+		/*
+		 * wait for SW_RESET to be taken care by HW. Post reset it
+		 * will get cleared by HW
+		 */
+		while (wait--) {
+			if (geni_read_reg(gi3c->se.ibi_base, IBI_SW_RESET) != 0)
+				break;
+			usleep_range(IBI_SW_RESET_MIN_SLEEP,
+				IBI_SW_RESET_MAX_SLEEP);
+		}
+
+		if (!wait)
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"IBI controller reset failed\n");
+
+		gi3c->ibi.err = 0;
+		reinit_completion(&gi3c->ibi.done);
+
+		/* enable ENABLE_CHANGE */
+		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+		val |= SW_RESET_DONE_EN;
+		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+		/* wait for SW_RESET_DONE */
+		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
+				XFER_TIMEOUT);
+		if (!timeout) {
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"timeout while resetting  IBI controller\n");
+			return;
+		}
+
+		if (gi3c->ibi.err) {
+			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"error while resetting IBI controller 0x%x\n",
+				gi3c->ibi.err);
+			return;
+		}
+
+		/* disable IBI interrupts */
+		geni_write_reg(0, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+	}
+
+	gi3c->ibi.is_init = false;
+	disable_irq(gi3c->ibi.mngr_irq);
+	disable_irq(gi3c->ibi.gpii_irq[0]);
 }
 
 static void geni_i3c_master_free_ibi(struct i3c_dev_desc *dev)
 {
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+	unsigned long flags;
+
+	if (!gi3c->ibi.hw_support && !gi3c->ibi.is_init)
+		return;
+
+	qcom_geni_i3c_ibi_unconf(dev);
+
+	spin_lock_irqsave(&gi3c->ibi.lock, flags);
+	gi3c->ibi.slots[data->ibi] = NULL;
+	data->ibi = -1;
+	spin_unlock_irqrestore(&gi3c->ibi.lock, flags);
+
+	i3c_generic_ibi_free_pool(data->ibi_pool);
 }
 
 static void geni_i3c_master_recycle_ibi_slot
@@ -1058,6 +1582,9 @@ static void geni_i3c_master_recycle_ibi_slot
 	struct i3c_ibi_slot *slot
 )
 {
+	struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
 }
 
 static const struct i3c_master_controller_ops geni_i3c_master_ops = {
@@ -1085,24 +1612,28 @@ static int i3c_geni_rsrcs_clk_init(struct geni_i3c_dev *gi3c)
 {
 	int ret;
 
+	gi3c->se.i3c_rsc.ctrl_dev = gi3c->se.dev;
 	gi3c->se.i3c_rsc.se_clk = devm_clk_get(gi3c->se.dev, "se-clk");
 	if (IS_ERR(gi3c->se.i3c_rsc.se_clk)) {
 		ret = PTR_ERR(gi3c->se.i3c_rsc.se_clk);
-		dev_err(gi3c->se.dev, "Error getting SE Core clk %d\n", ret);
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"Error getting SE Core clk %d\n", ret);
 		return ret;
 	}
 
 	gi3c->se.i3c_rsc.m_ahb_clk = devm_clk_get(gi3c->se.dev, "m-ahb");
 	if (IS_ERR(gi3c->se.i3c_rsc.m_ahb_clk)) {
 		ret = PTR_ERR(gi3c->se.i3c_rsc.m_ahb_clk);
-		dev_err(gi3c->se.dev, "Error getting M AHB clk %d\n", ret);
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"Error getting M AHB clk %d\n", ret);
 		return ret;
 	}
 
 	gi3c->se.i3c_rsc.s_ahb_clk = devm_clk_get(gi3c->se.dev, "s-ahb");
 	if (IS_ERR(gi3c->se.i3c_rsc.s_ahb_clk)) {
 		ret = PTR_ERR(gi3c->se.i3c_rsc.s_ahb_clk);
-		dev_err(gi3c->se.dev, "Error getting S AHB clk %d\n", ret);
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"Error getting S AHB clk %d\n", ret);
 		return ret;
 	}
 
@@ -1119,27 +1650,22 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 
 	/* base register address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
+	if (!res) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Err getting IO region\n");
 		return -EINVAL;
+	}
 
 	gi3c->se.base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(gi3c->se.base))
 		return PTR_ERR(gi3c->se.base);
 
-	/* IBI register address */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res)
-		return -EINVAL;
-
-	gi3c->se.ibi_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(gi3c->se.ibi_base))
-		return PTR_ERR(gi3c->se.ibi_base);
-
 	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
 			"qcom,wrapper-core", 0);
 	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
 		ret = PTR_ERR(wrapper_ph_node);
-		dev_err(&pdev->dev, "No wrapper core defined\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"No wrapper core defined\n");
 		return ret;
 	}
 
@@ -1147,7 +1673,8 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 	of_node_put(wrapper_ph_node);
 	if (IS_ERR_OR_NULL(wrapper_pdev)) {
 		ret = PTR_ERR(wrapper_pdev);
-		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Cannot retrieve wrapper device\n");
 		return ret;
 	}
 
@@ -1156,14 +1683,15 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 	ret = geni_se_resources_init(&gi3c->se.i3c_rsc, I3C_CORE2X_VOTE,
 				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
 	if (ret) {
-		dev_err(gi3c->se.dev, "geni_se_resources_init\n");
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			"geni_se_resources_init\n");
 		return ret;
 	}
 
 	ret = device_property_read_u32(&pdev->dev, "se-clock-frequency",
 		&gi3c->clk_src_freq);
 	if (ret) {
-		dev_info(&pdev->dev,
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
 			"SE clk freq not specified, default to 100 MHz.\n");
 		gi3c->clk_src_freq = 100000000;
 	}
@@ -1175,21 +1703,24 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 
 	gi3c->se.i3c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR(gi3c->se.i3c_rsc.geni_pinctrl)) {
-		dev_err(&pdev->dev, "Error no pinctrl config specified\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Error no pinctrl config specified\n");
 		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_pinctrl);
 		return ret;
 	}
 	gi3c->se.i3c_rsc.geni_gpio_active =
 		pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "default");
 	if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_active)) {
-		dev_err(&pdev->dev, "No default config specified\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Error no pinctr default config specified\n");
 		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_active);
 		return ret;
 	}
 	gi3c->se.i3c_rsc.geni_gpio_sleep =
 		pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "sleep");
 	if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep)) {
-		dev_err(&pdev->dev, "No sleep config specified\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Error no pinctrl sleep config specified\n");
 		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep);
 		return ret;
 	}
@@ -1197,6 +1728,88 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 	return 0;
 }
 
+static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c,
+		struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	if (of_property_read_u8(pdev->dev.of_node, "qcom,ibi-ctrl-id",
+		&gi3c->ibi.ctrl_id)) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"IBI controller instance id is not defined\n");
+		return -ENXIO;
+	}
+
+	/* Enable TLMM I3C MODE registers */
+	msm_qup_write(gi3c->ibi.ctrl_id, TLMM_I3C_MODE);
+
+	/* IBI register address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
+
+	gi3c->se.ibi_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(gi3c->se.ibi_base))
+		return PTR_ERR(gi3c->se.ibi_base);
+
+	gi3c->ibi.hw_support = (geni_read_reg(gi3c->se.base, SE_HW_PARAM_0)
+				& GEN_I3C_IBI_CTRL);
+	if (!gi3c->ibi.hw_support) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"IBI controller support not present\n");
+		return -ENODEV;
+	}
+
+	init_completion(&gi3c->ibi.done);
+	spin_lock_init(&gi3c->ibi.lock);
+	gi3c->ibi.num_slots = ((geni_read_reg(gi3c->se.ibi_base, IBI_HW_PARAM)
+				& I3C_IBI_TABLE_DEPTH_MSK));
+	gi3c->ibi.slots = devm_kcalloc(&pdev->dev, gi3c->ibi.num_slots,
+				sizeof(*gi3c->ibi.slots), GFP_KERNEL);
+	if (!gi3c->ibi.slots)
+		return -ENOMEM;
+
+	/* Register IBI_C manager interrupt */
+	gi3c->ibi.mngr_irq = platform_get_irq(pdev, 1);
+	if (gi3c->ibi.mngr_irq < 0) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"IRQ error for ibi_c manager\n");
+		return gi3c->ibi.mngr_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, gi3c->ibi.mngr_irq, geni_i3c_ibi_irq,
+			IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Request_irq failed:%d: err:%d\n",
+			gi3c->ibi.mngr_irq, ret);
+		return ret;
+	}
+
+	/* Register GPII interrupt */
+	gi3c->ibi.gpii_irq[0] = platform_get_irq(pdev, 2);
+	if (gi3c->ibi.gpii_irq[0] < 0) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"IRQ error for ibi_c gpii\n");
+		return gi3c->ibi.gpii_irq[0];
+	}
+
+	ret = devm_request_irq(&pdev->dev, gi3c->ibi.gpii_irq[0],
+			geni_i3c_ibi_irq, IRQF_TRIGGER_HIGH,
+			dev_name(&pdev->dev), gi3c);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Request_irq failed:%d: err:%d\n",
+			gi3c->ibi.gpii_irq[0], ret);
+		return ret;
+	}
+
+	qcom_geni_i3c_ibi_conf(gi3c);
+
+	return 0;
+}
+
 static int geni_i3c_probe(struct platform_device *pdev)
 {
 	struct geni_i3c_dev *gi3c;
@@ -1209,6 +1822,7 @@ static int geni_i3c_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	gi3c->se.dev = &pdev->dev;
+	gi3c->ipcl = ipc_log_context_create(4, dev_name(gi3c->se.dev), 0);
 
 	ret = i3c_geni_rsrcs_init(gi3c, pdev);
 	if (ret)
@@ -1220,17 +1834,11 @@ static int geni_i3c_probe(struct platform_device *pdev)
 
 	gi3c->irq = platform_get_irq(pdev, 0);
 	if (gi3c->irq < 0) {
-		dev_err(&pdev->dev, "IRQ error for i3c-master-geni\n");
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"IRQ error for i3c-master-geni\n");
 		return gi3c->irq;
 	}
 
-	ret = geni_i3c_clk_map_idx(gi3c);
-	if (ret) {
-		dev_err(&pdev->dev, "Invalid source clk frequency %d Hz: %d\n",
-			gi3c->clk_src_freq, ret);
-		return ret;
-	}
-
 	init_completion(&gi3c->done);
 	mutex_init(&gi3c->lock);
 	spin_lock_init(&gi3c->spinlock);
@@ -1238,7 +1846,8 @@ static int geni_i3c_probe(struct platform_device *pdev)
 	ret = devm_request_irq(&pdev->dev, gi3c->irq, geni_i3c_irq,
 		IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c);
 	if (ret) {
-		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Request_irq failed:%d: err:%d\n",
 			gi3c->irq, ret);
 		return ret;
 	}
@@ -1247,27 +1856,23 @@ static int geni_i3c_probe(struct platform_device *pdev)
 
 	ret = se_geni_resources_on(&gi3c->se.i3c_rsc);
 	if (ret) {
-		dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Error turning on resources %d\n", ret);
 		return ret;
 	}
 
-	if (!gi3c->ipcl) {
-		char ipc_name[I2C_NAME_SIZE];
-
-		snprintf(ipc_name, I2C_NAME_SIZE, "i3c-%d", gi3c->ctrlr.bus.id);
-		gi3c->ipcl = ipc_log_context_create(2, ipc_name, 0);
-	}
-
 	proto = get_se_proto(gi3c->se.base);
 	if (proto != I3C) {
-		dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Invalid proto %d\n", proto);
 		se_geni_resources_off(&gi3c->se.i3c_rsc);
 		return -ENXIO;
 	}
 
-	se_mode = readl_relaxed(gi3c->se.base + GENI_IF_FIFO_DISABLE_RO);
+	se_mode = geni_read_reg(gi3c->se.base, GENI_IF_FIFO_DISABLE_RO);
 	if (se_mode) {
-		dev_err(&pdev->dev, "Non supported mode %d\n", se_mode);
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Non supported mode %d\n", se_mode);
 		se_geni_resources_off(&gi3c->se.i3c_rsc);
 		return -ENXIO;
 	}
@@ -1276,6 +1881,13 @@ static int geni_i3c_probe(struct platform_device *pdev)
 	gi3c->tx_wm = tx_depth - 1;
 	geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth);
 	se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true);
+
+	ret = i3c_ibi_rsrcs_init(gi3c, pdev);
+	if (ret) {
+		se_geni_resources_off(&gi3c->se.i3c_rsc);
+		return ret;
+	}
+
 	se_geni_resources_off(&gi3c->se.i3c_rsc);
 	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
 		"i3c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
@@ -1314,7 +1926,6 @@ static int geni_i3c_runtime_suspend(struct device *dev)
 
 	disable_irq(gi3c->irq);
 	se_geni_resources_off(&gi3c->se.i3c_rsc);
-	msm_qup_write(0, 0x0);
 	return 0;
 }
 
@@ -1330,7 +1941,6 @@ static int geni_i3c_runtime_resume(struct device *dev)
 	enable_irq(gi3c->irq);
 
 	/* Enable TLMM I3C MODE registers */
-	msm_qup_write(0, 0x24);
 	return 0;
 }
 #else
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b2ccce5..c4bb67e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1076,14 +1076,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_WESTMERE,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_WESTMERE_EP,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_NEHALEM_EX,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_ATOM_PINEVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_LINCROFT,		idle_cpu_lincroft),
+	ICPU(INTEL_FAM6_ATOM_BONNELL,		idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_BONNELL_MID,		idle_cpu_lincroft),
 	ICPU(INTEL_FAM6_WESTMERE_EX,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_SANDYBRIDGE,		idle_cpu_snb),
 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		idle_cpu_snb),
-	ICPU(INTEL_FAM6_ATOM_CEDARVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	idle_cpu_byt),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,	idle_cpu_tangier),
+	ICPU(INTEL_FAM6_ATOM_SALTWELL,		idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	idle_cpu_byt),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,	idle_cpu_tangier),
 	ICPU(INTEL_FAM6_ATOM_AIRMONT,		idle_cpu_cht),
 	ICPU(INTEL_FAM6_IVYBRIDGE,		idle_cpu_ivb),
 	ICPU(INTEL_FAM6_IVYBRIDGE_X,		idle_cpu_ivt),
@@ -1091,7 +1091,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_HASWELL_X,		idle_cpu_hsw),
 	ICPU(INTEL_FAM6_HASWELL_ULT,		idle_cpu_hsw),
 	ICPU(INTEL_FAM6_HASWELL_GT3E,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT2,	idle_cpu_avn),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_X,	idle_cpu_avn),
 	ICPU(INTEL_FAM6_BROADWELL_CORE,		idle_cpu_bdw),
 	ICPU(INTEL_FAM6_BROADWELL_GT3E,		idle_cpu_bdw),
 	ICPU(INTEL_FAM6_BROADWELL_X,		idle_cpu_bdw),
@@ -1104,8 +1104,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		idle_cpu_knl),
 	ICPU(INTEL_FAM6_XEON_PHI_KNM,		idle_cpu_knl),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		idle_cpu_bxt),
-	ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,	idle_cpu_bxt),
-	ICPU(INTEL_FAM6_ATOM_DENVERTON,		idle_cpu_dnv),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	idle_cpu_bxt),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT_X,	idle_cpu_dnv),
 	{}
 };
 
@@ -1322,7 +1322,7 @@ static void intel_idle_state_table_update(void)
 		ivt_idle_state_table_update();
 		break;
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		bxt_idle_state_table_update();
 		break;
 	case INTEL_FAM6_SKYLAKE_DESKTOP:
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 471caa53..e5fdca7 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
 
 	mutex_lock(&data->mutex);
 	ret = kxcjk1013_set_mode(data, OPERATION);
+	if (ret == 0)
+		ret = kxcjk1013_set_range(data, data->range);
 	mutex_unlock(&data->mutex);
 
 	return ret;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 923f7ee..ecbaed9 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -716,6 +716,7 @@
 	depends on (ARCH_STM32 && OF) || COMPILE_TEST
 	select STM32_DFSDM_CORE
 	select REGMAP_MMIO
+	select IIO_BUFFER
 	select IIO_BUFFER_HW_CONSUMER
 	help
 	  Select this option to support ADCSigma delta modulator for
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index fc95107..25af4c7 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 	struct spi_transfer t = {
 		.tx_buf		= data,
 		.len		= size + 1,
-		.cs_change	= sigma_delta->bus_locked,
+		.cs_change	= sigma_delta->keep_cs_asserted,
 	};
 	struct spi_message m;
 	int ret;
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
 	if (sigma_delta->info->has_registers) {
 		data[0] = reg << sigma_delta->info->addr_shift;
 		data[0] |= sigma_delta->info->read_mask;
+		data[0] |= sigma_delta->comm;
 		spi_message_add_tail(&t[0], &m);
 	}
 	spi_message_add_tail(&t[1], &m);
@@ -217,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
 
 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
+	sigma_delta->keep_cs_asserted = true;
 	reinit_completion(&sigma_delta->completion);
 
 	ret = ad_sigma_delta_set_mode(sigma_delta, mode);
@@ -234,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
 		ret = 0;
 	}
 out:
+	sigma_delta->keep_cs_asserted = false;
+	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
 	sigma_delta->bus_locked = false;
 	spi_bus_unlock(sigma_delta->spi->master);
-	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
 
 	return ret;
 }
@@ -288,6 +291,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
 
 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
+	sigma_delta->keep_cs_asserted = true;
 	reinit_completion(&sigma_delta->completion);
 
 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
@@ -297,9 +301,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
 	ret = wait_for_completion_interruptible_timeout(
 			&sigma_delta->completion, HZ);
 
-	sigma_delta->bus_locked = false;
-	spi_bus_unlock(sigma_delta->spi->master);
-
 	if (ret == 0)
 		ret = -EIO;
 	if (ret < 0)
@@ -315,7 +316,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
 		sigma_delta->irq_dis = true;
 	}
 
+	sigma_delta->keep_cs_asserted = false;
 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+	sigma_delta->bus_locked = false;
+	spi_bus_unlock(sigma_delta->spi->master);
 	mutex_unlock(&indio_dev->mlock);
 
 	if (ret)
@@ -352,6 +356,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
 
 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
+	sigma_delta->keep_cs_asserted = true;
+
 	ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
 	if (ret)
 		goto err_unlock;
@@ -380,6 +386,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
 		sigma_delta->irq_dis = true;
 	}
 
+	sigma_delta->keep_cs_asserted = false;
 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
 
 	sigma_delta->bus_locked = false;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 75d2f73..596841a 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
 		ret = wait_event_interruptible_timeout(st->wq_data_avail,
 						       st->done,
 						       msecs_to_jiffies(1000));
-		if (ret == 0)
-			ret = -ETIMEDOUT;
-		if (ret < 0) {
-			mutex_unlock(&st->lock);
-			return ret;
-		}
 
-		*val = st->last_value;
-
+		/* Disable interrupts regardless of whether the ADC
+		 * conversion completed successfully
+		 */
 		at91_adc_writel(st, AT91_ADC_CHDR,
 				AT91_ADC_CH(chan->channel));
 		at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
-		st->last_value = 0;
-		st->done = false;
+		if (ret > 0) {
+			/* a valid conversion took place */
+			*val = st->last_value;
+			st->last_value = 0;
+			st->done = false;
+			ret = IIO_VAL_INT;
+		} else if (ret == 0) {
+			/* conversion timeout */
+			dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+				chan->channel);
+			ret = -ETIMEDOUT;
+		}
+
 		mutex_unlock(&st->lock);
-		return IIO_VAL_INT;
+		return ret;
 
 	case IIO_CHAN_INFO_SCALE:
 		*val = st->vref_mv;
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index f59ddf1..f172ef7 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -1003,7 +1003,7 @@ static int adc_probe(struct platform_device *pdev)
 	revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
 	if (revid_dev_node) {
 		pmic_rev_id = get_revid_data(revid_dev_node);
-		if (!(IS_ERR(pmic_rev_id)))
+		if (!(IS_ERR_OR_NULL(pmic_rev_id)))
 			skip_usb_wa = skip_usb_in_wa(pmic_rev_id);
 		else {
 			pr_err("Unable to get revid\n");
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index c0961a8..ebcfbde 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -52,40 +52,40 @@ static const struct vadc_map_pt adcmap_100k_104ef_104fb[] = {
  * 1.875V reference.
  */
 static const struct vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
-	{ 1831,	-40000 },
-	{ 1814,	-35000 },
-	{ 1791,	-30000 },
-	{ 1761,	-25000 },
-	{ 1723,	-20000 },
-	{ 1675,	-15000 },
-	{ 1616,	-10000 },
-	{ 1545,	-5000 },
-	{ 1463,	0 },
-	{ 1370,	5000 },
-	{ 1268,	10000 },
-	{ 1160,	15000 },
-	{ 1049,	20000 },
-	{ 937,	25000 },
-	{ 828,	30000 },
-	{ 726,	35000 },
-	{ 630,	40000 },
-	{ 544,	45000 },
-	{ 467,	50000 },
-	{ 399,	55000 },
-	{ 340,	60000 },
-	{ 290,	65000 },
-	{ 247,	70000 },
-	{ 209,	75000 },
-	{ 179,	80000 },
-	{ 153,	85000 },
-	{ 130,	90000 },
-	{ 112,	95000 },
-	{ 96,	100000 },
-	{ 82,	105000 },
-	{ 71,	110000 },
-	{ 62,	115000 },
-	{ 53,	120000 },
-	{ 46,	125000 },
+	{ 1831000,	-40000 },
+	{ 1814000,	-35000 },
+	{ 1791000,	-30000 },
+	{ 1761000,	-25000 },
+	{ 1723000,	-20000 },
+	{ 1675000,	-15000 },
+	{ 1616000,	-10000 },
+	{ 1545000,	-5000 },
+	{ 1463000,	0 },
+	{ 1370000,	5000 },
+	{ 1268000,	10000 },
+	{ 1160000,	15000 },
+	{ 1049000,	20000 },
+	{ 937000,	25000 },
+	{ 828000,	30000 },
+	{ 726000,	35000 },
+	{ 630000,	40000 },
+	{ 544000,	45000 },
+	{ 467000,	50000 },
+	{ 399000,	55000 },
+	{ 340000,	60000 },
+	{ 290000,	65000 },
+	{ 247000,	70000 },
+	{ 209000,	75000 },
+	{ 179000,	80000 },
+	{ 153000,	85000 },
+	{ 130000,	90000 },
+	{ 112000,	95000 },
+	{ 96000,	100000 },
+	{ 82000,	105000 },
+	{ 71000,	110000 },
+	{ 62000,	115000 },
+	{ 53000,	120000 },
+	{ 46000,	125000 },
 };
 
 /*
@@ -739,14 +739,14 @@ static int qcom_vadc_scale_hw_calib_therm(
 				const struct adc_data *data,
 				u16 adc_code, int *result_mdec)
 {
-	s64 voltage = 0, result = 0, adc_vdd_ref_mv = 1875;
+	s64 voltage = 0, result = 0;
 	int ret;
 
 	if (adc_code > VADC5_MAX_CODE)
 		adc_code = 0;
 
 	/* (ADC code * vref_vadc (1.875V)) / full_scale_code */
-	voltage = (s64) adc_code * adc_vdd_ref_mv * 1000;
+	voltage = (s64) adc_code * ADC_HC_VDD_REF * 1000;
 	voltage = div64_s64(voltage, (data->full_scale_code_volt
 								* 1000));
 	ret = qcom_vadc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index 51cc4bc..ccd3751 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -51,6 +51,7 @@
 #define VADC5_MAX_CODE				0x7fff
 #define VADC5_FULL_SCALE_CODE			0x70e4
 #define ADC_USR_DATA_CHECK			0x8000
+#define ADC_HC_VDD_REF			1875000
 
 #define IPC_LOGPAGES 10
 
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a5bd594..c9cd7e5 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -56,6 +56,9 @@ struct ti_ads7950_state {
 	struct spi_message	ring_msg;
 	struct spi_message	scan_single_msg;
 
+	/* Lock to protect the spi xfer buffers */
+	struct mutex		slock;
+
 	struct regulator	*reg;
 	unsigned int		vref_mv;
 
@@ -277,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
 	struct ti_ads7950_state *st = iio_priv(indio_dev);
 	int ret;
 
+	mutex_lock(&st->slock);
 	ret = spi_sync(st->spi, &st->ring_msg);
 	if (ret < 0)
 		goto out;
@@ -285,6 +289,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
 					   iio_get_time_ns(indio_dev));
 
 out:
+	mutex_unlock(&st->slock);
 	iio_trigger_notify_done(indio_dev->trig);
 
 	return IRQ_HANDLED;
@@ -295,7 +300,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
 	struct ti_ads7950_state *st = iio_priv(indio_dev);
 	int ret, cmd;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&st->slock);
 
 	cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
 	st->single_tx = cpu_to_be16(cmd);
@@ -307,7 +312,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
 	ret = be16_to_cpu(st->single_rx);
 
 out:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&st->slock);
 
 	return ret;
 }
@@ -423,16 +428,19 @@ static int ti_ads7950_probe(struct spi_device *spi)
 	if (ACPI_COMPANION(&spi->dev))
 		st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
 
+	mutex_init(&st->slock);
+
 	st->reg = devm_regulator_get(&spi->dev, "vref");
 	if (IS_ERR(st->reg)) {
 		dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
-		return PTR_ERR(st->reg);
+		ret = PTR_ERR(st->reg);
+		goto error_destroy_mutex;
 	}
 
 	ret = regulator_enable(st->reg);
 	if (ret) {
 		dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n");
-		return ret;
+		goto error_destroy_mutex;
 	}
 
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
@@ -454,6 +462,8 @@ static int ti_ads7950_probe(struct spi_device *spi)
 	iio_triggered_buffer_cleanup(indio_dev);
 error_disable_reg:
 	regulator_disable(st->reg);
+error_destroy_mutex:
+	mutex_destroy(&st->slock);
 
 	return ret;
 }
@@ -466,6 +476,7 @@ static int ti_ads7950_remove(struct spi_device *spi)
 	iio_device_unregister(indio_dev);
 	iio_triggered_buffer_cleanup(indio_dev);
 	regulator_disable(st->reg);
+	mutex_destroy(&st->slock);
 
 	return 0;
 }
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 3f6be5a..1ae86e7 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1290,6 +1290,7 @@ static int xadc_probe(struct platform_device *pdev)
 
 err_free_irq:
 	free_irq(xadc->irq, indio_dev);
+	cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 err_clk_disable_unprepare:
 	clk_disable_unprepare(xadc->clk);
 err_free_samplerate_trigger:
@@ -1319,8 +1320,8 @@ static int xadc_remove(struct platform_device *pdev)
 		iio_triggered_buffer_cleanup(indio_dev);
 	}
 	free_irq(xadc->irq, indio_dev);
+	cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 	clk_disable_unprepare(xadc->clk);
-	cancel_delayed_work(&xadc->zynq_unmask_work);
 	kfree(xadc->data);
 	kfree(indio_dev->channels);
 
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index e049323..71dd635 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -2,11 +2,9 @@
 #ifndef BME680_H_
 #define BME680_H_
 
-#define BME680_REG_CHIP_I2C_ID			0xD0
-#define BME680_REG_CHIP_SPI_ID			0x50
+#define BME680_REG_CHIP_ID			0xD0
 #define BME680_CHIP_ID_VAL			0x61
-#define BME680_REG_SOFT_RESET_I2C		0xE0
-#define BME680_REG_SOFT_RESET_SPI		0x60
+#define BME680_REG_SOFT_RESET			0xE0
 #define BME680_CMD_SOFTRESET			0xB6
 #define BME680_REG_STATUS			0x73
 #define   BME680_SPI_MEM_PAGE_BIT		BIT(4)
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 7d9bb62..b2db598 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -63,9 +63,23 @@ struct bme680_data {
 	s32 t_fine;
 };
 
+static const struct regmap_range bme680_volatile_ranges[] = {
+	regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+	regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+	regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+	.yes_ranges	= bme680_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(bme680_volatile_ranges),
+};
+
 const struct regmap_config bme680_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
+	.max_register = 0xef,
+	.volatile_table = &bme680_volatile_table,
+	.cache_type = REGCACHE_RBTREE,
 };
 EXPORT_SYMBOL(bme680_regmap_config);
 
@@ -330,6 +344,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
 	s64 var1, var2, var3;
 	s16 calc_temp;
 
+	/* If the calibration is invalid, attempt to reload it */
+	if (!calib->par_t2)
+		bme680_read_calib(data, calib);
+
 	var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
 	var2 = (var1 * calib->par_t2) >> 11;
 	var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -591,8 +609,7 @@ static int bme680_gas_config(struct bme680_data *data)
 	return ret;
 }
 
-static int bme680_read_temp(struct bme680_data *data,
-			    int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
 {
 	struct device *dev = regmap_get_device(data->regmap);
 	int ret;
@@ -625,10 +642,9 @@ static int bme680_read_temp(struct bme680_data *data,
 	 * compensate_press/compensate_humid to get compensated
 	 * pressure/humidity readings.
 	 */
-	if (val && val2) {
-		*val = comp_temp;
-		*val2 = 100;
-		return IIO_VAL_FRACTIONAL;
+	if (val) {
+		*val = comp_temp * 10; /* Centidegrees to millidegrees */
+		return IIO_VAL_INT;
 	}
 
 	return ret;
@@ -643,7 +659,7 @@ static int bme680_read_press(struct bme680_data *data,
 	s32 adc_press;
 
 	/* Read and compensate temperature to get a reading of t_fine */
-	ret = bme680_read_temp(data, NULL, NULL);
+	ret = bme680_read_temp(data, NULL);
 	if (ret < 0)
 		return ret;
 
@@ -676,7 +692,7 @@ static int bme680_read_humid(struct bme680_data *data,
 	u32 comp_humidity;
 
 	/* Read and compensate temperature to get a reading of t_fine */
-	ret = bme680_read_temp(data, NULL, NULL);
+	ret = bme680_read_temp(data, NULL);
 	if (ret < 0)
 		return ret;
 
@@ -769,7 +785,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_PROCESSED:
 		switch (chan->type) {
 		case IIO_TEMP:
-			return bme680_read_temp(data, val, val2);
+			return bme680_read_temp(data, val);
 		case IIO_PRESSURE:
 			return bme680_read_press(data, val, val2);
 		case IIO_HUMIDITYRELATIVE:
@@ -905,8 +921,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
 {
 	struct iio_dev *indio_dev;
 	struct bme680_data *data;
+	unsigned int val;
 	int ret;
 
+	ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+			   BME680_CMD_SOFTRESET);
+	if (ret < 0) {
+		dev_err(dev, "Failed to reset chip\n");
+		return ret;
+	}
+
+	ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+	if (ret < 0) {
+		dev_err(dev, "Error reading chip ID\n");
+		return ret;
+	}
+
+	if (val != BME680_CHIP_ID_VAL) {
+		dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+				val, BME680_CHIP_ID_VAL);
+		return -ENODEV;
+	}
+
 	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
 	if (!indio_dev)
 		return -ENOMEM;
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 06d4be5..cfc4449 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
 {
 	struct regmap *regmap;
 	const char *name = NULL;
-	unsigned int val;
-	int ret;
 
 	regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
 	if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
 		return PTR_ERR(regmap);
 	}
 
-	ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
-			   BME680_CMD_SOFTRESET);
-	if (ret < 0) {
-		dev_err(&client->dev, "Failed to reset chip\n");
-		return ret;
-	}
-
-	ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
-	if (ret < 0) {
-		dev_err(&client->dev, "Error reading I2C chip ID\n");
-		return ret;
-	}
-
-	if (val != BME680_CHIP_ID_VAL) {
-		dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
-				val, BME680_CHIP_ID_VAL);
-		return -ENODEV;
-	}
-
 	if (id)
 		name = id->name;
 
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index c9fb05e..881778e 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -11,28 +11,93 @@
 
 #include "bme680.h"
 
+struct bme680_spi_bus_context {
+	struct spi_device *spi;
+	u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits, a "page" register determines
+ * which part of the 8-bit range is active. This function looks at the address
+ * and writes the page selection bit if needed
+ */
+static int bme680_regmap_spi_select_page(
+	struct bme680_spi_bus_context *ctx, u8 reg)
+{
+	struct spi_device *spi = ctx->spi;
+	int ret;
+	u8 buf[2];
+	u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+	if (page == ctx->current_page)
+		return 0;
+
+	/*
+	 * Data sheet claims we're only allowed to change bit 4, so we must do
+	 * a read-modify-write on each and every page select
+	 */
+	buf[0] = BME680_REG_STATUS;
+	ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+	if (ret < 0) {
+		dev_err(&spi->dev, "failed to set page %u\n", page);
+		return ret;
+	}
+
+	buf[0] = BME680_REG_STATUS;
+	if (page)
+		buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+	else
+		buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+	ret = spi_write(spi, buf, 2);
+	if (ret < 0) {
+		dev_err(&spi->dev, "failed to set page %u\n", page);
+		return ret;
+	}
+
+	ctx->current_page = page;
+
+	return 0;
+}
+
 static int bme680_regmap_spi_write(void *context, const void *data,
 				   size_t count)
 {
-	struct spi_device *spi = context;
+	struct bme680_spi_bus_context *ctx = context;
+	struct spi_device *spi = ctx->spi;
+	int ret;
 	u8 buf[2];
 
 	memcpy(buf, data, 2);
+
+	ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+	if (ret)
+		return ret;
+
 	/*
 	 * The SPI register address (= full register address without bit 7)
 	 * and the write command (bit7 = RW = '0')
 	 */
 	buf[0] &= ~0x80;
 
-	return spi_write_then_read(spi, buf, 2, NULL, 0);
+	return spi_write(spi, buf, 2);
 }
 
 static int bme680_regmap_spi_read(void *context, const void *reg,
 				  size_t reg_size, void *val, size_t val_size)
 {
-	struct spi_device *spi = context;
+	struct bme680_spi_bus_context *ctx = context;
+	struct spi_device *spi = ctx->spi;
+	int ret;
+	u8 addr = *(const u8 *)reg;
 
-	return spi_write_then_read(spi, reg, reg_size, val, val_size);
+	ret = bme680_regmap_spi_select_page(ctx, addr);
+	if (ret)
+		return ret;
+
+	addr |= 0x80; /* bit7 = RW = '1' */
+
+	return spi_write_then_read(spi, &addr, 1, val, val_size);
 }
 
 static struct regmap_bus bme680_regmap_bus = {
@@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = {
 static int bme680_spi_probe(struct spi_device *spi)
 {
 	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct bme680_spi_bus_context *bus_context;
 	struct regmap *regmap;
-	unsigned int val;
 	int ret;
 
 	spi->bits_per_word = 8;
@@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi)
 		return ret;
 	}
 
+	bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+	if (!bus_context)
+		return -ENOMEM;
+
+	bus_context->spi = spi;
+	bus_context->current_page = 0xff; /* Undefined on warm boot */
+
 	regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
-				  &spi->dev, &bme680_regmap_config);
+				  bus_context, &bme680_regmap_config);
 	if (IS_ERR(regmap)) {
 		dev_err(&spi->dev, "Failed to register spi regmap %d\n",
 				(int)PTR_ERR(regmap));
 		return PTR_ERR(regmap);
 	}
 
-	ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
-			   BME680_CMD_SOFTRESET);
-	if (ret < 0) {
-		dev_err(&spi->dev, "Failed to reset chip\n");
-		return ret;
-	}
-
-	/* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
-	ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
-	if (ret < 0) {
-		dev_err(&spi->dev, "Error reading SPI chip ID\n");
-		return ret;
-	}
-
-	if (val != BME680_CHIP_ID_VAL) {
-		dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
-				val, BME680_CHIP_ID_VAL);
-		return -ENODEV;
-	}
-	/*
-	 * select Page 1 of spi_mem_page to enable access to
-	 * to registers from address 0x00 to 0x7F.
-	 */
-	ret = regmap_write_bits(regmap, BME680_REG_STATUS,
-				BME680_SPI_MEM_PAGE_BIT,
-				BME680_SPI_MEM_PAGE_1_VAL);
-	if (ret < 0) {
-		dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
-		return ret;
-	}
-
 	return bme680_core_probe(&spi->dev, regmap, id->name);
 }
 
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 89cb006..8d76afb 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
 			 * Do not use IIO_DEGREE_TO_RAD to avoid precision
 			 * loss. Round to the nearest integer.
 			 */
-			*val = div_s64(val64 * 314159 + 9000000ULL, 1000);
-			*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
-			ret = IIO_VAL_FRACTIONAL;
+			*val = 0;
+			*val2 = div_s64(val64 * 3141592653ULL,
+					180 << (CROS_EC_SENSOR_BITS - 1));
+			ret = IIO_VAL_INT_PLUS_NANO;
 			break;
 		case MOTIONSENSE_TYPE_MAG:
 			/*
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
index 645f2e3..e38f704 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
 			    unsigned int len, int64_t timestamp)
 {
 	__le32 time;
-	int64_t calculated_time;
+	int64_t calculated_time = 0;
 	struct ssp_sensor_data *spd = iio_priv(indio_dev);
 
 	if (indio_dev->scan_bytes == 0)
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 8b5aad4..30dc2775 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
 
 	inoutbuf[0] = 0x60; /* write EEPROM */
 	inoutbuf[0] |= data->ref_mode << 3;
+	inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
 	inoutbuf[1] = data->dac_value >> 4;
 	inoutbuf[2] = (data->dac_value & 0xf) << 4;
 
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 63ca316..92c07ab 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
 		return bmg160_get_filter(data, val);
 	case IIO_CHAN_INFO_SCALE:
-		*val = 0;
 		switch (chan->type) {
 		case IIO_TEMP:
-			*val2 = 500000;
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = 500;
+			return IIO_VAL_INT;
 		case IIO_ANGL_VEL:
 		{
 			int i;
@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
 			for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
 				if (bmg160_scale_table[i].dps_range ==
 							data->dps_range) {
+					*val = 0;
 					*val2 = bmg160_scale_table[i].scale;
 					return IIO_VAL_INT_PLUS_MICRO;
 				}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 77fac81..5ddebed 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -29,7 +29,8 @@
 
 #include "mpu3050.h"
 
-#define MPU3050_CHIP_ID		0x69
+#define MPU3050_CHIP_ID		0x68
+#define MPU3050_CHIP_ID_MASK	0x7E
 
 /*
  * Register map: anything suffixed *_H is a big-endian high byte and always
@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
 		goto err_power_down;
 	}
 
-	if (val != MPU3050_CHIP_ID) {
-		dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+	if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+		dev_err(dev, "unsupported chip id %02x\n",
+				(u8)(val & MPU3050_CHIP_ID_MASK));
 		ret = -ENODEV;
 		goto err_power_down;
 	}
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index cd5bfe3..dadd921 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
 	const unsigned long *mask;
 	unsigned long *trialmask;
 
-	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
-				  sizeof(*trialmask),
-				  GFP_KERNEL);
+	trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+			    sizeof(*trialmask), GFP_KERNEL);
 	if (trialmask == NULL)
 		return -ENOMEM;
 	if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a062cfd..49d4b4f 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1735,10 +1735,10 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-	mutex_lock(&indio_dev->info_exist_lock);
-
 	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
+	mutex_lock(&indio_dev->info_exist_lock);
+
 	iio_device_unregister_debugfs(indio_dev);
 
 	iio_disable_all_buffers(indio_dev);
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 3de7f44..86abba5 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
 static int hmc5843_i2c_probe(struct i2c_client *cli,
 			     const struct i2c_device_id *id)
 {
+	struct regmap *regmap = devm_regmap_init_i2c(cli,
+			&hmc5843_i2c_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
 	return hmc5843_common_probe(&cli->dev,
-			devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
+			regmap,
 			id->driver_data, id->name);
 }
 
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 535f03a..79b2b70 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
 static int hmc5843_spi_probe(struct spi_device *spi)
 {
 	int ret;
+	struct regmap *regmap;
 	const struct spi_device_id *id = spi_get_device_id(spi);
 
 	spi->mode = SPI_MODE_3;
@@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
+	regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
 	return hmc5843_common_probe(&spi->dev,
-			devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
+			regmap,
 			id->driver_data, id->name);
 }
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6f5be78..39dc7be 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1078,18 +1078,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
-static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
 {
 	if (src->sa_family != dst->sa_family)
 		return -1;
 
 	switch (src->sa_family) {
 	case AF_INET:
-		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
-		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
-	case AF_INET6:
-		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
-				     &((struct sockaddr_in6 *) dst)->sin6_addr);
+		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
+		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
+	case AF_INET6: {
+		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
+		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
+		bool link_local;
+
+		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
+					  &dst_addr6->sin6_addr))
+			return 1;
+		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
+			     IPV6_ADDR_LINKLOCAL;
+		/* Link local must match their scope_ids */
+		return link_local ? (src_addr6->sin6_scope_id !=
+				     dst_addr6->sin6_scope_id) :
+				    0;
+	}
+
 	default:
 		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
 				   &((struct sockaddr_ib *) dst)->sib_addr);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 9b0bea8..b79b61b 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -711,16 +711,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 						agent->device->name,
 						agent->port_num);
 	if (ret)
-		return ret;
+		goto free_security;
 
 	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
 	ret = register_lsm_notifier(&agent->lsm_nb);
 	if (ret)
-		return ret;
+		goto free_security;
 
 	agent->smp_allowed = true;
 	agent->lsm_nb_reg = true;
 	return 0;
+
+free_security:
+	security_ib_free_security(agent->security);
+	return ret;
 }
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
@@ -728,9 +732,10 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 	if (!rdma_protocol_ib(agent->device, agent->port_num))
 		return;
 
-	security_ib_free_security(agent->security);
 	if (agent->lsm_nb_reg)
 		unregister_lsm_notifier(&agent->lsm_nb);
+
+	security_ib_free_security(agent->security);
 }
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ee03d6..82f309f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1087,8 +1087,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 }
 EXPORT_SYMBOL(ib_open_qp);
 
-static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
-		struct ib_qp_init_attr *qp_init_attr)
+static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
+				   struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ib_qp *real_qp = qp;
 
@@ -1103,10 +1103,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
 
 	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
 			  qp_init_attr->qp_context);
-	if (!IS_ERR(qp))
-		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
-	else
-		real_qp->device->destroy_qp(real_qp);
+	if (IS_ERR(qp))
+		return qp;
+
+	__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
 	return qp;
 }
 
@@ -1137,10 +1137,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		return qp;
 
 	ret = ib_create_qp_security(qp, device);
-	if (ret) {
-		ib_destroy_qp(qp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err;
 
 	qp->real_qp    = qp;
 	qp->qp_type    = qp_init_attr->qp_type;
@@ -1153,8 +1151,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	INIT_LIST_HEAD(&qp->sig_mrs);
 	qp->port = 0;
 
-	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
-		return ib_create_xrc_qp(qp, qp_init_attr);
+	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+		struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
+
+		if (IS_ERR(xrc_qp)) {
+			ret = PTR_ERR(xrc_qp);
+			goto err;
+		}
+		return xrc_qp;
+	}
 
 	qp->event_handler = qp_init_attr->event_handler;
 	qp->qp_context = qp_init_attr->qp_context;
@@ -1181,11 +1186,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 
 	if (qp_init_attr->cap.max_rdma_ctxs) {
 		ret = rdma_rw_init_mrs(qp, qp_init_attr);
-		if (ret) {
-			pr_err("failed to init MR pool ret= %d\n", ret);
-			ib_destroy_qp(qp);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto err;
 	}
 
 	/*
@@ -1198,6 +1200,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 				 device->attrs.max_sge_rd);
 
 	return qp;
+
+err:
+	ib_destroy_qp(qp);
+	return ERR_PTR(ret);
+
 }
 EXPORT_SYMBOL(ib_create_qp);
 
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a68569e..3be6405 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -458,6 +458,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 		skb_reset_transport_header(skb);
 	} else {
 		skb = alloc_skb(len, gfp);
+		if (!skb)
+			return NULL;
 	}
 	t4_set_arp_err_handler(skb, NULL, NULL);
 	return skb;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 902d12d..b12c8ff 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -13388,7 +13388,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
-	int qos_rmt_count;
+	int rmt_count;
 	int user_rmt_reduced;
 	u32 n_usr_ctxts;
 	u32 send_contexts = chip_send_contexts(dd);
@@ -13450,10 +13450,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 		n_usr_ctxts = rcv_contexts - total_contexts;
 	}
 
-	/* each user context requires an entry in the RMT */
-	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-	if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+	/*
+	 * The RMT entries are currently allocated as shown below:
+	 * 1. QOS (0 to 128 entries);
+	 * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+	 * 3. VNIC (num_vnic_contexts).
+	 * It should be noted that PSM FECN oversubscribe num_vnic_contexts
+	 * entries of RMT because both VNIC and PSM could allocate any receive
+	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
+	 * context.
+	 */
+	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
 		dd_dev_err(dd,
 			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
 			   n_usr_ctxts,
@@ -14441,9 +14451,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
 	u64 reg;
 	int i, idx, regoff, regidx;
 	u8 offset;
+	u32 total_cnt;
 
 	/* there needs to be enough room in the map table */
-	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+	total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
 		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
 		return;
 	}
@@ -14497,7 +14509,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
 	/* add rule 1 */
 	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-	rmt->used += dd->num_user_contexts;
+	rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index da786eb..368f4f0 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -798,7 +798,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
 			ppd->hfi1_wq =
 				alloc_workqueue(
 				    "hfi%d_%d",
-				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+				    WQ_MEM_RECLAIM,
 				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
 				    dd->unit, pidx);
 			if (!ppd->hfi1_wq)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 6f013a5..770c78c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2303,7 +2303,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
@@ -2377,7 +2377,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 0d96c5b..d2d4ab9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -66,7 +66,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 			     HNS_ROCE_VLAN_SL_BIT_MASK) <<
 			     HNS_ROCE_VLAN_SL_SHIFT;
 
-	ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
+	ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
 				     (rdma_ah_get_port_num(ah_attr) <<
 				     HNS_ROCE_PORT_NUM_SHIFT));
 	ah->av.gid_index = grh->sgid_index;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f6faefe..a73d388 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -745,6 +745,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
 		dma_offset = offset = idx_offset * table->obj_size;
 	} else {
+		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
 		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
 		/* mtt mhop */
 		i = mhop.l0_idx;
@@ -756,8 +758,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 			hem_idx = i;
 
 		hem = table->hem[hem_idx];
-		dma_offset = offset = (obj & (table->num_obj - 1)) *
-				       table->obj_size % mhop.bt_chunk_size;
+		dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+				       mhop.bt_chunk_size;
 		if (mhop.hop_num == 2)
 			dma_offset = offset = 0;
 	}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index eb26a5f..41a538d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -707,7 +707,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 	struct hns_roce_hem_table *table;
 	dma_addr_t dma_handle;
 	__le64 *mtts;
-	u32 s = start_index * sizeof(u64);
 	u32 bt_page_size;
 	u32 i;
 
@@ -730,7 +729,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 		table = &hr_dev->mr_table.mtt_cqe_table;
 
 	mtts = hns_roce_table_find(hr_dev, table,
-				mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+				mtt->first_seg +
+				start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
 				&dma_handle);
 	if (!mtts)
 		return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index efb7e96..2fa4fb1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -494,7 +494,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
 {
-	if (attr->qp_type == IB_QPT_XRC_TGT)
+	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
 		return 0;
 
 	return 1;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 50be240..8cc4da6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2014,6 +2014,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
 	if (vma->vm_flags & VM_WRITE)
 		return -EPERM;
+	vma->vm_flags &= ~VM_MAYWRITE;
 
 	if (!dev->mdev->clock_info_page)
 		return -EOPNOTSUPP;
@@ -2197,6 +2198,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
 		if (vma->vm_flags & VM_WRITE)
 			return -EPERM;
+		vma->vm_flags &= ~VM_MAYWRITE;
 
 		/* Don't expose to user-space information it shouldn't have */
 		if (PAGE_SIZE > 4096)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index a571989..ed99f0a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1123,6 +1123,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
 	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
 	pvrdma_free_slots(dev);
+	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+			  dev->dsrbase);
 
 	iounmap(dev->regs);
 	kfree(dev->sgid_tbl);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 49c9541..5819c9d 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
 	if (unlikely(mapped_segs == mr->mr.max_segs))
 		return -ENOMEM;
 
-	if (mr->mr.length == 0) {
-		mr->mr.user_base = addr;
-		mr->mr.iova = addr;
-	}
-
 	m = mapped_segs / RVT_SEGSZ;
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
+	int ret;
 
 	mr->mr.length = 0;
 	mr->mr.page_shift = PAGE_SHIFT;
-	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-			      rvt_set_page);
+	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+	mr->mr.user_base = ibmr->iova;
+	mr->mr.iova = ibmr->iova;
+	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+	mr->mr.length = (size_t)ibmr->length;
+	return ret;
 }
 
 /**
@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
 	ibmr->rkey = key;
 	mr->mr.lkey = key;
 	mr->mr.access_flags = access;
+	mr->mr.iova = ibmr->iova;
 	atomic_set(&mr->mr.lkey_invalid, 0);
 
 	return 0;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f4bce5a..ea4afde 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2793,8 +2793,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
 	srpt_queue_response(cmd);
 }
 
+/*
+ * This function is called for aborted commands if no response is sent to the
+ * initiator. Make sure that the credits freed by aborting a command are
+ * returned to the initiator the next time a response is sent by incrementing
+ * ch->req_lim_delta.
+ */
 static void srpt_aborted_task(struct se_cmd *cmd)
 {
+	struct srpt_send_ioctx *ioctx = container_of(cmd,
+				struct srpt_send_ioctx, cmd);
+	struct srpt_rdma_ch *ch = ioctx->ch;
+
+	atomic_inc(&ch->req_lim_delta);
 }
 
 static int srpt_queue_status(struct se_cmd *cmd)
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index effb632..4c67cf3 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 		return error;
 	}
 
+	pdata->input = input;
+	platform_set_drvdata(pdev, pdata);
+
 	error = devm_request_irq(&pdev->dev, pdata->irq,
 			       imx_snvs_pwrkey_interrupt,
 			       0, pdev->name, pdev);
@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 		return error;
 	}
 
-	pdata->input = input;
-	platform_set_drvdata(pdev, pdata);
-
 	device_init_wakeup(&pdev->dev, pdata->wakeup);
 
 	return 0;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 628ef61..f9525d6 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0600", 0 },
 	{ "ELAN0601", 0 },
 	{ "ELAN0602", 0 },
+	{ "ELAN0603", 0 },
+	{ "ELAN0604", 0 },
 	{ "ELAN0605", 0 },
+	{ "ELAN0606", 0 },
+	{ "ELAN0607", 0 },
 	{ "ELAN0608", 0 },
 	{ "ELAN0609", 0 },
 	{ "ELAN060B", 0 },
 	{ "ELAN060C", 0 },
+	{ "ELAN060F", 0 },
+	{ "ELAN0610", 0 },
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
+	{ "ELAN0615", 0 },
+	{ "ELAN0616", 0 },
 	{ "ELAN0617", 0 },
 	{ "ELAN0618", 0 },
+	{ "ELAN0619", 0 },
+	{ "ELAN061A", 0 },
+	{ "ELAN061B", 0 },
 	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
 	{ "ELAN061E", 0 },
+	{ "ELAN061F", 0 },
 	{ "ELAN0620", 0 },
 	{ "ELAN0621", 0 },
 	{ "ELAN0622", 0 },
+	{ "ELAN0623", 0 },
+	{ "ELAN0624", 0 },
+	{ "ELAN0625", 0 },
+	{ "ELAN0626", 0 },
+	{ "ELAN0627", 0 },
+	{ "ELAN0628", 0 },
+	{ "ELAN0629", 0 },
+	{ "ELAN062A", 0 },
+	{ "ELAN062B", 0 },
+	{ "ELAN062C", 0 },
+	{ "ELAN062D", 0 },
+	{ "ELAN0631", 0 },
+	{ "ELAN0632", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index fc3ab93..7fb358f 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 
 	error = rmi_register_function(fn);
 	if (error)
-		goto err_put_fn;
+		return error;
 
 	if (pdt->function_number == 0x01)
 		data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 	list_add_tail(&fn->node, &data->function_list);
 
 	return RMI_SCAN_CONTINUE;
-
-err_put_fn:
-	put_device(&fn->dev);
-	return error;
 }
 
 void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index df64d6a..93901eb 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 	}
 
 	rc = f11_write_control_regs(fn, &f11->sens_query,
-			   &f11->dev_controls, fn->fd.query_base_addr);
+			   &f11->dev_controls, fn->fd.control_base_addr);
 	if (rc)
 		dev_warn(&fn->dev, "Failed to write control registers\n");
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 482508d..5299eea 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1312,4 +1312,18 @@
 
 source "drivers/input/touchscreen/st/Kconfig"
 
+config TOUCHSCREEN_SYNAPTICS_DSX
+	bool "Synaptics DSX Touchscreen Driver"
+	depends on I2C
+	default y
+	help
+	  Say Y here if you have a Synaptics Touchscreen.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx.
+
+source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 169d2e2..a120f92 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -110,3 +110,4 @@
 obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50)	+= colibri-vf50-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023)	+= rohm_bu21023.o
 obj-$(CONFIG_TOUCHSCREEN_ST)		+= st/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index c4a14a1..6a3953d 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -36,7 +36,7 @@
 #include <linux/notifier.h>
 #include <linux/fb.h>
 #else
-#include <linux/msm_drm_notify.h>
+#include <drm/drm_panel.h>
 #endif
 
 #ifdef KERNEL_ABOVE_2_6_38
@@ -129,6 +129,8 @@ static int fts_command(struct fts_ts_info *info, unsigned char cmd);
 static int fts_chip_initialization(struct fts_ts_info *info);
 static int fts_enable_reg(struct fts_ts_info *info, bool enable);
 
+static struct drm_panel *active_panel;
+
 void touch_callback(unsigned int status)
 {
 	/* Empty */
@@ -220,7 +222,7 @@ static ssize_t fts_fwupdate_store(struct device *dev,
 	}
 
 	logError(0, "%s Starting flashing procedure...\n", tag);
-	ret = flash_burn(fwD, mode, !mode);
+	ret = flash_burn(&fwD, mode, !mode);
 
 	if (ret < OK && ret != (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED))
 		logError(0, "%s flashProcedure: ERROR %02X\n",
@@ -2232,9 +2234,11 @@ static ssize_t fts_stm_cmd_show(struct device *dev,
 		}
 
 #if defined(CONFIG_FB_MSM)
-	res = fb_unregister_client(&info->notifier);
+		res = fb_unregister_client(&info->notifier);
 #else
-	res = msm_drm_unregister_client(&info->notifier);
+		if (active_panel)
+			res = drm_panel_notifier_unregister(active_panel,
+				&info->notifier);
 #endif
 		if (res < 0) {
 			logError(1, "%s ERROR: unregister notifier failed!\n",
@@ -2437,7 +2441,8 @@ static ssize_t fts_stm_cmd_show(struct device *dev,
 	if (fb_register_client(&info->notifier) < 0)
 		logError(1, "%s ERROR: register notifier failed!\n", tag);
 #else
-	if (msm_drm_register_client(&info->notifier) < 0)
+	if (active_panel &&
+		drm_panel_notifier_register(active_panel, &info->notifier) < 0)
 		logError(1, "%s ERROR: register notifier failed!\n", tag);
 #endif
 
@@ -2445,6 +2450,8 @@ static ssize_t fts_stm_cmd_show(struct device *dev,
 	/*here start the reporting phase,*/
 	/* assembling the data to send in the file node */
 	all_strbuff = kmalloc(size, GFP_KERNEL);
+	if (!all_strbuff)
+		return 0;
 	memset(all_strbuff, 0, size);
 
 	snprintf(buff, sizeof(buff), "%02X", 0xAA);
@@ -3287,23 +3294,39 @@ static void fts_fw_update_auto(struct work_struct *work)
 	struct delayed_work, work);
 	int crc_status = 0;
 	int error = 0;
+	struct Firmware fwD;
+	int orig_size;
+	u8 *orig_data;
 
 	info = container_of(fwu_work, struct fts_ts_info, fwu_work);
 	logError(0, "%s Fw Auto Update is starting...\n", tag);
 
-	fts_chip_powercycle(info);
-	retval = flashProcedure(PATH_FILE_FW, crc_status, 1);
-	if ((retval & ERROR_FILE_NOT_FOUND) == ERROR_FILE_NOT_FOUND ||
-		retval == (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED)) {
-		logError(1, "%s %s: no firmware file or no newer firmware!\n",
-			tag, __func__);
+	ret = getFWdata(PATH_FILE_FW, &orig_data, &orig_size, 0);
+	if (ret < OK) {
+		logError(0, "%s %s: impossible retrieve FW... ERROR %08X\n",
+			tag, __func__, ERROR_MEMH_READ);
+		ret = (ret | ERROR_MEMH_READ);
 		goto NO_FIRMWARE_UPDATE;
-	} else if ((retval & 0xFF000000) == ERROR_FLASH_PROCEDURE) {
+	}
+
+	ret = parseBinFile(orig_data, orig_size, &fwD, 1);
+	if (ret < OK) {
+		logError(1, "%s %s: impossible parse ERROR %08X\n",
+			tag, __func__, ERROR_MEMH_READ);
+		ret = (ret | ERROR_MEMH_READ);
+		kfree(fwD.data);
+		goto NO_FIRMWARE_UPDATE;
+	}
+
+	fts_chip_powercycle(info);
+	retval = flash_burn(&fwD, crc_status, 1);
+
+	if ((retval & 0xFF000000) == ERROR_FLASH_PROCEDURE) {
 		logError(1, "%s %s:firmware update retry! ERROR %08X\n",
 			tag, __func__, retval);
 		fts_chip_powercycle(info);
 
-		retval1 = flashProcedure(PATH_FILE_FW, crc_status, 1);
+		retval1 = flash_burn(&fwD, crc_status, 1);
 
 		if ((retval1 & 0xFF000000) == ERROR_FLASH_PROCEDURE) {
 			logError(1, "%s %s: update failed again! ERROR %08X\n",
@@ -3312,6 +3335,7 @@ static void fts_fw_update_auto(struct work_struct *work)
 		}
 	}
 
+	kfree(fwD.data);
 	u16ToU8_be(SYSTEM_RESET_ADDRESS, &cmd[1]);
 	ret = fts_writeCmd(cmd, 4);
 	if (ret < OK) {
@@ -3709,7 +3733,9 @@ static int fts_init_afterProbe(struct fts_ts_info *info)
 #if defined(CONFIG_FB_MSM)
 	error |= fb_register_client(&info->notifier);
 #else
-	error |= msm_drm_register_client(&info->notifier);
+	if (active_panel)
+		error |= drm_panel_notifier_register(active_panel,
+			&info->notifier);
 #endif
 
 	if (error < OK)
@@ -4025,14 +4051,14 @@ static int fts_chip_power_switch(struct fts_ts_info *info, bool on)
 	}
 
 	if (on) {
-		if (info->pwr_reg) {
+		if (info->bus_reg) {
 			error = regulator_enable(info->bus_reg);
 			if (error < 0)
 				logError(1, "%s %s: Failed to enable AVDD\n",
 					tag, __func__);
 		}
 
-		if (info->bus_reg) {
+		if (info->pwr_reg) {
 			error = regulator_enable(info->pwr_reg);
 			if (error < 0)
 				logError(1, "%s %s: Failed to enable DVDD\n",
@@ -4182,21 +4208,21 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb,
 {
 	struct fts_ts_info *info = container_of(nb, struct fts_ts_info,
 				notifier);
-	struct msm_drm_notifier *evdata = data;
+	struct drm_panel_notifier *evdata = data;
 	unsigned int blank;
 
-	if (!evdata || (evdata->id != 0))
+	if (!evdata)
 		return 0;
 
-	if (val != MSM_DRM_EVENT_BLANK)
+	if (val != DRM_PANEL_EVENT_BLANK)
 		return 0;
+
 	logError(0, "%s %s: fts notifier begin!\n", tag, __func__);
-
-	if (evdata->data && val == MSM_DRM_EVENT_BLANK && info) {
+	if (evdata->data && val == DRM_PANEL_EVENT_BLANK && info) {
 		blank = *(int *) (evdata->data);
 
 		switch (blank) {
-		case MSM_DRM_BLANK_POWERDOWN:
+		case DRM_PANEL_BLANK_POWERDOWN:
 			if (info->sensor_sleep)
 				break;
 
@@ -4211,7 +4237,7 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb,
 				queue_work(info->event_wq, &info->suspend_work);
 			break;
 
-		case MSM_DRM_BLANK_UNBLANK:
+		case DRM_PANEL_BLANK_UNBLANK:
 			if (!info->sensor_sleep)
 				break;
 
@@ -4487,6 +4513,30 @@ static int parse_dt(struct device *dev,
 	return OK;
 }
 
+static int check_dt(struct device_node *np)
+{
+	int i;
+	int count;
+	struct device_node *node;
+	struct drm_panel *panel;
+
+	count = of_count_phandle_with_args(np, "panel", NULL);
+	if (count <= 0)
+		return OK;
+
+	for (i = 0; i < count; i++) {
+		node = of_parse_phandle(np, "panel", i);
+		panel = of_drm_find_panel(node);
+		of_node_put(node);
+		if (!IS_ERR(panel)) {
+			active_panel = panel;
+			return OK;
+		}
+	}
+
+	return -ENODEV;
+}
+
 static int fts_probe(struct i2c_client *client,
 		const struct i2c_device_id *idp)
 {
@@ -4498,16 +4548,17 @@ static int fts_probe(struct i2c_client *client,
 
 	logError(0, "%s %s: driver probe begin!\n", tag, __func__);
 
-	logError(0, "%s SET I2C Functionality and Dev INFO:\n", tag);
-	openChannel(client);
-	logError(0, "%s driver ver. %s (built on)\n", tag, FTS_TS_DRV_VERSION);
+	error = check_dt(dp);
 
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+	if (error != OK ||
+		!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		logError(1, "%s Unsupported I2C functionality\n", tag);
 		error = -EIO;
 		goto ProbeErrorExit_0;
 	}
 
+	openChannel(client);
+
 	info = kzalloc(sizeof(struct fts_ts_info), GFP_KERNEL);
 	if (!info) {
 		logError(1,
@@ -4904,7 +4955,8 @@ static int fts_remove(struct i2c_client *client)
 #if defined(CONFIG_FB_MSM)
 	fb_unregister_client(&info->notifier);
 #else
-	msm_drm_unregister_client(&info->notifier);
+	if (active_panel)
+		drm_panel_notifier_register(active_panel, &info->notifier);
 #endif
 
 	/* unregister the device */
diff --git a/drivers/input/touchscreen/st/fts_aoi_event.c b/drivers/input/touchscreen/st/fts_aoi_event.c
index 3d67028..df5faaa 100644
--- a/drivers/input/touchscreen/st/fts_aoi_event.c
+++ b/drivers/input/touchscreen/st/fts_aoi_event.c
@@ -48,19 +48,24 @@ ssize_t aoi_set_store(struct device *dev, struct device_attribute *attr,
 	if (ret != 4)
 		return -EINVAL;
 
-	if (left < 0 || left > X_AXIS_MAX || right < 0 || right > X_AXIS_MAX) {
+	if (right > X_AXIS_MAX)
+		right = X_AXIS_MAX;
+	if (bottom > Y_AXIS_MAX)
+		bottom = Y_AXIS_MAX;
+
+	if (left < 0 || left > X_AXIS_MAX || right < 0) {
 		info->aoi_notify_enabled = false;
 		return -EINVAL;
 	}
 
-	if (top < 0 || top > Y_AXIS_MAX || bottom < 0 || bottom > Y_AXIS_MAX) {
+	if (top < 0 || top > Y_AXIS_MAX || bottom < 0) {
 		info->aoi_notify_enabled = false;
 		return -EINVAL;
 	}
 
 	if (left >= right || top >= bottom) {
 		info->aoi_notify_enabled = false;
-		return -EINVAL;
+		return count;
 	}
 
 	info->aoi_left = left;
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
index 7390968..3472532 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
@@ -78,6 +78,8 @@
 #define LOAD_FW_FROM 0
 #endif
 
+#define FTS_LATEST_VERSION 0x1101
+
 static char tag[8] = "[ FTS ]\0";
 
 int getFirmwareVersion(u16 *fw_vers, u16 *config_id)
@@ -142,6 +144,10 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from)
 #endif
 	default:
 		logError(0, "%s Read FW from BIN file!\n", tag);
+
+		if (ftsInfo.u16_fwVer == FTS_LATEST_VERSION)
+			return ERROR_FW_NO_UPDATE;
+
 		dev = getDev();
 
 		if (dev != NULL) {
@@ -219,7 +225,7 @@ int flashProcedure(const char *path, int force, int keep_cx)
 	logError(0, "%s Fw file read COMPLETED!\n", tag);
 
 	logError(0, "%s Starting flashing procedure...\n", tag);
-	res = flash_burn(fw, force, keep_cx);
+	res = flash_burn(&fw, force, keep_cx);
 	if (res < OK && res != (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED)) {
 		logError(1, "%s %s: ERROR %02X\n",
 			tag, __func__, ERROR_FLASH_PROCEDURE);
@@ -453,13 +459,13 @@ int fillMemory(u32 address, u8 *data, int size)
 	return OK;
 }
 
-int flash_burn(Firmware fw, int force_burn, int keep_cx)
+int flash_burn(Firmware *fw, int force_burn, int keep_cx)
 {
 	u8 cmd;
 	int res;
 
-	if (!force_burn && (ftsInfo.u16_fwVer >= fw.fw_ver)
-		&& (ftsInfo.u16_cfgId >= fw.config_id)) {
+	if (!force_burn && (ftsInfo.u16_fwVer >= fw->fw_ver)
+		&& (ftsInfo.u16_cfgId >= fw->config_id)) {
 		logError(0, "Firmware in the chip newer");
 		logError(0, " or equal to the one to burn! ");
 		logError(0, "%s %s:NO UPDATE ERROR %02X\n",
@@ -498,7 +504,7 @@ int flash_burn(Firmware fw, int force_burn, int keep_cx)
 	//Write the lower part of the Program RAM
 	logError(0, "%s 3) PREPARING DATA FOR FLASH BURN:\n", tag);
 
-	res = fillMemory(FLASH_ADDR_CODE, fw.data, fw.data_size);
+	res = fillMemory(FLASH_ADDR_CODE, fw->data, fw->data_size);
 	if (res < 0) {
 		logError(1, "%s Error During filling the memory!%02X\n",
 			tag, ERROR_FLASH_BURN_FAILED);
@@ -567,15 +573,15 @@ int flash_burn(Firmware fw, int force_burn, int keep_cx)
 		return (res | ERROR_FLASH_BURN_FAILED);
 	}
 
-	if ((ftsInfo.u16_fwVer != fw.fw_ver)
-		&& (ftsInfo.u16_cfgId != fw.config_id)) {
+	if ((ftsInfo.u16_fwVer != fw->fw_ver)
+		&& (ftsInfo.u16_cfgId != fw->config_id)) {
 		logError(1, "Firmware in the chip different");
 		logError(1, " from the one that was burn!");
 		logError(1, "%s fw: %x != %x , conf: %x != %x\n",
 			tag, ftsInfo.u16_fwVer,
-			fw.fw_ver,
+			fw->fw_ver,
 			ftsInfo.u16_cfgId,
-			fw.config_id);
+			fw->config_id);
 		return ERROR_FLASH_BURN_FAILED;
 	}
 
@@ -1015,14 +1021,14 @@ int fillFlash(u32 address, u8 *data, int size)
 	return OK;
 }
 
-int flash_burn(struct Firmware fw, int force_burn, int keep_cx)
+int flash_burn(struct Firmware *fw, int force_burn, int keep_cx)
 {
 	int res;
 
-	if (!force_burn && (ftsInfo.u16_fwVer >= fw.fw_ver)
-		&& (ftsInfo.u16_cfgId >= fw.config_id)) {
+	if (!force_burn && (ftsInfo.u16_fwVer >= fw->fw_ver)
+		&& (ftsInfo.u16_cfgId >= fw->config_id)) {
 		for (res = EXTERNAL_RELEASE_INFO_SIZE-1; res >= 0; res--) {
-			if (fw.externalRelease[res] >
+			if (fw->externalRelease[res] >
 				ftsInfo.u8_extReleaseInfo[res])
 				goto start;
 		}
@@ -1045,7 +1051,7 @@ int flash_burn(struct Firmware fw, int force_burn, int keep_cx)
 	if (res < OK) {
 		logError(1, "%s warm boot FAILED!\n", tag);
 		return (res | ERROR_FLASH_BURN_FAILED);
-	} /*else*/
+	}
 	logError(0, "%s warm boot COMPLETED!\n\n", tag);
 
 	//mdelay(FLASH_WAIT_TIME);
@@ -1084,8 +1090,8 @@ int flash_burn(struct Firmware fw, int force_burn, int keep_cx)
 
 	//mdelay(FLASH_WAIT_TIME);
 	logError(0, "%s 6) LOAD PROGRAM:\n", tag);
-	res = fillFlash(FLASH_ADDR_CODE, &fw.data[0],
-					fw.sec0_size);
+	res = fillFlash(FLASH_ADDR_CODE, (u8 *)(&fw->data[0]),
+					fw->sec0_size);
 	if (res < OK) {
 		logError(1, "%s   load program ERROR %02X\n",
 			tag, ERROR_FLASH_BURN_FAILED);
@@ -1094,7 +1100,7 @@ int flash_burn(struct Firmware fw, int force_burn, int keep_cx)
 	logError(0, "%s   load program DONE!\n", tag);
 	logError(0, "%s 7) LOAD CONFIG:\n", tag);
 	res = fillFlash(FLASH_ADDR_CONFIG,
-		&(fw.data[fw.sec0_size]), fw.sec1_size);
+		&(fw->data[fw->sec0_size]), fw->sec1_size);
 	if (res < OK) {
 		logError(1, "%s   load config ERROR %02X\n",
 			tag, ERROR_FLASH_BURN_FAILED);
@@ -1122,12 +1128,12 @@ int flash_burn(struct Firmware fw, int force_burn, int keep_cx)
 		return (res | ERROR_FLASH_BURN_FAILED);
 	}
 
-	if ((ftsInfo.u16_fwVer != fw.fw_ver)
-		&& (ftsInfo.u16_cfgId != fw.config_id)) {
+	if ((ftsInfo.u16_fwVer != fw->fw_ver)
+		&& (ftsInfo.u16_cfgId != fw->config_id)) {
 		pr_err("Firmware is different from the old!\n");
 		logError(1, "%s fw: %x != %x, conf: %x != %x\n",
-			tag, ftsInfo.u16_fwVer, fw.fw_ver,
-			ftsInfo.u16_cfgId, fw.config_id);
+			tag, ftsInfo.u16_fwVer, fw->fw_ver,
+			ftsInfo.u16_cfgId, fw->config_id);
 		return ERROR_FLASH_BURN_FAILED;
 	}
 
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
index f23f07b..e712fe5 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
@@ -102,7 +102,7 @@ int getFirmwareVersion(u16 *fw_vers, u16 *config_id);
 int getFWdata(const char *pathToFile, u8 **data, int *size, int from);
 int parseBinFile(u8 *fw_data, int fw_size, struct Firmware *fw, int keep_cx);
 int readFwFile(const char *path, struct Firmware *fw, int keep_cx);
-int flash_burn(struct Firmware fw, int force_burn, int keep_cx);
+int flash_burn(struct Firmware *fw, int force_burn, int keep_cx);
 int flashProcedure(const char *path, int force, int keep_cx);
 
 #endif
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 704e990..b6f95f2 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -106,27 +106,29 @@ struct stmfts_data {
 	bool running;
 };
 
-static void stmfts_brightness_set(struct led_classdev *led_cdev,
+static int stmfts_brightness_set(struct led_classdev *led_cdev,
 					enum led_brightness value)
 {
 	struct stmfts_data *sdata = container_of(led_cdev,
 					struct stmfts_data, led_cdev);
 	int err;
 
-	if (value == sdata->led_status || !sdata->ledvdd)
-		return;
-
-	if (!value) {
-		regulator_disable(sdata->ledvdd);
-	} else {
-		err = regulator_enable(sdata->ledvdd);
-		if (err)
-			dev_warn(&sdata->client->dev,
-				 "failed to disable ledvdd regulator: %d\n",
-				 err);
+	if (value != sdata->led_status && sdata->ledvdd) {
+		if (!value) {
+			regulator_disable(sdata->ledvdd);
+		} else {
+			err = regulator_enable(sdata->ledvdd);
+			if (err) {
+				dev_warn(&sdata->client->dev,
+					 "failed to disable ledvdd regulator: %d\n",
+					 err);
+				return err;
+			}
+		}
+		sdata->led_status = value;
 	}
 
-	sdata->led_status = value;
+	return 0;
 }
 
 static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
@@ -608,7 +610,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata)
 	sdata->led_cdev.name = STMFTS_DEV_NAME;
 	sdata->led_cdev.max_brightness = LED_ON;
 	sdata->led_cdev.brightness = LED_OFF;
-	sdata->led_cdev.brightness_set = stmfts_brightness_set;
+	sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set;
 	sdata->led_cdev.brightness_get = stmfts_brightness_get;
 
 	err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);
diff --git a/drivers/input/touchscreen/synaptics_dsx/Kconfig b/drivers/input/touchscreen/synaptics_dsx/Kconfig
new file mode 100755
index 0000000..cc2d34e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Kconfig
@@ -0,0 +1,142 @@
+#
+# Synaptics DSX touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX
+	bool "Synaptics DSX touchscreen"
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	prompt "Synaptics DSX bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	bool "RMI over I2C"
+	depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI
+	bool "RMI over SPI"
+	depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C
+	bool "HID over I2C"
+	depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	tristate "Synaptics DSX core driver module"
+	depends on I2C || SPI_MASTER
+	default y
+	help
+	  Say Y here to enable basic touch reporting functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV
+	tristate "Synaptics DSX RMI device module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	default y
+	help
+	  Say Y here to enable support for direct RMI register access.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	tristate "Synaptics DSX firmware update module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	default y
+	help
+	  Say Y here to enable support for doing firmware update.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	bool "Synaptics DSX firmware update sysfs attributes"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	default y
+	help
+	  Say Y here to enable support for sysfs attributes for
+	  performing firmware update in a development environment.
+	  This does not affect the core or other subsystem attributes.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING
+	tristate "Synaptics DSX test reporting module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	default y
+	help
+	  Say Y here to enable support for retrieving production test reports.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY
+	tristate "Synaptics DSX proximity module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for proximity functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN
+	tristate "Synaptics DSX active pen module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for active pen functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE
+	tristate "Synaptics DSX user defined gesture module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for user defined gesture functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO
+	tristate "Synaptics DSX video module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for video communication functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_video.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_DEBUG
+	tristate "Synaptics DSX debug module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for firmware debug functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_debug.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/Makefile b/drivers/input/touchscreen/synaptics_dsx/Makefile
new file mode 100755
index 0000000..191dcdc
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C) += synaptics_dsx_rmi_hid_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO) += synaptics_dsx_video.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_DEBUG) += synaptics_dsx_debug.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
new file mode 100755
index 0000000..64bc941
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
@@ -0,0 +1,606 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+struct apen_data_8b_pressure {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[11];
+	};
+};
+
+struct apen_data {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_lsb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[12];
+	};
+};
+
+struct synaptics_rmi4_apen_handle {
+	bool apen_present;
+	unsigned char intr_mask;
+	unsigned char battery_state;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short apen_data_addr;
+	unsigned short max_pressure;
+	unsigned int pen_id;
+	struct input_dev *apen_dev;
+	struct apen_data *apen_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_apen_handle *apen;
+
+DECLARE_COMPLETION(apen_remove_complete);
+
+static void apen_lift(void)
+{
+	input_report_key(apen->apen_dev, BTN_TOUCH, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_PEN, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_RUBBER, 0);
+	input_sync(apen->apen_dev);
+	apen->apen_present = false;
+}
+
+static void apen_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int pressure;
+	static int invert = -1;
+	struct apen_data_8b_pressure *apen_data_8b;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->apen_data_addr,
+			apen->apen_data->data,
+			sizeof(apen->apen_data->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read active pen data\n",
+				__func__);
+		return;
+	}
+
+	if (apen->apen_data->status_pen == 0) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: No active pen data\n",
+				__func__);
+
+		return;
+	}
+
+	x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+	y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	if ((x == -1) && (y == -1)) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Active pen in range but no valid x & y\n",
+				__func__);
+
+		return;
+	}
+
+	if (!apen->apen_present)
+		invert = -1;
+
+	if (invert != -1 && invert != apen->apen_data->status_invert)
+		apen_lift();
+
+	invert = apen->apen_data->status_invert;
+
+	if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+		pressure = (apen->apen_data->pressure_msb << 8) |
+				apen->apen_data->pressure_lsb;
+		apen->battery_state = apen->apen_data->battery_state;
+		apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+				(apen->apen_data->pen_id_16_23 << 16) |
+				(apen->apen_data->pen_id_8_15 << 8) |
+				apen->apen_data->pen_id_0_7;
+	} else {
+		apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+		pressure = apen_data_8b->pressure_msb;
+		apen->battery_state = apen_data_8b->battery_state;
+		apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+				(apen_data_8b->pen_id_16_23 << 16) |
+				(apen_data_8b->pen_id_8_15 << 8) |
+				apen_data_8b->pen_id_0_7;
+	}
+
+	input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+	input_report_key(apen->apen_dev,
+			apen->apen_data->status_invert > 0 ?
+			BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+	input_report_key(apen->apen_dev,
+			BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+			1 : 0);
+	input_report_abs(apen->apen_dev, ABS_X, x);
+	input_report_abs(apen->apen_dev, ABS_Y, y);
+	input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+	input_sync(apen->apen_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+			__func__,
+			apen->apen_data->status_pen,
+			apen->apen_data->status_invert,
+			apen->apen_data->status_barrel,
+			x, y, pressure);
+
+	apen->apen_present = true;
+}
+
+static void apen_set_params(void)
+{
+	input_set_abs_params(apen->apen_dev, ABS_X, 0,
+			apen->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_Y, 0,
+			apen->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_PRESSURE, 0,
+			apen->max_pressure, 0, 0);
+}
+
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_reg_presence;
+	unsigned char size_of_query_9;
+	unsigned char *query_9;
+	unsigned char *data_desc;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	data_reg_presence = query_8->data[1];
+
+	size_of_query_9 = query_8->size_of_query9;
+	query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 9,
+			query_9,
+			size_of_query_9);
+	if (retval < 0)
+		goto exit;
+
+	data_desc = query_9;
+
+	for (ii = 0; ii < 6; ii++) {
+		if (!(data_reg_presence & (1 << ii)))
+			continue; /* The data register is not present */
+		data_desc++; /* Jump over the size entry */
+		while (*data_desc & (1 << 7))
+			data_desc++;
+		data_desc++; /* Go to the next descriptor */
+	}
+
+	data_desc++; /* Jump over the size entry */
+	/* Check for the presence of subpackets 1 and 2 */
+	if ((*data_desc & (3 << 1)) == (3 << 1))
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+	else
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+	kfree(query_9);
+
+	return retval;
+}
+
+static int apen_reg_init(void)
+{
+	int retval;
+	unsigned char data_offset;
+	unsigned char size_of_query8;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present +
+				query_8.data4_is_present +
+				query_8.data5_is_present;
+		apen->apen_data_addr = apen->data_base_addr + data_offset;
+		retval = apen_pressure(&query_8);
+		if (retval < 0)
+			return retval;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Active pen support unavailable\n",
+				__func__);
+		retval = -ENODEV;
+	}
+
+	return retval;
+}
+
+static int apen_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+				}
+			} else {
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	apen->query_base_addr = fd.query_base_addr | (page << 8);
+	apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	apen->data_base_addr = fd.data_base_addr | (page << 8);
+	apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = apen_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize active pen registers\n",
+				__func__);
+		return retval;
+	}
+
+	apen->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		apen->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!apen)
+		return;
+
+	if (apen->intr_mask & intr_mask)
+		apen_report();
+
+	return;
+}
+
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (apen) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+	if (!apen) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+	if (!apen->apen_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen;
+	}
+
+	apen->rmi4_data = rmi4_data;
+
+	retval = apen_scan_pdt();
+	if (retval < 0)
+		goto exit_free_apen_data;
+
+	apen->apen_dev = input_allocate_device();
+	if (apen->apen_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate active pen device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen_data;
+	}
+
+	apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+	apen->apen_dev->phys = APEN_PHYS_NAME;
+	apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(apen->apen_dev, rmi4_data);
+
+	set_bit(EV_KEY, apen->apen_dev->evbit);
+	set_bit(EV_ABS, apen->apen_dev->evbit);
+	set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+	set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+	apen_set_params();
+
+	retval = input_register_device(apen->apen_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register active pen device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	return 0;
+
+exit_free_input_device:
+	input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+	kfree(apen->apen_data);
+
+exit_free_apen:
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		goto exit;
+
+	input_unregister_device(apen->apen_dev);
+	kfree(apen->apen_data);
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	complete(&apen_remove_complete);
+}
+
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen) {
+		synaptics_rmi4_apen_init(rmi4_data);
+		return;
+	}
+
+	apen_lift();
+
+	apen_scan_pdt();
+}
+
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+	.fn_type = RMI_ACTIVE_PEN,
+	.init = synaptics_rmi4_apen_init,
+	.remove = synaptics_rmi4_apen_remove,
+	.reset = synaptics_rmi4_apen_reset,
+	.reinit = synaptics_rmi4_apen_reinit,
+	.early_suspend = synaptics_rmi4_apen_e_suspend,
+	.suspend = synaptics_rmi4_apen_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_apen_attn,
+};
+
+static int __init rmi4_active_pen_module_init(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_active_pen_module_exit(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, false);
+
+	wait_for_completion(&apen_remove_complete);
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
new file mode 100755
index 0000000..3790fa1
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -0,0 +1,4951 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#include <linux/completion.h>
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+/*
+#define USE_DATA_SERVER
+*/
+
+#define WAKEUP_GESTURE false
+
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#ifdef SYNA_TDDI
+#define TDDI_LPWG_WAIT_US 10
+#endif
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild);
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data);
+struct drm_panel *active_panel;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static int synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static void synaptics_rmi4_defer_probe(struct work_struct *work);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef USE_DATA_SERVER
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+#endif
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf);
+
+/*
+ * F01 device status register image (1 byte); read via
+ * f01_data_base_addr.  status_code holds one of the STATUS_*
+ * values defined above.
+ */
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;	/* set while in flash programming mode */
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_0_5 {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char f11_query0_b0__2:3;
+			unsigned char has_query_9:1;
+			unsigned char has_query_11:1;
+			unsigned char has_query_12:1;
+			unsigned char has_query_27:1;
+			unsigned char has_query_28:1;
+
+			/* query 1 */
+			unsigned char num_of_fingers:3;
+			unsigned char has_rel:1;
+			unsigned char has_abs:1;
+			unsigned char has_gestures:1;
+			unsigned char has_sensitibity_adjust:1;
+			unsigned char f11_query1_b7:1;
+
+			/* query 2 */
+			unsigned char num_of_x_electrodes;
+
+			/* query 3 */
+			unsigned char num_of_y_electrodes;
+
+			/* query 4 */
+			unsigned char max_electrodes:7;
+			unsigned char f11_query4_b7:1;
+
+			/* query 5 */
+			unsigned char abs_data_size:2;
+			unsigned char has_anchored_finger:1;
+			unsigned char has_adj_hyst:1;
+			unsigned char has_dribble:1;
+			unsigned char has_bending_correction:1;
+			unsigned char has_large_object_suppression:1;
+			unsigned char has_jitter_filter:1;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+struct synaptics_rmi4_f11_query_7_8 {
+	union {
+		struct {
+			/* query 7 */
+			unsigned char has_single_tap:1;
+			unsigned char has_tap_and_hold:1;
+			unsigned char has_double_tap:1;
+			unsigned char has_early_tap:1;
+			unsigned char has_flick:1;
+			unsigned char has_press:1;
+			unsigned char has_pinch:1;
+			unsigned char has_chiral_scroll:1;
+
+			/* query 8 */
+			unsigned char has_palm_detect:1;
+			unsigned char has_rotate:1;
+			unsigned char has_touch_shapes:1;
+			unsigned char has_scroll_zones:1;
+			unsigned char individual_scroll_zones:1;
+			unsigned char has_multi_finger_scroll:1;
+			unsigned char has_multi_finger_scroll_edge_motion:1;
+			unsigned char has_multi_finger_scroll_inertia:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+struct synaptics_rmi4_f11_query_9 {
+	union {
+		struct {
+			unsigned char has_pen:1;
+			unsigned char has_proximity:1;
+			unsigned char has_large_object_sensitivity:1;
+			unsigned char has_suppress_on_large_object_detect:1;
+			unsigned char has_two_pen_thresholds:1;
+			unsigned char has_contact_geometry:1;
+			unsigned char has_pen_hover_discrimination:1;
+			unsigned char has_pen_hover_and_edge_filters:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_12 {
+	union {
+		struct {
+			unsigned char has_small_object_detection:1;
+			unsigned char has_small_object_detection_tuning:1;
+			unsigned char has_8bit_w:1;
+			unsigned char has_2d_adjustable_mapping:1;
+			unsigned char has_general_information_2:1;
+			unsigned char has_physical_properties:1;
+			unsigned char has_finger_limit:1;
+			unsigned char has_linear_cofficient_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_27 {
+	union {
+		struct {
+			unsigned char f11_query27_b0:1;
+			unsigned char has_pen_position_correction:1;
+			unsigned char has_pen_jitter_filter_coefficient:1;
+			unsigned char has_group_decomposition:1;
+			unsigned char has_wakeup_gesture:1;
+			unsigned char has_small_finger_correction:1;
+			unsigned char has_data_37:1;
+			unsigned char f11_query27_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_ctrl_6_9 {
+	union {
+		struct {
+			unsigned char sensor_max_x_pos_7_0;
+			unsigned char sensor_max_x_pos_11_8:4;
+			unsigned char f11_ctrl7_b4__7:4;
+			unsigned char sensor_max_y_pos_7_0;
+			unsigned char sensor_max_y_pos_11_8:4;
+			unsigned char f11_ctrl9_b4__7:4;
+		} __packed;
+		unsigned char data[4];
+	};
+};
+
+struct synaptics_rmi4_f11_data_1_5 {
+	union {
+		struct {
+			unsigned char x_position_11_4;
+			unsigned char y_position_11_4;
+			unsigned char x_position_3_0:4;
+			unsigned char y_position_3_0:4;
+			unsigned char wx:4;
+			unsigned char wy:4;
+			unsigned char z;
+		} __packed;
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl48_is_present:1;
+				unsigned char ctrl49_is_present:1;
+				unsigned char ctrl50_is_present:1;
+				unsigned char ctrl51_is_present:1;
+				unsigned char ctrl52_is_present:1;
+				unsigned char ctrl53_is_present:1;
+				unsigned char ctrl54_is_present:1;
+				unsigned char ctrl55_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl56_is_present:1;
+				unsigned char ctrl57_is_present:1;
+				unsigned char ctrl58_is_present:1;
+				unsigned char ctrl59_is_present:1;
+				unsigned char ctrl60_is_present:1;
+				unsigned char ctrl61_is_present:1;
+				unsigned char ctrl62_is_present:1;
+				unsigned char ctrl63_is_present:1;
+			} __packed;
+		};
+		unsigned char data[9];
+	};
+};
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data24_is_present:1;
+				unsigned char data25_is_present:1;
+				unsigned char data26_is_present:1;
+				unsigned char data27_is_present:1;
+				unsigned char data28_is_present:1;
+				unsigned char data29_is_present:1;
+				unsigned char data30_is_present:1;
+				unsigned char data31_is_present:1;
+			} __packed;
+		};
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_8 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char tx_pitch_lsb;
+			unsigned char tx_pitch_msb;
+			unsigned char low_rx_clip;
+			unsigned char high_rx_clip;
+			unsigned char low_tx_clip;
+			unsigned char high_tx_clip;
+			unsigned char num_of_rx;
+			unsigned char num_of_tx;
+		};
+		unsigned char data[14];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_23 {
+	union {
+		struct {
+			unsigned char finger_enable:1;
+			unsigned char active_stylus_enable:1;
+			unsigned char palm_enable:1;
+			unsigned char unclassified_object_enable:1;
+			unsigned char hovering_finger_enable:1;
+			unsigned char gloved_finger_enable:1;
+			unsigned char f12_ctr23_00_b6__7:2;
+			unsigned char max_reported_objects;
+			unsigned char f12_ctr23_02_b0:1;
+			unsigned char report_active_stylus_as_finger:1;
+			unsigned char report_palm_as_finger:1;
+			unsigned char report_unclassified_object_as_finger:1;
+			unsigned char report_hovering_finger_as_finger:1;
+			unsigned char report_gloved_finger_as_finger:1;
+			unsigned char report_narrow_object_swipe_as_finger:1;
+			unsigned char report_handedge_as_finger:1;
+			unsigned char cover_enable:1;
+			unsigned char stylus_enable:1;
+			unsigned char eraser_enable:1;
+			unsigned char small_object_enable:1;
+			unsigned char f12_ctr23_03_b4__7:4;
+			unsigned char report_cover_as_finger:1;
+			unsigned char report_stylus_as_finger:1;
+			unsigned char report_eraser_as_finger:1;
+			unsigned char report_small_object_as_finger:1;
+			unsigned char f12_ctr23_04_b4__7:4;
+		};
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_31 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char rx_clip_low;
+			unsigned char rx_clip_high;
+			unsigned char wedge_clip_low;
+			unsigned char wedge_clip_high;
+			unsigned char num_of_p;
+			unsigned char num_of_q;
+		};
+		unsigned char data[12];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_58 {
+	union {
+		struct {
+			unsigned char reporting_format;
+			unsigned char f12_ctr58_00_reserved;
+			unsigned char min_force_lsb;
+			unsigned char min_force_msb;
+			unsigned char max_force_lsb;
+			unsigned char max_force_msb;
+			unsigned char light_press_threshold_lsb;
+			unsigned char light_press_threshold_msb;
+			unsigned char light_press_hysteresis_lsb;
+			unsigned char light_press_hysteresis_msb;
+			unsigned char hard_press_threshold_lsb;
+			unsigned char hard_press_threshold_msb;
+			unsigned char hard_press_hysteresis_lsb;
+			unsigned char hard_press_hysteresis_msb;
+		};
+		unsigned char data[14];
+	};
+};
+
+struct synaptics_rmi4_f12_finger_data {
+	unsigned char object_type_and_status;
+	unsigned char x_lsb;
+	unsigned char x_msb;
+	unsigned char y_lsb;
+	unsigned char y_msb;
+#ifdef REPORT_2D_Z
+	unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+	unsigned char wx;
+	unsigned char wy;
+#endif
+};
+
+struct synaptics_rmi4_f1a_query {
+	union {
+		struct {
+			unsigned char max_button_count:3;
+			unsigned char f1a_query0_b3__4:2;
+			unsigned char has_query4:1;
+			unsigned char has_query3:1;
+			unsigned char has_query2:1;
+			unsigned char has_general_control:1;
+			unsigned char has_interrupt_enable:1;
+			unsigned char has_multibutton_select:1;
+			unsigned char has_tx_rx_map:1;
+			unsigned char has_perbutton_threshold:1;
+			unsigned char has_release_threshold:1;
+			unsigned char has_strongestbtn_hysteresis:1;
+			unsigned char has_filter_strength:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+struct synaptics_rmi4_f1a_query_4 {
+	union {
+		struct {
+			unsigned char has_ctrl19:1;
+			unsigned char f1a_query4_b1__4:4;
+			unsigned char has_ctrl24:1;
+			unsigned char f1a_query4_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f1a_control_0 {
+	union {
+		struct {
+			unsigned char multibutton_report:2;
+			unsigned char filter_mode:2;
+			unsigned char reserved:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f1a_control {
+	struct synaptics_rmi4_f1a_control_0 general_control;
+	unsigned char button_int_enable;
+	unsigned char multi_button;
+	unsigned char *txrx_map;
+	unsigned char *button_threshold;
+	unsigned char button_release_threshold;
+	unsigned char strongest_button_hysteresis;
+	unsigned char filter_strength;
+};
+
+struct synaptics_rmi4_f1a_handle {
+	int button_bitmask_size;
+	unsigned char max_count;
+	unsigned char valid_button_count;
+	unsigned char *button_data_buffer;
+	unsigned char *button_map;
+	struct synaptics_rmi4_f1a_query button_query;
+	struct synaptics_rmi4_f1a_control button_control;
+};
+
+/*
+ * List node tracking one expansion function module; insert/remove flag
+ * pending state processed by the exp_data worker (not visible here).
+ */
+struct synaptics_rmi4_exp_fhandler {
+	struct synaptics_rmi4_exp_fn *exp_fn;
+	bool insert;
+	bool remove;
+	struct list_head link;
+};
+
+/*
+ * Shared state for expansion function management: the handler list,
+ * the mutex guarding it, and the delayed work/workqueue that services
+ * pending insertions and removals.
+ */
+struct synaptics_rmi4_exp_fn_data {
+	bool initialized;
+	bool queue_work;
+	struct mutex mutex;
+	struct list_head list;
+	struct delayed_work work;
+	struct workqueue_struct *workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+static struct synaptics_dsx_button_map *vir_button_map;
+
+#ifdef USE_DATA_SERVER
+static pid_t synad_pid;
+static struct task_struct *synad_task;
+static struct siginfo interrupt_signal;
+#endif
+
+/* sysfs attributes exposed by the driver (mode: write-only 0220, read-only 0444, or rw 0664) */
+static struct device_attribute attrs[] = {
+	__ATTR(reset, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_f01_reset_store),
+	__ATTR(productinfo, 0444,
+			synaptics_rmi4_f01_productinfo_show,
+			synaptics_rmi4_store_error),
+	__ATTR(buildid, 0444,
+			synaptics_rmi4_f01_buildid_show,
+			synaptics_rmi4_store_error),
+	__ATTR(flashprog, 0444,
+			synaptics_rmi4_f01_flashprog_show,
+			synaptics_rmi4_store_error),
+	__ATTR(0dbutton, 0664,
+			synaptics_rmi4_0dbutton_show,
+			synaptics_rmi4_0dbutton_store),
+	__ATTR(suspend, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_suspend_store),
+	__ATTR(wake_gesture, 0664,
+			synaptics_rmi4_wake_gesture_show,
+			synaptics_rmi4_wake_gesture_store),
+#ifdef USE_DATA_SERVER
+	__ATTR(synad_pid, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_synad_pid_store),
+#endif
+};
+
+/* Read-only "virtualkeys.<platform driver>" attribute serving the virtual key map */
+static struct kobj_attribute virtual_key_map_attr = {
+	.attr = {
+		.name = VIRTUAL_KEY_MAP_FILE_NAME,
+		.mode = 0444,
+	},
+	.show = synaptics_rmi4_virtual_key_map_show,
+};
+
+/*
+ * sysfs "reset" store: writing "1" issues a soft reset to the device.
+ * Returns count on success, -EINVAL on bad input, or the error from
+ * synaptics_rmi4_reset_device().
+ */
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int reset;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/*
+	 * kstrtouint() returns 0 on success and a negative errno on
+	 * failure; the old "!= 1" test (an sscanf()-style leftover)
+	 * rejected every write, valid ones included.
+	 */
+	if (kstrtouint(buf, 10, &reset))
+		return -EINVAL;
+
+	/* Only the value 1 triggers a reset */
+	if (reset != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return count;
+}
+
+/* sysfs "productinfo" show: report the two F01 product info bytes in hex */
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+			(rmi4_data->rmi4_mod_info.product_info[0]),
+			(rmi4_data->rmi4_mod_info.product_info[1]));
+}
+
+/* sysfs "buildid" show: report the cached firmware build ID */
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->firmware_id);
+}
+
+/*
+ * sysfs "flashprog" show: read the F01 device status register and
+ * report whether the controller is in flash programming mode.
+ * Returns the byte count written to buf, or a negative errno if the
+ * register read fails.
+ */
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct synaptics_rmi4_f01_device_status device_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			device_status.data,
+			sizeof(device_status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device status, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			device_status.flash_prog);
+}
+
+/* sysfs "0dbutton" show: report whether 0D buttons (F1A) are enabled */
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->button_0d_enabled);
+}
+
+/*
+ * sysfs "0dbutton" store: enable (nonzero) or disable (0) the 0D
+ * buttons by toggling the F1A bit in the F01 interrupt enable
+ * register(s).  Returns count on success, -EINVAL on bad input,
+ * -ENODEV if no functions are registered, or a register access error.
+ */
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	unsigned char ii;
+	unsigned char intr_enable;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * kstrtouint() returns 0 on success; the old "!= 1" test
+	 * (sscanf()-style) made every write fail with -EINVAL.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->button_0d_enabled == input)
+		return count;
+
+	if (list_empty(&rmi->support_fn_list))
+		return -ENODEV;
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+			ii = fhandler->intr_reg_num;
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+
+			/* Mask or unmask the F1A interrupt source */
+			if (input == 1)
+				intr_enable |= fhandler->intr_mask;
+			else
+				intr_enable &= ~fhandler->intr_mask;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+		}
+	}
+
+	rmi4_data->button_0d_enabled = input;
+
+	return count;
+}
+
+/*
+ * sysfs "suspend" store: 1 suspends the device, 0 resumes it; anything
+ * else is rejected.  Returns count on success, -EINVAL otherwise.
+ */
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		synaptics_rmi4_suspend(dev);
+	else if (input == 0)
+		synaptics_rmi4_resume(dev);
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+/* sysfs "wake_gesture" show: report whether wakeup gestures are enabled */
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->enable_wakeup_gesture);
+}
+
+/*
+ * sysfs "wake_gesture" store: enable (nonzero) or disable (0) wakeup
+ * gestures.  The setting only takes effect if the firmware exposes
+ * gesture support via F11 or F12.  Returns count, or -EINVAL on bad
+ * input.
+ */
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/* kstrtouint() returns 0 on success, not the sscanf()-style 1 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = input;
+
+	return count;
+}
+
+#ifdef USE_DATA_SERVER
+/*
+ * sysfs "synad_pid" store: record the PID of the userspace data server
+ * and resolve its task_struct so it can be signalled on interrupts.
+ * Returns count, or -EINVAL on bad input or an unresolvable PID.
+ */
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected every write */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	synad_pid = input;
+
+	if (synad_pid) {
+		synad_task = pid_task(find_vpid(synad_pid), PIDTYPE_PID);
+		if (!synad_task)
+			return -EINVAL;
+	}
+
+	return count;
+}
+#endif
+
+/*
+ * sysfs virtual key map show: emit one "0x01:code:x:y:w:h" line per
+ * virtual button for userspace key layout consumers.
+ */
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ii;
+	int cnt;
+	int count = 0;
+
+	for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+		/*
+		 * Use scnprintf(): it returns the number of characters
+		 * actually written, so advancing buf/count by its return
+		 * value can never run past PAGE_SIZE.  snprintf() returns
+		 * the would-be length and would overrun buf on truncation.
+		 */
+		cnt = scnprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+				vir_button_map->map[ii * 5 + 0],
+				vir_button_map->map[ii * 5 + 1],
+				vir_button_map->map[ii * 5 + 2],
+				vir_button_map->map[ii * 5 + 3],
+				vir_button_map->map[ii * 5 + 4]);
+		buf += cnt;
+		count += cnt;
+	}
+
+	return count;
+}
+
+/*
+ * Switch F11 between continuous reporting and wakeup gesture mode by
+ * rewriting the reporting mode bits (low 3 bits of F11 ctrl base).
+ * Returns 0 on success or a negative errno from the register access.
+ */
+static int synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char reporting_control;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * NOTE(review): if no F11 handler is on the list, fhandler ends up
+	 * pointing at the list head and the accesses below are bogus;
+	 * callers gate on f11_wakeup_gesture, so F11 is presumed present —
+	 * confirm against the callers.
+	 */
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F11)
+			break;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	/* Clear the reporting mode field, then set the requested mode */
+	reporting_control = (reporting_control & ~MASK_3BIT);
+	if (enable)
+		reporting_control |= F11_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control |= F11_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * Switch F12 between continuous reporting and wakeup gesture mode via
+ * the report control register at ctrl20_offset.  The byte index within
+ * the 3-byte register is taken from rmi4_data->set_wakeup_gesture.
+ * Returns 0 on success or a negative errno from the register access.
+ */
+static int synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char offset;
+	unsigned char reporting_control[3];
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * NOTE(review): as with the F11 variant, a missing F12 handler
+	 * would leave fhandler pointing at the list head; callers gate on
+	 * f12_wakeup_gesture, so F12 is presumed present.
+	 */
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F12)
+			break;
+	}
+
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	offset = extra_data->ctrl20_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	if (enable)
+		reporting_control[rmi4_data->set_wakeup_gesture] =
+					F12_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control[rmi4_data->set_wakeup_gesture] =
+					F12_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * Route a wakeup gesture enable/disable request to whichever touch
+ * function provides gesture support; F11 takes priority over F12.
+ */
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	if (rmi4_data->f11_wakeup_gesture) {
+		synaptics_rmi4_f11_wg(rmi4_data, enable);
+		return;
+	}
+
+	if (rmi4_data->f12_wakeup_gesture)
+		synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char reg_index;
+	unsigned char finger;
+	unsigned char fingers_supported;
+	unsigned char num_of_finger_status_regs;
+	unsigned char finger_shift;
+	unsigned char finger_status;
+	unsigned char finger_status_reg[3];
+	unsigned char detected_gestures;
+	unsigned short data_addr;
+	unsigned short data_offset;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+	struct synaptics_rmi4_f11_data_1_5 data;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+
+	/*
+	 * The number of finger status registers is determined by the
+	 * maximum number of fingers supported - 2 bits per finger. So
+	 * the number of finger status registers to read is:
+	 * register_count = ceil(max_num_of_fingers / 4)
+	 */
+	fingers_supported = fhandler->num_of_data_points;
+	num_of_finger_status_regs = (fingers_supported + 3) / 4;
+	data_addr = fhandler->full_addr.data_base;
+
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data38_offset,
+				&detected_gestures,
+				sizeof(detected_gestures));
+		if (retval < 0)
+			return 0;
+
+		if (detected_gestures) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			rmi4_data->suspend = false;
+		}
+/*		synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			finger_status_reg,
+			num_of_finger_status_regs);
+	if (retval < 0)
+		return 0;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_supported; finger++) {
+		reg_index = finger / 4;
+		finger_shift = (finger % 4) * 2;
+		finger_status = (finger_status_reg[reg_index] >> finger_shift)
+				& MASK_2BIT;
+
+		/*
+		 * Each 2-bit finger status field represents the following:
+		 * 00 = finger not present
+		 * 01 = finger present and data accurate
+		 * 10 = finger present but data may be inaccurate
+		 * 11 = reserved
+		 */
+#ifdef TYPE_B_PROTOCOL
+		input_mt_slot(rmi4_data->input_dev, finger);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, finger_status);
+#endif
+
+		if (finger_status) {
+			data_offset = data_addr +
+					num_of_finger_status_regs +
+					(finger * sizeof(data.data));
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					data_offset,
+					data.data,
+					sizeof(data.data));
+			if (retval < 0) {
+				touch_count = 0;
+				goto exit;
+			}
+
+			x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+			y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+			wx = data.wx;
+			wy = data.wy;
+
+			if (rmi4_data->hw_if->board_data->swap_axes) {
+				temp = x;
+				x = y;
+				y = temp;
+				temp = wx;
+				wx = wy;
+				wy = temp;
+			}
+
+			if (rmi4_data->hw_if->board_data->x_flip)
+				x = rmi4_data->sensor_max_x - x;
+			if (rmi4_data->hw_if->board_data->y_flip)
+				y = rmi4_data->sensor_max_y - y;
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx, wy));
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			touch_count++;
+		}
+	}
+
+	if (touch_count == 0) {
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * synaptics_rmi4_f12_abs_report - read and report 2D touch data for F12.
+ *
+ * While suspended with wakeup gestures enabled, only the gesture status
+ * is read and a KEY_WAKEUP press/release is emitted.  Otherwise the
+ * finger data registers are read and finger, palm and stylus objects
+ * are reported through the input subsystem.  A stylus takes priority
+ * over fingers.
+ *
+ * Returns the number of active touch points reported (0 on error or
+ * when servicing a gesture).
+ */
+static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char index;
+	unsigned char finger;
+	unsigned char fingers_to_process;
+	unsigned char finger_status;
+	unsigned char size_of_2d_data;
+	unsigned char gesture_type;
+	unsigned short data_addr;
+	int x;
+	int y;
+	int wx = 0; /* initialized: swapped and logged below even when */
+	int wy = 0; /* REPORT_2D_W is not defined */
+	int temp;
+#if defined(REPORT_2D_PRESSURE) || defined(F51_DISCRETE_FORCE)
+	int pressure;
+#endif
+#ifdef REPORT_2D_PRESSURE
+	unsigned char f_fingers;
+	unsigned char f_lsb;
+	unsigned char f_msb;
+	unsigned char *f_data;
+#endif
+#ifdef F51_DISCRETE_FORCE
+	unsigned char force_level;
+#endif
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_finger_data *data;
+	struct synaptics_rmi4_f12_finger_data *finger_data;
+	static unsigned char finger_presence;
+	static unsigned char stylus_presence;
+#ifdef F12_DATA_15_WORKAROUND
+	static unsigned char objects_already_present;
+#endif
+
+	fingers_to_process = fhandler->num_of_data_points;
+	data_addr = fhandler->full_addr.data_base;
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	/* While suspended, only service wakeup gestures */
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data4_offset,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return 0;
+
+		gesture_type = rmi4_data->gesture_detection[0];
+
+		if (gesture_type && gesture_type != F12_UDG_DETECT) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			/* synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+			/* rmi4_data->suspend = false; */
+		}
+
+		return 0;
+	}
+
+	/* Determine the total number of fingers to process */
+	if (extra_data->data15_size) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data15_offset,
+				extra_data->data15_data,
+				extra_data->data15_size);
+		if (retval < 0)
+			return 0;
+
+		/* Start checking from the highest bit */
+		index = extra_data->data15_size - 1; /* Highest byte */
+		finger = (fingers_to_process - 1) % 8; /* Highest bit */
+		do {
+			if (extra_data->data15_data[index] & (1 << finger))
+				break;
+
+			if (finger) {
+				finger--;
+			} else if (index > 0) {
+				index--; /* Move to the next lower byte */
+				finger = 7;
+			}
+
+			fingers_to_process--;
+		} while (fingers_to_process);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of fingers to process = %d\n",
+			__func__, fingers_to_process);
+	}
+
+#ifdef F12_DATA_15_WORKAROUND
+	fingers_to_process = max(fingers_to_process, objects_already_present);
+#endif
+
+	/* No fingers left: release everything and reset presence tracking */
+	if (!fingers_to_process) {
+		synaptics_rmi4_free_fingers(rmi4_data);
+		finger_presence = 0;
+		stylus_presence = 0;
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr + extra_data->data1_offset,
+			(unsigned char *)fhandler->data,
+			fingers_to_process * size_of_2d_data);
+	if (retval < 0)
+		return 0;
+
+	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data29_offset,
+				extra_data->data29_data,
+				extra_data->data29_size);
+		if (retval < 0)
+			return 0;
+	}
+#endif
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_to_process; finger++) {
+		finger_data = data + finger;
+		finger_status = finger_data->object_type_and_status;
+
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = finger + 1;
+#endif
+
+		x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+		y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+#ifdef REPORT_2D_W
+		wx = finger_data->wx;
+		wy = finger_data->wy;
+#endif
+
+		if (rmi4_data->hw_if->board_data->swap_axes) {
+			temp = x;
+			x = y;
+			y = temp;
+			temp = wx;
+			wx = wy;
+			wy = temp;
+		}
+
+		if (rmi4_data->hw_if->board_data->x_flip)
+			x = rmi4_data->sensor_max_x - x;
+		if (rmi4_data->hw_if->board_data->y_flip)
+			y = rmi4_data->sensor_max_y - y;
+
+		switch (finger_status) {
+		case F12_FINGER_STATUS:
+		case F12_GLOVED_FINGER_STATUS:
+			/* Stylus has priority over fingers */
+			if (stylus_presence)
+				break;
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 1);
+#endif
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			if (rmi4_data->wedge_sensor) {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR, wx);
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR, wx);
+			} else {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR,
+						max(wx, wy));
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR,
+						min(wx, wy));
+			}
+#endif
+#ifdef REPORT_2D_PRESSURE
+			if (rmi4_data->report_pressure) {
+				f_fingers = extra_data->data29_size / 2;
+				f_data = extra_data->data29_data;
+				if (finger + 1 > f_fingers) {
+					pressure = 1;
+				} else {
+					f_lsb = finger * 2;
+					f_msb = finger * 2 + 1;
+					pressure = (int)f_data[f_lsb] << 0 |
+							(int)f_data[f_msb] << 8;
+				}
+				/* Clamp to [1, force_max] */
+				pressure = pressure > 0 ? pressure : 1;
+				if (pressure > rmi4_data->force_max)
+					pressure = rmi4_data->force_max;
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_PRESSURE, pressure);
+			}
+#elif defined(F51_DISCRETE_FORCE)
+			if (finger == 0) {
+				retval = synaptics_rmi4_reg_read(rmi4_data,
+						FORCE_LEVEL_ADDR,
+						&force_level,
+						sizeof(force_level));
+				if (retval < 0) {
+					/* Was "return 0" - must not exit
+					 * with rmi4_report_mutex held
+					 */
+					touch_count = 0;
+					goto exit;
+				}
+				pressure = force_level > 0 ? force_level : 1;
+			} else {
+				pressure = 1;
+			}
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_PRESSURE, pressure);
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			finger_presence = 1;
+			touch_count++;
+			break;
+		case F12_PALM_STATUS:
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					x, y, wx, wy);
+			break;
+		case F12_STYLUS_STATUS:
+		case F12_ERASER_STATUS:
+			if (!rmi4_data->stylus_enable)
+				break;
+			/* Stylus has priority over fingers */
+			if (finger_presence) {
+				mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+				synaptics_rmi4_free_fingers(rmi4_data);
+				mutex_lock(&(rmi4_data->rmi4_report_mutex));
+				finger_presence = 0;
+			}
+			if (stylus_presence) {/* Allow one stylus at a time */
+				if (finger + 1 != stylus_presence)
+					break;
+			}
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 1);
+			if (finger_status == F12_STYLUS_STATUS) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_PEN, 1);
+			} else {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 1);
+			}
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_X, x);
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_Y, y);
+			input_sync(rmi4_data->stylus_dev);
+
+			stylus_presence = finger + 1;
+			touch_count++;
+			break;
+		default:
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 0);
+#endif
+			break;
+		}
+	}
+
+	/* Nothing active: release buttons/tools and reset tracking */
+	if (touch_count == 0) {
+		finger_presence = 0;
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = 0;
+#endif
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+
+		if (rmi4_data->stylus_enable) {
+			stylus_presence = 0;
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 0);
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_PEN, 0);
+			if (rmi4_data->eraser_enable) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 0);
+			}
+			input_sync(rmi4_data->stylus_dev);
+		}
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+#if !defined(REPORT_2D_PRESSURE) && defined(F51_DISCRETE_FORCE)
+exit:
+#endif
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * synaptics_rmi4_f1a_report - read F1A 0D button states and report key
+ * events.
+ *
+ * Reads the button bitmask from the F1A data base address (one status
+ * bit per button) and reports a key event for every button whose state
+ * changed since the last call.  With NO_0D_WHILE_2D defined, button
+ * events that occur while fingers are on the 2D surface are suppressed
+ * so that press and release are delivered consistently.
+ *
+ * Returns the result of the button register read (negative errno on
+ * failure).
+ */
+static int synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0;
+	unsigned char button;
+	unsigned char index;
+	unsigned char shift;
+	unsigned char status;
+	unsigned char *data;
+	unsigned short data_addr = fhandler->full_addr.data_base;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	static unsigned char do_once = 1;
+	static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+	/* One-time clearing of the static per-button state caches */
+	if (do_once) {
+		memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+		memset(before_2d_status, 0, sizeof(before_2d_status));
+		memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+		do_once = 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			f1a->button_data_buffer,
+			f1a->button_bitmask_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read button data registers\n",
+				__func__);
+		return retval;
+	}
+
+	data = f1a->button_data_buffer;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (button = 0; button < f1a->valid_button_count; button++) {
+		/* One status bit per button, 8 buttons packed per byte */
+		index = button / 8;
+		shift = button % 8;
+		status = ((data[index] >> shift) & MASK_1BIT);
+
+		/* Only report state transitions */
+		if (current_status[button] == status)
+			continue;
+		else
+			current_status[button] = status;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Button %d (code %d) ->%d\n",
+				__func__, button,
+				f1a->button_map[button],
+				status);
+#ifdef NO_0D_WHILE_2D
+		if (rmi4_data->fingers_on_2d == false) {
+			/*
+			 * No fingers on the 2D surface: report normally,
+			 * but swallow the release of a press that was
+			 * suppressed while 2D touch was active.
+			 */
+			if (status == 1) {
+				before_2d_status[button] = 1;
+			} else {
+				if (while_2d_status[button] == 1) {
+					while_2d_status[button] = 0;
+					continue;
+				} else {
+					before_2d_status[button] = 0;
+				}
+			}
+			touch_count++;
+			input_report_key(rmi4_data->input_dev,
+					f1a->button_map[button],
+					status);
+		} else {
+			/*
+			 * Fingers on the 2D surface: only deliver the
+			 * release of a press that began before 2D touch;
+			 * remember new presses for later suppression.
+			 */
+			if (before_2d_status[button] == 1) {
+				before_2d_status[button] = 0;
+				touch_count++;
+				input_report_key(rmi4_data->input_dev,
+						f1a->button_map[button],
+						status);
+			} else {
+				if (status == 1)
+					while_2d_status[button] = 1;
+				else
+					while_2d_status[button] = 0;
+			}
+		}
+#else
+		touch_count++;
+		input_report_key(rmi4_data->input_dev,
+				f1a->button_map[button],
+				status);
+#endif
+	}
+
+	if (touch_count)
+		input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_report_touch - dispatch reporting for one RMI function.
+ *
+ * Routes the interrupt to the reporting routine matching the function
+ * number and, for the 2D sensing functions (F11/F12), records whether
+ * any fingers are currently on the 2D surface.
+ */
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	unsigned char touch_count_2d;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x reporting\n",
+			__func__, fhandler->fn_number);
+
+	switch (fhandler->fn_number) {
+	case SYNAPTICS_RMI4_F11:
+		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+				fhandler);
+		rmi4_data->fingers_on_2d = (touch_count_2d != 0);
+		break;
+	case SYNAPTICS_RMI4_F12:
+		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+				fhandler);
+		rmi4_data->fingers_on_2d = (touch_count_2d != 0);
+		break;
+	case SYNAPTICS_RMI4_F1A:
+		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+		break;
+#ifdef USE_DATA_SERVER
+	case SYNAPTICS_RMI4_F21:
+		/* Hand the event to the userspace data server process */
+		if (synad_pid)
+			send_sig_info(SIGIO, &interrupt_signal, synad_task);
+		break;
+#endif
+	default:
+		break;
+	}
+}
+
+/*
+ * synaptics_rmi4_sensor_report - service an attention event.
+ *
+ * Reads the F01 device status register plus the interrupt status
+ * register(s) in one burst, recovers from CRC-in-progress and
+ * spontaneous-reset conditions, then (when @report is true) invokes the
+ * report routine of every RMI function whose interrupt bits are set and
+ * notifies registered expansion-function attention handlers.
+ *
+ * Returns the last register-access result (negative errno on failure).
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
+		bool report)
+{
+	int retval;
+	/* data[0] = F01 device status; data[1..] = interrupt status bits */
+	/* assumes num_of_intr_regs <= MAX_INTR_REGISTERS */
+	unsigned char data[MAX_INTR_REGISTERS + 1];
+	unsigned char *intr = &data[1];
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_f01_device_status status;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * Get interrupt status information from F01 Data1 register to
+	 * determine the source(s) that are flagging the interrupt.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			data,
+			rmi4_data->num_of_intr_regs + 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read interrupt status\n",
+				__func__);
+		return retval;
+	}
+
+	/* Wait out a firmware CRC check, then re-read the device status */
+	status.data[0] = data[0];
+	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		retval = synaptics_rmi4_check_status(rmi4_data,
+				&was_in_bl_mode);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to check status\n",
+					__func__);
+			return retval;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device status\n",
+					__func__);
+			return retval;
+		}
+	}
+	/* Unconfigured outside of flash programming => device has reset */
+	if (status.unconfigured && !status.flash_prog) {
+		pr_notice("%s: spontaneous reset detected\n", __func__);
+		retval = synaptics_rmi4_reinit_device(rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to reinit device\n",
+					__func__);
+		}
+	}
+
+	if (!report)
+		return retval;
+
+	/*
+	 * Traverse the function handler list and service the source(s)
+	 * of the interrupt accordingly.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask &
+						intr[fhandler->intr_reg_num]) {
+					synaptics_rmi4_report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+	/* Let registered expansion handlers service the attention too */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (!exp_fhandler->insert &&
+					!exp_fhandler->remove &&
+					(exp_fhandler->exp_fn->attn != NULL))
+				exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_irq - threaded interrupt handler.
+ *
+ * Services the sensor only while the attention GPIO is still at its
+ * asserted level; a deasserted line means there is nothing to read.
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (gpio_get_value(bdata->irq_gpio) == bdata->irq_on_state)
+		synaptics_rmi4_sensor_report(rmi4_data, true);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * synaptics_rmi4_int_enable - program the device interrupt enable bits.
+ *
+ * For every interrupt enable register with a nonzero mask, writes either
+ * the stored mask (@enable true) or zero (@enable false) to the F01
+ * control register that follows the device control register.
+ *
+ * Returns 0 on success or the first failing write's error code.
+ */
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask = rmi4_data->intr_mask;
+	unsigned short intr_addr;
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] == 0x00)
+			continue;
+
+		/* Interrupt enable registers start at F01_CTRL + 1 */
+		intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				intr_addr,
+				enable ? &(intr_mask[ii]) : &zero,
+				sizeof(intr_mask[ii]));
+		if (retval < 0)
+			return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_irq_enable - attach or detach the attention IRQ.
+ *
+ * With @attn_only set, only the device's interrupt enable bits are
+ * changed.  Otherwise, enabling masks the device interrupt sources,
+ * clears any pending status by reading it, requests the threaded IRQ
+ * handler, and re-enables the sources; disabling frees the IRQ.
+ * Serialized by rmi4_irq_enable_mutex.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable, bool attn_only)
+{
+	int retval = 0;
+	unsigned char data[MAX_INTR_REGISTERS];
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (attn_only) {
+		retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+		goto exit;
+	}
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		/* Mask all sources until the handler is in place */
+		retval = synaptics_rmi4_int_enable(rmi4_data, false);
+		if (retval < 0)
+			goto exit;
+
+		/* Clear interrupts */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				data,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read interrupt status\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				synaptics_rmi4_irq, bdata->irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, true);
+		if (retval < 0)
+			goto exit;
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_set_intr_mask - compute a function's interrupt mask.
+ *
+ * Records which interrupt status register holds this function's bits
+ * (8 interrupt sources per register) and builds a mask with one enable
+ * bit for each of the function's data sources, starting at the bit
+ * position given by the running interrupt source count.
+ */
+static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	unsigned char bit;
+	unsigned char first_bit;
+
+	/* Index of the interrupt status register for this function */
+	fhandler->intr_reg_num = (intr_count + 7) / 8;
+	if (fhandler->intr_reg_num != 0)
+		fhandler->intr_reg_num -= 1;
+
+	/* Set an enable bit for each data source */
+	first_bit = intr_count % 8;
+	fhandler->intr_mask = 0;
+	for (bit = first_bit; bit < first_bit + fd->intr_src_count; bit++)
+		fhandler->intr_mask |= 1 << bit;
+}
+
+/*
+ * synaptics_rmi4_f01_init - set up the handler for device control F01.
+ *
+ * F01 needs no per-function data or extra state; this records its
+ * interrupt sources, computes its interrupt mask, and caches the four
+ * F01 register base addresses in the driver data for later device
+ * control accesses.  Always returns 0.
+ */
+static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->data = NULL;
+	fhandler->extra = NULL;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	/* Cache F01 base addresses for device status/control accesses */
+	rmi4_data->f01_query_base_addr = fd->query_base_addr;
+	rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
+	rmi4_data->f01_data_base_addr = fd->data_base_addr;
+	rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_f11_init - set up a handler for 2D sensing function F11.
+ *
+ * Reads the F11 query registers to determine the number of supported
+ * fingers and the sensor's maximum coordinates, then walks the optional
+ * query and data registers to compute the offset of data register 38,
+ * which holds the wakeup gesture status used by the F11 report routine.
+ *
+ * Returns 0 (or the last register-read result) on success, a negative
+ * error code on failure.
+ */
+static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+	int temp;
+	unsigned char offset;
+	unsigned char fingers_supported;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+	struct synaptics_rmi4_f11_query_0_5 query_0_5;
+	struct synaptics_rmi4_f11_query_7_8 query_7_8;
+	struct synaptics_rmi4_f11_query_9 query_9;
+	struct synaptics_rmi4_f11_query_12 query_12;
+	struct synaptics_rmi4_f11_query_27 query_27;
+	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	/*
+	 * NOTE(review): extra is not released on the error returns below;
+	 * confirm the caller frees fhandler->extra on init failure.
+	 */
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			query_0_5.data,
+			sizeof(query_0_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum number of fingers supported */
+	/* Encodings 0-4 mean 1-5 fingers; encoding 5 means 10 fingers */
+	if (query_0_5.num_of_fingers <= 4)
+		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
+	else if (query_0_5.num_of_fingers == 5)
+		fhandler->num_of_data_points = 10;
+
+	rmi4_data->num_of_fingers = fhandler->num_of_data_points;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + 6,
+			control_6_9.data,
+			sizeof(control_6_9.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum x and y */
+	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
+			(control_6_9.sensor_max_x_pos_11_8 << 8);
+	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
+			(control_6_9.sensor_max_y_pos_11_8 << 8);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	fhandler->data = NULL;
+
+	/*
+	 * Walk the optional query registers to find query 27, which
+	 * reports wakeup gesture support.  Each query's presence depends
+	 * on bits read from earlier queries.
+	 */
+	offset = sizeof(query_0_5.data);
+
+	/* query 6 */
+	if (query_0_5.has_rel)
+		offset += 1;
+
+	/* queries 7 8 */
+	if (query_0_5.has_gestures) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_7_8.data,
+				sizeof(query_7_8.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_7_8.data);
+	}
+
+	/* query 9 */
+	if (query_0_5.has_query_9) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_9.data,
+				sizeof(query_9.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_9.data);
+	}
+
+	/* query 10 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += 1;
+
+	/* query 11 */
+	if (query_0_5.has_query_11)
+		offset += 1;
+
+	/* query 12 */
+	if (query_0_5.has_query_12) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_12.data,
+				sizeof(query_12.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_12.data);
+	}
+
+	/* query 13 */
+	if (query_0_5.has_jitter_filter)
+		offset += 1;
+
+	/* query 14 */
+	if (query_0_5.has_query_12 && query_12.has_general_information_2)
+		offset += 1;
+
+	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
+	if (query_0_5.has_query_12 && query_12.has_physical_properties)
+		offset += 12;
+
+	/* query 27 */
+	if (query_0_5.has_query_27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_27.data,
+				sizeof(query_27.data));
+		if (retval < 0)
+			return retval;
+
+		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
+	}
+
+	/* Without wakeup gesture support, data 38 need not be located */
+	if (!rmi4_data->f11_wakeup_gesture)
+		return retval;
+
+	/*
+	 * Walk the F11 data registers to compute the offset of data 38,
+	 * the wakeup gesture status register.
+	 */
+	/* data 0 */
+	fingers_supported = fhandler->num_of_data_points;
+	offset = (fingers_supported + 3) / 4;
+
+	/* data 1 2 3 4 5 */
+	offset += 5 * fingers_supported;
+
+	/* data 6 7 */
+	if (query_0_5.has_rel)
+		offset += 2 * fingers_supported;
+
+	/* data 8 */
+	if (query_0_5.has_gestures && query_7_8.data[0])
+		offset += 1;
+
+	/* data 9 */
+	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
+		offset += 1;
+
+	/* data 10 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_pinch || query_7_8.has_flick))
+		offset += 1;
+
+	/* data 11 12 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_flick || query_7_8.has_rotate))
+		offset += 2;
+
+	/* data 13 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += (fingers_supported + 3) / 4;
+
+	/* data 14 15 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones ||
+			query_7_8.has_multi_finger_scroll ||
+			query_7_8.has_chiral_scroll))
+		offset += 2;
+
+	/* data 16 17 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones &&
+			query_7_8.individual_scroll_zones))
+		offset += 2;
+
+	/* data 18 19 20 21 22 23 24 25 26 27 */
+	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
+		offset += 10 * fingers_supported;
+
+	/* data 28 */
+	if (query_0_5.has_bending_correction ||
+			query_0_5.has_large_object_suppression)
+		offset += 1;
+
+	/* data 29 30 31 */
+	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
+		offset += 3;
+
+	/* data 32 */
+	if (query_0_5.has_query_12 &&
+			query_12.has_small_object_detection_tuning)
+		offset += 1;
+
+	/* data 33 34 */
+	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
+		offset += 2;
+
+	/* data 35 */
+	if (query_0_5.has_query_12 && query_12.has_8bit_w)
+		offset += fingers_supported;
+
+	/* data 36 */
+	if (query_0_5.has_bending_correction)
+		offset += 1;
+
+	/* data 37 */
+	if (query_0_5.has_query_27 && query_27.has_data_37)
+		offset += 1;
+
+	/* data 38 */
+	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
+		extra_data->data38_offset = offset;
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_f12_set_enables - write the F12 report enable bits.
+ *
+ * A nonzero @ctrl28 caches the F12_CTRL28 register address for use by
+ * subsequent calls; passing 0 reuses the previously cached address.
+ *
+ * Returns the register-write result (negative errno on failure).
+ */
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short ctrl28)
+{
+	static unsigned short ctrl_28_address;
+
+	if (ctrl28)
+		ctrl_28_address = ctrl28;
+
+	/* Pass the write result straight through (no redundant branch) */
+	return synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_28_address,
+			&rmi4_data->report_enable,
+			sizeof(rmi4_data->report_enable));
+}
+
+/*
+ * synaptics_rmi4_f12_find_sub - test whether a subpacket of an F12
+ * packet register is present.
+ *
+ * @presence: register presence map; byte 0 holds the size of the
+ *            register structure table, bytes 1.. hold one presence bit
+ *            per register
+ * @structure_offset: offset of the register structure table within the
+ *                    query register block
+ * @reg: packet register number to look up
+ * @sub: subpacket number within that register
+ *
+ * Returns 1 if the subpacket is present, 0 if not, or a negative error
+ * code on failure.
+ */
+static int synaptics_rmi4_f12_find_sub(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		unsigned char *presence, unsigned char presence_size,
+		unsigned char structure_offset, unsigned char reg,
+		unsigned char sub)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char p_index;
+	unsigned char s_index;
+	unsigned char offset;
+	unsigned char max_reg;
+	unsigned char *structure;
+
+	max_reg = (presence_size - 1) * 8 - 1;
+
+	if (reg > max_reg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Register number (%d) over limit\n",
+				__func__, reg);
+		return -EINVAL;
+	}
+
+	/* Presence bits start at byte 1; 8 registers per byte */
+	p_index = reg / 8 + 1;
+	bitnum = reg % 8;
+	if ((presence[p_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Register %d is not present\n",
+				__func__, reg);
+		return -EINVAL;
+	}
+
+	/* presence[0] is the size of the register structure table */
+	structure = kmalloc(presence[0], GFP_KERNEL);
+	if (!structure) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for structure register\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + structure_offset,
+			structure,
+			presence[0]);
+	if (retval < 0)
+		goto exit;
+
+	s_index = 0;
+
+	/*
+	 * Skip the structure entries of all present registers preceding
+	 * @reg.  A size byte of 0x00 appears to be followed by a two-byte
+	 * size (hence the skip of 3); the subpacket presence bytes that
+	 * follow are chained via their top bit -- TODO confirm against
+	 * the F12 register structure encoding.
+	 */
+	for (regnum = 0; regnum < reg; regnum++) {
+		p_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((presence[p_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		if (structure[s_index] == 0x00)
+			s_index += 3;
+		else
+			s_index++;
+
+		while (structure[s_index] & ~MASK_7BIT)
+			s_index++;
+
+		s_index++;
+	}
+
+	/* Locate the presence bit of subpacket @sub (7 bits per byte) */
+	cnt = 0;
+	s_index++;
+	offset = sub / 7;
+	bitnum = sub % 7;
+
+	do {
+		if (cnt == offset) {
+			if (structure[s_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (structure[s_index + cnt - 1] & ~MASK_7BIT);
+
+	retval = 0;
+
+exit:
+	kfree(structure);
+
+	return retval;
+}
+
+static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval = 0;
+	int temp;
+	unsigned char subpacket;
+	unsigned char ctrl_23_size;
+	unsigned char size_of_2d_data;
+	unsigned char size_of_query5;
+	unsigned char size_of_query8;
+	unsigned char ctrl_8_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_28_offset;
+	unsigned char ctrl_31_offset;
+	unsigned char ctrl_58_offset;
+	unsigned char num_of_fingers;
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
+	struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
+	struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
+	struct synaptics_rmi4_f12_ctrl_58 *ctrl_58 = NULL;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	query_8 = kzalloc(sizeof(*query_8), GFP_KERNEL);
+	if (!query_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_8 = kzalloc(sizeof(*ctrl_8), GFP_KERNEL);
+	if (!ctrl_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_23 = kzalloc(sizeof(*ctrl_23), GFP_KERNEL);
+	if (!ctrl_23) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_23\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_31 = kzalloc(sizeof(*ctrl_31), GFP_KERNEL);
+	if (!ctrl_31) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_31\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_58 = kzalloc(sizeof(*ctrl_58), GFP_KERNEL);
+	if (!ctrl_58) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_58\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 4,
+			&size_of_query5,
+			sizeof(size_of_query5));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query5 > sizeof(query_5->data))
+		size_of_query5 = sizeof(query_5->data);
+	memset(query_5->data, 0x00, sizeof(query_5->data));
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 5,
+			query_5->data,
+			size_of_query5);
+	if (retval < 0)
+		goto exit;
+
+	ctrl_8_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present +
+			query_5->ctrl4_is_present +
+			query_5->ctrl5_is_present +
+			query_5->ctrl6_is_present +
+			query_5->ctrl7_is_present;
+
+	ctrl_20_offset = ctrl_8_offset +
+			query_5->ctrl8_is_present +
+			query_5->ctrl9_is_present +
+			query_5->ctrl10_is_present +
+			query_5->ctrl11_is_present +
+			query_5->ctrl12_is_present +
+			query_5->ctrl13_is_present +
+			query_5->ctrl14_is_present +
+			query_5->ctrl15_is_present +
+			query_5->ctrl16_is_present +
+			query_5->ctrl17_is_present +
+			query_5->ctrl18_is_present +
+			query_5->ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5->ctrl20_is_present +
+			query_5->ctrl21_is_present +
+			query_5->ctrl22_is_present;
+
+	ctrl_28_offset = ctrl_23_offset +
+			query_5->ctrl23_is_present +
+			query_5->ctrl24_is_present +
+			query_5->ctrl25_is_present +
+			query_5->ctrl26_is_present +
+			query_5->ctrl27_is_present;
+
+	ctrl_31_offset = ctrl_28_offset +
+			query_5->ctrl28_is_present +
+			query_5->ctrl29_is_present +
+			query_5->ctrl30_is_present;
+
+	ctrl_58_offset = ctrl_31_offset +
+			query_5->ctrl31_is_present +
+			query_5->ctrl32_is_present +
+			query_5->ctrl33_is_present +
+			query_5->ctrl34_is_present +
+			query_5->ctrl35_is_present +
+			query_5->ctrl36_is_present +
+			query_5->ctrl37_is_present +
+			query_5->ctrl38_is_present +
+			query_5->ctrl39_is_present +
+			query_5->ctrl40_is_present +
+			query_5->ctrl41_is_present +
+			query_5->ctrl42_is_present +
+			query_5->ctrl43_is_present +
+			query_5->ctrl44_is_present +
+			query_5->ctrl45_is_present +
+			query_5->ctrl46_is_present +
+			query_5->ctrl47_is_present +
+			query_5->ctrl48_is_present +
+			query_5->ctrl49_is_present +
+			query_5->ctrl50_is_present +
+			query_5->ctrl51_is_present +
+			query_5->ctrl52_is_present +
+			query_5->ctrl53_is_present +
+			query_5->ctrl54_is_present +
+			query_5->ctrl55_is_present +
+			query_5->ctrl56_is_present +
+			query_5->ctrl57_is_present;
+
+	ctrl_23_size = 2;
+	for (subpacket = 2; subpacket <= 4; subpacket++) {
+		retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+				fhandler, query_5->data, sizeof(query_5->data),
+				6, 23, subpacket);
+		if (retval == 1)
+			ctrl_23_size++;
+		else if (retval < 0)
+			goto exit;
+
+	}
+
+	retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+			fhandler, query_5->data, sizeof(query_5->data),
+			6, 20, 0);
+	if (retval == 1)
+		rmi4_data->set_wakeup_gesture = 2;
+	else if (retval == 0)
+		rmi4_data->set_wakeup_gesture = 0;
+	else if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_23_offset,
+			ctrl_23->data,
+			ctrl_23_size);
+	if (retval < 0)
+		goto exit;
+
+	/* Maximum number of fingers supported */
+	fhandler->num_of_data_points = min_t(unsigned char,
+			ctrl_23->max_reported_objects,
+			(unsigned char)F12_FINGERS_TO_SUPPORT);
+
+	num_of_fingers = fhandler->num_of_data_points;
+	rmi4_data->num_of_fingers = num_of_fingers;
+
+	rmi4_data->stylus_enable = ctrl_23->stylus_enable;
+	rmi4_data->eraser_enable = ctrl_23->eraser_enable;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query8 > sizeof(query_8->data))
+		size_of_query8 = sizeof(query_8->data);
+	memset(query_8->data, 0x00, sizeof(query_8->data));
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 8,
+			query_8->data,
+			size_of_query8);
+	if (retval < 0)
+		goto exit;
+
+	/* Determine the presence of the Data0 register */
+	extra_data->data1_offset = query_8->data0_is_present;
+
+	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+		extra_data->data15_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present;
+		extra_data->data15_size = (num_of_fingers + 7) / 8;
+	} else {
+		extra_data->data15_size = 0;
+	}
+
+#ifdef REPORT_2D_PRESSURE
+	if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
+		extra_data->data29_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present +
+				query_8->data15_is_present +
+				query_8->data16_is_present +
+				query_8->data17_is_present +
+				query_8->data18_is_present +
+				query_8->data19_is_present +
+				query_8->data20_is_present +
+				query_8->data21_is_present +
+				query_8->data22_is_present +
+				query_8->data23_is_present +
+				query_8->data24_is_present +
+				query_8->data25_is_present +
+				query_8->data26_is_present +
+				query_8->data27_is_present +
+				query_8->data28_is_present;
+		extra_data->data29_size = 0;
+		for (subpacket = 0; subpacket <= num_of_fingers; subpacket++) {
+			retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+					fhandler, query_8->data,
+					sizeof(query_8->data),
+					9, 29, subpacket);
+			if (retval == 1)
+				extra_data->data29_size += 2;
+			else if (retval < 0)
+				goto exit;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_58_offset,
+				ctrl_58->data,
+				sizeof(ctrl_58->data));
+		if (retval < 0)
+			goto exit;
+		rmi4_data->force_min =
+				(int)(ctrl_58->min_force_lsb << 0) |
+				(int)(ctrl_58->min_force_msb << 8);
+		rmi4_data->force_max =
+				(int)(ctrl_58->max_force_lsb << 0) |
+				(int)(ctrl_58->max_force_msb << 8);
+		rmi4_data->report_pressure = true;
+	} else {
+		extra_data->data29_size = 0;
+		rmi4_data->report_pressure = false;
+	}
+#endif
+
+	rmi4_data->report_enable = RPT_DEFAULT;
+#ifdef REPORT_2D_Z
+	rmi4_data->report_enable |= RPT_Z;
+#endif
+#ifdef REPORT_2D_W
+	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
+#endif
+
+	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_28_offset);
+	if (retval < 0)
+		goto exit;
+
+	if (query_5->ctrl8_is_present) {
+		rmi4_data->wedge_sensor = false;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_8_offset,
+				ctrl_8->data,
+				sizeof(ctrl_8->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	} else {
+		rmi4_data->wedge_sensor = true;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_31_offset,
+				ctrl_31->data,
+				sizeof(ctrl_31->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
+	if (rmi4_data->f12_wakeup_gesture) {
+		extra_data->ctrl20_offset = ctrl_20_offset;
+		extra_data->data4_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	/* Allocate memory for finger data storage space */
+	fhandler->data_size = num_of_fingers * size_of_2d_data;
+	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
+	if (!fhandler->data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+exit:
+	kfree(query_5);
+	kfree(query_8);
+	kfree(ctrl_8);
+	kfree(ctrl_23);
+	kfree(ctrl_31);
+	kfree(ctrl_58);
+
+	return retval;
+}
+
+/*
+ * Allocate the F1A (0D capacitive buttons) handle and its internal
+ * buffers, and read the F1A button query registers.
+ *
+ * On failure the partially populated handle remains attached to
+ * fhandler->data; the caller (synaptics_rmi4_f1a_init) releases it via
+ * synaptics_rmi4_f1a_kfree, so no cleanup is performed here.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	struct synaptics_rmi4_f1a_handle *f1a;
+
+	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+	if (!f1a) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for function handle\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	fhandler->data = (void *)f1a;
+	fhandler->extra = NULL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			f1a->button_query.data,
+			sizeof(f1a->button_query.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* max_button_count register is zero based */
+	f1a->max_count = f1a->button_query.max_button_count + 1;
+
+	/*
+	 * Two bytes (tx, rx) per button.  Use kcalloc for an
+	 * overflow-checked, zeroed allocation, consistent with the
+	 * allocations below.
+	 */
+	f1a->button_control.txrx_map = kcalloc(f1a->max_count, 2, GFP_KERNEL);
+	if (!f1a->button_control.txrx_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for tx rx mapping\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/* One bit per button, rounded up to whole bytes */
+	f1a->button_bitmask_size = (f1a->max_count + 7) / 8;
+
+	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+	if (!f1a->button_data_buffer) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for data buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	f1a->button_map = kcalloc(f1a->max_count,
+			sizeof(*(f1a->button_map)), GFP_KERNEL);
+	if (!f1a->button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for button map\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F1A tx/rx button mapping and query 4 registers, then build
+ * the key-code button map from the board data's cap_button_map.
+ *
+ * Returns 0 on success, a negative error code on register-read failure
+ * or missing board-file button map.
+ */
+static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char offset = 0;
+	struct synaptics_rmi4_f1a_query_4 query_4;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/*
+	 * NOTE(review): f1a->valid_button_count is only computed further
+	 * down; at this point it is still the zero-initialized value from
+	 * kzalloc, so this early assignment looks redundant — confirm.
+	 */
+	rmi4_data->valid_button_count = f1a->valid_button_count;
+
+	/* Offset of the tx/rx map within the F1A control register block */
+	offset = f1a->button_query.has_general_control +
+			f1a->button_query.has_interrupt_enable +
+			f1a->button_query.has_multibutton_select;
+
+	if (f1a->button_query.has_tx_rx_map) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + offset,
+				f1a->button_control.txrx_map,
+				f1a->max_count * 2);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tx rx mapping\n",
+					__func__);
+			return retval;
+		}
+
+		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
+	}
+
+	if (f1a->button_query.has_query4) {
+		/* Query 4 follows queries 0/1 plus optional queries 2 and 3 */
+		offset = 2 + f1a->button_query.has_query2 +
+				f1a->button_query.has_query3;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_4.data,
+				sizeof(query_4.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read button features 4\n",
+					__func__);
+			return retval;
+		}
+
+		if (query_4.has_ctrl24)
+			rmi4_data->external_afe_buttons = true;
+		else
+			rmi4_data->external_afe_buttons = false;
+	}
+
+	if (!bdata->cap_button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: cap_button_map is NULL in board file\n",
+				__func__);
+		return -ENODEV;
+	} else if (!bdata->cap_button_map->map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Button map is missing in board file\n",
+				__func__);
+		return -ENODEV;
+	} else {
+		/* Clamp to the smaller of firmware and board-file counts */
+		if (bdata->cap_button_map->nbuttons != f1a->max_count) {
+			f1a->valid_button_count = min(f1a->max_count,
+					bdata->cap_button_map->nbuttons);
+		} else {
+			f1a->valid_button_count = f1a->max_count;
+		}
+
+		for (ii = 0; ii < f1a->valid_button_count; ii++)
+			f1a->button_map[ii] = bdata->cap_button_map->map[ii];
+
+		rmi4_data->valid_button_count = f1a->valid_button_count;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the F1A handle and all of its buffers, then detach it from
+ * the function handler.  Safe to call when no handle was allocated.
+ */
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+	if (!f1a)
+		return;
+
+	kfree(f1a->button_control.txrx_map);
+	kfree(f1a->button_data_buffer);
+	kfree(f1a->button_map);
+	kfree(f1a);
+	fhandler->data = NULL;
+}
+
+/*
+ * Initialize the F1A (0D buttons) function handler: set up its
+ * interrupt mask, allocate its data structures, and build the button
+ * map.  All F1A resources are released on any failure.
+ */
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int rc;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	rc = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+	if (rc >= 0)
+		rc = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+
+	if (rc < 0) {
+		synaptics_rmi4_f1a_kfree(fhandler);
+		return rc;
+	}
+
+	rmi4_data->button_0d_enabled = 1;
+
+	return 0;
+}
+
+/*
+ * Tear down every registered function handler and leave the support
+ * list empty.  F1A handlers own extra allocations and are released via
+ * their dedicated helper; all others just free extra/data.
+ */
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_fn *entry;
+	struct synaptics_rmi4_fn *next;
+	struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);
+
+	/* Safe iteration: entries are deleted while walking the list.
+	 * An empty list simply makes this loop a no-op.
+	 */
+	list_for_each_entry_safe(entry, next, &rmi->support_fn_list, link) {
+		if (entry->fn_number == SYNAPTICS_RMI4_F1A) {
+			synaptics_rmi4_f1a_kfree(entry);
+		} else {
+			kfree(entry->extra);
+			kfree(entry->data);
+		}
+		list_del(&entry->link);
+		kfree(entry);
+	}
+
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+}
+
+/*
+ * Poll the F01 device status register until a firmware CRC check in
+ * progress completes, then record whether the controller is in flash
+ * programming (bootloader) mode.
+ *
+ * @was_in_bl_mode: set to true if any CRC wait occurred (i.e. the
+ *		    device was busy when first read).
+ *
+ * Returns 0 on success, -EINVAL if the CRC check does not finish
+ * within CHECK_STATUS_TIMEOUT_MS, or a register-read error code.
+ */
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode)
+{
+	int retval;
+	int timeout = CHECK_STATUS_TIMEOUT_MS;
+	struct synaptics_rmi4_f01_device_status status;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0)
+		return retval;
+
+	/* Budget is decremented by the 20 ms sleep on each iteration */
+	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		if (timeout > 0)
+			msleep(20);
+		else
+			return -EINVAL;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0)
+			return retval;
+
+		timeout -= 20;
+	}
+
+	/* Any consumed budget means the loop body ran at least once */
+	if (timeout != CHECK_STATUS_TIMEOUT_MS)
+		*was_in_bl_mode = true;
+
+	if (status.flash_prog == 1) {
+		rmi4_data->flash_prog_mode = true;
+		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+				__func__,
+				status.status_code);
+	} else {
+		rmi4_data->flash_prog_mode = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F01 device control register, cache the current no-sleep
+ * setting, and write the register back with the CONFIGURED bit set.
+ *
+ * Returns 0 on success or a negative register-access error code.
+ */
+static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ctrl;
+	int retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+		return retval;
+	}
+
+	/* Remember the firmware's no-sleep preference before modifying */
+	rmi4_data->no_sleep_setting = ctrl & NO_SLEEP_ON;
+	ctrl |= CONFIGURED;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+	}
+
+	return retval;
+}
+
+/*
+ * Allocate a zeroed function handler and populate its full register
+ * addresses by combining the PDT base addresses with the page number
+ * in the high byte.
+ *
+ * Returns 0 on success or -ENOMEM (with *fhandler set to NULL).
+ */
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+	struct synaptics_rmi4_fn *fn;
+	int page_base = page_number << 8;
+
+	*fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL);
+	fn = *fhandler;
+	if (!fn)
+		return -ENOMEM;
+
+	fn->full_addr.data_base = rmi_fd->data_base_addr | page_base;
+	fn->full_addr.ctrl_base = rmi_fd->ctrl_base_addr | page_base;
+	fn->full_addr.cmd_base = rmi_fd->cmd_base_addr | page_base;
+	fn->full_addr.query_base = rmi_fd->query_base_addr | page_base;
+
+	return 0;
+}
+
+/*
+ * Scan the RMI4 Page Description Tables, register a handler for every
+ * supported function (F01/F11/F12/F1A/F21/F35/F51), then read the F01
+ * identification registers and build the interrupt masks.
+ *
+ * Restarts the whole scan (rescan_pdt) if the device was found busy in
+ * bootloader CRC mode, and shortcuts to interrupt setup when the
+ * controller is in flash programming mode.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		/* PDT entries are walked from PDT_START downward */
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			/* Temporarily place the page number in the high byte */
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page bits again for the loop arithmetic */
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				/* Device was mid-CRC: results are suspect,
+				 * start the whole PDT scan over.
+				 */
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+					/* Optionally treat 0D-button init
+					 * failure as non-fatal.
+					 */
+#ifdef IGNORE_FN_INIT_FAILURE
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+#ifdef USE_DATA_SERVER
+			case SYNAPTICS_RMI4_F21:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				fhandler->fn_number = rmi_fd.fn_number;
+				fhandler->num_of_data_sources =
+						rmi_fd.intr_src_count;
+
+				synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
+						intr_count);
+				break;
+#endif
+			case SYNAPTICS_RMI4_F35:
+				/* Microbootloader mode indicator; no handler */
+				f35found = true;
+				break;
+#ifdef F51_DISCRETE_FORCE
+			case SYNAPTICS_RMI4_F51:
+				rmi4_data->f51_query_base_addr =
+						rmi_fd.query_base_addr |
+						(page_number << 8);
+				break;
+#endif
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+flash_prog_mode:
+	/* One interrupt status register per 8 interrupt sources */
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	/* Unpack the standard F01 query block */
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	/* 24-bit little-endian build ID */
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * Request/free a GPIO and optionally configure its direction.
+ *
+ * @gpio:   GPIO number
+ * @config: true to request and configure, false to free
+ * @dir:    0 for input, nonzero for output (used only when config)
+ * @state:  initial output level (used only for outputs)
+ *
+ * Returns 0 on success or a negative error code from the gpio layer.
+ */
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	char buf[16];
+
+	if (config) {
+		/*
+		 * Bound the label by the real buffer size; the previous
+		 * PAGE_SIZE bound allowed snprintf to write past the
+		 * 16-byte stack buffer.  %d matches the signed gpio arg.
+		 */
+		snprintf(buf, sizeof(buf), "dsx_gpio_%d\n", gpio);
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
+/*
+ * Configure the input device's capabilities from the queried sensor
+ * parameters: 2D axis ranges, optional width/pressure axes, MT slots,
+ * 0D buttons, virtual buttons, and the wakeup gesture key.
+ */
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/* 2D position ranges from the queried sensor maximums */
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+	/* Snapshot the applied settings for later reference */
+	rmi4_data->input_settings.sensor_max_x = rmi4_data->sensor_max_x;
+	rmi4_data->input_settings.sensor_max_y = rmi4_data->sensor_max_y;
+	rmi4_data->input_settings.max_touch_width = rmi4_data->max_touch_width;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, rmi4_data->force_min,
+				rmi4_data->force_max, 0, 0);
+
+		rmi4_data->input_settings.force_min = rmi4_data->force_min;
+		rmi4_data->input_settings.force_max = rmi4_data->force_max;
+	}
+#elif defined(F51_DISCRETE_FORCE)
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_PRESSURE, 0,
+			FORCE_LEVEL_MAX, 0, 0);
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	/* Newer kernels take an extra flags argument */
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	rmi4_data->input_settings.num_of_fingers = rmi4_data->num_of_fingers;
+
+	/* Locate the F1A handler, if 0D buttons are present */
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+
+		rmi4_data->input_settings.valid_button_count =
+				f1a->valid_button_count;
+	}
+
+	/* Virtual buttons: map stores 5 values per button, key code first */
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+}
+
+/*
+ * Allocate and register the touch input device (and, when the sensor
+ * reports stylus support, a second stylus input device).  Queries the
+ * device first so axis ranges are known before registration.
+ *
+ * Returns 0 on success or a negative error code; on failure all
+ * partially created devices and function handlers are torn down.
+ */
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	/* Board file may restrict the usable 2D area along Y */
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	rmi4_data->input_settings.stylus_enable = rmi4_data->stylus_enable;
+	rmi4_data->input_settings.eraser_enable = rmi4_data->eraser_enable;
+
+	/* Done unless the sensor also reported stylus support */
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+	/*
+	 * Unwind order matters: once registered, the touch device must be
+	 * released with input_unregister_device(); input_free_device() is
+	 * only for a device that was never registered (it is a no-op when
+	 * input_dev has already been set to NULL above).
+	 */
+err_register_stylus:
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
+/*
+ * Request and configure the GPIOs described in the board data, then run
+ * the power-on and reset pulse sequence.  The attention (IRQ) GPIO is
+ * mandatory; power_gpio and reset_gpio are optional (a negative number
+ * means "not used").  On failure, every GPIO acquired so far is released
+ * in reverse order.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Attention line: input (direction 0), no initial level needed */
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	/* Power line: output, initially driven to the "off" state */
+	if (bdata->power_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	/* Reset line: output, initially deasserted */
+	if (bdata->reset_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	/* Drive the power rail on and wait for it to settle */
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	/* Pulse reset: assert, hold, deassert, then wait for the
+	 * controller to come out of reset */
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
+/*
+ * Look up the optional pinctrl handle and its active/suspend/release
+ * states.  Pinctrl is optional: on any failure ts_pinctrl is cleared to
+ * NULL and the caller continues without it (the caller checks both the
+ * return value and ts_pinctrl before selecting a state).
+ *
+ * NOTE(review): when devm_pinctrl_get() returns NULL (pinctrl not
+ * compiled in), PTR_ERR(NULL) is 0, so this can return 0 from the error
+ * path; the caller's "retval && ts_pinctrl" guard tolerates that, but
+ * any new caller must not rely on the return value alone.
+ */
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+		retval = PTR_ERR(rmi4_data->ts_pinctrl);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	rmi4_data->pinctrl_state_active
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_active);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_suspend
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	/* The release state is best-effort: a lookup failure is logged
+	 * but still reported as success (cleanup paths re-check it with
+	 * IS_ERR_OR_NULL before use) */
+	rmi4_data->pinctrl_state_release
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+	rmi4_data->ts_pinctrl = NULL;
+	return retval;
+}
+
+
+/*
+ * Acquire (get == true) or release (get == false) the power (avdd) and
+ * bus pull-up (vdd) regulators named in the board data, and program
+ * their load current and voltage.  Either regulator name may be absent
+ * or empty, in which case that regulator is skipped entirely.  Returns
+ * 0 on success or a negative errno; on failure both handles are
+ * released and cleared.
+ */
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			rmi4_data->pwr_reg = NULL;
+			goto regulator_put;
+		}
+
+		/*
+		 * Configure load/voltage only for a regulator we actually
+		 * obtained; the previous unconditional calls dereferenced
+		 * a NULL handle when no regulator name was provided.
+		 */
+		retval = regulator_set_load(rmi4_data->pwr_reg,
+			20000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set regulator current avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(rmi4_data->pwr_reg,
+				3000000,
+				3008000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			rmi4_data->bus_reg = NULL;
+			goto regulator_put;
+		}
+
+		retval = regulator_set_load(rmi4_data->bus_reg,
+			62000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator current vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(rmi4_data->bus_reg,
+				1800000,
+				1800000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+/*
+ * Enable (enable == true) or disable (enable == false) the bus pull-up
+ * (vdd) and power (avdd) regulators.  vdd_status/avdd_status track the
+ * current on/off state so repeated calls do not double-enable or
+ * double-disable a regulator.  Enable order is vdd then avdd; disable
+ * order is the reverse.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Disable request: jump straight into the power-down sequence */
+	if (!enable) {
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 0) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+		rmi4_data->vdd_status = 1;
+	}
+
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 0) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			goto disable_bus_reg;
+		}
+		rmi4_data->avdd_status = 1;
+		/* Give the supply time to ramp before touching the chip */
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 1) {
+		regulator_disable(rmi4_data->pwr_reg);
+		rmi4_data->avdd_status = 0;
+	}
+
+disable_bus_reg:
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 1) {
+		regulator_disable(rmi4_data->bus_reg);
+		rmi4_data->vdd_status = 0;
+	}
+
+exit:
+	return retval;
+}
+
+/*
+ * Report all touch and stylus contacts as released so userspace does
+ * not see stuck fingers after a reset, suspend, or rebuild.  Holds the
+ * report mutex for the duration and clears fingers_on_2d.  Always
+ * returns 0.
+ */
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct input_dev *touch_dev = rmi4_data->input_dev;
+	unsigned char slot;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	/* Release every MT slot individually under protocol B */
+	for (slot = 0; slot < rmi4_data->num_of_fingers; slot++) {
+		input_mt_slot(touch_dev, slot);
+		input_mt_report_slot_state(touch_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(touch_dev, BTN_TOUCH, 0);
+	input_report_key(touch_dev, BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(touch_dev);
+#endif
+	input_sync(touch_dev);
+
+	if (rmi4_data->stylus_enable) {
+		struct input_dev *pen_dev = rmi4_data->stylus_dev;
+
+		input_report_key(pen_dev, BTN_TOUCH, 0);
+		input_report_key(pen_dev, BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable)
+			input_report_key(pen_dev, BTN_TOOL_RUBBER, 0);
+		input_sync(pen_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
+/*
+ * Issue an F01 software reset command to the controller, wait for the
+ * board-specified reset delay, then re-run the optional UI hardware
+ * init hook.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char reset_cmd = 0x01;
+	int retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&reset_cmd,
+			sizeof(reset_cmd));
+	if (retval < 0)
+		return retval;
+
+	/* Let the firmware come back up before touching it again */
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (rmi4_data->hw_if->ui_hw_init) {
+		retval = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Decide whether the input device must be torn down and rebuilt after a
+ * reset: returns 1 if any capability recorded at registration time
+ * (input_settings) differs from the freshly queried device state,
+ * otherwise 0.
+ */
+static int synaptics_rmi4_do_rebuild(struct synaptics_rmi4_data *rmi4_data)
+{
+	const struct synaptics_rmi4_input_settings *saved =
+			&(rmi4_data->input_settings);
+
+	return (saved->num_of_fingers != rmi4_data->num_of_fingers) ||
+		(saved->valid_button_count != rmi4_data->valid_button_count) ||
+		(saved->max_touch_width != rmi4_data->max_touch_width) ||
+		(saved->sensor_max_x != rmi4_data->sensor_max_x) ||
+		(saved->sensor_max_y != rmi4_data->sensor_max_y) ||
+		(saved->force_min != rmi4_data->force_min) ||
+		(saved->force_max != rmi4_data->force_max) ||
+		(saved->stylus_enable != rmi4_data->stylus_enable) ||
+		(saved->eraser_enable != rmi4_data->eraser_enable);
+}
+
+/*
+ * Delayed work that tears down and rebuilds the input device(s) after a
+ * reset detected capability changes (see synaptics_rmi4_do_rebuild).
+ * With the IRQ masked and both the reset mutex and the expansion-
+ * function mutex held, it removes expansion functions and sysfs
+ * attributes, unregisters the old input/stylus devices, re-creates them
+ * via synaptics_rmi4_set_input_dev(), then restores sysfs attributes
+ * and re-inits expansion functions.  The IRQ is re-enabled even on
+ * failure.
+ */
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	/* Give expansion modules a chance to detach from the old device */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	/* Release stale contacts before the device disappears */
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	/* Re-attach expansion modules to the new device */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+}
+
+/*
+ * Light-weight re-initialization after resume: release stale contacts,
+ * restore F12 report enables, re-enable interrupts, let expansion
+ * functions re-init themselves, and set the device "configured" bit.
+ * Unlike synaptics_rmi4_reset_device() this does not reset the chip or
+ * rebuild the function list.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	/* Only F12 needs its report enables reprogrammed here */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
+				synaptics_rmi4_f12_set_enables(rmi4_data, 0);
+				break;
+			}
+		}
+	}
+
+	retval = synaptics_rmi4_int_enable(rmi4_data, true);
+	if (retval < 0)
+		goto exit;
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reinit != NULL)
+				exp_fhandler->exp_fn->reinit(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+	return retval;
+}
+
+/*
+ * Full device reset: with the IRQ masked and the reset mutex held,
+ * issue a software reset, release stale contacts, drop and re-query the
+ * RMI function list, then notify expansion functions.  If rebuild is
+ * true and the re-queried capabilities differ from the registered input
+ * device (synaptics_rmi4_do_rebuild), schedule the rebuild work to
+ * re-create the input device.  Returns 0 on success or a negative
+ * errno; the IRQ is re-enabled on all paths.
+ */
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	/* Rebuild must be queued outside the reset mutex because the
+	 * rebuild work takes the same mutex itself */
+	if (rebuild && synaptics_rmi4_do_rebuild(rmi4_data)) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+	}
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+/*
+ * One-shot work item queued at the end of probe: poll until the
+ * framebuffer reports ready (fb_ready), then issue a device reset.
+ * Gives up with an error after FB_READY_TIMEOUT_S seconds.
+ */
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval = 0;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0)
+			goto err;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	/*
+	 * Return on success: the original fell through to the err label
+	 * and logged a bogus "Timed out" error on every successful run.
+	 */
+	return;
+
+err:
+	dev_err(rmi4_data->pdev->dev.parent,
+		"%s: Timed out waiting for FB ready\n",
+		__func__);
+}
+#endif
+
+/*
+ * Put the controller to sleep (enable == true) or wake it to normal
+ * operation (enable == false) by rewriting the sleep-mode bits
+ * (MASK_3BIT) of the F01 device-control register.  Updates
+ * rmi4_data->sensor_sleep on success.  Returns 0 on success or a
+ * negative errno from the register access.
+ */
+static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	unsigned char ctrl;
+	int retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Clear the sleep-mode field, then set the requested mode */
+	ctrl &= ~MASK_3BIT;
+	if (enable)
+		ctrl |= SENSOR_SLEEP;
+	else
+		ctrl |= rmi4_data->no_sleep_setting | NORMAL_OPERATION;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return retval;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return retval;
+}
+
+/*
+ * Delayed work that processes pending expansion-function registrations
+ * and removals queued by synaptics_rmi4_new_function().  Entries marked
+ * "insert" get their init() callback; entries marked "remove" get
+ * remove(), are unlinked, and freed.  Lock order (exp-init, then reset,
+ * then exp_data) must match the other users of these mutexes.
+ */
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		/* _safe variant: removal entries are deleted while walking */
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+
+/*
+ * Public entry point for expansion modules (fw update, test reporting,
+ * etc.) to register (insert == true) or unregister (insert == false)
+ * themselves.  Registration appends a handler marked "insert"; removal
+ * marks the matching handler (by fn_type) "remove".  The actual init/
+ * remove callbacks run later in synaptics_rmi4_exp_fn_work(), which is
+ * queued here once the driver core has set queue_work.  May be called
+ * before the driver probes, hence the lazy exp_data initialization.
+ */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+/*
+ * Platform probe: validate the hardware interface and board data,
+ * allocate and initialize the driver context, register the DRM panel
+ * notifier, and defer the rest of initialization (regulators, GPIOs,
+ * input device, IRQ) to synaptics_rmi4_defer_probe() on a dedicated
+ * workqueue so probe does not block on panel readiness.  Returns 0 on
+ * success or a negative errno.
+ */
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+	/*
+	 * Initialized: the original left retval uninitialized, so the
+	 * success path returned stack garbage when active_panel was NULL
+	 * and the err_probe_wq path returned garbage unconditionally.
+	 */
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		dev_err(&pdev->dev,
+				"%s: No hardware interface found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	bdata = hw_if->board_data;
+	if (!bdata) {
+		dev_err(&pdev->dev,
+				"%s: No board data found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&pdev->dev,
+				"%s: Failed to alloc mem for rmi4_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->pdev = pdev;
+	rmi4_data->current_page = MASK_8BIT;
+	rmi4_data->hw_if = hw_if;
+	rmi4_data->suspend = false;
+	rmi4_data->irq_enabled = false;
+	rmi4_data->fingers_on_2d = false;
+
+	rmi4_data->reset_device = synaptics_rmi4_reset_device;
+	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+	rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+	rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+	mutex_init(&(rmi4_data->rmi4_reset_mutex));
+	mutex_init(&(rmi4_data->rmi4_report_mutex));
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+	mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+	mutex_init(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	platform_set_drvdata(pdev, rmi4_data);
+
+	vir_button_map = bdata->vir_button_map;
+
+	rmi4_data->initialized = false;
+	rmi4_data->fb_notifier.notifier_call =
+					synaptics_rmi4_dsi_panel_notifier_cb;
+	if (active_panel) {
+		retval = drm_panel_notifier_register(active_panel,
+				&rmi4_data->fb_notifier);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to register fb notifier client\n",
+					__func__);
+			goto err_drm_reg;
+		}
+	}
+	rmi4_data->rmi4_probe_wq = create_singlethread_workqueue(
+						"Synaptics_rmi4_probe_wq");
+	if (!rmi4_data->rmi4_probe_wq) {
+		dev_err(&pdev->dev,
+				"%s: Failed to create probe workqueue\n",
+				__func__);
+		/* Set an error code; the original returned stale retval */
+		retval = -ENOMEM;
+		goto err_probe_wq;
+	}
+	INIT_WORK(&rmi4_data->rmi4_probe_work, synaptics_rmi4_defer_probe);
+	queue_work(rmi4_data->rmi4_probe_wq, &rmi4_data->rmi4_probe_work);
+
+	return retval;
+
+err_probe_wq:
+	if (active_panel)
+		drm_panel_notifier_unregister(active_panel,
+				&rmi4_data->fb_notifier);
+
+err_drm_reg:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * Deferred half of probe, run on rmi4_probe_wq: wait for the DRM panel
+ * to finish init, then bring up regulators, GPIOs, pinctrl, the input
+ * device(s), the IRQ, sysfs attributes, and the rebuild/expansion/reset
+ * workqueues.  On any failure, everything acquired so far is torn down
+ * in reverse order via the goto chain.
+ */
+static void synaptics_rmi4_defer_probe(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = container_of(work,
+				struct synaptics_rmi4_data, rmi4_probe_work);
+	struct platform_device *pdev;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	pdev = rmi4_data->pdev;
+	hw_if = rmi4_data->hw_if;
+	bdata = hw_if->board_data;
+
+	/* Block until the panel notifier completes drm_init_done */
+	init_completion(&rmi4_data->drm_init_done);
+	retval = wait_for_completion_interruptible(&rmi4_data->drm_init_done);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Wait for DRM init was interrupted\n",
+				__func__);
+		goto err_drm_init_wait;
+	}
+
+	retval = synaptics_rmi4_get_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to get regulators\n",
+				__func__);
+		goto err_get_reg;
+	}
+
+	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable regulators\n",
+				__func__);
+		goto err_enable_reg;
+	}
+
+	retval = synaptics_rmi4_set_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up GPIO's\n",
+				__func__);
+		goto err_set_gpio;
+	}
+
+	retval = synaptics_dsx_pinctrl_init(rmi4_data);
+	if (!retval && rmi4_data->ts_pinctrl) {
+		/*
+		 * Pinctrl handle is optional.
+		 * If pinctrl handle is found let pins to be
+		 * configured in active state. If not found continue
+		 * further without error.
+		 */
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+				"%s: Failed to select %s pinstate %d\n",
+				__func__, PINCTRL_STATE_ACTIVE, retval);
+		}
+	}
+
+	if (hw_if->ui_hw_init) {
+		retval = hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to initialize hardware interface\n",
+					__func__);
+			goto err_ui_hw_init;
+		}
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto err_set_input_dev;
+	}
+
+#ifdef USE_EARLYSUSPEND
+	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+	register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	/* May already be initialized by an early expansion-module load */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+	retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable attention interrupt\n",
+				__func__);
+		goto err_enable_irq;
+	}
+
+	if (vir_button_map->nbuttons) {
+		rmi4_data->board_prop_dir = kobject_create_and_add(
+				"board_properties", NULL);
+		if (!rmi4_data->board_prop_dir) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create board_properties directory\n",
+					__func__);
+			goto err_virtual_buttons;
+		} else {
+			retval = sysfs_create_file(rmi4_data->board_prop_dir,
+					&virtual_key_map_attr.attr);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+						"%s: Failed to create virtual key map file\n",
+						__func__);
+				goto err_virtual_buttons;
+			}
+		}
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto err_sysfs;
+		}
+	}
+
+#ifdef USE_DATA_SERVER
+	memset(&interrupt_signal, 0, sizeof(interrupt_signal));
+	interrupt_signal.si_signo = SIGIO;
+	interrupt_signal.si_code = SI_USER;
+#endif
+
+	rmi4_data->rb_workqueue =
+			create_singlethread_workqueue("dsx_rebuild_workqueue");
+	INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+	exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+	INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+	exp_data.rmi4_data = rmi4_data;
+	exp_data.queue_work = true;
+	queue_delayed_work(exp_data.workqueue,
+			&exp_data.work,
+			0);
+
+#ifdef FB_READY_RESET
+	rmi4_data->reset_workqueue =
+			create_singlethread_workqueue("dsx_reset_workqueue");
+	INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+	queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+	rmi4_data->initialized = true;
+
+	return;
+
+err_sysfs:
+	/*
+	 * attr_count is unsigned char, so the original
+	 * "for (attr_count--; attr_count >= 0; attr_count--)" condition
+	 * was always true and underflowed past zero.  Count down with a
+	 * post-decrement test instead, removing attrs[attr_count-1]..[0].
+	 */
+	while (attr_count-- > 0) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_virtual_buttons:
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+err_set_input_dev:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_ui_hw_init:
+err_set_gpio:
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			if (pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release))
+				dev_err(&pdev->dev,
+					"%s: Failed to select %s pinstate\n",
+					__func__, PINCTRL_STATE_RELEASE);
+		}
+	}
+
+err_enable_reg:
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+err_drm_init_wait:
+	if (active_panel)
+		drm_panel_notifier_unregister(active_panel,
+				&rmi4_data->fb_notifier);
+	/*
+	 * NOTE(review): this function runs in rmi4_probe_work context,
+	 * so cancel_work_sync() on our own work item and destroying the
+	 * workqueue currently executing us look like a self-deadlock
+	 * risk -- confirm against the workqueue documentation.
+	 */
+	cancel_work_sync(&rmi4_data->rmi4_probe_work);
+	destroy_workqueue(rmi4_data->rmi4_probe_wq);
+	kfree(rmi4_data);
+}
+
+/*
+ * Platform remove: tear down everything probe/defer-probe set up, in
+ * roughly the reverse order -- workqueues, sysfs attributes, virtual
+ * button kobject, IRQ, panel notifier, input devices, GPIOs, pinctrl,
+ * and regulators -- then free the driver context.  Always returns 0.
+ */
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	if (active_panel)
+		drm_panel_notifier_unregister(active_panel,
+				&rmi4_data->fb_notifier);
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+	/* Move pins to the release state if one exists, else drop the
+	 * pinctrl handle entirely */
+	if (rmi4_data->ts_pinctrl) {
+			if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+				devm_pinctrl_put(rmi4_data->ts_pinctrl);
+				rmi4_data->ts_pinctrl = NULL;
+			} else {
+				pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release);
+			}
+		}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	cancel_work_sync(&rmi4_data->rmi4_probe_work);
+	destroy_workqueue(rmi4_data->rmi4_probe_wq);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * DRM panel blank/unblank notifier.  On an early-blank powerdown event
+ * the touch controller is suspended and fb_ready cleared; on an unblank
+ * event it is resumed (or, before first init, drm_init_done is
+ * completed to release the deferred probe) and fb_ready set.  Always
+ * returns 0 so other notifier clients still run.
+ */
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct drm_panel_notifier *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+	int transition;
+
+	if (!evdata || !evdata->data || !rmi4_data)
+		return 0;
+
+	transition = *(int *)evdata->data;
+
+	if (event == DRM_PANEL_EARLY_EVENT_BLANK &&
+			transition == DRM_PANEL_BLANK_POWERDOWN) {
+		if (rmi4_data->initialized)
+			synaptics_rmi4_suspend(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = false;
+	} else if (event == DRM_PANEL_EVENT_BLANK &&
+			transition == DRM_PANEL_BLANK_UNBLANK) {
+		if (rmi4_data->initialized)
+			synaptics_rmi4_resume(&rmi4_data->pdev->dev);
+		else
+			complete(&rmi4_data->drm_init_done);
+		rmi4_data->fb_ready = true;
+	}
+
+	return 0;
+}
+
+#ifdef USE_EARLYSUSPEND
+/*
+ * Early-suspend handler (USE_EARLYSUSPEND builds).  If wakeup gestures
+ * are enabled, arm them and keep the IRQ as a wake source; otherwise
+ * mask the IRQ, put the sensor to sleep, and release any contacts.
+ * Expansion functions get their early_suspend callback in both cases.
+ * Returns 0.
+ */
+static int synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+	/*
+	 * Both locals are now initialized: the original used an
+	 * undeclared "retval" (a build break under USE_EARLYSUSPEND) and
+	 * read-modify-wrote device_ctrl after an unchecked register read.
+	 */
+	int retval = 0;
+	unsigned char device_ctrl = 0;
+
+	if (rmi4_data->stay_awake)
+		return retval;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* NO_SLEEP must be cleared so the gesture engine can run */
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+#ifdef SYNA_TDDI
+	if (rmi4_data->no_sleep_setting) {
+		synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+		device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+		synaptics_rmi4_reg_write(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+	}
+	synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+	usleep(TDDI_LPWG_WAIT_US);
+#endif
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return retval;
+}
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return retval;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return retval;
+}
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned char device_ctrl;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	if (!rmi4_data->suspend) {
+#ifdef SYNA_TDDI
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		usleep(TDDI_LPWG_WAIT_US);
+#endif
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
/*
 * synaptics_rmi4_resume - dev_pm_ops .resume callback.
 *
 * Mirror of synaptics_rmi4_suspend(): either disarms wakeup-gesture mode,
 * or re-enables the regulators, pulses the reset line, and wakes the
 * controller back up.  Expansion modules are notified at the end.
 * Always returns 0 so a touch failure never blocks system resume.
 */
static int synaptics_rmi4_resume(struct device *dev)
{
#ifdef FB_READY_RESET
	int retval;
#endif
	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;
	if (rmi4_data->stay_awake)
		return 0;

	if (rmi4_data->enable_wakeup_gesture) {
		disable_irq_wake(rmi4_data->irq);
		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
		goto exit;
	}

	/* Power first, then a full assert/deassert reset pulse. */
	synaptics_rmi4_enable_reg(rmi4_data, true);

	if (bdata->reset_gpio >= 0) {
		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
		msleep(bdata->reset_active_ms);
		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
		msleep(bdata->reset_delay_ms);
	}


	/* Page select is unknown after reset; force re-selection. */
	rmi4_data->current_page = MASK_8BIT;

	synaptics_rmi4_sleep_enable(rmi4_data, false);
	synaptics_rmi4_irq_enable(rmi4_data, true, false);

exit:
#ifdef FB_READY_RESET
	/* Optional software reset once resumed; failure is only logged. */
	retval = synaptics_rmi4_reset_device(rmi4_data, false);
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to issue reset command\n",
				__func__);
	}
#endif
	mutex_lock(&exp_data.mutex);
	if (!list_empty(&exp_data.list)) {
		list_for_each_entry(exp_fhandler, &exp_data.list, link)
			if (exp_fhandler->exp_fn->resume != NULL)
				exp_fhandler->exp_fn->resume(rmi4_data);
	}
	mutex_unlock(&exp_data.mutex);

	rmi4_data->suspend = false;

	return 0;
}
+
#ifdef CONFIG_PM
/* System sleep callbacks only; no runtime-PM ops are wired up. */
static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
	.suspend = synaptics_rmi4_suspend,
	.resume = synaptics_rmi4_resume,
};
#endif
+
/* Platform driver glue; probe/remove are defined earlier in this file. */
static struct platform_driver synaptics_rmi4_driver = {
	.driver = {
		.name = PLATFORM_DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &synaptics_rmi4_dev_pm_ops,
#endif
	},
	.probe = synaptics_rmi4_probe,
	.remove = synaptics_rmi4_remove,
};
+
+static int __init synaptics_rmi4_init(void)
+{
+	int retval;
+
+	retval = synaptics_rmi4_bus_init();
+	if (retval)
+		return retval;
+
+	return platform_driver_register(&synaptics_rmi4_driver);
+}
+
/* Module exit: unregister the driver before tearing down the bus it uses. */
static void __exit synaptics_rmi4_exit(void)
{
	platform_driver_unregister(&synaptics_rmi4_driver);

	synaptics_rmi4_bus_exit();
}
+
/* Standard module registration and metadata. */
module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);

MODULE_AUTHOR("Synaptics, Inc.");
MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
new file mode 100755
index 0000000..8f175e3
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -0,0 +1,534 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2070
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <drm/drm_panel.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F21 (0x21)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+#define PINCTRL_STATE_ACTIVE    "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE   "pmx_ts_release"
+
/*
 * Identifiers for the optional expansion-function modules that can
 * attach to the core driver (registered via synaptics_rmi4_new_function()).
 */
enum exp_fn {
	RMI_DEV = 0,
	RMI_FW_UPDATER,
	RMI_TEST_REPORTING,
	RMI_PROXIMITY,
	RMI_ACTIVE_PEN,
	RMI_GESTURE,
	RMI_VIDEO,
	RMI_DEBUG,
	RMI_LAST,	/* sentinel: number of module slots */
};
+
+extern struct drm_panel *active_panel;
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+	union {
+		struct {
+			unsigned char query_base_addr;
+			unsigned char cmd_base_addr;
+			unsigned char ctrl_base_addr;
+			unsigned char data_base_addr;
+			unsigned char intr_src_count:3;
+			unsigned char reserved_1:2;
+			unsigned char fn_version:2;
+			unsigned char reserved_2:1;
+			unsigned char fn_number;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+	unsigned short query_base;
+	unsigned short cmd_base;
+	unsigned short ctrl_base;
+	unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+	unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data29_offset: offset to F12_2D_DATA29 register
+ * @data29_size: size of F12_2D_DATA29 register
+ * @data29_data: buffer for reading F12_2D_DATA29 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+	unsigned char data1_offset;
+	unsigned char data4_offset;
+	unsigned char data15_offset;
+	unsigned char data15_size;
+	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+	unsigned char data29_offset;
+	unsigned char data29_size;
+	unsigned char data29_data[F12_FINGERS_TO_SUPPORT * 2];
+	unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+	unsigned char fn_number;
+	unsigned char num_of_data_sources;
+	unsigned char num_of_data_points;
+	unsigned char intr_reg_num;
+	unsigned char intr_mask;
+	struct synaptics_rmi4_fn_full_addr full_addr;
+	struct list_head link;
+	int data_size;
+	void *data;
+	void *extra;
+};
+
+/*
+ * struct synaptics_rmi4_input_settings - current input settings
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @valid_button_count: number of valid 0D buttons
+ * @max_touch_width: maximum touch width
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ */
+struct synaptics_rmi4_input_settings {
+	unsigned char num_of_fingers;
+	unsigned char valid_button_count;
+	unsigned char max_touch_width;
+	int sensor_max_x;
+	int sensor_max_y;
+	int force_min;
+	int force_max;
+	bool stylus_enable;
+	bool eraser_enable;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+	unsigned int version_major;
+	unsigned int version_minor;
+	unsigned char manufacturer_id;
+	unsigned char product_props;
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+	unsigned char build_id[BUILD_ID_SIZE];
+	struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rmi4_irq_enable_mutex: mutex for enabling/disabling interrupt
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @valid_button_count: number of valid 0D buttons
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @f51_query_base_addr: query base address for f$51
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @set_wakeup_gesture: location of set wakeup gesture
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
struct synaptics_rmi4_data {
	bool initialized;	/* probe/init finished; gates resume vs drm_init_done */
	struct platform_device *pdev;
	struct input_dev *input_dev;
	struct input_dev *stylus_dev;
	const struct synaptics_dsx_hw_interface *hw_if;
	struct synaptics_rmi4_device_info rmi4_mod_info;
	struct synaptics_rmi4_input_settings input_settings;	/* cached input params */
	struct kobject *board_prop_dir;
	struct regulator *pwr_reg;
	struct regulator *bus_reg;
	struct mutex rmi4_reset_mutex;
	struct mutex rmi4_report_mutex;
	struct mutex rmi4_io_ctrl_mutex;
	struct mutex rmi4_exp_init_mutex;
	struct mutex rmi4_irq_enable_mutex;
	struct delayed_work rb_work;
	struct workqueue_struct *rb_workqueue;
	struct work_struct rmi4_probe_work;	/* deferred probe work item */
	struct workqueue_struct *rmi4_probe_wq;	/* queue running rmi4_probe_work */
	struct completion drm_init_done;	/* completed by DRM unblank notifier
						 * when device not yet initialized */
	struct pinctrl *ts_pinctrl;	/* pinctrl handle; states below match
					 * the PINCTRL_STATE_* names */
	struct pinctrl_state *pinctrl_state_active;
	struct pinctrl_state *pinctrl_state_suspend;
	struct pinctrl_state *pinctrl_state_release;
	struct notifier_block fb_notifier;
	struct work_struct reset_work;
	struct workqueue_struct *reset_workqueue;
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend early_suspend;
#endif
	unsigned char current_page;
	unsigned char button_0d_enabled;
	unsigned char num_of_tx;
	unsigned char num_of_rx;
	unsigned char num_of_fingers;
	unsigned char max_touch_width;
	unsigned char valid_button_count;
	unsigned char report_enable;
	unsigned char no_sleep_setting;
	unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
	unsigned char intr_mask[MAX_INTR_REGISTERS];
	unsigned char *button_txrx_mapping;
	unsigned short num_of_intr_regs;
	unsigned short f01_query_base_addr;
	unsigned short f01_cmd_base_addr;
	unsigned short f01_ctrl_base_addr;
	unsigned short f01_data_base_addr;
#ifdef F51_DISCRETE_FORCE
	unsigned short f51_query_base_addr;
#endif
	unsigned int firmware_id;
	int irq;
	int sensor_max_x;
	int sensor_max_y;
	int force_min;
	int force_max;
	int set_wakeup_gesture;
	int avdd_status;	/* AVDD regulator state tracking — TODO confirm semantics */
	int vdd_status;		/* VDD regulator state tracking — TODO confirm semantics */
	bool flash_prog_mode;
	bool irq_enabled;
	bool fingers_on_2d;
	bool suspend;
	bool sensor_sleep;
	bool stay_awake;
	bool fb_ready;
	bool f11_wakeup_gesture;
	bool f12_wakeup_gesture;
	bool enable_wakeup_gesture;
	bool wedge_sensor;
	bool report_pressure;
	bool stylus_enable;
	bool eraser_enable;
	bool external_afe_buttons;
	int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
			bool rebuild);
	int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
			bool attn_only);
	int (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
			bool enable);
	void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
			struct synaptics_rmi4_fn *fhandler);
};
+
/*
 * struct synaptics_dsx_bus_access - bus-specific register access ops
 * @type: bus type identifier
 * @read: read @length bytes from register @addr into @data
 * @write: write @length bytes from @data to register @addr
 */
struct synaptics_dsx_bus_access {
	unsigned char type;
	int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
		unsigned char *data, unsigned int length);
	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
		unsigned char *data, unsigned int length);
};
+
/*
 * struct synaptics_dsx_hw_interface - glue between core driver and bus layer
 * @board_data: platform/board configuration
 * @bus_access: register read/write operations
 * @bl_hw_init: hardware init hook (bootloader path — name-based, confirm)
 * @ui_hw_init: hardware init hook (UI path — name-based, confirm)
 */
struct synaptics_dsx_hw_interface {
	struct synaptics_dsx_board_data *board_data;
	const struct synaptics_dsx_bus_access *bus_access;
	int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
	int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
};
+
/*
 * struct synaptics_rmi4_exp_fn - callbacks for an expansion-function module
 * @fn_type: module identifier (enum exp_fn)
 * @init: called when the module is attached
 * @remove: called when the module is detached
 * @reset: called around controller reset
 * @reinit: called on reinitialization
 * @early_suspend/@suspend/@resume/@late_resume: PM notifications
 * @attn: attention interrupt notification, with the interrupt mask
 */
struct synaptics_rmi4_exp_fn {
	enum exp_fn fn_type;
	int (*init)(struct synaptics_rmi4_data *rmi4_data);
	void (*remove)(struct synaptics_rmi4_data *rmi4_data);
	void (*reset)(struct synaptics_rmi4_data *rmi4_data);
	void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
	void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
	void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
	void (*resume)(struct synaptics_rmi4_data *rmi4_data);
	void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
	void (*attn)(struct synaptics_rmi4_data *rmi4_data,
			unsigned char intr_mask);
};
+
+int synaptics_rmi4_bus_init(void);
+
+void synaptics_rmi4_bus_exit(void);
+
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+		bool insert);
+
+int synaptics_fw_updater(const unsigned char *fw_data);
+
/*
 * synaptics_rmi4_reg_read - read @len bytes from RMI register @addr.
 * Thin wrapper over the bus-specific read op in hw_if->bus_access;
 * returns whatever that op returns (negative on error per bus impl).
 */
static inline int synaptics_rmi4_reg_read(
		struct synaptics_rmi4_data *rmi4_data,
		unsigned short addr,
		unsigned char *data,
		unsigned int len)
{
	return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
}
+
/*
 * synaptics_rmi4_reg_write - write @len bytes from @data to RMI register
 * @addr.  Thin wrapper over the bus-specific write op in hw_if->bus_access;
 * returns whatever that op returns (negative on error per bus impl).
 */
static inline int synaptics_rmi4_reg_write(
		struct synaptics_rmi4_data *rmi4_data,
		unsigned short addr,
		unsigned char *data,
		unsigned int len)
{
	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
}
+
/* Stock sysfs 'show' for write-only attributes: warn and refuse. */
static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
			__func__, attr->attr.name);
	return -EPERM;
}
+
/* Stock sysfs 'store' for read-only attributes: warn and refuse. */
static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
			__func__, attr->attr.name);
	return -EPERM;
}
+
/*
 * secure_memcpy - bounds-checked memcpy
 *
 * Copies @count bytes from @src to @dest after verifying that both
 * pointers are non-NULL and that @count fits within both the destination
 * capacity (@dest_size) and the source length (@src_size).
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
		const unsigned char *src, unsigned int src_size,
		unsigned int count)
{
	if (dest == NULL || src == NULL)
		return -EINVAL;

	if (count > dest_size || count > src_size)
		return -EINVAL;

	/* memcpy already takes void pointers; the old casts were redundant */
	memcpy(dest, src, count);

	return 0;
}
+
/* Assemble a 16-bit value from a 2-byte little-endian buffer. */
static inline void batohs(unsigned short *dest, unsigned char *src)
{
	*dest = (unsigned short)(((unsigned short)src[1] << 8) | src[0]);
}
+
/* Split a 16-bit value into a 2-byte little-endian buffer. */
static inline void hstoba(unsigned char *dest, unsigned short src)
{
	dest[0] = (unsigned char)(src & 0xFF);
	dest[1] = (unsigned char)(src >> 8);
}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
new file mode 100755
index 0000000..2372368
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -0,0 +1,5797 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+/* Default firmware image file names requested via the firmware API. */
+#define FW_IHEX_NAME "synaptics/startup_fw_update.bin"
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+
+/* Compile-time policy switches for reflash behavior. */
+#define ENABLE_SYS_REFLASH false
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+/* Supported firmware image header format versions. */
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define MAX_UTILITY_PARAMS 20
+
+/* Register layout offsets for bootloader v5/v6 (F34). */
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+/* Register layout offsets for bootloader v7/v8 (F34). */
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+/* Register layout offsets and timings for F35 microbootloader recovery. */
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_FLASH_STATUS_OFFSET 5
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 5000
+#define F35_RESET_WAIT_MS 250
+
+/* F01 device control sleep-mode field values. */
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+/* Flash operation timeouts (milliseconds). */
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+#define READ_CONFIG_WAIT_MS 20
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef SYNA_TDDI
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+#endif
+
+#endif
+
+/* F34 (flash) function register-map versions. */
+enum f34_version {
+	F34_V0 = 0,
+	F34_V1,
+	F34_V2,
+};
+
+/* Bootloader generations supported by this driver. */
+enum bl_version {
+	BL_V5 = 5,
+	BL_V6 = 6,
+	BL_V7 = 7,
+	BL_V8 = 8,
+};
+
+/* Which flash area (if any) needs to be reflashed. */
+enum flash_area {
+	NONE = 0,
+	UI_FIRMWARE,
+	UI_CONFIG,
+};
+
+/* Reflash mode flags requested by callers. */
+enum update_mode {
+	NORMAL = 1,
+	FORCE = 2,
+	LOCKDOWN = 8,
+};
+
+/* Configuration areas addressable through the read/write config path. */
+enum config_area {
+	UI_CONFIG_AREA = 0,
+	PM_CONFIG_AREA,
+	BL_CONFIG_AREA,
+	DP_CONFIG_AREA,
+	FLASH_CONFIG_AREA,
+#ifdef SYNA_TDDI
+	TDDI_FORCE_CONFIG_AREA,
+	TDDI_LCM_DATA_AREA,
+	TDDI_OEM_DATA_AREA,
+#endif
+	UPP_AREA,
+};
+
+/* Flash operation status codes reported by bootloader v7/v8. */
+enum v7_status {
+	SUCCESS = 0x00,
+	DEVICE_NOT_IN_BOOTLOADER_MODE,
+	INVALID_PARTITION,
+	INVALID_COMMAND,
+	INVALID_BLOCK_OFFSET,
+	INVALID_TRANSFER,
+	NOT_ERASED,
+	FLASH_PROGRAMMING_KEY_INCORRECT,
+	BAD_PARTITION_TABLE,
+	CHECKSUM_FAILED,
+	FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+/* Partition identifiers in the v7/v8 flash partition table. */
+enum v7_partition_id {
+	BOOTLOADER_PARTITION = 0x01,
+	DEVICE_CONFIG_PARTITION,
+	FLASH_CONFIG_PARTITION,
+	MANUFACTURING_BLOCK_PARTITION,
+	GUEST_SERIALIZATION_PARTITION,
+	GLOBAL_PARAMETERS_PARTITION,
+	CORE_CODE_PARTITION,
+	CORE_CONFIG_PARTITION,
+	GUEST_CODE_PARTITION,
+	DISPLAY_CONFIG_PARTITION,
+	EXTERNAL_TOUCH_AFE_CONFIG_PARTITION,
+	UTILITY_PARAMETER_PARTITION,
+};
+
+/* Flash command opcodes for bootloader v7/v8. */
+enum v7_flash_command {
+	CMD_V7_IDLE = 0x00,
+	CMD_V7_ENTER_BL,
+	CMD_V7_READ,
+	CMD_V7_WRITE,
+	CMD_V7_ERASE,
+	CMD_V7_ERASE_AP,
+	CMD_V7_SENSOR_ID,
+};
+
+/* Flash command opcodes for bootloader v5/v6. */
+enum v5v6_flash_command {
+	CMD_V5V6_IDLE = 0x0,
+	CMD_V5V6_WRITE_FW = 0x2,
+	CMD_V5V6_ERASE_ALL = 0x3,
+	CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+	CMD_V5V6_READ_CONFIG = 0x5,
+	CMD_V5V6_WRITE_CONFIG = 0x6,
+	CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+	CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+	CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+	CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+	CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+	CMD_V5V6_ERASE_CHIP = 0x0d,
+	CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+#ifdef SYNA_TDDI
+	CMD_V5V6_ERASE_FORCE_CONFIG = 0x11,
+	CMD_V5V6_READ_FORCE_CONFIG = 0x12,
+	CMD_V5V6_WRITE_FORCE_CONFIG = 0x13,
+	CMD_V5V6_ERASE_LOCKDOWN_DATA = 0x1a,
+	CMD_V5V6_READ_LOCKDOWN_DATA = 0x1b,
+	CMD_V5V6_WRITE_LOCKDOWN_DATA = 0x1c,
+	CMD_V5V6_ERASE_LCM_DATA = 0x1d,
+	CMD_V5V6_ERASE_OEM_DATA = 0x1e,
+#endif
+};
+
+/* Bootloader-version-independent command set used internally; these are
+ * translated to the v5/v6 or v7/v8 opcodes above before being issued.
+ */
+enum flash_command {
+	CMD_IDLE = 0,
+	CMD_WRITE_FW,
+	CMD_WRITE_CONFIG,
+	CMD_WRITE_LOCKDOWN,
+	CMD_WRITE_GUEST_CODE,
+	CMD_WRITE_BOOTLOADER,
+	CMD_WRITE_UTILITY_PARAM,
+	CMD_READ_CONFIG,
+	CMD_ERASE_ALL,
+	CMD_ERASE_UI_FIRMWARE,
+	CMD_ERASE_UI_CONFIG,
+	CMD_ERASE_BL_CONFIG,
+	CMD_ERASE_DISP_CONFIG,
+	CMD_ERASE_FLASH_CONFIG,
+	CMD_ERASE_GUEST_CODE,
+	CMD_ERASE_BOOTLOADER,
+	CMD_ERASE_UTILITY_PARAMETER,
+	CMD_ENABLE_FLASH_PROG,
+#ifdef SYNA_TDDI
+	CMD_ERASE_CHIP,
+	CMD_ERASE_FORCE_CONFIG,
+	CMD_READ_FORCE_CONFIG,
+	CMD_WRITE_FORCE_CONFIG,
+	CMD_ERASE_LOCKDOWN_DATA,
+	CMD_READ_LOCKDOWN_DATA,
+	CMD_WRITE_LOCKDOWN_DATA,
+	CMD_ERASE_LCM_DATA,
+	CMD_READ_LCM_DATA,
+	CMD_WRITE_LCM_DATA,
+	CMD_ERASE_OEM_DATA,
+	CMD_READ_OEM_DATA,
+	CMD_WRITE_OEM_DATA,
+#endif
+};
+
+/* Command opcodes for the F35 microbootloader recovery path. */
+enum f35_flash_command {
+	CMD_F35_IDLE = 0x0,
+	CMD_F35_RESERVED = 0x1,
+	CMD_F35_WRITE_CHUNK = 0x2,
+	CMD_F35_ERASE_ALL = 0x3,
+	CMD_F35_RESET = 0x10,
+};
+
+/* Container identifiers found in header-version-0x10 firmware images. */
+enum container_id {
+	TOP_LEVEL_CONTAINER = 0,
+	UI_CONTAINER,
+	UI_CONFIG_CONTAINER,
+	BL_CONTAINER,
+	BL_IMAGE_CONTAINER,
+	BL_CONFIG_CONTAINER,
+	BL_LOCKDOWN_INFO_CONTAINER,
+	PERMANENT_CONFIG_CONTAINER,
+	GUEST_CODE_CONTAINER,
+	BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+	UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+	RMI_SELF_DISCOVERY_CONTAINER,
+	RMI_PAGE_CONTENT_CONTAINER,
+	GENERAL_INFORMATION_CONTAINER,
+	DEVICE_CONFIG_CONTAINER,
+	FLASH_CONFIG_CONTAINER,
+	GUEST_SERIALIZATION_CONTAINER,
+	GLOBAL_PARAMETERS_CONTAINER,
+	CORE_CODE_CONTAINER,
+	CORE_CONFIG_CONTAINER,
+	DISPLAY_CONFIG_CONTAINER,
+	EXTERNAL_TOUCH_AFE_CONFIG_CONTAINER,
+	UTILITY_CONTAINER,
+	UTILITY_PARAMETER_CONTAINER,
+};
+
+/* Identifiers for utility parameter containers. */
+enum utility_parameter_id {
+	UNUSED = 0,
+	FORCE_PARAMETER,
+	ANTI_BENDING_PARAMETER,
+};
+
+/* PDT properties register (single byte, accessible as bitfields or raw). */
+struct pdt_properties {
+	union {
+		struct {
+			unsigned char reserved_1:6;
+			unsigned char has_bsr:1;
+			unsigned char reserved_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* One 8-byte entry of the v7/v8 flash partition table as read from the
+ * device; multi-byte fields are split into little-endian byte halves.
+ */
+struct partition_table {
+	unsigned char partition_id:5;
+	unsigned char byte_0_reserved:3;
+	unsigned char byte_1_reserved;
+	unsigned char partition_length_7_0;
+	unsigned char partition_length_15_8;
+	unsigned char start_physical_address_7_0;
+	unsigned char start_physical_address_15_8;
+	unsigned char partition_properties_7_0;
+	unsigned char partition_properties_15_8;
+} __packed;
+
+/* F01 device control register (sleep mode, nosleep, configured bits). */
+struct f01_device_control {
+	union {
+		struct {
+			unsigned char sleep_mode:2;
+			unsigned char nosleep:1;
+			unsigned char reserved:2;
+			unsigned char charger_connected:1;
+			unsigned char report_rate:1;
+			unsigned char configured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 (v7) query register 0. */
+struct f34_v7_query_0 {
+	union {
+		struct {
+			unsigned char subpacket_1_size:3;
+			unsigned char has_config_id:1;
+			unsigned char f34_query0_b4:1;
+			unsigned char has_thqa:1;
+			unsigned char f34_query0_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 (v7) query registers 1-7: bootloader revision/ID, block and
+ * payload sizes, and partition-presence flags.
+ */
+struct f34_v7_query_1_7 {
+	union {
+		struct {
+			/* query 1 */
+			unsigned char bl_minor_revision;
+			unsigned char bl_major_revision;
+
+			/* query 2 */
+			unsigned char bl_fw_id_7_0;
+			unsigned char bl_fw_id_15_8;
+			unsigned char bl_fw_id_23_16;
+			unsigned char bl_fw_id_31_24;
+
+			/* query 3 */
+			unsigned char minimum_write_size;
+			unsigned char block_size_7_0;
+			unsigned char block_size_15_8;
+			unsigned char flash_page_size_7_0;
+			unsigned char flash_page_size_15_8;
+
+			/* query 4 */
+			unsigned char adjustable_partition_area_size_7_0;
+			unsigned char adjustable_partition_area_size_15_8;
+
+			/* query 5 */
+			unsigned char flash_config_length_7_0;
+			unsigned char flash_config_length_15_8;
+
+			/* query 6 */
+			unsigned char payload_length_7_0;
+			unsigned char payload_length_15_8;
+
+			/* query 7 */
+			unsigned char f34_query7_b0:1;
+			unsigned char has_bootloader:1;
+			unsigned char has_device_config:1;
+			unsigned char has_flash_config:1;
+			unsigned char has_manufacturing_block:1;
+			unsigned char has_guest_serialization:1;
+			unsigned char has_global_parameters:1;
+			unsigned char has_core_code:1;
+			unsigned char has_core_config:1;
+			unsigned char has_guest_code:1;
+			unsigned char has_display_config:1;
+			unsigned char f34_query7_b11__15:5;
+			unsigned char f34_query7_b16__23;
+			unsigned char f34_query7_b24__31;
+		} __packed;
+		unsigned char data[21];
+	};
+};
+
+/* F34 (v7) data register 0: flash/operation status and BL-mode flag. */
+struct f34_v7_data0 {
+	union {
+		struct {
+			unsigned char operation_status:5;
+			unsigned char device_cfg_status:2;
+			unsigned char bl_mode:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 (v7) data registers 1-5: flash command packet layout. */
+struct f34_v7_data_1_5 {
+	union {
+		struct {
+			unsigned char partition_id:5;
+			unsigned char f34_data1_b5__7:3;
+			unsigned char block_offset_7_0;
+			unsigned char block_offset_15_8;
+			unsigned char transfer_length_7_0;
+			unsigned char transfer_length_15_8;
+			unsigned char command;
+			unsigned char payload_0;
+			unsigned char payload_1;
+		} __packed;
+		unsigned char data[8];
+	};
+};
+
+/* F34 (v5/v6) flash properties register. */
+struct f34_v5v6_flash_properties {
+	union {
+		struct {
+			unsigned char reg_map:1;
+			unsigned char unlocked:1;
+			unsigned char has_config_id:1;
+			unsigned char has_pm_config:1;
+			unsigned char has_bl_config:1;
+			unsigned char has_disp_config:1;
+			unsigned char has_ctrl1:1;
+			unsigned char has_query4:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 (v5/v6) secondary flash properties register (query 4). */
+struct f34_v5v6_flash_properties_2 {
+	union {
+		struct {
+			unsigned char has_guest_code:1;
+			unsigned char f34_query4_b1:1;
+			unsigned char has_gesture_config:1;
+			unsigned char has_force_config:1;
+			unsigned char has_lockdown_data:1;
+			unsigned char has_lcm_data:1;
+			unsigned char has_oem_data:1;
+			unsigned char f34_query4_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Per-bootloader-version register offsets, resolved at runtime. */
+struct register_offset {
+	unsigned char properties;
+	unsigned char properties_2;
+	unsigned char block_size;
+	unsigned char block_count;
+	unsigned char gc_block_count;
+	unsigned char flash_status;
+	unsigned char partition_id;
+	unsigned char block_number;
+	unsigned char transfer_length;
+	unsigned char flash_cmd;
+	unsigned char payload;
+};
+
+/* Block counts per flash partition (device- or image-side). */
+struct block_count {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+#ifdef SYNA_TDDI
+	unsigned short tddi_force_config;
+	unsigned short tddi_lockdown_data;
+	unsigned short tddi_lcm_data;
+	unsigned short tddi_oem_data;
+#endif
+	unsigned short total_count;
+};
+
+/* Starting physical flash addresses per partition. */
+struct physical_address {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+};
+
+/* Container descriptor inside a header-version-0x10 image; all
+ * multi-byte fields are little-endian byte arrays.
+ */
+struct container_descriptor {
+	unsigned char content_checksum[4];
+	unsigned char container_id[2];
+	unsigned char minor_version;
+	unsigned char major_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char container_option_flags[4];
+	unsigned char content_options_length[4];
+	unsigned char content_options_address[4];
+	unsigned char content_length[4];
+	unsigned char content_address[4];
+};
+
+/* Image header for header version 0x10. */
+struct image_header_10 {
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char minor_header_version;
+	unsigned char major_header_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char top_level_container_start_addr[4];
+};
+
+/* Image header for header versions 0x05 and 0x06. */
+struct image_header_05_06 {
+	/* 0x00 - 0x0f */
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char options_firmware_id:1;
+	unsigned char options_bootloader:1;
+	unsigned char options_guest_code:1;
+	unsigned char options_tddi:1;
+	unsigned char options_reserved:4;
+	unsigned char header_version;
+	unsigned char firmware_size[4];
+	unsigned char config_size[4];
+	/* 0x10 - 0x1f */
+	unsigned char product_id[PRODUCT_ID_SIZE];
+	unsigned char package_id[2];
+	unsigned char package_id_revision[2];
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	/* 0x20 - 0x2f */
+	unsigned char bootloader_addr[4];
+	unsigned char bootloader_size[4];
+	unsigned char ui_addr[4];
+	unsigned char ui_size[4];
+	/* 0x30 - 0x3f */
+	unsigned char ds_id[16];
+	/* 0x40 - 0x4f */
+	union {
+		struct {
+			unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+			unsigned char reserved_4a_4f[6];
+		};
+		struct {
+			unsigned char dsp_cfg_addr[4];
+			unsigned char dsp_cfg_size[4];
+			unsigned char reserved_48_4f[8];
+		};
+	};
+	/* 0x50 - 0x53 */
+	unsigned char firmware_id[4];
+};
+
+/* A sized, read-only view into a region of the firmware image. */
+struct block_data {
+	unsigned int size;
+	const unsigned char *data;
+};
+
+/* Everything parsed out of a firmware image: presence flags, IDs, and
+ * block_data views for each contained partition/area.
+ */
+struct image_metadata {
+	bool contains_firmware_id;
+	bool contains_bootloader;
+	bool contains_guest_code;
+	bool contains_disp_config;
+	bool contains_perm_config;
+	bool contains_flash_config;
+	bool contains_utility_param;
+	unsigned int firmware_id;
+	unsigned int checksum;
+	unsigned int bootloader_size;
+	unsigned int disp_config_offset;
+	unsigned char bl_version;
+	unsigned char product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char utility_param_id[MAX_UTILITY_PARAMS];
+	struct block_data bootloader;
+	struct block_data utility;
+	struct block_data ui_firmware;
+	struct block_data ui_config;
+	struct block_data dp_config;
+	struct block_data pm_config;
+	struct block_data fl_config;
+	struct block_data bl_image;
+	struct block_data bl_config;
+	struct block_data utility_param[MAX_UTILITY_PARAMS];
+	struct block_data lockdown;
+	struct block_data guest_code;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+};
+
+/* Top-level state for the firmware updater: device capabilities and
+ * bootloader info discovered at probe time, parsed image metadata,
+ * scratch buffers, and the deferred-work machinery.
+ */
+struct synaptics_rmi4_fwu_handle {
+	enum bl_version bl_version;
+	bool initialized;
+	bool in_bl_mode;
+	bool in_ub_mode;
+	bool bl_mode_device;
+	bool force_update;
+	bool do_lockdown;
+	bool has_guest_code;
+#ifdef SYNA_TDDI
+	bool has_force_config;
+	bool has_lockdown_data;
+	bool has_lcm_data;
+	bool has_oem_data;
+#endif
+	bool has_utility_param;
+	bool new_partition_table;
+	bool incompatible_partition_tables;
+	bool write_bootloader;
+	unsigned int data_pos;
+	unsigned char *ext_data_source;
+	unsigned char *read_config_buf;
+	unsigned char intr_mask;
+	unsigned char command;
+	unsigned char bootloader_id[2];
+	unsigned char config_id[32];
+	unsigned char flash_status;
+	unsigned char partitions;
+#ifdef F51_DISCRETE_FORCE
+	unsigned char *cal_data;
+	unsigned short cal_data_off;
+	unsigned short cal_data_size;
+	unsigned short cal_data_buf_size;
+	unsigned short cal_packet_data_size;
+#endif
+	unsigned short block_size;
+	unsigned short config_size;
+	unsigned short config_area;
+	unsigned short config_block_count;
+	unsigned short flash_config_length;
+	unsigned short payload_length;
+	unsigned short partition_table_bytes;
+	unsigned short read_config_buf_size;
+	const unsigned char *config_data;
+	const unsigned char *image;
+	unsigned char *image_name;
+	unsigned int image_size;
+	struct image_metadata img;
+	struct register_offset off;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+	struct f34_v5v6_flash_properties flash_properties;
+	struct synaptics_rmi4_fn_desc f34_fd;
+	struct synaptics_rmi4_fn_desc f35_fd;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct workqueue_struct *fwu_workqueue;
+	struct work_struct fwu_work;
+};
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Binary sysfs attribute used to stage firmware image data in/out. */
+static struct bin_attribute dev_attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = fwu_sysfs_show_image,
+	.write = fwu_sysfs_store_image,
+};
+#endif
+
+/* Sysfs control attributes; write-only entries pair their store handler
+ * with synaptics_rmi4_show_error, read-only entries pair their show
+ * handler with synaptics_rmi4_store_error.
+ */
+static struct device_attribute attrs[] = {
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	__ATTR(dorecovery, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_recovery_store),
+	__ATTR(doreflash, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_reflash_store),
+	__ATTR(writeconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_config_store),
+	__ATTR(readconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_read_config_store),
+	__ATTR(configarea, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_config_area_store),
+	__ATTR(imagename, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_name_store),
+	__ATTR(imagesize, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_size_store),
+	__ATTR(blocksize, 0444,
+			fwu_sysfs_block_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(fwblockcount, 0444,
+			fwu_sysfs_firmware_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(configblockcount, 0444,
+			fwu_sysfs_configuration_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(dispconfigblockcount, 0444,
+			fwu_sysfs_disp_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(permconfigblockcount, 0444,
+			fwu_sysfs_perm_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(blconfigblockcount, 0444,
+			fwu_sysfs_bl_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(uppblockcount, 0444,
+			fwu_sysfs_utility_parameter_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(guestcodeblockcount, 0444,
+			fwu_sysfs_guest_code_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(writeguestcode, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_guest_code_store),
+#ifdef SYNA_TDDI
+	__ATTR(lockdowncode, 0664,
+			fwu_sysfs_read_lockdown_code_show,
+			fwu_sysfs_write_lockdown_code_store),
+#endif
+#endif
+};
+
+/* Single module-wide updater instance, allocated at init time. */
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+/* Signalled when the updater module has finished tearing down. */
+DECLARE_COMPLETION(fwu_remove_complete);
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Serializes the sysfs handlers against each other. */
+DEFINE_MUTEX(fwu_sysfs_mutex);
+#endif
+
+/*
+ * Compute a Fletcher-style 32-bit checksum over 'len' 16-bit words,
+ * folding each running sum back into 16 bits after every word and
+ * packing the two sums into *result (sum2 in the high half, sum1 in
+ * the low half).  'len' is a word count, not a byte count.
+ */
+static void calculate_checksum(unsigned short *data, unsigned long len,
+		unsigned long *result)
+{
+	unsigned long temp;
+	unsigned long sum1 = 0xffff;
+	unsigned long sum2 = 0xffff;
+
+	*result = 0xffffffff;
+
+	while (len--) {
+		temp = *data;
+		sum1 += temp;
+		sum2 += sum1;
+		/* Fold carries back into the low 16 bits each iteration. */
+		sum1 = (sum1 & 0xffff) + (sum1 >> 16);
+		sum2 = (sum2 & 0xffff) + (sum2 >> 16);
+		data++;
+	}
+
+	*result = sum2 << 16 | sum1;
+}
+
+/* Store the low 32 bits of 'src' into dest[0..3], least significant
+ * byte first (little-endian).
+ */
+static void convert_to_little_endian(unsigned char *dest, unsigned long src)
+{
+	dest[0] = (unsigned char)(src & 0xff);
+	dest[1] = (unsigned char)((src >> 8) & 0xff);
+	dest[2] = (unsigned char)((src >> 16) & 0xff);
+	dest[3] = (unsigned char)((src >> 24) & 0xff);
+}
+
+/* Assemble a 32-bit value from a 4-byte little-endian buffer
+ * (ptr[0] = LSB ... ptr[3] = MSB).
+ */
+static unsigned int le_to_uint(const unsigned char *ptr)
+{
+	return (unsigned int)ptr[0] +
+			(unsigned int)ptr[1] * 0x100 +
+			(unsigned int)ptr[2] * 0x10000 +
+			(unsigned int)ptr[3] * 0x1000000;
+}
+
+#ifdef F51_DISCRETE_FORCE
+/*
+ * Discover the F51 force calibration data layout: read the data offset
+ * from F51 query registers, determine the optional packet-register size
+ * (present when F51 exposes >= 10 query registers and the packet info
+ * flag bit is set), then size and (re)allocate fwu->cal_data to hold
+ * CAL_DATA_SIZE plus the packet data.  Returns 0 on success or a
+ * negative errno from a failed register read / allocation.
+ */
+static int fwu_f51_force_data_init(void)
+{
+	int retval;
+	unsigned char query_count;
+	unsigned char packet_info;
+	unsigned char offset[2];
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr + 7,
+			offset,
+			sizeof(offset));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read force data offset\n",
+				__func__);
+		return retval;
+	}
+
+	/* Offset is reported as two little-endian bytes. */
+	fwu->cal_data_off = offset[0] | offset[1] << 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr,
+			&query_count,
+			sizeof(query_count));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read number of F51 query registers\n",
+				__func__);
+		return retval;
+	}
+
+	if (query_count >= 10) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f51_query_base_addr + 9,
+				&packet_info,
+				sizeof(packet_info));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F51 packet register info\n",
+					__func__);
+			return retval;
+		}
+
+		if (packet_info & MASK_1BIT) {
+			/* Upper 7 bits give the size in 16-bit units. */
+			fwu->cal_packet_data_size = packet_info >> 1;
+			fwu->cal_packet_data_size *= 2;
+		} else {
+			fwu->cal_packet_data_size = 0;
+		}
+	} else {
+		fwu->cal_packet_data_size = 0;
+	}
+
+	fwu->cal_data_size = CAL_DATA_SIZE + fwu->cal_packet_data_size;
+	/* Grow the buffer only when the required size exceeds what we have. */
+	if (fwu->cal_data_size > fwu->cal_data_buf_size) {
+		kfree(fwu->cal_data);
+		fwu->cal_data_buf_size = fwu->cal_data_size;
+		fwu->cal_data = kmalloc(fwu->cal_data_buf_size, GFP_KERNEL);
+		if (!fwu->cal_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->cal_data\n",
+					__func__);
+			fwu->cal_data_buf_size = 0;
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure fwu->read_config_buf can hold at least 'count' bytes, growing
+ * it (zero-initialized) when needed; the existing buffer is kept as-is
+ * when already large enough.  On allocation failure the tracked size is
+ * reset to 0 and -ENOMEM is returned; returns 0 otherwise.
+ */
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count > fwu->read_config_buf_size) {
+		kfree(fwu->read_config_buf);
+		fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+		if (!fwu->read_config_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->read_config_buf\n",
+					__func__);
+			fwu->read_config_buf_size = 0;
+			return -ENOMEM;
+		}
+		fwu->read_config_buf_size = count;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare the device's partition physical addresses against those
+ * parsed from the firmware image and set two flags:
+ * - incompatible_partition_tables: bootloader-critical partitions
+ *   (bl_image, lockdown, bl_config, utility_param, and fl_config on
+ *   BL_V7) moved, so the image cannot be flashed safely;
+ * - new_partition_table: any of the UI firmware/config (and, when
+ *   supported, display config / guest code) partitions moved, so the
+ *   flash layout must be rewritten.
+ */
+static void fwu_compare_partition_tables(void)
+{
+	fwu->incompatible_partition_tables = false;
+
+	if (fwu->phyaddr.bl_image != fwu->img.phyaddr.bl_image)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.lockdown != fwu->img.phyaddr.lockdown)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.bl_config != fwu->img.phyaddr.bl_config)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.utility_param != fwu->img.phyaddr.utility_param)
+		fwu->incompatible_partition_tables = true;
+
+	if (fwu->bl_version == BL_V7) {
+		if (fwu->phyaddr.fl_config != fwu->img.phyaddr.fl_config)
+			fwu->incompatible_partition_tables = true;
+	}
+
+	fwu->new_partition_table = false;
+
+	if (fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware)
+		fwu->new_partition_table = true;
+	else if (fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config)
+		fwu->new_partition_table = true;
+
+	if (fwu->flash_properties.has_disp_config) {
+		if (fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config)
+			fwu->new_partition_table = true;
+	}
+
+	if (fwu->has_guest_code) {
+		if (fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code)
+			fwu->new_partition_table = true;
+	}
+}
+
+/*
+ * Walk the raw v7/v8 partition table (fwu->partitions entries of 8
+ * bytes each, starting at byte offset 2) and record, per recognized
+ * partition id, its block count in *blkcount and its starting physical
+ * address in *phyaddr.  Recognized partitions also accumulate into
+ * blkcount->total_count; unknown partition ids are silently skipped.
+ * Each raw entry is dumped at dev_dbg level for diagnostics.
+ */
+static void fwu_parse_partition_table(const unsigned char *partition_table,
+		struct block_count *blkcount, struct physical_address *phyaddr)
+{
+	unsigned char ii;
+	unsigned char index;
+	unsigned char offset;
+	unsigned short partition_length;
+	unsigned short physical_address;
+	struct partition_table *ptable;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (ii = 0; ii < fwu->partitions; ii++) {
+		/* Entries are 8 bytes, preceded by a 2-byte table header. */
+		index = ii * 8 + 2;
+		ptable = (struct partition_table *)&partition_table[index];
+		partition_length = ptable->partition_length_15_8 << 8 |
+				ptable->partition_length_7_0;
+		physical_address = ptable->start_physical_address_15_8 << 8 |
+				ptable->start_physical_address_7_0;
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Partition entry %d:\n",
+				__func__, ii);
+		for (offset = 0; offset < 8; offset++) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: 0x%02x\n",
+					__func__,
+					partition_table[index + offset]);
+		}
+		switch (ptable->partition_id) {
+		case CORE_CODE_PARTITION:
+			blkcount->ui_firmware = partition_length;
+			phyaddr->ui_firmware = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core code block count: %d\n",
+					__func__, blkcount->ui_firmware);
+			blkcount->total_count += partition_length;
+			break;
+		case CORE_CONFIG_PARTITION:
+			blkcount->ui_config = partition_length;
+			phyaddr->ui_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core config block count: %d\n",
+					__func__, blkcount->ui_config);
+			blkcount->total_count += partition_length;
+			break;
+		case BOOTLOADER_PARTITION:
+			blkcount->bl_image = partition_length;
+			phyaddr->bl_image = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader block count: %d\n",
+					__func__, blkcount->bl_image);
+			blkcount->total_count += partition_length;
+			break;
+		case UTILITY_PARAMETER_PARTITION:
+			blkcount->utility_param = partition_length;
+			phyaddr->utility_param = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter block count: %d\n",
+					__func__, blkcount->utility_param);
+			blkcount->total_count += partition_length;
+			break;
+		case DISPLAY_CONFIG_PARTITION:
+			blkcount->dp_config = partition_length;
+			phyaddr->dp_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Display config block count: %d\n",
+					__func__, blkcount->dp_config);
+			blkcount->total_count += partition_length;
+			break;
+		case FLASH_CONFIG_PARTITION:
+			blkcount->fl_config = partition_length;
+			phyaddr->fl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Flash config block count: %d\n",
+					__func__, blkcount->fl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_CODE_PARTITION:
+			blkcount->guest_code = partition_length;
+			phyaddr->guest_code = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest code block count: %d\n",
+					__func__, blkcount->guest_code);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_SERIALIZATION_PARTITION:
+			blkcount->pm_config = partition_length;
+			phyaddr->pm_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest serialization block count: %d\n",
+					__func__, blkcount->pm_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GLOBAL_PARAMETERS_PARTITION:
+			blkcount->bl_config = partition_length;
+			phyaddr->bl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Global parameters block count: %d\n",
+					__func__, blkcount->bl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case DEVICE_CONFIG_PARTITION:
+			blkcount->lockdown = partition_length;
+			phyaddr->lockdown = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Device config block count: %d\n",
+					__func__, blkcount->lockdown);
+			blkcount->total_count += partition_length;
+			break;
+		};
+	}
+}
+
+/*
+ * Parse the utility container of a v0x10 image.  fwu->img.utility.data
+ * holds a list of 4-byte little-endian descriptor addresses; each is
+ * resolved against the image base and, for UTILITY_PARAMETER_CONTAINER
+ * entries, the content pointer/size and leading parameter id byte are
+ * recorded.  Entries beyond MAX_UTILITY_PARAMS are ignored.
+ */
+static void fwu_parse_image_header_10_utility(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = fwu->img.utility.size / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		if (ii >= MAX_UTILITY_PARAMS)
+			continue;
+		addr = le_to_uint(fwu->img.utility.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UTILITY_PARAMETER_CONTAINER:
+			fwu->img.utility_param[ii].data = content;
+			fwu->img.utility_param[ii].size = length;
+			/* First content byte identifies the parameter. */
+			fwu->img.utility_param_id[ii] = content[0];
+			break;
+		default:
+			break;
+		};
+	}
+}
+
+/*
+ * Parse the bootloader container list of an image-header-v10 firmware image
+ * and record the bootloader image, bootloader config and lockdown areas in
+ * fwu->img.
+ *
+ * NOTE(review): the loop starts at entry 1 and the count excludes the first
+ * 4 bytes of the container data — presumably those bytes are not a
+ * descriptor address; confirm against the image format spec.
+ */
+static void fwu_parse_image_header_10_bootloader(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = (fwu->img.bootloader.size - 4) / 4;
+
+	for (ii = 1; ii <= num_of_containers; ii++) {
+		addr = le_to_uint(fwu->img.bootloader.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		/* container ID is a 16-bit little-endian value */
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case BL_IMAGE_CONTAINER:
+			fwu->img.bl_image.data = content;
+			fwu->img.bl_image.size = length;
+			break;
+		case BL_CONFIG_CONTAINER:
+		case GLOBAL_PARAMETERS_CONTAINER:
+			fwu->img.bl_config.data = content;
+			fwu->img.bl_config.size = length;
+			break;
+		case BL_LOCKDOWN_INFO_CONTAINER:
+		case DEVICE_CONFIG_CONTAINER:
+			fwu->img.lockdown.data = content;
+			fwu->img.lockdown.size = length;
+			break;
+		default:
+			break;
+		};
+	}
+}
+
+/*
+ * Parse a firmware image that uses header format version 0x10.
+ *
+ * Walks the top-level container list and records the location and size of
+ * every recognized area (UI firmware/config, bootloader, utility, guest
+ * code, display/permanent/flash config, firmware ID) in fwu->img.
+ * Bootloader and utility containers are further decomposed by their
+ * dedicated sub-parsers.
+ */
+static void fwu_parse_image_header_10(void)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *image;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+	struct image_header_10 *header;
+
+	image = fwu->image;
+	header = (struct image_header_10 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	/* address of top level container */
+	offset = le_to_uint(header->top_level_container_start_addr);
+	descriptor = (struct container_descriptor *)(image + offset);
+
+	/* address of top level container content */
+	offset = le_to_uint(descriptor->content_address);
+	num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		/* each top-level entry is a 4-byte descriptor address */
+		addr = le_to_uint(image + offset);
+		offset += 4;
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			fwu->img.ui_firmware.data = content;
+			fwu->img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			fwu->img.ui_config.data = content;
+			fwu->img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			/* first content byte carries the bootloader version */
+			fwu->img.bl_version = *content;
+			fwu->img.bootloader.data = content;
+			fwu->img.bootloader.size = length;
+			fwu_parse_image_header_10_bootloader(image);
+			break;
+		case UTILITY_CONTAINER:
+			fwu->img.utility.data = content;
+			fwu->img.utility.size = length;
+			fwu_parse_image_header_10_utility(image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			fwu->img.contains_guest_code = true;
+			fwu->img.guest_code.data = content;
+			fwu->img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			fwu->img.contains_disp_config = true;
+			fwu->img.dp_config.data = content;
+			fwu->img.dp_config.size = length;
+			break;
+		case PERMANENT_CONFIG_CONTAINER:
+		case GUEST_SERIALIZATION_CONTAINER:
+			fwu->img.contains_perm_config = true;
+			fwu->img.pm_config.data = content;
+			fwu->img.pm_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			fwu->img.contains_flash_config = true;
+			fwu->img.fl_config.data = content;
+			fwu->img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			fwu->img.contains_firmware_id = true;
+			/* firmware ID lives 4 bytes into the container */
+			fwu->img.firmware_id = le_to_uint(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * Parse a firmware image that uses the legacy header format versions
+ * 0x05/0x06.
+ *
+ * Unlike format 0x10, the areas sit at fixed offsets: UI firmware starts
+ * at IMAGE_AREA_OFFSET (after an optional embedded bootloader), UI config
+ * follows the firmware, and the lockdown block precedes IMAGE_AREA_OFFSET.
+ */
+static void fwu_parse_image_header_05_06(void)
+{
+	int retval;
+	const unsigned char *image;
+	struct image_header_05_06 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	image = fwu->image;
+	header = (struct image_header_05_06 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	fwu->img.bl_version = header->header_version;
+
+	fwu->img.contains_bootloader = header->options_bootloader;
+	if (fwu->img.contains_bootloader)
+		fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+	fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+	if (fwu->img.ui_firmware.size) {
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+		/* an embedded bootloader precedes the UI firmware */
+		if (fwu->img.contains_bootloader)
+			fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+	}
+
+	/* BL_V6 TDDI images keep the firmware at the fixed offset */
+	if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+	fwu->img.ui_config.size = le_to_uint(header->config_size);
+	if (fwu->img.ui_config.size) {
+		fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+				fwu->img.ui_firmware.size;
+	}
+
+	if (fwu->img.contains_bootloader || header->options_tddi)
+		fwu->img.contains_disp_config = true;
+	else
+		fwu->img.contains_disp_config = false;
+
+	if (fwu->img.contains_disp_config) {
+		fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+		fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+		fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+	} else {
+		/* header field is reused for the customer product ID here */
+		retval = secure_memcpy(fwu->img.cstmr_product_id,
+				sizeof(fwu->img.cstmr_product_id),
+				header->cstmr_product_id,
+				sizeof(header->cstmr_product_id),
+				PRODUCT_ID_SIZE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy custom product ID string\n",
+					__func__);
+		}
+		fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+	}
+
+	fwu->img.contains_firmware_id = header->options_firmware_id;
+	if (fwu->img.contains_firmware_id)
+		fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+	retval = secure_memcpy(fwu->img.product_id,
+			sizeof(fwu->img.product_id),
+			header->product_id,
+			sizeof(header->product_id),
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+	/* ensure NUL termination regardless of copy result */
+	fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+	fwu->img.lockdown.size = LOCKDOWN_SIZE;
+	fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+}
+
+/*
+ * Parse fwu->image according to its major header version and populate
+ * fwu->img.  For v7/v8 bootloaders the image must carry a flash config
+ * partition table, which is parsed and compared against the device's.
+ *
+ * Returns 0 on success or -EINVAL for unsupported/invalid images.
+ */
+static int fwu_parse_image_info(void)
+{
+	struct image_header_10 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	header = (struct image_header_10 *)fwu->image;
+
+	/* start from a clean slate; parsers only set what they find */
+	memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		fwu_parse_image_header_10();
+		break;
+	case IMAGE_HEADER_VERSION_05:
+	case IMAGE_HEADER_VERSION_06:
+		fwu_parse_image_header_05_06();
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Unsupported image file format (0x%02x)\n",
+				__func__, header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (!fwu->img.contains_flash_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No flash config found in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		fwu_parse_partition_table(fwu->img.fl_config.data,
+				&fwu->img.blkcount, &fwu->img.phyaddr);
+
+		if (fwu->img.blkcount.utility_param)
+			fwu->img.contains_utility_param = true;
+
+		fwu_compare_partition_tables();
+	} else {
+		fwu->new_partition_table = false;
+		fwu->incompatible_partition_tables = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F$34 flash status and flash command registers and cache the
+ * decoded values in fwu->flash_status and fwu->command.  The decoding
+ * masks depend on the bootloader version.
+ *
+ * Returns 0 on success or a negative error code from the register reads.
+ */
+static int fwu_read_flash_status(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* bit 7 of the status register flags bootloader mode */
+	fwu->in_bl_mode = status >> 7;
+
+	if (fwu->bl_version == BL_V5)
+		fwu->flash_status = (status >> 4) & MASK_3BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->flash_status = status & MASK_3BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->flash_status = status & MASK_5BIT;
+
+	/* NOTE(review): status is forced to success while the bootloader
+	 * itself is being written — presumably it cannot be trusted then;
+	 * confirm against the F$34 programming guide.
+	 */
+	if (fwu->write_bootloader)
+		fwu->flash_status = 0x00;
+
+	if (fwu->flash_status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash status = %d, command = 0x%02x\n",
+				__func__, fwu->flash_status, fwu->command);
+	}
+
+	/* NOTE(review): 0x08 on v7/v8 is treated as benign — verify which
+	 * status code this corresponds to in the spec.
+	 */
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (fwu->flash_status == 0x08)
+			fwu->flash_status = 0x00;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5)
+		fwu->command = command & MASK_4BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->command = command & MASK_6BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->command = command;
+
+	if (fwu->write_bootloader)
+		fwu->command = 0x00;
+
+	return 0;
+}
+
+/*
+ * Wait up to @timeout_ms for the flash controller to return to idle
+ * (command == CMD_IDLE and status == 0).  When @poll is true the status
+ * registers are re-read every MAX_SLEEP_TIME_US; otherwise they are only
+ * read once, on the final iteration (the caller relies on the attention
+ * interrupt having updated them in between).
+ *
+ * Returns 0 when idle, -ETIMEDOUT otherwise.
+ *
+ * NOTE(review): the return value of fwu_read_flash_status() is ignored
+ * here, so a failed register read degenerates into a timeout.
+ */
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+	int count = 0;
+	int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	do {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		count++;
+		if (poll || (count == timeout_count))
+			fwu_read_flash_status();
+
+		if ((fwu->command == CMD_IDLE) && (fwu->flash_status == 0x00))
+			return 0;
+	} while (count < timeout_count);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for idle status\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Issue a v7/v8 erase or enter-bootloader command as one register burst:
+ * partition ID, command code and the bootloader ID payload are written in
+ * a single transaction starting at the partition ID register.  The data
+ * buffer is zeroed first, so any cmd not listed in the switch results in a
+ * zero partition/command being written.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	struct f34_v7_data_1_5 data_1_5;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* zero the whole transaction buffer before filling it in */
+	memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UTILITY_PARAMETER:
+		data_1_5.partition_id = UTILITY_PARAMETER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	};
+
+	/* erase/enter-BL commands require the bootloader ID as payload */
+	data_1_5.payload_0 = fwu->bootloader_id[0];
+	data_1_5.payload_1 = fwu->bootloader_id[1];
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			data_1_5.data,
+			sizeof(data_1_5.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write single transaction command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command to its v7/v8 register encoding and
+ * issue it.  Erase and enter-bootloader commands are routed through the
+ * single-transaction path; the rest write only the command register.
+ *
+ * Returns 0 on success, -EINVAL for unknown commands, or the register
+ * write error.
+ */
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+	case CMD_WRITE_CONFIG:
+	case CMD_WRITE_LOCKDOWN:
+	case CMD_WRITE_GUEST_CODE:
+	case CMD_WRITE_BOOTLOADER:
+	case CMD_WRITE_UTILITY_PARAM:
+		command = CMD_V7_WRITE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+		command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	};
+
+	/* cache before issuing so fwu_read_flash_status() can log it */
+	fwu->command = command;
+
+	/* erase / enter-BL commands need partition ID + bootloader ID too,
+	 * so they go out as one combined transaction
+	 */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+	case CMD_ENABLE_FLASH_PROG:
+		retval = fwu_write_f34_v7_command_single_transaction(cmd);
+		if (retval < 0)
+			return retval;
+		else
+			return 0;
+	default:
+		break;
+	};
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write flash command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command to its v5/v6 register encoding and
+ * issue it.  Erase and enter-bootloader commands must be preceded by
+ * writing the bootloader ID to the payload register to unlock them.
+ *
+ * Returns 0 on success, -EINVAL for unknown commands, or the register
+ * write error.
+ */
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_IDLE:
+		command = CMD_V5V6_IDLE;
+		break;
+	case CMD_WRITE_FW:
+		command = CMD_V5V6_WRITE_FW;
+		break;
+	case CMD_WRITE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		command = CMD_V5V6_WRITE_LOCKDOWN;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V5V6_WRITE_GUEST_CODE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V5V6_READ_CONFIG;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V5V6_ERASE_ALL;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		command = CMD_V5V6_ERASE_UI_CONFIG;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		command = CMD_V5V6_ERASE_DISP_CONFIG;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V5V6_ERASE_GUEST_CODE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V5V6_ENABLE_FLASH_PROG;
+		break;
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+		command = CMD_V5V6_ERASE_CHIP;
+		break;
+	case CMD_ERASE_FORCE_CONFIG:
+		command = CMD_V5V6_ERASE_FORCE_CONFIG;
+		break;
+	case CMD_READ_FORCE_CONFIG:
+		command = CMD_V5V6_READ_FORCE_CONFIG;
+		break;
+	case CMD_WRITE_FORCE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_ERASE_LOCKDOWN_DATA:
+		command = CMD_V5V6_ERASE_LOCKDOWN_DATA;
+		break;
+	case CMD_READ_LOCKDOWN_DATA:
+		command = CMD_V5V6_READ_LOCKDOWN_DATA;
+		break;
+	case CMD_WRITE_LOCKDOWN_DATA:
+		command = CMD_V5V6_WRITE_LOCKDOWN_DATA;
+		break;
+	case CMD_ERASE_LCM_DATA:
+		command = CMD_V5V6_ERASE_LCM_DATA;
+		break;
+	case CMD_ERASE_OEM_DATA:
+		command = CMD_V5V6_ERASE_OEM_DATA;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	/* destructive commands are gated behind the bootloader ID */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+	case CMD_ERASE_FORCE_CONFIG:
+	case CMD_ERASE_LOCKDOWN_DATA:
+	case CMD_ERASE_LCM_DATA:
+	case CMD_ERASE_OEM_DATA:
+#endif
+	case CMD_ENABLE_FLASH_PROG:
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				fwu->bootloader_id,
+				sizeof(fwu->bootloader_id));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write bootloader ID\n",
+					__func__);
+			return retval;
+		}
+		break;
+	default:
+		break;
+	};
+
+	/* cache before issuing so fwu_read_flash_status() can log it */
+	fwu->command = command;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command 0x%02x\n",
+				__func__, command);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue a flash command using the register encoding that matches the
+ * bootloader version of the attached device.
+ */
+static int fwu_write_f34_command(unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_command(cmd);
+
+	return fwu_write_f34_v5v6_command(cmd);
+}
+
+/*
+ * Map a flash command to its target partition and write the partition ID
+ * to the F$34 data register (bootloader v7/v8 only).
+ *
+ * Returns 0 on success, -EINVAL for an unknown command or config area,
+ * or the register write error.
+ */
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char partition;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_WRITE_CONFIG:
+	case CMD_READ_CONFIG:
+		if (fwu->config_area == UI_CONFIG_AREA)
+			partition = CORE_CONFIG_PARTITION;
+		else if (fwu->config_area == DP_CONFIG_AREA)
+			partition = DISPLAY_CONFIG_PARTITION;
+		else if (fwu->config_area == PM_CONFIG_AREA)
+			partition = GUEST_SERIALIZATION_PARTITION;
+		else if (fwu->config_area == BL_CONFIG_AREA)
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		else if (fwu->config_area == FLASH_CONFIG_AREA)
+			partition = FLASH_CONFIG_PARTITION;
+		else if (fwu->config_area == UPP_AREA)
+			partition = UTILITY_PARAMETER_PARTITION;
+		else {
+			/* bail out instead of writing an uninitialized
+			 * stack byte to the device register
+			 */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Invalid config area 0x%02x\n",
+					__func__, fwu->config_area);
+			return -EINVAL;
+		}
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		partition = DEVICE_CONFIG_PARTITION;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_WRITE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_WRITE_UTILITY_PARAM:
+		partition = UTILITY_PARAMETER_PARTITION;
+		break;
+	case CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	};
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			&partition,
+			sizeof(partition));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write partition ID\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Write the partition ID for a flash command.  Only v7/v8 bootloaders
+ * have a partition ID register; on v5/v6 this is a successful no-op.
+ */
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+	if (fwu->bl_version != BL_V7 && fwu->bl_version != BL_V8)
+		return 0;
+
+	return fwu_write_f34_v7_partition_id(cmd);
+}
+
+/*
+ * Read the device's flash partition table (v7/v8 bootloaders) into
+ * @partition_table, which must hold at least fwu->partition_table_bytes.
+ *
+ * The table lives in the flash config area: select that partition, point
+ * the block number at 0, set the transfer length to the flash config
+ * length, issue CMD_READ_CONFIG and read the payload back once the
+ * controller is idle.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* transfer length register is little-endian, 16 bits */
+	length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+	length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.transfer_length,
+			length,
+			sizeof(length));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write transfer length\n",
+				__func__);
+		return retval;
+	}
+
+	retval = fwu_write_f34_command(CMD_READ_CONFIG);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(READ_CONFIG_WAIT_MS);
+
+	retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to wait for idle status\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + fwu->off.payload,
+			partition_table,
+			fwu->partition_table_bytes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Read and decode the F$34 query registers for v7/v8 bootloaders:
+ * bootloader ID/version, block size, flash config length, payload length
+ * and fixed data-register offsets.  Then count the supported partitions,
+ * fetch the device partition table and derive the flash property and
+ * block-count bookkeeping from it.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v7_queries(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char query_base;
+	unsigned char index;
+	unsigned char offset;
+	unsigned char *ptable;
+	struct f34_v7_query_0 query_0;
+	struct f34_v7_query_1_7 query_1_7;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	query_base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query 0\n",
+				__func__);
+		return retval;
+	}
+
+	/* queries 1-7 follow query 0's subpacket 1 */
+	offset = query_0.subpacket_1_size + 1;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base + offset,
+			query_1_7.data,
+			sizeof(query_1_7.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read queries 1 to 7\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+	fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	/* promote from the default V7 if the device reports V8 */
+	if (fwu->bootloader_id[1] == BL_V8)
+		fwu->bl_version = BL_V8;
+
+	fwu->block_size = query_1_7.block_size_15_8 << 8 |
+			query_1_7.block_size_7_0;
+
+	fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+			query_1_7.flash_config_length_7_0;
+
+	fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+			query_1_7.payload_length_7_0;
+
+	/* v7 data register layout is fixed */
+	fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+	fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+	fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+	fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+	/* partition-support bitmap sits at the end of queries 1-7 */
+	index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+	fwu->partitions = 0;
+	for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+		for (ii = 0; ii < 8; ii++) {
+			if (query_1_7.data[index + offset] & (1 << ii))
+				fwu->partitions++;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Supported partitions: 0x%02x\n",
+				__func__, query_1_7.data[index + offset]);
+	}
+
+	/* 8 bytes per partition entry plus a 2-byte header */
+	fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+	ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+	if (!ptable) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for partition table\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = fwu_read_f34_v7_partition_table(ptable);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read partition table\n",
+				__func__);
+		kfree(ptable);
+		return retval;
+	}
+
+	fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+	if (fwu->blkcount.dp_config)
+		fwu->flash_properties.has_disp_config = 1;
+	else
+		fwu->flash_properties.has_disp_config = 0;
+
+	if (fwu->blkcount.pm_config)
+		fwu->flash_properties.has_pm_config = 1;
+	else
+		fwu->flash_properties.has_pm_config = 0;
+
+	if (fwu->blkcount.bl_config)
+		fwu->flash_properties.has_bl_config = 1;
+	else
+		fwu->flash_properties.has_bl_config = 0;
+
+	if (fwu->blkcount.guest_code)
+		fwu->has_guest_code = 1;
+	else
+		fwu->has_guest_code = 0;
+
+	if (fwu->blkcount.utility_param)
+		fwu->has_utility_param = 1;
+	else
+		fwu->has_utility_param = 0;
+
+	kfree(ptable);
+
+	return 0;
+}
+
+/*
+ * Read and decode the F$34 query registers for v5/v6 bootloaders:
+ * bootloader ID, register offsets (which differ between V5 and V6),
+ * block size, flash properties and the per-area block counts.  Optional
+ * areas (guest code, TDDI force/lockdown/LCM/OEM data) are probed via
+ * flash properties 2 when query 4 is present.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char offset;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	/* register layout differs between the V5 and V6 bootloaders */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->block_size, &(buf[0]));
+
+	/* on V5 the command/status register follows the payload block */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/* UI firmware + UI config counts are always present (2 bytes each);
+	 * optional areas each add another 2-byte count
+	 */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+#ifdef SYNA_TDDI
+	fwu->has_force_config = false;
+	fwu->has_lockdown_data = false;
+	fwu->has_lcm_data = false;
+	fwu->has_oem_data = false;
+#endif
+
+	/* NOTE(review): off.properties_2 is only assigned in the BL_V6
+	 * branch above — if has_query4 can be set on a BL_V5 device this
+	 * reads from a stale/zero offset; confirm against the spec.
+	 */
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+		/* optional block counts follow properties 2, one register
+		 * per present feature
+		 */
+		offset = fwu->off.properties_2 + 1;
+		count = 0;
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			count++;
+			fwu->has_guest_code = true;
+		}
+#ifdef SYNA_TDDI
+		if (properties_2.has_force_config) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi force block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
+			count++;
+			fwu->has_force_config = true;
+		}
+		if (properties_2.has_lockdown_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lockdown block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
+			count++;
+			fwu->has_lockdown_data = true;
+		}
+		if (properties_2.has_lcm_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lcm block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
+			count++;
+			fwu->has_lcm_data = true;
+		}
+		if (properties_2.has_oem_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi oem block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
+			fwu->has_oem_data = true;
+		}
+#endif
+	}
+
+	/* utility parameters exist only on v7/v8 bootloaders */
+	fwu->has_utility_param = false;
+
+	return 0;
+}
+
+/*
+ * Read the F$34 query registers, dispatching on bootloader version.
+ * Block counts and physical addresses are cleared first so stale values
+ * from a previous read cannot leak through.
+ */
+static int fwu_read_f34_queries(void)
+{
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		return fwu_read_f34_v7_queries();
+
+	return fwu_read_f34_v5v6_queries();
+}
+
+/*
+ * Write @block_cnt flash blocks from @block_ptr to the device using the
+ * v7/v8 register interface and wait for each chunk to complete.
+ *
+ * Blocks are sent in chunks of at most fwu->payload_length blocks per
+ * flash command; each chunk's bytes may additionally be split into bus
+ * transfers of at most MAX_WRITE_SIZE bytes (rounded down to a whole
+ * number of blocks) when that limit is defined.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short left_bytes;
+	unsigned short write_size;
+	unsigned short max_write_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	/* start writing at block 0 of the selected partition */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* full payload-sized chunk, or whatever is left */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* transfer length register is little-endian, 16 bits */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+#ifdef MAX_WRITE_SIZE
+		/* cap bus writes, rounding down to whole blocks */
+		max_write_size = MAX_WRITE_SIZE;
+		if (max_write_size >= transfer * fwu->block_size)
+			max_write_size = transfer * fwu->block_size;
+		else if (max_write_size > fwu->block_size)
+			max_write_size -= max_write_size % fwu->block_size;
+		else
+			max_write_size = fwu->block_size;
+#else
+		max_write_size = transfer * fwu->block_size;
+#endif
+		left_bytes = transfer * fwu->block_size;
+
+		do {
+			if (left_bytes / max_write_size)
+				write_size = max_write_size;
+			else
+				write_size = left_bytes;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					data_base + fwu->off.payload,
+					block_ptr,
+					write_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to write block data (remaining = %d)\n",
+						__func__, remaining);
+				return retval;
+			}
+
+			block_ptr += write_size;
+			left_bytes -= write_size;
+		} while (left_bytes);
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * fwu_write_f34_v5v6_blocks() - write flash blocks via the v5/v6 bootloader
+ *
+ * @block_ptr: source buffer holding block_cnt * fwu->block_size bytes
+ * @block_cnt: number of flash blocks to write
+ * @command: F34 flash command to issue per block
+ *
+ * Sets the starting block number (encoding fwu->config_area into the high
+ * bits), then writes one block and one command at a time, waiting for idle
+ * after each block. Returns 0 on success or a negative error code.
+ */
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Config area is encoded in bits 5-7 of the high block number byte */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		/* One full block of payload per iteration */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		/* The device auto-increments the block number; just wait */
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/* Write block_cnt flash blocks using the routine that matches the
+ * bootloader generation found during PDT scanning.
+ */
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version != BL_V7 && fwu->bl_version != BL_V8)
+		return fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+
+	return fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+}
+
+/*
+ * fwu_read_f34_v7_blocks() - read flash blocks via the v7/v8 bootloader
+ *
+ * @block_cnt: number of flash blocks to read
+ * @command: F34 flash command selecting the partition to read
+ *
+ * Data lands in fwu->read_config_buf, which the caller must have sized to
+ * hold block_cnt * fwu->block_size bytes (fwu_allocate_read_config_buf).
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Select the partition this command operates on */
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	/* Always start reading from block 0 of the partition */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* Read at most payload_length blocks per command */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* Transfer length register is little endian, 16 bit */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		/* Wait until the device has staged the requested blocks */
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * fwu_read_f34_v5v6_blocks() - read flash blocks via the v5/v6 bootloader
+ *
+ * @block_cnt: number of flash blocks to read
+ * @command: F34 flash command to issue per block
+ *
+ * Data lands in fwu->read_config_buf, which the caller must have sized to
+ * hold block_cnt * fwu->block_size bytes. Returns 0 on success or a
+ * negative error code.
+ */
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Config area is encoded in bits 5-7 of the high block number byte */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		/* Issue the read command, then collect one block of data */
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/* Read block_cnt flash blocks using the routine that matches the
+ * bootloader generation found during PDT scanning.
+ */
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version != BL_V7 && fwu->bl_version != BL_V8)
+		return fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+
+	return fwu_read_f34_v7_blocks(block_cnt, cmd);
+}
+
+/*
+ * fwu_get_image_firmware_id() - obtain the firmware ID of the update image
+ *
+ * @fw_id: output, receives the image firmware ID
+ *
+ * If the image header carries a firmware ID, use it directly. Otherwise
+ * parse the numeric ID out of the "PRxxxxxxx" token in the image file name.
+ *
+ * Returns 0 on success, -EINVAL if no valid PR number can be parsed, or
+ * -ENOMEM on allocation failure.
+ */
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	unsigned long id_val;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+	} else {
+		strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+		if (!strptr) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+					__func__, fwu->image_name);
+			return -EINVAL;
+		}
+
+		strptr += 2;
+		firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+		if (!firmware_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for firmware_id\n",
+					__func__);
+			return -ENOMEM;
+		}
+		/* Copy the digits following "PR"; kzalloc guarantees the
+		 * string stays NUL terminated, and the early break leaves
+		 * room for the terminator.
+		 */
+		while (strptr[index] >= '0' && strptr[index] <= '9') {
+			firmware_id[index] = strptr[index];
+			index++;
+			if (index == MAX_FIRMWARE_ID_LEN - 1)
+				break;
+		}
+
+		/*
+		 * Parse into a properly sized unsigned long instead of
+		 * casting fw_id (unsigned int *) to unsigned long *, which
+		 * would overwrite memory adjacent to *fw_id on 64-bit
+		 * kernels where sizeof(long) > sizeof(int).
+		 */
+		retval = sstrtoul(firmware_id, 10, &id_val);
+		kfree(firmware_id);
+		if (retval) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to obtain image firmware ID\n",
+					__func__);
+			return -EINVAL;
+		}
+		*fw_id = (unsigned int)id_val;
+	}
+
+	return 0;
+}
+
+/* Read the device's config ID from the F34 control registers into
+ * fwu->config_id. Returns 0 on success or a negative error code.
+ */
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char id_len = V5V6_CONFIG_ID_SIZE;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* v7/v8 bootloaders report a longer config ID */
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		id_len = V7_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+				fwu->f34_fd.ctrl_base_addr,
+				fwu->config_id,
+				id_len);
+
+	return (retval < 0) ? retval : 0;
+}
+
+/*
+ * fwu_go_nogo() - decide whether and what to reflash
+ *
+ * Compares the update image against the device: forced updates and devices
+ * stuck in bootloader mode always reflash firmware + config; otherwise the
+ * firmware IDs are compared, and on a tie the config IDs are compared
+ * byte by byte (most significant byte first).
+ *
+ * Returns UI_FIRMWARE, UI_CONFIG, or NONE.
+ */
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->bl_mode_device) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	/* Newer image firmware wins; older image is never flashed */
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	/* Same firmware: flash config only if the image config is newer */
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
+/*
+ * fwu_scan_pdt() - scan the Page Description Table for F01/F34/F35
+ *
+ * Walks the PDT from PDT_START down to PDT_END, recording the register
+ * base addresses of F01 (device control), F34 (flash), and F35
+ * (microbootloader recovery), and deriving the bootloader version from the
+ * F34 function version. If only F35 is present the device is in
+ * microbootloader mode and recovery status is checked instead. Finally the
+ * F34 interrupt sources are enabled in F01.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	/* PDT entries are laid out top-down in register space */
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				/* F34 function version maps to BL version */
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				/* Build the interrupt mask for F34's sources */
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				fwu->f35_fd.cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			}
+		} else {
+			/* Zero fn_number terminates the PDT */
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			/* Only F35 present: microbootloader recovery mode */
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	/* F01_CTRL1 holds the interrupt enable bits */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_enter_flash_prog() - put the device into flash programming mode
+ *
+ * No-op if the device is already in bootloader mode. Otherwise disables
+ * interrupts, issues the enable-flash-programming command, waits for the
+ * bootloader to come up, re-scans the PDT and F34 queries (register layout
+ * changes in BL mode), and forces F01 into no-sleep normal operation.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	/* Already in bootloader mode: nothing to do */
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	/* fwu_wait_for_idle() refreshes in_bl_mode via flash status */
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Register map differs in BL mode; rediscover F34 */
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Keep the device awake for the duration of the flash session */
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+/* Verify the image's UI firmware occupies exactly the number of flash
+ * blocks the device reports. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_ui_firmware_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.ui_firmware.size / fwu->block_size ==
+			fwu->blkcount.ui_firmware)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI firmware size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/* Verify the image's UI configuration occupies exactly the number of flash
+ * blocks the device reports. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_ui_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.ui_config.size / fwu->block_size ==
+			fwu->blkcount.ui_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/* Verify the image's display configuration occupies exactly the number of
+ * flash blocks the device reports. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_dp_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.dp_config.size / fwu->block_size ==
+			fwu->blkcount.dp_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Display configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Verify the image's permanent configuration occupies exactly the number
+ * of flash blocks the device reports. Returns 0 on match, -EINVAL
+ * otherwise.
+ */
+static int fwu_check_pm_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.pm_config.size / fwu->block_size ==
+			fwu->blkcount.pm_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Permanent configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+#endif
+
+/* Verify the image's bootloader configuration occupies exactly the number
+ * of flash blocks the device reports. Returns 0 on match, -EINVAL
+ * otherwise.
+ */
+static int fwu_check_bl_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.bl_config.size / fwu->block_size ==
+			fwu->blkcount.bl_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Bootloader configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/* Verify the image's guest code occupies exactly the number of flash
+ * blocks the device reports. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_guest_code_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.guest_code.size / fwu->block_size ==
+			fwu->blkcount.guest_code)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Guest code size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * fwu_erase_configuration() - erase the config area selected by
+ * fwu->config_area, then wait for the bootloader to return to idle.
+ *
+ * Returns 0 on success, -EINVAL for an unrecognized config area, or a
+ * negative error code from the command/idle helpers.
+ */
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case FLASH_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case UPP_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UTILITY_PARAMETER);
+		if (retval < 0)
+			return retval;
+		/*
+		 * A missing break here previously let successful utility
+		 * parameter erases fall through to the error path below
+		 * and return -EINVAL.
+		 */
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+/*
+ * fwu_erase_bootloader() - erase the bootloader partition and wait for the
+ * device to return to idle. Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_bootloader(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_BOOTLOADER);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * fwu_erase_lockdown_data() - erase the TDDI lockdown data partition.
+ *
+ * Sleeps 100 ms after issuing the erase command before polling for idle.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	/* Give the erase time to start before polling for idle */
+	msleep(100);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#endif
+
+/*
+ * fwu_erase_guest_code() - erase the guest code partition and wait for the
+ * device to return to idle. Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+/*
+ * fwu_erase_all() - erase all updatable partitions before reflashing
+ *
+ * BL_V7 erases UI firmware and UI config with separate commands; older
+ * bootloaders use a single erase-all command. On BL_V8 a BAD_PARTITION_TABLE
+ * status after erase-all is tolerated (the partition table is about to be
+ * rewritten) and the display config / guest code erases are skipped.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		/* On v8 a bad partition table after erase-all is expected
+		 * and will be fixed by writing a new partition table.
+		 */
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/* Flash the image's UI firmware partition. Returns 0 on success or a
+ * negative error code.
+ */
+static int fwu_write_firmware(void)
+{
+	return fwu_write_f34_blocks(
+			(unsigned char *)fwu->img.ui_firmware.data,
+			fwu->img.ui_firmware.size / fwu->block_size,
+			CMD_WRITE_FW);
+}
+
+/* Flash the image's bootloader partition. The write_bootloader flag is
+ * held for the duration of the write. Returns 0 on success or a negative
+ * error code.
+ */
+static int fwu_write_bootloader(void)
+{
+	int retval;
+	unsigned short blk_count = fwu->img.bl_image.size / fwu->block_size;
+
+	fwu->write_bootloader = true;
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.bl_image.data,
+			blk_count, CMD_WRITE_BOOTLOADER);
+	fwu->write_bootloader = false;
+
+	return retval;
+}
+
+/*
+ * fwu_write_utility_parameter() - build and flash the utility parameter
+ * partition
+ *
+ * Assembles the utility parameters from the image (optionally substituting
+ * preserved force-calibration data for FORCE_PARAMETER when
+ * F51_DISCRETE_FORCE is enabled) into fwu->read_config_buf, appends a
+ * 4-byte little endian checksum over the payload, and writes the whole
+ * partition. Returns 0 on success or a negative error code.
+ */
+static int fwu_write_utility_parameter(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char checksum_array[4];
+	unsigned char *pbuf;
+	unsigned short remaining_size;
+	unsigned short utility_param_size;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	utility_param_size = fwu->blkcount.utility_param * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(utility_param_size);
+	if (retval < 0)
+		return retval;
+	memset(fwu->read_config_buf, 0x00, utility_param_size);
+
+	pbuf = fwu->read_config_buf;
+	/* Last 4 bytes are reserved for the checksum */
+	remaining_size = utility_param_size - 4;
+
+	for (ii = 0; ii < MAX_UTILITY_PARAMS; ii++) {
+		if (fwu->img.utility_param_id[ii] == UNUSED)
+			continue;
+
+#ifdef F51_DISCRETE_FORCE
+		/* Substitute preserved calibration data for the force
+		 * parameter instead of the copy in the image.
+		 */
+		if (fwu->img.utility_param_id[ii] == FORCE_PARAMETER) {
+			if (fwu->bl_mode_device) {
+				dev_info(rmi4_data->pdev->dev.parent,
+						"%s: Device in bootloader mode, skipping calibration data restoration\n",
+						__func__);
+				goto image_param;
+			}
+			retval = secure_memcpy(&(pbuf[4]),
+					remaining_size - 4,
+					fwu->cal_data,
+					fwu->cal_data_buf_size,
+					fwu->cal_data_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy force calibration data\n",
+						__func__);
+				return retval;
+			}
+			/* 4-byte parameter header: id, 0, length in words */
+			pbuf[0] = FORCE_PARAMETER;
+			pbuf[1] = 0x00;
+			pbuf[2] = (4 + fwu->cal_data_size) / 2;
+			pbuf += (fwu->cal_data_size + 4);
+			remaining_size -= (fwu->cal_data_size + 4);
+			continue;
+		}
+image_param:
+#endif
+
+		/* Default: copy the parameter verbatim from the image */
+		retval = secure_memcpy(pbuf,
+				remaining_size,
+				fwu->img.utility_param[ii].data,
+				fwu->img.utility_param[ii].size,
+				fwu->img.utility_param[ii].size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy utility parameter data\n",
+					__func__);
+			return retval;
+		}
+		pbuf += fwu->img.utility_param[ii].size;
+		remaining_size -= fwu->img.utility_param[ii].size;
+	}
+
+	/* Checksum covers everything except its own trailing 4 bytes */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((utility_param_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[utility_param_size - 4] = checksum_array[0];
+	fwu->read_config_buf[utility_param_size - 3] = checksum_array[1];
+	fwu->read_config_buf[utility_param_size - 2] = checksum_array[2];
+	fwu->read_config_buf[utility_param_size - 1] = checksum_array[3];
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->read_config_buf,
+			fwu->blkcount.utility_param, CMD_WRITE_UTILITY_PARAM);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Flash the configuration currently staged in fwu->config_data /
+ * fwu->config_block_count. Returns 0 on success or a negative error code.
+ */
+static int fwu_write_configuration(void)
+{
+	unsigned char *data = (unsigned char *)fwu->config_data;
+
+	return fwu_write_f34_blocks(data, fwu->config_block_count,
+			CMD_WRITE_CONFIG);
+}
+
+/* Stage the image's UI configuration and flash it */
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->img.ui_config.size / fwu->block_size;
+	fwu->config_area = UI_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+/* Stage the image's display configuration and flash it */
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->img.dp_config.size / fwu->block_size;
+	fwu->config_area = DP_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Stage the image's permanent configuration and flash it */
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->img.pm_config.size / fwu->block_size;
+	fwu->config_area = PM_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+#ifdef SYNA_TDDI
+/*
+ * fwu_write_tddi_lockdown_data() - flash the TDDI lockdown data held in
+ * fwu->read_config_buf, then reset the device so it takes effect.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_tddi_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_blocks(fwu->read_config_buf,
+			fwu->blkcount.tddi_lockdown_data,
+			CMD_WRITE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+	return 0;
+}
+#endif
+#endif
+
+/*
+ * fwu_write_flash_configuration() - erase and rewrite the flash config
+ * area from the image, then reset the device so the new partition layout
+ * takes effect. Returns 0 on success, -EINVAL on size mismatch, or a
+ * negative error code.
+ */
+static int fwu_write_flash_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reset so the bootloader re-reads the new flash configuration */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/* Flash the image's guest code partition. Returns 0 on success or a
+ * negative error code.
+ */
+static int fwu_write_guest_code(void)
+{
+	unsigned short blk_count;
+
+	blk_count = fwu->img.guest_code.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			blk_count, CMD_WRITE_GUEST_CODE);
+}
+
+/* Flash the image's lockdown block. Returns 0 on success or a negative
+ * error code.
+ */
+static int fwu_write_lockdown(void)
+{
+	unsigned char *data = (unsigned char *)fwu->img.lockdown.data;
+	unsigned short count = fwu->img.lockdown.size / fwu->block_size;
+
+	return fwu_write_f34_blocks(data, count, CMD_WRITE_LOCKDOWN);
+}
+
+/*
+ * fwu_write_partition_table_v8() - write a new partition table on a v8
+ * bootloader
+ *
+ * On v8 the flash config (partition table) can be written directly without
+ * a prior erase; the device is reset afterwards so the new layout takes
+ * effect. Returns 0 on success, -EINVAL on size mismatch, or a negative
+ * error code.
+ */
+static int fwu_write_partition_table_v8(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reset so the bootloader re-reads the new partition table */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * fwu_write_partition_table_v7() - write a new partition table on a v7
+ * bootloader
+ *
+ * Backs up the current bootloader config into fwu->read_config_buf, erases
+ * and rewrites the flash config (which resets the device), then restores
+ * the saved bootloader config. Returns 0 on success or a negative error
+ * code.
+ */
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	/* Save the existing bootloader config before changing the layout */
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Writes the new flash config and resets the device */
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Restore the bootloader config saved above */
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * fwu_write_bl_area_v7() - rewrite the entire bootloader area (v7/v8)
+ *
+ * Erases the utility parameter (if present), bootloader config, flash
+ * config, and bootloader partitions; reflashes the bootloader from the
+ * image and resets; then rewrites the flash config, bootloader config,
+ * and (if the image provides them) the utility parameters, resetting the
+ * device after the flash config write.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_bl_area_v7(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Note: a dead local copy of fwu->has_utility_param was removed;
+	 * the flag is read directly below.
+	 */
+	if (fwu->has_utility_param) {
+		fwu->config_area = UPP_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->config_area = BL_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_bootloader();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_bootloader();
+	if (retval < 0)
+		return retval;
+
+	/* Let the freshly written bootloader come up before continuing */
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->img.bl_config.data;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->img.contains_utility_param) {
+		retval = fwu_write_utility_parameter();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the parsed firmware image (fwu->img) into device flash.
+ *
+ * Validates image section sizes against the flash layout, erases flash,
+ * rewrites the bootloader area or partition table when needed, then
+ * programs display config, UI config, guest code and firmware.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_do_reflash(void)
+{
+	int retval;
+	bool do_bl_update = false;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!fwu->new_partition_table) {
+		/* layout unchanged: each image section must fit its
+		 * existing partition
+		 */
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	/* a utility-parameter mismatch between device and image (either
+	 * direction) forces a bootloader area rewrite on BL v7/v8
+	 */
+	if (!fwu->has_utility_param && fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (fwu->has_utility_param && !fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	/* without a bootloader rewrite, a partition table change is only
+	 * accepted when compatible and (unless forced) matching
+	 */
+	if (!do_bl_update && fwu->incompatible_partition_tables) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Incompatible partition tables\n",
+				__func__);
+		return -EINVAL;
+	} else if (!do_bl_update && fwu->new_partition_table) {
+		if (!fwu->force_update) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Partition table mismatch\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (do_bl_update) {
+		retval = fwu_write_bl_area_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Bootloader area programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	fwu->config_area = UI_CONFIG_AREA;
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	/* firmware is written last */
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Read the configuration area selected by fwu->config_area into
+ * fwu->read_config_buf (sized to block_size * block_count).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_do_read_config(void)
+{
+	int retval;
+	unsigned short block_count;
+	unsigned short config_area;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* validate that the requested area is supported on this device
+	 * and pick up its block count
+	 */
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		block_count = fwu->blkcount.ui_config;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.dp_config;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.pm_config;
+		break;
+	case BL_CONFIG_AREA:
+		if (!fwu->flash_properties.has_bl_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.bl_config;
+		break;
+	case UPP_AREA:
+		if (!fwu->has_utility_param) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.utility_param;
+		break;
+#ifdef SYNA_TDDI
+	case TDDI_FORCE_CONFIG_AREA:
+		if (!fwu->has_force_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: force configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_force_config;
+		break;
+	case TDDI_OEM_DATA_AREA:
+		if (!fwu->has_oem_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: oem data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_oem_data;
+		break;
+	case TDDI_LCM_DATA_AREA:
+		if (!fwu->has_lcm_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: lcm data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_lcm_data;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (block_count == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid block count\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	/* BL v5/v6 enter flash programming mode before the read; preserve
+	 * the requested area across fwu_enter_flash_prog(), which changes
+	 * fwu->config_area
+	 */
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6) {
+		config_area = fwu->config_area;
+		retval = fwu_enter_flash_prog();
+		fwu->config_area = config_area;
+		if (retval < 0)
+			goto exit;
+	}
+
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+	/* reset to leave flash programming mode on BL v5/v6 */
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6)
+		rmi4_data->reset_device(rmi4_data, false);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Read TDDI lockdown data into a freshly allocated, zeroed
+ * fwu->read_config_buf.  Only supported on BL v6 devices that report
+ * lockdown data.  Returns 0 on success or a negative error code.
+ */
+static int fwu_do_read_tddi_lockdown_data(void)
+{
+	int retval = -EINVAL;
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.tddi_lockdown_data;
+	fwu->config_size = fwu->block_size * block_count;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data\n", __func__);
+		goto exit;
+	}
+
+	/* drop any previous read buffer and allocate a zeroed one */
+	kfree(fwu->read_config_buf);
+
+	fwu->read_config_buf = kzalloc(fwu->config_size, GFP_KERNEL);
+
+	if (!fwu->read_config_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu->read_config_buf\n",
+				__func__);
+		fwu->read_config_buf_size = 0;
+		retval = -ENOMEM;
+		goto exit;
+	}
+	fwu->read_config_buf_size = fwu->config_size;
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_LOCKDOWN_DATA);
+exit:
+	return retval;
+}
+
+/*
+ * Read TDDI lockdown data from flash and copy the first leng bytes
+ * into the caller-supplied buffer.  Returns the underlying read result
+ * (negative error code on failure).
+ */
+int get_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int ret = fwu_do_read_tddi_lockdown_data();
+
+	if (ret >= 0)
+		memcpy(lockdown_data, fwu->read_config_buf, leng);
+
+	return ret;
+}
+
+/*
+ * Write caller-supplied TDDI lockdown data to flash (BL v6 only).
+ *
+ * The data is copied into a zeroed buffer sized to the full lockdown
+ * partition, a checksum over all but the last 4 bytes is appended
+ * little-endian at the end of the buffer, then the partition is erased
+ * and rewritten.  Returns 0 on success or a negative error code.
+ */
+int set_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int retval = -EINVAL;
+	unsigned long checksum;
+	unsigned char checksum_array[4];
+	unsigned short blk_cnt;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data\n", __func__);
+		goto exit;
+	}
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_lockdown_data();
+	if (retval < 0)
+		goto exit;
+
+	blk_cnt = fwu->blkcount.tddi_lockdown_data;
+
+	/* stage the payload in a block-aligned, zero-padded buffer */
+	fwu->config_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+	memset(fwu->read_config_buf, 0x00, fwu->config_size);
+	retval = secure_memcpy(fwu->read_config_buf, fwu->config_size,
+			lockdown_data, leng, leng);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy tddi lockdwon data\n",
+				__func__);
+		goto exit;
+	}
+
+	/* checksum covers everything except its own trailing 4 bytes */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	/* store the checksum in the last 4 bytes of the partition */
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 4] = checksum_array[0];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 3] = checksum_array[1];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 2] = checksum_array[2];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 1] = checksum_array[3];
+	retval = fwu_write_tddi_lockdown_data();
+exit:
+	return retval;
+}
+#endif
+#endif
+
+/*
+ * Lock down a BL v7/v8 device.  Enters flash programming mode, checks
+ * whether the device is already locked, and programs the lockdown
+ * block otherwise.  Returns 0 on success (including the already-locked
+ * case) or a negative error code.
+ */
+static int fwu_do_lockdown_v7(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	struct f34_v7_data0 status;
+	int ret;
+
+	ret = fwu_enter_flash_prog();
+	if (ret < 0)
+		return ret;
+
+	ret = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			status.data,
+			sizeof(status.data));
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return ret;
+	}
+
+	/* device_cfg_status == 2 indicates lockdown already applied */
+	if (status.device_cfg_status == 2) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	ret = fwu_write_lockdown();
+	if (ret >= 0)
+		pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return ret;
+}
+
+/*
+ * Lock down a BL v5/v6 device.
+ *
+ * TDDI devices with lockdown data delegate to set_tddi_lockdown_data()
+ * using the image's lockdown block.  Otherwise, flash programming mode
+ * is entered and the flash properties "unlocked" flag decides whether
+ * the lockdown block still needs programming.  Returns 0 on success
+ * (including already-locked) or a negative error code.
+ */
+static int fwu_do_lockdown_v5v6(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#ifdef SYNA_TDDI
+	unsigned char *img_ld;
+
+	img_ld = (unsigned char *)fwu->img.lockdown.data;
+	if (fwu->has_lockdown_data) {
+		/* TDDI path: lockdown lives in its own flash partition */
+		retval = set_tddi_lockdown_data(img_ld,
+				LOCKDOWN_SIZE);
+		if (retval < 0)
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write lockdown data\n",
+					__func__);
+		return retval;
+	}
+#endif
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.query_base_addr + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/* unlocked == 0 means the device is already locked down */
+	if (fwu->flash_properties.unlocked == 0) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef F51_DISCRETE_FORCE
+/*
+ * Restore saved F$51 calibration data into the UI configuration.
+ *
+ * Reads the current UI config, splices the saved calibration bytes in
+ * at fwu->cal_data_off, recomputes the config checksum into the last
+ * 4 bytes (little-endian), then erases and rewrites the UI config.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_do_restore_f51_cal_data(void)
+{
+	int retval;
+	unsigned char checksum_array[4];
+	unsigned short block_count;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.ui_config;
+	fwu->config_size = fwu->block_size * block_count;
+	fwu->config_area = UI_CONFIG_AREA;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	/* patch the saved calibration data into the read-back config */
+	retval = secure_memcpy(&fwu->read_config_buf[fwu->cal_data_off],
+			fwu->cal_data_size, fwu->cal_data,
+			fwu->cal_data_buf_size, fwu->cal_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore calibration data\n",
+				__func__);
+		return retval;
+	}
+
+	/* recompute checksum over all but the trailing 4 checksum bytes */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[fwu->config_size - 4] = checksum_array[0];
+	fwu->read_config_buf[fwu->config_size - 3] = checksum_array[1];
+	fwu->read_config_buf[fwu->config_size - 2] = checksum_array[2];
+	fwu->read_config_buf[fwu->config_size - 1] = checksum_array[3];
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Sysfs-triggered write of the guest code section from the staged image.
+ *
+ * Validates that device and image both support/contain guest code, then
+ * enters flash programming mode, erases and writes the guest code
+ * partition, and resets the device.  Returns 0 on success or a negative
+ * error code.
+ */
+static int fwu_start_write_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	if (!fwu->has_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!fwu->img.contains_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No guest code in firmware image\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* keep the device awake for the whole flash sequence */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write guest code process\n", __func__);
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_check_guest_code_size();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_write_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+	/* always reset to leave flash programming mode */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	pr_notice("%s: End of write guest code process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Sysfs-triggered write of a single configuration area (UI, display or
+ * permanent) from the staged image.
+ *
+ * Validates the requested area against device capabilities and image
+ * contents, enters flash programming mode, erases (except for the
+ * permanent area) and rewrites the area, then resets the device.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_start_write_config(void)
+{
+	int retval;
+	unsigned short config_area;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	/* validate the requested area and the image's matching section */
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		/* UI config must match the firmware already on the device */
+		device_fw_id = rmi4_data->firmware_id;
+		retval = fwu_get_image_firmware_id(&image_fw_id);
+		if (retval < 0)
+			return retval;
+		if (device_fw_id != image_fw_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Device and image firmware IDs don't match\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No display configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_dp_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_perm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No permanent configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_pm_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Configuration not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write config process\n", __func__);
+
+	/* preserve the requested area across fwu_enter_flash_prog(),
+	 * which changes fwu->config_area
+	 */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	/* permanent config is written without a prior erase */
+	if (fwu->config_area != PM_CONFIG_AREA) {
+		retval = fwu_erase_configuration();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to erase config\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_ui_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case PM_CONFIG_AREA:
+		retval = fwu_write_pm_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+
+	pr_notice("%s: Config written\n", __func__);
+
+exit:
+	/* UI config changes require a full rebuild reset; other areas
+	 * only need a plain reset
+	 */
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case DP_CONFIG_AREA:
+	case PM_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	pr_notice("%s: End of write config process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+#endif
+
+/*
+ * Top-level reflash entry point.
+ *
+ * Loads the default firmware image when none was supplied, cross-checks
+ * it against the device (flash size, bootloader version), lets
+ * fwu_go_nogo() decide what to reprogram, performs the reflash (and
+ * optional F$51 calibration-data save/restore and lockdown), then
+ * resets the device.  Returns 0 on success or a negative error code.
+ */
+static int fwu_start_reflash(void)
+{
+	int retval = 0;
+	enum flash_area flash_area;
+	bool do_rebuild = false;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of reflash process\n", __func__);
+
+	/* no caller-supplied image: load the default image by name */
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+				sizeof(FW_IMAGE_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy image file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware image %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware image %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+	}
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		goto exit;
+
+	/* sanity-check the image against the device */
+	if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash size mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->bl_version != fwu->img.bl_version) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader version mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->in_bl_mode) {
+		fwu->bl_mode_device = true;
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device in bootloader mode\n",
+				__func__);
+	} else {
+		fwu->bl_mode_device = false;
+	}
+
+	/* decide what (if anything) needs reprogramming */
+	flash_area = fwu_go_nogo();
+
+	if (flash_area != NONE) {
+		retval = fwu_enter_flash_prog();
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+
+#ifdef F51_DISCRETE_FORCE
+	/* save calibration data from the current UI config before it is
+	 * erased (not possible if the device booted into BL mode)
+	 */
+	if (flash_area != NONE && !fwu->bl_mode_device) {
+		fwu->config_size = fwu->block_size * fwu->blkcount.ui_config;
+		fwu->config_area = UI_CONFIG_AREA;
+
+		retval = fwu_allocate_read_config_buf(fwu->config_size);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = fwu_read_f34_blocks(fwu->blkcount.ui_config,
+				CMD_READ_CONFIG);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = secure_memcpy(fwu->cal_data, fwu->cal_data_buf_size,
+				&fwu->read_config_buf[fwu->cal_data_off],
+				fwu->cal_data_size, fwu->cal_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to save calibration data\n",
+					__func__);
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+#endif
+
+	switch (flash_area) {
+	case UI_FIRMWARE:
+		do_rebuild = true;
+		retval = fwu_do_reflash();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param || fwu->img.contains_utility_param)
+			break;
+
+		rmi4_data->reset_device(rmi4_data, false);
+
+		if (fwu->bl_mode_device || fwu->in_bl_mode) {
+			dev_info(rmi4_data->pdev->dev.parent,
+					"%s: Device in bootloader mode, skipping calibration data restoration\n",
+					__func__);
+			break;
+		}
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case UI_CONFIG:
+		/* config-only update: erase and rewrite the UI config */
+		do_rebuild = true;
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			break;
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			break;
+		retval = fwu_write_ui_configuration();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param)
+			break;
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+	if (retval < 0) {
+		do_rebuild = false;
+		rmi4_data->reset_device(rmi4_data, false);
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	/* optional lockdown, keyed on bootloader generation */
+	if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+		switch (fwu->bl_version) {
+		case BL_V5:
+		case BL_V6:
+			retval = fwu_do_lockdown_v5v6();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		case BL_V7:
+		case BL_V8:
+			retval = fwu_do_lockdown_v7();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		default:
+			break;
+		}
+	}
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	/* rebuild reset only after a successful firmware/config update */
+	if (do_rebuild)
+		rmi4_data->reset_device(rmi4_data, true);
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Read the F$35 recovery-mode error code and report failure unless it
+ * is zero.  Returns 0 on success or a negative error code.
+ */
+static int fwu_recovery_check_status(void)
+{
+	int ret;
+	unsigned char status;
+	unsigned char data_base = fwu->f35_fd.data_base_addr;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ret = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + F35_ERROR_CODE_OFFSET,
+			&status,
+			1);
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read status\n",
+				__func__);
+		return ret;
+	}
+
+	/* only the low 5 bits carry the error code */
+	status &= MASK_5BIT;
+	if (status) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Recovery mode status = %d\n",
+				__func__, status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for a recovery-mode erase to finish.
+ *
+ * Repeatedly issues the command-register poll sequence: wait for the
+ * command bit to self-clear, then check the flash-busy status bit.
+ * A single timeout budget (F35_ERASE_ALL_WAIT_MS in 20 ms steps) is
+ * shared by both polling loops.  Returns 0 on completion, -ETIMEDOUT
+ * on timeout, or a negative error code on register access failure.
+ */
+static int fwu_recovery_erase_completion(void)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	unsigned char status;
+	unsigned int timeout = F35_ERASE_ALL_WAIT_MS / 20;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f35_fd.data_base_addr;
+
+	do {
+		command = 0x01;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				fwu->f35_fd.cmd_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue command\n",
+					__func__);
+			return retval;
+		}
+
+		/* wait for the command bit to self-clear */
+		do {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					fwu->f35_fd.cmd_base_addr,
+					&command,
+					sizeof(command));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read command status\n",
+						__func__);
+				return retval;
+			}
+
+			if ((command & 0x01) == 0x00)
+				break;
+
+			msleep(20);
+			timeout--;
+		} while (timeout > 0);
+
+		if (timeout == 0)
+			goto exit;
+
+		/* command accepted: check whether flash is still busy */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + F35_FLASH_STATUS_OFFSET,
+				&status,
+				sizeof(status));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash status\n",
+					__func__);
+			return retval;
+		}
+
+		if ((status & 0x01) == 0x00)
+			break;
+
+		msleep(20);
+		timeout--;
+	} while (timeout > 0);
+
+exit:
+	if (timeout == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for flash erase completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Erase all external flash in F$35 recovery mode, then verify the
+ * recovery error status.  Returns 0 on success or a negative error
+ * code.
+ */
+static int fwu_recovery_erase_all(void)
+{
+	int ret;
+	unsigned char ctrl_base = fwu->f35_fd.ctrl_base_addr;
+	unsigned char command = CMD_F35_ERASE_ALL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ret = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue erase all command\n",
+				__func__);
+		return ret;
+	}
+
+	/* poll for completion when a command register exists; otherwise
+	 * fall back to a fixed wait
+	 */
+	if (fwu->f35_fd.cmd_base_addr) {
+		ret = fwu_recovery_erase_completion();
+		if (ret < 0)
+			return ret;
+	} else {
+		msleep(F35_ERASE_ALL_WAIT_MS);
+	}
+
+	return fwu_recovery_check_status();
+}
+
+/*
+ * Stream the staged image into external flash in F35_CHUNK_SIZE pieces.
+ *
+ * The transfer buffer holds F35_CHUNK_SIZE data bytes followed by one
+ * trailing command byte (CMD_F35_WRITE_CHUNK).  The chunk number
+ * register is written once with 0; presumably it auto-increments per
+ * chunk write — TODO(review): confirm against the F$35 spec.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_recovery_write_chunk(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char chunk_number[] = {0, 0};
+	unsigned char chunk_spare;
+	unsigned char chunk_size;
+	unsigned char buf[F35_CHUNK_SIZE + 1];
+	unsigned short chunk;
+	unsigned short chunk_total;
+	unsigned short bytes_written = 0;
+	unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	/* start writing from chunk 0 */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_NUM_LSB_OFFSET,
+			chunk_number,
+			sizeof(chunk_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk number\n",
+				__func__);
+		return retval;
+	}
+
+	/* last byte of each transfer carries the write-chunk command */
+	buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+	chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+	chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+	if (chunk_spare)
+		chunk_total++;
+
+	for (chunk = 0; chunk < chunk_total; chunk++) {
+		if (chunk_spare && chunk == chunk_total - 1)
+			chunk_size = chunk_spare;
+		else
+			chunk_size = F35_CHUNK_SIZE;
+
+		/* zero only the data portion; keep the command byte */
+		memset(buf, 0x00, F35_CHUNK_SIZE);
+		secure_memcpy(buf, sizeof(buf), chunk_ptr,
+					fwu->image_size - bytes_written,
+					chunk_size);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				ctrl_base + F35_CHUNK_DATA_OFFSET,
+				buf,
+				sizeof(buf));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+		chunk_ptr += chunk_size;
+		bytes_written += chunk_size;
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue the F$35 reset command and wait F35_RESET_WAIT_MS for the
+ * device to come back.  Returns 0 on success or a negative error code.
+ */
+static int fwu_recovery_reset(void)
+{
+	int ret;
+	unsigned char ctrl_base = fwu->f35_fd.ctrl_base_addr;
+	unsigned char command = CMD_F35_RESET;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ret = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		return ret;
+	}
+
+	msleep(F35_RESET_WAIT_MS);
+
+	return 0;
+}
+
+/*
+ * Recover a device stuck in microbootloader (F$35) mode.
+ *
+ * Loads the default ihex image when none was supplied, disables the
+ * interrupt, erases external flash, streams the image in chunks, then
+ * issues a recovery reset followed by a rebuild reset.  Returns 0 on
+ * success or a negative error code.
+ */
+static int fwu_start_recovery(void)
+{
+	int retval;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of recovery process\n", __func__);
+
+	/* no caller-supplied image: load the default ihex by name */
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IHEX_NAME, sizeof(FW_IHEX_NAME),
+				sizeof(FW_IHEX_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy ihex file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware ihex %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware ihex %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+		fwu->image_size = fw_entry->size;
+	}
+
+	/* suppress interrupts while flash contents are invalid */
+	retval = rmi4_data->irq_enable(rmi4_data, false, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to disable interrupt\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = fwu_recovery_erase_all();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do erase all in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: External flash erased\n", __func__);
+
+	retval = fwu_recovery_write_chunk();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Chunk data programmed\n", __func__);
+
+	retval = fwu_recovery_reset();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to reset device in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+	/* rebuild reset to re-enumerate the restored firmware */
+	rmi4_data->reset_device(rmi4_data, true);
+
+	retval = 0;
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	pr_notice("%s: End of recovery process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Public firmware-update entry point.
+ *
+ * Runs recovery first when the device is in microbootloader mode, then
+ * reflashes with the caller-supplied image (or the default image when
+ * fw_data is NULL).  Returns 0 on success or a negative error code.
+ */
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+	int ret;
+
+	if (!fwu)
+		return -ENODEV;
+
+	if (!fwu->initialized)
+		return -ENODEV;
+
+	if (fwu->in_ub_mode) {
+		/* recovery always uses the built-in ihex image */
+		fwu->image = NULL;
+		ret = fwu_start_recovery();
+		if (ret < 0)
+			return ret;
+	}
+
+	fwu->image = fw_data;
+	ret = fwu_start_reflash();
+	fwu->image = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+/*
+ * Deferred-work handler that runs a one-time firmware update at boot.
+ * Optionally waits for the framebuffer to become ready first.
+ */
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+	/* guard so the startup update runs at most once */
+	static unsigned char do_once = 1;
+#ifdef WAIT_FOR_FB_READY
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+	if (!do_once)
+		return;
+	do_once = 0;
+
+#ifdef WAIT_FOR_FB_READY
+	/* poll every FB_READY_WAIT_MS up to FB_READY_TIMEOUT_S seconds */
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+#endif
+
+	/* NULL image: use the default firmware image */
+	synaptics_fw_updater(NULL);
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Sysfs binary read handler: copy the previously read configuration
+ * data (fwu->read_config_buf) to userspace.  Returns the number of
+ * bytes copied or a negative error code.
+ */
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* trylock so a concurrent operation fails fast instead of blocking */
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (count < fwu->config_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = secure_memcpy(buf, count, fwu->read_config_buf,
+			fwu->read_config_buf_size, fwu->config_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy config data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = fwu->config_size;
+	}
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs bin-file write handler: appends the incoming chunk to the
+ * staging buffer at the current write offset (fwu->data_pos).
+ *
+ * NOTE(review): assumes fwu->ext_data_source was already allocated by
+ * the image_size store handler; whether secure_memcpy() rejects a NULL
+ * destination is not visible here - confirm before relying on it.
+ */
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* Destination capacity is the remaining space in the staging
+	 * buffer, so secure_memcpy() bounds the copy for us.
+	 */
+	retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+			fwu->image_size - fwu->data_pos, buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = count;
+	}
+
+	fwu->data_pos += count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs store handler: trigger microbootloader-mode recovery using the
+ * image previously staged in fwu->ext_data_source.  Returns count on
+ * success or a negative errno.  The staged image is consumed (freed)
+ * on every exit path.
+ */
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/*
+	 * kstrtouint() returns 0 on success and a negative errno on
+	 * failure, not an sscanf()-style item count; the old "!= 1"
+	 * test rejected every input, including valid ones.
+	 */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not in microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_recovery();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do recovery\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs store handler: reflash the staged image.  The written value is
+ * a flag word: LOCKDOWN bit requests a lockdown, and the remainder must
+ * be NORMAL or FORCE (FORCE skips version checks).  Returns count on
+ * success or a negative errno; the staged image and one-shot flags are
+ * reset on every exit path.
+ */
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success, not a parsed-item count. */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval =  -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	if (input & LOCKDOWN) {
+		fwu->do_lockdown = true;
+		input &= ~LOCKDOWN;
+	}
+
+	if ((input != NORMAL) && (input != FORCE)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input == FORCE)
+		fwu->force_update = true;
+
+	retval = synaptics_fw_updater(fwu->image);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs store handler: write the staged data as the configuration
+ * block.  Only the value 1 triggers the operation.  Returns count on
+ * success or a negative errno; the staged image is consumed on every
+ * exit path.
+ */
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success, not a parsed-item count. */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs store handler: read back the device configuration into
+ * fwu->read_config_buf (exposed via the bin-file show handler).  Only
+ * the value 1 triggers the read.  Returns count on success or a
+ * negative errno.
+ */
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not a parsed-item count;
+	 * parse before taking the mutex since failure needs no cleanup.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval =  -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs store handler: select which config area (UI/PM/BL/...)
+ * subsequent config operations act on.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long config_area;
+	int retval = sstrtoul(buf, 10, &config_area);
+
+	if (retval)
+		return retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->config_area = config_area;
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return count;
+}
+
+/*
+ * sysfs store handler: record the firmware image file name used when
+ * requesting the image from userspace.
+ *
+ * NOTE(review): the raw sysfs buffer (typically ending in '\n') is
+ * copied verbatim and no NUL terminator is guaranteed when count ==
+ * MAX_IMAGE_NAME_LEN; whether downstream consumers trim/terminate it
+ * is not visible here - confirm before relying on it.
+ */
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs store handler: set the expected image size and (re)allocate
+ * the staging buffer that the bin-file write handler fills.  Resets
+ * the write offset.  Returns count on success or a negative errno.
+ */
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &size);
+	if (retval)
+		return retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->image_size = size;
+	fwu->data_pos = 0;
+
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		/* Keep -ENOMEM: the old code unconditionally overwrote
+		 * it with count, reporting success on a failed alloc.
+		 */
+		retval = -ENOMEM;
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+/* sysfs show handler: report the flash block size in bytes. */
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the UI firmware partition block count. */
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.ui_firmware);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the UI config partition block count. */
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.ui_config);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the display config partition block count. */
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.dp_config);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the permanent config partition block count. */
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.pm_config);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the bootloader config partition block count. */
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.bl_config);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the utility parameter partition block count. */
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.utility_param);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/* sysfs show handler: report the guest code partition block count. */
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len = -EBUSY;
+
+	if (mutex_trylock(&fwu_sysfs_mutex)) {
+		len = snprintf(buf, PAGE_SIZE, "%u\n",
+				fwu->blkcount.guest_code);
+		mutex_unlock(&fwu_sysfs_mutex);
+	}
+
+	return len;
+}
+
+/*
+ * sysfs store handler: flash the staged data into the guest code
+ * partition.  Only the value 1 triggers the operation.  Returns count
+ * on success or a negative errno; the staged image is consumed on
+ * every exit path.
+ */
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success, not a parsed-item count. */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * sysfs show handler: dump the TDDI lockdown data as a hex string
+ * followed by a newline.  Returns the number of bytes written to the
+ * PAGE_SIZE sysfs buffer, or a negative errno.
+ */
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short lockdown_data_size;
+	unsigned char *lockdown_data;
+	char ld_val[3];
+	int retval = 0;
+	int i = 0;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	lockdown_data_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data) {
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -ENOMEM;
+	}
+
+	if (get_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < lockdown_data_size; i++) {
+		/*
+		 * "%02x" produces two digits plus a NUL, so the scratch
+		 * buffer must be 3 bytes (the old 2-byte buffer with a
+		 * PAGE_SIZE size argument overflowed), and strlcat()
+		 * must be bounded by the PAGE_SIZE sysfs buffer, not by
+		 * the raw data length.
+		 */
+		retval += snprintf(ld_val, sizeof(ld_val), "%02x",
+				*(lockdown_data + i));
+		strlcat(buf, ld_val, PAGE_SIZE);
+	}
+	*(buf + retval) = '\n';
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval + 1;
+}
+
+/*
+ * sysfs store handler: parse a hex string (even number of digits plus
+ * a trailing newline) into bytes and program it as the TDDI lockdown
+ * data.  Returns count on success or a negative errno.
+ */
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned short lockdown_data_size = (count - 1) / 2;
+	unsigned char *lockdown_data;
+	char temp[3] = {0};
+	int ld_val;
+	int i = 0;
+
+	/* Expect pairs of hex digits plus the trailing newline. */
+	if (count % 2 != 1)
+		return -EINVAL;
+
+	/*
+	 * Validate each character.  The old loop tested *buf without
+	 * ever advancing and used inverted range checks that rejected
+	 * valid hex letters, so it accepted/rejected the wrong inputs.
+	 */
+	for (i = 0; i < (count - 1); i++) {
+		if (!((buf[i] >= '0' && buf[i] <= '9') ||
+				(buf[i] >= 'a' && buf[i] <= 'f') ||
+				(buf[i] >= 'A' && buf[i] <= 'F')))
+			return -EINVAL;
+	}
+
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data)
+		return -ENOMEM;
+
+	for (i = 0; i < lockdown_data_size; i++) {
+		/* temp[2] stays NUL so kstrtoint() sees a terminated
+		 * string; kstrtoint() returns 0 on success (the old
+		 * "== 1" test could never be true).
+		 */
+		memcpy(temp, (buf + 2 * i), 2);
+		if (kstrtoint(temp, 16, &ld_val) == 0)
+			*(lockdown_data + i) = ld_val & 0xff;
+	}
+
+	if (!mutex_trylock(&fwu_sysfs_mutex)) {
+		kfree(lockdown_data);	/* don't leak on contention */
+		return -EBUSY;
+	}
+
+	if (set_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return count;
+}
+#endif
+#endif
+/*
+ * Attention (interrupt) callback: when the F34 interrupt bit is set in
+ * the asserted mask, refresh the cached flash status.
+ */
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (fwu && (fwu->intr_mask & intr_mask))
+		fwu_read_flash_status();
+}
+
+/*
+ * Module init callback: allocate the updater handle, probe the PDT and
+ * F34 queries, optionally queue the one-shot startup update, and create
+ * the sysfs interface.  Returns 0 on success or a negative errno; all
+ * allocations and sysfs entries are unwound on failure.
+ */
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode) {
+		retval = fwu_f51_force_data_init();
+		if (retval < 0)
+			goto exit_free_mem;
+	}
+#endif
+
+	if (ENABLE_SYS_REFLASH == false)
+		return 0;
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+#endif
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	return 0;
+
+exit_remove_attrs:
+	/*
+	 * attr_count is unsigned, so the old "for (attr_count--;
+	 * attr_count >= 0; ...)" condition was always true: at zero the
+	 * decrement wrapped to 255 and the loop walked off attrs[].
+	 * Count back down with an explicit > 0 test instead.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Module remove callback: tear down the startup-update work, free all
+ * buffers and the handle, then remove the sysfs entries (attrs and
+ * dev_attr_data are file-scope, so they remain valid after fwu is
+ * freed).  Always signals fwu_remove_complete for the module exit path.
+ */
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	kfree(fwu->cal_data);
+#endif
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+	if (ENABLE_SYS_REFLASH == false)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
+
+exit:
+	complete(&fwu_remove_complete);
+}
+
+/*
+ * Reset callback: after a device reset, re-run full init if the handle
+ * does not exist yet, otherwise rescan the PDT and refresh the cached
+ * F34 query data (and, when built in, the F51 force-data state).
+ */
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	if (fwu_scan_pdt() < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode)
+		fwu_f51_force_data_init();
+#endif
+}
+
+/* Expansion-function descriptor registering this module's callbacks
+ * with the DSX core (init/remove/reset/attn; the rest unused).
+ */
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+/* Module entry: register the FW updater with the DSX core. */
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+
+	return 0;
+}
+
+/* Module exit: unregister and wait for the remove callback to finish. */
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+
+	wait_for_completion(&fwu_remove_complete);
+}
+
+/* Standard kernel module registration and metadata. */
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
new file mode 100755
index 0000000..2c315b1
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
@@ -0,0 +1,2291 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+/* Physical path reported by the gesture input device. */
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+/* Name of the sysfs subdirectory holding the tuning parameters. */
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+/* When STORE_GESTURES is defined, up to GESTURES_TO_STORE templates
+ * are cached in the driver and restored to the device.
+ */
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+/* F12 control register bit positions / mode value used below. */
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+/*
+ * Forward declarations for the user-defined-gesture (UDG) sysfs
+ * handlers, tuning helpers, and (optionally) the gesture storage
+ * helpers; the definitions follow later in this file.
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+/* Gesture report type codes delivered by the device - presumably F12
+ * data values; confirm against the attention handler.
+ */
+enum gesture_type {
+	DETECTION = 0x0f,
+	REGISTRATION = 0x10,
+};
+
+/* UDG tuning parameter register block: a packed 14-byte register image
+ * overlaid with named fields (multi-byte values split into lsb/msb).
+ */
+struct udg_tuning {
+	union {
+		struct {
+			unsigned char maximum_number_of_templates;
+			unsigned char template_size;
+			unsigned char template_disp_lsb;
+			unsigned char template_disp_msb;
+			unsigned char rotation_inv_lsb;
+			unsigned char rotation_inv_msb;
+			unsigned char scale_inv_lsb;
+			unsigned char scale_inv_msb;
+			unsigned char thres_factor_lsb;
+			unsigned char thres_factor_msb;
+			unsigned char metric_thres_lsb;
+			unsigned char metric_thres_msb;
+			unsigned char inter_stroke_lsb;
+			unsigned char inter_stroke_msb;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/* Cache of resolved register addresses used by the UDG code. */
+struct udg_addr {
+	unsigned short data_4;
+	unsigned short ctrl_18;
+	unsigned short ctrl_20;
+	unsigned short ctrl_23;
+	unsigned short ctrl_27;
+	unsigned short ctrl_41;
+	unsigned short trace_x;
+	unsigned short trace_y;
+	unsigned short trace_segment;
+	unsigned short template_helper;
+	unsigned short template_data;
+	unsigned short template_flags;
+};
+
+/* F12 query 0: capability bits plus template count/size fields,
+ * overlaid on the raw 4-byte register image.
+ */
+struct synaptics_rmi4_f12_query_0 {
+	union {
+		struct {
+			struct {
+				unsigned char has_register_descriptors:1;
+				unsigned char has_closed_cover:1;
+				unsigned char has_fast_glove_detect:1;
+				unsigned char has_dribble:1;
+				unsigned char has_4p4_jitter_filter_strength:1;
+				unsigned char f12_query0_s0_b5__7:3;
+			} __packed;
+			struct {
+				unsigned char max_num_templates:4;
+				unsigned char f12_query0_s1_b4__7:4;
+				unsigned char template_size_lsb;
+				unsigned char template_size_msb;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 query 5: presence bitmap for control registers 0-47, one bit per
+ * register, overlaid on the raw 7-byte register image.
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+		};
+		unsigned char data[7];
+	};
+};
+
+/* F12 query 8: presence bitmap for data registers 0-23, one bit per
+ * register, overlaid on the raw 4-byte register image.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 control 41: gesture registration control bits in one byte. */
+struct synaptics_rmi4_f12_control_41 {
+	union {
+		struct {
+			unsigned char enable_registration:1;
+			unsigned char template_index:4;
+			unsigned char begin:1;
+			unsigned char f12_ctrl41_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Per-device state for the user-defined-gesture module: cached F12
+ * base/register addresses, dynamically sized data buffers, current
+ * detection/registration status, and the sysfs/input handles.
+ */
+struct synaptics_rmi4_udg_handle {
+	atomic_t attn_event;
+	unsigned char intr_mask;
+	unsigned char report_flags;
+	unsigned char object_type_enable1;
+	unsigned char object_type_enable2;
+	unsigned char trace_size;
+	unsigned char template_index;
+	unsigned char max_num_templates;
+	unsigned char detection_score;
+	unsigned char detection_index;
+	unsigned char detection_status;
+	unsigned char registration_status;
+	unsigned char *ctrl_buf;
+	unsigned char *trace_data_buf;
+	unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+	unsigned char gestures_to_store;
+	unsigned char *storage_buf;
+	unsigned char valid_buf[2];
+#endif
+	unsigned short trace_data_buf_size;
+	unsigned short template_size;
+	unsigned short template_data_size;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short ctrl_18_sub10_off;
+	unsigned short ctrl_20_sub1_off;
+	unsigned short ctrl_23_sub3_off;
+	unsigned short ctrl_27_sub5_off;
+	struct input_dev *udg_dev;
+	struct kobject *tuning_dir;
+	struct udg_addr addr;
+	struct udg_tuning tuning;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* sysfs device attributes exposed by the UDG module. */
+static struct device_attribute attrs[] = {
+	__ATTR(engine_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_engine_enable_store),
+	__ATTR(detection_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_detection_enable_store),
+	__ATTR(detection_score, 0444,
+			udg_sysfs_detection_score_show,
+			synaptics_rmi4_store_error),
+	__ATTR(detection_index, 0444,
+			udg_sysfs_detection_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(registration_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_enable_store),
+	__ATTR(registration_begin, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_begin_store),
+	__ATTR(registration_status, 0444,
+			udg_sysfs_registration_status_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_size, 0444,
+			udg_sysfs_template_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_max_index, 0444,
+			udg_sysfs_template_max_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_detection, 0444,
+			udg_sysfs_template_detection_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_index, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_index_store),
+	__ATTR(template_valid, 0664,
+			udg_sysfs_template_valid_show,
+			udg_sysfs_template_valid_store),
+	__ATTR(template_clear, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_clear_store),
+	__ATTR(trace_size, 0444,
+			udg_sysfs_trace_size_show,
+			synaptics_rmi4_store_error),
+};
+
+/* Binary sysfs attribute for reading/writing raw template data. */
+static struct bin_attribute template_data = {
+	.attr = {
+		.name = "template_data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = udg_sysfs_template_data_show,
+	.write = udg_sysfs_template_data_store,
+};
+
+/* Binary sysfs attribute for reading gesture trace data (read-only). */
+static struct bin_attribute trace_data = {
+	.attr = {
+		.name = "trace_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = udg_sysfs_trace_data_show,
+	.write = NULL,
+};
+
+/* Tuning parameters exposed in the "tuning" sysfs directory. */
+static struct device_attribute params[] = {
+	__ATTR(template_displacement, 0664,
+			udg_sysfs_template_displacement_show,
+			udg_sysfs_template_displacement_store),
+	__ATTR(rotation_invariance, 0664,
+			udg_sysfs_rotation_invariance_show,
+			udg_sysfs_rotation_invariance_store),
+	__ATTR(scale_invariance, 0664,
+			udg_sysfs_scale_invariance_show,
+			udg_sysfs_scale_invariance_store),
+	__ATTR(threshold_factor, 0664,
+			udg_sysfs_threshold_factor_show,
+			udg_sysfs_threshold_factor_store),
+	__ATTR(match_metric_threshold, 0664,
+			udg_sysfs_match_metric_threshold_show,
+			udg_sysfs_match_metric_threshold_store),
+	__ATTR(max_inter_stroke_time, 0664,
+			udg_sysfs_max_inter_stroke_time_show,
+			udg_sysfs_max_inter_stroke_time_store),
+};
+
+/* Module-wide handle; allocated at init (outside this chunk). */
+static struct synaptics_rmi4_udg_handle *udg;
+
+/*
+ * Sizes (in bytes) of the subpackets that precede the subpacket of
+ * interest in each packed F12 control register; used to compute the
+ * ctrl_*_sub*_off offsets.
+ */
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+/* Signalled when the module instance is torn down. */
+DECLARE_COMPLETION(udg_remove_complete);
+
+/*
+ * sysfs store: enable (1) or disable (0) the UDG engine.
+ * Returns count on success, negative errno on failure.
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	/*
+	 * kstrtouint() returns 0 on success and a negative errno on
+	 * failure (it is not sscanf); comparing against 1 made every
+	 * write fail with -EINVAL.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	retval = udg_engine_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * sysfs store: enable (1) or disable (0) gesture detection reporting.
+ * Clears any stale detection status before switching modes.
+ */
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected valid input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	udg->detection_status = 0;
+
+	retval = udg_detection_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show: score of the last detected gesture (set in template_detection). */
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_score);
+}
+
+/* sysfs show: template index of the last detected gesture. */
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_index);
+}
+
+/*
+ * sysfs store: enter (1) or leave (0) template registration mode.
+ * While registering, F12_CTRL23 is restricted to finger reporting
+ * only; on exit the saved object-type enables are restored.  In both
+ * cases the enable_registration bit of F12_CTRL41 is updated last.
+ */
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected valid input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* report fingers only during registration */
+		udg->ctrl_buf[0] = 0;
+		udg->ctrl_buf[0] |= (1 << CTRL23_FINGER_REPORT_ENABLE_BIT);
+		if (udg->ctrl_23_sub3_off)
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* restore the object-type enables saved at init */
+		udg->ctrl_buf[0] = udg->object_type_enable1;
+		if (udg->ctrl_23_sub3_off) {
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+					udg->object_type_enable2;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.enable_registration = enable ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * sysfs store: start (1) or stop (0) recording of the current
+ * template via the "begin" bit of F12_CTRL41.
+ */
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool begin;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected valid input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		begin = true;
+	else if (input == 0)
+		begin = false;
+	else
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.begin = begin ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show: last registration status reported by the firmware. */
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", udg->registration_status);
+}
+
+/* sysfs show: size in bytes of one gesture template. */
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->template_size);
+}
+
+/* sysfs show: highest valid template index (count - 1). */
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->max_num_templates - 1);
+}
+
+/*
+ * sysfs show: consume the pending attention event and report the
+ * detection status byte.  Prints "0" when no event is pending or the
+ * status is neither DETECTION nor REGISTRATION; otherwise caches
+ * score/index/trace size (DETECTION) or registration status and trace
+ * size (REGISTRATION) for the other show attributes.
+ */
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	int attn_event;
+	unsigned char detection_status;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* read-and-clear the flag set by udg_report() */
+	attn_event = atomic_read(&udg->attn_event);
+	atomic_set(&udg->attn_event, 0);
+
+	if (attn_event == 0)
+		return snprintf(buf, PAGE_SIZE, "0\n");
+
+	/* udg_report() may already have fetched the detection registers */
+	if (udg->detection_status == 0) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.data_4,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return retval;
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+	}
+
+	detection_status = udg->detection_status;
+	udg->detection_status = 0;	/* one-shot: consumed here */
+
+	switch (detection_status) {
+	case DETECTION:
+		udg->detection_score = rmi4_data->gesture_detection[1];
+		udg->detection_index = rmi4_data->gesture_detection[4];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	case REGISTRATION:
+		udg->registration_status = rmi4_data->gesture_detection[1];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	default:
+		return snprintf(buf, PAGE_SIZE, "0\n");
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+/*
+ * sysfs store: select the active template index.
+ * sstrtoul() is the driver's string-to-ulong helper (returns 0 on
+ * success); range checking is done in udg_set_index().
+ */
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long index;
+
+	retval = sstrtoul(buf, 10, &index);
+	if (retval)
+		return retval;
+
+	retval = udg_set_index((unsigned char)index);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * sysfs show: whether the currently selected template's valid bit is
+ * set in the 16-bit template flags register.
+ */
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* one flag bit per template, packed 8 per byte */
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+/*
+ * sysfs store: set (non-zero) or clear (0) the valid bit of the
+ * currently selected template via read-modify-write of the template
+ * flags register.
+ */
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &valid);
+	if (retval)
+		return retval;
+
+	/* any non-zero input means "valid" */
+	if (valid > 0)
+		valid = 1;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	if (valid)
+		template_flags[byte_num] |= (1 << offset);
+	else
+		template_flags[byte_num] &= ~(1 << offset);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+#ifdef STORE_GESTURES
+	/* keep the host-side copy of the flags in sync */
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/*
+ * sysfs store: writing 1 zeroes the current template's data and
+ * clears its valid bit (by reusing template_valid_store with "0").
+ */
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	const char cmd[] = {'0', 0};
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected valid input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear valid bit\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* sysfs show: number of points in the most recent gesture trace. */
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->trace_size);
+}
+
+/*
+ * Binary sysfs read: return the gesture trace as X (2 bytes/point),
+ * Y (2 bytes/point) and segment (1 byte/point) data, 5 bytes per
+ * trace point in total.  The internal buffer is grown on demand and
+ * reused across reads.
+ */
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short index = 0;
+	unsigned short trace_data_size;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* 2 bytes X + 2 bytes Y + 1 byte segment per trace point */
+	trace_data_size = udg->trace_size * 5;
+
+	if (trace_data_size == 0)
+		return -EINVAL;
+
+	if (count < trace_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	/* grow the scratch buffer if the trace outgrew it */
+	if (udg->trace_data_buf_size < trace_data_size) {
+		if (udg->trace_data_buf_size)
+			kfree(udg->trace_data_buf);
+		udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+		if (!udg->trace_data_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for trace data buffer\n",
+					__func__);
+			udg->trace_data_buf_size = 0;
+			return -ENOMEM;
+		}
+		udg->trace_data_buf_size = trace_data_size;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_x,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace X data\n",
+				__func__);
+		return retval;
+	}
+	index += udg->trace_size * 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_y,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace Y data\n",
+				__func__);
+		return retval;
+	}
+	index += udg->trace_size * 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_segment,
+			&udg->trace_data_buf[index],
+			udg->trace_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace segment data\n",
+				__func__);
+		return retval;
+	}
+
+	/* bounded copy into the sysfs buffer */
+	retval = secure_memcpy(buf, count, udg->trace_data_buf,
+			udg->trace_data_buf_size, trace_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy trace data\n",
+				__func__);
+		return retval;
+	}
+
+	return trace_data_size;
+}
+
+/*
+ * Binary sysfs read: fetch the currently selected template's raw data
+ * from the device and copy it to user space.
+ */
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (count < udg->template_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->template_data_buf,
+			udg->template_data_size, udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* refresh the host-side template/flag copies */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return udg->template_data_size;
+}
+
+/*
+ * Binary sysfs write: copy user-supplied template data into the
+ * scratch buffer (bounded by template_data_size via secure_memcpy)
+ * and write it to the currently selected template on the device.
+ */
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/*
+ * sysfs show/store pair for the 16-bit template displacement tuning
+ * parameter, stored little-endian (lsb/msb bytes) in the F12_CTRL18
+ * tuning block.
+ */
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short template_displacement;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	template_displacement =
+			((unsigned short)udg->tuning.template_disp_lsb << 0) |
+			((unsigned short)udg->tuning.template_disp_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", template_displacement);
+}
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	/* read-modify-write the whole tuning block */
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.template_disp_lsb = (unsigned char)(input >> 0);
+	udg->tuning.template_disp_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show/store pair for the 16-bit rotation invariance parameter. */
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short rotation_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	rotation_invariance =
+			((unsigned short)udg->tuning.rotation_inv_lsb << 0) |
+			((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", rotation_invariance);
+}
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.rotation_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.rotation_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show/store pair for the 16-bit scale invariance parameter. */
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short scale_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	scale_invariance =
+			((unsigned short)udg->tuning.scale_inv_lsb << 0) |
+			((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", scale_invariance);
+}
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.scale_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.scale_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show/store pair for the 16-bit threshold factor parameter. */
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short threshold_factor;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	threshold_factor =
+			((unsigned short)udg->tuning.thres_factor_lsb << 0) |
+			((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", threshold_factor);
+}
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)(input >> 0);
+	udg->tuning.thres_factor_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show/store pair for the 16-bit match metric threshold parameter. */
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short match_metric_threshold;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	match_metric_threshold =
+			((unsigned short)udg->tuning.metric_thres_lsb << 0) |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", match_metric_threshold);
+}
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)(input >> 0);
+	udg->tuning.metric_thres_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show/store pair for the 16-bit max inter-stroke time parameter. */
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short max_inter_stroke_time;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	max_inter_stroke_time =
+			((unsigned short)udg->tuning.inter_stroke_lsb << 0) |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_inter_stroke_time);
+}
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)(input >> 0);
+	udg->tuning.inter_stroke_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Determine whether a given subpacket of an F12 control register is
+ * present on this device.
+ *
+ * Walks the register presence bitmap in query 5 and the per-register
+ * subpacket presence bytes in query 6 (7 presence bits per byte; the
+ * top bit set means "more bytes follow").
+ *
+ * Returns 1 if present, 0 if absent, negative errno on failure.
+ */
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* first byte of query 5 is its size; the rest is the bitmap */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	/* the register itself must be present in the query 5 bitmap */
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* skip the query 6 entries of every present register before ours */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* continuation bytes have the top bit set */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	cnt = 0;
+	q6_index++;
+	/* 7 subpacket presence bits per query 6 byte */
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/*
+ * Read the tuning subpacket of F12_CTRL18 into udg->tuning.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	/* propagate a bounds-check failure instead of ignoring it */
+	retval = secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Write udg->tuning back into the tuning subpacket of F12_CTRL18.
+ * Assumes udg->ctrl_buf still holds the bytes preceding the subpacket
+ * from a prior udg_read_tuning_params() call.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* propagate a bounds-check failure instead of ignoring it */
+	retval = secure_memcpy(
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Switch F12_CTRL20 reporting between wakeup-gesture mode and the
+ * normal reporting flags saved in udg->report_flags.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Set or clear the UDG enable bit in F12_CTRL27 with a single
+ * read-modify-write sequence.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+	else
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Attention handler: flag the event for the sysfs interface and, if
+ * the device is suspended, read the detection registers and emit a
+ * KEY_WAKEUP press/release when a gesture was detected.
+ */
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* consumed (and cleared) by udg_sysfs_template_detection_show() */
+	atomic_set(&udg->attn_event, 1);
+
+	if (rmi4_data->suspend) {
+		/* gesture_detection[0] may have been filled elsewhere */
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Select the active template by writing its index into F12_CTRL41.
+ * Returns 0 on success, -EINVAL if the index is out of range,
+ * negative errno on register I/O failure.
+ */
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	/* read-modify-write to preserve the other CTRL41 bits */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+/*
+ * Snapshot the device's template flags register into udg->valid_buf.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Restore the template flags register from udg->valid_buf.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Copy one template's data from the device into the host-side
+ * storage buffer slot for that index.  Selects the template first
+ * via udg_set_index().
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	udg_set_index(index);
+	/* storage_buf holds template_data_size bytes per template */
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Restore every stored template from the host-side buffer back to
+ * the device, one template at a time.
+ * Returns 0 on success, negative errno on the first failure.
+ */
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		udg_set_index(ii);
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * udg_reg_init() - set up the F12 register map used by the user defined
+ * gesture (UDG) feature.
+ *
+ * Computes the offsets of the gesture-related F12 data and control
+ * registers from the presence bits advertised in query registers 5 and 8,
+ * caches the resulting absolute addresses in udg->addr, then reads the
+ * template geometry (query 0) and the current report-flag / object-type
+ * settings.
+ *
+ * Return: negative errno on failure.  On success the return value is the
+ * (non-negative) result of the final register read; callers only test for
+ * retval < 0.
+ */
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/*
+	 * Query 7 is read as the size of the data-presence register
+	 * (query 8); fewer than 4 bytes means the presence bits for data
+	 * registers 16-21 cannot exist.  (Assumed per the RMI4 F12 layout -
+	 * confirm against the spec.)
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/* Query 8 holds one presence bit per F12 data register. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* UDG requires data registers 16 through 21; all must be present. */
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		/*
+		 * A register's offset is the count of present registers
+		 * preceding it in the packet.
+		 */
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		/* Data 16-21 are consecutive once data 16's offset is known. */
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/*
+	 * Query 4 is read as the size of the control-presence register
+	 * (query 5); fewer than 7 bytes means the needed control presence
+	 * bits cannot exist.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/* Query 5 holds one presence bit per F12 control register. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Control register offsets, again counted from the presence bits. */
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset+
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset+
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	/*
+	 * Accumulate the sizes of the sub-packets actually present in each
+	 * control register (udg_ctrl_subpacket() returns 1 if present,
+	 * 0 if absent, negative on error).
+	 */
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/*
+	 * If sub-packet 3 of control 23 is absent there is no second
+	 * object-type-enable byte; zero the offset so the read of
+	 * object_type_enable2 below is skipped.
+	 */
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Query 0 describes the template storage geometry. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	/*
+	 * NOTE(review): derivation of the "* 4 * 2 + 4 + 1" sizing is not
+	 * documented here - confirm against the F12 gesture template spec.
+	 */
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	/* The report flags live in the last byte read from control 20. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	/* Object-type enable bytes live in control 23. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+/*
+ * udg_scan_pdt() - locate F12 in the page description table (PDT) and set
+ * up UDG register access and interrupt enables.
+ *
+ * Scans the PDT (entries grow downward from PDT_START) on each page until
+ * F12 is found, records its base addresses, initializes the UDG register
+ * map via udg_reg_init(), and enables F12's interrupt source bit(s) in
+ * the F01 interrupt-enable register.
+ *
+ * Return: 0 on success, -EINVAL if F12 is absent, other negative errno on
+ * bus or register-init failure.
+ */
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* Fold the page number into the upper address byte. */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page byte before the next loop step. */
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					/* break below is unreachable */
+					break;
+				}
+			} else {
+				/* Empty entry: no more functions on this page. */
+				break;
+			}
+
+			/*
+			 * Count interrupt sources of the functions found
+			 * before F12 to locate F12's bit offset.
+			 */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	/* Base addresses carry the page number in the upper byte. */
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build the interrupt mask covering F12's source bits. */
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	/* F01 control 1: interrupt enable register. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_udg_attn() - attention (interrupt) hook; reports gesture
+ * data when the UDG interrupt source fired.
+ */
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!udg)
+		return;
+
+	if (udg->intr_mask & intr_mask)
+		udg_report();
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_udg_init() - allocate and register the user defined
+ * gesture (UDG) handle: control/template buffers, the wakeup-gesture
+ * input device, and the sysfs attribute/bin files plus tuning directory.
+ *
+ * Two defects in the original are fixed here:
+ *  - the sysfs unwind loops decremented "unsigned char" counters with a
+ *    ">= 0" condition, which is always true, so the loops underflowed
+ *    past element 0 and called sysfs_remove_file() out of bounds;
+ *  - a kobject_create_and_add() failure fell through with the stale
+ *    retval (0) from the preceding input_register_device() call, so the
+ *    function reported success after a failed init.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	unsigned char attr_count;
+	unsigned char param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* ctrl_buf must hold every control-18 sub-packet plus tuning data. */
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	/* Sets udg->template_data_size, needed for the allocation below. */
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	/* The gesture device only emits KEY_WAKEUP events. */
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		/* Fix: original returned stale retval (0) here. */
+		retval = -ENOMEM;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+exit_remove_params:
+	/*
+	 * param_count indexes the first file that failed to create (or
+	 * ARRAY_SIZE on the engine-enable path); remove the ones before it.
+	 * Counting down with ">= 0" on an unsigned type would never
+	 * terminate, hence the "> 0" form.
+	 */
+	while (param_count > 0) {
+		param_count--;
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_udg_remove() - tear down everything created by
+ * synaptics_rmi4_udg_init() (sysfs files, tuning kobject, input device,
+ * buffers) and signal udg_remove_complete for the module exit path.
+ */
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char count;
+
+	if (!udg)
+		goto exit;
+
+	for (count = 0; count < ARRAY_SIZE(params); count++) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[count].attr);
+	}
+
+	for (count = 0; count < ARRAY_SIZE(attrs); count++) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	kobject_put(udg->tuning_dir);
+
+	input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+	kfree(udg->trace_data_buf);
+	kfree(udg->ctrl_buf);
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	/* Always signal, even if there was no handle to tear down. */
+	complete(&udg_remove_complete);
+}
+
+/*
+ * synaptics_rmi4_udg_reset() - re-establish UDG state after a controller
+ * reset: rescan the PDT, re-enable the engine, and (when STORE_GESTURES)
+ * restore saved templates.  Errors from the helpers are ignored here,
+ * matching the void reset-hook contract.
+ */
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg) {
+		/* No handle yet: perform a full init instead. */
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+}
+
+/*
+ * synaptics_rmi4_udg_reinit() - like the reset hook but without a PDT
+ * rescan; re-enables the engine and restores stored gestures.
+ */
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+}
+
+/*
+ * synaptics_rmi4_udg_e_suspend() - early-suspend hook.  Keeps the
+ * controller awake (sleep disabled, IRQ enabled as a wake source) and
+ * arms gesture detection so a drawn gesture can wake the system.
+ * Identical body to the suspend hook below.
+ */
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+}
+
+/*
+ * synaptics_rmi4_udg_suspend() - suspend hook; same behavior as the
+ * early-suspend hook: keep the controller awake with the IRQ armed for
+ * wakeup and gesture detection enabled.
+ */
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+}
+
+/*
+ * synaptics_rmi4_udg_resume() - resume hook; disarm the wake IRQ and
+ * turn gesture detection off for normal touch operation.
+ */
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+}
+
+/*
+ * synaptics_rmi4_udg_l_resume() - late-resume hook; identical to the
+ * resume hook above.
+ */
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+}
+
+/* Expansion-function descriptor registered with the DSX core driver. */
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+/* Register the gesture expansion function with the core driver. */
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+/*
+ * Unregister the expansion function, then wait until the core has called
+ * synaptics_rmi4_udg_remove() (which completes udg_remove_complete)
+ * before letting the module text go away.
+ */
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	wait_for_completion(&udg_remove_complete);
+}
+
+/* Module entry/exit points and metadata. */
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
new file mode 100755
index 0000000..900fc8e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -0,0 +1,623 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+#include "linux/moduleparam.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+#define rd_msgs  1
+
+static unsigned char *wr_buf;
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate the board data from the device-tree node.
+ *
+ * Optional properties fall back to defaults (-1 for absent GPIOs and
+ * values, 0 for delays, NULL for regulator names); when an optional
+ * property block is present but a required sub-property inside it is
+ * malformed, that is treated as an error.
+ *
+ * Return: 0 on success, negative errno on a malformed property or OOM.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	/* IRQ GPIO and its flags (flags are written into irq_flags). */
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	/* Power GPIO: if declared, its active state must also be given. */
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_on_state = value;
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_delay_ms = value;
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	/* Reset GPIO: if declared, both reset timing values are required. */
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_on_state = value;
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_active_ms = value;
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_delay_ms = value;
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		}
+		bdata->max_y_for_2d = value;
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	/* Alternate (microbootloader) I2C address; -1 when absent. */
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		}
+		bdata->ub_i2c_addr = (unsigned short)value;
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* 0D capacitive button map: one keycode per button. */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			/* Unreadable array: fall back to "no buttons". */
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/*
+	 * Virtual buttons: 5 u32 values per button (keycode plus geometry,
+	 * presumably - confirm layout against the core driver).
+	 */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_i2c_alloc_buf() - ensure the shared write buffer wr_buf
+ * can hold at least @count bytes.  The buffer only grows; buf_size tracks
+ * the current capacity across calls.  No internal locking: callers
+ * serialize through rmi4_io_ctrl_mutex (see synaptics_rmi4_i2c_write).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure (wr_buf is then
+ * NULL and buf_size reset to 0, so a later call retries cleanly).
+ */
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (count > buf_size) {
+		if (buf_size)
+			kfree(wr_buf);
+		wr_buf = kzalloc(count, GFP_KERNEL);
+		if (!wr_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = count;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_i2c_check_addr() - toggle between the normal client
+ * address and the alternate ("ub", microbootloader) address after
+ * repeated transfer failures.  No-op when no alternate address was
+ * configured in DT (ub_i2c_addr == -1).
+ */
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	if (hw_if.board_data->ub_i2c_addr == -1)
+		return;
+
+	if (hw_if.board_data->i2c_addr == i2c->addr)
+		hw_if.board_data->i2c_addr = hw_if.board_data->ub_i2c_addr;
+	else
+		hw_if.board_data->i2c_addr = i2c->addr;
+}
+
+/*
+ * synaptics_rmi4_i2c_set_page() - select the RMI4 register page for @addr.
+ *
+ * Writes the page number to the page-select register (buf[0] is the
+ * register address, MASK_8BIT presumably 0xff per the RMI4 convention -
+ * confirm) only when the page differs from the cached current_page,
+ * retrying up to SYN_I2C_RETRY_TIMES and switching to the alternate I2C
+ * address halfway through the retries.
+ *
+ * Return: PAGE_SELECT_LEN on success (including the no-op case), 0 if
+ * every retry failed.
+ */
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	buf[0] = MASK_8BIT;
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through retries, try the alternate address. */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_read() - read @length bytes from RMI register @addr
+ * into @data.
+ *
+ * Selects the register page, then performs one address-write message
+ * followed by rd_msgs read message(s), retrying each batch up to
+ * SYN_I2C_RETRY_TIMES and switching to the alternate I2C address halfway
+ * through the retries.  Serialized by rmi4_io_ctrl_mutex.
+ *
+ * NOTE(review): the read length is truncated to unsigned short when
+ * stored in msg[].len - confirm callers never request > 64KB.
+ *
+ * Return: @length on success, negative errno on failure.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf;
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned int remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[rd_msgs + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* msg[0] writes the 8-bit register address; msg[rd_msgs] reads. */
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = (unsigned short)remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	while (remaining_msgs) {
+		xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through retries, try the alternate address. */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_write() - write @length bytes from @data to RMI
+ * register @addr.
+ *
+ * Builds a single message in the shared wr_buf (one address byte followed
+ * by the payload), selects the register page, and retries the transfer up
+ * to SYN_I2C_RETRY_TIMES, switching to the alternate I2C address halfway
+ * through.  Serialized by rmi4_io_ctrl_mutex, which also protects wr_buf.
+ *
+ * Return: @length on success, negative errno on failure.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* +1 for the leading register-address byte. */
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = (unsigned short)(length + 1);
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		/* Halfway through retries, try the alternate address. */
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * check_dt() - match this touch controller to an active DRM panel.
+ *
+ * Walks the "panel" phandle list on @np and records the first panel that
+ * of_drm_find_panel() resolves in the file-scope active_panel.
+ *
+ * Return: 0 if there is no "panel" property or an active panel was found;
+ * -ENODEV if panels are listed but none has probed yet (NOTE(review):
+ * the probe path turns this into -ENODEV, not -EPROBE_DEFER - confirm
+ * this is the intended re-probe strategy).
+ */
+int check_dt(struct device_node *np)
+{
+	int i;
+	int count;
+	struct device_node *node;
+	struct drm_panel *panel;
+
+	count = of_count_phandle_with_args(np, "panel", NULL);
+	if (count <= 0)
+		return 0;
+
+	for (i = 0; i < count; i++) {
+		node = of_parse_phandle(np, "panel", i);
+		panel = of_drm_find_panel(node);
+		of_node_put(node);
+		if (!IS_ERR(panel)) {
+			active_panel = panel;
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/* Bus operations handed to the DSX core via hw_if.bus_access. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Device release callback: frees the kzalloc'd platform device. */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+}
+
+/*
+ * synaptics_rmi4_i2c_probe() - I2C probe.  Validates the DT panel link
+ * and adapter capabilities, builds the board data from DT, then registers
+ * the DSX core platform device with this bus's read/write ops.
+ *
+ * Fixes over the original: the kzalloc'd platform device is no longer
+ * leaked on the board-data allocation failure paths, and a failed
+ * platform_device_register() now drops the device reference via
+ * platform_device_put(), which invokes the release callback that frees it.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	/* Bail out when no matching DRM panel is active for this node. */
+	if (check_dt(client->dev.of_node))
+		return -ENODEV;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			/* Not yet device_initialize()d; plain kfree is safe. */
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		/*
+		 * NOTE(review): parse_dt() errors are ignored (as in the
+		 * original); board data may be partially populated.
+		 */
+		parse_dt(&client->dev, hw_if.board_data);
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/*
+		 * The device was initialized by platform_device_register();
+		 * drop its reference so the release callback frees it.
+		 */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * I2C remove: unregister the core platform device; its release callback
+ * frees the kzalloc'd structure.
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* Legacy I2C (non-DT) device ID table. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device-tree match table. */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Called by the DSX core to attach this I2C bus module. */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/*
+ * Called by the DSX core to detach this I2C bus module; releases the
+ * shared write buffer before removing the driver.
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(wr_buf);
+
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+/* Module metadata. */
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
new file mode 100755
index 0000000..c813e3e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
@@ -0,0 +1,673 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/* sysfs attributes exposed on the core input device's kobject. */
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+/* F12 query register 5: one presence bit per control register 0-23.
+ * Summing the bits below a given control register yields its offset
+ * from the F12 control base (see prox_reg_init()).
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];	/* raw register image */
+	};
+};
+
+/* F12 query register 8: one presence bit per data register 0-7, used to
+ * locate the hover finger data register (see prox_reg_init()).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];	/* raw register image */
+	};
+};
+
+/* F12 hover finger data block: status byte, 16-bit X/Y (LSB first) and
+ * an 8-bit Z value.
+ */
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];	/* raw register image */
+	};
+};
+
+/* Per-device state for the proximity (hover finger) module; a single
+ * instance is held in the file-scope "prox" pointer.
+ */
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;	/* finger currently reported */
+	bool hover_finger_en;		/* desired reporting state */
+	unsigned char intr_mask;	/* F12 interrupt bits owned here */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;	/* F12 ctrl23 */
+	unsigned short hover_finger_data_addr;	/* F12 data1 */
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+/* Report release of the hovering finger and clear the presence flag. */
+static void prox_hover_finger_lift(void)
+{
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 0);
+	input_sync(prox->prox_dev);
+	prox->hover_finger_present = false;
+}
+
+/*
+ * Read the F12 hover finger data block and forward it to the proximity
+ * input device. If the status byte no longer indicates a hovering
+ * finger, a lift event is reported instead (only if one was present).
+ * The reported ABS_DISTANCE is HOVER_Z_MAX minus the raw Z value.
+ */
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
+	/* Assemble 16-bit coordinates from LSB/MSB pairs. */
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	z = HOVER_Z_MAX - data->z;
+
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+}
+
+/*
+ * Push prox->hover_finger_en to the device via a read-modify-write of
+ * the HOVERING_FINGER_EN bit in the F12 object report enable register.
+ * Returns 0 on success or a negative errno from register I/O.
+ */
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char object_report_enable;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		object_report_enable |= HOVERING_FINGER_EN;
+	else
+		object_report_enable &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Set the input device's axis ranges from the sensor dimensions;
+ * distance spans 0..HOVER_Z_MAX.
+ */
+static void prox_set_params(void)
+{
+	input_set_abs_params(prox->prox_dev, ABS_X, 0,
+			prox->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_Y, 0,
+			prox->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_DISTANCE, 0,
+			HOVER_Z_MAX, 0, 0);
+}
+
+/*
+ * Derive the F12 register addresses this module needs.
+ * The offset of control register 23 equals the number of lower-numbered
+ * control registers present (sum of the query 5 presence bits); likewise
+ * data register 1 sits right after data 0 when data 0 is present
+ * (query 8). Returns 0/positive on success, negative errno on I/O error.
+ */
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Count presence bits of ctrl0..ctrl22 to locate ctrl23. */
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table looking for F12 (the touch/hover
+ * function). On success, latch the page-qualified F12 base addresses,
+ * derive this function's interrupt mask from the number of interrupt
+ * sources seen so far, and merge it into the F01 interrupt enable
+ * register. Returns 0 on success, -EINVAL if F12 is absent, or a
+ * negative errno from register I/O.
+ */
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries are scanned downward from PDT_START. */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);	/* qualify with current page */
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);	/* strip page bits */
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* fn_number == 0 ends the PDT on this page */
+				break;
+			}
+
+			/* Count interrupt sources of functions before F12;
+			 * the goto above skips F12's own sources on purpose.
+			 */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build F12's interrupt mask: its sources start at bit intr_off. */
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* sysfs read: current hover finger enable state (0 or 1). */
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!prox)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			prox->hover_finger_en);
+}
+
+/*
+ * sysfs write: enable (1) or disable (0) hover finger reporting and
+ * push the new setting to the device. Input is parsed as hex to match
+ * the original interface. Returns count on success, negative errno on
+ * failure.
+ */
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	/* Check the handle before dereferencing it; the original read
+	 * prox->rmi4_data ahead of this NULL test.
+	 */
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * (an sscanf-style check) rejected every valid input.
+	 */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * Exported toggle for hover finger reporting.
+ * Returns -ENODEV when the proximity module is not initialized,
+ * otherwise the (negative) result of pushing the setting to the device,
+ * or 0 on success.
+ */
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	int rc;
+
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	rc = prox_set_hover_finger_en();
+
+	return (rc < 0) ? rc : 0;
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+/* Attention (interrupt) hook: report hover data when one of this
+ * module's interrupt bits is set in the asserted mask.
+ */
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!prox)
+		return;
+
+	if (prox->intr_mask & intr_mask)
+		prox_hover_finger_report();
+}
+
+/*
+ * Create the proximity (hover finger) handle: scan the PDT for F12,
+ * enable hover reporting on the device, then register the proximity
+ * input device and its sysfs attributes.
+ *
+ * Returns 0 on success or a negative errno; every error path releases
+ * whatever was acquired before it.
+ */
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		/* The original returned directly here, leaking prox and
+		 * finger_data and leaving the global handle set.
+		 */
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	/* attr_count is unsigned, so the original "attr_count-- while
+	 * >= 0" loop never terminated and indexed out of bounds; count
+	 * down with an explicit > 0 test instead. Only attributes below
+	 * the failed index were created.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;	/* unregister already freed it */
+
+exit_free_input_device:
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the proximity module: remove the sysfs attributes,
+ * unregister the input device and free the handle. Always signals
+ * prox_remove_complete so module exit can finish.
+ */
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+}
+
+/*
+ * Reset hook: re-discover F12 addresses and re-apply the hover enable
+ * setting after a device reset, or perform first-time init when the
+ * handle does not exist yet.
+ * NOTE(review): errors from prox_scan_pdt()/prox_set_hover_finger_en()
+ * are ignored here — confirm that is intentional.
+ */
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+
+	prox_scan_pdt();
+
+	prox_set_hover_finger_en();
+}
+
+/* Reinit hook: lift any reported finger and re-apply the hover enable
+ * setting (register addresses are assumed unchanged).
+ */
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	prox_set_hover_finger_en();
+}
+
+/* Early-suspend hook: lift any reported hover finger before sleep. */
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+}
+
+/* Suspend hook: lift any reported hover finger before sleep. */
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+}
+
+/* Expansion-function descriptor registered with the DSX core. */
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+/* Register the proximity expansion function with the DSX core. */
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+
+	return 0;
+}
+
+/* Unregister from the DSX core and wait for the remove callback to run. */
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+
+	wait_for_completion(&prox_remove_complete);
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
new file mode 100755
index 0000000..8243e98
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1074 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+#define RMIDEV_MAJOR_NUM 0
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/* State for the rmidev char-device/sysfs interface; a single instance
+ * is held in the file-scope "rmidev" pointer.
+ */
+struct rmidev_handle {
+	dev_t dev_no;
+	pid_t pid;			/* user-space logging tool PID */
+	unsigned char intr_mask;
+	unsigned char *tmpbuf;		/* grown on demand, see
+					 * rmidev_allocate_buffer() */
+	unsigned int tmpbuf_size;
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+	struct siginfo interrupt_signal;
+	struct siginfo terminate_signal;
+	struct task_struct *task;	/* resolved from pid */
+	void *data;
+	bool concurrent;		/* keep touch reporting active */
+};
+
+/* Per-open char-device bookkeeping attached to filp->private_data. */
+struct rmidev_data {
+	int ref_count;
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;	/* serializes file operations */
+	struct rmidev_handle *rmi_dev;
+};
+
+/* Binary sysfs attribute giving raw register read/write access. */
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+/* sysfs attributes for the rmidev control interface. */
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			synaptics_rmi4_store_error),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_term_store),
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+static int rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+DECLARE_COMPLETION(rmidev_remove_complete);
+
+/* Threaded IRQ handler for the sysfs interface: wake any poll/notify
+ * waiter on the "attn_state" node.
+ */
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Attach or detach the sysfs attention IRQ handler.
+ * Enabling: read (and thereby clear) the F01 interrupt status registers
+ * first, then install a threaded IRQ that only notifies "attn_state".
+ * Disabling: disable and free the IRQ if it was enabled.
+ * Serialized by rmi4_irq_enable_mutex; returns 0 or a negative errno.
+ */
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			goto exit;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+/*
+ * sysfs bin read: read "count" bytes starting at RMI register "pos"
+ * into buf. In concurrent mode, if the read covered the F01 interrupt
+ * status register, dispatch touch reporting for each handler whose
+ * interrupt bit is set so normal touch processing continues alongside
+ * raw register access. Returns bytes read or a negative errno.
+ */
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	/* buf[1] holds the first interrupt status byte just read. */
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+/*
+ * sysfs bin write: write "count" bytes from buf to RMI register "pos".
+ * Rejects zero-length writes and spans past REG_ADDR_LIMIT.
+ * Returns bytes written or a negative errno.
+ */
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return length;
+}
+
+/*
+ * sysfs "open": hand the attention interrupt over to the sysfs
+ * interface. Disables the core driver's IRQ handling and installs the
+ * rmidev notification handler; refused while the sensor sleeps.
+ */
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * (an sscanf-style check) rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	rmidev_sysfs_irq_enable(rmi4_data, true);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+/*
+ * sysfs "release": give the attention interrupt back to the core driver
+ * and reset the device to restore normal operation.
+ */
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+/* sysfs read: current raw level of the attention GPIO. */
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int attn_state;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	attn_state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", attn_state);
+}
+
+/* sysfs read: PID of the registered user-space logging tool (0 if none). */
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+/*
+ * sysfs write: register the PID of the user-space logging tool and
+ * resolve its task_struct so signals can be delivered to it later.
+ * Writing 0 clears the registration.
+ */
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * sysfs write: send SIGTERM to the registered logging tool (writes of
+ * anything other than 1 are rejected; no-op when no PID registered).
+ */
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+/* sysfs read: current interrupt mask used for user-space notification. */
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+/* sysfs write: set the interrupt mask (decimal input, truncated to 8 bits). */
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+/* sysfs read: whether touch reporting stays active during raw access. */
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+/* sysfs write: enable (>0) or disable (0) concurrent touch reporting. */
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; the original "!= 1" test
+	 * rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+/*
+ * Ensure rmidev->tmpbuf can hold count bytes plus one; grows the buffer
+ * when too small (previous contents are discarded).
+ * Returns 0 on success or -ENOMEM (tmpbuf_size reset to 0).
+ */
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	/* Register addresses are 16-bit; reject anything outside 0..0xFFFF. */
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	/* newpos doubles as the error code on failure. */
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto clean_up;
+	}
+
+	/* Clamp the read so it does not run past the register space */
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto clean_up;
+	}
+	address = (unsigned short)(*f_pos);
+
+	/* Bug fix: the allocation result was ignored; on OOM tmpbuf is
+	 * NULL and the register read below would write through it.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto clean_up;
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	/* Concurrent mode: when userspace read the F01 data base
+	 * (which includes the interrupt status byte), forward any
+	 * pending touch interrupts to the normal reporting path so
+	 * input events are not lost while the IRQ is disabled.
+	 */
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	/* Clamp the write so it does not run past the register space */
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto unlock;
+	}
+
+	/* Bug fix: the allocation result was ignored; on OOM tmpbuf is
+	 * NULL and copy_from_user() below would write through it.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto unlock;
+
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+unlock:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/* Disable the attention interrupt so userspace has exclusive
+	 * access to the RMI registers while the node is open.
+	 */
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	/* Single-open policy: only one userspace client at a time.
+	 * NOTE(review): on the -EACCES path the interrupt has already
+	 * been disabled and stay_awake left set -- confirm whether a
+	 * rejected second open should undo those side effects.
+	 */
+	if (dev_data->ref_count < 1)
+		dev_data->ref_count++;
+	else
+		retval = -EACCES;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	/* Reset the device to restore normal interrupt-driven
+	 * operation after direct register access from userspace.
+	 */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return 0;
+}
+
+/* File operations for the rmidev character device node */
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+/* Tear down the char device created by rmidev_init_device().
+ * NOTE(review): this also calls unregister_chrdev_region(); callers
+ * on error/remove paths must not unregister the same region again --
+ * verify against rmidev_init_device()/rmidev_remove_device().
+ */
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	dev_t devno;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (dev_data) {
+		devno = dev_data->main_dev.dev;
+
+		if (dev_data->device_class)
+			device_destroy(dev_data->device_class, devno);
+
+		cdev_del(&dev_data->main_dev);
+
+		unregister_chrdev_region(devno, 1);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: rmidev device removed\n",
+				__func__);
+	}
+}
+
+/* devnode callback: place device nodes under /dev/rmi/ with the
+ * requested mode.
+ * NOTE(review): 0666 makes the raw register interface world-writable;
+ * confirm this is intended outside of engineering builds.
+ */
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+/*
+ * rmidev_create_device_class - create (once) and cache the device
+ * class used for the rmidev node.  Returns 0 on success or -ENODEV.
+ */
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		/* Bug fix: clear the cached ERR_PTR so a retry does not
+		 * take the fast path above and report success with an
+		 * invalid class pointer.
+		 */
+		rmidev_device_class = NULL;
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+/* Attention relay: forward touch controller interrupts that match the
+ * userspace-configured mask as SIGIO to the registered task.
+ */
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev)
+		return;
+
+	if (!rmidev->pid || !(rmidev->intr_mask & intr_mask))
+		return;
+
+	send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+}
+
+/*
+ * rmidev_init_device - set up the rmidev char device and sysfs nodes
+ *
+ * Allocates the module-global rmidev handle, registers a char device
+ * region, creates the device node and sysfs attributes, and exports
+ * the attention GPIO.  Returns 0 on success or a negative errno.
+ */
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	/* Pre-build the siginfo templates used by rmidev_attn() */
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	/* GPIO export failures are logged but not fatal */
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	/* Bug fix: attr_count is unsigned, so the original
+	 * "for (attr_count--; attr_count >= 0; attr_count--)" loop
+	 * condition was always true -- it wrapped around and indexed
+	 * far outside attrs[].  Remove only the attributes that were
+	 * actually created.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+err_char_device:
+	/* NOTE(review): rmidev_device_cleanup() already calls
+	 * unregister_chrdev_region(); falling through to err_dev_data
+	 * unregisters the same region a second time -- verify.
+	 */
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	/* Reset the cached major number to its build-time value */
+	rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	/* NOTE(review): rmidev_device_cleanup() already unregisters
+	 * the chrdev region, and rmidev->dev_no is never assigned in
+	 * the visible code -- verify this second unregister.
+	 */
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	complete(&rmidev_remove_complete);
+}
+
+/* Registration record handed to the core driver: hooks rmidev into
+ * the expansion-function framework (init/remove plus attention relay).
+ */
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+static int __init rmidev_module_init(void)
+{
+	/* Register this expansion module with the core driver */
+	synaptics_rmi4_new_function(&rmidev_module, true);
+
+	return 0;
+}
+
+static void __exit rmidev_module_exit(void)
+{
+	/* Unregister, then wait for rmidev_remove_device() to finish */
+	synaptics_rmi4_new_function(&rmidev_module, false);
+
+	wait_for_completion(&rmidev_remove_complete);
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
new file mode 100755
index 0000000..8f6aff0
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,989 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+/* Default HID report IDs (overridden by the report descriptor) */
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+/* HID report descriptor item prefixes recognized by the parser */
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+/* Usages (within the vendor-defined page) mapped to report IDs */
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+#define FEATURE_REPORT_TYPE 0x03
+
+#define VENDOR_DEFINED_PAGE 0xff00
+
+#define BLOB_REPORT_SIZE 256
+
+/* HID over I2C command opcodes */
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+/* Device reporting modes */
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
+/* Report IDs and blob size discovered from the HID report descriptor */
+struct hid_report_info {
+	unsigned char get_blob_id;
+	unsigned char write_id;
+	unsigned char read_addr_id;
+	unsigned char read_data_id;
+	unsigned char set_mode_id;
+	unsigned int blob_size;
+};
+
+static struct hid_report_info hid_report;
+
+/* HID device descriptor as read from the device over I2C */
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+/* Lazily grown scratch buffers shared by all I2C transfers; access is
+ * serialized by the callers via rmi4_io_ctrl_mutex.
+ */
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned int read_size;
+	unsigned int write_size;
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt - populate board data from device tree properties
+ *
+ * Optional properties fall back to defaults; missing required
+ * sub-properties of present optional nodes cause an error return.
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_on_state = value;
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_delay_ms = value;
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_on_state = value;
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_active_ms = value;
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_delay_ms = value;
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,dev-dscrptr-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,dev-dscrptr-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,dev-dscrptr-addr property\n",
+					__func__);
+			return retval;
+		}
+		bdata->device_descriptor_addr = (unsigned short)value;
+	} else {
+		bdata->device_descriptor_addr = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		}
+		bdata->max_y_for_2d = value;
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/* Bug fix: "prop > 0" is an ordered comparison of a pointer
+	 * against zero, which is undefined behavior; boolean flags are
+	 * simply the presence of the property.
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		}
+		bdata->ub_i2c_addr = (unsigned short)value;
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* Each virtual button is described by 5 u32 cells */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/* Issue one i2c message, retrying up to SYN_I2C_RETRY_TIMES with a
+ * 20 ms pause between attempts.  Returns 0 on success, -EIO after the
+ * retry budget is exhausted.
+ */
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char attempt = 0;
+
+	while (attempt < SYN_I2C_RETRY_TIMES) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			return 0;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, attempt + 1);
+		msleep(20);
+		attempt++;
+	}
+
+	dev_err(&client->dev,
+			"%s: I2C transfer over retry limit\n",
+			__func__);
+	return -EIO;
+}
+
+/*
+ * check_buffer - grow *buffer to at least length bytes (zero-filled).
+ * Returns 0 on success or -ENOMEM.
+ */
+static int check_buffer(unsigned char **buffer, unsigned int *buffer_size,
+		unsigned int length)
+{
+	if (*buffer_size < length) {
+		/* kfree(NULL) is a no-op, so no size guard is needed */
+		kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer)) {
+			/* Bug fix: keep the recorded size consistent
+			 * with the (now NULL) buffer; otherwise a later
+			 * call with a smaller length would skip the
+			 * allocation and callers would dereference NULL.
+			 */
+			*buffer_size = 0;
+			return -ENOMEM;
+		}
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+/*
+ * generic_read - read length bytes from the device into the shared
+ * read buffer.  Returns 0 on success or a negative errno.
+ */
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	/* Bug fix: a failed (re)allocation previously went unnoticed
+	 * and left msg[0].buf NULL; propagate -ENOMEM instead.
+	 */
+	retval = check_buffer(&buffer.read, &buffer.read_size, length);
+	if (retval < 0)
+		return retval;
+
+	msg[0].buf = buffer.read;
+
+	return do_i2c_transfer(client, msg);
+}
+
+/* Write length bytes from the shared write buffer to the device.
+ * The caller is responsible for sizing buffer.write beforehand.
+ */
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	return do_i2c_transfer(client, msg);
+}
+
+/* Advance *index past one HID report descriptor short item.  The low
+ * two bits of the prefix byte encode the payload size (0, 1, 2, or 4
+ * bytes); skip the prefix byte plus that many payload bytes.
+ */
+static void traverse_report_descriptor(unsigned int *index)
+{
+	static const unsigned char item_advance[] = {1, 2, 3, 5};
+	unsigned char size = buffer.read[*index] & MASK_2BIT;
+
+	*index += item_advance[size];
+}
+
+static void find_blob_size(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+
+	/* Scan forward from the Report ID item for the first Report
+	 * Count item; its value is taken as the blob payload size.
+	 */
+	while (ii < hid_dd.report_descriptor_length) {
+		if (buf[ii] == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = buf[ii + 1];
+			return;
+		} else if (buf[ii] == PREFIX_REPORT_COUNT_2BYTES) {
+			/* Two-byte count is little-endian */
+			hid_report.blob_size = buf[ii + 1] | (buf[ii + 2] << 8);
+			return;
+		}
+		traverse_report_descriptor(&ii);
+	}
+}
+
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	/* Parser state carried across successive calls while walking a
+	 * single descriptor: the most recent Report ID item (and its
+	 * position) and the current Usage Page.
+	 */
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	/* Usages inside the vendor-defined page select which report ID
+	 * performs each RMI-over-HID operation.
+	 */
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * parse_report_descriptor - fetch the HID report descriptor and walk
+ * it to discover the report IDs used for RMI access.  Returns 0 on
+ * success or a negative errno.
+ */
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	/* NOTE(review): assumes the caller has already sized
+	 * buffer.write to at least 2 bytes (hid_i2c_init does) --
+	 * confirm for any new callers.
+	 */
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	/* Defaults, in case the descriptor does not define them */
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+/*
+ * switch_to_rmi - send the SET_REPORT feature command that puts the
+ * device into RMI (register access) mode.  Returns 0 on success or a
+ * negative errno.
+ */
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Bug fix: a failed allocation previously went unnoticed and
+	 * the writes below would dereference a NULL buffer.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 11);
+	if (retval < 0)
+		goto exit;
+
+	/* set rmi mode */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04;
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * check_report_mode - query the device's current reporting mode via a
+ * GET_REPORT feature command.  Returns the mode byte on success or a
+ * negative errno.
+ */
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Bug fix: a failed allocation previously went unnoticed and
+	 * the writes below would dereference a NULL buffer.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 7);
+	if (retval < 0)
+		goto exit;
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	/* First read returns the report size (little-endian) */
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	/* Re-issue the command and read the full report */
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * hid_i2c_init - bring up the HID over I2C interface: read the device
+ * descriptor, parse the report descriptor, power on and reset the
+ * device, drain the blob report, then switch to RMI mode.  Returns 0
+ * on success or a negative errno.
+ */
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Bug fix: a failed allocation previously went unnoticed and
+	 * the writes below would dereference a NULL buffer.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 6);
+	if (retval < 0)
+		goto exit;
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* NOTE(review): unbounded wait for the attention line to drop
+	 * after reset -- confirm whether a timeout is needed here.
+	 */
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_read - read RMI register data through the HID/I2C
+ * protocol layer.
+ *
+ * @rmi4_data: driver context
+ * @addr: 16-bit RMI register address to read from
+ * @data: caller-supplied buffer receiving @length bytes
+ * @length: number of bytes to read
+ *
+ * Sends a HID output report carrying a "read address" command, then polls
+ * (up to SYN_I2C_RETRY_TIMES, 20 ms apart) for an input report whose
+ * length field matches the device descriptor's input_report_max_length
+ * and copies the payload following the 4-byte report header into @data.
+ * On failure, one recovery attempt re-runs the HID/I2C init sequence
+ * unless the device already reports RMI mode.
+ *
+ * Returns the number of bytes read on success, or a negative errno.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	/* recover == 1 means one re-init attempt is still available */
+	unsigned char recover = 1;
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			/* payload is preceded by a 4-byte report header */
+			.len = (unsigned short)(length + 4),
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Compose the output report: output register index, total report
+	 * length, read-address report ID, target address and read length.
+	 */
+	check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+
+	check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		/* First two bytes of the input report carry its length;
+		 * a full-length report holds the requested data.
+		 */
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Short or failed read: try one HID/I2C re-init, then retry once */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_write - write RMI register data through the HID/I2C
+ * protocol layer.
+ *
+ * @rmi4_data: driver context
+ * @addr: 16-bit RMI register address to write to
+ * @data: bytes to write
+ * @length: number of bytes to write
+ *
+ * Wraps @data in a HID output report with a 10-byte header (output
+ * register index, report length, write report ID, target address and
+ * payload length) and sends everything in a single I2C transfer.  The
+ * message is padded up to the descriptor's output_report_max_length when
+ * the payload is smaller.  On failure, one recovery attempt re-runs the
+ * HID/I2C init sequence unless the device already reports RMI mode.
+ *
+ * Returns the number of bytes written on success, or a negative errno.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	/* recover == 1 means one re-init attempt is still available */
+	unsigned char recover = 1;
+	unsigned int msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	/* Pad small payloads up to the minimum output report size */
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = (unsigned short)msg_length;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Short or failed write: try one HID/I2C re-init, then retry once */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/* Bus access methods exposed to the DSX core (HID/I2C transport) */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Hardware interface handed to the DSX core through platform data */
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+/*
+ * Release callback for the manually allocated platform device; runs when
+ * the last device reference is dropped (e.g. on unregister).
+ */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+}
+
+/*
+ * synaptics_rmi4_i2c_probe - set up the HID/I2C transport and register
+ * the DSX core platform device.
+ *
+ * Allocates the platform device, gathers board data (from the device
+ * tree when CONFIG_OF is set, otherwise from platform data), wires up
+ * the bus access methods and HID/I2C init hooks, then registers the
+ * platform device the DSX core binds to.
+ *
+ * Fixes vs. original: 'synaptics_dsx_i2c_device' was leaked on every
+ * error path after its allocation, the parse_dt() return value was
+ * ignored, and a failed platform_device_register() needs
+ * platform_device_put() (the release callback frees the structure).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return -ENOMEM;
+		}
+		/* Propagate DT parse failures instead of ignoring them */
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0) {
+			kfree(synaptics_dsx_i2c_device);
+			synaptics_dsx_i2c_device = NULL;
+			return retval;
+		}
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* device_initialize() has already run; drop the reference
+		 * so the release callback frees the structure.
+		 */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_i2c_remove - free the shared HID/I2C transfer buffers
+ * and unregister the platform device created in probe (the device's
+ * release callback frees its structure).
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	if (buffer.read_size)
+		kfree(buffer.read);
+
+	if (buffer.write_size)
+		kfree(buffer.write);
+
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* Legacy (non-DT) I2C device ID match table */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/*
+ * synaptics_rmi4_bus_init - register the I2C transport driver; called by
+ * the DSX core module.
+ */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/*
+ * synaptics_rmi4_bus_exit - unregister the I2C transport driver.
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
new file mode 100755
index 0000000..6382460
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
@@ -0,0 +1,698 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+static unsigned char *buf;
+
+static struct spi_transfer *xfer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt - populate SPI transport board data from the device tree.
+ * @dev: SPI device whose of_node is parsed
+ * @bdata: board data structure to fill in
+ *
+ * Optional properties fall back to defaults (0 or -1); a present but
+ * unreadable sub-property is treated as an error.
+ *
+ * Fix vs. original: the boolean flags used 'prop > 0', an ordered
+ * comparison between a pointer and zero (undefined behavior, warned by
+ * GCC as "ordered comparison of pointer with integer zero"); a plain
+ * NULL test expresses the intent correctly.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_on_state = value;
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->power_delay_ms = value;
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_on_state = value;
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_active_ms = value;
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		}
+		bdata->reset_delay_ms = value;
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		}
+		bdata->byte_delay_us = value;
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		}
+		bdata->block_delay_us = value;
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,address-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,address-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,address-delay-us property\n",
+					__func__);
+			return retval;
+		}
+		bdata->addr_delay_us = value;
+	} else {
+		bdata->addr_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		}
+		bdata->max_y_for_2d = value;
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/* Presence of the property enables the flag (NULL test, not an
+	 * ordered pointer comparison)
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		}
+		bdata->ub_i2c_addr = (unsigned short)value;
+	} else {
+		/* -1 wraps to 0xFFFF; presumably an "unset" sentinel */
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* Each virtual button is described by 5 u32 cells */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * synaptics_rmi4_spi_alloc_buf - grow the shared SPI scratch buffers.
+ * @rmi4_data: driver context (used for error logging only)
+ * @size: minimum byte capacity required for the data buffer 'buf'
+ * @count: minimum number of spi_transfer entries required in 'xfer'
+ *
+ * Current capacities are cached in function-static variables so the
+ * buffers are only reallocated when a larger request arrives; a transfer
+ * array that is already large enough is zeroed for reuse instead.  Both
+ * buffers are freed in synaptics_rmi4_bus_exit().
+ *
+ * Returns 0 on success or -ENOMEM.
+ */
+static int synaptics_rmi4_spi_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int size, unsigned int count)
+{
+	static unsigned int buf_size;
+	static unsigned int xfer_count;
+
+	if (size > buf_size) {
+		if (buf_size)
+			kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buf\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	if (count > xfer_count) {
+		if (xfer_count)
+			kfree(xfer);
+		xfer = kcalloc(count, sizeof(struct spi_transfer), GFP_KERNEL);
+		if (!xfer) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for xfer\n",
+					__func__);
+			xfer_count = 0;
+			return -ENOMEM;
+		}
+		xfer_count = count;
+	} else {
+		/* Reuse the existing array; clear stale transfer state */
+		memset(xfer, 0, count * sizeof(struct spi_transfer));
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_spi_set_page - issue an RMI page select over SPI.
+ * @rmi4_data: driver context
+ * @addr: full 16-bit register address whose high byte is the page
+ *
+ * Writes the 3-byte sequence {write opcode, 0xFF (page select register),
+ * page}, honoring the board's per-byte/per-block delay settings.
+ *
+ * NOTE(review): only bit 7 of the page is compared with the cached
+ * current_page before skipping the select - confirm this matches the
+ * device's paging scheme.
+ *
+ * Returns PAGE_SELECT_LEN on success (including when the select is
+ * skipped) or a negative errno from spi_sync().
+ */
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = PAGE_SELECT_LEN + 1;
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	if ((page >> 7) == (rmi4_data->current_page >> 7))
+		return PAGE_SELECT_LEN;
+
+	spi_message_init(&msg);
+
+	retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+			byte_count);
+	if (retval < 0)
+		return retval;
+
+	/* {write opcode, page-select register 0xFF, page number} */
+	buf[0] = SPI_WRITE;
+	buf[1] = MASK_8BIT;
+	buf[2] = page;
+
+	if (bdata->byte_delay_us == 0) {
+		/* Single transfer covering the whole command */
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		/* One transfer per byte so inter-byte delays apply */
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		rmi4_data->current_page = page;
+		retval = PAGE_SELECT_LEN;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_spi_read - read RMI register data over SPI.
+ * @rmi4_data: driver context
+ * @addr: 16-bit RMI register address to read from
+ * @data: caller-supplied buffer receiving @length bytes
+ * @length: number of bytes to read
+ *
+ * Selects the target page, then clocks out the 2-byte address (read bit
+ * set) and clocks in @length bytes - either as two transfers (no
+ * inter-byte delay configured) or as one transfer per byte when
+ * byte_delay_us is set.  spi_sync() is synchronous, so the stack-local
+ * txbuf remains valid for the transfer's duration.
+ *
+ * Returns the number of bytes read on success, or a negative errno.
+ */
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	unsigned char txbuf[ADDRESS_LEN];
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	/* Two transfers (addr + data) vs one transfer per byte */
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				2);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = ADDRESS_LEN;
+		xfer[0].tx_buf = &txbuf[0];
+		spi_message_add_tail(&xfer[0], &msg);
+		xfer[1].len = length;
+		xfer[1].rx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[1].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			if (index < ADDRESS_LEN)
+				xfer[index].tx_buf = &txbuf[index];
+			else
+				xfer[index].rx_buf = &buf[index - ADDRESS_LEN];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = secure_memcpy(data, length, buf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_spi_write - write RMI register data over SPI.
+ * @rmi4_data: driver context
+ * @addr: 16-bit RMI register address to write to
+ * @data: bytes to write
+ * @length: number of bytes to write
+ *
+ * Selects the target page, stages the 2-byte address (read bit clear)
+ * plus payload in the shared 'buf', then sends everything as one
+ * transfer (no inter-byte delay configured) or one transfer per byte
+ * when byte_delay_us is set.
+ *
+ * Returns the number of bytes written on success, or a negative errno.
+ */
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	/* One transfer vs one transfer per byte */
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				1);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	/* Address with the read bit cleared, then the payload */
+	buf[0] = (addr >> 8) & ~SPI_READ;
+	buf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&buf[ADDRESS_LEN],
+			byte_count - ADDRESS_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(xfer, &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* Bus access methods exposed to the DSX core (SPI transport) */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+/* Hardware interface handed to the DSX core through platform data */
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_spi_device;
+
+/*
+ * Release callback for the manually allocated platform device; runs when
+ * the last device reference is dropped (e.g. on unregister).
+ */
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+}
+
+/*
+ * synaptics_rmi4_spi_probe - set up the SPI transport and register the
+ * DSX core platform device.
+ *
+ * Allocates the platform device, gathers board data (from the device
+ * tree when CONFIG_OF is set, otherwise from platform data), configures
+ * the SPI mode/word size, then registers the platform device the DSX
+ * core binds to.
+ *
+ * Fixes vs. original: 'synaptics_dsx_spi_device' was leaked on every
+ * error path after its allocation (including spi_setup() failure), the
+ * parse_dt() return value was ignored, and a failed
+ * platform_device_register() needs platform_device_put() (the release
+ * callback frees the structure).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		/* Propagate DT parse failures instead of ignoring them */
+		retval = parse_dt(&spi->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		goto err_free_device;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* device_initialize() has already run; drop the reference
+		 * so the release callback frees the structure.
+		 */
+		platform_device_put(synaptics_dsx_spi_device);
+		synaptics_dsx_spi_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	/* Not yet registered with the device core; plain kfree is safe */
+	kfree(synaptics_dsx_spi_device);
+	synaptics_dsx_spi_device = NULL;
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_spi_remove - unregister the platform device created in
+ * probe (its release callback frees the structure); the shared scratch
+ * buffers 'buf'/'xfer' are freed in synaptics_rmi4_bus_exit().
+ */
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+/* Legacy (non-DT) SPI device ID match table */
+static const struct spi_device_id synaptics_rmi4_id_table[] = {
+	{SPI_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(spi, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/*
+ * synaptics_rmi4_bus_init - register the SPI transport driver; called by
+ * the DSX core module.
+ */
+int synaptics_rmi4_bus_init(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/*
+ * synaptics_rmi4_bus_exit - free the shared scratch buffers (kfree(NULL)
+ * is a no-op) and unregister the SPI transport driver.
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(buf);
+
+	kfree(xfer);
+
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
new file mode 100755
index 0000000..c17b692
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -0,0 +1,5324 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "f54"
+
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_70_73_SIZE 6
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_150_SIZE 1
+#define CONTROL_151_SIZE 1
+#define CONTROL_152_SIZE 1
+#define CONTROL_153_SIZE 1
+#define CONTROL_154_SIZE 1
+#define CONTROL_155_SIZE 1
+#define CONTROL_156_SIZE 1
+#define CONTROL_157_158_SIZE 2
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_166_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_168_SIZE 1
+#define CONTROL_169_SIZE 1
+#define CONTROL_171_SIZE 1
+#define CONTROL_172_SIZE 1
+#define CONTROL_173_SIZE 1
+#define CONTROL_174_SIZE 1
+#define CONTROL_175_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_177_178_SIZE 2
+#define CONTROL_179_SIZE 1
+#define CONTROL_182_SIZE 1
+#define CONTROL_183_SIZE 1
+#define CONTROL_185_SIZE 1
+#define CONTROL_186_SIZE 1
+#define CONTROL_187_SIZE 1
+#define CONTROL_188_SIZE 1
+
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+#define attrify(propname) (&dev_attr_##propname.attr)
+
+#define show_prototype(propname)\
+static ssize_t propname##_show(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR_RO(propname)
+
+#define store_prototype(propname)\
+static ssize_t propname##_store(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR_WO(propname)
+
+#define show_store_prototype(propname)\
+static ssize_t propname##_show(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t propname##_store(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR_RW(propname)
+
+/*
+ * Read-modify-write helper that clears the CBC TX carrier selection bit
+ * of the given F54 control register, disabling charge balance
+ * compensation for that register.
+ *
+ * NOTE: relies on 'retval', 'rmi4_data' and 'f54' being in the caller's
+ * scope, and executes 'return retval' from the CALLING function on any
+ * register access failure — only safe inside int-returning functions.
+ */
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
+/*
+ * Numeric report type codes written with COMMAND_GET_REPORT to select
+ * which F54 diagnostic image/test result the controller should produce.
+ */
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	F54_FULL_RAW_CAP_TDDI = 92,
+	INVALID_REPORT_TYPE = -1,
+};
+
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+struct f54_query_13 {
+	union {
+		struct {
+			unsigned char has_ctrl86:1;
+			unsigned char has_ctrl87:1;
+			unsigned char has_ctrl87_sub0:1;
+			unsigned char has_ctrl87_sub1:1;
+			unsigned char has_ctrl87_sub2:1;
+			unsigned char has_cidim:1;
+			unsigned char has_noise_mitigation_enhancement:1;
+			unsigned char has_rail_im:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_15 {
+	union {
+		struct {
+			unsigned char has_ctrl90:1;
+			unsigned char has_transmit_strength:1;
+			unsigned char has_ctrl87_sub3:1;
+			unsigned char has_query16:1;
+			unsigned char has_query20:1;
+			unsigned char has_query21:1;
+			unsigned char has_query22:1;
+			unsigned char has_query25:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_16 {
+	union {
+		struct {
+			unsigned char has_query17:1;
+			unsigned char has_data17:1;
+			unsigned char has_ctrl92:1;
+			unsigned char has_ctrl93:1;
+			unsigned char has_ctrl94_query18:1;
+			unsigned char has_ctrl95_query19:1;
+			unsigned char has_ctrl99:1;
+			unsigned char has_ctrl100:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_21 {
+	union {
+		struct {
+			unsigned char has_abs_rx:1;
+			unsigned char has_abs_tx:1;
+			unsigned char has_ctrl91:1;
+			unsigned char has_ctrl96:1;
+			unsigned char has_ctrl97:1;
+			unsigned char has_ctrl98:1;
+			unsigned char has_data19:1;
+			unsigned char has_query24_data18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_22 {
+	union {
+		struct {
+			unsigned char has_packed_image:1;
+			unsigned char has_ctrl101:1;
+			unsigned char has_dynamic_sense_display_ratio:1;
+			unsigned char has_query23:1;
+			unsigned char has_ctrl103_query26:1;
+			unsigned char has_ctrl104:1;
+			unsigned char has_ctrl105:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_23 {
+	union {
+		struct {
+			unsigned char has_ctrl102:1;
+			unsigned char has_ctrl102_sub1:1;
+			unsigned char has_ctrl102_sub2:1;
+			unsigned char has_ctrl102_sub4:1;
+			unsigned char has_ctrl102_sub5:1;
+			unsigned char has_ctrl102_sub9:1;
+			unsigned char has_ctrl102_sub10:1;
+			unsigned char has_ctrl102_sub11:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_25 {
+	union {
+		struct {
+			unsigned char has_ctrl106:1;
+			unsigned char has_ctrl102_sub12:1;
+			unsigned char has_ctrl107:1;
+			unsigned char has_ctrl108:1;
+			unsigned char has_ctrl109:1;
+			unsigned char has_data20:1;
+			unsigned char f54_query25_b6:1;
+			unsigned char has_query27:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_27 {
+	union {
+		struct {
+			unsigned char has_ctrl110:1;
+			unsigned char has_data21:1;
+			unsigned char has_ctrl111:1;
+			unsigned char has_ctrl112:1;
+			unsigned char has_ctrl113:1;
+			unsigned char has_data22:1;
+			unsigned char has_ctrl114:1;
+			unsigned char has_query29:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_29 {
+	union {
+		struct {
+			unsigned char has_ctrl115:1;
+			unsigned char has_ground_ring_options:1;
+			unsigned char has_lost_bursts_tuning:1;
+			unsigned char has_aux_exvcom2_select:1;
+			unsigned char has_ctrl116:1;
+			unsigned char has_data23:1;
+			unsigned char has_ctrl117:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl118:1;
+			unsigned char has_ctrl119:1;
+			unsigned char has_ctrl120:1;
+			unsigned char has_ctrl121:1;
+			unsigned char has_ctrl122_query31:1;
+			unsigned char has_ctrl123:1;
+			unsigned char has_ctrl124:1;
+			unsigned char has_query32:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_32 {
+	union {
+		struct {
+			unsigned char has_ctrl125:1;
+			unsigned char has_ctrl126:1;
+			unsigned char has_ctrl127:1;
+			unsigned char has_abs_charge_pump_disable:1;
+			unsigned char has_query33:1;
+			unsigned char has_data24:1;
+			unsigned char has_query34:1;
+			unsigned char has_query35:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_33 {
+	union {
+		struct {
+			unsigned char has_ctrl128:1;
+			unsigned char has_ctrl129:1;
+			unsigned char has_ctrl130:1;
+			unsigned char has_ctrl131:1;
+			unsigned char has_ctrl132:1;
+			unsigned char has_ctrl133:1;
+			unsigned char has_ctrl134:1;
+			unsigned char has_query36:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_35 {
+	union {
+		struct {
+			unsigned char has_data25:1;
+			unsigned char has_ctrl135:1;
+			unsigned char has_ctrl136:1;
+			unsigned char has_ctrl137:1;
+			unsigned char has_ctrl138:1;
+			unsigned char has_ctrl139:1;
+			unsigned char has_data26:1;
+			unsigned char has_ctrl140:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_36 {
+	union {
+		struct {
+			unsigned char has_ctrl141:1;
+			unsigned char has_ctrl142:1;
+			unsigned char has_query37:1;
+			unsigned char has_ctrl143:1;
+			unsigned char has_ctrl144:1;
+			unsigned char has_ctrl145:1;
+			unsigned char has_ctrl146:1;
+			unsigned char has_query38:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_38 {
+	union {
+		struct {
+			unsigned char has_ctrl147:1;
+			unsigned char has_ctrl148:1;
+			unsigned char has_ctrl149:1;
+			unsigned char has_ctrl150:1;
+			unsigned char has_ctrl151:1;
+			unsigned char has_ctrl152:1;
+			unsigned char has_ctrl153:1;
+			unsigned char has_query39:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_39 {
+	union {
+		struct {
+			unsigned char has_ctrl154:1;
+			unsigned char has_ctrl155:1;
+			unsigned char has_ctrl156:1;
+			unsigned char has_ctrl160:1;
+			unsigned char has_ctrl157_ctrl158:1;
+			unsigned char f54_query39_b5__6:2;
+			unsigned char has_query40:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_40 {
+	union {
+		struct {
+			unsigned char has_ctrl169:1;
+			unsigned char has_ctrl163_query41:1;
+			unsigned char f54_query40_b2:1;
+			unsigned char has_ctrl165_query42:1;
+			unsigned char has_ctrl166:1;
+			unsigned char has_ctrl167:1;
+			unsigned char has_ctrl168:1;
+			unsigned char has_query43:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_43 {
+	union {
+		struct {
+			unsigned char f54_query43_b0__1:2;
+			unsigned char has_ctrl171:1;
+			unsigned char has_ctrl172_query44_query45:1;
+			unsigned char has_ctrl173:1;
+			unsigned char has_ctrl174:1;
+			unsigned char has_ctrl175:1;
+			unsigned char has_query46:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_46 {
+	union {
+		struct {
+			unsigned char has_ctrl176:1;
+			unsigned char has_ctrl177_ctrl178:1;
+			unsigned char has_ctrl179:1;
+			unsigned char f54_query46_b3:1;
+			unsigned char has_data27:1;
+			unsigned char has_data28:1;
+			unsigned char f54_query46_b6:1;
+			unsigned char has_query47:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_47 {
+	union {
+		struct {
+			unsigned char f54_query47_b0:1;
+			unsigned char has_ctrl182:1;
+			unsigned char has_ctrl183:1;
+			unsigned char f54_query47_b3:1;
+			unsigned char has_ctrl185:1;
+			unsigned char has_ctrl186:1;
+			unsigned char has_ctrl187:1;
+			unsigned char has_query49:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_49 {
+	union {
+		struct {
+			unsigned char f54_query49_b0__1:2;
+			unsigned char has_ctrl188:1;
+			unsigned char has_data31:1;
+			unsigned char f54_query49_b4__6:3;
+			unsigned char has_query50:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_50 {
+	union {
+		struct {
+			unsigned char f54_query50_b0__6:7;
+			unsigned char has_query51:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_51 {
+	union {
+		struct {
+			unsigned char f54_query51_b0__4:5;
+			unsigned char has_query53_query54_ctrl198:1;
+			unsigned char has_ctrl199:1;
+			unsigned char has_query55:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_55 {
+	union {
+		struct {
+			unsigned char has_query56:1;
+			unsigned char has_data33_data34:1;
+			unsigned char has_alt_report_rate:1;
+			unsigned char has_ctrl200:1;
+			unsigned char has_ctrl201_ctrl202:1;
+			unsigned char has_ctrl203:1;
+			unsigned char has_ctrl204:1;
+			unsigned char has_query57:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_57 {
+	union {
+		struct {
+			unsigned char has_ctrl205:1;
+			unsigned char has_ctrl206:1;
+			unsigned char has_usb_bulk_read:1;
+			unsigned char has_ctrl207:1;
+			unsigned char has_ctrl208:1;
+			unsigned char has_ctrl209:1;
+			unsigned char has_ctrl210:1;
+			unsigned char has_query58:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_58 {
+	union {
+		struct {
+			unsigned char has_query59:1;
+			unsigned char has_query60:1;
+			unsigned char has_ctrl211:1;
+			unsigned char has_ctrl212:1;
+			unsigned char has_hybrid_abs_tx_axis_filtering:1;
+			unsigned char has_hybrid_abs_tx_interpolation:1;
+			unsigned char has_ctrl213:1;
+			unsigned char has_query61:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_61 {
+	union {
+		struct {
+			unsigned char has_ctrl214:1;
+			unsigned char has_ctrl215_query62_query63:1;
+			unsigned char f54_query_61_b2:1;
+			unsigned char has_ctrl216:1;
+			unsigned char has_ctrl217:1;
+			unsigned char has_misc_host_ctrl:1;
+			unsigned char hybrid_abs_buttons:1;
+			unsigned char has_query64:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_64 {
+	union {
+		struct {
+			unsigned char has_ctrl101_sub1:1;
+			unsigned char has_ctrl220:1;
+			unsigned char has_ctrl221:1;
+			unsigned char has_ctrl222:1;
+			unsigned char has_ctrl219_sub1:1;
+			unsigned char has_ctrl103_sub3:1;
+			unsigned char has_ctrl224_ctrl226_ctrl227:1;
+			unsigned char has_query65:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_65 {
+	union {
+		struct {
+			unsigned char f54_query_65_b0__1:2;
+			unsigned char has_ctrl101_sub2:1;
+			unsigned char f54_query_65_b3__4:2;
+			unsigned char has_query66_ctrl231:1;
+			unsigned char has_ctrl232:1;
+			unsigned char has_query67:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_67 {
+	union {
+		struct {
+			unsigned char has_abs_doze_spatial_filter_en:1;
+			unsigned char has_abs_doze_avg_filter_enhancement_en:1;
+			unsigned char has_single_display_pulse:1;
+			unsigned char f54_query_67_b3__4:2;
+			unsigned char has_ctrl235_ctrl236:1;
+			unsigned char f54_query_67_b6:1;
+			unsigned char has_query68:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_68 {
+	union {
+		struct {
+			unsigned char f54_query_68_b0:1;
+			unsigned char has_ctrl238:1;
+			unsigned char has_ctrl238_sub1:1;
+			unsigned char has_ctrl238_sub2:1;
+			unsigned char has_ctrl239:1;
+			unsigned char has_freq_filter_bw_ext:1;
+			unsigned char is_tddi_hic:1;
+			unsigned char has_query69:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_69 {
+	union {
+		struct {
+			unsigned char has_ctrl240_sub0:1;
+			unsigned char has_ctrl240_sub1_sub2:1;
+			unsigned char has_ctrl240_sub3:1;
+			unsigned char has_ctrl240_sub4:1;
+			unsigned char f54_query_69_b4__7:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_data_31 {
+	union {
+		struct {
+			unsigned char is_calibration_crc:1;
+			unsigned char calibration_crc:1;
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_7 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl7_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_41 {
+	union {
+		struct {
+			unsigned char no_signal_clarity:1;
+			unsigned char f54_ctrl41_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_57 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl57_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_86 {
+	union {
+		struct {
+			unsigned char enable_high_noise_state:1;
+			unsigned char dynamic_sense_display_ratio:2;
+			unsigned char f54_ctrl86_b3__7:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_88 {
+	union {
+		struct {
+			unsigned char tx_low_reference_polarity:1;
+			unsigned char tx_high_reference_polarity:1;
+			unsigned char abs_low_reference_polarity:1;
+			unsigned char abs_polarity:1;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char charge_pump_enable:1;
+			unsigned char cbc_abs_auto_servo:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_110 {
+	union {
+		struct {
+			unsigned char active_stylus_rx_feedback_cap;
+			unsigned char active_stylus_rx_feedback_cap_reference;
+			unsigned char active_stylus_low_reference;
+			unsigned char active_stylus_high_reference;
+			unsigned char active_stylus_gain_control;
+			unsigned char active_stylus_gain_control_reference;
+			unsigned char active_stylus_timing_mode;
+			unsigned char active_stylus_discovery_bursts;
+			unsigned char active_stylus_detection_bursts;
+			unsigned char active_stylus_discovery_noise_multiplier;
+			unsigned char active_stylus_detection_envelope_min;
+			unsigned char active_stylus_detection_envelope_max;
+			unsigned char active_stylus_lose_count;
+		} __packed;
+		struct {
+			unsigned char data[13];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_149 {
+	union {
+		struct {
+			unsigned char trans_cbc_global_cap_enable:1;
+			unsigned char f54_ctrl149_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_188 {
+	union {
+		struct {
+			unsigned char start_calibration:1;
+			unsigned char start_is_calibration:1;
+			unsigned char frequency:2;
+			unsigned char start_production_test:1;
+			unsigned char short_test_calibration:1;
+			unsigned char f54_ctrl188_b7:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/*
+ * Pointers to the optional F54 control register mirrors; a NULL entry
+ * means the corresponding register is not present on this controller.
+ */
+struct f54_control {
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+/*
+ * Runtime state for the F54 diagnostic (test reporting) function:
+ * register base addresses, cached query/control register mirrors, the
+ * current report buffer, and the sysfs/workqueue plumbing used to
+ * capture and expose reports.
+ */
+struct synaptics_rmi4_f54_handle {
+	bool no_auto_cal;
+	bool skip_preparation;
+	unsigned char status;
+	unsigned char intr_mask;
+	unsigned char intr_reg_num;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char *report_data;
+	/* RMI4 page-description base addresses for F54 */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size;
+	unsigned int data_buffer_size;
+	unsigned int data_pos;
+	enum f54_report_types report_type;
+	/* cached copies of the F54 query registers read at init */
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_query_55 query_55;
+	struct f54_query_57 query_57;
+	struct f54_query_58 query_58;
+	struct f54_query_61 query_61;
+	struct f54_query_64 query_64;
+	struct f54_query_65 query_65;
+	struct f54_query_67 query_67;
+	struct f54_query_68 query_68;
+	struct f54_query_69 query_69;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	/* serializes status transitions between capture paths */
+	struct mutex status_mutex;
+	struct kobject *sysfs_dir;
+	/* watchdog aborts report capture that exceeds its timeout */
+	struct hrtimer watchdog;
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+struct f55_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char curve_compensation_mode:2;
+			unsigned char has_ctrl6:1;
+			unsigned char has_alternate_transmitter_assignment:1;
+			unsigned char has_single_layer_multi_touch:1;
+			unsigned char has_query5:1;
+		} __packed;
+		unsigned char data[3];
+	};
+};
+
+struct f55_query_3 {
+	union {
+		struct {
+			unsigned char has_ctrl8:1;
+			unsigned char has_ctrl9:1;
+			unsigned char has_oncell_pattern_support:1;
+			unsigned char has_data0:1;
+			unsigned char has_single_wide_pattern_support:1;
+			unsigned char has_mirrored_tx_pattern_support:1;
+			unsigned char has_discrete_pattern_support:1;
+			unsigned char has_query9:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_5 {
+	union {
+		struct {
+			unsigned char has_corner_compensation:1;
+			unsigned char has_ctrl12:1;
+			unsigned char has_trx_configuration:1;
+			unsigned char has_ctrl13:1;
+			unsigned char f55_query5_b4:1;
+			unsigned char has_ctrl14:1;
+			unsigned char has_basis_function:1;
+			unsigned char has_query17:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * F55 (sensor TX/RX assignment) query/control register maps.  Each
+ * union overlays a named bitfield view onto the raw register byte(s);
+ * "data" provides byte-level access for register reads.
+ * NOTE(review): field semantics follow the register names only -
+ * confirm against the Synaptics RMI4 F55 register specification.
+ */
+struct f55_query_17 {
+	union {
+		struct {
+			unsigned char f55_query17_b0:1;
+			unsigned char has_ctrl16:1;
+			unsigned char has_ctrl18_ctrl19:1;
+			unsigned char has_ctrl17:1;
+			unsigned char has_ctrl20:1;
+			unsigned char has_ctrl21:1;
+			unsigned char has_ctrl22:1;
+			unsigned char has_query18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_18 {
+	union {
+		struct {
+			unsigned char has_ctrl23:1;
+			unsigned char has_ctrl24:1;
+			unsigned char has_query19:1;
+			unsigned char has_ctrl25:1;
+			unsigned char has_ctrl26:1;
+			unsigned char has_ctrl27_query20:1;
+			unsigned char has_ctrl28_query21:1;
+			unsigned char has_query22:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_22 {
+	union {
+		struct {
+			unsigned char has_ctrl29:1;
+			unsigned char has_query23:1;
+			unsigned char has_guard_disable:1;
+			unsigned char has_ctrl30:1;
+			unsigned char has_ctrl31:1;
+			unsigned char has_ctrl32:1;
+			unsigned char has_query24_through_query27:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 23: sensor layout (amp sensing, transpose, column2mux size). */
+struct f55_query_23 {
+	union {
+		struct {
+			unsigned char amp_sensor_enabled:1;
+			unsigned char image_transposed:1;
+			unsigned char first_column_at_left_side:1;
+			unsigned char size_of_column2mux:5;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_28 {
+	union {
+		struct {
+			unsigned char f55_query28_b0__4:5;
+			unsigned char has_ctrl37:1;
+			unsigned char has_query29:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl38:1;
+			unsigned char has_query31_query32:1;
+			unsigned char has_ctrl39:1;
+			unsigned char has_ctrl40:1;
+			unsigned char has_ctrl41:1;
+			unsigned char has_ctrl42:1;
+			unsigned char has_ctrl43_ctrl44:1;
+			unsigned char has_query33:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_33 {
+	union {
+		struct {
+			unsigned char has_extended_amp_pad:1;
+			unsigned char has_extended_amp_btn:1;
+			unsigned char has_ctrl45_ctrl46:1;
+			unsigned char f55_query33_b3:1;
+			unsigned char has_ctrl47_sub0_sub1:1;
+			unsigned char f55_query33_b5__7:3;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Ctrl43 (2 bytes): sensor side swap and AFE left/right mux sizes. */
+struct f55_control_43 {
+	union {
+		struct {
+			unsigned char swap_sensor_side:1;
+			unsigned char f55_ctrl43_b1__7:7;
+			unsigned char afe_l_mux_size:4;
+			unsigned char afe_r_mux_size:4;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Runtime state for F55: register base addresses, parsed query
+ * registers, and the electrode assignment tables read back from the
+ * controller (an entry of 0xff means "not assigned" - see the
+ * *_mapping_show handlers below).
+ */
+struct synaptics_rmi4_f55_handle {
+	bool amp_sensor;
+	bool extended_amp;
+	bool has_force;
+	unsigned char size_of_column2mux;
+	unsigned char afe_mux_offset;
+	unsigned char force_tx_offset;
+	unsigned char force_rx_offset;
+	unsigned char *tx_assignment;
+	unsigned char *rx_assignment;
+	unsigned char *force_tx_assignment;
+	unsigned char *force_rx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+	struct f55_query_28 query_28;
+	struct f55_query_30 query_30;
+	struct f55_query_33 query_33;
+};
+
+/*
+ * F21 (force sensing) query register maps.
+ * NOTE(review): field semantics follow register names only - confirm
+ * against the Synaptics RMI4 F21 specification.
+ */
+struct f21_query_2 {
+	union {
+		struct {
+			unsigned char size_of_query3;
+			struct {
+				unsigned char query0_is_present:1;
+				unsigned char query1_is_present:1;
+				unsigned char query2_is_present:1;
+				unsigned char query3_is_present:1;
+				unsigned char query4_is_present:1;
+				unsigned char query5_is_present:1;
+				unsigned char query6_is_present:1;
+				unsigned char query7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char query8_is_present:1;
+				unsigned char query9_is_present:1;
+				unsigned char query10_is_present:1;
+				unsigned char query11_is_present:1;
+				unsigned char query12_is_present:1;
+				unsigned char query13_is_present:1;
+				unsigned char query14_is_present:1;
+				unsigned char query15_is_present:1;
+			} __packed;
+		};
+		unsigned char data[3];
+	};
+};
+
+struct f21_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+struct f21_query_11 {
+	union {
+		struct {
+			unsigned char has_high_resolution_force:1;
+			unsigned char has_force_sensing_txrx_mapping:1;
+			unsigned char f21_query11_00_b2__7:6;
+			unsigned char f21_query11_00_reserved;
+			unsigned char max_number_of_force_sensors;
+			unsigned char max_number_of_force_txs;
+			unsigned char max_number_of_force_rxs;
+			unsigned char f21_query11_01_reserved;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * Runtime state for F21: register base addresses and the combined
+ * force TX/RX assignment table (TX entries first, then RX entries at
+ * offset max_num_of_tx - see force_rx_mapping_show).
+ */
+struct synaptics_rmi4_f21_handle {
+	bool has_force;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char max_num_of_tx;
+	unsigned char max_num_of_rx;
+	unsigned char max_num_of_txrx;
+	unsigned char *force_txrx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+};
+
+/*
+ * sysfs scaffolding for the test reporting interface.
+ * show_prototype()/store_prototype()/show_store_prototype() and
+ * attrify() are driver-local macros (presumably defined earlier in
+ * this file - not visible in this hunk) that expand to DEVICE_ATTR
+ * boilerplate and attribute references.
+ */
+show_prototype(num_of_mapped_tx);
+show_prototype(num_of_mapped_rx);
+show_prototype(tx_mapping);
+show_prototype(rx_mapping);
+show_prototype(num_of_mapped_force_tx);
+show_prototype(num_of_mapped_force_rx);
+show_prototype(force_tx_mapping);
+show_prototype(force_rx_mapping);
+show_prototype(report_size);
+show_prototype(status);
+
+store_prototype(do_preparation);
+store_prototype(force_cal);
+store_prototype(get_report);
+store_prototype(resume_touch);
+store_prototype(do_afe_calibration);
+
+show_store_prototype(report_type);
+show_store_prototype(fifoindex);
+show_store_prototype(no_auto_cal);
+show_store_prototype(read_report);
+
+/* Attribute table registered as one sysfs group. */
+static struct attribute *attrs[] = {
+	attrify(num_of_mapped_tx),
+	attrify(num_of_mapped_rx),
+	attrify(tx_mapping),
+	attrify(rx_mapping),
+	attrify(num_of_mapped_force_tx),
+	attrify(num_of_mapped_force_rx),
+	attrify(force_tx_mapping),
+	attrify(force_rx_mapping),
+	attrify(report_size),
+	attrify(status),
+	attrify(do_preparation),
+	attrify(force_cal),
+	attrify(get_report),
+	attrify(resume_touch),
+	attrify(do_afe_calibration),
+	attrify(report_type),
+	attrify(fifoindex),
+	attrify(no_auto_cal),
+	attrify(read_report),
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/* Read-only binary sysfs file exposing the raw report data. */
+static struct bin_attribute test_report_data = {
+	.attr = {
+		.name = "report_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = test_sysfs_data_read,
+};
+
+/* Module-wide handles for F54/F55/F21 (NULL until initialized). */
+static struct synaptics_rmi4_f54_handle *f54;
+static struct synaptics_rmi4_f55_handle *f55;
+static struct synaptics_rmi4_f21_handle *f21;
+
+/* Presumably used to synchronize module removal - confirm at caller. */
+DECLARE_COMPLETION(test_remove_complete);
+
+/*
+ * Check whether the given F54 report type is one the driver knows how
+ * to size and read back.  On an unsupported type, the cached report
+ * state is invalidated (type reset, size zeroed) as a side effect.
+ */
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+	static const enum f54_report_types supported[] = {
+		F54_8BIT_IMAGE,
+		F54_16BIT_IMAGE,
+		F54_RAW_16BIT_IMAGE,
+		F54_HIGH_RESISTANCE,
+		F54_TX_TO_TX_SHORTS,
+		F54_RX_TO_RX_SHORTS_1,
+		F54_TRUE_BASELINE,
+		F54_FULL_RAW_CAP_MIN_MAX,
+		F54_RX_OPENS_1,
+		F54_TX_OPENS,
+		F54_TX_TO_GND_SHORTS,
+		F54_RX_TO_RX_SHORTS_2,
+		F54_RX_OPENS_2,
+		F54_FULL_RAW_CAP,
+		F54_FULL_RAW_CAP_NO_RX_COUPLING,
+		F54_SENSOR_SPEED,
+		F54_ADC_RANGE,
+		F54_TRX_OPENS,
+		F54_TRX_TO_GND_SHORTS,
+		F54_TRX_SHORTS,
+		F54_ABS_RAW_CAP,
+		F54_ABS_DELTA_CAP,
+		F54_ABS_HYBRID_DELTA_CAP,
+		F54_ABS_HYBRID_RAW_CAP,
+		F54_AMP_FULL_RAW_CAP,
+		F54_AMP_RAW_ADC,
+		F54_FULL_RAW_CAP_TDDI,
+	};
+	unsigned int ii;
+
+	for (ii = 0; ii < ARRAY_SIZE(supported); ii++) {
+		if (report_type == supported[ii])
+			return true;
+	}
+
+	/* Unsupported: reset cached report state before failing. */
+	f54->report_type = INVALID_REPORT_TYPE;
+	f54->report_size = 0;
+
+	return false;
+}
+
+/*
+ * Compute f54->report_size (in bytes) for the currently selected
+ * report type from the assigned TX/RX electrode counts.  Sets size 0
+ * for unknown types or on register read failure.
+ */
+static void test_set_report_size(void)
+{
+	int retval;
+	unsigned char tx = f54->tx_assigned;
+	unsigned char rx = f54->rx_assigned;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		/* one byte per tx/rx intersection */
+		f54->report_size = tx * rx;
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+	case F54_FULL_RAW_CAP_TDDI:
+		/* two bytes per intersection */
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_HIGH_RESISTANCE:
+		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
+		break;
+	case F54_TX_TO_TX_SHORTS:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+		/* one bit per tx, rounded up to whole bytes */
+		f54->report_size = (tx + 7) / 8;
+		break;
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_RX_OPENS_1:
+		if (rx < tx)
+			f54->report_size = 2 * rx * rx;
+		else
+			f54->report_size = 2 * tx * rx;
+		break;
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
+		break;
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+		/* only the rx lines beyond tx count contribute */
+		if (rx <= tx)
+			f54->report_size = 0;
+		else
+			f54->report_size = 2 * rx * (rx - tx);
+		break;
+	case F54_ADC_RANGE:
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					f54->control.reg_41->data,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read control reg_41\n",
+						__func__);
+				f54->report_size = 0;
+				break;
+			}
+			/* with signal clarity on, tx is padded to a multiple of 4 */
+			if (!f54->control.reg_41->no_signal_clarity) {
+				if (tx % 4)
+					tx += 4 - (tx % 4);
+			}
+		}
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* abs/hybrid reports also cover F21 force electrodes, 4 bytes each */
+		tx += f21->tx_assigned;
+		rx += f21->rx_assigned;
+		f54->report_size = 4 * (tx + rx);
+		break;
+	default:
+		f54->report_size = 0;
+	}
+}
+
+/*
+ * Route interrupt generation for report capture.  When "set" is true,
+ * zero every other function's interrupt enable register and enable
+ * only F54's interrupt; when false, zero F54's register and restore
+ * the saved per-register interrupt masks.
+ *
+ * NOTE(review): the ordering (F54 register written first on restore,
+ * last on set) appears deliberate to avoid a window with no interrupt
+ * source enabled - keep as is.
+ *
+ * Returns 0 on success or a negative errno from register writes.
+ */
+static int test_set_interrupt(bool set)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short f01_ctrl_reg;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	intr_mask = rmi4_data->intr_mask;
+	/* F01 interrupt enable registers start one past the control base */
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (!set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (set) {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			} else {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&f54->intr_mask,
+				1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Poll the F54 command register until the firmware clears it (command
+ * complete), checking once per 100 ms for up to COMMAND_TIMEOUT_100MS
+ * attempts.
+ *
+ * Returns 0 on completion, a negative errno on read failure, or
+ * -ETIMEDOUT if the command never completes.
+ */
+static int test_wait_for_command_completion(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char attempt;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (attempt = 0; attempt < COMMAND_TIMEOUT_100MS; attempt++) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&value,
+				sizeof(value));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+			return retval;
+		}
+
+		/* all command bits cleared means the firmware is done */
+		if (value == 0x00)
+			return 0;
+
+		msleep(100);
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for command completion\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Write a single command byte to the F54 command register and block
+ * until the firmware finishes processing it.
+ *
+ * Returns 0 on success or a negative errno from the write or the
+ * completion wait.
+ */
+static int test_do_command(unsigned char command)
+{
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+	int retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	return test_wait_for_command_completion();
+}
+
+/*
+ * Put the controller into a known state before capturing a report:
+ * always force the no-sleep bit on, then (unless skip_preparation)
+ * apply report-type specific setup.  For most raw/image types this
+ * disables CBC and signal clarity and issues force-update/force-cal;
+ * F54_AMP_RAW_ADC instead starts the production test via Ctrl188.
+ *
+ * Returns 0 on success or a negative errno from register access.
+ */
+static int test_do_preparation(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char zero = 0x00;
+	unsigned char device_ctrl;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl |= NO_SLEEP_ON;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	if (f54->skip_preparation)
+		return 0;
+
+	switch (f54->report_type) {
+	/* these types need no extra preparation */
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		/* read-modify-write Ctrl188 to set the production-test bit */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 1;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* disable_cbc() is a driver-local helper macro for the
+		 * CBC control register given as argument */
+		if (f54->query.touch_controller_family == 1)
+			disable_cbc(reg_7);
+		else if (f54->query.has_ctrl88)
+			disable_cbc(reg_88);
+
+		if (f54->query.has_0d_acquisition_control)
+			disable_cbc(reg_57);
+
+		/* global CBC (Ctrl149) exists only at the end of this
+		 * long chain of presence bits */
+		if ((f54->query.has_query15) &&
+				(f54->query_15.has_query25) &&
+				(f54->query_25.has_query27) &&
+				(f54->query_27.has_query29) &&
+				(f54->query_29.has_query30) &&
+				(f54->query_30.has_query32) &&
+				(f54->query_32.has_query33) &&
+				(f54->query_33.has_query36) &&
+				(f54->query_36.has_query38) &&
+				(f54->query_38.has_ctrl149)) {
+			/* NOTE(review): writes sizeof(reg_149->data) bytes
+			 * from a single-byte source; safe only if that
+			 * register is one byte - confirm */
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_149->address,
+					&zero,
+					sizeof(f54->control.reg_149->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable global CBC\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		if (f54->query.has_signal_clarity) {
+			/* NOTE(review): reads sizeof(reg_41->data) bytes into
+			 * a single-byte local; safe only if reg_41 is one
+			 * byte - confirm */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+			/* bit 0 is no_signal_clarity: setting it disables
+			 * the feature */
+			value |= 0x01;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		retval = test_do_command(COMMAND_FORCE_UPDATE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force update\n",
+					__func__);
+			return retval;
+		}
+
+		retval = test_do_command(COMMAND_FORCE_CAL);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force cal\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run an AFE calibration (F54_AFE_CAL) or interference-sensing
+ * calibration (F54_AFE_IS_CAL): set the corresponding start bit in
+ * Ctrl188, poll once per second up to CALIBRATION_TIMEOUT_S for the
+ * bit to self-clear, then verify the matching CRC field in data_31.
+ *
+ * Returns 0 on success, -EBUSY on timeout, -EINVAL on bad CRC, or a
+ * negative errno from register access.
+ */
+static int test_do_afe_calibration(enum f54_afe_cal mode)
+{
+	int retval;
+	unsigned char timeout = CALIBRATION_TIMEOUT_S;
+	unsigned char timeout_count = 0;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL)
+		f54->control.reg_188->start_calibration = 1;
+	else if (mode == F54_AFE_IS_CAL)
+		f54->control.reg_188->start_is_calibration = 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	/* the firmware clears the start bit when calibration finishes */
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_188->address,
+				f54->control.reg_188->data,
+				sizeof(f54->control.reg_188->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete calibration\n",
+					__func__);
+			return retval;
+		}
+
+		if (mode == F54_AFE_CAL) {
+			if (!f54->control.reg_188->start_calibration)
+				break;
+		} else if (mode == F54_AFE_IS_CAL) {
+			if (!f54->control.reg_188->start_is_calibration)
+				break;
+		}
+
+		if (timeout_count == timeout) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for calibration completion\n",
+					__func__);
+			return -EBUSY;
+		}
+
+		timeout_count++;
+		msleep(1000);
+	} while (true);
+
+	/* check CRC */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_31.address,
+			f54->data_31.data,
+			sizeof(f54->data_31.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read calibration CRC\n",
+				__func__);
+		return retval;
+	}
+
+	/* a zero CRC field indicates a successful calibration */
+	if (mode == F54_AFE_CAL) {
+		if (f54->data_31.calibration_crc == 0)
+			return 0;
+	} else if (mode == F54_AFE_IS_CAL) {
+		if (f54->data_31.is_calibration_crc == 0)
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to read calibration CRC\n",
+			__func__);
+
+	return -EINVAL;
+}
+
+/*
+ * Verify that no report acquisition is in flight.  Returns 0 when the
+ * F54 handler is idle, otherwise logs the reason and returns -EINVAL.
+ */
+static int test_check_for_idle_status(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	if (f54->status == STATUS_IDLE)
+		return 0;
+
+	if (f54->status == STATUS_BUSY)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status busy\n",
+				__func__);
+	else if (f54->status == STATUS_ERROR)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status error\n",
+				__func__);
+	else
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid status (%d)\n",
+				__func__, f54->status);
+
+	return -EINVAL;
+}
+
+/*
+ * Watchdog work: runs when the get-report watchdog fires while a
+ * capture may still be pending.  If the GET_REPORT command bit is
+ * still set the firmware does not support the requested type; if it
+ * cleared, the data is ready and reading is handed off to the report
+ * workqueue.  Only the unsupported/read-failure paths mark the
+ * handler STATUS_ERROR.
+ */
+static void test_timeout_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status == STATUS_BUSY) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+		} else if (command & COMMAND_GET_REPORT) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Report type not supported by FW\n",
+					__func__);
+		} else {
+			/* report completed: hand off to the report work and
+			 * keep STATUS_BUSY until it is read */
+			queue_work(f54->test_report_workqueue,
+					&f54->test_report_work);
+			goto exit;
+		}
+		f54->status = STATUS_ERROR;
+		f54->report_size = 0;
+	}
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+}
+
+/*
+ * Watchdog expiry callback for report acquisition: defer the actual
+ * handling (which needs register I/O) to the timeout work item.
+ */
+static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer)
+{
+	struct work_struct *timeout_work = &f54->timeout_work;
+
+	schedule_work(timeout_work);
+
+	return HRTIMER_NORESTART;
+}
+
+/* sysfs: number of TX electrodes currently assigned (F54 view). */
+static ssize_t num_of_mapped_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char tx_assigned = f54->tx_assigned;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", tx_assigned);
+}
+
+/* sysfs: number of RX electrodes currently assigned (F54 view). */
+static ssize_t num_of_mapped_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char rx_assigned = f54->rx_assigned;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", rx_assigned);
+}
+
+/*
+ * sysfs: F55 per-electrode TX assignment as a space-separated list;
+ * an unassigned electrode (0xff) is printed as "xx".  Fails with
+ * -EINVAL when no F55 function is present.
+ */
+static ssize_t tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		tx_num = f55->tx_assignment[ii];
+		/* bound each write by the space left in the PAGE_SIZE buffer */
+		if (tx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs: F55 per-electrode RX assignment as a space-separated list;
+ * an unassigned electrode (0xff) is printed as "xx".  Fails with
+ * -EINVAL when no F55 function is present.
+ */
+static ssize_t rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		rx_num = f55->rx_assignment[ii];
+		/* bound each write by the space left in the PAGE_SIZE buffer */
+		if (rx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/* sysfs: number of force-sensing TX electrodes assigned (F21 view). */
+static ssize_t num_of_mapped_force_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char force_tx_assigned = f21->tx_assigned;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", force_tx_assigned);
+}
+
+/* sysfs: number of force-sensing RX electrodes assigned (F21 view). */
+static ssize_t num_of_mapped_force_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char force_rx_assigned = f21->rx_assigned;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", force_rx_assigned);
+}
+
+/*
+ * sysfs: per-electrode force-sensing TX assignment as a space-
+ * separated list ("xx" for unassigned electrodes, value 0xff).
+ * F55 force assignment takes precedence over F21.
+ *
+ * Fix: the original guard allowed f55 to be NULL when f21 provides
+ * force sensing, yet the first branch tested f55->has_force
+ * unconditionally, dereferencing a NULL pointer.  Check the pointer
+ * before the flag (and symmetrically for f21).
+ */
+static ssize_t force_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		tx_electrodes = f55->query.num_of_tx_electrodes;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f55->force_tx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21 && f21->has_force) {
+		tx_electrodes = f21->max_num_of_tx;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f21->force_txrx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs: per-electrode force-sensing RX assignment as a space-
+ * separated list ("xx" for unassigned electrodes, value 0xff).  For
+ * F21 the RX entries follow the TX entries in the combined
+ * force_txrx_assignment table, hence the offset.
+ *
+ * Fix: as in force_tx_mapping_show, the original guard allowed f55 to
+ * be NULL when f21 provides force sensing, yet f55->has_force was
+ * tested unconditionally - a NULL pointer dereference.  Check the
+ * pointer before the flag (and symmetrically for f21).
+ */
+static ssize_t force_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char offset;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		rx_electrodes = f55->query.num_of_rx_electrodes;
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			rx_num = f55->force_rx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21 && f21->has_force) {
+		offset = f21->max_num_of_tx;
+		rx_electrodes = f21->max_num_of_rx;
+
+		for (ii = offset; ii < (rx_electrodes + offset); ii++) {
+			rx_num = f21->force_txrx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/* sysfs: size in bytes of the current/most recent report. */
+static ssize_t report_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int report_size = f54->report_size;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", report_size);
+}
+
+/* sysfs: current F54 handler status, read under status_mutex. */
+static ssize_t status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	mutex_lock(&f54->status_mutex);
+	len = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+	mutex_unlock(&f54->status_mutex);
+
+	return len;
+}
+
+/*
+ * sysfs: writing "1" runs test_do_preparation() (controller setup for
+ * report capture).  Only valid while the handler is idle; any other
+ * value is rejected with -EINVAL.
+ */
+static ssize_t do_preparation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_preparation();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do preparation\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs: writing "1" issues a force-calibration command; only valid
+ * while the handler is idle.  Any other value is rejected.
+ */
+static ssize_t force_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_command(COMMAND_FORCE_CAL);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do force cal\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs: writing "1" starts capture of the currently selected report
+ * type: route interrupts to F54, write the GET_REPORT command, mark
+ * the handler busy and arm the watchdog timer.  The report itself is
+ * read later from interrupt/work context.
+ */
+static ssize_t get_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char command;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid(f54->report_type)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid report type\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* mask other functions' interrupts so only F54 fires */
+	test_set_interrupt(true);
+
+	command = (unsigned char)COMMAND_GET_REPORT;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write get report command\n",
+				__func__);
+		goto exit;
+	}
+
+	f54->status = STATUS_BUSY;
+	f54->report_size = 0;
+	f54->data_pos = 0;
+
+	/* watchdog fires if the report does not complete in time */
+	hrtimer_start(&f54->watchdog,
+			ktime_set(GET_REPORT_TIMEOUT_S, 0),
+			HRTIMER_MODE_REL);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs: writing "1" restores normal touch operation after testing:
+ * restore the saved no-sleep setting, restore interrupt routing, and
+ * undo report-type specific preparation (clearing the Ctrl188
+ * production-test bit for F54_AMP_RAW_ADC, resetting the device for
+ * types that disabled CBC/signal clarity).
+ */
+static ssize_t resume_touch_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	/* put back whatever no-sleep setting was active before testing */
+	device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+	device_ctrl |= rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	test_set_interrupt(false);
+
+	if (f54->skip_preparation)
+		return count;
+
+	switch (f54->report_type) {
+	/* these types made no changes that need undoing */
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		/* clear the production-test bit set during preparation */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 0;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* preparation changed CBC/signal-clarity settings; a reset
+		 * restores the controller's defaults */
+		rmi4_data->reset_device(rmi4_data, false);
+	}
+
+	return count;
+}
+
+/*
+ * sysfs: run an AFE calibration.  Accepts "0" (F54_AFE_CAL) or "1"
+ * (F54_AFE_IS_CAL); requires the optional Ctrl188 register to exist.
+ */
+static ssize_t do_afe_calibration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (!f54->query_49.has_ctrl188) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: F54_ANALOG_Ctrl188 not found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (setting != 0 && setting != 1)
+		return -EINVAL;
+
+	retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+	if (retval)
+		return retval;
+
+	return count;
+}
+
+/* sysfs: the currently selected report type (numeric). */
+static ssize_t report_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int report_type = f54->report_type;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", report_type);
+}
+
+/*
+ * sysfs: select the report type to capture.  Validated against the
+ * supported list and written through to the F54 data register; only
+ * allowed while the handler is idle.
+ */
+static ssize_t report_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid((enum f54_report_types)setting)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type not supported by driver\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	f54->report_type = (enum f54_report_types)setting;
+	data = (unsigned char)setting;
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report type\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs: read the 16-bit report FIFO index back from the controller.
+ * batohs() is presumably a byte-array-to-short conversion helper -
+ * confirm against its definition elsewhere in the driver.
+ */
+static ssize_t fifoindex_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char data[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report index\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&f54->fifoindex, data);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
+}
+
+/*
+ * sysfs: set the report FIFO index, writing it to the controller as a
+ * 16-bit little-endian value via the hstoba() helper macro.
+ */
+static ssize_t fifoindex_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data[2];
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	f54->fifoindex = setting;
+
+	hstoba(data, (unsigned short)setting);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report index\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/* sysfs show method: print the cached no-auto-cal flag (0 or 1). */
+static ssize_t no_auto_cal_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int flag = f54->no_auto_cal;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", flag);
+}
+
+/*
+ * sysfs store method: enable (1) or disable (0) the "no auto calibration"
+ * bit via a read-modify-write of the first control register, then update
+ * the cached flag.  Values other than 0 and 1 are rejected.
+ */
+static ssize_t no_auto_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+	unsigned long setting;
+	unsigned char reg;
+	int ret;
+
+	ret = sstrtoul(buf, 10, &setting);
+	if (ret)
+		return ret;
+
+	/* Only 0 and 1 are meaningful for this flag */
+	if (setting > 1)
+		return -EINVAL;
+
+	/* Read the control byte holding the no-auto-cal bit */
+	ret = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control_base_addr, &reg, sizeof(reg));
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read no auto cal setting\n",
+				__func__);
+		return ret;
+	}
+
+	if (setting)
+		reg |= CONTROL_NO_AUTO_CAL;
+	else
+		reg &= ~CONTROL_NO_AUTO_CAL;
+
+	ret = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control_base_addr, &reg, sizeof(reg));
+	if (ret < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write no auto cal setting\n",
+				__func__);
+		return ret;
+	}
+
+	/* Update the cache only after the hardware write succeeded */
+	f54->no_auto_cal = (setting == 1);
+
+	return count;
+}
+
+/*
+ * sysfs show method: format the most recently captured F54 report into a
+ * human-readable page.  The layout depends on f54->report_type: byte or
+ * word images are printed as tx-by-rx matrices, min/max style reports as
+ * index/value pairs, and the hybrid/abs reports as labelled rx and tx rows.
+ * Unknown report types fall back to a hex dump.
+ *
+ * NOTE(review): every snprintf() is sized with PAGE_SIZE - count; if the
+ * accumulated output ever reached PAGE_SIZE the int-to-size_t subtraction
+ * would wrap to a huge size — presumably report sizes keep the output well
+ * under a page, but this is worth confirming against the largest sensor.
+ */
+static ssize_t read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		/* One signed byte per line: "index: value" */
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		/* Unsigned 16-bit matrix, tx rows by rx columns */
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			/* Last column of the row ends the line */
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		/* Signed 16-bit matrix, tx rows by rx columns */
+		report_data_16 = (short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		/* Signed 16-bit values printed as index/value pairs */
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* Hybrid reports include the F21 electrodes as well */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_u32 = (unsigned int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+		/* Same layout as the raw cap case, but values are signed */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_32 = (int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		/* Unknown report type: dump raw bytes in hex */
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	/* Trailing blank line; count assumes exactly one byte was emitted */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs store method: one-shot report capture.  Selects the requested
+ * report type, prepares the device, triggers acquisition, polls for
+ * completion (100 ms steps), and restores normal touch operation.  On any
+ * failure the device is reset before returning the error.
+ */
+static ssize_t read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+	const char cmd[] = {'1', 0};
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char poll;
+	int ret;
+
+	/* Select the report type requested by user space */
+	ret = report_type_store(dev, attr, buf, count);
+	if (ret < 0)
+		goto exit;
+
+	/* Put the controller into the state required for test reporting */
+	ret = do_preparation_store(dev, attr, cmd, 1);
+	if (ret < 0)
+		goto exit;
+
+	/* Kick off report acquisition */
+	ret = get_report_store(dev, attr, cmd, 1);
+	if (ret < 0)
+		goto exit;
+
+	/* Wait for the worker to finish, up to GET_REPORT_TIMEOUT_S */
+	for (poll = 0; poll < timeout; poll++) {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+	}
+
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Return the controller to normal touch operation */
+	ret = resume_touch_store(dev, attr, cmd, 1);
+	if (ret < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return ret;
+}
+
+/*
+ * sysfs binary-attribute read method: copy raw report bytes to user space.
+ * Serialized against the report worker via status_mutex; refuses to run
+ * while a capture is in flight or before any data has been captured.
+ * Maintains f54->data_pos so successive reads stream through the report.
+ */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	/* Fail if a capture is still running (or ended in error) */
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Clamp the read so it never runs past the end of the report */
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	/* Bounds-checked copy out of the report buffer */
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * Work item that completes a report capture: waits for the firmware command
+ * to finish, sizes and (re)allocates the report buffer, rewinds the report
+ * FIFO index to zero, and reads the full report out of the device.
+ *
+ * f54->status is written only after status_mutex is dropped; it carries
+ * either STATUS_IDLE, STATUS_ERROR, or the pre-existing status value.
+ */
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	/* Nothing to do unless a capture was actually started */
+	if (f54->status != STATUS_BUSY) {
+		retval = f54->status;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	/* Grow the report buffer if the new report will not fit */
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	/* Rewind the report FIFO so the read starts at the beginning */
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	/* On error, invalidate the report so readers see no stale data */
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+}
+
+/*
+ * Teardown counterpart of test_set_sysfs(): remove the attribute group and
+ * the binary report file, then drop the reference on the sysfs directory.
+ */
+static void test_remove_sysfs(void)
+{
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+	kobject_put(f54->sysfs_dir);
+}
+
+/*
+ * Create the F54 sysfs interface under the input device: a directory, the
+ * binary report-data file, and the attribute group.  Returns 0 on success
+ * or -ENODEV, unwinding only what was actually created.
+ *
+ * Fix: the original exit_attributes path called sysfs_remove_group() for
+ * the group whose creation had just FAILED; removing a group that was
+ * never added triggers kernel warnings.  On that path only the bin file
+ * (which was created) is removed before the kobject is released.
+ */
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+exit_attributes:
+	/* The group was never created; remove only the bin file */
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+/*
+ * Release every dynamically allocated control-register struct.  kfree()
+ * is a no-op for NULL, so registers that were never allocated are safe.
+ */
+static void test_free_control_mem(void)
+{
+	struct f54_control *control = &f54->control;
+
+	kfree(control->reg_7);
+	kfree(control->reg_41);
+	kfree(control->reg_57);
+	kfree(control->reg_86);
+	kfree(control->reg_88);
+	kfree(control->reg_110);
+	kfree(control->reg_149);
+	kfree(control->reg_188);
+}
+
+/*
+ * Walk the F54 data register map.  Each optional data register present on
+ * this firmware (as advertised by the query registers) advances reg_addr
+ * by that register's size; the walk is only used to locate data register
+ * 31, whose address is recorded in f54->data_31.
+ *
+ * NOTE(review): the two `|` operators below are bitwise ORs of single-bit
+ * query flags, which behaves identically to `||` here.
+ */
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data_20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31 */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+}
+
+/*
+ * Walk the F54 control register map.  Starting at control_base_addr, each
+ * control register present on this firmware (per the query registers)
+ * advances reg_addr by its size.  Registers the driver needs to touch
+ * later (7, 41, 57, 86, 88, 110, 149, 188) get a struct allocated and
+ * their address recorded; register 86 is also read immediately to cache
+ * its contents.  Returns 0 on success, -ENOMEM on allocation failure, or
+ * a negative bus error from the register 86 read.
+ *
+ * NOTE(review): allocations made here appear to be released by
+ * test_free_control_mem() — confirm the caller invokes it on the error
+ * paths so partially allocated registers do not leak.
+ */
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7 */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16 */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19 */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36 */
+	/* length is assigned on both branches inside the guard below */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40 */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41 */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57 */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank)) {
+		if (f54->query_68.is_tddi_hic)
+			reg_addr += CONTROL_70_73_SIZE;
+		else
+			reg_addr += CONTROL_68_73_SIZE;
+	}
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75 */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86: cached immediately (sense display ratio) */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88 */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103 */
+	if (f54->query_22.has_ctrl103_query26) {
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110 */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 */
+	if (f54->query_30.has_ctrl124)
+		reg_addr += CONTROL_124_SIZE;
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* control 128 */
+	if (f54->query_33.has_ctrl128)
+		reg_addr += CONTROL_128_SIZE;
+
+	/* control 129 */
+	if (f54->query_33.has_ctrl129)
+		reg_addr += CONTROL_129_SIZE;
+
+	/* control 130 */
+	if (f54->query_33.has_ctrl130)
+		reg_addr += CONTROL_130_SIZE;
+
+	/* control 131 */
+	if (f54->query_33.has_ctrl131)
+		reg_addr += CONTROL_131_SIZE;
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* control 135 */
+	if (f54->query_35.has_ctrl135)
+		reg_addr += CONTROL_135_SIZE;
+
+	/* control 136 */
+	if (f54->query_35.has_ctrl136)
+		reg_addr += CONTROL_136_SIZE;
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 */
+	if (f54->query_36.has_ctrl141)
+		reg_addr += CONTROL_141_SIZE;
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149 */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* control 150 */
+	if (f54->query_38.has_ctrl150)
+		reg_addr += CONTROL_150_SIZE;
+
+	/* control 151 */
+	if (f54->query_38.has_ctrl151)
+		reg_addr += CONTROL_151_SIZE;
+
+	/* control 152 */
+	if (f54->query_38.has_ctrl152)
+		reg_addr += CONTROL_152_SIZE;
+
+	/* control 153 */
+	if (f54->query_38.has_ctrl153)
+		reg_addr += CONTROL_153_SIZE;
+
+	/* control 154 */
+	if (f54->query_39.has_ctrl154)
+		reg_addr += CONTROL_154_SIZE;
+
+	/* control 155 */
+	if (f54->query_39.has_ctrl155)
+		reg_addr += CONTROL_155_SIZE;
+
+	/* control 156 */
+	if (f54->query_39.has_ctrl156)
+		reg_addr += CONTROL_156_SIZE;
+
+	/* controls 157 158 */
+	if (f54->query_39.has_ctrl157_ctrl158)
+		reg_addr += CONTROL_157_158_SIZE;
+
+	/* controls 159 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 */
+	if (f54->query_40.has_ctrl166)
+		reg_addr += CONTROL_166_SIZE;
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* control 168 */
+	if (f54->query_40.has_ctrl168)
+		reg_addr += CONTROL_168_SIZE;
+
+	/* control 169 */
+	if (f54->query_40.has_ctrl169)
+		reg_addr += CONTROL_169_SIZE;
+
+	/* control 170 reserved */
+
+	/* control 171 */
+	if (f54->query_43.has_ctrl171)
+		reg_addr += CONTROL_171_SIZE;
+
+	/* control 172 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		reg_addr += CONTROL_172_SIZE;
+
+	/* control 173 */
+	if (f54->query_43.has_ctrl173)
+		reg_addr += CONTROL_173_SIZE;
+
+	/* control 174 */
+	if (f54->query_43.has_ctrl174)
+		reg_addr += CONTROL_174_SIZE;
+
+	/* control 175 */
+	if (f54->query_43.has_ctrl175)
+		reg_addr += CONTROL_175_SIZE;
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 */
+	if (f54->query_46.has_ctrl177_ctrl178)
+		reg_addr += CONTROL_177_178_SIZE;
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 181 reserved */
+
+	/* control 182 */
+	if (f54->query_47.has_ctrl182)
+		reg_addr += CONTROL_182_SIZE;
+
+	/* control 183 */
+	if (f54->query_47.has_ctrl183)
+		reg_addr += CONTROL_183_SIZE;
+
+	/* control 184 reserved */
+
+	/* control 185 */
+	if (f54->query_47.has_ctrl185)
+		reg_addr += CONTROL_185_SIZE;
+
+	/* control 186 */
+	if (f54->query_47.has_ctrl186)
+		reg_addr += CONTROL_186_SIZE;
+
+	/* control 187 */
+	if (f54->query_47.has_ctrl187)
+		reg_addr += CONTROL_187_SIZE;
+
+	/* control 188 */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
+/*
+ * Walk the F54 query register space and cache the query blocks this
+ * module uses.  The F54 query registers are packed back-to-back; which
+ * registers exist is advertised by "has_*" bits in earlier query blocks,
+ * so 'offset' must be advanced in strict register order: present-but-
+ * unused registers only bump the offset, registers we need are read into
+ * f54->query_NN before the offset is advanced.
+ *
+ * Returns 0 on success or the negative error code from
+ * synaptics_rmi4_reg_read() on bus failure.
+ */
+static int test_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	/* base query block (queries 0..N) read in one burst */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->query_base_addr,
+			f54->query.data,
+			sizeof(f54->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f54->query.data);
+
+	/* query 12 */
+	/* base block includes query 12 only when sense frequency control
+	 * is present; back the offset up by one otherwise */
+	if (f54->query.has_sense_frequency_control == 0)
+		offset -= 1;
+
+	/* query 13 */
+	if (f54->query.has_query13) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_13.data,
+				sizeof(f54->query_13.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 14 */
+	if (f54->query_13.has_ctrl87)
+		offset += 1;
+
+	/* query 15 */
+	if (f54->query.has_query15) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_15.data,
+				sizeof(f54->query_15.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 16 */
+	if (f54->query_15.has_query16) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_16.data,
+				sizeof(f54->query_16.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 17 */
+	if (f54->query_16.has_query17)
+		offset += 1;
+
+	/* query 18 */
+	if (f54->query_16.has_ctrl94_query18)
+		offset += 1;
+
+	/* query 19 */
+	if (f54->query_16.has_ctrl95_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f54->query_15.has_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f54->query_15.has_query21) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_21.data,
+				sizeof(f54->query_21.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if (f54->query_15.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_22.data,
+				sizeof(f54->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f54->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_23.data,
+				sizeof(f54->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 24 */
+	if (f54->query_21.has_query24_data18)
+		offset += 1;
+
+	/* query 25 */
+	if (f54->query_15.has_query25) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_25.data,
+				sizeof(f54->query_25.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 26 */
+	if (f54->query_22.has_ctrl103_query26)
+		offset += 1;
+
+	/* query 27 */
+	if (f54->query_25.has_query27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_27.data,
+				sizeof(f54->query_27.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 28 */
+	if (f54->query_22.has_query28)
+		offset += 1;
+
+	/* query 29 */
+	if (f54->query_27.has_query29) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_29.data,
+				sizeof(f54->query_29.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 30 */
+	if (f54->query_29.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_30.data,
+				sizeof(f54->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 31 */
+	if (f54->query_30.has_ctrl122_query31)
+		offset += 1;
+
+	/* query 32 */
+	if (f54->query_30.has_query32) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_32.data,
+				sizeof(f54->query_32.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 33 */
+	if (f54->query_32.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_33.data,
+				sizeof(f54->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 34 */
+	if (f54->query_32.has_query34)
+		offset += 1;
+
+	/* query 35 */
+	if (f54->query_32.has_query35) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_35.data,
+				sizeof(f54->query_35.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 36 */
+	if (f54->query_33.has_query36) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_36.data,
+				sizeof(f54->query_36.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 37 */
+	if (f54->query_36.has_query37)
+		offset += 1;
+
+	/* query 38 */
+	if (f54->query_36.has_query38) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_38.data,
+				sizeof(f54->query_38.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 39 */
+	if (f54->query_38.has_query39) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_39.data,
+				sizeof(f54->query_39.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 40 */
+	if (f54->query_39.has_query40) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_40.data,
+				sizeof(f54->query_40.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 41 */
+	if (f54->query_40.has_ctrl163_query41)
+		offset += 1;
+
+	/* query 42 */
+	if (f54->query_40.has_ctrl165_query42)
+		offset += 1;
+
+	/* query 43 */
+	if (f54->query_40.has_query43) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_43.data,
+				sizeof(f54->query_43.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 44 45 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		offset += 2;
+
+	/* query 46 */
+	if (f54->query_43.has_query46) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_46.data,
+				sizeof(f54->query_46.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 47 */
+	if (f54->query_46.has_query47) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_47.data,
+				sizeof(f54->query_47.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 48 reserved */
+
+	/* query 49 */
+	if (f54->query_47.has_query49) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_49.data,
+				sizeof(f54->query_49.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 50 */
+	if (f54->query_49.has_query50) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_50.data,
+				sizeof(f54->query_50.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 51 */
+	if (f54->query_50.has_query51) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_51.data,
+				sizeof(f54->query_51.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 53 54 */
+	if (f54->query_51.has_query53_query54_ctrl198)
+		offset += 2;
+
+	/* query 55 */
+	if (f54->query_51.has_query55) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_55.data,
+				sizeof(f54->query_55.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 56 */
+	if (f54->query_55.has_query56)
+		offset += 1;
+
+	/* query 57 */
+	if (f54->query_55.has_query57) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_57.data,
+				sizeof(f54->query_57.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 58 */
+	if (f54->query_57.has_query58) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_58.data,
+				sizeof(f54->query_58.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 59 */
+	if (f54->query_58.has_query59)
+		offset += 1;
+
+	/* query 60 */
+	if (f54->query_58.has_query60)
+		offset += 1;
+
+	/* query 61 */
+	if (f54->query_58.has_query61) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_61.data,
+				sizeof(f54->query_61.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 62 63 */
+	if (f54->query_61.has_ctrl215_query62_query63)
+		offset += 2;
+
+	/* query 64 */
+	if (f54->query_61.has_query64) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_64.data,
+				sizeof(f54->query_64.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 65 */
+	if (f54->query_64.has_query65) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_65.data,
+				sizeof(f54->query_65.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 66 */
+	if (f54->query_65.has_query66_ctrl231)
+		offset += 1;
+
+	/* query 67 */
+	if (f54->query_65.has_query67) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_67.data,
+				sizeof(f54->query_67.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 68 */
+	if (f54->query_67.has_query68) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_68.data,
+				sizeof(f54->query_68.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 69 */
+	if (f54->query_68.has_query69) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_69.data,
+				sizeof(f54->query_69.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Record the F54 register base addresses from its PDT entry and build
+ * the interrupt mask for this function.
+ *
+ * @rmi4_data:  driver data (unused here beyond the prototype contract)
+ * @fd:         PDT function descriptor for F54
+ * @intr_count: number of interrupt sources seen before this function
+ * @page:       RMI page the descriptor was found on; folded into the
+ *              high byte of each 16-bit register address
+ */
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count,
+		unsigned char page)
+{
+	unsigned char ii;
+	unsigned char intr_offset;
+
+	f54->query_base_addr = fd->query_base_addr | (page << 8);
+	f54->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f54->data_base_addr = fd->data_base_addr | (page << 8);
+	f54->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	/* interrupt status register index: one register per 8 sources */
+	f54->intr_reg_num = (intr_count + 7) / 8;
+	if (f54->intr_reg_num != 0)
+		f54->intr_reg_num -= 1;
+
+	/* set one mask bit per interrupt source owned by F54, starting
+	 * at this function's bit position within the status register */
+	f54->intr_mask = 0;
+	intr_offset = intr_count % 8;
+	for (ii = intr_offset;
+			ii < (fd->intr_src_count + intr_offset);
+			ii++) {
+		f54->intr_mask |= 1 << ii;
+	}
+}
+
+/*
+ * Walk the F55 control register layout (using the query flags cached by
+ * test_f55_set_queries()) and record the offsets of the control
+ * registers this module needs: the AFE mux size register (ctrl 43/44)
+ * and the force-sensing tx/rx assignment registers (ctrl 45/46).
+ *
+ * Like the query walk, 'offset' must be advanced in strict register
+ * order, including registers that are present but unused here.
+ *
+ * Always returns 0 (kept int for symmetry with the other set_* helpers).
+ */
+static int test_f55_set_controls(void)
+{
+	unsigned char offset = 0;
+
+	/* controls 0 1 2 */
+	if (f55->query.has_sensor_assignment)
+		offset += 3;
+
+	/* control 3 */
+	if (f55->query.has_edge_compensation)
+		offset++;
+
+	/* control 4 */
+	if (f55->query.curve_compensation_mode == 0x1 ||
+			f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 5 */
+	if (f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 6 */
+	if (f55->query.has_ctrl6)
+		offset++;
+
+	/* control 7 */
+	if (f55->query.has_alternate_transmitter_assignment)
+		offset++;
+
+	/* control 8 */
+	if (f55->query_3.has_ctrl8)
+		offset++;
+
+	/* control 9 */
+	if (f55->query_3.has_ctrl9)
+		offset++;
+
+	/* control 10 */
+	if (f55->query_5.has_corner_compensation)
+		offset++;
+
+	/* control 11 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset++;
+
+	/* control 12 */
+	if (f55->query_5.has_ctrl12)
+		offset++;
+
+	/* control 13 */
+	if (f55->query_5.has_ctrl13)
+		offset++;
+
+	/* control 14 */
+	if (f55->query_5.has_ctrl14)
+		offset++;
+
+	/* control 15 */
+	if (f55->query_5.has_basis_function)
+		offset++;
+
+	/* control 16 */
+	if (f55->query_17.has_ctrl16)
+		offset++;
+
+	/* control 17 */
+	if (f55->query_17.has_ctrl17)
+		offset++;
+
+	/* controls 18 19 */
+	if (f55->query_17.has_ctrl18_ctrl19)
+		offset += 2;
+
+	/* control 20 */
+	if (f55->query_17.has_ctrl20)
+		offset++;
+
+	/* control 21 */
+	if (f55->query_17.has_ctrl21)
+		offset++;
+
+	/* control 22 */
+	if (f55->query_17.has_ctrl22)
+		offset++;
+
+	/* control 23 */
+	if (f55->query_18.has_ctrl23)
+		offset++;
+
+	/* control 24 */
+	if (f55->query_18.has_ctrl24)
+		offset++;
+
+	/* control 25 */
+	if (f55->query_18.has_ctrl25)
+		offset++;
+
+	/* control 26 */
+	if (f55->query_18.has_ctrl26)
+		offset++;
+
+	/* control 27 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset++;
+
+	/* control 28 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset++;
+
+	/* control 29 */
+	if (f55->query_22.has_ctrl29)
+		offset++;
+
+	/* control 30 */
+	if (f55->query_22.has_ctrl30)
+		offset++;
+
+	/* control 31 */
+	if (f55->query_22.has_ctrl31)
+		offset++;
+
+	/* control 32 */
+	if (f55->query_22.has_ctrl32)
+		offset++;
+
+	/* controls 33 34 35 36 reserved */
+
+	/* control 37 */
+	if (f55->query_28.has_ctrl37)
+		offset++;
+
+	/* control 38 */
+	if (f55->query_30.has_ctrl38)
+		offset++;
+
+	/* control 39 */
+	if (f55->query_30.has_ctrl39)
+		offset++;
+
+	/* control 40 */
+	if (f55->query_30.has_ctrl40)
+		offset++;
+
+	/* control 41 */
+	if (f55->query_30.has_ctrl41)
+		offset++;
+
+	/* control 42 */
+	if (f55->query_30.has_ctrl42)
+		offset++;
+
+	/* controls 43 44: AFE mux sizes, offset consumed by test_f55_init() */
+	if (f55->query_30.has_ctrl43_ctrl44) {
+		f55->afe_mux_offset = offset;
+		offset += 2;
+	}
+
+	/* controls 45 46: force-sensing tx/rx assignment registers */
+	if (f55->query_33.has_ctrl45_ctrl46) {
+		f55->has_force = true;
+		f55->force_tx_offset = offset;
+		f55->force_rx_offset = offset + 1;
+		offset += 2;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk the F55 query register space and cache the query blocks needed
+ * to interpret the sensor (tx/rx) assignment layout.  Same packed-
+ * register walking scheme as test_set_queries(): presence flags in
+ * earlier blocks decide whether later registers exist.
+ *
+ * Side effects: caches amp_sensor/size_of_column2mux (query 23) and
+ * extended_amp (query 33) on the f55 handle for test_f55_init().
+ *
+ * Returns 0 on success or a negative error code from the bus read.
+ */
+static int test_f55_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->query_base_addr,
+			f55->query.data,
+			sizeof(f55->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f55->query.data);
+
+	/* query 3 */
+	if (f55->query.has_single_layer_multi_touch) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_3.data,
+				sizeof(f55->query_3.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 4 */
+	if (f55->query_3.has_ctrl9)
+		offset += 1;
+
+	/* query 5 */
+	if (f55->query.has_query5) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_5.data,
+				sizeof(f55->query_5.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 6 7 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset += 2;
+
+	/* query 8 */
+	if (f55->query_3.has_ctrl8)
+		offset += 1;
+
+	/* query 9 */
+	if (f55->query_3.has_query9)
+		offset += 1;
+
+	/* queries 10 11 12 13 14 15 16 */
+	if (f55->query_5.has_basis_function)
+		offset += 7;
+
+	/* query 17 */
+	if (f55->query_5.has_query17) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_17.data,
+				sizeof(f55->query_17.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 18 */
+	if (f55->query_17.has_query18) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_18.data,
+				sizeof(f55->query_18.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 19 */
+	if (f55->query_18.has_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset += 1;
+
+	/* query 22 */
+	if (f55->query_18.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_22.data,
+				sizeof(f55->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f55->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_23.data,
+				sizeof(f55->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		/* cache amp-sensor layout info for test_f55_init() */
+		f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+		f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+	}
+
+	/* queries 24 25 26 27 reserved */
+
+	/* query 28 */
+	if (f55->query_22.has_query28) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_28.data,
+				sizeof(f55->query_28.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 29 */
+	if (f55->query_28.has_query29)
+		offset += 1;
+
+	/* query 30 */
+	if (f55->query_28.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_30.data,
+				sizeof(f55->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 31 32 */
+	if (f55->query_30.has_query31_query32)
+		offset += 2;
+
+	/* query 33 */
+	if (f55->query_30.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_33.data,
+				sizeof(f55->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->extended_amp = f55->query_33.has_extended_amp_pad;
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize F55 state: read query/control layout, fetch the tx/rx
+ * sensor assignment tables, and derive the effective assigned tx/rx
+ * counts used by F54 report sizing.  Electrode slots whose assignment
+ * byte is 0xff are unassigned and are not counted.
+ *
+ * Errors are logged and cause an early return; the caller proceeds
+ * with whatever counts were established before the failure.
+ *
+ * NOTE(review): the kzalloc() results for tx/rx (and force tx/rx)
+ * assignment buffers are not checked before being passed to the bus
+ * read, and early returns leave the already-allocated buffers to be
+ * reclaimed later by remove/reset — confirm this matches the driver's
+ * ownership rules.
+ */
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char rx_electrodes;
+	unsigned char tx_electrodes;
+	struct f55_control_43 ctrl_43;
+
+	retval = test_f55_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 query registers\n",
+				__func__);
+		return;
+	}
+
+	/* without sensor assignment registers there is nothing to map */
+	if (!f55->query.has_sensor_assignment)
+		return;
+
+	retval = test_f55_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F55 control registers\n",
+				__func__);
+		return;
+	}
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+	f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+			f55->tx_assignment,
+			tx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 tx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+			f55->rx_assignment,
+			rx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 rx assignment\n",
+				__func__);
+		return;
+	}
+
+	/* count assigned electrodes (0xff marks an unused slot) */
+	f54->tx_assigned = 0;
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		if (f55->tx_assignment[ii] != 0xff)
+			f54->tx_assigned++;
+	}
+
+	f54->rx_assigned = 0;
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		if (f55->rx_assignment[ii] != 0xff)
+			f54->rx_assigned++;
+	}
+
+	/* amp sensors pair rx lines and fix tx count to the column2mux size */
+	if (f55->amp_sensor) {
+		f54->tx_assigned = f55->size_of_column2mux;
+		f54->rx_assigned /= 2;
+	}
+
+	/* extended amp pad: tx count comes from the AFE left/right mux sizes */
+	if (f55->extended_amp) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->afe_mux_offset,
+				ctrl_43.data,
+				sizeof(ctrl_43.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 AFE mux sizes\n",
+					__func__);
+			return;
+		}
+
+		f54->tx_assigned = ctrl_43.afe_l_mux_size +
+				ctrl_43.afe_r_mux_size;
+	}
+
+	/* force mapping: force electrodes add to the assigned counts */
+	if (f55->has_force) {
+		f55->force_tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+		f55->force_rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_tx_offset,
+				f55->force_tx_assignment,
+				tx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force tx assignment\n",
+					__func__);
+			return;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_rx_offset,
+				f55->force_rx_assignment,
+				rx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force rx assignment\n",
+					__func__);
+			return;
+		}
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			if (f55->force_tx_assignment[ii] != 0xff)
+				f54->tx_assigned++;
+		}
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			if (f55->force_rx_assignment[ii] != 0xff)
+				f54->rx_assigned++;
+		}
+	}
+}
+
+/*
+ * Allocate the global F55 handle and record its register base
+ * addresses from the PDT entry (page folded into the high byte).
+ * On allocation failure, f55 stays NULL and callers skip F55 handling.
+ */
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+	if (!f55) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F55\n",
+				__func__);
+		return;
+	}
+
+	f55->query_base_addr = fd->query_base_addr | (page << 8);
+	f55->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f55->data_base_addr = fd->data_base_addr | (page << 8);
+	f55->command_base_addr = fd->cmd_base_addr | (page << 8);
+}
+
+/*
+ * Initialize F21 (force sensing): discover the force tx/rx mapping
+ * register via queries 2/5/11, read the assignment table, and add the
+ * assigned force electrodes to f21's tx/rx counts.  All temporary query
+ * buffers are heap-allocated and released through the single exit path.
+ *
+ * Failures are logged (where a message is useful) and the function
+ * returns without setting f21->has_force.
+ *
+ * NOTE(review): f21->force_txrx_assignment is allocated with kzalloc()
+ * but not checked for NULL before the bus read into it — confirm
+ * whether the bus layer tolerates a NULL buffer.
+ */
+static void test_f21_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size_of_query2;
+	unsigned char size_of_query5;
+	unsigned char query_11_offset;
+	unsigned char ctrl_4_offset;
+	struct f21_query_2 *query_2 = NULL;
+	struct f21_query_5 *query_5 = NULL;
+	struct f21_query_11 *query_11 = NULL;
+
+	query_2 = kzalloc(sizeof(*query_2), GFP_KERNEL);
+	if (!query_2) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_2\n",
+				__func__);
+		goto exit;
+	}
+
+	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		goto exit;
+	}
+
+	query_11 = kzalloc(sizeof(*query_11), GFP_KERNEL);
+	if (!query_11) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_11\n",
+				__func__);
+		goto exit;
+	}
+
+	/* query 1 holds the on-device size of the query 2 block */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 1,
+			&size_of_query2,
+			sizeof(size_of_query2));
+	if (retval < 0)
+		goto exit;
+
+	/* clamp to our struct so the device cannot overflow the buffer */
+	if (size_of_query2 > sizeof(query_2->data))
+		size_of_query2 = sizeof(query_2->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 2,
+			query_2->data,
+			size_of_query2);
+	if (retval < 0)
+		goto exit;
+
+	if (!query_2->query11_is_present) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force capabilities\n",
+				__func__);
+		goto exit;
+	}
+
+	/* query 11's offset is the count of present lower-numbered queries */
+	query_11_offset = query_2->query0_is_present +
+			query_2->query1_is_present +
+			query_2->query2_is_present +
+			query_2->query3_is_present +
+			query_2->query4_is_present +
+			query_2->query5_is_present +
+			query_2->query6_is_present +
+			query_2->query7_is_present +
+			query_2->query8_is_present +
+			query_2->query9_is_present +
+			query_2->query10_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 11,
+			query_11->data,
+			sizeof(query_11->data));
+	if (retval < 0)
+		goto exit;
+
+	if (!query_11->has_force_sensing_txrx_mapping) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force mapping\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->max_num_of_tx = query_11->max_number_of_force_txs;
+	f21->max_num_of_rx = query_11->max_number_of_force_rxs;
+	f21->max_num_of_txrx = f21->max_num_of_tx + f21->max_num_of_rx;
+
+	f21->force_txrx_assignment = kzalloc(f21->max_num_of_txrx, GFP_KERNEL);
+
+	/* query 4 holds the on-device size of the query 5 block */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 4,
+			&size_of_query5,
+			sizeof(size_of_query5));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query5 > sizeof(query_5->data))
+		size_of_query5 = sizeof(query_5->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 5,
+			query_5->data,
+			size_of_query5);
+	if (retval < 0)
+		goto exit;
+
+	/* control 4's offset is the count of present lower controls */
+	ctrl_4_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->control_base_addr + ctrl_4_offset,
+			f21->force_txrx_assignment,
+			f21->max_num_of_txrx);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F21 force txrx assignment\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->has_force = true;
+
+	/* count assigned force electrodes (0xff marks an unused slot);
+	 * tx entries come first, rx entries follow in the same table */
+	for (ii = 0; ii < f21->max_num_of_tx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->tx_assigned++;
+	}
+
+	for (ii = f21->max_num_of_tx; ii < f21->max_num_of_txrx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->rx_assigned++;
+	}
+
+exit:
+	kfree(query_2);
+	kfree(query_5);
+	kfree(query_11);
+}
+
+/*
+ * Allocate the global F21 handle and record its register base
+ * addresses from the PDT entry (page folded into the high byte).
+ * On allocation failure, f21 stays NULL and callers skip F21 handling.
+ */
+static void test_f21_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f21 = kzalloc(sizeof(*f21), GFP_KERNEL);
+	if (!f21) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F21\n",
+				__func__);
+		return;
+	}
+
+	f21->query_base_addr = fd->query_base_addr | (page << 8);
+	f21->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f21->data_base_addr = fd->data_base_addr | (page << 8);
+	f21->command_base_addr = fd->cmd_base_addr | (page << 8);
+}
+
+/*
+ * Scan the Page Description Table on every serviced page, walking
+ * descriptors downward from PDT_START, and latch the register bases
+ * for F54 (required), F55 and F21 (optional).  The running interrupt
+ * source count feeds F54's interrupt mask calculation.
+ *
+ * Returns 0 on success (F54 found, or both F54 and F55 found early),
+ * -EINVAL if no F54 descriptor exists, or a negative bus error.
+ */
+static int test_scan_pdt(void)
+{
+	int retval;
+	unsigned char intr_count = 0;
+	unsigned char page;
+	unsigned short addr;
+	bool f54found = false;
+	bool f55found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* fold page into the high byte for the read ... */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* ... then strip it so the loop bound still works */
+			addr &= ~(MASK_8BIT << 8);
+
+			/* fn_number 0 marks the end of this page's PDT */
+			if (!rmi_fd.fn_number)
+				break;
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F54:
+				test_f54_set_regs(rmi4_data,
+						&rmi_fd, intr_count, page);
+				f54found = true;
+				break;
+			case SYNAPTICS_RMI4_F55:
+				test_f55_set_regs(rmi4_data,
+						&rmi_fd, page);
+				f55found = true;
+				break;
+			case SYNAPTICS_RMI4_F21:
+				test_f21_set_regs(rmi4_data,
+						&rmi_fd, page);
+				break;
+			default:
+				break;
+			}
+
+			if (f54found && f55found)
+				goto pdt_done;
+
+			intr_count += rmi_fd.intr_src_count;
+		}
+	}
+
+	if (!f54found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F54\n",
+				__func__);
+		return -EINVAL;
+	}
+
+pdt_done:
+	return 0;
+}
+
+/*
+ * Attention (interrupt) callback: if the asserted interrupt bits
+ * include F54's, defer report collection to the test workqueue.
+ * Safe to call before init completes (f54 may still be NULL).
+ */
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!f54)
+		return;
+
+	if (f54->intr_mask & intr_mask)
+		queue_work(f54->test_report_workqueue, &f54->test_report_work);
+
+	return;
+}
+
+/*
+ * Module init hook: allocate the F54 handle, scan the PDT, read the
+ * F54 query/control layout, bring up optional F55/F21 support, create
+ * the sysfs interface, and start the report workqueue and watchdog
+ * timer.  Idempotent: returns 0 immediately if already initialized.
+ *
+ * Returns 0 on success or a negative error code; on failure all
+ * partially-initialized state is torn down and the global handles are
+ * reset to NULL.
+ *
+ * NOTE(review): the create_singlethread_workqueue() return value is
+ * not checked — confirm whether a NULL workqueue is tolerated by
+ * queue_work() callers here.
+ */
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (f54) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+	if (!f54) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F54\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	f54->rmi4_data = rmi4_data;
+
+	f55 = NULL;
+
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	/* defaults; refined by test_f55_init() when F55 is present */
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	retval = test_set_sysfs();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs entries\n",
+				__func__);
+		goto exit_sysfs;
+	}
+
+	f54->test_report_workqueue =
+			create_singlethread_workqueue("test_report_workqueue");
+	INIT_WORK(&f54->test_report_work, test_report_work);
+
+	/* watchdog catches reports that never arrive */
+	hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	f54->watchdog.function = test_get_report_timeout;
+	INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+	mutex_init(&f54->status_mutex);
+	f54->status = STATUS_IDLE;
+
+	return 0;
+
+exit_sysfs:
+	/* unwind in reverse order of setup */
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	kfree(f21);
+	f21 = NULL;
+	kfree(f55);
+	f55 = NULL;
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Module remove hook: stop the watchdog and workqueue, tear down
+ * sysfs, release all F54/F55/F21 allocations, and signal
+ * test_remove_complete so the module exit path can finish.
+ */
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!f54)
+		goto exit;
+
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	complete(&test_remove_complete);
+}
+
+/*
+ * Module reset hook: after a device reset, release the per-function
+ * allocations and re-run PDT scan and query/control discovery (the
+ * firmware layout may have changed).  Performs a full init if no
+ * handle exists yet; on re-discovery failure it tears everything down
+ * like the remove path, leaving the handles NULL.
+ */
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!f54) {
+		synaptics_rmi4_test_init(rmi4_data);
+		return;
+	}
+
+	/* drop stale F55/F21 state before rescanning */
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f21);
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	/* defaults; refined by test_f55_init() when F55 is present */
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	f54->status = STATUS_IDLE;
+
+	return;
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	/* unrecoverable: full teardown, as in the remove path */
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+	return;
+}
+
+/* Expansion-function descriptor registered with the DSX core; hooks
+ * not needed by the test reporting module are left NULL. */
+static struct synaptics_rmi4_exp_fn test_module = {
+	.fn_type = RMI_TEST_REPORTING,
+	.init = synaptics_rmi4_test_init,
+	.remove = synaptics_rmi4_test_remove,
+	.reset = synaptics_rmi4_test_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_test_attn,
+};
+
+/* Register the test reporting module with the DSX core (insert = true). */
+static int __init rmi4_test_module_init(void)
+{
+	synaptics_rmi4_new_function(&test_module, true);
+
+	return 0;
+}
+
+/* Unregister from the DSX core and wait for the remove hook to
+ * signal completion before the module text is unloaded. */
+static void __exit rmi4_test_module_exit(void)
+{
+	synaptics_rmi4_new_function(&test_module, false);
+
+	wait_for_completion(&test_remove_complete);
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
new file mode 100755
index 0000000..372c403
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
@@ -0,0 +1,403 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+struct synaptics_rmi4_video_handle {
+	unsigned char param;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+static struct device_attribute attrs[] = {
+	__ATTR(dcs_write, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_dcs_write_store),
+	__ATTR(param, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct f38_command command;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	memset(&command, 0x00, sizeof(command));
+
+	command.command_opcode = command_opcode;
+	command.parameter_field_1 = video->param;
+	command.send_to_dcs = 1;
+
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	while (attr_count-- > 0)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!video)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+	kfree(video);
+	video = NULL;
+
+exit:
+	complete(&video_remove_complete);
+}
+
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!video)
+		synaptics_rmi4_video_init(rmi4_data);
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(suspend_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = suspend_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(suspend_sequence[ii].wait_time);
+	}
+}
+
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(resume_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = resume_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(resume_sequence[ii].wait_time);
+	}
+}
+#endif
+
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+static int __init rmi4_video_module_init(void)
+{
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_video_module_exit(void)
+{
+	synaptics_rmi4_new_function(&video_module, false);
+
+	wait_for_completion(&video_remove_complete);
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7ca65c5..e5ffc3a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -498,6 +498,7 @@
 config IOMMU_TESTS
 	bool "Interactive IOMMU performance/functional tests"
 	select IOMMU_API
+	select ARM64_PTDUMP_CORE
 	help
 	  Enables a suite of IOMMU unit tests.  The tests are runnable
 	  through debugfs.  Unlike the IOMMU_DEBUG_TRACKING option, the
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0b38776..8d9920f 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3119,21 +3119,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+		int type, prot = 0;
 		size_t length;
-		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
+		type   = IOMMU_RESV_DIRECT;
 		length = entry->address_end - entry->address_start;
 		if (entry->prot & IOMMU_PROT_IR)
 			prot |= IOMMU_READ;
 		if (entry->prot & IOMMU_PROT_IW)
 			prot |= IOMMU_WRITE;
+		if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+			/* Exclusion range */
+			type = IOMMU_RESV_RESERVED;
 
 		region = iommu_alloc_resv_region(entry->address_start,
-						 length, prot,
-						 IOMMU_RESV_DIRECT);
+						 length, prot, type);
 		if (!region) {
 			pr_err("Out of memory allocating dm-regions for %s\n",
 				dev_name(dev));
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index e062ab9..3a1d303 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -356,7 +356,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
 	u64 start = iommu->exclusion_start & PAGE_MASK;
-	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
 	u64 entry;
 
 	if (!iommu->exclusion_start)
@@ -2001,6 +2001,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 	if (e == NULL)
 		return -ENOMEM;
 
+	if (m->flags & IVMD_FLAG_EXCL_RANGE)
+		init_exclusion_range(m);
+
 	switch (m->type) {
 	default:
 		kfree(e);
@@ -2047,9 +2050,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
 	while (p < end) {
 		m = (struct ivmd_header *)p;
-		if (m->flags & IVMD_FLAG_EXCL_RANGE)
-			init_exclusion_range(m);
-		else if (m->flags & IVMD_FLAG_UNITY_MAP)
+		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
 			init_unity_map_range(m);
 
 		p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e2b342e..69f3d4c 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -373,6 +373,8 @@
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 3c21145..d27f6817 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -430,6 +430,8 @@ static int arm_smmu_setup_default_domain(struct device *dev,
 				struct iommu_domain *domain);
 static int __arm_smmu_domain_set_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data);
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+				    enum iommu_attr attr, void *data);
 
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
@@ -2250,6 +2252,20 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	return &smmu_domain->domain;
 }
 
+static void arm_smmu_put_dma_cookie(struct iommu_domain *domain)
+{
+	int s1_bypass = 0, is_fast = 0;
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	if (is_fast)
+		fast_smmu_put_dma_cookie(domain);
+	else if (!s1_bypass)
+		iommu_put_dma_cookie(domain);
+}
+
 static void arm_smmu_domain_free(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2258,7 +2274,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 	 * Free the domain resources. We assume that all devices have
 	 * already been detached.
 	 */
-	arm_iommu_put_dma_cookie(domain);
+	arm_smmu_put_dma_cookie(domain);
 	arm_smmu_destroy_domain_context(domain);
 	kfree(smmu_domain);
 }
@@ -2866,6 +2882,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+	int s1_bypass = 0;
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -2894,6 +2911,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	if (ret < 0)
 		goto out_power_off;
 
+	ret = arm_smmu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+	if (s1_bypass)
+		domain->type = IOMMU_DOMAIN_UNMANAGED;
+
 	/* Do not modify the SIDs, HW is still running */
 	if (is_dynamic_domain(domain)) {
 		ret = 0;
@@ -3909,65 +3931,6 @@ static void arm_smmu_trigger_fault(struct iommu_domain *domain,
 	arm_smmu_power_off(smmu->pwr);
 }
 
-static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
-				       unsigned long offset)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	void __iomem *cb_base;
-	unsigned long val;
-
-	if (offset >= SZ_4K) {
-		pr_err("Invalid offset: 0x%lx\n", offset);
-		return 0;
-	}
-
-	smmu = smmu_domain->smmu;
-	if (!smmu) {
-		WARN(1, "Can't read registers of a detached domain\n");
-		val = 0;
-		return val;
-	}
-
-	if (arm_smmu_power_on(smmu->pwr))
-		return 0;
-
-	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
-	val = readl_relaxed(cb_base + offset);
-
-	arm_smmu_power_off(smmu->pwr);
-	return val;
-}
-
-static void arm_smmu_reg_write(struct iommu_domain *domain,
-			       unsigned long offset, unsigned long val)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	void __iomem *cb_base;
-
-	if (offset >= SZ_4K) {
-		pr_err("Invalid offset: 0x%lx\n", offset);
-		return;
-	}
-
-	smmu = smmu_domain->smmu;
-	if (!smmu) {
-		WARN(1, "Can't read registers of a detached domain\n");
-		return;
-	}
-
-	if (arm_smmu_power_on(smmu->pwr))
-		return;
-
-	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
-	writel_relaxed(val, cb_base + offset);
-
-	arm_smmu_power_off(smmu->pwr);
-}
-
 static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
 {
 	arm_smmu_tlb_inv_context_s1(to_smmu_domain(domain));
@@ -4008,8 +3971,6 @@ static struct iommu_ops arm_smmu_ops = {
 	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 	.trigger_fault		= arm_smmu_trigger_fault,
-	.reg_read		= arm_smmu_reg_read,
-	.reg_write		= arm_smmu_reg_write,
 	.tlbi_domain		= arm_smmu_tlbi_domain,
 	.enable_config_clocks	= arm_smmu_enable_config_clocks,
 	.disable_config_clocks	= arm_smmu_disable_config_clocks,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5805465..84b2706 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -694,6 +694,12 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 			ms.num_pte * sizeof(*ms.pte_start),
 			DMA_TO_DEVICE);
 
+	/*
+	 * Synchronise all PTE updates for the new mapping before there's
+	 * a chance for anything to kick off a table walk for the new iova.
+	 */
+	wmb();
+
 	return mapped;
 
 out_err:
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 4378f2c..5481f0c 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -31,6 +31,10 @@
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
 
+#ifdef CONFIG_ARM64_PTDUMP_CORE
+#include <asm/ptdump.h>
+#endif
+
 #if defined(CONFIG_IOMMU_TESTS)
 
 static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
@@ -177,6 +181,9 @@ struct iommu_debug_device {
 	unsigned int clk_count;
 	/* Protects domain */
 	struct mutex state_lock;
+#ifdef CONFIG_ARM64_PTDUMP_CORE
+	struct ptdump_info pt_info;
+#endif
 };
 
 static int iommu_debug_build_phoney_sg_table(struct device *dev,
@@ -2081,6 +2088,43 @@ static const struct file_operations iommu_debug_trigger_fault_fops = {
 	.write	= iommu_debug_trigger_fault_write,
 };
 
+#ifdef CONFIG_ARM64_PTDUMP_CORE
+static int ptdump_show(struct seq_file *s, void *v)
+{
+	struct iommu_debug_device *ddev = s->private;
+	struct ptdump_info *info = &(ddev->pt_info);
+	struct mm_struct		mm;
+	phys_addr_t phys;
+
+	info->markers = (struct addr_marker[]){
+		{ 0,		"start" },
+	};
+	info->base_addr	= 0;
+	info->mm = &mm;
+
+	if (ddev->domain) {
+		iommu_domain_get_attr(ddev->domain, DOMAIN_ATTR_PT_BASE_ADDR,
+			  &(phys));
+
+		info->mm->pgd = (pgd_t *)phys_to_virt(phys);
+		ptdump_walk_pgd(s, info);
+	}
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ptdump_show, inode->i_private);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
 /*
  * The following will only work for drivers that implement the generic
  * device tree bindings described in
@@ -2255,6 +2299,15 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
 		goto err_rmdir;
 	}
 
+#ifdef CONFIG_ARM64_PTDUMP_CORE
+	if (!debugfs_create_file("iommu_page_tables", 0200, dir, ddev,
+			   &ptdump_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/iommu_page_tables debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+#endif
+
 	list_add(&ddev->list, &iommu_debug_devices);
 	return 0;
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d5821dd..462e496 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1931,31 +1931,6 @@ void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
 		domain->ops->trigger_fault(domain, flags);
 }
 
-/**
- * iommu_reg_read() - read an IOMMU register
- *
- * Reads the IOMMU register at the given offset.
- */
-unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
-{
-	if (domain->ops->reg_read)
-		return domain->ops->reg_read(domain, offset);
-	return 0;
-}
-
-/**
- * iommu_reg_write() - write an IOMMU register
- *
- * Writes the given value to the IOMMU register at the given offset.
- */
-void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
-		     unsigned long val)
-{
-	if (domain->ops->reg_write)
-		domain->ops->reg_write(domain, offset, val);
-}
-
-
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
 						  size_t length, int prot,
 						  enum iommu_resv_type type)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 83fe262..b7b3339 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -175,6 +175,24 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
 	rb_insert_color(&iova->node, root);
 }
 
+#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT
+static unsigned long limit_align(struct iova_domain *iovad,
+					unsigned long shift)
+{
+	unsigned long max;
+
+	max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT
+		- iova_shift(iovad);
+	return min(shift, max);
+}
+#else
+static unsigned long limit_align(struct iova_domain *iovad,
+					unsigned long shift)
+{
+	return shift;
+}
+#endif
+
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		unsigned long size, unsigned long limit_pfn,
 			struct iova *new, bool size_aligned)
@@ -186,7 +204,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	unsigned long align_mask = ~0UL;
 
 	if (size_aligned)
-		align_mask <<= fls_long(size - 1);
+		align_mask <<= limit_align(iovad, fls_long(size - 1));
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 0d03341..121d3cb 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
 #define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
 #define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
-#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
 #define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
 					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
 #define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
@@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_MATCH_ALL;
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_SECTION(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_GROUP(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index aa72907..0390603 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -22,6 +22,15 @@
 #define AR71XX_RESET_REG_MISC_INT_ENABLE	4
 
 #define ATH79_MISC_IRQ_COUNT			32
+#define ATH79_MISC_PERF_IRQ			5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+	return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
 {
 	void __iomem *base = domain->host_data;
 
+	ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
 	/* Disable and clear all interrupts */
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 4c8e510..9a99062 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -5,6 +5,8 @@
 
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
@@ -18,6 +20,8 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#define PDC_IPC_LOG_SZ		2
+
 #define PDC_MAX_IRQS		138
 #define PDC_MAX_GPIO_IRQS	256
 
@@ -36,9 +40,11 @@ struct pdc_pin_region {
 };
 
 static DEFINE_RAW_SPINLOCK(pdc_lock);
-static void __iomem *pdc_base;
+static void __iomem *pdc_base, *pdc_cfg_base;
 static struct pdc_pin_region *pdc_region;
 static int pdc_region_cnt;
+static resource_size_t pdc_cfg_size;
+static void *pdc_ipc_log;
 
 static void pdc_reg_write(int reg, u32 i, u32 val)
 {
@@ -63,15 +69,52 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
 	enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
 	enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
 	pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+	ipc_log_string(pdc_ipc_log, "PIN=%d enable=%d", d->hwirq, on);
 	raw_spin_unlock(&pdc_lock);
 }
 
+static void qcom_pdc_gic_disable(struct irq_data *d)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
+	pdc_enable_intr(d, false);
+	irq_chip_disable_parent(d);
+}
+
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+		enum irqchip_irq_state which, bool *state)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return 0;
+
+	return irq_chip_get_parent_state(d, which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+		enum irqchip_irq_state which, bool value)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return 0;
+
+	return irq_chip_set_parent_state(d, which, value);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
+	pdc_enable_intr(d, true);
+	irq_chip_enable_parent(d);
+}
+
 static void qcom_pdc_gic_mask(struct irq_data *d)
 {
 	if (d->hwirq == GPIO_NO_WAKE_IRQ)
 		return;
 
-	pdc_enable_intr(d, false);
+	ipc_log_string(pdc_ipc_log, "PIN=%d mask", d->hwirq);
 	irq_chip_mask_parent(d);
 }
 
@@ -80,10 +123,37 @@ static void qcom_pdc_gic_unmask(struct irq_data *d)
 	if (d->hwirq == GPIO_NO_WAKE_IRQ)
 		return;
 
-	pdc_enable_intr(d, true);
+	ipc_log_string(pdc_ipc_log, "PIN=%d unmask", d->hwirq);
 	irq_chip_unmask_parent(d);
 }
 
+static int spi_configure_type(irq_hw_number_t hwirq, unsigned int type)
+{
+	int spi = hwirq - 32;
+	unsigned int pin = spi / 32;
+	unsigned int mask = BIT(spi % 32);
+	void __iomem *cfg_reg = pdc_cfg_base + pin * 4;
+	unsigned int val;
+	unsigned long flags;
+
+	if (pin * 4 > pdc_cfg_size)
+		return -EFAULT;
+
+	raw_spin_lock_irqsave(&pdc_lock, flags);
+	val = readl_relaxed(cfg_reg);
+	rmb(); /* Ensure the read is complete before modifying */
+	val &= ~mask;
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		val |= mask;
+	writel_relaxed(val, cfg_reg);
+	ipc_log_string(pdc_ipc_log,
+		       "SPI config: GIC-SPI=%d (reg=%d,bit=%d) val=%d",
+		       spi, pin, spi % 32, type & IRQ_TYPE_LEVEL_MASK);
+	raw_spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return 0;
+}
+
 /*
  * GIC does not handle falling edge or active low. To allow falling edge and
  * active low interrupts to be handled at GIC, PDC has an inverter that inverts
@@ -121,7 +191,9 @@ enum pdc_irq_config_bits {
 static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 {
 	int pin_out = d->hwirq;
+	int parent_hwirq = d->parent_data->hwirq;
 	enum pdc_irq_config_bits pdc_type;
+	int ret;
 
 	if (pin_out == GPIO_NO_WAKE_IRQ)
 		return 0;
@@ -151,6 +223,15 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 	}
 
 	pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
+	ipc_log_string(pdc_ipc_log, "Set type: PIN=%d pdc_type=%d gic_type=%d",
+		       pin_out, pdc_type, type);
+
+	/* Additionally, configure (only) the GPIO in the f/w */
+	if (d->domain->host_data) {
+		ret = spi_configure_type(parent_hwirq, type);
+		if (ret)
+			return ret;
+	}
 
 	return irq_chip_set_type_parent(d, type);
 }
@@ -160,6 +241,10 @@ static struct irq_chip qcom_pdc_gic_chip = {
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_mask		= qcom_pdc_gic_mask,
 	.irq_unmask		= qcom_pdc_gic_unmask,
+	.irq_disable		= qcom_pdc_gic_disable,
+	.irq_enable		= qcom_pdc_gic_enable,
+	.irq_get_irqchip_state	= qcom_pdc_gic_get_irqchip_state,
+	.irq_set_irqchip_state	= qcom_pdc_gic_set_irqchip_state,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_set_type		= qcom_pdc_gic_set_type,
 	.flags			= IRQCHIP_MASK_ON_SUSPEND |
@@ -233,6 +318,8 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
 	parent_fwspec.param[1]    = parent_hwirq;
 	parent_fwspec.param[2]    = type;
 
+	ipc_log_string(pdc_ipc_log, "Alloc: PIN=%d GIC-SPI=%d",
+		       hwirq, parent_hwirq);
 	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
 					    &parent_fwspec);
 }
@@ -283,6 +370,8 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
 	parent_fwspec.param[1]    = parent_hwirq;
 	parent_fwspec.param[2]    = type;
 
+	ipc_log_string(pdc_ipc_log, "GPIO alloc: PIN=%d GIC-SPI=%d",
+		       hwirq, parent_hwirq);
 	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
 					    &parent_fwspec);
 }
@@ -336,9 +425,19 @@ static int pdc_setup_pin_mapping(struct device_node *np)
 	return 0;
 }
 
+static int __init qcom_pdc_early_init(void)
+{
+	pdc_ipc_log = ipc_log_context_create(PDC_IPC_LOG_SZ, "pdc", 0);
+
+	return 0;
+
+}
+core_initcall(qcom_pdc_early_init);
+
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
 	struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
+	struct resource res;
 	int ret;
 
 	pdc_base = of_iomap(node, 0);
@@ -369,10 +468,17 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 		goto fail;
 	}
 
+	ret = of_address_to_resource(node, 1, &res);
+	if (!ret) {
+		pdc_cfg_size = resource_size(&res);
+		pdc_cfg_base = ioremap(res.start, pdc_cfg_size);
+	}
+
 	pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain, 0,
 						      PDC_MAX_GPIO_IRQS,
 						      of_fwnode_handle(node),
-						      &qcom_pdc_gpio_ops, NULL);
+						      &qcom_pdc_gpio_ops,
+						      pdc_cfg_base);
 	if (!pdc_gpio_domain) {
 		pr_err("GIC domain add failed for GPIO domain\n");
 		ret = -ENOMEM;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index ecdeb89..149b1ac 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -958,6 +958,7 @@ static void write_iso_callback(struct urb *urb)
  */
 static int starturbs(struct bc_state *bcs)
 {
+	struct usb_device *udev = bcs->cs->hw.bas->udev;
 	struct bas_bc_state *ubc = bcs->hw.bas;
 	struct urb *urb;
 	int j, k;
@@ -975,8 +976,8 @@ static int starturbs(struct bc_state *bcs)
 			rc = -EFAULT;
 			goto error;
 		}
-		usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
-				 usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+		usb_fill_int_urb(urb, udev,
+				 usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
 				 ubc->isoinbuf + k * BAS_INBUFSIZE,
 				 BAS_INBUFSIZE, read_iso_callback, bcs,
 				 BAS_FRAMETIME);
@@ -1006,8 +1007,8 @@ static int starturbs(struct bc_state *bcs)
 			rc = -EFAULT;
 			goto error;
 		}
-		usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
-				 usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+		usb_fill_int_urb(urb, udev,
+				 usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
 				 ubc->isooutbuf->data,
 				 sizeof(ubc->isooutbuf->data),
 				 write_iso_callback, &ubc->isoouturbs[k],
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 18c0a12..b2abc44 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -711,10 +711,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	if (!maddr || maddr->family != AF_ISDN)
+	if (addr_len < sizeof(struct sockaddr_mISDN))
 		return -EINVAL;
 
-	if (addr_len < sizeof(struct sockaddr_mISDN))
+	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;
 
 	lock_sock(sk);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b..7cb4d68 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
 	int devid;
+	const struct of_device_id *of_id;
 	struct pca9532_data *data = i2c_get_clientdata(client);
 	struct pca9532_platform_data *pca9532_pdata =
 			dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
-		devid = (int)(uintptr_t)of_match_device(
-			of_pca9532_leds_match, &client->dev)->data;
+		of_id = of_match_device(of_pca9532_leds_match,
+				&client->dev);
+		if (unlikely(!of_id))
+			return -EINVAL;
+		devid = (int)(uintptr_t) of_id->data;
 	} else {
 		devid = id->driver_data;
 	}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed4..136f86a 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
 		trigger_data->net_dev = NULL;
 	}
 
-	strncpy(trigger_data->device_name, buf, size);
+	memcpy(trigger_data->device_name, buf, size);
+	trigger_data->device_name[size] = 0;
 	if (size > 0 && trigger_data->device_name[size - 1] == '\n')
 		trigger_data->device_name[size - 1] = 0;
 
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
 		container_of(nb, struct led_netdev_data, notifier);
 
 	if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
-	    && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
-	    && evt != NETDEV_CHANGENAME)
+	    && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
 
-	if (strcmp(dev->name, trigger_data->device_name))
+	if (!(dev == trigger_data->net_dev ||
+	      (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
 		return NOTIFY_DONE;
 
 	cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
 		dev_hold(dev);
 		trigger_data->net_dev = dev;
 		break;
-	case NETDEV_CHANGENAME:
 	case NETDEV_UNREGISTER:
-		if (trigger_data->net_dev) {
-			dev_put(trigger_data->net_dev);
-			trigger_data->net_dev = NULL;
-		}
+		dev_put(trigger_data->net_dev);
+		trigger_data->net_dev = NULL;
 		break;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 674b35f..5a5c566 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -53,7 +53,7 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
 	return idx;
 }
 
-static void msg_submit(struct mbox_chan *chan)
+static int __msg_submit(struct mbox_chan *chan)
 {
 	unsigned count, idx;
 	unsigned long flags;
@@ -85,6 +85,24 @@ static void msg_submit(struct mbox_chan *chan)
 exit:
 	spin_unlock_irqrestore(&chan->lock, flags);
 
+	return err;
+}
+
+static void msg_submit(struct mbox_chan *chan)
+{
+	int err = 0;
+
+	/*
+	 * If the controller returns -EAGAIN, then it means, our spinlock
+	 * here is preventing the controller from receiving its interrupt,
+	 * that would help clear the controller channels that are currently
+	 * blocked waiting on the interrupt response.
+	 * Retry again.
+	 */
+	do {
+		err = __msg_submit(chan);
+	} while (err == -EAGAIN);
+
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
 		/* kick start the timer immediately to avoid delays */
 		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7a28232..de85b3a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg)
 		 * possibly issue discards to them, then we add the bucket to
 		 * the free list:
 		 */
-		while (!fifo_empty(&ca->free_inc)) {
+		while (1) {
 			long bucket;
 
-			fifo_pop(&ca->free_inc, bucket);
+			if (!fifo_pop(&ca->free_inc, bucket))
+				break;
 
 			if (ca->discard) {
 				mutex_unlock(&ca->set->bucket_lock);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 522c742..f880e5e 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
+bool is_discard_enabled(struct cache_set *s)
+{
+	struct cache *ca;
+	unsigned int i;
+
+	for_each_cache(ca, s, i)
+		if (ca->discard)
+			return true;
+
+	return false;
+}
+
 int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
@@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
 
-		cache_set_err_on(n != i->j.seq, s,
-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
-				 n, i->j.seq - 1, start, end);
+		if (n != i->j.seq) {
+			if (n == start && is_discard_enabled(s))
+				pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+					n, i->j.seq - 1, start, end);
+			else {
+				pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+					n, i->j.seq - 1, start, end);
+				ret = -EIO;
+				goto err;
+			}
+		}
 
 		for (k = i->j.start;
 		     k < bset_bkey_last(&i->j);
@@ -540,11 +560,11 @@ static void journal_reclaim(struct cache_set *c)
 				  ca->sb.nr_this_dev);
 	}
 
-	bkey_init(k);
-	SET_KEY_PTRS(k, n);
-
-	if (n)
+	if (n) {
+		bkey_init(k);
+		SET_KEY_PTRS(k, n);
 		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+	}
 out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
@@ -671,6 +691,9 @@ static void journal_write_unlocked(struct closure *cl)
 		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
 	}
 
+	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
+	BUG_ON(i == 0);
+
 	atomic_dec_bug(&fifo_back(&c->journal.pin));
 	bch_journal_next(&c->journal);
 	journal_reclaim(c);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 03bb5ce..2409507 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1511,6 +1511,7 @@ static void cache_set_free(struct closure *cl)
 	bch_btree_cache_free(c);
 	bch_journal_free(c);
 
+	mutex_lock(&bch_register_lock);
 	for_each_cache(ca, c, i)
 		if (ca) {
 			ca->set = NULL;
@@ -1529,7 +1530,6 @@ static void cache_set_free(struct closure *cl)
 	mempool_exit(&c->search);
 	kfree(c->devices);
 
-	mutex_lock(&bch_register_lock);
 	list_del(&c->list);
 	mutex_unlock(&bch_register_lock);
 
@@ -1770,13 +1770,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	return NULL;
 }
 
-static void run_cache_set(struct cache_set *c)
+static int run_cache_set(struct cache_set *c)
 {
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
 	struct cache *ca;
 	struct closure cl;
 	unsigned int i;
+	LIST_HEAD(journal);
+	struct journal_replay *l;
 
 	closure_init_stack(&cl);
 
@@ -1864,7 +1866,9 @@ static void run_cache_set(struct cache_set *c)
 		if (j->version < BCACHE_JSET_VERSION_UUID)
 			__uuid_write(c);
 
-		bch_journal_replay(c, &journal);
+		err = "bcache: replay journal failed";
+		if (bch_journal_replay(c, &journal))
+			goto err;
 	} else {
 		pr_notice("invalidating existing data");
 
@@ -1932,11 +1936,19 @@ static void run_cache_set(struct cache_set *c)
 	flash_devs_run(c);
 
 	set_bit(CACHE_SET_RUNNING, &c->flags);
-	return;
+	return 0;
 err:
+	while (!list_empty(&journal)) {
+		l = list_first_entry(&journal, struct journal_replay, list);
+		list_del(&l->list);
+		kfree(l);
+	}
+
 	closure_sync(&cl);
 	/* XXX: test this, it's broken */
 	bch_cache_set_error(c, "%s", err);
+
+	return -EIO;
 }
 
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -2000,8 +2012,11 @@ static const char *register_cache_set(struct cache *ca)
 	ca->set->cache[ca->sb.nr_this_dev] = ca;
 	c->cache_by_alloc[c->caches_loaded++] = ca;
 
-	if (c->caches_loaded == c->sb.nr_in_set)
-		run_cache_set(c);
+	if (c->caches_loaded == c->sb.nr_in_set) {
+		err = "failed to run cache set";
+		if (run_cache_set(c) < 0)
+			goto err;
+	}
 
 	return NULL;
 err:
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6fc9383..151aa95 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
 		if (r)
 			return r;
 
-		for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+		for (b = 0; ; b++) {
 			r = fn(context, cmd->discard_block_size, to_dblock(b),
 			       dm_bitset_cursor_get_value(&c));
 			if (r)
 				break;
+
+			if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
+				break;
+
+			r = dm_bitset_cursor_next(&c);
+			if (r)
+				break;
 		}
 
 		dm_bitset_cursor_end(&c);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index fddffe2..f496213 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	destroy_workqueue(dc->kdelayd_wq);
+	if (dc->kdelayd_wq)
+		destroy_workqueue(dc->kdelayd_wq);
 
 	if (dc->read.dev)
 		dm_put_device(ti, dc->read.dev);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4e0ff7b..562b621 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2568,7 +2568,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
 		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
 			return -EINVAL;
 	} else {
-		__u64 meta_size = ic->provided_data_sectors * ic->tag_size;
+		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
 		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
 				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
 		meta_size <<= ic->log2_buffer_sectors;
@@ -3439,7 +3439,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
 	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
 	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
-	DEBUG_print("	device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
+	DEBUG_print("	data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
 	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
 	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
 	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 419362c..baa966e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -892,6 +892,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	if (attached_handler_name || m->hw_handler_name) {
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
+		kfree(attached_handler_name);
 		if (r) {
 			dm_put_device(ti, p->path.dev);
 			goto bad;
@@ -906,7 +907,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
 	return p;
  bad:
-	kfree(attached_handler_name);
 	free_pgpath(p);
 	return ERR_PTR(r);
 }
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fa68336..d8334cd 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
 			goto out;
 		}
 
+		if (!nr_blkz)
+			break;
+
 		/* Process report */
 		for (i = 0; i < nr_blkz; i++) {
 			ret = dmz_init_zone(zmd, zone, &blkz[i]);
@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
 	/* Get zone information from disk */
 	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
 				  &blkz, &nr_blkz, GFP_NOIO);
+	if (!nr_blkz)
+		ret = -EIO;
 	if (ret) {
 		dmz_dev_err(zmd->dev, "Get zone %u report failed",
 			    dmz_id(zmd, zone));
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8668793..b924f62 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev)
 		mddev->sync_speed_max : sysctl_speed_limit_max;
 }
 
-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
-{
-        return kzalloc(sizeof(struct flush_info), gfp_flags);
-}
-static void flush_info_free(void *flush_info, void *data)
-{
-        kfree(flush_info);
-}
-
-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
-{
-	return kzalloc(sizeof(struct flush_bio), gfp_flags);
-}
-static void flush_bio_free(void *flush_bio, void *data)
-{
-	kfree(flush_bio);
-}
-
 static struct ctl_table_header *raid_table_header;
 
 static struct ctl_table raid_table[] = {
@@ -429,54 +411,31 @@ static int md_congested(void *data, int bits)
 /*
  * Generic flush handling for md
  */
-static void submit_flushes(struct work_struct *ws)
+
+static void md_end_flush(struct bio *bio)
 {
-	struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
-	struct mddev *mddev = fi->mddev;
-	struct bio *bio = fi->bio;
-
-	bio->bi_opf &= ~REQ_PREFLUSH;
-	md_handle_request(mddev, bio);
-
-	mempool_free(fi, mddev->flush_pool);
-}
-
-static void md_end_flush(struct bio *fbio)
-{
-	struct flush_bio *fb = fbio->bi_private;
-	struct md_rdev *rdev = fb->rdev;
-	struct flush_info *fi = fb->fi;
-	struct bio *bio = fi->bio;
-	struct mddev *mddev = fi->mddev;
+	struct md_rdev *rdev = bio->bi_private;
+	struct mddev *mddev = rdev->mddev;
 
 	rdev_dec_pending(rdev, mddev);
 
-	if (atomic_dec_and_test(&fi->flush_pending)) {
-		if (bio->bi_iter.bi_size == 0) {
-			/* an empty barrier - all done */
-			bio_endio(bio);
-			mempool_free(fi, mddev->flush_pool);
-		} else {
-			INIT_WORK(&fi->flush_work, submit_flushes);
-			queue_work(md_wq, &fi->flush_work);
-		}
+	if (atomic_dec_and_test(&mddev->flush_pending)) {
+		/* The pre-request flush has finished */
+		queue_work(md_wq, &mddev->flush_work);
 	}
-
-	mempool_free(fb, mddev->flush_bio_pool);
-	bio_put(fbio);
+	bio_put(bio);
 }
 
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+static void md_submit_flush_data(struct work_struct *ws);
+
+static void submit_flushes(struct work_struct *ws)
 {
+	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct md_rdev *rdev;
-	struct flush_info *fi;
 
-	fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
-
-	fi->bio = bio;
-	fi->mddev = mddev;
-	atomic_set(&fi->flush_pending, 1);
-
+	mddev->start_flush = ktime_get_boottime();
+	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
+	atomic_set(&mddev->flush_pending, 1);
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev)
 		if (rdev->raid_disk >= 0 &&
@@ -486,37 +445,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 			 * we reclaim rcu_read_lock
 			 */
 			struct bio *bi;
-			struct flush_bio *fb;
 			atomic_inc(&rdev->nr_pending);
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
-
-			fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
-			fb->fi = fi;
-			fb->rdev = rdev;
-
 			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
-			bio_set_dev(bi, rdev->bdev);
 			bi->bi_end_io = md_end_flush;
-			bi->bi_private = fb;
+			bi->bi_private = rdev;
+			bio_set_dev(bi, rdev->bdev);
 			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
-			atomic_inc(&fi->flush_pending);
+			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
-
 			rcu_read_lock();
 			rdev_dec_pending(rdev, mddev);
 		}
 	rcu_read_unlock();
+	if (atomic_dec_and_test(&mddev->flush_pending))
+		queue_work(md_wq, &mddev->flush_work);
+}
 
-	if (atomic_dec_and_test(&fi->flush_pending)) {
-		if (bio->bi_iter.bi_size == 0) {
+static void md_submit_flush_data(struct work_struct *ws)
+{
+	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+	struct bio *bio = mddev->flush_bio;
+
+	/*
+	 * must reset flush_bio before calling into md_handle_request to avoid a
+	 * deadlock, because other bios passed md_handle_request suspend check
+	 * could wait for this and below md_handle_request could wait for those
+	 * bios because of suspend check
+	 */
+	mddev->last_flush = mddev->start_flush;
+	mddev->flush_bio = NULL;
+	wake_up(&mddev->sb_wait);
+
+	if (bio->bi_iter.bi_size == 0) {
+		/* an empty barrier - all done */
+		bio_endio(bio);
+	} else {
+		bio->bi_opf &= ~REQ_PREFLUSH;
+		md_handle_request(mddev, bio);
+	}
+}
+
+void md_flush_request(struct mddev *mddev, struct bio *bio)
+{
+	ktime_t start = ktime_get_boottime();
+	spin_lock_irq(&mddev->lock);
+	wait_event_lock_irq(mddev->sb_wait,
+			    !mddev->flush_bio ||
+			    ktime_after(mddev->last_flush, start),
+			    mddev->lock);
+	if (!ktime_after(mddev->last_flush, start)) {
+		WARN_ON(mddev->flush_bio);
+		mddev->flush_bio = bio;
+		bio = NULL;
+	}
+	spin_unlock_irq(&mddev->lock);
+
+	if (!bio) {
+		INIT_WORK(&mddev->flush_work, submit_flushes);
+		queue_work(md_wq, &mddev->flush_work);
+	} else {
+		/* flush was performed for some other bio while we waited. */
+		if (bio->bi_iter.bi_size == 0)
 			/* an empty barrier - all done */
 			bio_endio(bio);
-			mempool_free(fi, mddev->flush_pool);
-		} else {
-			INIT_WORK(&fi->flush_work, submit_flushes);
-			queue_work(md_wq, &fi->flush_work);
+		else {
+			bio->bi_opf &= ~REQ_PREFLUSH;
+			mddev->pers->make_request(mddev, bio);
 		}
 	}
 }
@@ -566,6 +562,7 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
 	spin_lock_init(&mddev->lock);
+	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
 	init_waitqueue_head(&mddev->recovery_wait);
 	mddev->reshape_position = MaxSector;
@@ -2863,8 +2860,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 			err = 0;
 		}
 	} else if (cmd_match(buf, "re-add")) {
-		if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
-			rdev->saved_raid_disk >= 0) {
+		if (!rdev->mddev->pers)
+			err = -EINVAL;
+		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+				rdev->saved_raid_disk >= 0) {
 			/* clear_bit is performed _after_ all the devices
 			 * have their local Faulty bit cleared. If any writes
 			 * happen in the meantime in the local node, they
@@ -5519,22 +5518,6 @@ int md_run(struct mddev *mddev)
 		if (err)
 			return err;
 	}
-	if (mddev->flush_pool == NULL) {
-		mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
-						flush_info_free, mddev);
-		if (!mddev->flush_pool) {
-			err = -ENOMEM;
-			goto abort;
-		}
-	}
-	if (mddev->flush_bio_pool == NULL) {
-		mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
-						flush_bio_free, mddev);
-		if (!mddev->flush_bio_pool) {
-			err = -ENOMEM;
-			goto abort;
-		}
-	}
 
 	spin_lock(&pers_lock);
 	pers = find_pers(mddev->level, mddev->clevel);
@@ -5694,15 +5677,8 @@ int md_run(struct mddev *mddev)
 	return 0;
 
 abort:
-	if (mddev->flush_bio_pool) {
-		mempool_destroy(mddev->flush_bio_pool);
-		mddev->flush_bio_pool = NULL;
-	}
-	if (mddev->flush_pool){
-		mempool_destroy(mddev->flush_pool);
-		mddev->flush_pool = NULL;
-	}
-
+	bioset_exit(&mddev->bio_set);
+	bioset_exit(&mddev->sync_set);
 	return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
@@ -5906,14 +5882,6 @@ static void __md_stop(struct mddev *mddev)
 		mddev->to_remove = &md_redundancy_group;
 	module_put(pers->owner);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	if (mddev->flush_bio_pool) {
-		mempool_destroy(mddev->flush_bio_pool);
-		mddev->flush_bio_pool = NULL;
-	}
-	if (mddev->flush_pool) {
-		mempool_destroy(mddev->flush_pool);
-		mddev->flush_pool = NULL;
-	}
 }
 
 void md_stop(struct mddev *mddev)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8afd6bf..325cb21 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -252,19 +252,6 @@ enum mddev_sb_flags {
 	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
 };
 
-#define NR_FLUSH_INFOS 8
-#define NR_FLUSH_BIOS 64
-struct flush_info {
-	struct bio			*bio;
-	struct mddev			*mddev;
-	struct work_struct		flush_work;
-	atomic_t			flush_pending;
-};
-struct flush_bio {
-	struct flush_info *fi;
-	struct md_rdev *rdev;
-};
-
 struct mddev {
 	void				*private;
 	struct md_personality		*pers;
@@ -470,8 +457,16 @@ struct mddev {
 						   * metadata and bitmap writes
 						   */
 
-	mempool_t			*flush_pool;
-	mempool_t			*flush_bio_pool;
+	/* Generic flush handling.
+	 * The last to finish preflush schedules a worker to submit
+	 * the rest of the request (without the REQ_PREFLUSH flag).
+	 */
+	struct bio *flush_bio;
+	atomic_t flush_pending;
+	ktime_t start_flush, last_flush; /* last_flush is when the last completed
+					  * flush was started.
+					  */
+	struct work_struct flush_work;
 	struct work_struct event_work;	/* used by dm to report failure event */
 	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
 	struct md_cluster_info		*cluster_info;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ae38895..f237d6f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4185,7 +4185,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 		/* now write out any block on a failed drive,
 		 * or P or Q if they were recomputed
 		 */
-		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
+		dev = NULL;
 		if (s->failed == 2) {
 			dev = &sh->dev[s->failed_num[1]];
 			s->locked++;
@@ -4210,6 +4210,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 			set_bit(R5_LOCKED, &dev->flags);
 			set_bit(R5_Wantwrite, &dev->flags);
 		}
+		if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
+			      "%s: disk%td not up to date\n",
+			      mdname(conf->mddev),
+			      dev - (struct r5dev *) &sh->dev)) {
+			clear_bit(R5_LOCKED, &dev->flags);
+			clear_bit(R5_Wantwrite, &dev->flags);
+			s->locked--;
+		}
 		clear_bit(STRIPE_DEGRADED, &sh->state);
 
 		set_bit(STRIPE_INSYNC, &sh->state);
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 6889c25..9226dca 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -668,6 +668,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 		return -EBUSY;
 	}
 
+	if (q->waiting_in_dqbuf && *count) {
+		dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+		return -EBUSY;
+	}
+
 	if (*count == 0 || q->num_buffers != 0 ||
 	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
 		/*
@@ -797,6 +802,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 	}
 
 	if (!q->num_buffers) {
+		if (q->waiting_in_dqbuf && *count) {
+			dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+			return -EBUSY;
+		}
 		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
 		q->memory = memory;
 		q->waiting_for_buffers = !q->is_output;
@@ -1466,6 +1475,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 	for (;;) {
 		int ret;
 
+		if (q->waiting_in_dqbuf) {
+			dprintk(1, "another dup()ped fd is waiting for a buffer\n");
+			return -EBUSY;
+		}
+
 		if (!q->streaming) {
 			dprintk(1, "streaming off, will not wait for buffers\n");
 			return -EINVAL;
@@ -1493,6 +1507,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 			return -EAGAIN;
 		}
 
+		q->waiting_in_dqbuf = 1;
 		/*
 		 * We are streaming and blocking, wait for another buffer to
 		 * become ready or for streamoff. Driver's lock is released to
@@ -1513,6 +1528,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 		 * the locks or return an error if one occurred.
 		 */
 		call_void_qop(q, wait_finish, q);
+		q->waiting_in_dqbuf = 0;
 		if (ret) {
 			dprintk(1, "sleep was interrupted\n");
 			return ret;
@@ -2361,6 +2377,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
 	if (!data)
 		return -EINVAL;
 
+	if (q->waiting_in_dqbuf) {
+		dprintk(3, "another dup()ped fd is %s\n",
+			read ? "reading" : "writing");
+		return -EBUSY;
+	}
+
 	/*
 	 * Initialize emulator on first call.
 	 */
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index dffd2d4..c25c927 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
 	u16 u16tmp;
 	u32 tuner_frequency_khz, target_mclk;
 	s32 s32tmp;
+	static const struct reg_sequence reset_buf[] = {
+		{0x07, 0x80}, {0x07, 0x00}
+	};
 
 	dev_dbg(&client->dev,
 		"delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
 	}
 
 	/* reset */
-	ret = regmap_write(dev->regmap, 0x07, 0x80);
-	if (ret)
-		goto err;
-
-	ret = regmap_write(dev->regmap, 0x07, 0x00);
+	ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
 	if (ret)
 		goto err;
 
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index feacd8da..d55d8f1 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
 
 static int si2165_wait_init_done(struct si2165_state *state)
 {
-	int ret = -EINVAL;
+	int ret;
 	u8 val = 0;
 	int i;
 
 	for (i = 0; i < 3; ++i) {
-		si2165_readreg8(state, REG_INIT_DONE, &val);
+		ret = si2165_readreg8(state, REG_INIT_DONE, &val);
+		if (ret < 0)
+			return ret;
 		if (val == 0x01)
 			return 0;
 		usleep_range(1000, 50000);
 	}
 	dev_err(&state->client->dev, "init_done was not set\n");
-	return ret;
+	return -EINVAL;
 }
 
 static int si2165_upload_firmware_block(struct si2165_state *state,
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 4715edc..e6a8b56 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
 		if (ov2659_formats[index].code == mf->code)
 			break;
 
-	if (index < 0)
-		return -EINVAL;
+	if (index < 0) {
+		index = 0;
+		mf->code = ov2659_formats[index].code;
+	}
 
 	mf->colorspace = V4L2_COLORSPACE_SRGB;
 	mf->field = V4L2_FIELD_NONE;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 17a34b4..edded86 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -811,9 +811,18 @@ static int ov6650_video_probe(struct i2c_client *client)
 	u8		pidh, pidl, midh, midl;
 	int		ret;
 
+	priv->clk = v4l2_clk_get(&client->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
+		return ret;
+	}
+
 	ret = ov6650_s_power(&priv->subdev, 1);
 	if (ret < 0)
-		return ret;
+		goto eclkput;
+
+	msleep(20);
 
 	/*
 	 * check and show product ID and manufacturer ID
@@ -848,6 +857,11 @@ static int ov6650_video_probe(struct i2c_client *client)
 
 done:
 	ov6650_s_power(&priv->subdev, 0);
+	if (!ret)
+		return 0;
+eclkput:
+	v4l2_clk_put(priv->clk);
+
 	return ret;
 }
 
@@ -990,18 +1004,9 @@ static int ov6650_probe(struct i2c_client *client,
 	priv->code	  = MEDIA_BUS_FMT_YUYV8_2X8;
 	priv->colorspace  = V4L2_COLORSPACE_JPEG;
 
-	priv->clk = v4l2_clk_get(&client->dev, NULL);
-	if (IS_ERR(priv->clk)) {
-		ret = PTR_ERR(priv->clk);
-		goto eclkget;
-	}
-
 	ret = ov6650_video_probe(client);
-	if (ret) {
-		v4l2_clk_put(priv->clk);
-eclkget:
+	if (ret)
 		v4l2_ctrl_handler_free(&priv->hdl);
-	}
 
 	return ret;
 }
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 64d1402..1f71c14 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -159,10 +159,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
 #define REG_GFIX	0x69	/* Fix gain control */
 
 #define REG_DBLV	0x6b	/* PLL control an debugging */
-#define   DBLV_BYPASS	  0x00	  /* Bypass PLL */
-#define   DBLV_X4	  0x01	  /* clock x4 */
-#define   DBLV_X6	  0x10	  /* clock x6 */
-#define   DBLV_X8	  0x11	  /* clock x8 */
+#define   DBLV_BYPASS	  0x0a	  /* Bypass PLL */
+#define   DBLV_X4	  0x4a	  /* clock x4 */
+#define   DBLV_X6	  0x8a	  /* clock x6 */
+#define   DBLV_X8	  0xca	  /* clock x8 */
 
 #define REG_SCALING_XSC	0x70	/* Test pattern and horizontal scale factor */
 #define   TEST_PATTTERN_0 0x80
@@ -862,7 +862,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
 	if (ret < 0)
 		return ret;
 
-	return ov7670_write(sd, REG_DBLV, DBLV_X4);
+	return 0;
 }
 
 static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@@ -1797,11 +1797,7 @@ static int ov7670_probe(struct i2c_client *client,
 		if (config->clock_speed)
 			info->clock_speed = config->clock_speed;
 
-		/*
-		 * It should be allowed for ov7670 too when it is migrated to
-		 * the new frame rate formula.
-		 */
-		if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+		if (config->pll_bypass)
 			info->pll_bypass = true;
 
 		if (config->pclk_hb_disable)
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 5817d9c..6d8e4af 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
 	/* enable i2c-port pins */
 	saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
 
-	hexium->i2c_adapter = (struct i2c_adapter) {
-		.name = "hexium gemini",
-	};
+	strscpy(hexium->i2c_adapter.name, "hexium gemini",
+		sizeof(hexium->i2c_adapter.name));
 	saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
 	if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
 		DEB_S("cannot register i2c-device. skipping.\n");
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 0a05176..a794f9e 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev)
 	saa7146_write(dev, DD1_STREAM_B, 0x00000000);
 	saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
 
-	hexium->i2c_adapter = (struct i2c_adapter) {
-		.name = "hexium orion",
-	};
+	strscpy(hexium->i2c_adapter.name, "hexium orion",
+		sizeof(hexium->i2c_adapter.name));
 	saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
 	if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
 		DEB_S("cannot register i2c-device. skipping.\n");
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index d20d3df..a3cfefd 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1999,6 +1999,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
 	/* Clear decode success flag */
 	coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
 
+	/* Clear error return value */
+	coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
+
 	trace_coda_dec_pic_run(ctx, meta);
 
 	coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index a72a131..b11cdd4 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -10,6 +10,5 @@
       IFE and postprocessing drivers.
 
 source "drivers/media/platform/msm/cvp/Kconfig"
-source "drivers/media/platform/msm/sde/Kconfig"
 source "drivers/media/platform/msm/npu/Kconfig"
 source "drivers/media/platform/msm/synx/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index df359b1..9192e4e 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-y += sde/
-obj-$(CONFIG_SPECTRA_CAMERA) += camera/
 #
 # Makefile for the qti specific video device drivers
 # based on V4L2.
@@ -9,3 +7,4 @@
 obj-$(CONFIG_MSM_CVP_V4L2) += cvp/
 obj-$(CONFIG_MSM_NPU) += npu/
 obj-$(CONFIG_MSM_GLOBAL_SYNX) += synx/
+obj-$(CONFIG_SPECTRA_CAMERA) += camera/
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 85908a3b..d2ec399 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -519,7 +519,7 @@ int cam_context_init(struct cam_context *ctx,
 	mutex_init(&ctx->sync_mutex);
 	spin_lock_init(&ctx->lock);
 
-	ctx->dev_name = dev_name;
+	strlcpy(ctx->dev_name, dev_name, CAM_CTX_DEV_NAME_MAX_LENGTH);
 	ctx->dev_id = dev_id;
 	ctx->ctx_id = ctx_id;
 	ctx->last_flush_req = 0;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 55fd376..1583dd0 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CONTEXT_H_
@@ -15,6 +15,9 @@
 /* Forward declarations */
 struct cam_context;
 
+/* max device name string length */
+#define CAM_CTX_DEV_NAME_MAX_LENGTH 20
+
 /* max request number */
 #define CAM_CTX_REQ_MAX              20
 #define CAM_CTX_CFG_MAX              20
@@ -177,7 +180,7 @@ struct cam_ctx_ops {
  *
  */
 struct cam_context {
-	const char                  *dev_name;
+	char                         dev_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	uint64_t                     dev_id;
 	uint32_t                     ctx_id;
 	struct list_head             list;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 1ef4aa5..9c4c6f2 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -27,7 +27,7 @@ static void cam_node_print_ctx_state(
 		spin_lock_bh(&ctx->lock);
 		CAM_INFO(CAM_CORE,
 			"[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
-			ctx->dev_name ? ctx->dev_name : "null",
+			ctx->dev_name,
 			i, ctx->state,
 			atomic_read(&(ctx->refcount.refcount.refs)),
 			list_empty(&ctx->active_req_list),
@@ -148,6 +148,12 @@ static int __cam_node_handle_acquire_hw_v1(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_acquire_hw(ctx, acquire);
 	if (rc) {
 		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
@@ -226,6 +232,12 @@ static int __cam_node_handle_start_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_start_dev(ctx, start);
 	if (rc)
 		CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
@@ -259,6 +271,12 @@ static int __cam_node_handle_stop_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_stop_dev(ctx, stop);
 	if (rc)
 		CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
@@ -292,6 +310,12 @@ static int __cam_node_handle_config_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_config_dev(ctx, config);
 	if (rc)
 		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
@@ -325,6 +349,12 @@ static int __cam_node_handle_flush_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_flush_dev(ctx, flush);
 	if (rc)
 		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
@@ -358,6 +388,12 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
 		rc = cam_context_handle_release_dev(ctx, release);
 		if (rc)
@@ -413,6 +449,12 @@ static int __cam_node_handle_release_hw_v1(struct cam_node *node,
 		return -EINVAL;
 	}
 
+	if (strcmp(node->name, ctx->dev_name)) {
+		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+			node->name, ctx->dev_name);
+		return -EINVAL;
+	}
+
 	rc = cam_context_handle_release_hw(ctx, release);
 	if (rc)
 		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
index ad49c4b..062d021 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_NODE_H_
@@ -11,7 +11,6 @@
 #include "cam_hw_mgr_intf.h"
 #include "cam_req_mgr_interface.h"
 
-#define CAM_NODE_NAME_LENGTH_MAX        256
 
 #define CAM_NODE_STATE_UNINIT           0
 #define CAM_NODE_STATE_INIT             1
@@ -31,7 +30,7 @@
  *
  */
 struct cam_node {
-	char                         name[CAM_NODE_NAME_LENGTH_MAX];
+	char                         name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	uint32_t                     state;
 
 	/* context pool */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index d180b4e..1a7a5e2 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -236,6 +236,12 @@ static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
 {
 	int i = 0;
 
+	if (cpas_core->num_axi_ports > CAM_CPAS_MAX_AXI_PORTS) {
+		CAM_ERR(CAM_CPAS, "Invalid num_axi_ports: %d",
+			cpas_core->num_axi_ports);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < cpas_core->num_axi_ports; i++) {
 		cam_cpas_util_unregister_bus_client(
 			&cpas_core->axi_port[i].bus_client);
@@ -252,6 +258,12 @@ static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
 	int i = 0, rc = 0;
 	struct device_node *axi_port_mnoc_node = NULL;
 
+	if (cpas_core->num_axi_ports > CAM_CPAS_MAX_AXI_PORTS) {
+		CAM_ERR(CAM_CPAS, "Invalid num_axi_ports: %d",
+			cpas_core->num_axi_ports);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < cpas_core->num_axi_ports; i++) {
 		axi_port_mnoc_node = cpas_core->axi_port[i].axi_port_node;
 		rc = cam_cpas_util_register_bus_client(soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 57eb9de..18151d1 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -334,6 +334,9 @@ static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
 {
 	int i;
 
+	if (camnoc_info->irq_sbm->sbm_enable.enable == false)
+		return 0;
+
 	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
 		&camnoc_info->irq_sbm->sbm_clear);
 	for (i = 0; i < camnoc_info->irq_err_size; i++) {
@@ -424,6 +427,7 @@ static void cam_cpastop_work(struct work_struct *work)
 					cpas_core, soc_info,
 					&irq_data.u.slave_err);
 				break;
+			case CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
@@ -431,6 +435,8 @@ static void cam_cpastop_work(struct work_struct *work)
 					cpas_core, soc_info, i,
 					&irq_data.u.enc_err);
 				break;
+			case CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
 				cam_cpastop_handle_ubwc_dec_err(
 					cpas_core, soc_info, i,
@@ -508,6 +514,7 @@ static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
 {
 	int i;
 
+	cam_cpastop_reset_irq(cpas_hw);
 	for (i = 0; i < camnoc_info->specific_size; i++) {
 		if (camnoc_info->specific[i].enable) {
 			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index 8a8c61a..bdcf00d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -17,12 +17,21 @@
  *                                 observed at any slave port is logged into
  *                                 the error logger register and an IRQ is
  *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR: Triggered if any error
+ *                                                 detected in the IFE UBWC-
+ *                                                 Stats encoder instance
  * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error
  *                                               detected in the IFE0 UBWC
  *                                               encoder instance
  * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error
  *                                               detected in the IFE1 or IFE3
  *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ *                                                detected in the IPE1/BPS read
+ *                                                path decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR   : Triggered if any error detected
+ *                                               in the IPE0 read path decoder
+ *                                               instance
  * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
  *                                               detected in the IPE/BPS
  *                                               UBWC decoder instance
@@ -43,6 +52,8 @@
 enum cam_camnoc_hw_irq_type {
 	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
 		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR,
 	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
 		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
@@ -51,6 +62,10 @@ enum cam_camnoc_hw_irq_type {
 		CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR =
 		CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR,
 	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
 		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
 	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_130.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_130.h
index fe6e274..2d3d96e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_130.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_130.h
@@ -67,18 +67,19 @@ static struct cam_camnoc_irq_err
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x3BA0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.offset = 0x3BA0, /* SPECIFIC_IFE0_MAIN_ENCERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x3B90, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+			/* SPECIFIC_IFE0_MAIN_ENCERRSTATUS_LOW */
+			.offset = 0x3B90,
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x3B98, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.offset = 0x3B98, /* SPECIFIC_IFE0_MAIN_ENCERRCLR_LOW */
 			.value = 1,
 		},
 	},
@@ -89,18 +90,19 @@ static struct cam_camnoc_irq_err
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x55a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.offset = 0x55A0, /* SPECIFIC_IFE1_WR_ENCERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x5590, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+			/* SPECIFIC_IFE1_WR_ENCERRSTATUS_LOW */
+			.offset = 0x5590,
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x5598, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.offset = 0x5598, /* SPECIFIC_IFE1_WR_ENCERRCLR_LOW */
 			.value = 1,
 		},
 	},
@@ -133,7 +135,7 @@ static struct cam_camnoc_irq_err
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x2Ba0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
@@ -387,7 +389,7 @@ static struct cam_camnoc_specific
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x3248, /* SPECIFIC_IFE1_SAFELUT_LOW */
-			.value = 0xF,
+			.value = 0xFFFFFFFF,
 		},
 		.ubwc_ctl = {
 			/*
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h
index d4782d7..510b970 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h
@@ -11,28 +11,26 @@
 static struct cam_camnoc_irq_sbm cam_cpas_v480_100_irq_sbm = {
 	.sbm_enable = {
 		.access_type = CAM_REG_TYPE_READ_WRITE,
-		.enable = false,
-		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
-		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
-			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+		.enable = true,
+		.offset = 0x3840, /* SBM_FAULTINEN0_LOW */
+		.value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
 			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
 			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
 			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
-			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
 			(TEST_IRQ_ENABLE ?
-			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x40 : /* SBM_FAULTINEN0_LOW_PORT6_MASK */
 			0x0),
 	},
 	.sbm_status = {
 		.access_type = CAM_REG_TYPE_READ,
 		.enable = true,
-		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+		.offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */
 	},
 	.sbm_clear = {
 		.access_type = CAM_REG_TYPE_WRITE,
 		.enable = true,
-		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
-		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+		.offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x5 : 0x1,
 	}
 };
 
@@ -40,89 +38,89 @@ static struct cam_camnoc_irq_err
 	cam_cpas_v480_100_irq_err[] = {
 	{
 		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
-		.enable = true,
+		.enable = false,
 		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.offset = 0x7008, /* ERL_MAINCTL_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+			.offset = 0x7010, /* ERL_ERRVLD_LOW */
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.offset = 0x7018, /* ERL_ERRCLR_LOW */
 			.value = 1,
 		},
 	},
 	{
-		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR,
 		.enable = true,
 		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x5a0, /* IFE02_ENCERREN_LOW */
+			.offset = 0x1BA0, /* IFE_UBWC_STATS_ENCERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x590, /* IFE02_ENCERRSTATUS_LOW */
+			.offset = 0x1B90, /* IFE_UBWC_STATS_ENCERRSTATUS_LOW */
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x598, /* IFE02_ENCERRCLR_LOW */
+			.offset = 0x1B98, /* IFE_UBWC_STATS_ENCERRCLR_LOW */
 			.value = 1,
 		},
 	},
 	{
-		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR,
 		.enable = true,
 		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x9a0, /* IFE13_ENCERREN_LOW */
+			.offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x990, /* IFE13_ENCERRSTATUS_LOW */
+			.offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x998, /* IFE13_ENCERRCLR_LOW */
+			.offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */
 			.value = 1,
 		},
 	},
 	{
-		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR,
 		.enable = true,
 		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0xd20, /* IBL_RD_DECERREN_LOW */
+			.offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0xd10, /* IBL_RD_DECERRSTATUS_LOW */
+			.offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0xd18, /* IBL_RD_DECERRCLR_LOW */
+			.offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */
 			.value = 1,
 		},
 	},
@@ -133,36 +131,36 @@ static struct cam_camnoc_irq_err
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x11a0, /* IBL_WR_ENCERREN_LOW */
+			.offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */
 			.value = 1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x1190,
-			/* IBL_WR_ENCERRSTATUS_LOW */
+			.offset = 0x2990,
+			/* IPE_BPS_WR_ENCERRSTATUS_LOW */
 		},
 		.err_clear = {
 			.access_type = CAM_REG_TYPE_WRITE,
 			.enable = true,
-			.offset = 0x1198, /* IBL_WR_ENCERRCLR_LOW */
+			.offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */
 			.value = 1,
 		},
 	},
 	{
 		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
-		.enable = true,
+		.enable = false,
 		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */
 			.value = 0x1,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+			.offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */
 		},
 		.err_clear = {
 			.enable = false,
@@ -179,17 +177,17 @@ static struct cam_camnoc_irq_err
 	{
 		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
 		.enable = TEST_IRQ_ENABLE ? true : false,
-		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */
 		.err_enable = {
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.enable = true,
-			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */
 			.value = 0x5,
 		},
 		.err_status = {
 			.access_type = CAM_REG_TYPE_READ,
 			.enable = true,
-			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+			.offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */
 		},
 		.err_clear = {
 			.enable = false,
@@ -203,27 +201,25 @@ static struct cam_camnoc_specific
 		.port_type = CAM_CAMNOC_CDM,
 		.enable = true,
 		.priority_lut_low = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x30, /* CDM_PRIORITYLUT_LOW */
-			.value = 0x22222222,
+			.value = 0x0,
 		},
 		.priority_lut_high = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x34, /* CDM_PRIORITYLUT_HIGH */
-			.value = 0x22222222,
+			.value = 0x0,
 		},
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x38, /* CDM_URGENCY_LOW */
-			.mask = 0x7, /* CDM_URGENCY_LOW_READ_MASK */
-			.shift = 0x0, /* CDM_URGENCY_LOW_READ_SHIFT */
-			.value = 0x2,
+			.value = 0x3,
 		},
 		.danger_lut = {
 			.enable = false,
@@ -247,25 +243,25 @@ static struct cam_camnoc_specific
 		.port_type = CAM_CAMNOC_FD,
 		.enable = true,
 		.priority_lut_low = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x630, /* FD_PRIORITYLUT_LOW */
-			.value = 0x44444444,
+			.value = 0x0,
 		},
 		.priority_lut_high = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x634, /* FD_PRIORITYLUT_HIGH */
-			.value = 0x44444444,
+			.value = 0x0,
 		},
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x638, /* FD_URGENCY_LOW */
-			.value = 0x2,
+			.value = 0x33,
 		},
 		.danger_lut = {
 			.enable = false,
@@ -305,13 +301,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */
-			/* IFE_LINEAR_URGENCY_LOW_WRITE_MASK */
-			.mask = 0x70,
-			/* IFE_LINEAR_URGENCY_LOW_WRITE_SHIFT */
-			.shift = 0x4,
-			.value = 3,
+			.value = 0x1030,
 		},
 		.danger_lut = {
 			.enable = true,
@@ -323,7 +315,7 @@ static struct cam_camnoc_specific
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */
-			.value = 0x1,
+			.value = 0x000F,
 		},
 		.ubwc_ctl = {
 			/*
@@ -338,41 +330,37 @@ static struct cam_camnoc_specific
 		.port_type = CAM_CAMNOC_IFE_RDI_RD,
 		.enable = true,
 		.priority_lut_low = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */
-			.value = 0x66665433,
+			.value = 0x0,
 		},
 		.priority_lut_high = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */
-			.value = 0x66666666,
+			.value = 0x0,
 		},
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */
-			/* IFE_RDI_RD_URGENCY_LOW_WRITE_MASK */
-			.mask = 0x70,
-			/* IFE_RDI_RD_URGENCY_LOW_WRITE_SHIFT */
-			.shift = 0x4,
-			.value = 3,
+			.value = 0x3,
 		},
 		.danger_lut = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */
-			.value = 0xFFFFFF00,
+			.value = 0x0,
 		},
 		.safe_lut = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */
-			.value = 0x1,
+			.value = 0x0,
 		},
 		.ubwc_ctl = {
 			/*
@@ -403,13 +391,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x1438, /* IFE_RDI_WR_URGENCY_LOW */
-			/* IFE_RDI_WR_URGENCY_LOW_WRITE_MASK */
-			.mask = 0x70,
-			/* IFE_RDI_WR_URGENCY_LOW_WRITE_SHIFT */
-			.shift = 0x4,
-			.value = 3,
+			.value = 0x1030,
 		},
 		.danger_lut = {
 			.enable = true,
@@ -421,7 +405,7 @@ static struct cam_camnoc_specific
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x1448, /* IFE_RDI_WR_SAFELUT_LOW */
-			.value = 0x1,
+			.value = 0x000F,
 		},
 		.ubwc_ctl = {
 			/*
@@ -452,13 +436,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x1A38, /* IFE_UBWC_STATS_URGENCY_LOW */
-			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_MASK */
-			.mask = 0x70,
-			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_SHIFT */
-			.shift = 0x4,
-			.value = 3,
+			.value = 0x1030,
 		},
 		.danger_lut = {
 			.enable = true,
@@ -470,7 +450,7 @@ static struct cam_camnoc_specific
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x1A48, /* IFE_UBWC_STATS_SAFELUT_LOW */
-			.value = 0x1,
+			.value = 0x000F,
 		},
 		.ubwc_ctl = {
 			/*
@@ -489,14 +469,14 @@ static struct cam_camnoc_specific
 		.port_type = CAM_CAMNOC_IPE0_RD,
 		.enable = true,
 		.priority_lut_low = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */
 			.value = 0x33333333,
 		},
 		.priority_lut_high = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */
@@ -505,13 +485,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */
-			/* IPE0_RD_URGENCY_LOW_READ_MASK */
-			.mask = 0x7,
-			/* IPE0_RD_URGENCY_LOW_READ_SHIFT */
-			.shift = 0x0,
-			.value = 3,
+			.value = 0x3,
 		},
 		.danger_lut = {
 			.enable = false,
@@ -544,14 +520,14 @@ static struct cam_camnoc_specific
 		.port_type = CAM_CAMNOC_IPE1_BPS_RD,
 		.enable = true,
 		.priority_lut_low = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */
 			.value = 0x33333333,
 		},
 		.priority_lut_high = {
-			.enable = true,
+			.enable = false,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */
@@ -560,13 +536,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */
-			/* IPE1_BPS_RD_URGENCY_LOW_READ_MASK */
-			.mask = 0x7,
-			/* IPE1_BPS_RD_URGENCY_LOW_READ_SHIFT */
-			.shift = 0x0,
-			.value = 3,
+			.value = 0x3,
 		},
 		.danger_lut = {
 			.enable = false,
@@ -615,13 +587,9 @@ static struct cam_camnoc_specific
 		.urgency = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 1,
+			.masked_value = 0,
 			.offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */
-			/* IPE_BPS_WR_URGENCY_LOW_WRITE_MASK */
-			.mask = 0x70,
-			/* IPE_BPS_WR_URGENCY_LOW_WRITE_SHIFT */
-			.shift = 0x4,
-			.value = 3,
+			.value = 0x30,
 		},
 		.danger_lut = {
 			.enable = false,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 932f4b2..1126053 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -57,6 +57,9 @@ enum cam_cpas_hw_version {
  *                              observed at any slave port is logged into
  *                              the error logger register and an IRQ is
  *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR: Triggered if any error detected
+ *                                              in the IFE UBWC-Stats encoder
+ *                                              instance
  * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error detected
  *                                            in the IFE0 UBWC encoder instance
  * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error detected
@@ -67,6 +70,12 @@ enum cam_cpas_hw_version {
  * @CAM_CAMNOC_IRQ_IFE1_WR_UBWC_ENCODE_ERROR  : Triggered if any error detected
  *                                            in the IFE1 UBWC encoder
  *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ *                                             in the IPE1/BPS read path decoder
+ *                                             instance
+ * @CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR    : Triggered if any error detected
+ *                                             in the IPE0 read path decoder
+ *                                             instance
  * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
  *                                            in the IPE/BPS UBWC decoder
  *                                            instance
@@ -78,10 +87,13 @@ enum cam_cpas_hw_version {
  */
 enum cam_camnoc_irq_type {
 	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR,
 	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR,
 	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
 	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
 	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 427fcf7..8805859 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -10,7 +10,7 @@
 #include "cam_fd_context.h"
 #include "cam_trace.h"
 
-static const char fd_dev_name[] = "fd";
+static const char fd_dev_name[] = "cam-fd";
 
 /* Functions in Available state */
 static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 2fadc29..ec23a6e 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -20,7 +20,7 @@
 #include "cam_debug_util.h"
 #include "cam_packet_util.h"
 
-static const char icp_dev_name[] = "icp";
+static const char icp_dev_name[] = "cam-icp";
 
 static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
 	uint32_t buf_info)
@@ -37,6 +37,14 @@ static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
 		return -EINVAL;
 	}
 
+	mutex_lock(&ctx->ctx_mutex);
+
+	if (ctx->state < CAM_CTX_ACQUIRED || ctx->state > CAM_CTX_ACTIVATED) {
+		CAM_ERR(CAM_ICP, "Invalid state icp ctx %d state %d",
+			ctx->ctx_id, ctx->state);
+		goto end;
+	}
+
 	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
 		ctx->ctx_id, ctx->state);
 
@@ -55,6 +63,8 @@ static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
 				req->request_id, rc);
 	}
 
+end:
+	mutex_unlock(&ctx->ctx_mutex);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index ae16cc0b..5a2b786 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -241,6 +241,7 @@ int cam_a5_init_hw(void *device_priv,
 	struct cam_hw_info *a5_dev = device_priv;
 	struct cam_hw_soc_info *soc_info = NULL;
 	struct cam_a5_device_core_info *core_info = NULL;
+	struct a5_soc_info *a5_soc_info;
 	struct cam_icp_cpas_vote cpas_vote;
 	int rc = 0;
 
@@ -258,6 +259,8 @@ int cam_a5_init_hw(void *device_priv,
 		return -EINVAL;
 	}
 
+	a5_soc_info = soc_info->soc_private;
+
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.num_paths = 1;
@@ -291,6 +294,12 @@ int cam_a5_init_hw(void *device_priv,
 			CAM_ERR(CAM_ICP, "cpas stop is failed");
 		else
 			core_info->cpas_start = false;
+	} else {
+		CAM_DBG(CAM_ICP, "a5_qos %d", a5_soc_info->a5_qos_val);
+		if (a5_soc_info->a5_qos_val)
+			cam_io_w_mb(a5_soc_info->a5_qos_val,
+				soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+				ICP_SIERRA_A5_CSR_ACCESS);
 	}
 
 error:
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
index a5c0fff..6c46b3a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
@@ -20,6 +20,8 @@
 #define A5_WDT_0                0x2
 #define A5_WDT_1                0x4
 
+#define ICP_SIERRA_A5_CSR_ACCESS 0x3C
+
 #define ELF_GUARD_PAGE          (2 * 1024 * 1024)
 
 struct cam_a5_device_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index bcdc126..62a88ac 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -40,6 +40,13 @@ static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
 		goto end;
 	}
 
+	rc = of_property_read_u32(of_node, "qos-val",
+		&a5_soc_info->a5_qos_val);
+	if (rc < 0) {
+		CAM_WARN(CAM_ICP, "QoS need not be set");
+		a5_soc_info->a5_qos_val = 0;
+	}
+
 	ubwc_cfg_ext = &a5_soc_info->uconfig.ubwc_cfg_ext;
 	num_ubwc_cfg = of_property_count_u32_elems(of_node,
 		"ubwc-ipe-fetch-cfg");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
index eadf9d7..c44bcde 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
@@ -20,6 +20,7 @@ struct a5_ubwc_cfg_ext {
 struct a5_soc_info {
 	char *fw_name;
 	bool ubwc_config_ext;
+	uint32_t a5_qos_val;
 	union {
 		uint32_t ubwc_cfg[ICP_UBWC_MAX];
 		struct a5_ubwc_cfg_ext ubwc_cfg_ext;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 73075a4..7deea1c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -306,6 +306,7 @@ int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
 	struct cam_bps_device_core_info *core_info = NULL;
 	struct cam_bps_device_hw_info *hw_info = NULL;
 	int rc = 0;
+	unsigned long flags;
 
 	if (!device_priv) {
 		CAM_ERR(CAM_ICP, "Invalid arguments");
@@ -395,12 +396,16 @@ int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
 		}
 		break;
 	case CAM_ICP_BPS_CMD_DISABLE_CLK:
+		spin_lock_irqsave(&bps_dev->hw_lock, flags);
 		if (core_info->clk_enable == true)
 			cam_bps_toggle_clk(soc_info, false);
 		core_info->clk_enable = false;
+		spin_unlock_irqrestore(&bps_dev->hw_lock, flags);
 		break;
 	case CAM_ICP_BPS_CMD_RESET:
+		spin_lock_irqsave(&bps_dev->hw_lock, flags);
 		rc = cam_bps_cmd_reset(soc_info, core_info);
+		spin_unlock_irqrestore(&bps_dev->hw_lock, flags);
 		break;
 	default:
 		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index e32366b..c9bc592 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -4965,10 +4965,31 @@ static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
 	return 0;
 }
 
+static uint32_t cam_icp_unify_dev_type(
+	uint32_t dev_type)
+{
+	switch (dev_type) {
+	case CAM_ICP_RES_TYPE_BPS:
+		return CAM_ICP_RES_TYPE_BPS;
+	case CAM_ICP_RES_TYPE_BPS_RT:
+		return CAM_ICP_RES_TYPE_BPS;
+	case CAM_ICP_RES_TYPE_BPS_SEMI_RT:
+		return CAM_ICP_RES_TYPE_BPS;
+	case CAM_ICP_RES_TYPE_IPE:
+		return CAM_ICP_RES_TYPE_IPE;
+	case CAM_ICP_RES_TYPE_IPE_RT:
+		return CAM_ICP_RES_TYPE_IPE;
+	case CAM_ICP_RES_TYPE_IPE_SEMI_RT:
+		return CAM_ICP_RES_TYPE_IPE;
+	default:
+		return CAM_ICP_RES_TYPE_MAX;
+	}
+}
+
 static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 {
 	int rc = 0, bitmap_size = 0;
-	uint32_t ctx_id = 0;
+	uint32_t ctx_id = 0, dev_type;
 	uint64_t io_buf_addr;
 	size_t io_buf_size;
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
@@ -5061,6 +5082,10 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 		goto create_handle_failed;
 	}
 
+	CAM_DBG(CAM_ICP,
+		"created stream handle for dev_type %u",
+		icp_dev_acquire_info->dev_type);
+
 	cmd_mem_region.num_regions = 1;
 	cmd_mem_region.map_info_array[0].mem_handle =
 		icp_dev_acquire_info->io_config_cmd_handle;
@@ -5120,10 +5145,12 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	/* Start context timer*/
 	cam_icp_ctx_timer_start(ctx_data);
 	hw_mgr->ctxt_cnt++;
+	dev_type = cam_icp_unify_dev_type(icp_dev_acquire_info->dev_type);
+	icp_dev_acquire_info->dev_type = dev_type;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
-	CAM_DBG(CAM_ICP, "Acquire Done for ctx_id %u dev name %s dev type %d",
-		ctx_data->ctx_id, cam_icp_dev_type_to_name(
-		icp_dev_acquire_info->dev_type),
+
+	CAM_DBG(CAM_ICP, "Acquire Done for ctx_id %u dev type %d",
+		ctx_data->ctx_id,
 		icp_dev_acquire_info->dev_type);
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 82d0ebe..614c748 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -305,6 +305,7 @@ int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
 	struct cam_ipe_device_core_info *core_info = NULL;
 	struct cam_ipe_device_hw_info *hw_info = NULL;
 	int rc = 0;
+	unsigned long flags;
 
 	if (!device_priv) {
 		CAM_ERR(CAM_ICP, "Invalid arguments");
@@ -389,12 +390,16 @@ int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
 		}
 		break;
 	case CAM_ICP_IPE_CMD_DISABLE_CLK:
+		spin_lock_irqsave(&ipe_dev->hw_lock, flags);
 		if (core_info->clk_enable == true)
 			cam_ipe_toggle_clk(soc_info, false);
 		core_info->clk_enable = false;
+		spin_unlock_irqrestore(&ipe_dev->hw_lock, flags);
 		break;
 	case CAM_ICP_IPE_CMD_RESET:
+		spin_lock_irqsave(&ipe_dev->hw_lock, flags);
 		rc = cam_ipe_cmd_reset(soc_info, core_info);
+		spin_unlock_irqrestore(&ipe_dev->hw_lock, flags);
 		break;
 	default:
 		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 8120fada..84f5e49 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -20,7 +20,7 @@
 #include "cam_isp_context.h"
 #include "cam_common_util.h"
 
-static const char isp_dev_name[] = "isp";
+static const char isp_dev_name[] = "cam-isp";
 
 static struct cam_isp_ctx_debug isp_ctx_debug;
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 2895e6f..473a274 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1568,6 +1568,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl(
 		csid_acquire.out_port = in_port->data;
 		csid_acquire.node_res = NULL;
 		csid_acquire.crop_enable = crop_enable;
+		csid_acquire.drop_enable = false;
 
 		hw_intf = cid_res->hw_res[i]->hw_intf;
 
@@ -1697,6 +1698,12 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
 		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
 		csid_acquire.node_res = NULL;
 
+		/*
+		 * Enable RDI pixel drop by default. CSID enables it only on
+		 * ver 480 HW, so userspace can control the pixel drop pattern.
+		 */
+		csid_acquire.drop_enable = true;
+
 		/* Enable RDI crop for single ife use case only */
 		if (in_port->usage_type)
 			csid_acquire.crop_enable = false;
@@ -2140,6 +2147,8 @@ static int cam_ife_mgr_acquire_get_unified_structure_v0(
 	port_info->dsp_mode        =  in->dsp_mode;
 	port_info->hbi_cnt         =  in->hbi_cnt;
 	port_info->cust_node       =  0;
+	port_info->horizontal_bin  =  0;
+	port_info->qcfa_bin        =  0;
 	port_info->num_out_res     =  in->num_out_res;
 
 	port_info->data = kcalloc(in->num_out_res,
@@ -2239,6 +2248,8 @@ static int cam_ife_mgr_acquire_get_unified_structure_v2(
 	port_info->dsp_mode       =  in->dsp_mode;
 	port_info->hbi_cnt        =  in->hbi_cnt;
 	port_info->cust_node      =  in->cust_node;
+	port_info->horizontal_bin =  in->horizontal_bin;
+	port_info->qcfa_bin       =  in->qcfa_bin;
 	port_info->num_out_res    =  in->num_out_res;
 
 	port_info->data = kcalloc(in->num_out_res,
@@ -3323,19 +3334,17 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
 	}
 
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
 	cam_ife_mgr_pause_hw(ctx);
 
-	if (stop_isp->stop_only) {
-		cam_tasklet_stop(ctx->common.tasklet_info);
+	if (stop_isp->stop_only)
 		goto end;
-	}
 
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);
 
 	cam_ife_hw_mgr_deinit_hw(ctx);
-	cam_tasklet_stop(ctx->common.tasklet_info);
-
 	CAM_DBG(CAM_ISP,
 		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h
index c18a21f..cfa8e152 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h
@@ -50,6 +50,8 @@ static struct cam_ife_csid_pxl_reg_offset  cam_ife_csid_480_ipp_reg_offset = {
 	/* configurations */
 	.pix_store_en_shift_val              = 7,
 	.early_eof_en_shift_val              = 29,
+	.horizontal_bin_en_shift_val         = 2,
+	.quad_cfa_bin_en_shift_val           = 30,
 	.ccif_violation_en                   = 1,
 	.overflow_ctrl_en                    = 1,
 };
@@ -289,7 +291,7 @@ static struct cam_ife_csid_csi2_rx_reg_offset
 	.csi2_capture_short_pkt_vc_shift              = 15,
 	.csi2_capture_cphy_pkt_dt_shift               = 20,
 	.csi2_capture_cphy_pkt_vc_shift               = 26,
-	.csi2_rx_phy_num_mask                         = 0x3,
+	.csi2_rx_phy_num_mask                         = 0x7,
 };
 
 static struct cam_ife_csid_csi2_tpg_reg_offset
@@ -354,6 +356,8 @@ static struct cam_ife_csid_common_reg_offset
 	.plain_fmt_shit_val                           = 10,
 	.crop_v_en_shift_val                          = 6,
 	.crop_h_en_shift_val                          = 5,
+	.drop_v_en_shift_val                          = 4,
+	.drop_h_en_shift_val                          = 3,
 	.crop_shift                                   = 16,
 	.ipp_irq_mask_all                             = 0x7FFF,
 	.rdi_irq_mask_all                             = 0x7FFF,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index cce0529..252c1a5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1015,6 +1015,9 @@ static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 	path_data->start_line = reserve->in_port->line_start;
 	path_data->end_line = reserve->in_port->line_stop;
 	path_data->crop_enable = reserve->crop_enable;
+	path_data->drop_enable = reserve->drop_enable;
+	path_data->horizontal_bin = reserve->in_port->horizontal_bin;
+	path_data->qcfa_bin = reserve->in_port->qcfa_bin;
 
 	CAM_DBG(CAM_ISP,
 		"Res id: %d height:%d line_start %d line_stop %d crop_en %d",
@@ -1589,10 +1592,6 @@ static int cam_ife_csid_init_config_pxl_path(
 		csid_reg->cmn_reg->crop_v_en_shift_val) |
 		(1 << 1) | 1;
 
-	val |= (1 << pxl_reg->pix_store_en_shift_val);
-	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-		pxl_reg->csid_pxl_cfg0_addr);
-
 	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Failed to get HW version rc:%d", rc);
@@ -1600,6 +1599,22 @@ static int cam_ife_csid_init_config_pxl_path(
 	}
 	CAM_DBG(CAM_ISP, "HW version: %x", camera_hw_version);
 
+	if (camera_hw_version == CAM_CPAS_TITAN_480_V100)
+		val |= (path_data->drop_enable <<
+			csid_reg->cmn_reg->drop_h_en_shift_val) |
+			(path_data->drop_enable <<
+			csid_reg->cmn_reg->drop_v_en_shift_val);
+
+	if (path_data->horizontal_bin || path_data->qcfa_bin) {
+		val |= (1 << pxl_reg->horizontal_bin_en_shift_val);
+		if (path_data->qcfa_bin)
+			val |= (1 << pxl_reg->quad_cfa_bin_en_shift_val);
+	}
+
+	val |= (1 << pxl_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+
 	if (path_data->is_valid_vc1_dt1 &&
 		camera_hw_version == CAM_CPAS_TITAN_480_V100) {
 		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -2012,9 +2027,6 @@ static int cam_ife_csid_init_config_rdi_path(
 		csid_reg->cmn_reg->crop_v_en_shift_val) |
 		(1 << 2) | 3;
 
-	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
-
 	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Failed to get HW version rc:%d", rc);
@@ -2022,6 +2034,15 @@ static int cam_ife_csid_init_config_rdi_path(
 	}
 	CAM_DBG(CAM_ISP, "HW version: %x", camera_hw_version);
 
+	if (camera_hw_version == CAM_CPAS_TITAN_480_V100)
+		val |= (path_data->drop_enable <<
+			csid_reg->cmn_reg->drop_h_en_shift_val) |
+			(path_data->drop_enable <<
+			csid_reg->cmn_reg->drop_v_en_shift_val);
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
 	if (path_data->is_valid_vc1_dt1 &&
 		camera_hw_version == CAM_CPAS_TITAN_480_V100) {
 		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -2077,8 +2098,16 @@ static int cam_ife_csid_init_config_rdi_path(
 	/* set pixel drop pattern to 0 and period to 1 */
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
-	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+
+	/* Work around a ver 480 HW bug: write the max pixel drop period */
+	if (camera_hw_version == CAM_CPAS_TITAN_480_V100 &&
+		path_data->drop_enable)
+		cam_io_w_mb(0x1F, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+	else
+		cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+
 	/* set line drop pattern to 0 and period to 1 */
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index a3c54fe..3e99885 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -133,6 +133,7 @@ struct cam_ife_csid_pxl_reg_offset {
 	/* configuration */
 	uint32_t pix_store_en_shift_val;
 	uint32_t early_eof_en_shift_val;
+	uint32_t horizontal_bin_en_shift_val;
 	uint32_t quad_cfa_bin_en_shift_val;
 	uint32_t ccif_violation_en;
 	uint32_t overflow_ctrl_en;
@@ -303,6 +304,8 @@ struct cam_ife_csid_common_reg_offset {
 	uint32_t plain_fmt_shit_val;
 	uint32_t crop_v_en_shift_val;
 	uint32_t crop_h_en_shift_val;
+	uint32_t drop_v_en_shift_val;
+	uint32_t drop_h_en_shift_val;
 	uint32_t crop_shift;
 	uint32_t ipp_irq_mask_all;
 	uint32_t rdi_irq_mask_all;
@@ -410,6 +413,7 @@ struct cam_ife_csid_cid_data {
  * @out_format:     output format
  * @crop_enable:    crop is enable or disabled, if enabled
  *                  then remaining parameters are valid.
+ * @drop_enable:    flag to indicate pixel drop enable or disable
  * @start_pixel:    start pixel
  * @end_pixel:      end_pixel
  * @width:          width
@@ -435,6 +439,7 @@ struct cam_ife_csid_path_cfg {
 	uint32_t                        in_format;
 	uint32_t                        out_format;
 	bool                            crop_enable;
+	bool                            drop_enable;
 	uint32_t                        start_pixel;
 	uint32_t                        end_pixel;
 	uint32_t                        width;
@@ -444,6 +449,8 @@ struct cam_ife_csid_path_cfg {
 	enum cam_isp_hw_sync_mode       sync_mode;
 	uint32_t                        master_idx;
 	uint64_t                        clk_rate;
+	uint32_t                        horizontal_bin;
+	uint32_t                        qcfa_bin;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
index 0a80f1a..07d555b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
@@ -5,27 +5,32 @@
 
 #include <linux/module.h>
 #include "cam_ife_csid_lite17x.h"
+#include "cam_ife_csid_lite480.h"
 #include "cam_ife_csid_core.h"
 #include "cam_ife_csid_dev.h"
 
 #define CAM_CSID_LITE_DRV_NAME                    "csid_lite"
 
-static struct cam_ife_csid_hw_info cam_ife_csid_lite_hw_info = {
+static struct cam_ife_csid_hw_info cam_ife_csid_lite_17x_hw_info = {
 	.csid_reg = &cam_ife_csid_lite_17x_reg_offset,
 };
 
+static struct cam_ife_csid_hw_info cam_ife_csid_lite_480_hw_info = {
+	.csid_reg = &cam_ife_csid_lite_480_reg_offset,
+};
+
 static const struct of_device_id cam_ife_csid_lite_dt_match[] = {
 	{
 		.compatible = "qcom,csid-lite170",
-		.data = &cam_ife_csid_lite_hw_info,
+		.data = &cam_ife_csid_lite_17x_hw_info,
 	},
 	{
 		.compatible = "qcom,csid-lite175",
-		.data = &cam_ife_csid_lite_hw_info,
+		.data = &cam_ife_csid_lite_17x_hw_info,
 	},
 	{
 		.compatible = "qcom,csid-lite480",
-		.data = &cam_ife_csid_lite_hw_info,
+		.data = &cam_ife_csid_lite_480_hw_info,
 	},
 	{}
 };
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h
new file mode 100644
index 0000000..7d9a535
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE_480_H_
+#define _CAM_IFE_CSID_LITE_480_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_480_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x30,
+	.csid_rdi_irq_mask_addr                   = 0x34,
+	.csid_rdi_irq_clear_addr                  = 0x38,
+	.csid_rdi_irq_set_addr                    = 0x3c,
+	.csid_rdi_cfg0_addr                       = 0x200,
+	.csid_rdi_cfg1_addr                       = 0x204,
+	.csid_rdi_ctrl_addr                       = 0x208,
+	.csid_rdi_frm_drop_pattern_addr           = 0x20c,
+	.csid_rdi_frm_drop_period_addr            = 0x210,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x214,
+	.csid_rdi_irq_subsample_period_addr       = 0x218,
+	.csid_rdi_rpp_hcrop_addr                  = 0x21c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x220,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x224,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x228,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x22c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x230,
+	.csid_rdi_rst_strobes_addr                = 0x240,
+	.csid_rdi_status_addr                     = 0x250,
+	.csid_rdi_misr_val0_addr                  = 0x254,
+	.csid_rdi_misr_val1_addr                  = 0x258,
+	.csid_rdi_misr_val2_addr                  = 0x25c,
+	.csid_rdi_misr_val3_addr                  = 0x260,
+	.csid_rdi_format_measure_cfg0_addr        = 0x270,
+	.csid_rdi_format_measure_cfg1_addr        = 0x274,
+	.csid_rdi_format_measure0_addr            = 0x278,
+	.csid_rdi_format_measure1_addr            = 0x27c,
+	.csid_rdi_format_measure2_addr            = 0x280,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x290,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x294,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x298,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x29c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x2a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x2a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x2a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x2ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x2b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x2b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x2b8,
+	.csid_rdi_multi_vcdt_cfg0_addr            = 0x2bc,
+	.csid_rdi_byte_cntr_ping_addr             = 0x2e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x2e4,
+	/* configurations */
+	.ccif_violation_en                        = 1,
+	.overflow_ctrl_en                         = 1,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_480_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x3b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x3b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x3b8,
+	.csid_rdi_multi_vcdt_cfg0_addr            = 0x3bc,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+	/* configurations */
+	.ccif_violation_en                        = 1,
+	.overflow_ctrl_en                         = 1,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_480_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x4b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x4b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x4b8,
+	.csid_rdi_multi_vcdt_cfg0_addr            = 0x4bc,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+	/* configurations */
+	.ccif_violation_en                        = 1,
+	.overflow_ctrl_en                         = 1,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_480_rdi_3_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x5b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x5b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x5b8,
+	.csid_rdi_multi_vcdt_cfg0_addr            = 0x5bc,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+	/* configurations */
+	.ccif_violation_en                        = 1,
+	.overflow_ctrl_en                         = 1,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+	cam_ife_csid_lite_480_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
+	.csi2_rx_phy_num_mask                         = 0x7,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+	cam_ife_csid_lite_480_tpg_reg_offset = {
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/* configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
+};
+
+static struct cam_ife_csid_common_reg_offset
+	cam_ife_csid_lite_480_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 4,
+	.minor_version                                = 8,
+	.version_incr                                 = 0,
+	.num_rdis                                     = 4,
+	.num_pix                                      = 0,
+	.num_ppp                                      = 0,
+	.csid_reg_rst_stb                             = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.drop_v_en_shift_val                          = 4,
+	.drop_h_en_shift_val                          = 3,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+	.ppp_irq_mask_all                             = 0xFFFF,
+	.measure_en_hbi_vbi_cnt_mask                  = 0xC,
+	.format_measure_en_val                        = 1,
+};
+
+static struct cam_ife_csid_reg_offset cam_ife_csid_lite_480_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_lite_480_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_lite_480_csi2_reg_offset,
+	.ipp_reg          = NULL,
+	.ppp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_lite_480_rdi_0_reg_offset,
+		&cam_ife_csid_lite_480_rdi_1_reg_offset,
+		&cam_ife_csid_lite_480_rdi_2_reg_offset,
+		&cam_ife_csid_lite_480_rdi_3_reg_offset,
+		},
+	.tpg_reg = &cam_ife_csid_lite_480_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE480_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index 247dfc5..d8ba811 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -95,6 +95,8 @@ struct cam_isp_in_port_generic_info {
 	uint32_t                        hbi_cnt;
 	uint32_t                        cust_node;
 	uint32_t                        num_out_res;
+	uint32_t                        horizontal_bin;
+	uint32_t                        qcfa_bin;
 	struct cam_isp_out_port_generic_info    *data;
 };
 
@@ -115,6 +117,7 @@ struct cam_isp_in_port_generic_info {
  *                reserve
  * @node_res :    Reserved resource structure pointer
  * @crop_enable : Flag to indicate CSID crop enable
+ * @drop_enable : Flag to indicate CSID drop enable
  *
  */
 struct cam_csid_hw_reserve_resource_args {
@@ -127,6 +130,7 @@ struct cam_csid_hw_reserve_resource_args {
 	uint32_t                                  cid;
 	struct cam_isp_resource_node             *node_res;
 	bool                                      crop_enable;
+	bool                                      drop_enable;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
index 748c324..663bc24 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE170_H_
@@ -166,6 +166,7 @@ static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
 			NULL,
 			},
 		},
+	.num_mux = 4,
 	.mux_type = {
 		CAM_VFE_CAMIF_VER_2_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
index db61bfb..6823b63 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
@@ -201,6 +201,7 @@ static struct cam_vfe_top_ver2_hw_info vfe175_top_hw_info = {
 			NULL,
 			},
 		},
+	.num_mux = 5,
 	.mux_type = {
 		CAM_VFE_CAMIF_VER_2_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
index 03a6409..8acd77d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
@@ -254,6 +254,7 @@ static struct cam_vfe_top_ver2_hw_info vfe175_130_top_hw_info = {
 		.fe_reg     = &vfe175_130_fe_reg,
 		.reg_data       = &vfe_175_130_fe_reg_data,
 		},
+	.num_mux = 6,
 	.mux_type = {
 		CAM_VFE_CAMIF_VER_2_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 48eb7f1..ae65df7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -304,6 +304,7 @@ static struct cam_vfe_top_ver3_hw_info vfe480_top_hw_info = {
 		.camif_lite_reg = &vfe480_camif_lcr,
 		.reg_data       = &vfe480_camif_lcr_reg_data,
 		},
+	.num_mux = 6,
 	.mux_type = {
 		CAM_VFE_CAMIF_VER_3_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
index 0c94b63..aab38c7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_LITE17X_H_
@@ -56,6 +56,13 @@ static struct cam_vfe_rdi_ver2_reg vfe17x_rdi_reg = {
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_common_reg_data vfe17x_rdi_reg_data = {
+	.subscribe_irq_mask0      = 0x780001E0,
+	.subscribe_irq_mask1      = 0x0,
+	.error_irq_mask0          = 0x0,
+	.error_irq_mask1          = 0x3C,
+};
+
 static struct cam_vfe_rdi_reg_data  vfe17x_rdi_0_data = {
 	.reg_update_cmd_data      = 0x2,
 	.sof_irq_mask             = 0x8000000,
@@ -90,6 +97,7 @@ static struct cam_vfe_top_ver2_hw_info vfe17x_top_hw_info = {
 	.rdi_hw_info = {
 		.common_reg = &vfe17x_top_common_reg,
 		.rdi_reg    = &vfe17x_rdi_reg,
+		.common_reg_data = &vfe17x_rdi_reg_data,
 		.reg_data = {
 			&vfe17x_rdi_0_data,
 			&vfe17x_rdi_1_data,
@@ -97,6 +105,7 @@ static struct cam_vfe_top_ver2_hw_info vfe17x_top_hw_info = {
 			&vfe17x_rdi_3_data,
 			},
 		},
+	.num_mux = 4,
 	.mux_type = {
 		CAM_VFE_RDI_VER_1_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index 221c372..c19ade5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -199,6 +199,7 @@ static struct cam_vfe_top_ver3_hw_info vfe48x_top_hw_info = {
 	.rdi_hw_info[1] = &vfe48x_rdi_hw_info[1],
 	.rdi_hw_info[2] = &vfe48x_rdi_hw_info[2],
 	.rdi_hw_info[3] = &vfe48x_rdi_hw_info[3],
+	.num_mux = 4,
 	.mux_type = {
 		CAM_VFE_RDI_VER_1_0,
 		CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index be4902b..7db1a7e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -13,6 +13,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o cam_vfe_top.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o cam_vfe_top.o cam_vfe_top_common.o
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top_ver3.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_ver3.o cam_vfe_rdi.o cam_vfe_fe_ver1.o cam_vfe_camif_lite_ver3.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index ccf57ee..6551fa3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -49,6 +49,8 @@ struct cam_vfe_mux_camif_ver3_data {
 	bool                               enable_sof_irq_debug;
 	uint32_t                           irq_debug_cnt;
 	uint32_t                           camif_debug;
+	uint32_t                           horizontal_bin;
+	uint32_t                           qcfa_bin;
 };
 
 static int cam_vfe_camif_ver3_get_evt_payload(
@@ -250,6 +252,9 @@ int cam_vfe_camif_ver3_acquire_resource(
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+	camif_data->horizontal_bin =
+		acquire_data->vfe_in.in_port->horizontal_bin;
+	camif_data->qcfa_bin    = acquire_data->vfe_in.in_port->qcfa_bin;
 	camif_data->event_cb    = acquire_data->event_cb;
 	camif_data->priv        = acquire_data->priv;
 
@@ -441,6 +446,9 @@ static int cam_vfe_camif_ver3_resource_start(
 	 * frame width. We use '/ 4' instead of '/ 2'
 	 * cause it is multipixel path
 	 */
+		if (rsrc_data->horizontal_bin || rsrc_data->qcfa_bin)
+			epoch0_line_cfg >>= 1;
+
 		epoch1_line_cfg = rsrc_data->reg_data->epoch_line_cfg &
 			0xFFFF;
 		computed_epoch_line_cfg = (epoch1_line_cfg << 16) |
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 0b230ce..3cc9255 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -413,7 +413,9 @@ static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
 				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
 
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-	} else if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+	}
+
+	if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received REG UPDATE");
 
 		if (rdi_priv->event_cb)
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c
new file mode 100644
index 0000000..2eae532
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_vfe_top_common.h"
+#include "cam_debug_util.h"
+
+static struct cam_axi_vote *cam_vfe_top_delay_bw_reduction(
+	struct cam_vfe_top_priv_common *top_common,
+	uint64_t *to_be_applied_bw)
+{
+	uint32_t i, j;
+	int vote_idx = -1;
+	uint64_t max_bw = 0;
+	uint64_t total_bw;
+	struct cam_axi_vote *curr_l_vote;
+
+	for (i = 0; i < CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES; i++) {
+		total_bw = 0;
+		curr_l_vote = &top_common->last_vote[i];
+		for (j = 0; j < curr_l_vote->num_paths; j++) {
+			if (total_bw >
+				(U64_MAX -
+				curr_l_vote->axi_path[j].camnoc_bw)) {
+				CAM_ERR(CAM_PERF,
+					"ife[%d] : Integer overflow at hist idx: %d, path: %d, total_bw = %llu, camnoc_bw = %llu",
+					top_common->hw_idx, i, j, total_bw,
+					curr_l_vote->axi_path[j].camnoc_bw);
+				return NULL;
+			}
+
+			total_bw += curr_l_vote->axi_path[j].camnoc_bw;
+		}
+
+		if (total_bw > max_bw) {
+			vote_idx = i;
+			max_bw = total_bw;
+		}
+	}
+
+	if (vote_idx < 0)
+		return NULL;
+
+	*to_be_applied_bw = max_bw;
+
+	return &top_common->last_vote[vote_idx];
+}
+
+int cam_vfe_top_set_axi_bw_vote(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, bool start_stop)
+{
+	struct cam_axi_vote agg_vote = {0};
+	struct cam_axi_vote *to_be_applied_axi_vote = NULL;
+	int rc = 0;
+	uint32_t i;
+	uint32_t num_paths = 0;
+	uint64_t total_bw_new_vote = 0;
+	bool bw_unchanged = true;
+	bool apply_bw_update = false;
+
+	for (i = 0; i < top_common->num_mux; i++) {
+		if (top_common->axi_vote_control[i] ==
+			CAM_VFE_BW_CONTROL_INCLUDE) {
+			if (num_paths +
+				top_common->req_axi_vote[i].num_paths >
+				CAM_CPAS_MAX_PATHS_PER_CLIENT) {
+				CAM_ERR(CAM_PERF,
+					"Required paths(%d) more than max(%d)",
+					num_paths +
+					top_common->req_axi_vote[i].num_paths,
+					CAM_CPAS_MAX_PATHS_PER_CLIENT);
+				return -EINVAL;
+			}
+
+			memcpy(&agg_vote.axi_path[num_paths],
+				&top_common->req_axi_vote[i].axi_path[0],
+				top_common->req_axi_vote[i].num_paths *
+				sizeof(
+				struct cam_axi_per_path_bw_vote));
+			num_paths += top_common->req_axi_vote[i].num_paths;
+		}
+	}
+
+	agg_vote.num_paths = num_paths;
+
+	for (i = 0; i < agg_vote.num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"ife[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
+			top_common->hw_idx,
+			top_common->last_counter,
+			cam_cpas_axi_util_path_type_to_string(
+			agg_vote.axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			agg_vote.axi_path[i].transac_type),
+			agg_vote.axi_path[i].camnoc_bw,
+			agg_vote.axi_path[i].mnoc_ab_bw,
+			agg_vote.axi_path[i].mnoc_ib_bw);
+
+		total_bw_new_vote += agg_vote.axi_path[i].camnoc_bw;
+	}
+
+	memcpy(&top_common->last_vote[top_common->last_counter], &agg_vote,
+		sizeof(struct cam_axi_vote));
+	top_common->last_counter = (top_common->last_counter + 1) %
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES;
+
+	if ((agg_vote.num_paths != top_common->applied_axi_vote.num_paths) ||
+		(total_bw_new_vote != top_common->total_bw_applied))
+		bw_unchanged = false;
+
+	CAM_DBG(CAM_PERF,
+		"ife[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d",
+		top_common->hw_idx, top_common->total_bw_applied,
+		total_bw_new_vote, bw_unchanged, start_stop);
+
+	if (bw_unchanged) {
+		CAM_DBG(CAM_PERF, "BW config unchanged");
+		return 0;
+	}
+
+	if (start_stop) {
+		/* need to vote current request immediately */
+		to_be_applied_axi_vote = &agg_vote;
+		/* Reset everything, we can start afresh */
+		memset(top_common->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+		top_common->last_counter = 0;
+		top_common->last_vote[top_common->last_counter] = agg_vote;
+		top_common->last_counter = (top_common->last_counter + 1) %
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES;
+	} else {
+		/*
+		 * Find max bw request in last few frames. This will be the bw
+		 * that we want to vote to CPAS now.
+		 */
+		to_be_applied_axi_vote =
+			cam_vfe_top_delay_bw_reduction(top_common,
+			&total_bw_new_vote);
+		if (!to_be_applied_axi_vote) {
+			CAM_ERR(CAM_PERF, "to_be_applied_axi_vote is NULL");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < to_be_applied_axi_vote->num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"ife[%d] : Apply BW Vote : [%s][%s] [%llu %llu %llu]",
+			top_common->hw_idx,
+			cam_cpas_axi_util_path_type_to_string(
+			to_be_applied_axi_vote->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			to_be_applied_axi_vote->axi_path[i].transac_type),
+			to_be_applied_axi_vote->axi_path[i].camnoc_bw,
+			to_be_applied_axi_vote->axi_path[i].mnoc_ab_bw,
+			to_be_applied_axi_vote->axi_path[i].mnoc_ib_bw);
+	}
+
+	if ((to_be_applied_axi_vote->num_paths !=
+		top_common->applied_axi_vote.num_paths) ||
+		(total_bw_new_vote != top_common->total_bw_applied))
+		apply_bw_update = true;
+
+	CAM_DBG(CAM_PERF,
+		"ife[%d] : Delayed update: applied_total=%lld, new_total=%lld apply_bw_update=%d, start_stop=%d",
+		top_common->hw_idx, top_common->total_bw_applied,
+		total_bw_new_vote, apply_bw_update, start_stop);
+
+	if (apply_bw_update) {
+		rc = cam_cpas_update_axi_vote(soc_private->cpas_handle,
+			to_be_applied_axi_vote);
+		if (!rc) {
+			memcpy(&top_common->applied_axi_vote,
+				to_be_applied_axi_vote,
+				sizeof(struct cam_axi_vote));
+			top_common->total_bw_applied = total_bw_new_vote;
+		} else {
+			CAM_ERR(CAM_PERF, "BW request failed, rc=%d", rc);
+		}
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_bw_update_v2(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bw_update_args_v2        *bw_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_update = (struct cam_vfe_bw_update_args_v2 *)cmd_args;
+	res = bw_update->node_res;
+
+	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < top_common->num_mux; i++) {
+		if (top_common->mux_rsrc[i].res_id == res->res_id) {
+			memcpy(&top_common->req_axi_vote[i],
+				&bw_update->isp_vote,
+				sizeof(struct cam_axi_vote));
+			top_common->axi_vote_control[i] =
+				CAM_VFE_BW_CONTROL_INCLUDE;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_PERF,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_set_axi_bw_vote(soc_private, top_common,
+			false);
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_bw_update(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bw_update_args        *bw_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+	struct cam_axi_vote                  *mux_axi_vote;
+	bool                                  vid_exists = false;
+	bool                                  rdi_exists = false;
+
+	bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+	res = bw_update->node_res;
+
+	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	CAM_DBG(CAM_PERF, "res_id=%d, BW=[%lld %lld]",
+		res->res_id, bw_update->camnoc_bw_bytes,
+		bw_update->external_bw_bytes);
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < top_common->num_mux; i++) {
+		mux_axi_vote = &top_common->req_axi_vote[i];
+		if (top_common->mux_rsrc[i].res_id == res->res_id) {
+			mux_axi_vote->num_paths = 1;
+			if ((res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
+				(res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
+				mux_axi_vote->axi_path[0].path_data_type =
+					CAM_AXI_PATH_DATA_IFE_RDI0 +
+					(res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
+			} else {
+				/*
+				 * Vote all bw into VIDEO path as we cannot
+				 * differentiate to which path this has to go
+				 */
+				mux_axi_vote->axi_path[0].path_data_type =
+					CAM_AXI_PATH_DATA_IFE_VID;
+			}
+
+			mux_axi_vote->axi_path[0].transac_type =
+				CAM_AXI_TRANSACTION_WRITE;
+			mux_axi_vote->axi_path[0].camnoc_bw =
+				bw_update->camnoc_bw_bytes;
+			mux_axi_vote->axi_path[0].mnoc_ab_bw =
+				bw_update->external_bw_bytes;
+			mux_axi_vote->axi_path[0].mnoc_ib_bw =
+				bw_update->external_bw_bytes;
+			/* Make ddr bw same as mnoc bw */
+			mux_axi_vote->axi_path[0].ddr_ab_bw =
+				bw_update->external_bw_bytes;
+			mux_axi_vote->axi_path[0].ddr_ib_bw =
+				bw_update->external_bw_bytes;
+
+			top_common->axi_vote_control[i] =
+				CAM_VFE_BW_CONTROL_INCLUDE;
+			break;
+		}
+
+		if (mux_axi_vote->num_paths == 1) {
+			if (mux_axi_vote->axi_path[0].path_data_type ==
+				CAM_AXI_PATH_DATA_IFE_VID)
+				vid_exists = true;
+			else if ((mux_axi_vote->axi_path[0].path_data_type >=
+				CAM_AXI_PATH_DATA_IFE_RDI0) &&
+				(mux_axi_vote->axi_path[0].path_data_type <=
+				CAM_AXI_PATH_DATA_IFE_RDI3))
+				rdi_exists = true;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_PERF,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_set_axi_bw_vote(soc_private, top_common,
+			false);
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_bw_control(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+	res = bw_ctrl->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < top_common->num_mux; i++) {
+		if (top_common->mux_rsrc[i].res_id == res->res_id) {
+			top_common->axi_vote_control[i] = bw_ctrl->action;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_PERF,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_set_axi_bw_vote(soc_private, top_common, true);
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h
new file mode 100644
index 0000000..03be713
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_TOP_COMMON_H_
+#define _CAM_VFE_TOP_COMMON_H_
+
+#define CAM_VFE_TOP_MUX_MAX 6
+#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 18
+
+#include "cam_cpas_api.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+
+struct cam_vfe_top_priv_common {
+	struct cam_isp_resource_node    mux_rsrc[CAM_VFE_TOP_MUX_MAX];
+	uint32_t                        num_mux;
+	uint32_t                        hw_idx;
+	struct cam_axi_vote             applied_axi_vote;
+	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_MUX_MAX];
+	struct cam_axi_vote             last_vote[
+					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter;
+	uint64_t                        total_bw_applied;
+	enum cam_vfe_bw_control_action  axi_vote_control[CAM_VFE_TOP_MUX_MAX];
+};
+
+int cam_vfe_top_set_axi_bw_vote(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, bool start_stop);
+
+int cam_vfe_top_bw_update_v2(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size);
+
+int cam_vfe_top_bw_update(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size);
+
+int cam_vfe_top_bw_control(struct cam_vfe_soc_private *soc_private,
+	struct cam_vfe_top_priv_common *top_common, void *cmd_args,
+	uint32_t arg_size);
+
+#endif /* _CAM_VFE_TOP_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 78a72eb..c5225c5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -10,12 +10,10 @@
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
 #include "cam_debug_util.h"
-#include "cam_cpas_api.h"
 #include "cam_vfe_soc.h"
 
 #define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00003F9F
 #define CAM_VFE_HW_RESET_HW_VAL               0x00003F87
-#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
 
 struct cam_vfe_top_ver2_common_data {
 	struct cam_hw_soc_info                     *soc_info;
@@ -25,17 +23,10 @@ struct cam_vfe_top_ver2_common_data {
 
 struct cam_vfe_top_ver2_priv {
 	struct cam_vfe_top_ver2_common_data common_data;
-	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                       hw_clk_rate;
-	struct cam_axi_vote                applied_axi_vote;
-	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
-	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
-	struct cam_axi_vote             last_vote[CAM_VFE_TOP_VER2_MUX_MAX *
-					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
-	uint32_t                        last_counter;
-	uint64_t                        total_bw_applied;
-	enum cam_vfe_bw_control_action
-		axi_vote_control[CAM_VFE_TOP_VER2_MUX_MAX];
+	unsigned long                       req_clk_rate[
+						CAM_VFE_TOP_MUX_MAX];
+	struct cam_vfe_top_priv_common      top_common;
 };
 
 static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
@@ -94,14 +85,14 @@ static int cam_vfe_top_set_hw_clk_rate(
 
 	soc_info = top_priv->common_data.soc_info;
 
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
 		if (top_priv->req_clk_rate[i] > max_clk_rate)
 			max_clk_rate = top_priv->req_clk_rate[i];
 	}
 	if (max_clk_rate == top_priv->hw_clk_rate)
 		return 0;
 
-	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%llu",
+	CAM_DBG(CAM_PERF, "VFE: Clock name=%s idx=%d clk=%llu",
 		soc_info->clk_name[soc_info->src_clk_idx],
 		soc_info->src_clk_idx, max_clk_rate);
 
@@ -110,196 +101,7 @@ static int cam_vfe_top_set_hw_clk_rate(
 	if (!rc)
 		top_priv->hw_clk_rate = max_clk_rate;
 	else
-		CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
-
-	return rc;
-}
-
-static struct cam_axi_vote *cam_vfe_top_delay_bw_reduction(
-	struct cam_vfe_top_ver2_priv *top_priv,
-	uint64_t *to_be_applied_bw)
-{
-	uint32_t i, j;
-	int vote_idx = -1;
-	uint64_t max_bw = 0;
-	uint64_t total_bw;
-	struct cam_axi_vote *curr_l_vote;
-
-	for (i = 0; i < (CAM_VFE_TOP_VER2_MUX_MAX *
-		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
-		total_bw = 0;
-		curr_l_vote = &top_priv->last_vote[i];
-		for (j = 0; j < curr_l_vote->num_paths; j++) {
-			if (total_bw >
-				(U64_MAX -
-				curr_l_vote->axi_path[j].camnoc_bw)) {
-				CAM_ERR(CAM_ISP, "Overflow at idx: %d", j);
-				return NULL;
-			}
-
-			total_bw += curr_l_vote->axi_path[j].camnoc_bw;
-		}
-
-		if (total_bw > max_bw) {
-			vote_idx = i;
-			max_bw = total_bw;
-		}
-	}
-
-	if (vote_idx < 0)
-		return NULL;
-
-	*to_be_applied_bw = max_bw;
-
-	return &top_priv->last_vote[vote_idx];
-}
-
-static int cam_vfe_top_set_axi_bw_vote(
-	struct cam_vfe_top_ver2_priv *top_priv,
-	bool start_stop)
-{
-	struct cam_axi_vote agg_vote = {0};
-	struct cam_axi_vote *to_be_applied_axi_vote = NULL;
-	int rc = 0;
-	uint32_t i;
-	uint32_t num_paths = 0;
-	uint64_t total_bw_new_vote = 0;
-	bool bw_unchanged = true;
-	struct cam_hw_soc_info   *soc_info =
-		top_priv->common_data.soc_info;
-	struct cam_vfe_soc_private *soc_private =
-		soc_info->soc_private;
-	bool apply_bw_update = false;
-
-	if (!soc_private) {
-		CAM_ERR(CAM_ISP, "Error soc_private NULL");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		if (top_priv->axi_vote_control[i] ==
-			CAM_VFE_BW_CONTROL_INCLUDE) {
-			if (num_paths +
-				top_priv->req_axi_vote[i].num_paths >
-				CAM_CPAS_MAX_PATHS_PER_CLIENT) {
-				CAM_ERR(CAM_ISP,
-					"Required paths(%d) more than max(%d)",
-					num_paths +
-					top_priv->req_axi_vote[i].num_paths,
-					CAM_CPAS_MAX_PATHS_PER_CLIENT);
-				return -EINVAL;
-			}
-
-			memcpy(&agg_vote.axi_path[num_paths],
-				&top_priv->req_axi_vote[i].axi_path[0],
-				top_priv->req_axi_vote[i].num_paths *
-				sizeof(
-				struct cam_axi_per_path_bw_vote));
-			num_paths += top_priv->req_axi_vote[i].num_paths;
-		}
-	}
-
-	agg_vote.num_paths = num_paths;
-
-	for (i = 0; i < agg_vote.num_paths; i++) {
-		CAM_DBG(CAM_PERF,
-			"ife[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
-			top_priv->common_data.hw_intf->hw_idx,
-			top_priv->last_counter,
-			cam_cpas_axi_util_path_type_to_string(
-			agg_vote.axi_path[i].path_data_type),
-			cam_cpas_axi_util_trans_type_to_string(
-			agg_vote.axi_path[i].transac_type),
-			agg_vote.axi_path[i].camnoc_bw,
-			agg_vote.axi_path[i].mnoc_ab_bw,
-			agg_vote.axi_path[i].mnoc_ib_bw);
-
-		total_bw_new_vote += agg_vote.axi_path[i].camnoc_bw;
-	}
-
-	memcpy(&top_priv->last_vote[top_priv->last_counter], &agg_vote,
-		sizeof(struct cam_axi_vote));
-	top_priv->last_counter = (top_priv->last_counter + 1) %
-		(CAM_VFE_TOP_VER2_MUX_MAX *
-		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
-
-	if ((agg_vote.num_paths != top_priv->applied_axi_vote.num_paths) ||
-		(total_bw_new_vote != top_priv->total_bw_applied))
-		bw_unchanged = false;
-
-	CAM_DBG(CAM_PERF,
-		"ife[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d",
-		top_priv->common_data.hw_intf->hw_idx,
-		top_priv->total_bw_applied, total_bw_new_vote,
-		bw_unchanged, start_stop);
-
-	if (bw_unchanged) {
-		CAM_DBG(CAM_ISP, "BW config unchanged");
-		return 0;
-	}
-
-	if (start_stop) {
-		/* need to vote current request immediately */
-		to_be_applied_axi_vote = &agg_vote;
-		/* Reset everything, we can start afresh */
-		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
-			(CAM_VFE_TOP_VER2_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
-		top_priv->last_counter = 0;
-		top_priv->last_vote[top_priv->last_counter] = agg_vote;
-		top_priv->last_counter = (top_priv->last_counter + 1) %
-			(CAM_VFE_TOP_VER2_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
-	} else {
-		/*
-		 * Find max bw request in last few frames. This will the bw
-		 * that we want to vote to CPAS now.
-		 */
-		to_be_applied_axi_vote =
-			cam_vfe_top_delay_bw_reduction(top_priv,
-			&total_bw_new_vote);
-		if (!to_be_applied_axi_vote) {
-			CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL");
-			return -EINVAL;
-		}
-	}
-
-	for (i = 0; i < to_be_applied_axi_vote->num_paths; i++) {
-		CAM_DBG(CAM_PERF,
-			"ife[%d] : Apply BW Vote : [%s][%s] [%llu %llu %llu]",
-			top_priv->common_data.hw_intf->hw_idx,
-			cam_cpas_axi_util_path_type_to_string(
-			to_be_applied_axi_vote->axi_path[i].path_data_type),
-			cam_cpas_axi_util_trans_type_to_string(
-			to_be_applied_axi_vote->axi_path[i].transac_type),
-			to_be_applied_axi_vote->axi_path[i].camnoc_bw,
-			to_be_applied_axi_vote->axi_path[i].mnoc_ab_bw,
-			to_be_applied_axi_vote->axi_path[i].mnoc_ib_bw);
-	}
-
-	if ((to_be_applied_axi_vote->num_paths !=
-		top_priv->applied_axi_vote.num_paths) ||
-		(total_bw_new_vote != top_priv->total_bw_applied))
-		apply_bw_update = true;
-
-	CAM_DBG(CAM_PERF,
-		"ife[%d] : Delayed update: applied_total=%lld, new_total=%lld apply_bw_update=%d, start_stop=%d",
-		top_priv->common_data.hw_intf->hw_idx,
-		top_priv->total_bw_applied, total_bw_new_vote,
-		apply_bw_update, start_stop);
-
-	if (apply_bw_update) {
-		rc = cam_cpas_update_axi_vote(soc_private->cpas_handle,
-			to_be_applied_axi_vote);
-		if (!rc) {
-			memcpy(&top_priv->applied_axi_vote,
-				to_be_applied_axi_vote,
-				sizeof(struct cam_axi_vote));
-			top_priv->total_bw_applied = total_bw_new_vote;
-		} else {
-			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
-		}
-	}
+		CAM_ERR(CAM_PERF, "Set Clock rate failed, rc=%d", rc);
 
 	return rc;
 }
@@ -331,7 +133,7 @@ static int cam_vfe_top_clock_update(
 	res = clk_update->node_res;
 
 	if (!res || !res->hw_intf->hw_priv) {
-		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+		CAM_ERR(CAM_PERF, "Invalid input res %pK", res);
 		return -EINVAL;
 	}
 
@@ -339,21 +141,21 @@ static int cam_vfe_top_clock_update(
 
 	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
 		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+		CAM_ERR(CAM_PERF, "VFE:%d Invalid res_type:%d res id%d",
 			res->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		if (top_priv->top_common.mux_rsrc[i].res_id == res->res_id) {
 			top_priv->req_clk_rate[i] = clk_update->clk_rate;
 			break;
 		}
 	}
 
 	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_DBG(CAM_ISP,
+		CAM_DBG(CAM_PERF,
 			"VFE:%d Not ready to set clocks yet :%d",
 			res->hw_intf->hw_idx,
 			hw_info->hw_state);
@@ -363,193 +165,6 @@ static int cam_vfe_top_clock_update(
 	return rc;
 }
 
-static int cam_vfe_top_bw_update_v2(
-	struct cam_vfe_top_ver2_priv *top_priv,
-	void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_update_args_v2        *bw_update = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-
-	bw_update = (struct cam_vfe_bw_update_args_v2 *)cmd_args;
-	res = bw_update->node_res;
-
-	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			memcpy(&top_priv->req_axi_vote[i], &bw_update->isp_vote,
-				sizeof(struct cam_axi_vote));
-			top_priv->axi_vote_control[i] =
-				CAM_VFE_BW_CONTROL_INCLUDE;
-			break;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_set_axi_bw_vote(top_priv, false);
-	}
-
-	return rc;
-}
-
-static int cam_vfe_top_bw_update(
-	struct cam_vfe_top_ver2_priv *top_priv,
-	void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_update_args        *bw_update = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-	struct cam_axi_vote                  *mux_axi_vote;
-	bool                                  vid_exists = false;
-	bool                                  rdi_exists = false;
-
-	bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
-	res = bw_update->node_res;
-
-	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	CAM_DBG(CAM_ISP, "res_id=%d, BW=[%lld %lld]",
-		res->res_id, bw_update->camnoc_bw_bytes,
-		bw_update->external_bw_bytes);
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		mux_axi_vote = &top_priv->req_axi_vote[i];
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			mux_axi_vote->num_paths = 1;
-			if ((res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
-				(res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
-				mux_axi_vote->axi_path[0].path_data_type =
-					CAM_AXI_PATH_DATA_IFE_RDI0 +
-					(res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
-			} else {
-				/*
-				 * Vote all bw into VIDEO path as we cannot
-				 * differentiate to which path this has to go
-				 */
-				mux_axi_vote->axi_path[0].path_data_type =
-					CAM_AXI_PATH_DATA_IFE_VID;
-			}
-
-			mux_axi_vote->axi_path[0].transac_type =
-				CAM_AXI_TRANSACTION_WRITE;
-			mux_axi_vote->axi_path[0].camnoc_bw =
-				bw_update->camnoc_bw_bytes;
-			mux_axi_vote->axi_path[0].mnoc_ab_bw =
-				bw_update->external_bw_bytes;
-			mux_axi_vote->axi_path[0].mnoc_ib_bw =
-				bw_update->external_bw_bytes;
-			/* Make ddr bw same as mnoc bw */
-			mux_axi_vote->axi_path[0].ddr_ab_bw =
-				bw_update->external_bw_bytes;
-			mux_axi_vote->axi_path[0].ddr_ib_bw =
-				bw_update->external_bw_bytes;
-
-			top_priv->axi_vote_control[i] =
-				CAM_VFE_BW_CONTROL_INCLUDE;
-			break;
-		}
-
-		if (mux_axi_vote->num_paths == 1) {
-			if (mux_axi_vote->axi_path[0].path_data_type ==
-				CAM_AXI_PATH_DATA_IFE_VID)
-				vid_exists = true;
-			else if ((mux_axi_vote->axi_path[0].path_data_type >=
-				CAM_AXI_PATH_DATA_IFE_RDI0) &&
-				(mux_axi_vote->axi_path[0].path_data_type <=
-				CAM_AXI_PATH_DATA_IFE_RDI3))
-				rdi_exists = true;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_set_axi_bw_vote(top_priv, false);
-	}
-
-	return rc;
-}
-
-static int cam_vfe_top_bw_control(
-	struct cam_vfe_top_ver2_priv *top_priv,
-	 void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-
-	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
-	res = bw_ctrl->node_res;
-
-	if (!res || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			top_priv->axi_vote_control[i] = bw_ctrl->action;
-			break;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
-	}
-
-	return rc;
-}
-
 static int cam_vfe_top_mux_get_reg_update(
 	struct cam_vfe_top_ver2_priv *top_priv,
 	void *cmd_args, uint32_t arg_size)
@@ -638,14 +253,15 @@ int cam_vfe_top_reserve(void *device_priv,
 	acquire_args = &args->vfe_in;
 
 
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id ==  acquire_args->res_id &&
-			top_priv->mux_rsrc[i].res_state ==
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		if (top_priv->top_common.mux_rsrc[i].res_id ==
+			acquire_args->res_id &&
+			top_priv->top_common.mux_rsrc[i].res_state ==
 			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
 
 			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
 				rc = cam_vfe_camif_ver2_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
@@ -654,7 +270,18 @@ int cam_vfe_top_reserve(void *device_priv,
 			if (acquire_args->res_id ==
 				CAM_ISP_HW_VFE_IN_PDLIB) {
 				rc = cam_vfe_camif_lite_ver2_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			if ((acquire_args->res_id >=
+				CAM_ISP_HW_VFE_IN_RDI0) &&
+				(acquire_args->res_id <=
+				CAM_ISP_HW_VFE_IN_RDI3)) {
+				rc = cam_vfe_rdi_ver2_acquire_resource(
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
@@ -662,18 +289,20 @@ int cam_vfe_top_reserve(void *device_priv,
 
 			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_RD) {
 				rc = cam_vfe_fe_ver1_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
 			}
 
-			top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
-			top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
-			top_priv->mux_rsrc[i].res_state =
+			top_priv->top_common.mux_rsrc[i].cdm_ops =
+				acquire_args->cdm_ops;
+			top_priv->top_common.mux_rsrc[i].tasklet_info =
+				args->tasklet;
+			top_priv->top_common.mux_rsrc[i].res_state =
 				CAM_ISP_RESOURCE_STATE_RESERVED;
 			acquire_args->rsrc_node =
-				&top_priv->mux_rsrc[i];
+				&top_priv->top_common.mux_rsrc[i];
 
 			rc = 0;
 			break;
@@ -715,6 +344,8 @@ int cam_vfe_top_start(void *device_priv,
 	struct cam_vfe_top_ver2_priv            *top_priv;
 	struct cam_isp_resource_node            *mux_res;
 	struct cam_hw_info                      *hw_info = NULL;
+	struct cam_hw_soc_info                  *soc_info = NULL;
+	struct cam_vfe_soc_private              *soc_private = NULL;
 	int rc = 0;
 
 	if (!device_priv || !start_args) {
@@ -723,6 +354,13 @@ int cam_vfe_top_start(void *device_priv,
 	}
 
 	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
+
 	mux_res = (struct cam_isp_resource_node *)start_args;
 	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
 
@@ -734,7 +372,8 @@ int cam_vfe_top_start(void *device_priv,
 			return rc;
 		}
 
-		rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+		rc = cam_vfe_top_set_axi_bw_vote(soc_private,
+			&top_priv->top_common, true);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"set_axi_bw_vote failed, rc=%d", rc);
@@ -785,17 +424,18 @@ int cam_vfe_top_stop(void *device_priv,
 	}
 
 	if (!rc) {
-		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+		for (i = 0; i < top_priv->top_common.num_mux; i++) {
+			if (top_priv->top_common.mux_rsrc[i].res_id ==
+				mux_res->res_id) {
 				top_priv->req_clk_rate[i] = 0;
 				top_priv->req_clk_rate[i] = 0;
-				top_priv->req_axi_vote[i].axi_path[0].camnoc_bw
-					= 0;
-				top_priv->req_axi_vote[i].axi_path[0].mnoc_ab_bw
-					= 0;
-				top_priv->req_axi_vote[i].axi_path[0].mnoc_ib_bw
-					= 0;
-				top_priv->axi_vote_control[i] =
+				top_priv->top_common.req_axi_vote[i]
+					.axi_path[0].camnoc_bw = 0;
+				top_priv->top_common.req_axi_vote[i]
+					.axi_path[0].mnoc_ab_bw = 0;
+				top_priv->top_common.req_axi_vote[i]
+					.axi_path[0].mnoc_ib_bw = 0;
+				top_priv->top_common.axi_vote_control[i] =
 					CAM_VFE_BW_CONTROL_EXCLUDE;
 				break;
 			}
@@ -822,12 +462,20 @@ int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
 {
 	int rc = 0;
 	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_hw_soc_info                  *soc_info = NULL;
+	struct cam_vfe_soc_private              *soc_private = NULL;
 
 	if (!device_priv || !cmd_args) {
 		CAM_ERR(CAM_ISP, "Error! Invalid arguments");
 		return -EINVAL;
 	}
 	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
 
 	switch (cmd_type) {
 	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
@@ -846,15 +494,16 @@ int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
 			arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_UPDATE:
-		rc = cam_vfe_top_bw_update(top_priv, cmd_args,
-			arg_size);
+		rc = cam_vfe_top_bw_update(soc_private, &top_priv->top_common,
+			cmd_args, arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_UPDATE_V2:
-		rc = cam_vfe_top_bw_update_v2(top_priv, cmd_args,
-			arg_size);
+		rc = cam_vfe_top_bw_update_v2(soc_private,
+			&top_priv->top_common, cmd_args, arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_CONTROL:
-		rc = cam_vfe_top_bw_control(top_priv, cmd_args, arg_size);
+		rc = cam_vfe_top_bw_control(soc_private, &top_priv->top_common,
+			cmd_args, arg_size);
 		break;
 	default:
 		rc = -EINVAL;
@@ -891,56 +540,69 @@ int cam_vfe_top_ver2_init(
 		rc = -ENOMEM;
 		goto free_vfe_top;
 	}
+
 	vfe_top->top_priv = top_priv;
 	top_priv->hw_clk_rate = 0;
+	if (ver2_hw_info->num_mux > CAM_VFE_TOP_MUX_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid number of input rsrc: %d, max: %d",
+			ver2_hw_info->num_mux, CAM_VFE_TOP_MUX_MAX);
+		rc = -EINVAL;
+		goto free_top_priv;
+	}
 
-	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
-		top_priv->mux_rsrc[i].hw_intf = hw_intf;
-		top_priv->mux_rsrc[i].res_state =
+	top_priv->top_common.num_mux = ver2_hw_info->num_mux;
+
+	for (i = 0, j = 0; i < top_priv->top_common.num_mux; i++) {
+		top_priv->top_common.mux_rsrc[i].res_type =
+			CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->top_common.mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_AVAILABLE;
 		top_priv->req_clk_rate[i] = 0;
 
 		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_CAMIF;
 
 			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
 			CAM_VFE_CAMIF_LITE_VER_2_0) {
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_PDLIB;
 
 			rc = cam_vfe_camif_lite_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_lite_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
 			CAM_VFE_RDI_VER_1_0) {
 			/* set the RDI resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_RDI0 + j++;
 
 			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->rdi_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
 			CAM_VFE_IN_RD_VER_1_0) {
 			/* set the RD resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_RD;
 
 			rc = cam_vfe_fe_ver1_init(hw_intf, soc_info,
 				&ver2_hw_info->fe_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				goto deinit_resources;
 		} else {
@@ -963,6 +625,7 @@ int cam_vfe_top_ver2_init(
 
 	top_priv->common_data.soc_info     = soc_info;
 	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->top_common.hw_idx        = hw_intf->hw_idx;
 	top_priv->common_data.common_reg   = ver2_hw_info->common_reg;
 
 	return rc;
@@ -970,26 +633,30 @@ int cam_vfe_top_ver2_init(
 deinit_resources:
 	for (--i; i >= 0; i--) {
 		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
-			if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
+			if (cam_vfe_camif_ver2_deinit(
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif Deinit failed");
 		} else if (ver2_hw_info->mux_type[i] ==
 			CAM_VFE_CAMIF_LITE_VER_2_0) {
 			if (cam_vfe_camif_lite_ver2_deinit(
-				&top_priv->mux_rsrc[i]))
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
 		} else if (ver2_hw_info->mux_type[i] ==
 			CAM_VFE_IN_RD_VER_1_0) {
-			if (cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]))
+			if (cam_vfe_fe_ver1_deinit(
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "VFE FE deinit failed");
 		} else {
-			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+			if (cam_vfe_rdi_ver2_deinit(
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "RDI Deinit failed");
 		}
-		top_priv->mux_rsrc[i].res_state =
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	}
-	kfree(vfe_top->top_priv);
 
+free_top_priv:
+	kfree(vfe_top->top_priv);
 free_vfe_top:
 	kfree(vfe_top);
 end:
@@ -1020,35 +687,39 @@ int cam_vfe_top_ver2_deinit(struct cam_vfe_top  **vfe_top_ptr)
 		goto free_vfe_top;
 	}
 
-	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		top_priv->mux_rsrc[i].res_state =
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
-		if (top_priv->mux_rsrc[i].res_type ==
+		if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_CAMIF_VER_2_0) {
-			rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
+			rc = cam_vfe_camif_ver2_deinit(
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
 					rc);
-		} else if (top_priv->mux_rsrc[i].res_type ==
+		} else if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_CAMIF_LITE_VER_2_0) {
 			rc = cam_vfe_camif_lite_ver2_deinit(
-				&top_priv->mux_rsrc[i]);
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP,
 					"Camif lite deinit failed rc=%d", rc);
-		} else if (top_priv->mux_rsrc[i].res_type ==
+		} else if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_RDI_VER_1_0) {
-			rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+			rc = cam_vfe_rdi_ver2_deinit(
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
-		} else if (top_priv->mux_rsrc[i].res_type ==
+		} else if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_IN_RD_VER_1_0) {
-			rc = cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]);
+			rc = cam_vfe_fe_ver1_deinit(
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
 					rc);
 		}
 	}
+
 	kfree(vfe_top->top_priv);
 
 free_vfe_top:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index 82e30b4..961bf95 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -10,8 +10,7 @@
 #include "cam_vfe_camif_lite_ver2.h"
 #include "cam_vfe_rdi.h"
 #include "cam_vfe_fe_ver1.h"
-
-#define CAM_VFE_TOP_VER2_MUX_MAX     6
+#include "cam_vfe_top_common.h"
 
 enum cam_vfe_top_ver2_module_type {
 	CAM_VFE_TOP_VER2_MODULE_LENS,
@@ -50,7 +49,8 @@ struct cam_vfe_top_ver2_hw_info {
 	struct cam_vfe_camif_lite_ver2_hw_info      camif_lite_hw_info;
 	struct cam_vfe_rdi_ver2_hw_info             rdi_hw_info;
 	struct cam_vfe_fe_ver1_hw_info              fe_hw_info;
-	uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
+	uint32_t                                    num_mux;
+	uint32_t mux_type[CAM_VFE_TOP_MUX_MAX];
 };
 
 int cam_vfe_top_ver2_init(struct cam_hw_soc_info     *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
index 578b077..a4fbe67a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -10,14 +10,12 @@
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver3.h"
 #include "cam_debug_util.h"
-#include "cam_cpas_api.h"
 #include "cam_vfe_soc.h"
 
 #define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00000003
 #define CAM_VFE_HW_RESET_HW_VAL               0x007F0000
 #define CAM_VFE_LITE_HW_RESET_AND_REG_VAL     0x00000002
 #define CAM_VFE_LITE_HW_RESET_HW_VAL          0x0000003D
-#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
 
 struct cam_vfe_top_ver3_common_data {
 	struct cam_hw_soc_info                     *soc_info;
@@ -27,17 +25,10 @@ struct cam_vfe_top_ver3_common_data {
 
 struct cam_vfe_top_ver3_priv {
 	struct cam_vfe_top_ver3_common_data common_data;
-	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER3_MUX_MAX];
 	unsigned long                       hw_clk_rate;
-	struct cam_axi_vote                 applied_axi_vote;
-	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER3_MUX_MAX];
-	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER3_MUX_MAX];
-	struct cam_axi_vote             last_vote[CAM_VFE_TOP_VER3_MUX_MAX *
-					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
-	uint32_t                        last_counter;
-	uint64_t                        total_bw_applied;
-	enum cam_vfe_bw_control_action
-		axi_vote_control[CAM_VFE_TOP_VER3_MUX_MAX];
+	unsigned long                       req_clk_rate[
+						CAM_VFE_TOP_MUX_MAX];
+	struct cam_vfe_top_priv_common      top_common;
 };
 
 static int cam_vfe_top_ver3_mux_get_base(struct cam_vfe_top_ver3_priv *top_priv,
@@ -96,14 +87,14 @@ static int cam_vfe_top_ver3_set_hw_clk_rate(
 
 	soc_info = top_priv->common_data.soc_info;
 
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
 		if (top_priv->req_clk_rate[i] > max_clk_rate)
 			max_clk_rate = top_priv->req_clk_rate[i];
 	}
 	if (max_clk_rate == top_priv->hw_clk_rate)
 		return 0;
 
-	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%llu",
+	CAM_DBG(CAM_PERF, "VFE: Clock name=%s idx=%d clk=%llu",
 		soc_info->clk_name[soc_info->src_clk_idx],
 		soc_info->src_clk_idx, max_clk_rate);
 
@@ -112,196 +103,7 @@ static int cam_vfe_top_ver3_set_hw_clk_rate(
 	if (!rc)
 		top_priv->hw_clk_rate = max_clk_rate;
 	else
-		CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
-
-	return rc;
-}
-
-static struct cam_axi_vote *cam_vfe_top_delay_bw_reduction(
-	struct cam_vfe_top_ver3_priv *top_priv,
-	uint64_t *to_be_applied_bw)
-{
-	uint32_t i, j;
-	int vote_idx = -1;
-	uint64_t max_bw = 0;
-	uint64_t total_bw;
-	struct cam_axi_vote *curr_l_vote;
-
-	for (i = 0; i < (CAM_VFE_TOP_VER3_MUX_MAX *
-		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
-		total_bw = 0;
-		curr_l_vote = &top_priv->last_vote[i];
-		for (j = 0; j < curr_l_vote->num_paths; j++) {
-			if (total_bw >
-				(U64_MAX -
-				curr_l_vote->axi_path[j].camnoc_bw)) {
-				CAM_ERR(CAM_ISP, "Overflow at idx: %d", j);
-				return NULL;
-			}
-
-			total_bw += curr_l_vote->axi_path[j].camnoc_bw;
-		}
-
-		if (total_bw > max_bw) {
-			vote_idx = i;
-			max_bw = total_bw;
-		}
-	}
-
-	if (vote_idx < 0)
-		return NULL;
-
-	*to_be_applied_bw = max_bw;
-
-	return &top_priv->last_vote[vote_idx];
-}
-
-static int cam_vfe_top_ver3_set_axi_bw_vote(
-	struct cam_vfe_top_ver3_priv *top_priv,
-	bool start_stop)
-{
-	struct cam_axi_vote agg_vote = {0};
-	struct cam_axi_vote *to_be_applied_axi_vote = NULL;
-	int rc = 0;
-	uint32_t i;
-	uint32_t num_paths = 0;
-	uint64_t total_bw_new_vote = 0;
-	bool bw_unchanged = true;
-	struct cam_hw_soc_info   *soc_info =
-		top_priv->common_data.soc_info;
-	struct cam_vfe_soc_private *soc_private =
-		soc_info->soc_private;
-	bool apply_bw_update = false;
-
-	if (!soc_private) {
-		CAM_ERR(CAM_ISP, "Error soc_private NULL");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		if (top_priv->axi_vote_control[i] ==
-			CAM_VFE_BW_CONTROL_INCLUDE) {
-			if (num_paths +
-				top_priv->req_axi_vote[i].num_paths >
-				CAM_CPAS_MAX_PATHS_PER_CLIENT) {
-				CAM_ERR(CAM_ISP,
-					"Required paths(%d) more than max(%d)",
-					num_paths +
-					top_priv->req_axi_vote[i].num_paths,
-					CAM_CPAS_MAX_PATHS_PER_CLIENT);
-				return -EINVAL;
-			}
-
-			memcpy(&agg_vote.axi_path[num_paths],
-				&top_priv->req_axi_vote[i].axi_path[0],
-				top_priv->req_axi_vote[i].num_paths *
-				sizeof(
-				struct cam_axi_per_path_bw_vote));
-			num_paths += top_priv->req_axi_vote[i].num_paths;
-		}
-	}
-
-	agg_vote.num_paths = num_paths;
-
-	for (i = 0; i < agg_vote.num_paths; i++) {
-		CAM_DBG(CAM_PERF,
-			"ife[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
-			top_priv->common_data.hw_intf->hw_idx,
-			top_priv->last_counter,
-			cam_cpas_axi_util_path_type_to_string(
-			agg_vote.axi_path[i].path_data_type),
-			cam_cpas_axi_util_trans_type_to_string(
-			agg_vote.axi_path[i].transac_type),
-			agg_vote.axi_path[i].camnoc_bw,
-			agg_vote.axi_path[i].mnoc_ab_bw,
-			agg_vote.axi_path[i].mnoc_ib_bw);
-
-		total_bw_new_vote += agg_vote.axi_path[i].camnoc_bw;
-	}
-
-	memcpy(&top_priv->last_vote[top_priv->last_counter], &agg_vote,
-		sizeof(struct cam_axi_vote));
-	top_priv->last_counter = (top_priv->last_counter + 1) %
-		(CAM_VFE_TOP_VER3_MUX_MAX *
-		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
-
-	if ((agg_vote.num_paths != top_priv->applied_axi_vote.num_paths) ||
-		(total_bw_new_vote != top_priv->total_bw_applied))
-		bw_unchanged = false;
-
-	CAM_DBG(CAM_PERF,
-		"ife[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d",
-		top_priv->common_data.hw_intf->hw_idx,
-		top_priv->total_bw_applied, total_bw_new_vote,
-		bw_unchanged, start_stop);
-
-	if (bw_unchanged) {
-		CAM_DBG(CAM_ISP, "BW config unchanged");
-		return 0;
-	}
-
-	if (start_stop) {
-		/* need to vote current request immediately */
-		to_be_applied_axi_vote = &agg_vote;
-		/* Reset everything, we can start afresh */
-		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
-			(CAM_VFE_TOP_VER3_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
-		top_priv->last_counter = 0;
-		top_priv->last_vote[top_priv->last_counter] = agg_vote;
-		top_priv->last_counter = (top_priv->last_counter + 1) %
-			(CAM_VFE_TOP_VER3_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
-	} else {
-		/*
-		 * Find max bw request in last few frames. This will the bw
-		 * that we want to vote to CPAS now.
-		 */
-		to_be_applied_axi_vote =
-			cam_vfe_top_delay_bw_reduction(top_priv,
-			&total_bw_new_vote);
-		if (!to_be_applied_axi_vote) {
-			CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL");
-			return -EINVAL;
-		}
-	}
-
-	for (i = 0; i < to_be_applied_axi_vote->num_paths; i++) {
-		CAM_DBG(CAM_PERF,
-			"ife[%d] : Apply BW Vote : [%s][%s] [%llu %llu %llu]",
-			top_priv->common_data.hw_intf->hw_idx,
-			cam_cpas_axi_util_path_type_to_string(
-			to_be_applied_axi_vote->axi_path[i].path_data_type),
-			cam_cpas_axi_util_trans_type_to_string(
-			to_be_applied_axi_vote->axi_path[i].transac_type),
-			to_be_applied_axi_vote->axi_path[i].camnoc_bw,
-			to_be_applied_axi_vote->axi_path[i].mnoc_ab_bw,
-			to_be_applied_axi_vote->axi_path[i].mnoc_ib_bw);
-	}
-
-	if ((to_be_applied_axi_vote->num_paths !=
-		top_priv->applied_axi_vote.num_paths) ||
-		(total_bw_new_vote != top_priv->total_bw_applied))
-		apply_bw_update = true;
-
-	CAM_DBG(CAM_PERF,
-		"ife[%d] : Delayed update: applied_total=%lld, new_total=%lld apply_bw_update=%d, start_stop=%d",
-		top_priv->common_data.hw_intf->hw_idx,
-		top_priv->total_bw_applied, total_bw_new_vote,
-		apply_bw_update, start_stop);
-
-	if (apply_bw_update) {
-		rc = cam_cpas_update_axi_vote(soc_private->cpas_handle,
-			to_be_applied_axi_vote);
-		if (!rc) {
-			memcpy(&top_priv->applied_axi_vote,
-				to_be_applied_axi_vote,
-				sizeof(struct cam_axi_vote));
-			top_priv->total_bw_applied = total_bw_new_vote;
-		} else {
-			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
-		}
-	}
+		CAM_ERR(CAM_PERF, "Set Clock rate failed, rc=%d", rc);
 
 	return rc;
 }
@@ -333,7 +135,7 @@ static int cam_vfe_top_ver3_clock_update(
 	res = clk_update->node_res;
 
 	if (!res || !res->hw_intf->hw_priv) {
-		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+		CAM_ERR(CAM_PERF, "Invalid input res %pK", res);
 		return -EINVAL;
 	}
 
@@ -341,21 +143,21 @@ static int cam_vfe_top_ver3_clock_update(
 
 	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
 		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+		CAM_ERR(CAM_PERF, "VFE:%d Invalid res_type:%d res id%d",
 			res->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		if (top_priv->top_common.mux_rsrc[i].res_id == res->res_id) {
 			top_priv->req_clk_rate[i] = clk_update->clk_rate;
 			break;
 		}
 	}
 
 	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_DBG(CAM_ISP,
+		CAM_DBG(CAM_PERF,
 			"VFE:%d Not ready to set clocks yet :%d",
 			res->hw_intf->hw_idx,
 			hw_info->hw_state);
@@ -365,148 +167,6 @@ static int cam_vfe_top_ver3_clock_update(
 	return rc;
 }
 
-static int cam_vfe_top_ver3_bw_update_v2(
-	struct cam_vfe_top_ver3_priv *top_priv,
-	void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_update_args_v2        *bw_update = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-
-	bw_update = (struct cam_vfe_bw_update_args_v2 *)cmd_args;
-	res = bw_update->node_res;
-
-	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			memcpy(&top_priv->req_axi_vote[i], &bw_update->isp_vote,
-				sizeof(struct cam_axi_vote));
-			top_priv->axi_vote_control[i] =
-				CAM_VFE_BW_CONTROL_INCLUDE;
-			break;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, false);
-	}
-
-	return rc;
-}
-
-static int cam_vfe_top_ver3_bw_update(
-	struct cam_vfe_top_ver3_priv *top_priv,
-	void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_update_args        *bw_update = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-	struct cam_axi_vote                  *mux_axi_vote;
-	bool                                  vid_exists = false;
-	bool                                  rdi_exists = false;
-
-	bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
-	res = bw_update->node_res;
-
-	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	CAM_DBG(CAM_ISP, "res_id=%d, BW=[%lld %lld]",
-		res->res_id, bw_update->camnoc_bw_bytes,
-		bw_update->external_bw_bytes);
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		mux_axi_vote = &top_priv->req_axi_vote[i];
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			mux_axi_vote->num_paths = 1;
-			if ((res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
-				(res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
-				mux_axi_vote->axi_path[0].path_data_type =
-					CAM_AXI_PATH_DATA_IFE_RDI0 +
-					(res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
-			} else {
-				/*
-				 * Vote all bw into VIDEO path as we cannot
-				 * differentiate to which path this has to go
-				 */
-				mux_axi_vote->axi_path[0].path_data_type =
-					CAM_AXI_PATH_DATA_IFE_VID;
-			}
-
-			mux_axi_vote->axi_path[0].transac_type =
-				CAM_AXI_TRANSACTION_WRITE;
-			mux_axi_vote->axi_path[0].camnoc_bw =
-				bw_update->camnoc_bw_bytes;
-			mux_axi_vote->axi_path[0].mnoc_ab_bw =
-				bw_update->external_bw_bytes;
-			mux_axi_vote->axi_path[0].mnoc_ib_bw =
-				bw_update->external_bw_bytes;
-			/* Make ddr bw same as mnoc bw */
-			mux_axi_vote->axi_path[0].ddr_ab_bw =
-				bw_update->external_bw_bytes;
-			mux_axi_vote->axi_path[0].ddr_ib_bw =
-				bw_update->external_bw_bytes;
-
-			top_priv->axi_vote_control[i] =
-				CAM_VFE_BW_CONTROL_INCLUDE;
-			break;
-		}
-
-		if (mux_axi_vote->num_paths == 1) {
-			if (mux_axi_vote->axi_path[0].path_data_type ==
-				CAM_AXI_PATH_DATA_IFE_VID)
-				vid_exists = true;
-			else if ((mux_axi_vote->axi_path[0].path_data_type >=
-				CAM_AXI_PATH_DATA_IFE_RDI0) &&
-				(mux_axi_vote->axi_path[0].path_data_type <=
-				CAM_AXI_PATH_DATA_IFE_RDI3))
-				rdi_exists = true;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, false);
-	}
-
-	return rc;
-}
-
 static int cam_vfe_core_config_control(
 	struct cam_vfe_top_ver3_priv *top_priv,
 	 void *cmd_args, uint32_t arg_size)
@@ -520,51 +180,6 @@ static int cam_vfe_core_config_control(
 	return -EINVAL;
 }
 
-static int cam_vfe_top_ver3_bw_control(
-	struct cam_vfe_top_ver3_priv *top_priv,
-	 void *cmd_args, uint32_t arg_size)
-{
-	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
-	struct cam_isp_resource_node         *res = NULL;
-	struct cam_hw_info                   *hw_info = NULL;
-	int                                   rc = 0;
-	int                                   i;
-
-	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
-	res = bw_ctrl->node_res;
-
-	if (!res || !res->hw_intf->hw_priv)
-		return -EINVAL;
-
-	hw_info = res->hw_intf->hw_priv;
-
-	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
-		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
-		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
-			res->hw_intf->hw_idx, res->res_type,
-			res->res_id);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
-			top_priv->axi_vote_control[i] = bw_ctrl->action;
-			break;
-		}
-	}
-
-	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"VFE:%d Not ready to set BW yet :%d",
-			res->hw_intf->hw_idx,
-			hw_info->hw_state);
-	} else {
-		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, true);
-	}
-
-	return rc;
-}
-
 static int cam_vfe_top_ver3_mux_get_reg_update(
 	struct cam_vfe_top_ver3_priv *top_priv,
 	void *cmd_args, uint32_t arg_size)
@@ -680,14 +295,15 @@ int cam_vfe_top_ver3_reserve(void *device_priv,
 	CAM_DBG(CAM_ISP, "res id %d", acquire_args->res_id);
 
 
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		if (top_priv->mux_rsrc[i].res_id ==  acquire_args->res_id &&
-			top_priv->mux_rsrc[i].res_state ==
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		if (top_priv->top_common.mux_rsrc[i].res_id ==
+			acquire_args->res_id &&
+			top_priv->top_common.mux_rsrc[i].res_state ==
 			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
 
 			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
 				rc = cam_vfe_camif_ver3_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
@@ -696,7 +312,7 @@ int cam_vfe_top_ver3_reserve(void *device_priv,
 			if (acquire_args->res_id >= CAM_ISP_HW_VFE_IN_RDI0 &&
 				acquire_args->res_id < CAM_ISP_HW_VFE_IN_MAX) {
 				rc = cam_vfe_camif_lite_ver3_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
@@ -704,18 +320,20 @@ int cam_vfe_top_ver3_reserve(void *device_priv,
 
 			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_RD) {
 				rc = cam_vfe_fe_ver1_acquire_resource(
-					&top_priv->mux_rsrc[i],
+					&top_priv->top_common.mux_rsrc[i],
 					args);
 				if (rc)
 					break;
 			}
 
-			top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
-			top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
-			top_priv->mux_rsrc[i].res_state =
+			top_priv->top_common.mux_rsrc[i].cdm_ops =
+				acquire_args->cdm_ops;
+			top_priv->top_common.mux_rsrc[i].tasklet_info =
+				args->tasklet;
+			top_priv->top_common.mux_rsrc[i].res_state =
 				CAM_ISP_RESOURCE_STATE_RESERVED;
 			acquire_args->rsrc_node =
-				&top_priv->mux_rsrc[i];
+				&top_priv->top_common.mux_rsrc[i];
 
 			rc = 0;
 			break;
@@ -757,6 +375,8 @@ int cam_vfe_top_ver3_start(void *device_priv,
 	struct cam_vfe_top_ver3_priv            *top_priv;
 	struct cam_isp_resource_node            *mux_res;
 	struct cam_hw_info                      *hw_info = NULL;
+	struct cam_hw_soc_info                  *soc_info = NULL;
+	struct cam_vfe_soc_private              *soc_private = NULL;
 	int rc = 0;
 
 	if (!device_priv || !start_args) {
@@ -765,6 +385,13 @@ int cam_vfe_top_ver3_start(void *device_priv,
 	}
 
 	top_priv = (struct cam_vfe_top_ver3_priv *)device_priv;
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
+
 	mux_res = (struct cam_isp_resource_node *)start_args;
 	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
 
@@ -776,7 +403,8 @@ int cam_vfe_top_ver3_start(void *device_priv,
 			return rc;
 		}
 
-		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, true);
+		rc = cam_vfe_top_set_axi_bw_vote(soc_private,
+			&top_priv->top_common, true);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"set_axi_bw_vote failed, rc=%d", rc);
@@ -823,12 +451,13 @@ int cam_vfe_top_ver3_stop(void *device_priv,
 	}
 
 	if (!rc) {
-		for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+		for (i = 0; i < top_priv->top_common.num_mux; i++) {
+			if (top_priv->top_common.mux_rsrc[i].res_id ==
+				mux_res->res_id) {
 				top_priv->req_clk_rate[i] = 0;
-				memset(&top_priv->req_axi_vote[i], 0,
-					sizeof(struct cam_axi_vote));
-				top_priv->axi_vote_control[i] =
+				memset(&top_priv->top_common.req_axi_vote[i],
+					0, sizeof(struct cam_axi_vote));
+				top_priv->top_common.axi_vote_control[i] =
 					CAM_VFE_BW_CONTROL_EXCLUDE;
 				break;
 			}
@@ -855,12 +484,21 @@ int cam_vfe_top_ver3_process_cmd(void *device_priv, uint32_t cmd_type,
 {
 	int rc = 0;
 	struct cam_vfe_top_ver3_priv            *top_priv;
+	struct cam_hw_soc_info                  *soc_info = NULL;
+	struct cam_vfe_soc_private              *soc_private = NULL;
 
 	if (!device_priv || !cmd_args) {
 		CAM_ERR(CAM_ISP, "Error, Invalid arguments");
 		return -EINVAL;
 	}
+
 	top_priv = (struct cam_vfe_top_ver3_priv *)device_priv;
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
 
 	switch (cmd_type) {
 	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
@@ -880,15 +518,16 @@ int cam_vfe_top_ver3_process_cmd(void *device_priv, uint32_t cmd_type,
 			arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_UPDATE:
-		rc = cam_vfe_top_ver3_bw_update(top_priv, cmd_args,
-			arg_size);
+		rc = cam_vfe_top_bw_update(soc_private, &top_priv->top_common,
+			cmd_args, arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_UPDATE_V2:
-		rc = cam_vfe_top_ver3_bw_update_v2(top_priv, cmd_args,
-			arg_size);
+		rc = cam_vfe_top_bw_update_v2(soc_private,
+			&top_priv->top_common, cmd_args, arg_size);
 		break;
 	case CAM_ISP_HW_CMD_BW_CONTROL:
-		rc = cam_vfe_top_ver3_bw_control(top_priv, cmd_args, arg_size);
+		rc = cam_vfe_top_bw_control(soc_private, &top_priv->top_common,
+			cmd_args, arg_size);
 		break;
 	case CAM_ISP_HW_CMD_CORE_CONFIG:
 		rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size);
@@ -928,68 +567,82 @@ int cam_vfe_top_ver3_init(
 		rc = -ENOMEM;
 		goto free_vfe_top;
 	}
+
 	vfe_top->top_priv = top_priv;
 	top_priv->hw_clk_rate = 0;
+	if (ver3_hw_info->num_mux > CAM_VFE_TOP_MUX_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid number of input rsrc: %d, max: %d",
+			ver3_hw_info->num_mux, CAM_VFE_TOP_MUX_MAX);
+		rc = -EINVAL;
+		goto free_top_priv;
+	}
 
-	for (i = 0, j = 0; i < CAM_VFE_TOP_VER3_MUX_MAX &&
+	top_priv->top_common.num_mux = ver3_hw_info->num_mux;
+
+	for (i = 0, j = 0; i < top_priv->top_common.num_mux &&
 		j < CAM_VFE_RDI_VER2_MAX; i++) {
-		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
-		top_priv->mux_rsrc[i].hw_intf = hw_intf;
-		top_priv->mux_rsrc[i].res_state =
+		top_priv->top_common.mux_rsrc[i].res_type =
+			CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->top_common.mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_AVAILABLE;
 		top_priv->req_clk_rate[i] = 0;
 
 		if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) {
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_CAMIF;
 
 			rc = cam_vfe_camif_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
 			CAM_VFE_PDLIB_VER_1_0) {
 			/* set the PDLIB resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_PDLIB;
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->pdlib_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
 			CAM_VFE_IN_RD_VER_1_0) {
 			/* set the RD resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_RD;
 
 			rc = cam_vfe_fe_ver1_init(hw_intf, soc_info,
 				&ver3_hw_info->fe_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
 			CAM_VFE_RDI_VER_1_0) {
 			/* set the RDI resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_RDI0 + j;
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				ver3_hw_info->rdi_hw_info[j++],
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
 			CAM_VFE_LCR_VER_1_0) {
 			/* set the LCR resource id */
-			top_priv->mux_rsrc[i].res_id =
+			top_priv->top_common.mux_rsrc[i].res_id =
 				CAM_ISP_HW_VFE_IN_LCR;
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->lcr_hw_info,
-				&top_priv->mux_rsrc[i], vfe_irq_controller);
+				&top_priv->top_common.mux_rsrc[i],
+				vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else {
@@ -1012,6 +665,7 @@ int cam_vfe_top_ver3_init(
 
 	top_priv->common_data.soc_info     = soc_info;
 	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->top_common.hw_idx        = hw_intf->hw_idx;
 	top_priv->common_data.common_reg   = ver3_hw_info->common_reg;
 
 	return rc;
@@ -1019,22 +673,26 @@ int cam_vfe_top_ver3_init(
 deinit_resources:
 	for (--i; i >= 0; i--) {
 		if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) {
-			if (cam_vfe_camif_ver3_deinit(&top_priv->mux_rsrc[i]))
+			if (cam_vfe_camif_ver3_deinit(
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif Deinit failed");
 		} else if (ver3_hw_info->mux_type[i] == CAM_VFE_IN_RD_VER_1_0) {
-			if (cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]))
+			if (cam_vfe_fe_ver1_deinit(
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif fe Deinit failed");
 		} else {
 			if (cam_vfe_camif_lite_ver3_deinit(
-				&top_priv->mux_rsrc[i]))
+				&top_priv->top_common.mux_rsrc[i]))
 				CAM_ERR(CAM_ISP,
 					"Camif lite res id %d Deinit failed",
-					top_priv->mux_rsrc[i].res_id);
+					top_priv->top_common.mux_rsrc[i]
+					.res_id);
 		}
-		top_priv->mux_rsrc[i].res_state =
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	}
 
+free_top_priv:
 	kfree(vfe_top->top_priv);
 free_vfe_top:
 	kfree(vfe_top);
@@ -1066,28 +724,31 @@ int cam_vfe_top_ver3_deinit(struct cam_vfe_top  **vfe_top_ptr)
 		goto free_vfe_top;
 	}
 
-	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
-		top_priv->mux_rsrc[i].res_state =
+	for (i = 0; i < top_priv->top_common.num_mux; i++) {
+		top_priv->top_common.mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
-		if (top_priv->mux_rsrc[i].res_type ==
+		if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_CAMIF_VER_3_0) {
-			rc = cam_vfe_camif_ver3_deinit(&top_priv->mux_rsrc[i]);
+			rc = cam_vfe_camif_ver3_deinit(
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
 					rc);
-		} else if (top_priv->mux_rsrc[i].res_type ==
+		} else if (top_priv->top_common.mux_rsrc[i].res_type ==
 			CAM_VFE_IN_RD_VER_1_0) {
-			rc = cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]);
+			rc = cam_vfe_fe_ver1_deinit(
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
 					rc);
 		} else {
 			rc = cam_vfe_camif_lite_ver3_deinit(
-				&top_priv->mux_rsrc[i]);
+				&top_priv->top_common.mux_rsrc[i]);
 			if (rc)
 				CAM_ERR(CAM_ISP,
 					"Camif lite res id %d Deinit failed",
-					top_priv->mux_rsrc[i].res_id);
+					top_priv->top_common.mux_rsrc[i]
+					.res_id);
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index dd0bb94..14c9609 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -9,8 +9,7 @@
 #include "cam_vfe_camif_ver3.h"
 #include "cam_vfe_camif_lite_ver3.h"
 #include "cam_vfe_fe_ver1.h"
-
-#define CAM_VFE_TOP_VER3_MUX_MAX     6
+#include "cam_vfe_top_common.h"
 
 #define CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF       31
 #define CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD     30
@@ -86,7 +85,8 @@ struct cam_vfe_top_ver3_hw_info {
 		*rdi_hw_info[CAM_VFE_RDI_VER2_MAX];
 	struct cam_vfe_camif_lite_ver3_hw_info      lcr_hw_info;
 	struct cam_vfe_fe_ver1_hw_info              fe_hw_info;
-	uint32_t mux_type[CAM_VFE_TOP_VER3_MUX_MAX];
+	uint32_t                                    num_mux;
+	uint32_t mux_type[CAM_VFE_TOP_MUX_MAX];
 };
 
 int cam_vfe_top_ver3_init(struct cam_hw_soc_info     *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index e28aae2..b16a9df 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -14,7 +14,7 @@
 #include "cam_debug_util.h"
 #include "cam_packet_util.h"
 
-static const char jpeg_dev_name[] = "jpeg";
+static const char jpeg_dev_name[] = "cam-jpeg";
 
 static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova,
 	uint32_t buf_info)
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 3270d77..ae8f93c 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -9,7 +9,7 @@
 #include "cam_debug_util.h"
 #include "cam_lrme_context.h"
 
-static const char lrme_dev_name[] = "lrme";
+static const char lrme_dev_name[] = "cam-lrme";
 
 static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
index 20828f6..4e26096 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/platform_device.h>
@@ -237,8 +237,7 @@ static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
 	lrme_hw = platform_get_drvdata(pdev);
 	if (!lrme_hw) {
 		CAM_ERR(CAM_LRME, "Invalid lrme_hw from fd_hw_intf");
-		rc = -ENODEV;
-		goto deinit_platform_res;
+		return -ENODEV;
 	}
 
 	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 9f11077..a9edecc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -979,6 +979,7 @@ static int __cam_req_mgr_check_sync_req_is_ready(
 		CAM_DBG(CAM_CRM, "Req: %lld not found on link: %x [other link]",
 			req_id, sync_link->link_hdl);
 		sync_ready = false;
+		return -EAGAIN;
 	}
 
 	sync_rd_idx = sync_link->req.in_q->rd_idx;
@@ -2437,18 +2438,24 @@ static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
  *
  */
 static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
-	struct cam_req_mgr_link_info *link_info)
+	struct cam_req_mgr_ver_info *link_info)
 {
-	int                                     rc = 0, i = 0;
+	int                                     rc = 0, i = 0, num_devices = 0;
 	struct cam_req_mgr_core_dev_link_setup  link_data;
 	struct cam_req_mgr_connected_device    *dev;
 	struct cam_req_mgr_req_tbl             *pd_tbl;
 	enum cam_pipeline_delay                 max_delay;
 	uint32_t                                subscribe_event = 0;
-
-	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
-		return -EPERM;
-
+
+	if (link_info->version == VERSION_1) {
+		if (link_info->u.link_info_v1.num_devices >
+			CAM_REQ_MGR_MAX_HANDLES)
+			return -EPERM;
+	} else if (link_info->version == VERSION_2) {
+		if (link_info->u.link_info_v2.num_devices >
+			CAM_REQ_MGR_MAX_HANDLES_V2)
+			return -EPERM;
+	}
 	mutex_init(&link->req.lock);
 	CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
 	link->req.num_tbl = 0;
@@ -2458,11 +2465,21 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 		return rc;
 
 	max_delay = CAM_PIPELINE_DELAY_0;
-	for (i = 0; i < link_info->num_devices; i++) {
+	if (link_info->version == VERSION_1)
+		num_devices = link_info->u.link_info_v1.num_devices;
+	else if (link_info->version == VERSION_2)
+		num_devices = link_info->u.link_info_v2.num_devices;
+	for (i = 0; i < num_devices; i++) {
 		dev = &link->l_dev[i];
 		/* Using dev hdl, get ops ptr to communicate with device */
-		dev->ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (link_info->version == VERSION_1)
+			dev->ops = (struct cam_req_mgr_kmd_ops *)
+					cam_get_device_ops(
+					link_info->u.link_info_v1.dev_hdls[i]);
+		else if (link_info->version == VERSION_2)
+			dev->ops = (struct cam_req_mgr_kmd_ops *)
+					cam_get_device_ops(
+					link_info->u.link_info_v2.dev_hdls[i]);
 		if (!dev->ops ||
 			!dev->ops->get_dev_info ||
 			!dev->ops->link_setup) {
@@ -2470,18 +2487,29 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 			rc = -ENXIO;
 			goto error;
 		}
-		dev->dev_hdl = link_info->dev_hdls[i];
+		if (link_info->version == VERSION_1)
+			dev->dev_hdl = link_info->u.link_info_v1.dev_hdls[i];
+		else if (link_info->version == VERSION_2)
+			dev->dev_hdl = link_info->u.link_info_v2.dev_hdls[i];
 		dev->parent = (void *)link;
 		dev->dev_info.dev_hdl = dev->dev_hdl;
 		rc = dev->ops->get_dev_info(&dev->dev_info);
 
 		trace_cam_req_mgr_connect_device(link, &dev->dev_info);
-
-		CAM_DBG(CAM_CRM,
-			"%x: connected: %s, id %d, delay %d, trigger %x",
-			link_info->session_hdl, dev->dev_info.name,
-			dev->dev_info.dev_id, dev->dev_info.p_delay,
-			dev->dev_info.trigger);
+		if (link_info->version == VERSION_1)
+			CAM_DBG(CAM_CRM,
+				"%x: connected: %s, id %d, delay %d, trigger %x",
+				link_info->u.link_info_v1.session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.dev_id, dev->dev_info.p_delay,
+				dev->dev_info.trigger);
+		else if (link_info->version == VERSION_2)
+			CAM_DBG(CAM_CRM,
+				"%x: connected: %s, id %d, delay %d, trigger %x",
+				link_info->u.link_info_v2.session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.dev_id, dev->dev_info.p_delay,
+				dev->dev_info.trigger);
 		if (rc < 0 ||
 			dev->dev_info.p_delay >=
 			CAM_PIPELINE_DELAY_MAX ||
@@ -2490,10 +2518,18 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 			CAM_ERR(CAM_CRM, "get device info failed");
 			goto error;
 		} else {
-			CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
-				link_info->session_hdl,
-				dev->dev_info.name,
-				dev->dev_info.p_delay);
+			if (link_info->version == VERSION_1) {
+				CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
+					link_info->u.link_info_v1.session_hdl,
+					dev->dev_info.name,
+					dev->dev_info.p_delay);
+				}
+			else if (link_info->version == VERSION_2) {
+				CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
+					link_info->u.link_info_v2.session_hdl,
+					dev->dev_info.name,
+					dev->dev_info.p_delay);
+				}
 			if (dev->dev_info.p_delay > max_delay)
 				max_delay = dev->dev_info.p_delay;
 
@@ -2508,7 +2544,7 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 	link_data.max_delay = max_delay;
 	link_data.subscribe_event = subscribe_event;
 
-	for (i = 0; i < link_info->num_devices; i++) {
+	for (i = 0; i < num_devices; i++) {
 		dev = &link->l_dev[i];
 
 		link_data.dev_hdl = dev->dev_hdl;
@@ -2551,7 +2587,7 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 		if (link->max_delay < dev->dev_info.p_delay)
 			link->max_delay = dev->dev_info.p_delay;
 	}
-	link->num_devs = link_info->num_devices;
+	link->num_devs = num_devices;
 
 	/* Assign id for pd tables */
 	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
@@ -2709,7 +2745,7 @@ int cam_req_mgr_destroy_session(
 	return rc;
 }
 
-int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
+int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info)
 {
 	int                                     rc = 0;
 	int                                     wq_flag = 0;
@@ -2722,9 +2758,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 		CAM_DBG(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
-	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
+	if (link_info->u.link_info_v1.num_devices > CAM_REQ_MGR_MAX_HANDLES) {
 		CAM_ERR(CAM_CRM, "Invalid num devices %d",
-			link_info->num_devices);
+			link_info->u.link_info_v1.num_devices);
 		return -EINVAL;
 	}
 
@@ -2732,7 +2768,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 
 	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
-		cam_get_device_priv(link_info->session_hdl);
+		cam_get_device_priv(link_info->u.link_info_v1.session_hdl);
 	if (!cam_session) {
 		CAM_DBG(CAM_CRM, "NULL pointer");
 		mutex_unlock(&g_crm_core_dev->crm_lock);
@@ -2749,7 +2785,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
 
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
-	root_dev.session_hdl = link_info->session_hdl;
+	root_dev.session_hdl = link_info->u.link_info_v1.session_hdl;
 	root_dev.priv = (void *)link;
 
 	mutex_lock(&link->lock);
@@ -2761,12 +2797,12 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 		rc = link->link_hdl;
 		goto link_hdl_fail;
 	}
-	link_info->link_hdl = link->link_hdl;
+	link_info->u.link_info_v1.link_hdl = link->link_hdl;
 	link->last_flush_id = 0;
 
 	/* Allocate memory to hold data of all linked devs */
 	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
-		link_info->num_devices);
+		link_info->u.link_info_v1.num_devices);
 	if (rc < 0) {
 		CAM_ERR(CAM_CRM,
 			"Insufficient memory to create new crm subdevs");
@@ -2784,7 +2820,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 
 	/* Create worker for current link */
 	snprintf(buf, sizeof(buf), "%x-%x",
-		link_info->session_hdl, link->link_hdl);
+		link_info->u.link_info_v1.session_hdl, link->link_hdl);
 	wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;
 	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
 		&link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag);
@@ -2809,7 +2845,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 	__cam_req_mgr_destroy_subdev(link->l_dev);
 create_subdev_failed:
 	cam_destroy_device_hdl(link->link_hdl);
-	link_info->link_hdl = -1;
+	link_info->u.link_info_v1.link_hdl = -1;
 link_hdl_fail:
 	mutex_unlock(&link->lock);
 	__cam_req_mgr_unreserve_link(cam_session, link);
@@ -2817,6 +2853,116 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 	return rc;
 }
 
+int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info)
+{
+	int                                     rc = 0;
+	int                                     wq_flag = 0;
+	char                                    buf[128];
+	struct cam_create_dev_hdl               root_dev;
+	struct cam_req_mgr_core_session        *cam_session;
+	struct cam_req_mgr_core_link           *link;
+
+	if (!link_info) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+	if (link_info->u.link_info_v2.num_devices >
+		CAM_REQ_MGR_MAX_HANDLES_V2) {
+		CAM_ERR(CAM_CRM, "Invalid num devices %d",
+			link_info->u.link_info_v2.num_devices);
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(link_info->u.link_info_v2.session_hdl);
+	if (!cam_session) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
+
+	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
+	root_dev.session_hdl = link_info->u.link_info_v2.session_hdl;
+	root_dev.priv = (void *)link;
+
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new device handle");
+		rc = link->link_hdl;
+		goto link_hdl_fail;
+	}
+	link_info->u.link_info_v2.link_hdl = link->link_hdl;
+	link->last_flush_id = 0;
+
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->u.link_info_v2.num_devices);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new crm subdevs");
+		goto create_subdev_failed;
+	}
+
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	/* Create worker for current link */
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->u.link_info_v2.session_hdl, link->link_hdl);
+	wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
+	}
+
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
+
+	mutex_unlock(&link->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+create_subdev_failed:
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->u.link_info_v2.link_hdl = -1;
+link_hdl_fail:
+	mutex_unlock(&link->lock);
+	__cam_req_mgr_unreserve_link(cam_session, link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
 {
 	int                              rc = 0;
@@ -3057,14 +3203,12 @@ int cam_req_mgr_flush_requests(
 
 	if (!flush_info) {
 		CAM_ERR(CAM_CRM, "flush req is NULL");
-		rc = -EFAULT;
-		goto end;
+		return -EFAULT;
 	}
 	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
 		CAM_ERR(CAM_CRM, "incorrect flush type %x",
 			flush_info->flush_type);
-		rc = -EINVAL;
-		goto end;
+		return -EINVAL;
 	}
 
 	mutex_lock(&g_crm_core_dev->crm_lock);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 9a6acbc..8eb15be 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -31,6 +31,9 @@
 
 #define MAXIMUM_RETRY_ATTEMPTS 3
 
+#define VERSION_1  1
+#define VERSION_2  2
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -408,7 +411,9 @@ int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
  * a unique link handle for the link and is specific to a
  * session. Returns link handle
  */
-int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info);
+int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info);
+int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info);
+
 
 /**
  * cam_req_mgr_unlink()
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 2958c6c..7189425 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -261,27 +261,50 @@ static long cam_private_ioctl(struct file *file, void *fh,
 		break;
 
 	case CAM_REQ_MGR_LINK: {
-		struct cam_req_mgr_link_info link_info;
+		struct cam_req_mgr_ver_info ver_info;
 
-		if (k_ioctl->size != sizeof(link_info))
+		if (k_ioctl->size != sizeof(ver_info.u.link_info_v1))
 			return -EINVAL;
 
-		if (copy_from_user(&link_info,
+		if (copy_from_user(&ver_info.u.link_info_v1,
 			u64_to_user_ptr(k_ioctl->handle),
 			sizeof(struct cam_req_mgr_link_info))) {
 			return -EFAULT;
 		}
-
-		rc = cam_req_mgr_link(&link_info);
+		ver_info.version = VERSION_1;
+		rc = cam_req_mgr_link(&ver_info);
 		if (!rc)
 			if (copy_to_user(
 				u64_to_user_ptr(k_ioctl->handle),
-				&link_info,
+				&ver_info.u.link_info_v1,
 				sizeof(struct cam_req_mgr_link_info)))
 				rc = -EFAULT;
 		}
 		break;
 
+	case CAM_REQ_MGR_LINK_V2: {
+		struct cam_req_mgr_ver_info ver_info;
+
+		if (k_ioctl->size != sizeof(ver_info.u.link_info_v2))
+			return -EINVAL;
+
+		if (copy_from_user(&ver_info.u.link_info_v2,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_link_info_v2))) {
+			return -EFAULT;
+		}
+
+		ver_info.version = VERSION_2;
+		rc = cam_req_mgr_link_v2(&ver_info);
+		if (!rc)
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
+				&ver_info.u.link_info_v2,
+				sizeof(struct cam_req_mgr_link_info_v2)))
+				rc = -EFAULT;
+			}
+		break;
+
 	case CAM_REQ_MGR_UNLINK: {
 		struct cam_req_mgr_unlink_info unlink_info;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 9c704c0..e4e5e43 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -25,6 +25,7 @@
 #include "cam_sensor_util.h"
 #include "cam_soc_util.h"
 #include "cam_debug_util.h"
+#include "cam_context.h"
 
 #define NUM_MASTERS 2
 #define NUM_QUEUES 2
@@ -83,6 +84,7 @@ struct intf_params {
 
 /**
  * struct cam_actuator_ctrl_t
+ * @device_name: Device name
  * @i2c_driver: I2C device info
  * @pdev: Platform device
  * @cci_i2c_master: I2C structure
@@ -98,10 +100,10 @@ struct intf_params {
  * @i2c_data: I2C register settings structure
  * @act_info: Sensor query cap structure
  * @of_node: Node ptr
- * @device_name: Device name
  * @last_flush_req: Last request to flush
  */
 struct cam_actuator_ctrl_t {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct i2c_driver *i2c_driver;
 	enum cci_i2c_master_t cci_i2c_master;
 	enum cci_device_num cci_num;
@@ -116,7 +118,6 @@ struct cam_actuator_ctrl_t {
 	struct i2c_data_settings i2c_data;
 	struct cam_actuator_query_cap act_info;
 	struct intf_params bridge_intf;
-	char device_name[20];
 	uint32_t last_flush_req;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
index 653644a..81a09f6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -7,5 +7,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 51b8201..78c7a84 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -26,6 +26,7 @@
 #include <cam_cpas_api.h>
 #include "cam_soc_util.h"
 #include "cam_debug_util.h"
+#include "cam_context.h"
 
 #define MAX_CSIPHY                  6
 #define MAX_DPHY_DATA_LN            4
@@ -235,6 +236,7 @@ struct cam_csiphy_param {
 
 /**
  * struct csiphy_device
+ * @device_name:                Device name
  * @pdev:                       Platform device
  * @irq:                        Interrupt structure
  * @base:                       Base address
@@ -263,6 +265,7 @@ struct cam_csiphy_param {
  * @csiphy_cpas_cp_reg_mask:    CP reg mask for phy instance
  */
 struct csiphy_device {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct mutex mutex;
 	uint32_t hw_version;
 	enum cam_csiphy_state csiphy_state;
@@ -282,7 +285,6 @@ struct csiphy_device {
 	uint32_t clk_lane;
 	uint32_t acquire_count;
 	uint32_t start_dev_count;
-	char device_name[20];
 	uint32_t is_acquired_dev_combo_mode;
 	struct cam_hw_soc_info   soc_info;
 	uint32_t cpas_handle;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
index b487a72..59642af 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -7,4 +7,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 5ccf37d..6600c92 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -70,7 +70,6 @@ static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
 					rc);
 				return rc;
 			}
-			memptr = block->mapdata + emap[j].mem.valid_size;
 		}
 
 		if (emap[j].pageen.valid_size) {
@@ -435,7 +434,8 @@ static int32_t cam_eeprom_parse_memory_map(
 		validate_size = sizeof(struct cam_cmd_unconditional_wait);
 
 	if (remain_buf_len < validate_size ||
-	    *num_map >= MSM_EEPROM_MAX_MEM_MAP_CNT) {
+	    *num_map >= (MSM_EEPROM_MAX_MEM_MAP_CNT *
+		MSM_EEPROM_MEMORY_MAP_MAX_SIZE)) {
 		CAM_ERR(CAM_EEPROM, "not enough buffer");
 		return -EINVAL;
 	}
@@ -445,7 +445,9 @@ static int32_t cam_eeprom_parse_memory_map(
 
 		if (i2c_random_wr->header.count == 0 ||
 		    i2c_random_wr->header.count >= MSM_EEPROM_MAX_MEM_MAP_CNT ||
-		    (size_t)*num_map > U16_MAX - i2c_random_wr->header.count) {
+		    (size_t)*num_map >= ((MSM_EEPROM_MAX_MEM_MAP_CNT *
+				MSM_EEPROM_MEMORY_MAP_MAX_SIZE) -
+				i2c_random_wr->header.count)) {
 			CAM_ERR(CAM_EEPROM, "OOB Error");
 			return -EINVAL;
 		}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
index f840e63..ebd12c6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -23,6 +23,7 @@
 #include <cam_subdev.h>
 #include <media/cam_sensor.h>
 #include "cam_soc_util.h"
+#include "cam_context.h"
 
 #define DEFINE_MSM_MUTEX(mutexname) \
 	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
@@ -30,7 +31,7 @@
 #define PROPERTY_MAXSIZE 32
 
 #define MSM_EEPROM_MEMORY_MAP_MAX_SIZE         80
-#define MSM_EEPROM_MAX_MEM_MAP_CNT             8
+#define MSM_EEPROM_MAX_MEM_MAP_CNT             100
 #define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT      8
 
 enum cam_eeprom_state {
@@ -153,6 +154,7 @@ struct eebin_info {
 
 /**
  * struct cam_eeprom_ctrl_t - EEPROM control structure
+ * @device_name         :   Device name
  * @pdev                :   platform device
  * @spi                 :   spi device
  * @eeprom_mutex        :   eeprom mutex
@@ -171,6 +173,7 @@ struct eebin_info {
  * @eebin_info          :   EEBIN address, size info
  */
 struct cam_eeprom_ctrl_t {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct platform_device *pdev;
 	struct spi_device *spi;
 	struct mutex eeprom_mutex;
@@ -185,7 +188,6 @@ struct cam_eeprom_ctrl_t {
 	enum cam_eeprom_state cam_eeprom_state;
 	bool userspace_probe;
 	struct cam_eeprom_memory_block_t cal_data;
-	char device_name[20];
 	uint16_t is_multimodule_mode;
 	struct i2c_settings_array wr_settings;
 	struct eebin_info eebin_info;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index d061066..5eccfd8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -379,6 +379,8 @@ static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl)
 {
 	int rc = 0;
 
+	strlcpy(fctrl->device_name, CAM_FLASH_NAME,
+		sizeof(fctrl->device_name));
 	fctrl->v4l2_dev_str.internal_ops =
 		&cam_flash_internal_ops;
 	fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index 5d85292..43ed134 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -28,6 +28,7 @@
 #include "cam_debug_util.h"
 #include "cam_sensor_io.h"
 #include "cam_flash_core.h"
+#include "cam_context.h"
 
 #define CAMX_FLASH_DEV_NAME "cam-flash-dev"
 
@@ -153,6 +154,7 @@ struct cam_flash_func_tbl {
 
 /**
  *  struct cam_flash_ctrl
+ * @device_name         : Device name
  * @soc_info            : Soc related information
  * @pdev                : Platform device
  * @per_frame[]         : Per_frame setting array
@@ -179,6 +181,7 @@ struct cam_flash_func_tbl {
  * @last_flush_req      : last request to flush
  */
 struct cam_flash_ctrl {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct cam_hw_soc_info              soc_info;
 	struct platform_device             *pdev;
 	struct cam_sensor_power_ctrl_t      power_info;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
index 75becd5..482b5f8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
@@ -8,5 +8,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois_dev.o cam_ois_core.o cam_ois_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index c0e7d3e..788243a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef _CAM_OIS_DEV_H_
 #define _CAM_OIS_DEV_H_
@@ -20,6 +20,7 @@
 #include <cam_mem_mgr.h>
 #include <cam_subdev.h>
 #include "cam_soc_util.h"
+#include "cam_context.h"
 
 #define DEFINE_MSM_MUTEX(mutexname) \
 	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
@@ -83,6 +84,7 @@ struct cam_ois_intf_params {
 
 /**
  * struct cam_ois_ctrl_t - OIS ctrl private data
+ * @device_name     :   ois device_name
  * @pdev            :   platform device
  * @ois_mutex       :   ois mutex
  * @soc_info        :   ois soc related info
@@ -95,7 +97,6 @@ struct cam_ois_intf_params {
  * @i2c_calib_data  :   ois i2c calib settings
  * @ois_device_type :   ois device type
  * @cam_ois_state   :   ois_device_state
- * @ois_name        :   ois name
  * @ois_fw_flag     :   flag for firmware download
  * @is_ois_calib    :   flag for Calibration data
  * @opcode          :   ois opcode
@@ -103,6 +104,7 @@ struct cam_ois_intf_params {
  *
  */
 struct cam_ois_ctrl_t {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct platform_device *pdev;
 	struct mutex ois_mutex;
 	struct cam_hw_soc_info soc_info;
@@ -116,7 +118,6 @@ struct cam_ois_ctrl_t {
 	struct i2c_settings_array i2c_mode_data;
 	enum msm_camera_device_type_t ois_device_type;
 	enum cam_ois_state cam_ois_state;
-	char device_name[20];
 	char ois_name[32];
 	uint8_t ois_fw_flag;
 	uint8_t is_ois_calib;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index 86676a4..37e0affd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -23,6 +23,7 @@
 #include <cam_subdev.h>
 #include <cam_sensor_io.h>
 #include "cam_debug_util.h"
+#include "cam_context.h"
 
 #define NUM_MASTERS 2
 #define NUM_QUEUES 2
@@ -65,6 +66,7 @@ struct intf_params {
 
 /**
  * struct cam_sensor_ctrl_t: Camera control structure
+ * @device_name: Sensor device name
  * @pdev: Platform device
  * @cam_sensor_mutex: Sensor mutex
  * @sensordata: Sensor board Information
@@ -80,7 +82,6 @@ struct intf_params {
  * @i2c_data: Sensor I2C register settings
  * @sensor_info: Sensor query cap structure
  * @bridge_intf: Bridge interface structure
- * @device_name: Sensor device structure
  * @streamon_count: Count to hold the number of times stream on called
  * @streamoff_count: Count to hold the number of times stream off called
  * @bob_reg_index: Hold to BoB regulator index
@@ -89,6 +90,7 @@ struct intf_params {
  * @pipeline_delay: Sensor pipeline delay
  */
 struct cam_sensor_ctrl_t {
+	char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
 	struct platform_device *pdev;
 	struct cam_hw_soc_info soc_info;
 	struct mutex cam_sensor_mutex;
@@ -106,7 +108,6 @@ struct cam_sensor_ctrl_t {
 	struct i2c_data_settings i2c_data;
 	struct  cam_sensor_query_cap sensor_info;
 	struct intf_params bridge_intf;
-	char device_name[20];
 	uint32_t streamon_count;
 	uint32_t streamoff_count;
 	int bob_reg_index;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 4a2a2a8..f5ea2dc 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -1658,7 +1658,7 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
 		case SENSOR_CUSTOM_GPIO2:
 			if (no_gpio) {
 				CAM_ERR(CAM_SENSOR, "request gpio failed");
-				return no_gpio;
+				goto power_up_failed;
 			}
 			if (!gpio_num_info) {
 				CAM_ERR(CAM_SENSOR, "Invalid gpio_num_info");
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 9111186..a2dae566 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -3474,9 +3474,24 @@ static int cam_populate_smmu_context_banks(struct device *dev,
 		iommu_set_fault_handler(cb->domain,
 			cam_smmu_iommu_fault_handler,
 			(void *)cb->name);
+
+	if (!dev->dma_parms)
+		dev->dma_parms = devm_kzalloc(dev,
+			sizeof(*dev->dma_parms), GFP_KERNEL);
+
+	if (!dev->dma_parms) {
+		CAM_WARN(CAM_SMMU,
+			"Failed to allocate dma_params");
+		dev->dma_parms = NULL;
+		goto end;
+	}
+
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
+
+end:
 	/* increment count to next bank */
 	iommu_cb_set.cb_init_count++;
-
 	CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
 
 cb_init_fail:
@@ -3488,6 +3503,7 @@ static int cam_smmu_probe(struct platform_device *pdev)
 	int rc = 0;
 	struct device *dev = &pdev->dev;
 
+	dev->dma_parms = NULL;
 	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
 		rc = cam_alloc_smmu_context_banks(dev);
 		if (rc < 0) {
@@ -3536,8 +3552,15 @@ static int cam_smmu_probe(struct platform_device *pdev)
 
 static int cam_smmu_remove(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
+
 	/* release all the context banks and memory allocated */
 	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+	if (dev && dev->dma_parms) {
+		devm_kfree(dev, dev->dma_parms);
+		dev->dma_parms = NULL;
+	}
+
 	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
 		cam_smmu_release_cb(pdev);
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
index 410f3a7..52491b2 100644
--- a/drivers/media/platform/msm/camera/cam_sync/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-$(CONFIG_MSM_GLOBAL_SYNX) += -Idrivers/media/platform/msm/synx
 ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 5342de0..10ed0cd 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -13,6 +13,9 @@
 #include "cam_debug_util.h"
 #include "cam_common_util.h"
 
+#ifdef CONFIG_MSM_GLOBAL_SYNX
+#include <synx_api.h>
+#endif
 struct sync_device *sync_dev;
 
 /*
@@ -956,6 +959,25 @@ static int cam_sync_create_debugfs(void)
 	return 0;
 }
 
+#ifdef CONFIG_MSM_GLOBAL_SYNX
+static void cam_sync_register_synx_bind_ops(void)
+{
+	int rc = 0;
+	struct synx_register_params params;
+
+	params.name = CAM_SYNC_NAME;
+	params.type = SYNX_TYPE_CSL;
+	params.ops.register_callback = cam_sync_register_callback;
+	params.ops.deregister_callback = cam_sync_deregister_callback;
+	params.ops.enable_signaling = cam_sync_get_obj_ref;
+	params.ops.signal = cam_sync_signal;
+
+	rc = synx_register_ops(&params);
+	if (rc)
+		CAM_ERR(CAM_SYNC, "synx registration fail with %d", rc);
+}
+#endif
+
 static int cam_sync_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -1023,7 +1045,9 @@ static int cam_sync_probe(struct platform_device *pdev)
 
 	trigger_cb_without_switch = false;
 	cam_sync_create_debugfs();
-
+#ifdef CONFIG_MSM_GLOBAL_SYNX
+	cam_sync_register_synx_bind_ops();
+#endif
 	return rc;
 
 v4l2_fail:
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index d23d044..a388541 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -214,6 +214,28 @@ const struct msm_cvp_hfi_defs cvp_hfi_defs[] = {
 		.buf_num = HFI_PYS_HCD_BUF_NUM,
 		.resp = HAL_NO_RESP,
 	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_MODEL_BUF_CMD_DONE,
+	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_FD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_FD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_FD_FRAME,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_NO_RESP,
+	},
+
 };
 
 static struct cvp_hal_device_data hal_ctxt;
@@ -272,21 +294,22 @@ static int __disable_subcaches(struct iris_hfi_device *device);
 static int __power_collapse(struct iris_hfi_device *device, bool force);
 static int venus_hfi_noc_error_info(void *dev);
 
-static void interrupt_init_vpu5(struct iris_hfi_device *device);
+static void interrupt_init_iris2(struct iris_hfi_device *device);
 static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device);
 static void clock_config_on_enable_vpu5(struct iris_hfi_device *device);
 static int reset_ahb2axi_bridge(struct iris_hfi_device *device);
 static void power_off_iris2(struct iris_hfi_device *device);
 
 static int __set_ubwc_config(struct iris_hfi_device *device);
+static void __noc_error_info_iris2(struct iris_hfi_device *device);
 
 static struct iris_hfi_vpu_ops iris2_ops = {
-	.interrupt_init = interrupt_init_vpu5,
+	.interrupt_init = interrupt_init_iris2,
 	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5,
 	.clock_config_on_enable = clock_config_on_enable_vpu5,
 	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
 	.power_off = power_off_iris2,
-	.noc_error_info = NULL,
+	.noc_error_info = __noc_error_info_iris2,
 };
 
 /**
@@ -896,9 +919,8 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
 	return rc;
 }
 
-static int __smem_alloc(struct iris_hfi_device *dev,
-			struct cvp_mem_addr *mem, u32 size, u32 align,
-			u32 flags, u32 usage)
+static int __smem_alloc(struct iris_hfi_device *dev, struct cvp_mem_addr *mem,
+			u32 size, u32 align, u32 flags)
 {
 	struct msm_cvp_smem *alloc = &mem->mem_data;
 	int rc = 0;
@@ -910,7 +932,7 @@ static int __smem_alloc(struct iris_hfi_device *dev,
 
 	dprintk(CVP_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
 	rc = msm_cvp_smem_alloc(
-		size, align, flags, usage, 1, (void *)dev->res,
+		size, align, flags, 1, (void *)dev->res,
 		MSM_CVP_UNKNOWN, alloc);
 	if (rc) {
 		dprintk(CVP_ERR, "Alloc failed\n");
@@ -1045,23 +1067,6 @@ static void __set_threshold_registers(struct iris_hfi_device *device)
 		dprintk(CVP_ERR, "Failed to restore threshold values\n");
 }
 
-static void __iommu_detach(struct iris_hfi_device *device)
-{
-	struct context_bank_info *cb;
-
-	if (!device || !device->res) {
-		dprintk(CVP_ERR, "Invalid parameter: %pK\n", device);
-		return;
-	}
-
-	list_for_each_entry(cb, &device->res->context_banks, list) {
-		if (cb->dev)
-			__depr_arm_iommu_detach_device(cb->dev);
-		if (cb->mapping)
-			__depr_arm_iommu_release_mapping(cb->mapping);
-	}
-}
-
 #ifdef USE_DEVFREQ_SCALE_BUS
 static int __devfreq_target(struct device *devfreq_dev,
 		unsigned long *freq, u32 flags)
@@ -1219,7 +1224,7 @@ static int __vote_buses(struct iris_hfi_device *device,
 			}
 #else
 			rc = msm_bus_scale_update_bw(bus->client,
-				bus->range[1]*1000, 0);
+				bus->range[1], 0);
 			if (rc)
 				dprintk(CVP_ERR,
 				"Failed voting bus %s to ab %u\n",
@@ -1699,8 +1704,7 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
 		dprintk(CVP_ERR, "%s: failed dma allocation\n", __func__);
 		goto fail_dma_alloc;
 	}
-	cb = msm_cvp_smem_get_context_bank(MSM_CVP_UNKNOWN, 0,
-			dev->res, HAL_BUFFER_INTERNAL_CMD_QUEUE);
+	cb = msm_cvp_smem_get_context_bank(MSM_CVP_UNKNOWN, 0, dev->res, 0);
 	if (!cb) {
 		dprintk(CVP_ERR,
 			"%s: failed to get context bank\n", __func__);
@@ -1721,7 +1725,7 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
 	mem_data->device_addr = iova;
 	mem_data->dma_handle = dma_handle;
 	mem_data->size = q_size;
-	mem_data->buffer_type = HAL_BUFFER_INTERNAL_CMD_QUEUE;
+	mem_data->buffer_type = 0;
 	mem_data->mapping_info.cb_info = cb;
 
 	if (!is_iommu_present(dev->res))
@@ -1810,10 +1814,10 @@ static void __interface_queues_release(struct iris_hfi_device *device)
 
 		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
 		cb = msm_cvp_smem_get_context_bank(MSM_CVP_UNKNOWN,
-			false, device->res, HAL_BUFFER_INTERNAL_CMD_QUEUE);
+			false, device->res, 0);
 
 		for (i = 0; cb && i < num_entries; i++) {
-			iommu_unmap(cb->mapping->domain,
+			iommu_unmap(cb->domain,
 						mem_map[i].virtual_addr,
 						mem_map[i].size);
 		}
@@ -1848,7 +1852,7 @@ static void __interface_queues_release(struct iris_hfi_device *device)
 
 static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
 		struct cvp_hfi_mem_map *mem_map,
-		struct dma_iommu_mapping *mapping)
+		struct iommu_domain *domain)
 {
 	int i;
 	int rc = 0;
@@ -1860,8 +1864,8 @@ static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
 		return -ENODATA;
 
 	for (i = 0; i < num_entries; i++) {
-		if (mapping) {
-			rc = iommu_map(mapping->domain, iova,
+		if (domain) {
+			rc = iommu_map(domain, iova,
 					qdss_addr_tbl[i].start,
 					qdss_addr_tbl[i].size,
 					IOMMU_READ | IOMMU_WRITE);
@@ -1889,8 +1893,8 @@ static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
 		dprintk(CVP_ERR,
 			"QDSS mapping failed, Freeing other entries %d\n", i);
 
-		for (--i; mapping && i >= 0; i--) {
-			iommu_unmap(mapping->domain,
+		for (--i; domain && i >= 0; i--) {
+			iommu_unmap(domain,
 				mem_map[i].virtual_addr,
 				mem_map[i].size);
 		}
@@ -1938,8 +1942,7 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
 	mem_addr = &dev->mem_addr;
 	if (!is_iommu_present(dev->res))
 		fw_bias = dev->cvp_hal_data->firmware_base;
-	rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED,
-			HAL_BUFFER_INTERNAL_CMD_QUEUE);
+	rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED);
 	if (rc) {
 		dprintk(CVP_ERR, "iface_q_table_alloc_fail\n");
 		goto fail_alloc_queue;
@@ -1967,9 +1970,8 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
 	}
 
 	if ((msm_cvp_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
-		rc = __smem_alloc(dev, mem_addr,
-				ALIGNED_QDSS_SIZE, 1, SMEM_UNCACHED,
-				HAL_BUFFER_INTERNAL_CMD_QUEUE);
+		rc = __smem_alloc(dev, mem_addr, ALIGNED_QDSS_SIZE, 1,
+				SMEM_UNCACHED);
 		if (rc) {
 			dprintk(CVP_WARN,
 				"qdss_alloc_fail: QDSS messages logging will not work\n");
@@ -1984,9 +1986,7 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
 		}
 	}
 
-	rc = __smem_alloc(dev, mem_addr,
-			ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED,
-			HAL_BUFFER_INTERNAL_CMD_QUEUE);
+	rc = __smem_alloc(dev, mem_addr, ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED);
 	if (rc) {
 		dprintk(CVP_WARN, "sfr_alloc_fail: SFR not will work\n");
 		dev->sfr.align_device_addr = 0;
@@ -2040,14 +2040,14 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
 
 		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
 		cb = msm_cvp_smem_get_context_bank(MSM_CVP_UNKNOWN, false,
-			dev->res, HAL_BUFFER_INTERNAL_CMD_QUEUE);
+			dev->res, 0);
 		if (!cb) {
 			dprintk(CVP_ERR,
 				"%s: failed to get context bank\n", __func__);
 			return -EINVAL;
 		}
 
-		rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->mapping);
+		rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->domain);
 		if (rc) {
 			dprintk(CVP_ERR,
 				"IOMMU mapping failed, Freeing qdss memdata\n");
@@ -2283,8 +2283,10 @@ static int venus_hfi_core_release(void *dev)
 	__unload_fw(device);
 
 	/* unlink all sessions from device */
-	list_for_each_entry_safe(session, next, &device->sess_head, list)
+	list_for_each_entry_safe(session, next, &device->sess_head, list) {
 		list_del(&session->list);
+		session->device = NULL;
+	}
 
 	dprintk(CVP_DBG, "Core released successfully\n");
 	mutex_unlock(&device->lock);
@@ -2330,14 +2332,13 @@ static void __core_clear_interrupt(struct iris_hfi_device *device)
 	}
 
 	intr_status = __read_register(device, CVP_WRAPPER_INTR_STATUS);
-	mask = (CVP_WRAPPER_INTR_STATUS_A2H_BMSK |
-		CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK);
+	mask = (CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK | CVP_FATAL_INTR_BMSK);
 
 	if (intr_status & mask) {
 		device->intr_status |= intr_status;
 		device->reg_count++;
 		dprintk(CVP_DBG,
-			"INTERRUPT for device: %pK: times: %d interrupt_status: %d\n",
+			"INTERRUPT for device: %pK: times: %d status: %d\n",
 			device, device->reg_count, intr_status);
 	} else {
 		device->spur_count++;
@@ -2395,7 +2396,7 @@ static int venus_hfi_session_set_property(void *sess,
 
 	dprintk(CVP_INFO, "in set_prop,with prop id: %#x\n", ptype);
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_set_prop;
 	}
 
@@ -2441,7 +2442,7 @@ static int venus_hfi_session_get_property(void *sess,
 
 	dprintk(CVP_INFO, "%s: property id: %d\n", __func__, ptype);
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_create_pkt;
 	}
 
@@ -2584,7 +2585,7 @@ static int __send_session_cmd(struct cvp_hal_session *session, int pkt_type)
 	struct iris_hfi_device *device = session->device;
 
 	if (!__is_session_valid(device, session, __func__))
-		return -EINVAL;
+		return -ECONNRESET;
 
 	rc = call_hfi_pkt_op(device, session_cmd,
 			&pkt, pkt_type, session);
@@ -2616,6 +2617,10 @@ static int venus_hfi_session_end(void *session)
 
 	sess = session;
 	device = sess->device;
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid session %s\n", __func__);
+		return -EINVAL;
+	}
 
 	mutex_lock(&device->lock);
 
@@ -2646,7 +2651,6 @@ static int venus_hfi_session_abort(void *sess)
 
 	mutex_lock(&device->lock);
 
-	__flush_debug_queue(device, NULL);
 	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);
 
 	mutex_unlock(&device->lock);
@@ -2671,7 +2675,7 @@ static int venus_hfi_session_set_buffers(void *sess,
 	mutex_lock(&device->lock);
 
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_create_pkt;
 	}
 
@@ -2708,12 +2712,7 @@ static int venus_hfi_session_release_buffers(void *sess,
 	mutex_lock(&device->lock);
 
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	if (buffer_info->buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) {
-		dprintk(CVP_ERR, "INTERNAL_PERSIST_1 expected\n");
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_create_pkt;
 	}
 
@@ -2767,7 +2766,7 @@ static int venus_hfi_session_send(void *sess,
 	mutex_lock(&device->lock);
 
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_send_pkt;
 	}
 	rc = call_hfi_pkt_op(device, session_send,
@@ -2804,7 +2803,7 @@ static int venus_hfi_session_get_buf_req(void *sess)
 	mutex_lock(&device->lock);
 
 	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
+		rc = -ECONNRESET;
 		goto err_create_pkt;
 	}
 	rc = call_hfi_pkt_op(device, session_get_buf_req,
@@ -3230,6 +3229,8 @@ static void **get_session_id(struct msm_cvp_cb_info *info)
 	case HAL_SESSION_DME_FRAME_CMD_DONE:
 	case HAL_SESSION_ICA_FRAME_CMD_DONE:
 	case HAL_SESSION_PERSIST_CMD_DONE:
+	case HAL_SESSION_FD_CONFIG_CMD_DONE:
+	case HAL_SESSION_MODEL_BUF_CMD_DONE:
 	case HAL_SESSION_PROPERTY_INFO:
 		session_id = &info->response.cmd.session_id;
 		break;
@@ -3298,7 +3299,7 @@ static int __response_handler(struct iris_hfi_device *device)
 		return 0;
 	}
 
-	if (device->intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK) {
+	if (device->intr_status & CVP_FATAL_INTR_BMSK) {
 		struct cvp_hfi_sfr_struct *vsfr = (struct cvp_hfi_sfr_struct *)
 			device->sfr.align_virtual_addr;
 		struct msm_cvp_cb_info info = {
@@ -3311,8 +3312,15 @@ static int __response_handler(struct iris_hfi_device *device)
 		if (vsfr)
 			dprintk(CVP_ERR, "SFR Message from FW: %s\n",
 					vsfr->rg_data);
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK)
+			dprintk(CVP_ERR, "Received Xtensa NOC error\n");
 
-		dprintk(CVP_ERR, "Received watchdog timeout\n");
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK)
+			dprintk(CVP_ERR, "Received CVP core NOC error\n");
+
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_A2HWD_BMSK)
+			dprintk(CVP_ERR, "Received CVP watchdog timeout\n");
+
 		packets[packet_count++] = info;
 		goto exit;
 	}
@@ -4351,7 +4359,7 @@ static int __disable_subcaches(struct iris_hfi_device *device)
 	return 0;
 }
 
-static void interrupt_init_vpu5(struct iris_hfi_device *device)
+static void interrupt_init_iris2(struct iris_hfi_device *device)
 {
 	u32 mask_val = 0;
 
@@ -4359,8 +4367,7 @@ static void interrupt_init_vpu5(struct iris_hfi_device *device)
 	mask_val = __read_register(device, CVP_WRAPPER_INTR_MASK);
 
 	/* Write 0 to unmask CPU and WD interrupts */
-	mask_val &= ~(CVP_WRAPPER_INTR_MASK_A2HWD_BMSK |
-			CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK);
+	mask_val &= ~(CVP_FATAL_INTR_BMSK | CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK);
 	__write_register(device, CVP_WRAPPER_INTR_MASK, mask_val);
 	dprintk(CVP_DBG, "Init irq: reg: %x, mask value %x\n",
 		CVP_WRAPPER_INTR_MASK, mask_val);
@@ -4842,9 +4849,82 @@ static int venus_hfi_get_core_capabilities(void *dev)
 	return 0;
 }
 
+static void __noc_error_info_iris2(struct iris_hfi_device *device)
+{
+	u32 val = 0;
+
+	val = __read_register(device, CVP_NOC_ERR_SWID_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_SWID_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRVLD_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRCLR_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+}
+
 static int venus_hfi_noc_error_info(void *dev)
 {
-	dprintk(CVP_ERR, "%s not supported yet!\n", __func__);
+	struct iris_hfi_device *device;
+
+	if (!dev) {
+		dprintk(CVP_ERR, "%s: null device\n", __func__);
+		return -EINVAL;
+	}
+	device = dev;
+
+	mutex_lock(&device->lock);
+	dprintk(CVP_ERR, "%s: noc error information\n", __func__);
+
+	call_venus_op(device, noc_error_info, device);
+
+	mutex_unlock(&device->lock);
+
 	return 0;
 }
 
@@ -4976,10 +5056,6 @@ void cvp_venus_hfi_delete_device(void *device)
 
 	dev = (struct iris_hfi_device *) device;
 
-	mutex_lock(&dev->lock);
-	__iommu_detach(dev);
-	mutex_unlock(&dev->lock);
-
 	list_for_each_entry_safe(close, tmp, &hal_ctxt.dev_head, list) {
 		if (close->cvp_hal_data->irq == dev->cvp_hal_data->irq) {
 			hal_ctxt.dev_count--;
@@ -4999,6 +5075,26 @@ void cvp_venus_hfi_delete_device(void *device)
 	}
 }
 
+static int venus_hfi_validate_session(void *sess, const char *func)
+{
+	struct cvp_hal_session *session = sess;
+	int rc = 0;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, " %s Invalid Params %pK\n", __func__, session);
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+	if (!__is_session_valid(device, session, func))
+		rc = -ECONNRESET;
+
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
 static void venus_init_hfi_callbacks(struct cvp_hfi_device *hdev)
 {
 	hdev->core_init = venus_hfi_core_init;
@@ -5027,6 +5123,7 @@ static void venus_init_hfi_callbacks(struct cvp_hfi_device *hdev)
 	hdev->suspend = venus_hfi_suspend;
 	hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
 	hdev->noc_error_info = venus_hfi_noc_error_info;
+	hdev->validate_session = venus_hfi_validate_session;
 }
 
 int cvp_venus_hfi_initialize(struct cvp_hfi_device *hdev, u32 device_id,
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index d2e4071..6aa6253 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -31,7 +31,6 @@
 
 #define HFI_CMD_SYS_OX_START		\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000)
-#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
 #define HFI_CMD_SYS_PING		(HFI_CMD_SYS_OX_START + 0x002)
 
 #define HFI_CMD_SESSION_OX_START	\
@@ -134,6 +133,12 @@
 	(HFI_CMD_SESSION_CVP_START + 0x050)
 #define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
 	(HFI_CMD_SESSION_CVP_START + 0x051)
+#define HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x052)
+#define HFI_CMD_SESSION_CVP_FD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x053)
+#define HFI_CMD_SESSION_CVP_FD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x054)
 #define  HFI_CMD_SESSION_CVP_ICA_FRAME\
 	(HFI_CMD_SESSION_CVP_START + 0x100)
 #define  HFI_CMD_SESSION_CVP_ICA_CONFIG\
@@ -143,7 +148,6 @@
 #define HFI_MSG_SYS_OX_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
 #define HFI_MSG_SYS_PING_ACK	(HFI_MSG_SYS_OX_START + 0x2)
-#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_OX_START + 0x4)
 
 #define HFI_MSG_SESSION_OX_START		\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x1000)
@@ -196,6 +200,10 @@
 
 #define  HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
 	(HFI_MSG_SESSION_CVP_START + 0x034)
+#define  HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x036)
+#define  HFI_MSG_SESSION_CVP_FD\
+	(HFI_MSG_SESSION_CVP_START + 0x037)
 
 #define CVP_IFACEQ_MAX_PKT_SIZE       1024
 #define CVP_IFACEQ_MED_PKT_SIZE       768
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index 1305898..b94ae5d 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -91,6 +91,15 @@
 #define HFI_PYS_HCD_BUFFERS_OFFSET 14
 #define HFI_PYS_HCD_BUF_NUM 26
 
+#define HFI_FD_CONFIG_CMD_SIZE 28
+#define HFI_FD_FRAME_CMD_SIZE  10
+#define HFI_FD_BUFFERS_OFFSET  6
+#define HFI_FD_BUF_NUM 2
+
+#define HFI_MODEL_CMD_SIZE 9
+#define HFI_MODEL_BUFFERS_OFFSET 7
+#define HFI_MODEL_BUF_NUM 1
+
 #define DFS_BIT_OFFSET (CVP_KMD_HFI_DFS_FRAME_CMD - CVP_KMD_CMD_START)
 #define DME_BIT_OFFSET (CVP_KMD_HFI_DME_FRAME_CMD - CVP_KMD_CMD_START)
 #define PERSIST_BIT_OFFSET (CVP_KMD_HFI_PERSIST_CMD - CVP_KMD_CMD_START)
@@ -152,6 +161,7 @@ enum hal_ssr_trigger_type {
 	SSR_ERR_FATAL = 1,
 	SSR_SW_DIV_BY_ZERO,
 	SSR_HW_WDOG_IRQ,
+	SSR_SESSION_ABORT,
 };
 
 struct cvp_hal_profile_level {
@@ -337,7 +347,9 @@ enum hal_command_response {
 	HAL_SESSION_DC_CONFIG_CMD_DONE,
 	HAL_SESSION_DCM_CONFIG_CMD_DONE,
 	HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
+	HAL_SESSION_FD_CONFIG_CMD_DONE,
 	HAL_SESSION_PERSIST_CMD_DONE,
+	HAL_SESSION_MODEL_BUF_CMD_DONE,
 	HAL_SESSION_ICA_FRAME_CMD_DONE,
 	HAL_SESSION_PROPERTY_INFO,
 	HAL_SESSION_ERROR,
@@ -564,6 +576,7 @@ struct cvp_hfi_device {
 	int (*suspend)(void *dev);
 	int (*flush_debug_queue)(void *dev);
 	int (*noc_error_info)(void *dev);
+	int (*validate_session)(void *sess, const char *func);
 };
 
 typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index 4ca4b2a..cd70c19 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -413,6 +413,7 @@ struct cvp_hfi_resource_syscache_info_type {
 #define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
 #define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
 #define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_COMMON_START + 0x00A)
 #define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)
 
 #define HFI_CMD_SESSION_COMMON_START		\
@@ -437,6 +438,7 @@ struct cvp_hfi_resource_syscache_info_type {
 #define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_COMMON_START + 0x8)
 #define HFI_MSG_SYS_COV                 (HFI_MSG_SYS_COMMON_START + 0x9)
 #define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_COMMON_START + 0xA)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_COMMON_START + 0xC)
 #define HFI_MSG_SESSION_SYNC_DONE      (HFI_MSG_SESSION_OX_START + 0xD)
 
 #define HFI_MSG_SESSION_COMMON_START		\
@@ -571,8 +573,8 @@ struct cvp_hfi_client {
 	u32 transaction_id;
 	u32 data1;
 	u32 data2;
-	u32 data3;
-	u32 data4;
+	u32 kdata1;
+	u32 kdata2;
 	u32 reserved1;
 	u32 reserved2;
 };
@@ -588,8 +590,8 @@ struct cvp_buf_desc {
 	u32 size;
 };
 
-struct cvp_buf_type {
-	u32 fd;
+struct cvp_hfi_buf_type {
+	s32 fd;
 	u32 size;
 	u32 offset;
 	u32 flags;
@@ -602,7 +604,7 @@ struct cvp_hfi_cmd_session_set_buffers_packet {
 	u32 packet_type;
 	u32 session_id;
 	struct cvp_hfi_client client_data;
-	struct cvp_buf_type buf_type;
+	struct cvp_hfi_buf_type buf_type;
 };
 
 struct cvp_hfi_cmd_session_set_buffers_packet_d {
@@ -635,6 +637,14 @@ struct cvp_session_release_buffers_packet_d {
 	u32 buffer_idx;
 };
 
+struct cvp_hfi_cmd_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+};
+
 struct cvp_hfi_msg_session_hdr {
 	u32 size;
 	u32 packet_type;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_io.h b/drivers/media/platform/msm/cvp/cvp_hfi_io.h
index 17ec2f2..22d107d 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_io.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_io.h
@@ -89,6 +89,11 @@
 #define CVP_WRAPPER_INTR_STATUS_A2H_BMSK	0x4
 
 #define CVP_WRAPPER_INTR_MASK		(CVP_WRAPPER_BASE_OFFS + 0x10)
+#define CVP_FATAL_INTR_BMSK	(CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK | \
+				CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK | \
+				CVP_WRAPPER_INTR_MASK_A2HWD_BMSK)
+#define CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK	0x40
+#define CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK	0x20
 #define CVP_WRAPPER_INTR_MASK_A2HWD_BMSK	0x8
 #define CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK	0x4
 #define CVP_WRAPPER_INTR_MASK_A2HCPU_SHFT	0x2
@@ -168,21 +173,49 @@
  * MODULE: vcodec noc error log registers
  * --------------------------------------------------------------------------
  */
-#define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS		0x00004000
-#define VCODEC_CORE1_VIDEO_NOC_BASE_OFFS		0x0000C000
-#define VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS	0x0500
-#define VCODEC_COREX_VIDEO_NOC_ERR_SWID_HIGH_OFFS	0x0504
-#define VCODEC_COREX_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS	0x0508
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS	0x0510
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRCLR_LOW_OFFS	0x0518
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS	0x0520
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS	0x0524
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS	0x0528
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS	0x052C
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS	0x0530
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS	0x0534
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS	0x0538
-#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS	0x053C
+#define CVP_NOC_BASE_OFFS		0x000D0000
+#define CVP_NOC_ERR_SWID_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x80)
+#define CVP_NOC_ERR_SWID_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x84)
+#define CVP_NOC_ERR_MAINCTL_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x88)
+#define CVP_NOC_ERR_ERRVLD_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x90)
+#define CVP_NOC_ERR_ERRCLR_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x98)
+#define CVP_NOC_ERR_ERRLOG0_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xA0)
+#define CVP_NOC_ERR_ERRLOG0_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xA4)
+#define CVP_NOC_ERR_ERRLOG1_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xA8)
+#define CVP_NOC_ERR_ERRLOG1_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xAC)
+#define CVP_NOC_ERR_ERRLOG2_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xB0)
+#define CVP_NOC_ERR_ERRLOG2_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xB4)
+#define CVP_NOC_ERR_ERRLOG3_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xB8)
+#define CVP_NOC_ERR_ERRLOG3_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xBC)
+
+#define CVP_NOC_CORE_BASE_OFFS			0x00010000
+#define CVP_NOC_CORE_ERR_SWID_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1200)
+#define CVP_NOC_CORE_ERR_SWID_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1204)
+#define CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1208)
+#define CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1210)
+#define CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1218)
+#define CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1220)
+#define CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1224)
+#define CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1228)
+#define CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x122C)
+#define CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1230)
+#define CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1234)
+#define CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1238)
+#define CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x123C)
+
 
 #define CVP_AON_WRAPPER_MVP_NOC_LPI_CONTROL	(CVP_AON_BASE_OFFS)
 #define CVP_AON_WRAPPER_MVP_NOC_LPI_STATUS	(CVP_AON_BASE_OFFS + 0x4)
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index 75a6de7..909fe3c 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -201,18 +201,19 @@ inline int cvp_create_pkt_cmd_sys_session_init(
 		struct cvp_hal_session *session)
 {
 	int rc = 0;
+	struct msm_cvp_inst *inst = session->session_id;
 
-	if (!pkt)
+	if (!pkt || !inst)
 		return -EINVAL;
 
 	pkt->size = sizeof(struct cvp_hfi_cmd_sys_session_init_packet);
 	pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
 	pkt->session_id = hash32_ptr(session);
-	pkt->session_kmask = 0xFFFFFFFF;
-	pkt->session_type = HFI_SESSION_CV;
-	pkt->session_prio = 0;
-	pkt->is_secure = 0;
-	pkt->dsp_ac_mask = 0;
+	pkt->session_type = inst->prop.type;
+	pkt->session_kmask = inst->prop.kernel_mask;
+	pkt->session_prio = inst->prop.priority;
+	pkt->is_secure = inst->prop.is_secure;
+	pkt->dsp_ac_mask = inst->prop.dsp_mask;
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 50ff78c..a8be9d6 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -158,7 +158,7 @@ static int hfi_process_session_error(u32 device_id,
 		dprintk(CVP_ERR,
 			"%s: session %x data1 %#x, data2 %#x\n", __func__,
 			pkt->session_id, pkt->event_data1, pkt->event_data2);
-		info->response_type = HAL_SESSION_ERROR;
+		info->response_type = HAL_RESPONSE_UNUSED;
 		break;
 	}
 
@@ -445,6 +445,9 @@ static int hfi_process_session_cvp_operation_config(u32 device_id,
 	if (pkt->packet_type == HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS)
 		signal = get_signal_from_pkt_type(
 				HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS);
+	else if (pkt->packet_type == HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS)
+		signal = get_signal_from_pkt_type(
+				HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS);
 	else
 		signal = get_signal_from_pkt_type(conf_id);
 
@@ -761,6 +764,7 @@ int cvp_hfi_process_msg_packet(u32 device_id,
 		break;
 	case HFI_MSG_SESSION_CVP_OPERATION_CONFIG:
 	case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
+	case HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS:
 		pkt_func =
 			(pkt_func_def)hfi_process_session_cvp_operation_config;
 		break;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index b057525..df0b26c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -8,34 +8,17 @@
 #include <synx_api.h>
 #include "cvp_core_hfi.h"
 
-#define MSM_CVP_NOMINAL_CYCLES		(444 * 1000 * 1000)
-#define MSM_CVP_UHD60E_VPSS_CYCLES	(111 * 1000 * 1000)
-#define MSM_CVP_UHD60E_ISE_CYCLES	(175 * 1000 * 1000)
-#define MAX_CVP_VPSS_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
-		MSM_CVP_UHD60E_VPSS_CYCLES)
-#define MAX_CVP_ISE_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
-		MSM_CVP_UHD60E_ISE_CYCLES)
-
-struct msm_cvp_fence_thread_data {
-	struct msm_cvp_inst *inst;
-	unsigned int device_id;
-	struct cvp_kmd_hfi_fence_packet in_fence_pkt;
-	unsigned int arg_type;
-};
-
-static struct msm_cvp_fence_thread_data fence_thread_data;
-
-void print_cvp_internal_buffer(u32 tag, const char *str,
+void print_internal_buffer(u32 tag, const char *str,
 		struct msm_cvp_inst *inst, struct msm_cvp_internal_buffer *cbuf)
 {
 	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
 		return;
 
 	dprintk(tag,
-		"%s: %x : idx %2d fd %d off %d daddr %x size %d type %d flags 0x%x\n",
+		"%s: %x : idx %2d fd %d off %d %s size %d flags 0x%x",
 		str, hash32_ptr(inst->session), cbuf->buf.index, cbuf->buf.fd,
-		cbuf->buf.offset, cbuf->smem.device_addr, cbuf->buf.size,
-		cbuf->buf.type, cbuf->buf.flags);
+		cbuf->buf.offset, cbuf->smem.dma_buf->name, cbuf->buf.size,
+		cbuf->buf.flags);
 }
 
 static enum hal_buffer get_hal_buftype(const char *str, unsigned int type)
@@ -57,52 +40,31 @@ static enum hal_buffer get_hal_buftype(const char *str, unsigned int type)
 	return buftype;
 }
 
-static int msm_cvp_scale_clocks_and_bus(struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	rc = msm_cvp_set_clocks(inst->core);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: failed set_clocks for inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-		goto exit;
-	}
-
-	rc = msm_cvp_comm_vote_bus(inst->core);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: failed vote_bus for inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-		goto exit;
-	}
-
-exit:
-	return rc;
-}
-
 static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
 		struct cvp_kmd_session_info *session)
 {
 	int rc = 0;
+	struct msm_cvp_inst *s;
 
 	if (!inst || !inst->core || !session) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	s->cur_cmd_type = CVP_KMD_GET_SESSION_INFO;
 	session->session_id = hash32_ptr(inst->session);
 	dprintk(CVP_DBG, "%s: id 0x%x\n", __func__, session->session_id);
 
+	s->cur_cmd_type = 0;
+	cvp_put_inst(s);
 	return rc;
 }
 
-static int msm_cvp_session_get_iova_addr(
+static int msm_cvp_session_get_iova_addr_d(
 	struct msm_cvp_inst *inst,
 	struct msm_cvp_internal_buffer **cbuf_ptr,
 	unsigned int search_fd, unsigned int search_size,
@@ -120,16 +82,6 @@ static int msm_cvp_session_get_iova_addr(
 		}
 	}
 	mutex_unlock(&inst->cvpcpubufs.lock);
-	if (!found) {
-		mutex_lock(&inst->cvpdspbufs.lock);
-		list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
-			if (cbuf->buf.fd == search_fd) {
-				found = true;
-				break;
-			}
-		}
-		mutex_unlock(&inst->cvpdspbufs.lock);
-	}
 	if (!found)
 		return -ENOENT;
 
@@ -148,6 +100,35 @@ static int msm_cvp_session_get_iova_addr(
 	return 0;
 }
 
+static int msm_cvp_session_get_iova_addr(
+	struct msm_cvp_inst *inst,
+	struct cvp_buf_type *in_buf,
+	unsigned int *iova)
+{
+	struct msm_cvp_internal_buffer *cbuf;
+
+	if (!inst || !iova) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->cvpcpubufs.lock);
+	list_for_each_entry(cbuf, &inst->cvpcpubufs.list, list) {
+		if (cbuf->smem.dma_buf == in_buf->dbuf &&
+			cbuf->buf.size == in_buf->size &&
+			cbuf->buf.offset == in_buf->offset) {
+			*iova = cbuf->smem.device_addr + cbuf->buf.offset;
+			print_internal_buffer(CVP_DBG, "found", inst, cbuf);
+			mutex_unlock(&inst->cvpcpubufs.lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&inst->cvpcpubufs.lock);
+	*iova = 0;
+
+	return -ENOENT;
+}
+
 static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 	struct cvp_kmd_buffer *buf)
 {
@@ -185,13 +166,12 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 	}
 	mutex_unlock(&inst->cvpdspbufs.lock);
 	if (found) {
-		print_client_buffer(CVP_ERR, "duplicate", inst, buf);
+		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
 		return -EINVAL;
 	}
 
-	cbuf = kzalloc(sizeof(struct msm_cvp_internal_buffer), GFP_KERNEL);
+	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
 	if (!cbuf) {
-		dprintk(CVP_ERR, "%s: cbuf alloc failed\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -201,6 +181,7 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 	cbuf->smem.offset = buf->offset;
 	cbuf->smem.size = buf->size;
 	cbuf->smem.flags = buf->flags;
+
 	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
 	if (rc) {
 		print_client_buffer(CVP_ERR, "map failed", inst, buf);
@@ -208,8 +189,9 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 	}
 
 	if (buf->index) {
-		rc = cvp_dsp_register_buffer((uint32_t)cbuf->smem.device_addr,
-			buf->index, buf->size, hash32_ptr(session));
+		rc = cvp_dsp_register_buffer(hash32_ptr(session), buf->fd,
+			 buf->size, buf->offset, buf->index,
+			(uint32_t)cbuf->smem.device_addr);
 		if (rc) {
 			dprintk(CVP_ERR,
 				"%s: failed dsp registration for fd=%d rc=%d",
@@ -232,7 +214,7 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 exit:
 	if (cbuf->smem.device_addr)
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	kfree(cbuf);
+	kmem_cache_free(inst->internal_buf_cache, cbuf);
 	cbuf = NULL;
 
 	return rc;
@@ -289,11 +271,11 @@ static int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
 	list_del(&cbuf->list);
 	mutex_unlock(&inst->cvpdspbufs.lock);
 
-	kfree(cbuf);
+	kmem_cache_free(inst->internal_buf_cache, cbuf);
 	return rc;
 }
 
-static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
+static int msm_cvp_map_buf_cpu_d(struct msm_cvp_inst *inst,
 	unsigned int fd,
 	unsigned int size,
 	struct msm_cvp_internal_buffer **cbuf_ptr)
@@ -317,16 +299,14 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 	}
 	mutex_unlock(&inst->cvpcpubufs.lock);
 	if (found) {
-		print_client_buffer(CVP_ERR, "duplicate", inst, &cbuf->buf);
+		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
 		return -EINVAL;
 	}
 
-	cbuf = kzalloc(sizeof(struct msm_cvp_internal_buffer), GFP_KERNEL);
+	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
 	if (!cbuf)
 		return -ENOMEM;
 
-	memset(cbuf, 0, sizeof(struct msm_cvp_internal_buffer));
-
 	cbuf->buf.fd = fd;
 	cbuf->buf.size = size;
 	/* HFI doesn't have buffer type, set it as HAL_BUFFER_INPUT */
@@ -335,9 +315,10 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 	cbuf->smem.size = cbuf->buf.size;
 	cbuf->smem.flags = 0;
 	cbuf->smem.offset = 0;
+
 	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
 	if (rc) {
-		print_client_buffer(CVP_ERR, "map failed", inst, &cbuf->buf);
+		print_internal_buffer(CVP_ERR, "map failed", inst, cbuf);
 		goto exit;
 	}
 
@@ -352,12 +333,196 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 exit:
 	if (cbuf->smem.device_addr)
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	kfree(cbuf);
+	kmem_cache_free(inst->internal_buf_cache, cbuf);
 	cbuf = NULL;
 
 	return rc;
 }
 
+static void __msm_cvp_cache_operations(struct msm_cvp_internal_buffer *cbuf)
+{
+	enum smem_cache_ops cache_op;
+
+	if (!cbuf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	switch (cbuf->buf.type) {
+	case CVP_KMD_BUFTYPE_INPUT:
+		cache_op = SMEM_CACHE_CLEAN;
+		break;
+	case CVP_KMD_BUFTYPE_OUTPUT:
+		cache_op = SMEM_CACHE_INVALIDATE;
+		break;
+	default:
+		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
+	}
+
+	msm_cvp_smem_cache_operations(cbuf->smem.dma_buf, cache_op,
+				cbuf->buf.offset, cbuf->buf.size);
+}
+
+static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
+				struct cvp_buf_type *in_buf,
+				u32 *iova,
+				struct msm_cvp_frame *frame)
+{
+	int rc = 0;
+	struct msm_cvp_internal_buffer *cbuf;
+	struct msm_cvp_frame_buf *frame_buf;
+	struct dma_buf *dma_buf;
+
+	if (!inst || !iova || !frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (in_buf->fd > 0) {
+		dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
+		if (!dma_buf) {
+			rc = -EINVAL;
+			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
+				in_buf->fd);
+			goto exit;
+		}
+		in_buf->dbuf = dma_buf;
+		msm_cvp_smem_put_dma_buf(dma_buf);
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, in_buf, iova);
+	if (!rc && *iova != 0)
+		return 0;
+
+	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	cbuf->buf.fd = in_buf->fd;
+	cbuf->buf.size = in_buf->size;
+	cbuf->buf.offset = in_buf->offset;
+	cbuf->buf.flags = in_buf->flags;
+	cbuf->buf.type = CVP_KMD_BUFTYPE_INPUT | CVP_KMD_BUFTYPE_OUTPUT;
+
+	cbuf->smem.buffer_type = in_buf->flags;
+	cbuf->smem.fd = cbuf->buf.fd;
+	cbuf->smem.size = cbuf->buf.size;
+	cbuf->smem.flags = 0;
+	cbuf->smem.offset = 0;
+	cbuf->smem.dma_buf = in_buf->dbuf;
+
+	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
+	if (rc) {
+		print_internal_buffer(CVP_ERR, "map failed", inst, cbuf);
+		goto exit;
+	}
+
+	/* Assign mapped dma_buf back because it could be zero previously */
+	in_buf->dbuf = cbuf->smem.dma_buf;
+
+	mutex_lock(&inst->cvpcpubufs.lock);
+	list_add_tail(&cbuf->list, &inst->cvpcpubufs.list);
+	mutex_unlock(&inst->cvpcpubufs.lock);
+
+	__msm_cvp_cache_operations(cbuf);
+
+	*iova = cbuf->smem.device_addr + cbuf->buf.offset;
+
+	frame_buf = kmem_cache_zalloc(inst->frame_buf_cache, GFP_KERNEL);
+	if (!frame_buf) {
+		rc = -ENOMEM;
+		goto exit2;
+	}
+
+	memcpy(&frame_buf->buf, in_buf, sizeof(frame_buf->buf));
+
+	mutex_lock(&frame->bufs.lock);
+	list_add_tail(&frame_buf->list, &frame->bufs.list);
+	mutex_unlock(&frame->bufs.lock);
+
+	print_internal_buffer(CVP_DBG, "map", inst, cbuf);
+	return rc;
+
+exit2:
+	if (cbuf->smem.device_addr)
+		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+	mutex_lock(&inst->cvpcpubufs.lock);
+	list_del(&cbuf->list);
+	mutex_unlock(&inst->cvpcpubufs.lock);
+exit:
+	kmem_cache_free(inst->internal_buf_cache, cbuf);
+	cbuf = NULL;
+
+	return rc;
+}
+
+static void __unmap_buf(struct msm_cvp_inst *inst,
+		struct msm_cvp_frame_buf *frame_buf)
+{
+	struct msm_cvp_internal_buffer *cbuf, *dummy;
+	struct cvp_buf_type *buf;
+
+	if (!inst || !frame_buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	buf = &frame_buf->buf;
+	mutex_lock(&inst->cvpcpubufs.lock);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpcpubufs.list, list) {
+		if (cbuf->smem.dma_buf == buf->dbuf &&
+			cbuf->buf.size == buf->size &&
+			cbuf->buf.offset == buf->offset) {
+			list_del(&cbuf->list);
+			print_internal_buffer(CVP_DBG, "unmap", inst, cbuf);
+			msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+			kmem_cache_free(inst->internal_buf_cache, cbuf);
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpcpubufs.lock);
+}
+
+void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid)
+{
+	struct msm_cvp_frame *frame, *dummy1;
+	struct msm_cvp_frame_buf *frame_buf, *dummy2;
+	bool found;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_DBG, "%s: unmap frame %llu\n", __func__, ktid);
+	found = false;
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		if (frame->ktid == ktid) {
+			found = true;
+			list_del(&frame->list);
+			mutex_lock(&frame->bufs.lock);
+			list_for_each_entry_safe(frame_buf, dummy2,
+						&frame->bufs.list, list) {
+				list_del(&frame_buf->list);
+				__unmap_buf(inst, frame_buf);
+				kmem_cache_free(inst->frame_buf_cache,
+						frame_buf);
+			}
+			mutex_unlock(&frame->bufs.lock);
+			DEINIT_MSM_CVP_LIST(&frame->bufs);
+			kmem_cache_free(inst->frame_cache, frame);
+			break;
+		}
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	if (!found) {
+		dprintk(CVP_WARN, "%s frame %#llx not found!\n",
+				__func__, ktid);
+	}
+}
+
 static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
 			struct cvp_session_queue *sq,
 			struct cvp_session_msg **msg)
@@ -366,8 +531,7 @@ static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
 	bool result = false;
 
 	spin_lock(&sq->lock);
-	if (!kref_read(&inst->kref) ||
-		sq->state != QUEUE_ACTIVE) {
+	if (sq->state != QUEUE_ACTIVE) {
 		/* The session is being deleted */
 		spin_unlock(&sq->lock);
 		*msg = NULL;
@@ -393,13 +557,20 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 	struct cvp_session_msg *msg = NULL;
 	struct cvp_session_queue *sq;
 	struct cvp_kmd_session_control *sc;
-	int rc;
+	struct msm_cvp_inst *s;
+	int rc = 0;
+	u32 version;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s invalid session\n", __func__);
 		return -EINVAL;
 	}
 
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	s->cur_cmd_type = CVP_KMD_RECEIVE_MSG_PKT;
 	sq = &inst->session_queue;
 	sc = (struct cvp_kmd_session_control *)out_pkt;
 
@@ -407,31 +578,47 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 
 	if (wait_event_timeout(sq->wq,
 		_cvp_msg_pending(inst, sq, &msg), wait_time) == 0) {
-		dprintk(CVP_DBG, "session queue wait timeout\n");
-		return -ETIMEDOUT;
+		dprintk(CVP_WARN, "session queue wait timeout\n");
+		msm_cvp_comm_kill_session(inst);
+		rc = -ETIMEDOUT;
+		goto exit;
 	}
 
+	version = (get_hfi_version() & HFI_VERSION_MINOR_MASK)
+				>> HFI_VERSION_MINOR_SHIFT;
+
 	if (msg == NULL) {
-		dprintk(CVP_DBG,
+		dprintk(CVP_WARN,
 			"%s: session deleted, queue state %d, msg cnt %d\n",
 			__func__, inst->session_queue.state,
 			inst->session_queue.msg_count);
 
-		spin_lock(&sq->lock);
-		if (sq->msg_count) {
-			sc->ctrl_data[0] = sq->msg_count;
-			rc = -EUCLEAN;
-		} else {
-			rc = -ENOLINK;
+		if (inst->state >= MSM_CVP_CLOSE_DONE) {
+			rc = -ECONNRESET;
+			goto exit;
 		}
-		spin_unlock(&sq->lock);
-		return rc;
+
+		msm_cvp_comm_kill_session(inst);
+	} else {
+		if (version >= 1) {
+			u64 ktid;
+			u32 kdata1, kdata2;
+
+			kdata1 = msg->pkt.client_data.kdata1;
+			kdata2 = msg->pkt.client_data.kdata2;
+			ktid = ((u64)kdata2 << 32) | kdata1;
+			msm_cvp_unmap_buf_cpu(inst, ktid);
+		}
+
+		memcpy(out_pkt, &msg->pkt,
+			sizeof(struct cvp_hfi_msg_session_hdr));
+		kmem_cache_free(inst->session_queue.msg_cache, msg);
 	}
 
-	memcpy(out_pkt, &msg->pkt, get_msg_size());
-	kmem_cache_free(inst->session_queue.msg_cache, msg);
-
-	return 0;
+exit:
+	s->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
 }
 
 static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
@@ -442,34 +629,89 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 	struct cvp_buf_desc *buf_ptr;
 	struct cvp_buf_type *new_buf;
 	int i, rc = 0;
-	struct cvp_hfi_device *hdev = inst->core->device;
-	struct iris_hfi_device *hfi = hdev->hfi_device_data;
-	u32 version = hfi->version;
+	u32 version;
+	unsigned int iova;
+	u64 ktid;
+	struct msm_cvp_frame *frame;
 
+	version = get_hfi_version();
 	version = (version & HFI_VERSION_MINOR_MASK) >> HFI_VERSION_MINOR_SHIFT;
 
-	if (offset != 0 && buf_num != 0) {
-		for (i = 0; i < buf_num; i++) {
-			buf_ptr = (struct cvp_buf_desc *)
-					&in_pkt->pkt_data[offset];
-			if (version >= 1)
-				offset += sizeof(*new_buf) >> 2;
-			else
-				offset += sizeof(*buf_ptr) >> 2;
+	if (version >= 1 && buf_num) {
+		struct cvp_hfi_cmd_session_hdr *cmd_hdr;
 
+		cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+		ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
+		cmd_hdr->client_data.kdata1 = (u32)ktid;
+		cmd_hdr->client_data.kdata2 = (u32)(ktid >> 32);
+
+		frame = kmem_cache_zalloc(inst->frame_cache, GFP_KERNEL);
+		if (!frame)
+			return -ENOMEM;
+
+		INIT_MSM_CVP_LIST(&frame->bufs);
+		frame->ktid = ktid;
+	} else {
+		frame = NULL;
+	}
+
+	if (!offset || !buf_num)
+		return 0;
+
+	for (i = 0; i < buf_num; i++) {
+		buf_ptr = (struct cvp_buf_desc *)
+				&in_pkt->pkt_data[offset];
+		if (version >= 1)
+			offset += sizeof(*new_buf) >> 2;
+		else
+			offset += sizeof(*buf_ptr) >> 2;
+
+		if (version >= 1) {
+			new_buf = (struct cvp_buf_type *)buf_ptr;
+
+			if (new_buf->fd <= 0 && !new_buf->dbuf)
+				continue;
+
+			rc = msm_cvp_map_buf_cpu(inst, new_buf, &iova, frame);
+			if (rc) {
+				struct msm_cvp_frame_buf *frame_buf, *dummy;
+
+				dprintk(CVP_ERR,
+					"%s: buf %d register failed.\n",
+					__func__, i);
+
+				list_for_each_entry_safe(frame_buf,
+					dummy, &frame->bufs.list, list) {
+					list_del(&frame_buf->list);
+					__unmap_buf(inst, frame_buf);
+					kmem_cache_free(
+					inst->frame_buf_cache,
+					frame_buf);
+				}
+				DEINIT_MSM_CVP_LIST(&frame->bufs);
+				kmem_cache_free(inst->frame_cache,
+						frame);
+				return rc;
+			}
+			new_buf->fd = iova;
+		} else {
 			if (!buf_ptr->fd)
 				continue;
 
-			rc = msm_cvp_session_get_iova_addr(inst, &cbuf,
+			rc = msm_cvp_session_get_iova_addr_d(inst,
+						&cbuf,
 						buf_ptr->fd,
 						buf_ptr->size,
 						&buf_ptr->fd,
 						&buf_ptr->size);
+
 			if (rc == -ENOENT) {
-				dprintk(CVP_DBG, "%s map buf fd %d size %d\n",
+				dprintk(CVP_DBG,
+					"%s map buf fd %d size %d\n",
 					__func__, buf_ptr->fd,
 					buf_ptr->size);
-				rc = msm_cvp_map_buf_cpu(inst, buf_ptr->fd,
+				rc = msm_cvp_map_buf_cpu_d(inst,
+						buf_ptr->fd,
 						buf_ptr->size, &cbuf);
 				if (rc || !cbuf) {
 					dprintk(CVP_ERR,
@@ -485,11 +727,20 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 				__func__, i, rc);
 				return rc;
 			}
-			msm_cvp_smem_cache_operations(cbuf->smem.dma_buf,
-						SMEM_CACHE_CLEAN_INVALIDATE,
-						0, buf_ptr->size);
+			msm_cvp_smem_cache_operations(
+					cbuf->smem.dma_buf,
+					SMEM_CACHE_CLEAN_INVALIDATE,
+					0, buf_ptr->size);
 		}
 	}
+
+	if (frame != NULL) {
+		mutex_lock(&inst->frames.lock);
+		list_add_tail(&frame->list, &inst->frames.list);
+		mutex_unlock(&inst->frames.lock);
+		dprintk(CVP_DBG, "%s: map frame %llu\n", __func__, ktid);
+	}
+
 	return rc;
 }
 
@@ -503,18 +754,25 @@ static int msm_cvp_session_process_hfi(
 	struct cvp_hfi_device *hdev;
 	unsigned int offset, buf_num, signal;
 	struct cvp_session_queue *sq;
+	struct msm_cvp_inst *s;
 
 	if (!inst || !inst->core || !in_pkt) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_SEND_CMD_PKT;
 	sq = &inst->session_queue;
 	spin_lock(&sq->lock);
 	if (sq->state != QUEUE_ACTIVE) {
 		spin_unlock(&sq->lock);
 		dprintk(CVP_ERR, "%s: invalid queue state\n", __func__);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto exit;
 	}
 	spin_unlock(&sq->lock);
 
@@ -541,7 +799,7 @@ static int msm_cvp_session_process_hfi(
 
 	rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
 	if (rc)
-		return rc;
+		goto exit;
 
 	rc = call_hfi_op(hdev, session_send,
 			(void *)inst->session, in_pkt);
@@ -549,6 +807,7 @@ static int msm_cvp_session_process_hfi(
 		dprintk(CVP_ERR,
 			"%s: Failed in call_hfi_op %d, %x\n",
 			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
+		goto exit;
 	}
 
 	if (signal != HAL_NO_RESP) {
@@ -562,10 +821,13 @@ static int msm_cvp_session_process_hfi(
 				signal);
 
 	}
-
+exit:
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(inst);
 	return rc;
 }
 
+#define CVP_FENCE_RUN	0x100
 static int msm_cvp_thread_fence_run(void *data)
 {
 	int i, rc = 0;
@@ -579,6 +841,7 @@ static int msm_cvp_thread_fence_run(void *data)
 	int *fence;
 	int ica_enabled = 0;
 	int pkt_idx;
+	int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
 
 	if (!data) {
 		dprintk(CVP_ERR, "%s Wrong input data %pK\n", __func__, data);
@@ -586,12 +849,13 @@ static int msm_cvp_thread_fence_run(void *data)
 	}
 
 	fence_thread_data = data;
-	inst = cvp_get_inst(get_cvp_core(fence_thread_data->device_id),
-				(void *)fence_thread_data->inst);
+	inst = fence_thread_data->inst;
 	if (!inst) {
 		dprintk(CVP_ERR, "%s Wrong inst %pK\n", __func__, inst);
-		do_exit(-EINVAL);
+		rc = -EINVAL;
+		return rc;
 	}
+	inst->cur_cmd_type = CVP_FENCE_RUN;
 	in_fence_pkt = (struct cvp_kmd_hfi_fence_packet *)
 					&fence_thread_data->in_fence_pkt;
 	in_pkt = (struct cvp_kmd_hfi_packet *)(in_fence_pkt);
@@ -654,15 +918,18 @@ static int msm_cvp_thread_fence_run(void *data)
 				"%s: Failed in call_hfi_op %d, %x\n",
 				__func__, in_pkt->pkt_data[0],
 				in_pkt->pkt_data[1]);
-			goto exit;
+			synx_state = SYNX_STATE_SIGNALED_ERROR;
 		}
 
-		rc = wait_for_sess_signal_receipt(inst,
-				HAL_SESSION_DME_FRAME_CMD_DONE);
-		if (rc)	{
-			dprintk(CVP_ERR, "%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-			goto exit;
+		if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
+			rc = wait_for_sess_signal_receipt(inst,
+					HAL_SESSION_DME_FRAME_CMD_DONE);
+			if (rc) {
+				dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
+			}
 		}
 
 		if (ica_enabled) {
@@ -672,8 +939,7 @@ static int msm_cvp_thread_fence_run(void *data)
 					__func__);
 				goto exit;
 			}
-			rc = synx_signal(synx_obj,
-					SYNX_STATE_SIGNALED_SUCCESS);
+			rc = synx_signal(synx_obj, synx_state);
 			if (rc) {
 				dprintk(CVP_ERR, "%s: synx_signal failed\n",
 					__func__);
@@ -701,16 +967,11 @@ static int msm_cvp_thread_fence_run(void *data)
 			dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
 			goto exit;
 		}
-		rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
+		rc = synx_signal(synx_obj, synx_state);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
 			goto exit;
 		}
-		if (synx_get_status(synx_obj) != SYNX_STATE_SIGNALED_SUCCESS) {
-			dprintk(CVP_ERR, "%s: synx_get_status failed\n",
-					__func__);
-			goto exit;
-		}
 		rc = synx_release(synx_obj);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_release failed\n",
@@ -762,15 +1023,18 @@ static int msm_cvp_thread_fence_run(void *data)
 				"%s: Failed in call_hfi_op %d, %x\n",
 				__func__, in_pkt->pkt_data[0],
 				in_pkt->pkt_data[1]);
-			goto exit;
+			synx_state = SYNX_STATE_SIGNALED_ERROR;
 		}
 
-		rc = wait_for_sess_signal_receipt(inst,
-				HAL_SESSION_ICA_FRAME_CMD_DONE);
-		if (rc)	{
-			dprintk(CVP_ERR, "%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-			goto exit;
+		if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
+			rc = wait_for_sess_signal_receipt(inst,
+					HAL_SESSION_ICA_FRAME_CMD_DONE);
+			if (rc)	{
+				dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
+			}
 		}
 
 		rc = synx_import(fence[2], fence[3], &synx_obj);
@@ -778,16 +1042,11 @@ static int msm_cvp_thread_fence_run(void *data)
 			dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
 			goto exit;
 		}
-		rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
+		rc = synx_signal(synx_obj, synx_state);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
 			goto exit;
 		}
-		if (synx_get_status(synx_obj) != SYNX_STATE_SIGNALED_SUCCESS) {
-			dprintk(CVP_ERR, "%s: synx_get_status failed\n",
-					__func__);
-			goto exit;
-		}
 		rc = synx_release(synx_obj);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_release failed\n", __func__);
@@ -804,6 +1063,8 @@ static int msm_cvp_thread_fence_run(void *data)
 	}
 
 exit:
+	kmem_cache_free(inst->fence_data_cache, fence_thread_data);
+	inst->cur_cmd_type = 0;
 	cvp_put_inst(inst);
 	do_exit(rc);
 }
@@ -819,14 +1080,30 @@ static int msm_cvp_session_process_hfi_fence(
 	int pkt_idx;
 	struct cvp_kmd_hfi_packet *in_pkt;
 	unsigned int signal, offset, buf_num, in_offset, in_buf_num;
+	struct msm_cvp_inst *s;
+	struct msm_cvp_fence_thread_data *fence_thread_data;
 
-	dprintk(CVP_DBG, "%s: Enter inst = %d", __func__, inst);
+	dprintk(CVP_DBG, "%s: Enter inst = %#x", __func__, inst);
 
-	if (!inst || !inst->core || !arg) {
+	if (!inst || !inst->core || !arg || !inst->core->device) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_SEND_FENCE_CMD_PKT;
+	fence_thread_data = kmem_cache_alloc(inst->fence_data_cache,
+			GFP_KERNEL);
+	if (!fence_thread_data) {
+		dprintk(CVP_ERR, "%s: fence_thread_data alloc failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
 	in_offset = arg->buf_offset;
 	in_buf_num = arg->buf_num;
 	in_pkt = (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
@@ -851,19 +1128,30 @@ static int msm_cvp_session_process_hfi_fence(
 
 	rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
 	if (rc)
-		return rc;
+		goto exit;
 
 	thread_num = thread_num + 1;
-	fence_thread_data.inst = inst;
-	fence_thread_data.device_id = (unsigned int)inst->core->id;
-	memcpy(&fence_thread_data.in_fence_pkt, &arg->data.hfi_fence_pkt,
+	fence_thread_data->inst = inst;
+	fence_thread_data->device_id = (unsigned int)inst->core->id;
+	memcpy(&fence_thread_data->in_fence_pkt, &arg->data.hfi_fence_pkt,
 				sizeof(struct cvp_kmd_hfi_fence_packet));
-	fence_thread_data.arg_type = arg->type;
+	fence_thread_data->arg_type = arg->type;
 	snprintf(thread_fence_name, sizeof(thread_fence_name),
 				"thread_fence_%d", thread_num);
 	thread = kthread_run(msm_cvp_thread_fence_run,
-			&fence_thread_data, thread_fence_name);
+			fence_thread_data, thread_fence_name);
+	if (IS_ERR(thread)) {
+		kmem_cache_free(inst->fence_data_cache, fence_thread_data);
+		dprintk(CVP_ERR, "%s fail to create kthread\n", __func__);
+		rc = -ECHILD;
+		goto exit;
+	}
 
+	return 0;
+
+exit:
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
 	return rc;
 }
 
@@ -871,173 +1159,201 @@ static int msm_cvp_session_cvp_dfs_frame_response(
 	struct msm_cvp_inst *inst,
 	struct cvp_kmd_hfi_packet *dfs_frame)
 {
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+	dprintk(CVP_ERR, "Deprecated system call: DFS_CMD_RESPONSE\n");
 		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DFS_FRAME_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
 }
 
 static int msm_cvp_session_cvp_dme_frame_response(
 	struct msm_cvp_inst *inst,
 	struct cvp_kmd_hfi_packet *dme_frame)
 {
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !dme_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+	dprintk(CVP_ERR, "Deprecated system call: DME_CMD_RESPONSE\n");
 		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DME_FRAME_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
 }
 
 static int msm_cvp_session_cvp_persist_response(
 	struct msm_cvp_inst *inst,
 	struct cvp_kmd_hfi_packet *pbuf_cmd)
 {
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !pbuf_cmd) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+	dprintk(CVP_ERR, "Deprecated system call: PERSIST_CMD_RESPONSE\n");
 		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_PERSIST_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
 }
 
-
-
 static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
 		struct cvp_kmd_send_cmd *send_cmd)
 {
-	dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd", __func__);
+	dprintk(CVP_ERR, "Deprecated system call: cvp_send_cmd\n");
 
 	return 0;
 }
 
-#define C2C_FREQ_RATIO	(1.5)
+static inline int div_by_1dot5(unsigned int a)
+{
+	unsigned long i = a << 1;
 
-static void adjust_bw_freqs(struct allowed_clock_rates_table *tbl,
-		unsigned int tbl_size, unsigned int max_bw,
-		unsigned int *freq, unsigned long *ab, unsigned long *ib)
+	return (unsigned int) i/3;
+}
+
+static inline int max_3(unsigned int a, unsigned int b, unsigned int c)
+{
+	return (a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c);
+}
+
+/**
+ * adjust_bw_freqs(): calculate CVP clock freq and bw required to sustain
+ * required use case.
+ */
+static int adjust_bw_freqs(void)
 {
 	struct msm_cvp_core *core;
 	struct msm_cvp_inst *inst;
-	unsigned long clk_core_sum = 0, clk_ctlr_sum = 0, bw_sum = 0;
-	int i;
+	struct iris_hfi_device *hdev;
+	struct bus_info *bus;
+	struct clock_set *clocks;
+	struct clock_info *cl;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size;
+	unsigned int cvp_min_rate, cvp_max_rate, max_bw;
+	unsigned long core_sum = 0, ctlr_sum = 0, fw_sum = 0;
+	unsigned long op_core_max = 0, op_ctlr_max = 0, op_fw_max = 0;
+	unsigned long bw_sum = 0, op_bw_max = 0;
+	int i, rc = 0;
 
 	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
 
+	hdev = core->device->hfi_device_data;
+	clocks = &core->resources.clock_set;
+	cl = &clocks->clock_tbl[clocks->count - 1];
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+	cvp_min_rate = tbl[0].clock_rate;
+	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
+	bus = &core->resources.bus_set.bus_tbl[1];
+	max_bw = bus->range[1];
+
 	list_for_each_entry(inst, &core->instances, list) {
-		clk_core_sum += inst->power.clock_cycles_a;
-		clk_ctlr_sum += inst->power.clock_cycles_b;
+		if (inst->state == MSM_CVP_CORE_INVALID ||
+			inst->state == MSM_CVP_CORE_UNINIT)
+			continue;
+		core_sum += inst->power.clock_cycles_a;
+		ctlr_sum += inst->power.clock_cycles_b;
+		fw_sum += inst->power.reserved[0];
+		op_core_max = (op_core_max >= inst->power.reserved[1]) ?
+			op_core_max : inst->power.reserved[1];
+		op_ctlr_max = (op_ctlr_max >= inst->power.reserved[2]) ?
+			op_ctlr_max : inst->power.reserved[2];
+		op_fw_max = (op_fw_max >= inst->power.reserved[3]) ?
+			op_fw_max : inst->power.reserved[3];
 		bw_sum += inst->power.ddr_bw;
+		op_bw_max = (op_bw_max >= inst->power.reserved[4]) ?
+			op_bw_max : inst->power.reserved[4];
 	}
 
-	if (clk_core_sum * C2C_FREQ_RATIO < clk_ctlr_sum)
-		clk_core_sum = clk_ctlr_sum/C2C_FREQ_RATIO;
+	core_sum = max_3(core_sum, ctlr_sum, fw_sum);
+	op_core_max = max_3(op_core_max, op_ctlr_max, op_fw_max);
+	op_core_max = (op_core_max > tbl[tbl_size - 1].clock_rate) ?
+				tbl[tbl_size - 1].clock_rate : op_core_max;
+	core_sum = (core_sum >= op_core_max) ? core_sum : op_core_max;
+	bw_sum = (bw_sum >= op_bw_max) ? bw_sum : op_bw_max;
 
-	for (i = 1; i < tbl_size; i++) {
-		if (clk_core_sum < tbl[i].clock_rate)
-			break;
+	if (core_sum < tbl[0].clock_rate) {
+		core_sum = tbl[0].clock_rate;
+	} else {
+		for (i = 1; i < tbl_size; i++)
+			if (core_sum <= tbl[i].clock_rate)
+				break;
+
+		if (i == tbl_size) {
+			dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
+					__func__, core_sum);
+			return -ENOTSUPP;
+		}
+		core_sum = tbl[i].clock_rate;
 	}
 
-	if (i == tbl_size)
-		clk_core_sum = tbl[tbl_size - 1].clock_rate;
-	else
-		clk_core_sum = tbl[i].clock_rate;
-
-	*freq = clk_core_sum;
-
 	if (bw_sum > max_bw)
 		bw_sum = max_bw;
 
-	*ab = bw_sum;
-	*ib = 0;
+	dprintk(CVP_DBG, "%s %lld %lld %lld\n", __func__,
+		core_sum, bw_sum, op_bw_max);
+	if (!cl->has_scaling) {
+		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
+		return -EINVAL;
+	}
+
+	ctlr_sum = core->curr_freq;
+	core->curr_freq = core_sum;
+	rc = msm_cvp_set_clocks(core);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to set clock rate %lu %s: %d %s\n",
+			core_sum, cl->name, rc, __func__);
+		core->curr_freq = ctlr_sum;
+		return rc;
+	}
+	hdev->clk_freq = core_sum;
+	rc = msm_bus_scale_update_bw(bus->client,
+			bw_sum, 0);
+	if (rc)
+		dprintk(CVP_ERR, "Failed voting bus %s to ab %lu\n",
+			bus->name, bw_sum);
+
+	return rc;
 }
 
 /**
- * clock_cycles_a: CVP core clock freq (lower)
+ * Use of cvp_kmd_request_power structure
+ * clock_cycles_a: CVP core clock freq
  * clock_cycles_b: CVP controller clock freq
+ * ddr_bw: b/w vote in Bps
+ * reserved[0]: CVP firmware required clock freq
+ * reserved[1]: CVP core operational clock freq
+ * reserved[2]: CVP controller operational clock freq
+ * reserved[3]: CVP firmware operational clock freq
+ * reserved[4]: CVP operational b/w vote
+ *
+ * session's power record only saves normalized freq or b/w vote
  */
 static int msm_cvp_request_power(struct msm_cvp_inst *inst,
 		struct cvp_kmd_request_power *power)
 {
 	int rc = 0;
-	unsigned int freq;
-	unsigned long ab, ib;
 	struct msm_cvp_core *core;
-	struct bus_info *bus;
-	struct allowed_clock_rates_table *clks_tbl = NULL;
-	unsigned int clks_tbl_size;
-	unsigned int min_rate, max_rate;
-
+	struct msm_cvp_inst *s;
 
 	if (!inst || !power) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
-	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_REQUEST_POWER;
+	core = inst->core;
 
 	mutex_lock(&core->lock);
 
-	clks_tbl = core->resources.allowed_clks_tbl;
-	clks_tbl_size = core->resources.allowed_clks_tbl_size;
-	min_rate = clks_tbl[0].clock_rate;
-	max_rate = clks_tbl[clks_tbl_size - 1].clock_rate;
-	bus = &core->resources.bus_set.bus_tbl[1];
-
 	memcpy(&inst->power, power, sizeof(*power));
 
-	if (inst->power.clock_cycles_a < min_rate ||
-			inst->power.clock_cycles_a > max_rate)
-		inst->power.clock_cycles_a = min_rate;
-
-	if (inst->power.clock_cycles_b < (min_rate * C2C_FREQ_RATIO) ||
-		inst->power.clock_cycles_b > (max_rate * C2C_FREQ_RATIO))
-		inst->power.clock_cycles_b = min_rate * C2C_FREQ_RATIO;
+	/* Normalize CVP controller clock freqs */
+	inst->power.clock_cycles_b = div_by_1dot5(inst->power.clock_cycles_b);
+	inst->power.reserved[0] = div_by_1dot5(inst->power.reserved[0]);
+	inst->power.reserved[2] = div_by_1dot5(inst->power.reserved[2]);
+	inst->power.reserved[3] = div_by_1dot5(inst->power.reserved[3]);
+	inst->power.reserved[4] = div_by_1dot5(inst->power.reserved[4]);
 
 	/* Convert bps to KBps */
 	inst->power.ddr_bw = inst->power.ddr_bw >> 10;
 
-	if (inst->power.ddr_bw > bus->range[1])
-		inst->power.ddr_bw = bus->range[1] >> 1;
+	rc = adjust_bw_freqs();
+	if (rc)
+		dprintk(CVP_ERR, "Instance %pK power request out of range\n", inst);
 
-	dprintk(CVP_DBG,
-		"%s: cycles_a %d, cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
-		__func__, power->clock_cycles_a, power->clock_cycles_b,
-		power->ddr_bw, power->sys_cache_bw);
-
-	adjust_bw_freqs(clks_tbl, clks_tbl_size, bus->range[1],
-			&freq, &ab, &ib);
-	dprintk(CVP_DBG, "%s %x %llx %llx\n", __func__, freq, ab, ib);
 	mutex_unlock(&core->lock);
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
 
 	return rc;
 }
@@ -1047,43 +1363,112 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
 {
 	struct cvp_hfi_device *hdev;
 	struct cvp_hal_session *session;
+	struct msm_cvp_inst *s;
+	int rc = 0;
 
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
+	if (!buf->index)
+		return 0;
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_REGISTER_BUFFER;
 	session = (struct cvp_hal_session *)inst->session;
 	if (!session) {
 		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto exit;
 	}
 	hdev = inst->core->device;
 	print_client_buffer(CVP_DBG, "register", inst, buf);
 
-	if (!buf->index)
-		return 0;
-
-	return msm_cvp_map_buf_dsp(inst, buf);
+	rc = msm_cvp_map_buf_dsp(inst, buf);
+exit:
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
 }
 
 static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
 		struct cvp_kmd_buffer *buf)
 {
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
-	print_client_buffer(CVP_DBG, "unregister", inst, buf);
-
 	if (!buf->index) {
-		dprintk(CVP_INFO,
-			"%s CPU path unregister buffer is deprecated!\n",
-			__func__);
 		return 0;
 	}
-	return msm_cvp_unmap_buf_dsp(inst, buf);
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_UNREGISTER_BUFFER;
+	print_client_buffer(CVP_DBG, "unregister", inst, buf);
+
+	rc = msm_cvp_unmap_buf_dsp(inst, buf);
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
+}
+
+static int msm_cvp_session_create(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	if (inst->state >= MSM_CVP_CLOSE_DONE)
+		return -ECONNRESET;
+
+	if (inst->state != MSM_CVP_CORE_INIT_DONE ||
+		inst->state > MSM_CVP_OPEN_DONE) {
+		dprintk(CVP_ERR,
+			"%s Incorrect CVP state %d to create session\n",
+			__func__, inst->state);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move instance to open done state\n");
+		goto fail_init;
+	}
+
+	rc = cvp_comm_set_arp_buffers(inst);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to set ARP buffers\n");
+		goto fail_init;
+	}
+
+fail_init:
+	return rc;
+}
+
+static int session_state_check_init(struct msm_cvp_inst *inst)
+{
+	mutex_lock(&inst->lock);
+	if (inst->state >= MSM_CVP_OPEN && inst->state < MSM_CVP_STOP) {
+		mutex_unlock(&inst->lock);
+		return 0;
+	}
+	mutex_unlock(&inst->lock);
+
+	return msm_cvp_session_create(inst);
 }
 
 static int msm_cvp_session_start(struct msm_cvp_inst *inst,
@@ -1134,16 +1519,16 @@ static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
 {
 	struct cvp_kmd_session_control *ctrl = &arg->data.session_ctrl;
 	int rc = 0;
+	unsigned int ctrl_type;
 
+	ctrl_type = ctrl->ctrl_type;
 
-
-	if (!inst) {
+	if (!inst && ctrl_type != SESSION_CREATE) {
 		dprintk(CVP_ERR, "%s invalid session\n", __func__);
 		return -EINVAL;
 	}
 
-
-	switch (ctrl->ctrl_type) {
+	switch (ctrl_type) {
 	case SESSION_STOP:
 		rc = msm_cvp_session_stop(inst, arg);
 		break;
@@ -1151,7 +1536,9 @@ static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
 		rc = msm_cvp_session_start(inst, arg);
 		break;
 	case SESSION_CREATE:
+		rc = msm_cvp_session_create(inst);
 	case SESSION_DELETE:
+		break;
 	case SESSION_INFO:
 	default:
 		dprintk(CVP_ERR, "%s Unsupported session ctrl%d\n",
@@ -1178,7 +1565,7 @@ static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
 	hfi = hdev->hfi_device_data;
 
 	switch (props->prop_data.prop_type) {
-	case CVP_HFI_VERSION:
+	case CVP_KMD_PROP_HFI_VERSION:
 	{
 		props->prop_data.data = hfi->version;
 		break;
@@ -1191,6 +1578,49 @@ static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
 	return rc;
 }
 
+static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_kmd_sys_properties *props = &arg->data.sys_properties;
+	struct cvp_kmd_sys_property *prop_array;
+	struct cvp_session_prop *session_prop;
+	int i, rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	prop_array = &arg->data.sys_properties.prop_data;
+	session_prop = &inst->prop;
+
+	for (i = 0; i < props->prop_num; i++) {
+		switch (prop_array[i].prop_type) {
+		case CVP_KMD_PROP_SESSION_TYPE:
+			session_prop->type = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_KERNELMASK:
+			session_prop->kernel_mask = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_PRIORITY:
+			session_prop->priority = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_SECURITY:
+			session_prop->is_secure = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_DSPMASK:
+			session_prop->dsp_mask = prop_array[i].data;
+			break;
+		default:
+			dprintk(CVP_ERR,
+				"unrecognized sys property to set %d\n",
+				prop_array[i].prop_type);
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
 int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 {
 	int rc = 0;
@@ -1201,6 +1631,19 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 	}
 	dprintk(CVP_DBG, "%s: arg->type = %x", __func__, arg->type);
 
+	if (arg->type != CVP_KMD_SESSION_CONTROL &&
+		arg->type != CVP_KMD_SET_SYS_PROPERTY &&
+		arg->type != CVP_KMD_GET_SYS_PROPERTY) {
+
+		rc = session_state_check_init(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"Incorrect session state %d for command %d",
+				inst->state, arg->type);
+			return rc;
+		}
+	}
+
 	switch (arg->type) {
 	case CVP_KMD_GET_SESSION_INFO:
 	{
@@ -1299,6 +1742,9 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 	case CVP_KMD_GET_SYS_PROPERTY:
 		rc = msm_cvp_get_sysprop(inst, arg);
 		break;
+	case CVP_KMD_SET_SYS_PROPERTY:
+		rc = msm_cvp_set_sysprop(inst, arg);
+		break;
 	default:
 		dprintk(CVP_DBG, "%s: unknown arg type %#x\n",
 				__func__, arg->type);
@@ -1312,6 +1758,10 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
+	struct cvp_hal_session *session;
+	struct msm_cvp_internal_buffer *cbuf, *dummy;
+	struct msm_cvp_frame *frame, *dummy1;
+	struct msm_cvp_frame_buf *frame_buf, *dummy2;
 
 	if (!inst || !inst->core) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -1320,17 +1770,72 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 	dprintk(CVP_DBG, "%s: inst %pK (%#x)\n", __func__,
 		inst, hash32_ptr(inst->session));
 
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
 	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
 	if (rc)
 		dprintk(CVP_ERR, "%s: close failed\n", __func__);
 
-	inst->clk_data.min_freq = 0;
-	inst->clk_data.ddr_bw = 0;
-	inst->clk_data.sys_cache_bw = 0;
-	rc = msm_cvp_scale_clocks_and_bus(inst);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed to scale_clocks_and_bus\n",
-			__func__);
+	mutex_lock(&inst->cvpcpubufs.lock);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpcpubufs.list,
+			list) {
+		print_internal_buffer(CVP_DBG, "remove from cvpcpubufs", inst,
+									cbuf);
+		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+		list_del(&cbuf->list);
+		kmem_cache_free(inst->internal_buf_cache, cbuf);
+	}
+	mutex_unlock(&inst->cvpcpubufs.lock);
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list,
+			list) {
+		print_internal_buffer(CVP_DBG, "remove from cvpdspbufs", inst,
+									cbuf);
+		rc = cvp_dsp_deregister_buffer(
+			(uint32_t)cbuf->smem.device_addr,
+			cbuf->buf.index, cbuf->buf.size,
+			hash32_ptr(session));
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: failed dsp deregistration fd=%d rc=%d",
+				__func__, cbuf->buf.fd, rc);
+
+		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+		list_del(&cbuf->list);
+		kmem_cache_free(inst->internal_buf_cache, cbuf);
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		list_del(&frame->list);
+		mutex_lock(&frame->bufs.lock);
+		list_for_each_entry_safe(frame_buf, dummy2, &frame->bufs.list,
+									list) {
+			struct cvp_buf_type *buf = &frame_buf->buf;
+
+			dprintk(CVP_DBG,
+				"%s: %x : fd %d off %d size %d %s\n",
+				"remove from frame list",
+				hash32_ptr(inst->session),
+				buf->fd, buf->offset, buf->size,
+				buf->dbuf->name);
+
+			list_del(&frame_buf->list);
+			kmem_cache_free(inst->frame_buf_cache, frame_buf);
+		}
+		mutex_unlock(&frame->bufs.lock);
+		DEINIT_MSM_CVP_LIST(&frame->bufs);
+		kmem_cache_free(inst->frame_cache, frame);
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	msm_cvp_comm_free_freq_table(inst);
 
 	return rc;
 }
@@ -1353,5 +1858,11 @@ int msm_cvp_session_init(struct msm_cvp_inst *inst)
 	inst->clk_data.ddr_bw = 1000;
 	inst->clk_data.sys_cache_bw = 1000;
 
+	inst->prop.type = HFI_SESSION_CV;
+	inst->prop.kernel_mask = 0xFFFFFFFF;
+	inst->prop.priority = 0;
+	inst->prop.is_secure = 0;
+	inst->prop.dsp_mask = 0;
+
 	return rc;
 }
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
index b0bbcd0..431fea7 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
@@ -232,87 +232,16 @@ static unsigned long msm_cvp_calc_freq(struct msm_cvp_inst *inst,
 int msm_cvp_set_clocks(struct msm_cvp_core *core)
 {
 	struct cvp_hfi_device *hdev;
-	unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0;
-	unsigned long freq_core_max = 0;
-	struct msm_cvp_inst *temp = NULL;
-	int rc = 0, i = 0;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	bool increment, decrement;
+	int rc;
 
-	hdev = core->device;
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	if (!allowed_clks_tbl) {
-		dprintk(CVP_ERR,
-			"%s Invalid parameters\n", __func__);
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
 
-	mutex_lock(&core->lock);
-	increment = false;
-	decrement = true;
-	list_for_each_entry(temp, &core->instances, list) {
-
-		if (temp->clk_data.core_id == CVP_CORE_ID_1)
-			freq_core_1 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == CVP_CORE_ID_2)
-			freq_core_2 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == CVP_CORE_ID_3) {
-			freq_core_1 += temp->clk_data.min_freq;
-			freq_core_2 += temp->clk_data.min_freq;
-		}
-
-		freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2);
-
-		if (msm_cvp_clock_voting) {
-			dprintk(CVP_PROF,
-				"msm_cvp_clock_voting %d\n",
-				 msm_cvp_clock_voting);
-			freq_core_max = msm_cvp_clock_voting;
-			break;
-		}
-
-		if (temp->clk_data.turbo_mode) {
-			dprintk(CVP_PROF,
-				"Found an instance with Turbo request\n");
-			freq_core_max = msm_cvp_max_freq(core);
-			break;
-		}
-		/* increment even if one session requested for it */
-		if (temp->clk_data.dcvs_flags & MSM_CVP_DCVS_INCR)
-			increment = true;
-		/* decrement only if all sessions requested for it */
-		if (!(temp->clk_data.dcvs_flags & MSM_CVP_DCVS_DECR))
-			decrement = false;
-	}
-
-	/*
-	 * keep checking from lowest to highest rate until
-	 * table rate >= requested rate
-	 */
-	for (i = 0; i < core->resources.allowed_clks_tbl_size;  i++) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= freq_core_max)
-			break;
-	}
-	if (increment) {
-		if (i > 0)
-			rate = allowed_clks_tbl[i-1].clock_rate;
-	} else if (decrement) {
-		if (i < (core->resources.allowed_clks_tbl_size - 1))
-			rate = allowed_clks_tbl[i+1].clock_rate;
-	}
-
-	core->min_freq = freq_core_max;
-	core->curr_freq = rate;
-	mutex_unlock(&core->lock);
-
-	dprintk(CVP_PROF,
-		"%s: clock rate %lu requested %lu increment %d decrement %d\n",
-		__func__, core->curr_freq, core->min_freq,
-		increment, decrement);
+	hdev = core->device;
 	rc = call_hfi_op(hdev, scale_clocks,
-			hdev->hfi_device_data, core->curr_freq);
-
+		hdev->hfi_device_data, core->curr_freq);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 61c1344..1ea4d04 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -15,6 +15,7 @@
 #include "msm_cvp_debug.h"
 #include "msm_cvp_clocks.h"
 #include "msm_cvp.h"
+#include "cvp_core_hfi.h"
 
 #define IS_ALREADY_IN_STATE(__p, __d) (\
 	(__p >= __d)\
@@ -22,6 +23,27 @@
 
 static void handle_session_error(enum hal_command_response cmd, void *data);
 
+static void dump_hfi_queue_info(struct iris_hfi_device *device)
+{
+	struct cvp_hfi_queue_header *queue;
+	struct cvp_iface_q_info *qinfo;
+	int i;
+
+	dprintk(CVP_ERR, "HFI queues in order of cmd(rd, wr), msg and dbg:\n");
+
+	/*
+	 * mb() to ensure driver reads the updated header values from
+	 * main memory.
+	 */
+	mb();
+	for (i = 0; i <= CVP_IFACEQ_DBGQ_IDX; i++) {
+		qinfo = &device->iface_queues[i];
+		queue = (struct cvp_hfi_queue_header *)qinfo->q_hdr;
+		dprintk(CVP_ERR, "queue details: %d %d\n",
+				queue->qhdr_read_idx, queue->qhdr_write_idx);
+	}
+}
+
 int msm_cvp_comm_get_inst_load(struct msm_cvp_inst *inst,
 		enum load_calc_quirks quirks)
 {
@@ -169,6 +191,30 @@ struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
 	return inst;
 }
 
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+	struct msm_cvp_inst *s;
+
+	s = cvp_get_inst(core, session_id);
+	if (!s) {
+		dprintk(CVP_ERR, "%pS session doesn't exist\n",
+			__builtin_return_address(0));
+		return NULL;
+	}
+
+	hdev = s->core->device;
+	rc = call_hfi_op(hdev, validate_session, s->session, __func__);
+	if (rc) {
+		cvp_put_inst(s);
+		s = NULL;
+	}
+
+	return s;
+}
+
 static void cvp_handle_session_cmd_done(enum hal_command_response cmd,
 	void *data)
 {
@@ -252,7 +298,9 @@ static void handle_session_release_buf_done(enum hal_command_response cmd,
 	inst = cvp_get_inst(get_cvp_core(response->device_id),
 			response->session_id);
 	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
+		dprintk(CVP_WARN,
+			"%s: Got a response for an inactive session\n",
+			__func__);
 		return;
 	}
 
@@ -270,9 +318,6 @@ static void handle_session_release_buf_done(enum hal_command_response cmd,
 	}
 	mutex_unlock(&inst->persistbufs.lock);
 
-	if (!buf_found)
-		dprintk(CVP_WARN, "invalid buffer %#x from firmware\n",
-				address);
 	if (IS_HAL_SESSION_CMD(cmd))
 		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
 	else
@@ -353,7 +398,7 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
 		msecs_to_jiffies(
 			inst->core->resources.msm_cvp_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(CVP_ERR, "Wait interrupted or timed out: %d\n",
+		dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
 				SESSION_MSG_INDEX(cmd));
 		msm_cvp_comm_kill_session(inst);
 		rc = -EIO;
@@ -417,7 +462,8 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data)
 		response->session_id);
 
 	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
 		return;
 	}
 
@@ -466,7 +512,8 @@ static void handle_release_res_done(enum hal_command_response cmd, void *data)
 	inst = cvp_get_inst(get_cvp_core(response->device_id),
 			response->session_id);
 	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
 		return;
 	}
 
@@ -495,7 +542,8 @@ static void handle_session_error(enum hal_command_response cmd, void *data)
 	inst = cvp_get_inst(get_cvp_core(response->device_id),
 			response->session_id);
 	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
+		dprintk(CVP_WARN, "%s: response for an inactive session\n",
+				__func__);
 		return;
 	}
 
@@ -561,6 +609,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
 	struct cvp_hfi_device *hdev = NULL;
 	struct msm_cvp_inst *inst = NULL;
 	int rc = 0;
+	unsigned long flags = 0;
 
 	subsystem_crashed("cvpss");
 	if (!response) {
@@ -587,13 +636,20 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
 	}
 
 	dprintk(CVP_WARN, "SYS_ERROR received for core %pK\n", core);
-	/* msm_cvp_noc_error_info(core) is disabled as of now */
+	msm_cvp_noc_error_info(core);
 	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 	list_for_each_entry(inst, &core->instances, list) {
 		dprintk(CVP_WARN,
-			"%s: Send sys error for inst %pK\n", __func__, inst);
+			"%s: sys error for inst %#x kref %x, cmd %x\n",
+				__func__, inst, kref_read(&inst->kref),
+				inst->cur_cmd_type);
 		change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
-		msm_cvp_queue_v4l2_event(inst, V4L2_EVENT_MSM_CVP_SYS_ERROR);
+
+		spin_lock_irqsave(&inst->event_handler.lock, flags);
+		inst->event_handler.event = CVP_SSR_EVENT;
+		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+		wake_up_all(&inst->event_handler.wq);
+
 		if (!core->trigger_ssr)
 			msm_cvp_comm_print_inst_info(inst);
 	}
@@ -660,7 +716,8 @@ static void handle_session_close(enum hal_command_response cmd, void *data)
 	inst = cvp_get_inst(get_cvp_core(response->device_id),
 			response->session_id);
 	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
+		dprintk(CVP_WARN, "%s: response for an inactive session\n",
+				__func__);
 		return;
 	}
 
@@ -733,6 +790,8 @@ void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
 	case HAL_SESSION_DCM_CONFIG_CMD_DONE:
 	case HAL_SESSION_DC_CONFIG_CMD_DONE:
 	case HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE:
+	case HAL_SESSION_FD_CONFIG_CMD_DONE:
+	case HAL_SESSION_MODEL_BUF_CMD_DONE:
 	case HAL_SESSION_ICA_FRAME_CMD_DONE:
 		cvp_handle_session_cmd_done(cmd, data);
 		break;
@@ -830,6 +889,8 @@ static int msm_comm_session_abort(struct msm_cvp_inst *inst)
 	if (!rc) {
 		dprintk(CVP_ERR, "%s: inst %pK session %x abort timed out\n",
 				__func__, inst, hash32_ptr(inst->session));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dump_hfi_queue_info(hdev->hfi_device_data);
 		msm_cvp_comm_generate_sys_error(inst);
 		rc = -EBUSY;
 	} else {
@@ -998,7 +1059,7 @@ static int msm_comm_init_core(struct msm_cvp_inst *inst)
 	return rc;
 }
 
-static int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
 {
 	struct msm_cvp_core *core;
 	struct cvp_hfi_device *hdev;
@@ -1047,12 +1108,6 @@ static int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
 	return 0;
 }
 
-int msm_cvp_comm_force_cleanup(struct msm_cvp_inst *inst)
-{
-	msm_cvp_comm_kill_session(inst);
-	return msm_cvp_deinit_core(inst);
-}
-
 static int msm_comm_session_init_done(int flipped_state,
 	struct msm_cvp_inst *inst)
 {
@@ -1229,8 +1284,8 @@ int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
 
 	flipped_state = get_flipped_state(inst->state, state);
 	dprintk(CVP_DBG,
-		"inst: %pK (%#x) flipped_state = %#x\n",
-		inst, hash32_ptr(inst->session), flipped_state);
+		"inst: %pK (%#x) flipped_state = %#x %x\n",
+		inst, hash32_ptr(inst->session), flipped_state, state);
 	switch (flipped_state) {
 	case MSM_CVP_CORE_UNINIT_DONE:
 	case MSM_CVP_CORE_INIT:
@@ -1264,12 +1319,13 @@ int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
 	case MSM_CVP_RELEASE_RESOURCES:
 		dprintk(CVP_WARN, "Deprecated state RELEASE_SOURCES\n");
 	case MSM_CVP_RELEASE_RESOURCES_DONE:
-		dprintk(CVP_WARN, "Deprecated state RELEASE_RESOURCES_DONE\n");
 	case MSM_CVP_CLOSE:
+		dprintk(CVP_INFO, "to CVP_CLOSE state\n");
 		rc = msm_comm_session_close(flipped_state, inst);
 		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_CVP_CLOSE_DONE:
+		dprintk(CVP_INFO, "to CVP_CLOSE_DONE state\n");
 		rc = wait_for_state(inst, flipped_state, MSM_CVP_CLOSE_DONE,
 				HAL_SESSION_END_DONE);
 		if (rc || state <= get_flipped_state(inst->state, state))
@@ -1277,7 +1333,7 @@ int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
 		msm_cvp_comm_session_clean(inst);
 	case MSM_CVP_CORE_UNINIT:
 	case MSM_CVP_CORE_INVALID:
-		dprintk(CVP_DBG, "Sending core uninit\n");
+		dprintk(CVP_INFO, "Sending core uninit\n");
 		rc = msm_cvp_deinit_core(inst);
 		if (rc || state == get_flipped_state(inst->state, state))
 			break;
@@ -1349,6 +1405,32 @@ void msm_cvp_ssr_handler(struct work_struct *work)
 	}
 	hdev = core->device;
 
+	if (core->ssr_type == SSR_SESSION_ABORT) {
+		struct msm_cvp_inst *inst = NULL, *s;
+
+		dprintk(CVP_ERR, "Session abort triggered\n");
+		list_for_each_entry(s, &core->instances, list) {
+			dprintk(CVP_WARN,
+				"Session to abort: inst %#x cmd %x ref %x\n",
+				s, s->cur_cmd_type, kref_read(&s->kref));
+			inst = s;
+			break;
+		}
+
+		if (inst != NULL) {
+			s = cvp_get_inst_validate(inst->core, inst);
+			if (!s)
+				return;
+
+			msm_cvp_comm_kill_session(inst);
+			cvp_put_inst(s);
+		} else {
+			dprintk(CVP_WARN, "No active CVP session to abort\n");
+		}
+
+		return;
+	}
+
 	mutex_lock(&core->lock);
 	if (core->state == CVP_CORE_INIT_DONE) {
 		dprintk(CVP_WARN, "%s: ssr type %d\n", __func__,
@@ -1409,6 +1491,7 @@ void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst)
 int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
+	unsigned long flags = 0;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
@@ -1418,58 +1501,34 @@ int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
 			__func__, inst);
 		return 0;
 	}
-
-	dprintk(CVP_ERR, "%s: inst %pK, session %x state %d\n", __func__,
+	dprintk(CVP_WARN, "%s: inst %pK, session %x state %d\n", __func__,
 		inst, hash32_ptr(inst->session), inst->state);
 	/*
 	 * We're internally forcibly killing the session, if fw is aware of
 	 * the session send session_abort to firmware to clean up and release
 	 * the session, else just kill the session inside the driver.
 	 */
-	if ((inst->state >= MSM_CVP_OPEN_DONE &&
-			inst->state < MSM_CVP_CLOSE_DONE) ||
-			inst->state == MSM_CVP_CORE_INVALID) {
+	if (inst->state >= MSM_CVP_OPEN_DONE &&
+			inst->state < MSM_CVP_CLOSE_DONE) {
 		rc = msm_comm_session_abort(inst);
 		if (rc) {
 			dprintk(CVP_ERR,
 				"%s: inst %pK session %x abort failed\n",
 				__func__, inst, hash32_ptr(inst->session));
 			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+		} else {
+			change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
 		}
 	}
 
-	change_cvp_inst_state(inst, MSM_CVP_CLOSE_DONE);
-	msm_cvp_comm_session_clean(inst);
-
-	dprintk(CVP_WARN, "%s: inst %pK session %x handled\n", __func__,
-		inst, hash32_ptr(inst->session));
-	return rc;
-}
-
-int msm_cvp_comm_smem_alloc(struct msm_cvp_inst *inst,
-		size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
-		int map_kernel, struct msm_cvp_smem *smem)
-{
-	int rc = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s: invalid inst: %pK\n", __func__, inst);
-		return -EINVAL;
+	if (inst->state == MSM_CVP_CORE_UNINIT) {
+		spin_lock_irqsave(&inst->event_handler.lock, flags);
+		inst->event_handler.event = CVP_SSR_EVENT;
+		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+		wake_up_all(&inst->event_handler.wq);
 	}
-	rc = msm_cvp_smem_alloc(size, align, flags, buffer_type, map_kernel,
-				&(inst->core->resources), inst->session_type,
-				smem);
-	return rc;
-}
 
-void msm_cvp_comm_smem_free(struct msm_cvp_inst *inst, struct msm_cvp_smem *mem)
-{
-	if (!inst || !inst->core || !mem) {
-		dprintk(CVP_ERR,
-			"%s: invalid params: %pK %pK\n", __func__, inst, mem);
-		return;
-	}
-	msm_cvp_smem_free(mem);
+	return rc;
 }
 
 void msm_cvp_fw_unload_handler(struct work_struct *work)
@@ -1528,6 +1587,7 @@ void msm_cvp_comm_print_inst_info(struct msm_cvp_inst *inst)
 		return;
 	}
 
+	dprintk(CVP_ERR, "active session cmd %d\n", inst->cur_cmd_type);
 	is_secure = inst->flags & CVP_SECURE;
 	dprintk(CVP_ERR,
 			"---Buffer details for inst: %pK of type: %d---\n",
@@ -1574,7 +1634,6 @@ int msm_cvp_comm_unmap_cvp_buffer(struct msm_cvp_inst *inst,
 }
 
 static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
-				enum hal_buffer buffer_type,
 				struct msm_cvp_smem *handle, bool reuse)
 {
 	struct cvp_buffer_addr_info buffer_info;
@@ -1589,7 +1648,7 @@ static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
 	hdev = inst->core->device;
 
 	buffer_info.buffer_size = handle->size;
-	buffer_info.buffer_type = buffer_type;
+	buffer_info.buffer_type = 0;
 	buffer_info.num_buffers = 1;
 	buffer_info.align_device_addr = handle->device_addr;
 	dprintk(CVP_DBG, "%s %s buffer : %x\n",
@@ -1607,23 +1666,22 @@ static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
 }
 
 static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
-			struct cvp_hal_buffer_requirements *internal_bufreq,
-			struct msm_cvp_list *buf_list)
+			u32 buffer_size, struct msm_cvp_list *buf_list)
 {
 	struct cvp_internal_buf *binfo;
 	u32 smem_flags = SMEM_UNCACHED;
 	int rc = 0;
 
-	if (!inst || !internal_bufreq || !buf_list) {
+	if (!inst || !buf_list) {
 		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
 		return -EINVAL;
 	}
 
-	if (!internal_bufreq->buffer_size)
+	if (!buffer_size)
 		return 0;
 
 	/* PERSIST buffer requires secure mapping */
-	smem_flags |= SMEM_SECURE;
+	smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
 
 	binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 	if (!binfo) {
@@ -1632,8 +1690,7 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
 		goto fail_kzalloc;
 	}
 
-	rc = msm_cvp_smem_alloc(internal_bufreq->buffer_size, 1, smem_flags,
-			internal_bufreq->buffer_type, 0,
+	rc = msm_cvp_smem_alloc(buffer_size, 1, smem_flags, 0,
 			&(inst->core->resources), inst->session_type,
 			&binfo->smem);
 	if (rc) {
@@ -1641,10 +1698,9 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
 		goto err_no_mem;
 	}
 
-	binfo->buffer_type = internal_bufreq->buffer_type;
+	binfo->buffer_type = 0;
 
-	rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type,
-			&binfo->smem, false);
+	rc = set_internal_buf_on_fw(inst, &binfo->smem, false);
 	if (rc)
 		goto fail_set_buffers;
 
@@ -1665,21 +1721,15 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
 /* Set ARP buffer for CVP firmware to handle concurrency */
 int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
 {
-	int rc = 0, idx = 0;
-	struct cvp_hal_buffer_requirements *cvp_internal_buf = NULL;
-	struct msm_cvp_list *buf_list = &inst->persistbufs;
+	int rc = 0;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
 		return -EINVAL;
 	}
 
-	idx = ffs(HAL_BUFFER_INTERNAL_PERSIST_1);
-	cvp_internal_buf = &inst->buff_req.buffer[idx];
-	cvp_internal_buf->buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1;
-	cvp_internal_buf->buffer_size = ARP_BUF_SIZE;
-
-	rc = allocate_and_set_internal_bufs(inst, cvp_internal_buf, buf_list);
+	rc = allocate_and_set_internal_bufs(inst, ARP_BUF_SIZE,
+						&inst->persistbufs);
 	if (rc)
 		goto error;
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index 5eee64d..b8359e8 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -18,11 +18,13 @@ enum load_calc_quirks {
 void cvp_put_inst(struct msm_cvp_inst *inst);
 struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
 		void *session_id);
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id);
 void cvp_change_inst_state(struct msm_cvp_inst *inst,
 		enum instance_state state);
 struct msm_cvp_core *get_cvp_core(int core_id);
 int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
-int msm_cvp_comm_force_cleanup(struct msm_cvp_inst *inst);
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst);
 int msm_cvp_comm_suspend(int core_id);
 struct cvp_hal_buffer_requirements *get_cvp_buff_req_buffer(
 			struct msm_cvp_inst *inst, u32 buffer_type);
@@ -30,11 +32,6 @@ void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
 int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
-int msm_cvp_comm_smem_alloc(struct msm_cvp_inst *inst, size_t size, u32 align,
-		u32 flags, enum hal_buffer buffer_type, int map_kernel,
-		struct msm_cvp_smem *smem);
-void msm_cvp_comm_smem_free(struct msm_cvp_inst *inst,
-				struct msm_cvp_smem *smem);
 int msm_cvp_comm_smem_cache_operations(struct msm_cvp_inst *inst,
 		struct msm_cvp_smem *mem, enum smem_cache_ops cache_ops);
 int msm_cvp_comm_check_core_init(struct msm_cvp_core *core);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 705e1c2..60eaf88 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -15,6 +15,7 @@
 #include "cvp_hfi_api.h"
 #include "msm_cvp_clocks.h"
 #include <linux/dma-buf.h>
+#include <uapi/media/msm_media_info.h>
 
 #define MAX_EVENTS 30
 #define NUM_CYCLES16X16_HCD_FRAME 95
@@ -42,6 +43,8 @@ int msm_cvp_est_cycles(struct cvp_kmd_usecase_desc *cvp_desc,
 	unsigned int hcd_stats_write = 0;
 	unsigned int dme_pixel_read = 0;
 	unsigned int ncc_pixel_read = 0;
+	unsigned int process_width = 0;
+	unsigned int process_height = 0;
 
 	if (!cvp_desc || !cvp_voting) {
 		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
@@ -52,12 +55,72 @@ int msm_cvp_est_cycles(struct cvp_kmd_usecase_desc *cvp_desc,
 		num_16x16_blocks = (cvp_desc->fullres_width>>4)
 			* (cvp_desc->fullres_height>>4);
 		ds_cycles = NUM_CYCLES16X16_DS_FRAME * num_16x16_blocks;
-		num_16x16_blocks = (cvp_desc->downscale_width>>4)
-			* (cvp_desc->downscale_height>>4);
+		process_width = cvp_desc->downscale_width;
+		process_height = cvp_desc->downscale_height;
+		num_16x16_blocks = (process_width>>4)*(process_height>>4);
 		hcd_cycles = NUM_CYCLES16X16_HCD_FRAME * num_16x16_blocks;
+		/*Estimate downscale output (always UBWC) BW stats*/
+		if (cvp_desc->fullres_width <= 1920) {
+			/*w*h/1.58=w*h*(81/128)*/
+			ds_pixel_write = ((process_width*process_height*81)>>7);
+		} else {
+			/*w*h/2.38=w*h*(54/128)*/
+			ds_pixel_write = ((process_width*process_height*54)>>7);
+		}
+		/*Estimate downscale input BW stats based on colorfmt*/
+		switch (cvp_desc->colorfmt) {
+		case COLOR_FMT_NV12:
+		{
+			/*w*h*1.5*/
+			ds_pixel_read = ((cvp_desc->fullres_width
+				* cvp_desc->fullres_height * 3)>>1);
+			break;
+		}
+		case COLOR_FMT_P010:
+		{
+			/*w*h*2*1.5*/
+			ds_pixel_read = cvp_desc->fullres_width
+				* cvp_desc->fullres_height * 3;
+			break;
+		}
+		case COLOR_FMT_NV12_UBWC:
+		{
+			/*w*h*1.5/factor(factor=width>1920?2.38:1.58)*/
+			if (cvp_desc->fullres_width <= 1920) {
+				/*w*h*1.5/1.58 = w*h*121/128*/
+				ds_pixel_read = ((cvp_desc->fullres_width
+					* cvp_desc->fullres_height * 121)>>7);
+			} else {
+				/*w*h*1.5/1.61 = w*h*119/128*/
+				ds_pixel_read = ((cvp_desc->fullres_width
+					* cvp_desc->fullres_height * 119)>>7);
+			}
+			break;
+		}
+		case COLOR_FMT_NV12_BPP10_UBWC:
+		{
+			/*w*h*1.33*1.5/factor(factor=width>1920?2.38:1.58)*/
+			if (cvp_desc->fullres_width <= 1920) {
+				/*w*h*1.33*1.5/1.58 = w*h*5/4*/
+				ds_pixel_read = ((cvp_desc->fullres_width
+					* cvp_desc->fullres_height * 5)>>2);
+			} else {
+				/*w*h*1.33*1.5/1.61 = w*h*79/64*/
+				ds_pixel_read = ((cvp_desc->fullres_width
+					* cvp_desc->fullres_height * 79)>>6);
+			}
+			break;
+		}
+		default:
+			dprintk(CVP_ERR, "Defaulting to linear P010\n");
+			/*w*h*1.5*2 COLOR_FMT_P010*/
+			ds_pixel_read = (cvp_desc->fullres_width
+				* cvp_desc->fullres_height * 3);
+		}
 	} else {
-		num_16x16_blocks = (cvp_desc->fullres_width>>4)
-			* (cvp_desc->fullres_height>>4);
+		process_width = cvp_desc->fullres_width;
+		process_height = cvp_desc->fullres_height;
+		num_16x16_blocks = (process_width>>4)*(process_height>>4);
 		hcd_cycles = NUM_CYCLES16X16_HCD_FRAME * num_16x16_blocks;
 	}
 
@@ -70,77 +133,35 @@ int msm_cvp_est_cycles(struct cvp_kmd_usecase_desc *cvp_desc,
 	cvp_voting->clock_cycles_a = cvp_cycles * cvp_desc->fps;
 	cvp_voting->clock_cycles_b = 0;
 	cvp_voting->reserved[0] = NUM_CYCLESFW_FRAME * cvp_desc->fps;
-	cvp_voting->reserved[1] = cvp_desc->fps;
-	cvp_voting->reserved[2] = cvp_desc->op_rate;
+	cvp_voting->reserved[1] = cvp_cycles * cvp_desc->op_rate;
+	cvp_voting->reserved[2] = 0;
+	cvp_voting->reserved[3] = NUM_CYCLESFW_FRAME*cvp_desc->op_rate;
 
-	if (cvp_desc->is_downscale) {
-		if (cvp_desc->fullres_width <= 1920) {
-			/*
-			 *w*h*1.33(10bpc)*1.5/1.58=
-			 *w*h*(4/3)*(3/2)*(5/8)=w*h*(5/4)
-			 */
-			ds_pixel_read = ((cvp_desc->fullres_width
-				* cvp_desc->fullres_height * 5)>>2);
-			/*w*h/1.58=w*h*(5/8)*/
-			ds_pixel_write = ((cvp_desc->downscale_width
-				* cvp_desc->downscale_height * 5)>>3);
-			/*w*h*1.5/1.58=w*h*(3/2)*(5/8)*/
-			hcd_pixel_read = ((cvp_desc->downscale_width
-				* cvp_desc->downscale_height * 15)>>4);
-			/*num_16x16_blocks*8*4*/
-			hcd_stats_write = (num_16x16_blocks<<5);
-			/*NUM_DME_MAX_FEATURE_POINTS*96*48/1.58*/
-			dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 2880;
-			/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/1.58*/
-			ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1040;
-		} else {
-			/*
-			 *w*h*1.33(10bpc)*1.5/2.38=
-			 *w*h*(4/3)*(3/2)*(54/128)=w*h*(54/64)
-			 */
-			ds_pixel_read = ((cvp_desc->fullres_width
-				* cvp_desc->fullres_height * 54)>>6);
-			/*w*h/2.38=w*h*(54/128)*/
-			ds_pixel_write = ((cvp_desc->downscale_width
-				* cvp_desc->downscale_height * 54)>>7);
-			/*w*h*1.5/2.38=w*h*(3/2)*(54/128)*/
-			hcd_pixel_read = ((cvp_desc->downscale_width
-				* cvp_desc->downscale_height * 81)>>7);
-			/*num_16x16_blocks*8*4*/
-			hcd_stats_write = (num_16x16_blocks<<5);
-			/*NUM_DME_MAX_FEATURE_POINTS*96*48/2.38*/
-			dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1944;
-			/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/2.38*/
-			ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 702;
-		}
+	if (process_width <= 1920) {
+		/*w*h*1.5(for filter fetch overhead)/1.58=w*h*(3/2)*(5/8)*/
+		hcd_pixel_read = ((process_width * process_height * 15)>>4);
+		/*num_16x16_blocks*8*4*/
+		hcd_stats_write = (num_16x16_blocks<<5);
+		/*NUM_DME_MAX_FEATURE_POINTS*96*48/1.58*/
+		dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 2880;
+		/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/1.58*/
+		ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1040;
 	} else {
-		if (cvp_desc->fullres_width <= 1920) {
-			/*w*h*1.5/1.58=w*h*(3/2)*(5/8)*/
-			hcd_pixel_read = ((cvp_desc->fullres_width
-				* cvp_desc->fullres_height * 15)>>4);
-			/*num_16x16_blocks*8*4*/
-			hcd_stats_write = (num_16x16_blocks<<5);
-			/*NUM_DME_MAX_FEATURE_POINTS*96*48/1.58*/
-			dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 2880;
-			/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/1.58*/
-			ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1040;
-		} else {
-			/*w*h*1.5/2.38=w*h*(3/2)*(54/128)*/
-			hcd_pixel_read = ((cvp_desc->fullres_width
-				* cvp_desc->fullres_height * 81)>>7);
-			/*num_16x16_blocks*8*4*/
-			hcd_stats_write = (num_16x16_blocks<<5);
-			/*NUM_DME_MAX_FEATURE_POINTS*96*48/2.38*/
-			dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1944;
-			/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/2.38*/
-			ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 702;
-		}
+		/*w*h*1.5(for filter fetch overhead)/2.38=w*h*(3/2)*(54/128)*/
+		hcd_pixel_read = ((process_width * process_height * 81)>>7);
+		/*num_16x16_blocks*8*4*/
+		hcd_stats_write = (num_16x16_blocks<<5);
+		/*NUM_DME_MAX_FEATURE_POINTS*96*48/2.38*/
+		dme_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 1944;
+		/*NUM_DME_MAX_FEATURE_POINTS*(18/8+1)*32*8*2/2.38*/
+		ncc_pixel_read = NUM_DME_MAX_FEATURE_POINTS * 702;
 	}
 
 	cvp_bw = ds_pixel_read + ds_pixel_write + hcd_pixel_read
 		+ hcd_stats_write + dme_pixel_read + ncc_pixel_read;
 
 	cvp_voting->ddr_bw = cvp_bw * cvp_desc->fps;
+	cvp_voting->reserved[4] = cvp_bw * cvp_desc->op_rate;
 
 	dprintk(CVP_DBG, "%s Voting cycles_a, b, bw: %d %d %d\n", __func__,
 		cvp_voting->clock_cycles_a, cvp_voting->clock_cycles_b,
@@ -231,6 +252,7 @@ static void _deinit_session_queue(struct msm_cvp_inst *inst)
 		kmem_cache_free(inst->session_queue.msg_cache, msg);
 	}
 	inst->session_queue.msg_count = 0;
+	inst->session_queue.state = QUEUE_STOP;
 	spin_unlock(&inst->session_queue.lock);
 
 	wake_up_all(&inst->session_queue.wq);
@@ -265,15 +287,18 @@ void *msm_cvp_open(int core_id, int session_type)
 		goto err_invalid_core;
 	}
 
-	pr_info(CVP_DBG_TAG "Opening CVP instance: %pK, %d\n",
-		"info", inst, session_type);
+	pr_info(CVP_DBG_TAG "Opening cvp instance: %pK\n", "info", inst);
 	mutex_init(&inst->sync_lock);
 	mutex_init(&inst->lock);
+	spin_lock_init(&inst->event_handler.lock);
 
 	INIT_MSM_CVP_LIST(&inst->freqs);
 	INIT_MSM_CVP_LIST(&inst->persistbufs);
 	INIT_MSM_CVP_LIST(&inst->cvpcpubufs);
 	INIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	INIT_MSM_CVP_LIST(&inst->frames);
+
+	init_waitqueue_head(&inst->event_handler.wq);
 
 	kref_init(&inst->kref);
 
@@ -287,6 +312,10 @@ void *msm_cvp_open(int core_id, int session_type)
 	inst->clk_data.bitrate = 0;
 	inst->clk_data.core_id = CVP_CORE_ID_DEFAULT;
 	inst->deprecate_bitmask = 0;
+	inst->fence_data_cache = KMEM_CACHE(msm_cvp_fence_thread_data, 0);
+	inst->frame_cache = KMEM_CACHE(msm_cvp_frame, 0);
+	inst->frame_buf_cache = KMEM_CACHE(msm_cvp_frame_buf, 0);
+	inst->internal_buf_cache = KMEM_CACHE(msm_cvp_internal_buffer, 0);
 
 	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
 		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
@@ -315,8 +344,14 @@ void *msm_cvp_open(int core_id, int session_type)
 	msm_cvp_dcvs_try_enable(inst);
 	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
 	if (msm_cvp_check_for_inst_overload(core)) {
-		dprintk(CVP_ERR,
-			"Instance count reached Max limit, rejecting session");
+		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list)
+			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
+				inst, inst->cur_cmd_type,
+				hash32_ptr(inst->session));
+		mutex_unlock(&core->lock);
+
 		goto fail_init;
 	}
 
@@ -325,22 +360,6 @@ void *msm_cvp_open(int core_id, int session_type)
 	inst->debugfs_root =
 		msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
 
-	if (inst->session_type == MSM_CVP_CORE) {
-		rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"Failed to move video instance to open done state\n");
-			goto fail_init;
-		}
-		rc = cvp_comm_set_arp_buffers(inst);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"Failed to set ARP buffers\n");
-			goto fail_init;
-		}
-
-	}
-
 	return inst;
 fail_init:
 	_deinit_session_queue(inst);
@@ -354,6 +373,12 @@ void *msm_cvp_open(int core_id, int session_type)
 	DEINIT_MSM_CVP_LIST(&inst->cvpcpubufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	kmem_cache_destroy(inst->fence_data_cache);
+	kmem_cache_destroy(inst->frame_cache);
+	kmem_cache_destroy(inst->frame_buf_cache);
+	kmem_cache_destroy(inst->internal_buf_cache);
 
 	kfree(inst);
 	inst = NULL;
@@ -364,58 +389,14 @@ EXPORT_SYMBOL(msm_cvp_open);
 
 static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
 {
-	int rc = 0;
-	struct msm_cvp_internal_buffer *cbuf, *dummy;
-	struct cvp_hal_session *session;
-
 	if (!inst) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return;
 	}
 
-	session = (struct cvp_hal_session *)inst->session;
-	if (!session) {
-		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
-		return;
-	}
-
-	mutex_lock(&inst->cvpcpubufs.lock);
-	list_for_each_entry_safe(cbuf, dummy, &inst->cvpcpubufs.list,
-			list) {
-		print_client_buffer(CVP_DBG, "remove from cvpcpubufs",
-				inst, &cbuf->buf);
-		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-		list_del(&cbuf->list);
-	}
-	mutex_unlock(&inst->cvpcpubufs.lock);
-
-	mutex_lock(&inst->cvpdspbufs.lock);
-	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list,
-			list) {
-		print_client_buffer(CVP_DBG, "remove from cvpdspbufs",
-				inst, &cbuf->buf);
-		rc = cvp_dsp_deregister_buffer(
-			(uint32_t)cbuf->smem.device_addr,
-			cbuf->buf.index, cbuf->buf.size,
-			hash32_ptr(session));
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: failed dsp deregistration fd=%d rc=%d",
-				__func__, cbuf->buf.fd, rc);
-
-		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-		list_del(&cbuf->list);
-	}
-	mutex_unlock(&inst->cvpdspbufs.lock);
-
-	msm_cvp_comm_free_freq_table(inst);
-
 	if (cvp_comm_release_persist_buffers(inst))
 		dprintk(CVP_ERR,
 			"Failed to release persist buffers\n");
-
-	if (inst->extradata_handle)
-		msm_cvp_comm_smem_free(inst, inst->extradata_handle);
 }
 
 int msm_cvp_destroy(struct msm_cvp_inst *inst)
@@ -438,6 +419,12 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
 	DEINIT_MSM_CVP_LIST(&inst->cvpcpubufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	kmem_cache_destroy(inst->fence_data_cache);
+	kmem_cache_destroy(inst->frame_cache);
+	kmem_cache_destroy(inst->frame_buf_cache);
+	kmem_cache_destroy(inst->internal_buf_cache);
 
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->lock);
@@ -445,8 +432,12 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
 	msm_cvp_debugfs_deinit_inst(inst);
 	_deinit_session_queue(inst);
 
-	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK\n",
-			"info", inst);
+	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK session_id = %d\n",
+		"info", inst, hash32_ptr(inst->session));
+	if (inst->cur_cmd_type)
+		dprintk(CVP_ERR, "deleted instance has pending cmd %d\n",
+				inst->cur_cmd_type);
+	inst->session = (void *)0xdeadbeef;
 	kfree(inst);
 	return 0;
 }
@@ -475,7 +466,7 @@ int msm_cvp_close(void *instance)
 	if (rc) {
 		dprintk(CVP_ERR,
 			"Failed to move inst %pK to uninit state\n", inst);
-		rc = msm_cvp_comm_force_cleanup(inst);
+		rc = msm_cvp_deinit_core(inst);
 	}
 
 	msm_cvp_comm_session_clean(inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.h b/drivers/media/platform/msm/cvp/msm_cvp_core.h
index 629c3a7..e68a06f 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.h
@@ -9,6 +9,7 @@
 #include <linux/poll.h>
 #include <linux/videodev2.h>
 #include <linux/types.h>
+#include <linux/dma-buf.h>
 #include <linux/msm_ion.h>
 #include <media/msm_cvp_private.h>
 #include <media/msm_cvp_utils.h>
@@ -25,6 +26,7 @@ enum smem_prop {
 	SMEM_CACHED = 0x2,
 	SMEM_SECURE = 0x4,
 	SMEM_ADSP = 0x8,
+	SMEM_NON_PIXEL = 0x10
 };
 
 /* NOTE: if you change this enum you MUST update the
@@ -50,7 +52,7 @@ enum hal_buffer {
 
 struct cvp_dma_mapping_info {
 	struct device *dev;
-	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *domain;
 	struct sg_table *table;
 	struct dma_buf_attachment *attach;
 	struct dma_buf *buf;
@@ -59,15 +61,15 @@ struct cvp_dma_mapping_info {
 
 struct msm_cvp_smem {
 	u32 refcount;
-	int fd;
-	void *dma_buf;
+	s32 fd;
+	struct dma_buf *dma_buf;
 	void *kvaddr;
 	u32 device_addr;
 	dma_addr_t dma_handle;
-	unsigned int offset;
-	unsigned int size;
-	unsigned long flags;
-	enum hal_buffer buffer_type;
+	u32 offset;
+	u32 size;
+	u32 flags;
+	u32 buffer_type;
 	struct cvp_dma_mapping_info mapping_info;
 };
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index e1e641c..9ba03d1 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -34,7 +34,11 @@ struct cvp_dsp_cmd_msg {
 	uint32_t buff_size;
 	uint32_t session_id;
 	int32_t ddr_type;
-	uint32_t reserved[CVP_DSP_MAX_RESERVED];
+	uint32_t buff_fd;
+	uint32_t buff_offset;
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
 };
 
 struct cvp_dsp_rsp_msg {
@@ -58,6 +62,7 @@ struct cvp_dsp_apps {
 	struct completion reg_buffer_work;
 	struct completion dereg_buffer_work;
 	struct completion shutdown_work;
+	struct completion cmdqueue_send_work;
 };
 
 
@@ -70,7 +75,7 @@ static struct cvp_dsp_rsp_msg cmd_msg_rsp;
 static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 {
 	struct cvp_dsp_apps *me = &gfa_cv;
-	int err;
+	int err = 0;
 
 	if (IS_ERR_OR_NULL(me->chan)) {
 		err = -EINVAL;
@@ -170,6 +175,13 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 	case CVP_DSP_SHUTDOWN:
 		complete(&me->shutdown_work);
 		break;
+	case CVP_DSP_SUSPEND:
+		break;
+	case CVP_DSP_RESUME:
+		break;
+	case CVP_DSP_SEND_HFI_CMD_QUEUE:
+		complete(&me->cmdqueue_send_work);
+		break;
 	default:
 		dprintk(CVP_ERR,
 		"%s: Invalid cmd_msg_type received from dsp: %d\n",
@@ -182,7 +194,8 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 	uint32_t size_in_bytes)
 {
-	int err;
+	int err, timeout;
+	struct msm_cvp_core *core;
 	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int srcVM[SRC_VM_NUM] = {VMID_HLOS};
@@ -226,6 +239,17 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 	else {
+		core = list_first_entry(&cvp_driver->cores,
+				struct msm_cvp_core, list);
+		timeout = msecs_to_jiffies(
+				core->resources.msm_cvp_dsp_rsp_timeout);
+		err = wait_for_completion_timeout(
+				&me->cmdqueue_send_work, timeout);
+		if (!err) {
+			dprintk(CVP_ERR, "failed to send cmdqueue\n");
+			return -ETIMEDOUT;
+		}
+
 		mutex_lock(&me->smd_mutex);
 		me->cvp_shutdown = STATUS_OK;
 		me->cdsp_state = STATUS_OK;
@@ -287,8 +311,9 @@ int cvp_dsp_resume(uint32_t session_flag)
 
 int cvp_dsp_shutdown(uint32_t session_flag)
 {
+	struct msm_cvp_core *core;
 	struct cvp_dsp_apps *me = &gfa_cv;
-	int err, local_cmd_msg_rsp;
+	int err, local_cmd_msg_rsp, timeout;
 	struct cvp_dsp_cmd_msg local_cmd_msg;
 	int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
 	int destVM[SRC_VM_NUM] = {VMID_HLOS};
@@ -302,7 +327,13 @@ int cvp_dsp_shutdown(uint32_t session_flag)
 			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
-	wait_for_completion(&me->shutdown_work);
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	timeout = msecs_to_jiffies(core->resources.msm_cvp_dsp_rsp_timeout);
+	err = wait_for_completion_timeout(&me->shutdown_work, timeout);
+	if (!err) {
+		dprintk(CVP_ERR, "failed to shutdown dsp\n");
+		return -ETIMEDOUT;
+	}
 
 	mutex_lock(&me->smd_mutex);
 	me->cvp_shutdown = STATUS_SSR;
@@ -329,19 +360,22 @@ int cvp_dsp_shutdown(uint32_t session_flag)
 	return err;
 }
 
-int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
-	uint32_t buff_index, uint32_t buff_size,
-	uint32_t session_id)
+int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_size, uint32_t buff_offset,
+			uint32_t buff_index, uint32_t iova_buff_addr)
 {
 	struct cvp_dsp_cmd_msg local_cmd_msg;
 	int err;
 	struct cvp_dsp_apps *me = &gfa_cv;
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_REGISTER_BUFFER;
-	local_cmd_msg.iova_buff_addr = iova_buff_addr;
-	local_cmd_msg.buff_index = buff_index;
-	local_cmd_msg.buff_size = buff_size;
 	local_cmd_msg.session_id = session_id;
+	local_cmd_msg.buff_fd = buff_fd;
+	local_cmd_msg.buff_size = buff_size;
+	local_cmd_msg.buff_offset = buff_offset;
+	local_cmd_msg.buff_index = buff_index;
+	local_cmd_msg.iova_buff_addr = iova_buff_addr;
+
 	dprintk(CVP_DBG,
 		"%s: cmd_msg_type=0x%x, iova_buff_addr=0x%x buff_index=0x%x\n",
 		__func__, local_cmd_msg.cmd_msg_type, iova_buff_addr,
@@ -441,6 +475,7 @@ static int __init cvp_dsp_device_init(void)
 	init_completion(&me->shutdown_work);
 	init_completion(&me->reg_buffer_work);
 	init_completion(&me->dereg_buffer_work);
+	init_completion(&me->cmdqueue_send_work);
 	me->cvp_shutdown = STATUS_INIT;
 	me->cdsp_state = STATUS_INIT;
 	err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index d200942..380af8d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -55,14 +55,16 @@ int cvp_dsp_shutdown(uint32_t session_flag);
 /*
  * API to register iova buffer address with CDSP
  *
- * @iova_buff_addr: IOVA buffer address
- * @buff_index:     buffer index
- * @buff_size:      size in bytes of cvp buffer
  * @session_id:     cvp session id
+ * @buff_fd:        buffer fd
+ * @buff_size:      size in bytes of cvp buffer
+ * @buff_offset:    buffer offset
+ * @buff_index:     buffer index
+ * @iova_buff_addr: IOVA buffer address
  */
-int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
-	uint32_t buff_index, uint32_t buff_size,
-	uint32_t session_id);
+int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_size, uint32_t buff_offset,
+			uint32_t buff_index, uint32_t iova_buff_addr);
 
 /*
  * API to de-register iova buffer address from CDSP
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index 21d8fff..89bf7dd 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -16,40 +16,20 @@
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
 #include <linux/kref.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-ctrls.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-v4l2.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
 #include "msm_cvp_core.h"
 #include <media/msm_media_info.h>
 #include <media/msm_cvp_private.h>
 #include "cvp_hfi_api.h"
 
-#define MSM_CVP_DRV_NAME "msm_cvp_driver"
-#define MSM_CVP_VERSION KERNEL_VERSION(0, 0, 1)
-#define MAX_DEBUGFS_NAME 50
-#define DEFAULT_TIMEOUT 3
-#define DEFAULT_HEIGHT 1088
-#define DEFAULT_WIDTH 1920
-#define MIN_SUPPORTED_WIDTH 32
-#define MIN_SUPPORTED_HEIGHT 32
-#define DEFAULT_FPS 15
-#define MIN_NUM_OUTPUT_BUFFERS 1
-#define MIN_NUM_OUTPUT_BUFFERS_VP9 6
-#define MIN_NUM_CAPTURE_BUFFERS 1
-#define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
-#define MAX_NUM_CAPTURE_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
-
 #define MAX_SUPPORTED_INSTANCES 16
-
-/* Maintains the number of FTB's between each FBD over a window */
+#define MAX_NAME_LENGTH 64
+#define MAX_DEBUGFS_NAME 50
 #define DCVS_FTB_WINDOW 16
 
-#define V4L2_EVENT_CVP_BASE  10
-
 #define SYS_MSG_START HAL_SYS_INIT_DONE
 #define SYS_MSG_END HAL_SYS_ERROR
 #define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE
@@ -57,17 +37,6 @@
 #define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
 #define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
 
-
-#define MAX_NAME_LENGTH 64
-
-#define EXTRADATA_IDX(__num_planes) ((__num_planes) ? (__num_planes) - 1 : 0)
-
-#define NUM_MBS_PER_SEC(__height, __width, __fps) \
-	(NUM_MBS_PER_FRAME(__height, __width) * __fps)
-
-#define NUM_MBS_PER_FRAME(__height, __width) \
-	((ALIGN(__height, 16) / 16) * (ALIGN(__width, 16) / 16))
-
 #define call_core_op(c, op, args...)			\
 	(((c) && (c)->core_ops && (c)->core_ops->op) ? \
 	((c)->core_ops->op(args)) : 0)
@@ -233,6 +202,20 @@ enum dcvs_flags {
 	MSM_CVP_DCVS_DECR = BIT(1),
 };
 
+struct cvp_buf_type {
+	s32 fd;
+	u32 size;
+	u32 offset;
+	u32 flags;
+	union {
+		struct dma_buf *dbuf;
+		struct {
+			u32 reserved1;
+			u32 reserved2;
+		};
+	};
+};
+
 struct cvp_clock_data {
 	int buffer_counter;
 	int load;
@@ -314,6 +297,26 @@ struct cvp_session_queue {
 	struct kmem_cache *msg_cache;
 };
 
+struct cvp_session_prop {
+	u32 type;
+	u32 kernel_mask;
+	u32 priority;
+	u32 is_secure;
+	u32 dsp_mask;
+};
+
+enum cvp_event_t {
+	CVP_NO_EVENT,
+	CVP_SSR_EVENT = 1,
+	CVP_INVALID_EVENT,
+};
+
+struct cvp_session_event {
+	spinlock_t lock;
+	enum cvp_event_t event;
+	wait_queue_head_t wq;
+};
+
 struct msm_cvp_core {
 	struct list_head list;
 	struct mutex lock;
@@ -340,6 +343,7 @@ struct msm_cvp_core {
 	unsigned long min_freq;
 	unsigned long curr_freq;
 	struct msm_cvp_core_ops *core_ops;
+	atomic64_t kernel_trans_id;
 };
 
 struct msm_cvp_inst {
@@ -348,12 +352,14 @@ struct msm_cvp_inst {
 	struct msm_cvp_core *core;
 	enum session_type session_type;
 	struct cvp_session_queue session_queue;
+	struct cvp_session_event event_handler;
 	void *session;
 	enum instance_state state;
 	struct msm_cvp_list freqs;
 	struct msm_cvp_list persistbufs;
 	struct msm_cvp_list cvpcpubufs;
 	struct msm_cvp_list cvpdspbufs;
+	struct msm_cvp_list frames;
 	struct cvp_buffer_requirements buff_req;
 	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
 	struct msm_cvp_smem *extradata_handle;
@@ -365,6 +371,19 @@ struct msm_cvp_inst {
 	struct kref kref;
 	unsigned long deprecate_bitmask;
 	struct cvp_kmd_request_power power;
+	struct cvp_session_prop prop;
+	struct kmem_cache *fence_data_cache;
+	u32 cur_cmd_type;
+	struct kmem_cache *frame_cache;
+	struct kmem_cache *frame_buf_cache;
+	struct kmem_cache *internal_buf_cache;
+};
+
+struct msm_cvp_fence_thread_data {
+	struct msm_cvp_inst *inst;
+	unsigned int device_id;
+	struct cvp_kmd_hfi_fence_packet in_fence_pkt;
+	unsigned int arg_type;
 };
 
 extern struct msm_cvp_drv *cvp_driver;
@@ -387,15 +406,25 @@ struct msm_cvp_internal_buffer {
 	struct cvp_kmd_buffer buf;
 };
 
+struct msm_cvp_frame_buf {
+	struct list_head list;
+	struct cvp_buf_type buf;
+};
+
+struct msm_cvp_frame {
+	struct list_head list;
+	struct msm_cvp_list bufs;
+	u64 ktid;
+};
+
 void msm_cvp_comm_handle_thermal_event(void);
-int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags,
-	enum hal_buffer buffer_type, int map_kernel,
+int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags, int map_kernel,
 	void  *res, u32 session_type, struct msm_cvp_smem *smem);
 int msm_cvp_smem_free(struct msm_cvp_smem *smem);
 
 struct context_bank_info *msm_cvp_smem_get_context_bank(u32 session_type,
 	bool is_secure, struct msm_cvp_platform_resources *res,
-	enum hal_buffer buffer_type);
+	unsigned long ion_flags);
 int msm_cvp_smem_map_dma_buf(struct msm_cvp_inst *inst,
 				struct msm_cvp_smem *smem);
 int msm_cvp_smem_unmap_dma_buf(struct msm_cvp_inst *inst,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_platform.c b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
index 2535782..7c533eb 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_platform.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
@@ -97,11 +97,15 @@ static struct msm_cvp_common_data sm8250_common_data[] = {
 	},
 	{
 		.key = "qcom,power-collapse-delay",
-		.value = 1500,
+		.value = 3000,
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
-		.value = 1000,
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000
 	},
 	{
 		.key = "qcom,debug-timeout",
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index e03cc6e..35e2bc5 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -773,6 +773,8 @@ int cvp_read_platform_resources_from_drv_data(
 			"qcom,fw-unload-delay");
 	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
 			"qcom,hw-resp-timeout");
+	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
+			"qcom,dsp-resp-timeout");
 	res->domain_cvp = find_key_value(platform_data,
 			"qcom,domain-cvp");
 	res->non_fatal_pagefaults = find_key_value(platform_data,
@@ -896,23 +898,10 @@ return rc;
 	return rc;
 }
 
-static int get_secure_vmid(struct context_bank_info *cb)
-{
-	if (!strcasecmp(cb->name, "cvp_sec_pixel"))
-		return VMID_CP_PIXEL;
-	else if (!strcasecmp(cb->name, "cvp_sec_nonpixel"))
-		return VMID_CP_NON_PIXEL;
-
-	WARN(1, "No matching secure vmid for cb name: %s\n",
-		cb->name);
-	return VMID_INVAL;
-}
-
 static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
 		struct context_bank_info *cb, struct device *dev)
 {
 	int rc = 0;
-	int secure_vmid = VMID_INVAL;
 	struct bus_type *bus;
 
 	if (!dev || !cb || !res) {
@@ -929,46 +918,6 @@ static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
 		goto remove_cb;
 	}
 
-	cb->mapping = __depr_arm_iommu_create_mapping(bus, cb->addr_range.start,
-					cb->addr_range.size);
-	if (IS_ERR_OR_NULL(cb->mapping)) {
-		dprintk(CVP_ERR, "%s - failed to create mapping\n", __func__);
-		rc = PTR_ERR(cb->mapping) ?: -ENODEV;
-		goto remove_cb;
-	}
-
-	if (cb->is_secure) {
-		secure_vmid = get_secure_vmid(cb);
-		rc = iommu_domain_set_attr(cb->mapping->domain,
-			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s - Couldn't arm_iommu_set_attr vmid\n",
-				__func__);
-			goto release_mapping;
-		}
-	}
-
-	if (res->cache_pagetables) {
-		int cache_pagetables = 1;
-
-		rc = iommu_domain_set_attr(cb->mapping->domain,
-			DOMAIN_ATTR_USE_UPSTREAM_HINT, &cache_pagetables);
-		if (rc) {
-			WARN_ONCE(rc,
-				"%s: failed to set cache pagetables attribute, %d\n",
-				__func__, rc);
-			rc = 0;
-		}
-	}
-
-	rc = __depr_arm_iommu_attach_device(cb->dev, cb->mapping);
-	if (rc) {
-		dprintk(CVP_ERR, "%s - Couldn't arm_iommu_attach_device\n",
-			__func__);
-		goto release_mapping;
-	}
-
 	/*
 	 * configure device segment size and segment boundary to ensure
 	 * iommu mapping returns one mapping (which is required for partial
@@ -982,14 +931,12 @@ static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
 
 	dprintk(CVP_DBG, "Attached %s and created mapping\n", dev_name(dev));
 	dprintk(CVP_DBG,
-		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, mapping: %pK",
+		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
 		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
-		cb->addr_range.size, cb->dev, cb->mapping);
+		cb->addr_range.size, cb->dev);
 
 	return rc;
 
-release_mapping:
-	__depr_arm_iommu_release_mapping(cb->mapping);
 remove_cb:
 	return rc;
 }
@@ -1063,7 +1010,7 @@ static int msm_cvp_populate_context_bank(struct device *dev,
 	}
 
 	dprintk(CVP_DBG, "%s: context bank has name %s\n", __func__, cb->name);
-	rc = of_property_read_u32_array(np, "virtual-addr-pool",
+	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
 			(u32 *)&cb->addr_range, 2);
 	if (rc) {
 		dprintk(CVP_ERR,
@@ -1072,7 +1019,7 @@ static int msm_cvp_populate_context_bank(struct device *dev,
 		goto err_setup_cb;
 	}
 
-	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
+	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
 	dprintk(CVP_DBG, "context bank %s : secure = %d\n",
 			cb->name, cb->is_secure);
 
@@ -1088,28 +1035,20 @@ static int msm_cvp_populate_context_bank(struct device *dev,
 		cb->name, cb->addr_range.start,
 		cb->addr_range.size, cb->buffer_type);
 
+	cb->domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->domain)) {
+		dprintk(CVP_ERR, "Create domain failed\n");
+		rc = -ENODEV;
+		goto err_setup_cb;
+	}
+
 	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
 	if (rc) {
 		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
 		goto err_setup_cb;
 	}
 
-	if (core->resources.non_fatal_pagefaults) {
-		int data = 1;
-
-		dprintk(CVP_DBG, "set non-fatal-faults attribute on %s\n",
-				dev_name(dev));
-		rc = iommu_domain_set_attr(cb->mapping->domain,
-					DOMAIN_ATTR_NON_FATAL_FAULTS, &data);
-		if (rc) {
-			dprintk(CVP_WARN,
-				"%s: set non fatal attribute failed: %s %d\n",
-				__func__, dev_name(dev), rc);
-			/* ignore the error */
-		}
-	}
-
-	iommu_set_fault_handler(cb->mapping->domain,
+	iommu_set_fault_handler(cb->domain,
 		msm_cvp_smmu_fault_handler, (void *)core);
 
 	return 0;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_resources.h b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
index e273143..2867532 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_resources.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
@@ -52,7 +52,7 @@ struct context_bank_info {
 	bool is_secure;
 	struct addr_range addr_range;
 	struct device *dev;
-	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *domain;
 };
 
 struct buffer_usage_table {
@@ -190,6 +190,7 @@ struct msm_cvp_platform_resources {
 	uint32_t max_inst_count;
 	uint32_t max_secure_inst_count;
 	int msm_cvp_hw_rsp_timeout;
+	int msm_cvp_dsp_rsp_timeout;
 	int msm_cvp_firmware_unload_delay;
 	uint32_t msm_cvp_pwr_collapse_delay;
 	bool domain_cvp;
diff --git a/drivers/media/platform/msm/cvp/msm_smem.c b/drivers/media/platform/msm/cvp/msm_smem.c
index 11655a1..323e9d8 100644
--- a/drivers/media/platform/msm/cvp/msm_smem.c
+++ b/drivers/media/platform/msm/cvp/msm_smem.c
@@ -17,10 +17,9 @@
 #include "msm_cvp_resources.h"
 
 
-static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align,
-	dma_addr_t *iova, unsigned long *buffer_size,
-	unsigned long flags, enum hal_buffer buffer_type,
-	unsigned long session_type, struct msm_cvp_platform_resources *res,
+static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
+	dma_addr_t *iova, u32 *buffer_size, u32 flags, unsigned long ion_flags,
+	u32 session_type, struct msm_cvp_platform_resources *res,
 	struct cvp_dma_mapping_info *mapping_info)
 {
 	int rc = 0;
@@ -37,7 +36,7 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align,
 	if (is_iommu_present(res)) {
 		cb = msm_cvp_smem_get_context_bank(
 				session_type, (flags & SMEM_SECURE),
-				res, buffer_type);
+				res, ion_flags);
 		if (!cb) {
 			dprintk(CVP_ERR,
 				"%s: Failed to get context bank device\n",
@@ -102,7 +101,7 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align,
 		}
 
 		mapping_info->dev = cb->dev;
-		mapping_info->mapping = cb->mapping;
+		mapping_info->domain = cb->domain;
 		mapping_info->table = table;
 		mapping_info->attach = attach;
 		mapping_info->buf = dbuf;
@@ -126,8 +125,7 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align,
 }
 
 static int msm_dma_put_device_address(u32 flags,
-	struct cvp_dma_mapping_info *mapping_info,
-	enum hal_buffer buffer_type)
+	struct cvp_dma_mapping_info *mapping_info)
 {
 	int rc = 0;
 
@@ -150,7 +148,7 @@ static int msm_dma_put_device_address(u32 flags,
 	trace_msm_cvp_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0);
 
 	mapping_info->dev = NULL;
-	mapping_info->mapping = NULL;
+	mapping_info->domain = NULL;
 	mapping_info->table = NULL;
 	mapping_info->attach = NULL;
 	mapping_info->buf = NULL;
@@ -191,8 +189,8 @@ int msm_cvp_smem_map_dma_buf(struct msm_cvp_inst *inst,
 
 	dma_addr_t iova = 0;
 	u32 temp = 0;
-	unsigned long buffer_size = 0;
-	unsigned long align = SZ_4K;
+	u32 buffer_size = 0;
+	u32 align = SZ_4K;
 	struct dma_buf *dbuf;
 	unsigned long ion_flags = 0;
 
@@ -208,14 +206,20 @@ int msm_cvp_smem_map_dma_buf(struct msm_cvp_inst *inst,
 		goto exit;
 	}
 
-	dbuf = msm_cvp_smem_get_dma_buf(smem->fd);
-	if (!dbuf) {
-		rc = -EINVAL;
-		goto exit;
+	if (smem->fd > 0) {
+		dbuf = msm_cvp_smem_get_dma_buf(smem->fd);
+		if (!dbuf) {
+			rc = -EINVAL;
+			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
+				smem->fd);
+			goto exit;
+		}
+		smem->dma_buf = dbuf;
+	} else {
+		dbuf = smem->dma_buf;
+		get_dma_buf(dbuf);
 	}
 
-	smem->dma_buf = dbuf;
-
 	rc = dma_buf_get_flags(dbuf, &ion_flags);
 	if (rc) {
 		dprintk(CVP_ERR, "Failed to get dma buf flags: %d\n", rc);
@@ -229,8 +233,9 @@ int msm_cvp_smem_map_dma_buf(struct msm_cvp_inst *inst,
 
 	buffer_size = smem->size;
 
+	/* Ignore the buffer_type from user space. Only use ion flags */
 	rc = msm_dma_get_device_address(dbuf, align, &iova, &buffer_size,
-			smem->flags, smem->buffer_type, inst->session_type,
+			smem->flags, ion_flags, inst->session_type,
 			&(inst->core->resources), &smem->mapping_info);
 	if (rc) {
 		dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
@@ -273,8 +278,7 @@ int msm_cvp_smem_unmap_dma_buf(struct msm_cvp_inst *inst,
 	if (smem->refcount)
 		goto exit;
 
-	rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info,
-		smem->buffer_type);
+	rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info);
 	if (rc) {
 		dprintk(CVP_ERR, "Failed to put device address: %d\n", rc);
 		goto exit;
@@ -289,48 +293,12 @@ int msm_cvp_smem_unmap_dma_buf(struct msm_cvp_inst *inst,
 	return rc;
 }
 
-static int get_secure_flag_for_buffer_type(
-	u32 session_type, enum hal_buffer buffer_type)
-{
-	switch (buffer_type) {
-	case HAL_BUFFER_INPUT:
-		if (session_type == MSM_CVP_ENCODER)
-			return ION_FLAG_CP_PIXEL;
-		else
-			return ION_FLAG_CP_BITSTREAM;
-	case HAL_BUFFER_OUTPUT:
-	case HAL_BUFFER_OUTPUT2:
-		if (session_type == MSM_CVP_ENCODER)
-			return ION_FLAG_CP_BITSTREAM;
-		else
-			return ION_FLAG_CP_PIXEL;
-	case HAL_BUFFER_INTERNAL_SCRATCH:
-		return ION_FLAG_CP_BITSTREAM;
-	case HAL_BUFFER_INTERNAL_SCRATCH_1:
-		return ION_FLAG_CP_NON_PIXEL;
-	case HAL_BUFFER_INTERNAL_SCRATCH_2:
-		return ION_FLAG_CP_PIXEL;
-	case HAL_BUFFER_INTERNAL_PERSIST:
-		if (session_type == MSM_CVP_ENCODER)
-			return ION_FLAG_CP_NON_PIXEL;
-		else
-			return ION_FLAG_CP_BITSTREAM;
-	case HAL_BUFFER_INTERNAL_PERSIST_1:
-		return ION_FLAG_CP_NON_PIXEL;
-	default:
-		WARN(1, "No matching secure flag for buffer type : %x\n",
-				buffer_type);
-		return -EINVAL;
-	}
-}
-
-static int alloc_dma_mem(size_t size, u32 align, u32 flags,
-	enum hal_buffer buffer_type, int map_kernel,
+static int alloc_dma_mem(size_t size, u32 align, u32 flags, int map_kernel,
 	struct msm_cvp_platform_resources *res, u32 session_type,
 	struct msm_cvp_smem *mem)
 {
 	dma_addr_t iova = 0;
-	unsigned long buffer_size = 0;
+	u32 buffer_size = 0;
 	unsigned long heap_mask = 0;
 	int rc = 0;
 	int ion_flags = 0;
@@ -353,59 +321,44 @@ static int alloc_dma_mem(size_t size, u32 align, u32 flags,
 		}
 	} else {
 		dprintk(CVP_DBG,
-			"allocate shared memory from adsp heap size %zx align %d\n",
-			size, align);
+		"allocate shared memory from adsp heap size %zx align %d\n",
+		size, align);
 		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
 	}
 
 	if (flags & SMEM_CACHED)
 		ion_flags |= ION_FLAG_CACHED;
 
-	if ((flags & SMEM_SECURE) ||
-		(buffer_type == HAL_BUFFER_INTERNAL_PERSIST &&
-		 session_type == MSM_CVP_ENCODER)) {
-		int secure_flag =
-			get_secure_flag_for_buffer_type(
-				session_type, buffer_type);
-		if (secure_flag < 0) {
-			rc = secure_flag;
-			goto fail_shared_mem_alloc;
-		}
+	if (flags & SMEM_NON_PIXEL)
+		ion_flags |= ION_FLAG_CP_NON_PIXEL;
 
-		ion_flags |= ION_FLAG_SECURE | secure_flag;
+	if (flags & SMEM_SECURE) {
+		ion_flags |= ION_FLAG_SECURE;
 		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
-
-		if (res->slave_side_cp) {
-			heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
-			size = ALIGN(size, SZ_1M);
-			align = ALIGN(size, SZ_1M);
-		}
-		flags |= SMEM_SECURE;
 	}
 
-	trace_msm_cvp_smem_buffer_dma_op_start("ALLOC", (u32)buffer_type,
+	trace_msm_cvp_smem_buffer_dma_op_start("ALLOC", (u32)ion_flags,
 		heap_mask, size, align, flags, map_kernel);
 	dbuf = ion_alloc(size, heap_mask, ion_flags);
 	if (IS_ERR_OR_NULL(dbuf)) {
 		dprintk(CVP_ERR,
-		"Failed to allocate shared memory = %zx, %#x\n",
-		size, flags);
+		"Failed to allocate shared memory = %x bytes, %llx, %x\n",
+		size, heap_mask, ion_flags);
 		rc = -ENOMEM;
 		goto fail_shared_mem_alloc;
 	}
-	trace_msm_cvp_smem_buffer_dma_op_end("ALLOC", (u32)buffer_type,
+	trace_msm_cvp_smem_buffer_dma_op_end("ALLOC", (u32)ion_flags,
 		heap_mask, size, align, flags, map_kernel);
 
 	mem->flags = flags;
-	mem->buffer_type = buffer_type;
+	mem->buffer_type = ion_flags;
 	mem->offset = 0;
 	mem->size = size;
 	mem->dma_buf = dbuf;
 	mem->kvaddr = NULL;
 
-	rc = msm_dma_get_device_address(dbuf, align, &iova,
-			&buffer_size, flags, buffer_type,
-			session_type, res, &mem->mapping_info);
+	rc = msm_dma_get_device_address(dbuf, align, &iova, &buffer_size, flags,
+			ion_flags, session_type, res, &mem->mapping_info);
 	if (rc) {
 		dprintk(CVP_ERR, "Failed to get device address: %d\n",
 			rc);
@@ -452,8 +405,7 @@ static int free_dma_mem(struct msm_cvp_smem *mem)
 		mem->kvaddr, mem->buffer_type);
 
 	if (mem->device_addr) {
-		msm_dma_put_device_address(mem->flags,
-			&mem->mapping_info, mem->buffer_type);
+		msm_dma_put_device_address(mem->flags, &mem->mapping_info);
 		mem->device_addr = 0x0;
 	}
 
@@ -477,8 +429,7 @@ static int free_dma_mem(struct msm_cvp_smem *mem)
 	return 0;
 }
 
-int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags,
-	enum hal_buffer buffer_type, int map_kernel,
+int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags, int map_kernel,
 	void *res, u32 session_type, struct msm_cvp_smem *smem)
 {
 	int rc = 0;
@@ -489,7 +440,7 @@ int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags,
 		return -EINVAL;
 	}
 
-	rc = alloc_dma_mem(size, align, flags, buffer_type, map_kernel,
+	rc = alloc_dma_mem(size, align, flags, map_kernel,
 				(struct msm_cvp_platform_resources *)res,
 				session_type, smem);
 
@@ -560,37 +511,33 @@ int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
 
 struct context_bank_info *msm_cvp_smem_get_context_bank(u32 session_type,
 	bool is_secure, struct msm_cvp_platform_resources *res,
-	enum hal_buffer buffer_type)
+	unsigned long ion_flags)
 {
 	struct context_bank_info *cb = NULL, *match = NULL;
+	char *search_str;
+	char *non_secure_cb = "cvp_hlos";
+	char *secure_nonpixel_cb = "cvp_sec_nonpixel";
+	char *secure_pixel_cb = "cvp_sec_pixel";
 
-	/*
-	 * HAL_BUFFER_INPUT is directly mapped to bitstream CB in DT
-	 * as the buffer type structure was initially designed
-	 * just for decoder. For Encoder, input should be mapped to
-	 * yuvpixel CB. Persist is mapped to nonpixel CB.
-	 * So swap the buffer types just in this local scope.
-	 */
-	if (is_secure && session_type == MSM_CVP_ENCODER) {
-		if (buffer_type == HAL_BUFFER_INPUT)
-			buffer_type = HAL_BUFFER_OUTPUT;
-		else if (buffer_type == HAL_BUFFER_OUTPUT)
-			buffer_type = HAL_BUFFER_INPUT;
-		else if (buffer_type == HAL_BUFFER_INTERNAL_PERSIST)
-			buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1;
-	}
+	if (ion_flags & ION_FLAG_CP_PIXEL)
+		search_str = secure_pixel_cb;
+	else if (ion_flags & ION_FLAG_CP_NON_PIXEL)
+		search_str = secure_nonpixel_cb;
+	else
+		search_str = non_secure_cb;
 
 	list_for_each_entry(cb, &res->context_banks, list) {
 		if (cb->is_secure == is_secure &&
-				cb->buffer_type & buffer_type) {
+			!strcmp(search_str, cb->name)) {
 			match = cb;
 			break;
 		}
 	}
+
 	if (!match)
 		dprintk(CVP_ERR,
 			"%s: cb not found for buffer_type %x, is_secure %d\n",
-			__func__, buffer_type, is_secure);
+			__func__, ion_flags, is_secure);
 
 	return match;
 }
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
index 4a59e57..ba47887 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
@@ -61,7 +61,18 @@ static int cvp_close(struct inode *inode, struct file *filp)
 
 static unsigned int cvp_poll(struct file *filp, struct poll_table_struct *p)
 {
-	return 0;
+	int rc = 0;
+	struct msm_cvp_inst *inst = filp->private_data;
+	unsigned long flags = 0;
+
+	poll_wait(filp, &inst->event_handler.wq, p);
+
+	spin_lock_irqsave(&inst->event_handler.lock, flags);
+	if (inst->event_handler.event == CVP_SSR_EVENT)
+		rc |= POLLPRI;
+	spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+
+	return rc;
 }
 
 static const struct file_operations cvp_fops = {
@@ -227,7 +238,6 @@ static ssize_t boot_store(struct device *dev,
 			const char *buf, size_t count)
 {
 	int rc = 0, val = 0;
-	struct msm_cvp_inst *inst;
 	static int booted;
 
 	rc = kstrtoint(buf, 0, &val);
@@ -238,6 +248,7 @@ static ssize_t boot_store(struct device *dev,
 	}
 
 	if (val > 0 && booted == 0) {
+		struct msm_cvp_inst *inst;
 		inst = msm_cvp_open(MSM_CORE_CVP, MSM_CVP_CORE);
 		if (!inst) {
 			dprintk(CVP_ERR,
@@ -250,8 +261,8 @@ static ssize_t boot_store(struct device *dev,
 			"Failed to close cvp instance\n");
 			return rc;
 		}
-		booted = 1;
 	}
+	booted = 1;
 	return count;
 }
 
@@ -392,6 +403,8 @@ static int msm_probe_cvp_device(struct platform_device *pdev)
 		goto err_fail_sub_device_probe;
 	}
 
+	atomic64_set(&core->kernel_trans_id, 0);
+
 	return rc;
 
 err_fail_sub_device_probe:
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index b74f3e2..f75a64a 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -105,15 +105,13 @@ static int _copy_sysprop_from_user(struct cvp_kmd_arg *kp,
 	if (get_user(k->prop_num, &u->prop_num))
 		return -EFAULT;
 
-	if (k->prop_num != 1) {
-		dprintk(CVP_ERR, "Only one prop allowed\n");
+	if (k->prop_num < 1 || k->prop_num > 32) {
+		dprintk(CVP_ERR, "Num of prop out of range %d\n", k->prop_num);
 		return -EFAULT;
 	}
 
-	if (get_user(k->prop_data.prop_type, &u->prop_data.prop_type))
-		return -EFAULT;
-
-	return 0;
+	return _copy_pkt_from_user(kp, up,
+		(k->prop_num*((sizeof(struct cvp_kmd_sys_property)>>2)+1)));
 }
 
 static int _copy_pkt_to_user(struct cvp_kmd_arg *kp,
@@ -206,7 +204,7 @@ static void print_hfi_short(struct cvp_kmd_arg __user *up)
 			get_user(words[4], &pkt->pkt_data[1]))
 		dprintk(CVP_ERR, "Failed to print ioctl cmd\n");
 
-	dprintk(CVP_DBG, "IOCTL cmd type %d, offset %d, num %d, pkt %d %d\n",
+	dprintk(CVP_DBG, "IOCTL cmd type %#x, offset %d, num %d, pkt %d %#x\n",
 			words[0], words[1], words[2], words[3], words[4]);
 }
 
@@ -411,6 +409,14 @@ static int convert_from_user(struct cvp_kmd_arg *kp,
 		}
 		break;
 	}
+	case CVP_KMD_SET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_from_user(kp, up)) {
+			dprintk(CVP_ERR, "Failed to set sysprop from user\n");
+			return -EFAULT;
+		}
+		break;
+	}
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -579,6 +585,8 @@ static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
 		}
 		break;
 	}
+	case CVP_KMD_SET_SYS_PROPERTY:
+		break;
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -614,8 +622,8 @@ static long cvp_ioctl(struct msm_cvp_inst *inst,
 
 	rc = msm_cvp_private((void *)inst, cmd, &karg);
 	if (rc) {
-		dprintk(CVP_ERR, "%s: failed cmd type %x\n",
-			__func__, karg.type);
+		dprintk(CVP_ERR, "%s: failed cmd type %x %d\n",
+			__func__, karg.type, rc);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index aafb0c7..fec1e40 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -22,6 +22,9 @@
 #include <linux/types.h>
 #include <linux/uaccess.h>
 #include <linux/mailbox/qmp.h>
+#include <linux/msm-bus.h>
+#include <linux/mailbox_controller.h>
+#include <linux/reset.h>
 
 #include "npu_mgr.h"
 
@@ -29,7 +32,7 @@
  * Defines
  * -------------------------------------------------------------------------
  */
-#define NPU_MAX_MBOX_NUM	    2
+#define NPU_MAX_MBOX_NUM	    4
 #define NPU_MBOX_LOW_PRI	    0
 #define NPU_MBOX_HIGH_PRI	    1
 
@@ -90,6 +93,7 @@ struct npu_ion_buf {
 struct npu_clk {
 	struct clk *clk;
 	char clk_name[NPU_MAX_DT_NAME_LEN];
+	struct reset_control *reset;
 };
 
 struct npu_regulator {
@@ -116,6 +120,9 @@ struct npu_mbox {
 	struct mbox_chan *chan;
 	struct npu_device *npu_dev;
 	uint32_t id;
+	uint32_t client_id;
+	uint32_t signal_id;
+	bool send_data_pending;
 };
 
 /**
@@ -201,6 +208,27 @@ struct npu_io_data {
 	void __iomem *base;
 };
 
+#define MAX_PATHS	2
+#define DBL_BUF	2
+#define MBYTE (1ULL << 20)
+
+struct npu_bwctrl {
+	struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+	struct msm_bus_paths bw_levels[DBL_BUF];
+	struct msm_bus_scale_pdata bw_data;
+	uint32_t bus_client;
+	int cur_ab;
+	int cur_ib;
+	int cur_idx;
+	uint32_t num_paths;
+};
+
+struct mbox_bridge_data {
+	struct mbox_controller mbox;
+	struct mbox_chan *chans;
+	void *priv_data;
+};
+
 struct npu_device {
 	struct mutex dev_lock;
 
@@ -233,15 +261,20 @@ struct npu_device {
 	struct npu_smmu_ctx smmu_ctx;
 	struct npu_debugfs_ctx debugfs_ctx;
 
-	struct npu_mbox mbox_aop;
+	struct npu_mbox *mbox_aop;
+	struct npu_mbox mbox[NPU_MAX_MBOX_NUM];
+	struct mbox_bridge_data mbox_bridge_data;
 
 	struct thermal_cooling_device *tcdev;
 	struct npu_pwrctrl pwrctrl;
 	struct npu_thermalctrl thermalctrl;
+	struct npu_bwctrl bwctrl;
 
 	struct llcc_slice_desc *sys_cache;
 	uint32_t execute_v2_flag;
 	bool cxlimit_registered;
+
+	uint32_t hw_version;
 };
 
 struct npu_kevent {
@@ -259,6 +292,14 @@ struct npu_client {
 	struct list_head mapped_buffer_list;
 };
 
+struct ipcc_mbox_chan {
+	u16 client_id;
+	u16 signal_id;
+	struct mbox_chan *chan;
+	struct npu_mbox *npu_mbox;
+	struct npu_device *npu_dev;
+};
+
 /* -------------------------------------------------------------------------
  * Function Prototypes
  * -------------------------------------------------------------------------
@@ -283,5 +324,6 @@ int enable_fw(struct npu_device *npu_dev);
 void disable_fw(struct npu_device *npu_dev);
 int load_fw(struct npu_device *npu_dev);
 int unload_fw(struct npu_device *npu_dev);
+int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab);
 
 #endif /* _NPU_COMMON_H */
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 79fa0cb..8653f38 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -436,6 +436,12 @@ int npu_debugfs_init(struct npu_device *npu_dev)
 		goto err;
 	}
 
+	if (!debugfs_create_bool("auto_pil_disable", 0644,
+		debugfs->root, &(host_ctx->auto_pil_disable))) {
+		NPU_ERR("debugfs_creat_bool fail for auto pil\n");
+		goto err;
+	}
+
 	if (!debugfs_create_u32("fw_dbg_mode", 0644,
 		debugfs->root, &(host_ctx->fw_dbg_mode))) {
 		NPU_ERR("debugfs_create_u32 fail for fw_dbg_mode\n");
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index e4a751d..02fe246 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -91,6 +91,7 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
 static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p);
 static int npu_parse_dt_clock(struct npu_device *npu_dev);
 static int npu_parse_dt_regulator(struct npu_device *npu_dev);
+static int npu_parse_dt_bw(struct npu_device *npu_dev);
 static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
 		struct device_node *node);
 static int npu_pwrctrl_init(struct npu_device *npu_dev);
@@ -132,6 +133,12 @@ static const char * const npu_exclude_rate_clocks[] = {
 	"s2p_clk",
 };
 
+static const char * const npu_require_reset_clocks[] = {
+	"dpm_temp_clk",
+	"llm_temp_clk",
+	"llm_curr_clk",
+};
+
 static const struct npu_irq npu_irq_info[] = {
 	{"ipc_irq", 0, IRQF_TRIGGER_RISING | IRQF_ONESHOT, npu_ipc_intr_hdlr},
 	{"general_irq", 0,  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -319,7 +326,7 @@ static ssize_t boot_store(struct device *dev,
 			return rc;
 		}
 	} else {
-		NPU_INFO("%s: unload fw\n", __func__);
+		NPU_DBG("%s: unload fw\n", __func__);
 		unload_fw(npu_dev);
 	}
 
@@ -582,6 +589,21 @@ static bool npu_is_exclude_rate_clock(const char *clk_name)
 	return ret;
 }
 
+static bool npu_clk_need_reset(const char *clk_name)
+{
+	int ret = false;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(npu_require_reset_clocks); i++) {
+		if (!strcmp(clk_name, npu_require_reset_clocks[i])) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 {
 	int i, rc = 0;
@@ -604,6 +626,14 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 
 		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
 
+		if (core_clks[i].reset) {
+			NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
+			rc = reset_control_deassert(core_clks[i].reset);
+			if (rc)
+				NPU_WARN("deassert %s reset failed\n",
+					core_clks[i].clk_name);
+		}
+
 		rc = clk_prepare_enable(core_clks[i].clk);
 		if (rc) {
 			NPU_ERR("%s enable failed\n",
@@ -639,6 +669,14 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 			}
 			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 			clk_disable_unprepare(core_clks[i].clk);
+
+			if (core_clks[i].reset) {
+				NPU_DBG("Assert %s\n", core_clks[i].clk_name);
+				rc = reset_control_assert(core_clks[i].reset);
+				if (rc)
+					NPU_WARN("assert %s reset failed\n",
+						core_clks[i].clk_name);
+			}
 		}
 	}
 
@@ -673,6 +711,14 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 
 		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
+
+		if (core_clks[i].reset) {
+			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
+			rc = reset_control_assert(core_clks[i].reset);
+			if (rc)
+				NPU_WARN("assert %s reset failed\n",
+					core_clks[i].clk_name);
+		}
 	}
 }
 
@@ -1318,6 +1364,71 @@ static int npu_receive_event(struct npu_client *client,
 	return ret;
 }
 
+static int npu_set_property(struct npu_client *client,
+	unsigned long arg)
+{
+	struct msm_npu_property prop;
+	void __user *argp = (void __user *)arg;
+	int ret = -EINVAL;
+
+	ret = copy_from_user(&prop, argp, sizeof(prop));
+	if (ret) {
+		NPU_ERR("fail to copy from user\n");
+		return -EFAULT;
+	}
+
+	switch (prop.prop_id) {
+	default:
+		NPU_ERR("Not supported property %d\n", prop.prop_id);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int npu_get_property(struct npu_client *client,
+	unsigned long arg)
+{
+	struct msm_npu_property prop;
+	void __user *argp = (void __user *)arg;
+	int ret = -EINVAL;
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+	ret = copy_from_user(&prop, argp, sizeof(prop));
+	if (ret) {
+		NPU_ERR("fail to copy from user\n");
+		return -EFAULT;
+	}
+
+	switch (prop.prop_id) {
+	case MSM_NPU_PROP_ID_FW_STATE:
+		prop.prop_param[0] = host_ctx->fw_state;
+		break;
+	case MSM_NPU_PROP_ID_PERF_MODE_MAX:
+		prop.prop_param[0] = npu_dev->pwrctrl.num_pwrlevels;
+		break;
+	case MSM_NPU_PROP_ID_DRV_VERSION:
+		prop.prop_param[0] = 0;
+		break;
+	case MSM_NPU_PROP_ID_HARDWARE_VERSION:
+		prop.prop_param[0] = npu_dev->hw_version;
+		break;
+	default:
+		NPU_ERR("Not supported property %d\n", prop.prop_id);
+		return -EINVAL;
+	}
+
+	ret = copy_to_user(argp, &prop, sizeof(prop));
+	if (ret) {
+		pr_err("fail to copy to user\n");
+		return -EFAULT;
+	}
+
+	return ret;
+}
+
 static long npu_ioctl(struct file *file, unsigned int cmd,
 						 unsigned long arg)
 {
@@ -1352,6 +1463,12 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
 	case MSM_NPU_RECEIVE_EVENT:
 		ret = npu_receive_event(client, arg);
 		break;
+	case MSM_NPU_SET_PROP:
+		ret = npu_set_property(client, arg);
+		break;
+	case MSM_NPU_GET_PROP:
+		ret = npu_get_property(client, arg);
+		break;
 	default:
 		NPU_ERR("unexpected IOCTL %x\n", cmd);
 	}
@@ -1388,6 +1505,7 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
 	int num_clk;
 	struct npu_clk *core_clks = npu_dev->core_clks;
 	struct platform_device *pdev = npu_dev->pdev;
+	struct reset_control *reset;
 
 	num_clk = of_property_count_strings(pdev->dev.of_node,
 			"clock-names");
@@ -1413,6 +1531,15 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
 			rc = -EINVAL;
 			break;
 		}
+
+		if (npu_clk_need_reset(clock_name)) {
+			reset = devm_reset_control_get(&pdev->dev, clock_name);
+			if (IS_ERR(reset))
+				NPU_WARN("no reset for %s %d\n", clock_name,
+					PTR_ERR(reset));
+			else
+				core_clks[i].reset = reset;
+		}
 	}
 
 clk_err:
@@ -1460,6 +1587,91 @@ static int npu_parse_dt_regulator(struct npu_device *npu_dev)
 	return rc;
 }
 
+static int npu_parse_dt_bw(struct npu_device *npu_dev)
+{
+	int ret, len;
+	uint32_t ports[2];
+	struct platform_device *pdev = npu_dev->pdev;
+	struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,src-dst-ports", &len)) {
+		len /= sizeof(ports[0]);
+		if (len != 2) {
+			NPU_ERR("Unexpected number of ports\n");
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,src-dst-ports", ports, len);
+		if (ret) {
+			NPU_ERR("Failed to read bw property\n");
+			return ret;
+		}
+	} else {
+		NPU_ERR("can't find bw property\n");
+		return -EINVAL;
+	}
+
+	bwctrl->bw_levels[0].vectors = &bwctrl->vectors[0];
+	bwctrl->bw_levels[1].vectors = &bwctrl->vectors[MAX_PATHS];
+	bwctrl->bw_data.usecase = bwctrl->bw_levels;
+	bwctrl->bw_data.num_usecases = ARRAY_SIZE(bwctrl->bw_levels);
+	bwctrl->bw_data.name = dev_name(&pdev->dev);
+	bwctrl->bw_data.active_only = false;
+
+	bwctrl->bw_levels[0].vectors[0].src = ports[0];
+	bwctrl->bw_levels[0].vectors[0].dst = ports[1];
+	bwctrl->bw_levels[1].vectors[0].src = ports[0];
+	bwctrl->bw_levels[1].vectors[0].dst = ports[1];
+	bwctrl->bw_levels[0].num_paths = 1;
+	bwctrl->bw_levels[1].num_paths = 1;
+	bwctrl->num_paths = 1;
+
+	bwctrl->bus_client = msm_bus_scale_register_client(&bwctrl->bw_data);
+	if (!bwctrl->bus_client) {
+		NPU_ERR("Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	NPU_INFO("NPU BW client sets up successfully\n");
+
+	return 0;
+}
+
+int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
+{
+	int i, ret;
+	struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
+
+	if (!bwctrl->bus_client) {
+		NPU_DBG("bus client doesn't exist\n");
+		return 0;
+	}
+
+	if (bwctrl->cur_ib == new_ib && bwctrl->cur_ab == new_ab)
+		return 0;
+
+	i = (bwctrl->cur_idx + 1) % DBL_BUF;
+
+	bwctrl->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+	bwctrl->bw_levels[i].vectors[0].ab = new_ab / bwctrl->num_paths * MBYTE;
+	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
+
+	NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
+	if (ret) {
+		NPU_ERR("bandwidth request failed (%d)\n", ret);
+	} else {
+		bwctrl->cur_idx = i;
+		bwctrl->cur_ib = new_ib;
+		bwctrl->cur_ab = new_ab;
+	}
+
+	return ret;
+}
+
 static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
 		struct device_node *node)
 {
@@ -1641,35 +1853,247 @@ static int npu_irq_init(struct npu_device *npu_dev)
 	return ret;
 }
 
-static int npu_mbox_init(struct npu_device *npu_dev)
+/* -------------------------------------------------------------------------
+ * Mailbox
+ * -------------------------------------------------------------------------
+ */
+static int npu_ipcc_bridge_mbox_send_data(struct mbox_chan *chan, void *data)
 {
-	struct platform_device *pdev = npu_dev->pdev;
-	struct npu_mbox *mbox_aop = &npu_dev->mbox_aop;
+	struct ipcc_mbox_chan *ipcc_mbox_chan = chan->con_priv;
+	struct npu_device *npu_dev = ipcc_mbox_chan->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	unsigned long flags;
 
-	if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
-		mbox_aop->client.dev = &pdev->dev;
-		mbox_aop->client.tx_block = true;
-		mbox_aop->client.tx_tout = MBOX_OP_TIMEOUTMS;
-		mbox_aop->client.knows_txdone = false;
+	NPU_DBG("Generating IRQ for client_id: %u; signal_id: %u\n",
+		ipcc_mbox_chan->client_id, ipcc_mbox_chan->signal_id);
 
-		mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0);
-		if (IS_ERR(mbox_aop->chan)) {
-			NPU_WARN("aop mailbox is not available\n");
-			mbox_aop->chan = NULL;
-		}
-	}
+	spin_lock_irqsave(&host_ctx->bridge_mbox_lock, flags);
+	ipcc_mbox_chan->npu_mbox->send_data_pending = true;
+	queue_work(host_ctx->wq, &host_ctx->bridge_mbox_work);
+	spin_unlock_irqrestore(&host_ctx->bridge_mbox_lock, flags);
 
 	return 0;
 }
 
+static void npu_ipcc_bridge_mbox_shutdown(struct mbox_chan *chan)
+{
+	struct ipcc_mbox_chan *ipcc_mbox_chan = chan->con_priv;
+
+	chan->con_priv = NULL;
+	kfree(ipcc_mbox_chan);
+}
+
+static struct mbox_chan *npu_ipcc_bridge_mbox_xlate(
+	struct mbox_controller *mbox, const struct of_phandle_args *ph)
+{
+	int chan_id, i;
+	struct npu_device *npu_dev;
+	struct mbox_bridge_data *bridge_data;
+	struct ipcc_mbox_chan *ipcc_mbox_chan;
+
+	bridge_data = container_of(mbox, struct mbox_bridge_data, mbox);
+	if (WARN_ON(!bridge_data))
+		return ERR_PTR(-EINVAL);
+
+	npu_dev = bridge_data->priv_data;
+
+	if (ph->args_count != 2)
+		return ERR_PTR(-EINVAL);
+
+	for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
+		ipcc_mbox_chan = bridge_data->chans[chan_id].con_priv;
+
+		if (!ipcc_mbox_chan)
+			break;
+		else if (ipcc_mbox_chan->client_id == ph->args[0] &&
+				ipcc_mbox_chan->signal_id == ph->args[1])
+			return ERR_PTR(-EBUSY);
+	}
+
+	if (chan_id >= mbox->num_chans)
+		return ERR_PTR(-EBUSY);
+
+	/* search for target mailbox */
+	for (i = 0; i < NPU_MAX_MBOX_NUM; i++) {
+		if (npu_dev->mbox[i].chan &&
+			(npu_dev->mbox[i].client_id == ph->args[0]) &&
+			(npu_dev->mbox[i].signal_id == ph->args[1])) {
+			NPU_DBG("Find matched target mailbox %d\n", i);
+			break;
+		}
+	}
+
+	if (i == NPU_MAX_MBOX_NUM) {
+		NPU_ERR("Can't find matched target mailbox %d:%d\n",
+			ph->args[0], ph->args[1]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ipcc_mbox_chan = kzalloc(sizeof(*ipcc_mbox_chan), GFP_KERNEL);
+	if (!ipcc_mbox_chan)
+		return ERR_PTR(-ENOMEM);
+
+	ipcc_mbox_chan->client_id = ph->args[0];
+	ipcc_mbox_chan->signal_id = ph->args[1];
+	ipcc_mbox_chan->chan = &bridge_data->chans[chan_id];
+	ipcc_mbox_chan->npu_dev = npu_dev;
+	ipcc_mbox_chan->chan->con_priv = ipcc_mbox_chan;
+	ipcc_mbox_chan->npu_mbox = &npu_dev->mbox[i];
+
+	NPU_DBG("New mailbox channel: %u for client_id: %u; signal_id: %u\n",
+		chan_id, ipcc_mbox_chan->client_id,
+		ipcc_mbox_chan->signal_id);
+
+	return ipcc_mbox_chan->chan;
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+	.send_data = npu_ipcc_bridge_mbox_send_data,
+	.shutdown = npu_ipcc_bridge_mbox_shutdown
+};
+
+static int npu_setup_ipcc_bridge_mbox(struct npu_device *npu_dev)
+{
+	int i, j, ret;
+	int num_chans = 0;
+	struct mbox_controller *mbox;
+	struct device_node *client_dn;
+	struct of_phandle_args curr_ph;
+	struct device *dev = &npu_dev->pdev->dev;
+	struct device_node *controller_dn = dev->of_node;
+	struct mbox_bridge_data *mbox_data = &npu_dev->mbox_bridge_data;
+
+	NPU_DBG("Setup ipcc brige mbox\n");
+	/*
+	 * Find out the number of clients interested in this mailbox
+	 * and create channels accordingly.
+	 */
+	for_each_node_with_property(client_dn, "mboxes") {
+		if (!of_device_is_available(client_dn)) {
+			NPU_DBG("No node available\n");
+			continue;
+		}
+		i = of_count_phandle_with_args(client_dn,
+						"mboxes", "#mbox-cells");
+		for (j = 0; j < i; j++) {
+			ret = of_parse_phandle_with_args(client_dn, "mboxes",
+						"#mbox-cells", j, &curr_ph);
+			of_node_put(curr_ph.np);
+			if (!ret && curr_ph.np == controller_dn) {
+				NPU_DBG("Found a client\n");
+				num_chans++;
+				break;
+			}
+		}
+	}
+
+	/* If no clients are found, skip registering as a mbox controller */
+	if (!num_chans) {
+		NPU_WARN("Can't find ipcc bridge mbox client\n");
+		return 0;
+	}
+
+	mbox_data->chans = devm_kcalloc(dev, num_chans,
+					sizeof(struct mbox_chan), GFP_KERNEL);
+	if (!mbox_data->chans)
+		return -ENOMEM;
+
+	mbox_data->priv_data = npu_dev;
+	mbox = &mbox_data->mbox;
+	mbox->dev = dev;
+	mbox->num_chans = num_chans;
+	mbox->chans = mbox_data->chans;
+	mbox->ops = &ipcc_mbox_chan_ops;
+	mbox->of_xlate = npu_ipcc_bridge_mbox_xlate;
+	mbox->txdone_irq = false;
+	mbox->txdone_poll = false;
+
+	return mbox_controller_register(mbox);
+}
+
+static int npu_mbox_init(struct npu_device *npu_dev)
+{
+	struct platform_device *pdev = npu_dev->pdev;
+	struct npu_mbox *mbox = NULL;
+	struct property *prop;
+	const char *mbox_name;
+	uint32_t index = 0;
+	int ret = 0;
+	struct of_phandle_args curr_ph;
+
+	if (!of_get_property(pdev->dev.of_node, "mbox-names", NULL)  ||
+		!of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+		NPU_WARN("requires mbox-names and mboxes property\n");
+		return 0;
+	}
+
+	of_property_for_each_string(pdev->dev.of_node,
+		"mbox-names", prop, mbox_name) {
+		NPU_DBG("setup mbox[%d] %s\n", index, mbox_name);
+		mbox = &npu_dev->mbox[index];
+		mbox->client.dev = &pdev->dev;
+		mbox->client.knows_txdone = true;
+		mbox->chan = mbox_request_channel(&mbox->client, index);
+		if (IS_ERR(mbox->chan)) {
+			NPU_WARN("mailbox %s is not available\n", mbox_name);
+			mbox->chan = NULL;
+		} else if (!strcmp(mbox_name, "aop")) {
+			npu_dev->mbox_aop = mbox;
+		} else {
+			ret = of_parse_phandle_with_args(pdev->dev.of_node,
+				"mboxes", "#mbox-cells", index, &curr_ph);
+			of_node_put(curr_ph.np);
+			if (ret) {
+				NPU_WARN("can't get mailbox %s args\n",
+					mbox_name);
+			} else {
+				mbox->client_id = curr_ph.args[0];
+				mbox->signal_id = curr_ph.args[1];
+				NPU_DBG("argument for mailbox %x is %x %x\n",
+					mbox_name, curr_ph.args[0],
+					curr_ph.args[1]);
+			}
+		}
+		index++;
+	}
+
+	return npu_setup_ipcc_bridge_mbox(npu_dev);
+}
+
 static void npu_mbox_deinit(struct npu_device *npu_dev)
 {
-	if (npu_dev->mbox_aop.chan) {
-		mbox_free_channel(npu_dev->mbox_aop.chan);
-		npu_dev->mbox_aop.chan = NULL;
+	int i;
+
+	mbox_controller_unregister(&npu_dev->mbox_bridge_data.mbox);
+
+	for (i = 0; i < NPU_MAX_MBOX_NUM; i++) {
+		if (!npu_dev->mbox[i].chan)
+			continue;
+
+		mbox_free_channel(npu_dev->mbox[i].chan);
+		npu_dev->mbox[i].chan = NULL;
 	}
 }
 
+static int npu_hw_info_init(struct npu_device *npu_dev)
+{
+	int rc = 0;
+
+	npu_set_bw(npu_dev, 100, 100);
+	rc = npu_enable_core_power(npu_dev);
+	if (rc) {
+		NPU_ERR("Failed to enable power\n");
+		return rc;
+	}
+
+	npu_dev->hw_version = REGR(npu_dev, NPU_HW_VERSION);
+	NPU_DBG("NPU_HW_VERSION 0x%x\n", npu_dev->hw_version);
+	npu_disable_core_power(npu_dev);
+	npu_set_bw(npu_dev, 0, 0);
+
+	return rc;
+}
+
 /* -------------------------------------------------------------------------
  * Probe/Remove
  * -------------------------------------------------------------------------
@@ -1810,6 +2234,14 @@ static int npu_probe(struct platform_device *pdev)
 	if (rc)
 		goto error_get_dev_num;
 
+	rc = npu_parse_dt_bw(npu_dev);
+	if (rc)
+		NPU_WARN("Parse bw info failed\n");
+
+	rc = npu_hw_info_init(npu_dev);
+	if (rc)
+		goto error_get_dev_num;
+
 	rc = npu_pwrctrl_init(npu_dev);
 	if (rc)
 		goto error_get_dev_num;
@@ -1926,6 +2358,7 @@ static int npu_remove(struct platform_device *pdev)
 	unregister_chrdev_region(npu_dev->dev_num, 1);
 	platform_set_drvdata(pdev, NULL);
 	npu_mbox_deinit(npu_dev);
+	msm_bus_scale_unregister_client(npu_dev->bwctrl.bus_client);
 
 	g_npu_dev = NULL;
 
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index d915884..2f6e440 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -295,8 +295,6 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
 		goto map_end;
 	}
 
-	dma_sync_sg_for_device(&(npu_dev->pdev->dev), ion_buf->table->sgl,
-		ion_buf->table->nents, DMA_BIDIRECTIONAL);
 	ion_buf->iova = ion_buf->table->sgl->dma_address;
 	ion_buf->size = ion_buf->dma_buf->size;
 	*addr = ion_buf->iova;
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index c49ff0a..6f92a11 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -34,6 +34,8 @@
  */
 static void npu_ipc_irq_work(struct work_struct *work);
 static void npu_wdg_err_irq_work(struct work_struct *work);
+static void npu_bridge_mbox_work(struct work_struct *work);
+static void npu_disable_fw_work(struct work_struct *work);
 static void turn_off_fw_logging(struct npu_device *npu_dev);
 static int wait_for_status_ready(struct npu_device *npu_dev,
 	uint32_t status_reg, uint32_t status_bits);
@@ -53,21 +55,21 @@ static void host_session_msg_hdlr(struct npu_device *npu_dev);
 static void host_session_log_hdlr(struct npu_device *npu_dev);
 static int host_error_hdlr(struct npu_device *npu_dev, bool force);
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async);
+	struct npu_network *network, void *cmd_ptr, bool async, bool force);
 static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
 	void *cmd_ptr);
 static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt);
 static int npu_notify_aop(struct npu_device *npu_dev, bool on);
 static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	uint32_t pwr_level, bool post);
-static int load_fw_nolock(struct npu_device *npu_dev);
+static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
 static void disable_fw_nolock(struct npu_device *npu_dev);
 
 /* -------------------------------------------------------------------------
  * Function Definitions - Init / Deinit
  * -------------------------------------------------------------------------
  */
-static int load_fw_nolock(struct npu_device *npu_dev)
+static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
@@ -77,6 +79,12 @@ static int load_fw_nolock(struct npu_device *npu_dev)
 		return 0;
 	}
 
+	ret = npu_set_bw(npu_dev, 100, 100);
+	if (ret) {
+		NPU_ERR("Vote bandwidth failed\n");
+		return ret;
+	}
+
 	/* Boot the NPU subsystem */
 	host_ctx->subsystem_handle = subsystem_get_local("npu");
 	if (IS_ERR_OR_NULL(host_ctx->subsystem_handle)) {
@@ -98,6 +106,15 @@ static int load_fw_nolock(struct npu_device *npu_dev)
 	NPU_DBG("firmware init complete\n");
 
 	host_ctx->fw_state = FW_ENABLED;
+	if (enable) {
+		ret = npu_notify_fw_pwr_state(npu_dev,
+			npu_dev->pwrctrl.active_pwrlevel, true);
+		if (ret) {
+			NPU_ERR("notify fw pwr on failed\n");
+			goto load_fw_fail;
+		}
+		return ret;
+	}
 
 	reinit_completion(&host_ctx->fw_shutdown_done);
 	ret = npu_notify_fw_pwr_state(npu_dev, NPU_PWRLEVEL_OFF, false);
@@ -119,33 +136,61 @@ static int load_fw_nolock(struct npu_device *npu_dev)
 	npu_disable_irq(npu_dev);
 	npu_disable_sys_cache(npu_dev);
 	npu_disable_core_power(npu_dev);
+	npu_notify_aop(npu_dev, false);
 	if (!ret) {
 		host_ctx->fw_state = FW_LOADED;
 	} else {
-		if (!IS_ERR_OR_NULL(host_ctx->subsystem_handle))
+		if (!IS_ERR_OR_NULL(host_ctx->subsystem_handle)) {
 			subsystem_put_local(host_ctx->subsystem_handle);
+			npu_set_bw(npu_dev, 0, 0);
+		}
 		host_ctx->fw_state = FW_UNLOADED;
 	}
 
 	return ret;
 }
 
+static void npu_load_fw_work(struct work_struct *work)
+{
+	int ret;
+	struct npu_host_ctx *host_ctx;
+	struct npu_device *npu_dev;
+
+	host_ctx = container_of(work, struct npu_host_ctx, load_fw_work);
+	npu_dev = container_of(host_ctx, struct npu_device, host_ctx);
+
+	mutex_lock(&host_ctx->lock);
+	ret = load_fw_nolock(npu_dev, false);
+	mutex_unlock(&host_ctx->lock);
+
+	if (ret)
+		NPU_ERR("load fw failed %d\n", ret);
+}
+
 int load_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-	int ret = 0;
 
-	mutex_lock(&host_ctx->lock);
-	ret = load_fw_nolock(npu_dev);
-	mutex_unlock(&host_ctx->lock);
+	if (host_ctx->auto_pil_disable) {
+		NPU_WARN("auto pil is disabled\n");
+		return -EINVAL;
+	}
 
-	return ret;
+	if (host_ctx->wq)
+		queue_work(host_ctx->wq, &host_ctx->load_fw_work);
+
+	return 0;
 }
 
 int unload_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
+	if (host_ctx->auto_pil_disable) {
+		NPU_WARN("auto pil is disabled\n");
+		return 0;
+	}
+
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state == FW_UNLOADED) {
 		NPU_INFO("fw is unloaded already\n");
@@ -158,6 +203,7 @@ int unload_fw(struct npu_device *npu_dev)
 	}
 
 	subsystem_put_local(host_ctx->subsystem_handle);
+	npu_set_bw(npu_dev, 0, 0);
 	host_ctx->fw_state = FW_UNLOADED;
 	NPU_DBG("fw is unloaded\n");
 	mutex_unlock(&host_ctx->lock);
@@ -165,26 +211,30 @@ int unload_fw(struct npu_device *npu_dev)
 	return 0;
 }
 
-int enable_fw(struct npu_device *npu_dev)
+static int enable_fw_nolock(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
 
-	mutex_lock(&host_ctx->lock);
-
 	if (host_ctx->fw_state == FW_UNLOADED) {
-		ret = load_fw_nolock(npu_dev);
+		ret = load_fw_nolock(npu_dev,
+			host_ctx->auto_pil_disable ? true : false);
 		if (ret) {
 			NPU_ERR("load fw failed\n");
-			mutex_unlock(&host_ctx->lock);
 			return ret;
 		}
+
+		if (host_ctx->auto_pil_disable) {
+			host_ctx->fw_error = false;
+			host_ctx->fw_ref_cnt++;
+			mutex_unlock(&host_ctx->lock);
+			goto enable_log;
+		}
 	}
 
 	if (host_ctx->fw_state == FW_ENABLED) {
 		host_ctx->fw_ref_cnt++;
 		NPU_DBG("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
-		mutex_unlock(&host_ctx->lock);
 		return 0;
 	}
 
@@ -212,9 +262,8 @@ int enable_fw(struct npu_device *npu_dev)
 		goto enable_irq_fail;
 	}
 
+	/* set fw_state to FW_ENABLED before send IPC command */
 	host_ctx->fw_state = FW_ENABLED;
-	host_ctx->fw_error = false;
-	host_ctx->fw_ref_cnt++;
 
 	NPU_DBG("NPU powers up\n");
 
@@ -238,8 +287,11 @@ int enable_fw(struct npu_device *npu_dev)
 		ret = 0;
 	}
 
-	mutex_unlock(&host_ctx->lock);
+	host_ctx->fw_error = false;
+	host_ctx->fw_ref_cnt++;
 
+
+enable_log:
 	/* Set logging state */
 	if (!npu_hw_log_enabled()) {
 		NPU_DBG("fw logging disabled\n");
@@ -249,14 +301,25 @@ int enable_fw(struct npu_device *npu_dev)
 	return ret;
 
 notify_fw_pwr_fail:
+	host_ctx->fw_state = FW_LOADED;
 	npu_disable_irq(npu_dev);
 enable_irq_fail:
 	npu_disable_sys_cache(npu_dev);
 enable_sys_cache_fail:
 	npu_disable_core_power(npu_dev);
 enable_pw_fail:
-	host_ctx->fw_state = FW_LOADED;
+	return ret;
+}
+
+int enable_fw(struct npu_device *npu_dev)
+{
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret;
+
+	mutex_lock(&host_ctx->lock);
+	ret = enable_fw_nolock(npu_dev);
 	mutex_unlock(&host_ctx->lock);
+
 	return ret;
 }
 
@@ -288,7 +351,8 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
 		msleep(500);
 	}
 
-	if (!wait_for_completion_interruptible_timeout(
+	if (!host_ctx->auto_pil_disable
+		&& !wait_for_completion_interruptible_timeout(
 		&host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT))
 		NPU_ERR("Wait for fw shutdown timedout\n");
 
@@ -300,6 +364,13 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
 	NPU_DBG("firmware is disabled\n");
 	npu_notify_aop(npu_dev, false);
 	complete(&host_ctx->fw_deinit_done);
+
+	if (host_ctx->auto_pil_disable) {
+		subsystem_put_local(host_ctx->subsystem_handle);
+		host_ctx->fw_state = FW_UNLOADED;
+		NPU_DBG("fw is unloaded\n");
+		npu_set_bw(npu_dev, 0, 0);
+	}
 }
 
 void disable_fw(struct npu_device *npu_dev)
@@ -318,19 +389,8 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	struct ipc_cmd_notify_pwr_pkt pwr_notify_pkt;
 	int ret = 0;
-	bool shutdown = false, bringup = false;
 	uint32_t reg_val;
 
-	if (post && (pwr_level != NPU_PWRLEVEL_OFF)) {
-		NPU_DBG("Notify fw BRINGUP\n");
-		bringup = true;
-	}
-
-	if (!post && (pwr_level == NPU_PWRLEVEL_OFF)) {
-		NPU_DBG("Notify fw SHUTDOWN\n");
-		shutdown = true;
-	}
-
 	/* Clear PWR_NOTIFY bits before sending cmd */
 	reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
 	reg_val &=  ~(FW_CTRL_STATUS_PWR_NOTIFY_ERR_VAL|
@@ -338,6 +398,12 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, reg_val);
 	REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
 
+	if (pwr_level == NPU_PWRLEVEL_OFF)
+		NPU_DBG("Notify fw power off\n");
+	else
+		NPU_DBG("Notify fw power level %d [%s]", pwr_level,
+			post ? "post" : "pre");
+
 	/* send IPC command to FW */
 	pwr_notify_pkt.header.cmd_type = NPU_IPC_CMD_NOTIFY_PWR;
 	pwr_notify_pkt.header.size = sizeof(struct ipc_cmd_notify_pwr_pkt);
@@ -395,6 +461,7 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 		 * It will be called during initial load fw
 		 * or subsyste restart
 		 */
+		npu_notify_aop(npu_dev, true);
 		ret = npu_enable_core_power(npu_dev);
 		if (ret) {
 			NPU_WARN("Enable core power failed\n");
@@ -407,7 +474,7 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 			break;
 		}
 
-		npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, 0);
+		npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, 3);
 
 		/* Clear control/status registers */
 		REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
@@ -445,10 +512,11 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 		/* Prepare for unloading fw via PIL */
 		if (host_ctx->fw_state == FW_ENABLED) {
 			/* only happens during subsystem_restart */
+			host_ctx->fw_state = FW_UNLOADED;
 			npu_disable_irq(npu_dev);
 			npu_disable_sys_cache(npu_dev);
 			npu_disable_core_power(npu_dev);
-			host_ctx->fw_state = FW_LOADED;
+			npu_notify_aop(npu_dev, false);
 		}
 		break;
 	}
@@ -473,6 +541,7 @@ int npu_host_init(struct npu_device *npu_dev)
 	init_completion(&host_ctx->fw_bringup_done);
 	init_completion(&host_ctx->fw_shutdown_done);
 	mutex_init(&host_ctx->lock);
+	spin_lock_init(&host_ctx->bridge_mbox_lock);
 	atomic_set(&host_ctx->ipc_trans_id, 1);
 
 	host_ctx->npu_dev = npu_dev;
@@ -491,8 +560,17 @@ int npu_host_init(struct npu_device *npu_dev)
 	} else {
 		INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
 		INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
+		INIT_WORK(&host_ctx->bridge_mbox_work, npu_bridge_mbox_work);
+		INIT_WORK(&host_ctx->load_fw_work, npu_load_fw_work);
+		INIT_DELAYED_WORK(&host_ctx->disable_fw_work,
+			npu_disable_fw_work);
 	}
 
+	if (npu_dev->hw_version != 0x20000000)
+		host_ctx->auto_pil_disable = true;
+	else
+		host_ctx->auto_pil_disable = false;
+
 	return sts;
 }
 
@@ -720,6 +798,79 @@ static void npu_wdg_err_irq_work(struct work_struct *work)
 	host_error_hdlr(npu_dev, false);
 }
 
+static void npu_disable_fw_work(struct work_struct *work)
+{
+	struct npu_host_ctx *host_ctx;
+	struct npu_device *npu_dev;
+
+	NPU_DBG("Enter disable fw work\n");
+	host_ctx = container_of(work, struct npu_host_ctx,
+		disable_fw_work.work);
+	npu_dev = container_of(host_ctx, struct npu_device, host_ctx);
+
+	mutex_lock(&host_ctx->lock);
+	disable_fw_nolock(npu_dev);
+	host_ctx->bridge_mbox_pwr_on = false;
+	mutex_unlock(&host_ctx->lock);
+	NPU_DBG("Exit disable fw work\n");
+}
+
+static int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx,
+	struct npu_mbox *mbox, void *data)
+{
+	NPU_DBG("Generating IRQ for client_id: %u; signal_id: %u\n",
+		mbox->client_id, mbox->signal_id);
+	mbox_send_message(mbox->chan, NULL);
+	mbox_client_txdone(mbox->chan, 0);
+	mbox->send_data_pending = false;
+
+	return 0;
+}
+
+static void npu_bridge_mbox_work(struct work_struct *work)
+{
+	int i, ret;
+	struct npu_host_ctx *host_ctx;
+	struct npu_device *npu_dev;
+	unsigned long flags;
+
+	NPU_DBG("Enter bridge mbox work\n");
+	host_ctx = container_of(work, struct npu_host_ctx, bridge_mbox_work);
+	npu_dev = container_of(host_ctx, struct npu_device, host_ctx);
+
+	mutex_lock(&host_ctx->lock);
+	if (host_ctx->fw_state == FW_UNLOADED) {
+		NPU_WARN("NPU fw is not loaded\n");
+		mutex_unlock(&host_ctx->lock);
+		return;
+	}
+
+	/* queue or modify delayed work to disable fw */
+	mod_delayed_work(host_ctx->wq, &host_ctx->disable_fw_work,
+		NPU_MBOX_IDLE_TIMEOUT);
+
+	if (!host_ctx->bridge_mbox_pwr_on) {
+		ret = enable_fw_nolock(npu_dev);
+		if (ret) {
+			mutex_unlock(&host_ctx->lock);
+			NPU_ERR("Enable fw failed\n");
+			return;
+		}
+		host_ctx->bridge_mbox_pwr_on = true;
+		NPU_DBG("Fw is enabled by mbox\n");
+	}
+
+	spin_lock_irqsave(&host_ctx->bridge_mbox_lock, flags);
+	for (i = 0; i < NPU_MAX_MBOX_NUM; i++)
+		if (npu_dev->mbox[i].send_data_pending)
+			npu_bridge_mbox_send_data(host_ctx,
+				&npu_dev->mbox[i], NULL);
+
+	spin_unlock_irqrestore(&host_ctx->bridge_mbox_lock, flags);
+	mutex_unlock(&host_ctx->lock);
+	NPU_DBG("Exit bridge mbox work\n");
+}
+
 static void turn_off_fw_logging(struct npu_device *npu_dev)
 {
 	struct ipc_cmd_log_state_pkt log_packet;
@@ -780,7 +931,7 @@ static int npu_notify_aop(struct npu_device *npu_dev, bool on)
 	struct qmp_pkt pkt;
 	int buf_size, rc = 0;
 
-	if (!npu_dev->mbox_aop.chan) {
+	if (!npu_dev->mbox_aop || !npu_dev->mbox_aop->chan) {
 		NPU_WARN("aop mailbox channel is not available\n");
 		return 0;
 	}
@@ -797,7 +948,7 @@ static int npu_notify_aop(struct npu_device *npu_dev, bool on)
 	pkt.size = (buf_size + 3) & ~0x3;
 	pkt.data = buf;
 
-	rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt);
+	rc = mbox_send_message(npu_dev->mbox_aop->chan, &pkt);
 	if (rc < 0)
 		NPU_ERR("qmp message send failed, ret=%d\n", rc);
 
@@ -1249,8 +1400,16 @@ int32_t npu_host_get_info(struct npu_device *npu_dev,
 int32_t npu_host_map_buf(struct npu_client *client,
 			struct msm_npu_map_buf_ioctl *map_ioctl)
 {
-	return npu_mem_map(client, map_ioctl->buf_ion_hdl, map_ioctl->size,
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret;
+
+	mutex_lock(&host_ctx->lock);
+	ret = npu_mem_map(client, map_ioctl->buf_ion_hdl, map_ioctl->size,
 		&map_ioctl->npu_phys_addr);
+	mutex_unlock(&host_ctx->lock);
+
+	return ret;
 }
 
 int32_t npu_host_unmap_buf(struct npu_client *client,
@@ -1268,13 +1427,16 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
 		&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
 		NPU_WARN("npu: wait for fw_deinit_done time out\n");
 
+	mutex_lock(&host_ctx->lock);
 	npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl,
 		unmap_ioctl->npu_phys_addr);
+	mutex_unlock(&host_ctx->lock);
+
 	return 0;
 }
 
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async)
+	struct npu_network *network, void *cmd_ptr, bool async, bool force)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
@@ -1285,7 +1447,7 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		(host_ctx->fw_state != FW_ENABLED)) {
 		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
-	} else if (network->cmd_pending) {
+	} else if (network->cmd_pending && !force) {
 		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
@@ -1457,7 +1619,7 @@ int32_t npu_host_load_network(struct npu_client *client,
 	load_packet.header.size = sizeof(struct ipc_cmd_load_pkt);
 	load_packet.header.trans_id =
 		atomic_add_return(1, &host_ctx->ipc_trans_id);
-	load_packet.header.flags = 0;
+	load_packet.header.flags = load_ioctl->flags;
 
 	/* ACO Buffer. Use the npu mapped aco address */
 	load_packet.buf_pkt.address = (uint64_t)network->phy_add;
@@ -1466,7 +1628,8 @@ int32_t npu_host_load_network(struct npu_client *client,
 
 	/* NPU_IPC_CMD_LOAD will go onto IPC_QUEUE_APPS_EXEC */
 	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &load_packet, false);
+	ret = npu_send_network_cmd(npu_dev, network, &load_packet, false,
+		false);
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
 		goto error_free_network;
@@ -1523,6 +1686,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	struct npu_device *npu_dev = client->npu_dev;
 	struct npu_network *network;
 	struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
+	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	uint32_t num_patch_params, pkt_size;
 
@@ -1579,7 +1743,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	load_packet->header.size = pkt_size;
 	load_packet->header.trans_id =
 		atomic_add_return(1, &host_ctx->ipc_trans_id);
-	load_packet->header.flags = 0;
+	load_packet->header.flags = load_ioctl->flags;
 
 	/* ACO Buffer. Use the npu mapped aco address */
 	load_packet->buf_pkt.address = (uint32_t)network->phy_add;
@@ -1590,7 +1754,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	/* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
 	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, load_packet, false);
+	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
 	if (ret) {
 		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
 		goto error_free_network;
@@ -1598,7 +1762,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&network->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1608,10 +1772,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
 		ret = -ETIMEDOUT;
-		goto error_free_network;
-	} else if (ret < 0) {
-		NPU_ERR("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
-		goto error_free_network;
+		goto error_load_network;
 	}
 
 	if (network->fw_error) {
@@ -1632,6 +1793,18 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	return ret;
 
+error_load_network:
+	NPU_DBG("Unload network %lld\n", network->id);
+	/* send NPU_IPC_CMD_UNLOAD command to fw */
+	unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
+	unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
+	unload_packet.header.trans_id =
+		atomic_add_return(1, &host_ctx->ipc_trans_id);
+	unload_packet.header.flags = 0;
+	unload_packet.network_hdl = (uint32_t)network->network_hdl;
+	npu_send_network_cmd(npu_dev, network, &unload_packet, false, true);
+	/* wait 200 ms to make sure fw has processed this command */
+	msleep(200);
 error_free_network:
 	kfree(load_packet);
 	network_put(network);
@@ -1683,7 +1856,8 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	/* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
 	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false);
+	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
+		false);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
@@ -1702,7 +1876,7 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&network->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1714,10 +1888,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
-	} else if (ret < 0) {
-		NPU_ERR("Wait for unload done interrupted by signal\n");
-		network->cmd_pending = false;
-		goto free_network;
 	}
 
 	if (network->fw_error) {
@@ -1817,7 +1987,8 @@ int32_t npu_host_exec_network(struct npu_client *client,
 
 	/* Send it on the high priority queue */
 	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl);
+	ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl,
+		false);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
@@ -1831,7 +2002,7 @@ int32_t npu_host_exec_network(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&network->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1844,10 +2015,6 @@ int32_t npu_host_exec_network(struct npu_client *client,
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto exec_done;
-	} else if (ret == -ERESTARTSYS) {
-		NPU_ERR("Wait for execution done interrupted by signal\n");
-		network->cmd_pending = false;
-		goto exec_done;
 	}
 
 	if (network->fw_error) {
@@ -1863,10 +2030,10 @@ int32_t npu_host_exec_network(struct npu_client *client,
 	mutex_unlock(&host_ctx->lock);
 
 	/*
-	 * treat network execution timed our or interrupted by signal
-	 * as error in order to force npu fw to stop execution
+	 * treat network execution timed out as error in order to
+	 * force npu fw to stop execution
 	 */
-	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
+	if (ret == -ETIMEDOUT) {
 		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
@@ -1954,7 +2121,8 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 
 	/* Send it on the high priority queue */
 	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl);
+	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
+		false);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
@@ -1968,7 +2136,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&network->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1981,10 +2149,6 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
-	} else if (ret == -ERESTARTSYS) {
-		NPU_ERR("Wait for execution_v2 done interrupted by signal\n");
-		network->cmd_pending = false;
-		goto free_exec_packet;
 	}
 
 	if (network->fw_error) {
@@ -2014,10 +2178,10 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	mutex_unlock(&host_ctx->lock);
 
 	/*
-	 * treat network execution timed our or interrupted by signal
-	 * as error in order to force npu fw to stop execution
+	 * treat network execution timed out as error in order to
+	 * force npu fw to stop execution
 	 */
-	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
+	if (ret == -ETIMEDOUT) {
 		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index b293f87..ecf7fc4 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -22,6 +22,8 @@
 #define NW_CMD_TIMEOUT msecs_to_jiffies(NW_CMD_TIMEOUT_MS)
 #define NW_DEBUG_TIMEOUT_MS (1000 * 60 * 30) /* set for 30 minutes */
 #define NW_DEBUG_TIMEOUT msecs_to_jiffies(NW_DEBUG_TIMEOUT_MS)
+#define NPU_MBOX_IDLE_TIMEOUT_MS 500 /* set for 500ms */
+#define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
 #define FIRMWARE_VERSION 0x00001000
 #define MAX_LOADED_NETWORK 32
 #define NPU_IPC_BUF_LENGTH 512
@@ -74,6 +76,9 @@ struct npu_host_ctx {
 	int32_t power_vote_num;
 	struct work_struct ipc_irq_work;
 	struct work_struct wdg_err_irq_work;
+	struct work_struct bridge_mbox_work;
+	struct work_struct load_fw_work;
+	struct delayed_work disable_fw_work;
 	struct workqueue_struct *wq;
 	struct completion misc_cmd_done;
 	struct completion fw_deinit_done;
@@ -82,6 +87,7 @@ struct npu_host_ctx {
 	int32_t network_num;
 	struct npu_network networks[MAX_LOADED_NETWORK];
 	bool sys_cache_disable;
+	bool auto_pil_disable;
 	uint32_t fw_dbg_mode;
 	uint32_t exec_flags_override;
 	atomic_t ipc_trans_id;
@@ -95,6 +101,8 @@ struct npu_host_ctx {
 	uint32_t misc_cmd_result;
 	struct notifier_block nb;
 	void *notif_hdle;
+	spinlock_t bridge_mbox_lock;
+	bool bridge_mbox_pwr_on;
 };
 
 struct npu_device;
diff --git a/drivers/media/platform/msm/sde/Kconfig b/drivers/media/platform/msm/sde/Kconfig
deleted file mode 100644
index 6529b48..0000000
--- a/drivers/media/platform/msm/sde/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config MSM_SDE_ROTATOR
-	bool "QTI V4L2 based SDE Rotator"
-	depends on ARCH_QCOM && VIDEO_V4L2
-	select V4L2_MEM2MEM_DEV
-	select VIDEOBUF2_CORE
-	select SYNC_FILE
-	help
-	  Enable support of V4L2 rotator driver.
-	  This feature enables the MSM SDE rotator v4l2
-	  video driver for Qualcomm Technologies, Inc.
-	  SYNC_FILE objects are used to help manage buffer
-	  synchronization.
-
-config MSM_SDE_ROTATOR_EVTLOG_DEBUG
-	depends on MSM_SDE_ROTATOR
-	bool "Enable sde rotator debugging"
-	help
-	The SDE rotator debugging provides support to enable rotator debugging
-	features to: Dump rotator registers during driver errors, panic
-	driver during fatal errors and enable some rotator driver logging
-	into an internal buffer (this avoids logging overhead).
diff --git a/drivers/media/platform/msm/sde/Makefile b/drivers/media/platform/msm/sde/Makefile
deleted file mode 100644
index cedb164..0000000
--- a/drivers/media/platform/msm/sde/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-obj-$(CONFIG_MSM_SDE_ROTATOR)	    += rotator/
diff --git a/drivers/media/platform/msm/sde/rotator/Makefile b/drivers/media/platform/msm/sde/rotator/Makefile
deleted file mode 100644
index bae5a67b..0000000
--- a/drivers/media/platform/msm/sde/rotator/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-ccflags-y += -I$(src) -Idrivers/staging/android
-
-obj-y := \
-		sde_rotator_dev.o \
-		sde_rotator_core.o \
-		sde_rotator_base.o \
-		sde_rotator_formats.o \
-		sde_rotator_util.o \
-		sde_rotator_io_util.o \
-		sde_rotator_smmu.o
-
-obj-y += \
-		sde_rotator_r1_wb.o \
-		sde_rotator_r1_pipe.o \
-		sde_rotator_r1_ctl.o \
-		sde_rotator_r1.o
-
-obj-y += \
-		sde_rotator_r3.o
-
-obj-$(CONFIG_SYNC_FILE) += \
-		sde_rotator_sync.o
-
-obj-$(CONFIG_DEBUG_FS) += \
-		sde_rotator_debug.o \
-		sde_rotator_r1_debug.o \
-		sde_rotator_r3_debug.o
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
deleted file mode 100644
index 17416a1..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ /dev/null
@@ -1,892 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/debugfs.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#include <linux/regulator/consumer.h>
-
-#define CREATE_TRACE_POINTS
-#include "sde_rotator_base.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_trace.h"
-#include "sde_rotator_debug.h"
-#include "sde_rotator_dev.h"
-
-static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
-{
-	u64 result = (val * (u64)numer);
-
-	do_div(result, denom);
-	return result;
-}
-
-static inline u64 apply_fudge_factor(u64 val,
-	struct sde_mult_factor *factor)
-{
-	return fudge_factor(val, factor->numer, factor->denom);
-}
-
-static inline u64 apply_inverse_fudge_factor(u64 val,
-	struct sde_mult_factor *factor)
-{
-	return fudge_factor(val, factor->denom, factor->numer);
-}
-
-static inline bool validate_comp_ratio(struct sde_mult_factor *factor)
-{
-	return factor->numer && factor->denom;
-}
-
-u32 sde_apply_comp_ratio_factor(u32 quota,
-	struct sde_mdp_format_params *fmt,
-	struct sde_mult_factor *factor)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	if (!mdata || !test_bit(SDE_QOS_OVERHEAD_FACTOR,
-		      mdata->sde_qos_map))
-		return quota;
-
-	/* apply compression ratio, only for compressed formats */
-	if (sde_mdp_is_ubwc_format(fmt) &&
-	    validate_comp_ratio(factor))
-		quota = apply_inverse_fudge_factor(quota, factor);
-
-	return quota;
-}
-
-#define RES_1080p		(1088*1920)
-#define RES_UHD		(3840*2160)
-#define XIN_HALT_TIMEOUT_US	0x4000
-
-static int sde_mdp_wait_for_xin_halt(u32 xin_id)
-{
-	void __iomem *vbif_base;
-	u32 status;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 idle_mask = BIT(xin_id);
-	int rc;
-
-	vbif_base = mdata->vbif_nrt_io.base;
-
-	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
-		status, (status & idle_mask),
-		1000, XIN_HALT_TIMEOUT_US);
-	if (rc == -ETIMEDOUT) {
-		SDEROT_ERR("VBIF client %d not halting. TIMEDOUT.\n",
-			xin_id);
-	} else {
-		SDEROT_DBG("VBIF client %d is halted\n", xin_id);
-	}
-
-	return rc;
-}
-
-/**
- * force_on_xin_clk() - enable/disable the force-on for the pipe clock
- * @bit_off: offset of the bit to enable/disable the force-on.
- * @reg_off: register offset for the clock control.
- * @enable: boolean to indicate if the force-on of the clock needs to be
- * enabled or disabled.
- *
- * This function returns:
- * true - if the clock is forced-on by this function
- * false - if the clock was already forced on
- * It is the caller responsibility to check if this function is forcing
- * the clock on; if so, it will need to remove the force of the clock,
- * otherwise it should avoid to remove the force-on.
- * Clocks must be on when calling this function.
- */
-static bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
-{
-	u32 val;
-	u32 force_on_mask;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	bool clk_forced_on = false;
-
-	force_on_mask = BIT(bit_off);
-	val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
-
-	clk_forced_on = !(force_on_mask & val);
-
-	if (enable)
-		val |= force_on_mask;
-	else
-		val &= ~force_on_mask;
-
-	writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
-
-	return clk_forced_on;
-}
-
-void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 reg_val;
-	bool forced_on;
-
-	if (!mdata || !params || !params->reg_off_mdp_clk_ctrl) {
-		SDEROT_ERR("null input parameter\n");
-		return;
-	}
-
-	if (params->xin_id > MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1) {
-		SDEROT_ERR("xin_id:%d exceed max limit\n", params->xin_id);
-		return;
-	}
-
-	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
-		params->reg_off_mdp_clk_ctrl, true);
-
-	SDEROT_EVTLOG(forced_on, params->xin_id);
-
-	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
-		reg_val | BIT(params->xin_id));
-
-	/* this is a polling operation */
-	sde_mdp_wait_for_xin_halt(params->xin_id);
-
-	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
-		reg_val & ~BIT(params->xin_id));
-
-	if (forced_on)
-		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
-			params->reg_off_mdp_clk_ctrl, false);
-}
-
-u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_mdp_format_params *fmt;
-	u32 ot_lim;
-	u32 is_yuv;
-	u64 res;
-
-	ot_lim = (is_rd) ? mdata->default_ot_rd_limit :
-				mdata->default_ot_wr_limit;
-
-	/*
-	 * If default ot is not set from dt,
-	 * then do not configure it.
-	 */
-	if (ot_lim == 0)
-		goto exit;
-
-	/* Modify the limits if the target and the use case requires it */
-	if (false == test_bit(SDE_QOS_OTLIM, mdata->sde_qos_map))
-		goto exit;
-
-	width = min_t(u32, width, SDE_ROT_MAX_IMG_WIDTH);
-	height = min_t(u32, height, SDE_ROT_MAX_IMG_HEIGHT);
-
-	res = width * height;
-	res = res * fps;
-
-	fmt = sde_get_format_params(pixfmt);
-
-	if (!fmt) {
-		SDEROT_WARN("invalid format %8.8x\n", pixfmt);
-		goto exit;
-	}
-
-	is_yuv = sde_mdp_is_yuv_format(fmt);
-
-	SDEROT_DBG("w:%d h:%d fps:%d pixfmt:%8.8x yuv:%d res:%llu rd:%d\n",
-		width, height, fps, pixfmt, is_yuv, res, is_rd);
-
-	if (!is_yuv)
-		goto exit;
-
-	/*
-	 * If (total_source_pixels <= 62208000  && YUV) -> RD/WROT=2 //1080p30
-	 * If (total_source_pixels <= 124416000 && YUV) -> RD/WROT=4 //1080p60
-	 * If (total_source_pixels <= 2160p && YUV && FPS <= 30) -> RD/WROT = 32
-	 */
-	if (res <= (RES_1080p * 30))
-		ot_lim = 2;
-	else if (res <= (RES_1080p * 60))
-		ot_lim = 4;
-
-exit:
-	SDEROT_DBG("ot_lim=%d\n", ot_lim);
-	return ot_lim;
-}
-
-static u32 get_ot_limit(u32 reg_off, u32 bit_off,
-	struct sde_mdp_set_ot_params *params)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 ot_lim;
-	u32 val;
-
-	ot_lim = sde_mdp_get_ot_limit(
-			params->width, params->height,
-			params->fmt, params->fps,
-			params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF);
-
-	/*
-	 * If default ot is not set from dt,
-	 * then do not configure it.
-	 */
-	if (ot_lim == 0)
-		goto exit;
-
-	val = SDE_VBIF_READ(mdata, reg_off);
-	val &= (0xFF << bit_off);
-	val = val >> bit_off;
-
-	SDEROT_EVTLOG(val, ot_lim);
-
-	if (val == ot_lim)
-		ot_lim = 0;
-
-exit:
-	SDEROT_DBG("ot_lim=%d\n", ot_lim);
-	SDEROT_EVTLOG(params->width, params->height, params->fmt, params->fps,
-			ot_lim);
-	return ot_lim;
-}
-
-void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 ot_lim;
-	u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
-					* mdata->npriority_lvl)
-					+ params->reg_off_vbif_lim_conf;
-	u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
-	u32 reg_val;
-	u32 sts;
-	bool forced_on;
-
-	ot_lim = get_ot_limit(
-		reg_off_vbif_lim_conf,
-		bit_off_vbif_lim_conf,
-		params) & 0xFF;
-
-	if (ot_lim == 0)
-		goto exit;
-
-	if (params->rotsts_base && params->rotsts_busy_mask) {
-		sts = readl_relaxed(params->rotsts_base);
-		if (sts & params->rotsts_busy_mask) {
-			SDEROT_ERR(
-				"Rotator still busy, should not modify VBIF\n");
-			SDEROT_EVTLOG_TOUT_HANDLER(
-				"rot", "vbif_dbg_bus", "panic");
-		}
-	}
-
-	trace_rot_perf_set_ot(params->num, params->xin_id, ot_lim);
-
-	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
-		params->reg_off_mdp_clk_ctrl, true);
-
-	reg_val = SDE_VBIF_READ(mdata, reg_off_vbif_lim_conf);
-	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
-	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
-	SDE_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val);
-
-	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
-		reg_val | BIT(params->xin_id));
-
-	/* this is a polling operation */
-	sde_mdp_wait_for_xin_halt(params->xin_id);
-
-	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
-		reg_val & ~BIT(params->xin_id));
-
-	if (forced_on)
-		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
-			params->reg_off_mdp_clk_ctrl, false);
-
-	SDEROT_EVTLOG(params->num, params->xin_id, ot_lim);
-exit:
-	return;
-}
-
-/*
- * sde_mdp_set_vbif_memtype - set memtype output for the given xin port
- * @mdata: pointer to global rotator data
- * @xin_id: xin identifier
- * @memtype: memtype output configuration
- * return: none
- */
-static void sde_mdp_set_vbif_memtype(struct sde_rot_data_type *mdata,
-		u32 xin_id, u32 memtype)
-{
-	u32 reg_off;
-	u32 bit_off;
-	u32 reg_val;
-
-	/*
-	 * Assume 4 bits per bit field, 8 fields per 32-bit register.
-	 */
-	if (xin_id >= 8)
-		return;
-
-	reg_off = MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0;
-
-	bit_off = (xin_id & 0x7) * 4;
-	reg_val = SDE_VBIF_READ(mdata, reg_off);
-	reg_val &= ~(0x7 << bit_off);
-	reg_val |= (memtype & 0x7) << bit_off;
-	SDE_VBIF_WRITE(mdata, reg_off, reg_val);
-}
-
-/*
- * sde_mdp_init_vbif - initialize static vbif configuration
- * return: 0 if success; error code otherwise
- */
-int sde_mdp_init_vbif(void)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int i;
-
-	if (!mdata)
-		return -EINVAL;
-
-	if (mdata->vbif_memtype_count && mdata->vbif_memtype) {
-		for (i = 0; i < mdata->vbif_memtype_count; i++)
-			sde_mdp_set_vbif_memtype(mdata, i,
-					mdata->vbif_memtype[i]);
-
-		SDEROT_DBG("amemtype=0x%x\n", SDE_VBIF_READ(mdata,
-				MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0));
-	}
-
-	return 0;
-}
-
-struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
-{
-	struct reg_bus_client *client;
-	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
-	static u32 id;
-
-	if (client_name == NULL) {
-		SDEROT_ERR("client name is null\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL);
-	if (!client)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&sde_res->reg_bus_lock);
-	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
-	client->usecase_ndx = VOTE_INDEX_DISABLE;
-	client->id = id;
-	SDEROT_DBG("bus vote client %s created:%pK id :%d\n", client_name,
-		client, id);
-	id++;
-	list_add(&client->list, &sde_res->reg_bus_clist);
-	mutex_unlock(&sde_res->reg_bus_lock);
-
-	return client;
-}
-
-void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client)
-{
-	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
-
-	if (!client) {
-		SDEROT_ERR("reg bus vote: invalid client handle\n");
-	} else {
-		SDEROT_DBG("bus vote client %s destroyed:%pK id:%u\n",
-			client->name, client, client->id);
-		mutex_lock(&sde_res->reg_bus_lock);
-		list_del_init(&client->list);
-		mutex_unlock(&sde_res->reg_bus_lock);
-		kfree(client);
-	}
-}
-
-int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
-{
-	int ret = 0;
-	bool changed = false;
-	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
-	struct reg_bus_client *client, *temp_client;
-	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
-
-	if (!sde_res || !sde_res->reg_bus_hdl || !bus_client)
-		return 0;
-
-	mutex_lock(&sde_res->reg_bus_lock);
-	bus_client->usecase_ndx = usecase_ndx;
-	list_for_each_entry_safe(client, temp_client, &sde_res->reg_bus_clist,
-		list) {
-
-		if (client->usecase_ndx < VOTE_INDEX_MAX &&
-		    client->usecase_ndx > max_usecase_ndx)
-			max_usecase_ndx = client->usecase_ndx;
-	}
-
-	if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx) {
-		changed = true;
-		sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
-	}
-
-	SDEROT_DBG(
-		"%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
-		__builtin_return_address(0), changed, max_usecase_ndx,
-		bus_client->name, bus_client->id, usecase_ndx);
-	if (changed)
-		ret = msm_bus_scale_client_update_request(sde_res->reg_bus_hdl,
-			max_usecase_ndx);
-
-	mutex_unlock(&sde_res->reg_bus_lock);
-	return ret;
-}
-
-static int sde_mdp_parse_dt_handler(struct platform_device *pdev,
-		char *prop_name, u32 *offsets, int len)
-{
-	int rc;
-
-	rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
-					offsets, len);
-	if (rc) {
-		SDEROT_ERR("Error from prop %s : u32 array read\n", prop_name);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int sde_mdp_parse_dt_prop_len(struct platform_device *pdev,
-				      char *prop_name)
-{
-	int len = 0;
-
-	of_find_property(pdev->dev.of_node, prop_name, &len);
-
-	if (len < 1) {
-		SDEROT_INFO("prop %s : doesn't exist in device tree\n",
-			prop_name);
-		return 0;
-	}
-
-	len = len/sizeof(u32);
-
-	return len;
-}
-
-static void sde_mdp_parse_vbif_memtype(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-
-	mdata->vbif_memtype_count = sde_mdp_parse_dt_prop_len(pdev,
-			"qcom,mdss-rot-vbif-memtype");
-	mdata->vbif_memtype = kcalloc(mdata->vbif_memtype_count,
-			sizeof(u32), GFP_KERNEL);
-	if (!mdata->vbif_memtype) {
-		mdata->vbif_memtype_count = 0;
-		return;
-	}
-
-	rc = sde_mdp_parse_dt_handler(pdev,
-		"qcom,mdss-rot-vbif-memtype", mdata->vbif_memtype,
-			mdata->vbif_memtype_count);
-	if (rc) {
-		SDEROT_DBG("vbif memtype not found\n");
-		kfree(mdata->vbif_memtype);
-		mdata->vbif_memtype = NULL;
-		mdata->vbif_memtype_count = 0;
-		return;
-	}
-}
-
-static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-
-	mdata->vbif_rt_qos = NULL;
-
-	mdata->npriority_lvl = sde_mdp_parse_dt_prop_len(pdev,
-			"qcom,mdss-rot-vbif-qos-setting");
-	mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
-			sizeof(u32), GFP_KERNEL);
-	if (!mdata->vbif_nrt_qos) {
-		mdata->npriority_lvl = 0;
-		return;
-	}
-
-	rc = sde_mdp_parse_dt_handler(pdev,
-		"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
-			mdata->npriority_lvl);
-	if (rc) {
-		SDEROT_DBG("vbif setting not found\n");
-		kfree(mdata->vbif_nrt_qos);
-		mdata->vbif_nrt_qos = NULL;
-		mdata->npriority_lvl = 0;
-		return;
-	}
-}
-
-static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-	u32 len, data[SDE_ROT_OP_MAX] = {0};
-
-	len = sde_mdp_parse_dt_prop_len(pdev,
-			"qcom,mdss-rot-cdp-setting");
-	if (len == SDE_ROT_OP_MAX) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-rot-cdp-setting", data, len);
-		if (rc) {
-			SDEROT_ERR("invalid CDP setting\n");
-			goto end;
-		}
-
-		set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
-		mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
-		mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
-		return;
-	}
-end:
-	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
-}
-
-static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-	u32 len, data[4];
-
-	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
-	if (len == 4) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-rot-qos-lut", data, len);
-		if (!rc) {
-			mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
-			mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
-			mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
-			mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
-			set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
-		} else {
-			SDEROT_DBG("qos lut setting not found\n");
-		}
-	}
-
-	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
-	if (len == SDE_ROT_OP_MAX) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-rot-danger-lut", data, len);
-		if (!rc) {
-			mdata->lut_cfg[SDE_ROT_RD].danger_lut
-							= data[SDE_ROT_RD];
-			mdata->lut_cfg[SDE_ROT_WR].danger_lut
-							= data[SDE_ROT_WR];
-			set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
-		} else {
-			SDEROT_DBG("danger lut setting not found\n");
-		}
-	}
-
-	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
-	if (len == SDE_ROT_OP_MAX) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-rot-safe-lut", data, len);
-		if (!rc) {
-			mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
-			mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
-			set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
-		} else {
-			SDEROT_DBG("safe lut setting not found\n");
-		}
-	}
-}
-
-static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-	u32 len, data[4];
-
-	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
-	if (len == 4) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-inline-rot-qos-lut", data, len);
-		if (!rc) {
-			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
-			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
-			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
-			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
-			set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
-		} else {
-			SDEROT_DBG("inline qos lut setting not found\n");
-		}
-	}
-
-	len = sde_mdp_parse_dt_prop_len(pdev,
-				"qcom,mdss-inline-rot-danger-lut");
-	if (len == SDE_ROT_OP_MAX) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-inline-rot-danger-lut", data, len);
-		if (!rc) {
-			mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
-							= data[SDE_ROT_RD];
-			mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
-							= data[SDE_ROT_WR];
-			set_bit(SDE_INLINE_QOS_DANGER_LUT,
-					mdata->sde_inline_qos_map);
-		} else {
-			SDEROT_DBG("inline danger lut setting not found\n");
-		}
-	}
-
-	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
-	if (len == SDE_ROT_OP_MAX) {
-		rc = sde_mdp_parse_dt_handler(pdev,
-			"qcom,mdss-inline-rot-safe-lut", data, len);
-		if (!rc) {
-			mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
-							= data[SDE_ROT_RD];
-			mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
-							= data[SDE_ROT_WR];
-			set_bit(SDE_INLINE_QOS_SAFE_LUT,
-					mdata->sde_inline_qos_map);
-		} else {
-			SDEROT_DBG("inline safe lut setting not found\n");
-		}
-	}
-}
-
-static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	int rc;
-	u32 data;
-
-	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
-		&data);
-	mdata->rot_block_size = (!rc ? data : 128);
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,mdss-default-ot-rd-limit", &data);
-	mdata->default_ot_rd_limit = (!rc ? data : 0);
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,mdss-default-ot-wr-limit", &data);
-	mdata->default_ot_wr_limit = (!rc ? data : 0);
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
-	if (rc)
-		SDEROT_DBG(
-			"Could not read optional property: highest bank bit\n");
-
-	sde_mdp_parse_cdp_setting(pdev, mdata);
-
-	sde_mdp_parse_vbif_qos(pdev, mdata);
-
-	sde_mdp_parse_vbif_memtype(pdev, mdata);
-
-	sde_mdp_parse_rot_lut_setting(pdev, mdata);
-
-	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,mdss-rot-qos-cpu-mask", &data);
-	mdata->rot_pm_qos_cpu_mask = (!rc ? data : 0);
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		 "qcom,mdss-rot-qos-cpu-dma-latency", &data);
-	mdata->rot_pm_qos_cpu_dma_latency = (!rc ? data : 0);
-
-	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
-
-	return 0;
-}
-
-static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
-		struct sde_rot_data_type *mdata)
-{
-	kfree(mdata->vbif_memtype);
-	mdata->vbif_memtype = NULL;
-	kfree(mdata->vbif_rt_qos);
-	mdata->vbif_rt_qos = NULL;
-	kfree(mdata->vbif_nrt_qos);
-	mdata->vbif_nrt_qos = NULL;
-}
-
-#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
-	{						\
-		.src = MSM_BUS_MASTER_AMPSS_M0,		\
-		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
-		.ab = (ab_val),				\
-		.ib = (ib_val),				\
-	}
-
-#define BUS_VOTE_19_MHZ 153600000
-#define BUS_VOTE_40_MHZ 320000000
-#define BUS_VOTE_80_MHZ 640000000
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-
-static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
-	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
-};
-static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
-		mdp_reg_bus_vectors)];
-static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
-	.usecase = mdp_reg_bus_usecases,
-	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
-	.name = "sde_reg",
-	.active_only = true,
-};
-
-static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
-{
-	struct msm_bus_scale_pdata *reg_bus_pdata;
-	int i;
-
-	if (!mdata->reg_bus_hdl) {
-		reg_bus_pdata = &mdp_reg_bus_scale_table;
-		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
-			mdp_reg_bus_usecases[i].num_paths = 1;
-			mdp_reg_bus_usecases[i].vectors =
-				&mdp_reg_bus_vectors[i];
-		}
-
-		mdata->reg_bus_hdl =
-			msm_bus_scale_register_client(reg_bus_pdata);
-		if (!mdata->reg_bus_hdl) {
-			/* Continue without reg_bus scaling */
-			SDEROT_WARN("reg_bus_client register failed\n");
-		} else
-			SDEROT_DBG("register reg_bus_hdl=%x\n",
-					mdata->reg_bus_hdl);
-	}
-
-	return 0;
-}
-#else
-static inline int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
-{
-	return 0;
-}
-#endif
-
-static void sde_mdp_bus_scale_unregister(struct sde_rot_data_type *mdata)
-{
-	SDEROT_DBG("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
-
-	if (mdata->reg_bus_hdl) {
-		msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
-		mdata->reg_bus_hdl = 0;
-	}
-}
-
-static struct sde_rot_data_type *sde_rot_res;
-
-struct sde_rot_data_type *sde_rot_get_mdata(void)
-{
-	return sde_rot_res;
-}
-
-/*
- * sde_rotator_base_init - initialize base rotator data/resource
- */
-int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
-		struct platform_device *pdev,
-		const void *drvdata)
-{
-	int rc;
-	struct sde_rot_data_type *mdata;
-
-	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
-	if (mdata == NULL)
-		return -ENOMEM;
-
-	mdata->pdev = pdev;
-	sde_rot_res = mdata;
-	mutex_init(&mdata->reg_bus_lock);
-	INIT_LIST_HEAD(&mdata->reg_bus_clist);
-
-	rc = sde_rot_ioremap_byname(pdev, &mdata->sde_io, "mdp_phys");
-	if (rc) {
-		SDEROT_ERR("unable to map SDE base\n");
-		goto probe_done;
-	}
-	SDEROT_DBG("SDE ROT HW Base addr=0x%x len=0x%x\n",
-		(int) (unsigned long) mdata->sde_io.base,
-		mdata->sde_io.len);
-
-	rc = sde_rot_ioremap_byname(pdev, &mdata->vbif_nrt_io, "rot_vbif_phys");
-	if (rc) {
-		SDEROT_ERR("unable to map SDE ROT VBIF base\n");
-		goto probe_done;
-	}
-	SDEROT_DBG("SDE ROT VBIF HW Base addr=%pK len=0x%x\n",
-			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);
-
-	rc = sde_mdp_parse_dt_misc(pdev, mdata);
-	if (rc) {
-		SDEROT_ERR("Error in device tree : misc\n");
-		goto probe_done;
-	}
-
-	rc = sde_mdp_bus_scale_register(mdata);
-	if (rc) {
-		SDEROT_ERR("unable to register bus scaling\n");
-		goto probe_done;
-	}
-
-	rc = sde_smmu_init(&pdev->dev);
-	if (rc) {
-		SDEROT_ERR("sde smmu init failed %d\n", rc);
-		goto probe_done;
-	}
-
-	*pmdata = mdata;
-
-	return 0;
-probe_done:
-	return rc;
-}
-
-/*
- * sde_rotator_base_destroy - clean up base rotator data/resource
- */
-void sde_rotator_base_destroy(struct sde_rot_data_type *mdata)
-{
-	struct platform_device *pdev;
-
-	if (!mdata || !mdata->pdev)
-		return;
-
-	pdev = mdata->pdev;
-
-	sde_rot_res = NULL;
-	sde_mdp_bus_scale_unregister(mdata);
-	sde_mdp_destroy_dt_misc(pdev, mdata);
-	sde_rot_iounmap(&mdata->vbif_nrt_io);
-	sde_rot_iounmap(&mdata->sde_io);
-	devm_kfree(&pdev->dev, mdata);
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
deleted file mode 100644
index 8ba4eb9..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_BASE_H__
-#define __SDE_ROTATOR_BASE_H__
-
-#include <linux/types.h>
-#include <linux/file.h>
-#include <linux/kref.h>
-#include <linux/kernel.h>
-#include <linux/regulator/consumer.h>
-
-#include "sde_rotator_hwio.h"
-#include "sde_rotator_io_util.h"
-#include "sde_rotator_smmu.h"
-#include "sde_rotator_formats.h"
-#include <linux/pm_qos.h>
-
-/* HW Revisions for different targets */
-#define SDE_GET_MAJOR_REV(rev)	((rev) >> 28)
-#define SDE_GET_MAJOR_MINOR(rev)	((rev) >> 16)
-
-#define IS_SDE_MAJOR_SAME(rev1, rev2)   \
-	(SDE_GET_MAJOR_REV((rev1)) == SDE_GET_MAJOR_REV((rev2)))
-
-#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
-	(SDE_GET_MAJOR_MINOR(rev1) == SDE_GET_MAJOR_MINOR(rev2))
-
-#define SDE_MDP_REV(major, minor, step) \
-	((((major) & 0x000F) << 28) | \
-	 (((minor) & 0x0FFF) << 16) | \
-	  ((step)  & 0xFFFF))
-
-#define SDE_MDP_HW_REV_107	SDE_MDP_REV(1, 0, 7)	/* 8996 v1.0 */
-#define SDE_MDP_HW_REV_300	SDE_MDP_REV(3, 0, 0)	/* 8998 v1.0 */
-#define SDE_MDP_HW_REV_301	SDE_MDP_REV(3, 0, 1)	/* 8998 v1.1 */
-#define SDE_MDP_HW_REV_400	SDE_MDP_REV(4, 0, 0)	/* sdm845 v1.0 */
-#define SDE_MDP_HW_REV_410	SDE_MDP_REV(4, 1, 0)	/* sdm670 v1.0 */
-#define SDE_MDP_HW_REV_500	SDE_MDP_REV(5, 0, 0)	/* sm8150 v1.0 */
-#define SDE_MDP_HW_REV_520	SDE_MDP_REV(5, 2, 0)	/* sdmmagpie v1.0 */
-#define SDE_MDP_HW_REV_530	SDE_MDP_REV(5, 3, 0)	/* sm6150 v1.0 */
-#define SDE_MDP_HW_REV_600	SDE_MDP_REV(6, 0, 0)    /* msmnile+ v1.0 */
-
-#define SDE_MDP_VBIF_4_LEVEL_REMAPPER	4
-#define SDE_MDP_VBIF_8_LEVEL_REMAPPER	8
-
-struct sde_mult_factor {
-	uint32_t numer;
-	uint32_t denom;
-};
-
-struct sde_mdp_set_ot_params {
-	u32 xin_id;
-	u32 num;
-	u32 width;
-	u32 height;
-	u32 fps;
-	u32 fmt;
-	u32 reg_off_vbif_lim_conf;
-	u32 reg_off_mdp_clk_ctrl;
-	u32 bit_off_mdp_clk_ctrl;
-	char __iomem *rotsts_base;
-	u32 rotsts_busy_mask;
-};
-
-/*
- * struct sde_mdp_vbif_halt_params: parameters for issue halt request to vbif
- * @xin_id: xin port number of vbif
- * @reg_off_mdp_clk_ctrl: reg offset for vbif clock control
- * @bit_off_mdp_clk_ctrl: bit offset for vbif clock control
- */
-struct sde_mdp_vbif_halt_params {
-	u32 xin_id;
-	u32 reg_off_mdp_clk_ctrl;
-	u32 bit_off_mdp_clk_ctrl;
-};
-
-enum sde_bus_vote_type {
-	VOTE_INDEX_DISABLE,
-	VOTE_INDEX_19_MHZ,
-	VOTE_INDEX_40_MHZ,
-	VOTE_INDEX_80_MHZ,
-	VOTE_INDEX_MAX,
-};
-
-#define MAX_CLIENT_NAME_LEN 64
-
-enum sde_qos_settings {
-	SDE_QOS_PER_PIPE_IB,
-	SDE_QOS_OVERHEAD_FACTOR,
-	SDE_QOS_CDP,
-	SDE_QOS_OTLIM,
-	SDE_QOS_PER_PIPE_LUT,
-	SDE_QOS_SIMPLIFIED_PREFILL,
-	SDE_QOS_VBLANK_PANIC_CTRL,
-	SDE_QOS_LUT,
-	SDE_QOS_DANGER_LUT,
-	SDE_QOS_SAFE_LUT,
-	SDE_QOS_MAX,
-};
-
-enum sde_inline_qos_settings {
-	SDE_INLINE_QOS_LUT,
-	SDE_INLINE_QOS_DANGER_LUT,
-	SDE_INLINE_QOS_SAFE_LUT,
-	SDE_INLINE_QOS_MAX,
-};
-
-/**
- * enum sde_rot_type: SDE rotator HW version
- * @SDE_ROT_TYPE_V1_0: V1.0 HW version
- * @SDE_ROT_TYPE_V1_1: V1.1 HW version
- */
-enum sde_rot_type {
-	SDE_ROT_TYPE_V1_0 = 0x10000000,
-	SDE_ROT_TYPE_V1_1 = 0x10010000,
-	SDE_ROT_TYPE_MAX,
-};
-
-/**
- * enum sde_caps_settings: SDE rotator capability definition
- * @SDE_CAPS_R1_WB: MDSS V1.x WB block
- * @SDE_CAPS_R3_WB: MDSS V3.x WB block
- * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
- * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
- * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
- * @SDE_CAPS_PARTIALWR: partial write override
- * @SDE_CAPS_HW_TIMESTAMP: rotator has hw timestamp support
- * @SDE_CAPS_UBWC_3: universal bandwidth compression version 3
- * @SDE_CAPS_UBWC_4: universal bandwidth compression version 4
- */
-enum sde_caps_settings {
-	SDE_CAPS_R1_WB,
-	SDE_CAPS_R3_WB,
-	SDE_CAPS_R3_1P5_DOWNSCALE,
-	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
-	SDE_CAPS_SBUF_1,
-	SDE_CAPS_UBWC_2,
-	SDE_CAPS_PARTIALWR,
-	SDE_CAPS_HW_TIMESTAMP,
-	SDE_CAPS_UBWC_3,
-	SDE_CAPS_UBWC_4,
-	SDE_CAPS_MAX,
-};
-
-enum sde_bus_clients {
-	SDE_ROT_RT,
-	SDE_ROT_NRT,
-	SDE_MAX_BUS_CLIENTS
-};
-
-enum sde_rot_op {
-	SDE_ROT_RD,
-	SDE_ROT_WR,
-	SDE_ROT_OP_MAX
-};
-
-enum sde_rot_regdump_access {
-	SDE_ROT_REGDUMP_READ,
-	SDE_ROT_REGDUMP_WRITE,
-	SDE_ROT_REGDUMP_VBIF,
-	SDE_ROT_REGDUMP_MAX
-};
-
-struct reg_bus_client {
-	char name[MAX_CLIENT_NAME_LEN];
-	short usecase_ndx;
-	u32 id;
-	struct list_head list;
-};
-
-struct sde_smmu_client {
-	struct device *dev;
-	struct dma_iommu_mapping *mmu_mapping;
-	struct sde_module_power mp;
-	struct reg_bus_client *reg_bus_clt;
-	bool domain_attached;
-	int domain;
-	u32 sid;
-};
-
-/*
- * struct sde_rot_debug_bus: rotator debugbus header structure
- * @wr_addr: write address for debugbus controller
- * @block_id: rotator debugbus block id
- * @test_id: rotator debugbus test id
- */
-struct sde_rot_debug_bus {
-	u32 wr_addr;
-	u32 block_id;
-	u32 test_id;
-};
-
-struct sde_rot_vbif_debug_bus {
-	u32 disable_bus_addr;
-	u32 block_bus_addr;
-	u32 bit_offset;
-	u32 block_cnt;
-	u32 test_pnt_cnt;
-};
-
-struct sde_rot_regdump {
-	char *name;
-	u32 offset;
-	u32 len;
-	enum sde_rot_regdump_access access;
-	u32 value;
-};
-
-struct sde_rot_lut_cfg {
-	u32 creq_lut_0;
-	u32 creq_lut_1;
-	u32 danger_lut;
-	u32 safe_lut;
-};
-
-struct sde_rot_data_type {
-	u32 mdss_version;
-
-	struct platform_device *pdev;
-	struct sde_io_data sde_io;
-	struct sde_io_data vbif_nrt_io;
-	char __iomem *mdp_base;
-
-	struct sde_smmu_client sde_smmu[SDE_IOMMU_MAX_DOMAIN];
-
-	/* bitmap to track qos applicable settings */
-	DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
-	DECLARE_BITMAP(sde_inline_qos_map, SDE_QOS_MAX);
-
-	/* bitmap to track capability settings */
-	DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
-
-	u32 default_ot_rd_limit;
-	u32 default_ot_wr_limit;
-	u32 highest_bank_bit;
-	u32 rot_block_size;
-
-	/* register bus (AHB) */
-	u32 reg_bus_hdl;
-	u32 reg_bus_usecase_ndx;
-	struct list_head reg_bus_clist;
-	struct mutex reg_bus_lock;
-
-	u32 *vbif_rt_qos;
-	u32 *vbif_nrt_qos;
-	u32 npriority_lvl;
-
-	struct pm_qos_request pm_qos_rot_cpu_req;
-	u32 rot_pm_qos_cpu_count;
-	u32 rot_pm_qos_cpu_mask;
-	u32 rot_pm_qos_cpu_dma_latency;
-
-	u32 vbif_memtype_count;
-	u32 *vbif_memtype;
-
-	int iommu_attached;
-	int iommu_ref_cnt;
-
-	struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
-	u32 nrt_vbif_dbg_bus_size;
-	struct sde_rot_debug_bus *rot_dbg_bus;
-	u32 rot_dbg_bus_size;
-
-	struct sde_rot_regdump *regdump;
-	u32 regdump_size;
-
-	void *sde_rot_hw;
-	int sec_cam_en;
-
-	u32 enable_cdp[SDE_ROT_OP_MAX];
-
-	struct sde_rot_lut_cfg lut_cfg[SDE_ROT_OP_MAX];
-	struct sde_rot_lut_cfg inline_lut_cfg[SDE_ROT_OP_MAX];
-
-	bool clk_always_on;
-};
-
-int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
-		struct platform_device *pdev,
-		const void *drvdata);
-
-void sde_rotator_base_destroy(struct sde_rot_data_type *data);
-
-struct sde_rot_data_type *sde_rot_get_mdata(void);
-
-struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name);
-
-void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client);
-
-int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx);
-
-u32 sde_apply_comp_ratio_factor(u32 quota,
-	struct sde_mdp_format_params *fmt,
-	struct sde_mult_factor *factor);
-
-u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd);
-
-void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
-
-void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params);
-
-int sde_mdp_init_vbif(void);
-
-#define SDE_VBIF_WRITE(mdata, offset, value) \
-		(sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))
-#define SDE_VBIF_READ(mdata, offset) \
-		(sde_reg_r(&mdata->vbif_nrt_io, offset, 0))
-#define SDE_REG_WRITE(mdata, offset, value) \
-		sde_reg_w(&mdata->sde_io, offset, value, 0)
-#define SDE_REG_READ(mdata, offset) \
-		sde_reg_r(&mdata->sde_io, offset, 0)
-
-#define ATRACE_END(name) trace_rot_mark_write(current->tgid, name, 0)
-#define ATRACE_BEGIN(name) trace_rot_mark_write(current->tgid, name, 1)
-#define ATRACE_INT(name, value) \
-	trace_rot_trace_counter(current->tgid, name, value)
-
-#endif /* __SDE_ROTATOR_BASE__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
deleted file mode 100644
index 86dc561..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ /dev/null
@@ -1,3569 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#include <linux/regulator/consumer.h>
-#include <linux/dma-direction.h>
-#include <soc/qcom/scm.h>
-#include <soc/qcom/secure_buffer.h>
-#include <asm/cacheflush.h>
-#include <uapi/linux/sched/types.h>
-
-#include "sde_rotator_base.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_dev.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_io_util.h"
-#include "sde_rotator_smmu.h"
-#include "sde_rotator_r1.h"
-#include "sde_rotator_r3.h"
-#include "sde_rotator_trace.h"
-#include "sde_rotator_debug.h"
-
-
-/* Rotator device id to be used in SCM call */
-#define SDE_ROTATOR_DEVICE	21
-
-/*
- * SCM call function id to be used for switching between secure and non
- * secure context
- */
-#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
-
-/* waiting for hw time out, 3 vsync for 30fps*/
-#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
-
-/* waiting for inline hw start */
-#define ROT_INLINE_START_TIMEOUT_IN_MS	(10000 + 500)
-
-/* default pixel per clock ratio */
-#define ROT_PIXEL_PER_CLK_NUMERATOR	36
-#define ROT_PIXEL_PER_CLK_DENOMINATOR	10
-#define ROT_FUDGE_FACTOR_NUMERATOR	105
-#define ROT_FUDGE_FACTOR_DENOMINATOR	100
-#define ROT_OVERHEAD_NUMERATOR		27
-#define ROT_OVERHEAD_DENOMINATOR	10000
-
-/* Minimum Rotator Clock value */
-#define ROT_MIN_ROT_CLK			20000000
-
-/* default minimum bandwidth vote */
-#define ROT_ENABLE_BW_VOTE		64000
-/*
- * Max rotator hw blocks possible. Used for upper array limits instead of
- * alloc and freeing small array
- */
-#define ROT_MAX_HW_BLOCKS 2
-
-#define SDE_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
-	{						\
-		.src = MSM_BUS_MASTER_AMPSS_M0,		\
-		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
-		.ab = (ab_val),				\
-		.ib = (ib_val),				\
-	}
-
-#define BUS_VOTE_19_MHZ 153600000
-
-/* forward prototype */
-static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-static struct msm_bus_vectors rot_reg_bus_vectors[] = {
-	SDE_REG_BUS_VECTOR_ENTRY(0, 0),
-	SDE_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
-};
-static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
-		rot_reg_bus_vectors)];
-static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
-	.usecase = rot_reg_bus_usecases,
-	.num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
-	.name = "mdss_rot_reg",
-	.active_only = 1,
-};
-
-static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
-		u64 quota)
-{
-	int new_uc_idx;
-	int ret;
-
-	if (!bus) {
-		SDEROT_ERR("null parameter\n");
-		return -EINVAL;
-	}
-
-	if (!bus->bus_hdl) {
-		SDEROT_DBG("bus scaling not enabled\n");
-		return 0;
-	} else if (bus->bus_hdl < 0) {
-		SDEROT_ERR("invalid bus handle %d\n", bus->bus_hdl);
-		return -EINVAL;
-	}
-
-	if (bus->curr_quota_val == quota) {
-		SDEROT_DBG("bw request already requested\n");
-		return 0;
-	}
-
-	if (!bus->bus_scale_pdata || !bus->bus_scale_pdata->num_usecases) {
-		SDEROT_ERR("invalid bus scale data\n");
-		return -EINVAL;
-	}
-
-	if (!quota) {
-		new_uc_idx = 0;
-	} else {
-		struct msm_bus_vectors *vect = NULL;
-		struct msm_bus_scale_pdata *bw_table =
-			bus->bus_scale_pdata;
-		u64 port_quota = quota;
-		u32 total_axi_port_cnt;
-		int i;
-
-		new_uc_idx = (bus->curr_bw_uc_idx %
-			(bw_table->num_usecases - 1)) + 1;
-
-		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
-		if (total_axi_port_cnt == 0) {
-			SDEROT_ERR("Number of bw paths is 0\n");
-			return -ENODEV;
-		}
-		do_div(port_quota, total_axi_port_cnt);
-
-		for (i = 0; i < total_axi_port_cnt; i++) {
-			vect = &bw_table->usecase[new_uc_idx].vectors[i];
-			vect->ab = port_quota;
-			vect->ib = 0;
-		}
-	}
-	bus->curr_bw_uc_idx = new_uc_idx;
-	bus->curr_quota_val = quota;
-
-	SDEROT_EVTLOG(new_uc_idx, quota);
-	SDEROT_DBG("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
-	ATRACE_BEGIN("msm_bus_scale_req_rot");
-	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
-		new_uc_idx);
-	ATRACE_END("msm_bus_scale_req_rot");
-
-	return ret;
-}
-
-static int sde_rotator_enable_reg_bus(struct sde_rot_mgr *mgr, u64 quota)
-{
-	int ret = 0, changed = 0;
-	u32 usecase_ndx = 0;
-
-	if (!mgr || !mgr->reg_bus.bus_hdl)
-		return 0;
-
-	if (quota)
-		usecase_ndx = 1;
-
-	if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
-		mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
-		changed++;
-	}
-
-	SDEROT_DBG("%s, changed=%d register bus %s\n", __func__, changed,
-		quota ? "Enable":"Disable");
-
-	if (changed) {
-		ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
-		ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
-			usecase_ndx);
-		ATRACE_END("msm_bus_scale_req_rot_reg");
-	}
-
-	return ret;
-}
-#else
-static inline int sde_rotator_enable_reg_bus(struct sde_rot_mgr *mgr, u64 quota)
-{
-	return 0;
-}
-
-static inline int sde_rotator_bus_scale_set_quota(
-		struct sde_rot_bus_data_type *bus, u64 quota)
-{
-	return 0;
-}
-#endif
-
-/*
- * Clock rate of all open sessions working a particular hw block
- * are added together to get the required rate for that hw block.
- * The max of each hw block becomes the final clock rate voted for
- */
-static unsigned long sde_rotator_clk_rate_calc(
-	struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private)
-{
-	struct sde_rot_perf *perf;
-	unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
-	unsigned long total_clk_rate = 0;
-	int i, wb_idx;
-
-	list_for_each_entry(perf, &private->perf_list, list) {
-		bool rate_accounted_for = false;
-		/*
-		 * If there is one session that has two work items across
-		 * different hw blocks rate is accounted for in both blocks.
-		 */
-		for (i = 0; i < mgr->queue_count; i++) {
-			if (perf->work_distribution[i]) {
-				clk_rate[i] += perf->clk_rate;
-				rate_accounted_for = true;
-			}
-		}
-
-		/*
-		 * Sessions that are open but not distributed on any hw block
-		 * Still need to be accounted for. Rate is added to last known
-		 * wb idx.
-		 */
-		wb_idx = perf->last_wb_idx;
-		if ((!rate_accounted_for) && (wb_idx >= 0) &&
-				(wb_idx < mgr->queue_count))
-			clk_rate[wb_idx] += perf->clk_rate;
-	}
-
-	for (i = 0; i < mgr->queue_count; i++)
-		total_clk_rate = max(clk_rate[i], total_clk_rate);
-
-	SDEROT_DBG("Total clk rate calc=%lu\n", total_clk_rate);
-	return total_clk_rate;
-}
-
-static struct clk *sde_rotator_get_clk(struct sde_rot_mgr *mgr, u32 clk_idx)
-{
-	if (clk_idx >= mgr->num_rot_clk) {
-		SDEROT_ERR("Invalid clk index:%u", clk_idx);
-		return NULL;
-	}
-
-	return mgr->rot_clk[clk_idx].clk;
-}
-
-static void sde_rotator_set_clk_rate(struct sde_rot_mgr *mgr,
-		unsigned long rate, u32 clk_idx)
-{
-	unsigned long clk_rate;
-	struct clk *clk = sde_rotator_get_clk(mgr, clk_idx);
-	int ret;
-
-	if (clk) {
-		clk_rate = clk_round_rate(clk, rate);
-		if (IS_ERR_VALUE(clk_rate)) {
-			SDEROT_ERR("unable to round rate err=%ld\n", clk_rate);
-		} else {
-			ret = clk_set_rate(clk, clk_rate);
-			if (ret < 0)
-				SDEROT_ERR("clk_set_rate failed, err:%d\n",
-						ret);
-			else
-				SDEROT_DBG("rotator clk rate=%lu\n", clk_rate);
-		}
-	} else {
-		SDEROT_ERR("rotator clk not setup properly\n");
-	}
-}
-
-/*
- * Update clock according to all open files on rotator block.
- */
-static int sde_rotator_update_clk(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_file_private *priv;
-	unsigned long clk_rate, total_clk_rate;
-
-	total_clk_rate = 0;
-	list_for_each_entry(priv, &mgr->file_list, list) {
-		clk_rate = sde_rotator_clk_rate_calc(mgr, priv);
-		total_clk_rate += clk_rate;
-	}
-
-	SDEROT_DBG("core_clk %lu\n", total_clk_rate);
-	ATRACE_INT("core_clk", total_clk_rate);
-	sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_MDSS_ROT);
-
-	return 0;
-}
-
-static int sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
-{
-	int ret = 0;
-
-	if (mgr->regulator_enable == on) {
-		SDEROT_DBG("Regulators already in selected mode on=%d\n", on);
-		return 0;
-	}
-
-	SDEROT_EVTLOG(on);
-	SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
-
-	if (on) {
-		mgr->minimum_bw_vote = mgr->enable_bw_vote;
-		sde_rotator_update_perf(mgr);
-	}
-
-	if (mgr->ops_hw_pre_pmevent)
-		mgr->ops_hw_pre_pmevent(mgr, on);
-
-	if (!sde_rot_mgr_pd_enabled(mgr))
-		ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
-			mgr->module_power.num_vreg, on);
-	if (ret) {
-		pr_err("rotator regulator failed to %s ret:%d client:%d\n",
-		      on ? "enable" : "disable", ret,
-				      sde_rot_mgr_pd_enabled(mgr));
-		return ret;
-	}
-
-	if (mgr->ops_hw_post_pmevent)
-		mgr->ops_hw_post_pmevent(mgr, on);
-
-	if (!on) {
-		mgr->minimum_bw_vote = 0;
-		sde_rotator_update_perf(mgr);
-	}
-
-	mgr->regulator_enable = on;
-	return 0;
-}
-
-static int sde_rotator_enable_clk(struct sde_rot_mgr *mgr, int clk_idx)
-{
-	struct clk *clk;
-	int ret = 0;
-
-	clk = sde_rotator_get_clk(mgr, clk_idx);
-	if (clk) {
-		ret = clk_prepare_enable(clk);
-		if (ret)
-			SDEROT_ERR("enable failed clk_idx %d\n", clk_idx);
-	}
-
-	return ret;
-}
-
-static void sde_rotator_disable_clk(struct sde_rot_mgr *mgr, int clk_idx)
-{
-	struct clk *clk;
-
-	clk = sde_rotator_get_clk(mgr, clk_idx);
-	if (clk)
-		clk_disable_unprepare(clk);
-}
-
-int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
-{
-	int ret = 0;
-	int changed = 0;
-
-	if (enable) {
-		if (mgr->rot_enable_clk_cnt == 0)
-			changed++;
-		mgr->rot_enable_clk_cnt++;
-	} else {
-		if (mgr->rot_enable_clk_cnt) {
-			mgr->rot_enable_clk_cnt--;
-			if (mgr->rot_enable_clk_cnt == 0)
-				changed++;
-		} else {
-			SDEROT_ERR("Can not be turned off\n");
-		}
-	}
-
-	if (changed) {
-		SDEROT_EVTLOG(enable);
-		SDEROT_DBG("Rotator clk %s\n", enable ? "enable" : "disable");
-
-		if (enable) {
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_MNOC_AHB);
-			if (ret)
-				goto error_mnoc_ahb;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_GCC_AHB);
-			if (ret)
-				goto error_gcc_ahb;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_GCC_AXI);
-			if (ret)
-				goto error_gcc_axi;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_MDSS_AHB);
-			if (ret)
-				goto error_mdss_ahb;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_MDSS_AXI);
-			if (ret)
-				goto error_mdss_axi;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_MDSS_ROT);
-			if (ret)
-				goto error_mdss_rot;
-			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_MDSS_ROT_SUB);
-			if (ret)
-				goto error_rot_sub;
-
-			/* Active+Sleep */
-			msm_bus_scale_client_update_context(
-				mgr->data_bus.bus_hdl, false,
-				mgr->data_bus.curr_bw_uc_idx);
-			trace_rot_bw_ao_as_context(0);
-		} else {
-			sde_rotator_disable_clk(mgr,
-					SDE_ROTATOR_CLK_MDSS_ROT_SUB);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AXI);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AHB);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MNOC_AHB);
-
-			/* Active Only */
-			msm_bus_scale_client_update_context(
-				mgr->data_bus.bus_hdl, true,
-				mgr->data_bus.curr_bw_uc_idx);
-			trace_rot_bw_ao_as_context(1);
-		}
-	}
-
-	return ret;
-error_rot_sub:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
-error_mdss_rot:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
-error_mdss_axi:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
-error_mdss_ahb:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AXI);
-error_gcc_axi:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AHB);
-error_gcc_ahb:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MNOC_AHB);
-error_mnoc_ahb:
-	return ret;
-}
-
-/* sde_rotator_resource_ctrl - control state of power resource
- * @mgr: Pointer to rotator manager
- * @enable: 1 to enable; 0 to disable
- *
- * This function returns 1 if resource is already in the requested state,
- * return 0 if the state is changed successfully, or negative error code
- * if not successful.
- */
-static int sde_rotator_resource_ctrl(struct sde_rot_mgr *mgr, int enable)
-{
-	int ret;
-
-	if (enable) {
-		mgr->res_ref_cnt++;
-		ret = pm_runtime_get_sync(&mgr->pdev->dev);
-	} else {
-		mgr->res_ref_cnt--;
-		ret = pm_runtime_put_sync(&mgr->pdev->dev);
-	}
-
-	SDEROT_DBG("%s: res_cnt=%d pm=%d enable=%d\n",
-		__func__, mgr->res_ref_cnt, ret, enable);
-	ATRACE_INT("res_cnt", mgr->res_ref_cnt);
-
-	return ret;
-}
-
-/* caller is expected to hold perf->work_dis_lock lock */
-static bool sde_rotator_is_work_pending(struct sde_rot_mgr *mgr,
-	struct sde_rot_perf *perf)
-{
-	int i;
-
-	for (i = 0; i < mgr->queue_count; i++) {
-		if (perf->work_distribution[i]) {
-			SDEROT_DBG("Work is still scheduled to complete\n");
-			return true;
-		}
-	}
-	return false;
-}
-
-static void sde_rotator_clear_fence(struct sde_rot_entry *entry)
-{
-	if (entry->input_fence) {
-		SDEROT_EVTLOG(entry->input_fence, 1111);
-		SDEROT_DBG("sys_fence_put i:%pK\n", entry->input_fence);
-		sde_rotator_put_sync_fence(entry->input_fence);
-		entry->input_fence = NULL;
-	}
-
-	/* fence failed to copy to user space */
-	if (entry->output_fence) {
-		if (entry->fenceq && entry->fenceq->timeline)
-			sde_rotator_resync_timeline(entry->fenceq->timeline);
-
-		SDEROT_EVTLOG(entry->output_fence, 2222);
-		SDEROT_DBG("sys_fence_put o:%pK\n", entry->output_fence);
-		sde_rotator_put_sync_fence(entry->output_fence);
-		entry->output_fence = NULL;
-	}
-}
-
-static int sde_rotator_signal_output(struct sde_rot_entry *entry)
-{
-	struct sde_rot_timeline *rot_timeline;
-
-	if (!entry->fenceq)
-		return -EINVAL;
-
-	rot_timeline = entry->fenceq->timeline;
-
-	if (entry->output_signaled) {
-		SDEROT_DBG("output already signaled\n");
-		return 0;
-	}
-
-	SDEROT_DBG("signal fence s:%d.%d\n", entry->item.session_id,
-			entry->item.sequence_id);
-
-	sde_rotator_inc_timeline(rot_timeline, 1);
-
-	entry->output_signaled = true;
-
-	return 0;
-}
-
-static int sde_rotator_import_buffer(struct sde_layer_buffer *buffer,
-	struct sde_mdp_data *data, u32 flags, struct device *dev, bool input)
-{
-	int i, ret = 0;
-	struct sde_fb_data planes[SDE_ROT_MAX_PLANES];
-	int dir = DMA_TO_DEVICE;
-
-	if (!input)
-		dir = DMA_FROM_DEVICE;
-
-	if (buffer->plane_count > SDE_ROT_MAX_PLANES) {
-		SDEROT_ERR("buffer plane_count exceeds MAX_PLANE limit:%d\n",
-				buffer->plane_count);
-		return -EINVAL;
-	}
-
-	data->sbuf = buffer->sbuf;
-	data->scid = buffer->scid;
-	data->writeback = buffer->writeback;
-
-	memset(planes, 0, sizeof(planes));
-
-	for (i = 0; i < buffer->plane_count; i++) {
-		planes[i].memory_id = buffer->planes[i].fd;
-		planes[i].offset = buffer->planes[i].offset;
-		planes[i].buffer = buffer->planes[i].buffer;
-		planes[i].addr = buffer->planes[i].addr;
-		planes[i].len = buffer->planes[i].len;
-	}
-
-	ret =  sde_mdp_data_get_and_validate_size(data, planes,
-			buffer->plane_count, flags, dev, true, dir, buffer);
-
-	return ret;
-}
-
-static int sde_rotator_secure_session_ctrl(bool enable)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	uint32_t *sid_info = NULL;
-	struct scm_desc desc = {0};
-	unsigned int resp = 0;
-	int ret = 0;
-
-	if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map)) {
-
-		sid_info = kzalloc(sizeof(uint32_t), GFP_KERNEL);
-		if (!sid_info)
-			return -ENOMEM;
-
-		sid_info[0] = mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_SECURE].sid;
-		desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
-		desc.args[0] = SDE_ROTATOR_DEVICE;
-		desc.args[1] = SCM_BUFFER_PHYS(sid_info);
-		desc.args[2] = sizeof(uint32_t);
-
-		if (!mdata->sec_cam_en && enable) {
-			/*
-			 * Enable secure camera operation
-			 * Send SCM call to hypervisor to switch the
-			 * secure_vmid to secure context
-			 */
-			desc.args[3] = VMID_CP_CAMERA_PREVIEW;
-
-			mdata->sec_cam_en = 1;
-			sde_smmu_secure_ctrl(0);
-
-			dmac_flush_range(sid_info, sid_info + 1);
-			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-					MEM_PROTECT_SD_CTRL_SWITCH), &desc);
-			resp = desc.ret[0];
-			if (ret) {
-				SDEROT_ERR("scm_call(1) ret=%d, resp=%x\n",
-					ret, resp);
-				/* failure, attach smmu */
-				mdata->sec_cam_en = 0;
-				sde_smmu_secure_ctrl(1);
-				ret = -EINVAL;
-				goto end;
-			}
-
-			SDEROT_DBG(
-			  "scm(1) sid0x%x dev0x%llx vmid0x%llx ret%d resp%x\n",
-				sid_info[0], desc.args[0], desc.args[3],
-				ret, resp);
-			SDEROT_EVTLOG(1, sid_info, sid_info[0],
-					desc.args[0], desc.args[3],
-					ret, resp);
-		} else if (mdata->sec_cam_en && !enable) {
-			/*
-			 * Disable secure camera operation
-			 * Send SCM call to hypervisor to switch the
-			 * secure_vmid to non-secure context
-			 */
-			desc.args[3] = VMID_CP_PIXEL;
-			mdata->sec_cam_en = 0;
-
-			dmac_flush_range(sid_info, sid_info + 1);
-			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-				MEM_PROTECT_SD_CTRL_SWITCH), &desc);
-			resp = desc.ret[0];
-
-			SDEROT_DBG(
-			  "scm(0) sid0x%x dev0x%llx vmid0x%llx ret%d resp%d\n",
-				sid_info[0], desc.args[0], desc.args[3],
-				ret, resp);
-
-			/* force smmu to reattach */
-			sde_smmu_secure_ctrl(1);
-
-			SDEROT_EVTLOG(0, sid_info, sid_info[0],
-					desc.args[0], desc.args[3],
-					ret, resp);
-		}
-	} else {
-		return 0;
-	}
-
-end:
-	kfree(sid_info);
-
-	if (ret)
-		return ret;
-
-	return resp;
-}
-
-
-static int sde_rotator_map_and_check_data(struct sde_rot_entry *entry)
-{
-	int ret;
-	struct sde_layer_buffer *input;
-	struct sde_layer_buffer *output;
-	struct sde_mdp_format_params *in_fmt, *out_fmt;
-	struct sde_mdp_plane_sizes ps;
-	bool rotation;
-	bool secure;
-
-	input = &entry->item.input;
-	output = &entry->item.output;
-
-	rotation = (entry->item.flags &  SDE_ROTATION_90) ? true : false;
-
-	ret = sde_smmu_ctrl(1);
-	if (ret < 0)
-		return ret;
-
-	secure = (entry->item.flags & SDE_ROTATION_SECURE_CAMERA) ?
-			true : false;
-	ret = sde_rotator_secure_session_ctrl(secure);
-	if (ret) {
-		SDEROT_ERR("failed secure session enabling/disabling %d\n",
-			ret);
-		goto end;
-	}
-
-	in_fmt = sde_get_format_params(input->format);
-	if (!in_fmt) {
-		SDEROT_ERR("invalid input format:%d\n", input->format);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	out_fmt = sde_get_format_params(output->format);
-	if (!out_fmt) {
-		SDEROT_ERR("invalid output format:%d\n", output->format);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	/* if error during map, the caller will release the data */
-	ret = sde_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
-	if (ret) {
-		SDEROT_ERR("source buffer mapping failed ret:%d\n", ret);
-		goto end;
-	}
-
-	ret = sde_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
-	if (ret) {
-		SDEROT_ERR("destination buffer mapping failed ret:%d\n", ret);
-		goto end;
-	}
-
-	ret = sde_mdp_get_plane_sizes(
-			in_fmt, input->width, input->height, &ps, 0, rotation);
-	if (ret) {
-		SDEROT_ERR("fail to get input plane size ret=%d\n", ret);
-		goto end;
-	}
-
-	ret = sde_mdp_data_check(&entry->src_buf, &ps, in_fmt);
-	if (ret) {
-		SDEROT_ERR("fail to check input data ret=%d\n", ret);
-		goto end;
-	}
-
-	ret = sde_mdp_get_plane_sizes(out_fmt, output->width, output->height,
-			&ps, 0, rotation);
-	if (ret) {
-		SDEROT_ERR("fail to get output plane size ret=%d\n", ret);
-		goto end;
-	}
-
-	ret = sde_mdp_data_check(&entry->dst_buf, &ps, out_fmt);
-	if (ret) {
-		SDEROT_ERR("fail to check output data ret=%d\n", ret);
-		goto end;
-	}
-
-end:
-	sde_smmu_ctrl(0);
-
-	return ret;
-}
-
-static struct sde_rot_perf *__sde_rotator_find_session(
-	struct sde_rot_file_private *private,
-	u32 session_id)
-{
-	struct sde_rot_perf *perf, *perf_next;
-	bool found = false;
-
-	list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
-		if (perf->config.session_id == session_id) {
-			found = true;
-			break;
-		}
-	}
-	if (!found)
-		perf = NULL;
-	return perf;
-}
-
-static struct sde_rot_perf *sde_rotator_find_session(
-	struct sde_rot_file_private *private,
-	u32 session_id)
-{
-	struct sde_rot_perf *perf;
-
-	perf = __sde_rotator_find_session(private, session_id);
-	return perf;
-}
-
-static void sde_rotator_release_data(struct sde_rot_entry *entry)
-{
-	SDEROT_EVTLOG(entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
-	sde_mdp_data_free(&entry->src_buf, true, DMA_TO_DEVICE);
-	sde_mdp_data_free(&entry->dst_buf, true, DMA_FROM_DEVICE);
-}
-
-static int sde_rotator_import_data(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry *entry)
-{
-	int ret;
-	struct sde_layer_buffer *input;
-	struct sde_layer_buffer *output;
-	u32 flag = 0;
-
-	input = &entry->item.input;
-	output = &entry->item.output;
-
-	if (entry->item.flags & SDE_ROTATION_SECURE)
-		flag = SDE_SECURE_OVERLAY_SESSION;
-
-	if (entry->item.flags & SDE_ROTATION_EXT_DMA_BUF)
-		flag |= SDE_ROT_EXT_DMA_BUF;
-
-	if (entry->item.flags & SDE_ROTATION_EXT_IOVA)
-		flag |= SDE_ROT_EXT_IOVA;
-
-	if (entry->item.flags & SDE_ROTATION_SECURE_CAMERA)
-		flag |= SDE_SECURE_CAMERA_SESSION;
-
-	ret = sde_rotator_import_buffer(input, &entry->src_buf, flag,
-				&mgr->pdev->dev, true);
-	if (ret) {
-		SDEROT_ERR("fail to import input buffer ret=%d\n", ret);
-		return ret;
-	}
-
-	/*
-	 * driver assumes output buffer is ready to be written
-	 * immediately
-	 */
-	ret = sde_rotator_import_buffer(output, &entry->dst_buf, flag,
-				&mgr->pdev->dev, false);
-	if (ret) {
-		SDEROT_ERR("fail to import output buffer ret=%d\n", ret);
-		return ret;
-	}
-
-	return ret;
-}
-
-/*
- * sde_rotator_require_reconfiguration - check if reconfiguration is required
- * @mgr: Pointer to rotator manager
- * @hw: Pointer to rotator hw resource
- * @entry: Pointer to next rotation entry
- *
- * Parameters are validated by caller.
- */
-static int sde_rotator_require_reconfiguration(struct sde_rot_mgr *mgr,
-		struct sde_rot_hw_resource *hw, struct sde_rot_entry *entry)
-{
-	/* OT setting change may impact queued entries */
-	if (entry->perf && (entry->perf->rdot_limit != mgr->rdot_limit ||
-			entry->perf->wrot_limit != mgr->wrot_limit))
-		return true;
-
-	/* sbuf mode is exclusive and may impact queued entries */
-	if (!mgr->sbuf_ctx && entry->perf && entry->perf->config.output.sbuf)
-		return true;
-
-	return false;
-}
-
-/*
- * sde_rotator_is_hw_idle - check if hw block is not processing request
- * @mgr: Pointer to rotator manager
- * @hw: Pointer to rotator hw resource
- *
- * Parameters are validated by caller.
- */
-static int sde_rotator_is_hw_idle(struct sde_rot_mgr *mgr,
-		struct sde_rot_hw_resource *hw)
-{
-	int i;
-
-	/*
-	 * Wait until all queues are idle in order to update global
-	 * setting such as VBIF QoS.  This check can be relaxed if global
-	 * settings can be updated individually by entries already
-	 * queued in hw queue, i.e. REGDMA can update VBIF directly.
-	 */
-	for (i = 0; i < mgr->queue_count; i++) {
-		struct sde_rot_hw_resource *hw_res = mgr->commitq[i].hw;
-
-		if (hw_res && atomic_read(&hw_res->num_active))
-			return false;
-	}
-
-	return true;
-}
-
-/*
- * sde_rotator_is_hw_available - check if hw is available for the given entry
- * @mgr: Pointer to rotator manager
- * @hw: Pointer to rotator hw resource
- * @entry: Pointer to rotation entry
- *
- * Parameters are validated by caller.
- */
-static int sde_rotator_is_hw_available(struct sde_rot_mgr *mgr,
-		struct sde_rot_hw_resource *hw, struct sde_rot_entry *entry)
-{
-	/*
-	 * Wait until hw is idle if reconfiguration is required; otherwise,
-	 * wait until free queue entry is available
-	 */
-	if (sde_rotator_require_reconfiguration(mgr, hw, entry)) {
-		SDEROT_DBG(
-			"wait4idle active=%d pending=%d rdot:%u/%u wrot:%u/%u s:%d.%d\n",
-				atomic_read(&hw->num_active), hw->pending_count,
-				mgr->rdot_limit, entry->perf->rdot_limit,
-				mgr->wrot_limit, entry->perf->wrot_limit,
-				entry->item.session_id,
-				entry->item.sequence_id);
-		return sde_rotator_is_hw_idle(mgr, hw);
-	} else if (mgr->sbuf_ctx && mgr->sbuf_ctx != entry->private) {
-		SDEROT_DBG("wait until sbuf mode is off\n");
-		return false;
-	} else {
-		return (atomic_read(&hw->num_active) < hw->max_active);
-	}
-}
-
-/*
- * sde_rotator_req_wait_for_idle - wait for hw for a request to be idle
- * @mgr: Pointer to rotator manager
- * @req: Pointer to rotation request
- */
-static void sde_rotator_req_wait_for_idle(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req)
-{
-	struct sde_rot_queue *queue;
-	struct sde_rot_hw_resource *hw;
-	int i, ret;
-
-	if (!mgr || !req) {
-		SDEROT_ERR("invalid params\n");
-		return;
-	}
-
-	for (i = 0; i < req->count; i++) {
-		queue = req->entries[i].commitq;
-		if (!queue || !queue->hw)
-			continue;
-		hw = queue->hw;
-		while (atomic_read(&hw->num_active) > 1) {
-			sde_rot_mgr_unlock(mgr);
-			ret = wait_event_timeout(hw->wait_queue,
-				atomic_read(&hw->num_active) <= 1,
-				msecs_to_jiffies(mgr->hwacquire_timeout));
-			sde_rot_mgr_lock(mgr);
-			if (!ret) {
-				SDEROT_ERR(
-					"timeout waiting for hw idle, a:%d\n",
-					atomic_read(&hw->num_active));
-				return;
-			}
-		}
-	}
-}
-
-/*
- * sde_rotator_get_hw_resource - block waiting for hw availability or timeout
- * @queue: Pointer to rotator queue
- * @entry: Pointer to rotation entry
- */
-static struct sde_rot_hw_resource *sde_rotator_get_hw_resource(
-	struct sde_rot_queue *queue, struct sde_rot_entry *entry)
-{
-	struct sde_rot_hw_resource *hw;
-	struct sde_rot_mgr *mgr;
-	int ret;
-
-	if (!queue || !entry || !queue->hw) {
-		SDEROT_ERR("null parameters\n");
-		return NULL;
-	}
-
-	hw = queue->hw;
-	mgr = entry->private->mgr;
-
-	WARN_ON(atomic_read(&hw->num_active) > hw->max_active);
-	while (!sde_rotator_is_hw_available(mgr, hw, entry)) {
-		sde_rot_mgr_unlock(mgr);
-		ret = wait_event_timeout(hw->wait_queue,
-			sde_rotator_is_hw_available(mgr, hw, entry),
-			msecs_to_jiffies(mgr->hwacquire_timeout));
-		sde_rot_mgr_lock(mgr);
-		if (!ret) {
-			SDEROT_ERR(
-				"timeout waiting for hw resource, a:%d p:%d\n",
-				atomic_read(&hw->num_active),
-				hw->pending_count);
-			SDEROT_EVTLOG(entry->item.session_id,
-					entry->item.sequence_id,
-					atomic_read(&hw->num_active),
-					hw->pending_count,
-					SDE_ROT_EVTLOG_ERROR);
-			return NULL;
-		}
-	}
-	atomic_inc(&hw->num_active);
-	SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count,
-			mgr->rdot_limit, entry->perf->rdot_limit,
-			mgr->wrot_limit, entry->perf->wrot_limit,
-			entry->item.session_id, entry->item.sequence_id);
-	SDEROT_DBG("active=%d pending=%d rdot=%u/%u wrot=%u/%u s:%d.%d\n",
-			atomic_read(&hw->num_active), hw->pending_count,
-			mgr->rdot_limit, entry->perf->rdot_limit,
-			mgr->wrot_limit, entry->perf->wrot_limit,
-			entry->item.session_id, entry->item.sequence_id);
-	mgr->rdot_limit = entry->perf->rdot_limit;
-	mgr->wrot_limit = entry->perf->wrot_limit;
-
-	if (!mgr->sbuf_ctx && entry->perf->config.output.sbuf) {
-		SDEROT_DBG("acquire sbuf s:%d.%d\n", entry->item.session_id,
-				entry->item.sequence_id);
-		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id);
-		mgr->sbuf_ctx = entry->private;
-	}
-
-	return hw;
-}
-
-/*
- * sde_rotator_put_hw_resource - return hw resource and wake up waiting clients
- * @queue: Pointer to rotator queue
- * @entry: Pointer to rotation entry
- * @hw: Pointer to hw resource to be returned
- */
-static void sde_rotator_put_hw_resource(struct sde_rot_queue *queue,
-		struct sde_rot_entry *entry, struct sde_rot_hw_resource *hw)
-{
-	struct sde_rot_mgr *mgr;
-	int i;
-
-	if (!queue || !entry || !hw) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	mgr = entry->private->mgr;
-
-	WARN_ON(atomic_read(&hw->num_active) < 1);
-	if (!atomic_add_unless(&hw->num_active, -1, 0))
-		SDEROT_ERR("underflow active=%d pending=%d s:%d.%d\n",
-			atomic_read(&hw->num_active), hw->pending_count,
-			entry->item.session_id, entry->item.sequence_id);
-	/*
-	 * Wake up all queues in case any entry is waiting for hw idle,
-	 * in order to update global settings, such as VBIF QoS.
-	 * This can be relaxed to the given hw resource if global
-	 * settings can be updated individually by entries already
-	 * queued in hw queue.
-	 */
-	for (i = 0; i < mgr->queue_count; i++) {
-		struct sde_rot_hw_resource *hw_res = mgr->commitq[i].hw;
-
-		if (hw_res)
-			wake_up(&hw_res->wait_queue);
-	}
-	SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count,
-			entry->item.session_id, entry->item.sequence_id);
-	SDEROT_DBG("active=%d pending=%d s:%d.%d\n",
-			atomic_read(&hw->num_active), hw->pending_count,
-			entry->item.session_id, entry->item.sequence_id);
-}
-
-/*
- * caller will need to call sde_rotator_deinit_queue when
- * the function returns error
- */
-static int sde_rotator_init_queue(struct sde_rot_mgr *mgr)
-{
-	int i, size, ret = 0;
-	char name[32];
-	struct sched_param param = { .sched_priority = 5 };
-
-	size = sizeof(struct sde_rot_queue) * mgr->queue_count;
-	mgr->commitq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
-	if (!mgr->commitq)
-		return -ENOMEM;
-
-	for (i = 0; i < mgr->queue_count; i++) {
-		snprintf(name, sizeof(name), "rot_commitq_%d_%d",
-				mgr->device->id, i);
-		SDEROT_DBG("work queue name=%s\n", name);
-		kthread_init_worker(&mgr->commitq[i].rot_kw);
-		mgr->commitq[i].rot_thread = kthread_run(kthread_worker_fn,
-				&mgr->commitq[i].rot_kw, name);
-		if (IS_ERR(mgr->commitq[i].rot_thread)) {
-			ret = -EPERM;
-			mgr->commitq[i].rot_thread = NULL;
-			break;
-		}
-
-		ret = sched_setscheduler(mgr->commitq[i].rot_thread,
-			SCHED_FIFO, &param);
-		if (ret) {
-			SDEROT_ERR(
-				"failed to set kthread priority for commitq %d\n",
-				ret);
-			break;
-		}
-
-		/* timeline not used */
-		mgr->commitq[i].timeline = NULL;
-	}
-
-	size = sizeof(struct sde_rot_queue) * mgr->queue_count;
-	mgr->doneq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
-	if (!mgr->doneq)
-		return -ENOMEM;
-
-	for (i = 0; i < mgr->queue_count; i++) {
-		snprintf(name, sizeof(name), "rot_doneq_%d_%d",
-				mgr->device->id, i);
-		SDEROT_DBG("work queue name=%s\n", name);
-		kthread_init_worker(&mgr->doneq[i].rot_kw);
-		mgr->doneq[i].rot_thread = kthread_run(kthread_worker_fn,
-				&mgr->doneq[i].rot_kw, name);
-		if (IS_ERR(mgr->doneq[i].rot_thread)) {
-			ret = -EPERM;
-			mgr->doneq[i].rot_thread = NULL;
-			break;
-		}
-
-		ret = sched_setscheduler(mgr->doneq[i].rot_thread,
-			SCHED_FIFO, &param);
-		if (ret) {
-			SDEROT_ERR(
-				"failed to set kthread priority for doneq %d\n",
-				ret);
-			break;
-		}
-
-		/* timeline not used */
-		mgr->doneq[i].timeline = NULL;
-	}
-	return ret;
-}
-
-static void sde_rotator_deinit_queue(struct sde_rot_mgr *mgr)
-{
-	int i;
-
-	if (mgr->commitq) {
-		for (i = 0; i < mgr->queue_count; i++) {
-			if (mgr->commitq[i].rot_thread) {
-				kthread_flush_worker(&mgr->commitq[i].rot_kw);
-				kthread_stop(mgr->commitq[i].rot_thread);
-			}
-		}
-		devm_kfree(mgr->device, mgr->commitq);
-		mgr->commitq = NULL;
-	}
-	if (mgr->doneq) {
-		for (i = 0; i < mgr->queue_count; i++) {
-			if (mgr->doneq[i].rot_thread) {
-				kthread_flush_worker(&mgr->doneq[i].rot_kw);
-				kthread_stop(mgr->doneq[i].rot_thread);
-			}
-		}
-		devm_kfree(mgr->device, mgr->doneq);
-		mgr->doneq = NULL;
-	}
-	mgr->queue_count = 0;
-}
-
-/*
- * sde_rotator_assign_queue() - Function assign rotation work onto hw
- * @mgr:	Rotator manager.
- * @entry:	Contains details on rotator work item being requested
- * @private:	Private struct used for access rot session performance struct
- *
- * This Function allocates hw required to complete rotation work item
- * requested.
- *
- * Caller is responsible for calling cleanup function if error is returned
- */
-static int sde_rotator_assign_queue(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry *entry,
-	struct sde_rot_file_private *private)
-{
-	struct sde_rot_perf *perf;
-	struct sde_rot_queue *queue;
-	struct sde_rot_hw_resource *hw;
-	struct sde_rotation_item *item = &entry->item;
-	u32 wb_idx = item->wb_idx;
-	u32 pipe_idx = item->pipe_idx;
-	int ret = 0;
-
-	if (wb_idx >= mgr->queue_count) {
-		/* assign to the lowest priority queue */
-		wb_idx = mgr->queue_count - 1;
-	}
-
-	entry->doneq = &mgr->doneq[wb_idx];
-	entry->commitq = &mgr->commitq[wb_idx];
-	queue = mgr->commitq;
-
-	if (!queue->hw) {
-		hw = mgr->ops_hw_alloc(mgr, pipe_idx, wb_idx);
-		if (IS_ERR_OR_NULL(hw)) {
-			SDEROT_ERR("fail to allocate hw\n");
-			ret = PTR_ERR(hw);
-		} else {
-			queue->hw = hw;
-		}
-	}
-
-	if (queue->hw) {
-		entry->commitq = queue;
-		queue->hw->pending_count++;
-	}
-
-	perf = sde_rotator_find_session(private, item->session_id);
-	if (!perf) {
-		SDEROT_ERR(
-			"Could not find session based on rotation work item\n");
-		return -EINVAL;
-	}
-
-	entry->perf = perf;
-	perf->last_wb_idx = wb_idx;
-
-	return ret;
-}
-
-static void sde_rotator_unassign_queue(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry *entry)
-{
-	struct sde_rot_queue *queue = entry->commitq;
-
-	if (!queue)
-		return;
-
-	entry->fenceq = NULL;
-	entry->commitq = NULL;
-	entry->doneq = NULL;
-
-	if (!queue->hw) {
-		SDEROT_ERR("entry assigned a queue with no hw\n");
-		return;
-	}
-
-	queue->hw->pending_count--;
-	if (queue->hw->pending_count == 0) {
-		mgr->ops_hw_free(mgr, queue->hw);
-		queue->hw = NULL;
-	}
-}
-
-void sde_rotator_queue_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	struct sde_rot_entry *entry;
-	struct sde_rot_queue *queue;
-	u32 wb_idx;
-	int i;
-
-	if (!mgr || !private || !req) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	if (!req->entries) {
-		SDEROT_DBG("no entries in request\n");
-		return;
-	}
-
-	for (i = 0; i < req->count; i++) {
-		entry = req->entries + i;
-		queue = entry->commitq;
-		wb_idx = queue->hw->wb_id;
-		entry->perf->work_distribution[wb_idx]++;
-		entry->work_assigned = true;
-	}
-
-	for (i = 0; i < req->count; i++) {
-		entry = req->entries + i;
-		queue = entry->commitq;
-		entry->output_fence = NULL;
-
-		if (entry->item.ts)
-			entry->item.ts[SDE_ROTATOR_TS_QUEUE] = ktime_get();
-		kthread_queue_work(&queue->rot_kw, &entry->commit_work);
-	}
-}
-
-static u32 sde_rotator_calc_buf_bw(struct sde_mdp_format_params *fmt,
-		uint32_t width, uint32_t height, uint32_t frame_rate)
-{
-	u32 bw;
-
-	bw = width * height * frame_rate;
-
-	if (sde_mdp_is_tp10_format(fmt))
-		bw *= 2;
-	else if (sde_mdp_is_p010_format(fmt))
-		bw *= 3;
-	else if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
-		bw = (bw * 3) / 2;
-	else
-		bw *= fmt->bpp;
-	SDEROT_EVTLOG(bw, width, height, frame_rate, fmt->format);
-	return bw;
-}
-
-static int sde_rotator_find_max_fps(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_file_private *priv;
-	struct sde_rot_perf *perf;
-	int max_fps = 0;
-
-	list_for_each_entry(priv, &mgr->file_list, list) {
-		list_for_each_entry(perf, &priv->perf_list, list) {
-			if (perf->config.frame_rate > max_fps)
-				max_fps = perf->config.frame_rate;
-		}
-	}
-
-	SDEROT_DBG("Max fps:%d\n", max_fps);
-	return max_fps;
-}
-
-static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
-		struct sde_rot_perf *perf)
-{
-	struct sde_rotation_config *config = &perf->config;
-	u32 read_bw, write_bw;
-	struct sde_mdp_format_params *in_fmt, *out_fmt;
-	struct sde_rotator_device *rot_dev;
-	int max_fps;
-
-	rot_dev = platform_get_drvdata(mgr->pdev);
-
-	in_fmt = sde_get_format_params(config->input.format);
-	if (!in_fmt) {
-		SDEROT_ERR("invalid input format %d\n", config->input.format);
-		return -EINVAL;
-	}
-	out_fmt = sde_get_format_params(config->output.format);
-	if (!out_fmt) {
-		SDEROT_ERR("invalid output format %d\n", config->output.format);
-		return -EINVAL;
-	}
-
-	/*
-	 * rotator processes 4 pixels per clock, but the actual throughtput
-	 * is 3.6. We also need to take into account for overhead time. Final
-	 * equation is:
-	 *        W x H / throughput / (1/fps - overhead) * fudge_factor
-	 */
-	max_fps = sde_rotator_find_max_fps(mgr);
-	perf->clk_rate = config->input.width * config->input.height;
-	perf->clk_rate = (perf->clk_rate * mgr->pixel_per_clk.denom) /
-			mgr->pixel_per_clk.numer;
-	perf->clk_rate *= max_fps;
-	perf->clk_rate = (perf->clk_rate * mgr->fudge_factor.numer) /
-			mgr->fudge_factor.denom;
-	perf->clk_rate *= mgr->overhead.denom;
-
-	/*
-	 * check for override overhead default value
-	 */
-	if (rot_dev->min_overhead_us > (mgr->overhead.numer * 100))
-		perf->clk_rate = DIV_ROUND_UP_ULL(perf->clk_rate,
-				(mgr->overhead.denom - max_fps *
-				(rot_dev->min_overhead_us / 100)));
-	else
-		perf->clk_rate = DIV_ROUND_UP_ULL(perf->clk_rate,
-				(mgr->overhead.denom - max_fps *
-				mgr->overhead.numer));
-
-	/* use client provided clock if specified */
-	if (config->flags & SDE_ROTATION_EXT_PERF)
-		perf->clk_rate = config->clk_rate;
-
-	/*
-	 * check for Override clock calculation
-	 */
-	if (rot_dev->min_rot_clk > perf->clk_rate)
-		perf->clk_rate = rot_dev->min_rot_clk;
-
-	if (mgr->min_rot_clk > perf->clk_rate)
-		perf->clk_rate = mgr->min_rot_clk;
-
-	if (mgr->max_rot_clk && (perf->clk_rate > mgr->max_rot_clk)) {
-		SDEROT_ERR("invalid clock:%ld exceeds max:%ld allowed\n",
-				perf->clk_rate, mgr->max_rot_clk);
-		return -EINVAL;
-	}
-
-	read_bw =  sde_rotator_calc_buf_bw(in_fmt, config->input.width,
-				config->input.height, max_fps);
-
-	write_bw = sde_rotator_calc_buf_bw(out_fmt, config->output.width,
-				config->output.height, max_fps);
-
-	read_bw = sde_apply_comp_ratio_factor(read_bw, in_fmt,
-			&config->input.comp_ratio);
-	write_bw = sde_apply_comp_ratio_factor(write_bw, out_fmt,
-			&config->output.comp_ratio);
-
-	perf->bw = read_bw + write_bw;
-
-	/*
-	 * check for override bw calculation
-	 */
-	if (rot_dev->min_bw > perf->bw)
-		perf->bw = rot_dev->min_bw;
-
-	/* use client provided bandwidth if specified */
-	if (config->flags & SDE_ROTATION_EXT_PERF)
-		perf->bw = config->data_bw;
-
-	perf->rdot_limit = sde_mdp_get_ot_limit(
-			config->input.width, config->input.height,
-			config->input.format, config->frame_rate, true);
-	perf->wrot_limit = sde_mdp_get_ot_limit(
-			config->input.width, config->input.height,
-			config->input.format, config->frame_rate, false);
-
-	SDEROT_DBG("clk:%lu, rdBW:%d, wrBW:%d, rdOT:%d, wrOT:%d\n",
-			perf->clk_rate, read_bw, write_bw, perf->rdot_limit,
-			perf->wrot_limit);
-	SDEROT_EVTLOG(perf->clk_rate, read_bw, write_bw, perf->rdot_limit,
-			perf->wrot_limit);
-	return 0;
-}
-
-static int sde_rotator_update_perf(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_file_private *priv;
-	struct sde_rot_perf *perf;
-	int not_in_suspend_mode;
-	u64 total_bw = 0;
-
-	not_in_suspend_mode = !atomic_read(&mgr->device_suspended);
-
-	if (not_in_suspend_mode) {
-		list_for_each_entry(priv, &mgr->file_list, list) {
-			list_for_each_entry(perf, &priv->perf_list, list) {
-				total_bw += perf->bw;
-			}
-		}
-	}
-
-	total_bw += mgr->pending_close_bw_vote;
-	total_bw = max_t(u64, total_bw, mgr->minimum_bw_vote);
-	sde_rotator_enable_reg_bus(mgr, total_bw);
-	ATRACE_INT("bus_quota", total_bw);
-	sde_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
-
-	return 0;
-}
-
-static void sde_rotator_release_from_work_distribution(
-		struct sde_rot_mgr *mgr,
-		struct sde_rot_entry *entry)
-{
-	if (entry->work_assigned) {
-		bool free_perf = false;
-		u32 wb_idx = entry->commitq->hw->wb_id;
-
-		if (entry->perf->work_distribution[wb_idx])
-			entry->perf->work_distribution[wb_idx]--;
-
-		if (!entry->perf->work_distribution[wb_idx]
-				&& list_empty(&entry->perf->list)) {
-			/* close session has offloaded perf free to us */
-			free_perf = true;
-		}
-
-		entry->work_assigned = false;
-		if (free_perf) {
-			if (mgr->pending_close_bw_vote < entry->perf->bw) {
-				SDEROT_ERR(
-					"close bw vote underflow %llu / %llu\n",
-						mgr->pending_close_bw_vote,
-						entry->perf->bw);
-				mgr->pending_close_bw_vote = 0;
-			} else {
-				mgr->pending_close_bw_vote -= entry->perf->bw;
-			}
-			devm_kfree(&mgr->pdev->dev,
-				entry->perf->work_distribution);
-			devm_kfree(&mgr->pdev->dev, entry->perf);
-			sde_rotator_update_perf(mgr);
-			sde_rotator_clk_ctrl(mgr, false);
-			sde_rotator_resource_ctrl(mgr, false);
-			entry->perf = NULL;
-		}
-	}
-}
-
-static void sde_rotator_release_entry(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry *entry)
-{
-	sde_rotator_release_from_work_distribution(mgr, entry);
-	sde_rotator_clear_fence(entry);
-	sde_rotator_release_data(entry);
-	sde_rotator_unassign_queue(mgr, entry);
-}
-
-/*
- * sde_rotator_commit_handler - Commit workqueue handler.
- * @file: Pointer to work struct.
- *
- * This handler is responsible for commit the job to h/w.
- * Once the job is committed, the job entry is added to the done queue.
- *
- * Note this asynchronous handler is protected by hal lock.
- */
-static void sde_rotator_commit_handler(struct kthread_work *work)
-{
-	struct sde_rot_entry *entry;
-	struct sde_rot_entry_container *request;
-	struct sde_rot_hw_resource *hw;
-	struct sde_rot_mgr *mgr;
-	struct sched_param param = { .sched_priority = 5 };
-	struct sde_rot_trace_entry rot_trace;
-	int ret;
-
-	entry = container_of(work, struct sde_rot_entry, commit_work);
-	request = entry->request;
-
-	if (!request || !entry->private || !entry->private->mgr) {
-		SDEROT_ERR("fatal error, null request/context/device\n");
-		return;
-	}
-
-	ret = sched_setscheduler(entry->fenceq->rot_thread, SCHED_FIFO, &param);
-	if (ret) {
-		SDEROT_WARN("Fail to set kthread priority for fenceq: %d\n",
-				ret);
-	}
-
-	mgr = entry->private->mgr;
-
-	SDEROT_EVTLOG(
-		entry->item.session_id, entry->item.sequence_id,
-		entry->item.src_rect.x, entry->item.src_rect.y,
-		entry->item.src_rect.w, entry->item.src_rect.h,
-		entry->item.dst_rect.x, entry->item.dst_rect.y,
-		entry->item.dst_rect.w, entry->item.dst_rect.h,
-		entry->item.flags,
-		entry->dnsc_factor_w, entry->dnsc_factor_h);
-
-	SDEDEV_DBG(mgr->device,
-		"commit handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dnsc:%u/%u\n",
-		entry->item.session_id, entry->item.sequence_id,
-		entry->item.src_rect.x, entry->item.src_rect.y,
-		entry->item.src_rect.w, entry->item.src_rect.h,
-		entry->item.dst_rect.x, entry->item.dst_rect.y,
-		entry->item.dst_rect.w, entry->item.dst_rect.h,
-		entry->item.flags,
-		entry->dnsc_factor_w, entry->dnsc_factor_h);
-
-	sde_rot_mgr_lock(mgr);
-
-	hw = sde_rotator_get_hw_resource(entry->commitq, entry);
-	if (!hw) {
-		SDEROT_ERR("no hw for the queue\n");
-		goto get_hw_res_err;
-	}
-
-	if (entry->item.ts)
-		entry->item.ts[SDE_ROTATOR_TS_COMMIT] = ktime_get();
-
-	/* Set values to pass to trace */
-	rot_trace.wb_idx = entry->item.wb_idx;
-	rot_trace.flags = entry->item.flags;
-	rot_trace.input_format = entry->item.input.format;
-	rot_trace.input_width = entry->item.input.width;
-	rot_trace.input_height = entry->item.input.height;
-	rot_trace.src_x = entry->item.src_rect.x;
-	rot_trace.src_y = entry->item.src_rect.y;
-	rot_trace.src_w = entry->item.src_rect.w;
-	rot_trace.src_h = entry->item.src_rect.h;
-	rot_trace.output_format = entry->item.output.format;
-	rot_trace.output_width = entry->item.output.width;
-	rot_trace.output_height = entry->item.output.height;
-	rot_trace.dst_x = entry->item.dst_rect.x;
-	rot_trace.dst_y = entry->item.dst_rect.y;
-	rot_trace.dst_w = entry->item.dst_rect.w;
-	rot_trace.dst_h = entry->item.dst_rect.h;
-
-	trace_rot_entry_commit(
-		entry->item.session_id, entry->item.sequence_id, &rot_trace);
-
-	ATRACE_INT("sde_smmu_ctrl", 0);
-	ret = sde_smmu_ctrl(1);
-	if (ret < 0) {
-		SDEROT_ERR("IOMMU attach failed\n");
-		goto smmu_error;
-	}
-	ATRACE_INT("sde_smmu_ctrl", 1);
-
-	ret = sde_rotator_map_and_check_data(entry);
-	if (ret) {
-		SDEROT_ERR("fail to prepare input/output data %d\n", ret);
-		goto error;
-	}
-
-	ret = mgr->ops_config_hw(hw, entry);
-	if (ret) {
-		SDEROT_ERR("fail to configure hw resource %d\n", ret);
-		goto error;
-	}
-
-	if (entry->item.ts)
-		entry->item.ts[SDE_ROTATOR_TS_START] = ktime_get();
-
-	ret = sde_rotator_req_wait_start(mgr, request);
-	if (ret) {
-		SDEROT_WARN("timeout waiting for inline start\n");
-		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id,
-				SDE_ROT_EVTLOG_ERROR);
-		goto kickoff_error;
-	}
-
-	ret = mgr->ops_kickoff_entry(hw, entry);
-	if (ret) {
-		SDEROT_ERR("fail to do kickoff %d\n", ret);
-		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id,
-				SDE_ROT_EVTLOG_ERROR);
-		goto kickoff_error;
-	}
-
-	if (entry->item.ts)
-		entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();
-
-	SDEROT_EVTLOG(entry->item.session_id, 1);
-
-	kthread_queue_work(&entry->doneq->rot_kw, &entry->done_work);
-	sde_rot_mgr_unlock(mgr);
-	return;
-kickoff_error:
-	/*
-	 * Wait for any pending operations to complete before cancelling this
-	 * one so that the system is left in a consistent state.
-	 */
-	sde_rotator_req_wait_for_idle(mgr, request);
-	mgr->ops_cancel_hw(hw, entry);
-error:
-	sde_smmu_ctrl(0);
-smmu_error:
-	sde_rotator_put_hw_resource(entry->commitq, entry, hw);
-get_hw_res_err:
-	sde_rotator_signal_output(entry);
-	sde_rotator_release_entry(mgr, entry);
-	atomic_dec(&request->pending_count);
-	atomic_inc(&request->failed_count);
-	if (request->retire_kw && request->retire_work)
-		kthread_queue_work(request->retire_kw, request->retire_work);
-	sde_rot_mgr_unlock(mgr);
-}
-
-/*
- * sde_rotator_done_handler - Done workqueue handler.
- * @file: Pointer to work struct.
- *
- * This handler is responsible for waiting for h/w done event.
- * Once the job is done, the output fence will be signaled and the job entry
- * will be retired.
- *
- * Note this asynchronous handler is protected by hal lock.
- */
-static void sde_rotator_done_handler(struct kthread_work *work)
-{
-	struct sde_rot_entry *entry;
-	struct sde_rot_entry_container *request;
-	struct sde_rot_hw_resource *hw;
-	struct sde_rot_mgr *mgr;
-	struct sde_rot_trace_entry rot_trace;
-	int ret;
-
-	entry = container_of(work, struct sde_rot_entry, done_work);
-	request = entry->request;
-
-	if (!request || !entry->private || !entry->private->mgr) {
-		SDEROT_ERR("fatal error, null request/context/device\n");
-		return;
-	}
-
-	mgr = entry->private->mgr;
-	hw = entry->commitq->hw;
-
-	SDEDEV_DBG(mgr->device,
-		"done handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dsnc:%u/%u\n",
-		entry->item.session_id, entry->item.sequence_id,
-		entry->item.src_rect.x, entry->item.src_rect.y,
-		entry->item.src_rect.w, entry->item.src_rect.h,
-		entry->item.dst_rect.x, entry->item.dst_rect.y,
-		entry->item.dst_rect.w, entry->item.dst_rect.h,
-		entry->item.flags,
-		entry->dnsc_factor_w, entry->dnsc_factor_h);
-
-	SDEROT_EVTLOG(entry->item.session_id, 0);
-	ret = mgr->ops_wait_for_entry(hw, entry);
-	if (ret) {
-		SDEROT_ERR("fail to wait for completion %d\n", ret);
-		atomic_inc(&request->failed_count);
-	}
-	SDEROT_EVTLOG(entry->item.session_id, 1);
-
-	if (entry->item.ts)
-		entry->item.ts[SDE_ROTATOR_TS_DONE] = ktime_get();
-
-	/* Set values to pass to trace */
-	rot_trace.wb_idx = entry->item.wb_idx;
-	rot_trace.flags = entry->item.flags;
-	rot_trace.input_format = entry->item.input.format;
-	rot_trace.input_width = entry->item.input.width;
-	rot_trace.input_height = entry->item.input.height;
-	rot_trace.src_x = entry->item.src_rect.x;
-	rot_trace.src_y = entry->item.src_rect.y;
-	rot_trace.src_w = entry->item.src_rect.w;
-	rot_trace.src_h = entry->item.src_rect.h;
-	rot_trace.output_format = entry->item.output.format;
-	rot_trace.output_width = entry->item.output.width;
-	rot_trace.output_height = entry->item.output.height;
-	rot_trace.dst_x = entry->item.dst_rect.x;
-	rot_trace.dst_y = entry->item.dst_rect.y;
-	rot_trace.dst_w = entry->item.dst_rect.w;
-	rot_trace.dst_h = entry->item.dst_rect.h;
-
-	trace_rot_entry_done(entry->item.session_id, entry->item.sequence_id,
-			&rot_trace);
-
-	sde_rot_mgr_lock(mgr);
-	sde_rotator_put_hw_resource(entry->commitq, entry, entry->commitq->hw);
-	sde_rotator_signal_output(entry);
-	ATRACE_INT("sde_rot_done", 1);
-	sde_rotator_release_entry(mgr, entry);
-	atomic_dec(&request->pending_count);
-	if (request->retire_kw && request->retire_work)
-		kthread_queue_work(request->retire_kw, request->retire_work);
-	if (entry->item.ts)
-		entry->item.ts[SDE_ROTATOR_TS_RETIRE] = ktime_get();
-	sde_rot_mgr_unlock(mgr);
-
-	ATRACE_INT("sde_smmu_ctrl", 3);
-	sde_smmu_ctrl(0);
-	ATRACE_INT("sde_smmu_ctrl", 4);
-}
-
-static bool sde_rotator_verify_format(struct sde_rot_mgr *mgr,
-	struct sde_mdp_format_params *in_fmt,
-	struct sde_mdp_format_params *out_fmt, bool rotation, u32 mode)
-{
-	u8 in_v_subsample, in_h_subsample;
-	u8 out_v_subsample, out_h_subsample;
-
-	if (!sde_rotator_is_valid_pixfmt(mgr, in_fmt->format, true, mode)) {
-		SDEROT_ERR("Invalid input format 0x%x (%4.4s)\n",
-				in_fmt->format, (char *)&in_fmt->format);
-		goto verify_error;
-	}
-
-	if (!sde_rotator_is_valid_pixfmt(mgr, out_fmt->format, false, mode)) {
-		SDEROT_ERR("Invalid output format 0x%x (%4.4s)\n",
-				out_fmt->format, (char *)&out_fmt->format);
-		goto verify_error;
-	}
-
-	if ((in_fmt->is_yuv != out_fmt->is_yuv) ||
-		(in_fmt->pixel_mode != out_fmt->pixel_mode) ||
-		(in_fmt->unpack_tight != out_fmt->unpack_tight)) {
-		SDEROT_ERR(
-			"Rotator does not support CSC yuv:%d/%d pm:%d/%d ut:%d/%d\n",
-			in_fmt->is_yuv, out_fmt->is_yuv,
-			in_fmt->pixel_mode, out_fmt->pixel_mode,
-			in_fmt->unpack_tight, out_fmt->unpack_tight);
-		goto verify_error;
-	}
-
-	/* Forcing same pixel depth */
-	if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
-		/* Exception is that RGB can drop alpha or add X */
-		if (in_fmt->is_yuv || out_fmt->alpha_enable ||
-			(in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
-			(in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
-			(in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
-			SDEROT_ERR("Bit format does not match\n");
-			goto verify_error;
-		}
-	}
-
-	/* Need to make sure that sub-sampling persists through rotation */
-	if (rotation) {
-		sde_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
-			&in_v_subsample, &in_h_subsample);
-		sde_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
-			&out_v_subsample, &out_h_subsample);
-
-		if ((in_v_subsample != out_h_subsample) ||
-				(in_h_subsample != out_v_subsample)) {
-			SDEROT_ERR("Rotation has invalid subsampling\n");
-			goto verify_error;
-		}
-	} else {
-		if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
-			SDEROT_ERR("Format subsampling mismatch\n");
-			goto verify_error;
-		}
-	}
-
-	return true;
-
-verify_error:
-	SDEROT_ERR("in_fmt=0x%x (%4.4s), out_fmt=0x%x (%4.4s), mode=%d\n",
-			in_fmt->format, (char *)&in_fmt->format,
-			out_fmt->format, (char *)&out_fmt->format,
-			mode);
-	return false;
-}
-
-static struct sde_mdp_format_params *__verify_input_config(
-		struct sde_rot_mgr *mgr,
-		struct sde_rotation_config *config)
-{
-	struct sde_mdp_format_params *in_fmt;
-	u8 in_v_subsample, in_h_subsample;
-	u32 input;
-	int verify_input_only;
-
-	if (!mgr || !config) {
-		SDEROT_ERR("null parameters\n");
-		return NULL;
-	}
-
-	input = config->input.format;
-	verify_input_only =
-		(config->flags & SDE_ROTATION_VERIFY_INPUT_ONLY) ? 1 : 0;
-
-	in_fmt = sde_get_format_params(input);
-	if (!in_fmt) {
-		if (!verify_input_only)
-			SDEROT_ERR("Unrecognized input format:0x%x\n", input);
-		return NULL;
-	}
-
-	sde_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
-		&in_v_subsample, &in_h_subsample);
-
-	/* Dimension of image needs to be divisible by subsample rate  */
-	if ((config->input.height % in_v_subsample) ||
-			(config->input.width % in_h_subsample)) {
-		if (!verify_input_only)
-			SDEROT_ERR(
-				"In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
-					config->input.width,
-					config->input.height,
-					in_v_subsample, in_h_subsample);
-		return NULL;
-	}
-
-	return in_fmt;
-}
-
-static struct sde_mdp_format_params *__verify_output_config(
-		struct sde_rot_mgr *mgr,
-		struct sde_rotation_config *config)
-{
-	struct sde_mdp_format_params *out_fmt;
-	u8 out_v_subsample, out_h_subsample;
-	u32 output;
-	int verify_input_only;
-
-	if (!mgr || !config) {
-		SDEROT_ERR("null parameters\n");
-		return NULL;
-	}
-
-	output = config->output.format;
-	verify_input_only =
-		(config->flags & SDE_ROTATION_VERIFY_INPUT_ONLY) ? 1 : 0;
-
-	out_fmt = sde_get_format_params(output);
-	if (!out_fmt) {
-		if (!verify_input_only)
-			SDEROT_ERR("Unrecognized output format:0x%x\n", output);
-		return NULL;
-	}
-
-	sde_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
-		&out_v_subsample, &out_h_subsample);
-
-	/* Dimension of image needs to be divisible by subsample rate  */
-	if ((config->output.height % out_v_subsample) ||
-			(config->output.width % out_h_subsample)) {
-		if (!verify_input_only)
-			SDEROT_ERR(
-				"Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
-					config->output.width,
-					config->output.height,
-					out_v_subsample, out_h_subsample);
-		return NULL;
-	}
-
-	return out_fmt;
-}
-
-int sde_rotator_verify_config_input(struct sde_rot_mgr *mgr,
-		struct sde_rotation_config *config)
-{
-	struct sde_mdp_format_params *in_fmt;
-
-	in_fmt = __verify_input_config(mgr, config);
-	if (!in_fmt)
-		return -EINVAL;
-
-	return 0;
-}
-
-int sde_rotator_verify_config_output(struct sde_rot_mgr *mgr,
-		struct sde_rotation_config *config)
-{
-	struct sde_mdp_format_params *out_fmt;
-
-	out_fmt = __verify_output_config(mgr, config);
-	if (!out_fmt)
-		return -EINVAL;
-
-	return 0;
-}
-
-int sde_rotator_verify_config_all(struct sde_rot_mgr *mgr,
-	struct sde_rotation_config *config)
-{
-	struct sde_mdp_format_params *in_fmt, *out_fmt;
-	bool rotation;
-	u32 mode;
-
-	if (!mgr || !config) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	rotation = (config->flags & SDE_ROTATION_90) ? true : false;
-
-	mode = config->output.sbuf ? SDE_ROTATOR_MODE_SBUF :
-				SDE_ROTATOR_MODE_OFFLINE;
-
-	in_fmt = __verify_input_config(mgr, config);
-	if (!in_fmt)
-		return -EINVAL;
-
-	out_fmt = __verify_output_config(mgr, config);
-	if (!out_fmt)
-		return -EINVAL;
-
-	if (!sde_rotator_verify_format(mgr, in_fmt, out_fmt, rotation, mode)) {
-		SDEROT_ERR(
-			"Rot format pairing invalid, in_fmt:0x%x, out_fmt:0x%x\n",
-					config->input.format,
-					config->output.format);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int sde_rotator_validate_item_matches_session(
-	struct sde_rotation_config *config, struct sde_rotation_item *item)
-{
-	int ret;
-
-	ret = __compare_session_item_rect(&config->input,
-		&item->src_rect, item->input.format, true);
-	if (ret)
-		return ret;
-
-	ret = __compare_session_item_rect(&config->output,
-		&item->dst_rect, item->output.format, false);
-	if (ret)
-		return ret;
-
-	ret = __compare_session_rotations(config->flags, item->flags);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-/* Only need to validate x and y offset for ubwc dst fmt */
-static int sde_rotator_validate_img_roi(struct sde_rotation_item *item)
-{
-	struct sde_mdp_format_params *fmt;
-	int ret = 0;
-
-	fmt = sde_get_format_params(item->output.format);
-	if (!fmt) {
-		SDEROT_DBG("invalid output format:%d\n",
-					item->output.format);
-		return -EINVAL;
-	}
-
-	if (sde_mdp_is_ubwc_format(fmt))
-		ret = sde_validate_offset_for_ubwc_format(fmt,
-			item->dst_rect.x, item->dst_rect.y);
-
-	return ret;
-}
-
-static int sde_rotator_validate_fmt_and_item_flags(
-	struct sde_rotation_config *config, struct sde_rotation_item *item)
-{
-	struct sde_mdp_format_params *fmt;
-
-	fmt = sde_get_format_params(item->input.format);
-	if ((item->flags & SDE_ROTATION_DEINTERLACE) &&
-			sde_mdp_is_ubwc_format(fmt)) {
-		SDEROT_DBG("cannot perform deinterlace on tiled formats\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int sde_rotator_validate_entry(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry *entry)
-{
-	int ret;
-	struct sde_rotation_item *item;
-	struct sde_rot_perf *perf;
-
-	item = &entry->item;
-
-	if (item->wb_idx >= mgr->queue_count)
-		item->wb_idx = mgr->queue_count - 1;
-
-	perf = sde_rotator_find_session(private, item->session_id);
-	if (!perf) {
-		SDEROT_DBG("Could not find session:%u\n", item->session_id);
-		return -EINVAL;
-	}
-
-	ret = sde_rotator_validate_item_matches_session(&perf->config, item);
-	if (ret) {
-		SDEROT_DBG("Work item does not match session:%u\n",
-					item->session_id);
-		return ret;
-	}
-
-	ret = sde_rotator_validate_img_roi(item);
-	if (ret) {
-		SDEROT_DBG("Image roi is invalid\n");
-		return ret;
-	}
-
-	ret = sde_rotator_validate_fmt_and_item_flags(&perf->config, item);
-	if (ret)
-		return ret;
-
-	ret = mgr->ops_hw_validate_entry(mgr, entry);
-	if (ret) {
-		SDEROT_DBG("fail to configure downscale factor\n");
-		return ret;
-	}
-	return ret;
-}
-
-/*
- * Upon failure from the function, caller needs to make sure
- * to call sde_rotator_remove_request to clean up resources.
- */
-static int sde_rotator_add_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	struct sde_rot_entry *entry;
-	struct sde_rotation_item *item;
-	int i, ret;
-
-	for (i = 0; i < req->count; i++) {
-		entry = req->entries + i;
-		item = &entry->item;
-		entry->fenceq = private->fenceq;
-
-		ret = sde_rotator_validate_entry(mgr, private, entry);
-		if (ret) {
-			SDEROT_ERR("fail to validate the entry\n");
-			return ret;
-		}
-
-		ret = sde_rotator_import_data(mgr, entry);
-		if (ret) {
-			SDEROT_ERR("fail to import the data\n");
-			return ret;
-		}
-
-		entry->input_fence = item->input.fence;
-		entry->output_fence = item->output.fence;
-
-		ret = sde_rotator_assign_queue(mgr, entry, private);
-		if (ret) {
-			SDEROT_ERR("fail to assign queue to entry\n");
-			return ret;
-		}
-
-		entry->request = req;
-
-		kthread_init_work(&entry->commit_work,
-				sde_rotator_commit_handler);
-		kthread_init_work(&entry->done_work,
-				sde_rotator_done_handler);
-		SDEROT_DBG(
-			"Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%x dst{%u,%u,%u,%u}f=%x session_id=%u\n",
-			item->wb_idx,
-			item->src_rect.x, item->src_rect.y,
-			item->src_rect.w, item->src_rect.h, item->input.format,
-			item->dst_rect.x, item->dst_rect.y,
-			item->dst_rect.w, item->dst_rect.h, item->output.format,
-			item->session_id);
-	}
-
-	list_add(&req->list, &private->req_list);
-
-	return 0;
-}
-
-void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	int i;
-
-	if (!mgr || !private || !req) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	for (i = 0; i < req->count; i++)
-		sde_rotator_release_entry(mgr, req->entries + i);
-	list_del_init(&req->list);
-}
-
-/* This function should be called with req_lock */
-static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry_container *req)
-{
-	struct sde_rot_entry *entry;
-	int i;
-
-	if (atomic_read(&req->pending_count)) {
-		/*
-		 * To avoid signal the rotation entry output fence in the wrong
-		 * order, all the entries in the same request needs to be
-		 * canceled first, before signaling the output fence.
-		 */
-		SDEROT_DBG("cancel work start\n");
-		sde_rot_mgr_unlock(mgr);
-		for (i = req->count - 1; i >= 0; i--) {
-			entry = req->entries + i;
-			kthread_cancel_work_sync(&entry->commit_work);
-			kthread_cancel_work_sync(&entry->done_work);
-		}
-		sde_rot_mgr_lock(mgr);
-		SDEROT_DBG("cancel work done\n");
-		for (i = req->count - 1; i >= 0; i--) {
-			entry = req->entries + i;
-			sde_rotator_signal_output(entry);
-			sde_rotator_release_entry(mgr, entry);
-		}
-	}
-
-	list_del_init(&req->list);
-	devm_kfree(&mgr->pdev->dev, req);
-}
-
-void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private)
-{
-	struct sde_rot_entry_container *req, *req_next;
-
-	SDEROT_DBG("Canceling all rotator requests\n");
-
-	list_for_each_entry_safe(req, req_next, &private->req_list, list)
-		sde_rotator_cancel_request(mgr, req);
-}
-
-static void sde_rotator_free_completed_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private)
-{
-	struct sde_rot_entry_container *req, *req_next;
-
-	list_for_each_entry_safe(req, req_next, &private->req_list, list) {
-		if ((atomic_read(&req->pending_count) == 0) && req->finished) {
-			list_del_init(&req->list);
-			devm_kfree(&mgr->pdev->dev, req);
-		}
-	}
-}
-
-static void sde_rotator_release_rotator_perf_session(
-	struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private)
-{
-	struct sde_rot_perf *perf, *perf_next;
-
-	SDEROT_DBG("Releasing all rotator request\n");
-	sde_rotator_cancel_all_requests(mgr, private);
-
-	list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
-		list_del_init(&perf->list);
-		devm_kfree(&mgr->pdev->dev, perf->work_distribution);
-		devm_kfree(&mgr->pdev->dev, perf);
-	}
-}
-
-static void sde_rotator_release_all(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_file_private *priv, *priv_next;
-
-	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
-		sde_rotator_release_rotator_perf_session(mgr, priv);
-		sde_rotator_resource_ctrl(mgr, false);
-		list_del_init(&priv->list);
-	}
-
-	sde_rotator_update_perf(mgr);
-}
-
-int sde_rotator_validate_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	int i, ret = 0;
-	struct sde_rot_entry *entry;
-
-	if (!mgr || !private || !req) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < req->count; i++) {
-		entry = req->entries + i;
-		ret = sde_rotator_validate_entry(mgr, private,
-			entry);
-		if (ret) {
-			SDEROT_DBG("invalid entry\n");
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private, u32 session_id)
-{
-	struct sde_rotation_config config;
-	struct sde_rot_perf *perf;
-	int ret;
-
-	if (!mgr || !private)
-		return -EINVAL;
-
-	memset(&config, 0, sizeof(struct sde_rotation_config));
-
-	/* initialize with default parameters */
-	config.frame_rate = 30;
-	config.input.comp_ratio.numer = 1;
-	config.input.comp_ratio.denom = 1;
-	config.input.format = SDE_PIX_FMT_Y_CBCR_H2V2;
-	config.input.width = 640;
-	config.input.height = 480;
-	config.output.comp_ratio.numer = 1;
-	config.output.comp_ratio.denom = 1;
-	config.output.format = SDE_PIX_FMT_Y_CBCR_H2V2;
-	config.output.width = 640;
-	config.output.height = 480;
-
-	perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
-	if (!perf)
-		return -ENOMEM;
-
-	perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
-		sizeof(u32) * mgr->queue_count, GFP_KERNEL);
-	if (!perf->work_distribution) {
-		ret = -ENOMEM;
-		goto alloc_err;
-	}
-
-	config.session_id = session_id;
-	perf->config = config;
-	perf->last_wb_idx = 0;
-
-	INIT_LIST_HEAD(&perf->list);
-	list_add(&perf->list, &private->perf_list);
-
-	ret = sde_rotator_resource_ctrl(mgr, true);
-	if (ret < 0) {
-		SDEROT_ERR("Failed to acquire rotator resources\n");
-		goto resource_err;
-	}
-
-	ret = sde_rotator_update_clk(mgr);
-	if (ret) {
-		SDEROT_ERR("failed to update clk %d\n", ret);
-		goto update_clk_err;
-	}
-
-	ret = sde_rotator_clk_ctrl(mgr, true);
-	if (ret) {
-		SDEROT_ERR("failed to enable clk %d\n", ret);
-		goto enable_clk_err;
-	}
-
-	SDEROT_DBG("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
-		config.session_id, config.input.width, config.input.height,
-		config.input.format, config.output.width, config.output.height,
-		config.output.format);
-
-	goto done;
-enable_clk_err:
-update_clk_err:
-	sde_rotator_resource_ctrl(mgr, false);
-resource_err:
-	list_del_init(&perf->list);
-	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
-alloc_err:
-	devm_kfree(&mgr->pdev->dev, perf);
-done:
-	return ret;
-}
-
-static int sde_rotator_close_session(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private, u32 session_id)
-{
-	struct sde_rot_perf *perf;
-	bool offload_release_work = false;
-	u32 id;
-
-	id = (u32)session_id;
-	perf = __sde_rotator_find_session(private, id);
-	if (!perf) {
-		SDEROT_ERR("Trying to close session that does not exist\n");
-		return -EINVAL;
-	}
-
-	if (sde_rotator_is_work_pending(mgr, perf)) {
-		SDEROT_DBG("Work is still pending, offload free to wq\n");
-		mgr->pending_close_bw_vote += perf->bw;
-		offload_release_work = true;
-	}
-	list_del_init(&perf->list);
-
-	if (offload_release_work)
-		goto done;
-
-	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
-	devm_kfree(&mgr->pdev->dev, perf);
-	sde_rotator_update_perf(mgr);
-	sde_rotator_clk_ctrl(mgr, false);
-	sde_rotator_update_clk(mgr);
-	sde_rotator_resource_ctrl(mgr, false);
-done:
-	if (mgr->sbuf_ctx == private) {
-		SDEROT_DBG("release sbuf session id:%u\n", id);
-		SDEROT_EVTLOG(id);
-		mgr->sbuf_ctx = NULL;
-	}
-
-	SDEROT_DBG("Closed session id:%u\n", id);
-	return 0;
-}
-
-static int sde_rotator_config_session(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_config *config)
-{
-	int ret = 0;
-	struct sde_rot_perf *perf;
-
-	ret = sde_rotator_verify_config_all(mgr, config);
-	if (ret) {
-		SDEROT_ERR("Rotator verify format failed\n");
-		return ret;
-	}
-
-	perf = sde_rotator_find_session(private, config->session_id);
-	if (!perf) {
-		SDEROT_ERR("No session with id=%u could be found\n",
-			config->session_id);
-		return -EINVAL;
-	}
-
-	perf->config = *config;
-	ret = sde_rotator_calc_perf(mgr, perf);
-
-	if (ret) {
-		SDEROT_ERR("error in configuring the session %d\n", ret);
-		goto done;
-	}
-
-	ret = sde_rotator_update_perf(mgr);
-	if (ret) {
-		SDEROT_ERR("error in updating perf: %d\n", ret);
-		goto done;
-	}
-
-	ret = sde_rotator_update_clk(mgr);
-	if (ret) {
-		SDEROT_ERR("error in updating the rotator clk: %d\n", ret);
-		goto done;
-	}
-
-	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
-		SDEROT_ERR("too many sbuf sessions\n");
-		ret = -EBUSY;
-		goto done;
-	}
-
-	SDEROT_DBG(
-		"reconfig session id=%u in{%u,%u}f:%x out{%u,%u}f:%x fps:%d clk:%lu bw:%llu\n",
-		config->session_id, config->input.width, config->input.height,
-		config->input.format, config->output.width,
-		config->output.height, config->output.format,
-		config->frame_rate, perf->clk_rate, perf->bw);
-	SDEROT_EVTLOG(config->session_id, config->input.width,
-			config->input.height, config->input.format,
-			config->output.width, config->output.height,
-			config->output.format, config->frame_rate);
-done:
-	return ret;
-}
-
-struct sde_rot_entry_container *sde_rotator_req_init(
-	struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_item *items,
-	u32 count, u32 flags)
-{
-	struct sde_rot_entry_container *req;
-	int size, i;
-
-	if (!mgr || !private || !items) {
-		SDEROT_ERR("null parameters\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	size = sizeof(struct sde_rot_entry_container);
-	size += sizeof(struct sde_rot_entry) * count;
-	req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
-
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&req->list);
-	req->count = count;
-	req->entries = (struct sde_rot_entry *)
-		((void *)req + sizeof(struct sde_rot_entry_container));
-	req->flags = flags;
-	atomic_set(&req->pending_count, count);
-	atomic_set(&req->failed_count, 0);
-
-	for (i = 0; i < count; i++) {
-		req->entries[i].item = items[i];
-		req->entries[i].private = private;
-
-		init_completion(&req->entries[i].item.inline_start);
-		complete_all(&req->entries[i].item.inline_start);
-	}
-
-	return req;
-}
-
-void sde_rotator_req_reset_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req)
-{
-	int i;
-
-	if (!mgr || !req)
-		return;
-
-	for (i = 0; i < req->count; i++)
-		reinit_completion(&req->entries[i].item.inline_start);
-}
-
-void sde_rotator_req_set_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req)
-{
-	struct kthread_work *commit_work;
-	int i;
-
-	if (!mgr || !req || !req->entries)
-		return;
-
-	/* signal ready to start */
-	for (i = 0; i < req->count; i++)
-		complete_all(&req->entries[i].item.inline_start);
-
-	for (i = 0; i < req->count; i++) {
-		commit_work = &req->entries[i].commit_work;
-
-		SDEROT_EVTLOG(i, req->count);
-
-		sde_rot_mgr_unlock(mgr);
-		kthread_flush_work(commit_work);
-		sde_rot_mgr_lock(mgr);
-	}
-}
-
-int sde_rotator_req_wait_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req)
-{
-	struct completion *inline_start;
-	int i, ret;
-
-	if (!mgr || !req || !req->entries)
-		return -EINVAL;
-
-	/* only wait for sbuf mode */
-	if (!mgr->sbuf_ctx || !req->count ||
-			mgr->sbuf_ctx != req->entries[0].private)
-		return 0;
-
-	for (i = 0; i < req->count; i++) {
-		inline_start = &req->entries[i].item.inline_start;
-
-		sde_rot_mgr_unlock(mgr);
-		ret = wait_for_completion_timeout(inline_start,
-			msecs_to_jiffies(ROT_INLINE_START_TIMEOUT_IN_MS));
-		sde_rot_mgr_lock(mgr);
-	}
-
-	/* wait call returns zero on timeout */
-	return ret ? 0 : -EBUSY;
-}
-
-void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	if (!mgr || !private || !req) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	req->finished = true;
-}
-
-void sde_rotator_abort_inline_request(struct sde_rot_mgr *mgr,
-		struct sde_rot_file_private *private,
-		struct sde_rot_entry_container *req)
-{
-	struct kthread_work *commit_work;
-	struct kthread_work *done_work;
-	struct sde_rot_entry *entry;
-	struct sde_rot_hw_resource *hw;
-	int i;
-
-	if (!mgr || !private || !req || !req->entries)
-		return;
-
-	for (i = 0; i < req->count; i++) {
-		entry = &req->entries[i];
-		if (!entry)
-			continue;
-
-		commit_work = &entry->commit_work;
-		done_work = &entry->done_work;
-
-		hw = sde_rotator_get_hw_resource(entry->commitq, entry);
-		if (!hw) {
-			SDEROT_ERR("no hw for the queue\n");
-			SDEROT_EVTLOG(i, req->count, SDE_ROT_EVTLOG_ERROR);
-			continue;
-		}
-
-		SDEROT_EVTLOG(i, req->count);
-
-		mgr->ops_abort_hw(hw, entry);
-
-		sde_rot_mgr_unlock(mgr);
-		kthread_flush_work(commit_work);
-		kthread_flush_work(done_work);
-		sde_rot_mgr_lock(mgr);
-	}
-}
-
-int sde_rotator_handle_request_common(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req)
-{
-	int ret;
-
-	if (!mgr || !private || !req) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	sde_rotator_free_completed_request(mgr, private);
-
-	ret = sde_rotator_add_request(mgr, private, req);
-	if (ret) {
-		SDEROT_ERR("fail to add rotation request\n");
-		sde_rotator_remove_request(mgr, private, req);
-		return ret;
-	}
-	return ret;
-}
-
-static int sde_rotator_open(struct sde_rot_mgr *mgr,
-		struct sde_rot_file_private **pprivate)
-{
-	struct sde_rot_file_private *private;
-
-	if (!mgr || !pprivate)
-		return -ENODEV;
-
-	if (atomic_read(&mgr->device_suspended))
-		return -EPERM;
-
-	private = devm_kzalloc(&mgr->pdev->dev, sizeof(*private),
-		GFP_KERNEL);
-	if (!private)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&private->req_list);
-	INIT_LIST_HEAD(&private->perf_list);
-	INIT_LIST_HEAD(&private->list);
-
-	list_add(&private->list, &mgr->file_list);
-
-	*pprivate = private;
-
-	return 0;
-}
-
-static bool sde_rotator_file_priv_allowed(struct sde_rot_mgr *mgr,
-		struct sde_rot_file_private *priv)
-{
-	struct sde_rot_file_private *_priv, *_priv_next;
-	bool ret = false;
-
-	list_for_each_entry_safe(_priv, _priv_next, &mgr->file_list, list) {
-		if (_priv == priv) {
-			ret = true;
-			break;
-		}
-	}
-	return ret;
-}
-
-static int sde_rotator_close(struct sde_rot_mgr *mgr,
-		struct sde_rot_file_private *private)
-{
-	if (!mgr || !private)
-		return -ENODEV;
-
-	if (!(sde_rotator_file_priv_allowed(mgr, private))) {
-		SDEROT_ERR(
-			"Calling close with unrecognized rot_file_private\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * if secure camera session was enabled
-	 * go back to non secure state
-	 */
-	sde_rotator_secure_session_ctrl(false);
-	sde_rotator_release_rotator_perf_session(mgr, private);
-
-	list_del_init(&private->list);
-	devm_kfree(&mgr->pdev->dev, private);
-
-	sde_rotator_update_perf(mgr);
-	return 0;
-}
-
-static ssize_t caps_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	size_t len = PAGE_SIZE;
-	int cnt = 0;
-	struct sde_rot_mgr *mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr)
-		return cnt;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	SPRINT("queue_count=%d\n", mgr->queue_count);
-	SPRINT("downscale=1\n");
-	SPRINT("ubwc=1\n");
-
-	if (mgr->ops_hw_show_caps)
-		cnt += mgr->ops_hw_show_caps(mgr, attr, buf + cnt, len - cnt);
-
-	return cnt;
-}
-
-static ssize_t state_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	size_t len = PAGE_SIZE;
-	int cnt = 0;
-	struct sde_rot_mgr *mgr = sde_rot_mgr_from_device(dev);
-	int i;
-
-	if (!mgr)
-		return cnt;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	SPRINT("reg_bus_bw=%llu\n", mgr->reg_bus.curr_quota_val);
-	SPRINT("data_bus_bw=%llu\n", mgr->data_bus.curr_quota_val);
-	SPRINT("pending_close_bw_vote=%llu\n", mgr->pending_close_bw_vote);
-	SPRINT("device_suspended=%d\n", atomic_read(&mgr->device_suspended));
-	SPRINT("footswitch_cnt=%d\n", mgr->res_ref_cnt);
-	SPRINT("regulator_enable=%d\n", mgr->regulator_enable);
-	SPRINT("enable_clk_cnt=%d\n", mgr->rot_enable_clk_cnt);
-	for (i = 0; i < mgr->num_rot_clk; i++)
-		if (mgr->rot_clk[i].clk)
-			SPRINT("%s=%lu\n", mgr->rot_clk[i].clk_name,
-					clk_get_rate(mgr->rot_clk[i].clk));
-
-	if (mgr->ops_hw_show_state)
-		cnt += mgr->ops_hw_show_state(mgr, attr, buf + cnt, len - cnt);
-
-	return cnt;
-}
-
-static DEVICE_ATTR_RO(caps);
-static DEVICE_ATTR_RO(state);
-
-static struct attribute *sde_rotator_fs_attrs[] = {
-	&dev_attr_caps.attr,
-	&dev_attr_state.attr,
-	NULL
-};
-
-static struct attribute_group sde_rotator_fs_attr_group = {
-	.attrs = sde_rotator_fs_attrs
-};
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-static int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
-	struct platform_device *dev)
-{
-	int ret = 0, i;
-	int usecases;
-	struct device_node *node;
-
-	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
-	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
-		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
-		if (ret) {
-			SDEROT_ERR("msm_bus_cl_get_pdata failed. ret=%d\n",
-					ret);
-			mgr->data_bus.bus_scale_pdata = NULL;
-		}
-	}
-
-	node = of_get_child_by_name(dev->dev.of_node, "qcom,rot-reg-bus");
-	if (node) {
-		mgr->reg_bus.bus_scale_pdata
-				= msm_bus_pdata_from_node(dev, node);
-		if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
-			SDEROT_ERR("reg bus pdata parsing failed\n");
-			ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
-			if (!mgr->reg_bus.bus_scale_pdata)
-				ret = -EINVAL;
-			mgr->reg_bus.bus_scale_pdata = NULL;
-		}
-	} else {
-		SDEROT_DBG(
-			"no DT entries, configuring default reg bus table\n");
-		mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
-		usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
-		for (i = 0; i < usecases; i++) {
-			rot_reg_bus_usecases[i].num_paths = 1;
-			rot_reg_bus_usecases[i].vectors =
-				&rot_reg_bus_vectors[i];
-		}
-	}
-
-	return ret;
-}
-#else
-static inline int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
-	struct platform_device *dev)
-{
-	return 0;
-}
-#endif
-
-static int sde_rotator_parse_dt(struct sde_rot_mgr *mgr,
-	struct platform_device *dev)
-{
-	int ret = 0;
-	u32 data;
-
-	ret = of_property_read_u32(dev->dev.of_node,
-		"qcom,mdss-wb-count", &data);
-	if (!ret) {
-		if (data > ROT_MAX_HW_BLOCKS) {
-			SDEROT_ERR(
-				"Err, num of wb block (%d) larger than sw max %d\n",
-				data, ROT_MAX_HW_BLOCKS);
-			return -EINVAL;
-		}
-
-		mgr->queue_count = data;
-	}
-
-	ret = sde_rotator_parse_dt_bus(mgr, dev);
-	if (ret)
-		SDEROT_ERR("Failed to parse bus data\n");
-
-	return ret;
-}
-
-static void sde_rotator_put_dt_vreg_data(struct device *dev,
-	struct sde_module_power *mp)
-{
-	if (!mp) {
-		SDEROT_ERR("%s: invalid input\n", __func__);
-		return;
-	}
-
-	sde_rot_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
-	if (mp->vreg_config) {
-		devm_kfree(dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-	}
-	mp->num_vreg = 0;
-}
-
-static int sde_rotator_get_dt_vreg_data(struct device *dev,
-	struct sde_module_power *mp)
-{
-	const char *st = NULL;
-	struct device_node *of_node = NULL;
-	int dt_vreg_total = 0;
-	int i;
-	int rc;
-
-	if (!dev || !mp) {
-		SDEROT_ERR("%s: invalid input\n", __func__);
-		return -EINVAL;
-	}
-
-	of_node = dev->of_node;
-
-	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
-	if (dt_vreg_total < 0) {
-		SDEROT_ERR("%s: vreg not found. rc=%d\n", __func__,
-			dt_vreg_total);
-		return 0;
-	}
-	mp->num_vreg = dt_vreg_total;
-	mp->vreg_config = devm_kzalloc(dev, sizeof(struct sde_vreg) *
-		dt_vreg_total, GFP_KERNEL);
-	if (!mp->vreg_config)
-		return -ENOMEM;
-
-	/* vreg-name */
-	for (i = 0; i < dt_vreg_total; i++) {
-		rc = of_property_read_string_index(of_node,
-			"qcom,supply-names", i, &st);
-		if (rc) {
-			SDEROT_ERR("%s: error reading name. i=%d, rc=%d\n",
-				__func__, i, rc);
-			goto error;
-		}
-		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
-	}
-	sde_rot_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
-
-	for (i = 0; i < dt_vreg_total; i++) {
-		SDEROT_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
-			__func__,
-			mp->vreg_config[i].vreg_name,
-			mp->vreg_config[i].min_voltage,
-			mp->vreg_config[i].max_voltage,
-			mp->vreg_config[i].enable_load,
-			mp->vreg_config[i].disable_load);
-	}
-	return rc;
-
-error:
-	if (mp->vreg_config) {
-		devm_kfree(dev, mp->vreg_config);
-		mp->vreg_config = NULL;
-	}
-	mp->num_vreg = 0;
-	return rc;
-}
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-static void sde_rotator_bus_scale_unregister(struct sde_rot_mgr *mgr)
-{
-	SDEROT_DBG("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
-		mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl);
-
-	if (mgr->data_bus.bus_hdl)
-		msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl);
-
-	if (mgr->reg_bus.bus_hdl)
-		msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl);
-}
-
-static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
-{
-	if (!mgr->data_bus.bus_scale_pdata) {
-		SDEROT_DBG("Bus scaling is not enabled\n");
-		return 0;
-	}
-
-	mgr->data_bus.bus_hdl =
-		msm_bus_scale_register_client(
-		mgr->data_bus.bus_scale_pdata);
-	if (!mgr->data_bus.bus_hdl) {
-		SDEROT_ERR("bus_client register failed\n");
-		return -EINVAL;
-	}
-	SDEROT_DBG("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
-
-	if (mgr->reg_bus.bus_scale_pdata) {
-		mgr->reg_bus.bus_hdl =
-			msm_bus_scale_register_client(
-			mgr->reg_bus.bus_scale_pdata);
-		if (!mgr->reg_bus.bus_hdl) {
-			SDEROT_ERR("register bus_client register failed\n");
-			sde_rotator_bus_scale_unregister(mgr);
-		} else {
-			SDEROT_DBG("registered register bus_hdl=%x\n",
-					mgr->reg_bus.bus_hdl);
-		}
-	}
-
-	return 0;
-}
-#else
-static inline void sde_rotator_bus_scale_unregister(struct sde_rot_mgr *mgr)
-{
-}
-static inline int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
-{
-	return 0;
-}
-#endif
-
-static inline int sde_rotator_search_dt_clk(struct platform_device *pdev,
-		struct sde_rot_mgr *mgr, char *clk_name, int clk_idx,
-		bool mandatory)
-{
-	struct clk *tmp;
-	int rc = 0;
-
-	if (clk_idx >= SDE_ROTATOR_CLK_MAX) {
-		SDEROT_ERR("invalid clk index %d\n", clk_idx);
-		return -EINVAL;
-	}
-
-	tmp = devm_clk_get(&pdev->dev, clk_name);
-	if (IS_ERR(tmp)) {
-		if (mandatory)
-			SDEROT_ERR("unable to get clk: %s\n", clk_name);
-		else
-			tmp = NULL;
-		rc = PTR_ERR(tmp);
-	}
-
-	strlcpy(mgr->rot_clk[clk_idx].clk_name, clk_name,
-			sizeof(mgr->rot_clk[clk_idx].clk_name));
-
-	mgr->rot_clk[clk_idx].clk = tmp;
-	return mandatory ? rc : 0;
-}
-
-static int sde_rotator_parse_dt_clk(struct platform_device *pdev,
-		struct sde_rot_mgr *mgr)
-{
-	u32 rc = 0;
-	int num_clk;
-
-	num_clk = of_property_count_strings(pdev->dev.of_node,
-			"clock-names");
-	if ((num_clk <= 0) || (num_clk > SDE_ROTATOR_CLK_MAX)) {
-		SDEROT_ERR("Number of clocks are out of range: %d\n", num_clk);
-		goto clk_err;
-	}
-
-	mgr->num_rot_clk = SDE_ROTATOR_CLK_MAX;
-	mgr->rot_clk = devm_kzalloc(&pdev->dev,
-			sizeof(struct sde_rot_clk) * mgr->num_rot_clk,
-			GFP_KERNEL);
-	if (!mgr->rot_clk) {
-		rc = -ENOMEM;
-		mgr->num_rot_clk = 0;
-		goto clk_err;
-	}
-
-	if (sde_rotator_search_dt_clk(pdev, mgr, "mnoc_clk",
-			SDE_ROTATOR_CLK_MNOC_AHB, false) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "gcc_iface",
-				SDE_ROTATOR_CLK_GCC_AHB, false) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "gcc_bus",
-				SDE_ROTATOR_CLK_GCC_AXI, false) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "iface_clk",
-				SDE_ROTATOR_CLK_MDSS_AHB, true) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "axi_clk",
-				SDE_ROTATOR_CLK_MDSS_AXI, false) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "rot_core_clk",
-				SDE_ROTATOR_CLK_MDSS_ROT, false)) {
-		rc = -EINVAL;
-		goto clk_err;
-	}
-
-	/*
-	 * If 'MDSS_ROT' is already present, place 'rot_clk' under
-	 * MDSS_ROT_SUB. Otherwise, place it directly into MDSS_ROT.
-	 */
-	if (sde_rotator_get_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT))
-		rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
-				SDE_ROTATOR_CLK_MDSS_ROT_SUB, true);
-	else
-		rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
-				SDE_ROTATOR_CLK_MDSS_ROT, true);
-clk_err:
-	return rc;
-}
-
-static int sde_rotator_register_clk(struct platform_device *pdev,
-		struct sde_rot_mgr *mgr)
-{
-	int ret;
-
-	ret = sde_rotator_parse_dt_clk(pdev, mgr);
-	if (ret) {
-		SDEROT_ERR("unable to parse clocks\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void sde_rotator_unregister_clk(struct sde_rot_mgr *mgr)
-{
-	devm_kfree(mgr->device, mgr->rot_clk);
-	mgr->rot_clk = NULL;
-	mgr->num_rot_clk = 0;
-}
-
-static int sde_rotator_res_init(struct platform_device *pdev,
-	struct sde_rot_mgr *mgr)
-{
-	int ret;
-
-	if (!sde_rot_mgr_pd_enabled(mgr)) {
-		ret = sde_rotator_get_dt_vreg_data(
-				&pdev->dev, &mgr->module_power);
-		if (ret)
-			return ret;
-	}
-
-	ret = sde_rotator_register_clk(pdev, mgr);
-	if (ret)
-		goto error;
-
-	ret = sde_rotator_bus_scale_register(mgr);
-	if (ret)
-		goto error;
-
-	return 0;
-error:
-	sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
-	return ret;
-}
-
-static void sde_rotator_res_destroy(struct sde_rot_mgr *mgr)
-{
-	struct platform_device *pdev = mgr->pdev;
-
-	sde_rotator_unregister_clk(mgr);
-	sde_rotator_bus_scale_unregister(mgr);
-
-	if (!sde_rot_mgr_pd_enabled(mgr))
-		sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
-}
-
-int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
-		struct platform_device *pdev)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_rot_mgr *mgr;
-	int ret;
-
-	if (!pmgr || !pdev) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	mgr = devm_kzalloc(&pdev->dev, sizeof(struct sde_rot_mgr),
-		GFP_KERNEL);
-	if (!mgr)
-		return -ENOMEM;
-
-	mgr->pdev = pdev;
-	mgr->device = &pdev->dev;
-	mgr->pending_close_bw_vote = 0;
-	mgr->enable_bw_vote = ROT_ENABLE_BW_VOTE;
-	mgr->hwacquire_timeout = ROT_HW_ACQUIRE_TIMEOUT_IN_MS;
-	mgr->queue_count = 1;
-	mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
-	mgr->pixel_per_clk.denom = ROT_PIXEL_PER_CLK_DENOMINATOR;
-	mgr->fudge_factor.numer = ROT_FUDGE_FACTOR_NUMERATOR;
-	mgr->fudge_factor.denom = ROT_FUDGE_FACTOR_DENOMINATOR;
-	mgr->overhead.numer = ROT_OVERHEAD_NUMERATOR;
-	mgr->overhead.denom = ROT_OVERHEAD_DENOMINATOR;
-
-	mutex_init(&mgr->lock);
-	atomic_set(&mgr->device_suspended, 0);
-	INIT_LIST_HEAD(&mgr->file_list);
-
-	ret = sysfs_create_group(&mgr->device->kobj,
-			&sde_rotator_fs_attr_group);
-	if (ret) {
-		SDEROT_ERR("unable to register rotator sysfs nodes\n");
-		goto error_create_sysfs;
-	}
-
-	ret = sde_rotator_parse_dt(mgr, pdev);
-	if (ret) {
-		SDEROT_ERR("fail to parse the dt\n");
-		goto error_parse_dt;
-	}
-
-	ret = sde_rotator_res_init(pdev, mgr);
-	if (ret) {
-		SDEROT_ERR("res_init failed %d\n", ret);
-		goto error_res_init;
-	}
-
-	*pmgr = mgr;
-	ret = sde_rotator_footswitch_ctrl(mgr, true);
-	if (ret) {
-		SDEROT_INFO("res_init failed %d, use probe defer\n", ret);
-		ret = -EPROBE_DEFER;
-		goto error_fs_en_fail;
-	}
-
-	/* enable power and clock before h/w initialization/query */
-	sde_rotator_update_clk(mgr);
-	sde_rotator_resource_ctrl(mgr, true);
-	sde_rotator_clk_ctrl(mgr, true);
-
-	mdata->mdss_version = SDE_REG_READ(mdata, SDE_REG_HW_VERSION);
-	SDEROT_DBG("mdss revision %x\n", mdata->mdss_version);
-
-	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_107)) {
-		mgr->ops_hw_init = sde_rotator_r1_init;
-	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_300) ||
-		IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_400) ||
-		IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_410) ||
-		IS_SDE_MAJOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_500) ||
-		IS_SDE_MAJOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_600)) {
-		mgr->ops_hw_init = sde_rotator_r3_init;
-		mgr->min_rot_clk = ROT_MIN_ROT_CLK;
-
-		/*
-		 * on platforms where the maxlinewidth is greater than
-		 * default we need to have a max clock rate check to
-		 * ensure we do not cross the max allowed clock for rotator
-		 */
-		if (IS_SDE_MAJOR_SAME(mdata->mdss_version,
-			SDE_MDP_HW_REV_500))
-			mgr->max_rot_clk = ROT_R3_MAX_ROT_CLK;
-
-		if (!IS_SDE_MAJOR_SAME(mdata->mdss_version,
-					SDE_MDP_HW_REV_600) &&
-				!sde_rotator_get_clk(mgr,
-					SDE_ROTATOR_CLK_MDSS_AXI)) {
-			SDEROT_ERR("unable to get mdss_axi_clk\n");
-			ret = -EINVAL;
-			goto error_map_hw_ops;
-		}
-	} else {
-		ret = -ENODEV;
-		SDEROT_ERR("unsupported sde version %x\n",
-				mdata->mdss_version);
-		goto error_map_hw_ops;
-	}
-
-	ret = mgr->ops_hw_init(mgr);
-	if (ret) {
-		SDEROT_ERR("hw init failed %d\n", ret);
-		goto error_hw_init;
-	}
-
-	sde_rotator_pm_qos_add(mdata);
-
-	ret = sde_rotator_init_queue(mgr);
-	if (ret) {
-		SDEROT_ERR("fail to init queue\n");
-		goto error_init_queue;
-	}
-
-	/* disable power and clock after h/w initialization/query */
-	sde_rotator_clk_ctrl(mgr, false);
-	sde_rotator_resource_ctrl(mgr, false);
-	sde_rotator_footswitch_ctrl(mgr, false);
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
-	return 0;
-
-error_init_queue:
-	mgr->ops_hw_destroy(mgr);
-error_hw_init:
-error_map_hw_ops:
-	sde_rotator_clk_ctrl(mgr, false);
-	sde_rotator_resource_ctrl(mgr, false);
-	sde_rotator_footswitch_ctrl(mgr, false);
-error_fs_en_fail:
-	sde_rotator_res_destroy(mgr);
-error_res_init:
-error_parse_dt:
-	sysfs_remove_group(&mgr->device->kobj, &sde_rotator_fs_attr_group);
-error_create_sysfs:
-	devm_kfree(&pdev->dev, mgr);
-	*pmgr = NULL;
-	return ret;
-}
-
-void sde_rotator_core_destroy(struct sde_rot_mgr *mgr)
-{
-	struct device *dev;
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	dev = mgr->device;
-	sde_rotator_deinit_queue(mgr);
-	mgr->ops_hw_destroy(mgr);
-	sde_rotator_release_all(mgr);
-	pm_runtime_disable(mgr->device);
-	sde_rotator_res_destroy(mgr);
-	sysfs_remove_group(&mgr->device->kobj, &sde_rotator_fs_attr_group);
-	devm_kfree(dev, mgr);
-}
-
-void sde_rotator_core_dump(struct sde_rot_mgr *mgr)
-{
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	sde_rotator_resource_ctrl(mgr, true);
-
-	if (mgr->ops_hw_dump_status)
-		mgr->ops_hw_dump_status(mgr);
-
-	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
-
-	sde_rotator_resource_ctrl(mgr, false);
-}
-
-static void sde_rotator_suspend_cancel_rot_work(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_file_private *priv, *priv_next;
-
-	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
-		sde_rotator_cancel_all_requests(mgr, priv);
-	}
-}
-
-#if defined(CONFIG_PM)
-/*
- * sde_rotator_runtime_suspend - Turn off power upon runtime suspend event
- * @dev: Pointer to device structure
- */
-int sde_rotator_runtime_suspend(struct device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-	if (mgr->rot_enable_clk_cnt) {
-		SDEROT_ERR("invalid runtime suspend request %d\n",
-				mgr->rot_enable_clk_cnt);
-		return -EBUSY;
-	}
-
-	sde_rotator_footswitch_ctrl(mgr, false);
-	ATRACE_END("runtime_active");
-	SDEROT_DBG("exit runtime_active\n");
-	return 0;
-}
-
-/*
- * sde_rotator_runtime_resume - Turn on power upon runtime resume event
- * @dev: Pointer to device structure
- */
-int sde_rotator_runtime_resume(struct device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-	SDEROT_DBG("begin runtime_active\n");
-	ATRACE_BEGIN("runtime_active");
-	return sde_rotator_footswitch_ctrl(mgr, true);
-}
-
-/*
- * sde_rotator_runtime_idle - check if device is idling
- * @dev: Pointer to device structure
- */
-int sde_rotator_runtime_idle(struct device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-	/* add check for any busy status, if any */
-	SDEROT_DBG("idling ...\n");
-	return 0;
-}
-
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-/*
- * sde_rotator_pm_suspend - put the device in pm suspend state by cancelling
- *							 all active requests
- * @dev: Pointer to device structure
- */
-int sde_rotator_pm_suspend(struct device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-
-	sde_rot_mgr_lock(mgr);
-	atomic_inc(&mgr->device_suspended);
-	sde_rotator_suspend_cancel_rot_work(mgr);
-	mgr->minimum_bw_vote = 0;
-	sde_rotator_update_perf(mgr);
-	ATRACE_END("pm_active");
-	SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
-	sde_rot_mgr_unlock(mgr);
-	return 0;
-}
-
-/*
- * sde_rotator_pm_resume - put the device in pm active state
- * @dev: Pointer to device structure
- */
-int sde_rotator_pm_resume(struct device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_device(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * It is possible that the runtime status of the device may
-	 * have been active when the system was suspended. Reset the runtime
-	 * status to suspended state after a complete system resume.
-	 */
-	pm_runtime_disable(dev);
-	pm_runtime_set_suspended(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
-	sde_rot_mgr_lock(mgr);
-	SDEROT_DBG("begin pm active %d\n", atomic_read(&mgr->device_suspended));
-	ATRACE_BEGIN("pm_active");
-	atomic_dec(&mgr->device_suspended);
-	sde_rotator_update_perf(mgr);
-	sde_rot_mgr_unlock(mgr);
-	return 0;
-}
-#endif
-
-#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
-int sde_rotator_suspend(struct platform_device *dev, pm_message_t state)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_pdevice(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null_parameters\n");
-		return -ENODEV;
-	}
-
-	sde_rot_mgr_lock(mgr);
-	atomic_inc(&mgr->device_suspended);
-	sde_rotator_suspend_cancel_rot_work(mgr);
-	sde_rotator_update_perf(mgr);
-	sde_rot_mgr_unlock(mgr);
-	return 0;
-}
-
-int sde_rotator_resume(struct platform_device *dev)
-{
-	struct sde_rot_mgr *mgr;
-
-	mgr = sde_rot_mgr_from_pdevice(dev);
-
-	if (!mgr) {
-		SDEROT_ERR("null parameters\n");
-		return -ENODEV;
-	}
-
-	sde_rot_mgr_lock(mgr);
-	atomic_dec(&mgr->device_suspended);
-	sde_rotator_update_perf(mgr);
-	sde_rot_mgr_unlock(mgr);
-	return 0;
-}
-#endif
-
-/*
- * sde_rotator_session_open - external wrapper for open function
- *
- * Note each file open (sde_rot_file_private) is mapped to one session only.
- */
-int sde_rotator_session_open(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private **pprivate, int session_id,
-	struct sde_rot_queue_v1 *queue)
-{
-	int ret;
-	struct sde_rot_file_private *private;
-
-	if (!mgr || !pprivate || !queue) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	ret = sde_rotator_open(mgr, &private);
-	if (ret)
-		goto error_open;
-
-	private->mgr = mgr;
-	private->fenceq = queue;
-
-	ret = sde_rotator_open_session(mgr, private, session_id);
-	if (ret)
-		goto error_open_session;
-
-	*pprivate = private;
-
-	return 0;
-error_open_session:
-	sde_rotator_close(mgr, private);
-error_open:
-	return ret;
-}
-
-/*
- * sde_rotator_session_close - external wrapper for close function
- */
-void sde_rotator_session_close(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private, int session_id)
-{
-	if (!mgr || !private) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	sde_rotator_close_session(mgr, private, session_id);
-	sde_rotator_close(mgr, private);
-
-	SDEROT_DBG("session closed s:%d\n", session_id);
-}
-
-/*
- * sde_rotator_session_config - external wrapper for config function
- */
-int sde_rotator_session_config(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_config *config)
-{
-	if (!mgr || !private || !config) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	return sde_rotator_config_session(mgr, private, config);
-}
-
-/*
- * sde_rotator_session_validate - validate session
- */
-int sde_rotator_session_validate(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_config *config)
-{
-	int ret;
-
-	if (!mgr || !private || !config) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	SDEROT_DBG(
-		"validate session id=%u in{%u,%u}f:%x out{%u,%u}f:%x fps:%d\n",
-		config->session_id, config->input.width, config->input.height,
-		config->input.format, config->output.width,
-		config->output.height, config->output.format,
-		config->frame_rate);
-
-	ret = sde_rotator_verify_config_all(mgr, config);
-	if (ret) {
-		SDEROT_WARN("rotator verify format failed %d\n", ret);
-		return ret;
-	}
-
-	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
-		SDEROT_WARN("too many sbuf sessions\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
deleted file mode 100644
index f9ce335..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ /dev/null
@@ -1,848 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_CORE_H
-#define SDE_ROTATOR_CORE_H
-
-#include <linux/list.h>
-#include <linux/file.h>
-#include <linux/ktime.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/pm_runtime.h>
-#include <linux/kthread.h>
-
-#include "sde_rotator_base.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_sync.h"
-
-/**********************************************************************
- * Rotation request flag
- **********************************************************************/
-/* no rotation flag */
-#define SDE_ROTATION_NOP	0x01
-
-/* left/right flip */
-#define SDE_ROTATION_FLIP_LR	0x02
-
-/* up/down flip */
-#define SDE_ROTATION_FLIP_UD	0x04
-
-/* rotate 90 degree */
-#define SDE_ROTATION_90	0x08
-
-/* rotate 180 degre */
-#define SDE_ROTATION_180	(SDE_ROTATION_FLIP_LR | SDE_ROTATION_FLIP_UD)
-
-/* rotate 270 degree */
-#define SDE_ROTATION_270	(SDE_ROTATION_90 | SDE_ROTATION_180)
-
-/* format is interlaced */
-#define SDE_ROTATION_DEINTERLACE	0x10
-
-/* secure data */
-#define SDE_ROTATION_SECURE		0x80
-
-/* verify input configuration only */
-#define SDE_ROTATION_VERIFY_INPUT_ONLY	0x10000
-
-/* use client provided dma buf instead of ion fd */
-#define SDE_ROTATION_EXT_DMA_BUF	0x20000
-
-/* secure camera operation*/
-#define SDE_ROTATION_SECURE_CAMERA	0x40000
-
-/* use client mapped i/o virtual address */
-#define SDE_ROTATION_EXT_IOVA		0x80000
-
-/* use client provided clock/bandwidth parameters */
-#define SDE_ROTATION_EXT_PERF		0x100000
-
-/**********************************************************************
- * configuration structures
- **********************************************************************/
-
-/*
- * struct sde_rotation_buf_info - input/output buffer configuration
- * @width: width of buffer region to be processed
- * @height: height of buffer region to be processed
- * @format: pixel format of buffer
- * @comp_ratio: compression ratio for the session
- * @sbuf: true if buffer is streaming buffer
- */
-struct sde_rotation_buf_info {
-	uint32_t width;
-	uint32_t height;
-	uint32_t format;
-	struct sde_mult_factor comp_ratio;
-	bool sbuf;
-};
-
-/*
- * struct sde_rotation_config - rotation configuration for given session
- * @session_id: identifier of the given session
- * @input: input buffer information
- * @output: output buffer information
- * @frame_rate: session frame rate in fps
- * @clk_rate: requested rotator clock rate if SDE_ROTATION_EXT_PERF is set
- * @data_bw: requested data bus bandwidth if SDE_ROTATION_EXT_PERF is set
- * @flags: configuration flags, e.g. rotation angle, flip, etc...
- */
-struct sde_rotation_config {
-	uint32_t	session_id;
-	struct sde_rotation_buf_info	input;
-	struct sde_rotation_buf_info	output;
-	uint32_t	frame_rate;
-	uint64_t	clk_rate;
-	uint64_t	data_bw;
-	uint32_t	flags;
-};
-
-enum sde_rotator_ts {
-	SDE_ROTATOR_TS_SRCQB,		/* enqueue source buffer */
-	SDE_ROTATOR_TS_DSTQB,		/* enqueue destination buffer */
-	SDE_ROTATOR_TS_FENCE,		/* wait for source buffer fence */
-	SDE_ROTATOR_TS_QUEUE,		/* wait for h/w resource */
-	SDE_ROTATOR_TS_COMMIT,		/* prepare h/w command */
-	SDE_ROTATOR_TS_START,		/* wait for h/w kickoff rdy (inline) */
-	SDE_ROTATOR_TS_FLUSH,		/* initiate h/w processing */
-	SDE_ROTATOR_TS_DONE,		/* receive h/w completion */
-	SDE_ROTATOR_TS_RETIRE,		/* signal destination buffer fence */
-	SDE_ROTATOR_TS_SRCDQB,		/* dequeue source buffer */
-	SDE_ROTATOR_TS_DSTDQB,		/* dequeue destination buffer */
-	SDE_ROTATOR_TS_MAX
-};
-
-enum sde_rotator_clk_type {
-	SDE_ROTATOR_CLK_MDSS_AHB,
-	SDE_ROTATOR_CLK_MDSS_AXI,
-	SDE_ROTATOR_CLK_MDSS_ROT_SUB,
-	SDE_ROTATOR_CLK_MDSS_ROT,
-	SDE_ROTATOR_CLK_MNOC_AHB,
-	SDE_ROTATOR_CLK_GCC_AHB,
-	SDE_ROTATOR_CLK_GCC_AXI,
-	SDE_ROTATOR_CLK_MAX
-};
-
-enum sde_rotator_trigger {
-	SDE_ROTATOR_TRIGGER_IMMEDIATE,
-	SDE_ROTATOR_TRIGGER_VIDEO,
-	SDE_ROTATOR_TRIGGER_COMMAND,
-};
-
-enum sde_rotator_mode {
-	SDE_ROTATOR_MODE_OFFLINE,
-	SDE_ROTATOR_MODE_SBUF,
-	SDE_ROTATOR_MODE_MAX,
-};
-
-struct sde_rotation_item {
-	/* rotation request flag */
-	uint32_t	flags;
-
-	/* rotation trigger mode */
-	uint32_t	trigger;
-
-	/* prefill bandwidth in Bps */
-	uint64_t	prefill_bw;
-
-	/* Source crop rectangle */
-	struct sde_rect	src_rect;
-
-	/* Destination rectangle */
-	struct sde_rect	dst_rect;
-
-	/* Input buffer for the request */
-	struct sde_layer_buffer	input;
-
-	/* The output buffer for the request */
-	struct sde_layer_buffer	output;
-
-	/*
-	 * DMA pipe selection for this request by client:
-	 * 0: DMA pipe 0
-	 * 1: DMA pipe 1
-	 * or SDE_ROTATION_HW_ANY if client wants
-	 * driver to allocate any that is available
-	 *
-	 * OR
-	 *
-	 * Reserved
-	 */
-	uint32_t	pipe_idx;
-
-	/*
-	 * Write-back block selection for this request by client:
-	 * 0: Write-back block 0
-	 * 1: Write-back block 1
-	 * or SDE_ROTATION_HW_ANY if client wants
-	 * driver to allocate any that is available
-	 *
-	 * OR
-	 *
-	 * Priority selection for this request by client:
-	 * 0: Highest
-	 * 1..n: Limited by the lowest available priority
-	 */
-	uint32_t	wb_idx;
-
-	/*
-	 * Sequence ID of this request within the session
-	 */
-	uint32_t	sequence_id;
-
-	/* Which session ID is this request scheduled on */
-	uint32_t	session_id;
-
-	/* Time stamp for profiling purposes */
-	ktime_t		*ts;
-
-	/* Completion structure for inline rotation */
-	struct completion inline_start;
-};
-
-/*
- * Defining characteristics about rotation work, that has corresponding
- * fmt and roi checks in open session
- */
-#define SDE_ROT_DEFINING_FLAG_BITS SDE_ROTATION_90
-
-struct sde_rot_entry;
-struct sde_rot_perf;
-
-struct sde_rot_clk {
-	struct clk *clk;
-	char clk_name[32];
-	unsigned long rate;
-};
-
-struct sde_rot_hw_resource {
-	u32 wb_id;
-	u32 pending_count;
-	atomic_t num_active;
-	int max_active;
-	wait_queue_head_t wait_queue;
-};
-
-struct sde_rot_queue {
-	struct kthread_worker rot_kw;
-	struct task_struct *rot_thread;
-	struct sde_rot_timeline *timeline;
-	struct sde_rot_hw_resource *hw;
-};
-
-struct sde_rot_queue_v1 {
-	struct kthread_worker *rot_kw;
-	struct task_struct *rot_thread;
-	struct sde_rot_timeline *timeline;
-	struct sde_rot_hw_resource *hw;
-};
-/*
- * struct sde_rot_entry_container - rotation request
- * @list: list of active requests managed by rotator manager
- * @flags: reserved
- * @count: size of rotation entries
- * @pending_count: count of entries pending completion
- * @failed_count: count of entries failed completion
- * @finished: true if client is finished with the request
- * @retireq: workqueue to post completion notification
- * @retire_work: work for completion notification
- * @entries: array of rotation entries
- */
-struct sde_rot_entry_container {
-	struct list_head list;
-	u32 flags;
-	u32 count;
-	atomic_t pending_count;
-	atomic_t failed_count;
-	struct kthread_worker *retire_kw;
-	struct kthread_work *retire_work;
-	bool finished;
-	struct sde_rot_entry *entries;
-};
-
-struct sde_rot_mgr;
-struct sde_rot_file_private;
-
-/*
- * struct sde_rot_entry - rotation entry
- * @item: rotation item
- * @commit_work: work descriptor for commit handler
- * @done_work: work descriptor for done handler
- * @commitq: pointer to commit handler rotator queue
- * @fenceq: pointer to fence signaling rotator queue
- * @doneq: pointer to done handler rotator queue
- * @request: pointer to containing request
- * @src_buf: descriptor of source buffer
- * @dst_buf: descriptor of destination buffer
- * @input_fence: pointer to input fence for when input content is available
- * @output_fence: pointer to output fence for when output content is available
- * @output_signaled: true if output fence of this entry has been signaled
- * @dnsc_factor_w: calculated width downscale factor for this entry
- * @dnsc_factor_w: calculated height downscale factor for this entry
- * @perf: pointer to performance configuration associated with this entry
- * @work_assigned: true if this item is assigned to h/w queue/unit
- * @private: pointer to controlling session context
- */
-struct sde_rot_entry {
-	struct sde_rotation_item item;
-	struct kthread_work commit_work;
-	struct kthread_work done_work;
-	struct sde_rot_queue *commitq;
-	struct sde_rot_queue_v1 *fenceq;
-	struct sde_rot_queue *doneq;
-	struct sde_rot_entry_container *request;
-
-	struct sde_mdp_data src_buf;
-	struct sde_mdp_data dst_buf;
-
-	struct sde_rot_sync_fence *input_fence;
-
-	struct sde_rot_sync_fence *output_fence;
-	bool output_signaled;
-
-	u32 dnsc_factor_w;
-	u32 dnsc_factor_h;
-
-	struct sde_rot_perf *perf;
-	bool work_assigned; /* Used when cleaning up work_distribution */
-	struct sde_rot_file_private *private;
-};
-
-/*
- * struct sde_rot_trace_entry - structure used to pass info to trace
- */
-struct sde_rot_trace_entry {
-	u32 wb_idx;
-	u32 flags;
-	u32 input_format;
-	u32 input_width;
-	u32 input_height;
-	u32 src_x;
-	u32 src_y;
-	u32 src_w;
-	u32 src_h;
-	u32 output_format;
-	u32 output_width;
-	u32 output_height;
-	u32 dst_x;
-	u32 dst_y;
-	u32 dst_w;
-	u32 dst_h;
-};
-
-/*
- * struct sde_rot_perf - rotator session performance configuration
- * @list: list of performance configuration under one session
- * @config: current rotation configuration
- * @clk_rate: current clock rate in Hz
- * @bw: current bandwidth in byte per second
- * @work_dis_lock: serialization lock for updating work distribution (not used)
- * @work_distribution: work distribution among multiple hardware queue/unit
- * @last_wb_idx: last queue/unit index, used to account for pre-distributed work
- * @rdot_limit: read OT limit of this session
- * @wrot_limit: write OT limit of this session
- */
-struct sde_rot_perf {
-	struct list_head list;
-	struct sde_rotation_config config;
-	unsigned long clk_rate;
-	u64 bw;
-	struct mutex work_dis_lock;
-	u32 *work_distribution;
-	int last_wb_idx; /* last known wb index, used when above count is 0 */
-	u32 rdot_limit;
-	u32 wrot_limit;
-};
-
-/*
- * struct sde_rot_file_private - rotator manager per session context
- * @list: list of all session context
- * @req_list: list of rotation request for this session
- * @perf_list: list of performance configuration for this session (only one)
- * @mgr: pointer to the controlling rotator manager
- * @fenceq: pointer to rotator queue to signal when entry is done
- */
-struct sde_rot_file_private {
-	struct list_head list;
-	struct list_head req_list;
-	struct list_head perf_list;
-	struct sde_rot_mgr *mgr;
-	struct sde_rot_queue_v1 *fenceq;
-};
-
-/*
- * struct sde_rot_bus_data_type - rotator bus scaling configuration
- * @bus_cale_pdata: pointer to bus scaling configuration table
- * @bus_hdl: msm bus scaling handle
- * @curr_bw_uc_idx; current usecase index into configuration table
- * @curr_quota_val: current bandwidth request in byte per second
- */
-struct sde_rot_bus_data_type {
-	struct msm_bus_scale_pdata *bus_scale_pdata;
-	u32 bus_hdl;
-	u32 curr_bw_uc_idx;
-	u64 curr_quota_val;
-};
-
-/*
- * struct sde_rot_mgr - core rotator manager
- * @lock: serialization lock to rotator manager functions
- * @device_suspended: 0 if device is not suspended; non-zero suspended
- * @pdev: pointer to controlling platform device
- * @device: pointer to controlling device
- * @queue_count: number of hardware queue/unit available
- * @commitq: array of rotator commit queue corresponding to hardware queue
- * @doneq: array of rotator done queue corresponding to hardware queue
- * @file_list: list of all sessions managed by rotator manager
- * @pending_close_bw_vote: bandwidth of closed sessions with pending work
- * @minimum_bw_vote: minimum bandwidth required for current use case
- * @enable_bw_vote: minimum bandwidth required for power enable
- * @data_bus: data bus configuration state
- * @reg_bus: register bus configuration state
- * @module_power: power/clock configuration state
- * @regulator_enable: true if foot switch is enabled; false otherwise
- * @res_ref_cnt: reference count of how many times resource is requested
- * @rot_enable_clk_cnt: reference count of how many times clock is requested
- * @rot_clk: array of rotator and periphery clocks
- * @num_rot_clk: size of the rotator clock array
- * @rdot_limit: current read OT limit
- * @wrot_limit: current write OT limit
- * @hwacquire_timeout: maximum wait time for hardware availability in msec
- * @pixel_per_clk: rotator hardware performance in pixel for clock
- * @fudge_factor: fudge factor for clock calculation
- * @overhead: software overhead for offline rotation in msec
- * @min_rot_clk: minimum rotator clock rate
- * @max_rot_clk: maximum allowed rotator clock rate
- * @sbuf_ctx: pointer to sbuf session context
- * @ops_xxx: function pointers of rotator HAL layer
- * @hw_data: private handle of rotator HAL layer
- */
-struct sde_rot_mgr {
-	struct mutex lock;
-	atomic_t device_suspended;
-	struct platform_device *pdev;
-	struct device *device;
-
-	/*
-	 * Managing rotation queues, depends on
-	 * how many hw pipes available on the system
-	 */
-	int queue_count;
-	struct sde_rot_queue *commitq;
-	struct sde_rot_queue *doneq;
-
-	/*
-	 * managing all the open file sessions to bw calculations,
-	 * and resource clean up during suspend
-	 */
-	struct list_head file_list;
-
-	u64 pending_close_bw_vote;
-	u64 minimum_bw_vote;
-	u64 enable_bw_vote;
-	struct sde_rot_bus_data_type data_bus;
-	struct sde_rot_bus_data_type reg_bus;
-
-	/* Module power is only used for regulator management */
-	struct sde_module_power module_power;
-	bool regulator_enable;
-
-	int res_ref_cnt;
-	int rot_enable_clk_cnt;
-	struct sde_rot_clk *rot_clk;
-	int num_rot_clk;
-	u32 rdot_limit;
-	u32 wrot_limit;
-
-	u32 hwacquire_timeout;
-	struct sde_mult_factor pixel_per_clk;
-	struct sde_mult_factor fudge_factor;
-	struct sde_mult_factor overhead;
-	unsigned long min_rot_clk;
-	unsigned long max_rot_clk;
-
-	struct sde_rot_file_private *sbuf_ctx;
-
-	int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
-			struct sde_rot_entry *entry);
-	int (*ops_cancel_hw)(struct sde_rot_hw_resource *hw,
-			struct sde_rot_entry *entry);
-	int (*ops_abort_hw)(struct sde_rot_hw_resource *hw,
-			struct sde_rot_entry *entry);
-	int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
-			struct sde_rot_entry *entry);
-	int (*ops_wait_for_entry)(struct sde_rot_hw_resource *hw,
-			struct sde_rot_entry *entry);
-	struct sde_rot_hw_resource *(*ops_hw_alloc)(struct sde_rot_mgr *mgr,
-			u32 pipe_id, u32 wb_id);
-	void (*ops_hw_free)(struct sde_rot_mgr *mgr,
-			struct sde_rot_hw_resource *hw);
-	int (*ops_hw_init)(struct sde_rot_mgr *mgr);
-	void (*ops_hw_pre_pmevent)(struct sde_rot_mgr *mgr, bool pmon);
-	void (*ops_hw_post_pmevent)(struct sde_rot_mgr *mgr, bool pmon);
-	void (*ops_hw_destroy)(struct sde_rot_mgr *mgr);
-	ssize_t (*ops_hw_show_caps)(struct sde_rot_mgr *mgr,
-			struct device_attribute *attr, char *buf, ssize_t len);
-	ssize_t (*ops_hw_show_state)(struct sde_rot_mgr *mgr,
-			struct device_attribute *attr, char *buf, ssize_t len);
-	int (*ops_hw_create_debugfs)(struct sde_rot_mgr *mgr,
-			struct dentry *debugfs_root);
-	int (*ops_hw_validate_entry)(struct sde_rot_mgr *mgr,
-			struct sde_rot_entry *entry);
-	u32 (*ops_hw_get_pixfmt)(struct sde_rot_mgr *mgr, int index,
-			bool input, u32 mode);
-	int (*ops_hw_is_valid_pixfmt)(struct sde_rot_mgr *mgr, u32 pixfmt,
-			bool input, u32 mode);
-	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
-			int len);
-	int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
-	void (*ops_hw_dump_status)(struct sde_rot_mgr *mgr);
-
-	void *hw_data;
-};
-
-static inline int sde_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr,
-		u32 pixfmt, bool input, u32 mode)
-{
-	if (mgr && mgr->ops_hw_is_valid_pixfmt)
-		return mgr->ops_hw_is_valid_pixfmt(mgr, pixfmt, input, mode);
-
-	return false;
-}
-
-static inline u32 sde_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
-		int index, bool input, u32 mode)
-{
-	if (mgr && mgr->ops_hw_get_pixfmt)
-		return mgr->ops_hw_get_pixfmt(mgr, index, input, mode);
-
-	return 0;
-}
-
-static inline int sde_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
-		char *caps, int len)
-{
-	if (mgr && mgr->ops_hw_get_downscale_caps)
-		return mgr->ops_hw_get_downscale_caps(mgr, caps, len);
-
-	return 0;
-}
-
-static inline int sde_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
-{
-	if (mgr && mgr->ops_hw_get_maxlinewidth)
-		return mgr->ops_hw_get_maxlinewidth(mgr);
-
-	return 2048;
-}
-
-static inline int __compare_session_item_rect(
-	struct sde_rotation_buf_info *s_rect,
-	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
-{
-	if ((s_rect->width != i_rect->w) || (s_rect->height != i_rect->h) ||
-			(s_rect->format != i_fmt)) {
-		SDEROT_DBG(
-			"%s: session{%u,%u}f:%u mismatch from item{%u,%u}f:%u\n",
-			(src ? "src":"dst"), s_rect->width, s_rect->height,
-			s_rect->format, i_rect->w, i_rect->h, i_fmt);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/*
- * Compare all important flag bits associated with rotation between session
- * config and item request. Format and roi validation is done during open
- * session and is based certain defining bits. If these defining bits are
- * different in item request, there is a possibility that rotation item
- * is not a valid configuration.
- */
-static inline int __compare_session_rotations(uint32_t cfg_flag,
-	uint32_t item_flag)
-{
-	cfg_flag &= SDE_ROT_DEFINING_FLAG_BITS;
-	item_flag &= SDE_ROT_DEFINING_FLAG_BITS;
-	if (cfg_flag != item_flag) {
-		SDEROT_DBG(
-			"Rotation degree request different from open session\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/*
- * sde_rotator_core_init - initialize rotator manager for the given platform
- *	device
- * @pmgr: Pointer to pointer of the newly initialized rotator manager
- * @pdev: Pointer to platform device
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
-		struct platform_device *pdev);
-
-/*
- * sde_rotator_core_destroy - destroy given rotator manager
- * @mgr: Pointer to rotator manager
- * return: none
- */
-void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
-
-/*
- * sde_rotator_core_dump - perform register dump
- * @mgr: Pointer to rotator manager
- */
-void sde_rotator_core_dump(struct sde_rot_mgr *mgr);
-
-/*
- * sde_rotator_session_open - open a new rotator per file session
- * @mgr: Pointer to rotator manager
- * @pprivate: Pointer to pointer of the newly initialized per file session
- * @session_id: identifier of the newly created session
- * @queue: Pointer to fence queue of the new session
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_session_open(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private **pprivate, int session_id,
-	struct sde_rot_queue_v1 *queue);
-
-/*
- * sde_rotator_session_close - close the given rotator per file session
- * @mgr: Pointer to rotator manager
- * @private: Pointer to per file session
- * @session_id: identifier of the session
- * return: none
- */
-void sde_rotator_session_close(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private, int session_id);
-
-/*
- * sde_rotator_session_config - configure the given rotator per file session
- * @mgr: Pointer to rotator manager
- * @private: Pointer to  per file session
- * @config: Pointer to rotator configuration
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_session_config(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_config *config);
-
-/*
- * sde_rotator_session_validate - validate session configuration
- * @mgr: Pointer to rotator manager
- * @private: Pointer to per file session
- * @config: Pointer to rotator configuration
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_session_validate(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_config *config);
-
-/*
- * sde_rotator_req_init - allocate a new request and initialzie with given
- *	array of rotation items
- * @rot_dev: Pointer to rotator device
- * @private: Pointer to rotator manager per file context
- * @items: Pointer to array of rotation item
- * @count: size of rotation item array
- * @flags: rotation request flags
- * return: Pointer to new rotation request if success; ERR_PTR otherwise
- */
-struct sde_rot_entry_container *sde_rotator_req_init(
-	struct sde_rot_mgr *rot_dev,
-	struct sde_rot_file_private *private,
-	struct sde_rotation_item *items,
-	u32 count, u32 flags);
-
-/*
- * sde_rotator_req_reset_start - reset inline h/w 'start' indicator
- *	For inline rotations, the time of rotation start is not controlled
- *	by the rotator driver. This function resets an internal 'start'
- *	indicator that allows the rotator to delay its rotator
- *	timeout waiting until such time as the inline rotation has
- *	really started.
- * @mgr: Pointer to rotator manager
- * @req: Pointer to rotation request
- */
-void sde_rotator_req_reset_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_req_set_start - set inline h/w 'start' indicator
- * @mgr: Pointer to rotator manager
- * @req: Pointer to rotation request
- */
-void sde_rotator_req_set_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_req_wait_start - wait for inline h/w 'start' indicator
- * @mgr: Pointer to rotator manager
- * @req: Pointer to rotation request
- * return: Zero on success
- */
-int sde_rotator_req_wait_start(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_req_finish - notify manager that client is finished with the
- *	given request and manager can release the request as required
- * @mgr: Pointer to rotator manager
- * @private: Pointer to rotator manager per file context
- * @req: Pointer to rotation request
- * return: none
- */
-void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_abort_inline_request - abort inline rotation request after start
- *	This function allows inline rotation requests to be aborted after
- *	sde_rotator_req_set_start has already been issued.
- * @mgr: Pointer to rotator manager
- * @private: Pointer to rotator manager per file context
- * @req: Pointer to rotation request
- * return: none
- */
-void sde_rotator_abort_inline_request(struct sde_rot_mgr *mgr,
-		struct sde_rot_file_private *private,
-		struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_handle_request_common - add the given request to rotator
- *	manager and clean up completed requests
- * @rot_dev: Pointer to rotator device
- * @private: Pointer to rotator manager per file context
- * @req: Pointer to rotation request
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_handle_request_common(struct sde_rot_mgr *rot_dev,
-	struct sde_rot_file_private *ctx,
-	struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_queue_request - queue/schedule the given request for h/w commit
- * @rot_dev: Pointer to rotator device
- * @private: Pointer to rotator manager per file context
- * @req: Pointer to rotation request
- * return: 0 if success; error code otherwise
- */
-void sde_rotator_queue_request(struct sde_rot_mgr *rot_dev,
-	struct sde_rot_file_private *ctx,
-	struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_verify_config_all - verify given rotation configuration
- * @rot_dev: Pointer to rotator device
- * @config: Pointer to rotator configuration
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_verify_config_all(struct sde_rot_mgr *rot_dev,
-	struct sde_rotation_config *config);
-
-/*
- * sde_rotator_verify_config_input - verify rotation input configuration
- * @rot_dev: Pointer to rotator device
- * @config: Pointer to rotator configuration
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_verify_config_input(struct sde_rot_mgr *rot_dev,
-	struct sde_rotation_config *config);
-
-/*
- * sde_rotator_verify_config_output - verify rotation output configuration
- * @rot_dev: Pointer to rotator device
- * @config: Pointer to rotator configuration
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_verify_config_output(struct sde_rot_mgr *rot_dev,
-	struct sde_rotation_config *config);
-
-/*
- * sde_rotator_validate_request - validates given rotation request with
- *	previous rotator configuration
- * @rot_dev: Pointer to rotator device
- * @private: Pointer to rotator manager per file context
- * @req: Pointer to rotation request
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
-	struct sde_rot_file_private *ctx,
-	struct sde_rot_entry_container *req);
-
-/*
- * sde_rotator_clk_ctrl - enable/disable rotator clock with reference counting
- * @mgr: Pointer to rotator manager
- * @enable: true to enable clock; false to disable clock
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
-
-/* sde_rotator_resource_ctrl_enabled - check if resource control is enabled
- * @mgr: Pointer to rotator manager
- * Return: true if enabled; false otherwise
- */
-static inline int sde_rotator_resource_ctrl_enabled(struct sde_rot_mgr *mgr)
-{
-	return mgr->regulator_enable;
-}
-
-/*
- * sde_rotator_cancel_all_requests - cancel all outstanding requests
- * @mgr: Pointer to rotator manager
- * @private: Pointer to rotator manager per file context
- */
-void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private);
-
-/*
- * sde_rot_mgr_lock - serialization lock prior to rotator manager calls
- * @mgr: Pointer to rotator manager
- */
-static inline void sde_rot_mgr_lock(struct sde_rot_mgr *mgr)
-{
-	mutex_lock(&mgr->lock);
-}
-
-/*
- * sde_rot_mgr_lock - serialization unlock after rotator manager calls
- * @mgr: Pointer to rotator manager
- */
-static inline void sde_rot_mgr_unlock(struct sde_rot_mgr *mgr)
-{
-	mutex_unlock(&mgr->lock);
-}
-
-/*
- * sde_rot_mgr_pd_enabled - return true if power domain is enabled
- * @mgr: Pointer to rotator manager
- */
-static inline bool sde_rot_mgr_pd_enabled(struct sde_rot_mgr *mgr)
-{
-	return mgr && mgr->device && mgr->device->pm_domain;
-}
-
-#if defined(CONFIG_PM)
-int sde_rotator_runtime_resume(struct device *dev);
-int sde_rotator_runtime_suspend(struct device *dev);
-int sde_rotator_runtime_idle(struct device *dev);
-#endif
-
-#if defined(CONFIG_PM_SLEEP)
-int sde_rotator_pm_suspend(struct device *dev);
-int sde_rotator_pm_resume(struct device *dev);
-#endif
-
-#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
-int sde_rotator_suspend(struct platform_device *dev, pm_message_t state);
-int sde_rotator_resume(struct platform_device *dev);
-#else
-#define sde_rotator_suspend NULL
-#define sde_rotator_resume NULL
-#endif
-#endif /* __SDE_ROTATOR_CORE_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
deleted file mode 100644
index f211e0a..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ /dev/null
@@ -1,1384 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_rotator_debug.h"
-#include "sde_rotator_base.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_dev.h"
-#include "sde_rotator_trace.h"
-
-#ifdef CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG
-#define SDE_EVTLOG_DEFAULT_ENABLE 1
-#else
-#define SDE_EVTLOG_DEFAULT_ENABLE 0
-#endif
-#define SDE_EVTLOG_DEFAULT_PANIC 1
-#define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
-#define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
-#define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
-
-/*
- * evtlog will print this number of entries when it is called through
- * sysfs node or panic. This prevents kernel log from evtlog message
- * flood.
- */
-#define SDE_ROT_EVTLOG_PRINT_ENTRY	256
-
-/*
- * evtlog keeps this number of entries in memory for debug purpose. This
- * number must be greater than print entry to prevent out of bound evtlog
- * entry array access.
- */
-#define SDE_ROT_EVTLOG_ENTRY	(SDE_ROT_EVTLOG_PRINT_ENTRY * 4)
-#define SDE_ROT_EVTLOG_MAX_DATA 15
-#define SDE_ROT_EVTLOG_BUF_MAX 512
-#define SDE_ROT_EVTLOG_BUF_ALIGN 32
-#define SDE_ROT_DEBUG_BASE_MAX 10
-
-#define SDE_ROT_DEFAULT_BASE_REG_CNT 0x100
-#define GROUP_BYTES 4
-#define ROW_BYTES 16
-
-#define SDE_ROT_TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
-
-static DEFINE_SPINLOCK(sde_rot_xlock);
-
-/*
- * tlog - EVTLOG entry structure
- * @counter - EVTLOG entriy counter
- * @time - timestamp of EVTLOG entry
- * @name - function name of EVTLOG entry
- * @line - line number of EVTLOG entry
- * @data - EVTLOG data contents
- * @data_cnt - number of data contents
- * @pid - pid of current calling thread
- */
-struct tlog {
-	u32 counter;
-	s64 time;
-	const char *name;
-	int line;
-	u32 data[SDE_ROT_EVTLOG_MAX_DATA];
-	u32 data_cnt;
-	int pid;
-};
-
-/*
- * sde_rot_dbg_evtlog - EVTLOG debug data structure
- * @logs - EVTLOG entries
- * @first - first entry index in the EVTLOG
- * @last - last entry index in the EVTLOG
- * @curr - curr entry index in the EVTLOG
- * @evtlog - EVTLOG debugfs handle
- * @evtlog_enable - boolean indicates EVTLOG enable/disable
- * @panic_on_err - boolean indicates issue panic after EVTLOG dump
- * @enable_reg_dump - control in-log/memory dump for rotator registers
- * @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus
- * @enable_rot_dbgbus_dump - control in-log/memroy dump for rotator debug bus
- * @evtlog_dump_work - schedule work strucutre for timeout handler
- * @work_dump_reg - storage for register dump control in schedule work
- * @work_panic - storage for panic control in schedule work
- * @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work
- * @work_rot_dbgbus - storage for rotator debug bus control in schedule work
- * @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping
- * @rot_dbgbus_dump - memory buffer for rotator debug bus dumping
- * @reg_dump_array - memory buffer for rotator registers dumping
- */
-struct sde_rot_dbg_evtlog {
-	struct tlog logs[SDE_ROT_EVTLOG_ENTRY];
-	u32 first;
-	u32 last;
-	u32 curr;
-	struct dentry *evtlog;
-	u32 evtlog_enable;
-	u32 panic_on_err;
-	u32 enable_reg_dump;
-	u32 enable_vbif_dbgbus_dump;
-	u32 enable_rot_dbgbus_dump;
-	struct work_struct evtlog_dump_work;
-	bool work_dump_reg;
-	bool work_panic;
-	bool work_vbif_dbgbus;
-	bool work_rot_dbgbus;
-	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
-	u32 *rot_dbgbus_dump;
-	u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
-} sde_rot_dbg_evtlog;
-
-static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	bool in_log, in_mem;
-	u32 *dump_addr = NULL;
-	u32 status = 0;
-	struct sde_rot_debug_bus *head;
-	phys_addr_t phys = 0;
-	int i;
-	u32 offset;
-	void __iomem *base;
-
-	in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
-	in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
-	base = mdata->sde_io.base;
-
-	if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
-		return;
-
-	pr_info("======== SDE Rotator Debug bus DUMP =========\n");
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
-				mdata->rot_dbg_bus_size * 4 * sizeof(u32),
-				&phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
-				__func__, dump_addr,
-				dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	sde_smmu_ctrl(1);
-
-	for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
-		head = mdata->rot_dbg_bus + i;
-		writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
-				base + head->wr_addr);
-		wmb(); /* make sure test bits were written */
-
-		offset = head->wr_addr + 0x4;
-
-		status = readl_relaxed(base + offset);
-
-		if (in_log)
-			pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
-				head->wr_addr, head->block_id, head->test_id,
-				status);
-
-		if (dump_addr && in_mem) {
-			dump_addr[i*4]     = head->wr_addr;
-			dump_addr[i*4 + 1] = head->block_id;
-			dump_addr[i*4 + 2] = head->test_id;
-			dump_addr[i*4 + 3] = status;
-		}
-
-		/* Disable debug bus once we are done */
-		writel_relaxed(0, base + head->wr_addr);
-	}
-
-	sde_smmu_ctrl(0);
-
-	pr_info("========End Debug bus=========\n");
-}
-
-/*
- * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
- *                             enable/disable
- * @flag - EVTLOG option flag
- */
-static inline bool sde_rot_evtlog_is_enabled(u32 flag)
-{
-	return (flag & sde_rot_dbg_evtlog.evtlog_enable) ||
-		(flag == SDE_ROT_EVTLOG_ALL &&
-		 sde_rot_dbg_evtlog.evtlog_enable);
-}
-
-/*
- * __vbif_debug_bus - helper function for VBIF debug bus dump
- * @head - VBIF debug bus data structure
- * @vbif_base - VBIF IO mapped address
- * @dump_addr - output buffer for memory dump option
- * @in_log - boolean indicates in-log dump option
- */
-static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head,
-	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
-{
-	int i, j;
-	u32 val;
-
-	if (!dump_addr && !in_log)
-		return;
-
-	for (i = 0; i < head->block_cnt; i++) {
-		writel_relaxed(1 << (i + head->bit_offset),
-				vbif_base + head->block_bus_addr);
-		/* make sure that current bus blcok enable */
-		wmb();
-		for (j = 0; j < head->test_pnt_cnt; j++) {
-			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
-			/* make sure that test point is enabled */
-			wmb();
-			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
-			if (dump_addr) {
-				*dump_addr++ = head->block_bus_addr;
-				*dump_addr++ = i;
-				*dump_addr++ = j;
-				*dump_addr++ = val;
-			}
-			if (in_log)
-				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
-					head->block_bus_addr, i, j, val);
-		}
-	}
-}
-
-/*
- * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump
- * @bus_dump_flag - dump flag controlling in-log/memory dump option
- * @dump_mem - output buffer for memory dump location
- */
-static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag,
-	u32 **dump_mem)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	bool in_log, in_mem;
-	u32 *dump_addr = NULL;
-	u32 value;
-	struct sde_rot_vbif_debug_bus *head;
-	phys_addr_t phys = 0;
-	int i, list_size = 0;
-	void __iomem *vbif_base;
-	struct sde_rot_vbif_debug_bus *dbg_bus;
-	u32 bus_size;
-
-	pr_info("======== NRT VBIF Debug bus DUMP =========\n");
-	vbif_base = mdata->vbif_nrt_io.base;
-	dbg_bus = mdata->nrt_vbif_dbg_bus;
-	bus_size = mdata->nrt_vbif_dbg_bus_size;
-
-	if (!vbif_base || !dbg_bus || !bus_size)
-		return;
-
-	/* allocate memory for each test point */
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-		list_size += (head->block_cnt * head->test_pnt_cnt);
-	}
-
-	/* 4 bytes * 4 entries for each test point*/
-	list_size *= 16;
-
-	in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
-	in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
-				list_size, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
-				__func__, dump_addr, dump_addr + list_size);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	sde_smmu_ctrl(1);
-
-	value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
-	writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
-
-	/* make sure that vbif core is on */
-	wmb();
-
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-
-		writel_relaxed(0, vbif_base + head->disable_bus_addr);
-		writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
-		/* make sure that other bus is off */
-		wmb();
-
-		__vbif_debug_bus(head, vbif_base, dump_addr, in_log);
-		if (dump_addr)
-			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
-	}
-
-	sde_smmu_ctrl(0);
-
-	pr_info("========End VBIF Debug bus=========\n");
-}
-
-/*
- * sde_rot_dump_reg - helper function for dumping rotator register set content
- * @dump_name - register set name
- * @reg_dump_flag - dumping flag controlling in-log/memory dump location
- * @access - access type, sde registers or vbif registers
- * @addr - starting address offset for dumping
- * @len - range of the register set
- * @dump_mem - output buffer for memory dump location option
- */
-void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag,
-	enum sde_rot_regdump_access access, u32 addr,
-	int len, u32 **dump_mem)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	bool in_log, in_mem;
-	u32 *dump_addr = NULL;
-	phys_addr_t phys = 0;
-	int i;
-	void __iomem *base;
-
-	in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
-	in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
-
-	pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
-		reg_dump_flag, in_log, in_mem);
-
-	if (len % 16)
-		len += 16;
-	len /= 16;
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
-				len * 16, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%X\n",
-				dump_name, dump_addr, dump_addr + (u32)len * 16,
-				addr);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: kzalloc fails!\n");
-		}
-	}
-
-	base = mdata->sde_io.base;
-	/*
-	 * VBIF NRT base handling
-	 */
-	if (access == SDE_ROT_REGDUMP_VBIF)
-		base = mdata->vbif_nrt_io.base;
-
-	for (i = 0; i < len; i++) {
-		u32 x0, x4, x8, xc;
-
-		x0 = readl_relaxed(base + addr+0x0);
-		x4 = readl_relaxed(base + addr+0x4);
-		x8 = readl_relaxed(base + addr+0x8);
-		xc = readl_relaxed(base + addr+0xc);
-
-		if (in_log)
-			pr_info("0x%08X : %08x %08x %08x %08x\n",
-					addr, x0, x4, x8, xc);
-
-		if (dump_addr && in_mem) {
-			dump_addr[i*4] = x0;
-			dump_addr[i*4 + 1] = x4;
-			dump_addr[i*4 + 2] = x8;
-			dump_addr[i*4 + 3] = xc;
-		}
-
-		addr += 16;
-	}
-}
-
-/*
- * sde_rot_dump_reg_all - dumping all SDE rotator registers
- */
-static void sde_rot_dump_reg_all(void)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_rot_regdump *head, *regdump;
-	u32 regdump_size;
-	int i;
-
-	regdump = mdata->regdump;
-	regdump_size = mdata->regdump_size;
-
-	if (!regdump || !regdump_size)
-		return;
-
-	/* Enable clock to rotator if not yet enabled */
-	sde_smmu_ctrl(1);
-
-	for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) {
-		head = &regdump[i];
-
-		if (head->access == SDE_ROT_REGDUMP_WRITE) {
-			if (head->len != 1) {
-				SDEROT_ERR("invalid write len %u\n", head->len);
-				continue;
-			}
-			writel_relaxed(head->value,
-					mdata->sde_io.base + head->offset);
-			/* Make sure write go through */
-			wmb();
-		} else {
-			sde_rot_dump_reg(head->name,
-					sde_rot_dbg_evtlog.enable_reg_dump,
-					head->access,
-					head->offset, head->len,
-					&sde_rot_dbg_evtlog.reg_dump_array[i]);
-		}
-	}
-
-	/* Disable rotator clock */
-	sde_smmu_ctrl(0);
-}
-
-/*
- * __sde_rot_evtlog_dump_calc_range - calculate dump range for EVTLOG
- */
-static bool __sde_rot_evtlog_dump_calc_range(void)
-{
-	static u32 next;
-	bool need_dump = true;
-	unsigned long flags;
-	struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog;
-
-	spin_lock_irqsave(&sde_rot_xlock, flags);
-
-	evtlog->first = next;
-
-	if (evtlog->last == evtlog->first) {
-		need_dump = false;
-		goto dump_exit;
-	}
-
-	if (evtlog->last < evtlog->first) {
-		evtlog->first %= SDE_ROT_EVTLOG_ENTRY;
-		if (evtlog->last < evtlog->first)
-			evtlog->last += SDE_ROT_EVTLOG_ENTRY;
-	}
-
-	if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) {
-		pr_warn("evtlog buffer overflow before dump: %d\n",
-			evtlog->last - evtlog->first);
-		evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY;
-	}
-	next = evtlog->first + 1;
-
-dump_exit:
-	spin_unlock_irqrestore(&sde_rot_xlock, flags);
-
-	return need_dump;
-}
-
-/*
- * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping
- * @evtlog_buf: EVTLOG dump output buffer
- * @evtlog_buf_size: EVTLOG output buffer size
- */
-static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf,
-		ssize_t evtlog_buf_size)
-{
-	int i;
-	ssize_t off = 0;
-	struct tlog *log, *prev_log;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sde_rot_xlock, flags);
-
-	log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first %
-		SDE_ROT_EVTLOG_ENTRY];
-
-	prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) %
-		SDE_ROT_EVTLOG_ENTRY];
-
-	off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
-		log->name, log->line);
-
-	if (off < SDE_ROT_EVTLOG_BUF_ALIGN) {
-		memset((evtlog_buf + off), 0x20,
-				(SDE_ROT_EVTLOG_BUF_ALIGN - off));
-		off = SDE_ROT_EVTLOG_BUF_ALIGN;
-	}
-
-	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
-		"=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first,
-		log->time, (log->time - prev_log->time), log->pid);
-
-	for (i = 0; i < log->data_cnt; i++)
-		off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
-			"%x ", log->data[i]);
-
-	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
-
-	spin_unlock_irqrestore(&sde_rot_xlock, flags);
-
-	return off;
-}
-
-/*
- * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer
- */
-static void sde_rot_evtlog_dump_all(void)
-{
-	char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
-
-	while (__sde_rot_evtlog_dump_calc_range()) {
-		sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX);
-		pr_info("%s\n", evtlog_buf);
-	}
-}
-
-/*
- * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump
- * @inode: debugfs inode
- * @file: file handler
- */
-static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-/*
- * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump
- * @file: file handler
- * @buff: user buffer content for debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
-		size_t count, loff_t *ppos)
-{
-	ssize_t len = 0;
-	char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
-
-	if (__sde_rot_evtlog_dump_calc_range()) {
-		len = sde_rot_evtlog_dump_entry(evtlog_buf,
-				SDE_ROT_EVTLOG_BUF_MAX);
-		if (len < 0 || len > count) {
-			pr_err("len is more than the user buffer size\n");
-			return 0;
-		}
-
-		if (copy_to_user(buff, evtlog_buf, len))
-			return -EFAULT;
-		*ppos += len;
-	}
-
-	return len;
-}
-
-/*
- * sde_rot_evtlog_dump_write - debugfs write handler for evtlog dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t sde_rot_evtlog_dump_write(struct file *file,
-	const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	sde_rot_evtlog_dump_all();
-
-	sde_rot_dump_reg_all();
-
-	if (sde_rot_dbg_evtlog.panic_on_err)
-		panic("evtlog_dump_write");
-
-	return count;
-}
-
-/*
- * sde_rot_evtlog_dump_helper - helper function for evtlog dump
- * @dead: boolean indicates panic after dump
- * @panic_name: Panic signature name show up in log
- * @dump_rot: boolean indicates rotator register dump
- * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
- */
-static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
-	bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
-{
-	sde_rot_evtlog_dump_all();
-
-	if (dump_rot_debug_bus)
-		sde_rot_dump_debug_bus(
-				sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
-				&sde_rot_dbg_evtlog.rot_dbgbus_dump);
-
-	if (dump_vbif_debug_bus)
-		sde_rot_dump_vbif_debug_bus(
-				sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
-				&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);
-
-	/*
-	 * Rotator registers always dump last
-	 */
-	if (dump_rot)
-		sde_rot_dump_reg_all();
-
-	if (dead)
-		panic(panic_name);
-}
-
-/*
- * sde_rot_evtlog_debug_work - schedule work function for evtlog dump
- * @work: schedule work structure
- */
-static void sde_rot_evtlog_debug_work(struct work_struct *work)
-{
-	sde_rot_evtlog_dump_helper(
-		sde_rot_dbg_evtlog.work_panic,
-		"evtlog_workitem",
-		sde_rot_dbg_evtlog.work_dump_reg,
-		sde_rot_dbg_evtlog.work_vbif_dbgbus,
-		sde_rot_dbg_evtlog.work_rot_dbgbus);
-}
-
-/*
- * sde_rot_evtlog_tout_handler - log dump timeout handler
- * @queue: boolean indicate putting log dump into queue
- * @name: function name having timeout
- */
-void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
-{
-	int i;
-	bool dead = false;
-	bool dump_rot = false;
-	bool dump_vbif_dbgbus = false;
-	bool dump_rot_dbgbus = false;
-	char *blk_name = NULL;
-	va_list args;
-
-	if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT))
-		return;
-
-	if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work))
-		return;
-
-	va_start(args, name);
-	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
-		blk_name = va_arg(args, char*);
-		if (IS_ERR_OR_NULL(blk_name))
-			break;
-
-		if (!strcmp(blk_name, "rot"))
-			dump_rot = true;
-
-		if (!strcmp(blk_name, "vbif_dbg_bus"))
-			dump_vbif_dbgbus = true;
-
-		if (!strcmp(blk_name, "rot_dbg_bus"))
-			dump_rot_dbgbus = true;
-
-		if (!strcmp(blk_name, "panic"))
-			dead = true;
-	}
-	va_end(args);
-
-	if (queue) {
-		/* schedule work to dump later */
-		sde_rot_dbg_evtlog.work_panic = dead;
-		sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
-		sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
-		sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
-		schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
-	} else {
-		sde_rot_evtlog_dump_helper(dead, name, dump_rot,
-			dump_vbif_dbgbus, dump_rot_dbgbus);
-	}
-}
-
-/*
- * sde_rot_evtlog - log contents into memory for dump analysis
- * @name: Name of function calling evtlog
- * @line: line number of calling function
- * @flag: Log control flag
- */
-void sde_rot_evtlog(const char *name, int line, int flag, ...)
-{
-	unsigned long flags;
-	int i, val = 0;
-	va_list args;
-	struct tlog *log;
-
-	if (!sde_rot_evtlog_is_enabled(flag))
-		return;
-
-	spin_lock_irqsave(&sde_rot_xlock, flags);
-	log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr];
-	log->time = ktime_to_us(ktime_get());
-	log->name = name;
-	log->line = line;
-	log->data_cnt = 0;
-	log->pid = current->pid;
-
-	va_start(args, flag);
-	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
-
-		val = va_arg(args, int);
-		if (val == SDE_ROT_DATA_LIMITER)
-			break;
-
-		log->data[i] = val;
-	}
-	va_end(args);
-	log->data_cnt = i;
-	sde_rot_dbg_evtlog.curr =
-		(sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY;
-	sde_rot_dbg_evtlog.last++;
-
-	trace_sde_rot_evtlog(name, line, log->data_cnt, log->data);
-
-	spin_unlock_irqrestore(&sde_rot_xlock, flags);
-}
-
-/*
- * sde_rotator_stat_show - Show statistics on read to this debugfs file
- * @s: Pointer to sequence file structure
- * @data: Pointer to private data structure
- */
-static int sde_rotator_stat_show(struct seq_file *s, void *data)
-{
-	int i, offset;
-	struct sde_rotator_device *rot_dev = s->private;
-	struct sde_rotator_statistics *stats = &rot_dev->stats;
-	u64 count = stats->count;
-	int num_events;
-	s64 proc_max, proc_min, proc_avg;
-	s64 swoh_max, swoh_min, swoh_avg;
-
-	proc_max = 0;
-	proc_min = S64_MAX;
-	proc_avg = 0;
-	swoh_max = 0;
-	swoh_min = S64_MAX;
-	swoh_avg = 0;
-
-	if (count > SDE_ROTATOR_NUM_EVENTS) {
-		num_events = SDE_ROTATOR_NUM_EVENTS;
-		offset = count % SDE_ROTATOR_NUM_EVENTS;
-	} else {
-		num_events = count;
-		offset = 0;
-	}
-
-	for (i = 0; i < num_events; i++) {
-		int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
-		ktime_t *ts = stats->ts[k];
-		ktime_t start_time =
-			ktime_before(ts[SDE_ROTATOR_TS_SRCQB],
-					ts[SDE_ROTATOR_TS_DSTQB]) ?
-					ts[SDE_ROTATOR_TS_SRCQB] :
-					ts[SDE_ROTATOR_TS_DSTQB];
-		s64 proc_time =
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
-					start_time));
-		s64 sw_overhead_time =
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
-					start_time));
-
-		seq_printf(s,
-			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld st:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
-			i,
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
-					ts[SDE_ROTATOR_TS_SRCQB])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
-					ts[SDE_ROTATOR_TS_DSTQB])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_QUEUE],
-					ts[SDE_ROTATOR_TS_FENCE])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_COMMIT],
-					ts[SDE_ROTATOR_TS_QUEUE])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
-					ts[SDE_ROTATOR_TS_COMMIT])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
-					ts[SDE_ROTATOR_TS_START])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
-					ts[SDE_ROTATOR_TS_FLUSH])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
-					ts[SDE_ROTATOR_TS_DONE])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
-					ts[SDE_ROTATOR_TS_RETIRE])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DSTDQB],
-					ts[SDE_ROTATOR_TS_RETIRE])),
-			proc_time, sw_overhead_time);
-
-		proc_max = max(proc_max, proc_time);
-		proc_min = min(proc_min, proc_time);
-		proc_avg += proc_time;
-
-		swoh_max = max(swoh_max, sw_overhead_time);
-		swoh_min = min(swoh_min, sw_overhead_time);
-		swoh_avg += sw_overhead_time;
-	}
-
-	proc_avg = (num_events) ?
-			DIV_ROUND_CLOSEST_ULL(proc_avg, num_events) : 0;
-	swoh_avg = (num_events) ?
-			DIV_ROUND_CLOSEST_ULL(swoh_avg, num_events) : 0;
-
-	seq_printf(s, "count:%llu\n", count);
-	seq_printf(s, "fai1:%llu\n", stats->fail_count);
-	seq_printf(s, "t_max:%lld\n", proc_max);
-	seq_printf(s, "t_min:%lld\n", proc_min);
-	seq_printf(s, "t_avg:%lld\n", proc_avg);
-	seq_printf(s, "swoh_max:%lld\n", swoh_max);
-	seq_printf(s, "swoh_min:%lld\n", swoh_min);
-	seq_printf(s, "swoh_avg:%lld\n", swoh_avg);
-
-	return 0;
-}
-
-/*
- * sde_rotator_raw_show - Show raw statistics on read from this debugfs file
- * @s: Pointer to sequence file structure
- * @data: Pointer to private data structure
- */
-static int sde_rotator_raw_show(struct seq_file *s, void *data)
-{
-	int i, j, offset;
-	struct sde_rotator_device *rot_dev = s->private;
-	struct sde_rotator_statistics *stats = &rot_dev->stats;
-	u64 count = stats->count;
-	int num_events;
-
-	if (count > SDE_ROTATOR_NUM_EVENTS) {
-		num_events = SDE_ROTATOR_NUM_EVENTS;
-		offset = count % SDE_ROTATOR_NUM_EVENTS;
-	} else {
-		num_events = count;
-		offset = 0;
-	}
-
-	for (i = 0; i < num_events; i++) {
-		int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
-		ktime_t *ts = stats->ts[k];
-
-		seq_printf(s, "%d ", i);
-		for (j = 0; j < SDE_ROTATOR_NUM_TIMESTAMPS; j++)
-			seq_printf(s, "%lld ", ktime_to_us(ts[j]));
-		seq_puts(s, "\n");
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_dbg_open - Processed statistics debugfs file open function
- * @inode:
- * @file:
- */
-static int sde_rotator_stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sde_rotator_stat_show, inode->i_private);
-}
-
-/*
- * sde_rotator_dbg_open - Raw statistics debugfs file open function
- * @inode:
- * @file:
- */
-static int sde_rotator_raw_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sde_rotator_raw_show, inode->i_private);
-}
-
-/*
- * sde_rotator_dbg_open - Raw statistics debugfs file open function
- * @mdata: Pointer to rotator global data
- * @debugfs_root: Pointer to parent debugfs node
- */
-static int sde_rotator_base_create_debugfs(
-		struct sde_rot_data_type *mdata,
-		struct dentry *debugfs_root)
-{
-	if (!debugfs_create_u32("iommu_ref_cnt", 0444,
-			debugfs_root, &mdata->iommu_ref_cnt)) {
-		SDEROT_WARN("failed to create debugfs iommu ref cnt\n");
-		return -EINVAL;
-	}
-
-	mdata->clk_always_on = false;
-	if (!debugfs_create_bool("clk_always_on", 0644,
-			debugfs_root, &mdata->clk_always_on)) {
-		SDEROT_WARN("failed to create debugfs clk_always_on\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_dbg_open - Raw statistics debugfs file open function
- * @mgr: Pointer to rotator manager structure
- * @debugfs_root: Pointer to parent debugfs node
- */
-static int sde_rotator_core_create_debugfs(
-		struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	int ret;
-
-	if (!debugfs_create_u32("hwacquire_timeout", 0400,
-			debugfs_root, &mgr->hwacquire_timeout)) {
-		SDEROT_WARN("failed to create debugfs hw acquire timeout\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("ppc_numer", 0644,
-			debugfs_root, &mgr->pixel_per_clk.numer)) {
-		SDEROT_WARN("failed to create debugfs ppc numerator\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("ppc_denom", 0600,
-			debugfs_root, &mgr->pixel_per_clk.denom)) {
-		SDEROT_WARN("failed to create debugfs ppc denominator\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u64("enable_bw_vote", 0644,
-			debugfs_root, &mgr->enable_bw_vote)) {
-		SDEROT_WARN("failed to create enable_bw_vote\n");
-		return -EINVAL;
-	}
-
-	if (mgr->ops_hw_create_debugfs) {
-		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
-static const struct file_operations sde_rot_evtlog_fops = {
-	.open = sde_rot_evtlog_dump_open,
-	.read = sde_rot_evtlog_dump_read,
-	.write = sde_rot_evtlog_dump_write,
-};
-
-static int sde_rotator_evtlog_create_debugfs(
-		struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	int i;
-
-	sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root);
-	if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) {
-		pr_err("debugfs_create_dir fail, error %ld\n",
-		       PTR_ERR(sde_rot_dbg_evtlog.evtlog));
-		sde_rot_dbg_evtlog.evtlog = NULL;
-		return -ENODEV;
-	}
-
-	INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work,
-			sde_rot_evtlog_debug_work);
-	sde_rot_dbg_evtlog.work_panic = false;
-
-	for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++)
-		sde_rot_dbg_evtlog.logs[i].counter = i;
-
-	debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL,
-						&sde_rot_evtlog_fops);
-	debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog,
-			    &sde_rot_dbg_evtlog.evtlog_enable);
-	debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog,
-			    &sde_rot_dbg_evtlog.panic_on_err);
-	debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog,
-			    &sde_rot_dbg_evtlog.enable_reg_dump);
-	debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
-			    &sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
-	debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
-			    &sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);
-
-	sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
-	sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
-	sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
-	sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
-		SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
-	sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
-		SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;
-
-	pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
-			sde_rot_dbg_evtlog.evtlog_enable,
-			sde_rot_dbg_evtlog.panic_on_err,
-			sde_rot_dbg_evtlog.enable_reg_dump);
-
-	return 0;
-}
-
-/*
- * struct sde_rotator_stat_ops - processed statistics file operations
- */
-static const struct file_operations sde_rotator_stat_ops = {
-	.open		= sde_rotator_stat_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release
-};
-
-/*
- * struct sde_rotator_raw_ops - raw statistics file operations
- */
-static const struct file_operations sde_rotator_raw_ops = {
-	.open		= sde_rotator_raw_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release
-};
-
-static int sde_rotator_debug_base_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-static int sde_rotator_debug_base_release(struct inode *inode,
-		struct file *file)
-{
-	struct sde_rotator_debug_base *dbg = file->private_data;
-
-	if (dbg) {
-		mutex_lock(&dbg->buflock);
-		kfree(dbg->buf);
-		dbg->buf_len = 0;
-		dbg->buf = NULL;
-		mutex_unlock(&dbg->buflock);
-	}
-
-	return 0;
-}
-
-static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
-		    const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_rotator_debug_base *dbg = file->private_data;
-	u32 off = 0;
-	u32 cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
-	char buf[24];
-
-	if (!dbg)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;
-
-	if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
-		return -EINVAL;
-
-	if (off % sizeof(u32))
-		return -EINVAL;
-
-	if (off > dbg->max_offset)
-		return -EINVAL;
-
-	if (cnt > (dbg->max_offset - off))
-		cnt = dbg->max_offset - off;
-
-	mutex_lock(&dbg->buflock);
-	dbg->off = off;
-	dbg->cnt = cnt;
-	mutex_unlock(&dbg->buflock);
-
-	SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
-
-	return count;
-}
-
-static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
-			char __user *buff, size_t count, loff_t *ppos)
-{
-	struct sde_rotator_debug_base *dbg = file->private_data;
-	int len = 0;
-	char buf[24] = {'\0'};
-
-	if (!dbg)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	mutex_lock(&dbg->buflock);
-	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
-	mutex_unlock(&dbg->buflock);
-
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;	/* increase offset */
-
-	return len;
-}
-
-static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_rotator_debug_base *dbg = file->private_data;
-	size_t off;
-	u32 data, cnt;
-	char buf[24];
-
-	if (!dbg)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;
-
-	cnt = sscanf(buf, "%zx %x", &off, &data);
-
-	if (cnt < 2)
-		return -EFAULT;
-
-	if (off % sizeof(u32))
-		return -EFAULT;
-
-	if (off >= dbg->max_offset)
-		return -EFAULT;
-
-	mutex_lock(&dbg->buflock);
-
-	/* Enable Clock for register access */
-	sde_rot_mgr_lock(dbg->mgr);
-	if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
-		SDEROT_WARN("resource ctrl is not enabled\n");
-		sde_rot_mgr_unlock(dbg->mgr);
-		goto debug_write_error;
-	}
-	sde_rotator_clk_ctrl(dbg->mgr, true);
-
-	writel_relaxed(data, dbg->base + off);
-
-	/* Disable Clock after register access */
-	sde_rotator_clk_ctrl(dbg->mgr, false);
-	sde_rot_mgr_unlock(dbg->mgr);
-
-	mutex_unlock(&dbg->buflock);
-
-	SDEROT_DBG("addr=%zx data=%x\n", off, data);
-
-	return count;
-
-debug_write_error:
-	mutex_unlock(&dbg->buflock);
-	return 0;
-}
-
-static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct sde_rotator_debug_base *dbg = file->private_data;
-	size_t len;
-	int rc = 0;
-
-	if (!dbg) {
-		SDEROT_ERR("invalid handle\n");
-		return -ENODEV;
-	}
-
-	mutex_lock(&dbg->buflock);
-	if (!dbg->buf) {
-		char dump_buf[64];
-		char *ptr;
-		int cnt, tot;
-
-		dbg->buf_len = sizeof(dump_buf) *
-			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
-		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
-
-		if (!dbg->buf) {
-			SDEROT_ERR("not enough memory to hold reg dump\n");
-			rc = -ENOMEM;
-			goto debug_read_error;
-		}
-
-		if (dbg->off % sizeof(u32)) {
-			rc = -EFAULT;
-			goto debug_read_error;
-		}
-
-		ptr = dbg->base + dbg->off;
-		tot = 0;
-
-		/* Enable clock for register access */
-		sde_rot_mgr_lock(dbg->mgr);
-		if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
-			SDEROT_WARN("resource ctrl is not enabled\n");
-			sde_rot_mgr_unlock(dbg->mgr);
-			goto debug_read_error;
-		}
-		sde_rotator_clk_ctrl(dbg->mgr, true);
-
-		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
-			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
-					   ROW_BYTES, GROUP_BYTES, dump_buf,
-					   sizeof(dump_buf), false);
-			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
-					"0x%08x: %s\n",
-					((int) (unsigned long) ptr) -
-					((int) (unsigned long) dbg->base),
-					dump_buf);
-
-			ptr += ROW_BYTES;
-			tot += len;
-			if (tot >= dbg->buf_len)
-				break;
-		}
-		/* Disable clock after register access */
-		sde_rotator_clk_ctrl(dbg->mgr, false);
-		sde_rot_mgr_unlock(dbg->mgr);
-
-		dbg->buf_len = tot;
-	}
-
-	if (*ppos >= dbg->buf_len) {
-		rc = 0; /* done reading */
-		goto debug_read_error;
-	}
-
-	len = min(count, dbg->buf_len - (size_t) *ppos);
-	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
-		SDEROT_ERR("failed to copy to user\n");
-		rc = -EFAULT;
-		goto debug_read_error;
-	}
-
-	*ppos += len; /* increase offset */
-
-	mutex_unlock(&dbg->buflock);
-	return len;
-
-debug_read_error:
-	mutex_unlock(&dbg->buflock);
-	return rc;
-}
-
-static const struct file_operations sde_rotator_off_fops = {
-	.open = sde_rotator_debug_base_open,
-	.release = sde_rotator_debug_base_release,
-	.read = sde_rotator_debug_base_offset_read,
-	.write = sde_rotator_debug_base_offset_write,
-};
-
-static const struct file_operations sde_rotator_reg_fops = {
-	.open = sde_rotator_debug_base_open,
-	.release = sde_rotator_debug_base_release,
-	.read = sde_rotator_debug_base_reg_read,
-	.write = sde_rotator_debug_base_reg_write,
-};
-
-/*
- * sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
- * @rot_dev: Pointer to rotator device
- */
-struct dentry *sde_rotator_create_debugfs(
-		struct sde_rotator_device *rot_dev)
-{
-	struct dentry *debugfs_root;
-	char dirname[32] = {0};
-
-	snprintf(dirname, sizeof(dirname), "%s%d",
-			SDE_ROTATOR_DRV_NAME, rot_dev->dev->id);
-	debugfs_root = debugfs_create_dir(dirname, NULL);
-	if (!debugfs_root) {
-		SDEROT_ERR("fail create debugfs root\n");
-		return NULL;
-	}
-
-	if (!debugfs_create_file("stats", 0400,
-		debugfs_root, rot_dev, &sde_rotator_stat_ops)) {
-		SDEROT_ERR("fail create debugfs stats\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_file("raw", 0400,
-		debugfs_root, rot_dev, &sde_rotator_raw_ops)) {
-		SDEROT_ERR("fail create debugfs raw\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_u32("fence_timeout", 0400,
-			debugfs_root, &rot_dev->fence_timeout)) {
-		SDEROT_ERR("fail create fence_timeout\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_u32("open_timeout", 0400,
-			debugfs_root, &rot_dev->open_timeout)) {
-		SDEROT_ERR("fail create open_timeout\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_u32("disable_syscache", 0400,
-			debugfs_root, &rot_dev->disable_syscache)) {
-		SDEROT_ERR("fail create disable_syscache\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_u32("streamoff_timeout", 0400,
-			debugfs_root, &rot_dev->streamoff_timeout)) {
-		SDEROT_ERR("fail create streamoff_timeout\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (!debugfs_create_u32("early_submit", 0400,
-			debugfs_root, &rot_dev->early_submit)) {
-		SDEROT_ERR("fail create early_submit\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (sde_rotator_base_create_debugfs(rot_dev->mdata, debugfs_root)) {
-		SDEROT_ERR("fail create base debugfs\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (sde_rotator_core_create_debugfs(rot_dev->mgr, debugfs_root)) {
-		SDEROT_ERR("fail create core debugfs\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) {
-		SDEROT_ERR("fail create evtlog debugfs\n");
-		debugfs_remove_recursive(debugfs_root);
-		return NULL;
-	}
-
-	return debugfs_root;
-}
-
-/*
- * sde_rotator_destroy_debugfs - Destroy rotator debugfs directory structure.
- * @rot_dev: Pointer to rotator debugfs
- */
-void sde_rotator_destroy_debugfs(struct dentry *debugfs)
-{
-	debugfs_remove_recursive(debugfs);
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
deleted file mode 100644
index 0ca8c75..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_DEBUG_H__
-#define __SDE_ROTATOR_DEBUG_H__
-
-#include <linux/types.h>
-#include <linux/dcache.h>
-
-#define SDE_ROT_DATA_LIMITER (-1)
-#define SDE_ROT_EVTLOG_TOUT_DATA_LIMITER (NULL)
-#define SDE_ROT_EVTLOG_PANIC		0xdead
-#define SDE_ROT_EVTLOG_FATAL		0xbad
-#define SDE_ROT_EVTLOG_ERROR		0xebad
-
-enum sde_rot_dbg_reg_dump_flag {
-	SDE_ROT_DBG_DUMP_IN_LOG = BIT(0),
-	SDE_ROT_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-enum sde_rot_dbg_evtlog_flag {
-	SDE_ROT_EVTLOG_DEFAULT = BIT(0),
-	SDE_ROT_EVTLOG_IOMMU = BIT(1),
-	SDE_ROT_EVTLOG_DBG = BIT(6),
-	SDE_ROT_EVTLOG_ALL = BIT(7)
-};
-
-#define SDEROT_EVTLOG(...) sde_rot_evtlog(__func__, __LINE__, \
-		SDE_ROT_EVTLOG_DEFAULT, ##__VA_ARGS__, SDE_ROT_DATA_LIMITER)
-
-#define SDEROT_EVTLOG_TOUT_HANDLER(...)	\
-	sde_rot_evtlog_tout_handler(false, __func__, ##__VA_ARGS__, \
-		SDE_ROT_EVTLOG_TOUT_DATA_LIMITER)
-
-void sde_rot_evtlog(const char *name, int line, int flag, ...);
-void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
-
-struct sde_rotator_device;
-
-struct sde_rotator_debug_base {
-	char name[80];
-	void __iomem *base;
-	size_t off;
-	size_t cnt;
-	size_t max_offset;
-	char *buf;
-	size_t buf_len;
-	struct sde_rot_mgr *mgr;
-	struct mutex buflock;
-};
-
-#if defined(CONFIG_DEBUG_FS)
-struct dentry *sde_rotator_create_debugfs(
-		struct sde_rotator_device *rot_dev);
-
-void sde_rotator_destroy_debugfs(struct dentry *debugfs);
-#else
-static inline
-struct dentry *sde_rotator_create_debugfs(
-		struct sde_rotator_device *rot_dev)
-{
-	return NULL;
-}
-
-static inline
-void sde_rotator_destroy_debugfs(struct dentry *debugfs)
-{
-}
-#endif
-#endif /* __SDE_ROTATOR_DEBUG_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
deleted file mode 100644
index 3bfe717..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ /dev/null
@@ -1,3738 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__
-
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/of.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf2-v4l2.h>
-#include <media/v4l2-mem2mem.h>
-
-#include "sde_rotator_inline.h"
-#include "sde_rotator_base.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_dev.h"
-#include "sde_rotator_debug.h"
-#include "sde_rotator_trace.h"
-
-/* Start v4l2 device number (default allocation) */
-#define SDE_ROTATOR_BASE_DEVICE_NUMBER	-1
-
-/* Default value for early_submit flag */
-#define SDE_ROTATOR_EARLY_SUBMIT	1
-
-/* Timeout (msec) waiting for stream to turn off. */
-#define SDE_ROTATOR_STREAM_OFF_TIMEOUT	500
-
-/* acquire fence time out, following other driver fence time out practice */
-#define SDE_ROTATOR_FENCE_TIMEOUT	MSEC_PER_SEC
-
-/* Timeout (msec) waiting for ctx open */
-#define SDE_ROTATOR_CTX_OPEN_TIMEOUT	500
-
-/* Rotator default fps */
-#define SDE_ROTATOR_DEFAULT_FPS	60
-
-/* Rotator rotation angles */
-#define SDE_ROTATOR_DEGREE_270		270
-#define SDE_ROTATOR_DEGREE_180		180
-#define SDE_ROTATOR_DEGREE_90		90
-
-/* Inline rotator qos request */
-#define SDE_ROTATOR_ADD_REQUEST		1
-#define SDE_ROTATOR_REMOVE_REQUEST		0
-
-
-static void sde_rotator_submit_handler(struct kthread_work *work);
-static void sde_rotator_retire_handler(struct kthread_work *work);
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request);
-#ifdef CONFIG_COMPAT
-static long sde_rotator_compat_ioctl32(struct file *file,
-	unsigned int cmd, unsigned long arg);
-#endif
-
-/*
- * sde_rotator_ctx_from_fh - Get rotator context from v4l2 fh.
- * @fh: Pointer to v4l2 fh.
- */
-static inline struct sde_rotator_ctx *sde_rotator_ctx_from_fh(
-		struct v4l2_fh *fh)
-{
-	return container_of(fh, struct sde_rotator_ctx, fh);
-}
-
-/*
- * sde_rotator_get_flags_from_ctx - Get low-level command flag
- * @ctx: Pointer to rotator context.
- */
-static uint32_t sde_rotator_get_flags_from_ctx(struct sde_rotator_ctx *ctx)
-{
-	uint32_t ret_flags = 0;
-
-	if (ctx->rotate == SDE_ROTATOR_DEGREE_270)
-		ret_flags |= SDE_ROTATION_270;
-	else if (ctx->rotate == SDE_ROTATOR_DEGREE_180)
-		ret_flags |= SDE_ROTATION_180;
-	else if (ctx->rotate == SDE_ROTATOR_DEGREE_90)
-		ret_flags |= SDE_ROTATION_90;
-	if (ctx->hflip)
-		ret_flags ^= SDE_ROTATION_FLIP_LR;
-	if (ctx->vflip)
-		ret_flags ^= SDE_ROTATION_FLIP_UD;
-	if (ctx->secure)
-		ret_flags |= SDE_ROTATION_SECURE;
-	if (ctx->secure_camera)
-		ret_flags |= SDE_ROTATION_SECURE_CAMERA;
-	if (ctx->format_out.fmt.pix.field == V4L2_FIELD_INTERLACED &&
-			ctx->format_cap.fmt.pix.field == V4L2_FIELD_NONE)
-		ret_flags |= SDE_ROTATION_DEINTERLACE;
-
-	return ret_flags;
-}
-
-/*
- * sde_rotator_get_config_from_ctx - Fill rotator configure structure.
- * @ctx: Pointer to rotator ctx.
- * @config: Pointer to config structure.
- */
-static void sde_rotator_get_config_from_ctx(struct sde_rotator_ctx *ctx,
-		struct sde_rotation_config *config)
-{
-	memset(config, 0, sizeof(struct sde_rotation_config));
-	config->flags = sde_rotator_get_flags_from_ctx(ctx);
-	config->frame_rate = (ctx->timeperframe.numerator) ?
-				ctx->timeperframe.denominator
-					/ ctx->timeperframe.numerator :	0;
-	config->session_id = ctx->session_id;
-	config->input.width = ctx->crop_out.width;
-	config->input.height = ctx->crop_out.height;
-	config->input.format = ctx->format_out.fmt.pix.pixelformat;
-	config->input.comp_ratio.numer = 1;
-	config->input.comp_ratio.denom = 1;
-	config->output.width = ctx->crop_cap.width;
-	config->output.height = ctx->crop_cap.height;
-	config->output.format = ctx->format_cap.fmt.pix.pixelformat;
-	config->output.comp_ratio.numer = 1;
-	config->output.comp_ratio.denom = 1;
-
-	/*
-	 * Use compression ratio of the first buffer to estimate
-	 * performance requirement of the session. If core layer does
-	 * not support dynamic per buffer compression ratio recalculation,
-	 * this configuration will determine the overall static compression
-	 * ratio of the session.
-	 */
-	if (ctx->vbinfo_out)
-		config->input.comp_ratio = ctx->vbinfo_out[0].comp_ratio;
-	if (ctx->vbinfo_cap)
-		config->output.comp_ratio = ctx->vbinfo_cap[0].comp_ratio;
-
-	SDEDEV_DBG(ctx->rot_dev->dev, "config s:%d out_cr:%u/%u cap_cr:%u/%u\n",
-			ctx->session_id,
-			config->input.comp_ratio.numer,
-			config->input.comp_ratio.denom,
-			config->output.comp_ratio.numer,
-			config->output.comp_ratio.denom);
-}
-
-/*
- * sde_rotator_get_item_from_ctx - Fill rotator item structure.
- * @ctx: Pointer to rotator ctx.
- * @item: Pointer to item structure.
- */
-static void sde_rotator_get_item_from_ctx(struct sde_rotator_ctx *ctx,
-		struct sde_rotation_item *item)
-{
-	memset(item, 0, sizeof(struct sde_rotation_item));
-	item->flags = sde_rotator_get_flags_from_ctx(ctx);
-	item->session_id = ctx->session_id;
-	item->sequence_id = 0;
-	/* assign high/low priority */
-	item->wb_idx = (ctx->fh.prio >= V4L2_PRIORITY_DEFAULT) ? 0 : 1;
-	item->src_rect.x = ctx->crop_out.left;
-	item->src_rect.y = ctx->crop_out.top;
-	item->src_rect.w = ctx->crop_out.width;
-	item->src_rect.h = ctx->crop_out.height;
-	item->input.width = ctx->format_out.fmt.pix.width;
-	item->input.height = ctx->format_out.fmt.pix.height;
-	item->input.format = ctx->format_out.fmt.pix.pixelformat;
-	item->input.planes[0].fd = -1;
-	item->input.planes[0].offset = 0;
-	item->input.planes[0].stride = ctx->format_out.fmt.pix.bytesperline;
-	item->input.plane_count = 1;
-	item->input.fence = NULL;
-	item->input.comp_ratio.numer = 1;
-	item->input.comp_ratio.denom = 1;
-
-	item->dst_rect.x = ctx->crop_cap.left;
-	item->dst_rect.y = ctx->crop_cap.top;
-	item->dst_rect.w = ctx->crop_cap.width;
-	item->dst_rect.h = ctx->crop_cap.height;
-	item->output.width = ctx->format_cap.fmt.pix.width;
-	item->output.height = ctx->format_cap.fmt.pix.height;
-	item->output.format = ctx->format_cap.fmt.pix.pixelformat;
-	item->output.planes[0].fd = -1;
-	item->output.planes[0].offset = 0;
-	item->output.planes[0].stride = ctx->format_cap.fmt.pix.bytesperline;
-	item->output.plane_count = 1;
-	item->output.fence = NULL;
-	item->output.comp_ratio.numer = 1;
-	item->output.comp_ratio.denom = 1;
-}
-
-/*
- * sde_rotator_format_recalc - Recalculate format parameters.
- * @f: v4l2 format.
- */
-static void sde_rotator_format_recalc(struct v4l2_format *f)
-{
-	int ret;
-	struct sde_mdp_format_params *fmt;
-	struct sde_mdp_plane_sizes ps;
-
-	fmt = sde_get_format_params(f->fmt.pix.pixelformat);
-	if (!fmt) {
-		SDEROT_ERR("invalid format\n");
-		goto error_fmt;
-	}
-
-	ret = sde_mdp_get_plane_sizes(fmt,
-		f->fmt.pix.width, f->fmt.pix.height, &ps, 0, 0);
-	if (ret) {
-		SDEROT_ERR("invalid plane size\n");
-		goto error_fmt;
-	}
-
-	f->fmt.pix.bytesperline = ps.ystride[0];
-	f->fmt.pix.sizeimage = ps.total_size;
-
-	return;
-error_fmt:
-	f->fmt.pix.bytesperline = 0;
-	f->fmt.pix.sizeimage = 0;
-}
-
-/*
- * sde_rotator_validate_item - Check if rotator item is valid for processing.
- * @ctx: Pointer to rotator ctx.
- * @item: Pointer to item structure
- */
-static int sde_rotator_validate_item(struct sde_rotator_ctx *ctx,
-		struct sde_rotation_item *item)
-{
-	int ret;
-	struct sde_rot_entry_container *req;
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	req = sde_rotator_req_init(rot_dev->mgr, ctx->private, item, 1, 0);
-	if (IS_ERR_OR_NULL(req)) {
-		SDEDEV_ERR(rot_dev->dev, "fail allocate item\n");
-		return -ENOMEM;
-	}
-
-	ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private, req);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	devm_kfree(rot_dev->dev, req);
-	return ret;
-}
-
-/*
- * sde_rotator_queue_setup - vb2_ops queue_setup callback.
- * @q: Pointer to vb2 queue struct.
- * @num_buffers: Pointer of number of buffers requested.
- * @num_planes: Pointer to number of planes requested.
- * @sizes: Array containing sizes of planes.
- * @alloc_ctxs: Array of allocated contexts for each plane.
- */
-static int sde_rotator_queue_setup(struct vb2_queue *q,
-	unsigned int *num_buffers, unsigned int *num_planes,
-	unsigned int sizes[], struct device *alloc_devs[])
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
-	int i;
-
-	if (!num_buffers)
-		return -EINVAL;
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		sizes[0] = ctx->format_out.fmt.pix.sizeimage;
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		sizes[0] = ctx->format_cap.fmt.pix.sizeimage;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	*num_planes = 1;
-	alloc_devs[0] = (struct device *)ctx;
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		ctx->nbuf_out = *num_buffers;
-		kfree(ctx->vbinfo_out);
-		ctx->vbinfo_out = kcalloc(ctx->nbuf_out,
-				sizeof(struct sde_rotator_vbinfo), GFP_KERNEL);
-		if (!ctx->vbinfo_out)
-			return -ENOMEM;
-		for (i = 0; i < ctx->nbuf_out; i++) {
-			ctx->vbinfo_out[i].fd = -1;
-			ctx->vbinfo_out[i].comp_ratio.numer = 1;
-			ctx->vbinfo_out[i].comp_ratio.denom = 1;
-		}
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		ctx->nbuf_cap = *num_buffers;
-		kfree(ctx->vbinfo_cap);
-		ctx->vbinfo_cap = kcalloc(ctx->nbuf_cap,
-				sizeof(struct sde_rotator_vbinfo), GFP_KERNEL);
-		if (!ctx->vbinfo_cap)
-			return -ENOMEM;
-		for (i = 0; i < ctx->nbuf_cap; i++) {
-			ctx->vbinfo_cap[i].fd = -1;
-			ctx->vbinfo_cap[i].comp_ratio.numer = 1;
-			ctx->vbinfo_cap[i].comp_ratio.denom = 1;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_buf_queue - vb2_ops buf_queue callback.
- * @vb: Pointer to vb2 buffer struct.
- */
-static void sde_rotator_buf_queue(struct vb2_buffer *vb)
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-
-	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
-}
-
-/*
- * sde_rotator_buf_finish - vb2_ops buf_finish to finalize buffer before going
- *				back to user space
- * @vb: Pointer to vb2 buffer struct.
- */
-static void sde_rotator_buf_finish(struct vb2_buffer *vb)
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-	int i;
-
-	SDEDEV_DBG(ctx->rot_dev->dev,
-			"buf_finish t:%d i:%d s:%d m:%u np:%d up:%lu\n",
-			vb->type, vb->index, vb->state,
-			vb->vb2_queue->memory,
-			vb->num_planes,
-			vb->planes[0].m.userptr);
-
-	if (vb->vb2_queue->memory != VB2_MEMORY_USERPTR)
-		return;
-
-	/*
-	 * We use userptr to tunnel fd, and fd can be the same across qbuf
-	 * even though the underlying buffer is different.  Since vb2 layer
-	 * optimizes memory mapping for userptr by first checking if userptr
-	 * has changed, it will not trigger put_userptr if fd value does
-	 * not change.  In order to force buffer release, we need to clear
-	 * userptr when the current buffer is done and ready to go back to
-	 * user mode. Since 0 is a valid fd, reset userptr to -1 instead.
-	 */
-	for (i = 0; i < vb->num_planes; i++)
-		vb->planes[i].m.userptr = ~0;
-}
-
-/*
- * sde_rotator_return_all_buffers - Return all buffers with the given status.
- * @q: Pointer to vb2 buffer queue struct.
- * @state: State of the buffer
- */
-static void sde_rotator_return_all_buffers(struct vb2_queue *q,
-		enum vb2_buffer_state state)
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-
-	SDEDEV_DBG(rot_dev->dev,
-			"return q t:%d c:%d dc:%d s:%d\n",
-			q->type, q->queued_count,
-			atomic_read(&q->owned_by_drv_count),
-			state);
-
-	/* return buffers according videobuffer2-core.h */
-	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		struct vb2_v4l2_buffer *buf;
-
-		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) {
-			SDEDEV_DBG(rot_dev->dev,
-					"return vb t:%d i:%d\n",
-					buf->vb2_buf.type,
-					buf->vb2_buf.index);
-			v4l2_m2m_buf_done(buf, state);
-		}
-	} else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		struct vb2_v4l2_buffer *buf;
-
-		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) {
-			SDEDEV_DBG(rot_dev->dev,
-					"return vb t:%d i:%d\n",
-					buf->vb2_buf.type,
-					buf->vb2_buf.index);
-			v4l2_m2m_buf_done(buf, state);
-		}
-	} else {
-		SDEDEV_ERR(rot_dev->dev, "unsupported vb t:%d\n", q->type);
-	}
-}
-
-/*
- * sde_rotator_start_streaming - vb2_ops start_streaming callback.
- * @q: Pointer to vb2 queue struct.
- * @count: Number of buffer queued before stream on call.
- */
-static int sde_rotator_start_streaming(struct vb2_queue *q, unsigned int count)
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-
-	SDEDEV_DBG(rot_dev->dev, "start streaming s:%d t:%d\n",
-			ctx->session_id, q->type);
-
-	if (!list_empty(&ctx->pending_list)) {
-		SDEDEV_ERR(rot_dev->dev,
-				"command pending error s:%d t:%d p:%d\n",
-				ctx->session_id, q->type,
-				!list_empty(&ctx->pending_list));
-		return -EINVAL;
-	}
-
-	ctx->abort_pending = 0;
-
-	return 0;
-}
-
-/*
- * sde_rotator_stop_streaming - vb2_ops stop_streaming callback.
- * @q: Pointer to vb2 queue struct.
- *
- * This function will block waiting for stream to stop.  Unlock queue
- * lock to avoid deadlock.
- */
-static void sde_rotator_stop_streaming(struct vb2_queue *q)
-{
-	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotator_request *request;
-	struct list_head *curr, *next;
-	int i;
-	int ret;
-
-	SDEDEV_DBG(rot_dev->dev, "stop streaming s:%d t:%d p:%d\n",
-			ctx->session_id, q->type,
-			!list_empty(&ctx->pending_list));
-	ctx->abort_pending = 1;
-	mutex_unlock(q->lock);
-	ret = wait_event_timeout(ctx->wait_queue,
-			list_empty(&ctx->pending_list),
-			msecs_to_jiffies(rot_dev->streamoff_timeout));
-	mutex_lock(q->lock);
-	if (!ret) {
-		SDEDEV_ERR(rot_dev->dev,
-				"timeout to stream off s:%d t:%d p:%d\n",
-				ctx->session_id, q->type,
-				!list_empty(&ctx->pending_list));
-		SDEROT_EVTLOG(ctx->session_id, q->type,
-				!list_empty(&ctx->pending_list),
-				SDE_ROT_EVTLOG_ERROR);
-		sde_rot_mgr_lock(rot_dev->mgr);
-		sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
-		sde_rot_mgr_unlock(rot_dev->mgr);
-		list_for_each_safe(curr, next, &ctx->pending_list) {
-			request = container_of(curr, struct sde_rotator_request,
-						list);
-
-			SDEDEV_DBG(rot_dev->dev, "cancel request s:%d\n",
-					ctx->session_id);
-			mutex_unlock(q->lock);
-			kthread_cancel_work_sync(&request->submit_work);
-			kthread_cancel_work_sync(&request->retire_work);
-			mutex_lock(q->lock);
-			spin_lock(&ctx->list_lock);
-			list_del_init(&request->list);
-			list_add_tail(&request->list, &ctx->retired_list);
-			spin_unlock(&ctx->list_lock);
-		}
-	}
-
-	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
-
-	/* clear fence for buffer */
-	sde_rotator_resync_timeline(ctx->work_queue.timeline);
-	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		for (i = 0; i < ctx->nbuf_cap; i++) {
-			struct sde_rotator_vbinfo *vbinfo =
-					&ctx->vbinfo_cap[i];
-
-			if (vbinfo->fence) {
-				/* fence is not used */
-				SDEDEV_DBG(rot_dev->dev,
-						"put fence s:%d t:%d i:%d\n",
-						ctx->session_id, q->type, i);
-				sde_rotator_put_sync_fence(vbinfo->fence);
-			}
-			vbinfo->fence = NULL;
-			vbinfo->fd = -1;
-		}
-	} else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		for (i = 0; i < ctx->nbuf_out; i++) {
-			struct sde_rotator_vbinfo *vbinfo =
-					&ctx->vbinfo_out[i];
-
-			if (vbinfo->fence) {
-				SDEDEV_DBG(rot_dev->dev,
-						"put fence s:%d t:%d i:%d\n",
-						ctx->session_id, q->type, i);
-				sde_rotator_put_sync_fence(vbinfo->fence);
-			}
-			vbinfo->fence = NULL;
-			vbinfo->fd = -1;
-		}
-	}
-}
-
-/* Videobuf2 queue callbacks. */
-static const struct vb2_ops sde_rotator_vb2_q_ops = {
-	.queue_setup     = sde_rotator_queue_setup,
-	.buf_queue       = sde_rotator_buf_queue,
-	.start_streaming = sde_rotator_start_streaming,
-	.stop_streaming  = sde_rotator_stop_streaming,
-	.wait_prepare	 = vb2_ops_wait_prepare,
-	.wait_finish	 = vb2_ops_wait_finish,
-	.buf_finish      = sde_rotator_buf_finish,
-};
-
-/*
- * sde_rotator_get_userptr - Map and get buffer handler for user pointer buffer.
- * @dev: device allocated in buf_setup.
- * @vaddr: Virtual addr passed from userpsace (in our case ion fd)
- * @size: Size of the buffer
- * @dma_dir: DMA data direction of the given buffer.
- */
-static void *sde_rotator_get_userptr(struct device *dev,
-	unsigned long vaddr, unsigned long size,
-	enum dma_data_direction dma_dir)
-{
-	struct sde_rotator_ctx *ctx = (struct sde_rotator_ctx *)dev;
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotator_buf_handle *buf;
-
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
-
-	buf->fd = vaddr;
-	buf->secure = ctx->secure || ctx->secure_camera;
-	buf->ctx = ctx;
-	buf->rot_dev = rot_dev;
-	buf->size = size;
-	buf->buffer = dma_buf_get(buf->fd);
-	if (IS_ERR_OR_NULL(buf->buffer)) {
-		SDEDEV_ERR(rot_dev->dev,
-			"fail get dmabuf fd:%d r:%ld\n",
-			buf->fd, PTR_ERR(buf->buffer));
-		goto error_buf_get;
-	}
-	SDEDEV_DBG(rot_dev->dev,
-			"get dmabuf s:%d fd:%d buf:%pad\n",
-			buf->ctx->session_id,
-			buf->fd, &buf->buffer);
-
-	return buf;
-error_buf_get:
-	kfree(buf);
-	return ERR_PTR(-ENOMEM);
-}
-
-/*
- * sde_rotator_put_userptr - Unmap and free buffer handler.
- * @buf_priv: Buffer handler allocated get_userptr callback.
- */
-static void sde_rotator_put_userptr(void *buf_priv)
-{
-	struct sde_rotator_buf_handle *buf = buf_priv;
-
-	if (IS_ERR_OR_NULL(buf))
-		return;
-
-	if (!buf->rot_dev || !buf->ctx) {
-		WARN_ON(!buf->rot_dev || !buf->ctx);
-		SDEROT_ERR("null rotator device/context\n");
-		return;
-	}
-
-	SDEDEV_DBG(buf->rot_dev->dev, "put dmabuf s:%d fd:%d buf:%pad\n",
-			buf->ctx->session_id,
-			buf->fd, &buf->buffer);
-
-	if (buf->buffer) {
-		dma_buf_put(buf->buffer);
-		buf->buffer = NULL;
-	}
-
-	kfree(buf_priv);
-}
-
-/* Videobuf2 memory callbacks. */
-static struct vb2_mem_ops sde_rotator_vb2_mem_ops = {
-	.get_userptr = sde_rotator_get_userptr,
-	.put_userptr = sde_rotator_put_userptr,
-};
-
-/*
- * sde_rotator_s_ctx_ctrl - set context control variable to v4l2 control
- * @ctx: Pointer to rotator context.
- * @ctx_ctrl: Pointer to context control variable
- * @ctrl: Pointer to v4l2 control variable
- */
-static int sde_rotator_s_ctx_ctrl(struct sde_rotator_ctx *ctx,
-		s32 *ctx_ctrl, struct v4l2_ctrl *ctrl)
-{
-	*ctx_ctrl = ctrl->val;
-	return 0;
-}
-
-/*
- * sde_rotator_s_ctrl - Set control.
- * @ctrl: Pointer to v4l2 control structure.
- */
-static int sde_rotator_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-	struct sde_rotator_ctx *ctx =
-		container_of(ctrl->handler,
-				struct sde_rotator_ctx, ctrl_handler);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	int ret;
-
-	SDEDEV_DBG(rot_dev->dev, "set %s:%d s:%d\n", ctrl->name, ctrl->val,
-			ctx->session_id);
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-
-	switch (ctrl->id) {
-	case V4L2_CID_HFLIP:
-		ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->hflip, ctrl);
-		break;
-
-	case V4L2_CID_VFLIP:
-		ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->vflip, ctrl);
-		break;
-
-	case V4L2_CID_ROTATE:
-		ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->rotate, ctrl);
-		break;
-
-	case V4L2_CID_SDE_ROTATOR_SECURE:
-		ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->secure, ctrl);
-		break;
-
-	case V4L2_CID_SDE_ROTATOR_SECURE_CAMERA:
-		ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->secure_camera, ctrl);
-		break;
-	default:
-		v4l2_warn(&rot_dev->v4l2_dev, "invalid control %d\n", ctrl->id);
-		ret = -EINVAL;
-	}
-
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	return ret;
-}
-
-/*
- * sde_rotator_ctrl_ops - Control operations.
- */
-static const struct v4l2_ctrl_ops sde_rotator_ctrl_ops = {
-	.s_ctrl = sde_rotator_s_ctrl,
-};
-
-/*
- * sde_rotator_ctrl_secure - Non-secure/Secure.
- */
-static const struct v4l2_ctrl_config sde_rotator_ctrl_secure = {
-	.ops = &sde_rotator_ctrl_ops,
-	.id = V4L2_CID_SDE_ROTATOR_SECURE,
-	.name = "Non-secure/Secure Domain",
-	.type = V4L2_CTRL_TYPE_INTEGER,
-	.def = 0,
-	.min = 0,
-	.max = 1,
-	.step = 1,
-};
-
-static const struct v4l2_ctrl_config sde_rotator_ctrl_secure_camera = {
-	.ops = &sde_rotator_ctrl_ops,
-	.id = V4L2_CID_SDE_ROTATOR_SECURE_CAMERA,
-	.name = "Secure Camera content",
-	.type = V4L2_CTRL_TYPE_INTEGER,
-	.def = 0,
-	.min = 0,
-	.max = 1,
-	.step = 1,
-};
-
-/*
- * sde_rotator_ctx_show - show context state.
- */
-static ssize_t sde_rotator_ctx_show(struct kobject *kobj,
-	struct kobj_attribute *attr, char *buf)
-{
-	size_t len = PAGE_SIZE;
-	int cnt = 0;
-	struct sde_rotator_ctx *ctx =
-		container_of(kobj, struct sde_rotator_ctx, kobj);
-
-	if (!ctx)
-		return cnt;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	SPRINT("rotate=%d\n", ctx->rotate);
-	SPRINT("hflip=%d\n", ctx->hflip);
-	SPRINT("vflip=%d\n", ctx->vflip);
-	SPRINT("priority=%d\n", ctx->fh.prio);
-	SPRINT("secure=%d\n", ctx->secure);
-	SPRINT("timeperframe=%u %u\n", ctx->timeperframe.numerator,
-			ctx->timeperframe.denominator);
-	SPRINT("nbuf_out=%d\n", ctx->nbuf_out);
-	SPRINT("nbuf_cap=%d\n", ctx->nbuf_cap);
-	SPRINT("crop_out=%u %u %u %u\n",
-			ctx->crop_out.left, ctx->crop_out.top,
-			ctx->crop_out.width, ctx->crop_out.height);
-	SPRINT("crop_cap=%u %u %u %u\n",
-			ctx->crop_cap.left, ctx->crop_cap.top,
-			ctx->crop_cap.width, ctx->crop_cap.height);
-	SPRINT("fmt_out=%c%c%c%c %u %u %u %u\n",
-			(ctx->format_out.fmt.pix.pixelformat>>0)&0xff,
-			(ctx->format_out.fmt.pix.pixelformat>>8)&0xff,
-			(ctx->format_out.fmt.pix.pixelformat>>16)&0xff,
-			(ctx->format_out.fmt.pix.pixelformat>>24)&0xff,
-			ctx->format_out.fmt.pix.width,
-			ctx->format_out.fmt.pix.height,
-			ctx->format_out.fmt.pix.bytesperline,
-			ctx->format_out.fmt.pix.sizeimage);
-	SPRINT("fmt_cap=%c%c%c%c %u %u %u %u\n",
-			(ctx->format_cap.fmt.pix.pixelformat>>0)&0xff,
-			(ctx->format_cap.fmt.pix.pixelformat>>8)&0xff,
-			(ctx->format_cap.fmt.pix.pixelformat>>16)&0xff,
-			(ctx->format_cap.fmt.pix.pixelformat>>24)&0xff,
-			ctx->format_cap.fmt.pix.width,
-			ctx->format_cap.fmt.pix.height,
-			ctx->format_cap.fmt.pix.bytesperline,
-			ctx->format_cap.fmt.pix.sizeimage);
-	SPRINT("abort_pending=%d\n", ctx->abort_pending);
-	SPRINT("command_pending=%d\n", !list_empty(&ctx->pending_list));
-	SPRINT("sequence=%u\n",
-		sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline));
-	SPRINT("timestamp=%u\n",
-		sde_rotator_get_timeline_retire_ts(ctx->work_queue.timeline));
-	return cnt;
-}
-
-static struct kobj_attribute sde_rotator_ctx_attr =
-	__ATTR(state, 0664, sde_rotator_ctx_show, NULL);
-
-static struct attribute *sde_rotator_fs_attrs[] = {
-	&sde_rotator_ctx_attr.attr,
-	NULL
-};
-
-static struct attribute_group sde_rotator_fs_attr_group = {
-	.attrs = sde_rotator_fs_attrs
-};
-
-/*
- * sde_rotator_ctx_show - sysfs show callback.
- */
-static ssize_t sde_rotator_fs_show(struct kobject *kobj,
-	struct attribute *attr, char *buf)
-{
-	ssize_t ret = -EIO;
-	struct kobj_attribute *kattr =
-			container_of(attr, struct kobj_attribute, attr);
-	if (kattr->show)
-		ret = kattr->show(kobj, kattr, buf);
-	return ret;
-}
-
-/*
- * sde_rotator_fs_store - sysfs store callback.
- */
-static ssize_t sde_rotator_fs_store(struct kobject *kobj,
-	struct attribute *attr, const char *buf, size_t count)
-{
-	ssize_t ret = -EIO;
-	struct kobj_attribute *kattr =
-			container_of(attr, struct kobj_attribute, attr);
-	if (kattr->store)
-		ret = kattr->store(kobj, kattr, buf, count);
-	return ret;
-}
-
-static const struct sysfs_ops sde_rotator_fs_ops = {
-	.show = sde_rotator_fs_show,
-	.store = sde_rotator_fs_store,
-};
-
-static struct kobj_type sde_rotator_fs_ktype = {
-	.sysfs_ops = &sde_rotator_fs_ops,
-};
-
-/*
- * sde_rotator_queue_init - m2m_ops queue_setup callback.
- * @priv: Pointer to rotator ctx.
- * @src_vq: vb2 source queue.
- * @dst_vq: vb2 destination queue.
- */
-static int sde_rotator_queue_init(void *priv, struct vb2_queue *src_vq,
-	struct vb2_queue *dst_vq)
-{
-	struct sde_rotator_ctx *ctx = priv;
-	int ret;
-
-	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	src_vq->io_modes = VB2_USERPTR;
-	src_vq->drv_priv = ctx;
-	src_vq->mem_ops = &sde_rotator_vb2_mem_ops;
-	src_vq->ops = &sde_rotator_vb2_q_ops;
-	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
-	src_vq->lock = &ctx->rot_dev->lock;
-	src_vq->min_buffers_needed = 1;
-	src_vq->dev = ctx->rot_dev->dev;
-
-	ret = vb2_queue_init(src_vq);
-	if (ret) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail init src queue r:%d\n", ret);
-		return ret;
-	}
-
-	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	dst_vq->io_modes = VB2_USERPTR;
-	dst_vq->drv_priv = ctx;
-	dst_vq->mem_ops = &sde_rotator_vb2_mem_ops;
-	dst_vq->ops = &sde_rotator_vb2_q_ops;
-	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
-	dst_vq->lock = &ctx->rot_dev->lock;
-	dst_vq->min_buffers_needed = 1;
-	src_vq->dev = ctx->rot_dev->dev;
-
-	ret = vb2_queue_init(dst_vq);
-	if (ret) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail init dst queue r:%d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_ctx_open - Rotator device open method.
- * @rot_dev: Pointer to rotator device structure
- * @file: Pointer to file struct (optional)
- * return: Pointer rotator context if success; ptr error code, otherwise.
- */
-struct sde_rotator_ctx *sde_rotator_ctx_open(
-		struct sde_rotator_device *rot_dev, struct file *file)
-{
-	struct video_device *video = file ? video_devdata(file) : NULL;
-	struct sde_rotator_ctx *ctx;
-	struct v4l2_ctrl_handler *ctrl_handler;
-	char name[32];
-	int i, ret;
-
-	if (atomic_read(&rot_dev->mgr->device_suspended))
-		return ERR_PTR(-EPERM);
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
-
-	if (mutex_lock_interruptible(&rot_dev->lock)) {
-		ret = -ERESTARTSYS;
-		goto error_lock;
-	}
-
-	/* wait until exclusive ctx, if exists, finishes or timeout */
-	while (rot_dev->excl_ctx) {
-		SDEROT_DBG("waiting to open %s session %d ...\n",
-				file ? "v4l2" : "excl",	rot_dev->session_id);
-		mutex_unlock(&rot_dev->lock);
-		ret = wait_event_interruptible_timeout(rot_dev->open_wq,
-				!rot_dev->excl_ctx,
-				msecs_to_jiffies(rot_dev->open_timeout));
-		if (ret < 0) {
-			goto error_lock;
-		} else if (!ret) {
-			SDEROT_WARN("timeout to open session %d\n",
-					rot_dev->session_id);
-			SDEROT_EVTLOG(rot_dev->session_id,
-					SDE_ROT_EVTLOG_ERROR);
-			ret = -EBUSY;
-			goto error_lock;
-		} else if (mutex_lock_interruptible(&rot_dev->lock)) {
-			ret = -ERESTARTSYS;
-			goto error_lock;
-		}
-	}
-
-	ctx->rot_dev = rot_dev;
-	ctx->file = file;
-
-	/* Set context defaults */
-	ctx->session_id = rot_dev->session_id++;
-	SDEDEV_DBG(ctx->rot_dev->dev, "open %d\n", ctx->session_id);
-	ctx->timeperframe.numerator = 1;
-	ctx->timeperframe.denominator = SDE_ROTATOR_DEFAULT_FPS;
-	ctx->hflip = 0;
-	ctx->vflip = 0;
-	ctx->rotate = 0;
-	ctx->secure = 0;
-	ctx->abort_pending = 0;
-	ctx->kthread_id = -1;
-	ctx->format_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	ctx->format_cap.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
-	ctx->format_cap.fmt.pix.width = 640;
-	ctx->format_cap.fmt.pix.height = 480;
-	ctx->crop_cap.width = 640;
-	ctx->crop_cap.height = 480;
-	ctx->format_out.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	ctx->format_out.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
-	ctx->format_out.fmt.pix.width = 640;
-	ctx->format_out.fmt.pix.height = 480;
-	ctx->crop_out.width = 640;
-	ctx->crop_out.height = 480;
-	init_waitqueue_head(&ctx->wait_queue);
-	spin_lock_init(&ctx->list_lock);
-	INIT_LIST_HEAD(&ctx->pending_list);
-	INIT_LIST_HEAD(&ctx->retired_list);
-
-	for (i = 0 ; i < ARRAY_SIZE(ctx->requests); i++) {
-		struct sde_rotator_request *request = &ctx->requests[i];
-
-		kthread_init_work(&request->submit_work,
-				sde_rotator_submit_handler);
-		kthread_init_work(&request->retire_work,
-				sde_rotator_retire_handler);
-		request->ctx = ctx;
-		INIT_LIST_HEAD(&request->list);
-		list_add_tail(&request->list, &ctx->retired_list);
-	}
-
-	if (ctx->file) {
-		v4l2_fh_init(&ctx->fh, video);
-		file->private_data = &ctx->fh;
-		v4l2_fh_add(&ctx->fh);
-
-		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
-			ctx, sde_rotator_queue_init);
-		if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
-			ret = PTR_ERR(ctx->fh.m2m_ctx);
-			ctx->fh.m2m_ctx = NULL;
-			goto error_m2m_init;
-		}
-	}
-
-	ret = kobject_init_and_add(&ctx->kobj, &sde_rotator_fs_ktype,
-			&rot_dev->dev->kobj, "session_%d", ctx->session_id);
-	if (ret) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail initialize context kobject\n");
-		goto error_kobj_init;
-	}
-
-	ret = sysfs_create_group(&ctx->kobj, &sde_rotator_fs_attr_group);
-	if (ret) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail register rotator sysfs nodes\n");
-		goto error_create_sysfs;
-	}
-
-	for (i = 0; i < MAX_ROT_OPEN_SESSION; i++) {
-		if (rot_dev->kthread_free[i]) {
-			rot_dev->kthread_free[i] = false;
-			ctx->kthread_id = i;
-			ctx->work_queue.rot_kw = &rot_dev->rot_kw[i];
-			ctx->work_queue.rot_thread = rot_dev->rot_thread[i];
-			break;
-		}
-	}
-
-	if (ctx->kthread_id < 0) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail to acquire the kthread\n");
-		ret = -EINVAL;
-		goto error_alloc_kthread;
-	}
-
-	snprintf(name, sizeof(name), "%d_%d", rot_dev->dev->id,
-			ctx->session_id);
-	ctx->work_queue.timeline = sde_rotator_create_timeline(name);
-	if (!ctx->work_queue.timeline)
-		SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-				 SDE_ROTATOR_ADD_REQUEST);
-	ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
-			ctx->session_id, &ctx->work_queue);
-	if (ret < 0) {
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail open session\n");
-		goto error_open_session;
-	}
-	sde_rot_mgr_unlock(rot_dev->mgr);
-
-	if (ctx->file) {
-		/* Create control */
-		ctrl_handler = &ctx->ctrl_handler;
-		v4l2_ctrl_handler_init(ctrl_handler, 4);
-		v4l2_ctrl_new_std(ctrl_handler,
-			&sde_rotator_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
-		v4l2_ctrl_new_std(ctrl_handler,
-			&sde_rotator_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
-		v4l2_ctrl_new_std(ctrl_handler,
-			&sde_rotator_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
-		v4l2_ctrl_new_custom(ctrl_handler,
-			&sde_rotator_ctrl_secure, NULL);
-		v4l2_ctrl_new_custom(ctrl_handler,
-			&sde_rotator_ctrl_secure_camera, NULL);
-		if (ctrl_handler->error) {
-			ret = ctrl_handler->error;
-			v4l2_ctrl_handler_free(ctrl_handler);
-			goto error_ctrl_handler;
-		}
-		ctx->fh.ctrl_handler = ctrl_handler;
-		v4l2_ctrl_handler_setup(ctrl_handler);
-	} else {
-		/* acquire exclusive context */
-		SDEDEV_DBG(rot_dev->dev, "acquire exclusive session id:%u\n",
-				ctx->session_id);
-		SDEROT_EVTLOG(ctx->session_id);
-		rot_dev->excl_ctx = ctx;
-	}
-
-	mutex_unlock(&rot_dev->lock);
-
-	SDEDEV_DBG(ctx->rot_dev->dev, "SDE v4l2 rotator open success\n");
-
-	ATRACE_BEGIN(ctx->kobj.name);
-
-	return ctx;
-error_ctrl_handler:
-error_open_session:
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	rot_dev->kthread_free[ctx->kthread_id] = true;
-	ctx->kthread_id = -1;
-error_alloc_kthread:
-	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
-error_create_sysfs:
-	kobject_put(&ctx->kobj);
-error_kobj_init:
-	if (ctx->file) {
-		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-		ctx->fh.m2m_ctx = NULL;
-	}
-error_m2m_init:
-	if (ctx->file) {
-		v4l2_fh_del(&ctx->fh);
-		v4l2_fh_exit(&ctx->fh);
-	}
-	mutex_unlock(&rot_dev->lock);
-error_lock:
-	kfree(ctx);
-	ctx = NULL;
-	return ERR_PTR(ret);
-}
-
-/*
- * sde_rotator_ctx_release - Rotator device release method.
- * @ctx: Pointer rotator context.
- * @file: Pointer to file struct (optional)
- * return: 0 if success; error code, otherwise
- */
-static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
-		struct file *file)
-{
-	struct sde_rotator_device *rot_dev;
-	u32 session_id;
-	struct list_head *curr, *next;
-
-	if (!ctx) {
-		SDEROT_DBG("ctx is NULL\n");
-		return -EINVAL;
-	}
-
-	rot_dev = ctx->rot_dev;
-	session_id = ctx->session_id;
-
-	ATRACE_END(ctx->kobj.name);
-
-	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
-	mutex_lock(&rot_dev->lock);
-	if (rot_dev->excl_ctx == ctx) {
-		SDEDEV_DBG(rot_dev->dev, "release exclusive session id:%u\n",
-				session_id);
-		SDEROT_EVTLOG(session_id);
-		rot_dev->excl_ctx = NULL;
-	}
-	if (ctx->file) {
-		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
-		SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
-		if (ctx->fh.m2m_ctx) {
-			v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
-				V4L2_BUF_TYPE_VIDEO_OUTPUT);
-			v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
-				V4L2_BUF_TYPE_VIDEO_CAPTURE);
-		}
-	}
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id);
-	list_for_each_safe(curr, next, &ctx->pending_list) {
-		struct sde_rotator_request *request =
-			container_of(curr, struct sde_rotator_request, list);
-
-		SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n",
-				session_id);
-		kthread_cancel_work_sync(&request->submit_work);
-	}
-	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-			SDE_ROTATOR_REMOVE_REQUEST);
-	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
-	list_for_each_safe(curr, next, &ctx->pending_list) {
-		struct sde_rotator_request *request =
-			container_of(curr, struct sde_rotator_request, list);
-
-		SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n",
-				session_id);
-		kthread_cancel_work_sync(&request->retire_work);
-	}
-	mutex_lock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
-	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	if (ctx->kthread_id >= 0 && ctx->work_queue.rot_kw) {
-		kthread_flush_worker(ctx->work_queue.rot_kw);
-		rot_dev->kthread_free[ctx->kthread_id] = true;
-	}
-	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
-	kobject_put(&ctx->kobj);
-	if (ctx->file) {
-		if (ctx->fh.m2m_ctx)
-			v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-		if (ctx->fh.vdev) {
-			v4l2_fh_del(&ctx->fh);
-			v4l2_fh_exit(&ctx->fh);
-		}
-	}
-	kfree(ctx->vbinfo_out);
-	kfree(ctx->vbinfo_cap);
-	kfree(ctx);
-	wake_up_interruptible(&rot_dev->open_wq);
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
-	return 0;
-}
-
-/*
- * sde_rotator_update_retire_sequence - update retired sequence of the context
- *	referenced in the request, and wake up any waiting for update event
- * @request: Pointer to rotator request
- */
-static void sde_rotator_update_retire_sequence(
-		struct sde_rotator_request *request)
-{
-	struct sde_rotator_ctx *ctx;
-
-	if (!request || !request->ctx) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	ctx = request->ctx;
-	ctx->retired_sequence_id = request->sequence_id;
-
-	wake_up(&ctx->wait_queue);
-
-	SDEROT_DBG("update sequence s:%d.%d\n",
-				ctx->session_id, ctx->retired_sequence_id);
-}
-
-/*
- * sde_rotator_retire_request - retire the given rotator request with
- *	device mutex locked
- * @request: Pointer to rotator request
- */
-static void sde_rotator_retire_request(struct sde_rotator_request *request)
-{
-	struct sde_rotator_ctx *ctx;
-
-	if (!request || !request->ctx) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	ctx = request->ctx;
-
-	request->req = NULL;
-	request->sequence_id = 0;
-	request->committed = false;
-	spin_lock(&ctx->list_lock);
-	list_del_init(&request->list);
-	list_add_tail(&request->list, &ctx->retired_list);
-	spin_unlock(&ctx->list_lock);
-
-	wake_up(&ctx->wait_queue);
-
-	SDEROT_DBG("retire request s:%d.%d\n",
-				ctx->session_id, ctx->retired_sequence_id);
-}
-
-/*
- * sde_rotator_is_request_retired - Return true if given request already expired
- * @request: Pointer to rotator request
- */
-static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
-{
-	struct sde_rotator_ctx *ctx;
-	u32 sequence_id;
-	s32 retire_delta;
-
-	if (!request || !request->ctx)
-		return true;
-
-	ctx = request->ctx;
-	sequence_id = request->sequence_id;
-
-	retire_delta = (s32) (ctx->retired_sequence_id - sequence_id);
-
-	SDEROT_DBG("sequence:%u/%u\n", sequence_id, ctx->retired_sequence_id);
-
-	return retire_delta >= 0;
-}
-
-static void sde_rotator_pm_qos_remove(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	pm_qos_remove_request(req);
-}
-
-void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	int cpu;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
-		PM_QOS_DEFAULT_VALUE);
-
-	SDEROT_DBG("rotator pmqos add mask %x latency %x\n",
-		rot_mdata->rot_pm_qos_cpu_mask,
-		rot_mdata->rot_pm_qos_cpu_dma_latency);
-}
-
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request)
-{
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	bool changed = false;
-
-	if (!rot_dev) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_dev->mdata->rot_pm_qos_cpu_mask;
-	cpu_dma_latency = rot_dev->mdata->rot_pm_qos_cpu_dma_latency;
-
-	if (!cpu_mask)
-		return;
-
-	if (add_request) {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-			changed = true;
-		rot_dev->mdata->rot_pm_qos_cpu_count++;
-	} else {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count != 0) {
-			rot_dev->mdata->rot_pm_qos_cpu_count--;
-			if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-				changed = true;
-		} else {
-			SDEROT_DBG("%s: ref_count is not balanced\n",
-				__func__);
-		}
-	}
-
-	if (!changed)
-		return;
-
-	SDEROT_EVTLOG(add_request, cpu_mask, cpu_dma_latency);
-
-	if (!add_request) {
-		pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-			PM_QOS_DEFAULT_VALUE);
-		return;
-	}
-
-	pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-		cpu_dma_latency);
-}
-
-/*
- * sde_rotator_inline_open - open inline rotator session
- * @pdev: Pointer to rotator platform device
- * @video_mode: true if video mode is requested
- * return: Pointer to new rotator session context
- */
-void *sde_rotator_inline_open(struct platform_device *pdev)
-{
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_ctx *ctx;
-	int rc;
-
-	if (!pdev) {
-		SDEROT_ERR("invalid platform device\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
-	if (!rot_dev) {
-		SDEROT_ERR("invalid rotator device\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctx = sde_rotator_ctx_open(rot_dev, NULL);
-	if (IS_ERR_OR_NULL(ctx)) {
-		rc = PTR_ERR(ctx);
-		SDEROT_ERR("failed to open rotator context %d\n", rc);
-		goto rotator_open_error;
-	}
-
-	ctx->slice = llcc_slice_getd(LLCC_ROTATOR);
-	if (IS_ERR(ctx->slice)) {
-		rc = PTR_ERR(ctx->slice);
-		SDEROT_ERR("failed to get system cache %d\n", rc);
-		goto slice_getd_error;
-	}
-
-	if (!rot_dev->disable_syscache) {
-		rc = llcc_slice_activate(ctx->slice);
-		if (rc) {
-			SDEROT_ERR("failed to activate slice %d\n", rc);
-			goto activate_error;
-		}
-		SDEROT_DBG("scid %d size %zukb\n",
-				llcc_get_slice_id(ctx->slice),
-				llcc_get_slice_size(ctx->slice));
-	} else {
-		SDEROT_DBG("syscache bypassed\n");
-	}
-
-	SDEROT_EVTLOG(ctx->session_id, llcc_get_slice_id(ctx->slice),
-			llcc_get_slice_size(ctx->slice),
-			rot_dev->disable_syscache);
-
-	return ctx;
-
-activate_error:
-	llcc_slice_putd(ctx->slice);
-	ctx->slice = NULL;
-slice_getd_error:
-	sde_rotator_ctx_release(ctx, NULL);
-rotator_open_error:
-	return ERR_PTR(rc);
-}
-EXPORT_SYMBOL(sde_rotator_inline_open);
-
-int sde_rotator_inline_release(void *handle)
-{
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_ctx *ctx;
-
-	if (!handle) {
-		SDEROT_ERR("invalid rotator ctx\n");
-		return -EINVAL;
-	}
-
-	ctx = handle;
-	rot_dev = ctx->rot_dev;
-
-	if (!rot_dev) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	if (ctx->slice) {
-		if (!rot_dev->disable_syscache)
-			llcc_slice_deactivate(ctx->slice);
-		llcc_slice_putd(ctx->slice);
-		ctx->slice = NULL;
-	}
-
-	SDEROT_EVTLOG(ctx->session_id);
-
-	return sde_rotator_ctx_release(ctx, NULL);
-}
-EXPORT_SYMBOL(sde_rotator_inline_release);
-
-/*
- * sde_rotator_inline_get_dst_pixfmt - determine output pixel format
- * @pdev: Pointer to platform device
- * @src_pixfmt: input pixel format
- * @dst_pixfmt: Pointer to output pixel format (output)
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
-		u32 src_pixfmt, u32 *dst_pixfmt)
-{
-	int rc;
-
-	if (!src_pixfmt || !dst_pixfmt)
-		return -EINVAL;
-
-	rc = sde_rot_get_base_tilea5x_pixfmt(src_pixfmt, dst_pixfmt);
-	if (rc)
-		return rc;
-
-	/*
-	 * Currently, NV21 tile is not supported as output; hence,
-	 * override with NV12 tile.
-	 */
-	if (*dst_pixfmt == SDE_PIX_FMT_Y_CRCB_H2V2_TILE)
-		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
-
-	return 0;
-}
-EXPORT_SYMBOL(sde_rotator_inline_get_dst_pixfmt);
-
-/*
- * sde_rotator_inline_get_downscale_caps - get scaling capability
- * @pdev: Pointer to platform device
- * @caps: string buffer for capability
- * @len: length of string buffer
- * return: length of capability string
- */
-int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
-		char *caps, int len)
-{
-	struct sde_rotator_device *rot_dev;
-	int rc;
-
-	if (!pdev) {
-		SDEROT_ERR("invalid platform device\n");
-		return -EINVAL;
-	}
-
-	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	rc = sde_rotator_get_downscale_caps(rot_dev->mgr, caps, len);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-
-	return rc;
-}
-EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
-
-/*
- * sde_rotator_inline_get_maxlinewidth - get maximum line width of rotator
- * @pdev: Pointer to platform device
- * return: maximum line width
- */
-int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
-{
-	struct sde_rotator_device *rot_dev;
-	int maxlinewidth;
-
-	if (!pdev) {
-		SDEROT_ERR("invalid platform device\n");
-		return -EINVAL;
-	}
-
-	rot_dev = (struct sde_rotator_device *)platform_get_drvdata(pdev);
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	maxlinewidth = sde_rotator_get_maxlinewidth(rot_dev->mgr);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-
-	return maxlinewidth;
-}
-EXPORT_SYMBOL(sde_rotator_inline_get_maxlinewidth);
-
-/*
- * sde_rotator_inline_get_pixfmt_caps - get pixel format capability
- * @pdev: Pointer to platform device
- * @pixfmt: array of pixel format buffer
- * @len: length of pixel format buffer
- * return: length of pixel format capability if success; error code otherwise
- */
-int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
-		bool input, u32 *pixfmts, int len)
-{
-	struct sde_rotator_device *rot_dev;
-	u32 i, pixfmt;
-
-	if (!pdev) {
-		SDEROT_ERR("invalid platform device\n");
-		return -EINVAL;
-	}
-
-	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	for (i = 0;; i++) {
-		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, input,
-				SDE_ROTATOR_MODE_SBUF);
-		if (!pixfmt)
-			break;
-		if (pixfmts && i < len)
-			pixfmts[i] = pixfmt;
-	}
-	sde_rot_mgr_unlock(rot_dev->mgr);
-
-	return i;
-}
-EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
-
-/*
- * _sde_rotator_inline_cleanup - perform inline related request cleanup
- *	This function assumes rot_dev->mgr lock has been taken when called.
- * @handle: Pointer to rotator context
- * @request: Pointer to rotation request
- * return: 0 if success; -EAGAIN if cleanup should be retried
- */
-static int _sde_rotator_inline_cleanup(void *handle,
-		struct sde_rotator_request *request)
-{
-	struct sde_rotator_ctx *ctx;
-	struct sde_rotator_device *rot_dev;
-	int ret;
-
-	if (!handle || !request) {
-		SDEROT_ERR("invalid rotator handle/request\n");
-		return -EINVAL;
-	}
-
-	ctx = handle;
-	rot_dev = ctx->rot_dev;
-
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	if (request->committed) {
-		/* wait until request is finished */
-		sde_rot_mgr_unlock(rot_dev->mgr);
-		mutex_unlock(&rot_dev->lock);
-		ret = wait_event_timeout(ctx->wait_queue,
-			sde_rotator_is_request_retired(request),
-			msecs_to_jiffies(rot_dev->streamoff_timeout));
-		mutex_lock(&rot_dev->lock);
-		sde_rot_mgr_lock(rot_dev->mgr);
-
-		if (!ret) {
-			SDEROT_ERR("timeout w/o retire s:%d\n",
-					ctx->session_id);
-			SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
-			sde_rotator_abort_inline_request(rot_dev->mgr,
-					ctx->private, request->req);
-			return -EAGAIN;
-		} else if (ret == 1) {
-			SDEROT_ERR("timeout w/ retire s:%d\n", ctx->session_id);
-			SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
-		}
-	}
-
-	sde_rotator_req_finish(rot_dev->mgr, ctx->private, request->req);
-	sde_rotator_retire_request(request);
-	return 0;
-}
-
-/*
- * sde_rotator_inline_commit - commit given rotator command
- * @handle: Pointer to rotator context
- * @cmd: Pointer to rotator command
- * @cmd_type: command type - validate/prepare/commit/cleanup
- * return: 0 if success; error code otherwise
- */
-int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
-		enum sde_rotator_inline_cmd_type cmd_type)
-{
-	struct sde_rotator_ctx *ctx;
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_request *request = NULL;
-	struct sde_rot_entry_container *req = NULL;
-	struct sde_rotation_config rotcfg;
-	struct sde_rot_trace_entry rot_trace;
-	ktime_t *ts;
-	u32 flags = 0;
-	int i, ret = 0;
-
-	if (!handle || !cmd) {
-		SDEROT_ERR("invalid rotator handle/cmd\n");
-		return -EINVAL;
-	}
-
-	ctx = handle;
-	rot_dev = ctx->rot_dev;
-
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return -EINVAL;
-	}
-
-	SDEROT_DBG(
-		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu prefill:%llu wb:%d vid:%d cmd:%d\n",
-		ctx->session_id, cmd->sequence_id,
-		cmd->src_rect_x, cmd->src_rect_y,
-		cmd->src_rect_w, cmd->src_rect_h,
-		cmd->src_width, cmd->src_height,
-		cmd->src_pixfmt >> 0, cmd->src_pixfmt >> 8,
-		cmd->src_pixfmt >> 16, cmd->src_pixfmt >> 24,
-		cmd->dst_rect_x, cmd->dst_rect_y,
-		cmd->dst_rect_w, cmd->dst_rect_h,
-		cmd->dst_pixfmt >> 0, cmd->dst_pixfmt >> 8,
-		cmd->dst_pixfmt >> 16, cmd->dst_pixfmt >> 24,
-		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
-		cmd->clkrate, cmd->data_bw, cmd->prefill_bw,
-		cmd->dst_writeback, cmd->video_mode, cmd_type);
-	SDEROT_EVTLOG(ctx->session_id, cmd->sequence_id,
-		cmd->src_rect_x, cmd->src_rect_y,
-		cmd->src_rect_w, cmd->src_rect_h,
-		cmd->src_pixfmt,
-		cmd->dst_rect_w, cmd->dst_rect_h,
-		cmd->dst_pixfmt,
-		cmd->fps, cmd->clkrate, cmd->data_bw, cmd->prefill_bw,
-		(cmd->rot90 << 0) | (cmd->hflip << 1) | (cmd->vflip << 2) |
-		(cmd->secure << 3) | (cmd->dst_writeback << 4) |
-		(cmd->video_mode << 5) |
-		(cmd_type << 24));
-
-	mutex_lock(&rot_dev->lock);
-	sde_rot_mgr_lock(rot_dev->mgr);
-
-	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
-			cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
-
-		struct sde_rotation_item item;
-		struct sde_rotator_statistics *stats = &rot_dev->stats;
-		int scid = llcc_get_slice_id(ctx->slice);
-
-		/* allocate slot for timestamp */
-		ts = stats->ts[stats->count % SDE_ROTATOR_NUM_EVENTS];
-		if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT)
-			stats->count++;
-
-		if (cmd->rot90)
-			flags |= SDE_ROTATION_90;
-		if (cmd->hflip)
-			flags |= SDE_ROTATION_FLIP_LR;
-		if (cmd->vflip)
-			flags |= SDE_ROTATION_FLIP_UD;
-		if (cmd->secure)
-			flags |= SDE_ROTATION_SECURE;
-
-		flags |= SDE_ROTATION_EXT_PERF;
-
-		/* fill in item work structure */
-		memset(&item, 0, sizeof(struct sde_rotation_item));
-		item.flags = flags | SDE_ROTATION_EXT_IOVA;
-		item.trigger = cmd->video_mode ? SDE_ROTATOR_TRIGGER_VIDEO :
-				SDE_ROTATOR_TRIGGER_COMMAND;
-		item.prefill_bw = cmd->prefill_bw;
-		item.session_id = ctx->session_id;
-		item.sequence_id = cmd->sequence_id;
-		item.src_rect.x = cmd->src_rect_x;
-		item.src_rect.y = cmd->src_rect_y;
-		item.src_rect.w = cmd->src_rect_w;
-		item.src_rect.h = cmd->src_rect_h;
-		item.input.width = cmd->src_width;
-		item.input.height = cmd->src_height;
-		item.input.format = cmd->src_pixfmt;
-
-		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
-			item.input.planes[i].addr = cmd->src_addr[i];
-			item.input.planes[i].len = cmd->src_len[i];
-			item.input.planes[i].fd = -1;
-		}
-		item.input.plane_count = cmd->src_planes;
-		item.input.comp_ratio.numer = 1;
-		item.input.comp_ratio.denom = 1;
-
-		item.output.width = cmd->dst_rect_x + cmd->dst_rect_w;
-		item.output.height = cmd->dst_rect_y + cmd->dst_rect_h;
-		item.dst_rect.x = cmd->dst_rect_x;
-		item.dst_rect.y = cmd->dst_rect_y;
-		item.dst_rect.w = cmd->dst_rect_w;
-		item.dst_rect.h = cmd->dst_rect_h;
-		item.output.sbuf = true;
-		item.output.scid = scid;
-		item.output.writeback = cmd->dst_writeback;
-		item.output.format = cmd->dst_pixfmt;
-
-		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
-			item.output.planes[i].addr = cmd->dst_addr[i];
-			item.output.planes[i].len = cmd->dst_len[i];
-			item.output.planes[i].fd = -1;
-		}
-		item.output.plane_count = cmd->dst_planes;
-		item.output.comp_ratio.numer = 1;
-		item.output.comp_ratio.denom = 1;
-		item.sequence_id = ++(ctx->commit_sequence_id);
-		item.ts = ts;
-
-		req = sde_rotator_req_init(rot_dev->mgr, ctx->private,
-				&item, 1, 0);
-		if (IS_ERR_OR_NULL(req)) {
-			SDEROT_ERR("fail allocate request s:%d\n",
-					ctx->session_id);
-			ret = -ENOMEM;
-			goto error_init_request;
-		}
-
-		/* initialize session configuration */
-		memset(&rotcfg, 0, sizeof(struct sde_rotation_config));
-		rotcfg.flags = flags;
-		rotcfg.frame_rate = cmd->fps;
-		rotcfg.clk_rate = cmd->clkrate;
-		rotcfg.data_bw = cmd->data_bw;
-		rotcfg.session_id = ctx->session_id;
-		rotcfg.input.width = cmd->src_rect_w;
-		rotcfg.input.height = cmd->src_rect_h;
-		rotcfg.input.format = cmd->src_pixfmt;
-		rotcfg.input.comp_ratio.numer = 1;
-		rotcfg.input.comp_ratio.denom = 1;
-		rotcfg.output.width = cmd->dst_rect_w;
-		rotcfg.output.height = cmd->dst_rect_h;
-		rotcfg.output.format = cmd->dst_pixfmt;
-		rotcfg.output.comp_ratio.numer = 1;
-		rotcfg.output.comp_ratio.denom = 1;
-		rotcfg.output.sbuf = true;
-	}
-
-	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
-
-		ret = sde_rotator_session_validate(rot_dev->mgr,
-				ctx->private, &rotcfg);
-		if (ret) {
-			SDEROT_WARN("fail session validation s:%d\n",
-					ctx->session_id);
-			goto error_session_validate;
-		}
-
-		devm_kfree(rot_dev->dev, req);
-		req = NULL;
-
-	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
-
-		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
-			ret = sde_rotator_session_config(rot_dev->mgr,
-					ctx->private, &rotcfg);
-			if (ret) {
-				SDEROT_ERR("fail session config s:%d\n",
-						ctx->session_id);
-				goto error_session_config;
-			}
-
-			ctx->rotcfg = rotcfg;
-		}
-
-		request = list_first_entry_or_null(&ctx->retired_list,
-				struct sde_rotator_request, list);
-		if (!request) {
-			/* should not happen */
-			ret = -ENOMEM;
-			SDEROT_ERR("no free request s:%d\n", ctx->session_id);
-			goto error_retired_list;
-		}
-
-		request->req = req;
-		request->sequence_id = req->entries[0].item.sequence_id;
-
-		spin_lock(&ctx->list_lock);
-		list_del_init(&request->list);
-		list_add_tail(&request->list, &ctx->pending_list);
-		spin_unlock(&ctx->list_lock);
-
-		ts = req->entries[0].item.ts;
-		if (ts) {
-			ts[SDE_ROTATOR_TS_SRCQB] = ktime_get();
-			ts[SDE_ROTATOR_TS_DSTQB] = ktime_get();
-			ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
-		} else {
-			SDEROT_ERR("invalid stats timestamp\n");
-		}
-		req->retire_kw = ctx->work_queue.rot_kw;
-		req->retire_work = &request->retire_work;
-
-		/* Set values to pass to trace */
-		rot_trace.wb_idx = req->entries[0].item.wb_idx;
-		rot_trace.flags = req->entries[0].item.flags;
-		rot_trace.input_format = req->entries[0].item.input.format;
-		rot_trace.input_width = req->entries[0].item.input.width;
-		rot_trace.input_height = req->entries[0].item.input.height;
-		rot_trace.src_x = req->entries[0].item.src_rect.x;
-		rot_trace.src_y = req->entries[0].item.src_rect.y;
-		rot_trace.src_w = req->entries[0].item.src_rect.w;
-		rot_trace.src_h = req->entries[0].item.src_rect.h;
-		rot_trace.output_format = req->entries[0].item.output.format;
-		rot_trace.output_width = req->entries[0].item.output.width;
-		rot_trace.output_height = req->entries[0].item.output.height;
-		rot_trace.dst_x = req->entries[0].item.dst_rect.x;
-		rot_trace.dst_y = req->entries[0].item.dst_rect.y;
-		rot_trace.dst_w = req->entries[0].item.dst_rect.w;
-		rot_trace.dst_h = req->entries[0].item.dst_rect.h;
-
-
-		trace_rot_entry_fence(
-			ctx->session_id, cmd->sequence_id, &rot_trace);
-
-		ret = sde_rotator_handle_request_common(
-				rot_dev->mgr, ctx->private, req);
-		if (ret) {
-			SDEROT_ERR("fail handle request s:%d\n",
-					ctx->session_id);
-			goto error_handle_request;
-		}
-
-		sde_rotator_req_reset_start(rot_dev->mgr, req);
-
-		sde_rotator_queue_request(rot_dev->mgr, ctx->private, req);
-
-		request->committed = true;
-
-		/* save request in private handle */
-		cmd->priv_handle = request;
-
-	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_START) {
-		if (!cmd->priv_handle) {
-			ret = -EINVAL;
-			SDEROT_ERR("invalid private handle\n");
-			goto error_invalid_handle;
-		}
-
-		request = cmd->priv_handle;
-		sde_rotator_req_set_start(rot_dev->mgr, request->req);
-	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
-		if (!cmd->priv_handle) {
-			ret = -EINVAL;
-			SDEROT_ERR("invalid private handle\n");
-			goto error_invalid_handle;
-		}
-
-		request = cmd->priv_handle;
-
-		/* attempt single retry if first cleanup attempt failed */
-		if (_sde_rotator_inline_cleanup(handle, request) == -EAGAIN)
-			_sde_rotator_inline_cleanup(handle, request);
-
-		cmd->priv_handle = NULL;
-	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_ABORT) {
-		if (!cmd->priv_handle) {
-			ret = -EINVAL;
-			SDEROT_ERR("invalid private handle\n");
-			goto error_invalid_handle;
-		}
-
-		request = cmd->priv_handle;
-		if (!sde_rotator_is_request_retired(request))
-			sde_rotator_abort_inline_request(rot_dev->mgr,
-					ctx->private, request->req);
-	}
-
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	mutex_unlock(&rot_dev->lock);
-	return 0;
-
-error_handle_request:
-	sde_rotator_update_retire_sequence(request);
-	sde_rotator_retire_request(request);
-error_retired_list:
-error_session_validate:
-error_session_config:
-	devm_kfree(rot_dev->dev, req);
-error_invalid_handle:
-error_init_request:
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	mutex_unlock(&rot_dev->lock);
-	return ret;
-}
-EXPORT_SYMBOL(sde_rotator_inline_commit);
-
-void sde_rotator_inline_reg_dump(struct platform_device *pdev)
-{
-	struct sde_rotator_device *rot_dev;
-
-	if (!pdev) {
-		SDEROT_ERR("invalid platform device\n");
-		return;
-	}
-
-	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
-	if (!rot_dev || !rot_dev->mgr) {
-		SDEROT_ERR("invalid rotator device\n");
-		return;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_core_dump(rot_dev->mgr);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-}
-EXPORT_SYMBOL(sde_rotator_inline_reg_dump);
-
-/*
- * sde_rotator_open - Rotator device open method.
- * @file: Pointer to file struct.
- */
-static int sde_rotator_open(struct file *file)
-{
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct sde_rotator_ctx *ctx;
-	int ret = 0;
-
-	ctx = sde_rotator_ctx_open(rot_dev, file);
-	if (IS_ERR_OR_NULL(ctx)) {
-		SDEDEV_DBG(rot_dev->dev, "failed to open %d\n", ret);
-		ret = PTR_ERR(ctx);
-	}
-
-	return ret;
-}
-
-/*
- * sde_rotator_release - Rotator device release method.
- * @file: Pointer to file struct.
- */
-static int sde_rotator_release(struct file *file)
-{
-	struct sde_rotator_ctx *ctx =
-			sde_rotator_ctx_from_fh(file->private_data);
-
-	return sde_rotator_ctx_release(ctx, file);
-}
-
-/*
- * sde_rotator_poll - rotator device pool method.
- * @file: Pointer to file struct.
- * @wait: Pointer to poll table struct.
- */
-static unsigned int sde_rotator_poll(struct file *file,
-	struct poll_table_struct *wait)
-{
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct sde_rotator_ctx *ctx =
-			sde_rotator_ctx_from_fh(file->private_data);
-	int ret;
-
-	mutex_lock(&rot_dev->lock);
-	ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
-	mutex_unlock(&rot_dev->lock);
-	return ret;
-}
-
-/* rotator device file operations callbacks */
-static const struct v4l2_file_operations sde_rotator_fops = {
-	.owner          = THIS_MODULE,
-	.open           = sde_rotator_open,
-	.release        = sde_rotator_release,
-	.poll           = sde_rotator_poll,
-	.unlocked_ioctl = video_ioctl2,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl32 = sde_rotator_compat_ioctl32,
-#endif
-};
-
-/*
- * sde_rotator_querycap - V4l2 ioctl query capability handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @cap: Pointer to v4l2_capability struct need to be filled.
- */
-static int sde_rotator_querycap(struct file *file,
-	void *fh, struct v4l2_capability *cap)
-{
-	cap->bus_info[0] = 0;
-	strlcpy(cap->driver, SDE_ROTATOR_DRV_NAME, sizeof(cap->driver));
-	strlcpy(cap->card, SDE_ROTATOR_DRV_NAME, sizeof(cap->card));
-	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
-			V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_CAPTURE;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
-	return 0;
-}
-
-/*
- * sde_rotator_enum_fmt_vid_cap - V4l2 ioctl enumerate output format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_fmtdesc struct need to be filled.
- */
-static int sde_rotator_enum_fmt_vid_cap(struct file *file,
-	void *fh, struct v4l2_fmtdesc *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_mdp_format_params *fmt;
-	u32 i, index, pixfmt;
-	bool found = false;
-
-	for (i = 0, index = 0; index <= f->index; i++) {
-		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, false,
-				SDE_ROTATOR_MODE_OFFLINE);
-		if (!pixfmt)
-			return -EINVAL;
-
-		fmt = sde_get_format_params(pixfmt);
-		if (!fmt)
-			return -EINVAL;
-
-		if (sde_mdp_is_private_format(fmt))
-			continue;
-
-		if (index == f->index) {
-			found = true;
-			break;
-		}
-
-		index++;
-	}
-
-	if (!found)
-		return -EINVAL;
-
-	f->pixelformat = pixfmt;
-	strlcpy(f->description, fmt->description, sizeof(f->description));
-
-	return 0;
-}
-
-/*
- * sde_rotator_enum_fmt_vid_out - V4l2 ioctl enumerate capture format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_fmtdesc struct need to be filled.
- */
-static int sde_rotator_enum_fmt_vid_out(struct file *file,
-	void *fh, struct v4l2_fmtdesc *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_mdp_format_params *fmt;
-	u32 i, index, pixfmt;
-	bool found = false;
-
-	for (i = 0, index = 0; index <= f->index; i++) {
-		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, true,
-				SDE_ROTATOR_MODE_OFFLINE);
-		if (!pixfmt)
-			return -EINVAL;
-
-		fmt = sde_get_format_params(pixfmt);
-		if (!fmt)
-			return -EINVAL;
-
-		if (sde_mdp_is_private_format(fmt))
-			continue;
-
-		if (index == f->index) {
-			found = true;
-			break;
-		}
-
-		index++;
-	}
-
-	if (!found)
-		return -EINVAL;
-
-	f->pixelformat = pixfmt;
-	strlcpy(f->description, fmt->description, sizeof(f->description));
-
-	return 0;
-}
-
-/*
- * sde_rotator_g_fmt_cap - V4l2 ioctl get capture format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct need to be filled.
- */
-static int sde_rotator_g_fmt_cap(struct file *file, void *fh,
-	struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	*f = ctx->format_cap;
-
-	return 0;
-}
-
-/*
- * sde_rotator_g_fmt_out - V4l2 ioctl get output format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct need to be filled.
- */
-static int sde_rotator_g_fmt_out(struct file *file, void *fh,
-	struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	*f = ctx->format_out;
-
-	return 0;
-}
-
-/*
- * sde_rotator_try_fmt_vid_cap - V4l2 ioctl try capture format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct.
- */
-static int sde_rotator_try_fmt_vid_cap(struct file *file,
-	void *fh, struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotation_config config;
-	int ret;
-
-	if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
-		SDEDEV_WARN(ctx->rot_dev->dev,
-				"Not supporting 0 width/height: %dx%d\n",
-				f->fmt.pix.width, f->fmt.pix.height);
-		return -EINVAL;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_get_config_from_ctx(ctx, &config);
-	config.output.format = f->fmt.pix.pixelformat;
-	config.output.width = f->fmt.pix.width;
-	config.output.height = f->fmt.pix.height;
-	config.flags |= SDE_ROTATION_VERIFY_INPUT_ONLY;
-	ret = sde_rotator_verify_config_output(rot_dev->mgr, &config);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	if (ret) {
-		if ((config.output.width == f->fmt.pix.width) &&
-				(config.output.height == f->fmt.pix.height)) {
-			SDEDEV_WARN(ctx->rot_dev->dev,
-				"invalid capture format 0x%8.8x %dx%d\n",
-				f->fmt.pix.pixelformat,
-				f->fmt.pix.width,
-				f->fmt.pix.height);
-			return -EINVAL;
-		}
-		f->fmt.pix.width = config.output.width;
-		f->fmt.pix.height = config.output.height;
-	}
-
-	sde_rotator_format_recalc(f);
-	return ret;
-}
-
-/*
- * sde_rotator_try_fmt_vid_out - V4l2 ioctl try output format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct.
- */
-static int sde_rotator_try_fmt_vid_out(struct file *file,
-	void *fh, struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotation_config config;
-	int ret;
-
-	if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
-		SDEDEV_WARN(ctx->rot_dev->dev,
-				"Not supporting 0 width/height: %dx%d\n",
-				f->fmt.pix.width, f->fmt.pix.height);
-		return -EINVAL;
-	}
-
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_get_config_from_ctx(ctx, &config);
-	config.input.format = f->fmt.pix.pixelformat;
-	config.input.width = f->fmt.pix.width;
-	config.input.height = f->fmt.pix.height;
-	config.flags |= SDE_ROTATION_VERIFY_INPUT_ONLY;
-	ret = sde_rotator_verify_config_input(rot_dev->mgr, &config);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	if (ret) {
-		if ((config.input.width == f->fmt.pix.width) &&
-				(config.input.height == f->fmt.pix.height)) {
-			SDEDEV_WARN(ctx->rot_dev->dev,
-				"invalid output format 0x%8.8x %dx%d\n",
-				f->fmt.pix.pixelformat,
-				f->fmt.pix.width,
-				f->fmt.pix.height);
-			return -EINVAL;
-		}
-		f->fmt.pix.width = config.input.width;
-		f->fmt.pix.height = config.input.height;
-	}
-
-	sde_rotator_format_recalc(f);
-	return ret;
-}
-
-/*
- * sde_rotator_s_fmt_vid_cap - V4l2 ioctl set capture format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct.
- */
-static int sde_rotator_s_fmt_vid_cap(struct file *file,
-	void *fh, struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	int ret;
-
-	ret = sde_rotator_try_fmt_vid_cap(file, fh, f);
-	if (ret)
-		return -EINVAL;
-
-	/* Initialize crop */
-	ctx->crop_cap.top = 0;
-	ctx->crop_cap.left = 0;
-	ctx->crop_cap.width = f->fmt.pix.width;
-	ctx->crop_cap.height = f->fmt.pix.height;
-
-	ctx->format_cap = *f;
-
-	SDEDEV_DBG(rot_dev->dev,
-		"s_fmt s:%d t:%d fmt:0x%8.8x field:%u (%u,%u)\n",
-		ctx->session_id, f->type,
-		f->fmt.pix.pixelformat,
-		f->fmt.pix.field,
-		f->fmt.pix.width, f->fmt.pix.height);
-
-	return 0;
-}
-
-/*
- * sde_rotator_s_fmt_vid_out - V4l2 ioctl set output format handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @f: Pointer to v4l2_format struct.
- */
-static int sde_rotator_s_fmt_vid_out(struct file *file,
-	void *fh, struct v4l2_format *f)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	int ret;
-
-	ret = sde_rotator_try_fmt_vid_out(file, fh, f);
-	if (ret)
-		return -EINVAL;
-
-	/* Initialize crop */
-	ctx->crop_out.top = 0;
-	ctx->crop_out.left = 0;
-	ctx->crop_out.width = f->fmt.pix.width;
-	ctx->crop_out.height = f->fmt.pix.height;
-
-	ctx->format_out = *f;
-
-	SDEDEV_DBG(rot_dev->dev,
-		"s_fmt s:%d t:%d fmt:0x%8.8x field:%u (%u,%u)\n",
-		ctx->session_id, f->type,
-		f->fmt.pix.pixelformat,
-		f->fmt.pix.field,
-		f->fmt.pix.width, f->fmt.pix.height);
-
-	return 0;
-}
-
-/*
- * sde_rotator_reqbufs - V4l2 ioctl request buffers handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @req: Pointer to v4l2_requestbuffer struct.
- */
-static int sde_rotator_reqbufs(struct file *file,
-	void *fh, struct v4l2_requestbuffers *req)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	return v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, req);
-}
-
-/*
- * sde_rotator_qbuf - V4l2 ioctl queue buffer handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @buf: Pointer to v4l2_buffer struct.
- */
-static int sde_rotator_qbuf(struct file *file, void *fh,
-	struct v4l2_buffer *buf)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	int ret;
-
-	/* create fence for capture buffer */
-	if ((buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-			&& (buf->index < ctx->nbuf_cap)) {
-		int idx = buf->index;
-
-		ctx->vbinfo_cap[idx].fd = -1;
-		ctx->vbinfo_cap[idx].fence = sde_rotator_get_sync_fence(
-				ctx->work_queue.timeline, NULL,
-				&ctx->vbinfo_cap[idx].fence_ts);
-		ctx->vbinfo_cap[idx].qbuf_ts = ktime_get();
-		ctx->vbinfo_cap[idx].dqbuf_ts = NULL;
-		SDEDEV_DBG(ctx->rot_dev->dev,
-				"create buffer fence s:%d.%u i:%d f:%pK\n",
-				ctx->session_id,
-				ctx->vbinfo_cap[idx].fence_ts,
-				idx,
-				ctx->vbinfo_cap[idx].fence);
-	} else if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
-			&& (buf->index < ctx->nbuf_out)) {
-		int idx = buf->index;
-
-		ctx->vbinfo_out[idx].qbuf_ts = ktime_get();
-		ctx->vbinfo_out[idx].dqbuf_ts = NULL;
-	}
-
-	ret = v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
-	if (ret < 0)
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail qbuf s:%d t:%d r:%d\n",
-				ctx->session_id, buf->type, ret);
-	SDEROT_EVTLOG(buf->type, buf->bytesused, buf->length, buf->m.fd, ret);
-
-	return ret;
-}
-
-/*
- * sde_rotator_dqbuf - V4l2 ioctl dequeue buffer handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @buf: Pointer to v4l2_buffer struct.
- */
-static int sde_rotator_dqbuf(struct file *file,
-	void *fh, struct v4l2_buffer *buf)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	int ret;
-
-	ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
-
-	if (ret) {
-		SDEDEV_ERR(ctx->rot_dev->dev,
-				"fail dqbuf s:%d t:%d i:%d r:%d\n",
-				ctx->session_id, buf->type, buf->index, ret);
-		return ret;
-	}
-
-	/* clear fence for buffer */
-	if ((buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-			&& (buf->index < ctx->nbuf_cap)) {
-		int idx = buf->index;
-
-		if (ctx->vbinfo_cap[idx].fence) {
-			/* fence is not used */
-			SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n",
-					ctx->session_id, idx);
-			sde_rotator_put_sync_fence(ctx->vbinfo_cap[idx].fence);
-		}
-		ctx->vbinfo_cap[idx].fence = NULL;
-		ctx->vbinfo_cap[idx].fd = -1;
-		if (ctx->vbinfo_cap[idx].dqbuf_ts)
-			*(ctx->vbinfo_cap[idx].dqbuf_ts) = ktime_get();
-	} else if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
-			&& (buf->index < ctx->nbuf_out)) {
-		int idx = buf->index;
-
-		ctx->vbinfo_out[idx].fence = NULL;
-		ctx->vbinfo_out[idx].fd = -1;
-		if (ctx->vbinfo_out[idx].dqbuf_ts)
-			*(ctx->vbinfo_out[idx].dqbuf_ts) = ktime_get();
-	} else {
-		SDEDEV_WARN(ctx->rot_dev->dev, "invalid dq s:%d t:%d i:%d\n",
-				ctx->session_id, buf->type, buf->index);
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_querybuf - V4l2 ioctl query buffer handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @buf: Pointer to v4l2_buffer struct.
- */
-static int sde_rotator_querybuf(struct file *file,
-	void *fh, struct v4l2_buffer *buf)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	return v4l2_m2m_querybuf(file, ctx->fh.m2m_ctx, buf);
-}
-
-/*
- * sde_rotator_streamon - V4l2 ioctl stream on handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @buf_type: V4l2 buffer type.
- */
-static int sde_rotator_streamon(struct file *file,
-	void *fh, enum v4l2_buf_type buf_type)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotation_config config;
-	struct vb2_queue *vq;
-	int ret;
-
-	SDEDEV_DBG(ctx->rot_dev->dev, "stream on s:%d t:%d\n",
-			ctx->session_id, buf_type);
-
-	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
-			buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
-			V4L2_BUF_TYPE_VIDEO_CAPTURE :
-			V4L2_BUF_TYPE_VIDEO_OUTPUT);
-
-	if (!vq) {
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail to get vq on s:%d t:%d\n",
-				ctx->session_id, buf_type);
-		return -EINVAL;
-	}
-
-	if (vb2_is_streaming(vq)) {
-		sde_rot_mgr_lock(rot_dev->mgr);
-		sde_rotator_get_config_from_ctx(ctx, &config);
-		config.flags &= ~SDE_ROTATION_VERIFY_INPUT_ONLY;
-		ret = sde_rotator_session_config(rot_dev->mgr, ctx->private,
-				&config);
-		sde_rot_mgr_unlock(rot_dev->mgr);
-		if (ret < 0) {
-			SDEDEV_ERR(rot_dev->dev,
-				"fail config in stream on s:%d t:%d r:%d\n",
-				ctx->session_id, buf_type, ret);
-			return ret;
-		}
-		ctx->rotcfg = config;
-	}
-
-	ret = v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, buf_type);
-	if (ret < 0)
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail stream on s:%d t:%d\n",
-				ctx->session_id, buf_type);
-
-	return ret;
-}
-
-/*
- * sde_rotator_streamoff - V4l2 ioctl stream off handler.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @buf_type: V4l2 buffer type.
- */
-static int sde_rotator_streamoff(struct file *file,
-	void *fh, enum v4l2_buf_type buf_type)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	int ret;
-
-	SDEDEV_DBG(ctx->rot_dev->dev, "stream off s:%d t:%d\n",
-			ctx->session_id, buf_type);
-
-	ret = v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, buf_type);
-	if (ret < 0)
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail stream off s:%d t:%d\n",
-				ctx->session_id, buf_type);
-
-	return ret;
-}
-
-/*
- * sde_rotator_cropcap - V4l2 ioctl crop capabilities.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @a: Pointer to v4l2_cropcap struct need to be set.
- */
-static int sde_rotator_cropcap(struct file *file, void *fh,
-	struct v4l2_cropcap *a)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct v4l2_format *format;
-	struct v4l2_rect *crop;
-
-	switch (a->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		format = &ctx->format_out;
-		crop = &ctx->crop_out;
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		format = &ctx->format_cap;
-		crop = &ctx->crop_cap;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	a->bounds.top = 0;
-	a->bounds.left = 0;
-	a->bounds.width = format->fmt.pix.width;
-	a->bounds.height = format->fmt.pix.height;
-
-	a->defrect = *crop;
-
-	a->pixelaspect.numerator = 1;
-	a->pixelaspect.denominator = 1;
-
-	SDEROT_EVTLOG(format->fmt.pix.width, format->fmt.pix.height, a->type);
-	return 0;
-}
-
-/*
- * sde_rotator_g_crop - V4l2 ioctl get crop.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @crop: Pointer to v4l2_crop struct need to be set.
- */
-static int sde_rotator_g_crop(struct file *file, void *fh,
-	struct v4l2_crop *crop)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	switch (crop->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		crop->c = ctx->crop_out;
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		crop->c = ctx->crop_cap;
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/*
- * sde_rotator_s_crop - V4l2 ioctl set crop.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @crop: Pointer to v4l2_crop struct need to be set.
- */
-static int sde_rotator_s_crop(struct file *file, void *fh,
-	const struct v4l2_crop *crop)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotation_item item;
-	struct v4l2_rect rect;
-
-	sde_rotator_get_item_from_ctx(ctx, &item);
-
-	rect.left = max_t(__u32, crop->c.left, 0);
-	rect.top = max_t(__u32, crop->c.top, 0);
-	rect.height = max_t(__u32, crop->c.height, 0);
-	rect.width = max_t(__u32, crop->c.width, 0);
-
-	if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		rect.left = min_t(__u32, rect.left,
-				ctx->format_out.fmt.pix.width - 1);
-		rect.top = min_t(__u32, rect.top,
-				ctx->format_out.fmt.pix.height - 1);
-		rect.width = min_t(__u32, rect.width,
-				(ctx->format_out.fmt.pix.width - rect.left));
-		rect.height = min_t(__u32, rect.height,
-				(ctx->format_out.fmt.pix.height - rect.top));
-
-		item.src_rect.x = rect.left;
-		item.src_rect.y = rect.top;
-		item.src_rect.w = rect.width;
-		item.src_rect.h = rect.height;
-
-		sde_rotator_validate_item(ctx, &item);
-
-		SDEDEV_DBG(rot_dev->dev,
-			"s_crop s:%d t:%d (%u,%u,%u,%u)->(%u,%u,%u,%u)\n",
-			ctx->session_id, crop->type,
-			crop->c.left, crop->c.top,
-			crop->c.width, crop->c.height,
-			item.src_rect.x, item.src_rect.y,
-			item.src_rect.w, item.src_rect.h);
-
-		ctx->crop_out.left = item.src_rect.x;
-		ctx->crop_out.top = item.src_rect.y;
-		ctx->crop_out.width = item.src_rect.w;
-		ctx->crop_out.height = item.src_rect.h;
-	} else if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		rect.left = min_t(__u32, rect.left,
-				ctx->format_cap.fmt.pix.width - 1);
-		rect.top = min_t(__u32, rect.top,
-				ctx->format_cap.fmt.pix.height - 1);
-		rect.width = min_t(__u32, rect.width,
-				(ctx->format_cap.fmt.pix.width - rect.left));
-		rect.height = min_t(__u32, rect.height,
-				(ctx->format_cap.fmt.pix.height - rect.top));
-
-		item.dst_rect.x = rect.left;
-		item.dst_rect.y = rect.top;
-		item.dst_rect.w = rect.width;
-		item.dst_rect.h = rect.height;
-
-		sde_rotator_validate_item(ctx, &item);
-
-		SDEDEV_DBG(rot_dev->dev,
-			"s_crop s:%d t:%d (%u,%u,%u,%u)->(%u,%u,%u,%u)\n",
-			ctx->session_id, crop->type,
-			crop->c.left, crop->c.top,
-			crop->c.width, crop->c.height,
-			item.dst_rect.x, item.dst_rect.y,
-			item.dst_rect.w, item.dst_rect.h);
-
-		ctx->crop_cap.left = item.dst_rect.x;
-		ctx->crop_cap.top = item.dst_rect.y;
-		ctx->crop_cap.width = item.dst_rect.w;
-		ctx->crop_cap.height = item.dst_rect.h;
-	} else {
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_g_parm - V4l2 ioctl get parm.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @a: Pointer to v4l2_streamparm struct need to be filled.
- */
-static int sde_rotator_g_parm(struct file *file, void *fh,
-	struct v4l2_streamparm *a)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	/* Get param is supported only for input buffers */
-	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		return -EINVAL;
-
-	a->parm.output.capability = 0;
-	a->parm.output.extendedmode = 0;
-	a->parm.output.outputmode = 0;
-	a->parm.output.writebuffers = 0;
-	a->parm.output.timeperframe = ctx->timeperframe;
-
-	return 0;
-}
-
-/*
- * sde_rotator_s_parm - V4l2 ioctl set parm.
- * @file: Pointer to file struct.
- * @fh: V4l2 File handle.
- * @a: Pointer to v4l2_streamparm struct need to be set.
- */
-static int sde_rotator_s_parm(struct file *file, void *fh,
-	struct v4l2_streamparm *a)
-{
-	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
-
-	/* Set param is supported only for input buffers */
-	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		return -EINVAL;
-
-	if (!a->parm.output.timeperframe.numerator ||
-	    !a->parm.output.timeperframe.denominator)
-		return -EINVAL;
-
-	ctx->timeperframe = a->parm.output.timeperframe;
-	return 0;
-}
-
-/*
- * sde_rotator_private_ioctl - V4l2 private ioctl handler.
- * @file: Pointer to file struct.
- * @fd: V4l2 device file handle.
- * @valid_prio: Priority ioctl valid flag.
- * @cmd: Ioctl command.
- * @arg: Ioctl argument.
- */
-static long sde_rotator_private_ioctl(struct file *file, void *fh,
-	bool valid_prio, unsigned int cmd, void *arg)
-{
-	struct sde_rotator_ctx *ctx =
-			sde_rotator_ctx_from_fh(file->private_data);
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct msm_sde_rotator_fence *fence = arg;
-	struct msm_sde_rotator_comp_ratio *comp_ratio = arg;
-	struct sde_rotator_vbinfo *vbinfo;
-	int ret;
-
-	switch (cmd) {
-	case VIDIOC_S_SDE_ROTATOR_FENCE:
-		if (!fence)
-			return -EINVAL;
-
-		if (fence->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
-			return -EINVAL;
-
-		if (fence->index >= ctx->nbuf_out)
-			return -EINVAL;
-
-		SDEDEV_DBG(rot_dev->dev,
-				"VIDIOC_S_SDE_ROTATOR_FENCE s:%d i:%d fd:%d\n",
-				ctx->session_id, fence->index,
-				fence->fd);
-
-		vbinfo = &ctx->vbinfo_out[fence->index];
-
-		if (vbinfo->fd >= 0) {
-			if (vbinfo->fence) {
-				SDEDEV_DBG(rot_dev->dev,
-						"put fence s:%d t:%d i:%d\n",
-						ctx->session_id,
-						fence->type, fence->index);
-				sde_rotator_put_sync_fence(vbinfo->fence);
-			}
-			vbinfo->fence = NULL;
-			vbinfo->fd = -1;
-		}
-
-		vbinfo->fd = fence->fd;
-		if (vbinfo->fd >= 0) {
-			vbinfo->fence =
-				sde_rotator_get_fd_sync_fence(vbinfo->fd);
-			if (!vbinfo->fence) {
-				SDEDEV_WARN(rot_dev->dev,
-					"invalid input fence fd s:%d fd:%d\n",
-					ctx->session_id, vbinfo->fd);
-				vbinfo->fd = -1;
-				return -EINVAL;
-			}
-		} else {
-			vbinfo->fence = NULL;
-		}
-		break;
-	case VIDIOC_G_SDE_ROTATOR_FENCE:
-		if (!fence)
-			return -EINVAL;
-
-		if (fence->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-			return -EINVAL;
-
-		if (fence->index >= ctx->nbuf_cap)
-			return -EINVAL;
-
-		vbinfo = &ctx->vbinfo_cap[fence->index];
-
-		if (!vbinfo)
-			return -EINVAL;
-
-		if (vbinfo->fence) {
-			ret = sde_rotator_get_sync_fence_fd(vbinfo->fence);
-			if (ret < 0) {
-				SDEDEV_ERR(rot_dev->dev,
-						"fail get fence fd s:%d\n",
-						ctx->session_id);
-				return ret;
-			}
-
-			/**
-			 * Cache fence descriptor in case user calls this
-			 * ioctl multiple times. Cached value would be stale
-			 * if user duplicated and closed old descriptor.
-			 */
-			vbinfo->fd = ret;
-		} else if (!sde_rotator_get_fd_sync_fence(vbinfo->fd)) {
-			/**
-			 * User has closed cached fence descriptor.
-			 * Invalidate descriptor cache.
-			 */
-			vbinfo->fd = -1;
-		}
-		fence->fd = vbinfo->fd;
-
-		SDEDEV_DBG(rot_dev->dev,
-				"VIDIOC_G_SDE_ROTATOR_FENCE s:%d i:%d fd:%d\n",
-				ctx->session_id, fence->index,
-				fence->fd);
-		break;
-	case VIDIOC_S_SDE_ROTATOR_COMP_RATIO:
-		if (!comp_ratio)
-			return -EINVAL;
-		else if (!comp_ratio->numer || !comp_ratio->denom)
-			return -EINVAL;
-		else if (comp_ratio->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-				comp_ratio->index < ctx->nbuf_out)
-			vbinfo = &ctx->vbinfo_out[comp_ratio->index];
-		else if (comp_ratio->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-				comp_ratio->index < ctx->nbuf_cap)
-			vbinfo = &ctx->vbinfo_cap[comp_ratio->index];
-		else
-			return -EINVAL;
-
-		vbinfo->comp_ratio.numer = comp_ratio->numer;
-		vbinfo->comp_ratio.denom = comp_ratio->denom;
-
-		SDEDEV_DBG(rot_dev->dev,
-				"VIDIOC_S_SDE_ROTATOR_COMP_RATIO s:%d i:%d t:%d cr:%u/%u\n",
-				ctx->session_id, comp_ratio->index,
-				comp_ratio->type,
-				vbinfo->comp_ratio.numer,
-				vbinfo->comp_ratio.denom);
-		break;
-	case VIDIOC_G_SDE_ROTATOR_COMP_RATIO:
-		if (!comp_ratio)
-			return -EINVAL;
-		else if (comp_ratio->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-				comp_ratio->index < ctx->nbuf_out)
-			vbinfo = &ctx->vbinfo_out[comp_ratio->index];
-		else if (comp_ratio->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-				comp_ratio->index < ctx->nbuf_cap)
-			vbinfo = &ctx->vbinfo_cap[comp_ratio->index];
-		else
-			return -EINVAL;
-
-		comp_ratio->numer = vbinfo->comp_ratio.numer;
-		comp_ratio->denom = vbinfo->comp_ratio.denom;
-
-		SDEDEV_DBG(rot_dev->dev,
-				"VIDIOC_G_SDE_ROTATOR_COMP_RATIO s:%d i:%d t:%d cr:%u/%u\n",
-				ctx->session_id, comp_ratio->index,
-				comp_ratio->type,
-				comp_ratio->numer,
-				comp_ratio->denom);
-		break;
-	default:
-		SDEDEV_WARN(rot_dev->dev, "invalid ioctl type %x\n", cmd);
-		return -ENOTTY;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * sde_rotator_compat_ioctl32 - Compat ioctl handler function.
- * @file: Pointer to file struct.
- * @cmd: Ioctl command.
- * @arg: Ioctl argument.
- */
-static long sde_rotator_compat_ioctl32(struct file *file,
-	unsigned int cmd, unsigned long arg)
-{
-	struct video_device *vdev = video_devdata(file);
-	struct sde_rotator_ctx *ctx =
-			sde_rotator_ctx_from_fh(file->private_data);
-	long ret;
-
-	mutex_lock(vdev->lock);
-
-	switch (cmd) {
-	case VIDIOC_S_SDE_ROTATOR_FENCE:
-	case VIDIOC_G_SDE_ROTATOR_FENCE:
-	{
-		struct msm_sde_rotator_fence fence;
-
-		if (copy_from_user(&fence, (void __user *)arg,
-				sizeof(struct msm_sde_rotator_fence)))
-			goto ioctl32_error;
-
-		ret = sde_rotator_private_ioctl(file, file->private_data,
-			0, cmd, (void *)&fence);
-
-		if (copy_to_user((void __user *)arg, &fence,
-				sizeof(struct msm_sde_rotator_fence)))
-			goto ioctl32_error;
-
-		break;
-	}
-	case VIDIOC_S_SDE_ROTATOR_COMP_RATIO:
-	case VIDIOC_G_SDE_ROTATOR_COMP_RATIO:
-	{
-		struct msm_sde_rotator_comp_ratio comp_ratio;
-
-		if (copy_from_user(&comp_ratio, (void __user *)arg,
-				sizeof(struct msm_sde_rotator_comp_ratio)))
-			goto ioctl32_error;
-
-		ret = sde_rotator_private_ioctl(file, file->private_data,
-			0, cmd, (void *)&comp_ratio);
-
-		if (copy_to_user((void __user *)arg, &comp_ratio,
-				sizeof(struct msm_sde_rotator_comp_ratio)))
-			goto ioctl32_error;
-
-		break;
-	}
-	default:
-		SDEDEV_ERR(ctx->rot_dev->dev, "invalid ioctl32 type:%x\n", cmd);
-		ret = -ENOIOCTLCMD;
-		break;
-
-	}
-
-	mutex_unlock(vdev->lock);
-	return ret;
-
-ioctl32_error:
-	mutex_unlock(vdev->lock);
-	SDEDEV_ERR(ctx->rot_dev->dev, "error handling ioctl32 cmd:%x\n", cmd);
-	return -EFAULT;
-}
-#endif
-
-static int sde_rotator_ctrl_subscribe_event(struct v4l2_fh *fh,
-				const struct v4l2_event_subscription *sub)
-{
-	return -EINVAL;
-}
-
-static int sde_rotator_event_unsubscribe(struct v4l2_fh *fh,
-			   const struct v4l2_event_subscription *sub)
-{
-	return -EINVAL;
-}
-
-/* V4l2 ioctl handlers */
-static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = {
-	.vidioc_querycap          = sde_rotator_querycap,
-	.vidioc_enum_fmt_vid_out  = sde_rotator_enum_fmt_vid_out,
-	.vidioc_enum_fmt_vid_cap  = sde_rotator_enum_fmt_vid_cap,
-	.vidioc_g_fmt_vid_out     = sde_rotator_g_fmt_out,
-	.vidioc_g_fmt_vid_cap     = sde_rotator_g_fmt_cap,
-	.vidioc_try_fmt_vid_out   = sde_rotator_try_fmt_vid_out,
-	.vidioc_try_fmt_vid_cap   = sde_rotator_try_fmt_vid_cap,
-	.vidioc_s_fmt_vid_out     = sde_rotator_s_fmt_vid_out,
-	.vidioc_s_fmt_vid_cap     = sde_rotator_s_fmt_vid_cap,
-	.vidioc_reqbufs           = sde_rotator_reqbufs,
-	.vidioc_qbuf              = sde_rotator_qbuf,
-	.vidioc_dqbuf             = sde_rotator_dqbuf,
-	.vidioc_querybuf          = sde_rotator_querybuf,
-	.vidioc_streamon          = sde_rotator_streamon,
-	.vidioc_streamoff         = sde_rotator_streamoff,
-	.vidioc_cropcap           = sde_rotator_cropcap,
-	.vidioc_g_crop            = sde_rotator_g_crop,
-	.vidioc_s_crop            = sde_rotator_s_crop,
-	.vidioc_g_parm            = sde_rotator_g_parm,
-	.vidioc_s_parm            = sde_rotator_s_parm,
-	.vidioc_default           = sde_rotator_private_ioctl,
-	.vidioc_log_status        = v4l2_ctrl_log_status,
-	.vidioc_subscribe_event   = sde_rotator_ctrl_subscribe_event,
-	.vidioc_unsubscribe_event = sde_rotator_event_unsubscribe,
-};
-
-/*
- * sde_rotator_retire_handler - Invoked by hal when processing is done.
- * @work: Pointer to work structure.
- *
- * This function is scheduled in work queue context.
- */
-static void sde_rotator_retire_handler(struct kthread_work *work)
-{
-	struct vb2_v4l2_buffer *src_buf;
-	struct vb2_v4l2_buffer *dst_buf;
-	struct sde_rotator_ctx *ctx;
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_request *request;
-
-	request = container_of(work, struct sde_rotator_request, retire_work);
-	ctx = request->ctx;
-
-	if (!ctx || !ctx->rot_dev) {
-		SDEROT_ERR("null context/device\n");
-		return;
-	}
-
-	rot_dev = ctx->rot_dev;
-
-	SDEDEV_DBG(rot_dev->dev, "retire handler s:%d\n", ctx->session_id);
-
-	mutex_lock(&rot_dev->lock);
-	if (ctx->abort_pending) {
-		SDEDEV_DBG(rot_dev->dev, "abort command in retire s:%d\n",
-				ctx->session_id);
-		sde_rotator_update_retire_sequence(request);
-		sde_rotator_retire_request(request);
-		mutex_unlock(&rot_dev->lock);
-		return;
-	}
-
-	if (!ctx->file) {
-		sde_rotator_update_retire_sequence(request);
-	} else if (rot_dev->early_submit) {
-		if (IS_ERR_OR_NULL(request->req)) {
-			/* fail pending request or something wrong */
-			SDEDEV_ERR(rot_dev->dev,
-					"pending request fail in retire s:%d\n",
-					ctx->session_id);
-		}
-
-		/* pending request. reschedule this context. */
-		v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
-	} else {
-		/* no pending request. acknowledge the usual way. */
-		src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-		dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
-		if (!src_buf || !dst_buf) {
-			SDEDEV_ERR(rot_dev->dev,
-				"null buffer in retire s:%d sb:%pK db:%pK\n",
-				ctx->session_id,
-				src_buf, dst_buf);
-		}
-
-		sde_rotator_update_retire_sequence(request);
-		sde_rotator_retire_request(request);
-		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
-		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
-		v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
-	}
-	mutex_unlock(&rot_dev->lock);
-}
-
-/*
- * sde_rotator_process_buffers - Start rotator processing.
- * @ctx: Pointer rotator context.
- * @src_buf: Pointer to Vb2 source buffer.
- * @dst_buf: Pointer to Vb2 destination buffer.
- * @request: Pointer to rotator request
- */
-static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx,
-	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf,
-	struct sde_rotator_request *request)
-{
-	struct sde_rotator_device *rot_dev = ctx->rot_dev;
-	struct sde_rotation_item item;
-	struct sde_rot_entry_container *req = NULL;
-	struct sde_rotator_buf_handle *src_handle;
-	struct sde_rotator_buf_handle *dst_handle;
-	struct sde_rotator_statistics *stats = &rot_dev->stats;
-	struct sde_rotator_vbinfo *vbinfo_out;
-	struct sde_rotator_vbinfo *vbinfo_cap;
-	struct sde_rot_trace_entry rot_trace;
-	ktime_t *ts;
-	int ret;
-
-	if (!src_buf || !dst_buf) {
-		SDEDEV_ERR(rot_dev->dev, "null vb2 buffers\n");
-		ret = -EINVAL;
-		goto error_null_buffer;
-	}
-
-	src_handle = src_buf->planes[0].mem_priv;
-	dst_handle = dst_buf->planes[0].mem_priv;
-
-	if (!src_handle || !dst_handle) {
-		SDEDEV_ERR(rot_dev->dev, "null buffer handle\n");
-		ret = -EINVAL;
-		goto error_null_buffer;
-	}
-
-	vbinfo_out = &ctx->vbinfo_out[src_buf->index];
-	vbinfo_cap = &ctx->vbinfo_cap[dst_buf->index];
-
-	SDEDEV_DBG(rot_dev->dev,
-		"process buffer s:%d.%u src:(%u,%u,%u,%u) dst:(%u,%u,%u,%u) rot:%d flip:%d/%d sec:%d src_cr:%u/%u dst_cr:%u/%u\n",
-		ctx->session_id, vbinfo_cap->fence_ts,
-		ctx->crop_out.left, ctx->crop_out.top,
-		ctx->crop_out.width, ctx->crop_out.height,
-		ctx->crop_cap.left, ctx->crop_cap.top,
-		ctx->crop_cap.width, ctx->crop_cap.height,
-		ctx->rotate, ctx->hflip, ctx->vflip, ctx->secure,
-		vbinfo_out->comp_ratio.numer, vbinfo_out->comp_ratio.denom,
-		vbinfo_cap->comp_ratio.numer, vbinfo_cap->comp_ratio.denom);
-
-	/* allocate slot for timestamp */
-	ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
-	ts[SDE_ROTATOR_TS_SRCQB] = vbinfo_out->qbuf_ts;
-	ts[SDE_ROTATOR_TS_DSTQB] = vbinfo_cap->qbuf_ts;
-	vbinfo_out->dqbuf_ts = &ts[SDE_ROTATOR_TS_SRCDQB];
-	vbinfo_cap->dqbuf_ts = &ts[SDE_ROTATOR_TS_DSTDQB];
-
-	ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
-
-	/* Set values to pass to trace */
-	rot_trace.wb_idx = ctx->fh.prio;
-	rot_trace.flags = (ctx->rotate << 0) | (ctx->hflip << 8) |
-			(ctx->hflip << 9) | (ctx->secure << 10);
-	rot_trace.input_format = ctx->format_out.fmt.pix.pixelformat;
-	rot_trace.input_width = ctx->format_out.fmt.pix.width;
-	rot_trace.input_height = ctx->format_out.fmt.pix.height;
-	rot_trace.src_x = ctx->crop_out.left;
-	rot_trace.src_y = ctx->crop_out.top;
-	rot_trace.src_w = ctx->crop_out.width;
-	rot_trace.src_h = ctx->crop_out.height;
-	rot_trace.output_format = ctx->format_cap.fmt.pix.pixelformat;
-	rot_trace.output_width = ctx->format_cap.fmt.pix.width;
-	rot_trace.output_height = ctx->format_cap.fmt.pix.height;
-	rot_trace.dst_x = ctx->crop_cap.left;
-	rot_trace.dst_y = ctx->crop_cap.top;
-	rot_trace.dst_w = ctx->crop_cap.width;
-	rot_trace.dst_h = ctx->crop_cap.height;
-
-	trace_rot_entry_fence(
-		ctx->session_id, vbinfo_cap->fence_ts, &rot_trace);
-
-	if (vbinfo_out->fence) {
-		sde_rot_mgr_unlock(rot_dev->mgr);
-		mutex_unlock(&rot_dev->lock);
-		SDEDEV_DBG(rot_dev->dev, "fence enter s:%d.%d fd:%d\n",
-			ctx->session_id, vbinfo_cap->fence_ts, vbinfo_out->fd);
-		ret = sde_rotator_wait_sync_fence(vbinfo_out->fence,
-				rot_dev->fence_timeout);
-		mutex_lock(&rot_dev->lock);
-		sde_rot_mgr_lock(rot_dev->mgr);
-		sde_rotator_put_sync_fence(vbinfo_out->fence);
-		vbinfo_out->fence = NULL;
-		if (ret) {
-			SDEDEV_ERR(rot_dev->dev,
-				"error waiting for fence s:%d.%d fd:%d r:%d\n",
-				ctx->session_id,
-				vbinfo_cap->fence_ts, vbinfo_out->fd, ret);
-			SDEROT_EVTLOG(ctx->session_id, vbinfo_cap->fence_ts,
-					vbinfo_out->fd, ret,
-					SDE_ROT_EVTLOG_ERROR);
-			goto error_fence_wait;
-		} else {
-			SDEDEV_DBG(rot_dev->dev, "fence exit s:%d.%d fd:%d\n",
-				ctx->session_id,
-				vbinfo_cap->fence_ts, vbinfo_out->fd);
-		}
-	}
-
-	/* fill in item work structure */
-	sde_rotator_get_item_from_ctx(ctx, &item);
-	item.flags |= SDE_ROTATION_EXT_DMA_BUF;
-	item.input.planes[0].fd = src_handle->fd;
-	item.input.planes[0].buffer = src_handle->buffer;
-	item.input.planes[0].offset = src_handle->addr;
-	item.input.planes[0].stride = ctx->format_out.fmt.pix.bytesperline;
-	item.input.plane_count = 1;
-	item.input.fence = NULL;
-	item.input.comp_ratio = vbinfo_out->comp_ratio;
-	item.output.planes[0].fd = dst_handle->fd;
-	item.output.planes[0].buffer = dst_handle->buffer;
-	item.output.planes[0].offset = dst_handle->addr;
-	item.output.planes[0].stride = ctx->format_cap.fmt.pix.bytesperline;
-	item.output.plane_count = 1;
-	item.output.fence = NULL;
-	item.output.comp_ratio = vbinfo_cap->comp_ratio;
-	item.sequence_id = vbinfo_cap->fence_ts;
-	item.ts = ts;
-
-	req = sde_rotator_req_init(rot_dev->mgr, ctx->private, &item, 1, 0);
-	if (IS_ERR_OR_NULL(req)) {
-		SDEDEV_ERR(rot_dev->dev, "fail allocate rotation request\n");
-		ret = -ENOMEM;
-		goto error_init_request;
-	}
-
-	req->retire_kw = ctx->work_queue.rot_kw;
-	req->retire_work = &request->retire_work;
-
-	ret = sde_rotator_handle_request_common(
-			rot_dev->mgr, ctx->private, req);
-	if (ret) {
-		SDEDEV_ERR(rot_dev->dev, "fail handle request\n");
-		goto error_handle_request;
-	}
-
-	sde_rotator_queue_request(rot_dev->mgr, ctx->private, req);
-	request->req = req;
-	request->sequence_id = item.sequence_id;
-	request->committed = true;
-
-	return 0;
-error_handle_request:
-	devm_kfree(rot_dev->dev, req);
-error_init_request:
-error_fence_wait:
-error_null_buffer:
-	request->req = NULL;
-	request->sequence_id = 0;
-	request->committed = false;
-	return ret;
-}
-
-/*
- * sde_rotator_submit_handler - Invoked by m2m to submit job.
- * @work: Pointer to work structure.
- *
- * This function is scheduled in work queue context.
- */
-static void sde_rotator_submit_handler(struct kthread_work *work)
-{
-	struct sde_rotator_ctx *ctx;
-	struct sde_rotator_device *rot_dev;
-	struct vb2_v4l2_buffer *src_buf;
-	struct vb2_v4l2_buffer *dst_buf;
-	struct sde_rotator_request *request;
-	int ret;
-
-	request = container_of(work, struct sde_rotator_request, submit_work);
-	ctx = request->ctx;
-
-	if (!ctx || !ctx->rot_dev) {
-		SDEROT_ERR("null device\n");
-		return;
-	}
-
-	rot_dev = ctx->rot_dev;
-	SDEDEV_DBG(rot_dev->dev, "submit handler s:%d\n", ctx->session_id);
-
-	mutex_lock(&rot_dev->lock);
-	if (ctx->abort_pending) {
-		SDEDEV_DBG(rot_dev->dev, "abort command in submit s:%d\n",
-				ctx->session_id);
-		sde_rotator_update_retire_sequence(request);
-		sde_rotator_retire_request(request);
-		mutex_unlock(&rot_dev->lock);
-		return;
-	}
-
-	/* submit new request */
-	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
-	sde_rot_mgr_lock(rot_dev->mgr);
-	ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-			&dst_buf->vb2_buf, request);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	if (ret) {
-		SDEDEV_ERR(rot_dev->dev,
-			"fail process buffer in submit s:%d\n",
-			ctx->session_id);
-		/* advance to device run to clean up buffers */
-		v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
-	}
-
-	mutex_unlock(&rot_dev->lock);
-}
-
-/*
- * sde_rotator_device_run - rotator m2m device run callback
- * @priv: Pointer rotator context.
- */
-static void sde_rotator_device_run(void *priv)
-{
-	struct sde_rotator_ctx *ctx = priv;
-	struct sde_rotator_device *rot_dev;
-	struct vb2_v4l2_buffer *src_buf;
-	struct vb2_v4l2_buffer *dst_buf;
-	struct sde_rotator_request *request;
-	int ret;
-
-	if (!ctx || !ctx->rot_dev) {
-		SDEROT_ERR("null context/device\n");
-		return;
-	}
-
-	rot_dev = ctx->rot_dev;
-	SDEDEV_DBG(rot_dev->dev, "device run s:%d\n", ctx->session_id);
-
-	if (rot_dev->early_submit) {
-		request = list_first_entry_or_null(&ctx->pending_list,
-				struct sde_rotator_request, list);
-
-		/* pending request mode, check for completion */
-		if (!request || IS_ERR_OR_NULL(request->req)) {
-			/* pending request fails or something wrong. */
-			SDEDEV_ERR(rot_dev->dev,
-				"pending request fail in device run s:%d\n",
-				ctx->session_id);
-			rot_dev->stats.fail_count++;
-			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
-			goto error_process_buffers;
-
-		} else if (!atomic_read(&request->req->pending_count)) {
-			/* pending request completed. signal done. */
-			int failed_count =
-				atomic_read(&request->req->failed_count);
-			SDEDEV_DBG(rot_dev->dev,
-				"pending request completed in device run s:%d\n",
-				ctx->session_id);
-
-			/* disconnect request (will be freed by core layer) */
-			sde_rot_mgr_lock(rot_dev->mgr);
-			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
-					request->req);
-			sde_rot_mgr_unlock(rot_dev->mgr);
-
-			if (failed_count) {
-				SDEDEV_ERR(rot_dev->dev,
-					"pending request failed in device run s:%d f:%d\n",
-					ctx->session_id,
-					failed_count);
-				rot_dev->stats.fail_count++;
-				ATRACE_INT("fail_count",
-						rot_dev->stats.fail_count);
-				goto error_process_buffers;
-			}
-
-			src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-			dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-			if (!src_buf || !dst_buf) {
-				SDEDEV_ERR(rot_dev->dev,
-					"null buffer in device run s:%d sb:%pK db:%pK\n",
-					ctx->session_id,
-					src_buf, dst_buf);
-				goto error_process_buffers;
-			}
-
-			sde_rotator_update_retire_sequence(request);
-			sde_rotator_retire_request(request);
-			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
-			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
-			v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
-		} else {
-			/* pending request not complete. something wrong. */
-			SDEDEV_ERR(rot_dev->dev,
-				"Incomplete pending request in device run s:%d\n",
-				ctx->session_id);
-
-			/* disconnect request (will be freed by core layer) */
-			sde_rot_mgr_lock(rot_dev->mgr);
-			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
-					request->req);
-			sde_rot_mgr_unlock(rot_dev->mgr);
-
-			goto error_process_buffers;
-		}
-	} else {
-		request = list_first_entry_or_null(&ctx->retired_list,
-				struct sde_rotator_request, list);
-		if (!request) {
-			SDEDEV_ERR(rot_dev->dev,
-				"no free request in device run s:%d\n",
-				ctx->session_id);
-			goto error_retired_list;
-		}
-
-		spin_lock(&ctx->list_lock);
-		list_del_init(&request->list);
-		list_add_tail(&request->list, &ctx->pending_list);
-		spin_unlock(&ctx->list_lock);
-
-		/* no pending request. submit buffer the usual way. */
-		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
-		if (!src_buf || !dst_buf) {
-			SDEDEV_ERR(rot_dev->dev,
-				"null buffer in device run s:%d sb:%pK db:%pK\n",
-				ctx->session_id,
-				src_buf, dst_buf);
-			goto error_empty_buffer;
-		}
-
-		sde_rot_mgr_lock(rot_dev->mgr);
-		ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-				&dst_buf->vb2_buf, request);
-		sde_rot_mgr_unlock(rot_dev->mgr);
-		if (ret) {
-			SDEDEV_ERR(rot_dev->dev,
-				"fail process buffer in device run s:%d\n",
-				ctx->session_id);
-			rot_dev->stats.fail_count++;
-			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
-			goto error_process_buffers;
-		}
-	}
-
-	return;
-error_process_buffers:
-error_empty_buffer:
-error_retired_list:
-	sde_rotator_update_retire_sequence(request);
-	sde_rotator_retire_request(request);
-	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-	if (src_buf)
-		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
-	if (dst_buf)
-		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
-	sde_rotator_resync_timeline(ctx->work_queue.timeline);
-	v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
-}
-
-/*
- * sde_rotator_job_abort - rotator m2m job abort callback
- * @priv: Pointer rotator context.
- */
-static void sde_rotator_job_abort(void *priv)
-{
-	struct sde_rotator_ctx *ctx = priv;
-	struct sde_rotator_device *rot_dev;
-
-	if (!ctx || !ctx->rot_dev) {
-		SDEROT_ERR("null context/device\n");
-		return;
-	}
-
-	rot_dev = ctx->rot_dev;
-	SDEDEV_DBG(rot_dev->dev, "job abort s:%d\n", ctx->session_id);
-
-	v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
-}
-
-/*
- * sde_rotator_job_ready - rotator m2m job ready callback
- * @priv: Pointer rotator context.
- */
-static int sde_rotator_job_ready(void *priv)
-{
-	struct sde_rotator_ctx *ctx = priv;
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_request *request;
-	int ret = 0;
-
-	if (!ctx || !ctx->rot_dev) {
-		SDEROT_ERR("null context/device\n");
-		return 0;
-	}
-
-	rot_dev = ctx->rot_dev;
-	SDEDEV_DBG(rot_dev->dev, "job ready s:%d\n", ctx->session_id);
-
-	request = list_first_entry_or_null(&ctx->pending_list,
-			struct sde_rotator_request, list);
-
-	if (!rot_dev->early_submit) {
-		/* always ready in normal mode. */
-		ret = 1;
-	} else if (request && IS_ERR_OR_NULL(request->req)) {
-		/* if pending request fails, forward to device run state. */
-		SDEDEV_DBG(rot_dev->dev,
-				"pending request fail in job ready s:%d\n",
-				ctx->session_id);
-		ret = 1;
-	} else if (list_empty(&ctx->pending_list)) {
-		/* if no pending request, submit a new request. */
-		SDEDEV_DBG(rot_dev->dev,
-				"submit job s:%d sc:%d dc:%d p:%d\n",
-				ctx->session_id,
-				v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
-				v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx),
-				!list_empty(&ctx->pending_list));
-
-		request = list_first_entry_or_null(&ctx->retired_list,
-				struct sde_rotator_request, list);
-		if (!request) {
-			/* should not happen */
-			SDEDEV_ERR(rot_dev->dev,
-					"no free request in job ready s:%d\n",
-					ctx->session_id);
-		} else {
-			spin_lock(&ctx->list_lock);
-			list_del_init(&request->list);
-			list_add_tail(&request->list, &ctx->pending_list);
-			spin_unlock(&ctx->list_lock);
-			kthread_queue_work(ctx->work_queue.rot_kw,
-					&request->submit_work);
-		}
-	} else if (request && !atomic_read(&request->req->pending_count)) {
-		/* if pending request completed, forward to device run state */
-		SDEDEV_DBG(rot_dev->dev,
-				"pending request completed in job ready s:%d\n",
-				ctx->session_id);
-		ret = 1;
-	}
-
-	return ret;
-}
-
-/* V4l2 mem2mem handlers */
-static struct v4l2_m2m_ops sde_rotator_m2m_ops = {
-	.device_run	= sde_rotator_device_run,
-	.job_abort	= sde_rotator_job_abort,
-	.job_ready	= sde_rotator_job_ready,
-};
-
-/* Device tree match struct */
-static const struct of_device_id sde_rotator_dt_match[] = {
-	{
-		.compatible = "qcom,sde_rotator",
-		.data = NULL,
-	},
-	{}
-};
-
-/*
- * sde_rotator_get_drv_data - rotator device driver data.
- * @dev: Pointer to device.
- */
-static const void *sde_rotator_get_drv_data(struct device *dev)
-{
-	const struct of_device_id *match;
-
-	match = of_match_node(sde_rotator_dt_match, dev->of_node);
-
-	if (match)
-		return match->data;
-
-	return NULL;
-}
-
-/*
- * sde_rotator_probe - rotator device probe method.
- * @pdev: Pointer to rotator platform device.
- */
-static int sde_rotator_probe(struct platform_device *pdev)
-{
-	struct sde_rotator_device *rot_dev;
-	struct video_device *vdev;
-	int ret, i;
-	char name[32];
-
-	SDEDEV_DBG(&pdev->dev, "SDE v4l2 rotator probed\n");
-
-	/* sde rotator device struct */
-	rot_dev = kzalloc(sizeof(struct sde_rotator_device), GFP_KERNEL);
-	if (!rot_dev)
-		return -ENOMEM;
-
-	mutex_init(&rot_dev->lock);
-	rot_dev->early_submit = SDE_ROTATOR_EARLY_SUBMIT;
-	rot_dev->fence_timeout = SDE_ROTATOR_FENCE_TIMEOUT;
-	rot_dev->streamoff_timeout = SDE_ROTATOR_STREAM_OFF_TIMEOUT;
-	rot_dev->min_rot_clk = 0;
-	rot_dev->min_bw = 0;
-	rot_dev->min_overhead_us = 0;
-	rot_dev->drvdata = sde_rotator_get_drv_data(&pdev->dev);
-	rot_dev->open_timeout = SDE_ROTATOR_CTX_OPEN_TIMEOUT;
-	init_waitqueue_head(&rot_dev->open_wq);
-
-	rot_dev->pdev = pdev;
-	rot_dev->dev = &pdev->dev;
-	platform_set_drvdata(pdev, rot_dev);
-
-	ret = sde_rotator_base_init(&rot_dev->mdata, pdev, rot_dev->drvdata);
-	if (ret < 0) {
-		SDEDEV_ERR(&pdev->dev, "fail init base data %d\n", ret);
-		goto error_rotator_base_init;
-	}
-
-	ret = sde_rotator_core_init(&rot_dev->mgr, pdev);
-	if (ret < 0) {
-		if (ret == -EPROBE_DEFER)
-			SDEDEV_INFO(&pdev->dev, "probe defer for core init\n");
-		else
-			SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
-		goto error_rotator_core_init;
-	}
-
-	/* mem2mem device */
-	rot_dev->m2m_dev = v4l2_m2m_init(&sde_rotator_m2m_ops);
-	if (IS_ERR(rot_dev->m2m_dev)) {
-		ret = PTR_ERR(rot_dev->m2m_dev);
-		SDEDEV_ERR(&pdev->dev, "fail init mem2mem device %d\n", ret);
-		goto error_m2m_init;
-	}
-
-	/* v4l2 device */
-	ret = v4l2_device_register(&pdev->dev, &rot_dev->v4l2_dev);
-	if (ret < 0) {
-		SDEDEV_ERR(&pdev->dev, "fail register v4l2 device %d\n", ret);
-		goto error_v4l2_register;
-	}
-
-	vdev = video_device_alloc();
-	if (!vdev) {
-		SDEDEV_ERR(&pdev->dev, "fail allocate video device\n");
-		goto error_alloc_video_device;
-	}
-
-	vdev->fops = &sde_rotator_fops;
-	vdev->ioctl_ops = &sde_rotator_ioctl_ops;
-	vdev->lock = &rot_dev->lock;
-	vdev->minor = -1;
-	vdev->release = video_device_release;
-	vdev->v4l2_dev = &rot_dev->v4l2_dev;
-	vdev->vfl_dir = VFL_DIR_M2M;
-	vdev->vfl_type = VFL_TYPE_GRABBER;
-	strlcpy(vdev->name, SDE_ROTATOR_DRV_NAME, sizeof(vdev->name));
-
-	ret = video_register_device(vdev, VFL_TYPE_GRABBER,
-			SDE_ROTATOR_BASE_DEVICE_NUMBER);
-	if (ret < 0) {
-		SDEDEV_ERR(&pdev->dev, "fail register video device %d\n",
-				ret);
-		goto error_video_register;
-	}
-
-	rot_dev->vdev = vdev;
-	video_set_drvdata(rot_dev->vdev, rot_dev);
-
-	rot_dev->debugfs_root = sde_rotator_create_debugfs(rot_dev);
-
-	for (i = 0; i < MAX_ROT_OPEN_SESSION; i++) {
-		snprintf(name, sizeof(name), "rot_fenceq_%d_%d",
-			rot_dev->dev->id, i);
-		kthread_init_worker(&rot_dev->rot_kw[i]);
-		rot_dev->rot_thread[i] = kthread_run(kthread_worker_fn,
-			&rot_dev->rot_kw[i], name);
-		if (IS_ERR(rot_dev->rot_thread[i])) {
-			SDEDEV_ERR(rot_dev->dev,
-				"fail allocate kthread i:%d\n", i);
-			ret = -EPERM;
-			goto error_kthread_create;
-		}
-		rot_dev->kthread_free[i] = true;
-	}
-
-	SDEDEV_INFO(&pdev->dev, "SDE v4l2 rotator probe success\n");
-
-	return 0;
-error_kthread_create:
-	for (i--; i >= 0; i--)
-		kthread_stop(rot_dev->rot_thread[i]);
-	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);
-	video_unregister_device(rot_dev->vdev);
-error_video_register:
-	video_device_release(vdev);
-error_alloc_video_device:
-	v4l2_device_unregister(&rot_dev->v4l2_dev);
-error_v4l2_register:
-	v4l2_m2m_release(rot_dev->m2m_dev);
-error_m2m_init:
-	sde_rotator_core_destroy(rot_dev->mgr);
-error_rotator_core_init:
-	sde_rotator_base_destroy(rot_dev->mdata);
-error_rotator_base_init:
-	kfree(rot_dev);
-	return ret;
-}
-
-/*
- * sde_rotator_remove - rotator device remove method.
- * @pdev: Pointer rotator platform device.
- */
-static int sde_rotator_remove(struct platform_device *pdev)
-{
-	struct sde_rotator_device *rot_dev;
-	int i;
-
-	rot_dev = platform_get_drvdata(pdev);
-	if (rot_dev == NULL) {
-		SDEDEV_ERR(&pdev->dev, "fail get rotator drvdata\n");
-		return 0;
-	}
-
-	sde_rotator_pm_qos_remove(rot_dev->mdata);
-	for (i = MAX_ROT_OPEN_SESSION - 1; i >= 0; i--)
-		kthread_stop(rot_dev->rot_thread[i]);
-	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);
-	video_unregister_device(rot_dev->vdev);
-	video_device_release(rot_dev->vdev);
-	v4l2_device_unregister(&rot_dev->v4l2_dev);
-	v4l2_m2m_release(rot_dev->m2m_dev);
-	sde_rotator_core_destroy(rot_dev->mgr);
-	sde_rotator_base_destroy(rot_dev->mdata);
-	kfree(rot_dev);
-	return 0;
-}
-
-static const struct dev_pm_ops sde_rotator_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(sde_rotator_pm_suspend, sde_rotator_pm_resume)
-	SET_RUNTIME_PM_OPS(sde_rotator_runtime_suspend,
-			sde_rotator_runtime_resume,
-			sde_rotator_runtime_idle)
-};
-
-/* SDE Rotator platform driver definition */
-static struct platform_driver rotator_driver = {
-	.probe = sde_rotator_probe,
-	.remove = sde_rotator_remove,
-	.suspend = sde_rotator_suspend,
-	.resume = sde_rotator_resume,
-	.driver = {
-		.name = SDE_ROTATOR_DRV_NAME,
-		.of_match_table = sde_rotator_dt_match,
-		.pm = &sde_rotator_pm_ops,
-	},
-};
-
-static int __init sde_rotator_init_module(void)
-{
-	return platform_driver_register(&rotator_driver);
-}
-
-static void __exit sde_rotator_exit_module(void)
-{
-	platform_driver_unregister(&rotator_driver);
-}
-
-module_init(sde_rotator_init_module);
-module_exit(sde_rotator_exit_module);
-MODULE_DESCRIPTION("MSM SDE ROTATOR driver");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
deleted file mode 100644
index 346384b..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_DEV_H__
-#define __SDE_ROTATOR_DEV_H__
-
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/slab.h>
-#include <linux/ktime.h>
-#include <linux/iommu.h>
-#include <linux/dma-buf.h>
-#include <linux/msm-bus.h>
-#include <linux/platform_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-#include <linux/kthread.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ctrls.h>
-#include <media/msm_sde_rotator.h>
-
-#include "sde_rotator_core.h"
-#include "sde_rotator_sync.h"
-
-/* Rotator device name */
-#define SDE_ROTATOR_DRV_NAME		"sde_rotator"
-
-/* Event logging constants */
-#define SDE_ROTATOR_NUM_EVENTS		4096
-#define SDE_ROTATOR_NUM_TIMESTAMPS	SDE_ROTATOR_TS_MAX
-
-/* maximum number of outstanding requests per ctx session */
-#define SDE_ROTATOR_REQUEST_MAX		2
-
-#define MAX_ROT_OPEN_SESSION 16
-
-struct sde_rotator_device;
-struct sde_rotator_ctx;
-
-/*
- * struct sde_rotator_buf_handle - Structure contain rotator buffer information.
- * @fd: ion file descriptor from which this buffer is imported.
- * @rot_dev: Pointer to rotator device.
- * @ctx: Pointer to rotator context.
- * @size: Size of the buffer.
- * @addr: Address of rotator mmu mapped buffer.
- * @secure: Non-secure/secure buffer.
- * @buffer: Pointer to dma buf associated with this fd.
- */
-struct sde_rotator_buf_handle {
-	int fd;
-	struct sde_rotator_device *rot_dev;
-	struct sde_rotator_ctx *ctx;
-	unsigned long size;
-	dma_addr_t addr;
-	int secure;
-	struct dma_buf *buffer;
-};
-
-/*
- * struct sde_rotator_vbinfo - Structure define video buffer info.
- * @fd: fence file descriptor.
- * @fence: fence associated with fd.
- * @fence_ts: completion timestamp associated with fd
- * @qbuf_ts: timestamp associated with buffer queue event
- * @dqbuf_ts: Pointer to timestamp associated with buffer dequeue event
- * @comp_ratio: compression ratio of this buffer
- */
-struct sde_rotator_vbinfo {
-	int fd;
-	struct sde_rot_sync_fence *fence;
-	u32 fence_ts;
-	ktime_t qbuf_ts;
-	ktime_t *dqbuf_ts;
-	struct sde_mult_factor comp_ratio;
-};
-
-/*
- * struct sde_rotator_request - device layer rotation request
- * @list: list head for submit/retire list
- * @submit_work: submit work structure
- * @retire_work: retire work structure
- * @req: Pointer to core layer rotator manager request
- *	 Request can be freed by core layer during sde_rotator_stop_streaming.
- *	 Avoid dereference in dev layer if possible.
- * @ctx: Pointer to parent context
- * @committed: true if request committed to hardware
- * @sequence_id: sequence identifier of this request
- */
-struct sde_rotator_request {
-	struct list_head list;
-	struct kthread_work submit_work;
-	struct kthread_work retire_work;
-	struct sde_rot_entry_container *req;
-	struct sde_rotator_ctx *ctx;
-	bool committed;
-	u32 sequence_id;
-};
-
-/*
- * struct sde_rotator_ctx - Structure contains per open file handle context.
- * @kobj: kernel object of this context
- * @rot_dev: Pointer to rotator device.
- * @file: Pointer to device file handle
- * @fh: V4l2 file handle.
- * @ctrl_handler: control handler
- * @format_cap: Current capture format.
- * @format_out: Current output format.
- * @crop_cap: Current capture crop.
- * @crop_out: Current output crop.
- * @timeperframe: Time per frame in seconds.
- * @session_id: unique id for this context
- * @hflip: horizontal flip (1-flip)
- * @vflip: vertical flip (1-flip)
- * @rotate: rotation angle (0,90,180,270)
- * @secure: Non-secure (0) / Secure processing
- * @abort_pending: True if abort is requested for async handling.
- * @nbuf_cap: Number of requested buffer for capture queue
- * @nbuf_out: Number of requested buffer for output queue
- * @fence_cap: Fence info for each requested capture buffer
- * @fence_out: Fence info for each requested output buffer
- * @wait_queue: Wait queue for signaling end of job
- * @work_queue: work queue for submit and retire processing
- * @private: Pointer to session private information
- * @slice: Pointer to system cache slice descriptor
- * @commit_sequence_id: last committed sequence id
- * @retired_sequence_id: last retired sequence id
- * @list_lock: lock for pending/retired list
- * @pending_list: list of pending request
- * @retired_list: list of retired/free request
- * @requests: static allocation of free requests
- * @rotcfg: current core rotation configuration
- * @kthread_id: thread_id used for fence management
- */
-struct sde_rotator_ctx {
-	struct kobject kobj;
-	struct sde_rotator_device *rot_dev;
-	struct file *file;
-	struct v4l2_fh fh;
-	struct v4l2_ctrl_handler ctrl_handler;
-	struct v4l2_format format_cap;
-	struct v4l2_format format_out;
-	struct v4l2_rect crop_cap;
-	struct v4l2_rect crop_out;
-	struct v4l2_fract timeperframe;
-	u32 session_id;
-	s32 hflip;
-	s32 vflip;
-	s32 rotate;
-	s32 secure;
-	s32 secure_camera;
-	int abort_pending;
-	int nbuf_cap;
-	int nbuf_out;
-	struct sde_rotator_vbinfo *vbinfo_cap;
-	struct sde_rotator_vbinfo *vbinfo_out;
-	wait_queue_head_t wait_queue;
-	struct sde_rot_queue_v1 work_queue;
-	struct sde_rot_file_private *private;
-	struct llcc_slice_desc *slice;
-	u32 commit_sequence_id;
-	u32 retired_sequence_id;
-	spinlock_t list_lock;
-	struct list_head pending_list;
-	struct list_head retired_list;
-	struct sde_rotator_request requests[SDE_ROTATOR_REQUEST_MAX];
-	struct sde_rotation_config rotcfg;
-
-	int kthread_id;
-};
-
-/*
- * struct sde_rotator_statistics - Storage for statistics
- * @count: Number of processed request
- * @fail_count: Number of failed request
- * @ts: Timestamps of most recent requests
- */
-struct sde_rotator_statistics {
-	u64 count;
-	u64 fail_count;
-	ktime_t ts[SDE_ROTATOR_NUM_EVENTS][SDE_ROTATOR_NUM_TIMESTAMPS];
-};
-
-/*
- * struct sde_rotator_device - FD device structure.
- * @lock: Lock protecting this device structure and serializing IOCTL.
- * @dev: Pointer to device struct.
- * @v4l2_dev: V4l2 device.
- * @vdev: Pointer to video device.
- * @m2m_dev: Memory to memory device.
- * @pdev: Pointer to platform device.
- * @drvdata: Pointer to driver data.
- * @early_submit: flag enable job submission in ready state.
- * @disable_syscache: true to disable system cache
- * @mgr: Pointer to core rotator manager.
- * @mdata: Pointer to common rotator data/resource.
- * @session_id: Next context session identifier
- * @fence_timeout: Timeout value in msec for fence wait
- * @streamoff_timeout: Timeout value in msec for stream off
- * @min_rot_clk: Override the minimum rotator clock from perf calculation
- * @min_bw: Override the minimum bandwidth from perf calculation
- * @min_overhead_us: Override the minimum overhead in us from perf calculation
- * @debugfs_root: Pointer to debugfs directory entry.
- * @stats: placeholder for rotator statistics
- * @open_timeout: maximum wait time for ctx open in msec
- * @open_wq: wait queue for ctx open
- * @excl_ctx: Pointer to exclusive ctx
- * @rot_kw: rotator thread work
- * @rot_thread: rotator threads
- * @kthread_free: check if thread is available or not
- */
-struct sde_rotator_device {
-	struct mutex lock;
-	struct device *dev;
-	struct v4l2_device v4l2_dev;
-	struct video_device *vdev;
-	struct v4l2_m2m_dev *m2m_dev;
-	struct platform_device *pdev;
-	const void *drvdata;
-	u32 early_submit;
-	u32 disable_syscache;
-	struct sde_rot_mgr *mgr;
-	struct sde_rot_data_type *mdata;
-	u32 session_id;
-	u32 fence_timeout;
-	u32 streamoff_timeout;
-	u32 min_rot_clk;
-	u32 min_bw;
-	u32 min_overhead_us;
-	struct sde_rotator_statistics stats;
-	struct dentry *debugfs_root;
-	struct dentry *perf_root;
-	u32 open_timeout;
-	wait_queue_head_t open_wq;
-	struct sde_rotator_ctx *excl_ctx;
-
-	struct kthread_worker rot_kw[MAX_ROT_OPEN_SESSION];
-	struct task_struct *rot_thread[MAX_ROT_OPEN_SESSION];
-	bool kthread_free[MAX_ROT_OPEN_SESSION];
-};
-
-static inline
-struct sde_rot_mgr *sde_rot_mgr_from_pdevice(struct platform_device *pdev)
-{
-	return ((struct sde_rotator_device *) platform_get_drvdata(pdev))->mgr;
-}
-
-static inline
-struct sde_rot_mgr *sde_rot_mgr_from_device(struct device *dev)
-{
-	return ((struct sde_rotator_device *) dev_get_drvdata(dev))->mgr;
-}
-
-void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata);
-
-#endif /* __SDE_ROTATOR_DEV_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
deleted file mode 100644
index 56c51e9..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ /dev/null
@@ -1,943 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#include <media/msm_sde_rotator.h>
-
-#include "sde_rotator_formats.h"
-#include "sde_rotator_util.h"
-
-#define FMT_RGB_565(fmt, desc, frame_fmt, flag_arg, e0, e1, e2, isubwc)	\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = 0,				\
-		.unpack_count = 3,				\
-		.bpp = 2,					\
-		.frame_format = (frame_fmt),			\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.element = { (e0), (e1), (e2) },		\
-		.bits = {					\
-			[C2_R_Cr] = SDE_COLOR_5BIT,		\
-			[C0_G_Y] = SDE_COLOR_6BIT,		\
-			[C1_B_Cb] = SDE_COLOR_5BIT,		\
-		},						\
-		.is_ubwc = isubwc,				\
-	}
-
-#define FMT_RGB_888(fmt, desc, frame_fmt, flag_arg, e0, e1, e2, isubwc)	\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = 0,				\
-		.unpack_count = 3,				\
-		.bpp = 3,					\
-		.frame_format = (frame_fmt),			\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.element = { (e0), (e1), (e2) },		\
-		.bits = {					\
-			[C2_R_Cr] = SDE_COLOR_8BIT,		\
-			[C0_G_Y] = SDE_COLOR_8BIT,		\
-			[C1_B_Cb] = SDE_COLOR_8BIT,		\
-		},						\
-		.is_ubwc = isubwc,				\
-	}
-
-#define FMT_RGB_8888(fmt, desc, frame_fmt, flag_arg,			\
-		alpha_en, e0, e1, e2, e3, isubwc)		\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = (alpha_en),			\
-		.unpack_count = 4,				\
-		.bpp = 4,					\
-		.frame_format = (frame_fmt),			\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.element = { (e0), (e1), (e2), (e3) },		\
-		.bits = {					\
-			[C3_ALPHA] = SDE_COLOR_8BIT,		\
-			[C2_R_Cr] = SDE_COLOR_8BIT,		\
-			[C0_G_Y] = SDE_COLOR_8BIT,		\
-			[C1_B_Cb] = SDE_COLOR_8BIT,		\
-		},						\
-		.is_ubwc = isubwc,				\
-	}
-
-#define FMT_YUV10_COMMON(fmt)					\
-		.format = (fmt),				\
-		.is_yuv = 1,					\
-		.bits = {					\
-			[C2_R_Cr] = SDE_COLOR_8BIT,		\
-			[C0_G_Y] = SDE_COLOR_8BIT,		\
-			[C1_B_Cb] = SDE_COLOR_8BIT,		\
-		},						\
-		.alpha_enable = 0
-
-#define FMT_YUV_COMMON(fmt)					\
-		.format = (fmt),				\
-		.is_yuv = 1,					\
-		.bits = {					\
-			[C2_R_Cr] = SDE_COLOR_8BIT,		\
-			[C0_G_Y] = SDE_COLOR_8BIT,		\
-			[C1_B_Cb] = SDE_COLOR_8BIT,		\
-		},						\
-		.alpha_enable = 0,				\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0
-
-#define FMT_YUV_PSEUDO(fmt, desc, frame_fmt, samp, pixel_type,	\
-		flag_arg, e0, e1, isubwc)			\
-	{							\
-		FMT_YUV_COMMON(fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,	\
-		.chroma_sample = samp,				\
-		.unpack_count = 2,				\
-		.bpp = 2,					\
-		.frame_format = (frame_fmt),			\
-		.pixel_mode = (pixel_type),			\
-		.element = { (e0), (e1) },			\
-		.is_ubwc = isubwc,				\
-	}
-
-#define FMT_YUV_PLANR(fmt, desc, frame_fmt, samp, \
-		flag_arg, e0, e1)		\
-	{							\
-		FMT_YUV_COMMON(fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_PLANAR,		\
-		.chroma_sample = samp,				\
-		.bpp = 1,					\
-		.unpack_count = 1,				\
-		.frame_format = (frame_fmt),			\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.element = { (e0), (e1) },			\
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
-	}
-
-#define FMT_RGB_1555(fmt, desc, alpha_en, flag_arg, e0, e1, e2, e3)	\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = (alpha_en),			\
-		.unpack_count = 4,				\
-		.bpp = 2,					\
-		.element = { (e0), (e1), (e2), (e3) },		\
-		.frame_format = SDE_MDP_FMT_LINEAR,		\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.bits = {					\
-			[C3_ALPHA] = SDE_COLOR_ALPHA_1BIT,	\
-			[C2_R_Cr] = SDE_COLOR_5BIT,		\
-			[C0_G_Y] = SDE_COLOR_5BIT,		\
-			[C1_B_Cb] = SDE_COLOR_5BIT,		\
-		},						\
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
-	}
-
-#define FMT_RGB_4444(fmt, desc, alpha_en, flag_arg, e0, e1, e2, e3)	\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = (alpha_en),			\
-		.unpack_count = 4,				\
-		.bpp = 2,					\
-		.frame_format = SDE_MDP_FMT_LINEAR,		\
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
-		.element = { (e0), (e1), (e2), (e3) },		\
-		.bits = {					\
-			[C3_ALPHA] = SDE_COLOR_ALPHA_4BIT,	\
-			[C2_R_Cr] = SDE_COLOR_4BIT,		\
-			[C0_G_Y] = SDE_COLOR_4BIT,		\
-			[C1_B_Cb] = SDE_COLOR_4BIT,		\
-		},						\
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
-	}
-
-#define FMT_RGB_1010102(fmt, desc, frame_fmt, flag_arg,		\
-			alpha_en, e0, e1, e2, e3, isubwc)	\
-	{							\
-		.format = (fmt),				\
-		.description = (desc),				\
-		.flag = flag_arg,				\
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,	\
-		.unpack_tight = 1,				\
-		.unpack_align_msb = 0,				\
-		.alpha_enable = (alpha_en),			\
-		.unpack_count = 4,				\
-		.bpp = 4,					\
-		.frame_format = frame_fmt,			\
-		.pixel_mode = SDE_MDP_PIXEL_10BIT,		\
-		.element = { (e0), (e1), (e2), (e3) },		\
-		.bits = {					\
-			[C3_ALPHA] = SDE_COLOR_8BIT,		\
-			[C2_R_Cr] = SDE_COLOR_8BIT,		\
-			[C0_G_Y] = SDE_COLOR_8BIT,		\
-			[C1_B_Cb] = SDE_COLOR_8BIT,		\
-		},						\
-		.is_ubwc = isubwc,				\
-	}
-
-/*
- * UBWC formats table:
- * This table holds the UBWC formats supported.
- * If a compression ratio needs to be used for this or any other format,
- * the data will be passed by user-space.
- */
-static struct sde_mdp_format_params_ubwc sde_mdp_format_ubwc_map[] = {
-	{
-		.mdp_format = FMT_RGB_565(SDE_PIX_FMT_RGB_565_UBWC,
-			"SDE/RGB_565_UBWC",
-			SDE_MDP_FMT_TILE_A5X, 0,
-			C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_UBWC,
-			"SDE/RGBA_8888_UBWC",
-			SDE_MDP_FMT_TILE_A5X, 0, 1,
-			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_UBWC,
-			"SDE/RGBX_8888_UBWC",
-			SDE_MDP_FMT_TILE_A5X, 0, 0,
-			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-			"SDE/Y_CBCR_H2V2_UBWC",
-			SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
-			SDE_MDP_PIXEL_NORMAL,
-			0, C1_B_Cb, C2_R_Cr,
-			SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 8,
-			.tile_width = 32,
-		},
-	},
-	{
-		.mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_UBWC,
-			"SDE/RGBA_1010102_UBWC",
-			SDE_MDP_FMT_TILE_A5X, 0, 1,
-			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_UBWC,
-			"SDE/RGBX_1010102_UBWC",
-			SDE_MDP_FMT_TILE_A5X, 0, 0,
-			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-			"SDE/Y_CBCR_H2V2_TP10_UBWC",
-			SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
-			SDE_MDP_PIXEL_10BIT,
-			0,
-			C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_UBWC),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 48,
-		},
-	},
-	{
-		.mdp_format = {
-			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC),
-			.description = "SDE/Y_CBCR_H2V2_P010_UBWC",
-			.flag = 0,
-			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
-			.chroma_sample = SDE_MDP_CHROMA_420,
-			.unpack_count = 2,
-			.bpp = 2,
-			.frame_format = SDE_MDP_FMT_TILE_A5X,
-			.pixel_mode = SDE_MDP_PIXEL_10BIT,
-			.element = { C1_B_Cb, C2_R_Cr },
-			.unpack_tight = 0,
-			.unpack_align_msb = 1,
-			.is_ubwc = SDE_MDP_COMPRESS_UBWC
-		},
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 32,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
-			"SDE/RGBA_1010102_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_TILE,
-			"SDE/RGBX_1010102102_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102_TILE,
-			"SDE/BGRA_1010102_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102_TILE,
-			"SDE/BGRX_1010102_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010_TILE,
-			"SDE/ARGB_2101010_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010_TILE,
-			"SDE/XRGB_2101010_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010_TILE,
-			"SDE/ABGR_2101010_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010_TILE,
-			"SDE/XBGR_2101010_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
-			"Y_CRCB_H2V2_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 8,
-			.tile_width = 32,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
-			"Y_CBCR_H2V2_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 8,
-			.tile_width = 32,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_ABGR_8888_TILE,
-			"SDE/ABGR_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_XRGB_8888_TILE,
-			"SDE/XRGB_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 32,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_ARGB_8888_TILE,
-			"SDE/ARGB_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_TILE,
-			"SDE/RGBA_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_TILE,
-			"SDE/RGBX_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_BGRA_8888_TILE,
-			"SDE/BGRA_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_BGRX_8888_TILE,
-			"SDE/BGRX_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format =
-			FMT_RGB_8888(SDE_PIX_FMT_XBGR_8888_TILE,
-			"SDE/XBGR_8888_TILE",
-			SDE_MDP_FMT_TILE_A5X,
-			SDE_MDP_FORMAT_FLAG_PRIVATE,
-			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-			SDE_MDP_COMPRESS_NONE),
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 16,
-		},
-	},
-	{
-		.mdp_format = {
-			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE),
-			.description = "SDE/Y_CBCR_H2V2_P010_TILE",
-			.flag = SDE_MDP_FORMAT_FLAG_PRIVATE,
-			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
-			.chroma_sample = SDE_MDP_CHROMA_420,
-			.unpack_count = 2,
-			.bpp = 2,
-			.frame_format = SDE_MDP_FMT_TILE_A5X,
-			.pixel_mode = SDE_MDP_PIXEL_10BIT,
-			.element = { C1_B_Cb, C2_R_Cr },
-			.unpack_tight = 0,
-			.unpack_align_msb = 1,
-			.is_ubwc = SDE_MDP_COMPRESS_NONE,
-		},
-		.micro = {
-			.tile_height = 4,
-			.tile_width = 32,
-		},
-	},
-};
-
-static struct sde_mdp_format_params sde_mdp_format_map[] = {
-	FMT_RGB_565(
-		SDE_PIX_FMT_RGB_565, "RGB_565", SDE_MDP_FMT_LINEAR,
-		0, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_565(
-		SDE_PIX_FMT_BGR_565, "BGR_565", SDE_MDP_FMT_LINEAR,
-		0, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_888(
-		SDE_PIX_FMT_RGB_888, "RGB_888", SDE_MDP_FMT_LINEAR,
-		0, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_888(
-		SDE_PIX_FMT_BGR_888, "BGR_888", SDE_MDP_FMT_LINEAR,
-		0, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-
-	FMT_RGB_8888(
-		SDE_PIX_FMT_ABGR_8888, "SDE/ABGR_8888", SDE_MDP_FMT_LINEAR,
-		0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-		SDE_MDP_COMPRESS_NONE),
-
-	FMT_RGB_8888(
-		SDE_PIX_FMT_XRGB_8888, "SDE/XRGB_8888", SDE_MDP_FMT_LINEAR,
-		0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_ARGB_8888, "SDE/ARGB_8888", SDE_MDP_FMT_LINEAR,
-		0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_RGBA_8888, "SDE/RGBA_8888", SDE_MDP_FMT_LINEAR,
-		0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_RGBX_8888, "SDE/RGBX_8888", SDE_MDP_FMT_LINEAR,
-		0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_BGRA_8888, "SDE/BGRA_8888", SDE_MDP_FMT_LINEAR,
-		0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_BGRX_8888, "SDE/BGRX_8888", SDE_MDP_FMT_LINEAR,
-		0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_8888(
-		SDE_PIX_FMT_XBGR_8888, "SDE/XBGR_8888", SDE_MDP_FMT_LINEAR,
-		0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-		SDE_MDP_COMPRESS_NONE),
-
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V1, "Y_CRCB_H2V1",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
-		0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V1, "Y_CBCR_H2V1",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
-		0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H1V2, "Y_CRCB_H1V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
-		0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H1V2, "Y_CBCR_H1V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
-		0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2, "Y_CRCB_H2V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-		0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2, "Y_CBCR_H2V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-		0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_VENUS, "SDE/Y_CBCR_H2V2_VENUS",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-		0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
-	FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_VENUS, "SDE/Y_CRCB_H2V2_VENUS",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
-		0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
-
-	{
-		FMT_YUV10_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010),
-		.description = "SDE/Y_CBCR_H2V2_P010",
-		.flag = 0,
-		.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = SDE_MDP_CHROMA_420,
-		.unpack_count = 2,
-		.bpp = 2,
-		.frame_format = SDE_MDP_FMT_LINEAR,
-		.pixel_mode = SDE_MDP_PIXEL_10BIT,
-		.element = { C1_B_Cb, C2_R_Cr },
-		.unpack_tight = 0,
-		.unpack_align_msb = 1,
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,
-	},
-	{
-		FMT_YUV10_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS),
-		.description = "SDE/Y_CBCR_H2V2_P010_VENUS",
-		.flag = 0,
-		.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = SDE_MDP_CHROMA_420,
-		.unpack_count = 2,
-		.bpp = 2,
-		.frame_format = SDE_MDP_FMT_LINEAR,
-		.pixel_mode = SDE_MDP_PIXEL_10BIT,
-		.element = { C1_B_Cb, C2_R_Cr },
-		.unpack_tight = 0,
-		.unpack_align_msb = 1,
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,
-	},
-	{
-		FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_TP10),
-		.description = "SDE/Y_CBCR_H2V2_TP10",
-		.flag = 0,
-		.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = SDE_MDP_CHROMA_420,
-		.unpack_count = 2,
-		.bpp = 2,
-		.frame_format = SDE_MDP_FMT_TILE_A5X,
-		.pixel_mode = SDE_MDP_PIXEL_10BIT,
-		.element = { C1_B_Cb, C2_R_Cr },
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,
-	},
-
-	FMT_YUV_PLANR(SDE_PIX_FMT_Y_CB_CR_H2V2, "Y_CB_CR_H2V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, 0, C2_R_Cr, C1_B_Cb),
-	FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_H2V2, "Y_CR_CB_H2V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, 0, C1_B_Cb, C2_R_Cr),
-	FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_GH2V2, "SDE/Y_CR_CB_GH2V2",
-		SDE_MDP_FMT_LINEAR,
-		SDE_MDP_CHROMA_420, 0, C1_B_Cb, C2_R_Cr),
-
-	{
-		FMT_YUV_COMMON(SDE_PIX_FMT_YCBYCR_H2V1),
-		.description = "YCBYCR_H2V1",
-		.flag = 0,
-		.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,
-		.chroma_sample = SDE_MDP_CHROMA_H2V1,
-		.unpack_count = 4,
-		.bpp = 2,
-		.frame_format = SDE_MDP_FMT_LINEAR,
-		.pixel_mode = SDE_MDP_PIXEL_NORMAL,
-		.element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
-		.is_ubwc = SDE_MDP_COMPRESS_NONE,
-	},
-	FMT_RGB_1555(SDE_PIX_FMT_RGBA_5551, "RGBA_5551", 1, 0,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
-	FMT_RGB_1555(SDE_PIX_FMT_ARGB_1555, "ARGB_1555", 1, 0,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
-	FMT_RGB_1555(SDE_PIX_FMT_ABGR_1555, "ABGR_1555", 1, 0,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
-	FMT_RGB_1555(SDE_PIX_FMT_BGRA_5551, "BGRA_5551", 1, 0,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
-	FMT_RGB_1555(SDE_PIX_FMT_BGRX_5551, "BGRX_5551", 0, 0,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
-	FMT_RGB_1555(SDE_PIX_FMT_RGBX_5551, "RGBX_5551", 0, 0,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
-	FMT_RGB_1555(SDE_PIX_FMT_XBGR_1555, "XBGR_1555", 0, 0,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
-	FMT_RGB_1555(SDE_PIX_FMT_XRGB_1555, "XRGB_1555", 0, 0,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
-	FMT_RGB_4444(SDE_PIX_FMT_RGBA_4444, "RGBA_4444", 1, 0,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
-	FMT_RGB_4444(SDE_PIX_FMT_ARGB_4444, "ARGB_4444", 1, 0,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
-	FMT_RGB_4444(SDE_PIX_FMT_BGRA_4444, "BGRA_4444", 1, 0,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
-	FMT_RGB_4444(SDE_PIX_FMT_ABGR_4444, "ABGR_4444", 1, 0,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
-	FMT_RGB_4444(SDE_PIX_FMT_RGBX_4444, "RGBX_4444", 0, 0,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
-	FMT_RGB_4444(SDE_PIX_FMT_XRGB_4444, "XRGB_4444", 0, 0,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
-	FMT_RGB_4444(SDE_PIX_FMT_BGRX_4444, "BGRX_4444", 0, 0,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
-	FMT_RGB_4444(SDE_PIX_FMT_XBGR_4444, "XBGR_4444", 0, 0,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
-	FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102, "SDE/RGBA_1010102",
-		SDE_MDP_FMT_LINEAR,
-		0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102, "SDE/RGBX_1010102",
-		SDE_MDP_FMT_LINEAR,
-		0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102, "SDE/BGRA_1010102",
-		SDE_MDP_FMT_LINEAR,
-		0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102, "SDE/BGRX_1010102",
-		SDE_MDP_FMT_LINEAR,
-		0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010, "SDE/ARGB_2101010",
-		SDE_MDP_FMT_LINEAR,
-		0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010, "SDE/XRGB_2101010",
-		SDE_MDP_FMT_LINEAR,
-		0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010, "SDE/ABGR_2101010",
-		SDE_MDP_FMT_LINEAR,
-		0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-		SDE_MDP_COMPRESS_NONE),
-	FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010, "SDE/XBGR_2101010",
-		SDE_MDP_FMT_LINEAR,
-		0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
-		SDE_MDP_COMPRESS_NONE),
-};
-
-/*
- * sde_get_format_params - return format parameter of the given format
- * @format: format to lookup
- */
-struct sde_mdp_format_params *sde_get_format_params(u32 format)
-{
-	struct sde_mdp_format_params *fmt = NULL;
-	int i;
-	bool fmt_found = false;
-
-	for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++) {
-		fmt = &sde_mdp_format_map[i];
-		if (format == fmt->format) {
-			fmt_found = true;
-			break;
-		}
-	}
-
-	if (!fmt_found) {
-		for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
-			fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
-			if (format == fmt->format) {
-				fmt_found = true;
-				break;
-			}
-		}
-	}
-	/* If format not supported than return NULL */
-	if (!fmt_found)
-		fmt = NULL;
-
-	return fmt;
-}
-
-/*
- * sde_rot_get_ubwc_micro_dim - return micro dimension of the given ubwc format
- * @format: format to lookup
- * @w: Pointer to returned width dimension
- * @h: Pointer to returned height dimension
- */
-int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h)
-{
-	struct sde_mdp_format_params_ubwc *fmt = NULL;
-	bool fmt_found = false;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
-		fmt = &sde_mdp_format_ubwc_map[i];
-		if (format == fmt->mdp_format.format) {
-			fmt_found = true;
-			break;
-		}
-	}
-
-	if (!fmt_found)
-		return -EINVAL;
-
-	*w = fmt->micro.tile_width;
-	*h = fmt->micro.tile_height;
-
-	return 0;
-}
-
-/*
- * sde_rot_get_tilea5x_pixfmt - get base a5x tile format of given source format
- * @src_pixfmt: source pixel format to be converted
- * @dst_pixfmt: pointer to base a5x tile pixel format
- * return: 0 if success; error code otherwise
- */
-int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt)
-{
-	int rc = 0;
-
-	if (!dst_pixfmt) {
-		SDEROT_ERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	switch (src_pixfmt) {
-	case SDE_PIX_FMT_Y_CBCR_H2V2:
-	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
-	case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
-		break;
-	case SDE_PIX_FMT_Y_CRCB_H2V2:
-	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
-		break;
-	case V4L2_PIX_FMT_RGB565:
-	case SDE_PIX_FMT_RGB_565_UBWC:
-	case SDE_PIX_FMT_RGB_565_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_RGB_565_TILE;
-		break;
-	case SDE_PIX_FMT_RGBA_8888:
-	case SDE_PIX_FMT_RGBA_8888_UBWC:
-	case SDE_PIX_FMT_RGBA_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
-		break;
-	case SDE_PIX_FMT_RGBX_8888:
-	case SDE_PIX_FMT_RGBX_8888_UBWC:
-	case SDE_PIX_FMT_RGBX_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
-		break;
-	case SDE_PIX_FMT_ARGB_8888:
-	case SDE_PIX_FMT_ARGB_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
-		break;
-	case SDE_PIX_FMT_XRGB_8888:
-	case SDE_PIX_FMT_XRGB_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
-		break;
-	case SDE_PIX_FMT_ABGR_8888:
-	case SDE_PIX_FMT_ABGR_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
-		break;
-	case SDE_PIX_FMT_XBGR_8888:
-	case SDE_PIX_FMT_XBGR_8888_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
-		break;
-	case SDE_PIX_FMT_ARGB_2101010:
-	case SDE_PIX_FMT_ARGB_2101010_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
-		break;
-	case SDE_PIX_FMT_XRGB_2101010:
-	case SDE_PIX_FMT_XRGB_2101010_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
-		break;
-	case SDE_PIX_FMT_ABGR_2101010:
-	case SDE_PIX_FMT_ABGR_2101010_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
-		break;
-	case SDE_PIX_FMT_XBGR_2101010:
-	case SDE_PIX_FMT_XBGR_2101010_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
-		break;
-	case SDE_PIX_FMT_BGRA_1010102:
-	case SDE_PIX_FMT_BGRA_1010102_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
-		break;
-	case SDE_PIX_FMT_BGRX_1010102:
-	case SDE_PIX_FMT_BGRX_1010102_TILE:
-		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
-		break;
-	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
-	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
-	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
-		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
-		break;
-	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
-	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
-		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
-		break;
-	default:
-		SDEROT_ERR("invalid src pixel format %c%c%c%c\n",
-				src_pixfmt >> 0, src_pixfmt >> 8,
-				src_pixfmt >> 16, src_pixfmt >> 24);
-		rc = -EINVAL;
-		break;
-	}
-
-	return rc;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
deleted file mode 100644
index ed54a89..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_FORMATS_H
-#define SDE_ROTATOR_FORMATS_H
-
-#include <linux/types.h>
-#include <media/msm_sde_rotator.h>
-
-/* Internal rotator pixel formats */
-#define SDE_PIX_FMT_RGBA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '0')
-#define SDE_PIX_FMT_RGBX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '1')
-#define SDE_PIX_FMT_BGRA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '2')
-#define SDE_PIX_FMT_BGRX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '3')
-#define SDE_PIX_FMT_ARGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '4')
-#define SDE_PIX_FMT_XRGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '5')
-#define SDE_PIX_FMT_ABGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '6')
-#define SDE_PIX_FMT_XBGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '7')
-#define SDE_PIX_FMT_Y_CBCR_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '8')
-#define SDE_PIX_FMT_Y_CRCB_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '9')
-#define SDE_PIX_FMT_ARGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'A')
-#define SDE_PIX_FMT_XRGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'B')
-#define SDE_PIX_FMT_ABGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'C')
-#define SDE_PIX_FMT_XBGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'D')
-#define SDE_PIX_FMT_BGRA_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'E')
-#define SDE_PIX_FMT_BGRX_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'F')
-#define SDE_PIX_FMT_RGBA_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '0')
-#define SDE_PIX_FMT_RGBX_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '1')
-#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE	v4l2_fourcc('Q', 'T', '1', '2')
-#define SDE_PIX_FMT_RGB_565_TILE	v4l2_fourcc('Q', 'T', '1', '3')
-
-#define SDE_ROT_MAX_PLANES		4
-
-#define UBWC_META_MACRO_W_H		16
-#define UBWC_META_BLOCK_SIZE		256
-
-/*
- * Value of enum chosen to fit the number of bits
- * expected by the HW programming.
- */
-enum {
-	SDE_COLOR_4BIT,
-	SDE_COLOR_5BIT,
-	SDE_COLOR_6BIT,
-	SDE_COLOR_8BIT,
-	SDE_COLOR_ALPHA_1BIT = 0,
-	SDE_COLOR_ALPHA_4BIT = 1,
-};
-
-#define C3_ALPHA	3	/* alpha */
-#define C2_R_Cr		2	/* R/Cr */
-#define C1_B_Cb		1	/* B/Cb */
-#define C0_G_Y		0	/* G/luma */
-
-enum sde_mdp_compress_type {
-	SDE_MDP_COMPRESS_NONE,
-	SDE_MDP_COMPRESS_UBWC,
-};
-
-enum sde_mdp_frame_format_type {
-	SDE_MDP_FMT_LINEAR,
-	SDE_MDP_FMT_TILE_A4X,
-	SDE_MDP_FMT_TILE_A5X,
-};
-
-enum sde_mdp_pixel_type {
-	SDE_MDP_PIXEL_NORMAL,
-	SDE_MDP_PIXEL_10BIT,
-};
-
-enum sde_mdp_sspp_fetch_type {
-	SDE_MDP_PLANE_INTERLEAVED,
-	SDE_MDP_PLANE_PLANAR,
-	SDE_MDP_PLANE_PSEUDO_PLANAR,
-};
-
-enum sde_mdp_sspp_chroma_samp_type {
-	SDE_MDP_CHROMA_RGB,
-	SDE_MDP_CHROMA_H2V1,
-	SDE_MDP_CHROMA_H1V2,
-	SDE_MDP_CHROMA_420
-};
-
-enum sde_mdp_format_flag_type {
-	SDE_MDP_FORMAT_FLAG_PRIVATE = BIT(0)
-};
-
-struct sde_mdp_format_params {
-	u32 format;
-	const char *description;
-	u32 flag;
-	u8 is_yuv;
-	u8 is_ubwc;
-
-	u8 frame_format;
-	u8 chroma_sample;
-	u8 solid_fill;
-	u8 fetch_planes;
-	u8 unpack_align_msb;	/* 0 to LSB, 1 to MSB */
-	u8 unpack_tight;	/* 0 for loose, 1 for tight */
-	u8 unpack_count;	/* 0 = 1 component, 1 = 2 component ... */
-	u8 bpp;
-	u8 alpha_enable;	/*  source has alpha */
-	u8 pixel_mode;		/* 0: normal, 1:10bit */
-	u8 bits[SDE_ROT_MAX_PLANES];
-	u8 element[SDE_ROT_MAX_PLANES];
-};
-
-struct sde_mdp_format_ubwc_tile_info {
-	u16 tile_height;
-	u16 tile_width;
-};
-
-struct sde_mdp_format_params_ubwc {
-	struct sde_mdp_format_params mdp_format;
-	struct sde_mdp_format_ubwc_tile_info micro;
-};
-
-struct sde_mdp_format_params *sde_get_format_params(u32 format);
-
-int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
-
-int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt);
-
-static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A4X);
-}
-
-static inline bool sde_mdp_is_tilea5x_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A5X);
-}
-
-static inline bool sde_mdp_is_ubwc_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->is_ubwc == SDE_MDP_COMPRESS_UBWC);
-}
-
-static inline bool sde_mdp_is_linear_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->frame_format == SDE_MDP_FMT_LINEAR);
-}
-
-static inline bool sde_mdp_is_nv12_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->fetch_planes == SDE_MDP_PLANE_PSEUDO_PLANAR) &&
-			(fmt->chroma_sample == SDE_MDP_CHROMA_420);
-}
-
-static inline bool sde_mdp_is_nv12_8b_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && sde_mdp_is_nv12_format(fmt) &&
-			(fmt->pixel_mode == SDE_MDP_PIXEL_NORMAL);
-}
-
-static inline bool sde_mdp_is_nv12_10b_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && sde_mdp_is_nv12_format(fmt) &&
-			(fmt->pixel_mode == SDE_MDP_PIXEL_10BIT);
-}
-
-static inline bool sde_mdp_is_tp10_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && sde_mdp_is_nv12_10b_format(fmt) &&
-			fmt->unpack_tight;
-}
-
-static inline bool sde_mdp_is_p010_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && sde_mdp_is_nv12_10b_format(fmt) &&
-			!fmt->unpack_tight;
-}
-
-static inline bool sde_mdp_is_yuv_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && fmt->is_yuv;
-}
-
-static inline bool sde_mdp_is_rgb_format(struct sde_mdp_format_params *fmt)
-{
-	return !sde_mdp_is_yuv_format(fmt);
-}
-
-static inline bool sde_mdp_is_private_format(struct sde_mdp_format_params *fmt)
-{
-	return fmt && (fmt->flag & SDE_MDP_FORMAT_FLAG_PRIVATE);
-}
-
-static inline int sde_mdp_format_blk_size(struct sde_mdp_format_params *fmt)
-{
-	return sde_mdp_is_tp10_format(fmt) ? 96 : 128;
-}
-#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
deleted file mode 100644
index 0527dcd..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_HWIO_H
-#define SDE_ROTATOR_HWIO_H
-
-#include <linux/bitops.h>
-
-#define SDE_REG_HW_VERSION			0x0
-#define SDE_REG_HW_INTR_STATUS			0x10
-
-#define SDE_INTR_MDP				BIT(0)
-
-#define SDE_MDP_OFFSET				0x1000
-
-#define MMSS_MDP_PANIC_ROBUST_CTRL		0x00178
-#define MMSS_MDP_PANIC_LUT0			0x0017C
-#define MMSS_MDP_PANIC_LUT1			0x00180
-#define MMSS_MDP_ROBUST_LUT			0x00184
-#define MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL	0x00190
-
-/* following offsets are with respect to MDP VBIF base */
-#define MMSS_VBIF_CLKON				0x4
-#define MMSS_VBIF_RD_LIM_CONF			0x0B0
-#define MMSS_VBIF_WR_LIM_CONF			0x0C0
-
-#define MMSS_VBIF_XIN_HALT_CTRL0		0x200
-#define MMSS_VBIF_XIN_HALT_CTRL1		0x204
-#define MMSS_VBIF_AXI_HALT_CTRL0		0x208
-#define MMSS_VBIF_AXI_HALT_CTRL1		0x20C
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL		0x210
-#define MMSS_VBIF_TEST_BUS_OUT			0x230
-
-#define SDE_VBIF_QOS_REMAP_BASE			0x020
-#define SDE_VBIF_QOS_REMAP_ENTRIES		0x4
-
-#define SDE_VBIF_FIXED_SORT_EN			0x30
-#define SDE_VBIF_FIXED_SORT_SEL0		0x34
-
-/* MMSS_VBIF_NRT - offset relative to base offset */
-#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0		0x0008
-#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0		0
-#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1		1
-#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1		0x000C
-#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_00			0x0020
-#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_01			0x0024
-#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_10			0x0028
-#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_11			0x002C
-#define MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN		0x00AC
-#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF0		0x00B0
-#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF1		0x00B4
-#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF2		0x00B8
-#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF0		0x00C0
-#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF1		0x00C4
-#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2		0x00C8
-#define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0		0x00D0
-#define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0		0x00D4
-#define MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
-#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000		0x0550
-#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000		0x0590
-
-#define SDE_MDP_REG_TRAFFIC_SHAPER_EN			BIT(31)
-#define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + (num * 4))
-#define SDE_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num)	(0x060 + (num * 4))
-#define SDE_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR	4
-
-#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
deleted file mode 100644
index 1d1b07a..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_INLINE_H__
-#define __SDE_ROTATOR_INLINE_H__
-
-#include <linux/types.h>
-#include <linux/dma-buf.h>
-#include <linux/platform_device.h>
-
-#include "sde_rotator_formats.h"
-
-#define SDE_ROTATOR_INLINE_PLANE_MAX	4
-
-/*
- * enum sde_rotator_inline_cmd_type - inline rotator command stages
- * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
- * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
- * @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
- * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
- * @SDE_ROTATOR_INLINE_CMD_ABORT: abort current commit and reset
- */
-enum sde_rotator_inline_cmd_type {
-	SDE_ROTATOR_INLINE_CMD_VALIDATE,
-	SDE_ROTATOR_INLINE_CMD_COMMIT,
-	SDE_ROTATOR_INLINE_CMD_START,
-	SDE_ROTATOR_INLINE_CMD_CLEANUP,
-	SDE_ROTATOR_INLINE_CMD_ABORT,
-};
-
-/**
- * sde_rotator_inline_cmd - inline rotation command
- * @sequence_id: unique command sequence identifier
- * @video_mode: true if video interface is connected
- * @fps: frame rate in frame-per-second
- * @rot90: rotate 90 counterclockwise
- * @hflip: horizontal flip prior to rotation
- * @vflip: vertical flip prior to rotation
- * @secure: true if buffer is in secure domain
- * @prefill_bw: prefill bandwidth in Bps
- * @clkrate: clock rate in Hz
- * @data_bw: data bus bandwidth in Bps
- * @src_addr: source i/o buffer virtual address
- * @src_len: source i/o buffer length
- * @src_planes: source plane number
- * @src_pixfmt: v4l2 fourcc pixel format of source buffer
- * @src_width: width of source buffer
- * @src_height: height of source buffer
- * @src_rect_x: roi x coordinate of source buffer
- * @src_rect_y: roi y coordinate of source buffer
- * @src_rect_w: roi width of source buffer
- * @src_rect_h: roi height of source buffer
- * @dst_addr: destination i/o virtual buffer address
- * @dst_len: destination i/o buffer length
- * @dst_planes: destination plane number
- * @dst_pixfmt: v4l2 fourcc pixel format of destination buffer
- * @dst_rect_x: roi x coordinate of destination buffer
- * @dst_rect_y: roi y coordinate of destination buffer
- * @dst_rect_w: roi width of destination buffer
- * @dst_rect_h: roi height of destination buffer
- * @dst_writeback: true if cache writeback is required
- * @priv_handle: private handle of rotator session
- */
-struct sde_rotator_inline_cmd {
-	u32 sequence_id;
-	bool video_mode;
-	u32 fps;
-	bool rot90;
-	bool hflip;
-	bool vflip;
-	bool secure;
-	u64 prefill_bw;
-	u64 clkrate;
-	u64 data_bw;
-	dma_addr_t src_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
-	u32 src_len[SDE_ROTATOR_INLINE_PLANE_MAX];
-	u32 src_planes;
-	u32 src_pixfmt;
-	u32 src_width;
-	u32 src_height;
-	u32 src_rect_x;
-	u32 src_rect_y;
-	u32 src_rect_w;
-	u32 src_rect_h;
-	dma_addr_t dst_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
-	u32 dst_len[SDE_ROTATOR_INLINE_PLANE_MAX];
-	u32 dst_planes;
-	u32 dst_pixfmt;
-	u32 dst_rect_x;
-	u32 dst_rect_y;
-	u32 dst_rect_w;
-	u32 dst_rect_h;
-	bool dst_writeback;
-	void *priv_handle;
-};
-
-#if defined(CONFIG_MSM_SDE_ROTATOR)
-
-void *sde_rotator_inline_open(struct platform_device *pdev);
-int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
-		u32 src_pixfmt, u32 *dst_pixfmt);
-int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
-		char *downscale_caps, int len);
-int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev);
-int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
-		bool input, u32 *pixfmt, int len);
-int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
-		enum sde_rotator_inline_cmd_type cmd_type);
-int sde_rotator_inline_release(void *handle);
-void sde_rotator_inline_reg_dump(struct platform_device *pdev);
-
-#else
-
-void *sde_rotator_inline_open(struct platform_device *pdev)
-{
-	return NULL;
-}
-
-int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
-		u32 src_pixfmt, u32 *dst_pixfmt)
-{
-	return 0;
-}
-
-int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
-		char *downscale_caps, int len)
-{
-	return 0;
-}
-
-int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
-{
-	return 0;
-}
-
-int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
-		bool input, u32 *pixfmt, int len)
-{
-	return 0;
-}
-
-int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
-		enum sde_rotator_inline_cmd_type cmd_type)
-{
-	return 0;
-}
-
-int sde_rotator_inline_release(void *handle)
-{
-	return 0;
-}
-
-void sde_rotator_inline_reg_dump(struct platform_device *pdev)
-{
-}
-
-#endif
-
-#endif /* __SDE_ROTATOR_INLINE_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c
deleted file mode 100644
index f2526e82..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c
+++ /dev/null
@@ -1,423 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/regulator/consumer.h>
-#include <linux/delay.h>
-
-#include "sde_rotator_io_util.h"
-
-void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug)
-{
-	u32 in_val;
-
-	if (!io || !io->base) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	if (offset > io->len) {
-		DEV_ERR("%pS->%s: offset out of range\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
-	writel_relaxed(value, io->base + offset);
-	if (debug) {
-		/* ensure register read is ordered after register write */
-		mb();
-		in_val = readl_relaxed(io->base + offset);
-		DEV_DBG("[%08x] => %08x [%08x]\n",
-			(u32)(unsigned long)(io->base + offset),
-			value, in_val);
-	}
-} /* sde_reg_w */
-
-u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug)
-{
-	u32 value;
-
-	if (!io || !io->base) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	if (offset > io->len) {
-		DEV_ERR("%pS->%s: offset out of range\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	value = readl_relaxed(io->base + offset);
-	if (debug)
-		DEV_DBG("[%08x] <= %08x\n",
-			(u32)(unsigned long)(io->base + offset), value);
-
-	DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
-	return value;
-} /* sde_reg_r */
-
-void sde_reg_dump(void __iomem *base, u32 length, const char *prefix,
-	u32 debug)
-{
-	if (debug)
-		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
-			(void *)base, length, false);
-} /* sde_reg_dump */
-
-static struct resource *sde_rot_get_res_byname(struct platform_device *pdev,
-	unsigned int type, const char *name)
-{
-	struct resource *res = NULL;
-
-	res = platform_get_resource_byname(pdev, type, name);
-	if (!res)
-		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
-
-	return res;
-} /* sde_rot_get_res_byname */
-
-int sde_rot_ioremap_byname(struct platform_device *pdev,
-	struct sde_io_data *io_data, const char *name)
-{
-	struct resource *res = NULL;
-
-	if (!pdev || !io_data) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	res = sde_rot_get_res_byname(pdev, IORESOURCE_MEM, name);
-	if (!res) {
-		DEV_ERR("%pS->%s: '%s' sde_rot_get_res_byname failed\n",
-			__builtin_return_address(0), __func__, name);
-		return -ENODEV;
-	}
-
-	io_data->len = (u32)resource_size(res);
-	io_data->base = ioremap(res->start, io_data->len);
-	if (!io_data->base) {
-		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
-			__builtin_return_address(0), __func__, name);
-		return -EIO;
-	}
-
-	return 0;
-} /* sde_rot_ioremap_byname */
-
-void sde_rot_iounmap(struct sde_io_data *io_data)
-{
-	if (!io_data) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	if (io_data->base) {
-		iounmap(io_data->base);
-		io_data->base = NULL;
-	}
-	io_data->len = 0;
-} /* sde_rot_iounmap */
-
-int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
-	int num_vreg, int config)
-{
-	int i = 0, rc = 0;
-	struct sde_vreg *curr_vreg = NULL;
-	enum sde_vreg_type type;
-
-	if (!dev || !in_vreg || !num_vreg) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	if (config) {
-		for (i = 0; i < num_vreg; i++) {
-			curr_vreg = &in_vreg[i];
-			curr_vreg->vreg = regulator_get(dev,
-				curr_vreg->vreg_name);
-			rc = PTR_RET(curr_vreg->vreg);
-			if (rc) {
-				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
-					 __builtin_return_address(0), __func__,
-					 curr_vreg->vreg_name, rc);
-				curr_vreg->vreg = NULL;
-				goto vreg_get_fail;
-			}
-			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
-					? SDE_REG_LDO : SDE_REG_VS;
-			if (type == SDE_REG_LDO) {
-				rc = regulator_set_voltage(
-					curr_vreg->vreg,
-					curr_vreg->min_voltage,
-					curr_vreg->max_voltage);
-				if (rc < 0) {
-					DEV_ERR("%pS->%s: %s set vltg fail\n",
-						__builtin_return_address(0),
-						__func__,
-						curr_vreg->vreg_name);
-					goto vreg_set_voltage_fail;
-				}
-			}
-		}
-	} else {
-		for (i = num_vreg-1; i >= 0; i--) {
-			curr_vreg = &in_vreg[i];
-			if (curr_vreg->vreg) {
-				type = (regulator_count_voltages(
-					curr_vreg->vreg) > 0)
-					? SDE_REG_LDO : SDE_REG_VS;
-				if (type == SDE_REG_LDO) {
-					regulator_set_voltage(curr_vreg->vreg,
-						0, curr_vreg->max_voltage);
-				}
-				regulator_put(curr_vreg->vreg);
-				curr_vreg->vreg = NULL;
-			}
-		}
-	}
-	return 0;
-
-vreg_unconfig:
-if (type == SDE_REG_LDO)
-	regulator_set_load(curr_vreg->vreg, 0);
-
-vreg_set_voltage_fail:
-	regulator_put(curr_vreg->vreg);
-	curr_vreg->vreg = NULL;
-
-vreg_get_fail:
-	for (i--; i >= 0; i--) {
-		curr_vreg = &in_vreg[i];
-		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
-			? SDE_REG_LDO : SDE_REG_VS;
-		goto vreg_unconfig;
-	}
-	return rc;
-} /* sde_rot_config_vreg */
-
-int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg, int enable)
-{
-	int i = 0, rc = 0;
-	bool need_sleep;
-
-	if (!in_vreg) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	if (enable) {
-		for (i = 0; i < num_vreg; i++) {
-			rc = PTR_RET(in_vreg[i].vreg);
-			if (rc) {
-				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name, rc);
-				goto vreg_set_opt_mode_fail;
-			}
-			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
-			if (in_vreg[i].pre_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].pre_on_sleep * 1000,
-					in_vreg[i].pre_on_sleep * 1000);
-			rc = regulator_set_load(in_vreg[i].vreg,
-				in_vreg[i].enable_load);
-			if (rc < 0) {
-				DEV_ERR("%pS->%s: %s set opt m fail\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name);
-				goto vreg_set_opt_mode_fail;
-			}
-			rc = regulator_enable(in_vreg[i].vreg);
-			if (in_vreg[i].post_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].post_on_sleep * 1000,
-					in_vreg[i].post_on_sleep * 1000);
-			if (rc < 0) {
-				DEV_ERR("%pS->%s: %s enable failed\n",
-					__builtin_return_address(0), __func__,
-					in_vreg[i].vreg_name);
-				goto disable_vreg;
-			}
-		}
-	} else {
-		for (i = num_vreg-1; i >= 0; i--) {
-			if (in_vreg[i].pre_off_sleep)
-				usleep_range(in_vreg[i].pre_off_sleep * 1000,
-					in_vreg[i].pre_off_sleep * 1000);
-			regulator_set_load(in_vreg[i].vreg,
-				in_vreg[i].disable_load);
-			regulator_disable(in_vreg[i].vreg);
-			if (in_vreg[i].post_off_sleep)
-				usleep_range(in_vreg[i].post_off_sleep * 1000,
-					in_vreg[i].post_off_sleep * 1000);
-		}
-	}
-	return rc;
-
-disable_vreg:
-	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
-
-vreg_set_opt_mode_fail:
-	for (i--; i >= 0; i--) {
-		if (in_vreg[i].pre_off_sleep)
-			usleep_range(in_vreg[i].pre_off_sleep * 1000,
-				in_vreg[i].pre_off_sleep * 1000);
-		regulator_set_load(in_vreg[i].vreg,
-			in_vreg[i].disable_load);
-		regulator_disable(in_vreg[i].vreg);
-		if (in_vreg[i].post_off_sleep)
-			usleep_range(in_vreg[i].post_off_sleep * 1000,
-				in_vreg[i].post_off_sleep * 1000);
-	}
-
-	return rc;
-} /* sde_rot_enable_vreg */
-
-void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk)
-{
-	int i;
-
-	if (!clk_arry) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return;
-	}
-
-	for (i = num_clk - 1; i >= 0; i--) {
-		if (clk_arry[i].clk)
-			clk_put(clk_arry[i].clk);
-		clk_arry[i].clk = NULL;
-	}
-} /* sde_rot_put_clk */
-
-int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk)
-{
-	int i, rc = 0;
-
-	if (!dev || !clk_arry) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < num_clk; i++) {
-		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
-		rc = PTR_RET(clk_arry[i].clk);
-		if (rc) {
-			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name, rc);
-			goto error;
-		}
-	}
-
-	return rc;
-
-error:
-	sde_rot_put_clk(clk_arry, num_clk);
-
-	return rc;
-} /* sde_rot_get_clk */
-
-int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk)
-{
-	int i, rc = 0;
-
-	if (!clk_arry) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < num_clk; i++) {
-		if (clk_arry[i].clk) {
-			if (clk_arry[i].type != SDE_CLK_AHB) {
-				DEV_DBG("%pS->%s: '%s' rate %ld\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name,
-					clk_arry[i].rate);
-				rc = clk_set_rate(clk_arry[i].clk,
-					clk_arry[i].rate);
-				if (rc) {
-					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
-						__builtin_return_address(0),
-						__func__,
-						clk_arry[i].clk_name, rc);
-					break;
-				}
-			}
-		} else {
-			DEV_ERR("%pS->%s: '%s' is not available\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-			rc = -EPERM;
-			break;
-		}
-	}
-
-	return rc;
-} /* sde_rot_clk_set_rate */
-
-int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable)
-{
-	int i, rc = 0;
-
-	if (!clk_arry) {
-		DEV_ERR("%pS->%s: invalid input\n",
-			__builtin_return_address(0), __func__);
-		return -EINVAL;
-	}
-
-	if (enable) {
-		for (i = 0; i < num_clk; i++) {
-			DEV_DBG("%pS->%s: enable '%s'\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-			if (clk_arry[i].clk) {
-				rc = clk_prepare_enable(clk_arry[i].clk);
-				if (rc)
-					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
-						__builtin_return_address(0),
-						__func__,
-						clk_arry[i].clk_name, rc);
-			} else {
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
-				rc = -EPERM;
-			}
-
-			if (rc) {
-				sde_rot_enable_clk(&clk_arry[i],
-					i, false);
-				break;
-			}
-		}
-	} else {
-		for (i = num_clk - 1; i >= 0; i--) {
-			DEV_DBG("%pS->%s: disable '%s'\n",
-				__builtin_return_address(0), __func__,
-				clk_arry[i].clk_name);
-
-			if (clk_arry[i].clk)
-				clk_disable_unprepare(clk_arry[i].clk);
-			else
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
-		}
-	}
-
-	return rc;
-} /* sde_rot_enable_clk */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h
deleted file mode 100644
index 93d1d806..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-
-#ifndef __SDE_ROTATOR_IO_UTIL_H__
-#define __SDE_ROTATOR_IO_UTIL_H__
-
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/i2c.h>
-#include <linux/types.h>
-
-#ifdef DEBUG
-#define DEV_DBG(fmt, args...)   pr_err("<SDEROT_ERR> " fmt, ##args)
-#else
-#define DEV_DBG(fmt, args...)   pr_debug("<SDEROT_DBG> " fmt, ##args)
-#endif
-#define DEV_INFO(fmt, args...)  pr_info("<SDEROT_INFO> " fmt, ##args)
-#define DEV_WARN(fmt, args...)  pr_warn("<SDEROT_WARN> " fmt, ##args)
-#define DEV_ERR(fmt, args...)   pr_err("<SDEROT_ERR> " fmt, ##args)
-
-struct sde_io_data {
-	u32 len;
-	void __iomem *base;
-};
-
-void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug);
-u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug);
-void sde_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
-
-#define SDE_REG_W_ND(io, offset, val)  sde_reg_w(io, offset, val, false)
-#define SDE_REG_W(io, offset, val)     sde_reg_w(io, offset, val, true)
-#define SDE_REG_R_ND(io, offset)       sde_reg_r(io, offset, false)
-#define SDE_REG_R(io, offset)          sde_reg_r(io, offset, true)
-
-enum sde_vreg_type {
-	SDE_REG_LDO,
-	SDE_REG_VS,
-};
-
-struct sde_vreg {
-	struct regulator *vreg; /* vreg handle */
-	char vreg_name[32];
-	int min_voltage;
-	int max_voltage;
-	int enable_load;
-	int disable_load;
-	int pre_on_sleep;
-	int post_on_sleep;
-	int pre_off_sleep;
-	int post_off_sleep;
-};
-
-struct sde_gpio {
-	unsigned int gpio;
-	unsigned int value;
-	char gpio_name[32];
-};
-
-enum sde_clk_type {
-	SDE_CLK_AHB, /* no set rate. rate controlled through rpm */
-	SDE_CLK_PCLK,
-	SDE_CLK_OTHER,
-};
-
-struct sde_clk {
-	struct clk *clk; /* clk handle */
-	char clk_name[32];
-	enum sde_clk_type type;
-	unsigned long rate;
-};
-
-struct sde_module_power {
-	unsigned int num_vreg;
-	struct sde_vreg *vreg_config;
-	unsigned int num_gpio;
-	struct sde_gpio *gpio_config;
-	unsigned int num_clk;
-	struct sde_clk *clk_config;
-};
-
-int sde_rot_ioremap_byname(struct platform_device *pdev,
-	struct sde_io_data *io_data, const char *name);
-void sde_rot_iounmap(struct sde_io_data *io_data);
-
-int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
-	int num_vreg, int config);
-int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg,	int enable);
-
-int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk);
-void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk);
-int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk);
-int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable);
-
-#endif /* __SDE_ROTATOR_IO_UTIL_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
deleted file mode 100644
index 8c2a7b0..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
+++ /dev/null
@@ -1,745 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-
-#include "sde_rotator_r1_hwio.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_r1_internal.h"
-#include "sde_rotator_r1.h"
-#include "sde_rotator_r1_debug.h"
-
-struct sde_mdp_hw_resource {
-	struct sde_rot_hw_resource hw;
-	struct sde_mdp_ctl *ctl;
-	struct sde_mdp_mixer *mixer;
-	struct sde_mdp_pipe *pipe;
-	struct sde_mdp_writeback *wb;
-};
-
-struct sde_rotator_r1_data {
-	struct sde_rot_mgr *mgr;
-	int wb_id;
-	int ctl_id;
-	int irq_num;
-	struct sde_mdp_hw_resource *mdp_hw;
-};
-
-static u32 sde_hw_rotator_input_pixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	SDE_PIX_FMT_Y_CB_CR_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_GH2V2,
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	SDE_PIX_FMT_YCBYCR_H2V1,
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-};
-
-static u32 sde_hw_rotator_output_pixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	SDE_PIX_FMT_Y_CB_CR_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_GH2V2,
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	SDE_PIX_FMT_YCBYCR_H2V1,
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-};
-
-static struct sde_mdp_hw_resource *sde_rotator_hw_alloc(
-	struct sde_rot_mgr *mgr, u32 ctl_id, u32 wb_id, int irq_num)
-{
-	struct sde_mdp_hw_resource *mdp_hw;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int pipe_ndx, offset = ctl_id;
-	int ret = 0;
-
-	mdp_hw = devm_kzalloc(&mgr->pdev->dev,
-			sizeof(struct sde_mdp_hw_resource), GFP_KERNEL);
-	if (!mdp_hw)
-		return ERR_PTR(-ENOMEM);
-
-	mdp_hw->ctl = sde_mdp_ctl_alloc(mdata, offset);
-	if (IS_ERR_OR_NULL(mdp_hw->ctl)) {
-		SDEROT_ERR("unable to allocate ctl\n");
-		ret = -ENODEV;
-		goto error;
-	}
-	mdp_hw->ctl->irq_num = irq_num;
-
-	mdp_hw->wb = sde_mdp_wb_assign(wb_id, mdp_hw->ctl->num);
-	if (IS_ERR_OR_NULL(mdp_hw->wb)) {
-		SDEROT_ERR("unable to allocate wb\n");
-		ret = -ENODEV;
-		goto error;
-	}
-
-	mdp_hw->ctl->wb = mdp_hw->wb;
-	mdp_hw->mixer = sde_mdp_mixer_assign(mdp_hw->wb->num, true);
-	if (IS_ERR_OR_NULL(mdp_hw->mixer)) {
-		SDEROT_ERR("unable to allocate wb mixer\n");
-		ret = -ENODEV;
-		goto error;
-	}
-
-	mdp_hw->ctl->mixer_left = mdp_hw->mixer;
-	mdp_hw->mixer->ctl = mdp_hw->ctl;
-
-	mdp_hw->mixer->rotator_mode = true;
-
-	switch (mdp_hw->mixer->num) {
-	case SDE_MDP_WB_LAYERMIXER0:
-		mdp_hw->ctl->opmode = SDE_MDP_CTL_OP_ROT0_MODE;
-		break;
-	case SDE_MDP_WB_LAYERMIXER1:
-		mdp_hw->ctl->opmode =  SDE_MDP_CTL_OP_ROT1_MODE;
-		break;
-	default:
-		SDEROT_ERR("invalid layer mixer=%d\n", mdp_hw->mixer->num);
-		ret = -EINVAL;
-		goto error;
-	}
-
-	mdp_hw->ctl->ops.start_fnc = sde_mdp_writeback_start;
-	mdp_hw->ctl->wb_type = SDE_MDP_WB_CTL_TYPE_BLOCK;
-
-	if (mdp_hw->ctl->ops.start_fnc)
-		ret = mdp_hw->ctl->ops.start_fnc(mdp_hw->ctl);
-
-	if (ret)
-		goto error;
-
-	/* override from dt */
-	pipe_ndx = wb_id;
-	mdp_hw->pipe = sde_mdp_pipe_assign(mdata, mdp_hw->mixer, pipe_ndx);
-	if (IS_ERR_OR_NULL(mdp_hw->pipe)) {
-		SDEROT_ERR("dma pipe allocation failed\n");
-		ret = -ENODEV;
-		goto error;
-	}
-
-	mdp_hw->pipe->mixer_left = mdp_hw->mixer;
-	mdp_hw->hw.wb_id = mdp_hw->wb->num;
-	mdp_hw->hw.pending_count = 0;
-	atomic_set(&mdp_hw->hw.num_active, 0);
-	mdp_hw->hw.max_active = 1;
-	init_waitqueue_head(&mdp_hw->hw.wait_queue);
-
-	return mdp_hw;
-error:
-	if (!IS_ERR_OR_NULL(mdp_hw->pipe))
-		sde_mdp_pipe_destroy(mdp_hw->pipe);
-	if (!IS_ERR_OR_NULL(mdp_hw->ctl)) {
-		if (mdp_hw->ctl->ops.stop_fnc)
-			mdp_hw->ctl->ops.stop_fnc(mdp_hw->ctl, 0);
-		sde_mdp_ctl_free(mdp_hw->ctl);
-	}
-	devm_kfree(&mgr->pdev->dev, mdp_hw);
-
-	return ERR_PTR(ret);
-}
-
-static void sde_rotator_hw_free(struct sde_rot_mgr *mgr,
-	struct sde_mdp_hw_resource *mdp_hw)
-{
-	struct sde_mdp_mixer *mixer;
-	struct sde_mdp_ctl *ctl;
-
-	if (!mgr || !mdp_hw)
-		return;
-
-	mixer = mdp_hw->pipe->mixer_left;
-
-	sde_mdp_pipe_destroy(mdp_hw->pipe);
-
-	ctl = sde_mdp_ctl_mixer_switch(mixer->ctl,
-		SDE_MDP_WB_CTL_TYPE_BLOCK);
-	if (ctl) {
-		if (ctl->ops.stop_fnc)
-			ctl->ops.stop_fnc(ctl, 0);
-		sde_mdp_ctl_free(ctl);
-	}
-
-	devm_kfree(&mgr->pdev->dev, mdp_hw);
-}
-
-static struct sde_rot_hw_resource *sde_rotator_hw_alloc_ext(
-	struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
-{
-	struct sde_mdp_hw_resource *mdp_hw;
-	struct sde_rotator_r1_data *hw_data;
-
-	if (!mgr || !mgr->hw_data)
-		return NULL;
-
-	hw_data = mgr->hw_data;
-	mdp_hw = hw_data->mdp_hw;
-
-	return &mdp_hw->hw;
-}
-
-static void sde_rotator_hw_free_ext(struct sde_rot_mgr *mgr,
-	struct sde_rot_hw_resource *hw)
-{
-	/* currently nothing specific for this device */
-}
-
-static void sde_rotator_translate_rect(struct sde_rect *dst,
-	struct sde_rect *src)
-{
-	dst->x = src->x;
-	dst->y = src->y;
-	dst->w = src->w;
-	dst->h = src->h;
-}
-
-static u32 sde_rotator_translate_flags(u32 input)
-{
-	u32 output = 0;
-
-	if (input & SDE_ROTATION_NOP)
-		output |= SDE_ROT_NOP;
-	if (input & SDE_ROTATION_FLIP_LR)
-		output |= SDE_FLIP_LR;
-	if (input & SDE_ROTATION_FLIP_UD)
-		output |= SDE_FLIP_UD;
-	if (input & SDE_ROTATION_90)
-		output |= SDE_ROT_90;
-	if (input & SDE_ROTATION_DEINTERLACE)
-		output |= SDE_DEINTERLACE;
-	if (input & SDE_ROTATION_SECURE)
-		output |= SDE_SECURE_OVERLAY_SESSION;
-	return output;
-}
-
-static int sde_rotator_config_hw(struct sde_rot_hw_resource *hw,
-	struct sde_rot_entry *entry)
-{
-	struct sde_mdp_hw_resource *mdp_hw;
-	struct sde_mdp_pipe *pipe;
-	struct sde_rotation_item *item;
-	int ret;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry");
-		return -EINVAL;
-	}
-
-	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
-
-	pipe = mdp_hw->pipe;
-	item = &entry->item;
-
-	pipe->flags = sde_rotator_translate_flags(item->flags);
-	pipe->src_fmt = sde_get_format_params(item->input.format);
-	pipe->img_width = item->input.width;
-	pipe->img_height = item->input.height;
-	sde_rotator_translate_rect(&pipe->src, &item->src_rect);
-	sde_rotator_translate_rect(&pipe->dst, &item->src_rect);
-
-	pipe->params_changed++;
-
-	ret = sde_mdp_pipe_queue_data(pipe, &entry->src_buf);
-	SDEROT_DBG("Config pipe. src{%u,%u,%u,%u}f=%u\n"
-		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
-		item->src_rect.x, item->src_rect.y,
-		item->src_rect.w, item->src_rect.h, item->input.format,
-		item->dst_rect.x, item->dst_rect.y,
-		item->dst_rect.w, item->dst_rect.h, item->output.format,
-		item->session_id);
-
-	return ret;
-}
-
-static int sde_rotator_cancel_hw(struct sde_rot_hw_resource *hw,
-	struct sde_rot_entry *entry)
-{
-	return 0;
-}
-
-static int sde_rotator_abort_hw(struct sde_rot_hw_resource *hw,
-	struct sde_rot_entry *entry)
-{
-	return 0;
-}
-
-static int sde_rotator_kickoff_entry(struct sde_rot_hw_resource *hw,
-	struct sde_rot_entry *entry)
-{
-	struct sde_mdp_hw_resource *mdp_hw;
-	int ret;
-	struct sde_mdp_writeback_arg wb_args;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry");
-		return -EINVAL;
-	}
-
-	wb_args.data = &entry->dst_buf;
-	wb_args.priv_data = entry;
-
-	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
-
-	ret = sde_mdp_writeback_display_commit(mdp_hw->ctl, &wb_args);
-	return ret;
-}
-
-static int sde_rotator_wait_for_entry(struct sde_rot_hw_resource *hw,
-	struct sde_rot_entry *entry)
-{
-	struct sde_mdp_hw_resource *mdp_hw;
-	int ret;
-	struct sde_mdp_ctl *ctl;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry");
-		return -EINVAL;
-	}
-
-	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
-
-	ctl = mdp_hw->ctl;
-
-	ret = sde_mdp_display_wait4comp(ctl);
-
-	return ret;
-}
-
-static int sde_rotator_hw_validate_entry(struct sde_rot_mgr *mgr,
-	struct sde_rot_entry *entry)
-{
-	int ret = 0;
-	u16 src_w, src_h, dst_w, dst_h, bit;
-	struct sde_rotation_item *item = &entry->item;
-	struct sde_mdp_format_params *fmt;
-
-	src_w = item->src_rect.w;
-	src_h = item->src_rect.h;
-
-	if (item->flags & SDE_ROTATION_90) {
-		dst_w = item->dst_rect.h;
-		dst_h = item->dst_rect.w;
-	} else {
-		dst_w = item->dst_rect.w;
-		dst_h = item->dst_rect.h;
-	}
-
-	entry->dnsc_factor_w = 0;
-	entry->dnsc_factor_h = 0;
-
-	if ((src_w != dst_w) || (src_h != dst_h)) {
-		if ((src_w % dst_w) || (src_h % dst_h)) {
-			SDEROT_DBG("non integral scale not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-		entry->dnsc_factor_w = src_w / dst_w;
-		bit = fls(entry->dnsc_factor_w);
-		if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 5)) {
-			SDEROT_DBG("non power-of-2 scale not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-		entry->dnsc_factor_h = src_h / dst_h;
-		bit = fls(entry->dnsc_factor_h);
-		if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 5)) {
-			SDEROT_DBG("non power-of-2 dscale not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-	}
-
-	fmt =  sde_get_format_params(item->output.format);
-	if (sde_mdp_is_ubwc_format(fmt) &&
-		(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
-		SDEROT_DBG("downscale with ubwc not support\n");
-		ret = -EINVAL;
-	}
-
-dnsc_err:
-
-	/* Downscaler does not support asymmetrical dnsc */
-	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
-		SDEROT_DBG("asymmetric downscale not support\n");
-		ret = -EINVAL;
-	}
-
-	if (ret) {
-		entry->dnsc_factor_w = 0;
-		entry->dnsc_factor_h = 0;
-	}
-	return ret;
-}
-
-static ssize_t sde_rotator_hw_show_caps(struct sde_rot_mgr *mgr,
-		struct device_attribute *attr, char *buf, ssize_t len)
-{
-	struct sde_rotator_r1_data *hw_data;
-	int cnt = 0;
-
-	if (!mgr || !buf)
-		return 0;
-
-	hw_data = mgr->hw_data;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	SPRINT("wb_id=%d\n", hw_data->wb_id);
-	SPRINT("ctl_id=%d\n", hw_data->ctl_id);
-	return cnt;
-}
-
-static ssize_t sde_rotator_hw_show_state(struct sde_rot_mgr *mgr,
-		struct device_attribute *attr, char *buf, ssize_t len)
-{
-	struct sde_rotator_r1_data *hw_data;
-	int cnt = 0;
-
-	if (!mgr || !buf)
-		return 0;
-
-	hw_data = mgr->hw_data;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	if (hw_data && hw_data->mdp_hw) {
-		struct sde_rot_hw_resource *hw = &hw_data->mdp_hw->hw;
-
-		SPRINT("irq_num=%d\n", hw_data->irq_num);
-		SPRINT("max_active=%d\n", hw->max_active);
-		SPRINT("num_active=%d\n", atomic_read(&hw->num_active));
-		SPRINT("pending_cnt=%u\n", hw->pending_count);
-	}
-
-	return cnt;
-}
-
-/*
- * sde_hw_rotator_get_pixfmt - get the indexed pixel format
- * @mgr: Pointer to rotator manager
- * @index: index of pixel format
- * @input: true for input port; false for output port
- * @mode: operating mode
- */
-static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
-		int index, bool input, u32 mode)
-{
-	if (input) {
-		if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
-			return sde_hw_rotator_input_pixfmts[index];
-		else
-			return 0;
-	} else {
-		if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
-			return sde_hw_rotator_output_pixfmts[index];
-		else
-			return 0;
-	}
-}
-
-/*
- * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
- * @mgr: Pointer to rotator manager
- * @pixfmt: pixel format to be verified
- * @input: true for input port; false for output port
- * @mode: operating mode
- */
-static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
-		bool input, u32 mode)
-{
-	int i;
-
-	if (input) {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
-			if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
-				return true;
-	} else {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
-			if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
-				return true;
-	}
-
-	return false;
-}
-
-static int sde_rotator_hw_parse_dt(struct sde_rotator_r1_data *hw_data,
-		struct platform_device *dev)
-{
-	int ret = 0;
-	u32 data;
-
-	if (!hw_data || !dev)
-		return -EINVAL;
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,mdss-wb-id", &data);
-	if (ret)
-		hw_data->wb_id = -1;
-	else
-		hw_data->wb_id = (int) data;
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,mdss-ctl-id", &data);
-	if (ret)
-		hw_data->ctl_id = -1;
-	else
-		hw_data->ctl_id = (int) data;
-
-	return ret;
-}
-
-static int sde_rotator_hw_rev_init(struct sde_rot_data_type *mdata)
-{
-	if (!mdata) {
-		SDEROT_ERR("null rotator data\n");
-		return -EINVAL;
-	}
-
-	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
-	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
-	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
-	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
-	set_bit(SDE_CAPS_R1_WB, mdata->sde_caps_map);
-
-	return 0;
-}
-
-enum {
-	SDE_ROTATOR_INTR_WB_0,
-	SDE_ROTATOR_INTR_WB_1,
-	SDE_ROTATOR_INTR_MAX,
-};
-
-struct intr_callback {
-	void (*func)(void *data);
-	void *arg;
-};
-
-struct intr_callback sde_intr_cb[SDE_ROTATOR_INTR_MAX];
-
-int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
-			       void (*fnc_ptr)(void *), void *arg)
-{
-	if (intf_num >= SDE_ROTATOR_INTR_MAX) {
-		SDEROT_WARN("invalid intr type=%u intf_num=%u\n",
-				intr_type, intf_num);
-		return -EINVAL;
-	}
-
-	sde_intr_cb[intf_num].func = fnc_ptr;
-	sde_intr_cb[intf_num].arg = arg;
-
-	return 0;
-}
-
-static irqreturn_t sde_irq_handler(int irq, void *ptr)
-{
-	struct sde_rot_data_type *mdata = ptr;
-	irqreturn_t ret = IRQ_NONE;
-	u32 isr;
-
-	isr = readl_relaxed(mdata->mdp_base + SDE_MDP_REG_INTR_STATUS);
-
-	SDEROT_DBG("intr_status = %8.8x\n", isr);
-
-	if (isr & SDE_MDP_INTR_WB_0_DONE) {
-		struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_0];
-
-		if (cb->func) {
-			writel_relaxed(SDE_MDP_INTR_WB_0_DONE,
-				mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
-			cb->func(cb->arg);
-			ret = IRQ_HANDLED;
-		}
-	}
-
-	if (isr & SDE_MDP_INTR_WB_1_DONE) {
-		struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_1];
-
-		if (cb->func) {
-			writel_relaxed(SDE_MDP_INTR_WB_1_DONE,
-				mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
-			cb->func(cb->arg);
-			ret = IRQ_HANDLED;
-		}
-	}
-
-	return ret;
-}
-
-static void sde_rotator_hw_destroy(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_rotator_r1_data *hw_data;
-
-	if (!mgr || !mgr->pdev || !mgr->hw_data)
-		return;
-
-	hw_data = mgr->hw_data;
-	if (hw_data->irq_num >= 0)
-		devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
-	sde_rotator_hw_free(mgr, hw_data->mdp_hw);
-	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
-	mgr->hw_data = NULL;
-}
-
-int sde_rotator_r1_init(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_rotator_r1_data *hw_data;
-	int ret;
-
-	if (!mgr || !mgr->pdev) {
-		SDEROT_ERR("null rotator manager/platform device");
-		return -EINVAL;
-	}
-
-	hw_data = devm_kzalloc(&mgr->pdev->dev,
-			sizeof(struct sde_rotator_r1_data), GFP_KERNEL);
-	if (hw_data == NULL)
-		return -ENOMEM;
-
-	mgr->hw_data = hw_data;
-	mgr->ops_config_hw = sde_rotator_config_hw;
-	mgr->ops_cancel_hw = sde_rotator_cancel_hw;
-	mgr->ops_abort_hw = sde_rotator_abort_hw;
-	mgr->ops_kickoff_entry = sde_rotator_kickoff_entry;
-	mgr->ops_wait_for_entry = sde_rotator_wait_for_entry;
-	mgr->ops_hw_alloc = sde_rotator_hw_alloc_ext;
-	mgr->ops_hw_free = sde_rotator_hw_free_ext;
-	mgr->ops_hw_destroy = sde_rotator_hw_destroy;
-	mgr->ops_hw_validate_entry = sde_rotator_hw_validate_entry;
-	mgr->ops_hw_show_caps = sde_rotator_hw_show_caps;
-	mgr->ops_hw_show_state = sde_rotator_hw_show_state;
-	mgr->ops_hw_create_debugfs = sde_rotator_r1_create_debugfs;
-	mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
-	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
-
-	ret = sde_rotator_hw_parse_dt(mgr->hw_data, mgr->pdev);
-	if (ret)
-		goto error_parse_dt;
-
-	hw_data->irq_num = platform_get_irq(mgr->pdev, 0);
-	if (hw_data->irq_num < 0) {
-		SDEROT_ERR("fail to get rotator irq\n");
-	} else {
-		ret = devm_request_threaded_irq(&mgr->pdev->dev,
-				hw_data->irq_num,
-				sde_irq_handler, NULL,
-				0, "sde_rotator_r1", mdata);
-		if (ret) {
-			SDEROT_ERR("fail to request irq r:%d\n", ret);
-			hw_data->irq_num = -1;
-		} else {
-			disable_irq(hw_data->irq_num);
-		}
-	}
-
-	hw_data->mdp_hw = sde_rotator_hw_alloc(mgr, hw_data->ctl_id,
-			hw_data->wb_id, hw_data->irq_num);
-	if (IS_ERR_OR_NULL(hw_data->mdp_hw))
-		goto error_hw_alloc;
-
-	ret = sde_rotator_hw_rev_init(sde_rot_get_mdata());
-	if (ret)
-		goto error_hw_rev_init;
-
-	hw_data->mgr = mgr;
-
-	return 0;
-error_hw_rev_init:
-	if (hw_data->irq_num >= 0)
-		devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
-	sde_rotator_hw_free(mgr, hw_data->mdp_hw);
-error_hw_alloc:
-	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
-error_parse_dt:
-	return ret;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h
deleted file mode 100644
index 1a90510..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_R1_H__
-#define __SDE_ROTATOR_R1_H__
-
-#include <linux/types.h>
-
-#include "sde_rotator_core.h"
-
-int sde_rotator_r1_init(struct sde_rot_mgr *mgr);
-
-#endif /* __SDE_ROTATOR_R1_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
deleted file mode 100644
index 9d645ec..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
+++ /dev/null
@@ -1,259 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sort.h>
-#include <linux/clk.h>
-#include <linux/bitmap.h>
-
-#include "sde_rotator_r1_hwio.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_r1_internal.h"
-#include "sde_rotator_core.h"
-
-struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
-					       u32 off)
-{
-	struct sde_mdp_ctl *ctl = NULL;
-	static struct sde_mdp_ctl sde_ctl[5];
-	static const u32 offset[] = {0x00002000, 0x00002200, 0x00002400,
-			     0x00002600, 0x00002800};
-
-	if (off >= ARRAY_SIZE(offset)) {
-		SDEROT_ERR("invalid parameters\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctl = &sde_ctl[off];
-	ctl->mdata = mdata;
-	ctl->num = off;
-	ctl->offset = offset[ctl->num];
-	ctl->base = mdata->sde_io.base + ctl->offset;
-	return ctl;
-}
-
-int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl)
-{
-	if (!ctl)
-		return -ENODEV;
-
-	if (ctl->wb)
-		sde_mdp_wb_free(ctl->wb);
-
-	ctl->is_secure = false;
-	ctl->mixer_left = NULL;
-	ctl->mixer_right = NULL;
-	ctl->wb = NULL;
-	memset(&ctl->ops, 0, sizeof(ctl->ops));
-
-	return 0;
-}
-
-struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb)
-{
-	struct sde_mdp_mixer *mixer = NULL;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	static struct sde_mdp_mixer sde_mixer[16];
-	static const u32 offset[] = {0x00048000, 0x00049000};
-
-	if (id >= ARRAY_SIZE(offset)) {
-		SDEROT_ERR("invalid parameters\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	mixer = &sde_mixer[id];
-	mixer->num = id;
-	mixer->offset = offset[mixer->num];
-	mixer->base = mdata->sde_io.base + mixer->offset;
-	return mixer;
-}
-
-static void sde_mdp_mixer_setup(struct sde_mdp_ctl *master_ctl,
-	int mixer_mux)
-{
-	int i;
-	struct sde_mdp_ctl *ctl = NULL;
-	struct sde_mdp_mixer *mixer = sde_mdp_mixer_get(master_ctl,
-		mixer_mux);
-
-	if (!mixer)
-		return;
-
-	ctl = mixer->ctl;
-	if (!ctl)
-		return;
-
-	/* check if mixer setup for rotator is needed */
-	if (mixer->rotator_mode) {
-		int nmixers = 5;
-
-		for (i = 0; i < nmixers; i++)
-			sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_LAYER(i), 0);
-		return;
-	}
-}
-
-struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux)
-{
-	struct sde_mdp_mixer *mixer = NULL;
-
-	if (!ctl) {
-		SDEROT_ERR("ctl not initialized\n");
-		return NULL;
-	}
-
-	switch (mux) {
-	case SDE_MDP_MIXER_MUX_DEFAULT:
-	case SDE_MDP_MIXER_MUX_LEFT:
-		mixer = ctl->mixer_left;
-		break;
-	case SDE_MDP_MIXER_MUX_RIGHT:
-		mixer = ctl->mixer_right;
-		break;
-	}
-
-	return mixer;
-}
-
-int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe)
-{
-	u32 flush_bits = 0;
-
-	if (pipe->type == SDE_MDP_PIPE_TYPE_DMA)
-		flush_bits |= BIT(pipe->num) << 5;
-	else if (pipe->num == SDE_MDP_SSPP_VIG3 ||
-			pipe->num == SDE_MDP_SSPP_RGB3)
-		flush_bits |= BIT(pipe->num) << 10;
-	else if (pipe->type == SDE_MDP_PIPE_TYPE_CURSOR)
-		flush_bits |= BIT(22 + pipe->num - SDE_MDP_SSPP_CURSOR0);
-	else /* RGB/VIG 0-2 pipes */
-		flush_bits |= BIT(pipe->num);
-
-	return flush_bits;
-}
-
-int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
-			 struct sde_mdp_mixer *mixer, int params_changed)
-{
-	struct sde_mdp_ctl *ctl;
-
-	if (!pipe)
-		return -EINVAL;
-	if (!mixer)
-		return -EINVAL;
-	ctl = mixer->ctl;
-	if (!ctl)
-		return -EINVAL;
-
-	ctl->flush_bits |= sde_mdp_get_pipe_flush_bits(pipe);
-	return 0;
-}
-
-int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl)
-{
-	int ret = 0;
-
-	if (!ctl) {
-		SDEROT_ERR("invalid ctl\n");
-		return -ENODEV;
-	}
-
-	if (ctl->ops.wait_fnc)
-		ret = ctl->ops.wait_fnc(ctl, NULL);
-
-	return ret;
-}
-
-int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
-	struct sde_mdp_commit_cb *commit_cb)
-{
-	int ret = 0;
-	u32 ctl_flush_bits = 0;
-
-	if (!ctl) {
-		SDEROT_ERR("display function not set\n");
-		return -ENODEV;
-	}
-
-	if (ctl->ops.prepare_fnc)
-		ret = ctl->ops.prepare_fnc(ctl, arg);
-
-	if (ret) {
-		SDEROT_ERR("error preparing display\n");
-		goto done;
-	}
-
-	sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_LEFT);
-	sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_RIGHT);
-
-	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_TOP, ctl->opmode);
-	ctl->flush_bits |= BIT(17);	/* CTL */
-
-	ctl_flush_bits = ctl->flush_bits;
-
-	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, ctl_flush_bits);
-	/* ensure the flush command is issued after the barrier */
-	wmb();
-	ctl->flush_reg_data = ctl_flush_bits;
-	ctl->flush_bits = 0;
-	if (ctl->ops.display_fnc)
-		ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
-	if (ret)
-		SDEROT_WARN("ctl %d error displaying frame\n", ctl->num);
-
-done:
-	return ret;
-}
-
-/**
- * @sde_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
- * @ctl: Pointer to ctl structure to be switched.
- * @return_type: wb_type of the ctl to be switched to.
- *
- * Virtual mixer switch should be performed only when there is no
- * dedicated wfd block and writeback block is shared.
- */
-struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
-					       u32 return_type)
-{
-	if (ctl->wb_type == return_type)
-		return ctl;
-
-	SDEROT_ERR("unable to switch mixer to type=%d\n", return_type);
-	return NULL;
-}
-
-struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_mdp_writeback *wb = NULL;
-	static struct sde_mdp_writeback sde_wb[16];
-	static const u32 offset[] = {0x00065000, 0x00065800, 0x00066000};
-
-	if (num >= ARRAY_SIZE(offset)) {
-		SDEROT_ERR("invalid parameters\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	wb = &sde_wb[num];
-	wb->num = num;
-	wb->offset = offset[wb->num];
-	if (!wb)
-		return NULL;
-
-	wb->base = mdata->sde_io.base;
-	wb->base += wb->offset;
-	return wb;
-}
-
-void sde_mdp_wb_free(struct sde_mdp_writeback *wb)
-{
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c
deleted file mode 100644
index 09cce4e..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_rotator_r1_debug.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_r1.h"
-#include "sde_rotator_r1_internal.h"
-
-/*
- * sde_rotator_r1_create_debugfs - Setup rotator r1 debugfs directory structure.
- * @rot_dev: Pointer to rotator device
- */
-int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	struct sde_rotator_r1_data *hw_data;
-
-	if (!mgr || !debugfs_root || !mgr->hw_data)
-		return -EINVAL;
-
-	hw_data = mgr->hw_data;
-
-	/* add debugfs */
-
-	return 0;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h
deleted file mode 100644
index 11a3104..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_R3_DEBUG_H__
-#define __SDE_ROTATOR_R3_DEBUG_H__
-
-#include <linux/types.h>
-#include <linux/dcache.h>
-
-struct sde_rot_mgr;
-
-#if defined(CONFIG_DEBUG_FS)
-int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root);
-#else
-static inline
-int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	return 0;
-}
-#endif
-#endif /* __SDE_ROTATOR_R3_DEBUG_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h
deleted file mode 100644
index 0f26fc4..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_R1_HWIO_H
-#define SDE_ROTATOR_R1_HWIO_H
-
-#include <linux/bitops.h>
-
-#define SDE_MDP_FETCH_CONFIG_RESET_VALUE		0x00000087
-
-#define SDE_MDP_REG_HW_VERSION				0x0
-#define SDE_MDP_REG_INTR_EN				0x00010
-#define SDE_MDP_REG_INTR_STATUS				0x00014
-#define SDE_MDP_REG_INTR_CLEAR				0x00018
-
-#define SDE_MDP_INTR_WB_0_DONE				BIT(0)
-#define SDE_MDP_INTR_WB_1_DONE				BIT(1)
-
-enum mdss_mdp_intr_type {
-	SDE_MDP_IRQ_WB_ROT_COMP = 0,
-	SDE_MDP_IRQ_WB_WFD = 4,
-	SDE_MDP_IRQ_PING_PONG_COMP = 8,
-	SDE_MDP_IRQ_PING_PONG_RD_PTR = 12,
-	SDE_MDP_IRQ_PING_PONG_WR_PTR = 16,
-	SDE_MDP_IRQ_PING_PONG_AUTO_REF = 20,
-	SDE_MDP_IRQ_INTF_UNDER_RUN = 24,
-	SDE_MDP_IRQ_INTF_VSYNC = 25,
-};
-
-enum mdss_mdp_ctl_index {
-	SDE_MDP_CTL0,
-	SDE_MDP_CTL1,
-	SDE_MDP_CTL2,
-	SDE_MDP_CTL3,
-	SDE_MDP_CTL4,
-	SDE_MDP_CTL5,
-	SDE_MDP_MAX_CTL
-};
-
-#define SDE_MDP_REG_CTL_LAYER(lm)	\
-			((lm == 5) ? (0x024) : ((lm) * 0x004))
-#define SDE_MDP_REG_CTL_TOP				0x014
-#define SDE_MDP_REG_CTL_FLUSH				0x018
-#define SDE_MDP_REG_CTL_START				0x01C
-
-#define SDE_MDP_CTL_OP_ROT0_MODE			0x1
-#define SDE_MDP_CTL_OP_ROT1_MODE			0x2
-
-enum sde_mdp_sspp_index {
-	SDE_MDP_SSPP_VIG0,
-	SDE_MDP_SSPP_VIG1,
-	SDE_MDP_SSPP_VIG2,
-	SDE_MDP_SSPP_RGB0,
-	SDE_MDP_SSPP_RGB1,
-	SDE_MDP_SSPP_RGB2,
-	SDE_MDP_SSPP_DMA0,
-	SDE_MDP_SSPP_DMA1,
-	SDE_MDP_SSPP_VIG3,
-	SDE_MDP_SSPP_RGB3,
-	SDE_MDP_SSPP_CURSOR0,
-	SDE_MDP_SSPP_CURSOR1,
-	SDE_MDP_MAX_SSPP
-};
-
-#define SDE_MDP_REG_SSPP_SRC_SIZE			0x000
-#define SDE_MDP_REG_SSPP_SRC_IMG_SIZE			0x004
-#define SDE_MDP_REG_SSPP_SRC_XY				0x008
-#define SDE_MDP_REG_SSPP_OUT_SIZE			0x00C
-#define SDE_MDP_REG_SSPP_OUT_XY				0x010
-#define SDE_MDP_REG_SSPP_SRC0_ADDR			0x014
-#define SDE_MDP_REG_SSPP_SRC1_ADDR			0x018
-#define SDE_MDP_REG_SSPP_SRC2_ADDR			0x01C
-#define SDE_MDP_REG_SSPP_SRC3_ADDR			0x020
-#define SDE_MDP_REG_SSPP_SRC_YSTRIDE0			0x024
-#define SDE_MDP_REG_SSPP_SRC_YSTRIDE1			0x028
-#define SDE_MDP_REG_SSPP_STILE_FRAME_SIZE		0x02C
-#define SDE_MDP_REG_SSPP_SRC_FORMAT			0x030
-#define SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN		0x034
-#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C
-#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_0		0x050
-#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_1		0x054
-#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_2		0x058
-#define SDE_MDP_REG_SSPP_DANGER_LUT			0x060
-#define SDE_MDP_REG_SSPP_SAFE_LUT			0x064
-#define SDE_MDP_REG_SSPP_CREQ_LUT			0x068
-#define SDE_MDP_REG_SSPP_QOS_CTRL			0x06C
-#define SDE_MDP_REG_SSPP_CDP_CTRL			0x134
-#define SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS		0x138
-
-#define SDE_MDP_REG_SSPP_SRC_OP_MODE			0x038
-#define SDE_MDP_OP_FLIP_UD				BIT(14)
-#define SDE_MDP_OP_FLIP_LR				BIT(13)
-#define SDE_MDP_OP_BWC_EN				BIT(0)
-#define SDE_MDP_OP_BWC_LOSSLESS				(0 << 1)
-#define SDE_MDP_OP_BWC_Q_HIGH				(1 << 1)
-#define SDE_MDP_OP_BWC_Q_MED				(2 << 1)
-
-#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C
-#define SDE_MDP_REG_SSPP_FETCH_CONFIG			0x048
-#define SDE_MDP_REG_SSPP_VC1_RANGE			0x04C
-#define SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS		0x070
-#define SDE_MDP_REG_SSPP_CURRENT_SRC0_ADDR		0x0A4
-#define SDE_MDP_REG_SSPP_CURRENT_SRC1_ADDR		0x0A8
-#define SDE_MDP_REG_SSPP_CURRENT_SRC2_ADDR		0x0AC
-#define SDE_MDP_REG_SSPP_CURRENT_SRC3_ADDR		0x0B0
-#define SDE_MDP_REG_SSPP_DECIMATION_CONFIG		0x0B4
-
-enum sde_mdp_mixer_wb_index {
-	SDE_MDP_WB_LAYERMIXER0,
-	SDE_MDP_WB_LAYERMIXER1,
-	SDE_MDP_WB_MAX_LAYERMIXER,
-};
-
-enum mdss_mdp_writeback_index {
-	SDE_MDP_WRITEBACK0,
-	SDE_MDP_WRITEBACK1,
-	SDE_MDP_WRITEBACK2,
-	SDE_MDP_WRITEBACK3,
-	SDE_MDP_WRITEBACK4,
-	SDE_MDP_MAX_WRITEBACK
-};
-
-#define SDE_MDP_REG_WB_DST_FORMAT			0x000
-#define SDE_MDP_REG_WB_DST_OP_MODE			0x004
-#define SDE_MDP_REG_WB_DST_PACK_PATTERN			0x008
-#define SDE_MDP_REG_WB_DST0_ADDR			0x00C
-#define SDE_MDP_REG_WB_DST1_ADDR			0x010
-#define SDE_MDP_REG_WB_DST2_ADDR			0x014
-#define SDE_MDP_REG_WB_DST3_ADDR			0x018
-#define SDE_MDP_REG_WB_DST_YSTRIDE0			0x01C
-#define SDE_MDP_REG_WB_DST_YSTRIDE1			0x020
-#define SDE_MDP_REG_WB_DST_WRITE_CONFIG			0x048
-#define SDE_MDP_REG_WB_ROTATION_DNSCALER		0x050
-#define SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER		0x054
-#define SDE_MDP_REG_WB_OUT_SIZE				0x074
-#define SDE_MDP_REG_WB_ALPHA_X_VALUE			0x078
-#define SDE_MDP_REG_WB_DST_ADDR_SW_STATUS		0x2B0
-
-#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h
deleted file mode 100644
index 37421c2..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_R1_INTERNAL_H__
-#define __SDE_ROTATOR_R1_INTERNAL_H__
-
-#include <linux/types.h>
-#include <linux/file.h>
-#include <linux/kref.h>
-#include <linux/kernel.h>
-
-#include "sde_rotator_util.h"
-
-/**
- * enum sde_commit_stage_type - Indicate different commit stages
- */
-enum sde_commit_stage_type {
-	SDE_COMMIT_STAGE_SETUP_DONE,
-	SDE_COMMIT_STAGE_READY_FOR_KICKOFF,
-};
-
-enum sde_mdp_wb_ctl_type {
-	SDE_MDP_WB_CTL_TYPE_BLOCK = 1,
-	SDE_MDP_WB_CTL_TYPE_LINE
-};
-
-enum sde_mdp_mixer_mux {
-	SDE_MDP_MIXER_MUX_DEFAULT,
-	SDE_MDP_MIXER_MUX_LEFT,
-	SDE_MDP_MIXER_MUX_RIGHT,
-};
-
-enum sde_mdp_pipe_type {
-	SDE_MDP_PIPE_TYPE_UNUSED,
-	SDE_MDP_PIPE_TYPE_VIG,
-	SDE_MDP_PIPE_TYPE_RGB,
-	SDE_MDP_PIPE_TYPE_DMA,
-	SDE_MDP_PIPE_TYPE_CURSOR,
-};
-
-struct sde_mdp_data;
-struct sde_mdp_ctl;
-struct sde_mdp_pipe;
-struct sde_mdp_mixer;
-struct sde_mdp_wb;
-
-struct sde_mdp_writeback {
-	u32 num;
-	char __iomem *base;
-	u32 offset;
-};
-
-struct sde_mdp_ctl_intfs_ops {
-	int (*start_fnc)(struct sde_mdp_ctl *ctl);
-	int (*stop_fnc)(struct sde_mdp_ctl *ctl, int panel_power_state);
-	int (*prepare_fnc)(struct sde_mdp_ctl *ctl, void *arg);
-	int (*display_fnc)(struct sde_mdp_ctl *ctl, void *arg);
-	int (*wait_fnc)(struct sde_mdp_ctl *ctl, void *arg);
-};
-
-struct sde_mdp_ctl {
-	u32 num;
-	char __iomem *base;
-	u32 opmode;
-	u32 flush_bits;
-	u32 flush_reg_data;
-	bool is_secure;
-	struct sde_rot_data_type *mdata;
-	struct sde_mdp_mixer *mixer_left;
-	struct sde_mdp_mixer *mixer_right;
-	void *priv_data;
-	u32 wb_type;
-	struct sde_mdp_writeback *wb;
-	struct sde_mdp_ctl_intfs_ops ops;
-	u32 offset;
-	int irq_num;
-};
-
-struct sde_mdp_mixer {
-	u32 num;
-	char __iomem *base;
-	u8 rotator_mode;
-	struct sde_mdp_ctl *ctl;
-	u32 offset;
-};
-
-struct sde_mdp_shared_reg_ctrl {
-	u32 reg_off;
-	u32 bit_off;
-};
-
-struct sde_mdp_pipe {
-	u32 num;
-	u32 type;
-	u32 ndx;
-	char __iomem *base;
-	u32 xin_id;
-	u32 flags;
-	u32 bwc_mode;
-	u16 img_width;
-	u16 img_height;
-	u8 horz_deci;
-	u8 vert_deci;
-	struct sde_rect src;
-	struct sde_rect dst;
-	struct sde_mdp_format_params *src_fmt;
-	struct sde_mdp_plane_sizes src_planes;
-	struct sde_mdp_mixer *mixer_left;
-	struct sde_mdp_mixer *mixer_right;
-	struct sde_mdp_shared_reg_ctrl clk_ctrl;
-	u32 params_changed;
-	u32 offset;
-};
-
-struct sde_mdp_writeback_arg {
-	struct sde_mdp_data *data;
-	void *priv_data;
-};
-
-struct sde_mdp_commit_cb {
-	void *data;
-	int (*commit_cb_fnc)(enum sde_commit_stage_type commit_state,
-		void *data);
-};
-
-static inline void sde_mdp_ctl_write(struct sde_mdp_ctl *ctl,
-				      u32 reg, u32 val)
-{
-	SDEROT_DBG("ctl%d:%6.6x:%8.8x\n", ctl->num, ctl->offset + reg, val);
-	writel_relaxed(val, ctl->base + reg);
-}
-
-static inline bool sde_mdp_is_nrt_vbif_client(struct sde_rot_data_type *mdata,
-					struct sde_mdp_pipe *pipe)
-{
-	return mdata->vbif_nrt_io.base && pipe->mixer_left &&
-			pipe->mixer_left->rotator_mode;
-}
-int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
-				void (*fnc_ptr)(void *), void *arg);
-int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl);
-int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg);
-int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
-			     struct sde_mdp_data *src_data);
-struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
-					       u32 off);
-struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index);
-void sde_mdp_wb_free(struct sde_mdp_writeback *wb);
-struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb);
-int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl);
-struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
-	struct sde_mdp_mixer *mixer, u32 ndx);
-int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe);
-int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl);
-int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
-	struct sde_mdp_commit_cb *commit_cb);
-int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
-			 struct sde_mdp_mixer *mixer, int params_changed);
-int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe);
-struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
-					       u32 return_type);
-struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux);
-#endif /* __SDE_ROTATOR_R1_INTERNAL_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
deleted file mode 100644
index 8410d17..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
+++ /dev/null
@@ -1,422 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/bitmap.h>
-#include <linux/errno.h>
-#include <linux/iopoll.h>
-#include <linux/mutex.h>
-
-#include "sde_rotator_r1_hwio.h"
-#include "sde_rotator_base.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_r1_internal.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_trace.h"
-
-#define SMP_MB_SIZE		(mdss_res->smp_mb_size)
-#define SMP_MB_CNT		(mdss_res->smp_mb_cnt)
-#define SMP_MB_ENTRY_SIZE	16
-#define MAX_BPP 4
-
-#define PIPE_CLEANUP_TIMEOUT_US 100000
-
-/* following offsets are relative to ctrl register bit offset */
-#define CLK_FORCE_ON_OFFSET	0x0
-#define CLK_FORCE_OFF_OFFSET	0x1
-/* following offsets are relative to status register bit offset */
-#define CLK_STATUS_OFFSET	0x0
-
-#define QOS_LUT_NRT_READ	0x0
-#define PANIC_LUT_NRT_READ	0x0
-#define ROBUST_LUT_NRT_READ	0xFFFF
-
-/* Priority 2, no panic */
-#define VBLANK_PANIC_DEFAULT_CONFIG 0x200000
-
-static inline void sde_mdp_pipe_write(struct sde_mdp_pipe *pipe,
-				       u32 reg, u32 val)
-{
-	SDEROT_DBG("pipe%d:%6.6x:%8.8x\n", pipe->num, pipe->offset + reg, val);
-	writel_relaxed(val, pipe->base + reg);
-}
-
-static int sde_mdp_pipe_qos_lut(struct sde_mdp_pipe *pipe)
-{
-	u32 qos_lut;
-
-	qos_lut = QOS_LUT_NRT_READ; /* low priority for nrt */
-
-	trace_rot_perf_set_qos_luts(pipe->num, pipe->src_fmt->format,
-		qos_lut, sde_mdp_is_linear_format(pipe->src_fmt));
-
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_CREQ_LUT,
-		qos_lut);
-
-	return 0;
-}
-
-/**
- * @sde_mdp_pipe_nrt_vbif_setup -
- * @mdata: pointer to global driver data.
- * @pipe: pointer to a pipe
- *
- * This function assumes that clocks are enabled, so it is callers
- * responsibility to enable clocks before calling this function.
- */
-static void sde_mdp_pipe_nrt_vbif_setup(struct sde_rot_data_type *mdata,
-					struct sde_mdp_pipe *pipe)
-{
-	uint32_t nrt_vbif_client_sel;
-
-	if (pipe->type != SDE_MDP_PIPE_TYPE_DMA)
-		return;
-
-	nrt_vbif_client_sel = readl_relaxed(mdata->mdp_base +
-				MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
-	if (sde_mdp_is_nrt_vbif_client(mdata, pipe))
-		nrt_vbif_client_sel |= BIT(pipe->num - SDE_MDP_SSPP_DMA0);
-	else
-		nrt_vbif_client_sel &= ~BIT(pipe->num - SDE_MDP_SSPP_DMA0);
-	SDEROT_DBG("mdp:%6.6x:%8.8x\n", MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL,
-			nrt_vbif_client_sel);
-	writel_relaxed(nrt_vbif_client_sel,
-			mdata->mdp_base + MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
-}
-
-/**
- * sde_mdp_qos_vbif_remapper_setup - Program the VBIF QoS remapper
- *		registers based on real or non real time clients
- * @mdata:	Pointer to the global mdss data structure.
- * @pipe:	Pointer to source pipe struct to get xin id's.
- * @is_realtime:	To determine if pipe's client is real or
- *			non real time.
- * This function assumes that clocks are on, so it is caller responsibility to
- * call this function with clocks enabled.
- */
-static void sde_mdp_qos_vbif_remapper_setup(struct sde_rot_data_type *mdata,
-			struct sde_mdp_pipe *pipe, bool is_realtime)
-{
-	u32 mask, reg_val, i, vbif_qos;
-
-	if (mdata->npriority_lvl == 0)
-		return;
-
-	for (i = 0; i < mdata->npriority_lvl; i++) {
-		reg_val = SDE_VBIF_READ(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4);
-		mask = 0x3 << (pipe->xin_id * 2);
-		reg_val &= ~(mask);
-		vbif_qos = is_realtime ?
-			mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
-		reg_val |= vbif_qos << (pipe->xin_id * 2);
-		SDE_VBIF_WRITE(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4, reg_val);
-	}
-}
-
-struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
-	struct sde_mdp_mixer *mixer, u32 ndx)
-{
-	struct sde_mdp_pipe *pipe = NULL;
-	static struct sde_mdp_pipe sde_pipe[16];
-	static const u32 offset[] = {0x00025000, 0x00027000};
-	static const u32 xin_id[] = {2, 10};
-	static const struct sde_mdp_shared_reg_ctrl clk_ctrl[] = {
-		{0x2AC, 8},
-		{0x2B4, 8}
-	};
-
-	if (ndx >= ARRAY_SIZE(offset)) {
-		SDEROT_ERR("invalid parameters\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	pipe = &sde_pipe[ndx];
-	pipe->num = ndx + SDE_MDP_SSPP_DMA0;
-	pipe->offset = offset[pipe->num - SDE_MDP_SSPP_DMA0];
-	pipe->xin_id = xin_id[pipe->num - SDE_MDP_SSPP_DMA0];
-	pipe->base = mdata->sde_io.base + pipe->offset;
-	pipe->type = SDE_MDP_PIPE_TYPE_DMA;
-	pipe->mixer_left = mixer;
-	pipe->clk_ctrl = clk_ctrl[pipe->num - SDE_MDP_SSPP_DMA0];
-
-	return pipe;
-}
-
-int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe)
-{
-	return 0;
-}
-
-void sde_mdp_pipe_position_update(struct sde_mdp_pipe *pipe,
-		struct sde_rect *src, struct sde_rect *dst)
-{
-	u32 src_size, src_xy, dst_size, dst_xy;
-
-	src_size = (src->h << 16) | src->w;
-	src_xy = (src->y << 16) | src->x;
-	dst_size = (dst->h << 16) | dst->w;
-	dst_xy = (dst->y << 16) | dst->x;
-
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_SIZE, src_size);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_XY, src_xy);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_SIZE, dst_size);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_XY, dst_xy);
-}
-
-static int sde_mdp_image_setup(struct sde_mdp_pipe *pipe,
-					struct sde_mdp_data *data)
-{
-	u32 img_size, ystride0, ystride1;
-	u32 width, height, decimation;
-	int ret = 0;
-	struct sde_rect dst, src;
-	bool rotation = false;
-
-	SDEROT_DBG(
-		"ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
-			pipe->mixer_left->ctl->num, pipe->num,
-			pipe->img_width, pipe->img_height,
-			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
-			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
-
-	width = pipe->img_width;
-	height = pipe->img_height;
-
-	if (pipe->flags & SDE_SOURCE_ROTATED_90)
-		rotation = true;
-
-	sde_mdp_get_plane_sizes(pipe->src_fmt, width, height,
-			&pipe->src_planes, pipe->bwc_mode, rotation);
-
-	if (data != NULL) {
-		ret = sde_mdp_data_check(data, &pipe->src_planes,
-			pipe->src_fmt);
-		if (ret)
-			return ret;
-	}
-
-	if ((pipe->flags & SDE_DEINTERLACE) &&
-			!(pipe->flags & SDE_SOURCE_ROTATED_90)) {
-		int i;
-
-		for (i = 0; i < pipe->src_planes.num_planes; i++)
-			pipe->src_planes.ystride[i] *= 2;
-		width *= 2;
-		height /= 2;
-	}
-
-	decimation = ((1 << pipe->horz_deci) - 1) << 8;
-	decimation |= ((1 << pipe->vert_deci) - 1);
-	if (decimation)
-		SDEROT_DBG("Image decimation h=%d v=%d\n",
-				pipe->horz_deci, pipe->vert_deci);
-
-	dst = pipe->dst;
-	src = pipe->src;
-
-	ystride0 =  (pipe->src_planes.ystride[0]) |
-			(pipe->src_planes.ystride[1] << 16);
-	ystride1 =  (pipe->src_planes.ystride[2]) |
-			(pipe->src_planes.ystride[3] << 16);
-
-	img_size = (height << 16) | width;
-
-	sde_mdp_pipe_position_update(pipe, &src, &dst);
-
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_DECIMATION_CONFIG,
-			decimation);
-
-	return 0;
-}
-
-static int sde_mdp_format_setup(struct sde_mdp_pipe *pipe)
-{
-	struct sde_mdp_format_params *fmt;
-	u32 chroma_samp, unpack, src_format;
-	u32 secure = 0;
-	u32 opmode;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	fmt = pipe->src_fmt;
-
-	if (pipe->flags & SDE_SECURE_OVERLAY_SESSION)
-		secure = 0xF;
-
-	opmode = pipe->bwc_mode;
-	if (pipe->flags & SDE_FLIP_LR)
-		opmode |= SDE_MDP_OP_FLIP_LR;
-	if (pipe->flags & SDE_FLIP_UD)
-		opmode |= SDE_MDP_OP_FLIP_UD;
-
-	SDEROT_DBG("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
-			opmode);
-
-	chroma_samp = fmt->chroma_sample;
-	if (pipe->flags & SDE_SOURCE_ROTATED_90) {
-		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
-			chroma_samp = SDE_MDP_CHROMA_H1V2;
-		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
-			chroma_samp = SDE_MDP_CHROMA_H2V1;
-	}
-
-	src_format = (chroma_samp << 23) |
-		     (fmt->fetch_planes << 19) |
-		     (fmt->bits[C3_ALPHA] << 6) |
-		     (fmt->bits[C2_R_Cr] << 4) |
-		     (fmt->bits[C1_B_Cb] << 2) |
-		     (fmt->bits[C0_G_Y] << 0);
-
-	if (sde_mdp_is_tilea4x_format(fmt))
-		src_format |= BIT(30);
-
-	if (sde_mdp_is_tilea5x_format(fmt))
-		src_format |= BIT(31);
-
-	if (pipe->flags & SDE_ROT_90)
-		src_format |= BIT(11); /* ROT90 */
-
-	if (fmt->alpha_enable &&
-			fmt->fetch_planes != SDE_MDP_PLANE_INTERLEAVED)
-		src_format |= BIT(8); /* SRCC3_EN */
-
-	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
-			(fmt->element[1] << 8) | (fmt->element[0] << 0);
-	src_format |= ((fmt->unpack_count - 1) << 12) |
-			(fmt->unpack_tight << 17) |
-			(fmt->unpack_align_msb << 18) |
-			((fmt->bpp - 1) << 9);
-
-	if (sde_mdp_is_ubwc_format(fmt))
-		opmode |= BIT(0);
-
-	if (fmt->is_yuv)
-		src_format |= BIT(15);
-
-	if (fmt->frame_format != SDE_MDP_FMT_LINEAR
-		&& mdata->highest_bank_bit) {
-		sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_FETCH_CONFIG,
-			SDE_MDP_FETCH_CONFIG_RESET_VALUE |
-				 mdata->highest_bank_bit << 18);
-	}
-
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_FORMAT, src_format);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_OP_MODE, opmode);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
-
-	/* clear UBWC error */
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS, BIT(31));
-
-	return 0;
-}
-
-static int sde_mdp_src_addr_setup(struct sde_mdp_pipe *pipe,
-				   struct sde_mdp_data *src_data)
-{
-	struct sde_mdp_data data = *src_data;
-	u32 x = 0, y = 0;
-	int ret = 0;
-
-	SDEROT_DBG("pnum=%d\n", pipe->num);
-
-	ret = sde_mdp_data_check(&data, &pipe->src_planes, pipe->src_fmt);
-	if (ret)
-		return ret;
-
-	sde_rot_data_calc_offset(&data, x, y,
-		&pipe->src_planes, pipe->src_fmt);
-
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC0_ADDR, data.p[0].addr);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC1_ADDR, data.p[1].addr);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC2_ADDR, data.p[2].addr);
-	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC3_ADDR, data.p[3].addr);
-
-	return 0;
-}
-
-static void sde_mdp_set_ot_limit_pipe(struct sde_mdp_pipe *pipe)
-{
-	struct sde_mdp_set_ot_params ot_params = {0,};
-
-	ot_params.xin_id = pipe->xin_id;
-	ot_params.num = pipe->num;
-	ot_params.width = pipe->src.w;
-	ot_params.height = pipe->src.h;
-	ot_params.fps = 60;
-	ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
-	ot_params.reg_off_mdp_clk_ctrl = pipe->clk_ctrl.reg_off;
-	ot_params.bit_off_mdp_clk_ctrl = pipe->clk_ctrl.bit_off +
-		CLK_FORCE_ON_OFFSET;
-	ot_params.fmt = (pipe->src_fmt) ? pipe->src_fmt->format : 0;
-
-	sde_mdp_set_ot_limit(&ot_params);
-}
-
-int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
-			     struct sde_mdp_data *src_data)
-{
-	int ret = 0;
-	u32 params_changed;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	if (!pipe) {
-		SDEROT_ERR("pipe not setup properly for queue\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * Reprogram the pipe when there is no dedicated wfd blk and
-	 * virtual mixer is allocated for the DMA pipe during concurrent
-	 * line and block mode operations
-	 */
-
-	params_changed = (pipe->params_changed);
-	if (params_changed) {
-		bool is_realtime = !(pipe->mixer_left->rotator_mode);
-
-		sde_mdp_qos_vbif_remapper_setup(mdata, pipe, is_realtime);
-
-		if (mdata->vbif_nrt_io.base)
-			sde_mdp_pipe_nrt_vbif_setup(mdata, pipe);
-	}
-
-	if (params_changed) {
-		pipe->params_changed = 0;
-
-		ret = sde_mdp_image_setup(pipe, src_data);
-		if (ret) {
-			SDEROT_ERR("image setup error for pnum=%d\n",
-					pipe->num);
-			goto done;
-		}
-
-		ret = sde_mdp_format_setup(pipe);
-		if (ret) {
-			SDEROT_ERR("format %d setup error pnum=%d\n",
-			       pipe->src_fmt->format, pipe->num);
-			goto done;
-		}
-
-		if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map))
-			sde_mdp_pipe_qos_lut(pipe);
-
-		sde_mdp_set_ot_limit_pipe(pipe);
-	}
-
-	ret = sde_mdp_src_addr_setup(pipe, src_data);
-	if (ret) {
-		SDEROT_ERR("addr setup error for pnum=%d\n", pipe->num);
-		goto done;
-	}
-
-	sde_mdp_mixer_pipe_update(pipe, pipe->mixer_left,
-			params_changed);
-done:
-	return ret;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
deleted file mode 100644
index 420f81a..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "sde_rotator_r1_hwio.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_r1_internal.h"
-#include "sde_rotator_core.h"
-
-/* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT msecs_to_jiffies(84)
-
-/*
- * if BWC enabled and format is H1V2 or 420, do not use site C or I.
- * Hence, set the bits 29:26 in format register, as zero.
- */
-#define BWC_FMT_MASK	0xC3FFFFFF
-#define MDSS_DEFAULT_OT_SETTING    0x10
-
-enum sde_mdp_writeback_type {
-	SDE_MDP_WRITEBACK_TYPE_ROTATOR,
-	SDE_MDP_WRITEBACK_TYPE_LINE,
-	SDE_MDP_WRITEBACK_TYPE_WFD,
-};
-
-struct sde_mdp_writeback_ctx {
-	u32 wb_num;
-	char __iomem *base;
-	u8 ref_cnt;
-	u8 type;
-	struct completion wb_comp;
-	int comp_cnt;
-
-	u32 intr_type;
-	u32 intf_num;
-
-	u32 xin_id;
-	u32 wr_lim;
-	struct sde_mdp_shared_reg_ctrl clk_ctrl;
-
-	u32 opmode;
-	struct sde_mdp_format_params *dst_fmt;
-	u16 img_width;
-	u16 img_height;
-	u16 width;
-	u16 height;
-	struct sde_rect dst_rect;
-
-	u32 dnsc_factor_w;
-	u32 dnsc_factor_h;
-
-	u8 rot90;
-	u32 bwc_mode;
-
-	struct sde_mdp_plane_sizes dst_planes;
-
-	ktime_t start_time;
-	ktime_t end_time;
-	u32 offset;
-};
-
-static struct sde_mdp_writeback_ctx wb_ctx_list[SDE_MDP_MAX_WRITEBACK] = {
-	{
-		.type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
-		.intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
-		.intf_num = 0,
-		.xin_id = 3,
-		.clk_ctrl.reg_off = 0x2BC,
-		.clk_ctrl.bit_off = 0x8,
-	},
-	{
-		.type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
-		.intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
-		.intf_num = 1,
-		.xin_id = 11,
-		.clk_ctrl.reg_off = 0x2BC,
-		.clk_ctrl.bit_off = 0xC,
-	},
-};
-
-static inline void sde_wb_write(struct sde_mdp_writeback_ctx *ctx,
-				u32 reg, u32 val)
-{
-	SDEROT_DBG("wb%d:%6.6x:%8.8x\n", ctx->wb_num, ctx->offset + reg, val);
-	writel_relaxed(val, ctx->base + reg);
-}
-
-static int sde_mdp_writeback_addr_setup(struct sde_mdp_writeback_ctx *ctx,
-					 const struct sde_mdp_data *in_data)
-{
-	int ret;
-	struct sde_mdp_data data;
-
-	if (!in_data)
-		return -EINVAL;
-	data = *in_data;
-
-	SDEROT_DBG("wb_num=%d addr=0x%pa\n", ctx->wb_num, &data.p[0].addr);
-
-	ret = sde_mdp_data_check(&data, &ctx->dst_planes, ctx->dst_fmt);
-	if (ret)
-		return ret;
-
-	sde_rot_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
-			&ctx->dst_planes, ctx->dst_fmt);
-
-	if ((ctx->dst_fmt->fetch_planes == SDE_MDP_PLANE_PLANAR) &&
-			(ctx->dst_fmt->element[0] == C1_B_Cb))
-		swap(data.p[1].addr, data.p[2].addr);
-
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
-
-	return 0;
-}
-
-static int sde_mdp_writeback_format_setup(struct sde_mdp_writeback_ctx *ctx,
-		u32 format, struct sde_mdp_ctl *ctl)
-{
-	struct sde_mdp_format_params *fmt;
-	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
-	u32 dnsc_factor, write_config = 0;
-	u32 opmode = ctx->opmode;
-	bool rotation = false;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	SDEROT_DBG("wb_num=%d format=%d\n", ctx->wb_num, format);
-
-	if (ctx->rot90)
-		rotation = true;
-
-	fmt = sde_get_format_params(format);
-	if (!fmt) {
-		SDEROT_ERR("wb format=%d not supported\n", format);
-		return -EINVAL;
-	}
-
-	sde_mdp_get_plane_sizes(fmt, ctx->img_width, ctx->img_height,
-				 &ctx->dst_planes,
-				 ctx->opmode & SDE_MDP_OP_BWC_EN, rotation);
-
-	ctx->dst_fmt = fmt;
-
-	chroma_samp = fmt->chroma_sample;
-
-	dst_format = (chroma_samp << 23) |
-		     (fmt->fetch_planes << 19) |
-		     (fmt->bits[C3_ALPHA] << 6) |
-		     (fmt->bits[C2_R_Cr] << 4) |
-		     (fmt->bits[C1_B_Cb] << 2) |
-		     (fmt->bits[C0_G_Y] << 0);
-
-	dst_format &= BWC_FMT_MASK;
-
-	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
-		dst_format |= BIT(8); /* DSTC3_EN */
-		if (!fmt->alpha_enable)
-			dst_format |= BIT(14); /* DST_ALPHA_X */
-	}
-
-	if (fmt->is_yuv)
-		dst_format |= BIT(15);
-
-	pattern = (fmt->element[3] << 24) |
-			  (fmt->element[2] << 16) |
-			  (fmt->element[1] << 8)  |
-			  (fmt->element[0] << 0);
-
-	dst_format |= (fmt->unpack_align_msb << 18) |
-		      (fmt->unpack_tight << 17) |
-		      ((fmt->unpack_count - 1) << 12) |
-		      ((fmt->bpp - 1) << 9);
-
-	ystride0 = (ctx->dst_planes.ystride[0]) |
-		   (ctx->dst_planes.ystride[1] << 16);
-	ystride1 = (ctx->dst_planes.ystride[2]) |
-		   (ctx->dst_planes.ystride[3] << 16);
-	outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
-
-	if (sde_mdp_is_ubwc_format(fmt)) {
-		opmode |= BIT(0);
-
-		dst_format |= BIT(31);
-		if (mdata->highest_bank_bit)
-			write_config |= (mdata->highest_bank_bit << 8);
-
-		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC)
-			write_config |= 0x8;
-	}
-
-	if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR) {
-		dnsc_factor = (ctx->dnsc_factor_h) | (ctx->dnsc_factor_w << 16);
-		sde_wb_write(ctx, SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER,
-								dnsc_factor);
-	}
-	sde_wb_write(ctx, SDE_MDP_REG_WB_ALPHA_X_VALUE, 0xFF);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_FORMAT, dst_format);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_OP_MODE, opmode);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_PACK_PATTERN, pattern);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE0, ystride0);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE1, ystride1);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_OUT_SIZE, outsize);
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_WRITE_CONFIG, write_config);
-	return 0;
-}
-
-static int sde_mdp_writeback_prepare_rot(struct sde_mdp_ctl *ctl, void *arg)
-{
-	struct sde_mdp_writeback_ctx *ctx;
-	struct sde_mdp_writeback_arg *wb_args;
-	struct sde_rot_entry *entry;
-	struct sde_rotation_item *item;
-	struct sde_rot_data_type *mdata;
-	u32 format;
-
-	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
-	if (!ctx)
-		return -ENODEV;
-	wb_args = (struct sde_mdp_writeback_arg *) arg;
-	if (!wb_args)
-		return -ENOENT;
-
-	entry = (struct sde_rot_entry *) wb_args->priv_data;
-	if (!entry) {
-		SDEROT_ERR("unable to retrieve rot session ctl=%d\n", ctl->num);
-		return -ENODEV;
-	}
-	item = &entry->item;
-	mdata = ctl->mdata;
-	if (!mdata) {
-		SDEROT_ERR("no mdata attached to ctl=%d", ctl->num);
-		return -ENODEV;
-	}
-	SDEROT_DBG("rot setup wb_num=%d\n", ctx->wb_num);
-
-	ctx->opmode = BIT(6); /* ROT EN */
-	if (ctl->mdata->rot_block_size == 128)
-		ctx->opmode |= BIT(4); /* block size 128 */
-
-	ctx->bwc_mode = 0;
-	ctx->opmode |= ctx->bwc_mode;
-
-	ctx->img_width = item->output.width;
-	ctx->img_height = item->output.height;
-	ctx->width = ctx->dst_rect.w = item->dst_rect.w;
-	ctx->height = ctx->dst_rect.h = item->dst_rect.h;
-	ctx->dst_rect.x = item->dst_rect.x;
-	ctx->dst_rect.y = item->dst_rect.y;
-	ctx->dnsc_factor_w = entry->dnsc_factor_w;
-	ctx->dnsc_factor_h = entry->dnsc_factor_h;
-
-	ctx->rot90 = !!(item->flags & SDE_ROTATION_90);
-
-	format = item->output.format;
-
-	if (ctx->rot90)
-		ctx->opmode |= BIT(5); /* ROT 90 */
-
-	return sde_mdp_writeback_format_setup(ctx, format, ctl);
-}
-
-static int sde_mdp_writeback_stop(struct sde_mdp_ctl *ctl,
-	int panel_power_state)
-{
-	struct sde_mdp_writeback_ctx *ctx;
-
-	SDEROT_DBG("stop ctl=%d\n", ctl->num);
-
-	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
-	if (ctx) {
-		sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
-				NULL, NULL);
-
-		complete_all(&ctx->wb_comp);
-
-		ctl->priv_data = NULL;
-		ctx->ref_cnt--;
-	}
-
-	return 0;
-}
-
-static void sde_mdp_writeback_intr_done(void *arg)
-{
-	struct sde_mdp_ctl *ctl = arg;
-	struct sde_mdp_writeback_ctx *ctx = ctl->priv_data;
-
-	if (!ctx) {
-		SDEROT_ERR("invalid ctx\n");
-		return;
-	}
-
-	SDEROT_DBG("intr wb_num=%d\n", ctx->wb_num);
-	if (ctl->irq_num >= 0)
-		disable_irq_nosync(ctl->irq_num);
-	complete_all(&ctx->wb_comp);
-}
-
-static int sde_mdp_wb_wait4comp(struct sde_mdp_ctl *ctl, void *arg)
-{
-	struct sde_mdp_writeback_ctx *ctx;
-	int rc = 0;
-	u64 rot_time = 0;
-	u32 status, mask, isr = 0;
-
-	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
-	if (!ctx) {
-		SDEROT_ERR("invalid ctx\n");
-		return -ENODEV;
-	}
-
-	if (ctx->comp_cnt == 0)
-		return rc;
-
-	if (ctl->irq_num >= 0) {
-		rc = wait_for_completion_timeout(&ctx->wb_comp,
-				KOFF_TIMEOUT);
-		sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
-			NULL, NULL);
-
-		if (rc == 0) {
-			mask = BIT(ctx->intr_type + ctx->intf_num);
-
-			isr = readl_relaxed(ctl->mdata->mdp_base +
-						SDE_MDP_REG_INTR_STATUS);
-			status = mask & isr;
-
-			SDEROT_INFO_ONCE(
-					"mask: 0x%x, isr: 0x%x, status: 0x%x\n",
-					mask, isr, status);
-
-			if (status) {
-				SDEROT_WARN("wb done but irq not triggered\n");
-				writel_relaxed(BIT(ctl->wb->num),
-						ctl->mdata->mdp_base +
-						SDE_MDP_REG_INTR_CLEAR);
-				sde_mdp_writeback_intr_done(ctl);
-				rc = 0;
-			} else {
-				rc = -ENODEV;
-				WARN(1, "wb timeout (%d) ctl=%d\n",
-								rc, ctl->num);
-				if (ctl->irq_num >= 0)
-					disable_irq_nosync(ctl->irq_num);
-			}
-		} else {
-			rc = 0;
-		}
-	} else {
-		/* use polling if interrupt is not available */
-		int cnt = 200;
-
-		mask = BIT(ctl->wb->num);
-		do {
-			udelay(500);
-			isr = readl_relaxed(ctl->mdata->mdp_base +
-					SDE_MDP_REG_INTR_STATUS);
-			status = mask & isr;
-			cnt--;
-		} while (cnt > 0 && !status);
-		writel_relaxed(mask, ctl->mdata->mdp_base +
-				SDE_MDP_REG_INTR_CLEAR);
-
-		rc = (status) ? 0 : -ENODEV;
-	}
-
-	if (rc == 0)
-		ctx->end_time = ktime_get();
-
-	sde_smmu_ctrl(0);
-	ctx->comp_cnt--;
-
-	if (!rc) {
-		rot_time = (u64)ktime_to_us(ctx->end_time) -
-				(u64)ktime_to_us(ctx->start_time);
-		SDEROT_DBG(
-			"ctx%d type:%d xin_id:%d intf_num:%d took %llu microsecs\n",
-			ctx->wb_num, ctx->type, ctx->xin_id,
-				ctx->intf_num, rot_time);
-	}
-
-	SDEROT_DBG("s:%8.8x %s t:%llu c:%d\n", isr,
-			(rc)?"Timeout":"Done", rot_time, ctx->comp_cnt);
-	return rc;
-}
-
-static void sde_mdp_set_ot_limit_wb(struct sde_mdp_writeback_ctx *ctx)
-{
-	struct sde_mdp_set_ot_params ot_params = {0,};
-
-	ot_params.xin_id = ctx->xin_id;
-	ot_params.num = ctx->wb_num;
-	ot_params.width = ctx->width;
-	ot_params.height = ctx->height;
-	ot_params.fps = 60;
-	ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
-	ot_params.reg_off_mdp_clk_ctrl = ctx->clk_ctrl.reg_off;
-	ot_params.bit_off_mdp_clk_ctrl = ctx->clk_ctrl.bit_off;
-	ot_params.fmt = (ctx->dst_fmt) ? ctx->dst_fmt->format : 0;
-
-	sde_mdp_set_ot_limit(&ot_params);
-}
-
-static int sde_mdp_writeback_display(struct sde_mdp_ctl *ctl, void *arg)
-{
-	struct sde_mdp_writeback_ctx *ctx;
-	struct sde_mdp_writeback_arg *wb_args;
-	u32 flush_bits = 0;
-	int ret;
-
-	if (!ctl || !ctl->mdata)
-		return -ENODEV;
-
-	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
-	if (!ctx)
-		return -ENODEV;
-
-	if (ctx->comp_cnt) {
-		SDEROT_ERR("previous kickoff not completed yet, ctl=%d\n",
-					ctl->num);
-		return -EPERM;
-	}
-
-	if (ctl->mdata->default_ot_wr_limit ||
-			ctl->mdata->default_ot_rd_limit)
-		sde_mdp_set_ot_limit_wb(ctx);
-
-	wb_args = (struct sde_mdp_writeback_arg *) arg;
-	if (!wb_args)
-		return -ENOENT;
-
-	ret = sde_mdp_writeback_addr_setup(ctx, wb_args->data);
-	if (ret) {
-		SDEROT_ERR("writeback data setup error ctl=%d\n", ctl->num);
-		return ret;
-	}
-
-	sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
-		   sde_mdp_writeback_intr_done, ctl);
-
-	flush_bits |= ctl->flush_reg_data;
-	flush_bits |= BIT(16); /* WB */
-	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
-	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, flush_bits);
-
-	reinit_completion(&ctx->wb_comp);
-	if (ctl->irq_num >= 0)
-		enable_irq(ctl->irq_num);
-	ret = sde_smmu_ctrl(1);
-	if (ret < 0) {
-		SDEROT_ERR("IOMMU attach failed\n");
-		return ret;
-	}
-
-	ctx->start_time = ktime_get();
-	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_START, 1);
-	/* ensure that start command is issued after the barrier */
-	wmb();
-
-	SDEROT_DBG("ctx%d type:%d xin_id:%d intf_num:%d start\n",
-		ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
-
-	ctx->comp_cnt++;
-
-	return 0;
-}
-
-int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl)
-{
-	struct sde_mdp_writeback_ctx *ctx;
-	struct sde_mdp_writeback *wb;
-	u32 mem_sel;
-
-	SDEROT_DBG("start ctl=%d\n", ctl->num);
-
-	if (!ctl->wb) {
-		SDEROT_DBG("wb not setup in the ctl\n");
-		return 0;
-	}
-
-	wb = ctl->wb;
-	mem_sel = (ctl->opmode & 0xF) - 1;
-	if (mem_sel < SDE_MDP_MAX_WRITEBACK) {
-		ctx = &wb_ctx_list[mem_sel];
-		if (ctx->ref_cnt) {
-			SDEROT_ERR("writeback in use %d\n", mem_sel);
-			return -EBUSY;
-		}
-		ctx->ref_cnt++;
-	} else {
-		SDEROT_ERR("invalid writeback mode %d\n", mem_sel);
-		return -EINVAL;
-	}
-
-	ctl->priv_data = ctx;
-	ctx->wb_num = wb->num;
-	ctx->base = wb->base;
-	ctx->offset = wb->offset;
-
-	init_completion(&ctx->wb_comp);
-
-	if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR)
-		ctl->ops.prepare_fnc = sde_mdp_writeback_prepare_rot;
-
-	ctl->ops.stop_fnc = sde_mdp_writeback_stop;
-	ctl->ops.display_fnc = sde_mdp_writeback_display;
-	ctl->ops.wait_fnc = sde_mdp_wb_wait4comp;
-
-	return 0;
-}
-
-int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg)
-{
-	return sde_mdp_display_commit(ctl, arg, NULL);
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
deleted file mode 100644
index b2f12820..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ /dev/null
@@ -1,3982 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-buf.h>
-#include <linux/clk.h>
-#include <linux/clk/qcom.h>
-
-#include "sde_rotator_core.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_smmu.h"
-#include "sde_rotator_r3.h"
-#include "sde_rotator_r3_internal.h"
-#include "sde_rotator_r3_hwio.h"
-#include "sde_rotator_r3_debug.h"
-#include "sde_rotator_trace.h"
-#include "sde_rotator_debug.h"
-
-#define RES_UHD              (3840*2160)
-#define MS_TO_US(t) ((t) * USEC_PER_MSEC)
-
-/* traffic shaping clock ticks = finish_time x 19.2MHz */
-#define TRAFFIC_SHAPE_CLKTICK_14MS   268800
-#define TRAFFIC_SHAPE_CLKTICK_12MS   230400
-#define TRAFFIC_SHAPE_VSYNC_CLK      19200000
-
-/* XIN mapping */
-#define XIN_SSPP		0
-#define XIN_WRITEBACK		1
-
-/* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT		(42 * 8)
-
-/*
- * When in sbuf mode, select a much longer wait, to allow the other driver
- * to detect timeouts and abort if necessary.
- */
-#define KOFF_TIMEOUT_SBUF	(10000)
-
-/* default stream buffer headroom in lines */
-#define DEFAULT_SBUF_HEADROOM	20
-#define DEFAULT_UBWC_MALSIZE	0
-#define DEFAULT_UBWC_SWIZZLE	0
-
-#define DEFAULT_MAXLINEWIDTH	4096
-
-/* stride alignment requirement for avoiding partial writes */
-#define PARTIAL_WRITE_ALIGNMENT	0x1F
-
-/* Macro for constructing the REGDMA command */
-#define SDE_REGDMA_WRITE(p, off, data) \
-	do { \
-		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
-				(u32)(data));\
-		writel_relaxed_no_log( \
-				(REGDMA_OP_REGWRITE | \
-				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
-				p); \
-		p += sizeof(u32); \
-		writel_relaxed_no_log(data, p); \
-		p += sizeof(u32); \
-	} while (0)
-
-#define SDE_REGDMA_MODIFY(p, off, mask, data) \
-	do { \
-		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
-				(u32)(data));\
-		writel_relaxed_no_log( \
-				(REGDMA_OP_REGMODIFY | \
-				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
-				p); \
-		p += sizeof(u32); \
-		writel_relaxed_no_log(mask, p); \
-		p += sizeof(u32); \
-		writel_relaxed_no_log(data, p); \
-		p += sizeof(u32); \
-	} while (0)
-
-#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
-	do { \
-		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
-				(u32)(len));\
-		writel_relaxed_no_log( \
-				(REGDMA_OP_BLKWRITE_INC | \
-				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
-				p); \
-		p += sizeof(u32); \
-		writel_relaxed_no_log(len, p); \
-		p += sizeof(u32); \
-	} while (0)
-
-#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
-	do { \
-		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
-		writel_relaxed_no_log(data, p); \
-		p += sizeof(u32); \
-	} while (0)
-
-#define SDE_REGDMA_READ(p, data) \
-	do { \
-		data = readl_relaxed_no_log(p); \
-		p += sizeof(u32); \
-	} while (0)
-
-/* Macro for directly accessing mapped registers */
-#define SDE_ROTREG_WRITE(base, off, data) \
-	do { \
-		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
-				, (u32)(data));\
-		writel_relaxed(data, (base + (off))); \
-	} while (0)
-
-#define SDE_ROTREG_READ(base, off) \
-	readl_relaxed(base + (off))
-
-#define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
-	(((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
-
-static const u32 sde_hw_rotator_v3_inpixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	SDE_PIX_FMT_Y_CB_CR_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_GH2V2,
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	SDE_PIX_FMT_YCBYCR_H2V1,
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-	SDE_PIX_FMT_RGBA_1010102,
-	SDE_PIX_FMT_RGBX_1010102,
-	SDE_PIX_FMT_ARGB_2101010,
-	SDE_PIX_FMT_XRGB_2101010,
-	SDE_PIX_FMT_BGRA_1010102,
-	SDE_PIX_FMT_BGRX_1010102,
-	SDE_PIX_FMT_ABGR_2101010,
-	SDE_PIX_FMT_XBGR_2101010,
-	SDE_PIX_FMT_RGBA_1010102_UBWC,
-	SDE_PIX_FMT_RGBX_1010102_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-};
-
-static const u32 sde_hw_rotator_v3_outpixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
-	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
-	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	/* SDE_PIX_FMT_YCBYCR_H2V1 */
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-	SDE_PIX_FMT_RGBA_1010102,
-	SDE_PIX_FMT_RGBX_1010102,
-	/* SDE_PIX_FMT_ARGB_2101010 */
-	/* SDE_PIX_FMT_XRGB_2101010 */
-	SDE_PIX_FMT_BGRA_1010102,
-	SDE_PIX_FMT_BGRX_1010102,
-	/* SDE_PIX_FMT_ABGR_2101010 */
-	/* SDE_PIX_FMT_XBGR_2101010 */
-	SDE_PIX_FMT_RGBA_1010102_UBWC,
-	SDE_PIX_FMT_RGBX_1010102_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-};
-
-static const u32 sde_hw_rotator_v4_inpixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	SDE_PIX_FMT_Y_CB_CR_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_H2V2,
-	SDE_PIX_FMT_Y_CR_CB_GH2V2,
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	SDE_PIX_FMT_YCBYCR_H2V1,
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-	SDE_PIX_FMT_RGBA_1010102,
-	SDE_PIX_FMT_RGBX_1010102,
-	SDE_PIX_FMT_ARGB_2101010,
-	SDE_PIX_FMT_XRGB_2101010,
-	SDE_PIX_FMT_BGRA_1010102,
-	SDE_PIX_FMT_BGRX_1010102,
-	SDE_PIX_FMT_ABGR_2101010,
-	SDE_PIX_FMT_XBGR_2101010,
-	SDE_PIX_FMT_RGBA_1010102_UBWC,
-	SDE_PIX_FMT_RGBX_1010102_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
-	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
-	SDE_PIX_FMT_XRGB_8888_TILE,
-	SDE_PIX_FMT_ARGB_8888_TILE,
-	SDE_PIX_FMT_ABGR_8888_TILE,
-	SDE_PIX_FMT_XBGR_8888_TILE,
-	SDE_PIX_FMT_RGBA_8888_TILE,
-	SDE_PIX_FMT_BGRA_8888_TILE,
-	SDE_PIX_FMT_RGBX_8888_TILE,
-	SDE_PIX_FMT_BGRX_8888_TILE,
-	SDE_PIX_FMT_RGBA_1010102_TILE,
-	SDE_PIX_FMT_RGBX_1010102_TILE,
-	SDE_PIX_FMT_ARGB_2101010_TILE,
-	SDE_PIX_FMT_XRGB_2101010_TILE,
-	SDE_PIX_FMT_BGRA_1010102_TILE,
-	SDE_PIX_FMT_BGRX_1010102_TILE,
-	SDE_PIX_FMT_ABGR_2101010_TILE,
-	SDE_PIX_FMT_XBGR_2101010_TILE,
-};
-
-static const u32 sde_hw_rotator_v4_outpixfmts[] = {
-	SDE_PIX_FMT_XRGB_8888,
-	SDE_PIX_FMT_ARGB_8888,
-	SDE_PIX_FMT_ABGR_8888,
-	SDE_PIX_FMT_RGBA_8888,
-	SDE_PIX_FMT_BGRA_8888,
-	SDE_PIX_FMT_RGBX_8888,
-	SDE_PIX_FMT_BGRX_8888,
-	SDE_PIX_FMT_XBGR_8888,
-	SDE_PIX_FMT_RGBA_5551,
-	SDE_PIX_FMT_ARGB_1555,
-	SDE_PIX_FMT_ABGR_1555,
-	SDE_PIX_FMT_BGRA_5551,
-	SDE_PIX_FMT_BGRX_5551,
-	SDE_PIX_FMT_RGBX_5551,
-	SDE_PIX_FMT_XBGR_1555,
-	SDE_PIX_FMT_XRGB_1555,
-	SDE_PIX_FMT_ARGB_4444,
-	SDE_PIX_FMT_RGBA_4444,
-	SDE_PIX_FMT_BGRA_4444,
-	SDE_PIX_FMT_ABGR_4444,
-	SDE_PIX_FMT_RGBX_4444,
-	SDE_PIX_FMT_XRGB_4444,
-	SDE_PIX_FMT_BGRX_4444,
-	SDE_PIX_FMT_XBGR_4444,
-	SDE_PIX_FMT_RGB_888,
-	SDE_PIX_FMT_BGR_888,
-	SDE_PIX_FMT_RGB_565,
-	SDE_PIX_FMT_BGR_565,
-	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
-	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
-	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H1V2,
-	SDE_PIX_FMT_Y_CRCB_H1V2,
-	SDE_PIX_FMT_Y_CBCR_H2V1,
-	SDE_PIX_FMT_Y_CRCB_H2V1,
-	/* SDE_PIX_FMT_YCBYCR_H2V1 */
-	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
-	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
-	SDE_PIX_FMT_RGBA_8888_UBWC,
-	SDE_PIX_FMT_RGBX_8888_UBWC,
-	SDE_PIX_FMT_RGB_565_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-	SDE_PIX_FMT_RGBA_1010102,
-	SDE_PIX_FMT_RGBX_1010102,
-	SDE_PIX_FMT_ARGB_2101010,
-	SDE_PIX_FMT_XRGB_2101010,
-	SDE_PIX_FMT_BGRA_1010102,
-	SDE_PIX_FMT_BGRX_1010102,
-	SDE_PIX_FMT_ABGR_2101010,
-	SDE_PIX_FMT_XBGR_2101010,
-	SDE_PIX_FMT_RGBA_1010102_UBWC,
-	SDE_PIX_FMT_RGBX_1010102_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
-	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
-	SDE_PIX_FMT_XRGB_8888_TILE,
-	SDE_PIX_FMT_ARGB_8888_TILE,
-	SDE_PIX_FMT_ABGR_8888_TILE,
-	SDE_PIX_FMT_XBGR_8888_TILE,
-	SDE_PIX_FMT_RGBA_8888_TILE,
-	SDE_PIX_FMT_BGRA_8888_TILE,
-	SDE_PIX_FMT_RGBX_8888_TILE,
-	SDE_PIX_FMT_BGRX_8888_TILE,
-	SDE_PIX_FMT_RGBA_1010102_TILE,
-	SDE_PIX_FMT_RGBX_1010102_TILE,
-	SDE_PIX_FMT_ARGB_2101010_TILE,
-	SDE_PIX_FMT_XRGB_2101010_TILE,
-	SDE_PIX_FMT_BGRA_1010102_TILE,
-	SDE_PIX_FMT_BGRX_1010102_TILE,
-	SDE_PIX_FMT_ABGR_2101010_TILE,
-	SDE_PIX_FMT_XBGR_2101010_TILE,
-};
-
-static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
-	SDE_PIX_FMT_Y_CBCR_H2V2,
-	SDE_PIX_FMT_Y_CRCB_H2V2,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
-};
-
-static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
-	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
-	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
-	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
-};
-
-static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
-	{0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
-	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
-	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
-};
-
-static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
-	/*
-	 * rottop - 0xA8850
-	 */
-	/* REGDMA */
-	{ 0XA8850, 0, 0 },
-	{ 0XA8850, 0, 1 },
-	{ 0XA8850, 0, 2 },
-	{ 0XA8850, 0, 3 },
-	{ 0XA8850, 0, 4 },
-
-	/* ROT_WB */
-	{ 0XA8850, 1, 0 },
-	{ 0XA8850, 1, 1 },
-	{ 0XA8850, 1, 2 },
-	{ 0XA8850, 1, 3 },
-	{ 0XA8850, 1, 4 },
-	{ 0XA8850, 1, 5 },
-	{ 0XA8850, 1, 6 },
-	{ 0XA8850, 1, 7 },
-
-	/* UBWC_DEC */
-	{ 0XA8850, 2, 0 },
-
-	/* UBWC_ENC */
-	{ 0XA8850, 3, 0 },
-
-	/* ROT_FETCH_0 */
-	{ 0XA8850, 4, 0 },
-	{ 0XA8850, 4, 1 },
-	{ 0XA8850, 4, 2 },
-	{ 0XA8850, 4, 3 },
-	{ 0XA8850, 4, 4 },
-	{ 0XA8850, 4, 5 },
-	{ 0XA8850, 4, 6 },
-	{ 0XA8850, 4, 7 },
-
-	/* ROT_FETCH_1 */
-	{ 0XA8850, 5, 0 },
-	{ 0XA8850, 5, 1 },
-	{ 0XA8850, 5, 2 },
-	{ 0XA8850, 5, 3 },
-	{ 0XA8850, 5, 4 },
-	{ 0XA8850, 5, 5 },
-	{ 0XA8850, 5, 6 },
-	{ 0XA8850, 5, 7 },
-
-	/* ROT_FETCH_2 */
-	{ 0XA8850, 6, 0 },
-	{ 0XA8850, 6, 1 },
-	{ 0XA8850, 6, 2 },
-	{ 0XA8850, 6, 3 },
-	{ 0XA8850, 6, 4 },
-	{ 0XA8850, 6, 5 },
-	{ 0XA8850, 6, 6 },
-	{ 0XA8850, 6, 7 },
-
-	/* ROT_FETCH_3 */
-	{ 0XA8850, 7, 0 },
-	{ 0XA8850, 7, 1 },
-	{ 0XA8850, 7, 2 },
-	{ 0XA8850, 7, 3 },
-	{ 0XA8850, 7, 4 },
-	{ 0XA8850, 7, 5 },
-	{ 0XA8850, 7, 6 },
-	{ 0XA8850, 7, 7 },
-
-	/* ROT_FETCH_4 */
-	{ 0XA8850, 8, 0 },
-	{ 0XA8850, 8, 1 },
-	{ 0XA8850, 8, 2 },
-	{ 0XA8850, 8, 3 },
-	{ 0XA8850, 8, 4 },
-	{ 0XA8850, 8, 5 },
-	{ 0XA8850, 8, 6 },
-	{ 0XA8850, 8, 7 },
-
-	/* ROT_UNPACK_0*/
-	{ 0XA8850, 9, 0 },
-	{ 0XA8850, 9, 1 },
-	{ 0XA8850, 9, 2 },
-	{ 0XA8850, 9, 3 },
-};
-
-static struct sde_rot_regdump sde_rot_r3_regdump[] = {
-	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
-	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
-	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
-	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
-		SDE_ROT_REGDUMP_READ },
-	/*
-	 * Need to perform a SW reset to REGDMA in order to access the
-	 * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
-	 * REGDMA RAM should be dump at last.
-	 */
-	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
-		SDE_ROT_REGDUMP_WRITE, 1 },
-	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
-		SDE_ROT_REGDUMP_READ },
-	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
-		SDE_ROT_REGDUMP_VBIF },
-	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
-		SDE_ROT_REGDUMP_WRITE, 0 },
-};
-
-struct sde_rot_cdp_params {
-	bool enable;
-	struct sde_mdp_format_params *fmt;
-	u32 offset;
-};
-
-/* Invalid software timestamp value for initialization */
-#define SDE_REGDMA_SWTS_INVALID	(~0)
-
-/**
- * __sde_hw_rotator_get_timestamp - obtain rotator current timestamp
- * @rot: rotator context
- * @q_id: regdma queue id (low/high)
- * @return: current timestmap
- */
-static u32 __sde_hw_rotator_get_timestamp(struct sde_hw_rotator *rot, u32 q_id)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 ts;
-
-	if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
-		if (q_id == ROT_QUEUE_HIGH_PRIORITY)
-			ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_0);
-		else
-			ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_1);
-	} else {
-		ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
-		if (q_id == ROT_QUEUE_LOW_PRIORITY)
-			ts >>= SDE_REGDMA_SWTS_SHIFT;
-	}
-
-	return ts & SDE_REGDMA_SWTS_MASK;
-}
-
-/**
- * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
- * @ts_curr: current software timestamp
- * @ts_prev: previous software timestamp
- * @return: the amount ts_curr is ahead of ts_prev
- */
-static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
-{
-	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
-
-	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
-}
-
-/**
- * sde_hw_rotator_pending_hwts - Check if the given context is still pending
- * @rot: Pointer to hw rotator
- * @ctx: Pointer to rotator context
- * @phwts: Pointer to returned reference hw timestamp, optional
- * @return: true if context has pending requests
- */
-static int sde_hw_rotator_pending_hwts(struct sde_hw_rotator *rot,
-		struct sde_hw_rotator_context *ctx, u32 *phwts)
-{
-	u32 hwts;
-	int ts_diff;
-	bool pending;
-
-	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID) {
-		if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
-			hwts = SDE_ROTREG_READ(rot->mdss_base,
-					ROTTOP_ROT_CNTR_1);
-		else
-			hwts = SDE_ROTREG_READ(rot->mdss_base,
-					ROTTOP_ROT_CNTR_0);
-	} else {
-		hwts = ctx->last_regdma_timestamp;
-	}
-
-	hwts &= SDE_REGDMA_SWTS_MASK;
-
-	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, hwts);
-
-	if (phwts)
-		*phwts = hwts;
-
-	pending = (ts_diff > 0) ? true : false;
-
-	SDEROT_DBG("ts:0x%x, queue_id:%d, hwts:0x%x, pending:%d\n",
-		ctx->timestamp, ctx->q_id, hwts, pending);
-	SDEROT_EVTLOG(ctx->timestamp, hwts, ctx->q_id, ts_diff);
-	return pending;
-}
-
-/**
- * sde_hw_rotator_update_hwts - update hw timestamp with given value
- * @rot: Pointer to hw rotator
- * @q_id: rotator queue id
- * @hwts: new hw timestamp
- */
-static void sde_hw_rotator_update_hwts(struct sde_hw_rotator *rot,
-		u32 q_id, u32 hwts)
-{
-	if (q_id == ROT_QUEUE_LOW_PRIORITY)
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_1, hwts);
-	else
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_0, hwts);
-}
-
-/**
- * sde_hw_rotator_pending_swts - Check if the given context is still pending
- * @rot: Pointer to hw rotator
- * @ctx: Pointer to rotator context
- * @pswts: Pointer to returned reference software timestamp, optional
- * @return: true if context has pending requests
- */
-static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
-		struct sde_hw_rotator_context *ctx, u32 *pswts)
-{
-	u32 swts;
-	int ts_diff;
-	bool pending;
-
-	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
-		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
-	else
-		swts = ctx->last_regdma_timestamp;
-
-	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
-		swts >>= SDE_REGDMA_SWTS_SHIFT;
-
-	swts &= SDE_REGDMA_SWTS_MASK;
-
-	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
-
-	if (pswts)
-		*pswts = swts;
-
-	pending = (ts_diff > 0) ? true : false;
-
-	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
-		ctx->timestamp, ctx->q_id, swts, pending);
-	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
-	return pending;
-}
-
-/**
- * sde_hw_rotator_update_swts - update software timestamp with given value
- * @rot: Pointer to hw rotator
- * @q_id: rotator queue id
- * @swts: new software timestamp
- */
-static void sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
-		u32 q_id, u32 swts)
-{
-	u32 mask = SDE_REGDMA_SWTS_MASK;
-
-	swts &= SDE_REGDMA_SWTS_MASK;
-	if (q_id == ROT_QUEUE_LOW_PRIORITY) {
-		swts <<= SDE_REGDMA_SWTS_SHIFT;
-		mask <<= SDE_REGDMA_SWTS_SHIFT;
-	}
-
-	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
-	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
-}
-
-/**
- * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
- *				Also, clear rotator/regdma irq status.
- * @rot: Pointer to hw rotator
- */
-static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
-{
-	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
-		atomic_read(&rot->irq_enabled));
-
-	if (!atomic_read(&rot->irq_enabled)) {
-		if (rot->mode == ROT_REGDMA_OFF)
-			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
-				ROT_DONE_MASK);
-		else
-			SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
-
-		enable_irq(rot->irq_num);
-	}
-	atomic_inc(&rot->irq_enabled);
-}
-
-/**
- * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
- *				Also, clear rotator/regdma irq enable masks.
- * @rot: Pointer to hw rotator
- */
-static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
-{
-	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
-		atomic_read(&rot->irq_enabled));
-
-	if (!atomic_read(&rot->irq_enabled)) {
-		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
-		return;
-	}
-
-	if (!atomic_dec_return(&rot->irq_enabled)) {
-		if (rot->mode == ROT_REGDMA_OFF)
-			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
-		else
-			SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_INT_EN, 0);
-		/* disable irq after last pending irq is handled, if any */
-		synchronize_irq(rot->irq_num);
-		disable_irq_nosync(rot->irq_num);
-	}
-}
-
-static void sde_hw_rotator_halt_vbif_xin_client(void)
-{
-	struct sde_mdp_vbif_halt_params halt_params;
-
-	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
-	halt_params.xin_id = XIN_SSPP;
-	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
-	halt_params.bit_off_mdp_clk_ctrl =
-		MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
-	sde_mdp_halt_vbif_xin(&halt_params);
-
-	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
-	halt_params.xin_id = XIN_WRITEBACK;
-	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
-	halt_params.bit_off_mdp_clk_ctrl =
-		MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
-	sde_mdp_halt_vbif_xin(&halt_params);
-}
-
-/**
- * sde_hw_rotator_reset - Reset rotator hardware
- * @rot: pointer to hw rotator
- * @ctx: pointer to current rotator context during the hw hang (optional)
- */
-static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
-		struct sde_hw_rotator_context *ctx)
-{
-	struct sde_hw_rotator_context *rctx = NULL;
-	u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
-			REGDMA_INT_2_MASK);
-	u32 last_ts[ROT_QUEUE_MAX] = {0,};
-	u32 latest_ts, opmode;
-	int elapsed_time, t;
-	int i, j;
-	unsigned long flags;
-
-	if (!rot) {
-		SDEROT_ERR("NULL rotator\n");
-		return -EINVAL;
-	}
-
-	/* sw reset the hw rotator */
-	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
-	/* ensure write is issued to the rotator HW */
-	wmb();
-	usleep_range(MS_TO_US(10), MS_TO_US(20));
-
-	/* force rotator into offline mode */
-	opmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
-	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_OP_MODE,
-			opmode & ~(BIT(5) | BIT(4) | BIT(1) | BIT(0)));
-
-	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
-
-	/* halt vbif xin client to ensure no pending transaction */
-	sde_hw_rotator_halt_vbif_xin_client();
-
-	/* if no ctx is specified, skip ctx wake up */
-	if (!ctx)
-		return 0;
-
-	if (ctx->q_id >= ROT_QUEUE_MAX) {
-		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&rot->rotisr_lock, flags);
-
-	/* update timestamp register with current context */
-	last_ts[ctx->q_id] = ctx->timestamp;
-	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
-	SDEROT_EVTLOG(ctx->timestamp);
-
-	/*
-	 * Search for any pending rot session, and look for last timestamp
-	 * per hw queue.
-	 */
-	for (i = 0; i < ROT_QUEUE_MAX; i++) {
-		latest_ts = atomic_read(&rot->timestamp[i]);
-		latest_ts &= SDE_REGDMA_SWTS_MASK;
-		elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
-			last_ts[i]);
-
-		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
-			rctx = rot->rotCtx[i][j];
-			if (rctx && rctx != ctx) {
-				rctx->last_regdma_isr_status = int_mask;
-				rctx->last_regdma_timestamp  = rctx->timestamp;
-
-				t = sde_hw_rotator_elapsed_swts(latest_ts,
-							rctx->timestamp);
-				if (t < elapsed_time) {
-					elapsed_time = t;
-					last_ts[i] = rctx->timestamp;
-					rot->ops.update_ts(rot, i, last_ts[i]);
-				}
-
-				SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
-						i, j, rctx->timestamp);
-				SDEROT_EVTLOG(i, j, rctx->timestamp,
-						last_ts[i]);
-			}
-		}
-	}
-
-	/* Finally wakeup all pending rotator context in queue */
-	for (i = 0; i < ROT_QUEUE_MAX; i++) {
-		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
-			rctx = rot->rotCtx[i][j];
-			if (rctx && rctx != ctx)
-				wake_up_all(&rctx->regdma_waitq);
-		}
-	}
-
-	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-	return 0;
-}
-
-/**
- * _sde_hw_rotator_dump_status - Dump hw rotator status on error
- * @rot: Pointer to hw rotator
- */
-static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
-		u32 *ubwcerr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 reg = 0;
-
-	SDEROT_ERR(
-		"op_mode = %x, int_en = %x, int_status = %x\n",
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_OP_MODE),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_INT_EN),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_INT_STATUS));
-
-	SDEROT_ERR(
-		"ts0/ts1 = %x/%x, q0_status = %x, q1_status = %x, block_status = %x\n",
-		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_HIGH_PRIORITY),
-		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_LOW_PRIORITY),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_BLOCK_STATUS));
-
-	SDEROT_ERR(
-		"invalid_cmd_offset = %x, fsm_state = %x\n",
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
-		SDE_ROTREG_READ(rot->mdss_base,
-			REGDMA_CSR_REGDMA_FSM_STATE));
-
-	SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
-		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
-		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
-		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));
-
-	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
-	if (ubwcerr)
-		*ubwcerr = reg;
-	SDEROT_ERR(
-		"UBWC decode status = %x, UBWC encode status = %x\n", reg,
-		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
-
-	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
-		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
-		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
-
-	SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_FETCH_SMP_WR_PLANE0),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_FETCH_SMP_WR_PLANE1),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_FETCH_SMP_WR_PLANE2));
-	SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
-			SDE_ROTREG_READ(rot->mdss_base,
-					ROT_SSPP_SMP_UNPACK_RD_PLANE0),
-			SDE_ROTREG_READ(rot->mdss_base,
-					ROT_SSPP_SMP_UNPACK_RD_PLANE1),
-			SDE_ROTREG_READ(rot->mdss_base,
-					ROT_SSPP_SMP_UNPACK_RD_PLANE2));
-	SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_UNPACK_LINE_COUNT),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_UNPACK_BLK_COUNT),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_SSPP_FILL_LEVELS));
-
-	SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_WB_SBUF_STATUS_PLANE0),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_WB_SBUF_STATUS_PLANE1),
-			SDE_ROTREG_READ(rot->mdss_base,
-				ROT_WB_SYS_CACHE_MODE));
-}
-
-/**
- * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
- * on provided session_id. Each rotator has a different session_id.
- * @rot: Pointer to rotator hw
- * @session_id: Identifier for rotator session
- * @sequence_id: Identifier for rotation request within the session
- * @q_id: Rotator queue identifier
- */
-static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
-		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
-		enum sde_rot_queue_prio q_id)
-{
-	int i;
-	struct sde_hw_rotator_context  *ctx = NULL;
-
-	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
-		ctx = rot->rotCtx[q_id][i];
-
-		if (ctx && (ctx->session_id == session_id) &&
-				(ctx->sequence_id == sequence_id)) {
-			SDEROT_DBG(
-				"rotCtx sloti[%d][%d] ==> ctx:%pK | session-id:%d | sequence-id:%d\n",
-				q_id, i, ctx, ctx->session_id,
-				ctx->sequence_id);
-			return ctx;
-		}
-	}
-
-	return NULL;
-}
-
-/*
- * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
- * @dbgbuf: Pointer to debug buffer
- * @buf: Pointer to layer buffer structure
- * @data: Pointer to h/w mapped buffer structure
- */
-static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
-		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
-{
-	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
-	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
-
-	dbgbuf->vaddr  = NULL;
-	dbgbuf->width  = buf->width;
-	dbgbuf->height = buf->height;
-
-	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
-		dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
-		dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
-		SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
-				dbgbuf->vaddr, dbgbuf->buflen,
-				dbgbuf->width, dbgbuf->height);
-	}
-}
-
-/*
- * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
- * @dbgbuf: Pointer to debug buffer
- */
-static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
-{
-	if (dbgbuf->vaddr) {
-		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
-		dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
-	}
-
-	dbgbuf->vaddr  = NULL;
-	dbgbuf->dmabuf = NULL;
-	dbgbuf->buflen = 0;
-	dbgbuf->width  = 0;
-	dbgbuf->height = 0;
-}
-
-/*
- * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
- * levels, enable write gather enable and avoid clk gating setting for
- * debug purpose.
- *
- * @rot: Pointer to rotator hw
- */
-static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
-{
-	u32 i, mask, vbif_qos, reg_val = 0;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	/* VBIF_ROT QoS remapper setting */
-	switch (mdata->npriority_lvl) {
-
-	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
-		for (i = 0; i < mdata->npriority_lvl; i++) {
-			reg_val = SDE_VBIF_READ(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
-			mask = 0x3 << (XIN_SSPP * 2);
-			vbif_qos = mdata->vbif_nrt_qos[i];
-			reg_val |= vbif_qos << (XIN_SSPP * 2);
-			/* ensure write is issued after the read operation */
-			mb();
-			SDE_VBIF_WRITE(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
-					reg_val);
-		}
-		break;
-
-	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
-		mask = mdata->npriority_lvl - 1;
-		for (i = 0; i < mdata->npriority_lvl; i++) {
-			/* RD and WR client */
-			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
-							<< (XIN_SSPP * 4);
-			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
-							<< (XIN_WRITEBACK * 4);
-
-			SDE_VBIF_WRITE(mdata,
-				MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
-				reg_val);
-			SDE_VBIF_WRITE(mdata,
-				MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
-				reg_val);
-		}
-		break;
-
-	default:
-		SDEROT_DBG("invalid vbif remapper levels\n");
-	}
-
-	/* Enable write gather for writeback to remove write gaps, which
-	 * may hang AXI/BIMC/SDE.
-	 */
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
-			BIT(XIN_WRITEBACK));
-
-	/*
-	 * For debug purpose, disable clock gating, i.e. Clocks always on
-	 */
-	if (mdata->clk_always_on) {
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
-				0xFFFF);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
-	}
-}
-
-/*
- * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
- * @ctx: Pointer to rotator context
- * @mask: Bit mask location of the timestamp
- * @swts: Software timestamp
- */
-static void sde_hw_rotator_setup_timestamp_packet(
-		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
-{
-	char __iomem *wrptr;
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/*
-	 * Create a dummy packet write out to 1 location for timestamp
-	 * generation.
-	 */
-	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
-	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
-	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
-	/*
-	 * Must clear secure buffer setting for SW timestamp because
-	 * SW timstamp buffer allocation is always non-secure region.
-	 */
-	if (ctx->is_secure) {
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
-	}
-	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
-			(ctx->rot->highest_bank & 0x3) << 8);
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
-	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
-
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-/*
- * sde_hw_rotator_cdp_configs - configures the CDP registers
- * @ctx: Pointer to rotator context
- * @params: Pointer to parameters needed for CDP configs
- */
-static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
-		struct sde_rot_cdp_params *params)
-{
-	int reg_val;
-	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	if (!params->enable) {
-		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
-		goto end;
-	}
-
-	reg_val = BIT(0); /* enable cdp */
-
-	if (sde_mdp_is_ubwc_format(params->fmt))
-		reg_val |= BIT(1); /* enable UBWC meta cdp */
-
-	if (sde_mdp_is_ubwc_format(params->fmt)
-			|| sde_mdp_is_tilea4x_format(params->fmt)
-			|| sde_mdp_is_tilea5x_format(params->fmt))
-		reg_val |= BIT(2); /* enable tile amortize */
-
-	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
-
-	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
-
-end:
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-/*
- * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
- * for the WRITEBACK rotator for inline and offline rotation.
- *
- * @ctx: Pointer to rotator context
- */
-static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/* Offline rotation setting */
-	if (!ctx->sbuf_mode) {
-		/* QOS LUT WR setting */
-		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
-					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
-					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
-		}
-
-		/* Danger LUT WR setting */
-		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
-					mdata->lut_cfg[SDE_ROT_WR].danger_lut);
-
-		/* Safe LUT WR setting */
-		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
-					mdata->lut_cfg[SDE_ROT_WR].safe_lut);
-
-	/* Inline rotation setting */
-	} else {
-		/* QOS LUT WR setting */
-		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
-				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
-				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
-		}
-
-		/* Danger LUT WR setting */
-		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
-					mdata->sde_inline_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
-				mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
-
-		/* Safe LUT WR setting */
-		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
-					mdata->sde_inline_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
-				mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-/*
- * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
- * for the SSPP rotator for inline and offline rotation.
- *
- * @ctx: Pointer to rotator context
- */
-static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/* Offline rotation setting */
-	if (!ctx->sbuf_mode) {
-		/* QOS LUT RD setting */
-		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
-					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
-					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
-		}
-
-		/* Danger LUT RD setting */
-		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
-					mdata->lut_cfg[SDE_ROT_RD].danger_lut);
-
-		/* Safe LUT RD setting */
-		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
-					mdata->lut_cfg[SDE_ROT_RD].safe_lut);
-
-	/* inline rotation setting */
-	} else {
-		/* QOS LUT RD setting */
-		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
-				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
-				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
-		}
-
-		/* Danger LUT RD setting */
-		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
-					mdata->sde_inline_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
-				mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
-
-		/* Safe LUT RD setting */
-		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
-					mdata->sde_inline_qos_map))
-			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
-				mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-static void sde_hw_rotator_setup_fetchengine_helper(
-		struct sde_hw_rot_sspp_cfg *cfg,
-		struct sde_rot_data_type *mdata,
-		struct sde_hw_rotator_context *ctx, char __iomem *wrptr,
-		u32 flags, u32 *width, u32 *height)
-{
-	int i;
-
-	/*
-	 * initialize start control trigger selection first
-	 */
-	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
-		if (ctx->sbuf_mode)
-			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
-					ctx->start_ctrl);
-		else
-			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
-	}
-
-	/* source image setup */
-	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
-			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
-		for (i = 0; i < cfg->src_plane.num_planes; i++)
-			cfg->src_plane.ystride[i] *= 2;
-		*width *= 2;
-		*height /= 2;
-	}
-}
-
-/*
- * sde_hw_rotator_setup_fetchengine - setup fetch engine
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- * @cfg: Fetch configuration
- * @danger_lut: real-time QoS LUT for danger setting (not used)
- * @safe_lut: real-time QoS LUT for safe setting (not used)
- * @dnsc_factor_w: downscale factor for width
- * @dnsc_factor_h: downscale factor for height
- * @flags: Control flag
- */
-static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id,
-		struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
-		u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	struct sde_mdp_format_params *fmt;
-	struct sde_mdp_data *data;
-	struct sde_rot_cdp_params cdp_params = {0};
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	char __iomem *wrptr;
-	u32 opmode = 0;
-	u32 chroma_samp = 0;
-	u32 src_format = 0;
-	u32 unpack = 0;
-	u32 width = cfg->img_width;
-	u32 height = cfg->img_height;
-	u32 fetch_blocksize = 0;
-	int i;
-
-	if (ctx->rot->mode == ROT_REGDMA_ON) {
-		if (rot->irq_num >= 0)
-			SDE_ROTREG_WRITE(rot->mdss_base,
-					REGDMA_CSR_REGDMA_INT_EN,
-					REGDMA_INT_MASK);
-		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
-				REGDMA_EN);
-	}
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	sde_hw_rotator_setup_fetchengine_helper(cfg, mdata, ctx, wrptr,
-							flags, &width, &height);
-
-	/*
-	 * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
-	 */
-	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
-
-	/* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr,
-			cfg->src_rect->w | (cfg->src_rect->h << 16));
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr,
-			cfg->src_rect->x | (cfg->src_rect->y << 16));
-	SDE_REGDMA_BLKWRITE_DATA(wrptr,
-			cfg->src_rect->w | (cfg->src_rect->h << 16));
-	SDE_REGDMA_BLKWRITE_DATA(wrptr,
-			cfg->src_rect->x | (cfg->src_rect->y << 16));
-
-	/* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
-	data = cfg->data;
-	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
-		SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
-			(cfg->src_plane.ystride[1] << 16));
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
-			(cfg->src_plane.ystride[3] << 16));
-
-	/* UNUSED, write 0 */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-
-	/* setup source format */
-	fmt = cfg->fmt;
-
-	chroma_samp = fmt->chroma_sample;
-	if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
-		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
-			chroma_samp = SDE_MDP_CHROMA_H1V2;
-		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
-			chroma_samp = SDE_MDP_CHROMA_H2V1;
-	}
-
-	src_format = (chroma_samp << 23)   |
-		(fmt->fetch_planes << 19)  |
-		(fmt->bits[C3_ALPHA] << 6) |
-		(fmt->bits[C2_R_Cr] << 4)  |
-		(fmt->bits[C1_B_Cb] << 2)  |
-		(fmt->bits[C0_G_Y] << 0);
-
-	if (fmt->alpha_enable &&
-			(fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
-		src_format |= BIT(8); /* SRCC3_EN */
-
-	src_format |= ((fmt->unpack_count - 1) << 12) |
-			(fmt->unpack_tight << 17)       |
-			(fmt->unpack_align_msb << 18)   |
-			((fmt->bpp - 1) << 9)           |
-			((fmt->frame_format & 3) << 30);
-
-	if (flags & SDE_ROT_FLAG_ROT_90)
-		src_format |= BIT(11);	/* ROT90 */
-
-	if (sde_mdp_is_ubwc_format(fmt))
-		opmode |= BIT(0); /* BWC_DEC_EN */
-
-	/* if this is YUV pixel format, enable CSC */
-	if (sde_mdp_is_yuv_format(fmt))
-		src_format |= BIT(15); /* SRC_COLOR_SPACE */
-
-	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
-		src_format |= BIT(14); /* UNPACK_DX_FORMAT */
-
-	if (rot->solid_fill)
-		src_format |= BIT(22); /* SOLID_FILL */
-
-	/* SRC_FORMAT */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
-
-	/* setup source unpack pattern */
-	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
-		 (fmt->element[1] << 8)  | (fmt->element[0] << 0);
-
-	/* SRC_UNPACK_PATTERN */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
-
-	/* setup source op mode */
-	if (flags & SDE_ROT_FLAG_FLIP_LR)
-		opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
-	if (flags & SDE_ROT_FLAG_FLIP_UD)
-		opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
-	opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
-
-	/* SRC_OP_MODE */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
-
-	/* setup source fetch config, TP10 uses different block size */
-	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
-			(dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
-		if (sde_mdp_is_tp10_format(fmt))
-			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
-		else
-			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
-	} else {
-		if (sde_mdp_is_tp10_format(fmt))
-			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
-		else
-			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
-	}
-
-	if (rot->solid_fill)
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
-				rot->constant_color);
-
-	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
-			fetch_blocksize |
-			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
-			((rot->highest_bank & 0x3) << 18));
-
-	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL,
-				((ctx->rot->ubwc_malsize & 0x3) << 8) |
-				((ctx->rot->highest_bank & 0x3) << 4) |
-				((ctx->rot->ubwc_swizzle & 0x1) << 0));
-	else if (test_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map) ||
-			test_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map))
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(30));
-
-	/* setup source buffer plane security status */
-	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
-			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
-		ctx->is_secure = true;
-	} else {
-		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
-		ctx->is_secure = false;
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-
-	/* CDP register RD setting */
-	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
-					 mdata->enable_cdp[SDE_ROT_RD] : false;
-	cdp_params.fmt = fmt;
-	cdp_params.offset = ROT_SSPP_CDP_CNTL;
-	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
-
-	/* QOS LUT/ Danger LUT/ Safe Lut WR setting */
-	sde_hw_rotator_setup_qos_lut_rd(ctx);
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/*
-	 * Determine if traffic shaping is required. Only enable traffic
-	 * shaping when content is 4k@30fps. The actual traffic shaping
-	 * bandwidth calculation is done in output setup.
-	 */
-	if (((!ctx->sbuf_mode)
-			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
-			&& (cfg->fps <= 30)) {
-		SDEROT_DBG("Enable Traffic Shaper\n");
-		ctx->is_traffic_shaping = true;
-	} else {
-		SDEROT_DBG("Disable Traffic Shaper\n");
-		ctx->is_traffic_shaping = false;
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-/*
- * sde_hw_rotator_setup_wbengine - setup writeback engine
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- * @cfg: Writeback configuration
- * @flags: Control flag
- */
-static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id,
-		struct sde_hw_rot_wb_cfg *cfg,
-		u32 flags)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_mdp_format_params *fmt;
-	struct sde_rot_cdp_params cdp_params = {0};
-	char __iomem *wrptr;
-	u32 pack = 0;
-	u32 dst_format = 0;
-	u32 no_partial_writes = 0;
-	int i;
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	fmt = cfg->fmt;
-
-	/* setup WB DST format */
-	dst_format |= (fmt->chroma_sample << 23) |
-			(fmt->fetch_planes << 19)  |
-			(fmt->bits[C3_ALPHA] << 6) |
-			(fmt->bits[C2_R_Cr] << 4)  |
-			(fmt->bits[C1_B_Cb] << 2)  |
-			(fmt->bits[C0_G_Y] << 0);
-
-	/* alpha control */
-	if (fmt->alpha_enable || (!fmt->is_yuv && (fmt->unpack_count == 4))) {
-		dst_format |= BIT(8);
-		if (!fmt->alpha_enable) {
-			dst_format |= BIT(14);
-			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
-		}
-	}
-
-	dst_format |= ((fmt->unpack_count - 1) << 12)	|
-			(fmt->unpack_tight << 17)	|
-			(fmt->unpack_align_msb << 18)	|
-			((fmt->bpp - 1) << 9)		|
-			((fmt->frame_format & 3) << 30);
-
-	if (sde_mdp_is_yuv_format(fmt))
-		dst_format |= BIT(15);
-
-	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
-		dst_format |= BIT(21); /* PACK_DX_FORMAT */
-
-	/*
-	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
-	 */
-	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
-
-	/* DST_FORMAT */
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
-
-	/* DST_OP_MODE */
-	if (sde_mdp_is_ubwc_format(fmt))
-		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
-	else
-		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
-
-	/* DST_PACK_PATTERN */
-	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
-		(fmt->element[1] << 8) | (fmt->element[0] << 0);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
-
-	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
-	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
-		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
-			(cfg->dst_plane.ystride[1] << 16));
-	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
-			(cfg->dst_plane.ystride[3] << 16));
-
-	/* setup WB out image size and ROI */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
-			cfg->img_width | (cfg->img_height << 16));
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
-			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
-			cfg->dst_rect->x | (cfg->dst_rect->y << 16));
-
-	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
-			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
-	else
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
-
-	/*
-	 * setup Downscale factor
-	 */
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
-			cfg->v_downscale_factor |
-			(cfg->h_downscale_factor << 16));
-
-	/* partial write check */
-	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
-		no_partial_writes = BIT(10);
-
-		/*
-		 * For simplicity, don't disable partial writes if
-		 * the ROI does not span the entire width of the
-		 * output image, and require the total stride to
-		 * also be properly aligned.
-		 *
-		 * This avoids having to determine the memory access
-		 * alignment of the actual horizontal ROI on a per
-		 * color format basis.
-		 */
-		if (sde_mdp_is_ubwc_format(fmt)) {
-			no_partial_writes = 0x0;
-		} else if (cfg->dst_rect->x ||
-				cfg->dst_rect->w != cfg->img_width) {
-			no_partial_writes = 0x0;
-		} else {
-			for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
-				if (cfg->dst_plane.ystride[i] &
-						PARTIAL_WRITE_ALIGNMENT)
-					no_partial_writes = 0x0;
-		}
-	}
-
-	/* write config setup for bank configuration */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
-			(ctx->rot->highest_bank & 0x3) << 8);
-
-	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
-				((ctx->rot->ubwc_malsize & 0x3) << 8) |
-				((ctx->rot->highest_bank & 0x3) << 4) |
-				((ctx->rot->ubwc_swizzle & 0x1) << 0));
-
-	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
-				ctx->sys_cache_mode);
-
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
-			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
-
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-
-	/* CDP register WR setting */
-	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
-					mdata->enable_cdp[SDE_ROT_WR] : false;
-	cdp_params.fmt = fmt;
-	cdp_params.offset = ROT_WB_CDP_CNTL;
-	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
-
-	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
-	sde_hw_rotator_setup_qos_lut_wr(ctx);
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
-	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
-		u32 bw;
-
-		/*
-		 * Target to finish in 12ms, and we need to set number of bytes
-		 * per clock tick for traffic shaping.
-		 * Each clock tick run @ 19.2MHz, so we need we know total of
-		 * clock ticks in 14ms, i.e. 12ms/(1/19.2MHz) ==> 23040
-		 * Finally, calcualte the byte count per clock tick based on
-		 * resolution, bpp and compression ratio.
-		 */
-		bw = cfg->dst_rect->w * cfg->dst_rect->h;
-
-		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
-			bw = (bw * 3) / 2;
-		else
-			bw *= fmt->bpp;
-
-		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
-
-		/* use prefill bandwidth instead if specified */
-		if (cfg->prefill_bw)
-			bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
-					TRAFFIC_SHAPE_VSYNC_CLK);
-
-		if (bw > 0xFF)
-			bw = 0xFF;
-		else if (bw == 0)
-			bw = 1;
-
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
-				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
-		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
-	} else {
-		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
-		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-}
-
-/*
- * sde_hw_rotator_start_no_regdma - start non-regdma operation
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- */
-static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	char __iomem *wrptr;
-	char __iomem *mem_rdptr;
-	char __iomem *addr;
-	u32 mask;
-	u32 cmd0, cmd1, cmd2;
-	u32 blksize;
-
-	/*
-	 * when regdma is not using, the regdma segment is just a normal
-	 * DRAM, and not an iomem.
-	 */
-	mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	if (rot->irq_num >= 0) {
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
-		reinit_completion(&ctx->rot_comp);
-		sde_hw_rotator_enable_irq(rot);
-	}
-
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-
-	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
-	/* Write all command stream to Rotator blocks */
-	/* Rotator will start right away after command stream finish writing */
-	while (mem_rdptr < wrptr) {
-		u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
-
-		switch (op) {
-		case REGDMA_OP_NOP:
-			SDEROT_DBG("NOP\n");
-			mem_rdptr += sizeof(u32);
-			break;
-		case REGDMA_OP_REGWRITE:
-			SDE_REGDMA_READ(mem_rdptr, cmd0);
-			SDE_REGDMA_READ(mem_rdptr, cmd1);
-			SDEROT_DBG("REGW %6.6x %8.8x\n",
-					cmd0 & REGDMA_ADDR_OFFSET_MASK,
-					cmd1);
-			addr =  rot->mdss_base +
-				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
-			writel_relaxed(cmd1, addr);
-			break;
-		case REGDMA_OP_REGMODIFY:
-			SDE_REGDMA_READ(mem_rdptr, cmd0);
-			SDE_REGDMA_READ(mem_rdptr, cmd1);
-			SDE_REGDMA_READ(mem_rdptr, cmd2);
-			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
-					cmd0 & REGDMA_ADDR_OFFSET_MASK,
-					cmd1, cmd2);
-			addr =  rot->mdss_base +
-				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
-			mask = cmd1;
-			writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
-					addr);
-			break;
-		case REGDMA_OP_BLKWRITE_SINGLE:
-			SDE_REGDMA_READ(mem_rdptr, cmd0);
-			SDE_REGDMA_READ(mem_rdptr, cmd1);
-			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
-					cmd0 & REGDMA_ADDR_OFFSET_MASK,
-					cmd1);
-			addr =  rot->mdss_base +
-				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
-			blksize = cmd1;
-			while (blksize--) {
-				SDE_REGDMA_READ(mem_rdptr, cmd0);
-				SDEROT_DBG("DATA %8.8x\n", cmd0);
-				writel_relaxed(cmd0, addr);
-			}
-			break;
-		case REGDMA_OP_BLKWRITE_INC:
-			SDE_REGDMA_READ(mem_rdptr, cmd0);
-			SDE_REGDMA_READ(mem_rdptr, cmd1);
-			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
-					cmd0 & REGDMA_ADDR_OFFSET_MASK,
-					cmd1);
-			addr =  rot->mdss_base +
-				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
-			blksize = cmd1;
-			while (blksize--) {
-				SDE_REGDMA_READ(mem_rdptr, cmd0);
-				SDEROT_DBG("DATA %8.8x\n", cmd0);
-				writel_relaxed(cmd0, addr);
-				addr += 4;
-			}
-			break;
-		default:
-			/* Other not supported OP mode
-			 * Skip data for now for unregonized OP mode
-			 */
-			SDEROT_DBG("UNDEFINED\n");
-			mem_rdptr += sizeof(u32);
-			break;
-		}
-	}
-	SDEROT_DBG("END %d\n", ctx->timestamp);
-
-	return ctx->timestamp;
-}
-
-/*
- * sde_hw_rotator_start_regdma - start regdma operation
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- */
-static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *rot = ctx->rot;
-	char __iomem *wrptr;
-	u32  regdmaSlot;
-	u32  offset;
-	u32  length;
-	u32  ts_length;
-	u32  enableInt;
-	u32  swts = 0;
-	u32  mask = 0;
-	u32  trig_sel;
-	bool int_trigger = false;
-
-	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
-
-	/* Enable HW timestamp if supported in rotator */
-	if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
-		SDE_REGDMA_MODIFY(wrptr, ROTTOP_ROT_CNTR_CTRL,
-				~BIT(queue_id), BIT(queue_id));
-		int_trigger = true;
-	} else if (ctx->sbuf_mode) {
-		int_trigger = true;
-	}
-
-	/*
-	 * Last ROT command must be ROT_START before REGDMA start
-	 */
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
-
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-
-	/*
-	 * Start REGDMA with command offset and size
-	 */
-	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	length = (wrptr - ctx->regdma_base) / 4;
-	offset = (ctx->regdma_base - (rot->mdss_base +
-				REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
-	enableInt = ((ctx->timestamp & 1) + 1) << 30;
-	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
-			REGDMA_CMD_TRIG_SEL_SW_START;
-
-	SDEROT_DBG(
-		"regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
-		queue_id, regdmaSlot, enableInt, length, offset,
-		ctx->timestamp);
-
-	/* ensure the command packet is issued before the submit command */
-	wmb();
-
-	/* REGDMA submission for current context */
-	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				(int_trigger ? enableInt : 0) | trig_sel |
-				((length & 0x3ff) << 14) | offset);
-		swts = ctx->timestamp;
-		mask = ~SDE_REGDMA_SWTS_MASK;
-	} else {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				(int_trigger ? enableInt : 0) | trig_sel |
-				((length & 0x3ff) << 14) | offset);
-		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
-		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
-	}
-
-	SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);
-
-	/* sw timestamp update can only be used in offline multi-context mode */
-	if (!int_trigger) {
-		/* Write timestamp after previous rotator job finished */
-		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
-		offset += length;
-		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
-		ts_length /= sizeof(u32);
-		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
-
-		/* ensure command packet is issue before the submit command */
-		wmb();
-
-		SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);
-
-		if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
-			SDE_ROTREG_WRITE(rot->mdss_base,
-					REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-					enableInt | (ts_length << 14) | offset);
-		} else {
-			SDE_ROTREG_WRITE(rot->mdss_base,
-					REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-					enableInt | (ts_length << 14) | offset);
-		}
-	}
-
-	/* Update command queue write ptr */
-	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
-
-	return ctx->timestamp;
-}
-
-/*
- * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- * @flags: Option flag
- */
-static u32 sde_hw_rotator_wait_done_no_regdma(
-		struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id, u32 flag)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	int rc = 0;
-	u32 sts = 0;
-	u32 status;
-	unsigned long flags;
-
-	if (rot->irq_num >= 0) {
-		SDEROT_DBG("Wait for Rotator completion\n");
-		rc = wait_for_completion_timeout(&ctx->rot_comp,
-				ctx->sbuf_mode ?
-				msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
-				msecs_to_jiffies(rot->koff_timeout));
-
-		spin_lock_irqsave(&rot->rotisr_lock, flags);
-		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
-		if (rc == 0) {
-			/*
-			 * Timeout, there might be error,
-			 * or rotator still busy
-			 */
-			if (status & ROT_BUSY_BIT)
-				SDEROT_ERR(
-					"Timeout waiting for rotator done\n");
-			else if (status & ROT_ERROR_BIT)
-				SDEROT_ERR(
-					"Rotator report error status\n");
-			else
-				SDEROT_WARN(
-					"Timeout waiting, but rotator job is done!!\n");
-
-			sde_hw_rotator_disable_irq(rot);
-		}
-		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-	} else {
-		int cnt = 200;
-
-		do {
-			udelay(500);
-			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
-			cnt--;
-		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
-				&& ((status & ROT_ERROR_BIT) == 0));
-
-		if (status & ROT_ERROR_BIT)
-			SDEROT_ERR("Rotator error\n");
-		else if (status & ROT_BUSY_BIT)
-			SDEROT_ERR("Rotator busy\n");
-
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
-				ROT_DONE_CLEAR);
-	}
-
-	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
-
-	return sts;
-}
-
-/*
- * sde_hw_rotator_wait_done_regdma - wait for regdma completion
- * @ctx: Pointer to rotator context
- * @queue_id: Priority queue identifier
- * @flags: Option flag
- */
-static u32 sde_hw_rotator_wait_done_regdma(
-		struct sde_hw_rotator_context *ctx,
-		enum sde_rot_queue_prio queue_id, u32 flag)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	int rc = 0;
-	bool timeout = false;
-	bool pending;
-	bool abort;
-	u32 status;
-	u32 last_isr;
-	u32 last_ts;
-	u32 int_id;
-	u32 swts;
-	u32 sts = 0;
-	u32 ubwcerr;
-	u32 hwts[ROT_QUEUE_MAX];
-	unsigned long flags;
-
-	if (rot->irq_num >= 0) {
-		SDEROT_DBG("Wait for REGDMA completion, ctx:%pK, ts:%X\n",
-				ctx, ctx->timestamp);
-		rc = wait_event_timeout(ctx->regdma_waitq,
-				!rot->ops.get_pending_ts(rot, ctx, &swts),
-				ctx->sbuf_mode ?
-				msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
-				msecs_to_jiffies(rot->koff_timeout));
-
-		ATRACE_INT("sde_rot_done", 0);
-		spin_lock_irqsave(&rot->rotisr_lock, flags);
-
-		last_isr = ctx->last_regdma_isr_status;
-		last_ts  = ctx->last_regdma_timestamp;
-		abort    = ctx->abort;
-		status   = last_isr & REGDMA_INT_MASK;
-		int_id   = last_ts & 1;
-		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
-				status, int_id, last_ts);
-
-		if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
-			timeout = true;
-			pending = rot->ops.get_pending_ts(rot, ctx, &swts);
-
-			/* cache ubwcerr and hw timestamps while locked */
-			ubwcerr = SDE_ROTREG_READ(rot->mdss_base,
-					ROT_SSPP_UBWC_ERROR_STATUS);
-			hwts[ROT_QUEUE_HIGH_PRIORITY] =
-					__sde_hw_rotator_get_timestamp(rot,
-					ROT_QUEUE_HIGH_PRIORITY);
-			hwts[ROT_QUEUE_LOW_PRIORITY] =
-					__sde_hw_rotator_get_timestamp(rot,
-					ROT_QUEUE_LOW_PRIORITY);
-
-			if (ubwcerr || abort) {
-				/*
-				 * Perform recovery for ROT SSPP UBWC decode
-				 * error.
-				 * - SW reset rotator hw block
-				 * - reset TS logic so all pending rotation
-				 *   in hw queue got done signalled
-				 */
-				spin_unlock_irqrestore(&rot->rotisr_lock,
-						flags);
-				if (!sde_hw_rotator_reset(rot, ctx))
-					status = REGDMA_INCOMPLETE_CMD;
-				else
-					status = ROT_ERROR_BIT;
-				spin_lock_irqsave(&rot->rotisr_lock, flags);
-			} else {
-				status = ROT_ERROR_BIT;
-			}
-		} else {
-			if (rc == 1)
-				SDEROT_WARN(
-					"REGDMA done but no irq, ts:0x%X/0x%X\n",
-					ctx->timestamp, swts);
-			status = 0;
-		}
-
-		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-		/* dump rot status after releasing lock if timeout occurred */
-		if (timeout) {
-			SDEROT_ERR(
-				"TIMEOUT, ts:0x%X/0x%X, pending:%d, abort:%d\n",
-				ctx->timestamp, swts, pending, abort);
-			SDEROT_ERR(
-				"Cached: HW ts0/ts1 = %x/%x, ubwcerr = %x\n",
-				hwts[ROT_QUEUE_HIGH_PRIORITY],
-				hwts[ROT_QUEUE_LOW_PRIORITY], ubwcerr);
-
-			if (status & REGDMA_WATCHDOG_INT)
-				SDEROT_ERR("REGDMA watchdog interrupt\n");
-			else if (status & REGDMA_INVALID_DESCRIPTOR)
-				SDEROT_ERR("REGDMA invalid descriptor\n");
-			else if (status & REGDMA_INCOMPLETE_CMD)
-				SDEROT_ERR("REGDMA incomplete command\n");
-			else if (status & REGDMA_INVALID_CMD)
-				SDEROT_ERR("REGDMA invalid command\n");
-
-			_sde_hw_rotator_dump_status(rot, &ubwcerr);
-		}
-	} else {
-		int cnt = 200;
-		bool pending;
-
-		do {
-			udelay(500);
-			last_isr = SDE_ROTREG_READ(rot->mdss_base,
-					REGDMA_CSR_REGDMA_INT_STATUS);
-			pending = rot->ops.get_pending_ts(rot, ctx, &swts);
-			cnt--;
-		} while ((cnt > 0) && pending &&
-				((last_isr & REGDMA_INT_ERR_MASK) == 0));
-
-		if (last_isr & REGDMA_INT_ERR_MASK) {
-			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
-				ctx->timestamp, swts, last_isr);
-			_sde_hw_rotator_dump_status(rot, NULL);
-			status = ROT_ERROR_BIT;
-		} else if (pending) {
-			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
-				ctx->timestamp, swts, last_isr);
-			_sde_hw_rotator_dump_status(rot, NULL);
-			status = ROT_ERROR_BIT;
-		} else {
-			status = 0;
-		}
-
-		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
-				last_isr);
-	}
-
-	sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
-
-	if (status & ROT_ERROR_BIT)
-		SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
-				"vbif_dbg_bus", "panic");
-
-	return sts;
-}
-
-/*
- * setup_rotator_ops - setup callback functions for the low-level HAL
- * @ops: Pointer to low-level ops callback
- * @mode: Operation mode (non-regdma or regdma)
- * @use_hwts: HW timestamp support mode
- */
-static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
-		enum sde_rotator_regdma_mode mode,
-		bool use_hwts)
-{
-	ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
-	ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
-	if (mode == ROT_REGDMA_ON) {
-		ops->start_rotator = sde_hw_rotator_start_regdma;
-		ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
-	} else {
-		ops->start_rotator = sde_hw_rotator_start_no_regdma;
-		ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
-	}
-
-	if (use_hwts) {
-		ops->get_pending_ts = sde_hw_rotator_pending_hwts;
-		ops->update_ts = sde_hw_rotator_update_hwts;
-	} else {
-		ops->get_pending_ts = sde_hw_rotator_pending_swts;
-		ops->update_ts = sde_hw_rotator_update_swts;
-	}
-}
-
-/*
- * sde_hw_rotator_swts_create - create software timestamp buffer
- * @rot: Pointer to rotator hw
- *
- * This buffer is used by regdma to keep track of last completed command.
- */
-static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
-{
-	int rc = 0;
-	struct sde_mdp_img_data *data;
-	u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
-
-	if (bufsize < SZ_4K)
-		bufsize = SZ_4K;
-
-	data = &rot->swts_buf;
-	data->len = bufsize;
-	data->srcp_dma_buf = sde_rot_get_dmabuf(data);
-	if (!data->srcp_dma_buf) {
-		SDEROT_ERR("Fail dmabuf create\n");
-		return -ENOMEM;
-	}
-
-	sde_smmu_ctrl(1);
-
-	data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
-			&rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
-	if (IS_ERR_OR_NULL(data->srcp_attachment)) {
-		SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
-		rc = -ENOMEM;
-		goto err_put;
-	}
-
-	data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
-			DMA_BIDIRECTIONAL);
-	if (IS_ERR_OR_NULL(data->srcp_table)) {
-		SDEROT_ERR("dma_buf_map_attachment error\n");
-		rc = -ENOMEM;
-		goto err_detach;
-	}
-
-	rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
-			SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
-			&data->len, DMA_BIDIRECTIONAL);
-	if (rc < 0) {
-		SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
-		goto err_unmap;
-	}
-
-	data->mapped = true;
-	SDEROT_DBG("swts buffer mapped: %pad/%lx va:%pK\n", &data->addr,
-			data->len, rot->swts_buffer);
-
-	sde_smmu_ctrl(0);
-
-	return rc;
-err_unmap:
-	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
-			DMA_FROM_DEVICE);
-err_detach:
-	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
-err_put:
-	data->srcp_dma_buf = NULL;
-
-	sde_smmu_ctrl(0);
-	return rc;
-}
-
-/*
- * sde_hw_rotator_swts_destroy - destroy software timestamp buffer
- * @rot: Pointer to rotator hw
- */
-static void sde_hw_rotator_swts_destroy(struct sde_hw_rotator *rot)
-{
-	struct sde_mdp_img_data *data;
-
-	data = &rot->swts_buf;
-
-	sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
-			DMA_FROM_DEVICE, data->srcp_dma_buf);
-	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
-			DMA_FROM_DEVICE);
-	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
-	dma_buf_put(data->srcp_dma_buf);
-	data->addr = 0;
-	data->srcp_dma_buf = NULL;
-	data->srcp_attachment = NULL;
-	data->mapped = false;
-}
-
-/*
- * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
- *                              PM event occurs
- * @mgr: Pointer to rotator manager
- * @pmon: Boolean indicate an on/off power event
- */
-void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
-{
-	struct sde_hw_rotator *rot;
-	u32 l_ts, h_ts, l_hwts, h_hwts;
-	u32 rotsts, regdmasts, rotopmode;
-
-	/*
-	 * Check last HW timestamp with SW timestamp before power off event.
-	 * If there is a mismatch, that will be quite possible the rotator HW
-	 * is either hang or not finishing last submitted job. In that case,
-	 * it is best to do a timeout eventlog to capture some good events
-	 * log data for analysis.
-	 */
-	if (!pmon && mgr && mgr->hw_data) {
-		rot = mgr->hw_data;
-		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]) &
-				SDE_REGDMA_SWTS_MASK;
-		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]) &
-				SDE_REGDMA_SWTS_MASK;
-
-		/* Need to turn on clock to access rotator register */
-		sde_rotator_clk_ctrl(mgr, true);
-		l_hwts = __sde_hw_rotator_get_timestamp(rot,
-				ROT_QUEUE_LOW_PRIORITY);
-		h_hwts = __sde_hw_rotator_get_timestamp(rot,
-				ROT_QUEUE_HIGH_PRIORITY);
-		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
-				REGDMA_CSR_REGDMA_BLOCK_STATUS);
-		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
-		rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
-
-		SDEROT_DBG(
-			"swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
-				l_ts, h_ts, l_hwts, h_hwts,
-				regdmasts, rotsts);
-		SDEROT_EVTLOG(l_ts, h_ts, l_hwts, h_hwts, regdmasts, rotsts);
-
-		if (((l_ts != l_hwts) || (h_ts != h_hwts)) &&
-				((regdmasts & REGDMA_BUSY) ||
-				 (rotsts & ROT_STATUS_MASK))) {
-			SDEROT_ERR(
-				"Mismatch SWTS with HWTS: swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
-				l_ts, h_ts, l_hwts, h_hwts,
-				regdmasts, rotsts);
-			_sde_hw_rotator_dump_status(rot, NULL);
-			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
-					"vbif_dbg_bus", "panic");
-		} else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
-				((regdmasts & REGDMA_BUSY) ||
-						(rotsts & ROT_BUSY_BIT))) {
-			/*
-			 * rotator can stuck in inline while mdp is detached
-			 */
-			SDEROT_WARN(
-				"Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
-				regdmasts, rotsts, rotopmode);
-			sde_hw_rotator_reset(rot, NULL);
-		} else if ((regdmasts & REGDMA_BUSY) ||
-				(rotsts & ROT_BUSY_BIT)) {
-			_sde_hw_rotator_dump_status(rot, NULL);
-			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
-					"vbif_dbg_bus", "panic");
-			sde_hw_rotator_reset(rot, NULL);
-		}
-
-		/* Turn off rotator clock after checking rotator registers */
-		sde_rotator_clk_ctrl(mgr, false);
-	}
-}
-
-/*
- * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
- *                               PM event occurs
- * @mgr: Pointer to rotator manager
- * @pmon: Boolean indicate an on/off power event
- */
-void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
-{
-	struct sde_hw_rotator *rot;
-	u32 l_ts, h_ts;
-
-	/*
-	 * After a power on event, the rotator HW is reset to default setting.
-	 * It is necessary to synchronize the SW timestamp with the HW.
-	 */
-	if (pmon && mgr && mgr->hw_data) {
-		rot = mgr->hw_data;
-		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
-		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
-
-		SDEROT_DBG("h_ts:0x%x, l_ts;0x%x\n", h_ts, l_ts);
-		SDEROT_EVTLOG(h_ts, l_ts);
-		rot->reset_hw_ts = true;
-		rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] =
-				l_ts & SDE_REGDMA_SWTS_MASK;
-		rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] =
-				h_ts & SDE_REGDMA_SWTS_MASK;
-	}
-}
-
-/*
- * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
- * @mgr: Pointer to rotator manager
- */
-static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *rot;
-
-	if (!mgr || !mgr->pdev || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	rot = mgr->hw_data;
-	if (rot->irq_num >= 0)
-		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
-
-	if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
-			rot->mode == ROT_REGDMA_ON)
-		sde_hw_rotator_swts_destroy(rot);
-
-	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
-	mgr->hw_data = NULL;
-}
-
-/*
- * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
- * @mgr: Pointer to rotator manager
- * @pipe_id: pipe identifier (not used)
- * @wb_id: writeback identifier/priority queue identifier
- *
- * This function allocates a new hw rotator resource for the given priority.
- */
-static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
-		struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator_resource_info *resinfo;
-
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return NULL;
-	}
-
-	/*
-	 * Allocate rotator resource info. Each allocation is per
-	 * HW priority queue
-	 */
-	resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
-	if (!resinfo) {
-		SDEROT_ERR("Failed allocation HW rotator resource info\n");
-		return NULL;
-	}
-
-	resinfo->rot = mgr->hw_data;
-	resinfo->hw.wb_id = wb_id;
-	atomic_set(&resinfo->hw.num_active, 0);
-	init_waitqueue_head(&resinfo->hw.wait_queue);
-
-	/* For non-regdma, only support one active session */
-	if (resinfo->rot->mode == ROT_REGDMA_OFF)
-		resinfo->hw.max_active = 1;
-	else {
-		resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
-
-		if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
-				(!resinfo->rot->swts_buf.mapped))
-			sde_hw_rotator_swts_create(resinfo->rot);
-	}
-
-	if (resinfo->rot->irq_num >= 0)
-		sde_hw_rotator_enable_irq(resinfo->rot);
-
-	SDEROT_DBG("New rotator resource:%pK, priority:%d\n",
-			resinfo, wb_id);
-
-	return &resinfo->hw;
-}
-
-/*
- * sde_hw_rotator_free_ext - free the given rotator resource
- * @mgr: Pointer to rotator manager
- * @hw: Pointer to rotator resource
- */
-static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
-		struct sde_rot_hw_resource *hw)
-{
-	struct sde_hw_rotator_resource_info *resinfo;
-
-	if (!mgr || !mgr->hw_data)
-		return;
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-
-	SDEROT_DBG(
-		"Free rotator resource:%pK, priority:%d, active:%d, pending:%d\n",
-		resinfo, hw->wb_id, atomic_read(&hw->num_active),
-		hw->pending_count);
-
-	if (resinfo->rot->irq_num >= 0)
-		sde_hw_rotator_disable_irq(resinfo->rot);
-
-	devm_kfree(&mgr->pdev->dev, resinfo);
-}
-
-/*
- * sde_hw_rotator_alloc_rotctx - allocate rotator context
- * @rot: Pointer to rotator hw
- * @hw: Pointer to rotator resource
- * @session_id: Session identifier of this context
- * @sequence_id: Sequence identifier of this request
- * @sbuf_mode: true if stream buffer is requested
- *
- * This function allocates a new rotator context for the given session id.
- */
-static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
-		struct sde_hw_rotator *rot,
-		struct sde_rot_hw_resource *hw,
-		u32    session_id,
-		u32    sequence_id,
-		bool   sbuf_mode)
-{
-	struct sde_hw_rotator_context *ctx;
-
-	/* Allocate rotator context */
-	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		SDEROT_ERR("Failed allocation HW rotator context\n");
-		return NULL;
-	}
-
-	ctx->rot        = rot;
-	ctx->q_id       = hw->wb_id;
-	ctx->session_id = session_id;
-	ctx->sequence_id = sequence_id;
-	ctx->hwres      = hw;
-	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
-	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
-	ctx->is_secure  = false;
-	ctx->sbuf_mode  = sbuf_mode;
-	INIT_LIST_HEAD(&ctx->list);
-
-	ctx->regdma_base  = rot->cmd_wr_ptr[ctx->q_id]
-		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
-	ctx->regdma_wrptr = ctx->regdma_base;
-	ctx->ts_addr      = (dma_addr_t)((u32 *)rot->swts_buf.addr +
-		ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
-		sde_hw_rotator_get_regdma_ctxidx(ctx));
-
-	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
-
-	init_completion(&ctx->rot_comp);
-	init_waitqueue_head(&ctx->regdma_waitq);
-
-	/* Store rotator context for lookup purpose */
-	sde_hw_rotator_put_ctx(ctx);
-
-	SDEROT_DBG(
-		"New rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
-		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
-		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active),
-		ctx->sbuf_mode);
-
-	return ctx;
-}
-
-/*
- * sde_hw_rotator_free_rotctx - free the given rotator context
- * @rot: Pointer to rotator hw
- * @ctx: Pointer to rotator context
- */
-static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
-		struct sde_hw_rotator_context *ctx)
-{
-	if (!rot || !ctx)
-		return;
-
-	SDEROT_DBG(
-		"Free rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
-		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
-		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active),
-		ctx->sbuf_mode);
-
-	/* Clear rotator context from lookup purpose */
-	sde_hw_rotator_clr_ctx(ctx);
-
-	devm_kfree(&rot->pdev->dev, ctx);
-}
-
-/*
- * sde_hw_rotator_config - configure hw for the given rotation entry
- * @hw: Pointer to rotator resource
- * @entry: Pointer to rotation entry
- *
- * This function setup the fetch/writeback/rotator blocks, as well as VBIF
- * based on the given rotation entry.
- */
-static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
-		struct sde_rot_entry *entry)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_resource_info *resinfo;
-	struct sde_hw_rotator_context *ctx;
-	struct sde_hw_rot_sspp_cfg sspp_cfg;
-	struct sde_hw_rot_wb_cfg wb_cfg;
-	u32 danger_lut = 0;	/* applicable for realtime client only */
-	u32 safe_lut = 0;	/* applicable for realtime client only */
-	u32 flags = 0;
-	u32 rststs = 0;
-	struct sde_rotation_item *item;
-	int ret;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry\n");
-		return -EINVAL;
-	}
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-	rot = resinfo->rot;
-	item = &entry->item;
-
-	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
-			item->sequence_id, item->output.sbuf);
-	if (!ctx) {
-		SDEROT_ERR("Failed allocating rotator context!!\n");
-		return -EINVAL;
-	}
-
-	/* save entry for debugging purposes */
-	ctx->last_entry = entry;
-
-	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
-		if (entry->dst_buf.sbuf) {
-			u32 op_mode;
-
-			if (entry->item.trigger ==
-					SDE_ROTATOR_TRIGGER_COMMAND)
-				ctx->start_ctrl = (rot->cmd_trigger << 4);
-			else if (entry->item.trigger ==
-					SDE_ROTATOR_TRIGGER_VIDEO)
-				ctx->start_ctrl = (rot->vid_trigger << 4);
-			else
-				ctx->start_ctrl = 0;
-
-			ctx->sys_cache_mode = BIT(15) |
-					((item->output.scid & 0x1f) << 8) |
-					(item->output.writeback ? 0x5 : 0);
-
-			ctx->op_mode = BIT(4) |
-				((ctx->rot->sbuf_headroom & 0xff) << 8);
-
-			/* detect transition to inline mode */
-			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
-					ROTTOP_OP_MODE) >> 4) & 0x3;
-			if (!op_mode) {
-				u32 status;
-
-				status = SDE_ROTREG_READ(rot->mdss_base,
-						ROTTOP_STATUS);
-				if (status & BIT(0)) {
-					SDEROT_ERR("rotator busy 0x%x\n",
-							status);
-					_sde_hw_rotator_dump_status(rot, NULL);
-					SDEROT_EVTLOG_TOUT_HANDLER("rot",
-							"vbif_dbg_bus",
-							"panic");
-				}
-			}
-
-		} else {
-			ctx->start_ctrl = BIT(0);
-			ctx->sys_cache_mode = 0;
-			ctx->op_mode = 0;
-		}
-	} else  {
-		ctx->start_ctrl = BIT(0);
-	}
-
-	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
-
-	/*
-	 * if Rotator HW is reset, but missing PM event notification, we
-	 * need to init the SW timestamp automatically.
-	 */
-	rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
-	if (!rot->reset_hw_ts && rststs) {
-		u32 l_ts, h_ts, l_hwts, h_hwts;
-
-		h_hwts = __sde_hw_rotator_get_timestamp(rot,
-				ROT_QUEUE_HIGH_PRIORITY);
-		l_hwts = __sde_hw_rotator_get_timestamp(rot,
-				ROT_QUEUE_LOW_PRIORITY);
-		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
-		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
-		SDEROT_EVTLOG(0xbad0, rststs, l_hwts, h_hwts, l_ts, h_ts);
-
-		if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY) {
-			h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
-			l_ts &= SDE_REGDMA_SWTS_MASK;
-		} else {
-			l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
-			h_ts &= SDE_REGDMA_SWTS_MASK;
-		}
-
-		SDEROT_DBG("h_ts:0x%x, l_ts;0x%x\n", h_ts, l_ts);
-		SDEROT_EVTLOG(0x900d, h_ts, l_ts);
-		rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] = l_ts;
-		rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] = h_ts;
-
-		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY, h_ts);
-		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY, l_ts);
-		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
-
-		/* ensure write is issued to the rotator HW */
-		wmb();
-	}
-
-	if (rot->reset_hw_ts) {
-		SDEROT_EVTLOG(rot->last_hwts[ROT_QUEUE_LOW_PRIORITY],
-				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
-		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY,
-				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
-		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY,
-				rot->last_hwts[ROT_QUEUE_LOW_PRIORITY]);
-		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
-
-		/* ensure write is issued to the rotator HW */
-		wmb();
-		rot->reset_hw_ts = false;
-	}
-
-	flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
-			SDE_ROT_FLAG_FLIP_LR : 0;
-	flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
-			SDE_ROT_FLAG_FLIP_UD : 0;
-	flags |= (item->flags & SDE_ROTATION_90) ?
-			SDE_ROT_FLAG_ROT_90 : 0;
-	flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
-			SDE_ROT_FLAG_DEINTERLACE : 0;
-	flags |= (item->flags & SDE_ROTATION_SECURE) ?
-			SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
-	flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
-			SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
-
-
-	sspp_cfg.img_width = item->input.width;
-	sspp_cfg.img_height = item->input.height;
-	sspp_cfg.fps = entry->perf->config.frame_rate;
-	sspp_cfg.bw = entry->perf->bw;
-	sspp_cfg.fmt = sde_get_format_params(item->input.format);
-	if (!sspp_cfg.fmt) {
-		SDEROT_ERR("null format\n");
-		ret = -EINVAL;
-		goto error;
-	}
-	sspp_cfg.src_rect = &item->src_rect;
-	sspp_cfg.data = &entry->src_buf;
-	sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
-			item->input.height, &sspp_cfg.src_plane,
-			0, /* No bwc_mode */
-			(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
-					true : false);
-
-	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
-			&sspp_cfg, danger_lut, safe_lut,
-			entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
-
-	wb_cfg.img_width = item->output.width;
-	wb_cfg.img_height = item->output.height;
-	wb_cfg.fps = entry->perf->config.frame_rate;
-	wb_cfg.bw = entry->perf->bw;
-	wb_cfg.fmt = sde_get_format_params(item->output.format);
-	if (!wb_cfg.fmt) {
-		SDEROT_ERR("null format\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
-	wb_cfg.dst_rect = &item->dst_rect;
-	wb_cfg.data = &entry->dst_buf;
-	sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
-			item->output.height, &wb_cfg.dst_plane,
-			0, /* No bwc_mode */
-			(flags & SDE_ROT_FLAG_ROT_90) ? true : false);
-
-	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
-	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
-	wb_cfg.prefill_bw = item->prefill_bw;
-
-	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
-
-	/* setup VA mapping for debugfs */
-	if (rot->dbgmem) {
-		sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
-				&item->input,
-				&entry->src_buf);
-
-		sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
-				&item->output,
-				&entry->dst_buf);
-	}
-
-	SDEROT_EVTLOG(ctx->timestamp, flags,
-			item->input.width, item->input.height,
-			item->output.width, item->output.height,
-			entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
-			item->input.format, item->output.format,
-			entry->perf->config.frame_rate);
-
-	/* initialize static vbif setting */
-	sde_mdp_init_vbif();
-
-	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
-		struct sde_mdp_set_ot_params ot_params;
-
-		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
-		ot_params.xin_id = XIN_SSPP;
-		ot_params.num = 0; /* not used */
-		ot_params.width = entry->perf->config.input.width;
-		ot_params.height = entry->perf->config.input.height;
-		ot_params.fps = entry->perf->config.frame_rate;
-		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
-		ot_params.reg_off_mdp_clk_ctrl =
-				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
-		ot_params.bit_off_mdp_clk_ctrl =
-				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
-		ot_params.fmt = ctx->is_traffic_shaping ?
-			SDE_PIX_FMT_ABGR_8888 :
-			entry->perf->config.input.format;
-		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
-		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
-		sde_mdp_set_ot_limit(&ot_params);
-	}
-
-	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
-		struct sde_mdp_set_ot_params ot_params;
-
-		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
-		ot_params.xin_id = XIN_WRITEBACK;
-		ot_params.num = 0; /* not used */
-		ot_params.width = entry->perf->config.input.width;
-		ot_params.height = entry->perf->config.input.height;
-		ot_params.fps = entry->perf->config.frame_rate;
-		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
-		ot_params.reg_off_mdp_clk_ctrl =
-				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
-		ot_params.bit_off_mdp_clk_ctrl =
-				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
-		ot_params.fmt = ctx->is_traffic_shaping ?
-			SDE_PIX_FMT_ABGR_8888 :
-			entry->perf->config.input.format;
-		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
-		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
-		sde_mdp_set_ot_limit(&ot_params);
-	}
-
-	if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map))	{
-		u32 qos_lut = 0; /* low priority for nrt read client */
-
-		trace_rot_perf_set_qos_luts(XIN_SSPP, sspp_cfg.fmt->format,
-			qos_lut, sde_mdp_is_linear_format(sspp_cfg.fmt));
-
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
-	}
-
-	/* VBIF QoS and other settings */
-	if (!ctx->sbuf_mode)
-		sde_hw_rotator_vbif_setting(rot);
-
-	return 0;
-
-error:
-	sde_hw_rotator_free_rotctx(rot, ctx);
-	return ret;
-}
-
-/*
- * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
- * @hw: Pointer to rotator resource
- * @entry: Pointer to rotation entry
- *
- * This function cancels a previously configured rotation entry.
- */
-static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
-		struct sde_rot_entry *entry)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_resource_info *resinfo;
-	struct sde_hw_rotator_context *ctx;
-	unsigned long flags;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry\n");
-		return -EINVAL;
-	}
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-	rot = resinfo->rot;
-
-	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
-			entry->item.sequence_id, hw->wb_id);
-	if (!ctx) {
-		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
-				entry->item.session_id);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&rot->rotisr_lock, flags);
-	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
-	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-	SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
-
-	if (rot->dbgmem) {
-		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
-		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
-	}
-
-	/* Current rotator context job is finished, time to free up */
-	sde_hw_rotator_free_rotctx(rot, ctx);
-
-	return 0;
-}
-
-/*
- * sde_hw_rotator_kickoff - kickoff processing on the given entry
- * @hw: Pointer to rotator resource
- * @entry: Pointer to rotation entry
- */
-static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
-		struct sde_rot_entry *entry)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_resource_info *resinfo;
-	struct sde_hw_rotator_context *ctx;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry\n");
-		return -EINVAL;
-	}
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-	rot = resinfo->rot;
-
-	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
-			entry->item.sequence_id, hw->wb_id);
-	if (!ctx) {
-		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
-				entry->item.session_id);
-		return -EINVAL;
-	}
-
-	rot->ops.start_rotator(ctx, ctx->q_id);
-
-	return 0;
-}
-
-static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
-		struct sde_rot_entry *entry)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_resource_info *resinfo;
-	struct sde_hw_rotator_context *ctx;
-	unsigned long flags;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry\n");
-		return -EINVAL;
-	}
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-	rot = resinfo->rot;
-
-	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
-			entry->item.sequence_id, hw->wb_id);
-	if (!ctx) {
-		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
-				entry->item.session_id);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&rot->rotisr_lock, flags);
-	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
-	ctx->abort = true;
-	wake_up_all(&ctx->regdma_waitq);
-	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-	SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
-
-	return 0;
-}
-
-/*
- * sde_hw_rotator_wait4done - wait for completion notification
- * @hw: Pointer to rotator resource
- * @entry: Pointer to rotation entry
- *
- * This function blocks until the given entry is complete, error
- * is detected, or timeout.
- */
-static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
-		struct sde_rot_entry *entry)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_resource_info *resinfo;
-	struct sde_hw_rotator_context *ctx;
-	int ret;
-
-	if (!hw || !entry) {
-		SDEROT_ERR("null hw resource/entry\n");
-		return -EINVAL;
-	}
-
-	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
-	rot = resinfo->rot;
-
-	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
-			entry->item.sequence_id, hw->wb_id);
-	if (!ctx) {
-		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
-				entry->item.session_id);
-		return -EINVAL;
-	}
-
-	ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
-
-	if (rot->dbgmem) {
-		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
-		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
-	}
-
-	/* Current rotator context job is finished, time to free up*/
-	sde_hw_rotator_free_rotctx(rot, ctx);
-
-	return ret;
-}
-
-/*
- * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
- * @rot: Pointer to hw rotator
- *
- * This function initializes feature and/or capability bitmask based on
- * h/w version read from the device.
- */
-static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 hw_version;
-
-	if (!mdata) {
-		SDEROT_ERR("null rotator data\n");
-		return -EINVAL;
-	}
-
-	hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
-	SDEROT_DBG("hw version %8.8x\n", hw_version);
-
-	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
-	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
-	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
-	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
-
-	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
-
-	/* features exposed via rotator top h/w version */
-	if (hw_version != SDE_ROT_TYPE_V1_0) {
-		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
-		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE,  mdata->sde_caps_map);
-	}
-
-	set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
-
-	mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
-	mdata->nrt_vbif_dbg_bus_size =
-			ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
-
-	mdata->rot_dbg_bus = rot_dbgbus_r3;
-	mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
-
-	mdata->regdump = sde_rot_r3_regdump;
-	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
-	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
-
-	/* features exposed via mdss h/w version */
-	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_600)) {
-		SDEROT_DBG("Supporting sys cache inline rotation\n");
-		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_UBWC_4,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
-		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_inpixfmts;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
-		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_outpixfmts;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
-		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_inpixfmts_sbuf;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
-		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_outpixfmts_sbuf;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
-		rot->downscale_caps =
-			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
-	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-		SDE_MDP_HW_REV_500)) {
-		SDEROT_DBG("Supporting sys cache inline rotation\n");
-		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_UBWC_3,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
-		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_inpixfmts;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
-		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_outpixfmts;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
-		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_inpixfmts_sbuf;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
-		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_outpixfmts_sbuf;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
-		rot->downscale_caps =
-			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
-	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-				SDE_MDP_HW_REV_530) ||
-				IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-					SDE_MDP_HW_REV_520)) {
-		SDEROT_DBG("Supporting sys cache inline rotation\n");
-		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
-		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_inpixfmts;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
-		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_outpixfmts;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
-		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_inpixfmts_sbuf;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
-		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_outpixfmts_sbuf;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
-		rot->downscale_caps =
-			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
-	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-				SDE_MDP_HW_REV_400) ||
-			IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
-				SDE_MDP_HW_REV_410)) {
-		SDEROT_DBG("Supporting sys cache inline rotation\n");
-		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
-		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
-		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_inpixfmts;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
-		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v4_outpixfmts;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
-		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_inpixfmts_sbuf;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
-		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
-				sde_hw_rotator_v4_outpixfmts_sbuf;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
-				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
-		rot->downscale_caps =
-			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
-	} else {
-		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v3_inpixfmts;
-		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
-		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
-				sde_hw_rotator_v3_outpixfmts;
-		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
-				ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
-		rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
-			"LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
-			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
-	}
-
-	return 0;
-}
-
-/*
- * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
- * @irq: Interrupt number
- * @ptr: Pointer to private handle provided during registration
- *
- * This function services rotator interrupt and wakes up waiting client
- * with pending rotation requests already submitted to h/w.
- */
-static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
-{
-	struct sde_hw_rotator *rot = ptr;
-	struct sde_hw_rotator_context *ctx;
-	irqreturn_t ret = IRQ_NONE;
-	u32 isr;
-
-	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
-
-	SDEROT_DBG("intr_status = %8.8x\n", isr);
-
-	if (isr & ROT_DONE_MASK) {
-		if (rot->irq_num >= 0)
-			sde_hw_rotator_disable_irq(rot);
-		SDEROT_DBG("Notify rotator complete\n");
-
-		/* Normal rotator only 1 session, no need to lookup */
-		ctx = rot->rotCtx[0][0];
-		WARN_ON(ctx == NULL);
-		complete_all(&ctx->rot_comp);
-
-		spin_lock(&rot->rotisr_lock);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
-				ROT_DONE_CLEAR);
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	}
-
-	return ret;
-}
-
-/*
- * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
- * @irq: Interrupt number
- * @ptr: Pointer to private handle provided during registration
- *
- * This function services rotator interrupt, decoding the source of
- * events (high/low priority queue), and wakes up all waiting clients
- * with pending rotation requests already submitted to h/w.
- */
-static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *rot = ptr;
-	struct sde_hw_rotator_context *ctx, *tmp;
-	irqreturn_t ret = IRQ_NONE;
-	u32 isr, isr_tmp;
-	u32 ts;
-	u32 q_id;
-
-	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
-	/* acknowledge interrupt before reading latest timestamp */
-	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
-
-	SDEROT_DBG("intr_status = %8.8x\n", isr);
-
-	/* Any REGDMA status, including error and watchdog timer, should
-	 * trigger and wake up waiting thread
-	 */
-	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
-		spin_lock(&rot->rotisr_lock);
-
-		/*
-		 * Obtain rotator context based on timestamp from regdma
-		 * and low/high interrupt status
-		 */
-		if (isr & REGDMA_INT_HIGH_MASK) {
-			q_id = ROT_QUEUE_HIGH_PRIORITY;
-		} else if (isr & REGDMA_INT_LOW_MASK) {
-			q_id = ROT_QUEUE_LOW_PRIORITY;
-		} else {
-			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
-			goto done_isr_handle;
-		}
-
-		ts = __sde_hw_rotator_get_timestamp(rot, q_id);
-
-		/*
-		 * Timestamp packet is not available in sbuf mode.
-		 * Simulate timestamp update in the handler instead.
-		 */
-		if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
-				list_empty(&rot->sbuf_ctx[q_id]))
-			goto skip_sbuf;
-
-		ctx = NULL;
-		isr_tmp = isr;
-		list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
-			u32 mask;
-
-			mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
-				REGDMA_INT_0_MASK;
-			if (isr_tmp & mask) {
-				isr_tmp &= ~mask;
-				ctx = tmp;
-				ts = ctx->timestamp;
-				rot->ops.update_ts(rot, ctx->q_id, ts);
-				SDEROT_DBG("update swts:0x%X\n", ts);
-			}
-			SDEROT_EVTLOG(isr, tmp->timestamp);
-		}
-		if (ctx == NULL)
-			SDEROT_ERR("invalid swts ctx\n");
-skip_sbuf:
-		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
-
-		/*
-		 * Wake up all waiting context from the current and previous
-		 * SW Timestamp.
-		 */
-		while (ctx &&
-			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
-			ctx->last_regdma_isr_status = isr;
-			ctx->last_regdma_timestamp  = ts;
-			SDEROT_DBG(
-				"regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
-			wake_up_all(&ctx->regdma_waitq);
-
-			ts  = (ts - 1) & SDE_REGDMA_SWTS_MASK;
-			ctx = rot->rotCtx[q_id]
-				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
-		}
-
-done_isr_handle:
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	} else if (isr & REGDMA_INT_ERR_MASK) {
-		/*
-		 * For REGDMA Err, we save the isr info and wake up
-		 * all waiting contexts
-		 */
-		int i, j;
-
-		SDEROT_ERR(
-			"regdma err isr:%X, wake up all waiting contexts\n",
-			isr);
-
-		spin_lock(&rot->rotisr_lock);
-
-		for (i = 0; i < ROT_QUEUE_MAX; i++) {
-			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
-				ctx = rot->rotCtx[i][j];
-				if (ctx && ctx->last_regdma_isr_status == 0) {
-					ts = __sde_hw_rotator_get_timestamp(
-							rot, i);
-					ctx->last_regdma_isr_status = isr;
-					ctx->last_regdma_timestamp  = ts;
-					wake_up_all(&ctx->regdma_waitq);
-					SDEROT_DBG(
-						"Wakeup rotctx[%d][%d]:%pK\n",
-						i, j, ctx);
-				}
-			}
-		}
-
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	}
-
-	return ret;
-}
-
-/*
- * sde_hw_rotator_validate_entry - validate rotation entry
- * @mgr: Pointer to rotator manager
- * @entry: Pointer to rotation entry
- *
- * This function validates the given rotation entry and provides possible
- * fixup (future improvement) if available.  This function returns 0 if
- * the entry is valid, and returns error code otherwise.
- */
-static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
-		struct sde_rot_entry *entry)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *hw_data;
-	int ret = 0;
-	u16 src_w, src_h, dst_w, dst_h;
-	struct sde_rotation_item *item = &entry->item;
-	struct sde_mdp_format_params *fmt;
-
-	if (!mgr || !entry || !mgr->hw_data) {
-		SDEROT_ERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	hw_data = mgr->hw_data;
-
-	if (hw_data->maxlinewidth < item->src_rect.w) {
-		SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
-		return -EINVAL;
-	}
-
-	src_w = item->src_rect.w;
-	src_h = item->src_rect.h;
-
-	if (item->flags & SDE_ROTATION_90) {
-		dst_w = item->dst_rect.h;
-		dst_h = item->dst_rect.w;
-	} else {
-		dst_w = item->dst_rect.w;
-		dst_h = item->dst_rect.h;
-	}
-
-	entry->dnsc_factor_w = 0;
-	entry->dnsc_factor_h = 0;
-
-	if (item->output.sbuf &&
-			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
-		SDEROT_ERR("stream buffer not supported\n");
-		return -EINVAL;
-	}
-
-	if ((src_w != dst_w) || (src_h != dst_h)) {
-		if (!dst_w || !dst_h) {
-			SDEROT_DBG("zero output width/height not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-		if ((src_w % dst_w) || (src_h % dst_h)) {
-			SDEROT_DBG("non integral scale not support\n");
-			ret = -EINVAL;
-			goto dnsc_1p5_check;
-		}
-		entry->dnsc_factor_w = src_w / dst_w;
-		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
-				(entry->dnsc_factor_w > 64)) {
-			SDEROT_DBG("non power-of-2 w_scale not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-		entry->dnsc_factor_h = src_h / dst_h;
-		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
-				(entry->dnsc_factor_h > 64)) {
-			SDEROT_DBG("non power-of-2 h_scale not support\n");
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-	}
-
-	fmt = sde_get_format_params(item->output.format);
-	/*
-	 * Rotator downscale support max 4 times for UBWC format and
-	 * max 2 times for TP10/TP10_UBWC format
-	 */
-	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
-		SDEROT_DBG("max downscale for UBWC format is 4\n");
-		ret = -EINVAL;
-		goto dnsc_err;
-	}
-	if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
-		SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
-		ret = -EINVAL;
-	}
-	goto dnsc_err;
-
-dnsc_1p5_check:
-	/* Check for 1.5 downscale that only applies to V2 HW */
-	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
-		entry->dnsc_factor_w = src_w / dst_w;
-		if ((entry->dnsc_factor_w != 1) ||
-				((dst_w * 3) != (src_w * 2))) {
-			SDEROT_DBG(
-				"No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
-				src_w, dst_w);
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-
-		entry->dnsc_factor_h = src_h / dst_h;
-		if ((entry->dnsc_factor_h != 1) ||
-				((dst_h * 3) != (src_h * 2))) {
-			SDEROT_DBG(
-				"Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
-				src_h, dst_h);
-			ret = -EINVAL;
-			goto dnsc_err;
-		}
-		ret = 0;
-	}
-
-dnsc_err:
-	/* Downscaler does not support asymmetrical dnsc */
-	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
-		SDEROT_DBG("asymmetric downscale not support\n");
-		ret = -EINVAL;
-	}
-
-	if (ret) {
-		entry->dnsc_factor_w = 0;
-		entry->dnsc_factor_h = 0;
-	}
-	return ret;
-}
-
-/*
- * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
- * @mgr: Pointer to rotator manager
- * @attr: Pointer to device attribute interface
- * @buf: Pointer to output buffer
- * @len: Length of output buffer
- */
-static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
-		struct device_attribute *attr, char *buf, ssize_t len)
-{
-	struct sde_hw_rotator *hw_data;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int cnt = 0;
-
-	if (!mgr || !buf)
-		return 0;
-
-	hw_data = mgr->hw_data;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	/* insert capabilities here */
-	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
-		SPRINT("min_downscale=1.5\n");
-	else
-		SPRINT("min_downscale=2.0\n");
-
-	SPRINT("downscale_compression=1\n");
-
-	if (hw_data->downscale_caps)
-		SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
-
-	SPRINT("max_line_width=%d\n", sde_rotator_get_maxlinewidth(mgr));
-
-#undef SPRINT
-	return cnt;
-}
-
-/*
- * sde_hw_rotator_show_state - output state info to sysfs 'state' file
- * @mgr: Pointer to rotator manager
- * @attr: Pointer to device attribute interface
- * @buf: Pointer to output buffer
- * @len: Length of output buffer
- */
-static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
-		struct device_attribute *attr, char *buf, ssize_t len)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_hw_rotator_context *ctx;
-	int cnt = 0;
-	int num_active = 0;
-	int i, j;
-
-	if (!mgr || !buf) {
-		SDEROT_ERR("null parameters\n");
-		return 0;
-	}
-
-	rot = mgr->hw_data;
-
-#define SPRINT(fmt, ...) \
-		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
-
-	if (rot) {
-		SPRINT("rot_mode=%d\n", rot->mode);
-		SPRINT("irq_num=%d\n", rot->irq_num);
-
-		if (rot->mode == ROT_REGDMA_OFF) {
-			SPRINT("max_active=1\n");
-			SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
-		} else {
-			for (i = 0; i < ROT_QUEUE_MAX; i++) {
-				for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
-						j++) {
-					ctx = rot->rotCtx[i][j];
-
-					if (ctx) {
-						SPRINT(
-							"rotCtx[%d][%d]:%pK\n",
-							i, j, ctx);
-						++num_active;
-					}
-				}
-			}
-
-			SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
-			SPRINT("num_active=%d\n", num_active);
-		}
-	}
-
-#undef SPRINT
-	return cnt;
-}
-
-/*
- * sde_hw_rotator_get_pixfmt - get the indexed pixel format
- * @mgr: Pointer to rotator manager
- * @index: index of pixel format
- * @input: true for input port; false for output port
- * @mode: operating mode
- */
-static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
-		int index, bool input, u32 mode)
-{
-	struct sde_hw_rotator *rot;
-
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return 0;
-	}
-
-	rot = mgr->hw_data;
-
-	if (mode >= SDE_ROTATOR_MODE_MAX) {
-		SDEROT_ERR("invalid rotator mode %d\n", mode);
-		return 0;
-	}
-
-	if (input) {
-		if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
-			return rot->inpixfmts[mode][index];
-		else
-			return 0;
-	} else {
-		if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
-			return rot->outpixfmts[mode][index];
-		else
-			return 0;
-	}
-}
-
-/*
- * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
- * @mgr: Pointer to rotator manager
- * @pixfmt: pixel format to be verified
- * @input: true for input port; false for output port
- * @mode: operating mode
- */
-static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
-		bool input, u32 mode)
-{
-	struct sde_hw_rotator *rot;
-	const u32 *pixfmts;
-	u32 num_pixfmt;
-	int i;
-
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return false;
-	}
-
-	rot = mgr->hw_data;
-
-	if (mode >= SDE_ROTATOR_MODE_MAX) {
-		SDEROT_ERR("invalid rotator mode %d\n", mode);
-		return false;
-	}
-
-	if (input) {
-		pixfmts = rot->inpixfmts[mode];
-		num_pixfmt = rot->num_inpixfmt[mode];
-	} else {
-		pixfmts = rot->outpixfmts[mode];
-		num_pixfmt = rot->num_outpixfmt[mode];
-	}
-
-	if (!pixfmts || !num_pixfmt) {
-		SDEROT_ERR("invalid pixel format tables\n");
-		return false;
-	}
-
-	for (i = 0; i < num_pixfmt; i++)
-		if (pixfmts[i] == pixfmt)
-			return true;
-
-	return false;
-}
-
-/*
- * sde_hw_rotator_get_downscale_caps - get scaling capability string
- * @mgr: Pointer to rotator manager
- * @caps: Pointer to capability string buffer; NULL to return maximum length
- * @len: length of capability string buffer
- * return: length of capability string
- */
-static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
-		char *caps, int len)
-{
-	struct sde_hw_rotator *rot;
-	int rc = 0;
-
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	rot = mgr->hw_data;
-
-	if (rot->downscale_caps) {
-		if (caps)
-			rc = snprintf(caps, len, "%s", rot->downscale_caps);
-		else
-			rc = strlen(rot->downscale_caps);
-	}
-
-	return rc;
-}
-
-/*
- * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
- * @mgr: Pointer to rotator manager
- * return: maximum line width supported by hardware
- */
-static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
-{
-	struct sde_hw_rotator *rot;
-
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return -EINVAL;
-	}
-
-	rot = mgr->hw_data;
-
-	return rot->maxlinewidth;
-}
-
-/*
- * sde_hw_rotator_dump_status - dump status to debug output
- * @mgr: Pointer to rotator manager
- * return: none
- */
-static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
-{
-	if (!mgr || !mgr->hw_data) {
-		SDEROT_ERR("null parameters\n");
-		return;
-	}
-
-	_sde_hw_rotator_dump_status(mgr->hw_data, NULL);
-}
-
-/*
- * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
- * @hw_data: Pointer to rotator hw
- * @dev: Pointer to platform device
- */
-static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
-		struct platform_device *dev)
-{
-	int ret = 0;
-	u32 data;
-
-	if (!hw_data || !dev)
-		return -EINVAL;
-
-	ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
-			&data);
-	if (ret) {
-		SDEROT_DBG("default to regdma off\n");
-		ret = 0;
-		hw_data->mode = ROT_REGDMA_OFF;
-	} else if (data < ROT_REGDMA_MAX) {
-		SDEROT_DBG("set to regdma mode %d\n", data);
-		hw_data->mode = data;
-	} else {
-		SDEROT_ERR("regdma mode out of range. default to regdma off\n");
-		hw_data->mode = ROT_REGDMA_OFF;
-	}
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,mdss-highest-bank-bit", &data);
-	if (ret) {
-		SDEROT_DBG("default to A5X bank\n");
-		ret = 0;
-		hw_data->highest_bank = 2;
-	} else {
-		SDEROT_DBG("set highest bank bit to %d\n", data);
-		hw_data->highest_bank = data;
-	}
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,sde-ubwc-malsize", &data);
-	if (ret) {
-		ret = 0;
-		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
-	} else {
-		SDEROT_DBG("set ubwc malsize to %d\n", data);
-		hw_data->ubwc_malsize = data;
-	}
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,sde-ubwc_swizzle", &data);
-	if (ret) {
-		ret = 0;
-		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
-	} else {
-		SDEROT_DBG("set ubwc swizzle to %d\n", data);
-		hw_data->ubwc_swizzle = data;
-	}
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,mdss-sbuf-headroom", &data);
-	if (ret) {
-		ret = 0;
-		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
-	} else {
-		SDEROT_DBG("set sbuf headroom to %d\n", data);
-		hw_data->sbuf_headroom = data;
-	}
-
-	ret = of_property_read_u32(dev->dev.of_node,
-			"qcom,mdss-rot-linewidth", &data);
-	if (ret) {
-		ret = 0;
-		hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
-	} else {
-		SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
-		hw_data->maxlinewidth = data;
-	}
-
-	return ret;
-}
-
-/*
- * sde_rotator_r3_init - initialize the r3 module
- * @mgr: Pointer to rotator manager
- *
- * This function setup r3 callback functions, parses r3 specific
- * device tree settings, installs r3 specific interrupt handler,
- * as well as initializes r3 internal data structure.
- */
-int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
-{
-	struct sde_hw_rotator *rot;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int i;
-	int ret;
-
-	rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
-	if (!rot)
-		return -ENOMEM;
-
-	mgr->hw_data = rot;
-	mgr->queue_count = ROT_QUEUE_MAX;
-
-	rot->mdss_base = mdata->sde_io.base;
-	rot->pdev      = mgr->pdev;
-	rot->koff_timeout = KOFF_TIMEOUT;
-	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
-	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
-
-	/* Assign ops */
-	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
-	mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
-	mgr->ops_hw_free = sde_hw_rotator_free_ext;
-	mgr->ops_config_hw = sde_hw_rotator_config;
-	mgr->ops_cancel_hw = sde_hw_rotator_cancel;
-	mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
-	mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
-	mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
-	mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
-	mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
-	mgr->ops_hw_show_state = sde_hw_rotator_show_state;
-	mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
-	mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
-	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
-	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
-	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
-	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
-	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
-	mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
-
-	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
-	if (ret)
-		goto error_parse_dt;
-
-	rot->irq_num = platform_get_irq(mgr->pdev, 0);
-	if (rot->irq_num == -EPROBE_DEFER) {
-		SDEROT_INFO("irq master master not ready, defer probe\n");
-		return -EPROBE_DEFER;
-	} else if (rot->irq_num < 0) {
-		SDEROT_ERR("fail to get rotator irq, fallback to polling\n");
-	} else {
-		if (rot->mode == ROT_REGDMA_OFF)
-			ret = devm_request_threaded_irq(&mgr->pdev->dev,
-					rot->irq_num,
-					sde_hw_rotator_rotirq_handler,
-					NULL, 0, "sde_rotator_r3", rot);
-		else
-			ret = devm_request_threaded_irq(&mgr->pdev->dev,
-					rot->irq_num,
-					sde_hw_rotator_regdmairq_handler,
-					NULL, 0, "sde_rotator_r3", rot);
-		if (ret) {
-			SDEROT_ERR("fail to request irq r:%d\n", ret);
-			rot->irq_num = -1;
-		} else {
-			disable_irq(rot->irq_num);
-		}
-	}
-	atomic_set(&rot->irq_enabled, 0);
-
-	ret = sde_rotator_hw_rev_init(rot);
-	if (ret)
-		goto error_hw_rev_init;
-
-	setup_rotator_ops(&rot->ops, rot->mode,
-			test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map));
-
-	spin_lock_init(&rot->rotctx_lock);
-	spin_lock_init(&rot->rotisr_lock);
-
-	/* REGDMA initialization */
-	if (rot->mode == ROT_REGDMA_OFF) {
-		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
-			rot->cmd_wr_ptr[0][i] = (char __iomem *)(
-					&rot->cmd_queue[
-					SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
-	} else {
-		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
-			rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
-				rot->mdss_base +
-					REGDMA_RAM_REGDMA_CMD_RAM +
-					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
-
-		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
-			rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
-				rot->mdss_base +
-					REGDMA_RAM_REGDMA_CMD_RAM +
-					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
-					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
-	}
-
-	for (i = 0; i < ROT_QUEUE_MAX; i++) {
-		atomic_set(&rot->timestamp[i], 0);
-		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
-	}
-
-	/* set rotator CBCR to shutoff memory/periphery on clock off.*/
-	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
-			CLKFLAG_NORETAIN_MEM);
-	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
-			CLKFLAG_NORETAIN_PERIPH);
-
-	mdata->sde_rot_hw = rot;
-	return 0;
-error_hw_rev_init:
-	if (rot->irq_num >= 0)
-		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
-	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
-error_parse_dt:
-	return ret;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.h
deleted file mode 100644
index de7b613..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_R3_H__
-#define __SDE_ROTATOR_R3_H__
-
-#include "sde_rotator_core.h"
-
-/* Maximum allowed Rotator clock value */
-#define ROT_R3_MAX_ROT_CLK			345000000
-
-int sde_rotator_r3_init(struct sde_rot_mgr *mgr);
-
-#endif /* __SDE_ROTATOR_R3_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
deleted file mode 100644
index c5adea0..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_rotator_r3_debug.h"
-#include "sde_rotator_core.h"
-#include "sde_rotator_r3.h"
-#include "sde_rotator_r3_internal.h"
-
-/*
- * sde_rotator_r3_create_debugfs - Setup rotator r3 debugfs directory structure.
- * @rot_dev: Pointer to rotator device
- */
-int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	struct sde_hw_rotator *hw_data;
-
-	if (!mgr || !debugfs_root || !mgr->hw_data)
-		return -EINVAL;
-
-	hw_data = mgr->hw_data;
-
-	if (!debugfs_create_bool("dbgmem", 0644,
-			debugfs_root, &hw_data->dbgmem)) {
-		SDEROT_ERR("fail create dbgmem\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("koff_timeout", 0644,
-			debugfs_root, &hw_data->koff_timeout)) {
-		SDEROT_ERR("fail create koff_timeout\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("vid_trigger", 0644,
-			debugfs_root, &hw_data->vid_trigger)) {
-		SDEROT_ERR("fail create vid_trigger\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("cmd_trigger", 0644,
-			debugfs_root, &hw_data->cmd_trigger)) {
-		SDEROT_ERR("fail create cmd_trigger\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("sbuf_headroom", 0644,
-			debugfs_root, &hw_data->sbuf_headroom)) {
-		SDEROT_ERR("fail create sbuf_headroom\n");
-		return -EINVAL;
-	}
-
-	if (!debugfs_create_u32("solid_fill", 0644,
-			debugfs_root, &hw_data->solid_fill)) {
-		SDEROT_ERR("fail create solid_fill\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.h
deleted file mode 100644
index 4c5d1e7..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_R3_DEBUG_H__
-#define __SDE_ROTATOR_R3_DEBUG_H__
-
-#include <linux/types.h>
-#include <linux/dcache.h>
-
-struct sde_rot_mgr;
-
-#if defined(CONFIG_DEBUG_FS)
-int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root);
-#else
-static inline
-int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
-		struct dentry *debugfs_root)
-{
-	return 0;
-}
-#endif
-#endif /* __SDE_ROTATOR_R3_DEBUG_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
deleted file mode 100644
index a14f18a..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_ROTATOR_R3_HWIO_H
-#define _SDE_ROTATOR_R3_HWIO_H
-
-#include <linux/bitops.h>
-
-/* MMSS_MDSS:
- * OFFSET=0x000000
- */
-#define MMSS_MDSS_HW_INTR_STATUS		0x10
-#define MMSS_MDSS_HW_INTR_STATUS_ROT		BIT(2)
-
-/* SDE_ROT_ROTTOP:
- * OFFSET=0x0A8800
- */
-#define SDE_ROT_ROTTOP_OFFSET                   0xA8800
-#define ROTTOP_HW_VERSION                       (SDE_ROT_ROTTOP_OFFSET+0x00)
-#define ROTTOP_CLK_CTRL                         (SDE_ROT_ROTTOP_OFFSET+0x10)
-#define ROTTOP_CLK_STATUS                       (SDE_ROT_ROTTOP_OFFSET+0x14)
-#define ROTTOP_ROT_NEWROI_PRIOR_TO_START        (SDE_ROT_ROTTOP_OFFSET+0x18)
-#define ROTTOP_SW_RESET                         (SDE_ROT_ROTTOP_OFFSET+0x20)
-#define ROTTOP_SW_RESET_CTRL                    (SDE_ROT_ROTTOP_OFFSET+0x24)
-#define ROTTOP_SW_RESET_OVERRIDE                (SDE_ROT_ROTTOP_OFFSET+0x28)
-#define ROTTOP_INTR_EN                          (SDE_ROT_ROTTOP_OFFSET+0x30)
-#define ROTTOP_INTR_STATUS                      (SDE_ROT_ROTTOP_OFFSET+0x34)
-#define ROTTOP_INTR_CLEAR                       (SDE_ROT_ROTTOP_OFFSET+0x38)
-#define ROTTOP_START_CTRL                       (SDE_ROT_ROTTOP_OFFSET+0x40)
-#define ROTTOP_STATUS                           (SDE_ROT_ROTTOP_OFFSET+0x44)
-#define ROTTOP_OP_MODE                          (SDE_ROT_ROTTOP_OFFSET+0x48)
-#define ROTTOP_DNSC                             (SDE_ROT_ROTTOP_OFFSET+0x4C)
-#define ROTTOP_DEBUGBUS_CTRL                    (SDE_ROT_ROTTOP_OFFSET+0x50)
-#define ROTTOP_DEBUGBUS_STATUS                  (SDE_ROT_ROTTOP_OFFSET+0x54)
-#define ROTTOP_ROT_UBWC_DEC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x58)
-#define ROTTOP_ROT_UBWC_ENC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x5C)
-#define ROTTOP_ROT_CNTR_CTRL                    (SDE_ROT_ROTTOP_OFFSET+0x60)
-#define ROTTOP_ROT_CNTR_0                       (SDE_ROT_ROTTOP_OFFSET+0x64)
-#define ROTTOP_ROT_CNTR_1                       (SDE_ROT_ROTTOP_OFFSET+0x68)
-#define ROTTOP_ROT_SCRATCH_0                    (SDE_ROT_ROTTOP_OFFSET+0x70)
-#define ROTTOP_ROT_SCRATCH_1                    (SDE_ROT_ROTTOP_OFFSET+0x74)
-#define ROTTOP_ROT_SCRATCH_2                    (SDE_ROT_ROTTOP_OFFSET+0x78)
-#define ROTTOP_ROT_SCRATCH_3                    (SDE_ROT_ROTTOP_OFFSET+0x7C)
-
-#define ROTTOP_START_CTRL_TRIG_SEL_SW           0
-#define ROTTOP_START_CTRL_TRIG_SEL_DONE         1
-#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA       2
-#define ROTTOP_START_CTRL_TRIG_SEL_MDP          3
-
-#define ROTTOP_OP_MODE_ROT_OUT_MASK             (0x3 << 4)
-
-/* SDE_ROT_SSPP:
- * OFFSET=0x0A8900
- */
-#define SDE_ROT_SSPP_OFFSET                     0xA8900
-#define ROT_SSPP_SRC_SIZE                       (SDE_ROT_SSPP_OFFSET+0x00)
-#define ROT_SSPP_SRC_IMG_SIZE                   (SDE_ROT_SSPP_OFFSET+0x04)
-#define ROT_SSPP_SRC_XY                         (SDE_ROT_SSPP_OFFSET+0x08)
-#define ROT_SSPP_OUT_SIZE                       (SDE_ROT_SSPP_OFFSET+0x0C)
-#define ROT_SSPP_OUT_XY                         (SDE_ROT_SSPP_OFFSET+0x10)
-#define ROT_SSPP_SRC0_ADDR                      (SDE_ROT_SSPP_OFFSET+0x14)
-#define ROT_SSPP_SRC1_ADDR                      (SDE_ROT_SSPP_OFFSET+0x18)
-#define ROT_SSPP_SRC2_ADDR                      (SDE_ROT_SSPP_OFFSET+0x1C)
-#define ROT_SSPP_SRC3_ADDR                      (SDE_ROT_SSPP_OFFSET+0x20)
-#define ROT_SSPP_SRC_YSTRIDE0                   (SDE_ROT_SSPP_OFFSET+0x24)
-#define ROT_SSPP_SRC_YSTRIDE1                   (SDE_ROT_SSPP_OFFSET+0x28)
-#define ROT_SSPP_TILE_FRAME_SIZE                (SDE_ROT_SSPP_OFFSET+0x2C)
-#define ROT_SSPP_SRC_FORMAT                     (SDE_ROT_SSPP_OFFSET+0x30)
-#define ROT_SSPP_SRC_UNPACK_PATTERN             (SDE_ROT_SSPP_OFFSET+0x34)
-#define ROT_SSPP_SRC_OP_MODE                    (SDE_ROT_SSPP_OFFSET+0x38)
-#define ROT_SSPP_SRC_CONSTANT_COLOR             (SDE_ROT_SSPP_OFFSET+0x3C)
-#define ROT_SSPP_UBWC_STATIC_CTRL               (SDE_ROT_SSPP_OFFSET+0x44)
-#define ROT_SSPP_FETCH_CONFIG                   (SDE_ROT_SSPP_OFFSET+0x48)
-#define ROT_SSPP_VC1_RANGE                      (SDE_ROT_SSPP_OFFSET+0x4C)
-#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_0   (SDE_ROT_SSPP_OFFSET+0x50)
-#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_1   (SDE_ROT_SSPP_OFFSET+0x54)
-#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_2   (SDE_ROT_SSPP_OFFSET+0x58)
-#define ROT_SSPP_DANGER_LUT                     (SDE_ROT_SSPP_OFFSET+0x60)
-#define ROT_SSPP_SAFE_LUT                       (SDE_ROT_SSPP_OFFSET+0x64)
-#define ROT_SSPP_CREQ_LUT                       (SDE_ROT_SSPP_OFFSET+0x68)
-#define ROT_SSPP_QOS_CTRL                       (SDE_ROT_SSPP_OFFSET+0x6C)
-#define ROT_SSPP_SRC_ADDR_SW_STATUS             (SDE_ROT_SSPP_OFFSET+0x70)
-#define ROT_SSPP_CREQ_LUT_0                     (SDE_ROT_SSPP_OFFSET+0x74)
-#define ROT_SSPP_CREQ_LUT_1                     (SDE_ROT_SSPP_OFFSET+0x78)
-#define ROT_SSPP_CURRENT_SRC0_ADDR              (SDE_ROT_SSPP_OFFSET+0xA4)
-#define ROT_SSPP_CURRENT_SRC1_ADDR              (SDE_ROT_SSPP_OFFSET+0xA8)
-#define ROT_SSPP_CURRENT_SRC2_ADDR              (SDE_ROT_SSPP_OFFSET+0xAC)
-#define ROT_SSPP_CURRENT_SRC3_ADDR              (SDE_ROT_SSPP_OFFSET+0xB0)
-#define ROT_SSPP_DECIMATION_CONFIG              (SDE_ROT_SSPP_OFFSET+0xB4)
-#define ROT_SSPP_FETCH_SMP_WR_PLANE0            (SDE_ROT_SSPP_OFFSET+0xD0)
-#define ROT_SSPP_FETCH_SMP_WR_PLANE1            (SDE_ROT_SSPP_OFFSET+0xD4)
-#define ROT_SSPP_FETCH_SMP_WR_PLANE2            (SDE_ROT_SSPP_OFFSET+0xD8)
-#define ROT_SSPP_SMP_UNPACK_RD_PLANE0           (SDE_ROT_SSPP_OFFSET+0xE0)
-#define ROT_SSPP_SMP_UNPACK_RD_PLANE1           (SDE_ROT_SSPP_OFFSET+0xE4)
-#define ROT_SSPP_SMP_UNPACK_RD_PLANE2           (SDE_ROT_SSPP_OFFSET+0xE8)
-#define ROT_SSPP_FILL_LEVELS                    (SDE_ROT_SSPP_OFFSET+0xF0)
-#define ROT_SSPP_STATUS                         (SDE_ROT_SSPP_OFFSET+0xF4)
-#define ROT_SSPP_UNPACK_LINE_COUNT              (SDE_ROT_SSPP_OFFSET+0xF8)
-#define ROT_SSPP_UNPACK_BLK_COUNT               (SDE_ROT_SSPP_OFFSET+0xFC)
-#define ROT_SSPP_SW_PIX_EXT_C0_LR               (SDE_ROT_SSPP_OFFSET+0x100)
-#define ROT_SSPP_SW_PIX_EXT_C0_TB               (SDE_ROT_SSPP_OFFSET+0x104)
-#define ROT_SSPP_SW_PIX_EXT_C0_REQ_PIXELS       (SDE_ROT_SSPP_OFFSET+0x108)
-#define ROT_SSPP_SW_PIX_EXT_C1C2_LR             (SDE_ROT_SSPP_OFFSET+0x110)
-#define ROT_SSPP_SW_PIX_EXT_C1C2_TB             (SDE_ROT_SSPP_OFFSET+0x114)
-#define ROT_SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS     (SDE_ROT_SSPP_OFFSET+0x118)
-#define ROT_SSPP_SW_PIX_EXT_C3_LR               (SDE_ROT_SSPP_OFFSET+0x120)
-#define ROT_SSPP_SW_PIX_EXT_C3_TB               (SDE_ROT_SSPP_OFFSET+0x124)
-#define ROT_SSPP_SW_PIX_EXT_C3_REQ_PIXELS       (SDE_ROT_SSPP_OFFSET+0x128)
-#define ROT_SSPP_TRAFFIC_SHAPER                 (SDE_ROT_SSPP_OFFSET+0x130)
-#define ROT_SSPP_CDP_CNTL                       (SDE_ROT_SSPP_OFFSET+0x134)
-#define ROT_SSPP_UBWC_ERROR_STATUS              (SDE_ROT_SSPP_OFFSET+0x138)
-#define ROT_SSPP_SW_CROP_W_C0C3                 (SDE_ROT_SSPP_OFFSET+0x140)
-#define ROT_SSPP_SW_CROP_W_C1C2                 (SDE_ROT_SSPP_OFFSET+0x144)
-#define ROT_SSPP_SW_CROP_H_C0C3                 (SDE_ROT_SSPP_OFFSET+0x148)
-#define ROT_SSPP_SW_CROP_H_C1C2                 (SDE_ROT_SSPP_OFFSET+0x14C)
-#define ROT_SSPP_TRAFFIC_SHAPER_PREFILL         (SDE_ROT_SSPP_OFFSET+0x150)
-#define ROT_SSPP_TRAFFIC_SHAPER_REC1_PREFILL    (SDE_ROT_SSPP_OFFSET+0x154)
-#define ROT_SSPP_OUT_SIZE_REC1                  (SDE_ROT_SSPP_OFFSET+0x160)
-#define ROT_SSPP_OUT_XY_REC1                    (SDE_ROT_SSPP_OFFSET+0x164)
-#define ROT_SSPP_SRC_XY_REC1                    (SDE_ROT_SSPP_OFFSET+0x168)
-#define ROT_SSPP_SRC_SIZE_REC1                  (SDE_ROT_SSPP_OFFSET+0x16C)
-#define ROT_SSPP_MULTI_REC_OP_MODE              (SDE_ROT_SSPP_OFFSET+0x170)
-#define ROT_SSPP_SRC_FORMAT_REC1                (SDE_ROT_SSPP_OFFSET+0x174)
-#define ROT_SSPP_SRC_UNPACK_PATTERN_REC1        (SDE_ROT_SSPP_OFFSET+0x178)
-#define ROT_SSPP_SRC_OP_MODE_REC1               (SDE_ROT_SSPP_OFFSET+0x17C)
-#define ROT_SSPP_SRC_CONSTANT_COLOR_REC1        (SDE_ROT_SSPP_OFFSET+0x180)
-#define ROT_SSPP_TPG_CONTROL                    (SDE_ROT_SSPP_OFFSET+0x190)
-#define ROT_SSPP_TPG_CONFIG                     (SDE_ROT_SSPP_OFFSET+0x194)
-#define ROT_SSPP_TPG_COMPONENT_LIMITS           (SDE_ROT_SSPP_OFFSET+0x198)
-#define ROT_SSPP_TPG_RECTANGLE                  (SDE_ROT_SSPP_OFFSET+0x19C)
-#define ROT_SSPP_TPG_BLACK_WHITE_PATTERN_FRAMES (SDE_ROT_SSPP_OFFSET+0x1A0)
-#define ROT_SSPP_TPG_RGB_MAPPING                (SDE_ROT_SSPP_OFFSET+0x1A4)
-#define ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL       (SDE_ROT_SSPP_OFFSET+0x1A8)
-
-#define SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE   0x00087
-#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_128        (0 << 16)
-#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_96         (2 << 16)
-#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT    ((0 << 16) | (1 << 15))
-#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT    ((2 << 16) | (1 << 15))
-
-
-/* SDE_ROT_WB:
- * OFFSET=0x0A8B00
- */
-#define SDE_ROT_WB_OFFSET                       0xA8B00
-#define ROT_WB_DST_FORMAT                       (SDE_ROT_WB_OFFSET+0x000)
-#define ROT_WB_DST_OP_MODE                      (SDE_ROT_WB_OFFSET+0x004)
-#define ROT_WB_DST_PACK_PATTERN                 (SDE_ROT_WB_OFFSET+0x008)
-#define ROT_WB_DST0_ADDR                        (SDE_ROT_WB_OFFSET+0x00C)
-#define ROT_WB_DST1_ADDR                        (SDE_ROT_WB_OFFSET+0x010)
-#define ROT_WB_DST2_ADDR                        (SDE_ROT_WB_OFFSET+0x014)
-#define ROT_WB_DST3_ADDR                        (SDE_ROT_WB_OFFSET+0x018)
-#define ROT_WB_DST_YSTRIDE0                     (SDE_ROT_WB_OFFSET+0x01C)
-#define ROT_WB_DST_YSTRIDE1                     (SDE_ROT_WB_OFFSET+0x020)
-#define ROT_WB_DST_DITHER_BITDEPTH              (SDE_ROT_WB_OFFSET+0x024)
-#define ROT_WB_DITHER_MATRIX_ROW0               (SDE_ROT_WB_OFFSET+0x030)
-#define ROT_WB_DITHER_MATRIX_ROW1               (SDE_ROT_WB_OFFSET+0x034)
-#define ROT_WB_DITHER_MATRIX_ROW2               (SDE_ROT_WB_OFFSET+0x038)
-#define ROT_WB_DITHER_MATRIX_ROW3               (SDE_ROT_WB_OFFSET+0x03C)
-#define ROT_WB_TRAFFIC_SHAPER_WR_CLIENT         (SDE_ROT_WB_OFFSET+0x040)
-#define ROT_WB_DST_WRITE_CONFIG                 (SDE_ROT_WB_OFFSET+0x048)
-#define ROT_WB_ROTATOR_PIPE_DOWNSCALER          (SDE_ROT_WB_OFFSET+0x054)
-#define ROT_WB_OUT_SIZE                         (SDE_ROT_WB_OFFSET+0x074)
-#define ROT_WB_DST_ALPHA_X_VALUE                (SDE_ROT_WB_OFFSET+0x078)
-#define ROT_WB_HW_VERSION                       (SDE_ROT_WB_OFFSET+0x080)
-#define ROT_WB_DANGER_LUT                       (SDE_ROT_WB_OFFSET+0x084)
-#define ROT_WB_SAFE_LUT                         (SDE_ROT_WB_OFFSET+0x088)
-#define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
-#define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
-#define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
-#define ROT_WB_CREQ_LUT_0                       (SDE_ROT_WB_OFFSET+0x098)
-#define ROT_WB_CREQ_LUT_1                       (SDE_ROT_WB_OFFSET+0x09C)
-#define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
-#define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
-#define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
-#define ROT_WB_CSC_MATRIX_COEFF_0               (SDE_ROT_WB_OFFSET+0x260)
-#define ROT_WB_CSC_MATRIX_COEFF_1               (SDE_ROT_WB_OFFSET+0x264)
-#define ROT_WB_CSC_MATRIX_COEFF_2               (SDE_ROT_WB_OFFSET+0x268)
-#define ROT_WB_CSC_MATRIX_COEFF_3               (SDE_ROT_WB_OFFSET+0x26C)
-#define ROT_WB_CSC_MATRIX_COEFF_4               (SDE_ROT_WB_OFFSET+0x270)
-#define ROT_WB_CSC_COMP0_PRECLAMP               (SDE_ROT_WB_OFFSET+0x274)
-#define ROT_WB_CSC_COMP1_PRECLAMP               (SDE_ROT_WB_OFFSET+0x278)
-#define ROT_WB_CSC_COMP2_PRECLAMP               (SDE_ROT_WB_OFFSET+0x27C)
-#define ROT_WB_CSC_COMP0_POSTCLAMP              (SDE_ROT_WB_OFFSET+0x280)
-#define ROT_WB_CSC_COMP1_POSTCLAMP              (SDE_ROT_WB_OFFSET+0x284)
-#define ROT_WB_CSC_COMP2_POSTCLAMP              (SDE_ROT_WB_OFFSET+0x288)
-#define ROT_WB_CSC_COMP0_PREBIAS                (SDE_ROT_WB_OFFSET+0x28C)
-#define ROT_WB_CSC_COMP1_PREBIAS                (SDE_ROT_WB_OFFSET+0x290)
-#define ROT_WB_CSC_COMP2_PREBIAS                (SDE_ROT_WB_OFFSET+0x294)
-#define ROT_WB_CSC_COMP0_POSTBIAS               (SDE_ROT_WB_OFFSET+0x298)
-#define ROT_WB_CSC_COMP1_POSTBIAS               (SDE_ROT_WB_OFFSET+0x29C)
-#define ROT_WB_CSC_COMP2_POSTBIAS               (SDE_ROT_WB_OFFSET+0x2A0)
-#define ROT_WB_DST_ADDR_SW_STATUS               (SDE_ROT_WB_OFFSET+0x2B0)
-#define ROT_WB_CDP_CNTL                         (SDE_ROT_WB_OFFSET+0x2B4)
-#define ROT_WB_STATUS                           (SDE_ROT_WB_OFFSET+0x2B8)
-#define ROT_WB_UBWC_ERROR_STATUS                (SDE_ROT_WB_OFFSET+0x2BC)
-#define ROT_WB_OUT_IMG_SIZE                     (SDE_ROT_WB_OFFSET+0x2C0)
-#define ROT_WB_OUT_XY                           (SDE_ROT_WB_OFFSET+0x2C4)
-
-
-/* SDE_ROT_REGDMA_RAM:
- * OFFSET=0x0A8E00
- */
-#define SDE_ROT_REGDMA_RAM_OFFSET              0xA8E00
-#define REGDMA_RAM_REGDMA_CMD_RAM              (SDE_ROT_REGDMA_RAM_OFFSET+0x00)
-
-
-/* SDE_ROT_REGDMA_CSR:
- * OFFSET=0x0AAE00
- */
-#define SDE_ROT_REGDMA_OFFSET                    0xAAE00
-#define REGDMA_CSR_REGDMA_VERSION                (SDE_ROT_REGDMA_OFFSET+0x00)
-#define REGDMA_CSR_REGDMA_OP_MODE                (SDE_ROT_REGDMA_OFFSET+0x04)
-#define REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT         (SDE_ROT_REGDMA_OFFSET+0x10)
-#define REGDMA_CSR_REGDMA_QUEUE_0_STATUS         (SDE_ROT_REGDMA_OFFSET+0x14)
-#define REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT         (SDE_ROT_REGDMA_OFFSET+0x18)
-#define REGDMA_CSR_REGDMA_QUEUE_1_STATUS         (SDE_ROT_REGDMA_OFFSET+0x1C)
-#define REGDMA_CSR_REGDMA_BLOCK_LO_0             (SDE_ROT_REGDMA_OFFSET+0x20)
-#define REGDMA_CSR_REGDMA_BLOCK_HI_0             (SDE_ROT_REGDMA_OFFSET+0x24)
-#define REGDMA_CSR_REGDMA_BLOCK_LO_1             (SDE_ROT_REGDMA_OFFSET+0x28)
-#define REGDMA_CSR_REGDMA_BLOCK_HI_1             (SDE_ROT_REGDMA_OFFSET+0x2C)
-#define REGDMA_CSR_REGDMA_BLOCK_LO_2             (SDE_ROT_REGDMA_OFFSET+0x30)
-#define REGDMA_CSR_REGDMA_BLOCK_HI_2             (SDE_ROT_REGDMA_OFFSET+0x34)
-#define REGDMA_CSR_REGDMA_BLOCK_LO_3             (SDE_ROT_REGDMA_OFFSET+0x38)
-#define REGDMA_CSR_REGDMA_BLOCK_HI_3             (SDE_ROT_REGDMA_OFFSET+0x3C)
-#define REGDMA_CSR_REGDMA_WD_TIMER_CTL           (SDE_ROT_REGDMA_OFFSET+0x40)
-#define REGDMA_CSR_REGDMA_WD_TIMER_CTL2          (SDE_ROT_REGDMA_OFFSET+0x44)
-#define REGDMA_CSR_REGDMA_WD_TIMER_LOAD_VALUE    (SDE_ROT_REGDMA_OFFSET+0x48)
-#define REGDMA_CSR_REGDMA_WD_TIMER_STATUS_VALUE  (SDE_ROT_REGDMA_OFFSET+0x4C)
-#define REGDMA_CSR_REGDMA_INT_STATUS             (SDE_ROT_REGDMA_OFFSET+0x50)
-#define REGDMA_CSR_REGDMA_INT_EN                 (SDE_ROT_REGDMA_OFFSET+0x54)
-#define REGDMA_CSR_REGDMA_INT_CLEAR              (SDE_ROT_REGDMA_OFFSET+0x58)
-#define REGDMA_CSR_REGDMA_BLOCK_STATUS           (SDE_ROT_REGDMA_OFFSET+0x5C)
-#define REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET (SDE_ROT_REGDMA_OFFSET+0x60)
-#define REGDMA_CSR_REGDMA_FSM_STATE              (SDE_ROT_REGDMA_OFFSET+0x64)
-#define REGDMA_CSR_REGDMA_DEBUG_SEL              (SDE_ROT_REGDMA_OFFSET+0x68)
-
-
-/* SDE_ROT_QDSS:
- * OFFSET=0x0AAF00
- */
-#define ROT_QDSS_CONFIG                          0x00
-#define ROT_QDSS_ATB_DATA_ENABLE0                0x04
-#define ROT_QDSS_ATB_DATA_ENABLE1                0x08
-#define ROT_QDSS_ATB_DATA_ENABLE2                0x0C
-#define ROT_QDSS_ATB_DATA_ENABLE3                0x10
-#define ROT_QDSS_CLK_CTRL                        0x14
-#define ROT_QDSS_CLK_STATUS                      0x18
-#define ROT_QDSS_PULSE_TRIGGER                   0x20
-
-/*
- * SDE_ROT_VBIF_NRT:
- */
-#define SDE_ROT_VBIF_NRT_OFFSET                  0
-
-/* REGDMA OP Code */
-#define REGDMA_OP_NOP                   (0 << 28)
-#define REGDMA_OP_REGWRITE              (1 << 28)
-#define REGDMA_OP_REGMODIFY             (2 << 28)
-#define REGDMA_OP_BLKWRITE_SINGLE       (3 << 28)
-#define REGDMA_OP_BLKWRITE_INC          (4 << 28)
-#define REGDMA_OP_MASK                  0xF0000000
-
-/* REGDMA ADDR offset Mask */
-#define REGDMA_ADDR_OFFSET_MASK         0xFFFFF
-
-/* REGDMA command trigger select */
-#define REGDMA_CMD_TRIG_SEL_SW_START    (0 << 27)
-#define REGDMA_CMD_TRIG_SEL_MDP_FLUSH   (1 << 27)
-
-/* General defines */
-#define ROT_DONE_MASK                   0x1
-#define ROT_DONE_CLEAR                  0x1
-#define ROT_BUSY_BIT                    BIT(0)
-#define ROT_ERROR_BIT                   BIT(8)
-#define ROT_STATUS_MASK                 (ROT_BUSY_BIT | ROT_ERROR_BIT)
-#define REGDMA_BUSY                     BIT(0)
-#define REGDMA_EN                       0x1
-#define REGDMA_SECURE_EN                BIT(8)
-#define REGDMA_HALT                     BIT(16)
-
-#define REGDMA_WATCHDOG_INT             BIT(19)
-#define REGDMA_INVALID_DESCRIPTOR       BIT(18)
-#define REGDMA_INCOMPLETE_CMD           BIT(17)
-#define REGDMA_INVALID_CMD              BIT(16)
-#define REGDMA_QUEUE1_INT2              BIT(10)
-#define REGDMA_QUEUE1_INT1              BIT(9)
-#define REGDMA_QUEUE1_INT0              BIT(8)
-#define REGDMA_QUEUE0_INT2              BIT(2)
-#define REGDMA_QUEUE0_INT1              BIT(1)
-#define REGDMA_QUEUE0_INT0              BIT(0)
-#define REGDMA_INT_MASK                 0x000F0707
-#define REGDMA_INT_HIGH_MASK            0x00000007
-#define REGDMA_INT_LOW_MASK             0x00000700
-#define REGDMA_INT_ERR_MASK             0x000F0000
-#define REGDMA_TIMESTAMP_REG            ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
-#define REGDMA_RESET_STATUS_REG         ROT_SSPP_TPG_RGB_MAPPING
-
-#define REGDMA_INT_0_MASK               0x101
-#define REGDMA_INT_1_MASK               0x202
-#define REGDMA_INT_2_MASK               0x404
-
-#endif /*_SDE_ROTATOR_R3_HWIO_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
deleted file mode 100644
index 10585c8..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _SDE_ROTATOR_R3_INTERNAL_H
-#define _SDE_ROTATOR_R3_INTERNAL_H
-
-#include "sde_rotator_core.h"
-
-struct sde_hw_rotator;
-struct sde_hw_rotator_context;
-
-/**
- * Flags
- */
-#define SDE_ROT_FLAG_SECURE_OVERLAY_SESSION 0x1
-#define SDE_ROT_FLAG_FLIP_LR                0x2
-#define SDE_ROT_FLAG_FLIP_UD                0x4
-#define SDE_ROT_FLAG_SOURCE_ROTATED_90      0x8
-#define SDE_ROT_FLAG_ROT_90                 0x10
-#define SDE_ROT_FLAG_DEINTERLACE            0x20
-#define SDE_ROT_FLAG_SECURE_CAMERA_SESSION  0x40
-
-/**
- * General defines
- */
-#define SDE_HW_ROT_REGDMA_RAM_SIZE      1024
-#define SDE_HW_ROT_REGDMA_TOTAL_CTX     8
-#define SDE_HW_ROT_REGDMA_SEG_MASK      (SDE_HW_ROT_REGDMA_TOTAL_CTX - 1)
-#define SDE_HW_ROT_REGDMA_SEG_SIZE \
-	(SDE_HW_ROT_REGDMA_RAM_SIZE / SDE_HW_ROT_REGDMA_TOTAL_CTX)
-#define SDE_REGDMA_SWTS_MASK            0x00000FFF
-#define SDE_REGDMA_SWTS_SHIFT           12
-
-enum sde_rot_queue_prio {
-	ROT_QUEUE_HIGH_PRIORITY,
-	ROT_QUEUE_LOW_PRIORITY,
-	ROT_QUEUE_MAX
-};
-
-enum sde_rot_angle {
-	ROT_ANGLE_0,
-	ROT_ANGLE_90,
-	ROT_ANGEL_MAX
-};
-
-enum sde_rotator_regdma_mode {
-	ROT_REGDMA_OFF,
-	ROT_REGDMA_ON,
-	ROT_REGDMA_MAX
-};
-
-/**
- * struct sde_hw_rot_sspp_cfg: Rotator SSPP Configration description
- * @src:       source surface information
- * @src_rect:  src ROI, caller takes into account the different operations
- *             such as decimation, flip etc to program this field
- * @addr:      source surface address
- */
-struct sde_hw_rot_sspp_cfg {
-	struct sde_mdp_format_params *fmt;
-	struct sde_mdp_plane_sizes    src_plane;
-	struct sde_rect              *src_rect;
-	struct sde_mdp_data          *data;
-	u32                           img_width;
-	u32                           img_height;
-	u32                           fps;
-	u64                           bw;
-};
-
-
-
-/**
- *  struct sde_hw_rot_wb_cfg: Rotator WB Configration description
- *  @dest:      destination surface information
- *  @dest_rect: dest ROI, caller takes into account the different operations
- *              such as decimation, flip etc to program this field
- *  @addr:      destination surface address
- *  @prefill_bw: prefill bandwidth in Bps
- */
-struct sde_hw_rot_wb_cfg {
-	struct sde_mdp_format_params   *fmt;
-	struct sde_mdp_plane_sizes      dst_plane;
-	struct sde_rect                *dst_rect;
-	struct sde_mdp_data            *data;
-	u32                             img_width;
-	u32                             img_height;
-	u32                             v_downscale_factor;
-	u32                             h_downscale_factor;
-	u32                             fps;
-	u64                             bw;
-	u64                             prefill_bw;
-};
-
-
-
-/**
- *
- * struct sde_hw_rotator_ops: Interface to the Rotator Hw driver functions
- *
- * Pre-requsises:
- *  - Caller must call the init function to get the rotator context
- *  - These functions will be called after clocks are enabled
- */
-struct sde_hw_rotator_ops {
-	/**
-	 * setup_rotator_fetchengine():
-	 *    Setup Source format
-	 *    Setup Source dimension/cropping rectangle (ROI)
-	 *    Setup Source surface base address and stride
-	 *    Setup fetch engine op mode (linear/tiled/compression/...)
-	 * @ctx:        Rotator context created in sde_hw_rotator_config
-	 * @queue_id:   Select either low / high priority queue
-	 * @cfg:        Rotator Fetch engine configuration parameters
-	 * @danger_lut: Danger LUT setting
-	 * @safe_lut:   Safe LUT setting
-	 * @dnsc_factor_w: Downscale factor for width
-	 * @dnsc_factor_h: Downscale factor for height
-	 * @flags:      Specific config flag, see SDE_ROT_FLAG_ for details
-	 */
-	void (*setup_rotator_fetchengine)(
-			struct sde_hw_rotator_context  *ctx,
-			enum   sde_rot_queue_prio       queue_id,
-			struct sde_hw_rot_sspp_cfg     *cfg,
-			u32                             danger_lut,
-			u32                             safe_lut,
-			u32                             dnsc_factor_w,
-			u32                             dnsc_factor_h,
-			u32                             flags);
-
-	/**
-	 * setup_rotator_wbengine():
-	 *     Setup destination formats
-	 *     Setup destination dimension/cropping rectangle (ROI)
-	 *     Setup destination surface base address and strides
-	 *     Setup writeback engine op mode (linear/tiled/compression)
-	 * @ctx:        Rotator context created in sde_hw_rotator_config
-	 * @queue_id:   Select either low / high priority queue
-	 * @cfg:        Rotator WriteBack engine configuration parameters
-	 * @flags:      Specific config flag, see SDE_ROT_FLAG_ for details
-	 */
-	void (*setup_rotator_wbengine)(
-			struct sde_hw_rotator_context *ctx,
-			enum   sde_rot_queue_prio      queue_id,
-			struct sde_hw_rot_wb_cfg      *cfg,
-			u32                            flags);
-
-	/**
-	 * start_rotator():
-	 *     Kick start rotator operation based on cached setup parameters
-	 *     REGDMA commands will get generated at this points
-	 * @ctx:      Rotator context
-	 * @queue_id: Select either low / high priority queue
-	 * Returns:   unique job timestamp per submit. Used for tracking
-	 *            rotator finished job.
-	 */
-	u32 (*start_rotator)(
-			struct sde_hw_rotator_context  *ctx,
-			enum   sde_rot_queue_prio       queue_id);
-
-	/**
-	 * wait_rotator_done():
-	 *     Notify Rotator HAL layer previously submitted job finished.
-	 *     A job timestamp will return to caller.
-	 * @ctx:    Rotator context
-	 * @flags:  Reserved
-	 * Returns: job timestamp for tracking purpose
-	 *
-	 */
-	u32 (*wait_rotator_done)(
-			struct sde_hw_rotator_context  *ctx,
-			enum   sde_rot_queue_prio       queue_id,
-			u32                             flags);
-
-	/**
-	 * get_pending_ts():
-	 *     Obtain current active timestamp from rotator hw
-	 * @rot:    HW Rotator structure
-	 * @ctx:    Rotator context
-	 * @ts:     current timestamp return from rot hw
-	 * Returns: true if context has pending requests
-	 */
-	int (*get_pending_ts)(
-			struct sde_hw_rotator *rot,
-			struct sde_hw_rotator_context *ctx,
-			u32 *ts);
-
-	/**
-	 * update_ts():
-	 *     Update rotator timestmap with given value
-	 * @rot:    HW Rotator structure
-	 * @q_id:   rotator queue id
-	 * @ts:     new timestamp for rotator
-	 */
-	void (*update_ts)(
-			struct sde_hw_rotator *rot,
-			u32 q_id,
-			u32 ts);
-};
-
-/**
- * struct sde_dbg_buf : Debug buffer used by debugfs
- * @vaddr:        VA address mapped from dma buffer
- * @dmabuf:       DMA buffer
- * @buflen:       Length of DMA buffer
- * @width:        pixel width of buffer
- * @height:       pixel height of buffer
- */
-struct sde_dbg_buf {
-	void *vaddr;
-	struct dma_buf *dmabuf;
-	unsigned long buflen;
-	u32 width;
-	u32 height;
-};
-
-/**
- * struct sde_hw_rotator_context : Each rotator context ties to each priority
- * queue. Max number of concurrent contexts in regdma is limited to regdma
- * ram segment size allocation. Each rotator context can be any priority. A
- * incremental timestamp is used to identify and assigned to each context.
- * @list: list of pending context
- * @sequence_id: unique sequence identifier for rotation request
- * @sbuf_mode: true if stream buffer is requested
- * @start_ctrl: start control register update value
- * @sys_cache_mode: sys cache mode register update value
- * @op_mode: rot top op mode selection
- * @last_entry: pointer to last configured entry (for debugging purposes)
- */
-struct sde_hw_rotator_context {
-	struct list_head list;
-	struct sde_hw_rotator *rot;
-	struct sde_rot_hw_resource *hwres;
-	enum   sde_rot_queue_prio q_id;
-	u32    session_id;
-	u32    sequence_id;
-	char __iomem *regdma_base;
-	char __iomem *regdma_wrptr;
-	u32    timestamp;
-	struct completion rot_comp;
-	wait_queue_head_t regdma_waitq;
-	struct sde_dbg_buf src_dbgbuf;
-	struct sde_dbg_buf dst_dbgbuf;
-	u32    last_regdma_isr_status;
-	u32    last_regdma_timestamp;
-	dma_addr_t ts_addr;
-	bool   is_secure;
-	bool   is_traffic_shaping;
-	bool   sbuf_mode;
-	bool   abort;
-	u32    start_ctrl;
-	u32    sys_cache_mode;
-	u32    op_mode;
-	struct sde_rot_entry *last_entry;
-};
-
-/**
- * struct sde_hw_rotator_resource_info : Each rotator resource ties to each
- * priority queue
- */
-struct sde_hw_rotator_resource_info {
-	struct sde_hw_rotator      *rot;
-	struct sde_rot_hw_resource  hw;
-};
-
-/**
- * struct sde_hw_rotator : Rotator description
- * @hw:           mdp register mapped offset
- * @ops:          pointer to operations possible for the rotator HW
- * @highest_bank: highest bank size of memory
- * @ubwc_malsize: ubwc minimum allowable length
- * @ubwc_swizzle: ubwc swizzle enable
- * @sbuf_headroom: stream buffer headroom in lines
- * @solid_fill: true if solid fill is requested
- * @constant_color: solid fill constant color
- * @sbuf_ctx: list of active sbuf context in FIFO order
- * @vid_trigger: video mode trigger select
- * @cmd_trigger: command mode trigger select
- * @inpixfmts: array of supported input pixel formats fourcc per mode
- * @num_inpixfmt: size of the supported input pixel format array per mode
- * @outpixfmts: array of supported output pixel formats in fourcc per mode
- * @num_outpixfmt: size of the supported output pixel formats array per mode
- * @downscale_caps: capability string of scaling
- * @maxlinewidth: maximum line width supported
- */
-struct sde_hw_rotator {
-	/* base */
-	char __iomem *mdss_base;
-
-	/* Platform device from upper manager */
-	struct platform_device *pdev;
-
-	/* Ops */
-	struct sde_hw_rotator_ops ops;
-
-	/* Cmd Queue */
-	u32    cmd_queue[SDE_HW_ROT_REGDMA_RAM_SIZE];
-
-	/* Cmd Queue Write Ptr */
-	char __iomem *cmd_wr_ptr[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
-
-	/* Rotator Context */
-	struct sde_hw_rotator_context
-		*rotCtx[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
-
-	/* Cmd timestamp sequence for different priority*/
-	atomic_t timestamp[ROT_QUEUE_MAX];
-
-	/* regdma mode */
-	enum   sde_rotator_regdma_mode mode;
-
-	/* logical interrupt number */
-	int    irq_num;
-	atomic_t irq_enabled;
-
-	/* internal ION memory for SW timestamp */
-	struct ion_client *iclient;
-	struct sde_mdp_img_data swts_buf;
-	void *swts_buffer;
-
-	u32    highest_bank;
-	u32    ubwc_malsize;
-	u32    ubwc_swizzle;
-	u32    sbuf_headroom;
-	u32    solid_fill;
-	u32    constant_color;
-
-	spinlock_t rotctx_lock;
-	spinlock_t rotisr_lock;
-
-	bool    dbgmem;
-	bool reset_hw_ts;
-	u32 last_hwts[ROT_QUEUE_MAX];
-	u32 koff_timeout;
-	u32 vid_trigger;
-	u32 cmd_trigger;
-
-	struct list_head sbuf_ctx[ROT_QUEUE_MAX];
-
-	const u32 *inpixfmts[SDE_ROTATOR_MODE_MAX];
-	u32 num_inpixfmt[SDE_ROTATOR_MODE_MAX];
-	const u32 *outpixfmts[SDE_ROTATOR_MODE_MAX];
-	u32 num_outpixfmt[SDE_ROTATOR_MODE_MAX];
-	const char *downscale_caps;
-	u32 maxlinewidth;
-};
-
-/**
- * sde_hw_rotator_get_regdma_ctxidx(): regdma segment index is based on
- * timestamp. For non-regdma, just return 0 (i.e. first index)
- * @ctx: Rotator Context
- * return: regdma segment index
- */
-static inline u32 sde_hw_rotator_get_regdma_ctxidx(
-		struct sde_hw_rotator_context *ctx)
-{
-	if (ctx->rot->mode == ROT_REGDMA_OFF)
-		return 0;
-	else
-		return ctx->timestamp & SDE_HW_ROT_REGDMA_SEG_MASK;
-}
-
-/**
- * sde_hw_rotator_get_regdma_segment_base: return the base pointe of current
- * regdma command buffer
- * @ctx: Rotator Context
- * return: base segment address
- */
-static inline char __iomem *sde_hw_rotator_get_regdma_segment_base(
-		struct sde_hw_rotator_context *ctx)
-{
-	SDEROT_DBG("regdma base @slot[%d]: %pK\n",
-			sde_hw_rotator_get_regdma_ctxidx(ctx),
-			ctx->regdma_base);
-
-	return ctx->regdma_base;
-}
-
-/**
- * sde_hw_rotator_get_regdma_segment(): return current regdma command buffer
- * pointer for current regdma segment.
- * @ctx: Rotator Context
- * return: segment address
- */
-static inline char __iomem *sde_hw_rotator_get_regdma_segment(
-		struct sde_hw_rotator_context *ctx)
-{
-	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	char __iomem *addr = ctx->regdma_wrptr;
-
-	SDEROT_DBG("regdma slot[%d] ==> %pK\n", idx, addr);
-	return addr;
-}
-
-/**
- * sde_hw_rotator_put_regdma_segment(): update current regdma command buffer
- * pointer for current regdma segment
- * @ctx: Rotator Context
- * @wrptr: current regdma segment location
- */
-static inline void sde_hw_rotator_put_regdma_segment(
-		struct sde_hw_rotator_context *ctx,
-		char __iomem *wrptr)
-{
-	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-
-	ctx->regdma_wrptr = wrptr;
-	SDEROT_DBG("regdma slot[%d] <== %pK\n", idx, wrptr);
-}
-
-/**
- * sde_hw_rotator_put_ctx(): Storing rotator context according to its
- * timestamp.
- */
-static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	unsigned long flags;
-
-	spin_lock_irqsave(&rot->rotisr_lock, flags);
-	rot->rotCtx[ctx->q_id][idx] = ctx;
-	if (ctx->sbuf_mode)
-		list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
-	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%pK | session-id:%d\n",
-			 ctx->q_id, idx, ctx, ctx->session_id);
-}
-
-/**
- * sde_hw_rotator_clr_ctx(): Clearing rotator context according to its
- * timestamp.
- */
-static inline void sde_hw_rotator_clr_ctx(struct sde_hw_rotator_context *ctx)
-{
-	struct sde_hw_rotator *rot = ctx->rot;
-	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	unsigned long flags;
-
-	spin_lock_irqsave(&rot->rotisr_lock, flags);
-	rot->rotCtx[ctx->q_id][idx] = NULL;
-	if (ctx->sbuf_mode)
-		list_del_init(&ctx->list);
-	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
-
-	SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
-			 ctx->q_id, idx, ctx->session_id);
-}
-
-#endif /*_SDE_ROTATOR_R3_INTERNAL_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
deleted file mode 100644
index 7fe651c..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ /dev/null
@@ -1,739 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-buf.h>
-#include <linux/of_platform.h>
-#include <linux/msm_dma_iommu_mapping.h>
-#include <asm/dma-iommu.h>
-
-#include "soc/qcom/secure_buffer.h"
-#include "sde_rotator_base.h"
-#include "sde_rotator_util.h"
-#include "sde_rotator_io_util.h"
-#include "sde_rotator_smmu.h"
-#include "sde_rotator_debug.h"
-
-#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
-#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"
-
-#ifndef SZ_4G
-#define SZ_4G	(((size_t) SZ_1G) * 4)
-#endif
-
-#ifndef SZ_2G
-#define SZ_2G	(((size_t) SZ_1G) * 2)
-#endif
-
-struct sde_smmu_domain {
-	char *ctx_name;
-	int domain;
-	unsigned long start;
-	unsigned long size;
-};
-
-static inline bool sde_smmu_is_valid_domain_type(
-		struct sde_rot_data_type *mdata, int domain_type)
-{
-	return true;
-}
-
-static inline bool sde_smmu_is_valid_domain_condition(
-		struct sde_rot_data_type *mdata,
-		int domain_type,
-		bool is_attach)
-{
-	if (is_attach) {
-		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
-			mdata->sde_caps_map) &&
-			(mdata->sec_cam_en &&
-			 domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
-			return false;
-		else
-			return true;
-	} else {
-		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
-			mdata->sde_caps_map) &&
-			(mdata->sec_cam_en &&
-			 domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
-			return true;
-		else
-			return false;
-	}
-}
-
-struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-
-	if (!sde_smmu_is_valid_domain_type(mdata, domain))
-		return NULL;
-
-	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
-			&mdata->sde_smmu[domain];
-}
-
-static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
-		struct sde_module_power *mp)
-{
-	u32 i = 0, rc = 0;
-	const char *clock_name;
-	u32 clock_rate;
-	int num_clk;
-
-	num_clk = of_property_count_strings(pdev->dev.of_node,
-			"clock-names");
-	if (num_clk < 0) {
-		SDEROT_DBG("clocks are not defined\n");
-		num_clk = 0;
-	}
-
-	mp->num_clk = num_clk;
-	mp->clk_config = devm_kzalloc(&pdev->dev,
-			sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
-	if (num_clk && !mp->clk_config) {
-		rc = -ENOMEM;
-		mp->num_clk = 0;
-		goto clk_err;
-	}
-
-	for (i = 0; i < mp->num_clk; i++) {
-		of_property_read_string_index(pdev->dev.of_node, "clock-names",
-							i, &clock_name);
-		strlcpy(mp->clk_config[i].clk_name, clock_name,
-				sizeof(mp->clk_config[i].clk_name));
-
-		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
-							i, &clock_rate);
-		mp->clk_config[i].rate = clock_rate;
-
-		if (!clock_rate)
-			mp->clk_config[i].type = SDE_CLK_AHB;
-		else
-			mp->clk_config[i].type = SDE_CLK_PCLK;
-	}
-
-clk_err:
-	return rc;
-}
-
-static int sde_smmu_clk_register(struct platform_device *pdev,
-		struct sde_module_power *mp)
-{
-	int i, ret;
-	struct clk *clk;
-
-	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
-	if (ret) {
-		SDEROT_ERR("unable to parse clocks\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < mp->num_clk; i++) {
-		clk = devm_clk_get(&pdev->dev,
-				mp->clk_config[i].clk_name);
-		if (IS_ERR(clk)) {
-			SDEROT_ERR("unable to get clk: %s\n",
-					mp->clk_config[i].clk_name);
-			return PTR_ERR(clk);
-		}
-		mp->clk_config[i].clk = clk;
-	}
-	return 0;
-}
-
-static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
-	bool enable)
-{
-	int rc = 0;
-	struct sde_module_power *mp;
-
-	if (!sde_smmu)
-		return -EINVAL;
-
-	mp = &sde_smmu->mp;
-
-	if (!mp->num_vreg && !mp->num_clk)
-		return 0;
-
-	if (enable) {
-		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
-		if (rc) {
-			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
-			goto end;
-		}
-		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
-			VOTE_INDEX_19_MHZ);
-		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
-		if (rc) {
-			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
-			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
-				VOTE_INDEX_DISABLE);
-			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
-				false);
-			goto end;
-		}
-	} else {
-		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
-		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
-			VOTE_INDEX_DISABLE);
-		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
-	}
-end:
-	return rc;
-}
-
-/*
- * sde_smmu_attach()
- *
- * Associates each configured VA range with the corresponding smmu context
- * bank device. Enables the clks as smmu requires voting it before the usage.
- * And iommu attach is done only once during the initial attach and it is never
- * detached as smmu v2 uses a feature called 'retention'.
- */
-int sde_smmu_attach(struct sde_rot_data_type *mdata)
-{
-	struct sde_smmu_client *sde_smmu;
-	int i, rc = 0;
-
-	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
-		if (!sde_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		sde_smmu = sde_smmu_get_cb(i);
-		if (sde_smmu && sde_smmu->dev) {
-			rc = sde_smmu_enable_power(sde_smmu, true);
-			if (rc) {
-				SDEROT_ERR(
-					"power enable failed - domain:[%d] rc:%d\n",
-					i, rc);
-				goto err;
-			}
-
-			if (!sde_smmu->domain_attached &&
-				sde_smmu_is_valid_domain_condition(mdata,
-						i,
-						true)) {
-				rc = __depr_arm_iommu_attach_device(
-					sde_smmu->dev, sde_smmu->mmu_mapping);
-				if (rc) {
-					SDEROT_ERR(
-						"iommu attach device failed for domain[%d] with err:%d\n",
-						i, rc);
-					sde_smmu_enable_power(sde_smmu,
-						false);
-					goto err;
-				}
-				sde_smmu->domain_attached = true;
-				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
-			}
-		} else {
-			SDEROT_DBG(
-				"iommu device not attached for domain[%d]\n",
-				i);
-		}
-	}
-	return 0;
-
-err:
-	for (i--; i >= 0; i--) {
-		sde_smmu = sde_smmu_get_cb(i);
-		if (sde_smmu && sde_smmu->dev) {
-			__depr_arm_iommu_detach_device(sde_smmu->dev);
-			sde_smmu_enable_power(sde_smmu, false);
-			sde_smmu->domain_attached = false;
-		}
-	}
-	return rc;
-}
-
-/*
- * sde_smmu_detach()
- *
- * Only disables the clks as it is not required to detach the iommu mapped
- * VA range from the device in smmu as explained in the sde_smmu_attach
- */
-int sde_smmu_detach(struct sde_rot_data_type *mdata)
-{
-	struct sde_smmu_client *sde_smmu;
-	int i;
-
-	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
-		if (!sde_smmu_is_valid_domain_type(mdata, i))
-			continue;
-
-		sde_smmu = sde_smmu_get_cb(i);
-		if (sde_smmu && sde_smmu->dev) {
-			if (sde_smmu->domain_attached &&
-				sde_smmu_is_valid_domain_condition(mdata,
-					i, false)) {
-				__depr_arm_iommu_detach_device(sde_smmu->dev);
-				SDEROT_DBG("iommu domain[%i] detached\n", i);
-				sde_smmu->domain_attached = false;
-				}
-			else {
-				sde_smmu_enable_power(sde_smmu, false);
-			}
-		}
-	}
-	return 0;
-}
-
-int sde_smmu_get_domain_id(u32 type)
-{
-	return type;
-}
-
-/*
- * sde_smmu_dma_buf_attach()
- *
- * Same as sde_smmu_dma_buf_attach except that the device is got from
- * the configured smmu v2 context banks.
- */
-struct dma_buf_attachment *sde_smmu_dma_buf_attach(
-		struct dma_buf *dma_buf, struct device *dev, int domain)
-{
-	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
-
-	if (!sde_smmu) {
-		SDEROT_ERR("not able to get smmu context\n");
-		return NULL;
-	}
-
-	return dma_buf_attach(dma_buf, sde_smmu->dev);
-}
-
-/*
- * sde_smmu_map_dma_buf()
- *
- * Maps existing buffer (by struct scatterlist) into SMMU context bank device.
- * From which we can take the virtual address and size allocated.
- * msm_map_dma_buf is depricated with smmu v2 and it uses dma_map_sg instead
- */
-int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
-		struct sg_table *table, int domain, dma_addr_t *iova,
-		unsigned long *size, int dir)
-{
-	int rc;
-	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
-	unsigned long attrs = 0;
-
-	if (!sde_smmu) {
-		SDEROT_ERR("not able to get smmu context\n");
-		return -EINVAL;
-	}
-
-	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
-			attrs);
-	if (!rc) {
-		SDEROT_ERR("dma map sg failed\n");
-		return -ENOMEM;
-	}
-
-	*iova = table->sgl->dma_address;
-	*size = table->sgl->dma_length;
-	return 0;
-}
-
-void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
-		int dir, struct dma_buf *dma_buf)
-{
-	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
-
-	if (!sde_smmu) {
-		SDEROT_ERR("not able to get smmu context\n");
-		return;
-	}
-
-	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
-}
-
-static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);
-
-int sde_smmu_ctrl(int enable)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int rc = 0;
-
-	mutex_lock(&sde_smmu_ref_cnt_lock);
-	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
-		mdata->iommu_attached);
-	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
-		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
-		mdata->iommu_attached);
-
-	if (enable) {
-		if (!mdata->iommu_attached) {
-			rc = sde_smmu_attach(mdata);
-			if (!rc)
-				mdata->iommu_attached = true;
-		}
-		mdata->iommu_ref_cnt++;
-	} else {
-		if (mdata->iommu_ref_cnt) {
-			mdata->iommu_ref_cnt--;
-			if (mdata->iommu_ref_cnt == 0)
-				if (mdata->iommu_attached) {
-					rc = sde_smmu_detach(mdata);
-					if (!rc)
-						mdata->iommu_attached = false;
-				}
-		} else {
-			SDEROT_ERR("unbalanced iommu ref\n");
-		}
-	}
-	mutex_unlock(&sde_smmu_ref_cnt_lock);
-
-	if (rc < 0)
-		return rc;
-	else
-		return mdata->iommu_ref_cnt;
-}
-
-int sde_smmu_secure_ctrl(int enable)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	int rc = 0;
-
-	mutex_lock(&sde_smmu_ref_cnt_lock);
-	/*
-	 * Attach/detach secure context irrespective of ref count,
-	 * We come here only when secure camera is disabled
-	 */
-	if (enable) {
-		rc = sde_smmu_attach(mdata);
-		if (!rc)
-			mdata->iommu_attached = true;
-	} else {
-		rc = sde_smmu_detach(mdata);
-		/*
-		 * keep iommu_attached equal to true,
-		 * so that driver does not attemp to attach
-		 * while in secure state
-		 */
-	}
-
-	mutex_unlock(&sde_smmu_ref_cnt_lock);
-	return rc;
-}
-
-/*
- * sde_smmu_device_create()
- * @dev: sde_mdp device
- *
- * For smmu, each context bank is a separate child device of sde rot.
- * Platform devices are created for those smmu related child devices of
- * sde rot here. This would facilitate probes to happen for these devices in
- * which the smmu mapping and initialization is handled.
- */
-void sde_smmu_device_create(struct device *dev)
-{
-	struct device_node *parent, *child;
-
-	parent = dev->of_node;
-	for_each_child_of_node(parent, child) {
-		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC))
-			of_platform_device_create(child, NULL, dev);
-		else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC))
-			of_platform_device_create(child, NULL, dev);
-	}
-}
-
-int sde_smmu_init(struct device *dev)
-{
-	sde_smmu_device_create(dev);
-
-	return 0;
-}
-
-static int sde_smmu_fault_handler(struct iommu_domain *domain,
-		struct device *dev, unsigned long iova,
-		int flags, void *token)
-{
-	struct sde_smmu_client *sde_smmu;
-	int rc = -EINVAL;
-
-	if (!token) {
-		SDEROT_ERR("Error: token is NULL\n");
-		return -EINVAL;
-	}
-
-	sde_smmu = (struct sde_smmu_client *)token;
-
-	/* trigger rotator dump */
-	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
-			iova, flags);
-	SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
-
-	/* generate dump, but no panic */
-	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
-
-	/*
-	 * return -ENOSYS to allow smmu driver to dump out useful
-	 * debug info.
-	 */
-	return rc;
-}
-
-static struct sde_smmu_domain sde_rot_unsec = {
-	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_2G, (SZ_4G - SZ_2G)};
-static struct sde_smmu_domain sde_rot_sec = {
-	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_2G, (SZ_4G - SZ_2G)};
-
-static const struct of_device_id sde_smmu_dt_match[] = {
-	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
-	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
-	{}
-};
-MODULE_DEVICE_TABLE(of, sde_smmu_dt_match);
-
-/*
- * sde_smmu_probe()
- * @pdev: platform device
- *
- * Each smmu context acts as a separate device and the context banks are
- * configured with a VA range.
- * Registeres the clks as each context bank has its own clks, for which voting
- * has to be done everytime before using that context bank.
- */
-int sde_smmu_probe(struct platform_device *pdev)
-{
-	struct device *dev;
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_smmu_client *sde_smmu;
-	int rc = 0;
-	struct sde_smmu_domain smmu_domain;
-	const struct of_device_id *match;
-	struct sde_module_power *mp;
-	char name[MAX_CLIENT_NAME_LEN];
-	int mdphtw_llc_enable = 1;
-	u32 sid = 0;
-	bool smmu_rot_full_map;
-
-	if (!mdata) {
-		SDEROT_INFO(
-			"probe failed as mdata is not initializedi, probe defer\n");
-		return -EPROBE_DEFER;
-	}
-
-	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
-	if (!match || !match->data) {
-		SDEROT_ERR("probe failed as match data is invalid\n");
-		return -EINVAL;
-	}
-
-	smmu_domain = *(struct sde_smmu_domain *) (match->data);
-	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
-		SDEROT_ERR("no matching device found\n");
-		return -EINVAL;
-	}
-
-	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
-		dev = &pdev->dev;
-		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
-			1, &sid);
-		if (rc)
-			SDEROT_DBG("SID not defined for domain:%d",
-					smmu_domain.domain);
-	} else {
-		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
-				smmu_domain.domain);
-		return -EINVAL;
-	}
-
-	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
-	sde_smmu->domain = smmu_domain.domain;
-	sde_smmu->sid = sid;
-	mp = &sde_smmu->mp;
-	memset(mp, 0, sizeof(struct sde_module_power));
-
-	if (of_find_property(pdev->dev.of_node,
-		"gdsc-mdss-supply", NULL)) {
-
-		mp->vreg_config = devm_kzalloc(&pdev->dev,
-			sizeof(struct sde_vreg), GFP_KERNEL);
-		if (!mp->vreg_config)
-			return -ENOMEM;
-
-		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
-				sizeof(mp->vreg_config->vreg_name));
-		mp->num_vreg = 1;
-	}
-
-	if (mp->vreg_config) {
-		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
-			mp->num_vreg, true);
-		if (rc) {
-			SDEROT_ERR("vreg config failed rc=%d\n", rc);
-			goto release_vreg;
-		}
-	}
-
-	rc = sde_smmu_clk_register(pdev, mp);
-	if (rc) {
-		SDEROT_ERR(
-			"smmu clk register failed for domain[%d] with err:%d\n",
-			smmu_domain.domain, rc);
-		goto disable_vreg;
-	}
-
-	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
-	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
-	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
-		SDEROT_ERR("mdss bus client register failed\n");
-		rc = PTR_ERR(sde_smmu->reg_bus_clt);
-		sde_smmu->reg_bus_clt = NULL;
-		goto unregister_clk;
-	}
-
-	rc = sde_smmu_enable_power(sde_smmu, true);
-	if (rc) {
-		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
-			smmu_domain.domain, rc);
-		goto bus_client_destroy;
-	}
-
-	smmu_rot_full_map = of_property_read_bool(dev->of_node,
-					 "qcom,fullsize-va-map");
-	if (smmu_rot_full_map) {
-		smmu_domain.start = SZ_128K;
-		smmu_domain.size = SZ_4G - SZ_128K;
-	}
-
-	sde_smmu->mmu_mapping = __depr_arm_iommu_create_mapping(
-		&platform_bus_type, smmu_domain.start, smmu_domain.size);
-	if (IS_ERR(sde_smmu->mmu_mapping)) {
-		SDEROT_ERR("iommu create mapping failed for domain[%d]\n",
-			smmu_domain.domain);
-		rc = PTR_ERR(sde_smmu->mmu_mapping);
-		sde_smmu->mmu_mapping = NULL;
-		goto disable_power;
-	}
-
-	rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
-			DOMAIN_ATTR_USE_UPSTREAM_HINT, &mdphtw_llc_enable);
-	if (rc) {
-		SDEROT_ERR("couldn't enable rot pagetable walks: %d\n", rc);
-		goto release_mapping;
-	}
-
-	if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
-		int secure_vmid = VMID_CP_PIXEL;
-
-		rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
-			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
-		if (rc) {
-			SDEROT_ERR("couldn't set secure pixel vmid\n");
-			goto release_mapping;
-		}
-	}
-
-	if (!dev->dma_parms)
-		dev->dma_parms = devm_kzalloc(dev,
-				sizeof(*dev->dma_parms), GFP_KERNEL);
-
-	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
-
-	iommu_set_fault_handler(sde_smmu->mmu_mapping->domain,
-			sde_smmu_fault_handler, (void *)sde_smmu);
-
-	sde_smmu_enable_power(sde_smmu, false);
-
-	sde_smmu->dev = dev;
-	SDEROT_INFO(
-		"iommu v2 domain[%d] mapping and clk register successful!\n",
-			smmu_domain.domain);
-	return 0;
-
-release_mapping:
-	__depr_arm_iommu_release_mapping(sde_smmu->mmu_mapping);
-	sde_smmu->mmu_mapping = NULL;
-disable_power:
-	sde_smmu_enable_power(sde_smmu, false);
-bus_client_destroy:
-	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
-	sde_smmu->reg_bus_clt = NULL;
-unregister_clk:
-disable_vreg:
-	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
-			sde_smmu->mp.num_vreg, false);
-release_vreg:
-	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
-	sde_smmu->mp.vreg_config = NULL;
-	sde_smmu->mp.num_vreg = 0;
-	return rc;
-}
-
-int sde_smmu_remove(struct platform_device *pdev)
-{
-	int i;
-	struct sde_smmu_client *sde_smmu;
-
-	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
-		sde_smmu = sde_smmu_get_cb(i);
-		if (!sde_smmu || !sde_smmu->dev ||
-			(sde_smmu->dev != &pdev->dev))
-			continue;
-
-		sde_smmu->dev = NULL;
-		__depr_arm_iommu_release_mapping(sde_smmu->mmu_mapping);
-		sde_smmu->mmu_mapping = NULL;
-		sde_smmu_enable_power(sde_smmu, false);
-		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
-		sde_smmu->reg_bus_clt = NULL;
-		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
-				sde_smmu->mp.num_vreg, false);
-		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
-		sde_smmu->mp.vreg_config = NULL;
-		sde_smmu->mp.num_vreg = 0;
-	}
-	return 0;
-}
-
-static struct platform_driver sde_smmu_driver = {
-	.probe = sde_smmu_probe,
-	.remove = sde_smmu_remove,
-	.shutdown = NULL,
-	.driver = {
-		.name = "sde_smmu",
-		.of_match_table = sde_smmu_dt_match,
-	},
-};
-
-static int sde_smmu_register_driver(void)
-{
-	return platform_driver_register(&sde_smmu_driver);
-}
-
-static int __init sde_smmu_driver_init(void)
-{
-	int ret;
-
-	ret = sde_smmu_register_driver();
-	if (ret)
-		SDEROT_ERR("sde_smmu_register_driver() failed!\n");
-
-	return ret;
-}
-module_init(sde_smmu_driver_init);
-
-static void __exit sde_smmu_driver_cleanup(void)
-{
-	platform_driver_unregister(&sde_smmu_driver);
-}
-module_exit(sde_smmu_driver_cleanup);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SDE SMMU driver");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h
deleted file mode 100644
index a1fcd80..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_SMMU_H
-#define SDE_ROTATOR_SMMU_H
-
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-
-#include "sde_rotator_io_util.h"
-
-enum sde_iommu_domain_type {
-	SDE_IOMMU_DOMAIN_ROT_UNSECURE,
-	SDE_IOMMU_DOMAIN_ROT_SECURE,
-	SDE_IOMMU_MAX_DOMAIN
-};
-
-int sde_smmu_init(struct device *dev);
-
-static inline int sde_smmu_dma_data_direction(int dir)
-{
-	return dir;
-}
-
-int sde_smmu_ctrl(int enable);
-
-struct dma_buf_attachment *sde_smmu_dma_buf_attach(
-		struct dma_buf *dma_buf, struct device *dev, int domain);
-
-int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
-		struct sg_table *table, int domain, dma_addr_t *iova,
-		unsigned long *size, int dir);
-
-void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
-		int dir, struct dma_buf *dma_buf);
-
-int sde_smmu_secure_ctrl(int enable);
-
-#endif /* SDE_ROTATOR_SMMU_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c
deleted file mode 100644
index 278139c..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c
+++ /dev/null
@@ -1,435 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/dma-fence.h>
-#include <linux/sync_file.h>
-
-#include "sde_rotator_util.h"
-#include "sde_rotator_sync.h"
-
-#define SDE_ROT_SYNC_NAME_SIZE		64
-#define SDE_ROT_SYNC_DRIVER_NAME	"sde_rot"
-
-/**
- * struct sde_rot_fence - sync fence context
- * @base: base sync fence object
- * @name: name of this sync fence
- * @fence_list: linked list of outstanding sync fence
- */
-struct sde_rot_fence {
-	struct dma_fence base;
-	char name[SDE_ROT_SYNC_NAME_SIZE];
-	struct list_head fence_list;
-};
-
-/**
- * struct sde_rot_timeline - sync timeline context
- * @kref: reference count of timeline
- * @lock: serialization lock for timeline and fence update
- * @name: name of timeline
- * @fence_name: fence name prefix
- * @next_value: next commit sequence number
- * @curr_value: current retired sequence number
- * @context: fence context identifier
- * @fence_list_head: linked list of outstanding sync fence
- */
-struct sde_rot_timeline {
-	struct kref kref;
-	spinlock_t lock;
-	char name[SDE_ROT_SYNC_NAME_SIZE];
-	char fence_name[SDE_ROT_SYNC_NAME_SIZE];
-	u32 next_value;
-	u32 curr_value;
-	u64 context;
-	struct list_head fence_list_head;
-};
-
-/*
- * to_sde_rot_fence - get rotator fence from fence base object
- * @fence: Pointer to fence base object
- */
-static struct sde_rot_fence *to_sde_rot_fence(struct dma_fence *fence)
-{
-	return container_of(fence, struct sde_rot_fence, base);
-}
-
-/*
- * to_sde_rot_timeline - get rotator timeline from fence base object
- * @fence: Pointer to fence base object
- */
-static struct sde_rot_timeline *to_sde_rot_timeline(struct dma_fence *fence)
-{
-	return container_of(fence->lock, struct sde_rot_timeline, lock);
-}
-
-/*
- * sde_rotator_free_timeline - Free the given timeline object
- * @kref: Pointer to timeline kref object.
- */
-static void sde_rotator_free_timeline(struct kref *kref)
-{
-	struct sde_rot_timeline *tl =
-		container_of(kref, struct sde_rot_timeline, kref);
-
-	kfree(tl);
-}
-
-/*
- * sde_rotator_put_timeline - Put the given timeline object
- * @tl: Pointer to timeline object.
- */
-static void sde_rotator_put_timeline(struct sde_rot_timeline *tl)
-{
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	kref_put(&tl->kref, sde_rotator_free_timeline);
-}
-
-/*
- * sde_rotator_get_timeline - Get the given timeline object
- * @tl: Pointer to timeline object.
- */
-static void sde_rotator_get_timeline(struct sde_rot_timeline *tl)
-{
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	kref_get(&tl->kref);
-}
-
-static const char *sde_rot_fence_get_driver_name(struct dma_fence *fence)
-{
-	return SDE_ROT_SYNC_DRIVER_NAME;
-}
-
-static const char *sde_rot_fence_get_timeline_name(struct dma_fence *fence)
-{
-	struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
-
-	return tl->name;
-}
-
-static bool sde_rot_fence_enable_signaling(struct dma_fence *fence)
-{
-	return true;
-}
-
-static bool sde_rot_fence_signaled(struct dma_fence *fence)
-{
-	struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
-	bool status;
-
-	status = ((s32) (tl->curr_value - fence->seqno)) >= 0;
-	SDEROT_DBG("status:%d fence seq:%d and timeline:%d\n",
-			status, fence->seqno, tl->curr_value);
-	return status;
-}
-
-static void sde_rot_fence_release(struct dma_fence *fence)
-{
-	struct sde_rot_fence *f = to_sde_rot_fence(fence);
-	unsigned long flags;
-
-	spin_lock_irqsave(fence->lock, flags);
-	if (!list_empty(&f->fence_list))
-		list_del(&f->fence_list);
-	spin_unlock_irqrestore(fence->lock, flags);
-	sde_rotator_put_timeline(to_sde_rot_timeline(fence));
-	kfree_rcu(f, base.rcu);
-}
-
-static void sde_rot_fence_value_str(struct dma_fence *fence, char *str,
-		int size)
-{
-	snprintf(str, size, "%u", fence->seqno);
-}
-
-static void sde_rot_fence_timeline_value_str(struct dma_fence *fence,
-		char *str, int size)
-{
-	struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
-
-	snprintf(str, size, "%u", tl->curr_value);
-}
-
-static struct dma_fence_ops sde_rot_fence_ops = {
-	.get_driver_name = sde_rot_fence_get_driver_name,
-	.get_timeline_name = sde_rot_fence_get_timeline_name,
-	.enable_signaling = sde_rot_fence_enable_signaling,
-	.signaled = sde_rot_fence_signaled,
-	.wait = dma_fence_default_wait,
-	.release = sde_rot_fence_release,
-	.fence_value_str = sde_rot_fence_value_str,
-	.timeline_value_str = sde_rot_fence_timeline_value_str,
-};
-
-/*
- * sde_rotator_create_timeline - Create timeline object with the given name
- * @name: Pointer to name character string.
- */
-struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
-{
-	struct sde_rot_timeline *tl;
-
-	if (!name) {
-		SDEROT_ERR("invalid parameters\n");
-		return NULL;
-	}
-
-	tl = kzalloc(sizeof(struct sde_rot_timeline), GFP_KERNEL);
-	if (!tl)
-		return NULL;
-
-	kref_init(&tl->kref);
-	snprintf(tl->name, sizeof(tl->name), "rot_timeline_%s", name);
-	snprintf(tl->fence_name, sizeof(tl->fence_name), "rot_fence_%s", name);
-	spin_lock_init(&tl->lock);
-	tl->context = dma_fence_context_alloc(1);
-	INIT_LIST_HEAD(&tl->fence_list_head);
-
-	return tl;
-}
-
-/*
- * sde_rotator_destroy_timeline - Destroy the given timeline object
- * @tl: Pointer to timeline object.
- */
-void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
-{
-	sde_rotator_put_timeline(tl);
-}
-
-/*
- * sde_rotator_inc_timeline_locked - Increment timeline by given amount
- * @tl: Pointer to timeline object.
- * @increment: the amount to increase the timeline by.
- */
-static int sde_rotator_inc_timeline_locked(struct sde_rot_timeline *tl,
-		int increment)
-{
-	struct sde_rot_fence *f, *next;
-
-	tl->curr_value += increment;
-	list_for_each_entry_safe(f, next, &tl->fence_list_head, fence_list) {
-		if (dma_fence_is_signaled_locked(&f->base)) {
-			SDEROT_DBG("%s signaled\n", f->name);
-			list_del_init(&f->fence_list);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * sde_rotator_resync_timeline - Resync timeline to last committed value
- * @tl: Pointer to timeline object.
- */
-void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
-{
-	unsigned long flags;
-	s32 val;
-
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	spin_lock_irqsave(&tl->lock, flags);
-	val = tl->next_value - tl->curr_value;
-	if (val > 0) {
-		SDEROT_WARN("flush %s:%d\n", tl->name, val);
-		sde_rotator_inc_timeline_locked(tl, val);
-	}
-	spin_unlock_irqrestore(&tl->lock, flags);
-}
-
-/*
- * sde_rotator_get_sync_fence - Create fence object from the given timeline
- * @tl: Pointer to timeline object
- * @fence_fd: Pointer to file descriptor associated with the returned fence.
- *		Null if not required.
- * @timestamp: Pointer to timestamp of the returned fence. Null if not required.
- */
-struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
-		struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp)
-{
-	struct sde_rot_fence *f;
-	unsigned long flags;
-	u32 val;
-
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return NULL;
-	}
-
-	f = kzalloc(sizeof(struct sde_rot_fence), GFP_KERNEL);
-	if (!f)
-		return NULL;
-
-	INIT_LIST_HEAD(&f->fence_list);
-	spin_lock_irqsave(&tl->lock, flags);
-	val = ++(tl->next_value);
-	dma_fence_init(&f->base, &sde_rot_fence_ops, &tl->lock,
-			tl->context, val);
-	list_add_tail(&f->fence_list, &tl->fence_list_head);
-	sde_rotator_get_timeline(tl);
-	spin_unlock_irqrestore(&tl->lock, flags);
-	snprintf(f->name, sizeof(f->name), "%s_%u", tl->fence_name, val);
-
-	if (fence_fd)
-		*fence_fd = sde_rotator_get_sync_fence_fd(
-				(struct sde_rot_sync_fence *) &f->base);
-
-	if (timestamp)
-		*timestamp = val;
-
-	SDEROT_DBG("output sync fence created at val=%u\n", val);
-
-	return (struct sde_rot_sync_fence *) &f->base;
-}
-
-/*
- * sde_rotator_inc_timeline - Increment timeline by given amount
- * @tl: Pointer to timeline object.
- * @increment: the amount to increase the timeline by.
- */
-int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
-{
-	unsigned long flags;
-	int rc;
-
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&tl->lock, flags);
-	rc = sde_rotator_inc_timeline_locked(tl, increment);
-	spin_unlock_irqrestore(&tl->lock, flags);
-
-	return rc;
-}
-
-/*
- * sde_rotator_get_timeline_commit_ts - Return commit tick of given timeline
- * @tl: Pointer to timeline object.
- */
-u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
-{
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return 0;
-	}
-
-	return tl->next_value;
-}
-
-/*
- * sde_rotator_get_timeline_retire_ts - Return retire tick of given timeline
- * @tl: Pointer to timeline object.
- */
-u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
-{
-	if (!tl) {
-		SDEROT_ERR("invalid parameters\n");
-		return 0;
-	}
-
-	return tl->curr_value;
-}
-
-/*
- * sde_rotator_put_sync_fence - Destroy given fence object
- * @fence: Pointer to fence object.
- */
-void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
-{
-	if (!fence) {
-		SDEROT_ERR("invalid parameters\n");
-		return;
-	}
-
-	dma_fence_put((struct dma_fence *) fence);
-}
-
-/*
- * sde_rotator_wait_sync_fence - Wait until fence signal or timeout
- * @fence: Pointer to fence object.
- * @timeout: maximum wait time, in msec, for fence to signal.
- */
-int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
-		long timeout)
-{
-	int rc;
-
-	if (!fence) {
-		SDEROT_ERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	rc = dma_fence_wait_timeout((struct dma_fence *) fence, false,
-			msecs_to_jiffies(timeout));
-	if (rc > 0) {
-		SDEROT_DBG("fence signaled\n");
-		rc = 0;
-	} else if (rc == 0) {
-		SDEROT_DBG("fence timeout\n");
-		rc = -ETIMEDOUT;
-	}
-
-	return rc;
-}
-
-/*
- * sde_rotator_get_sync_fence_fd - Get fence object of given file descriptor
- * @fd: File description of fence object.
- */
-struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
-{
-	return (struct sde_rot_sync_fence *) sync_file_get_fence(fd);
-}
-
-/*
- * sde_rotator_get_sync_fence_fd - Get file descriptor of given fence object
- * @fence: Pointer to fence object.
- */
-int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
-{
-	int fd;
-	struct sync_file *sync_file;
-
-	if (!fence) {
-		SDEROT_ERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	fd = get_unused_fd_flags(O_CLOEXEC);
-	if (fd < 0) {
-		SDEROT_ERR("fail to get unused fd\n");
-		return fd;
-	}
-
-	sync_file = sync_file_create((struct dma_fence *) fence);
-	if (!sync_file) {
-		put_unused_fd(fd);
-		SDEROT_ERR("failed to create sync file\n");
-		return -ENOMEM;
-	}
-
-	fd_install(fd, sync_file->file);
-
-	return fd;
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h
deleted file mode 100644
index 0711d6e..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef SDE_ROTATOR_SYNC_H
-#define SDE_ROTATOR_SYNC_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-struct sde_rot_sync_fence;
-struct sde_rot_timeline;
-
-#if defined(CONFIG_SYNC_FILE)
-struct sde_rot_timeline *sde_rotator_create_timeline(const char *name);
-
-void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl);
-
-struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
-		struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp);
-
-void sde_rotator_resync_timeline(struct sde_rot_timeline *tl);
-
-u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl);
-
-u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl);
-
-int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment);
-
-void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence);
-
-int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
-		long timeout);
-
-struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd);
-
-int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence);
-
-#else
-static inline
-struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
-{
-	return NULL;
-}
-
-static inline
-void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
-{
-}
-
-static inline
-struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
-		struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp)
-{
-	return NULL;
-}
-
-static inline
-void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
-{
-}
-
-static inline
-int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
-{
-	return 0;
-}
-
-static inline
-u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
-{
-	return 0;
-}
-
-static inline
-u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
-{
-	return 0;
-}
-
-static inline
-void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
-{
-}
-
-static inline
-int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
-		long timeout)
-{
-	return 0;
-}
-
-static inline
-struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
-{
-	return NULL;
-}
-
-static inline
-int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
-{
-	return -EBADF;
-}
-#endif
-
-#endif /* SDE_ROTATOR_SYNC_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h
deleted file mode 100644
index 066c7ee..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, 2015-2019, The Linux Foundation. All rights reserved.
- */
-#if !defined(TRACE_SDE_ROTATOR_H) || defined(TRACE_HEADER_MULTI_READ)
-#define TRACE_SDE_ROTATOR_H
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM sde_rotator
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE sde_rotator_trace
-
-#include <linux/tracepoint.h>
-#include <sde_rotator_core.h>
-
-DECLARE_EVENT_CLASS(rot_entry_template,
-	TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
-	TP_ARGS(ss_id, sq_id, rot),
-	TP_STRUCT__entry(
-			__field(u32, ss_id)
-			__field(u32, sq_id)
-			__field(u32, pr_id)
-			__field(u32, flags)
-			__field(u32, src_fmt)
-			__field(u16, src_bw)
-			__field(u16, src_bh)
-			__field(u16, src_x)
-			__field(u16, src_y)
-			__field(u16, src_w)
-			__field(u16, src_h)
-			__field(u32, dst_fmt)
-			__field(u16, dst_bw)
-			__field(u16, dst_bh)
-			__field(u16, dst_x)
-			__field(u16, dst_y)
-			__field(u16, dst_w)
-			__field(u16, dst_h)
-	),
-	TP_fast_assign(
-			__entry->ss_id = ss_id;
-			__entry->sq_id = sq_id;
-			__entry->pr_id = rot->wb_idx;
-			__entry->flags = rot->flags;
-			__entry->src_fmt = rot->input_format;
-			__entry->src_bw = rot->input_width;
-			__entry->src_bh = rot->input_height;
-			__entry->src_x = rot->src_x;
-			__entry->src_y = rot->src_y;
-			__entry->src_w = rot->src_w;
-			__entry->src_h = rot->src_h;
-			__entry->dst_fmt = rot->output_format;
-			__entry->dst_bw = rot->output_width;
-			__entry->dst_bh = rot->output_height;
-			__entry->dst_x = rot->dst_x;
-			__entry->dst_y = rot->dst_y;
-			__entry->dst_w = rot->dst_w;
-			__entry->dst_h = rot->dst_h;
-	),
-
-	TP_printk("%d.%d|%d|%x|%x|%u,%u|%u,%u,%u,%u|%x|%u,%u|%u,%u,%u,%u|",
-			__entry->ss_id, __entry->sq_id, __entry->pr_id,
-			__entry->flags,
-			__entry->src_fmt, __entry->src_bw, __entry->src_bh,
-			__entry->src_x, __entry->src_y,
-			__entry->src_w, __entry->src_h,
-			__entry->dst_fmt, __entry->dst_bw, __entry->dst_bh,
-			__entry->dst_x, __entry->dst_y,
-			__entry->dst_w, __entry->dst_h)
-);
-
-DEFINE_EVENT(rot_entry_template, rot_entry_fence,
-	TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
-	TP_ARGS(ss_id, sq_id, rot)
-);
-
-DEFINE_EVENT(rot_entry_template, rot_entry_commit,
-	TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
-	TP_ARGS(ss_id, sq_id, rot)
-);
-
-DEFINE_EVENT(rot_entry_template, rot_entry_done,
-	TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
-	TP_ARGS(ss_id, sq_id, rot)
-);
-
-TRACE_EVENT(rot_perf_set_qos_luts,
-	TP_PROTO(u32 pnum, u32 fmt, u32 lut, bool linear),
-	TP_ARGS(pnum, fmt, lut, linear),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, fmt)
-			__field(u32, lut)
-			__field(bool, linear)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->fmt = fmt;
-			__entry->lut = lut;
-			__entry->linear = linear;
-	),
-	TP_printk("pnum=%d fmt=%d lut=0x%x lin:%d",
-			__entry->pnum, __entry->fmt,
-			__entry->lut, __entry->linear)
-);
-
-TRACE_EVENT(rot_perf_set_panic_luts,
-	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 panic_lut,
-		u32 robust_lut),
-	TP_ARGS(pnum, fmt, mode, panic_lut, robust_lut),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, fmt)
-			__field(u32, mode)
-			__field(u32, panic_lut)
-			__field(u32, robust_lut)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->fmt = fmt;
-			__entry->mode = mode;
-			__entry->panic_lut = panic_lut;
-			__entry->robust_lut = robust_lut;
-	),
-	TP_printk("pnum=%d fmt=%d mode=%d luts[0x%x, 0x%x]",
-			__entry->pnum, __entry->fmt,
-			__entry->mode, __entry->panic_lut,
-			__entry->robust_lut)
-);
-
-TRACE_EVENT(rot_perf_set_wm_levels,
-	TP_PROTO(u32 pnum, u32 use_space, u32 priority_bytes, u32 wm0, u32 wm1,
-		u32 wm2, u32 mb_cnt, u32 mb_size),
-	TP_ARGS(pnum, use_space, priority_bytes, wm0, wm1, wm2, mb_cnt,
-		mb_size),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, use_space)
-			__field(u32, priority_bytes)
-			__field(u32, wm0)
-			__field(u32, wm1)
-			__field(u32, wm2)
-			__field(u32, mb_cnt)
-			__field(u32, mb_size)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->use_space = use_space;
-			__entry->priority_bytes = priority_bytes;
-			__entry->wm0 = wm0;
-			__entry->wm1 = wm1;
-			__entry->wm2 = wm2;
-			__entry->mb_cnt = mb_cnt;
-			__entry->mb_size = mb_size;
-	),
-	TP_printk(
-		"pnum:%d useable_space:%d priority_bytes:%d watermark:[%d | %d | %d] nmb=%d mb_size=%d",
-			__entry->pnum, __entry->use_space,
-			__entry->priority_bytes, __entry->wm0, __entry->wm1,
-			__entry->wm2, __entry->mb_cnt, __entry->mb_size)
-);
-
-TRACE_EVENT(rot_perf_set_ot,
-	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim),
-	TP_ARGS(pnum, xin_id, rd_lim),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, xin_id)
-			__field(u32, rd_lim)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->xin_id = xin_id;
-			__entry->rd_lim = rd_lim;
-	),
-	TP_printk("pnum:%d xin_id:%d ot:%d",
-			__entry->pnum, __entry->xin_id, __entry->rd_lim)
-);
-
-TRACE_EVENT(rot_perf_prefill_calc,
-	TP_PROTO(u32 pnum, u32 latency_buf, u32 ot, u32 y_buf, u32 y_scaler,
-		u32 pp_lines, u32 pp_bytes, u32 post_sc, u32 fbc_bytes,
-		u32 prefill_bytes),
-	TP_ARGS(pnum, latency_buf, ot, y_buf, y_scaler, pp_lines, pp_bytes,
-		post_sc, fbc_bytes, prefill_bytes),
-	TP_STRUCT__entry(
-			__field(u32, pnum)
-			__field(u32, latency_buf)
-			__field(u32, ot)
-			__field(u32, y_buf)
-			__field(u32, y_scaler)
-			__field(u32, pp_lines)
-			__field(u32, pp_bytes)
-			__field(u32, post_sc)
-			__field(u32, fbc_bytes)
-			__field(u32, prefill_bytes)
-	),
-	TP_fast_assign(
-			__entry->pnum = pnum;
-			__entry->latency_buf = latency_buf;
-			__entry->ot = ot;
-			__entry->y_buf = y_buf;
-			__entry->y_scaler = y_scaler;
-			__entry->pp_lines = pp_lines;
-			__entry->pp_bytes = pp_bytes;
-			__entry->post_sc = post_sc;
-			__entry->fbc_bytes = fbc_bytes;
-			__entry->prefill_bytes = prefill_bytes;
-	),
-	TP_printk(
-		"pnum:%d latency_buf:%d ot:%d y_buf:%d y_scaler:%d pp_lines:%d, pp_bytes=%d post_sc:%d fbc_bytes:%d prefill:%d",
-			__entry->pnum, __entry->latency_buf, __entry->ot,
-			__entry->y_buf, __entry->y_scaler, __entry->pp_lines,
-			__entry->pp_bytes, __entry->post_sc,
-			__entry->fbc_bytes, __entry->prefill_bytes)
-);
-
-TRACE_EVENT(rot_mark_write,
-	TP_PROTO(int pid, const char *name, bool trace_begin),
-	TP_ARGS(pid, name, trace_begin),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(trace_name, name)
-			__field(bool, trace_begin)
-	),
-	TP_fast_assign(
-			__entry->pid = pid;
-			__assign_str(trace_name, name);
-			__entry->trace_begin = trace_begin;
-	),
-	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
-		__entry->pid, __get_str(trace_name))
-);
-
-TRACE_EVENT(rot_trace_counter,
-	TP_PROTO(int pid, char *name, s64 value),
-	TP_ARGS(pid, name, value),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(counter_name, name)
-			__field(s64, value)
-	),
-	TP_fast_assign(
-			__entry->pid = current->tgid;
-			__assign_str(counter_name, name);
-			__entry->value = value;
-	),
-	TP_printk("%d|%s|%lld", __entry->pid,
-			__get_str(counter_name), __entry->value)
-);
-
-TRACE_EVENT(rot_bw_ao_as_context,
-	TP_PROTO(u32 state),
-	TP_ARGS(state),
-	TP_STRUCT__entry(
-			__field(u32, state)
-	),
-	TP_fast_assign(
-			__entry->state = state;
-	),
-	TP_printk("Rotator bw context %s",
-			__entry->state ? "Active Only" : "Active+Sleep")
-
-);
-
-#define SDE_ROT_TRACE_EVTLOG_SIZE	15
-TRACE_EVENT(sde_rot_evtlog,
-	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 *data),
-	TP_ARGS(tag, tag_id, cnt, data),
-	TP_STRUCT__entry(
-			__field(int, pid)
-			__string(evtlog_tag, tag)
-			__field(u32, tag_id)
-			__array(u32, data, SDE_ROT_TRACE_EVTLOG_SIZE)
-	),
-	TP_fast_assign(
-			__entry->pid = current->tgid;
-			__assign_str(evtlog_tag, tag);
-			__entry->tag_id = tag_id;
-			if (cnt > SDE_ROT_TRACE_EVTLOG_SIZE)
-				cnt = SDE_ROT_TRACE_EVTLOG_SIZE;
-			memcpy(__entry->data, data, cnt * sizeof(u32));
-			memset(&__entry->data[cnt], 0,
-				(SDE_ROT_TRACE_EVTLOG_SIZE - cnt) *
-				sizeof(u32));
-	),
-	TP_printk("%d|%s:%d|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x",
-			__entry->pid, __get_str(evtlog_tag),
-			__entry->tag_id,
-			__entry->data[0], __entry->data[1],
-			__entry->data[2], __entry->data[3],
-			__entry->data[4], __entry->data[5],
-			__entry->data[6], __entry->data[7],
-			__entry->data[8], __entry->data[9],
-			__entry->data[10], __entry->data[11],
-			__entry->data[12], __entry->data[13],
-			__entry->data[14])
-)
-
-#endif /* if !defined(TRACE_SDE_ROTATOR_H) ||
-	*		defined(TRACE_HEADER_MULTI_READ)
-	*/
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
deleted file mode 100644
index 0dcb241..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ /dev/null
@@ -1,1229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
- */
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/dma-mapping.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/dma-buf.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#include <linux/regulator/consumer.h>
-#include <media/msm_media_info.h>
-#include <linux/videodev2.h>
-#include <linux/ion.h>
-
-#include "sde_rotator_util.h"
-#include "sde_rotator_smmu.h"
-#include "sde_rotator_debug.h"
-
-#define Y_TILEWIDTH     48
-#define Y_TILEHEIGHT    4
-#define UV_TILEWIDTH    48
-#define UV_TILEHEIGHT   8
-#define TILEWIDTH_SIZE  64
-#define TILEHEIGHT_SIZE 4
-
-void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
-		u8 *v_sample, u8 *h_sample)
-{
-	switch (chroma_sample) {
-	case SDE_MDP_CHROMA_H2V1:
-		*v_sample = 1;
-		*h_sample = 2;
-		break;
-	case SDE_MDP_CHROMA_H1V2:
-		*v_sample = 2;
-		*h_sample = 1;
-		break;
-	case SDE_MDP_CHROMA_420:
-		*v_sample = 2;
-		*h_sample = 2;
-		break;
-	default:
-		*v_sample = 1;
-		*h_sample = 1;
-		break;
-	}
-}
-
-void sde_rot_intersect_rect(struct sde_rect *res_rect,
-	const struct sde_rect *dst_rect,
-	const struct sde_rect *sci_rect)
-{
-	int l = max(dst_rect->x, sci_rect->x);
-	int t = max(dst_rect->y, sci_rect->y);
-	int r = min((dst_rect->x + dst_rect->w), (sci_rect->x + sci_rect->w));
-	int b = min((dst_rect->y + dst_rect->h), (sci_rect->y + sci_rect->h));
-
-	if (r < l || b < t)
-		*res_rect = (struct sde_rect){0, 0, 0, 0};
-	else
-		*res_rect = (struct sde_rect){l, t, (r-l), (b-t)};
-}
-
-void sde_rot_crop_rect(struct sde_rect *src_rect,
-	struct sde_rect *dst_rect,
-	const struct sde_rect *sci_rect)
-{
-	struct sde_rect res;
-
-	sde_rot_intersect_rect(&res, dst_rect, sci_rect);
-
-	if (res.w && res.h) {
-		if ((res.w != dst_rect->w) || (res.h != dst_rect->h)) {
-			src_rect->x = src_rect->x + (res.x - dst_rect->x);
-			src_rect->y = src_rect->y + (res.y - dst_rect->y);
-			src_rect->w = res.w;
-			src_rect->h = res.h;
-		}
-		*dst_rect = (struct sde_rect)
-			{(res.x - sci_rect->x), (res.y - sci_rect->y),
-			res.w, res.h};
-	}
-}
-
-/*
- * sde_rect_cmp() - compares two rects
- * @rect1 - rect value to compare
- * @rect2 - rect value to compare
- *
- * Returns 1 if the rects are same, 0 otherwise.
- */
-int sde_rect_cmp(struct sde_rect *rect1, struct sde_rect *rect2)
-{
-	return rect1->x == rect2->x && rect1->y == rect2->y &&
-	       rect1->w == rect2->w && rect1->h == rect2->h;
-}
-
-/*
- * sde_rect_overlap_check() - compare two rects and check if they overlap
- * @rect1 - rect value to compare
- * @rect2 - rect value to compare
- *
- * Returns true if rects overlap, false otherwise.
- */
-bool sde_rect_overlap_check(struct sde_rect *rect1, struct sde_rect *rect2)
-{
-	u32 rect1_left = rect1->x, rect1_right = rect1->x + rect1->w;
-	u32 rect1_top = rect1->y, rect1_bottom = rect1->y + rect1->h;
-	u32 rect2_left = rect2->x, rect2_right = rect2->x + rect2->w;
-	u32 rect2_top = rect2->y, rect2_bottom = rect2->y + rect2->h;
-
-	if ((rect1_right <= rect2_left) ||
-	    (rect1_left >= rect2_right) ||
-	    (rect1_bottom <= rect2_top) ||
-	    (rect1_top >= rect2_bottom))
-		return false;
-
-	return true;
-}
-
-int sde_mdp_get_rau_strides(u32 w, u32 h,
-			       struct sde_mdp_format_params *fmt,
-			       struct sde_mdp_plane_sizes *ps)
-{
-	if (fmt->is_yuv) {
-		ps->rau_cnt = DIV_ROUND_UP(w, 64);
-		ps->ystride[0] = 64 * 4;
-		ps->rau_h[0] = 4;
-		ps->rau_h[1] = 2;
-		if (fmt->chroma_sample == SDE_MDP_CHROMA_H1V2)
-			ps->ystride[1] = 64 * 2;
-		else if (fmt->chroma_sample == SDE_MDP_CHROMA_H2V1) {
-			ps->ystride[1] = 32 * 4;
-			ps->rau_h[1] = 4;
-		} else
-			ps->ystride[1] = 32 * 2;
-
-		/* account for both chroma components */
-		ps->ystride[1] <<= 1;
-	} else if (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED) {
-		ps->rau_cnt = DIV_ROUND_UP(w, 32);
-		ps->ystride[0] = 32 * 4 * fmt->bpp;
-		ps->ystride[1] = 0;
-		ps->rau_h[0] = 4;
-		ps->rau_h[1] = 0;
-	} else  {
-		SDEROT_ERR("Invalid format=%d\n", fmt->format);
-		return -EINVAL;
-	}
-
-	ps->ystride[0] *= ps->rau_cnt;
-	ps->ystride[1] *= ps->rau_cnt;
-	ps->num_planes = 2;
-
-	SDEROT_DBG("BWC rau_cnt=%d strides={%d,%d} heights={%d,%d}\n",
-		ps->rau_cnt, ps->ystride[0], ps->ystride[1],
-		ps->rau_h[0], ps->rau_h[1]);
-
-	return 0;
-}
-
-static int sde_mdp_get_a5x_plane_size(struct sde_mdp_format_params *fmt,
-	u32 width, u32 height, struct sde_mdp_plane_sizes *ps)
-{
-	int rc = 0;
-
-	if (sde_mdp_is_nv12_8b_format(fmt)) {
-		ps->num_planes = 2;
-		/* Y bitstream stride and plane size */
-		ps->ystride[0] = ALIGN(width, 128);
-		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 32),
-					4096);
-
-		/* CbCr bitstream stride and plane size */
-		ps->ystride[1] = ALIGN(width, 128);
-		ps->plane_size[1] = ALIGN(ps->ystride[1] *
-			ALIGN(height / 2, 32), 4096);
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		ps->num_planes += 2;
-
-		/* Y meta data stride and plane size */
-		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
-		ps->plane_size[2] = ALIGN(ps->ystride[2] *
-			ALIGN(DIV_ROUND_UP(height, 8), 16), 4096);
-
-		/* CbCr meta data stride and plane size */
-		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
-		ps->plane_size[3] = ALIGN(ps->ystride[3] *
-			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
-	} else if (sde_mdp_is_p010_format(fmt)) {
-		ps->num_planes = 2;
-		/* Y bitstream stride and plane size */
-		ps->ystride[0] = ALIGN(width * 2, 256);
-		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 16),
-					4096);
-
-		/* CbCr bitstream stride and plane size */
-		ps->ystride[1] = ALIGN(width * 2, 256);
-		ps->plane_size[1] = ALIGN(ps->ystride[1] *
-			ALIGN(height / 2, 16), 4096);
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		ps->num_planes += 2;
-
-		/* Y meta data stride and plane size */
-		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
-		ps->plane_size[2] = ALIGN(ps->ystride[2] *
-			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
-
-		/* CbCr meta data stride and plane size */
-		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
-		ps->plane_size[3] = ALIGN(ps->ystride[3] *
-			ALIGN(DIV_ROUND_UP(height / 2, 4), 16), 4096);
-	} else if (sde_mdp_is_tp10_format(fmt)) {
-		u32 yWidth   = sde_mdp_general_align(width, 192);
-		u32 yHeight  = ALIGN(height, 16);
-		u32 uvWidth  = sde_mdp_general_align(width, 192);
-		u32 uvHeight = ALIGN(height, 32);
-
-		ps->num_planes = 2;
-
-		/* Y bitstream stride and plane size */
-		ps->ystride[0]    = yWidth * TILEWIDTH_SIZE / Y_TILEWIDTH;
-		ps->plane_size[0] = ALIGN(ps->ystride[0] *
-				(yHeight * TILEHEIGHT_SIZE / Y_TILEHEIGHT),
-				4096);
-
-		/* CbCr bitstream stride and plane size */
-		ps->ystride[1]    = uvWidth * TILEWIDTH_SIZE / UV_TILEWIDTH;
-		ps->plane_size[1] = ALIGN(ps->ystride[1] *
-				(uvHeight * TILEHEIGHT_SIZE / UV_TILEHEIGHT),
-				4096);
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		ps->num_planes += 2;
-
-		/* Y meta data stride and plane size */
-		ps->ystride[2]    = ALIGN(yWidth / Y_TILEWIDTH, 64);
-		ps->plane_size[2] = ALIGN(ps->ystride[2] *
-				ALIGN((yHeight / Y_TILEHEIGHT), 16), 4096);
-
-		/* CbCr meta data stride and plane size */
-		ps->ystride[3]    = ALIGN(uvWidth / UV_TILEWIDTH, 64);
-		ps->plane_size[3] = ALIGN(ps->ystride[3] *
-				ALIGN((uvHeight / UV_TILEHEIGHT), 16), 4096);
-	} else if (sde_mdp_is_rgb_format(fmt)) {
-		uint32_t stride_alignment, bpp, aligned_bitstream_width;
-
-		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
-			stride_alignment = 128;
-			bpp = 2;
-		} else {
-			stride_alignment = 64;
-			bpp = 4;
-		}
-
-		ps->num_planes = 1;
-
-		/* RGB bitstream stride and plane size */
-		aligned_bitstream_width = ALIGN(width, stride_alignment);
-		ps->ystride[0] = aligned_bitstream_width * bpp;
-		ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
-			ALIGN(height, 16), 4096);
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		ps->num_planes += 1;
-
-		/* RGB meta data stride and plane size */
-		ps->ystride[2] = ALIGN(DIV_ROUND_UP(aligned_bitstream_width,
-			16), 64);
-		ps->plane_size[2] = ALIGN(ps->ystride[2] *
-			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
-	} else {
-		SDEROT_ERR("%s: UBWC format not supported for fmt:%d\n",
-			__func__, fmt->format);
-		rc = -EINVAL;
-	}
-done:
-	return rc;
-}
-
-int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
-	struct sde_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation)
-{
-	int i, rc = 0;
-	u32 bpp;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	if ((w > SDE_ROT_MAX_IMG_WIDTH) || (h > SDE_ROT_MAX_IMG_HEIGHT))
-		return -ERANGE;
-
-	bpp = fmt->bpp;
-	memset(ps, 0, sizeof(struct sde_mdp_plane_sizes));
-
-	if (sde_mdp_is_tilea5x_format(fmt)) {
-		rc = sde_mdp_get_a5x_plane_size(fmt, w, h, ps);
-	} else if (bwc_mode) {
-		u32 height, meta_size;
-
-		rc = sde_mdp_get_rau_strides(w, h, fmt, ps);
-		if (rc)
-			return rc;
-
-		height = DIV_ROUND_UP(h, ps->rau_h[0]);
-		meta_size = DIV_ROUND_UP(ps->rau_cnt, 8);
-		ps->ystride[1] += meta_size;
-		ps->ystride[0] += ps->ystride[1] + meta_size;
-		ps->plane_size[0] = ps->ystride[0] * height;
-
-		ps->ystride[1] = 2;
-		ps->plane_size[1] = 2 * ps->rau_cnt * height;
-
-		SDEROT_DBG("BWC data stride=%d size=%d meta size=%d\n",
-			ps->ystride[0], ps->plane_size[0], ps->plane_size[1]);
-	} else {
-		if (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED) {
-			ps->num_planes = 1;
-			ps->plane_size[0] = w * h * bpp;
-			ps->ystride[0] = w * bpp;
-		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_VENUS ||
-			fmt->format == SDE_PIX_FMT_Y_CRCB_H2V2_VENUS ||
-			fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS) {
-
-			int cf;
-
-			switch (fmt->format) {
-			case SDE_PIX_FMT_Y_CBCR_H2V2_VENUS:
-				cf = COLOR_FMT_NV12;
-				break;
-			case SDE_PIX_FMT_Y_CRCB_H2V2_VENUS:
-				cf = COLOR_FMT_NV21;
-				break;
-			case SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS:
-				cf = COLOR_FMT_P010;
-				break;
-			default:
-				SDEROT_ERR("unknown color format %d\n",
-						fmt->format);
-				return -EINVAL;
-			}
-
-			ps->num_planes = 2;
-			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
-			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
-			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
-				ps->ystride[0];
-			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
-				ps->ystride[1];
-		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_P010) {
-			/*
-			 * |<---Y1--->000000<---Y0--->000000|  Plane0
-			 * |rrrrrrrrrr000000bbbbbbbbbb000000|  Plane1
-			 * |--------------------------------|
-			 *  33222222222211111111110000000000  Bit
-			 *  10987654321098765432109876543210  Location
-			 */
-			ps->num_planes = 2;
-			ps->ystride[0] = w * 2;
-			ps->ystride[1] = w * 2;
-			ps->plane_size[0] = ps->ystride[0] * h;
-			ps->plane_size[1] = ps->ystride[1] * h / 2;
-		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10) {
-			u32 yWidth   = sde_mdp_general_align(w, 192);
-			u32 yHeight  = ALIGN(h, 16);
-			u32 uvWidth  = sde_mdp_general_align(w, 192);
-			u32 uvHeight = (ALIGN(h, 32)) / 2;
-
-			ps->num_planes = 2;
-
-			ps->ystride[0] = (yWidth / 3) * 4;
-			ps->ystride[1] = (uvWidth / 3) * 4;
-			ps->plane_size[0] = ALIGN(ps->ystride[0] * yHeight,
-					4096);
-			ps->plane_size[1] = ALIGN(ps->ystride[1] * uvHeight,
-					4096);
-		} else {
-			u8 v_subsample, h_subsample, stride_align, height_align;
-			u32 chroma_samp;
-
-			chroma_samp = fmt->chroma_sample;
-
-			sde_mdp_get_v_h_subsample_rate(chroma_samp,
-				&v_subsample, &h_subsample);
-
-			switch (fmt->format) {
-			case SDE_PIX_FMT_Y_CR_CB_GH2V2:
-				stride_align = 16;
-				height_align = 1;
-				break;
-			default:
-				stride_align = 1;
-				height_align = 1;
-				break;
-			}
-
-			ps->ystride[0] = ALIGN(w, stride_align);
-			ps->ystride[1] = ALIGN(w / h_subsample, stride_align);
-			ps->plane_size[0] = ps->ystride[0] *
-				ALIGN(h, height_align);
-			ps->plane_size[1] = ps->ystride[1] * (h / v_subsample);
-
-			if (fmt->fetch_planes == SDE_MDP_PLANE_PSEUDO_PLANAR) {
-				ps->num_planes = 2;
-				ps->plane_size[1] *= 2;
-				ps->ystride[1] *= 2;
-			} else { /* planar */
-				ps->num_planes = 3;
-				ps->plane_size[2] = ps->plane_size[1];
-				ps->ystride[2] = ps->ystride[1];
-			}
-		}
-	}
-
-	/* Safe to use MAX_PLANES as ps is memset at start of function */
-	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
-		ps->total_size += ps->plane_size[i];
-
-	return rc;
-}
-
-static int sde_mdp_a5x_data_check(struct sde_mdp_data *data,
-			struct sde_mdp_plane_sizes *ps,
-			struct sde_mdp_format_params *fmt)
-{
-	int i, inc;
-	unsigned long data_size = 0;
-	dma_addr_t base_addr;
-
-	if (data->p[0].len == ps->plane_size[0])
-		goto end;
-
-	/* From this point, assumption is plane 0 is to be divided */
-	data_size = data->p[0].len;
-	if (data_size < ps->total_size) {
-		SDEROT_ERR(
-			"insufficient current mem len=%lu required mem len=%u\n",
-			data_size, ps->total_size);
-		return -ENOMEM;
-	}
-
-	base_addr = data->p[0].addr;
-
-	if (sde_mdp_is_yuv_format(fmt)) {
-		/************************************************/
-		/*      UBWC            **                      */
-		/*      buffer          **      MDP PLANE       */
-		/*      format          **                      */
-		/************************************************/
-		/* -------------------  ** -------------------- */
-		/* |      Y meta     |  ** |    Y bitstream   | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |   Cbcr metadata |  ** |       Y meta     | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |  CbCr bitstream |  ** |     CbCr meta    | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/************************************************/
-
-		/* configure Y bitstream plane */
-		data->p[0].addr = base_addr + ps->plane_size[2];
-		data->p[0].len = ps->plane_size[0];
-
-		/* configure CbCr bitstream plane */
-		data->p[1].addr = base_addr + ps->plane_size[0]
-			+ ps->plane_size[2] + ps->plane_size[3];
-		data->p[1].len = ps->plane_size[1];
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		/* configure Y metadata plane */
-		data->p[2].addr = base_addr;
-		data->p[2].len = ps->plane_size[2];
-
-		/* configure CbCr metadata plane */
-		data->p[3].addr = base_addr + ps->plane_size[0]
-			+ ps->plane_size[2];
-		data->p[3].len = ps->plane_size[3];
-	} else {
-		/************************************************/
-		/*      UBWC            **                      */
-		/*      buffer          **      MDP PLANE       */
-		/*      format          **                      */
-		/************************************************/
-		/* -------------------  ** -------------------- */
-		/* |      RGB meta   |  ** |   RGB bitstream  | */
-		/* |       data      |  ** |       plane      | */
-		/* -------------------  ** -------------------- */
-		/* |  RGB bitstream  |  ** |       NONE       | */
-		/* |       data      |  ** |                  | */
-		/* -------------------  ** -------------------- */
-		/*                      ** |     RGB meta     | */
-		/*                      ** |       plane      | */
-		/*                      ** -------------------- */
-		/************************************************/
-
-		/* configure RGB bitstream plane */
-		data->p[0].addr = base_addr + ps->plane_size[2];
-		data->p[0].len = ps->plane_size[0];
-
-		if (!sde_mdp_is_ubwc_format(fmt))
-			goto done;
-
-		/* configure RGB metadata plane */
-		data->p[2].addr = base_addr;
-		data->p[2].len = ps->plane_size[2];
-	}
-done:
-	data->num_planes = ps->num_planes;
-
-end:
-	if (data->num_planes != ps->num_planes) {
-		SDEROT_ERR("num_planes don't match: fmt:%d, data:%d, ps:%d\n",
-				fmt->format, data->num_planes, ps->num_planes);
-		return -EINVAL;
-	}
-
-	inc = (sde_mdp_is_yuv_format(fmt) ? 1 : 2);
-	for (i = 0; i < SDE_ROT_MAX_PLANES; i += inc) {
-		if (data->p[i].len != ps->plane_size[i]) {
-			SDEROT_ERR(
-				"plane:%d fmt:%d, len does not match: data:%lu, ps:%d\n",
-					i, fmt->format, data->p[i].len,
-					ps->plane_size[i]);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-int sde_mdp_data_check(struct sde_mdp_data *data,
-			struct sde_mdp_plane_sizes *ps,
-			struct sde_mdp_format_params *fmt)
-{
-	struct sde_mdp_img_data *prev, *curr;
-	int i;
-
-	if (!ps)
-		return 0;
-
-	if (!data || data->num_planes == 0)
-		return -ENOMEM;
-
-	if (sde_mdp_is_tilea5x_format(fmt))
-		return sde_mdp_a5x_data_check(data, ps, fmt);
-
-	SDEROT_DBG("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
-		data->p[0].len, ps->total_size);
-
-	for (i = 0; i < ps->num_planes; i++) {
-		curr = &data->p[i];
-		if (i >= data->num_planes) {
-			u32 psize = ps->plane_size[i-1];
-
-			prev = &data->p[i-1];
-			if (prev->len > psize) {
-				curr->len = prev->len - psize;
-				prev->len = psize;
-			}
-			curr->addr = prev->addr + psize;
-		}
-		if (curr->len < ps->plane_size[i]) {
-			SDEROT_ERR("insufficient mem=%lu p=%d len=%u\n",
-			       curr->len, i, ps->plane_size[i]);
-			return -ENOMEM;
-		}
-		SDEROT_DBG("plane[%d] addr=%pa len=%lu\n", i,
-				&curr->addr, curr->len);
-	}
-	data->num_planes = ps->num_planes;
-
-	return 0;
-}
-
-int sde_validate_offset_for_ubwc_format(
-	struct sde_mdp_format_params *fmt, u16 x, u16 y)
-{
-	int ret;
-	u16 micro_w = 0, micro_h = 0;
-
-	ret = sde_rot_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
-	if (ret || !micro_w || !micro_h) {
-		SDEROT_ERR("Could not get valid micro tile dimensions\n");
-		return -EINVAL;
-	}
-
-	if (x % (micro_w * UBWC_META_MACRO_W_H)) {
-		SDEROT_ERR("x=%d does not align with meta width=%d\n", x,
-			micro_w * UBWC_META_MACRO_W_H);
-		return -EINVAL;
-	}
-
-	if (y % (micro_h * UBWC_META_MACRO_W_H)) {
-		SDEROT_ERR("y=%d does not align with meta height=%d\n", y,
-			UBWC_META_MACRO_W_H);
-		return -EINVAL;
-	}
-	return ret;
-}
-
-/* x and y are assumed to be valid, expected to line up with start of tiles */
-void sde_rot_ubwc_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
-	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
-{
-	u16 macro_w, micro_w, micro_h;
-	u32 offset = 0;
-	int ret;
-
-	ret = sde_rot_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
-	if (ret || !micro_w || !micro_h) {
-		SDEROT_ERR("Could not get valid micro tile dimensions\n");
-		return;
-	}
-	macro_w = 4 * micro_w;
-
-	if (sde_mdp_is_nv12_8b_format(fmt)) {
-		u16 chroma_macro_w = macro_w / 2;
-		u16 chroma_micro_w = micro_w / 2;
-
-		/* plane 1 and 3 are chroma, with sub sample of 2 */
-		offset = y * ps->ystride[0] +
-			(x / macro_w) * 4096;
-		if (offset < data->p[0].len) {
-			data->p[0].addr += offset;
-		} else {
-			ret = 1;
-			goto done;
-		}
-
-		offset = y / 2 * ps->ystride[1] +
-			((x / 2) / chroma_macro_w) * 4096;
-		if (offset < data->p[1].len) {
-			data->p[1].addr += offset;
-		} else {
-			ret = 2;
-			goto done;
-		}
-
-		offset = (y / micro_h) * ps->ystride[2] +
-			((x / micro_w) / UBWC_META_MACRO_W_H) *
-			UBWC_META_BLOCK_SIZE;
-		if (offset < data->p[2].len) {
-			data->p[2].addr += offset;
-		} else {
-			ret = 3;
-			goto done;
-		}
-
-		offset = ((y / 2) / micro_h) * ps->ystride[3] +
-			(((x / 2) / chroma_micro_w) / UBWC_META_MACRO_W_H) *
-			UBWC_META_BLOCK_SIZE;
-		if (offset < data->p[3].len) {
-			data->p[3].addr += offset;
-		} else {
-			ret = 4;
-			goto done;
-		}
-	} else if (sde_mdp_is_nv12_10b_format(fmt)) {
-		/* TODO: */
-		SDEROT_ERR("%c%c%c%c format not implemented yet",
-				fmt->format >> 0, fmt->format >> 8,
-				fmt->format >> 16, fmt->format >> 24);
-		ret = 1;
-		goto done;
-	} else {
-		offset = y * ps->ystride[0] +
-			(x / macro_w) * 4096;
-		if (offset < data->p[0].len) {
-			data->p[0].addr += offset;
-		} else {
-			ret = 1;
-			goto done;
-		}
-
-		offset = DIV_ROUND_UP(y, micro_h) * ps->ystride[2] +
-			((x / micro_w) / UBWC_META_MACRO_W_H) *
-			UBWC_META_BLOCK_SIZE;
-		if (offset < data->p[2].len) {
-			data->p[2].addr += offset;
-		} else {
-			ret = 3;
-			goto done;
-		}
-	}
-
-done:
-	if (ret) {
-		WARN(1, "idx %d, offsets:%u too large for buflen%lu\n",
-			(ret - 1), offset, data->p[(ret - 1)].len);
-	}
-}
-
-void sde_rot_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
-	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
-{
-	if ((x == 0) && (y == 0))
-		return;
-
-	if (sde_mdp_is_tilea5x_format(fmt)) {
-		sde_rot_ubwc_data_calc_offset(data, x, y, ps, fmt);
-		return;
-	}
-
-	data->p[0].addr += y * ps->ystride[0];
-
-	if (data->num_planes == 1) {
-		data->p[0].addr += x * fmt->bpp;
-	} else {
-		u16 xoff, yoff;
-		u8 v_subsample, h_subsample;
-
-		sde_mdp_get_v_h_subsample_rate(fmt->chroma_sample,
-			&v_subsample, &h_subsample);
-
-		xoff = x / h_subsample;
-		yoff = y / v_subsample;
-
-		data->p[0].addr += x;
-		data->p[1].addr += xoff + (yoff * ps->ystride[1]);
-		if (data->num_planes == 2) /* pseudo planar */
-			data->p[1].addr += xoff;
-		else /* planar */
-			data->p[2].addr += xoff + (yoff * ps->ystride[2]);
-	}
-}
-
-static int sde_smmu_get_domain_type(u32 flags, bool rotator)
-{
-	int type;
-
-	if (flags & SDE_SECURE_OVERLAY_SESSION)
-		type = SDE_IOMMU_DOMAIN_ROT_SECURE;
-	else
-		type = SDE_IOMMU_DOMAIN_ROT_UNSECURE;
-
-	return type;
-}
-
-static int sde_mdp_is_map_needed(struct sde_mdp_img_data *data)
-{
-	if (data->flags & SDE_SECURE_CAMERA_SESSION)
-		return false;
-	return true;
-}
-
-static int sde_mdp_put_img(struct sde_mdp_img_data *data, bool rotator,
-		int dir)
-{
-	u32 domain;
-
-	if (data->flags & SDE_ROT_EXT_IOVA) {
-		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
-				&data->addr, data->len);
-		return 0;
-	}
-
-	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
-		SDEROT_DBG("ion hdl=%pK buf=0x%pa\n", data->srcp_dma_buf,
-							&data->addr);
-		if (sde_mdp_is_map_needed(data) && data->mapped) {
-			domain = sde_smmu_get_domain_type(data->flags,
-				rotator);
-			data->mapped = false;
-			SDEROT_DBG("unmap %pad/%lx d:%u f:%x\n", &data->addr,
-					data->len, domain, data->flags);
-		}
-		if (!data->skip_detach) {
-			data->srcp_attachment->dma_map_attrs |=
-				DMA_ATTR_DELAYED_UNMAP;
-			dma_buf_unmap_attachment(data->srcp_attachment,
-				data->srcp_table, dir);
-			dma_buf_detach(data->srcp_dma_buf,
-					data->srcp_attachment);
-			if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
-				dma_buf_put(data->srcp_dma_buf);
-				data->srcp_dma_buf = NULL;
-			}
-			data->skip_detach = true;
-		}
-	} else {
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static int sde_mdp_get_img(struct sde_fb_data *img,
-		struct sde_mdp_img_data *data, struct device *dev,
-		bool rotator, int dir)
-{
-	int ret = -EINVAL;
-	u32 domain;
-
-	data->flags |= img->flags;
-	data->offset = img->offset;
-	if (data->flags & SDE_ROT_EXT_DMA_BUF) {
-		data->srcp_dma_buf = img->buffer;
-	} else if (data->flags & SDE_ROT_EXT_IOVA) {
-		data->addr = img->addr;
-		data->len = img->len;
-		SDEROT_DBG("use client %pad/%lx\n", &data->addr, data->len);
-		return 0;
-	} else if (IS_ERR(data->srcp_dma_buf)) {
-		SDEROT_ERR("error on ion_import_fd\n");
-		ret = PTR_ERR(data->srcp_dma_buf);
-		data->srcp_dma_buf = NULL;
-		return ret;
-	}
-
-	if (sde_mdp_is_map_needed(data)) {
-		domain = sde_smmu_get_domain_type(data->flags, rotator);
-
-		SDEROT_DBG("%d domain=%d ihndl=%pK\n",
-				__LINE__, domain, data->srcp_dma_buf);
-		data->srcp_attachment =
-			sde_smmu_dma_buf_attach(data->srcp_dma_buf, dev,
-					domain);
-		if (IS_ERR(data->srcp_attachment)) {
-			SDEROT_ERR("%d Failed to attach dma buf\n", __LINE__);
-			ret = PTR_ERR(data->srcp_attachment);
-			goto err_put;
-		}
-	} else {
-		data->srcp_attachment = dma_buf_attach(
-				data->srcp_dma_buf, dev);
-		if (IS_ERR(data->srcp_attachment)) {
-			SDEROT_ERR(
-				"Failed to attach dma buf for secure camera\n");
-			ret = PTR_ERR(data->srcp_attachment);
-			goto err_put;
-		}
-	}
-
-	SDEROT_DBG("%d attach=%pK\n", __LINE__, data->srcp_attachment);
-	data->addr = 0;
-	data->len = 0;
-	data->mapped = false;
-	data->skip_detach = false;
-	/* return early, mapping will be done later */
-
-	return 0;
-err_put:
-	if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
-		dma_buf_put(data->srcp_dma_buf);
-		data->srcp_dma_buf = NULL;
-	}
-	return ret;
-}
-
-static int sde_mdp_map_buffer(struct sde_mdp_img_data *data, bool rotator,
-		int dir)
-{
-	int ret = -EINVAL;
-	struct scatterlist *sg;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	unsigned long flags = 0;
-
-	if (data->addr && data->len)
-		return 0;
-
-	if (data->flags & SDE_ROT_EXT_IOVA) {
-		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
-				&data->addr, data->len);
-		return 0;
-	}
-
-	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
-		/*
-		 * dma_buf_map_attachment will call into
-		 * dma_map_sg_attrs, and so all cache maintenance
-		 * attribute and lazy unmap attribute will be all
-		 * provided here.
-		 */
-		data->srcp_attachment->dma_map_attrs |=
-			DMA_ATTR_DELAYED_UNMAP;
-
-		if (data->srcp_dma_buf && data->srcp_dma_buf->ops &&
-				data->srcp_dma_buf->ops->get_flags) {
-			if (data->srcp_dma_buf->ops->get_flags(
-						data->srcp_dma_buf,
-						&flags) == 0) {
-				if ((flags & ION_FLAG_CACHED) == 0) {
-					SDEROT_DBG("dmabuf is uncached type\n");
-					data->srcp_attachment->dma_map_attrs |=
-						DMA_ATTR_SKIP_CPU_SYNC;
-				}
-			}
-		}
-
-		sgt = dma_buf_map_attachment(
-				data->srcp_attachment, dir);
-		if (IS_ERR_OR_NULL(sgt) ||
-				IS_ERR_OR_NULL(sgt->sgl)) {
-			SDEROT_ERR("Failed to map attachment\n");
-			ret = PTR_ERR(sgt);
-			goto err_detach;
-		}
-		data->srcp_table = sgt;
-
-		data->len = 0;
-		for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-			data->len += sg->length;
-		}
-
-		if (sde_mdp_is_map_needed(data)) {
-			data->addr = data->srcp_table->sgl->dma_address;
-			SDEROT_DBG("map %pad/%lx f:%x\n",
-					&data->addr,
-					data->len,
-					data->flags);
-			data->mapped = true;
-			ret = 0;
-		} else {
-			if (sgt->nents != 1) {
-				SDEROT_ERR(
-					"Fail ion buffer mapping for secure camera\n");
-				ret = -EINVAL;
-				goto err_unmap;
-			}
-
-			if (((uint64_t)sg_dma_address(sgt->sgl) >=
-					PHY_ADDR_4G - sgt->sgl->length)) {
-				SDEROT_ERR(
-					"ion buffer mapped size invalid, size=%d\n",
-					sgt->sgl->length);
-				ret = -EINVAL;
-				goto err_unmap;
-			}
-
-			data->addr = sg_phys(data->srcp_table->sgl);
-			ret = 0;
-		}
-	}
-
-	if (!data->addr) {
-		SDEROT_ERR("start address is zero!\n");
-		sde_mdp_put_img(data, rotator, dir);
-		return -ENOMEM;
-	}
-
-	if (!ret && (data->offset < data->len)) {
-		data->addr += data->offset;
-		data->len -= data->offset;
-
-		SDEROT_DBG("ihdl=%pK buf=0x%pa len=0x%lx\n",
-			 data->srcp_dma_buf, &data->addr, data->len);
-	} else {
-		sde_mdp_put_img(data, rotator, dir);
-		return ret ? : -EOVERFLOW;
-	}
-
-	return ret;
-
-err_unmap:
-	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, dir);
-err_detach:
-	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
-	if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
-		dma_buf_put(data->srcp_dma_buf);
-		data->srcp_dma_buf = NULL;
-	}
-	return ret;
-}
-
-static int sde_mdp_data_get(struct sde_mdp_data *data,
-		struct sde_fb_data *planes, int num_planes, u32 flags,
-		struct device *dev, bool rotator, int dir)
-{
-	int i, rc = 0;
-
-	if ((num_planes <= 0) || (num_planes > SDE_ROT_MAX_PLANES))
-		return -EINVAL;
-
-	for (i = 0; i < num_planes; i++) {
-		data->p[i].flags = flags;
-		rc = sde_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
-				dir);
-		if (rc) {
-			SDEROT_ERR("failed to get buf p=%d flags=%x\n",
-					i, flags);
-			while (i > 0) {
-				i--;
-				sde_mdp_put_img(&data->p[i], rotator, dir);
-			}
-			break;
-		}
-	}
-
-	data->num_planes = i;
-
-	return rc;
-}
-
-int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir)
-{
-	int i, rc = 0;
-
-	if (!data || !data->num_planes || data->num_planes > SDE_ROT_MAX_PLANES)
-		return -EINVAL;
-
-	for (i = 0; i < data->num_planes; i++) {
-		rc = sde_mdp_map_buffer(&data->p[i], rotator, dir);
-		if (rc) {
-			SDEROT_ERR("failed to map buf p=%d\n", i);
-			while (i > 0) {
-				i--;
-				sde_mdp_put_img(&data->p[i], rotator, dir);
-			}
-			break;
-		}
-	}
-	SDEROT_EVTLOG(data->num_planes, dir, data->p[0].addr, data->p[0].len,
-			data->p[0].mapped);
-
-	return rc;
-}
-
-void sde_mdp_data_free(struct sde_mdp_data *data, bool rotator, int dir)
-{
-	int i;
-
-	sde_smmu_ctrl(1);
-	for (i = 0; i < data->num_planes && data->p[i].len; i++)
-		sde_mdp_put_img(&data->p[i], rotator, dir);
-	sde_smmu_ctrl(0);
-
-	data->num_planes = 0;
-}
-
-int sde_mdp_data_get_and_validate_size(struct sde_mdp_data *data,
-	struct sde_fb_data *planes, int num_planes, u32 flags,
-	struct device *dev, bool rotator, int dir,
-	struct sde_layer_buffer *buffer)
-{
-	struct sde_mdp_format_params *fmt;
-	struct sde_mdp_plane_sizes ps;
-	int ret, i;
-	unsigned long total_buf_len = 0;
-
-	fmt = sde_get_format_params(buffer->format);
-	if (!fmt) {
-		SDEROT_ERR("Format %d not supported\n", buffer->format);
-		return -EINVAL;
-	}
-
-	ret = sde_mdp_data_get(data, planes, num_planes,
-		flags, dev, rotator, dir);
-	if (ret)
-		return ret;
-
-	sde_mdp_get_plane_sizes(fmt, buffer->width, buffer->height, &ps, 0, 0);
-
-	for (i = 0; i < num_planes ; i++) {
-		unsigned long plane_len = (data->p[i].srcp_dma_buf) ?
-				data->p[i].srcp_dma_buf->size : data->p[i].len;
-
-		if (plane_len < planes[i].offset) {
-			SDEROT_ERR("Offset=%d larger than buffer size=%lu\n",
-				planes[i].offset, plane_len);
-			ret = -EINVAL;
-			goto buf_too_small;
-		}
-		total_buf_len += plane_len - planes[i].offset;
-	}
-
-	if (total_buf_len < ps.total_size) {
-		SDEROT_ERR("Buffer size=%lu, expected size=%d\n",
-				total_buf_len,
-			ps.total_size);
-		ret = -EINVAL;
-		goto buf_too_small;
-	}
-	return 0;
-
-buf_too_small:
-	sde_mdp_data_free(data, rotator, dir);
-	return ret;
-}
-
-static struct sg_table *sde_rot_dmabuf_map_tiny(
-		struct dma_buf_attachment *attach, enum dma_data_direction dir)
-{
-	struct sde_mdp_img_data *data = attach->dmabuf->priv;
-	struct sg_table *sgt;
-	unsigned int order;
-	struct page *p;
-
-	if (!data) {
-		SDEROT_ERR("NULL img data\n");
-		return NULL;
-	}
-
-	if (data->len > PAGE_SIZE) {
-		SDEROT_ERR("DMA buffer size is larger than %ld, bufsize:%ld\n",
-				PAGE_SIZE, data->len);
-		return NULL;
-	}
-
-	order = get_order(data->len);
-	p = alloc_pages(GFP_KERNEL, order);
-	if (!p) {
-		SDEROT_ERR("Fail allocating page for datasize:%ld\n",
-				data->len);
-		return NULL;
-	}
-
-	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		goto free_alloc_pages;
-
-	/* only alloc a single page */
-	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
-		SDEROT_ERR("fail sg_alloc_table\n");
-		goto free_sgt;
-	}
-
-	sg_set_page(sgt->sgl, p, data->len, 0);
-
-	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
-		SDEROT_ERR("fail dma_map_sg\n");
-		goto free_table;
-	}
-
-	SDEROT_DBG("Successful generate sg_table:%pK datalen:%ld\n",
-			sgt, data->len);
-	return sgt;
-
-free_table:
-	sg_free_table(sgt);
-free_sgt:
-	kfree(sgt);
-free_alloc_pages:
-	__free_pages(p, order);
-	return NULL;
-}
-
-static void sde_rot_dmabuf_unmap(struct dma_buf_attachment *attach,
-			struct sg_table *sgt, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	SDEROT_DBG("DMABUF unmap, sgt:%pK\n", sgt);
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		put_page(sg_page(sg));
-		__free_page(sg_page(sg));
-	}
-
-	sg_free_table(sgt);
-	kfree(sgt);
-}
-
-static void *sde_rot_dmabuf_no_map(struct dma_buf *buf, unsigned long n)
-{
-	SDEROT_WARN("NOT SUPPORTING dmabuf map\n");
-	return NULL;
-}
-
-static void sde_rot_dmabuf_no_unmap(struct dma_buf *buf, unsigned long n,
-		void *addr)
-{
-	SDEROT_WARN("NOT SUPPORTING dmabuf unmap\n");
-}
-
-static void sde_rot_dmabuf_release(struct dma_buf *buf)
-{
-	SDEROT_DBG("Release dmabuf:%pK\n", buf);
-}
-
-static int sde_rot_dmabuf_no_mmap(struct dma_buf *buf,
-		struct vm_area_struct *vma)
-{
-	SDEROT_WARN("NOT SUPPORTING dmabuf mmap\n");
-	return -EINVAL;
-}
-
-static const struct dma_buf_ops sde_rot_dmabuf_ops = {
-	.map_dma_buf	= sde_rot_dmabuf_map_tiny,
-	.unmap_dma_buf	= sde_rot_dmabuf_unmap,
-	.release	= sde_rot_dmabuf_release,
-	.map		= sde_rot_dmabuf_no_map,
-	.unmap		= sde_rot_dmabuf_no_unmap,
-	.mmap		= sde_rot_dmabuf_no_mmap,
-};
-
-struct dma_buf *sde_rot_get_dmabuf(struct sde_mdp_img_data *data)
-{
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &sde_rot_dmabuf_ops;
-	exp_info.size = (size_t)data->len;
-	exp_info.flags = O_RDWR;
-	exp_info.priv = data;
-
-	return dma_buf_export(&exp_info);
-}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
deleted file mode 100644
index fc10f64..0000000
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __SDE_ROTATOR_UTIL_H__
-#define __SDE_ROTATOR_UTIL_H__
-
-#include <linux/types.h>
-#include <linux/file.h>
-#include <linux/kref.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-
-#include "sde_rotator_hwio.h"
-#include "sde_rotator_base.h"
-#include "sde_rotator_sync.h"
-#include "sde_rotator_io_util.h"
-#include "sde_rotator_formats.h"
-
-#define SDE_ROT_MAX_IMG_WIDTH		0x3FFF
-#define SDE_ROT_MAX_IMG_HEIGHT		0x3FFF
-
-#define SDEROT_DBG(fmt, ...)	pr_debug("<SDEROT_DBG> " fmt, ##__VA_ARGS__)
-#define SDEROT_INFO(fmt, ...)	pr_info("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
-#define SDEROT_INFO_ONCE(fmt, ...)  \
-	pr_info_once("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
-#define SDEROT_WARN(fmt, ...)	pr_warn("<SDEROT_WARN> " fmt, ##__VA_ARGS__)
-#define SDEROT_ERR(fmt, ...)	pr_err("<SDEROT_ERR> " fmt, ##__VA_ARGS__)
-#define SDEDEV_DBG(dev, fmt, ...)	\
-	dev_dbg(dev, "<SDEROT_DBG> " fmt, ##__VA_ARGS__)
-#define SDEDEV_INFO(dev, fmt, ...)	\
-	dev_info(dev, "<SDEROT_INFO> " fmt, ##__VA_ARGS__)
-#define SDEDEV_WARN(dev, fmt, ...)	\
-	dev_warn(dev, "<SDEROT_WARN> " fmt, ##__VA_ARGS__)
-#define SDEDEV_ERR(dev, fmt, ...)	\
-	dev_err(dev, "<SDEROT_ERR> " fmt, ##__VA_ARGS__)
-
-#define PHY_ADDR_4G (1ULL<<32)
-
-struct sde_rect {
-	u16 x;
-	u16 y;
-	u16 w;
-	u16 h;
-};
-
-/* sde flag values */
-#define SDE_ROT_NOP			0
-#define SDE_FLIP_LR			0x1
-#define SDE_FLIP_UD			0x2
-#define SDE_ROT_90			0x4
-#define SDE_ROT_180			(SDE_FLIP_UD|SDE_FLIP_LR)
-#define SDE_ROT_270			(SDE_ROT_90|SDE_FLIP_UD|SDE_FLIP_LR)
-#define SDE_DEINTERLACE			0x80000000
-#define SDE_SOURCE_ROTATED_90		0x00100000
-#define SDE_SECURE_OVERLAY_SESSION	0x00008000
-#define SDE_ROT_EXT_DMA_BUF		0x00010000
-#define SDE_SECURE_CAMERA_SESSION	0x00020000
-#define SDE_ROT_EXT_IOVA			0x00040000
-
-struct sde_rot_data_type;
-
-struct sde_fb_data {
-	uint32_t offset;
-	struct dma_buf *buffer;
-	int memory_id;
-	int id;
-	uint32_t flags;
-	uint32_t priv;
-	dma_addr_t addr;
-	u32 len;
-};
-
-struct sde_layer_plane {
-	/* DMA buffer file descriptor information. */
-	int fd;
-	struct dma_buf *buffer;
-
-	/* i/o virtual address & length */
-	dma_addr_t addr;
-	u32 len;
-
-	/* Pixel offset in the dma buffer. */
-	uint32_t offset;
-
-	/* Number of bytes in one scan line including padding bytes. */
-	uint32_t stride;
-};
-
-struct sde_layer_buffer {
-	/* layer width in pixels. */
-	uint32_t width;
-
-	/* layer height in pixels. */
-	uint32_t height;
-
-	/*
-	 * layer format in DRM-style fourcc, refer drm_fourcc.h for
-	 * standard formats
-	 */
-	uint32_t format;
-
-	/* plane to hold the fd, offset, etc for all color components */
-	struct sde_layer_plane planes[SDE_ROT_MAX_PLANES];
-
-	/* valid planes count in layer planes list */
-	uint32_t plane_count;
-
-	/* compression ratio factor, value depends on the pixel format */
-	struct sde_mult_factor comp_ratio;
-
-	/*
-	 * SyncFence associated with this buffer. It is used in two ways.
-	 *
-	 * 1. Driver waits to consume the buffer till producer signals in case
-	 * of primary and external display.
-	 *
-	 * 2. Writeback device uses buffer structure for output buffer where
-	 * driver is producer. However, client sends the fence with buffer to
-	 * indicate that consumer is still using the buffer and it is not ready
-	 * for new content.
-	 */
-	struct sde_rot_sync_fence *fence;
-
-	/* indicate if this is a stream (inline) buffer */
-	bool sbuf;
-
-	/* specify the system cache id in stream buffer mode */
-	int scid;
-
-	/* indicate if system cache writeback is required */
-	bool writeback;
-};
-
-struct sde_mdp_plane_sizes {
-	u32 num_planes;
-	u32 plane_size[SDE_ROT_MAX_PLANES];
-	u32 total_size;
-	u32 ystride[SDE_ROT_MAX_PLANES];
-	u32 rau_cnt;
-	u32 rau_h[2];
-};
-
-struct sde_mdp_img_data {
-	dma_addr_t addr;
-	unsigned long len;
-	u32 offset;
-	u32 flags;
-	bool mapped;
-	bool skip_detach;
-	struct fd srcp_f;
-	struct dma_buf *srcp_dma_buf;
-	struct dma_buf_attachment *srcp_attachment;
-	struct sg_table *srcp_table;
-};
-
-struct sde_mdp_data {
-	u8 num_planes;
-	struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
-	bool sbuf;
-	int scid;
-	bool writeback;
-};
-
-void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
-		u8 *v_sample, u8 *h_sample);
-
-static inline u32 sde_mdp_general_align(u32 data, u32 alignment)
-{
-	return ((data + alignment - 1)/alignment) * alignment;
-}
-
-void sde_rot_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
-	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt);
-
-int sde_validate_offset_for_ubwc_format(
-	struct sde_mdp_format_params *fmt, u16 x, u16 y);
-
-int sde_mdp_data_get_and_validate_size(struct sde_mdp_data *data,
-	struct sde_fb_data *planes, int num_planes, u32 flags,
-	struct device *dev, bool rotator, int dir,
-	struct sde_layer_buffer *buffer);
-
-int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
-	struct sde_mdp_plane_sizes *ps, u32 bwc_mode,
-	bool rotation);
-
-int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir);
-
-int sde_mdp_data_check(struct sde_mdp_data *data,
-			struct sde_mdp_plane_sizes *ps,
-			struct sde_mdp_format_params *fmt);
-
-void sde_mdp_data_free(struct sde_mdp_data *data, bool rotator, int dir);
-
-struct dma_buf *sde_rot_get_dmabuf(struct sde_mdp_img_data *data);
-#endif /* __SDE_ROTATOR_UTIL_H__ */
diff --git a/drivers/media/platform/msm/synx/Makefile b/drivers/media/platform/msm/synx/Makefile
index 946211e..2767c1f 100644
--- a/drivers/media/platform/msm/synx/Makefile
+++ b/drivers/media/platform/msm/synx/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-ccflags-$(CONFIG_SPECTRA_CAMERA) += -Idrivers/media/platform/msm/camera/cam_sync
 obj-$(CONFIG_MSM_GLOBAL_SYNX) += synx.o synx_util.o synx_debugfs.o
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index da8267a..5bc6640 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -10,10 +10,6 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
-#ifdef CONFIG_SPECTRA_CAMERA
-#include <cam_sync_api.h>
-#endif
-
 #include "synx_api.h"
 #include "synx_util.h"
 #include "synx_debugfs.h"
@@ -97,9 +93,15 @@ int synx_create(s32 *synx_obj, const char *name)
 
 	/* global synx id */
 	id = synx_create_handle(synx_dev->synx_table + idx);
+	if (id < 0) {
+		pr_err("unable to allocate the synx handle\n");
+		clear_bit(idx, synx_dev->bitmap);
+		return -EINVAL;
+	}
+
 	rc = synx_init_object(synx_dev->synx_table,
 			idx, id, name, &synx_fence_ops);
-	if (rc) {
+	if (rc < 0) {
 		pr_err("unable to init row at idx = %ld\n", idx);
 		clear_bit(idx, synx_dev->bitmap);
 		return -EINVAL;
@@ -221,7 +223,6 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 	int rc, ret;
 	u32 i = 0;
 	u32 idx = 0;
-	u32 type;
 	s32 sync_id;
 	struct synx_external_data *data = NULL;
 	struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS];
@@ -301,46 +302,42 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
 
 	for (i = 0; i < idx; i++) {
-		type = bind_descs[i].external_desc.type;
 		sync_id = bind_descs[i].external_desc.id[0];
 		data = bind_descs[i].external_data;
-		if (is_valid_type(type)) {
-			bind_ops = &synx_dev->bind_vtbl[type];
-			if (!bind_ops->deregister_callback ||
-				!bind_ops->signal) {
-				pr_err("invalid bind ops for %u\n", type);
-				kfree(data);
+		bind_ops = synx_get_bind_ops(
+					bind_descs[i].external_desc.type);
+		if (!bind_ops) {
+			pr_err("invalid bind ops for %u\n",
+				bind_descs[i].external_desc.type);
+			kfree(data);
+			continue;
+		}
+		/*
+		 * we are already signaled, so don't want to
+		 * recursively be signaled
+		 */
+		ret = bind_ops->deregister_callback(
+				synx_external_callback, data, sync_id);
+		if (ret < 0)
+			pr_err("de-registration fail on sync: %d, err: %d\n",
+				sync_id, ret);
+		pr_debug("signaling external sync: %d, status: %u\n",
+			sync_id, status);
+		/* optional function to enable external signaling */
+		if (bind_ops->enable_signaling) {
+			ret = bind_ops->enable_signaling(sync_id);
+			if (ret < 0) {
+				pr_err("enable signaling fail on sync: %d, err: %d\n",
+					sync_id, ret);
 				continue;
 			}
-			/*
-			 * we are already signaled, so don't want to
-			 * recursively be signaled
-			 */
-			ret = bind_ops->deregister_callback(
-					synx_external_callback, data, sync_id);
-			if (ret < 0)
-				pr_err("de-registration fail on sync: %d, err: %d\n",
-					sync_id, ret);
-			pr_debug("signaling external sync: %d, status: %u\n",
-				sync_id, status);
-			/* optional function to enable external signaling */
-			if (bind_ops->enable_signaling) {
-				ret = bind_ops->enable_signaling(sync_id);
-				if (ret < 0) {
-					pr_err("enable signaling fail on sync: %d, err: %d\n",
-						sync_id, ret);
-					continue;
-				}
-			}
-
-			ret = bind_ops->signal(sync_id, status);
-			if (ret < 0)
-				pr_err("signaling fail on sync: %d, err: %d\n",
-					sync_id, ret);
-		} else {
-			pr_warn("unimplemented external type: %u\n", type);
 		}
 
+		ret = bind_ops->signal(sync_id, status);
+		if (ret < 0)
+			pr_err("signaling fail on sync: %d, err: %d\n",
+				sync_id, ret);
+
 		/*
 		 * release the memory allocated for external data.
 		 * It is safe to release this memory as external cb
@@ -530,8 +527,10 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 		return -EINVAL;
 	}
 
-	if (!is_valid_type(external_sync.type)) {
-		pr_err("invalid external sync object\n");
+	bind_ops = synx_get_bind_ops(external_sync.type);
+	if (!bind_ops) {
+		pr_err("invalid bind ops for %u\n",
+			external_sync.type);
 		return -EINVAL;
 	}
 
@@ -568,15 +567,6 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 		}
 	}
 
-	bind_ops = &synx_dev->bind_vtbl[external_sync.type];
-	if (!bind_ops->register_callback) {
-		pr_err("invalid bind register for %u\n",
-			external_sync.type);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
-		kfree(data);
-		return -EINVAL;
-	}
-
 	/* data passed to external callback */
 	data->synx_obj = row->synx_obj;
 	data->secure_key = synx_generate_secure_key(row);
@@ -1391,39 +1381,60 @@ static const struct file_operations synx_fops = {
 #endif
 };
 
-#ifdef CONFIG_SPECTRA_CAMERA
-static void synx_bind_ops_csl_type(struct bind_operations *vtbl)
+int synx_register_ops(const struct synx_register_params *params)
 {
-	if (!vtbl)
-		return;
+	s32 rc;
+	struct synx_registered_ops *client_ops;
 
-	vtbl->register_callback = cam_sync_register_callback;
-	vtbl->deregister_callback = cam_sync_deregister_callback;
-	vtbl->enable_signaling = cam_sync_get_obj_ref;
-	vtbl->signal = cam_sync_signal;
-
-	pr_debug("csl bind functionality set\n");
-}
-#else
-static void synx_bind_ops_csl_type(struct bind_operations *vtbl)
-{
-	pr_debug("csl bind functionality not available\n");
-}
-#endif
-
-static void synx_bind_ops_register(struct synx_device *synx_dev)
-{
-	u32 i;
-
-	for (i = 0; i < SYNX_MAX_BIND_TYPES; i++) {
-		switch (i) {
-		case SYNX_TYPE_CSL:
-			synx_bind_ops_csl_type(&synx_dev->bind_vtbl[i]);
-			break;
-		default:
-			pr_err("invalid external sync type\n");
-		}
+	if (!params || !params->name ||
+		!is_valid_type(params->type) ||
+		!params->ops.register_callback ||
+		!params->ops.deregister_callback ||
+		!params->ops.signal) {
+		pr_err("invalid register params\n");
+		return -EINVAL;
 	}
+
+	mutex_lock(&synx_dev->table_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	if (!client_ops->valid) {
+		client_ops->valid = true;
+		memcpy(&client_ops->ops, &params->ops,
+			sizeof(client_ops->ops));
+		strlcpy(client_ops->name, params->name,
+			sizeof(client_ops->name));
+		client_ops->type = params->type;
+		pr_info("registered bind ops for %s\n",
+			params->name);
+		rc = 0;
+	} else {
+		pr_info("client already registered by %s\n",
+			client_ops->name);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&synx_dev->table_lock);
+
+	return rc;
+}
+
+int synx_deregister_ops(const struct synx_register_params *params)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (!params || !params->name ||
+		!is_valid_type(params->type)) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&synx_dev->table_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	memset(client_ops, 0, sizeof(*client_ops));
+	pr_info("deregistered bind ops for %s\n",
+		params->name);
+	mutex_unlock(&synx_dev->table_lock);
+
+	return 0;
 }
 
 static int __init synx_init(void)
@@ -1443,6 +1454,7 @@ static int __init synx_init(void)
 		spin_lock_init(&synx_dev->row_spinlocks[idx]);
 
 	idr_init(&synx_dev->synx_ids);
+	spin_lock_init(&synx_dev->idr_lock);
 
 	rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME);
 	if (rc < 0) {
@@ -1480,9 +1492,6 @@ static int __init synx_init(void)
 	synx_dev->dma_context = dma_fence_context_alloc(1);
 
 	synx_dev->debugfs_root = init_synx_debug_dir(synx_dev);
-
-	synx_bind_ops_register(synx_dev);
-
 	pr_info("synx device init success\n");
 
 	return 0;
diff --git a/drivers/media/platform/msm/synx/synx_api.h b/drivers/media/platform/msm/synx/synx_api.h
index 579f542..978a561 100644
--- a/drivers/media/platform/msm/synx/synx_api.h
+++ b/drivers/media/platform/msm/synx/synx_api.h
@@ -11,8 +11,65 @@
 
 typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
 
+/**
+ * struct bind_operations - Function pointers that need to be defined
+ *    to achieve bind functionality for external fence with synx obj
+ *
+ * @register_callback   : Function to register with external sync object
+ * @deregister_callback : Function to deregister with external sync object
+ * @enable_signaling    : Function to enable the signaling on the external
+ *                        sync object (optional)
+ * @signal              : Function to signal the external sync object
+ */
+struct bind_operations {
+	int (*register_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*deregister_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*enable_signaling)(s32 sync_obj);
+	int (*signal)(s32 sync_obj, u32 status);
+};
+
+/**
+ * struct synx_register_params - External registration parameters
+ *
+ * @ops  : Pointer to bind operations struct
+ * @name : Client name
+ *         Only first 32 bytes are accepted, rest will be ignored
+ * @type : Synx external client type
+ */
+struct synx_register_params {
+	struct bind_operations ops;
+	char *name;
+	u32 type;
+};
+
 /* Kernel APIs */
 
+/* @brief: Register operations for external synchronization
+ *
+ * Register with synx for enabling external synchronization through bind
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if params are invalid.
+ * -ENOMEM will be returned if client cannot be registered due to not
+ * enough memory.
+ * -EALREADY will be returned if client name is already in use
+ */
+int synx_register_ops(const struct synx_register_params *params);
+
+/**
+ * @brief: De-register external synchronization operations
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if client not found.
+ */
+int synx_deregister_ops(const struct synx_register_params *params);
+
 /**
  * @brief: Creates a synx object
  *
diff --git a/drivers/media/platform/msm/synx/synx_private.h b/drivers/media/platform/msm/synx/synx_private.h
index 0bb2f5e..4a4f287 100644
--- a/drivers/media/platform/msm/synx/synx_private.h
+++ b/drivers/media/platform/msm/synx/synx_private.h
@@ -140,22 +140,19 @@ struct synx_table_row {
 };
 
 /**
- * struct bind_operations - Function pointers that need to be defined
- *    to achieve bind functionality for external fence with synx obj
+ * struct synx_registered_ops - External sync clients registered for bind
+ * operations with synx driver
  *
- * @register_callback   : Function to register with external sync object
- * @deregister_callback : Function to deregister with external sync object
- * @enable_signaling    : Function to enable the signaling on the external
- *                        sync object (optional)
- * @signal              : Function to signal the external sync object
+ * @valid : Validity of the client registered bind ops
+ * @name  : Name of the external sync client
+ * @ops   : Bind operations struct for the client
+ * @type  : External client type
  */
-struct bind_operations {
-	int (*register_callback)(synx_callback cb_func,
-		void *userdata, s32 sync_obj);
-	int (*deregister_callback)(synx_callback cb_func,
-		void *userdata, s32 sync_obj);
-	int (*enable_signaling)(s32 sync_obj);
-	int (*signal)(s32 sync_obj, u32 status);
+struct synx_registered_ops {
+	bool valid;
+	char name[32];
+	struct bind_operations ops;
+	u32 type;
 };
 
 /**
@@ -171,8 +168,9 @@ struct bind_operations {
  * @work_queue    : Work queue used for dispatching kernel callbacks
  * @bitmap        : Bitmap representation of all synx objects
  * synx_ids       : Global unique ids
+ * idr_lock       : Spin lock for id allocation
  * dma_context    : dma context id
- * bind_vtbl      : Table with bind ops for supported external sync objects
+ * bind_vtbl      : Table with registered bind ops for external sync (bind)
  * client_list    : All the synx clients
  * debugfs_root   : Root directory for debugfs
  * synx_node_head : list head for synx nodes
@@ -189,8 +187,9 @@ struct synx_device {
 	struct workqueue_struct *work_queue;
 	DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
 	struct idr synx_ids;
+	spinlock_t idr_lock;
 	u64 dma_context;
-	struct bind_operations bind_vtbl[SYNX_MAX_BIND_TYPES];
+	struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
 	struct list_head client_list;
 	struct dentry *debugfs_root;
 	struct list_head synx_debug_head;
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index 38955ff..98d30ab 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -143,17 +143,20 @@ int synx_deinit_object(struct synx_table_row *row)
 	struct synx_callback_info *synx_cb, *temp_cb;
 	struct synx_cb_data  *upayload_info, *temp_upayload;
 
-	if (!row)
+	if (!row || !synx_dev)
 		return -EINVAL;
 
 	synx_obj = row->synx_obj;
 
+	spin_lock_bh(&synx_dev->idr_lock);
 	if ((struct synx_table_row *)idr_remove(&synx_dev->synx_ids,
 			row->synx_obj) != row) {
 		pr_err("removing data in idr table failed 0x%x\n",
 			row->synx_obj);
+		spin_unlock_bh(&synx_dev->idr_lock);
 		return -EINVAL;
 	}
+	spin_unlock_bh(&synx_dev->idr_lock);
 
 	/*
 	 * release the fence memory only for individual obj.
@@ -488,9 +491,15 @@ u32 synx_status_locked(struct synx_table_row *row)
 void *synx_from_handle(s32 synx_obj)
 {
 	s32 base;
-	struct synx_table_row *row =
-		(struct synx_table_row *) idr_find(&synx_dev->synx_ids,
+	struct synx_table_row *row;
+
+	if (!synx_dev)
+		return NULL;
+
+	spin_lock_bh(&synx_dev->idr_lock);
+	row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids,
 		synx_obj);
+	spin_unlock_bh(&synx_dev->idr_lock);
 
 	if (!row) {
 		pr_err(
@@ -512,8 +521,15 @@ void *synx_from_handle(s32 synx_obj)
 s32 synx_create_handle(void *pObj)
 {
 	s32 base = current->tgid << 16;
-	s32 id = idr_alloc(&synx_dev->synx_ids, pObj,
-					base, base + 0x10000, GFP_ATOMIC);
+	s32 id;
+
+	if (!synx_dev)
+		return -EINVAL;
+
+	spin_lock_bh(&synx_dev->idr_lock);
+	id = idr_alloc(&synx_dev->synx_ids, pObj,
+			base, base + 0x10000, GFP_ATOMIC);
+	spin_unlock_bh(&synx_dev->idr_lock);
 
 	pr_debug("generated Id: 0x%x, base: 0x%x, client: 0x%x\n",
 		id, base, current->tgid);
@@ -567,11 +583,17 @@ void *synx_from_key(s32 id, u32 secure_key)
 {
 	struct synx_table_row *row = NULL;
 
+	if (!synx_dev)
+		return NULL;
+
+	spin_lock_bh(&synx_dev->idr_lock);
 	row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids, id);
 	if (!row) {
 		pr_err("invalid synx obj 0x%x\n", id);
+		spin_unlock_bh(&synx_dev->idr_lock);
 		return NULL;
 	}
+	spin_unlock_bh(&synx_dev->idr_lock);
 
 	if (row->secure_key != secure_key)
 		row = NULL;
@@ -579,6 +601,25 @@ void *synx_from_key(s32 id, u32 secure_key)
 	return row;
 }
 
+struct bind_operations *synx_get_bind_ops(u32 type)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (!is_valid_type(type))
+		return NULL;
+
+	mutex_lock(&synx_dev->table_lock);
+	client_ops = &synx_dev->bind_vtbl[type];
+	if (!client_ops->valid) {
+		mutex_unlock(&synx_dev->table_lock);
+		return NULL;
+	}
+	pr_debug("found bind ops for %s\n", client_ops->name);
+	mutex_unlock(&synx_dev->table_lock);
+
+	return &client_ops->ops;
+}
+
 void generate_timestamp(char *timestamp, size_t size)
 {
 	struct timeval tv;
diff --git a/drivers/media/platform/msm/synx/synx_util.h b/drivers/media/platform/msm/synx/synx_util.h
index 4b4029a..9af46f5 100644
--- a/drivers/media/platform/msm/synx/synx_util.h
+++ b/drivers/media/platform/msm/synx/synx_util.h
@@ -208,6 +208,16 @@ void *synx_from_handle(s32 synx_id);
 s32 synx_create_handle(void *pObj);
 
 /**
+ * @brief: Function to retrieve the bind ops for external sync
+ *
+ * @param type : External sync type
+ *
+ * @return Bind operations registered by external sync.
+ * NULL otherwise.
+ */
+struct bind_operations *synx_get_bind_ops(u32 type);
+
+/**
  * @brief: Function to generate a secure key for authentication
  *         Used to verify the requests generated on synx objects
  *         not owned by the process.
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 7215641..d386822 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -808,6 +808,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
 
 	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
 	if (!sd_fmt) {
+		if (!dcmi->num_of_sd_formats)
+			return -ENODATA;
+
 		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
 		pix->pixelformat = sd_fmt->fourcc;
 	}
@@ -986,6 +989,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
 
 	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
 	if (!sd_fmt) {
+		if (!dcmi->num_of_sd_formats)
+			return -ENODATA;
+
 		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
 		pix->pixelformat = sd_fmt->fourcc;
 	}
@@ -1645,7 +1651,7 @@ static int dcmi_probe(struct platform_device *pdev)
 	dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 	if (IS_ERR(dcmi->rstc)) {
 		dev_err(&pdev->dev, "Could not get reset control\n");
-		return -ENODEV;
+		return PTR_ERR(dcmi->rstc);
 	}
 
 	/* Get bus characteristics from devicetree */
@@ -1660,7 +1666,7 @@ static int dcmi_probe(struct platform_device *pdev)
 	of_node_put(np);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not parse the endpoint\n");
-		return -ENODEV;
+		return ret;
 	}
 
 	if (ep.bus_type == V4L2_MBUS_CSI2) {
@@ -1673,8 +1679,9 @@ static int dcmi_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
-		dev_err(&pdev->dev, "Could not get irq\n");
-		return -ENODEV;
+		if (irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Could not get irq\n");
+		return irq;
 	}
 
 	dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1694,12 +1701,13 @@ static int dcmi_probe(struct platform_device *pdev)
 					dev_name(&pdev->dev), dcmi);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
-		return -ENODEV;
+		return ret;
 	}
 
 	mclk = devm_clk_get(&pdev->dev, "mclk");
 	if (IS_ERR(mclk)) {
-		dev_err(&pdev->dev, "Unable to get mclk\n");
+		if (PTR_ERR(mclk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get mclk\n");
 		return PTR_ERR(mclk);
 	}
 
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index c01e1592..c8ffe7b 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -365,9 +365,14 @@ static int video_mux_probe(struct platform_device *pdev)
 	vmux->active = -1;
 	vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
 				  GFP_KERNEL);
+	if (!vmux->pads)
+		return -ENOMEM;
+
 	vmux->format_mbus = devm_kcalloc(dev, num_pads,
 					 sizeof(*vmux->format_mbus),
 					 GFP_KERNEL);
+	if (!vmux->format_mbus)
+		return -ENOMEM;
 
 	for (i = 0; i < num_pads; i++) {
 		vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 9246f26..27db883 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -303,6 +303,8 @@ static int vimc_probe(struct platform_device *pdev)
 
 	dev_dbg(&pdev->dev, "probe");
 
+	memset(&vimc->mdev, 0, sizeof(vimc->mdev));
+
 	/* Create platform_device for each entity in the topology*/
 	vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
 				     sizeof(*vimc->subdevs), GFP_KERNEL);
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index fcc897f..392754c 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -120,7 +120,6 @@ static int vimc_streamer_thread(void *data)
 	int i;
 
 	set_freezable();
-	set_current_state(TASK_UNINTERRUPTIBLE);
 
 	for (;;) {
 		try_to_freeze();
@@ -137,6 +136,7 @@ static int vimc_streamer_thread(void *data)
 				break;
 		}
 		//wait for 60hz
+		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(HZ / 60);
 	}
 
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index baa7c83..3b09ffce 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -992,7 +992,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
 		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
 		if (dev->bitmap_cap && (compose->width != s->r.width ||
 					compose->height != s->r.height)) {
-			kfree(dev->bitmap_cap);
+			vfree(dev->bitmap_cap);
 			dev->bitmap_cap = NULL;
 		}
 		*compose = s->r;
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index a442eb5..15cf8c8 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -612,7 +612,6 @@ int rtc6226_stop(struct rtc6226_device *radio)
 static void rtc6226_get_rds(struct rtc6226_device *radio)
 {
 	int retval = 0;
-	int i;
 
 	mutex_lock(&radio->lock);
 	retval = rtc6226_get_all_registers(radio);
@@ -627,9 +626,6 @@ static void rtc6226_get_rds(struct rtc6226_device *radio)
 	radio->block[2] = radio->registers[BC_DATA];
 	radio->block[3] = radio->registers[BD_DATA];
 
-	for (i = 0; i < 4; i++)
-		FMDBG("%s block[%d] %x\n", __func__, i, radio->block[i]);
-
 	radio->bler[0] = (radio->registers[RSSI] & RSSI_RDS_BA_ERRS) >> 14;
 	radio->bler[1] = (radio->registers[RSSI] & RSSI_RDS_BB_ERRS) >> 12;
 	radio->bler[2] = (radio->registers[RSSI] & RSSI_RDS_BC_ERRS) >> 10;
@@ -1244,7 +1240,7 @@ void rtc6226_rds_handler(struct work_struct *worker)
 		grp_type = radio->block[1] >> OFFSET_OF_GRP_TYP;
 		FMDBG("%s grp_type = %d\n", __func__, grp_type);
 	} else {
-		FMDERR("%s invalid data %d\n", __func__, radio->bler[1]);
+		/* invalid data case */
 		return;
 	}
 	if (grp_type & 0x01)
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
index d67ac8b..f4e62c1 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
@@ -236,16 +236,17 @@ static void rtc6226_i2c_interrupt_handler(struct rtc6226_device *radio)
 		FMDERR("%s read fail to STATUS\n", __func__);
 		goto end;
 	}
-	FMDBG("%s : STATUS=0x%4.4hx\n", __func__, radio->registers[STATUS]);
-
-	retval = rtc6226_get_register(radio, RSSI);
-	if (retval < 0) {
-		FMDERR("%s read fail to RSSI\n", __func__);
-		goto end;
-	}
-	FMDBG("%s : RSSI=0x%4.4hx\n", __func__, radio->registers[RSSI]);
 
 	if (radio->registers[STATUS] & STATUS_STD) {
+		FMDBG("%s : STATUS=0x%4.4hx\n", __func__,
+				radio->registers[STATUS]);
+
+		retval = rtc6226_get_register(radio, RSSI);
+		if (retval < 0) {
+			FMDERR("%s read fail to RSSI\n", __func__);
+			goto end;
+		}
+		FMDBG("%s : RSSI=0x%4.4hx\n", __func__, radio->registers[RSSI]);
 			/* stop seeking : clear STD*/
 		radio->registers[SEEKCFG1] &= ~SEEKCFG1_CSR0_SEEK;
 		retval = rtc6226_set_register(radio, SEEKCFG1);
@@ -294,7 +295,6 @@ static void rtc6226_i2c_interrupt_handler(struct rtc6226_device *radio)
 			/* avoid RDS interrupt lock disable_irq*/
 			if ((radio->registers[SYSCFG] &
 						SYSCFG_CSR0_RDS_EN) != 0) {
-				FMDBG("%s start rds handler\n", __func__);
 				schedule_work(&radio->rds_worker);
 			}
 		}
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 800d69c..1cf4019 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
 		return -EIO;
 	}
 	/* Send response data to caller */
-	if (response != NULL && response_len != NULL && evt_hdr->dlen) {
+	if (response != NULL && response_len != NULL && evt_hdr->dlen &&
+	    evt_hdr->dlen <= payload_len) {
 		/* Skip header info and copy only response data */
 		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
 		memcpy(response, skb->data, evt_hdr->dlen);
@@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
 		return;
 
 	fm_evt_hdr = (void *)skb->data;
+	if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
+		return;
 
 	/* Skip header info and copy only response data */
 	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
@@ -1308,7 +1311,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
 static int fm_power_up(struct fmdev *fmdev, u8 mode)
 {
 	u16 payload;
-	__be16 asic_id, asic_ver;
+	__be16 asic_id = 0, asic_ver = 0;
 	int resp_len, ret;
 	u8 fw_name[50];
 
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8bf5637..e613c01 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -773,8 +773,6 @@ static void serial_ir_exit(void)
 
 static int __init serial_ir_init_module(void)
 {
-	int result;
-
 	switch (type) {
 	case IR_HOMEBREW:
 	case IR_IRDEO:
@@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void)
 	if (sense != -1)
 		sense = !!sense;
 
-	result = serial_ir_init();
-	if (!result)
-		return 0;
-
-	serial_ir_exit();
-	return result;
+	return serial_ir_init();
 }
 
 static void __exit serial_ir_exit_module(void)
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 62b4506..3e111f7 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
 
 	dprintk(1, "au0828_analog_stream_enable called\n");
 
+	if (test_bit(DEV_DISCONNECTED, &d->dev_state))
+		return -ENODEV;
+
 	iface = usb_ifnum_to_if(d->usbdev, 0);
 	if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
 		dprintk(1, "Changing intf#0 to alt 5\n");
@@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
 			return rc;
 		}
 
+		v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
+
 		if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-			v4l2_device_call_all(&dev->v4l2_dev, 0, video,
-						s_stream, 1);
 			dev->vid_timeout_running = 1;
 			mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
 		} else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
@@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
 
 	dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
 
-	if (dev->streaming_users-- == 1)
+	if (dev->streaming_users-- == 1) {
 		au0828_uninit_isoc(dev);
+		v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+	}
 
-	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
 	dev->vid_timeout_running = 0;
 	del_timer_sync(&dev->vid_timeout);
 
@@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
 	dprintk(1, "au0828_stop_vbi_streaming called %d\n",
 		dev->streaming_users);
 
-	if (dev->streaming_users-- == 1)
+	if (dev->streaming_users-- == 1) {
 		au0828_uninit_isoc(dev);
+		v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+	}
 
 	spin_lock_irqsave(&dev->slock, flags);
 	if (dev->isoc_ctl.vbi_buf != NULL) {
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 99f106b..d473189 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -1244,8 +1244,7 @@ static int __init cpia2_init(void)
 	LOG("%s v%s\n",
 	    ABOUT, CPIA_VERSION);
 	check_parameters();
-	cpia2_usb_init();
-	return 0;
+	return cpia2_usb_init();
 }
 
 
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index e28bd88..ae0814d 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d)
 	return 0;
 }
 
-static void dvbsky_exit(struct dvb_usb_device *d)
+static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap)
 {
+	struct dvb_usb_device *d = adap_to_d(adap);
 	struct dvbsky_state *state = d_to_priv(d);
-	struct dvb_usb_adapter *adap = &d->adapter[0];
+
+	dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
 
 	dvb_module_release(state->i2c_client_tuner);
 	dvb_module_release(state->i2c_client_demod);
 	dvb_module_release(state->i2c_client_ci);
 
-	adap->fe[0] = NULL;
+	return 0;
 }
 
 /* DVB USB Driver stuff */
@@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
 
 	.i2c_algo         = &dvbsky_i2c_algo,
 	.frontend_attach  = dvbsky_s960_attach,
+	.frontend_detach  = dvbsky_frontend_detach,
 	.init             = dvbsky_init,
 	.get_rc_config    = dvbsky_get_rc_config,
 	.streaming_ctrl   = dvbsky_streaming_ctrl,
 	.identify_state	  = dvbsky_identify_state,
-	.exit             = dvbsky_exit,
 	.read_mac_address = dvbsky_read_mac_addr,
 
 	.num_adapters = 1,
@@ -667,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = {
 
 	.i2c_algo         = &dvbsky_i2c_algo,
 	.frontend_attach  = dvbsky_s960c_attach,
+	.frontend_detach  = dvbsky_frontend_detach,
 	.init             = dvbsky_init,
 	.get_rc_config    = dvbsky_get_rc_config,
 	.streaming_ctrl   = dvbsky_streaming_ctrl,
 	.identify_state	  = dvbsky_identify_state,
-	.exit             = dvbsky_exit,
 	.read_mac_address = dvbsky_read_mac_addr,
 
 	.num_adapters = 1,
@@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = {
 
 	.i2c_algo         = &dvbsky_i2c_algo,
 	.frontend_attach  = dvbsky_t680c_attach,
+	.frontend_detach  = dvbsky_frontend_detach,
 	.init             = dvbsky_init,
 	.get_rc_config    = dvbsky_get_rc_config,
 	.streaming_ctrl   = dvbsky_streaming_ctrl,
 	.identify_state	  = dvbsky_identify_state,
-	.exit             = dvbsky_exit,
 	.read_mac_address = dvbsky_read_mac_addr,
 
 	.num_adapters = 1,
@@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = {
 
 	.i2c_algo         = &dvbsky_i2c_algo,
 	.frontend_attach  = dvbsky_t330_attach,
+	.frontend_detach  = dvbsky_frontend_detach,
 	.init             = dvbsky_init,
 	.get_rc_config    = dvbsky_get_rc_config,
 	.streaming_ctrl   = dvbsky_streaming_ctrl,
 	.identify_state	  = dvbsky_identify_state,
-	.exit             = dvbsky_exit,
 	.read_mac_address = dvbsky_read_mac_addr,
 
 	.num_adapters = 1,
@@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = {
 
 	.i2c_algo         = &dvbsky_i2c_algo,
 	.frontend_attach  = dvbsky_mygica_t230c_attach,
+	.frontend_detach  = dvbsky_frontend_detach,
 	.init             = dvbsky_init,
 	.get_rc_config    = dvbsky_get_rc_config,
 	.streaming_ctrl   = dvbsky_streaming_ctrl,
 	.identify_state	  = dvbsky_identify_state,
-	.exit             = dvbsky_exit,
 
 	.num_adapters = 1,
 	.adapter = {
diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
index 24f5b61..dfa9f89 100644
--- a/drivers/media/usb/go7007/go7007-fw.c
+++ b/drivers/media/usb/go7007/go7007-fw.c
@@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
 	return cnt;
 }
 
-static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
-			int *framelen)
+static noinline_for_stack int do_special(struct go7007 *go, u16 type,
+					 __le16 *code, int space, int *framelen)
 {
 	switch (type) {
 	case SPECIAL_FRM_HEAD:
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 405a6a7..b12356c 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
 		/* check the packet status and length */
 		st = urb->iso_frame_desc[i].status;
 		if (st) {
-			pr_err("ISOC data error: [%d] len=%d, status=%d\n",
+			gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n",
 			       i, len, st);
 			gspca_dev->last_packet_type = DISCARD_PACKET;
 			continue;
@@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
 	}
 
 resubmit:
+	if (!gspca_dev->streaming)
+		return;
 	/* resubmit the URB */
 	st = usb_submit_urb(urb, GFP_ATOMIC);
 	if (st < 0)
@@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb)
 	struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
 
 	gspca_dbg(gspca_dev, D_PACK, "isoc irq\n");
-	if (!vb2_start_streaming_called(&gspca_dev->queue))
+	if (!gspca_dev->streaming)
 		return;
 	fill_frame(gspca_dev, urb);
 }
@@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb)
 	int st;
 
 	gspca_dbg(gspca_dev, D_PACK, "bulk irq\n");
-	if (!vb2_start_streaming_called(&gspca_dev->queue))
+	if (!gspca_dev->streaming)
 		return;
 	switch (urb->status) {
 	case 0:
@@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb)
 				urb->actual_length);
 
 resubmit:
+	if (!gspca_dev->streaming)
+		return;
 	/* resubmit the URB */
 	if (gspca_dev->cam.bulk_nurbs != 0) {
 		st = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1630,6 +1634,8 @@ void gspca_disconnect(struct usb_interface *intf)
 
 	mutex_lock(&gspca_dev->usb_lock);
 	gspca_dev->present = false;
+	destroy_urbs(gspca_dev);
+	gspca_input_destroy_urb(gspca_dev);
 
 	vb2_queue_error(&gspca_dev->queue);
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index a8519da..673fdca 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
 
 static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
 {
+	if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
+		return 0;
 	return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
 }
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index 25648ad..bd2b7a6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -50,6 +50,7 @@
 #define PVR2_CVAL_INPUT_COMPOSITE 2
 #define PVR2_CVAL_INPUT_SVIDEO 3
 #define PVR2_CVAL_INPUT_RADIO 4
+#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
 
 enum pvr2_config {
 	pvr2_config_empty,    /* No configuration */
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index bd25faf..c8f1666 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
 	u32 value;
 
 	/* compute the number of MC clock cycles per tick */
-	tick = mc->tick * clk_get_rate(mc->clk);
+	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
 	do_div(tick, NSEC_PER_SEC);
 
 	value = readl(mc->regs + MC_EMEM_ARB_CFG);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016b..104477b 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	return status;
 }
 
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (client->irq)
+		disable_irq(client->irq);
+
+	return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (client->irq)
+		enable_irq(client->irq);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
 static const struct i2c_device_id twl_ids[] = {
 	{ "twl4030", TWL4030_VAUX2 },	/* "Triton 2" */
 	{ "twl5030", 0 },		/* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
 /* One Client Driver , 4 Clients */
 static struct i2c_driver twl_driver = {
 	.driver.name	= DRIVER_NAME,
+	.driver.pm	= &twl_dev_pm_ops,
 	.id_table	= twl_ids,
 	.probe		= twl_probe,
 	.remove		= twl_remove,
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 3370a41..1578589 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -10,6 +10,7 @@
 lkdtm-$(CONFIG_LKDTM)		+= usercopy.o
 
 KCOV_INSTRUMENT_rodata.o	:= n
+CFLAGS_lkdtm_rodata.o		+= $(DISABLE_LTO)
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o	:= \
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 33ae209..99d80db 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -46,6 +46,10 @@
 #include "compat_qseecom.h"
 #include <linux/pfk.h>
 #include <linux/kthread.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+#include <linux/of_platform.h>
+#include <soc/qcom/qtee_shmbridge.h>
 
 #define QSEECOM_DEV			"qseecom"
 #define QSEOS_VERSION_14		0x14
@@ -198,7 +202,8 @@ struct qseecom_registered_listener_list {
 	bool                       listener_in_use;
 	/* wq for thread blocked on this listener*/
 	wait_queue_head_t          listener_block_app_wq;
-	struct sglist_info         sglistinfo_ptr[MAX_ION_FD];
+	struct sglist_info         *sglistinfo_ptr;
+	struct qtee_shm            sglistinfo_shm;
 	uint32_t                   sglist_cnt;
 	int                        abort;
 	bool                       unregister_pending;
@@ -303,6 +308,8 @@ struct qseecom_control {
 	atomic_t qseecom_state;
 	int is_apps_region_protected;
 	bool smcinvoke_support;
+	uint64_t qseecom_bridge_handle;
+	uint64_t ta_bridge_handle;
 
 	struct list_head  unregister_lsnr_pending_list_head;
 	wait_queue_head_t register_lsnr_pending_wq;
@@ -315,7 +322,8 @@ struct qseecom_sec_buf_fd_info {
 	bool is_sec_buf_fd;
 	size_t size;
 	void *vbase;
-	dma_addr_t pbase;
+	phys_addr_t pbase;
+	struct qtee_shm shm;
 };
 
 struct qseecom_param_memref {
@@ -336,6 +344,7 @@ struct qseecom_client_handle {
 	u32  app_arch;
 	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
 	bool from_smcinvoke;
+	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
 };
 
 struct qseecom_listener_handle {
@@ -359,7 +368,8 @@ struct qseecom_dev_handle {
 	bool  perf_enabled;
 	bool  fast_load_enabled;
 	enum qseecom_bandwidth_request_mode mode;
-	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	struct sglist_info *sglistinfo_ptr;
+	struct qtee_shm sglistinfo_shm;
 	uint32_t sglist_cnt;
 	bool use_legacy_cmd;
 };
@@ -444,6 +454,25 @@ static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
 	return ret;
 }
 
+static char *__qseecom_alloc_tzbuf(uint32_t size,
+				phys_addr_t *pa, struct qtee_shm *shm)
+{
+	char *tzbuf = NULL;
+	int ret = qtee_shmbridge_allocate_shm(size, shm);
+
+	if (ret)
+		return NULL;
+	tzbuf = shm->vaddr;
+	memset(tzbuf, 0, size);
+	*pa = shm->paddr;
+	return tzbuf;
+}
+
+static void __qseecom_free_tzbuf(struct qtee_shm *shm)
+{
+	qtee_shmbridge_free_shm(shm);
+}
+
 static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			const void *req_buf, void *resp_buf)
 {
@@ -452,6 +481,8 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 	uint32_t qseos_cmd_id = 0;
 	struct scm_desc desc = {0};
 	struct qseecom_command_scm_resp *scm_resp = NULL;
+	struct qtee_shm shm = {0};
+	phys_addr_t pa;
 
 	if (!req_buf || !resp_buf) {
 		pr_err("Invalid buffer pointer\n");
@@ -481,8 +512,8 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			struct qseecom_save_partition_hash_req *p_hash_req =
 				(struct qseecom_save_partition_hash_req *)
 				req_buf;
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			memset(tzbuf, 0, tzbuflen);
@@ -492,10 +523,10 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
 			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
 			desc.args[0] = p_hash_req->partition_id;
-			desc.args[1] = virt_to_phys(tzbuf);
+			desc.args[1] = pa;
 			desc.args[2] = SHA256_DIGEST_LENGTH;
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		default: {
@@ -539,14 +570,14 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			smc_id = TZ_OS_APP_SHUTDOWN_ID;
 			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
 			desc.args[0] = req->app_id;
-			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			ret = scm_call2(smc_id, &desc);
 			break;
 		}
 		case QSEOS_APP_LOOKUP_COMMAND: {
 			struct qseecom_check_app_ireq *req;
 			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			req = (struct qseecom_check_app_ireq *)req_buf;
@@ -555,11 +586,11 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
 			smc_id = TZ_OS_APP_LOOKUP_ID;
 			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
-			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[0] = pa;
 			desc.args[1] = strlen(req->app_name);
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		case QSEOS_APP_REGION_NOTIFICATION: {
@@ -818,8 +849,8 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			u32 tzbuflen = PAGE_ALIGN(sizeof
 				(struct qseecom_key_generate_ireq) -
 				sizeof(uint32_t));
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			memset(tzbuf, 0, tzbuflen);
@@ -829,19 +860,19 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
 			smc_id = TZ_OS_KS_GEN_KEY_ID;
 			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
-			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[0] = pa;
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		case QSEOS_DELETE_KEY: {
 			u32 tzbuflen = PAGE_ALIGN(sizeof
 				(struct qseecom_key_delete_ireq) -
 				sizeof(uint32_t));
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			memset(tzbuf, 0, tzbuflen);
@@ -851,19 +882,19 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
 			smc_id = TZ_OS_KS_DEL_KEY_ID;
 			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
-			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[0] = pa;
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		case QSEOS_SET_KEY: {
 			u32 tzbuflen = PAGE_ALIGN(sizeof
 				(struct qseecom_key_select_ireq) -
 				sizeof(uint32_t));
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			memset(tzbuf, 0, tzbuflen);
@@ -873,19 +904,19 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
 			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
 			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
-			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[0] = pa;
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		case QSEOS_UPDATE_KEY_USERINFO: {
 			u32 tzbuflen = PAGE_ALIGN(sizeof
 				(struct qseecom_key_userinfo_update_ireq) -
 				sizeof(uint32_t));
-			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
-
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
 			if (!tzbuf)
 				return -ENOMEM;
 			memset(tzbuf, 0, tzbuflen);
@@ -895,11 +926,11 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
 			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
 			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
-			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[0] = pa;
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
 			ret = __qseecom_scm_call2_locked(smc_id, &desc);
-			kzfree(tzbuf);
+			__qseecom_free_tzbuf(&shm);
 			break;
 		}
 		case QSEOS_TEE_OPEN_SESSION: {
@@ -1167,6 +1198,108 @@ static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf,
 	return ret;
 }
 
+static int qseecom_destroy_bridge_callback(
+				struct dma_buf *dmabuf, void *dtor_data)
+{
+	int ret = 0;
+	uint64_t handle = (uint64_t)(uintptr_t)dtor_data; /* bridge handle was stashed in dtor_data; uintptr_t keeps 32-bit builds warning-free */
+
+	if (!dmabuf) {
+		pr_err("dmabuf NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("to destroy shm bridge %llu\n", handle);
+	ret = qtee_shmbridge_deregister(handle);
+	if (ret) {
+		pr_err("failed to destroy shm bridge %llu\n", handle);
+		return ret;
+	}
+	dma_buf_set_destructor(dmabuf, NULL, NULL); /* detach destructor so deregister never runs twice */
+	return ret;
+}
+
+static int qseecom_create_bridge_for_secbuf(int ion_fd, struct dma_buf *dmabuf,
+				struct sg_table *sgt)
+{
+	int ret = 0, i;
+	phys_addr_t phys;
+	size_t size = 0;
+	uint64_t handle = 0;
+	int tz_perm = PERM_READ|PERM_WRITE;
+	unsigned long dma_buf_flags = 0;
+	uint32_t *vmid_list;
+	uint32_t *perms_list;
+	uint32_t nelems;
+	struct scatterlist *sg = sgt->sgl;
+
+	ret = dma_buf_get_flags(dmabuf, &dma_buf_flags);
+	if (ret) {
+		pr_err("failed to get dmabuf flag for fd %d\n",
+				ion_fd);
+		return ret;
+	}
+
+	if (!(dma_buf_flags & ION_FLAG_SECURE) || (sgt->nents != 1)) { /* bridge needed only for contiguous (single-entry) secure bufs */
+		pr_debug("just create bridge for contiguous secure buf\n");
+		return 0;
+	}
+
+	phys = sg_phys(sg);
+	size = sg->length;
+
+	ret = qtee_shmbridge_query(phys);
+	if (ret) { /* non-zero query: a bridge already covers this PA */
+		pr_debug("bridge exists\n");
+		return 0;
+	}
+
+	nelems = ion_get_flags_num_vm_elems(dma_buf_flags);
+	if (nelems == 0) {
+		pr_err("failed to get vm num from flag = %lx\n", dma_buf_flags);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	vmid_list = kcalloc(nelems, sizeof(*vmid_list), GFP_KERNEL);
+	if (!vmid_list) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	ret = ion_populate_vm_list(dma_buf_flags, vmid_list, nelems);
+	if (ret)
+		goto exit_free_vmid_list;
+
+	perms_list = kcalloc(nelems, sizeof(*perms_list), GFP_KERNEL);
+	if (!perms_list) {
+		ret = -ENOMEM;
+		goto exit_free_vmid_list;
+	}
+
+	for (i = 0; i < nelems; i++)
+		perms_list[i] = msm_secure_get_vmid_perms(vmid_list[i]);
+
+	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
+				      tz_perm, &handle);
+
+	if (ret && ret != -EEXIST) { /* -EEXIST is tolerated: bridge raced in elsewhere */
+		pr_err("creation of shm bridge failed with ret: %d\n",
+		       ret);
+		goto exit_free_perms_list;
+	}
+
+	pr_debug("created shm bridge %llu\n", handle);
+	dma_buf_set_destructor(dmabuf, qseecom_destroy_bridge_callback,
+			       (void *)(uintptr_t)handle);
+
+exit_free_perms_list:
+	kfree(perms_list);
+exit_free_vmid_list:
+	kfree(vmid_list);
+exit:
+	return (ret == -EEXIST) ? 0 : ret; /* EEXIST was accepted above; don't fail the caller's map */
+}
+
 static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
 				struct dma_buf_attachment **attach,
 				struct dma_buf **dmabuf)
@@ -1200,6 +1333,12 @@ static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
 	*sgt = new_sgt;
 	*attach = new_attach;
 	*dmabuf = new_dma_buf;
+
+	ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt);
+	if (ret) {
+		pr_err("failed to create bridge for fd %d\n", ion_fd);
+		goto err_detach;
+	}
 	return ret;
 
 err_detach:
@@ -1388,12 +1527,22 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
 	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
 	new_entry->rcv_req_flag = 0;
 
+	new_entry->sglistinfo_ptr =
+				(struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&new_entry->sglistinfo_shm.paddr,
+				&new_entry->sglistinfo_shm);
+	if (!new_entry->sglistinfo_ptr) {
+		kfree(new_entry);
+		return -ENOMEM;
+	}
 	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
 	new_entry->sb_length = rcvd_lstnr.sb_size;
 	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
 	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
 		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
 				rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
+		__qseecom_free_tzbuf(&new_entry->sglistinfo_shm);
 		kzfree(new_entry);
 		return -ENOMEM;
 	}
@@ -1454,7 +1603,7 @@ static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
 	if (ptr_svc->dmabuf)
 		qseecom_vaddr_unmap(ptr_svc->sb_virt,
 			ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
-
+	__qseecom_free_tzbuf(&ptr_svc->sglistinfo_shm);
 	list_del(&ptr_svc->list);
 	kzfree(ptr_svc);
 
@@ -3413,7 +3562,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
 					data, (uintptr_t)req->resp_buf));
 		send_data_req.rsp_len = req->resp_len;
 		send_data_req.sglistinfo_ptr =
-				(uint32_t)virt_to_phys(table);
+				(uint32_t)data->sglistinfo_shm.paddr;
 		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
 		dmac_flush_range((void *)table,
 				(void *)table + SGLISTINFO_TABLE_SIZE);
@@ -3442,7 +3591,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
 			return -EFAULT;
 		}
 		send_data_req_64bit.sglistinfo_ptr =
-				(uint64_t)virt_to_phys(table);
+				(uint64_t)data->sglistinfo_shm.paddr;
 		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
 		dmac_flush_range((void *)table,
 				(void *)table + SGLISTINFO_TABLE_SIZE);
@@ -4680,6 +4829,14 @@ int qseecom_start_app(struct qseecom_handle **handle,
 	data->released = false;
 	data->client.sb_length = size;
 	data->client.user_virt_sb_base = 0;
+	data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&data->sglistinfo_shm.paddr,
+				&data->sglistinfo_shm);
+	if (!data->sglistinfo_ptr) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	init_waitqueue_head(&data->abort_wq);
 
@@ -4782,6 +4939,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
 err:
 	if (va)
 		__qseecom_free_coherent_buf(size, va, pa);
+	__qseecom_free_tzbuf(&data->sglistinfo_shm);
 	kfree(data);
 	kfree(*handle);
 	*handle = NULL;
@@ -4834,6 +4992,7 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
 		if (data->client.sb_virt)
 			__qseecom_free_coherent_buf(data->client.sb_length,
 				data->client.sb_virt, data->client.sb_phys);
+		__qseecom_free_tzbuf(&data->sglistinfo_shm);
 		kzfree(data);
 		kzfree(*handle);
 		kzfree(kclient);
@@ -6454,6 +6613,8 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
 	char *tzbufin = NULL, *tzbufout = NULL;
 	struct scm_desc desc = {0};
 	int ret;
+	phys_addr_t pain, paout;
+	struct qtee_shm shmin = {0}, shmout = {0};
 
 	do {
 		/* Copy the parameters from userspace */
@@ -6480,7 +6641,7 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
 
 		/* Copy the input buffer from userspace to kernel space */
 		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
-		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+		tzbufin = __qseecom_alloc_tzbuf(tzbuflenin, &pain, &shmin);
 		if (!tzbufin) {
 			pr_err("error allocating in buffer\n");
 			ret = -ENOMEM;
@@ -6497,7 +6658,7 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
 
 		/* Prepare the output buffer in kernel space */
 		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
-		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+		tzbufout = __qseecom_alloc_tzbuf(tzbuflenout, &paout, &shmout);
 		if (!tzbufout) {
 			pr_err("error allocating out buffer\n");
 			ret = -ENOMEM;
@@ -6508,9 +6669,9 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
 
 		/* Send the command to TZ */
 		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
-		desc.args[0] = virt_to_phys(tzbufin);
+		desc.args[0] = pain;
 		desc.args[1] = req.in_buf_size;
-		desc.args[2] = virt_to_phys(tzbufout);
+		desc.args[2] = paout;
 		desc.args[3] = req.out_buf_size;
 		desc.args[4] = req.direction;
 
@@ -6537,8 +6698,8 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
 		}
 	} while (0);
 
-	kzfree(tzbufin);
-	kzfree(tzbufout);
+	__qseecom_free_tzbuf(&shmin);
+	__qseecom_free_tzbuf(&shmout);
 
 	return ret;
 }
@@ -7158,7 +7319,7 @@ static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
 	}
 }
 
-static inline long qseecom_ioctl(struct file *file,
+static long qseecom_ioctl(struct file *file,
 			unsigned int cmd, unsigned long arg)
 {
 	int ret = 0;
@@ -7897,6 +8058,12 @@ static int qseecom_open(struct inode *inode, struct file *file)
 	data->mode = INACTIVE;
 	init_waitqueue_head(&data->abort_wq);
 	atomic_set(&data->ioctl_count, 0);
+	data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&data->sglistinfo_shm.paddr,
+				&data->sglistinfo_shm);
+	if (!data->sglistinfo_ptr)
+		return -ENOMEM;
 	return ret;
 }
 
@@ -7957,8 +8124,10 @@ static int qseecom_release(struct inode *inode, struct file *file)
 			qsee_disable_clock_vote(data, CLK_DFAB);
 	}
 
-	if (free_private_data)
+	if (free_private_data) {
+		__qseecom_free_tzbuf(&data->sglistinfo_shm);
 		kfree(data);
+	}
 	return ret;
 }
 
@@ -9035,6 +9204,72 @@ static int qseecom_create_kthread_unregister_lsnr(void)
 	return 0;
 }
 
+static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
+{
+	phys_addr_t heap_pa = 0;
+	size_t heap_size = 0;
+	uint32_t val = 0;
+	struct device_node *ion_node, *node;
+	struct platform_device *ion_pdev = NULL;
+	struct cma *cma = NULL;
+	int ret = -EINVAL; /* proper errno; bare -1 would read as -EPERM to callers */
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+
+	ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion"); /* NOTE(review): ion_node ref is never of_node_put() — confirm leak */
+	if (!ion_node) {
+		pr_err("Failed to get qcom,msm-ion node\n");
+		return ret;
+	}
+
+	for_each_available_child_of_node(ion_node, node) { /* find the child heap whose "reg" matches heapid */
+		if (of_property_read_u32(node, "reg", &val) || val != heapid)
+			continue;
+		ion_pdev = of_find_device_by_node(node);
+		if (!ion_pdev) {
+			pr_err("Failed to find node for heap %d\n", heapid);
+			break;
+		}
+		cma = dev_get_cma_area(&ion_pdev->dev);
+		if (cma) {
+			heap_pa = cma_get_base(cma);
+			heap_size = (size_t)cma_get_size(cma);
+		} else {
+			pr_err("Failed to get Heap %d info\n", heapid);
+		}
+		break;
+	}
+
+	if (heap_pa)
+		ret = qtee_shmbridge_register(heap_pa,
+				heap_size, ns_vmids, ns_vm_perms, 1,
+				PERM_READ | PERM_WRITE, handle);
+	return ret;
+}
+
+static int qseecom_register_shmbridge(void) /* register TA and qseecom ION heaps with the TZ shm bridge */
+{
+	if (qseecom_register_heap_shmbridge(ION_QSECOM_TA_HEAP_ID,
+					&qseecom.ta_bridge_handle)) {
+		pr_err("Failed to register shmbridge for ta heap\n");
+		return -ENOMEM;
+	}
+
+	if (qseecom_register_heap_shmbridge(ION_QSECOM_HEAP_ID,
+					&qseecom.qseecom_bridge_handle)) {
+		pr_err("Failed to register shmbridge for qseecom heap\n");
+		qtee_shmbridge_deregister(qseecom.ta_bridge_handle); /* unwind first bridge on failure */
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void qseecom_deregister_shmbridge(void) /* tear down both heap bridges registered at probe */
+{
+	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+}
+
 static int qseecom_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -9071,6 +9306,10 @@ static int qseecom_probe(struct platform_device *pdev)
 	if (rc)
 		goto exit_deinit_bus;
 
+	rc = qseecom_register_shmbridge();
+	if (rc)
+		goto exit_deinit_bus;
+
 	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
 	return 0;
 
@@ -9122,6 +9361,7 @@ static int qseecom_remove(struct platform_device *pdev)
 	if (qseecom.qseos_version > QSEEE_VERSION_00)
 		qseecom_unload_commonlib_image();
 
+	qseecom_deregister_shmbridge();
 	kthread_stop(qseecom.unregister_lsnr_kthread_task);
 	qseecom_deinit_bus();
 	qseecom_deinit_clk();
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 42e8906..99a4bf1 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -61,6 +61,17 @@
 
 	  If unsure, say 8 here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested.
+
+	  This will reduce overall resume latency and
+	  save power when there is an SD card inserted but not being used.
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
@@ -80,3 +91,12 @@
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
 
+config MMC_IPC_LOGGING
+	bool "MMC_IPC_LOGGING"
+	depends on MMC
+	help
+	  This enables the IPC logging of significant events for mmc
+	  driver to provide command history for debugging purpose.
+
+	  If unsure, say N.
+
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9da2208..d6e67bd 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -459,7 +459,8 @@ static int ioctl_do_sanitize(struct mmc_card *card)
 {
 	int err;
 
-	if (!mmc_can_sanitize(card)) {
+	if (!mmc_can_sanitize(card) &&
+			(card->host->caps2 & MMC_CAP2_SANITIZE)) {
 		pr_warn("%s: %s - SANITIZE is not supported\n",
 			mmc_hostname(card->host), __func__);
 		err = -EOPNOTSUPP;
@@ -653,13 +654,13 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
 	struct request *req;
 
 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
-	if (IS_ERR(idata))
+	if (IS_ERR_OR_NULL(idata))
 		return PTR_ERR(idata);
 	/* This will be NULL on non-RPMB ioctl():s */
 	idata->rpmb = rpmb;
 
 	card = md->queue.card;
-	if (IS_ERR(card)) {
+	if (IS_ERR_OR_NULL(card)) {
 		err = PTR_ERR(card);
 		goto cmd_done;
 	}
@@ -2984,6 +2985,9 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	dev_set_drvdata(&card->dev, md);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -3032,6 +3036,9 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 	destroy_workqueue(card->complete_wq);
 }
 
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index fc92c6c..9ccd2a9 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -134,6 +134,16 @@ static void mmc_bus_shutdown(struct device *dev)
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (!drv) {
+		pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
+	if (!card) {
+		pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
 	if (dev->driver && drv->shutdown)
 		drv->shutdown(card);
 
@@ -156,6 +166,8 @@ static int mmc_bus_suspend(struct device *dev)
 	if (ret)
 		return ret;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
 	ret = host->bus_ops->suspend(host);
 	if (ret)
 		pm_generic_resume(dev);
@@ -169,11 +181,17 @@ static int mmc_bus_resume(struct device *dev)
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (mmc_bus_manual_resume(host)) {
+		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+		goto skip_full_resume;
+	}
+
 	ret = host->bus_ops->resume(host);
 	if (ret)
 		pr_warn("%s: error %d during resume (card was removed?)\n",
 			mmc_hostname(host), ret);
 
+skip_full_resume:
 	ret = pm_generic_resume(dev);
 	return ret;
 }
@@ -185,6 +203,9 @@ static int mmc_runtime_suspend(struct device *dev)
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
+
 	return host->bus_ops->runtime_suspend(host);
 }
 
@@ -193,6 +214,9 @@ static int mmc_runtime_resume(struct device *dev)
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+
 	return host->bus_ops->runtime_resume(host);
 }
 #endif /* !CONFIG_PM */
@@ -250,12 +274,15 @@ EXPORT_SYMBOL(mmc_unregister_driver);
 static void mmc_release_card(struct device *dev)
 {
 	struct mmc_card *card = mmc_dev_to_card(dev);
+	struct mmc_host *host = card->host;
 
 	sdio_free_common_cis(card);
 
 	kfree(card->info);
 
 	kfree(card);
+	if (host)
+		host->card = NULL;
 }
 
 /*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index cc37013..d30f1a2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1277,7 +1277,7 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 	struct mmc_command *cmd;
 
 	while (1) {
-		wait_for_completion(&mrq->completion);
+		wait_for_completion_io(&mrq->completion);
 
 		cmd = mrq->cmd;
 
@@ -1496,6 +1496,10 @@ EXPORT_SYMBOL(mmc_is_req_done);
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+#endif
 	__mmc_start_req(host, mrq);
 
 	if (!mrq->cap_cmd_during_tfr)
@@ -1544,6 +1548,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 {
 	unsigned int mult;
 
+	if (!card) {
+		WARN_ON(1);
+		return;
+	}
 	/*
 	 * SDIO cards only define an upper 1 s limit on access.
 	 */
@@ -1776,7 +1784,12 @@ void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
 {
 	pm_runtime_get_sync(&card->dev);
 	__mmc_claim_host(card->host, ctx, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(card->host))
+		mmc_resume_bus(card->host);
+#endif
 }
+
 EXPORT_SYMBOL(mmc_get_card);
 
 /*
@@ -2636,6 +2649,56 @@ static inline void mmc_bus_put(struct mmc_host *host)
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host) /* perform the deferred (lazy) bus resume on first I/O */
+{
+	unsigned long flags;
+	int err = 0;
+
+	if (!mmc_bus_needs_resume(host))
+		return -EINVAL;
+
+	pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
+	spin_lock_irqsave(&host->lock, flags);
+	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME; /* clear under lock before touching hardware */
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmc_bus_get(host);
+	if (host->bus_ops && !host->bus_dead && host->card) {
+		mmc_power_up(host, host->card->ocr);
+		BUG_ON(!host->bus_ops->deferred_resume); /* NOTE(review): WARN_ON + bail would be safer than BUG_ON */
+		err = host->bus_ops->deferred_resume(host);
+		if (err) {
+			pr_err("%s: %s: resume failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+			/*
+			 * If we have cd-gpio based detection mechanism and
+			 * deferred resume is supported, we will not detect
+			 * card removal event when system is suspended. So if
+			 * resume fails after a system suspend/resume,
+			 * schedule the work to detect card presence.
+			 */
+			if (mmc_card_is_removable(host) &&
+					!(host->caps & MMC_CAP_NEEDS_POLL)) {
+				mmc_detect_change(host, 0);
+			}
+		}
+		if (host->card->ext_csd.cmdq_en && !host->cqe_enabled) {
+			err = host->cqe_ops->cqe_enable(host, host->card);
+			host->cqe_enabled = !err; /* mark enabled only when cqe_enable succeeded */
+			if (err)
+				pr_err("%s: %s: cqe enable failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+			else
+				mmc_card_clr_suspended(host->card);
+		}
+	}
+
+	mmc_bus_put(host);
+	pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
+	return 0; /* errors are logged but intentionally not propagated to I/O path */
+}
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -2689,6 +2752,16 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
 		pm_wakeup_event(mmc_dev(host), 5000);
 
 	host->detect_change = 1;
+	/*
+	 * Change in cd_gpio state, so make sure detection part is
+	 * not overided because of manual resume.
+	 */
+	if (cd_irq && mmc_bus_manual_resume(host))
+		host->ignore_bus_resume_flags = true;
+
+	if (delayed_work_pending(&host->detect))
+		cancel_delayed_work(&host->detect);
+
 	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -3262,6 +3335,10 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
 	struct mmc_host *host = card->host;
 	unsigned int max_discard, max_trim;
 
+	if (!host->max_busy_timeout ||
+			(host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
+		return UINT_MAX;
+
 	/*
 	 * Without erase_group_def set, MMC erase timeout depends on clock
 	 * frequence which can change.  In that case, the best choice is
@@ -3504,12 +3581,16 @@ EXPORT_SYMBOL(mmc_flush_detect_work);
 
 void mmc_rescan(struct work_struct *work)
 {
+	unsigned long flags;
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	int i;
 
-	if (host->rescan_disable)
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->rescan_disable) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	/* If there is a non-removable card registered, only scan once */
 	if (!mmc_card_is_removable(host) && host->rescan_entered)
@@ -3533,6 +3614,8 @@ void mmc_rescan(struct work_struct *work)
 		host->bus_ops->detect(host);
 
 	host->detect_change = 0;
+	if (host->ignore_bus_resume_flags)
+		host->ignore_bus_resume_flags = false;
 
 	/*
 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -3561,12 +3644,7 @@ void mmc_rescan(struct work_struct *work)
 		goto out;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
-			break;
-		if (freqs[i] <= host->f_min)
-			break;
-	}
+	mmc_rescan_try_freq(host, host->f_min);
 	host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
 	mmc_release_host(host);
 
@@ -3577,17 +3655,16 @@ void mmc_rescan(struct work_struct *work)
 
 void mmc_start_host(struct mmc_host *host)
 {
+	mmc_claim_host(host);
 	host->f_init = max(freqs[0], host->f_min);
 	host->rescan_disable = 0;
 	host->ios.power_mode = MMC_POWER_UNDEFINED;
 
-	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
-		mmc_claim_host(host);
+	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP))
 		mmc_power_up(host, host->ocr_avail);
-		mmc_release_host(host);
-	}
 
 	mmc_gpiod_request_cd_irq(host);
+	mmc_release_host(host);
 	mmc_register_extcon(host);
 	_mmc_detect_change(host, 0, false);
 }
@@ -3678,6 +3755,11 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
 
 		spin_lock_irqsave(&host->lock, flags);
 		host->rescan_disable = 0;
+		if (mmc_bus_manual_resume(host) &&
+				!host->ignore_bus_resume_flags) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&host->lock, flags);
 		_mmc_detect_change(host, 0, false);
 
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 1215440..cd4aa88 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -27,6 +27,7 @@ struct mmc_bus_ops {
 	int (*pre_suspend)(struct mmc_host *);
 	int (*suspend)(struct mmc_host *);
 	int (*resume)(struct mmc_host *);
+	int (*deferred_resume)(struct mmc_host *host);
 	int (*runtime_suspend)(struct mmc_host *);
 	int (*runtime_resume)(struct mmc_host *);
 	int (*alive)(struct mmc_host *);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 94aeb5b..191ffc4 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -986,11 +986,11 @@ static void mmc_set_bus_speed(struct mmc_card *card)
  */
 static int mmc_select_bus_width(struct mmc_card *card)
 {
-	static unsigned int ext_csd_bits[] = {
+	static const unsigned int ext_csd_bits[] = {
 		EXT_CSD_BUS_WIDTH_8,
 		EXT_CSD_BUS_WIDTH_4,
 	};
-	static unsigned int bus_widths[] = {
+	static const unsigned int bus_widths[] = {
 		MMC_BUS_WIDTH_8,
 		MMC_BUS_WIDTH_4,
 	};
@@ -1275,10 +1275,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
 	int err;
 	u8 val;
 
-	/* Reduce frequency to HS */
-	max_dtr = card->ext_csd.hs_max_dtr;
-	mmc_set_clock(host, max_dtr);
-
 	/* Switch HS400 to HS DDR */
 	val = EXT_CSD_TIMING_HS;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
@@ -1289,6 +1285,10 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
 
 	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
 
+	/* Reduce frequency to HS */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
 	err = mmc_switch_status(card);
 	if (err)
 		goto out_err;
@@ -2252,6 +2252,27 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
 	return err;
 }
 
+int mmc_send_pon(struct mmc_card *card) /* send power-off notification (long/short) before shutdown */
+{
+	int err = 0;
+	struct mmc_host *host = card->host;
+
+	if (!mmc_can_poweroff_notify(card))
+		goto out;
+
+	mmc_get_card(card, NULL);
+	if (card->pon_type & MMC_LONG_PON)
+		err = mmc_poweroff_notify(card, EXT_CSD_POWER_OFF_LONG); /* use the card argument, not host->card */
+	else if (card->pon_type & MMC_SHRT_PON)
+		err = mmc_poweroff_notify(card, EXT_CSD_POWER_OFF_SHORT);
+	if (err)
+		pr_warn("%s: error %d sending PON type %u\n",
+			mmc_hostname(host), err, card->pon_type);
+	mmc_put_card(card, NULL);
+out:
+	return err;
+}
+
 /*
  * Host is being removed. Free up the current card.
  */
@@ -2259,7 +2280,9 @@ static void mmc_remove(struct mmc_host *host)
 {
 	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -2398,12 +2421,37 @@ static int _mmc_resume(struct mmc_host *host)
 	return err;
 }
 
+static int _mmc_deferred_resume(struct mmc_host *host) /* re-init the eMMC card on deferred resume */
+{
+	int err = 0;
+
+	if (!mmc_card_suspended(host->card)) {
+		mmc_release_host(host); /* NOTE(review): host released only on this path — confirm claim/release pairing with callers */
+		goto out;
+	}
+
+	mmc_log_string(host, "Enter\n");
+	mmc_power_up(host, host->card->ocr);
+	err = mmc_init_card(host, host->card->ocr, host->card);
+	mmc_card_clr_suspended(host->card); /* cleared even when init failed — TODO confirm intended */
+
+	mmc_log_string(host, "Exit err %d\n", err);
+
+	err = mmc_resume_clk_scaling(host); /* NOTE(review): overwrites init err — only clk-scaling status is returned */
+	if (err)
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+out:
+	return err;
+}
+
 /*
  * Shutdown callback
  */
 static int mmc_shutdown(struct mmc_host *host)
 {
 	int err = 0;
+	struct mmc_card *card = host->card;
 
 	/*
 	 * In a specific case for poweroff notify, we need to resume the card
@@ -2420,8 +2468,10 @@ static int mmc_shutdown(struct mmc_host *host)
 	if (host->caps2 & MMC_CAP2_CLK_SCALE)
 		mmc_exit_clk_scaling(host);
 
-	if (!err)
-		err = _mmc_suspend(host, false);
+	/* send power off notification */
+	if (mmc_card_mmc(card))
+		mmc_send_pon(card);
+
 	mmc_log_string(host, "done err %d\n", err);
 	return err;
 }
@@ -2431,9 +2481,33 @@ static int mmc_shutdown(struct mmc_host *host)
  */
 static int mmc_resume(struct mmc_host *host)
 {
+	int err = 0;
+
+	err = _mmc_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
 	pm_runtime_enable(&host->card->dev);
+
 	mmc_log_string(host, "Done\n");
-	return 0;
+
+	return err;
+}
+
+/*
+ * Callback for deferred resume.
+ */
+static int mmc_deferred_resume(struct mmc_host *host) /* bus_ops .deferred_resume: resume card, then re-enable runtime PM */
+{
+	int err = 0;
+
+	err = _mmc_deferred_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
+	pm_runtime_enable(&host->card->dev);
+
+	mmc_log_string(host, "Done\n");
+
+	return err;
+}
 
 /*
@@ -2518,6 +2592,7 @@ static const struct mmc_bus_ops mmc_ops = {
 	.detect = mmc_detect,
 	.suspend = mmc_suspend,
 	.resume = mmc_resume,
+	.deferred_resume = mmc_deferred_resume,
 	.runtime_suspend = mmc_runtime_suspend,
 	.runtime_resume = mmc_runtime_resume,
 	.alive = mmc_alive,
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 873b2aa..87aead4 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -819,7 +819,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
 
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err) {
-		pr_warn("%s: error %d interrupting operation. "
+		pr_debug("%s: error %d interrupting operation. "
 			"HPI command response %#x\n", mmc_hostname(card->host),
 			err, cmd.resp[0]);
 		return err;
@@ -875,8 +875,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
 	}
 
 	err = mmc_send_hpi_cmd(card, &status);
-	if (err)
-		goto out;
 
 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
 	do {
@@ -884,8 +882,13 @@ int mmc_interrupt_hpi(struct mmc_card *card)
 
 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 			break;
-		if (time_after(jiffies, prg_wait))
-			err = -ETIMEDOUT;
+		if (time_after(jiffies, prg_wait)) {
+			err = mmc_send_status(card, &status);
+			if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
+				err = -ETIMEDOUT;
+			else
+				break;
+		}
 	} while (!err);
 
 out:
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index ef18dae..4fd9ebf 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3114,7 +3114,8 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
 	}
 
 #ifdef CONFIG_HIGHMEM
-	__free_pages(test->highmem, BUFFER_ORDER);
+	if (test->highmem)
+		__free_pages(test->highmem, BUFFER_ORDER);
 #endif
 	kfree(test->buffer);
 	kfree(test);
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index efb8a79..154f420 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
 
 #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
 
-static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
-{
-	gpiod_set_value(pwrseq->reset_gpio, 1);
-	udelay(1);
-	gpiod_set_value(pwrseq->reset_gpio, 0);
-	udelay(200);
-}
-
 static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
 {
 	struct mmc_pwrseq_emmc *pwrseq =  to_pwrseq_emmc(host->pwrseq);
 
-	__mmc_pwrseq_emmc_reset(pwrseq);
+	gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+	udelay(1);
+	gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+	udelay(200);
 }
 
 static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
@@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
 {
 	struct mmc_pwrseq_emmc *pwrseq = container_of(this,
 					struct mmc_pwrseq_emmc, reset_nb);
+	gpiod_set_value(pwrseq->reset_gpio, 1);
+	udelay(1);
+	gpiod_set_value(pwrseq->reset_gpio, 0);
+	udelay(200);
 
-	__mmc_pwrseq_emmc_reset(pwrseq);
 	return NOTIFY_DONE;
 }
 
@@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
 	if (IS_ERR(pwrseq->reset_gpio))
 		return PTR_ERR(pwrseq->reset_gpio);
 
-	/*
-	 * register reset handler to ensure emmc reset also from
-	 * emergency_reboot(), priority 255 is the highest priority
-	 * so it will be executed before any system reboot handler.
-	 */
-	pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
-	pwrseq->reset_nb.priority = 255;
-	register_restart_handler(&pwrseq->reset_nb);
+	if (!gpiod_cansleep(pwrseq->reset_gpio)) {
+		/*
+		 * register reset handler to ensure emmc reset also from
+		 * emergency_reboot(), priority 255 is the highest priority
+		 * so it will be executed before any system reboot handler.
+		 */
+		pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+		pwrseq->reset_nb.priority = 255;
+		register_restart_handler(&pwrseq->reset_nb);
+	} else {
+		dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
+	}
 
 	pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
 	pwrseq->pwrseq.dev = dev;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index cb036f5..13ed3ac 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -206,8 +206,11 @@ static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
 			      gfp_t gfp)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
-	struct mmc_card *card = mq->card;
-	struct mmc_host *host = card->host;
+	struct mmc_host *host;
+
+	if (!mq)
+		return -ENODEV;
+	host = mq->card->host;
 
 	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
 	if (!mq_rq->sg)
@@ -500,7 +503,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	if (blk_queue_quiesced(q))
 		blk_mq_unquiesce_queue(q);
 
-	blk_cleanup_queue(q);
+	if (likely(!blk_queue_dead(q)))
+		blk_cleanup_queue(q);
 	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index de7ec75..0fe8455a 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -216,6 +216,14 @@ static int mmc_decode_scr(struct mmc_card *card)
 
 	if (scr->sda_spec3)
 		scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+
+	/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
+	if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
+	    !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
+		pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -1066,6 +1074,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
 		err = mmc_send_relative_addr(host, &card->rca);
 		if (err)
 			goto free_card;
+		host->card = card;
 	}
 
 	if (!oldcard) {
@@ -1170,12 +1179,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
 	card->clk_scaling_highest = mmc_sd_get_max_clock(card);
 	card->clk_scaling_lowest = host->f_min;
 
-	host->card = card;
 	return 0;
 
 free_card:
-	if (!oldcard)
+	if (!oldcard) {
+		host->card = NULL;
 		mmc_remove_card(card);
+	}
 
 	return err;
 }
@@ -1187,7 +1197,10 @@ static void mmc_sd_remove(struct mmc_host *host)
 {
 	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -1266,7 +1279,9 @@ static int mmc_sd_suspend(struct mmc_host *host)
 	if (!err) {
 		pm_runtime_disable(&host->card->dev);
 		pm_runtime_set_suspended(&host->card->dev);
-	}
+	/* if suspend fails, force mmc_detect_change during resume */
+	} else if (mmc_bus_manual_resume(host))
+		host->ignore_bus_resume_flags = true;
 
 	return err;
 }
@@ -1287,8 +1302,15 @@ static int _mmc_sd_resume(struct mmc_host *host)
 
 	mmc_power_up(host, host->card->ocr);
 	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+	if (err == -ENOENT) {
+		pr_debug("%s: %s: found a different card(%d), do detect change\n",
+			mmc_hostname(host), __func__, err);
+		mmc_card_set_removed(host->card);
+		mmc_detect_change(host, msecs_to_jiffies(200));
+	} else if (err) {
+		goto out;
+	}
 	mmc_card_clr_suspended(host->card);
-
 	err = mmc_resume_clk_scaling(host);
 	if (err) {
 		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
@@ -1301,18 +1323,71 @@ static int _mmc_sd_resume(struct mmc_host *host)
 	return err;
 }
 
+static int _mmc_sd_deferred_resume(struct mmc_host *host)
+{
+	int err = 0;
+
+	mmc_log_string(host, "Enter\n");
+
+	if (!mmc_card_suspended(host->card))
+		goto out;
+
+	mmc_power_up(host, host->card->ocr);
+	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+	if (err == -ENOENT) {
+		pr_debug("%s: %s: found a different card(%d), do detect change\n",
+			mmc_hostname(host), __func__, err);
+		mmc_card_set_removed(host->card);
+		mmc_detect_change(host, msecs_to_jiffies(200));
+	} else if (err) {
+		goto out;
+	}
+	mmc_card_clr_suspended(host->card);
+	err = mmc_resume_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+out:
+	mmc_log_string(host, "Exit err: %d\n", err);
+	return err;
+}
+
 /*
  * Callback for resume
  */
 static int mmc_sd_resume(struct mmc_host *host)
 {
+	int err = 0;
+
+	err = _mmc_sd_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
 	pm_runtime_enable(&host->card->dev);
 	mmc_log_string(host, "done\n");
 
-	return 0;
+	return err;
 }
 
 /*
+ * Callback for deferred resume
+ */
+static int mmc_sd_deferred_resume(struct mmc_host *host)
+{
+	int err = 0;
+
+	err = _mmc_sd_deferred_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
+	pm_runtime_enable(&host->card->dev);
+	mmc_log_string(host, "done\n");
+
+	return err;
+}
+
+
+/*
  * Callback for runtime_suspend.
  */
 static int mmc_sd_runtime_suspend(struct mmc_host *host)
@@ -1358,8 +1433,8 @@ static const struct mmc_bus_ops mmc_sd_ops = {
 	.runtime_resume = mmc_sd_runtime_resume,
 	.suspend = mmc_sd_suspend,
 	.resume = mmc_sd_resume,
+	.deferred_resume = mmc_sd_deferred_resume,
 	.alive = mmc_sd_alive,
-	.shutdown = mmc_sd_suspend,
 	.hw_reset = mmc_sd_hw_reset,
 	.change_bus_speed = mmc_sd_change_bus_speed,
 	.change_bus_speed_deferred = mmc_sd_change_bus_speed_deferred,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 67f6bd2..ea254d00 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
 	}
 
 	status = spi_sync_locked(spi, &host->m);
+	if (status < 0) {
+		dev_dbg(&spi->dev, "read error %d\n", status);
+		return status;
+	}
 
 	if (host->dma_dev) {
 		dma_sync_single_for_cpu(host->dma_dev,
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c61109f..57c1ec3 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -247,7 +247,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
 static bool sdhci_acpi_byt(void)
 {
 	static const struct x86_cpu_id byt[] = {
-		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
 		{}
 	};
 
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8dae12b..629860f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -429,7 +429,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
 				val = readl(host->ioaddr + ESDHC_MIX_CTRL);
 			else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
 				/* the std tuning bits is in ACMD12_ERR for imx6sl */
-				val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+				val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 		}
 
 		if (val & ESDHC_MIX_CTRL_EXE_TUNE)
@@ -494,7 +494,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
 			}
 			writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
 		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
-			u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+			u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
 			if (val & SDHCI_CTRL_TUNED_CLK) {
 				v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
@@ -512,7 +512,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
 				v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
 			}
 
-			writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
+			writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 			writel(m, host->ioaddr + ESDHC_MIX_CTRL);
 		}
 		return;
@@ -957,9 +957,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
 			writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
 			writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
 		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
-			ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+			ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 			ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
-			writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
+			writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 		}
 	}
 }
@@ -1319,7 +1319,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 
 		/* clear tuning bits in case ROM has set it already */
 		writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
-		writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
+		writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
 		writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
 	}
 
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 94eeed2..f903ab9 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -185,7 +185,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
 };
 
 static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
-	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+		  SDHCI_QUIRK_NO_HISPD_BIT,
 	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
 	.ops = &sdhci_iproc_32only_ops,
 };
@@ -208,7 +209,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
 
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
 	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
-		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
+		  SDHCI_QUIRK_NO_HISPD_BIT,
 	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
 	.ops = &sdhci_iproc_ops,
 };
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index 13611e6..3bbb5b3 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -448,7 +448,8 @@ int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq)
 	req = mrq->req;
 	if (req) {
 		if (msm_host->ice.vops->config_end) {
-			err = msm_host->ice.vops->config_end(req);
+			err = msm_host->ice.vops->config_end(
+					msm_host->ice.pdev, req);
 			if (err) {
 				pr_err("%s: ice config end failed %d\n",
 						mmc_hostname(host->mmc), err);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0ead396..b5f1d2c 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -167,6 +167,8 @@
 #define MAX_DRV_TYPES_SUPPORTED_HS200	4
 #define MSM_AUTOSUSPEND_DELAY_MS 100
 
+#define RCLK_TOGGLE 0x2
+
 struct sdhci_msm_offset {
 	u32 CORE_MCI_DATA_CNT;
 	u32 CORE_MCI_STATUS;
@@ -3300,7 +3302,7 @@ static void sdhci_msm_registers_save(struct sdhci_host *host)
 	msm_host->regs_restore.hc_2c_2e =
 		sdhci_readl(host, SDHCI_CLOCK_CONTROL);
 	msm_host->regs_restore.hc_3c_3e =
-		sdhci_readl(host, SDHCI_ACMD12_ERR);
+		sdhci_readl(host, SDHCI_AUTO_CMD_STATUS);
 	msm_host->regs_restore.vendor_pwrctl_ctl =
 		readl_relaxed(host->ioaddr +
 		msm_host_offset->CORE_PWRCTL_CTL);
@@ -3363,7 +3365,7 @@ static void sdhci_msm_registers_restore(struct sdhci_host *host)
 	sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
 			SDHCI_CLOCK_CONTROL);
 	sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
-			SDHCI_ACMD12_ERR);
+			SDHCI_AUTO_CMD_STATUS);
 	sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
 			SDHCI_SIGNAL_ENABLE);
 	sdhci_writel(host, msm_host->regs_restore.hc_34_36,
@@ -3723,6 +3725,33 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
 					| CORE_HC_SELECT_IN_EN), host->ioaddr +
 					msm_host_offset->CORE_VENDOR_SPEC);
 		}
+		/*
+		 * After MCLK ungating, toggle the FIFO write clock to get
+		 * the FIFO pointers and flags to valid state.
+		 */
+		if (msm_host->tuning_done ||
+				(card && mmc_card_strobe(card) &&
+				msm_host->enhanced_strobe)) {
+			/*
+			 * set HC_REG_DLL_CONFIG_3[1] to select MCLK as
+			 * DLL input clock
+			 */
+			writel_relaxed(((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_3))
+				| RCLK_TOGGLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_3);
+			/* ensure the above write completes, as the same bit is toggled again quickly */
+			wmb();
+			udelay(2);
+			/*
+			 * clear HC_REG_DLL_CONFIG_3[1] to select RCLK as
+			 * DLL input clock
+			 */
+			writel_relaxed(((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_3))
+				& ~RCLK_TOGGLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_3);
+		}
 		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
 			/*
 			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index a40bcc2..7fdac27 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -814,7 +814,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 		host->mmc_host_ops.start_signal_voltage_switch =
 					sdhci_arasan_voltage_switch;
 		sdhci_arasan->has_cqe = true;
-		host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+		host->mmc->caps2 |= MMC_CAP2_CQE;
+
+		if (!of_property_read_bool(np, "disable-cqe-dcmd"))
+			host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
 	}
 
 	ret = sdhci_arasan_add_host(sdhci_arasan);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index a7bf851..e5c598ae 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -643,6 +643,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 
+	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
+		mdelay(5);
+
 	if (mask & SDHCI_RESET_ALL) {
 		val = sdhci_readl(host, ESDHC_TBCTL);
 		val &= ~ESDHC_TB_EN;
@@ -917,6 +920,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
 	if (esdhc->vendor_ver > VENDOR_V_22)
 		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
 
+	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+		host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+		host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+	}
+
 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 2dc2678..e6974ac 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -115,11 +115,11 @@ void sdhci_dumpregs(struct sdhci_host *host)
 	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 	       sdhci_readl(host, SDHCI_INT_STATUS));
 	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
-	       sdhci_readl(host, SDHCI_INT_ENABLE),
-	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
-	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
-		   sdhci_readw(host, SDHCI_ACMD12_ERR),
-	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+		   sdhci_readl(host, SDHCI_INT_ENABLE),
+		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
+		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
+		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
 	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
 	       sdhci_readl(host, SDHCI_CAPABILITIES),
 	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
@@ -327,7 +327,7 @@ static void sdhci_set_default_irqs(struct sdhci_host *host)
 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
-		    SDHCI_INT_RESPONSE | SDHCI_INT_ACMD12ERR;
+		    SDHCI_INT_RESPONSE | SDHCI_INT_AUTO_CMD_ERR;
 
 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
@@ -946,6 +946,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 	else
 		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
 
+	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
+		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
+	else
+		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
+
 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
@@ -1218,8 +1223,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
 	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
 		((mrq->cmd && mrq->cmd->error) ||
 		 (mrq->sbc && mrq->sbc->error) ||
-		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
-				(mrq->data->stop && mrq->data->stop->error))) ||
+		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
 		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
 }
 
@@ -1273,6 +1277,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
 
 	mmc_log_string(host->mmc, "PRESENT_STATE=0x%08x\n",
 		sdhci_readl(host, SDHCI_PRESENT_STATE));
+	/*
+	 * The controller needs a reset of internal state machines upon error
+	 * conditions.
+	 */
+	if (data->error) {
+		if (!host->cmd || host->cmd == data_cmd)
+			sdhci_do_reset(host, SDHCI_RESET_CMD);
+		sdhci_do_reset(host, SDHCI_RESET_DATA);
+	}
+
 	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
 	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
 		sdhci_adma_table_post(host, data);
@@ -1297,17 +1311,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
 	if (data->stop &&
 	    (data->error ||
 	     !data->mrq->sbc)) {
-
-		/*
-		 * The controller needs a reset of internal state machines
-		 * upon error conditions.
-		 */
-		if (data->error) {
-			if (!host->cmd || host->cmd == data_cmd)
-				sdhci_do_reset(host, SDHCI_RESET_CMD);
-			sdhci_do_reset(host, SDHCI_RESET_DATA);
-		}
-
 		/*
 		 * 'cap_cmd_during_tfr' request must not use the command line
 		 * after mmc_command_done() has been called. It is upper layer's
@@ -2789,9 +2792,15 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	}
 
 	if (host->ops->platform_execute_tuning) {
+		/*
+		 * Make sure re-tuning won't get triggered for the CRC errors
+		 * occurred while executing tuning
+		 */
+		mmc_retune_disable(mmc);
 		err = host->ops->platform_execute_tuning(host, opcode);
-			goto out;
-		}
+		mmc_retune_enable(mmc);
+		goto out;
+	}
 
 	host->mmc->retune_period = tuning_count;
 
@@ -3171,9 +3180,25 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
  *                                                                           *
 \*****************************************************************************/
 
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 {
 	u16 auto_cmd_status;
+
+	/* Handle auto-CMD12 error */
+	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
+		struct mmc_request *mrq = host->data_cmd->mrq;
+		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
+		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
+				   SDHCI_INT_DATA_TIMEOUT :
+				   SDHCI_INT_DATA_CRC;
+
+		/* Treat auto-CMD12 error the same as data error */
+		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
+			*intmask_p |= data_err_bit;
+			return;
+		}
+	}
+
 	if (!host->cmd) {
 		/*
 		 * SDHCI recovers from errors by resetting the cmd and data
@@ -3196,7 +3221,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 
 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX |
-		       SDHCI_INT_ACMD12ERR)) {
+		       SDHCI_INT_AUTO_CMD_ERR)) {
 		if (intmask & SDHCI_INT_TIMEOUT) {
 			host->cmd->error = -ETIMEDOUT;
 			host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
@@ -3205,31 +3230,23 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 			host->mmc->err_stats[MMC_ERR_CMD_CRC]++;
 		}
 
-		if (intmask & SDHCI_INT_ACMD12ERR) {
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
 			auto_cmd_status = host->auto_cmd_err_sts;
 			host->mmc->err_stats[MMC_ERR_AUTO_CMD]++;
 			pr_err_ratelimited("%s: %s: AUTO CMD err sts 0x%08x\n",
 				mmc_hostname(host->mmc), __func__,
 					auto_cmd_status);
 			if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
-					       SDHCI_AUTO_CMD_INDEX_ERR |
-					       SDHCI_AUTO_CMD_ENDBIT_ERR))
+					       SDHCI_AUTO_CMD_INDEX |
+					       SDHCI_AUTO_CMD_END_BIT))
 				host->cmd->error = -EIO;
-			else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT)
 				host->cmd->error = -ETIMEDOUT;
-			else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC)
 				host->cmd->error = -EILSEQ;
 		}
 
-		/*
-		 * If this command initiates a data phase and a response
-		 * CRC error is signalled, the card can start transferring
-		 * data - the card may have received the command without
-		 * error.  We must not terminate the mmc_request early.
-		 *
-		 * If the card did not receive the command or returned an
-		 * error which prevented it sending data, the data phase
-		 * will time out.
+		/* Treat data command CRC error the same as data CRC error
 		 *
 		 * Even in case of cmd INDEX OR ENDBIT error we
 		 * handle it the same way.
@@ -3238,6 +3255,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 		    (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
 		     SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
 			host->cmd = NULL;
+			*intmask_p |= SDHCI_INT_DATA_CRC;
 			return;
 		}
 
@@ -3245,6 +3263,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 		return;
 	}
 
+	/* Handle auto-CMD23 error */
+	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+		struct mmc_request *mrq = host->cmd->mrq;
+		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
+		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
+			  -ETIMEDOUT :
+			  -EILSEQ;
+
+		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+			mrq->sbc->error = err;
+			sdhci_finish_mrq(host, mrq);
+			return;
+		}
+	}
+
 	if (intmask & SDHCI_INT_RESPONSE)
 		sdhci_finish_command(host);
 }
@@ -3305,13 +3338,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		 * above in sdhci_cmd_irq().
 		 */
 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
-			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
-				host->data_cmd = NULL;
-				data_cmd->error = -ETIMEDOUT;
-				host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
-				sdhci_finish_mrq(host, data_cmd->mrq);
-				return;
-			}
 			if (intmask & SDHCI_INT_DATA_END) {
 				host->data_cmd = NULL;
 				/*
@@ -3507,9 +3533,9 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 		}
 		mmc_log_string(host->mmc, "intmask: 0x%x\n", intmask);
 
-		if (intmask & SDHCI_INT_ACMD12ERR)
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR)
 			host->auto_cmd_err_sts = sdhci_readw(host,
-			SDHCI_ACMD12_ERR);
+			SDHCI_AUTO_CMD_STATUS);
 
 		/* Clear selected interrupts. */
 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
@@ -3550,7 +3576,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
 			    (host->clock <= 400000))
 				udelay(40);
-			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
 		}
 
 		if (intmask & SDHCI_INT_DATA_MASK) {
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bc5431..708e201 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -144,7 +144,7 @@
 #define  SDHCI_INT_DATA_CRC	0x00200000
 #define  SDHCI_INT_DATA_END_BIT	0x00400000
 #define  SDHCI_INT_BUS_POWER	0x00800000
-#define  SDHCI_INT_ACMD12ERR	0x01000000
+#define  SDHCI_INT_AUTO_CMD_ERR	0x01000000
 #define  SDHCI_INT_ADMA_ERROR	0x02000000
 
 #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
@@ -152,8 +152,7 @@
 
 #define  SDHCI_INT_CMD_MASK	(SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
 		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
-				SDHCI_INT_ACMD12ERR)
-
+		SDHCI_INT_AUTO_CMD_ERR)
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
@@ -170,12 +169,13 @@
 
 #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
 
-#define SDHCI_ACMD12_ERR		0x3C
+#define SDHCI_AUTO_CMD_STATUS	0x3C
+
 #define SDHCI_AUTO_CMD12_NOT_EXEC	0x0001
-#define SDHCI_AUTO_CMD_TIMEOUT_ERR	0x0002
-#define SDHCI_AUTO_CMD_CRC_ERR		0x0004
-#define SDHCI_AUTO_CMD_ENDBIT_ERR	0x0008
-#define SDHCI_AUTO_CMD_INDEX_ERR	0x0010
+#define  SDHCI_AUTO_CMD_TIMEOUT	0x00000002
+#define  SDHCI_AUTO_CMD_CRC	0x00000004
+#define  SDHCI_AUTO_CMD_END_BIT	0x00000008
+#define  SDHCI_AUTO_CMD_INDEX	0x00000010
 #define SDHCI_AUTO_CMD12_NOT_ISSUED	0x0080
 
 #define SDHCI_HOST_CONTROL2		0x3E
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index af0a220..d60cbf2 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
 	while (len > 0) {
 		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
 
+		/* Read cannot cross 4K boundary */
+		block_size = min_t(loff_t, from + block_size,
+				   round_up(from + 1, SZ_4K)) - from;
+
 		writel(from, ispi->base + FADDR);
 
 		val = readl(ispi->base + HSFSTS_CTL);
@@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
 	while (len > 0) {
 		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
 
+		/* Write cannot cross 4K boundary */
+		block_size = min_t(loff_t, to + block_size,
+				   round_up(to + 1, SZ_4K)) - to;
+
 		writel(to, ispi->base + FADDR);
 
 		val = readl(ispi->base + HSFSTS_CTL);
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 21cde7e..0d3ba05 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,7 +40,7 @@
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_NET_DSA) += dsa/
+obj-y += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
 obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2c42ca..091b454 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3198,8 +3198,12 @@ static int bond_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;
 
 	if (event_dev->flags & IFF_MASTER) {
+		int ret;
+
 		netdev_dbg(event_dev, "IFF_MASTER\n");
-		return bond_master_netdev_event(event, event_dev);
+		ret = bond_master_netdev_event(event, event_dev);
+		if (ret != NOTIFY_DONE)
+			return ret;
 	}
 
 	if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4d5d01c..80867bd 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1098,13 +1098,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
 {
 	netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
 		   newval->string, newval->value);
-
-	if (bond->dev->flags & IFF_UP) {
-		if (!newval->value)
-			bond->recv_probe = NULL;
-		else if (bond->params.arp_interval)
-			bond->recv_probe = bond_arp_rcv;
-	}
 	bond->params.arp_validate = newval->value;
 
 	return 0;
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2..4985268 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-	return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+	return sprintf(buf, "%*phC\n",
+		       slave->dev->addr_len,
+		       slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
 
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f27..21db180 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -742,6 +742,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 	     fs->m_ext.data[1]))
 		return -EINVAL;
 
+	if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+		return -EINVAL;
+
 	if (fs->location != RX_CLS_LOC_ANY &&
 	    test_bit(fs->location, priv->cfp.used))
 		return -EBUSY;
@@ -836,6 +839,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
 	u32 next_loc = 0;
 	int ret;
 
+	if (loc >= CFP_NUM_RULES)
+		return -EINVAL;
+
 	/* Refuse deleting unused rules, and those that are not unique since
 	 * that could leave IPv6 rules with one of the chained rule in the
 	 * table.
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index dabe899..2caa5c0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -4821,6 +4821,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 	if (err)
 		goto out;
 
+	mv88e6xxx_ports_cmode_init(chip);
 	mv88e6xxx_phy_init(chip);
 
 	if (chip->info->ops->get_eeprom) {
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 7fffce7..fdeddbf 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -379,18 +379,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		return 0;
 
 	lane = mv88e6390x_serdes_get_lane(chip, port);
-	if (lane < 0)
+	if (lane < 0 && lane != -ENODEV)
 		return lane;
 
-	if (chip->ports[port].serdes_irq) {
-		err = mv88e6390_serdes_irq_disable(chip, port, lane);
+	if (lane >= 0) {
+		if (chip->ports[port].serdes_irq) {
+			err = mv88e6390_serdes_irq_disable(chip, port, lane);
+			if (err)
+				return err;
+		}
+
+		err = mv88e6390x_serdes_power(chip, port, false);
 		if (err)
 			return err;
 	}
 
-	err = mv88e6390x_serdes_power(chip, port, false);
-	if (err)
-		return err;
+	chip->ports[port].cmode = 0;
 
 	if (cmode) {
 		err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -404,6 +408,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		if (err)
 			return err;
 
+		chip->ports[port].cmode = cmode;
+
+		lane = mv88e6390x_serdes_get_lane(chip, port);
+		if (lane < 0)
+			return lane;
+
 		err = mv88e6390x_serdes_power(chip, port, true);
 		if (err)
 			return err;
@@ -415,8 +425,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		}
 	}
 
-	chip->ports[port].cmode = cmode;
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 1b5f591..b5d7281 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2223,7 +2223,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
 	host_info->os_type = ENA_ADMIN_OS_LINUX;
 	host_info->kernel_ver = LINUX_VERSION_CODE;
-	strncpy(host_info->kernel_ver_str, utsname()->version,
+	strlcpy(host_info->kernel_ver_str, utsname()->version,
 		sizeof(host_info->kernel_ver_str) - 1);
 	host_info->os_dist = 0;
 	strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 581ad0a..de46331 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1584,7 +1584,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 			netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
 			bnxt_sched_reset(bp, rxr);
 		}
-		goto next_rx;
+		goto next_rx_no_len;
 	}
 
 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1665,12 +1665,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	rc = 1;
 
 next_rx:
-	rxr->rx_prod = NEXT_RX(prod);
-	rxr->rx_next_cons = NEXT_RX(cons);
-
 	cpr->rx_packets += 1;
 	cpr->rx_bytes += len;
 
+next_rx_no_len:
+	rxr->rx_prod = NEXT_RX(prod);
+	rxr->rx_next_cons = NEXT_RX(cons);
+
 next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
 
@@ -7441,8 +7442,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 
 skip_uc:
 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc && vnic->mc_list_count) {
+		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+			    rc);
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	}
 	if (rc)
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
 			   rc);
 
 	return rc;
@@ -9077,6 +9085,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+	bnxt_free_hwrm_short_cmd_req(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_cleanup_pci(bp);
 
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8abea1c..74eeb3a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2419,12 +2419,12 @@ static int macb_open(struct net_device *dev)
 		return err;
 	}
 
-	bp->macbgem_ops.mog_init_rings(bp);
-	macb_init_hw(bp);
-
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 		napi_enable(&queue->napi);
 
+	bp->macbgem_ops.mog_init_rings(bp);
+	macb_init_hw(bp);
+
 	/* schedule a link state check */
 	phy_start(dev->phydev);
 
@@ -3323,14 +3323,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
 		*hclk = devm_clk_get(&pdev->dev, "hclk");
 	}
 
-	if (IS_ERR(*pclk)) {
+	if (IS_ERR_OR_NULL(*pclk)) {
 		err = PTR_ERR(*pclk);
+		if (!err)
+			err = -ENODEV;
+
 		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
 		return err;
 	}
 
-	if (IS_ERR(*hclk)) {
+	if (IS_ERR_OR_NULL(*hclk)) {
 		err = PTR_ERR(*hclk);
+		if (!err)
+			err = -ENODEV;
+
 		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
 		return err;
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 9800738..dca02b3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
 #define DRV_NAME	"nicvf"
 #define DRV_VERSION	"1.0"
 
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU	(1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1547,6 +1554,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 	struct nicvf *nic = netdev_priv(netdev);
 	int orig_mtu = netdev->mtu;
 
+	/* For now just support only the usual MTU sized frames,
+	 * plus some headroom for VLAN, QinQ.
+	 */
+	if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+			    netdev->mtu);
+		return -EINVAL;
+	}
+
 	netdev->mtu = new_mtu;
 
 	if (!netif_running(netdev))
@@ -1795,8 +1811,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	bool bpf_attached = false;
 	int ret = 0;
 
-	/* For now just support only the usual MTU sized frames */
-	if (prog && (dev->mtu > 1500)) {
+	/* For now just support only the usual MTU sized frames,
+	 * plus some headroom for VLAN, QinQ.
+	 */
+	if (prog && dev->mtu > MAX_XDP_MTU) {
 		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c2fd323..ea75f27 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -75,8 +75,8 @@ struct l2t_data {
 	struct l2t_entry *rover;	/* starting point for next allocation */
 	atomic_t nfree;		/* number of free entries */
 	rwlock_t lock;
-	struct l2t_entry l2tab[0];
 	struct rcu_head rcu_head;	/* to handle rcu cleanup */
+	struct l2t_entry l2tab[];
 };
 
 typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 961e3087..bb04c69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6010,15 +6010,24 @@ static int __init cxgb4_init_module(void)
 
 	ret = pci_register_driver(&cxgb4_driver);
 	if (ret < 0)
-		debugfs_remove(cxgb4_debugfs_root);
+		goto err_pci;
 
 #if IS_ENABLED(CONFIG_IPV6)
 	if (!inet6addr_registered) {
-		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
-		inet6addr_registered = true;
+		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+		if (ret)
+			pci_unregister_driver(&cxgb4_driver);
+		else
+			inet6addr_registered = true;
 	}
 #endif
 
+	if (ret == 0)
+		return ret;
+
+err_pci:
+	debugfs_remove(cxgb4_debugfs_root);
+
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be..e291900 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
 		ppmax = max;
 
 	/* pool size must be multiple of unsigned long */
-	bmap = BITS_TO_LONGS(ppmax);
+	bmap = ppmax / BITS_PER_TYPE(unsigned long);
+	if (!bmap)
+		return NULL;
+
 	ppmax = (bmap * sizeof(unsigned long)) << 3;
 
 	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
 	if (reserve_factor) {
 		ppmax_pool = ppmax / reserve_factor;
 		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+		if (!pool) {
+			ppmax_pool = 0;
+			reserve_factor = 0;
+		}
 
 		pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
 			 ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 029730b..d7915cd 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
 		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i < nr_frags; i++) {
+		for (i = 1; i <= nr_frags; i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
 			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7b98bb7..ad41ace 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1850,13 +1850,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 	int ret;
 
 	if (enable) {
-		ret = clk_prepare_enable(fep->clk_ahb);
-		if (ret)
-			return ret;
-
 		ret = clk_prepare_enable(fep->clk_enet_out);
 		if (ret)
-			goto failed_clk_enet_out;
+			return ret;
 
 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1876,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 
 		phy_reset_after_clk_enable(ndev->phydev);
 	} else {
-		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_enet_out);
 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1895,8 +1890,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 failed_clk_ptp:
 	if (fep->clk_enet_out)
 		clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-		clk_disable_unprepare(fep->clk_ahb);
 
 	return ret;
 }
@@ -3485,6 +3478,9 @@ fec_probe(struct platform_device *pdev)
 	ret = clk_prepare_enable(fep->clk_ipg);
 	if (ret)
 		goto failed_clk_ipg;
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;
 
 	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
@@ -3578,6 +3574,9 @@ fec_probe(struct platform_device *pdev)
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 failed_regulator:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
 	fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3701,6 +3700,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
 
 	return 0;
@@ -3710,8 +3710,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
 
-	return clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	return 0;
+
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+	return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 0beee2c..722b6de 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -252,14 +252,12 @@ uec_set_ringparam(struct net_device *netdev,
 		return -EINVAL;
 	}
 
+	if (netif_running(netdev))
+		return -EBUSY;
+
 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
 	ug_info->bdRingLenTx[queue] = ring->tx_pending;
 
-	if (netif_running(netdev)) {
-		/* FIXME: restart automatically */
-		netdev_info(netdev, "Please re-open the interface\n");
-	}
-
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8..c7fa97a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ static int hnae_alloc_buffers(struct hnae_ring *ring)
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-	hnae_free_buffers(ring);
 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
 			 ring->desc_num * sizeof(ring->desc[0]),
 			 ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+	if (is_rx_ring(ring))
+		hnae_free_buffers(ring);
+
 	hnae_free_desc(ring);
 	kfree(ring->desc_cb);
 	ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index b8155f5..fdff552 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2750,6 +2750,17 @@ int hns_dsaf_get_regs_count(void)
 	return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+	if (port < DSAF_SERVICE_NW_NUM)
+		return port;
+
+	if (port >= DSAF_BASE_INNER_PORT_NUM)
+		return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+	return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
 	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2826,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 	memset(&temp_key, 0x0, sizeof(temp_key));
 	mask_entry.addr[0] = 0x01;
 	hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-			     port, mask_entry.addr);
+			     0xf, mask_entry.addr);
 	tbl_tcam_mcast.tbl_mcast_item_vld = 1;
 	tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-	if (port < DSAF_SERVICE_NW_NUM) {
-		mskid = port;
-	} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-		mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-	} else {
+	/* set MAC port to handle multicast */
+	mskid = hns_dsaf_get_port_id(port);
+	if (mskid == -EINVAL) {
 		dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
 			dsaf_dev->ae_dev.name, port,
 			mask_key.high.val, mask_key.low.val);
 		return;
 	}
-
 	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
 		     mskid % 32, 1);
+
+	/* set pool bit map to handle multicast */
+	mskid = hns_dsaf_get_port_id(port_num);
+	if (mskid == -EINVAL) {
+		dev_err(dsaf_dev->dev,
+			"%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, port_num,
+			mask_key.high.val, mask_key.low.val);
+		return;
+	}
+	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+		     mskid % 32, 1);
+
 	memcpy(&temp_key, &mask_key, sizeof(mask_key));
 	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
 				   (struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba43169..a60f207 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
 	dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
 	dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
 	dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-	dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+	dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index b043370..1c70f9a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
 	wmb(); /* commit all data before submit */
 	assert(skb->queue_mapping < priv->ae_handle->q_num);
 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-	ring->stats.tx_pkts++;
-	ring->stats.tx_bytes += skb->len;
 
 	return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
 		/* issue prefetch for next Tx descriptor */
 		prefetch(&ring->desc_cb[ring->next_to_clean]);
 	}
+	/* update tx ring statistics. */
+	ring->stats.tx_pkts += pkts;
+	ring->stats.tx_bytes += bytes;
 
 	NETIF_TX_UNLOCK(ring);
 
@@ -1169,6 +1167,12 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (!h->phy_dev)
 		return 0;
 
+	phy_dev->supported &= h->if_support;
+	phy_dev->advertising = phy_dev->supported;
+
+	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+		phy_dev->autoneg = false;
+
 	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
 		phy_dev->dev_flags = 0;
 
@@ -1180,15 +1184,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (unlikely(ret))
 		return -ENODEV;
 
-	phy_dev->supported &= h->if_support;
-	phy_dev->advertising = phy_dev->supported;
-
-	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
-		phy_dev->autoneg = false;
-
-	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
-		phy_stop(phy_dev);
-
 	return 0;
 }
 
@@ -2153,7 +2148,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 			hns_nic_tx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 	for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2166,7 +2161,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 			hns_nic_rx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index cb8ddd0..d278fc7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd..6193f8f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 03f64f4..506f783 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3161,6 +3161,7 @@ static ssize_t ehea_probe_port(struct device *dev,
 
 	if (ehea_add_adapter_mr(adapter)) {
 		pr_err("creating MR failed\n");
+		of_node_put(eth_dn);
 		return -EIO;
 	}
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a475f36..426789e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
-		netdev_notify_peers(netdev);
+		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
 	netif_carrier_on(netdev);
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8b11682..8cd339c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7329,7 +7329,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
 
-	if (pci_dev_run_wake(pdev))
+	if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
 		pm_runtime_put_noidle(&pdev->dev);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3f53654..78a43d6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
 	/* create driver workqueue */
 	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
 					  fm10k_driver_name);
+	if (!fm10k_workqueue)
+		return -ENOMEM;
 
 	fm10k_dbg_init();
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f81ad0a..df8808c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2654,6 +2654,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
 	struct i40e_vsi_context ctxt;
 	i40e_status ret;
 
+	/* Don't modify stripping options if a port VLAN is active */
+	if (vsi->info.pvid)
+		return;
+
 	if ((vsi->info.valid_sections &
 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2684,6 +2688,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
 	struct i40e_vsi_context ctxt;
 	i40e_status ret;
 
+	/* Don't modify stripping options if a port VLAN is active */
+	if (vsi->info.pvid)
+		return;
+
 	if ((vsi->info.valid_sections &
 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c6d24ea..d86f3fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2399,8 +2399,10 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 				      (u8 *)&stats, sizeof(stats));
 }
 
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
+/* If the VF is not trusted restrict the number of MAC/VLAN it can program
+ * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
 #define I40E_VC_MAX_VLAN_PER_VF 8
 
 /**
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 8a28f33..dca6715 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ffaa6e0..5aa083d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3468,6 +3468,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			break;
 		}
 	}
+
+	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
 	pm_runtime_put_noidle(&pdev->dev);
 	return 0;
 
@@ -8754,9 +8757,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl, status;
 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
+	bool wake;
 
 	rtnl_lock();
 	netif_device_detach(netdev);
@@ -8769,14 +8770,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	igb_clear_interrupt_scheme(adapter);
 	rtnl_unlock();
 
-#ifdef CONFIG_PM
-	if (!runtime) {
-		retval = pci_save_state(pdev);
-		if (retval)
-			return retval;
-	}
-#endif
-
 	status = rd32(E1000_STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
@@ -8793,10 +8786,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 		}
 
 		ctrl = rd32(E1000_CTRL);
-		/* advertise wake from D3Cold */
-		#define E1000_CTRL_ADVD3WUC 0x00100000
-		/* phy power management enable */
-		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
 		ctrl |= E1000_CTRL_ADVD3WUC;
 		wr32(E1000_CTRL, ctrl);
 
@@ -8810,12 +8799,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 		wr32(E1000_WUFC, 0);
 	}
 
-	*enable_wake = wufc || adapter->en_mng_pt;
-	if (!*enable_wake)
+	wake = wufc || adapter->en_mng_pt;
+	if (!wake)
 		igb_power_down_link(adapter);
 	else
 		igb_power_up_link(adapter);
 
+	if (enable_wake)
+		*enable_wake = wake;
+
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
@@ -8858,22 +8850,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-	int retval;
-	bool wake;
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	retval = __igb_shutdown(pdev, &wake, 0);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8944,22 +8921,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int retval;
-	bool wake;
-
-	retval = __igb_shutdown(pdev, &wake, 1);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 9988c89..9b10abb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4272,7 +4272,7 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		if (port->gop_id == 0)
+		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
 			goto empty_set;
 		break;
 	default:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ffed2d4..9c48182 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 	rule.port = port;
 	rule.qpn = qpn;
 	INIT_LIST_HEAD(&rule.list);
-	mlx4_err(dev, "going promisc on %x\n", port);
+	mlx4_info(dev, "going promisc on %x\n", port);
 
 	return  mlx4_flow_attach(dev, &rule, regid_p);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 37a5514..b7e3b89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -8,6 +8,7 @@
 	depends on PCI
 	imply PTP_1588_CLOCK
 	imply VXLAN
+	imply MLXFW
 	default n
 	---help---
 	  Core driver for low level functionality of the ConnectX-4 and
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4a33c9a..599114a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -33,6 +33,26 @@
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+	int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+	/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+	 * The condition checked in mlx5e_rx_is_linear_skb is:
+	 *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
+	 *   (Note that hw_mtu == sw_mtu + hard_mtu.)
+	 * What is returned from this function is:
+	 *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
+	 * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+	 * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+	 * because both PAGE_SIZE and S are already aligned. Any number greater
+	 * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+	 * so max_mtu is the maximum MTU allowed.
+	 */
+
+	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
 		    struct xdp_buff *xdp)
@@ -207,9 +227,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 			sqcc++;
 
 			if (is_redirect) {
-				xdp_return_frame(xdpi->xdpf);
 				dma_unmap_single(sq->pdev, xdpi->dma_addr,
 						 xdpi->xdpf->len, DMA_TO_DEVICE);
+				xdp_return_frame(xdpi->xdpf);
 			} else {
 				/* Recycle RX page */
 				mlx5e_page_release(rq, &xdpi->di, true);
@@ -243,9 +263,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 		sq->cc++;
 
 		if (is_redirect) {
-			xdp_return_frame(xdpi->xdpf);
 			dma_unmap_single(sq->pdev, xdpi->dma_addr,
 					 xdpi->xdpf->len, DMA_TO_DEVICE);
+			xdp_return_frame(xdpi->xdpf);
 		} else {
 			/* Recycle RX page */
 			mlx5e_page_release(rq, &xdpi->di, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 4d09662..827ceef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -34,12 +34,11 @@
 
 #include "en.h"
 
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
-				 MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_DS_COUNT \
 	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		      void *va, u16 *rx_headroom, u32 *len);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5e54230..792bb8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1317,7 +1317,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
 		break;
 	case MLX5_MODULE_ID_SFP:
 		modinfo->type       = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
 		break;
 	default:
 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -1609,6 +1609,22 @@ static int mlx5e_flash_device(struct net_device *dev,
 	return mlx5e_ethtool_flash_device(priv, flash);
 }
 
+#ifndef CONFIG_MLX5_EN_RXNFC
+/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
+ * otherwise this function will be defined from en_fs_ethtool.c
+ */
+static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	if (info->cmd != ETHTOOL_GRXRINGS)
+		return -EOPNOTSUPP;
+	/* ring_count is needed by ethtool -x */
+	info->data = priv->channels.params.num_channels;
+	return 0;
+}
+#endif
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
 	.get_link          = ethtool_op_get_link,
@@ -1627,8 +1643,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
 	.get_rxfh          = mlx5e_get_rxfh,
 	.set_rxfh          = mlx5e_set_rxfh,
-#ifdef CONFIG_MLX5_EN_RXNFC
 	.get_rxnfc         = mlx5e_get_rxnfc,
+#ifdef CONFIG_MLX5_EN_RXNFC
 	.set_rxnfc         = mlx5e_set_rxnfc,
 #endif
 	.flash_device      = mlx5e_flash_device,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 637d59c..b190c44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3761,7 +3761,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (params->xdp_prog &&
 	    !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
 		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-			   new_mtu, MLX5E_XDP_MAX_MTU);
+			   new_mtu, mlx5e_xdp_max_mtu(params));
 		err = -EINVAL;
 		goto out;
 	}
@@ -4227,7 +4227,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 
 	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
 		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+			    new_channels.params.sw_mtu,
+			    mlx5e_xdp_max_mtu(&new_channels.params));
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 26c9f94..55ccd90b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -80,8 +80,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
 				     in, nic_vport_context);
 
@@ -109,8 +108,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 8de64e8..22a2ef1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
 	return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-				       spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+					spinlock_t *idr_spinlock, u32 swid)
 {
 	unsigned long flags;
+	void *ptr;
 
 	spin_lock_irqsave(idr_spinlock, flags);
-	idr_remove(idr, swid);
+	ptr = idr_remove(idr, swid);
 	spin_unlock_irqrestore(idr_spinlock, flags);
+	return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
 	kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-	struct mlx5_fpga_tls_command_context cmd;
-	u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 				  struct mlx5_fpga_device *fdev,
 				  struct mlx5_fpga_tls_command_context *cmd,
 				  struct mlx5_fpga_dma_buf *resp)
 {
-	struct mlx5_teardown_stream_context *ctx =
-		    container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
 	if (resp) {
 		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 			mlx5_fpga_err(fdev,
 				      "Teardown stream failed with syndrome = %d",
 				      syndrome);
-		else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-						   &fdev->tls->tx_idr_spinlock,
-						   ctx->swid);
-		else
-			mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-						   &fdev->tls->rx_idr_spinlock,
-						   ctx->swid);
 	}
 	mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	void *cmd;
 	int ret;
 
-	rcu_read_lock();
-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-	rcu_read_unlock();
-
-	if (!flow) {
-		WARN_ONCE(1, "Received NULL pointer for handle\n");
-		return -EINVAL;
-	}
-
 	buf = kzalloc(size, GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;
 
 	cmd = (buf + 1);
 
+	rcu_read_lock();
+	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+	if (unlikely(!flow)) {
+		rcu_read_unlock();
+		WARN_ONCE(1, "Received NULL pointer for handle\n");
+		kfree(buf);
+		return -EINVAL;
+	}
 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+	rcu_read_unlock();
 
 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
 	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 					    void *flow, u32 swid, gfp_t flags)
 {
-	struct mlx5_teardown_stream_context *ctx;
+	struct mlx5_fpga_tls_command_context *ctx;
 	struct mlx5_fpga_dma_buf *buf;
 	void *cmd;
 
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	if (!ctx)
 		return;
 
-	buf = &ctx->cmd.buf;
+	buf = &ctx->buf;
 	cmd = (ctx + 1);
 	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
 	MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	buf->sg[0].data = cmd;
 	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-	ctx->swid = swid;
-	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
 			       mlx5_fpga_tls_teardown_completion);
 }
 
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
 	void *flow;
 
-	rcu_read_lock();
 	if (direction_sx)
-		flow = idr_find(&tls->tx_idr, swid);
+		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+						  &tls->tx_idr_spinlock,
+						  swid);
 	else
-		flow = idr_find(&tls->rx_idr, swid);
-
-	rcu_read_unlock();
+		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+						  &tls->rx_idr_spinlock,
+						  swid);
 
 	if (!flow) {
 		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 		return;
 	}
 
+	synchronize_rcu(); /* before kfree(flow) */
 	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 31a9cbd..09b6b1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
 	i2c_addr = MLX5_I2C_ADDR_LOW;
-	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-		i2c_addr = MLX5_I2C_ADDR_HIGH;
-		offset -= MLX5_EEPROM_PAGE_LENGTH;
-	}
 
 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f7154f35..2e6df58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 		return 0;
 
-	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
 	if (!emad_wq)
 		return -ENOMEM;
 	mlxsw_core->emad_wq = emad_wq;
@@ -1875,10 +1875,10 @@ static int __init mlxsw_core_module_init(void)
 {
 	int err;
 
-	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
 	if (!mlxsw_wq)
 		return -ENOMEM;
-	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
 					    mlxsw_core_driver_name);
 	if (!mlxsw_owq) {
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 72cdaa0..1006185 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f9bef03..c5b82e2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2504,11 +2504,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	if (err)
 		return err;
 
+	mlxsw_sp_port->link.autoneg = autoneg;
+
 	if (!netif_running(dev))
 		return 0;
 
-	mlxsw_sp_port->link.autoneg = autoneg;
-
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
@@ -2783,7 +2783,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
 					    MLXSW_REG_QEEC_HIERARCY_TC,
 					    i + 8, i,
-					    false, 0);
+					    true, 100);
 		if (err)
 			return err;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index af673ab..a4f237f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1585,7 +1585,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 fid_index;
 	int err = 0;
 
-	if (switchdev_trans_ph_prepare(trans))
+	if (switchdev_trans_ph_commit(trans))
 		return 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index bd6e901..b83b070 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -535,9 +535,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
 		/* set dma read address */
 		ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
 
-		/* start the packet dma process, and set auto-dequeue rx */
-		ks8851_wrreg16(ks, KS_RXQCR,
-			       ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+		/* start DMA access */
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
 
 		if (rxlen > 4) {
 			unsigned int rxalign;
@@ -568,7 +567,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
 			}
 		}
 
-		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+		/* end DMA access and dequeue packet */
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
 	}
 }
 
@@ -785,6 +785,15 @@ static void ks8851_tx_work(struct work_struct *work)
 static int ks8851_net_open(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
+	int ret;
+
+	ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				   dev->name, ks);
+	if (ret < 0) {
+		netdev_err(dev, "failed to get irq\n");
+		return ret;
+	}
 
 	/* lock the card, even if we may not actually be doing anything
 	 * else at the moment */
@@ -849,6 +858,7 @@ static int ks8851_net_open(struct net_device *dev)
 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
 	mutex_unlock(&ks->lock);
+	mii_check_link(&ks->mii);
 	return 0;
 }
 
@@ -899,6 +909,8 @@ static int ks8851_net_stop(struct net_device *dev)
 		dev_kfree_skb(txb);
 	}
 
+	free_irq(dev->irq, ks);
+
 	return 0;
 }
 
@@ -1508,6 +1520,7 @@ static int ks8851_probe(struct spi_device *spi)
 
 	spi_set_drvdata(spi, ks);
 
+	netif_carrier_off(ks->netdev);
 	ndev->if_port = IF_PORT_100BASET;
 	ndev->netdev_ops = &ks8851_netdev_ops;
 	ndev->irq = spi->irq;
@@ -1529,14 +1542,6 @@ static int ks8851_probe(struct spi_device *spi)
 	ks8851_read_selftest(ks);
 	ks8851_init_mac(ks);
 
-	ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
-				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-				   ndev->name, ks);
-	if (ret < 0) {
-		dev_err(&spi->dev, "failed to get irq\n");
-		goto err_irq;
-	}
-
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1554,10 @@ static int ks8851_probe(struct spi_device *spi)
 
 	return 0;
 
-
 err_netdev:
-	free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
 	if (gpio_is_valid(gpio))
 		gpio_set_value(gpio, 0);
-err_id:
 	regulator_disable(ks->vdd_reg);
 err_reg:
 	regulator_disable(ks->vdd_io);
@@ -1574,7 +1575,6 @@ static int ks8851_remove(struct spi_device *spi)
 		dev_info(&spi->dev, "remove\n");
 
 	unregister_netdev(priv->netdev);
-	free_irq(spi->irq, priv);
 	if (gpio_is_valid(priv->gpio))
 		gpio_set_value(priv->gpio, 0);
 	regulator_disable(priv->vdd_reg);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0bdd3c4..1029119 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -605,7 +605,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
 			      struct netdev_hw_addr *hw_addr)
 {
 	struct ocelot *ocelot = port->ocelot;
-	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
 	if (!ha)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index bf4302e..28f7656 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2365,6 +2365,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
 				dma_object->addr))) {
 			vxge_os_dma_free(devh->pdev, memblock,
 				&dma_object->acc_handle);
+			memblock = NULL;
 			goto exit;
 		}
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 7a1e9cd..777b994 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -80,8 +80,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
 	tmp_push_vlan_tci =
 		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
-		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
-		NFP_FL_PUSH_VLAN_CFI;
+		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
 	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 325954b..9b01832 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -55,7 +55,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP	BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI	BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT	BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID	GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB		GENMASK(31, 12)
@@ -109,7 +109,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX	GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO		GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI		BIT(12)
 #define NFP_FL_PUSH_VLAN_VID		GENMASK(11, 0)
 
 /* LAG ports */
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 17acb8c..b99d55cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -56,14 +56,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
 						      FLOW_DISSECTOR_KEY_VLAN,
 						      target);
 		/* Populate the tci field. */
-		if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
-			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-					     flow_vlan->vlan_priority) |
-				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-					     flow_vlan->vlan_id) |
-				  NFP_FLOWER_MASK_VLAN_CFI;
-			frame->tci = cpu_to_be16(tmp_tci);
-		}
+		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+				      flow_vlan->vlan_priority) |
+			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+				      flow_vlan->vlan_id);
+		frame->tci = cpu_to_be16(tmp_tci);
 	}
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 382bb93..ff5c741 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -194,6 +194,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 		return;
 	}
 
+	rcu_read_lock();
 	for (i = 0; i < count; i++) {
 		ipv4_addr = payload->tun_info[i].ipv4;
 		port = be32_to_cpu(payload->tun_info[i].egress_port);
@@ -209,6 +210,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 		neigh_event_send(n, NULL);
 		neigh_release(n);
 	}
+	rcu_read_unlock();
 }
 
 static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
@@ -404,9 +406,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
 
 	payload = nfp_flower_cmsg_get_data(skb);
 
+	rcu_read_lock();
 	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
 	if (!netdev)
-		goto route_fail_warning;
+		goto fail_rcu_unlock;
 
 	flow.daddr = payload->ipv4_addr;
 	flow.flowi4_proto = IPPROTO_UDP;
@@ -416,21 +419,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
 	rt = ip_route_output_key(dev_net(netdev), &flow);
 	err = PTR_ERR_OR_ZERO(rt);
 	if (err)
-		goto route_fail_warning;
+		goto fail_rcu_unlock;
 #else
-	goto route_fail_warning;
+	goto fail_rcu_unlock;
 #endif
 
 	/* Get the neighbour entry for the lookup */
 	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
 	ip_rt_put(rt);
 	if (!n)
-		goto route_fail_warning;
-	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
+		goto fail_rcu_unlock;
+	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
 	neigh_release(n);
+	rcu_read_unlock();
 	return;
 
-route_fail_warning:
+fail_rcu_unlock:
+	rcu_read_unlock();
 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
 }
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 013ff56..5e574c3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
 	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
 	if (IS_ERR(ptp->clock)) {
-		rc = -EINVAL;
 		DP_ERR(edev, "PTP clock registration failed\n");
+		qede_ptp_disable(edev);
+		rc = -EINVAL;
 		goto err2;
 	}
 
 	return 0;
 
-err2:
-	qede_ptp_disable(edev);
-	ptp->clock = NULL;
 err1:
 	kfree(ptp);
+err2:
 	edev->ptp = NULL;
 
 	return rc;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 3b0adda..a4cd6f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 
 	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
 		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+		if (!skb)
+			break;
 		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
 		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
 		adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Makefile b/drivers/net/ethernet/qualcomm/rmnet/Makefile
index 442c980..ac76d0e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/Makefile
+++ b/drivers/net/ethernet/qualcomm/rmnet/Makefile
@@ -9,4 +9,5 @@
 rmnet-y		 += rmnet_handlers.o
 rmnet-y		 += rmnet_map_data.o
 rmnet-y		 += rmnet_map_command.o
+rmnet-y		 += rmnet_descriptor.o
 obj-$(CONFIG_RMNET) += rmnet.o
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index c1a937f..1514808 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -14,6 +14,7 @@
 #include "rmnet_vnd.h"
 #include "rmnet_private.h"
 #include "rmnet_map.h"
+#include "rmnet_descriptor.h"
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
 
@@ -38,6 +39,9 @@
 
 /* Local Definitions and Declarations */
 
+#define RMNET_SCHED_BOOST_THRESH  (500000000)
+bool rmnet_sched_boost;
+
 enum {
 	IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,
 	IFLA_RMNET_UL_AGG_PARAMS,
@@ -81,6 +85,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
 	rmnet_map_cmd_exit(port);
 	rmnet_map_tx_aggregate_exit(port);
 
+	rmnet_descriptor_deinit(port);
+
 	kfree(port);
 
 	netdev_rx_handler_unregister(real_dev);
@@ -118,6 +124,12 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
 		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
 
+	rc = rmnet_descriptor_init(port);
+	if (rc) {
+		rmnet_descriptor_deinit(port);
+		return rc;
+	}
+
 	rmnet_map_tx_aggregate_init(port);
 	rmnet_map_cmd_init(port);
 
@@ -597,7 +609,7 @@ void rmnet_init_qmi_pt(void *port, void *qmi)
 }
 EXPORT_SYMBOL(rmnet_init_qmi_pt);
 
-void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+void rmnet_get_stats(void *port, u64 *rx, u64 *tx)
 {
 	struct rmnet_priv *priv;
 	struct rmnet_pcpu_stats *ps;
@@ -618,14 +630,14 @@ void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
 			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
 			do {
 				start = u64_stats_fetch_begin_irq(&ps->syncp);
-				*tx += ps->stats.tx_pkts;
-				*rx += ps->stats.rx_pkts;
+				*tx += ps->stats.tx_bytes;
+				*rx += ps->stats.rx_bytes;
 			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
 		}
 	}
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(rmnet_get_packets);
+EXPORT_SYMBOL(rmnet_get_stats);
 
 void  rmnet_set_powersave_format(void *port)
 {
@@ -693,6 +705,16 @@ int rmnet_get_powersave_notif(void *port)
 EXPORT_SYMBOL(rmnet_get_powersave_notif);
 #endif
 
+/* Set data rates (in bits/s) */
+void rmnet_set_data_rates(void *port, u64 rx_rate, u64 tx_rate)
+{
+	if (((struct rmnet_port *)port)->data_format & RMNET_FORMAT_TASK_BOOST)
+		WRITE_ONCE(rmnet_sched_boost,
+			   (tx_rate >= RMNET_SCHED_BOOST_THRESH) ?
+				true : false);
+}
+EXPORT_SYMBOL(rmnet_set_data_rates);
+
 /* Startup/Shutdown */
 
 static int __init rmnet_init(void)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 5c88e6c..47a469c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -14,6 +14,8 @@
 #define RMNET_MAX_LOGICAL_EP 255
 #define RMNET_MAX_VEID 4
 
+extern bool rmnet_sched_boost;
+
 struct rmnet_endpoint {
 	u8 mux_id;
 	struct net_device *egress_dev;
@@ -21,6 +23,9 @@ struct rmnet_endpoint {
 };
 
 struct rmnet_port_priv_stats {
+	u64 dl_hdr_last_qmap_vers;
+	u64 dl_hdr_last_ep_id;
+	u64 dl_hdr_last_trans_id;
 	u64 dl_hdr_last_seq;
 	u64 dl_hdr_last_bytes;
 	u64 dl_hdr_last_pkts;
@@ -69,6 +74,10 @@ struct rmnet_port {
 	struct list_head dl_list;
 	struct rmnet_port_priv_stats stats;
 	int dl_marker_flush;
+
+	/* Descriptor pool */
+	spinlock_t desc_pool_lock;
+	struct rmnet_frag_descriptor_pool *frag_desc_pool;
 };
 
 extern struct rtnl_link_ops rmnet_link_ops;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
new file mode 100644
index 0000000..9712941
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "rmnet_config.h"
+#include "rmnet_descriptor.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
+
+#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
+#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port);
+typedef void (*rmnet_perf_chain_hook_t)(void);
+
+struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct rmnet_frag_descriptor *frag_desc;
+
+	spin_lock(&port->desc_pool_lock);
+	if (!list_empty(&pool->free_list)) {
+		frag_desc = list_first_entry(&pool->free_list,
+					     struct rmnet_frag_descriptor,
+					     list);
+		list_del_init(&frag_desc->list);
+	} else {
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			goto out;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->sub_frags);
+		pool->pool_size++;
+	}
+
+out:
+	spin_unlock(&port->desc_pool_lock);
+	return frag_desc;
+}
+EXPORT_SYMBOL(rmnet_get_frag_descriptor);
+
+void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+				   struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct page *page = skb_frag_page(&frag_desc->frag);
+
+	list_del(&frag_desc->list);
+	if (page)
+		put_page(page);
+
+	memset(frag_desc, 0, sizeof(*frag_desc));
+	INIT_LIST_HEAD(&frag_desc->list);
+	INIT_LIST_HEAD(&frag_desc->sub_frags);
+	spin_lock(&port->desc_pool_lock);
+	list_add_tail(&frag_desc->list, &pool->free_list);
+	spin_unlock(&port->desc_pool_lock);
+}
+EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
+
+void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+			       struct page *p, u32 page_offset, u32 len)
+{
+	struct rmnet_frag_descriptor *frag_desc;
+
+	frag_desc = rmnet_get_frag_descriptor(port);
+	if (!frag_desc)
+		return;
+
+	rmnet_frag_fill(frag_desc, p, page_offset, len);
+	list_add_tail(&frag_desc->list, list);
+}
+EXPORT_SYMBOL(rmnet_descriptor_add_frag);
+
+int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
+				int start, u8 *nexthdrp, __be16 *fragp)
+{
+	u8 nexthdr = *nexthdrp;
+
+	*fragp = 0;
+
+	while (ipv6_ext_hdr(nexthdr)) {
+		struct ipv6_opt_hdr *hp;
+		int hdrlen;
+
+		if (nexthdr == NEXTHDR_NONE)
+			return -EINVAL;
+
+		hp = rmnet_frag_data_ptr(frag_desc) + start;
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			__be16 *fp;
+
+			fp = rmnet_frag_data_ptr(frag_desc) + start +
+			     offsetof(struct frag_hdr, frag_off);
+			*fragp = *fp;
+			if (ntohs(*fragp) & ~0x7)
+				break;
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else {
+			hdrlen = ipv6_optlen(hp);
+		}
+
+		nexthdr = hp->nexthdr;
+		start += hdrlen;
+	}
+
+	*nexthdrp = nexthdr;
+	return start;
+}
+EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
+
+static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
+				     struct rmnet_port *port,
+				     int enable)
+{
+	struct rmnet_map_control_command *cmd;
+	struct rmnet_endpoint *ep;
+	struct net_device *vnd;
+	u16 ip_family;
+	u16 fc_seq;
+	u32 qos_id;
+	u8 mux_id;
+	int r;
+
+	mux_id = qmap->mux_id;
+	cmd = (struct rmnet_map_control_command *)
+	      ((char *)qmap + sizeof(*qmap));
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		return RX_HANDLER_CONSUMED;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		return RX_HANDLER_CONSUMED;
+
+	vnd = ep->egress_dev;
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	/* Ignore the ip family and pass the sequence number for both v4 and v6
+	 * sequence. User space does not support creating dedicated flows for
+	 * the 2 protocols
+	 */
+	r = rmnet_vnd_do_flow_control(vnd, enable);
+	if (r)
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	else
+		return RMNET_MAP_COMMAND_ACK;
+}
+
+static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
+				unsigned char type,
+				struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	struct net_device *dev = port->dev;
+	struct sk_buff *skb;
+	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);
+
+	skb = alloc_skb(alloc_len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = dev;
+
+	cmd = rmnet_map_get_cmd_start(skb);
+	cmd->cmd_type = type & 0x03;
+
+	netif_tx_lock(dev);
+	dev->netdev_ops->ndo_start_xmit(skb, dev);
+	netif_tx_unlock(dev);
+}
+
+static void
+rmnet_frag_process_flow_start(struct rmnet_map_control_command_header *cmd,
+			      struct rmnet_port *port,
+			      u16 cmd_len)
+{
+	struct rmnet_map_dl_ind_hdr *dlhdr;
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_HDR_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	dlhdr = (struct rmnet_map_dl_ind_hdr *)((char *)cmd + sizeof(*cmd));
+
+	port->stats.dl_hdr_last_ep_id = cmd->source_id;
+	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
+	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
+	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
+	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
+	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
+	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
+	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
+	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
+	port->stats.dl_hdr_count++;
+
+	/* If a target is taking frag path, we can assume DL marker v2 is in
+	 * play
+	 */
+	if (is_dl_mark_v2)
+		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
+	else
+		rmnet_map_dl_hdr_notify(port, dlhdr);
+}
+
+static void
+rmnet_frag_process_flow_end(struct rmnet_map_control_command_header *cmd,
+			    struct rmnet_port *port, u16 cmd_len)
+{
+	struct rmnet_map_dl_ind_trl *dltrl;
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+
+	if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_TRL_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	dltrl = (struct rmnet_map_dl_ind_trl *)((char *)cmd + sizeof(*cmd));
+
+	port->stats.dl_trl_last_seq = dltrl->seq_le;
+	port->stats.dl_trl_count++;
+
+	/* If a target is taking frag path, we can assume DL marker v2 is in
+	 * play
+	 */
+	if (is_dl_mark_v2)
+		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
+	else
+		rmnet_map_dl_trl_notify(port, dltrl);
+}
+
+/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ */
+void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	cmd = (struct rmnet_map_control_command *)
+	      ((char *)qmap + sizeof(*qmap));
+	command_name = cmd->command_name;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_frag_do_flow_control(qmap, port, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_frag_do_flow_control(qmap, port, 0);
+		break;
+
+	default:
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		break;
+	}
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_frag_send_ack(qmap, rc, port);
+}
+
+int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
+			    struct rmnet_port *port, u16 pkt_len)
+{
+	struct rmnet_map_control_command_header *cmd;
+	unsigned char command_name;
+
+	cmd = (struct rmnet_map_control_command_header *)
+	      ((char *)qmap + sizeof(*qmap));
+	command_name = cmd->command_name;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_START:
+		rmnet_frag_process_flow_start(cmd, port, pkt_len);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_END:
+		rmnet_frag_process_flow_end(cmd, port, pkt_len);
+		break;
+
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_frag_flow_command);
+
+void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
+			    struct list_head *list)
+{
+	struct rmnet_map_header *maph;
+	u8 *data = skb_frag_address(frag);
+	u32 offset = 0;
+	u32 packet_len;
+
+	while (offset < skb_frag_size(frag)) {
+		maph = (struct rmnet_map_header *)data;
+		packet_len = ntohs(maph->pkt_len);
+
+		/* Some hardware can send us empty frames. Catch them */
+		if (packet_len == 0)
+			return;
+
+		packet_len += sizeof(*maph);
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+			packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+		} else if (port->data_format &
+			   (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
+			    RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
+			u32 hsize = 0;
+			u8 type;
+
+			type = ((struct rmnet_map_v5_coal_header *)
+				(data + sizeof(*maph)))->header_type;
+			switch (type) {
+			case RMNET_MAP_HEADER_TYPE_COALESCING:
+				hsize = sizeof(struct rmnet_map_v5_coal_header);
+				break;
+			case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+				hsize = sizeof(struct rmnet_map_v5_csum_header);
+				break;
+			}
+
+			packet_len += hsize;
+		}
+
+		if ((int)skb_frag_size(frag) - (int)packet_len < 0)
+			return;
+
+		rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
+					  frag->page_offset + offset,
+					  packet_len);
+
+		offset += packet_len;
+		data += packet_len;
+	}
+}
+
+/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
+ * if needed (i.e. forwarding, UDP GRO).
+ *
+ * Rewrites the IP length fields to cover the whole coalesced frame, seeds
+ * the transport checksum with the pseudo-header sum, and copies
+ * gso_size/gso_segs from the descriptor so the stack can re-segment.
+ */
+static void rmnet_frag_gso_stamp(struct sk_buff *skb,
+				 struct rmnet_frag_descriptor *frag_desc)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	__sum16 pseudo;
+	u16 pkt_len = skb->len - frag_desc->ip_len;
+	bool ipv4 = frag_desc->ip_proto == 4;
+
+	if (ipv4) {
+		iph->tot_len = htons(skb->len);
+		iph->check = 0;
+		iph->check = ip_fast_csum(iph, iph->ihl);
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    pkt_len, frag_desc->trans_proto,
+					    0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		/* Payload length includes any extension headers */
+		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  pkt_len, frag_desc->trans_proto, 0);
+	}
+
+	if (frag_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)
+				    ((u8 *)iph + frag_desc->ip_len);
+
+		/* Pseudo-header sum only; the stack completes the checksum
+		 * because of CHECKSUM_PARTIAL set below.
+		 */
+		tp->check = pseudo;
+		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+		skb->csum_offset = offsetof(struct tcphdr, check);
+	} else {
+		struct udphdr *up = (struct udphdr *)
+				    ((u8 *)iph + frag_desc->ip_len);
+
+		up->len = htons(pkt_len);
+		up->check = pseudo;
+		shinfo->gso_type = SKB_GSO_UDP_L4;
+		skb->csum_offset = offsetof(struct udphdr, check);
+	}
+
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
+	shinfo->gso_size = frag_desc->gso_size;
+	shinfo->gso_segs = frag_desc->gso_segs;
+}
+
+/* Allocate and populate an skb to contain the packet represented by the
+ * frag descriptor.
+ *
+ * The packet data stays in its original pages and is attached to the skb
+ * as rx frags (a page reference is taken per frag). Sub-fragments from
+ * rmnet_perf are chained on, spilling into a frag_list of zero-length
+ * skbs once MAX_SKB_FRAGS is exhausted. Returns NULL on allocation
+ * failure or if pulling the already-copied headers fails.
+ */
+static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port)
+{
+	struct sk_buff *head_skb, *current_skb, *skb;
+	struct skb_shared_info *shinfo;
+	struct rmnet_frag_descriptor *sub_frag, *tmp;
+
+	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
+	if (frag_desc->hdrs_valid) {
+		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
+
+		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
+				     GFP_ATOMIC);
+		if (!head_skb)
+			return NULL;
+
+		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
+		/* Headers go in the linear area; payload stays in frags */
+		skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
+		skb_reset_network_header(head_skb);
+		if (frag_desc->trans_len)
+			skb_set_transport_header(head_skb, frag_desc->ip_len);
+
+		/* Packets that have no data portion don't need any frags */
+		if (hdr_len == skb_frag_size(&frag_desc->frag))
+			goto skip_frags;
+
+		/* If the headers we added are the start of the page,
+		 * we don't want to add them twice
+		 */
+		if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
+			if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
+				/* The descriptor was recycled by the pull */
+				kfree_skb(head_skb);
+				return NULL;
+			}
+		}
+	} else {
+		/* Allocate enough space to avoid penalties in the stack
+		 * from __pskb_pull_tail()
+		 */
+		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
+				     GFP_ATOMIC);
+		if (!head_skb)
+			return NULL;
+
+		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
+	}
+
+	/* Add main fragment */
+	get_page(skb_frag_page(&frag_desc->frag));
+	skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
+			frag_desc->frag.page_offset,
+			skb_frag_size(&frag_desc->frag),
+			skb_frag_size(&frag_desc->frag));
+
+	shinfo = skb_shinfo(head_skb);
+	current_skb = head_skb;
+
+	/* Add in any frags from rmnet_perf */
+	list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
+		skb_frag_t *frag;
+		u32 frag_size;
+
+		frag = &sub_frag->frag;
+		frag_size = skb_frag_size(frag);
+
+add_frag:
+		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
+			get_page(skb_frag_page(frag));
+			skb_add_rx_frag(current_skb, shinfo->nr_frags,
+					skb_frag_page(frag), frag->page_offset,
+					frag_size, frag_size);
+			if (current_skb != head_skb) {
+				/* Frag landed on a frag_list skb; account
+				 * its bytes on the head skb manually.
+				 */
+				head_skb->len += frag_size;
+				head_skb->data_len += frag_size;
+			}
+		} else {
+			/* Alloc a new skb and try again */
+			skb = alloc_skb(0, GFP_ATOMIC);
+			if (!skb)
+				break;
+
+			if (current_skb == head_skb)
+				shinfo->frag_list = skb;
+			else
+				current_skb->next = skb;
+
+			current_skb = skb;
+			shinfo = skb_shinfo(current_skb);
+			goto add_frag;
+		}
+
+		rmnet_recycle_frag_descriptor(sub_frag, port);
+	}
+
+skip_frags:
+	head_skb->dev = frag_desc->dev;
+	rmnet_set_skb_proto(head_skb);
+
+	/* Handle any header metadata that needs to be updated after RSB/RSC
+	 * segmentation
+	 */
+	if (frag_desc->ip_id_set) {
+		struct iphdr *iph;
+
+		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
+		/* Keep the IP checksum consistent with the new ID */
+		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
+		iph->id = frag_desc->ip_id;
+	}
+
+	if (frag_desc->tcp_seq_set) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)
+		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
+		th->seq = frag_desc->tcp_seq;
+	}
+
+	/* Handle csum offloading */
+	if (frag_desc->csum_valid)
+		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	/* Handle any rmnet_perf metadata */
+	if (frag_desc->hash) {
+		head_skb->hash = frag_desc->hash;
+		head_skb->sw_hash = 1;
+	}
+
+	if (frag_desc->flush_shs)
+		head_skb->cb[0] = 1;
+
+	/* Handle coalesced packets */
+	if (frag_desc->gso_segs > 1)
+		rmnet_frag_gso_stamp(head_skb, frag_desc);
+
+	return head_skb;
+}
+
+/* Deliver the packets contained within a frag descriptor.
+ *
+ * The descriptor is consumed unconditionally: it is recycled whether or
+ * not the skb allocation succeeds.
+ */
+void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_port *port)
+{
+	struct sk_buff *skb;
+
+	skb = rmnet_alloc_skb(frag_desc, port);
+	if (skb)
+		rmnet_deliver_skb(skb, port);
+	rmnet_recycle_frag_descriptor(frag_desc, port);
+}
+EXPORT_SYMBOL(rmnet_frag_deliver);
+
+/* Carve the current burst (gso_size * gso_segs bytes starting at
+ * data_offset) out of @coal_desc into a fresh descriptor appended to
+ * @list. TCP sequence / IPv4 ID metadata is recorded on the new
+ * descriptor so per-segment headers can be rebuilt later, then
+ * @coal_desc is advanced past the segmented data.
+ */
+static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
+				      struct rmnet_port *port,
+				      struct list_head *list,
+				      u8 pkt_id)
+{
+	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
+	struct rmnet_frag_descriptor *new_frag;
+	u8 *hdr_start = rmnet_frag_data_ptr(coal_desc);
+	u32 offset;
+
+	new_frag = rmnet_get_frag_descriptor(port);
+	if (!new_frag)
+		/* NOTE(review): on allocation failure this burst is silently
+		 * dropped and coal_desc->data_offset is NOT advanced — confirm
+		 * callers tolerate this.
+		 */
+		return;
+
+	/* Account for header lengths to access the data start */
+	offset = coal_desc->frag.page_offset + coal_desc->ip_len +
+		 coal_desc->trans_len + coal_desc->data_offset;
+
+	/* Header information and most metadata is the same as the original */
+	memcpy(new_frag, coal_desc, sizeof(*coal_desc));
+	INIT_LIST_HEAD(&new_frag->list);
+	INIT_LIST_HEAD(&new_frag->sub_frags);
+	rmnet_frag_fill(new_frag, skb_frag_page(&coal_desc->frag), offset,
+			coal_desc->gso_size * coal_desc->gso_segs);
+
+	if (coal_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		/* Advance the sequence number by the bytes already segmented */
+		th = (struct tcphdr *)(hdr_start + coal_desc->ip_len);
+		new_frag->tcp_seq_set = 1;
+		new_frag->tcp_seq = htonl(ntohl(th->seq) +
+					  coal_desc->data_offset);
+	}
+
+	if (coal_desc->ip_proto == 4) {
+		struct iphdr *iph;
+
+		/* Each segment gets a distinct IPv4 ID */
+		iph = (struct iphdr *)hdr_start;
+		new_frag->ip_id_set = 1;
+		new_frag->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
+	}
+
+	new_frag->hdr_ptr = hdr_start;
+	new_frag->csum_valid = true;
+	priv->stats.coal.coal_reconstruct++;
+
+	/* Update meta information to move past the data we just segmented */
+	coal_desc->data_offset += coal_desc->gso_size * coal_desc->gso_segs;
+	coal_desc->pkt_id = pkt_id + 1;
+	coal_desc->gso_segs = 0;
+
+	list_add_tail(&new_frag->list, list);
+}
+
+/* Verify the transport checksum of the packet held by @frag_desc by
+ * computing it in software over the pseudo-header and the transport
+ * datagram. Returns true if the checksum is valid.
+ */
+static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
+{
+	u8 *data = rmnet_frag_data_ptr(frag_desc);
+	unsigned int datagram_len;
+	__wsum csum;
+	__sum16 pseudo;
+
+	datagram_len = skb_frag_size(&frag_desc->frag) - frag_desc->ip_len;
+	if (frag_desc->ip_proto == 4) {
+		struct iphdr *iph = (struct iphdr *)data;
+
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    datagram_len,
+					    frag_desc->trans_proto, 0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
+
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  datagram_len, frag_desc->trans_proto,
+					  0);
+	}
+
+	/* A correct checksum folds to zero over pseudo-header + datagram */
+	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
+			    csum_unfold(pseudo));
+	return !csum_fold(csum);
+}
+
+/* Converts the coalesced frame into a list of descriptors.
+ * NLOs containing csum errors will not be included.
+ *
+ * Parses the IP/transport headers once (they are shared by every packet
+ * in the frame), then walks the NLO table. With HW GRO enabled, runs of
+ * good packets are kept together as one large GSO descriptor; otherwise
+ * each packet is segmented out individually.
+ */
+static void
+rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
+			     u64 nlo_err_mask, struct rmnet_port *port,
+			     struct list_head *list)
+{
+	struct iphdr *iph;
+	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
+	struct rmnet_map_v5_coal_header *coal_hdr;
+	u16 pkt_len;
+	u8 pkt, total_pkt = 0;
+	u8 nlo;
+	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
+
+	/* Pull off the headers we no longer need */
+	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
+		return;
+
+	coal_hdr = (struct rmnet_map_v5_coal_header *)
+		   rmnet_frag_data_ptr(coal_desc);
+	if (!rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr)))
+		return;
+
+	iph = (struct iphdr *)rmnet_frag_data_ptr(coal_desc);
+
+	if (iph->version == 4) {
+		coal_desc->ip_proto = 4;
+		coal_desc->ip_len = iph->ihl * 4;
+		coal_desc->trans_proto = iph->protocol;
+
+		/* Don't allow coalescing of any packets with IP options */
+		if (iph->ihl != 5)
+			gro = false;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+		int ip_len;
+		__be16 frag_off;
+		u8 protocol = ip6h->nexthdr;
+
+		coal_desc->ip_proto = 6;
+		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
+						     sizeof(*ip6h),
+						     &protocol,
+						     &frag_off);
+		coal_desc->trans_proto = protocol;
+
+		/* If we run into a problem, or this has a fragment header
+		 * (which should technically not be possible, if the HW
+		 * works as intended...), bail.
+		 */
+		if (ip_len < 0 || frag_off) {
+			priv->stats.coal.coal_ip_invalid++;
+			return;
+		}
+
+		coal_desc->ip_len = (u16)ip_len;
+		if (coal_desc->ip_len > sizeof(*ip6h)) {
+			/* Don't allow coalescing of any packets with IPv6
+			 * extension headers.
+			 */
+			gro = false;
+		}
+	} else {
+		priv->stats.coal.coal_ip_invalid++;
+		return;
+	}
+
+	if (coal_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
+		coal_desc->trans_len = th->doff * 4;
+	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
+		coal_desc->trans_len = sizeof(struct udphdr);
+	} else {
+		priv->stats.coal.coal_trans_invalid++;
+		return;
+	}
+
+	coal_desc->hdrs_valid = 1;
+
+	if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+		/* Mark the checksum as valid if it checks out */
+		if (rmnet_frag_validate_csum(coal_desc))
+			coal_desc->csum_valid = true;
+
+		coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
+		coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
+		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
+		coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
+		list_add_tail(&coal_desc->list, list);
+		return;
+	}
+
+	/* Segment the coalesced descriptor into new packets */
+	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
+		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
+		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
+		coal_desc->gso_size = pkt_len;
+		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
+		     pkt++, total_pkt++) {
+			/* The error mask is consumed MSB-first; bit 63
+			 * corresponds to the current packet.
+			 */
+			nlo_err_mask <<= 1;
+			if (nlo_err_mask & (1ULL << 63)) {
+				priv->stats.coal.coal_csum_err++;
+
+				/* Segment out the good data */
+				if (gro && coal_desc->gso_segs)
+					__rmnet_frag_segment_data(coal_desc,
+								  port,
+								  list,
+								  total_pkt);
+
+				/* skip over bad packet */
+				coal_desc->data_offset += pkt_len;
+				coal_desc->pkt_id = total_pkt + 1;
+			} else {
+				coal_desc->gso_segs++;
+
+				/* Segment the packet if we aren't sending the
+				 * larger packet up the stack.
+				 */
+				if (!gro)
+					__rmnet_frag_segment_data(coal_desc,
+								  port,
+								  list,
+								  total_pkt);
+			}
+		}
+
+		/* If we're switching NLOs, we need to send out everything from
+		 * the previous one, if we haven't done so. NLOs only switch
+		 * when the packet length changes.
+		 */
+		if (gro && coal_desc->gso_segs) {
+			/* Fast forward the (hopefully) common case.
+			 * Frames with only one NLO (i.e. one packet length) and
+			 * no checksum errors don't need to be segmented here.
+			 * We can just pass off the original skb.
+			 */
+			if (coal_desc->gso_size * coal_desc->gso_segs ==
+			    skb_frag_size(&coal_desc->frag) -
+			    coal_desc->ip_len - coal_desc->trans_len) {
+				coal_desc->hdr_ptr =
+					rmnet_frag_data_ptr(coal_desc);
+				coal_desc->csum_valid = true;
+				list_add_tail(&coal_desc->list, list);
+				return;
+			}
+
+			__rmnet_frag_segment_data(coal_desc, port, list,
+						  total_pkt);
+		}
+	}
+}
+
+/* Record reason for coalescing pipe closure in the per-device ethtool
+ * stats. HW-initiated closures (@type == RMNET_MAP_COAL_CLOSE_HW) carry a
+ * sub-reason in @code; unknown types/codes are silently ignored.
+ */
+static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
+					    u8 code)
+{
+	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;
+
+	switch (type) {
+	case RMNET_MAP_COAL_CLOSE_NON_COAL:
+		stats->non_coal++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_IP_MISS:
+		stats->ip_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
+		stats->trans_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_HW:
+		switch (code) {
+		case RMNET_MAP_COAL_CLOSE_HW_NL:
+			stats->hw_nl++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_PKT:
+			stats->hw_pkt++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
+			stats->hw_byte++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_TIME:
+			stats->hw_time++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
+			stats->hw_evict++;
+			break;
+		default:
+			break;
+		}
+		break;
+	case RMNET_MAP_COAL_CLOSE_COAL:
+		stats->coal++;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Check if the coalesced header has any incorrect values, in which case, the
+ * entire coalesced frame must be dropped. Then check if there are any
+ * checksum issues.
+ *
+ * On success, *@nlo_err_mask holds the per-packet checksum-error bitmaps
+ * of all NLOs packed MSB-first (NLO i in byte 7 - i). Returns 0, or
+ * -EINVAL if the NLO or packet counts are out of range.
+ */
+static int
+rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
+				  u64 *nlo_err_mask)
+{
+	struct rmnet_map_v5_coal_header *coal_hdr;
+	unsigned char *data = rmnet_frag_data_ptr(frag_desc);
+	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
+	u64 mask = 0;
+	int i;
+	u8 veid, pkts = 0;
+
+	coal_hdr = (struct rmnet_map_v5_coal_header *)
+		   (data + sizeof(struct rmnet_map_header));
+	veid = coal_hdr->virtual_channel_id;
+
+	if (coal_hdr->num_nlos == 0 ||
+	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
+		priv->stats.coal.coal_hdr_nlo_err++;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
+		/* If there is a checksum issue, we need to split
+		 * up the skb. Rebuild the full csum error field
+		 */
+		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
+		u8 pkt = coal_hdr->nl_pairs[i].num_packets;
+
+		mask |= ((u64)err) << (7 - i) * 8;
+
+		/* Track total packets in frame */
+		pkts += pkt;
+		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
+			priv->stats.coal.coal_hdr_pkt_err++;
+			return -EINVAL;
+		}
+	}
+
+	/* Track number of packets we get inside of coalesced frames */
+	priv->stats.coal.coal_pkts += pkts;
+
+	/* Update ethtool stats */
+	rmnet_frag_data_log_close_stats(priv,
+					coal_hdr->close_type,
+					coal_hdr->close_value);
+	if (veid < RMNET_MAX_VEID)
+		priv->stats.coal.coal_veid[veid]++;
+
+	*nlo_err_mask = mask;
+
+	return 0;
+}
+
+/* Process a QMAPv5 packet header.
+ *
+ * Coalescing headers are validated and segmented into one or more
+ * descriptors appended to @list; csum-offload headers are stripped and
+ * the descriptor itself is moved onto @list after trimming padding.
+ * Returns 0 on success or a negative errno.
+ */
+int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port,
+				       struct list_head *list,
+				       u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
+	u64 nlo_err_mask;
+	int rc = 0;
+
+	switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
+	case RMNET_MAP_HEADER_TYPE_COALESCING:
+		priv->stats.coal.coal_rx++;
+		rc = rmnet_frag_data_check_coal_header(frag_desc,
+						       &nlo_err_mask);
+		if (rc)
+			return rc;
+
+		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
+					     list);
+		/* If segmentation did not reuse the original descriptor as
+		 * the first entry, it is no longer needed.
+		 */
+		if (list_first_entry(list, struct rmnet_frag_descriptor,
+				     list) != frag_desc)
+			rmnet_recycle_frag_descriptor(frag_desc, port);
+		break;
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (rmnet_frag_get_csum_valid(frag_desc)) {
+			priv->stats.csum_ok++;
+			frag_desc->csum_valid = true;
+		} else {
+			priv->stats.csum_valid_unset++;
+		}
+
+		/* Pull failure recycles the descriptor internally */
+		if (!rmnet_frag_pull(frag_desc, port,
+				     sizeof(struct rmnet_map_header) +
+				     sizeof(struct rmnet_map_v5_csum_header))) {
+			rc = -EINVAL;
+			break;
+		}
+
+		frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
+
+		/* Remove padding only for csum offload packets.
+		 * Coalesced packets should never have padding.
+		 */
+		if (!rmnet_frag_trim(frag_desc, port, len)) {
+			rc = -EINVAL;
+			break;
+		}
+
+		list_del_init(&frag_desc->list);
+		list_add_tail(&frag_desc->list, list);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/* Perf hook handler */
+rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_desc_entry);
+
+/* Process a single deaggregated MAP frame: handle command frames, look up
+ * the destination endpoint from the mux ID, strip/parse the QMAP headers,
+ * and hand the resulting descriptor(s) to rmnet_perf (if registered) or
+ * deliver them directly. The descriptor is consumed on all paths.
+ */
+static void
+__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
+			     struct rmnet_port *port)
+{
+	rmnet_perf_desc_hook_t rmnet_perf_ingress;
+	struct rmnet_map_header *qmap;
+	struct rmnet_endpoint *ep;
+	struct rmnet_frag_descriptor *frag, *tmp;
+	LIST_HEAD(segs);
+	u16 len, pad;
+	u8 mux_id;
+
+	qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
+	mux_id = qmap->mux_id;
+	pad = qmap->pad_len;
+	len = ntohs(qmap->pkt_len) - pad;
+
+	if (qmap->cd_bit) {
+		/* Command frame, not data */
+		qmi_rmnet_set_dl_msg_active(port);
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			rmnet_frag_flow_command(qmap, port, len);
+			goto recycle;
+		}
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+			rmnet_frag_command(qmap, port);
+
+		goto recycle;
+	}
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto recycle;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto recycle;
+
+	frag_desc->dev = ep->egress_dev;
+
+	/* Handle QMAPv5 packet */
+	if (qmap->next_hdr &&
+	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
+		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
+						       len))
+			goto recycle;
+	} else {
+		/* We only have the main QMAP header to worry about */
+		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
+			/* Pull already recycled the descriptor */
+			return;
+
+		frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
+
+		if (!rmnet_frag_trim(frag_desc, port, len))
+			/* Trim already recycled the descriptor */
+			return;
+
+		list_add_tail(&frag_desc->list, &segs);
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port);
+
+	rcu_read_lock();
+	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
+	if (rmnet_perf_ingress) {
+		list_for_each_entry_safe(frag, tmp, &segs, list) {
+			list_del_init(&frag->list);
+			rmnet_perf_ingress(frag, port);
+		}
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	list_for_each_entry_safe(frag, tmp, &segs, list) {
+		list_del_init(&frag->list);
+		rmnet_frag_deliver(frag, port);
+	}
+	return;
+
+recycle:
+	rmnet_recycle_frag_descriptor(frag_desc, port);
+}
+
+/* Notify perf at the end of SKB chain */
+rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_chain_end);
+
+/* Top-level ingress entry for nonlinear HW buffers: deaggregate each skb
+ * in the frag_list chain into frag descriptors, process them, free the
+ * skbs, and finally fire the rmnet_perf chain-end hook (if registered).
+ */
+void rmnet_frag_ingress_handler(struct sk_buff *skb,
+				struct rmnet_port *port)
+{
+	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
+	LIST_HEAD(desc_list);
+
+	/* Deaggregation and freeing of HW originating
+	 * buffers is done within here
+	 */
+	while (skb) {
+		struct sk_buff *skb_frag;
+
+		rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port,
+				       &desc_list);
+		if (!list_empty(&desc_list)) {
+			struct rmnet_frag_descriptor *frag_desc, *tmp;
+
+			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
+						 list) {
+				list_del_init(&frag_desc->list);
+				__rmnet_frag_ingress_handler(frag_desc, port);
+			}
+		}
+
+		/* Detach the chain before freeing so the next iteration
+		 * owns it.
+		 */
+		skb_frag = skb_shinfo(skb)->frag_list;
+		skb_shinfo(skb)->frag_list = NULL;
+		consume_skb(skb);
+		skb = skb_frag;
+	}
+
+	rcu_read_lock();
+	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
+	if (rmnet_perf_opt_chain_end)
+		rmnet_perf_opt_chain_end();
+	rcu_read_unlock();
+}
+
+/* Free the port's descriptor pool and every descriptor on its free list.
+ *
+ * NOTE(review): the free list is walked without desc_pool_lock — assumes
+ * no concurrent pool users remain at teardown time; confirm at call site.
+ */
+void rmnet_descriptor_deinit(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool;
+	struct rmnet_frag_descriptor *frag_desc, *tmp;
+
+	pool = port->frag_desc_pool;
+
+	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
+		kfree(frag_desc);
+		pool->pool_size--;
+	}
+
+	kfree(pool);
+}
+
+/* Allocate the port's descriptor pool and pre-populate its free list with
+ * RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors. Returns 0 or -ENOMEM.
+ *
+ * NOTE(review): on a mid-loop allocation failure the partially-built pool
+ * is left attached to the port — presumably the caller unwinds via
+ * rmnet_descriptor_deinit(); confirm.
+ */
+int rmnet_descriptor_init(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool;
+	int i;
+
+	spin_lock_init(&port->desc_pool_lock);
+	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
+	if (!pool)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pool->free_list);
+	port->frag_desc_pool = pool;
+
+	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
+		struct rmnet_frag_descriptor *frag_desc;
+
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			return -ENOMEM;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->sub_frags);
+		list_add_tail(&frag_desc->list, &pool->free_list);
+		pool->pool_size++;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h
new file mode 100644
index 0000000..b395c2d
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#ifndef _RMNET_DESCRIPTOR_H_
+#define _RMNET_DESCRIPTOR_H_
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+
+/* Per-port free list of reusable frag descriptors */
+struct rmnet_frag_descriptor_pool {
+	struct list_head free_list;	/* descriptors available for reuse */
+	u32 pool_size;			/* total descriptors owned by the pool */
+};
+
+/* Describes one packet (or coalesced super-packet) held in a page frag,
+ * along with the parsed header metadata needed to rebuild/deliver it.
+ */
+struct rmnet_frag_descriptor {
+	struct list_head list;		/* pool free list / processing lists */
+	struct list_head sub_frags;	/* extra frags chained by rmnet_perf */
+	skb_frag_t frag;		/* page, offset and length of the data */
+	u8 *hdr_ptr;			/* start of the IP/transport headers */
+	struct net_device *dev;		/* egress vnd for delivery */
+	u32 hash;			/* rmnet_perf-provided flow hash */
+	__be32 tcp_seq;			/* replacement seq (if tcp_seq_set) */
+	__be16 ip_id;			/* replacement IPv4 ID (if ip_id_set) */
+	u16 data_offset;		/* bytes already segmented out */
+	u16 gso_size;			/* per-segment payload size */
+	u16 gso_segs;			/* segments in the current burst */
+	u16 ip_len;			/* network header length */
+	u16 trans_len;			/* transport header length */
+	u8 ip_proto;			/* 4 or 6 */
+	u8 trans_proto;			/* IPPROTO_TCP / IPPROTO_UDP */
+	u8 pkt_id;			/* IPv4 ID increment for next segment */
+	u8 csum_valid:1,		/* checksum verified (HW or SW) */
+	   hdrs_valid:1,		/* ip_len/trans_len are populated */
+	   ip_id_set:1,			/* rewrite IPv4 ID on delivery */
+	   tcp_seq_set:1,		/* rewrite TCP seq on delivery */
+	   flush_shs:1,			/* signal SHS flush via skb->cb[0] */
+	   reserved:3;
+};
+
+/* Descriptor management */
+struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port);
+void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+				   struct rmnet_port *port);
+void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+			       struct page *p, u32 page_offset, u32 len);
+int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
+				int start, u8 *nexthdrp, __be16 *fragp);
+
+/* QMAP command packets */
+void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port);
+int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
+			    struct rmnet_port *port, u16 pkt_len);
+
+/* Ingress data handlers */
+void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
+			    struct list_head *list);
+void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_port *port);
+int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port,
+				       struct list_head *list,
+				       u16 len);
+void rmnet_frag_ingress_handler(struct sk_buff *skb,
+				struct rmnet_port *port);
+
+int rmnet_descriptor_init(struct rmnet_port *port);
+void rmnet_descriptor_deinit(struct rmnet_port *port);
+
+/* Kernel virtual address of the descriptor's current data start */
+static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
+{
+	return skb_frag_address(&frag_desc->frag);
+}
+
+/* Advance the descriptor's data start by @size bytes.
+ *
+ * If the pull would consume the entire frag (size >= frag size), the
+ * descriptor is recycled and NULL is returned — callers must not touch
+ * it afterwards. Otherwise returns the new data pointer.
+ */
+static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
+				    struct rmnet_port *port,
+				    unsigned int size)
+{
+	if (size >= skb_frag_size(&frag_desc->frag)) {
+		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
+			__func__, size, skb_frag_size(&frag_desc->frag));
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		return NULL;
+	}
+
+	frag_desc->frag.page_offset += size;
+	skb_frag_size_sub(&frag_desc->frag, size);
+
+	return rmnet_frag_data_ptr(frag_desc);
+}
+
+/* Trim the descriptor's data down to @size bytes (removing padding).
+ *
+ * A zero @size recycles the descriptor and returns NULL — callers must
+ * not touch it afterwards. A @size >= the current frag size is a no-op.
+ * Returns the (unchanged) data pointer on success.
+ */
+static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
+				    struct rmnet_port *port,
+				    unsigned int size)
+{
+	if (!size) {
+		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
+			__func__, skb_frag_size(&frag_desc->frag));
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		return NULL;
+	}
+
+	if (size < skb_frag_size(&frag_desc->frag))
+		skb_frag_size_set(&frag_desc->frag, size);
+
+	return rmnet_frag_data_ptr(frag_desc);
+}
+
+/* Point the descriptor's frag at @len bytes of page @p starting at
+ * @page_offset. Takes a reference on the page.
+ */
+static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
+				   struct page *p, u32 page_offset, u32 len)
+{
+	get_page(p);
+	__skb_frag_set_page(&frag_desc->frag, p);
+	skb_frag_size_set(&frag_desc->frag, len);
+	frag_desc->frag.page_offset = page_offset;
+}
+
+/* Type field of the QMAPv5 header following the main MAP header */
+static inline u8
+rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
+{
+	unsigned char *data = rmnet_frag_data_ptr(frag_desc);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_coal_header *)data)->header_type;
+}
+
+/* HW checksum-valid bit from the QMAPv5 csum-offload header */
+static inline bool
+rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
+{
+	unsigned char *data = rmnet_frag_data_ptr(frag_desc);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
+}
+
+#endif /* _RMNET_DESCRIPTOR_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index d3cec34..ad9453b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -17,6 +17,7 @@
 #include "rmnet_vnd.h"
 #include "rmnet_map.h"
 #include "rmnet_handlers.h"
+#include "rmnet_descriptor.h"
 
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
@@ -78,6 +79,20 @@ void rmnet_set_skb_proto(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(rmnet_set_skb_proto);
 
+bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_slow_start_detect);
+
+/* Ask the SHS module (if loaded) whether the flow identified by @hash_key
+ * is in slow start. Returns false when no hook is registered.
+ *
+ * rcu_dereference() of an __rcu hook pointer must be done under
+ * rcu_read_lock(), matching the other hook call sites in this file.
+ */
+bool rmnet_slow_start_on(u32 hash_key)
+{
+	bool (*rmnet_shs_slow_start_on)(u32 hash_key);
+	bool ret = false;
+
+	rcu_read_lock();
+	rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
+	if (rmnet_shs_slow_start_on)
+		ret = rmnet_shs_slow_start_on(hash_key);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_slow_start_on);
+
 /* Shs hook handler */
 
 int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
@@ -106,11 +121,14 @@ rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
 	skb->pkt_type = PACKET_HOST;
 	skb_set_mac_header(skb, 0);
 
+	rcu_read_lock();
 	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
 	if (rmnet_shs_stamp) {
 		rmnet_shs_stamp(skb, port);
+		rcu_read_unlock();
 		return;
 	}
+	rcu_read_unlock();
 
 	if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
 		if (!rmnet_check_skb_can_gro(skb) &&
@@ -151,12 +169,15 @@ rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
 	/* packets coming from work queue context due to packet flush timer
 	 * must go through the special workqueue path in SHS driver
 	 */
+	rcu_read_lock();
 	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
 				   rcu_dereference(rmnet_shs_skb_entry_wq);
 	if (rmnet_shs_stamp) {
 		rmnet_shs_stamp(skb, port);
+		rcu_read_unlock();
 		return;
 	}
+	rcu_read_unlock();
 
 	if (ctx == RMNET_NET_RX_CTX) {
 		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
@@ -294,6 +315,14 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 		skb_push(skb, ETH_HLEN);
 	}
 
+	if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				 RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
+		if (skb_is_nonlinear(skb)) {
+			rmnet_frag_ingress_handler(skb, port);
+			return;
+		}
+	}
+
 	/* No aggregation. Pass the frame on as is */
 	if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
 		__rmnet_map_ingress_handler(skb, port);
@@ -301,11 +330,14 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 	}
 
 	/* Pass off handling to rmnet_perf module, if present */
+	rcu_read_lock();
 	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
 	if (rmnet_perf_core_deaggregate) {
 		rmnet_perf_core_deaggregate(skb, port);
+		rcu_read_unlock();
 		return;
 	}
+	rcu_read_unlock();
 
 	/* Deaggregation and freeing of HW originating
 	 * buffers is done within here
@@ -361,26 +393,17 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	if (csum_type)
 		rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
 
-	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
+	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
+					      port);
 	if (!map_header)
 		return -ENOMEM;
 
 	map_header->mux_id = mux_id;
 
 	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
-		int non_linear_skb;
-
 		if (rmnet_map_tx_agg_skip(skb, required_headroom))
 			goto done;
 
-		non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
-				 skb_is_nonlinear(skb);
-
-		if (non_linear_skb) {
-			if (unlikely(__skb_linearize(skb)))
-				goto done;
-		}
-
 		rmnet_map_tx_aggregate(skb, port);
 		return -EINPROGRESS;
 	}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index 09a2954..da43970 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -20,6 +20,7 @@ void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
 void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
 			  enum rmnet_packet_context ctx);
 void rmnet_set_skb_proto(struct sk_buff *skb);
+bool rmnet_slow_start_on(u32 hash_key);
 rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
 					       struct rmnet_port *port);
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 03ce4f3..463eeef 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -118,10 +118,12 @@ struct rmnet_map_ul_csum_header {
 } __aligned(1);
 
 struct rmnet_map_control_command_header {
-	u8  command_name;
-	u8  cmd_type:2;
-	u8  reserved:6;
-	u16 reserved2;
+	u8 command_name;
+	u8 cmd_type:2;
+	u8 reserved:5;
+	u8 e:1;
+	u16 source_id:15;
+	u16 ext:1;
 	u32 transaction_id;
 }  __aligned(1);
 
@@ -167,8 +169,20 @@ struct rmnet_map_dl_ind_trl {
 
 struct rmnet_map_dl_ind {
 	u8 priority;
-	void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
-	void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
+	union {
+		void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
+		void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
+					  struct
+					  rmnet_map_control_command_header
+					  * qcmd);
+	} __aligned(1);
+	union {
+		void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
+		void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
+					  struct
+					  rmnet_map_control_command_header
+					  * qcmd);
+	} __aligned(1);
 	struct list_head list;
 };
 
@@ -232,12 +246,14 @@ static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
-						  int hdrlen, int pad);
+						  int hdrlen, int pad,
+						  struct rmnet_port *port);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				      struct net_device *orig_dev,
 				      int csum_type);
+bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
 				      struct sk_buff_head *list,
 				      u16 len);
@@ -245,6 +261,16 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
 void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
+			     struct rmnet_map_dl_ind_hdr *dl_hdr);
+void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+				struct rmnet_map_dl_ind_hdr *dl_hdr,
+				struct rmnet_map_control_command_header *qcmd);
+void rmnet_map_dl_trl_notify(struct rmnet_port *port,
+			     struct rmnet_map_dl_ind_trl *dltrl);
+void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
+				struct rmnet_map_dl_ind_trl *dltrl,
+				struct rmnet_map_control_command_header *qcmd);
 int rmnet_map_flow_command(struct sk_buff *skb,
 			   struct rmnet_port *port,
 			   bool rmnet_perf);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 33da4bf..8269e6b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -87,8 +87,22 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 	netif_tx_unlock(dev);
 }
 
-static  void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
-				     struct rmnet_map_dl_ind_hdr *dlhdr)
+void
+rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+			   struct rmnet_map_dl_ind_hdr *dlhdr,
+			   struct rmnet_map_control_command_header *qcmd)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	port->dl_marker_flush = 0;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_hdr_handler_v2(dlhdr, qcmd);
+}
+
+void
+rmnet_map_dl_hdr_notify(struct rmnet_port *port,
+			struct rmnet_map_dl_ind_hdr *dlhdr)
 {
 	struct rmnet_map_dl_ind *tmp;
 
@@ -98,8 +112,28 @@ static  void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
 		tmp->dl_hdr_handler(dlhdr);
 }
 
-static  void rmnet_map_dl_trl_notify(struct rmnet_port *port,
-				     struct rmnet_map_dl_ind_trl *dltrl)
+void
+rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
+			   struct rmnet_map_dl_ind_trl *dltrl,
+			   struct rmnet_map_control_command_header *qcmd)
+{
+	struct rmnet_map_dl_ind *tmp;
+	struct napi_struct *napi;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_trl_handler_v2(dltrl, qcmd);
+
+	if (port->dl_marker_flush) {
+		napi = get_current_napi_context();
+		napi_gro_flush(napi, false);
+	}
+
+	port->dl_marker_flush = -1;
+}
+
+void
+rmnet_map_dl_trl_notify(struct rmnet_port *port,
+			struct rmnet_map_dl_ind_trl *dltrl)
 {
 	struct rmnet_map_dl_ind *tmp;
 	struct napi_struct *napi;
@@ -120,11 +154,26 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
 					 bool rmnet_perf)
 {
 	struct rmnet_map_dl_ind_hdr *dlhdr;
+	struct rmnet_map_control_command_header *qcmd;
+	u32 data_format;
+	bool is_dl_mark_v2;
 
 	if (skb->len < RMNET_DL_IND_HDR_SIZE)
 		return;
 
-	pskb_pull(skb, RMNET_MAP_CMD_SIZE);
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	if (is_dl_mark_v2) {
+		pskb_pull(skb, sizeof(struct rmnet_map_header));
+		qcmd = (struct rmnet_map_control_command_header *)
+			rmnet_map_data_ptr(skb);
+		port->stats.dl_hdr_last_ep_id = qcmd->source_id;
+		port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
+		port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
+		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
+	} else {
+		pskb_pull(skb, RMNET_MAP_CMD_SIZE);
+	}
 
 	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);
 
@@ -136,12 +185,16 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
 	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
 	port->stats.dl_hdr_count++;
 
-	rmnet_map_dl_hdr_notify(port, dlhdr);
+	if (is_dl_mark_v2)
+		rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);
+	else
+		rmnet_map_dl_hdr_notify(port, dlhdr);
+
 	if (rmnet_perf) {
 		unsigned int pull_size;
 
 		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
-		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
 		pskb_pull(skb, pull_size);
 	}
@@ -152,23 +205,39 @@ static void rmnet_map_process_flow_end(struct sk_buff *skb,
 				       bool rmnet_perf)
 {
 	struct rmnet_map_dl_ind_trl *dltrl;
+	struct rmnet_map_control_command_header *qcmd;
+	u32 data_format;
+	bool is_dl_mark_v2;
 
 	if (skb->len < RMNET_DL_IND_TRL_SIZE)
 		return;
 
-	pskb_pull(skb, RMNET_MAP_CMD_SIZE);
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	if (is_dl_mark_v2) {
+		pskb_pull(skb, sizeof(struct rmnet_map_header));
+		qcmd = (struct rmnet_map_control_command_header *)
+			rmnet_map_data_ptr(skb);
+		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
+	} else {
+		pskb_pull(skb, RMNET_MAP_CMD_SIZE);
+	}
 
 	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);
 
 	port->stats.dl_trl_last_seq = dltrl->seq_le;
 	port->stats.dl_trl_count++;
 
-	rmnet_map_dl_trl_notify(port, dltrl);
+	if (is_dl_mark_v2)
+		rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);
+	else
+		rmnet_map_dl_trl_notify(port, dltrl);
+
 	if (rmnet_perf) {
 		unsigned int pull_size;
 
 		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
-		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
 		pskb_pull(skb, pull_size);
 	}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 85aa855..6c55057 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -279,10 +279,10 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
  * initialized to 0.
  */
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
-						  int hdrlen, int pad)
+						  int hdrlen, int pad,
+						  struct rmnet_port *port)
 {
 	struct rmnet_map_header *map_header;
-	struct rmnet_port *port = rmnet_get_port(skb->dev);
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
@@ -542,6 +542,29 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 	}
 }
 
+bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
+{
+	/* Only applies to frames with a single packet */
+	if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
+		return false;
+
+	/* TCP header has FIN or PUSH set */
+	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
+		return true;
+
+	/* Hit packet limit, byte limit, or time limit/EOF on DMA */
+	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
+		switch (coal_hdr->close_value) {
+		case RMNET_MAP_COAL_CLOSE_HW_PKT:
+		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
+		case RMNET_MAP_COAL_CLOSE_HW_TIME:
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void rmnet_map_move_headers(struct sk_buff *skb)
 {
 	struct iphdr *iph;
@@ -659,7 +682,7 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
 	}
 
 	skb->ip_summed = CHECKSUM_PARTIAL;
-	skb->csum_start = skb_transport_header(skb) - skb->head;
+	skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
 	shinfo->gso_size = coal_meta->data_len;
 	shinfo->gso_segs = coal_meta->pkt_count;
 }
@@ -680,7 +703,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 		alloc_len = coal_meta->ip_len + coal_meta->trans_len;
 	else
 		alloc_len = coal_meta->ip_len + coal_meta->trans_len +
-			    coal_meta->data_len;
+			    (coal_meta->data_len * coal_meta->pkt_count);
 
 	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
 	if (!skbn)
@@ -734,6 +757,34 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 	coal_meta->pkt_count = 0;
 }
 
+static bool rmnet_map_validate_csum(struct sk_buff *skb,
+				    struct rmnet_map_coal_metadata *meta)
+{
+	u8 *data = rmnet_map_data_ptr(skb);
+	unsigned int datagram_len;
+	__wsum csum;
+	__sum16 pseudo;
+
+	datagram_len = skb->len - meta->ip_len;
+	if (meta->ip_proto == 4) {
+		struct iphdr *iph = (struct iphdr *)data;
+
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    datagram_len,
+					    meta->trans_proto, 0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
+
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  datagram_len, meta->trans_proto,
+					  0);
+	}
+
+	csum = skb_checksum(skb, meta->ip_len, datagram_len,
+			    csum_unfold(pseudo));
+	return !csum_fold(csum);
+}
+
 /* Converts the coalesced SKB into a list of SKBs.
  * NLOs containing csum erros will not be included.
  * The original coalesced SKB should be treated as invalid and
@@ -817,6 +868,17 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 		return;
 	}
 
+	if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+		rmnet_map_move_headers(coal_skb);
+		/* Mark as valid if it checks out */
+		if (rmnet_map_validate_csum(coal_skb, &coal_meta))
+			coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		__skb_queue_tail(list, coal_skb);
+		return;
+	}
+
+	/* Segment the coalesced SKB into new packets */
 	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
 		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
 		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
@@ -1097,13 +1159,45 @@ enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
+static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
+{
+	unsigned int linear = src->len - src->data_len, target = src->len;
+	unsigned char *src_buf;
+	struct sk_buff *skb;
+
+	src_buf = src->data;
+	skb_put_data(dst, src_buf, linear);
+	target -= linear;
+
+	skb = src;
+
+	while (target) {
+		unsigned int i = 0, non_linear = 0;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+			src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+
+			skb_put_data(dst, src_buf, non_linear);
+			target -= non_linear;
+		}
+
+		if (skb_shinfo(skb)->frag_list) {
+			skb = skb_shinfo(skb)->frag_list;
+			continue;
+		}
+
+		if (skb->next)
+			skb = skb->next;
+	}
+}
+
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 {
 	struct timespec diff, last;
 	int size, agg_count = 0;
 	struct sk_buff *agg_skb;
 	unsigned long flags;
-	u8 *dest_buff;
 
 new_packet:
 	spin_lock_irqsave(&port->agg_lock, flags);
@@ -1125,7 +1219,8 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 			return;
 		}
 
-		port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+		port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
+					  GFP_ATOMIC);
 		if (!port->agg_skb) {
 			port->agg_skb = 0;
 			port->agg_count = 0;
@@ -1135,6 +1230,8 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 			dev_queue_xmit(skb);
 			return;
 		}
+		rmnet_map_linearize_copy(port->agg_skb, skb);
+		port->agg_skb->dev = skb->dev;
 		port->agg_skb->protocol = htons(ETH_P_MAP);
 		port->agg_count = 1;
 		getnstimeofday(&port->agg_time);
@@ -1159,8 +1256,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 		goto new_packet;
 	}
 
-	dest_buff = skb_put(port->agg_skb, skb->len);
-	memcpy(dest_buff, skb->data, skb->len);
+	rmnet_map_linearize_copy(port->agg_skb, skb);
 	port->agg_count++;
 	dev_kfree_skb_any(skb);
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index c30815b..7d45d482 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_PRIVATE_H_
@@ -12,13 +12,19 @@
 
 /* Constants */
 #define RMNET_EGRESS_FORMAT_AGGREGATION         BIT(31)
-#define RMNET_INGRESS_FORMAT_DL_MARKER          BIT(30)
-#define RMNET_INGRESS_FORMAT_RPS_STAMP          BIT(29)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V1       BIT(30)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V2       BIT(29)
+
+#define RMNET_INGRESS_FORMAT_DL_MARKER  (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
+RMNET_INGRESS_FORMAT_DL_MARKER_V2)
 
 /* Power save feature*/
 #define RMNET_INGRESS_FORMAT_PS                 BIT(27)
 #define RMNET_FORMAT_PS_NOTIF                   BIT(26)
 
+/* Task boost */
+#define RMNET_FORMAT_TASK_BOOST                 BIT(25)
+
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
 /* Pass the frame directly to another device with dev_queue_xmit() */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index d148845..b7e1616 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -8,6 +8,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
 #include <linux/ip.h>
+#include <linux/sched.h>
 #include <net/pkt_sched.h>
 #include "rmnet_config.h"
 #include "rmnet_handlers.h"
@@ -19,6 +20,9 @@
 #include <soc/qcom/qmi_rmnet.h>
 #include <soc/qcom/rmnet_qmi.h>
 
+/* Task boost time in ms */
+#define RMNET_TASK_BOOST_PERIOD 10000
+
 /* RX/TX Fixup */
 
 void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
@@ -161,6 +165,9 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev,
 	struct rmnet_priv *priv = netdev_priv(dev);
 	int txq = 0;
 
+	if (READ_ONCE(rmnet_sched_boost))
+		set_task_boost(1, RMNET_TASK_BOOST_PERIOD);
+
 	if (priv->real_dev)
 		txq = qmi_rmnet_get_queue(dev, skb);
 
@@ -214,6 +221,9 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
 };
 
 static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"MAP Cmd last version",
+	"MAP Cmd last ep id",
+	"MAP Cmd last transaction id",
 	"DL header last seen sequence",
 	"DL header last seen bytes",
 	"DL header last seen packets",
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 70cce63..696037d 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	sp = netdev_priv(dev);
 
 	/* Make private data page aligned */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356..3dfb07a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+					   int bfsize)
 {
-	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-			<< ERDES1_BUFFER2_SIZE_SHIFT)
-		   & ERDES1_BUFFER2_SIZE_MASK);
+	if (bfsize == BUF_SIZE_16KiB)
+		p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+				<< ERDES1_BUFFER2_SIZE_SHIFT)
+			   & ERDES1_BUFFER2_SIZE_MASK);
 
 	if (end)
 		p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-	p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-				<< RDES1_BUFFER2_SIZE_SHIFT)
-		    & RDES1_BUFFER2_SIZE_MASK);
+	if (bfsize >= BUF_SIZE_2KiB) {
+		int bfsize2;
+
+		bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+		p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+			    & RDES1_BUFFER2_SIZE_MASK);
+	}
 
 	if (end)
 		p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 0f660af..49a896a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1015,6 +1015,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 	mac->mac = &sun8i_dwmac_ops;
 	mac->dma = &sun8i_dwmac_dma_ops;
 
+	priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
 	/* The loopback bit seems to be re-set when link change
 	 * Simply mask it each time
 	 * Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 736e296..313a58b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				   int mode, int end)
+				   int mode, int end, int bfsize)
 {
 	dwmac4_set_rx_owner(p, disable_rx_ic);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fd..98fa471 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
 	dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a7..5202d6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	if (unlikely(rdes0 & RDES0_OWN))
 		return dma_own;
 
+	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
 	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
 		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
 			x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	 * It doesn't match with the information reported into the databook.
 	 * At any rate, we need to understand if the CSUM hw computation is ok
 	 * and report this info to the upper layers. */
-	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-				 !!(rdes0 & RDES0_FRAME_TYPE),
-				 !!(rdes0 & ERDES0_RX_MAC_ADDR));
+	if (likely(ret == good_frame))
+		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+					 !!(rdes0 & RDES0_FRAME_TYPE),
+					 !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
 	if (unlikely(rdes0 & RDES0_DRIBBLING))
 		x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_rx_set_on_chain(p);
 	else
-		ehn_desc_rx_set_on_ring(p, end);
+		ehn_desc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944..5bb0023 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
 	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-			int end);
+			int end, int bfsize);
 	/* DMA TX descriptor ring initialization */
 	void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
 	/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb2..6d69067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		return dma_own;
 
 	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-		pr_warn("%s: Oversized frame spanned multiple buffers\n",
-			__func__);
 		stats->rx_length_errors++;
 		return discard_frame;
 	}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-			       int end)
+			       int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
 	else
-		ndesc_rx_set_on_ring(p, end);
+		ndesc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 886176b..5debe93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1111,11 +1111,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1));
+					(i == DMA_RX_SIZE - 1),
+					priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1));
+					(i == DMA_RX_SIZE - 1),
+					priv->dma_buf_sz);
 }
 
 /**
@@ -2595,8 +2597,6 @@ static int stmmac_open(struct net_device *dev)
 	u32 chan;
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
 	    priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3197,14 +3197,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
 				csum_insertion, priv->mode, 1, last_segment,
 				skb->len);
-
-		/* The own bit must be the latest setting done when prepare the
-		 * descriptor and then barrier is needed to make sure that
-		 * all is coherent before granting the DMA engine.
-		 */
-		wmb();
+	} else {
+		stmmac_set_tx_owner(priv, first);
 	}
 
+	/* The own bit must be the latest setting done when prepare the
+	 * descriptor and then barrier is needed to make sure that
+	 * all is coherent before granting the DMA engine.
+	 */
+	wmb();
+
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3331,9 +3333,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
-	unsigned int entry = rx_q->cur_rx;
+	unsigned int next_entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
-	unsigned int next_entry;
 	unsigned int count = 0;
 	bool xmac;
 
@@ -3351,10 +3352,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
-		int status;
+		int entry, status;
 		struct dma_desc *p;
 		struct dma_desc *np;
 
+		entry = next_entry;
+
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
@@ -3410,11 +3413,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 *  ignored
 			 */
 			if (frame_len > priv->dma_buf_sz) {
-				netdev_err(priv->dev,
-					   "len %d larger than size (%d)\n",
-					   frame_len, priv->dma_buf_sz);
+				if (net_ratelimit())
+					netdev_err(priv->dev,
+						   "len %d larger than size (%d)\n",
+						   frame_len, priv->dma_buf_sz);
 				priv->dev->stats.rx_length_errors++;
-				break;
+				continue;
 			}
 
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3449,7 +3453,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 						dev_warn(priv->device,
 							 "packet dropped\n");
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 
 				dma_sync_single_for_cpu(priv->device,
@@ -3469,11 +3473,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			} else {
 				skb = rx_q->rx_skbuff[entry];
 				if (unlikely(!skb)) {
-					netdev_err(priv->dev,
-						   "%s: Inconsistent Rx chain\n",
-						   priv->dev->name);
+					if (net_ratelimit())
+						netdev_err(priv->dev,
+							   "%s: Inconsistent Rx chain\n",
+							   priv->dev->name);
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 				prefetch(skb->data - NET_IP_ALIGN);
 				rx_q->rx_skbuff[entry] = NULL;
@@ -3508,7 +3513,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
 		}
-		entry = next_entry;
 	}
 
 	stmmac_rx_refill(priv, queue);
@@ -4296,6 +4300,8 @@ int stmmac_dvr_probe(struct device *device,
 	if (ret)
 		goto error_hw_init;
 
+	stmmac_check_ether_addr(priv);
+
 	/* Configure real RX and TX queues */
 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8e..cc1e887 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 		},
 		.driver_data = (void *)&galileo_stmmac_dmi_data,
 	},
+	/*
+	 * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+	 * has only one pci network device while other asset tags are
+	 * for IOT2040 which has two.
+	 */
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-					"6ES7647-0AA00-1YA2"),
 		},
 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
 	},
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 72b98e2..d177dfd 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3655,12 +3655,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 
 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
-	if (ret)
+	if (ret) {
+		of_node_put(interfaces);
 		return ret;
+	}
 
 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
-	if (ret)
+	if (ret) {
+		of_node_put(interfaces);
 		return ret;
+	}
 
 	/* Create network interfaces */
 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f24f48f..7cfd7ff 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1574,12 +1574,14 @@ static int axienet_probe(struct platform_device *pdev)
 	ret = of_address_to_resource(np, 0, &dmares);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to get DMA resource\n");
+		of_node_put(np);
 		goto free_netdev;
 	}
 	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
 	if (IS_ERR(lp->dma_regs)) {
 		dev_err(&pdev->dev, "could not map DMA regs\n");
 		ret = PTR_ERR(lp->dma_regs);
+		of_node_put(np);
 		goto free_netdev;
 	}
 	lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fb12b63..3541304 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -872,12 +872,6 @@ static inline int netvsc_send_pkt(
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		ndev_ctx->eth_stats.stop_queue++;
-		if (atomic_read(&nvchan->queue_sends) < 1 &&
-		    !net_device->tx_disable) {
-			netif_tx_wake_queue(txq);
-			ndev_ctx->eth_stats.wake_queue++;
-			ret = -ENOSPC;
-		}
 	} else {
 		netdev_err(ndev,
 			   "Unable to send packet pages %u len %u, ret %d\n",
@@ -885,6 +879,15 @@ static inline int netvsc_send_pkt(
 			   ret);
 	}
 
+	if (netif_tx_queue_stopped(txq) &&
+	    atomic_read(&nvchan->queue_sends) < 1 &&
+	    !net_device->tx_disable) {
+		netif_tx_wake_queue(txq);
+		ndev_ctx->eth_stats.wake_queue++;
+		if (ret == -EAGAIN)
+			ret = -ENOSPC;
+	}
+
 	return ret;
 }
 
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd1d8fa..cd6b95e 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
 	INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
 	lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
 					     WQ_MEM_RECLAIM);
+	if (unlikely(!lp->wqueue)) {
+		ret = -ENOMEM;
+		goto err_hw_init;
+	}
 
 	ret = adf7242_hw_init(lp);
 	if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 624bff4..f1ed174 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -332,7 +332,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
 			goto out_err;
 		}
 
-		genlmsg_reply(skb, info);
+		res = genlmsg_reply(skb, info);
 		break;
 	}
 
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 04891429..fe4057fc 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -539,6 +539,8 @@ mcr20a_start(struct ieee802154_hw *hw)
 	dev_dbg(printdev(lp), "no slotted operation\n");
 	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
 				 DAR_PHY_CTRL1_SLOTTED, 0x0);
+	if (ret < 0)
+		return ret;
 
 	/* enable irq */
 	enable_irq(lp->spi->irq);
@@ -546,11 +548,15 @@ mcr20a_start(struct ieee802154_hw *hw)
 	/* Unmask SEQ interrupt */
 	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
 				 DAR_PHY_CTRL2_SEQMSK, 0x0);
+	if (ret < 0)
+		return ret;
 
 	/* Start the RX sequence */
 	dev_dbg(printdev(lp), "start the RX sequence\n");
 	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
 				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 73813c7..bb6107f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1513,9 +1513,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
 
 static void marvell_get_strings(struct phy_device *phydev, u8 *data)
 {
+	int count = marvell_get_sset_count(phydev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+	for (i = 0; i < count; i++) {
 		strlcpy(data + i * ETH_GSTRING_LEN,
 			marvell_hw_stats[i].string, ETH_GSTRING_LEN);
 	}
@@ -1543,9 +1544,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 static void marvell_get_stats(struct phy_device *phydev,
 			      struct ethtool_stats *stats, u64 *data)
 {
+	int count = marvell_get_sset_count(phydev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+	for (i = 0; i < count; i++)
 		data[i] = marvell_get_stat(phydev, i);
 }
 
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f17b344..d8ea414 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -162,6 +162,14 @@ static const struct spi_device_id ks8995_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, ks8995_id);
 
+static const struct of_device_id ks8895_spi_of_match[] = {
+        { .compatible = "micrel,ks8995" },
+        { .compatible = "micrel,ksz8864" },
+        { .compatible = "micrel,ksz8795" },
+        { },
+ };
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
 static inline u8 get_chip_id(u8 val)
 {
 	return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -529,6 +537,7 @@ static int ks8995_remove(struct spi_device *spi)
 static struct spi_driver ks8995_driver = {
 	.driver = {
 		.name	    = "spi-ks8995",
+		.of_match_table = of_match_ptr(ks8895_spi_of_match),
 	},
 	.probe	  = ks8995_probe,
 	.remove	  = ks8995_remove,
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index b5edc7f..685e875 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
 
 static int __init deflate_init(void)
 {
-        int answer = ppp_register_compressor(&ppp_deflate);
-        if (answer == 0)
-                printk(KERN_INFO
-		       "PPP Deflate Compression module registered\n");
-	ppp_register_compressor(&ppp_deflate_draft);
-        return answer;
+	int rc;
+
+	rc = ppp_register_compressor(&ppp_deflate);
+	if (rc)
+		return rc;
+
+	rc = ppp_register_compressor(&ppp_deflate_draft);
+	if (rc) {
+		ppp_unregister_compressor(&ppp_deflate);
+		return rc;
+	}
+
+	pr_info("PPP Deflate Compression module registered\n");
+	return 0;
 }
 
 static void __exit deflate_cleanup(void)
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f4e93f5..ea90db3 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ slhc_init(int rslots, int tslots)
 void
 slhc_free(struct slcompress *comp)
 {
-	if ( comp == NULLSLCOMPR )
+	if ( IS_ERR_OR_NULL(comp) )
 		return;
 
 	if ( comp->tstate != NULLSLSTATE )
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 95ee9d8..6c6230b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1160,6 +1160,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 		return -EINVAL;
 	}
 
+	if (netdev_has_upper_dev(dev, port_dev)) {
+		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+			   portname);
+		return -EBUSY;
+	}
+
 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
 	    vlan_uses_dev(dev)) {
 		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1250,6 +1257,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 		goto err_option_port_add;
 	}
 
+	/* set promiscuity level to new slave */
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(port_dev, 1);
+		if (err)
+			goto err_set_slave_promisc;
+	}
+
+	/* set allmulti level to new slave */
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(port_dev, 1);
+		if (err) {
+			if (dev->flags & IFF_PROMISC)
+				dev_set_promiscuity(port_dev, -1);
+			goto err_set_slave_promisc;
+		}
+	}
+
 	netif_addr_lock_bh(dev);
 	dev_uc_sync_multiple(port_dev, dev);
 	dev_mc_sync_multiple(port_dev, dev);
@@ -1266,6 +1290,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
 	return 0;
 
+err_set_slave_promisc:
+	__team_option_inst_del_port(team, port);
+
 err_option_port_add:
 	team_upper_dev_unlink(team, port);
 
@@ -1311,6 +1338,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
 	team_port_disable(team, port);
 	list_del_rcu(&port->list);
+
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(port_dev, -1);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(port_dev, -1);
+
 	team_upper_dev_unlink(team, port);
 	netdev_rx_handler_unregister(port_dev);
 	team_port_disable_netpoll(port);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a8e791a..cb787a9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -599,13 +599,18 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 {
 	struct tun_prog *prog;
+	u32 numqueues;
 	u16 ret = 0;
 
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues)
+		return 0;
+
 	prog = rcu_dereference(tun->steering_prog);
 	if (prog)
 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
 
-	return ret % tun->numqueues;
+	return ret % numqueues;
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -703,6 +708,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 				   tun->tfiles[tun->numqueues - 1]);
 		ntfile = rtnl_dereference(tun->tfiles[index]);
 		ntfile->queue_index = index;
+		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+				   NULL);
 
 		--tun->numqueues;
 		if (clean) {
@@ -1085,7 +1092,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	tfile = rcu_dereference(tun->tfiles[txq]);
 
 	/* Drop packet if interface is not attached */
-	if (txq >= tun->numqueues)
+	if (!tfile)
 		goto drop;
 
 	if (!rcu_dereference(tun->steering_prog))
@@ -1276,6 +1283,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
 
 	rcu_read_lock();
 
+resample:
 	numqueues = READ_ONCE(tun->numqueues);
 	if (!numqueues) {
 		rcu_read_unlock();
@@ -1284,6 +1292,8 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
 
 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
 					    numqueues]);
+	if (unlikely(!tfile))
+		goto resample;
 
 	spin_lock(&tfile->tx_ring.producer_lock);
 	for (i = 0; i < n; i++) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9195f34..d9a6699 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
 
 enum qmi_wwan_quirks {
 	QMI_WWAN_QUIRK_DTR = 1 << 0,	/* needs "set DTR" request */
+	QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1,	/* check num. endpoints */
 };
 
 struct qmimux_hdr {
@@ -845,6 +846,16 @@ static const struct driver_info	qmi_wwan_info_quirk_dtr = {
 	.data           = QMI_WWAN_QUIRK_DTR,
 };
 
+static const struct driver_info	qmi_wwan_info_quirk_quectel_dyncfg = {
+	.description	= "WWAN/QMI device",
+	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
+	.bind		= qmi_wwan_bind,
+	.unbind		= qmi_wwan_unbind,
+	.manage_power	= qmi_wwan_manage_power,
+	.rx_fixup       = qmi_wwan_rx_fixup,
+	.data           = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+};
+
 #define HUAWEI_VENDOR_ID	0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -865,6 +876,15 @@ static const struct driver_info	qmi_wwan_info_quirk_dtr = {
 #define QMI_GOBI_DEVICE(vend, prod) \
 	QMI_FIXED_INTF(vend, prod, 0)
 
+/* Quectel does not use fixed interface numbers on at least some of their
+ * devices. We need to check the number of endpoints to ensure that we bind to
+ * the correct interface.
+ */
+#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+	USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+				      USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+	.driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+
 static const struct usb_device_id products[] = {
 	/* 1. CDC ECM like devices match on the control interface */
 	{	/* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
 		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
 		.driver_info = (unsigned long)&qmi_wwan_info,
 	},
-	{	/* Quectel EP06/EG06/EM06 */
-		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
-					      USB_CLASS_VENDOR_SPEC,
-					      USB_SUBCLASS_VENDOR_SPEC,
-					      0xff),
-		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
-	},
-	{	/* Quectel EG12/EM12 */
-		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
-					      USB_CLASS_VENDOR_SPEC,
-					      USB_SUBCLASS_VENDOR_SPEC,
-					      0xff),
-		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
-	},
+	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
+	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
+	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1122,9 +1131,16 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x0846, 0x68d3, 8)},	/* Netgear Aircard 779S */
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+	{QMI_FIXED_INTF(0x1435, 0x0918, 3)},	/* Wistron NeWeb D16Q1 */
+	{QMI_FIXED_INTF(0x1435, 0x0918, 4)},	/* Wistron NeWeb D16Q1 */
+	{QMI_FIXED_INTF(0x1435, 0x0918, 5)},	/* Wistron NeWeb D16Q1 */
+	{QMI_FIXED_INTF(0x1435, 0x3185, 4)},	/* Wistron NeWeb M18Q5 */
+	{QMI_FIXED_INTF(0x1435, 0xd111, 4)},	/* M9615A DM11-1 D51QC */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
+	{QMI_FIXED_INTF(0x1435, 0xd182, 4)},	/* Wistron NeWeb D18 */
+	{QMI_FIXED_INTF(0x1435, 0xd182, 5)},	/* Wistron NeWeb D18 */
 	{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
 	{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
@@ -1180,6 +1196,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x0265, 4)},	/* ONDA MT8205 4G LTE */
 	{QMI_FIXED_INTF(0x19d2, 0x0284, 4)},	/* ZTE MF880 */
 	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */
+	{QMI_FIXED_INTF(0x19d2, 0x0396, 3)},	/* ZTE ZM8620 */
 	{QMI_FIXED_INTF(0x19d2, 0x0412, 4)},	/* Telewell TW-LTE 4G */
 	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */
 	{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */
@@ -1200,7 +1217,9 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+	{QMI_FIXED_INTF(0x19d2, 0x1432, 3)},	/* ZTE ME3620 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+	{QMI_FIXED_INTF(0x2001, 0x7e16, 3)},	/* D-Link DWM-221 */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
@@ -1240,6 +1259,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
@@ -1271,7 +1292,6 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)},	/* SIMCom 7100E, 7230E, 7600E ++ */
-	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
@@ -1351,27 +1371,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
 	return false;
 }
 
-static bool quectel_diag_detected(struct usb_interface *intf)
-{
-	struct usb_device *dev = interface_to_usbdev(intf);
-	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
-	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
-	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
-
-	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
-		return false;
-
-	if (id_product == 0x0306 || id_product == 0x0512)
-		return true;
-	else
-		return false;
-}
-
 static int qmi_wwan_probe(struct usb_interface *intf,
 			  const struct usb_device_id *prod)
 {
 	struct usb_device_id *id = (struct usb_device_id *)prod;
 	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+	const struct driver_info *info;
 
 	/* Workaround to enable dynamic IDs.  This disables usbnet
 	 * blacklisting functionality.  Which, if required, can be
@@ -1405,10 +1410,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 	 * we need to match on class/subclass/protocol. These values are
 	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
 	 * different. Ignore the current interface if the number of endpoints
-	 * the number for the diag interface (two).
+	 * equals the number for the diag interface (two).
 	 */
-	if (quectel_diag_detected(intf))
-		return -ENODEV;
+	info = (void *)&id->driver_info;
+
+	if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+		if (desc->bNumEndpoints == 2)
+			return -ENODEV;
+	}
 
 	return usbnet_probe(intf, id);
 }
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 90f9372..f3b1cfa 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5622,7 +5622,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_MCAST_RATE &&
-	    !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+	    !ath10k_mac_vif_chan(arvif->vif, &def)) {
 		band = def.chan->band;
 		rateidx = vif->bss_conf.mcast_rate[band] - 1;
 
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 718856a..735fc1a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -24,6 +24,7 @@
 wil6210-y += p2p.o
 wil6210-y += ftm.o
 wil6210-$(CONFIG_WIL6210_IPA) += ipa.o
+wil6210-y += config.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a4c9cbf..46415bf 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -28,13 +28,14 @@ static struct wiphy_wowlan_support wil_wowlan_support = {
 };
 #endif
 
-static bool country_specific_board_file;
+bool country_specific_board_file;
 module_param(country_specific_board_file, bool, 0444);
 MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)");
 
-static bool ignore_reg_hints = true;
+bool ignore_reg_hints = true;
 module_param(ignore_reg_hints, bool, 0444);
-MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
+MODULE_PARM_DESC(ignore_reg_hints,
+		 " Ignore OTA regulatory hints (Default: true)");
 
 #define CHAN60G(_channel, _flags) {				\
 	.band			= NL80211_BAND_60GHZ,		\
@@ -580,7 +581,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 	memset(&reply, 0, sizeof(reply));
 
 	rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd),
-		      WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
+		      WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2829,8 +2831,10 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
 	.update_ft_ies = wil_cfg80211_update_ft_ies,
 };
 
-static void wil_wiphy_init(struct wiphy *wiphy)
+void wil_wiphy_init(struct wil6210_priv *wil)
 {
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+
 	wiphy->max_scan_ssids = 1;
 	wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
 	wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
@@ -2969,13 +2973,11 @@ struct wil6210_priv *wil_cfg80211_init(struct device *dev)
 		return ERR_PTR(-ENOMEM);
 
 	set_wiphy_dev(wiphy, dev);
-	wil_wiphy_init(wiphy);
-
 	wil = wiphy_to_wil(wiphy);
 	wil->wiphy = wiphy;
 
 	/* default monitor channel */
-	ch = wiphy->bands[NL80211_BAND_60GHZ]->channels;
+	ch = wil_band_60ghz.channels;
 	cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
 
 	return wil;
diff --git a/drivers/net/wireless/ath/wil6210/config.c b/drivers/net/wireless/ath/wil6210/config.c
new file mode 100644
index 0000000..a5788c10
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/config.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#include <linux/firmware.h>
+#include <linux/ctype.h>
+#include "wil6210.h"
+#include "wil_platform.h"
+#include "txrx_edma.h"
+#include "config.h"
+
+#define WIL_CONFIG_MAX_INI_ITEMS 256
+#define WIL_CONFIG_INI_FILE "wigig.ini"
+
+#define WIL_CONFIG_BOOL_MIN 0
+#define WIL_CONFIG_BOOL_MAX 1
+#define WIL_CONFIG_MAX_UNLIMITED UINT_MAX
+#define WIL_CONFIG_BOOL_SIZE sizeof(u8)
+
+/** struct wil_config_file_entry - ini configuration entry
+ *
+ * @name: name of the entry
+ * @value: value of the entry
+ */
+struct wil_config_file_entry {
+	char *name;
+	char *value;
+};
+
+/* bool parameter */
+#define WIL_CONFIG_COUNTRY_BOARD_FILE_NAME "country_specific_board_file"
+
+/* bool parameter */
+#define WIL_CONFIG_REG_HINTS_NAME "ignore_reg_hints"
+
+#define WIL_CONFIG_SCAN_DWELL_TIME_NAME "scan_dwell_time"
+
+#define WIL_CONFIG_SCAN_TIMEOUT_NAME "scan_timeout"
+
+/* bool parameter */
+#define WIL_CONFIG_DEBUG_FW_NAME "debug_fw"
+
+#define WIL_CONFIG_OOB_MODE_NAME "oob_mode"
+#define WIL_CONFIG_OOB_MODE_MIN 0
+#define WIL_CONFIG_OOB_MODE_MAX 2
+
+/* bool parameter */
+#define WIL_CONFIG_NO_FW_RECOVERY_NAME "no_fw_recovery"
+
+#define WIL_CONFIG_MTU_MAX_NAME "mtu_max"
+#define WIL_CONFIG_MTU_MAX_MIN 68
+#define WIL_CONFIG_MTU_MAX_MAX WIL_MAX_ETH_MTU
+
+/* bool parameter */
+#define WIL_CONFIG_RX_ALIGN_2_NAME "align_ip_header"
+
+/* bool parameter */
+#define WIL_CONFIG_RX_LARGE_BUF_NAME "rx_large_buf"
+
+#define WIL_CONFIG_HEADROOM_SIZE_NAME "skb_headroom_size"
+#define WIL_CONFIG_HEADROOM_SIZE_MIN 0
+#define WIL_CONFIG_HEADROOM_SIZE_MAX WIL6210_MAX_HEADROOM_SIZE
+
+#define WIL_CONFIG_BCAST_MCS0_LIMIT_NAME "bcast_mcs0_limit"
+#define WIL_CONFIG_BCAST_MCS0_LIMIT_MIN 0
+#define WIL_CONFIG_BCAST_MCS0_LIMIT_MAX WIL_BCAST_MCS0_LIMIT
+
+#define WIL_CONFIG_BCAST_MCS_NAME "bcast_mcs"
+#define WIL_CONFIG_BCAST_MCS_MIN 1
+#define WIL_CONFIG_BCAST_MCS_MAX WIL_MCS_MAX
+
+#define WIL_CONFIG_N_MSI_NAME "num_of_msi"
+#define WIL_CONFIG_N_MSI_MIN 0
+#define WIL_CONFIG_N_MSI_MAX 3
+
+/* bool parameter */
+#define WIL_CONFIG_FTM_MODE_NAME "factory_test_mode"
+
+#define WIL_CONFIG_MAX_ASSOC_STA_NAME "max_assoc_sta"
+#define WIL_CONFIG_MAX_ASSOC_STA_MIN 1
+#define WIL_CONFIG_MAX_ASSOC_STA_MAX WIL6210_MAX_CID
+
+#define WIL_CONFIG_AGG_WSIZE_NAME "block_ack_window_size"
+#define WIL_CONFIG_AGG_WSIZE_MIN -1
+#define WIL_CONFIG_AGG_WSIZE_MAX 64
+
+/* bool parameter */
+#define WIL_CONFIG_AC_QUEUES_NAME "ac_queues"
+
+/* bool parameter */
+#define WIL_CONFIG_Q_PER_STA_NAME "q_per_sta"
+
+/* bool parameter */
+#define WIL_CONFIG_DROP_IF_FULL_NAME "drop_if_ring_full"
+
+#define WIL_CONFIG_RX_RING_ORDER_NAME "rx_ring_order"
+
+#define WIL_CONFIG_TX_RING_ORDER_NAME "tx_ring_order"
+
+#define WIL_CONFIG_BCAST_RING_ORDER_NAME "bcast_ring_order"
+
+/* configuration for debugfs parameters */
+#define WIL_CONFIG_DISCOVERY_MODE_NAME "discovery_scan"
+#define WIL_CONFIG_DISCOVERY_MODE_MIN 0
+#define WIL_CONFIG_DISCOVERY_MODE_MAX 1
+
+#define WIL_CONFIG_ABFT_LEN_NAME "abft_len"
+#define WIL_CONFIG_ABFT_LEN_MIN 0
+#define WIL_CONFIG_ABFT_LEN_MAX 255
+
+#define WIL_CONFIG_WAKEUP_TRIGGER_NAME "wakeup_trigger"
+#define WIL_CONFIG_WAKEUP_TRIGGER_MIN 0
+#define WIL_CONFIG_WAKEUP_TRIGGER_MAX WMI_WAKEUP_TRIGGER_BCAST
+
+#define WIL_CONFIG_RX_STATUS_ORDER_NAME "rx_status_ring_order"
+
+#define WIL_CONFIG_TX_STATUS_ORDER_NAME "tx_status_ring_order"
+
+#define WIL_CONFIG_RX_BUFF_COUNT_NAME "rx_buff_count"
+
+/* bool parameter */
+#define WIL_CONFIG_AMSDU_EN_NAME "amsdu_en"
+
+#define WIL_CONFIG_BOARD_FILE_NAME "board_file"
+
+#define WIL_CONFIG_SNR_THRESH_NAME "snr_thresh"
+
+#define WIL_CONFIG_FTM_OFFSET_NAME "ftm_offset"
+
+#define WIL_CONFIG_TT_NAME "thermal_throttling"
+
+#define WIL_CONFIG_LED_BLINK_NAME "led_blink"
+
+#define WIL_CONFIG_VR_PROFILE_NAME "vr_profile"
+
+#define WIL_CONFIG_LED_ID_NAME "led_id"
+#define WIL_CONFIG_LED_ID_MIN 0
+#define WIL_CONFIG_LED_ID_MAX 0xF
+
+#define WIL_CONFIG_LED_BLINK_TIME_NAME "led_blink_time"
+
+#define WIL_CONFIG_PS_PROFILE_NAME "ps_profile"
+#define WIL_CONFIG_PS_PROFILE_MIN WMI_PS_PROFILE_TYPE_DEFAULT
+#define WIL_CONFIG_PS_PROFILE_MAX WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS
+
+static int wil_board_file_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count);
+static int wil_snr_thresh_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count);
+static int wil_ftm_offset_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count);
+static int wil_tt_handler(struct wil6210_priv *wil, const char *buf,
+			  size_t count);
+static int wil_led_blink_handler(struct wil6210_priv *wil, const char *buf,
+				 size_t count);
+
+static struct wil_config_entry config_table[] = {
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_COUNTRY_BOARD_FILE_NAME,
+			     wil_ini_param_type_unsigned,
+			     &country_specific_board_file, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_REG_HINTS_NAME,
+			     wil_ini_param_type_unsigned, &ignore_reg_hints, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_DEBUG_FW_NAME,
+			     wil_ini_param_type_unsigned, &debug_fw, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_OOB_MODE_NAME,
+			     wil_ini_param_type_unsigned, &oob_mode, 0,
+			     sizeof(oob_mode), WIL_CONFIG_OOB_MODE_MIN,
+			     WIL_CONFIG_OOB_MODE_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_NO_FW_RECOVERY_NAME,
+			     wil_ini_param_type_unsigned, &no_fw_recovery, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_MTU_MAX_NAME,
+			     wil_ini_param_type_unsigned, &mtu_max, 0,
+			     sizeof(mtu_max), WIL_CONFIG_MTU_MAX_MIN,
+			     WIL_CONFIG_MTU_MAX_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_RX_ALIGN_2_NAME,
+			     wil_ini_param_type_unsigned, &rx_align_2, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_RX_LARGE_BUF_NAME,
+			     wil_ini_param_type_unsigned, &rx_large_buf, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_HEADROOM_SIZE_NAME,
+			     wil_ini_param_type_unsigned, &headroom_size, 0,
+			     sizeof(headroom_size),
+			     WIL_CONFIG_HEADROOM_SIZE_MIN,
+			     WIL_CONFIG_HEADROOM_SIZE_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_N_MSI_NAME,
+			     wil_ini_param_type_unsigned, &n_msi, 0,
+			     sizeof(n_msi), WIL_CONFIG_N_MSI_MIN,
+			     WIL_CONFIG_N_MSI_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_FTM_MODE_NAME,
+			     wil_ini_param_type_unsigned, &ftm_mode, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_MAX_ASSOC_STA_NAME,
+			     wil_ini_param_type_unsigned, &max_assoc_sta, 0,
+			     sizeof(max_assoc_sta),
+			     WIL_CONFIG_MAX_ASSOC_STA_MIN,
+			     WIL_CONFIG_MAX_ASSOC_STA_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_AGG_WSIZE_NAME,
+			     wil_ini_param_type_signed, &agg_wsize, 0,
+			     sizeof(agg_wsize), WIL_CONFIG_AGG_WSIZE_MIN,
+			     WIL_CONFIG_AGG_WSIZE_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_DROP_IF_FULL_NAME,
+			     wil_ini_param_type_unsigned, &drop_if_ring_full,
+			     0, WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_RX_RING_ORDER_NAME,
+			     wil_ini_param_type_unsigned, &rx_ring_order, 0,
+			     sizeof(rx_ring_order), WIL_RING_SIZE_ORDER_MIN,
+			     WIL_RING_SIZE_ORDER_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_TX_RING_ORDER_NAME,
+			     wil_ini_param_type_unsigned, &tx_ring_order, 0,
+			     sizeof(tx_ring_order), WIL_RING_SIZE_ORDER_MIN,
+			     WIL_RING_SIZE_ORDER_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_BCAST_RING_ORDER_NAME,
+			     wil_ini_param_type_unsigned, &bcast_ring_order, 0,
+			     sizeof(bcast_ring_order), WIL_RING_SIZE_ORDER_MIN,
+			     WIL_RING_SIZE_ORDER_MAX),
+
+	/* wil6210_priv fields */
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_DISCOVERY_MODE_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   discovery_mode),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 discovery_mode),
+			     WIL_CONFIG_DISCOVERY_MODE_MIN,
+			     WIL_CONFIG_DISCOVERY_MODE_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_ABFT_LEN_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   abft_len),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 abft_len),
+			     WIL_CONFIG_ABFT_LEN_MIN,
+			     WIL_CONFIG_ABFT_LEN_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_WAKEUP_TRIGGER_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   wakeup_trigger),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 wakeup_trigger),
+			     WIL_CONFIG_WAKEUP_TRIGGER_MIN,
+			     WIL_CONFIG_WAKEUP_TRIGGER_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_RX_STATUS_ORDER_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   rx_status_ring_order),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 rx_status_ring_order),
+			     WIL_SRING_SIZE_ORDER_MIN,
+			     WIL_SRING_SIZE_ORDER_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_TX_STATUS_ORDER_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   tx_status_ring_order),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 tx_status_ring_order),
+			     WIL_SRING_SIZE_ORDER_MIN,
+			     WIL_SRING_SIZE_ORDER_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_RX_BUFF_COUNT_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   rx_buff_id_count),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 rx_buff_id_count),
+			     0, WIL_CONFIG_MAX_UNLIMITED),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_AMSDU_EN_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   amsdu_en),
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
+	WIL_CONFIG_INI_PARAM_WITH_HANDLER(WIL_CONFIG_BOARD_FILE_NAME,
+					  wil_board_file_handler),
+	WIL_CONFIG_INI_PARAM_WITH_HANDLER(WIL_CONFIG_SNR_THRESH_NAME,
+					  wil_snr_thresh_handler),
+	WIL_CONFIG_INI_PARAM_WITH_HANDLER(WIL_CONFIG_FTM_OFFSET_NAME,
+					  wil_ftm_offset_handler),
+	WIL_CONFIG_INI_PARAM_WITH_HANDLER(WIL_CONFIG_TT_NAME,
+					  wil_tt_handler),
+	WIL_CONFIG_INI_PARAM_WITH_HANDLER(WIL_CONFIG_LED_BLINK_NAME,
+					  wil_led_blink_handler),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_LED_ID_NAME,
+			     wil_ini_param_type_unsigned, &led_id, 0,
+			     sizeof(led_id), WIL_CONFIG_LED_ID_MIN,
+			     WIL_CONFIG_LED_ID_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_VR_PROFILE_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   vr_profile),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 vr_profile),
+			     0, U8_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_PS_PROFILE_NAME,
+			     wil_ini_param_type_unsigned, NULL,
+			     WIL_CONFIG_VAR_OFFSET(struct wil6210_priv,
+						   ps_profile),
+			     WIL_CONFIG_VAR_SIZE(struct wil6210_priv,
+						 ps_profile),
+			     WIL_CONFIG_PS_PROFILE_MIN,
+			     WIL_CONFIG_PS_PROFILE_MAX),
+};
+
+/**
+ * find and locate the new line pointer
+ *
+ * @str: pointer to string
+ *
+ * This function returns a pointer to the character after the
+ * occurrence of a new line character. It also modifies the
+ * original string by replacing the '\n' character with the null
+ * character.
+ *
+ * Return: the pointer to the character at new line,
+ *            or NULL if no new line character was found
+ */
+static char *get_next_line(char *str)
+{
+	char c;
+
+	if (!str || *str == '\0')
+		return NULL;
+
+	c = *str;
+	while (c != '\n' && c != '\0' && c != 0xd) {
+		str = str + 1;
+		c = *str;
+	}
+
+	if (c == '\0')
+		return NULL;
+
+	*str = '\0';
+	return str + 1;
+}
+
+/**
+ * trim - Removes leading whitespace from buffer.
+ * @s: The string to be stripped.
+ *
+ * Return: pointer to the first non-whitespace character in
+ * buffer s.
+ */
+static char *trim(char *s)
+{
+	while (isspace(*s))
+		s++;
+
+	return s;
+}
+
+/**
+ * find the configuration item
+ * @file_ini_table: pointer to the parsed configuration table
+ * @entries: number of configuration entries in the table
+ * @name: the configuration parameter to look up
+ * @value: output pointer receiving the parameter's value string
+ *
+ * Return: 0 if the interested configuration is found
+ */
+static int find_cfg_item(struct wil6210_priv *wil,
+			 struct wil_config_file_entry *file_ini_table,
+			 u32 entries, char *name, char **value)
+{
+	u32 i;
+
+	for (i = 0; i < entries; i++) {
+		if (strcmp(file_ini_table[i].name, name) == 0) {
+			*value = file_ini_table[i].value;
+			wil_dbg_misc(wil, "parameter=[%s] value=[%s]\n",
+				     name, *value);
+			return 0;
+		}
+	}
+
+	return -ENODATA;
+}
+
+static int cfg_item_read_unsigned(struct wil6210_priv *wil, void *field,
+				  struct wil_config_entry *entry,
+				  char *value_str)
+{
+	u32 value;
+	int rc;
+	size_t len_value_str = strlen(value_str);
+
+	if (len_value_str > 2 && value_str[0] == '0' && value_str[1] == 'x')
+		/* param value is in hex format */
+		rc = kstrtou32(value_str, 16, &value);
+	else
+		/* param value is in dec format */
+		rc = kstrtou32(value_str, 10, &value);
+
+	if (rc < 0) {
+		wil_err(wil, "Parameter %s invalid\n", entry->name);
+		return -EINVAL;
+	}
+
+	/* Do the range check here for the parameter converted int value,
+	 * overwrite if exceeds from range with min and max values.
+	 */
+	if (value > entry->max_val) {
+		wil_dbg_misc(wil,
+			     "Parameter %s > allowed Maximum [%u > %lu]. Enforcing maximum\n",
+			     entry->name, value, entry->max_val);
+		value = entry->max_val;
+	}
+
+	if (value < entry->min_val) {
+		wil_dbg_misc(wil,
+			     "Parameter %s < allowed Minimum [%u < %lu]. Enforcing Minimum\n",
+			     entry->name, value, entry->min_val);
+		value = entry->min_val;
+	}
+
+	/* Move the variable into the output field */
+	memcpy(field, &value, entry->var_size);
+	wil_dbg_misc(wil, "Parameter %s: Value: %u\n", entry->name, value);
+
+	return 0;
+}
+
+static int cfg_item_read_signed(struct wil6210_priv *wil, void *field,
+				struct wil_config_entry *entry,
+				char *value_str)
+{
+	int value, rc;
+
+	/* convert the value string to a signed integer value */
+	rc = kstrtos32(value_str, 10, &value);
+	if (rc < 0) {
+		wil_err(wil, "Parameter %s invalid\n", entry->name);
+		return -EINVAL;
+	}
+
+	/* Do the range check here for the parameter converted int value,
+	 * overwrite if exceeds from range with min and max values.
+	 */
+	if (value > (int)entry->max_val) {
+		wil_dbg_misc(wil,
+			     "Parameter %s > allowed Maximum [%d > %d]. Enforcing Maximum\n",
+			     entry->name, value, (int)entry->max_val);
+		value = (int32_t)entry->max_val;
+	}
+
+	if (value < (int)entry->min_val) {
+		wil_dbg_misc(wil,
+			     "Parameter %s < allowed Minimum [%d < %d]. Enforcing Minimum\n",
+			     entry->name, value, (int)entry->min_val);
+		value = (int)entry->min_val;
+	}
+
+	/* Move the variable into the output field */
+	memcpy(field, &value, entry->var_size);
+
+	return 0;
+}
+
+static int cfg_item_read_string(struct wil6210_priv *wil, void *field,
+				struct wil_config_entry *entry,
+				char *value_str)
+{
+	int len_value_str;
+
+	wil_dbg_misc(wil, "name = %s, var_size %u\n", entry->name,
+		     entry->var_size);
+
+	len_value_str = strlen(value_str);
+	if (entry->handler)
+		return entry->handler(wil, value_str, len_value_str);
+
+	if (len_value_str > (entry->var_size - 1)) {
+		wil_err(wil, "too long value=[%s] specified for param=[%s]\n",
+			value_str, entry->name);
+		return -EINVAL;
+	}
+
+	memcpy(field, (void *)(value_str), len_value_str);
+	((u8 *)field)[len_value_str] = '\0';
+
+	return 0;
+}
+
+static int cfg_item_read_mac(struct wil6210_priv *wil, void *field,
+			     struct wil_config_entry *entry, char *buffer)
+{
+	int len, rc;
+	char mac[ETH_ALEN];
+	char *c;
+	int mac_len = 0;
+	u8 value;
+
+	if (entry->var_size != ETH_ALEN) {
+		wil_err(wil, "Invalid var_size %u for name=[%s]\n",
+			entry->var_size, entry->name);
+		return -EINVAL;
+	}
+
+	len = strlen(buffer);
+	/* mac format is as the following mac=00:11:22:33:44:55 */
+	if (len != (ETH_ALEN * 2 + 5)) { /* 5 ':' separators */
+		wil_err(wil, "Invalid MAC addr [%s] specified for name=[%s]\n",
+			buffer, entry->name);
+		return -EINVAL;
+	}
+
+	/* each "XX:" group is 3 chars; parse byte at c, then skip separator */
+	for (c = buffer; (c[2] == ':' || c[2] == '\0') && mac_len < ETH_ALEN;
+	     c += 3) {
+		c[2] = '\0';
+		rc = kstrtou8(c, 16, &value);
+		if (rc < 0) {
+			wil_err(wil, "kstrtou8 failed with status %d\n", rc);
+			return rc;
+		}
+		mac[mac_len++] = value;
+	}
+
+	if (mac_len < ETH_ALEN) {
+		wil_err(wil, "failed to read MAC addr [%s] specified for name=[%s]\n",
+			buffer, entry->name);
+		return -EINVAL;
+	}
+
+	memcpy(field, (void *)(mac), ETH_ALEN);
+
+	return 0;
+}
+
+/**
+ * apply the ini configuration file
+ *
+ * wil: the pointer to wil6210 context
+ * file_ini_table: pointer to configuration table
+ * file_entries: number of configuration entries
+ *
+ * Return: 0 if the ini configuration file is correctly parsed
+ */
+static int wil_apply_cfg_ini(struct wil6210_priv *wil,
+			     struct wil_config_file_entry *file_ini_table,
+			     u32 file_entries)
+{
+	int match;
+	unsigned int idx;
+	void *field;
+	char *val;
+	struct wil_config_entry *entry = config_table;
+	u32 table_entries = ARRAY_SIZE(config_table);
+	int rc;
+
+	for (idx = 0; idx < table_entries; idx++, entry++) {
+		/* Calculate the address of the destination field in the
+		 * structure.
+		 */
+		if (entry->var_ref)
+			field = entry->var_ref;
+		else
+			field = (void *)wil + entry->var_offset;
+
+		match = find_cfg_item(wil, file_ini_table, file_entries,
+				      entry->name, &val);
+
+		if (match != 0)
+			/* keep the default value */
+			continue;
+
+		switch (entry->type) {
+		case wil_ini_param_type_unsigned:
+			rc = cfg_item_read_unsigned(wil, field, entry, val);
+			break;
+		case wil_ini_param_type_signed:
+			rc = cfg_item_read_signed(wil, field, entry, val);
+			break;
+		case wil_ini_param_type_string:
+			rc = cfg_item_read_string(wil, field, entry, val);
+			break;
+		case wil_ini_param_type_macaddr:
+			rc = cfg_item_read_mac(wil, field, entry, val);
+			break;
+		default:
+			wil_err(wil, "Unknown param type [%d] for name [%s]\n",
+				entry->type, entry->name);
+			return -EINVAL;
+		}
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * parse the ini configuration file
+ *
+ * This function reads the wigig.ini file and
+ * parses each 'Name=Value' pair in the ini file
+ *
+ * Return: 0 if the wigig.ini is correctly read,
+ *	   fail code otherwise
+ */
+int wil_parse_config_ini(struct wil6210_priv *wil)
+{
+	int rc, i = 0, line_index = 0;
+	const struct firmware *fw = NULL;
+	const char *start;
+	char *buffer, *line;
+	char *name, *value;
+	/* will hold list of strings [param, value] */
+	struct wil_config_file_entry *cfg_ini_table;
+	size_t cfg_size = sizeof(struct wil_config_file_entry) *
+		WIL_CONFIG_MAX_INI_ITEMS;
+
+	rc = request_firmware(&fw, WIL_CONFIG_INI_FILE, wil_to_dev(wil));
+	if (rc) {
+		wil_dbg_misc(wil, "Couldn't load configuration %s rc %d\n",
+			     WIL_CONFIG_INI_FILE, rc);
+		return rc;
+	}
+	if (!fw || !fw->data || !fw->size) {
+		wil_err(wil, "%s no data\n", WIL_CONFIG_INI_FILE);
+		rc = -ENOMEM;
+		goto release_fw;
+	}
+
+	wil_dbg_misc(wil, "Parsing <%s>, %zu bytes\n", WIL_CONFIG_INI_FILE,
+		     fw->size);
+
+	/* copy and NUL-terminate; fw data is raw and strlen/strsep need it */
+	buffer = kmemdup_nul(fw->data, fw->size, GFP_KERNEL);
+	if (!buffer) {
+		rc = -ENOMEM;
+		goto release_fw;
+	}
+	start = buffer;
+
+	cfg_ini_table = kzalloc(cfg_size, GFP_KERNEL);
+	if (!cfg_ini_table) {
+		kfree(buffer);
+		rc = -ENOMEM;
+		goto release_fw;
+	}
+
+	while (buffer && buffer - start < fw->size) {
+		line_index++;
+		line = get_next_line(buffer);
+		buffer = trim(buffer);
+
+		if (strlen((char *)buffer) == 0 || *buffer == '#') {
+			buffer = line;
+			continue;
+		} else if (strncmp(buffer, "END", 3) == 0) {
+			break;
+		}
+
+		/* parse new line */
+		name = strsep(&buffer, "=");
+		if (!name) {
+			wil_err(wil, "file parse error at line %d. expecting '='\n",
+				line_index);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* get the value [string] */
+		value = trim(buffer);
+		if (strlen(value) == 0) {
+			wil_err(wil, "file parse error, no value for param %s at line %d\n",
+				name, line_index);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		cfg_ini_table[i].name = name;
+		cfg_ini_table[i++].value = value;
+
+		if (i >= WIL_CONFIG_MAX_INI_ITEMS) {
+			wil_err(wil, "too many items in %s (%d) > max items (%d)\n",
+				WIL_CONFIG_INI_FILE, i,
+				WIL_CONFIG_MAX_INI_ITEMS);
+			break;
+		}
+
+		buffer = line;
+	}
+
+	/* Loop through the parsed config table and apply all these configs */
+	rc = wil_apply_cfg_ini(wil, cfg_ini_table, i);
+
+out:
+	kfree(start);
+	kfree(cfg_ini_table);
+release_fw:
+	release_firmware(fw);
+
+	return rc;
+}
+
+static int wil_board_file_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count)
+{
+	return wil_board_file_set(wil, buf, count);
+}
+
+static int wil_snr_thresh_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count)
+{
+	return wil_snr_thresh_set(wil, buf);
+}
+
+static int wil_ftm_offset_handler(struct wil6210_priv *wil, const char *buf,
+				  size_t count)
+{
+	return wil_ftm_offset_set(wil, buf);
+}
+
+static int wil_tt_handler(struct wil6210_priv *wil, const char *buf,
+			  size_t count)
+{
+	return wil_tt_set(wil, buf, count);
+}
+
+static int wil_led_blink_handler(struct wil6210_priv *wil, const char *buf,
+				 size_t count)
+{
+	return wil_led_blink_set(wil, buf);
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/config.h b/drivers/net/wireless/ath/wil6210/config.h
new file mode 100644
index 0000000..d207d7c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/config.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef __WIL_CONFIG_H__
+#define __WIL_CONFIG_H__
+
+#define WIL_CONFIG_VAR_OFFSET(_struct, _var) (offsetof(_struct, _var))
+#define WIL_CONFIG_VAR_SIZE(_struct, _var) (FIELD_SIZEOF(_struct, _var))
+
+enum wil_ini_param_type {
+	wil_ini_param_type_unsigned,
+	wil_ini_param_type_signed,
+	wil_ini_param_type_string,
+	wil_ini_param_type_macaddr,
+};
+
+#define WIL_CONFIG_INI_PARAM(_name, _type, _ref, _offset, _size, _min, _max)\
+	{						\
+		(_name),				\
+		(_type),				\
+		(_ref),					\
+		(_offset),				\
+		(_size),				\
+		(_min),					\
+		(_max),					\
+		(NULL),					\
+	}
+
+#define WIL_CONFIG_INI_STRING_PARAM(_name, _ref, _offset, _size)\
+	{						\
+		(_name),				\
+		(wil_ini_param_type_string),		\
+		(_ref),					\
+		(_offset),				\
+		(_size),				\
+		(0),					\
+		(0),					\
+		(NULL),					\
+	}
+
+#define WIL_CONFIG_INI_PARAM_WITH_HANDLER(_name, _handler)\
+	{						\
+		(_name),				\
+		(wil_ini_param_type_string),		\
+		(NULL),					\
+		(0),					\
+		(0),					\
+		(0),					\
+		(0),					\
+		(_handler),				\
+	}
+
+/* forward declaration */
+struct wil6210_priv;
+
+struct wil_config_entry {
+	char *name; /* variable name as expected in wigig.ini file */
+	enum wil_ini_param_type type; /* variable type */
+	void *var_ref; /* reference to global variable */
+	u16 var_offset; /* offset to field inside wil6210_priv structure */
+	u32 var_size;  /* size (in bytes) of the field */
+	unsigned long min_val; /* minimum value, for range checking */
+	unsigned long max_val; /* maximum value, for range checking */
+	/* handler function for complex parameters */
+	int (*handler)(struct wil6210_priv *wil, const char *buf,
+		       size_t count);
+};
+
+/* read and parse ini file */
+int wil_parse_config_ini(struct wil6210_priv *wil);
+
+#endif /* __WIL_CONFIG_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 0b8b47c..7a33dab 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -810,6 +810,44 @@ static const struct file_operations fops_rxon = {
 	.open  = simple_open,
 };
 
+static ssize_t wil_write_file_rbufcap(struct file *file,
+				      const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct wil6210_priv *wil = file->private_data;
+	int val;
+	int rc;
+
+	rc = kstrtoint_from_user(buf, count, 0, &val);
+	if (rc) {
+		wil_err(wil, "Invalid argument\n");
+		return rc;
+	}
+	/* input value: negative to disable, 0 to use system default,
+	 * 1..ring size to set descriptor threshold
+	 */
+	wil_info(wil, "%s RBUFCAP, descriptors threshold - %d\n",
+		 val < 0 ? "Disabling" : "Enabling", val);
+
+	if (!wil->ring_rx.va || val > wil->ring_rx.size) {
+		wil_err(wil, "Invalid descriptors threshold, %d\n", val);
+		return -EINVAL;
+	}
+
+	rc = wmi_rbufcap_cfg(wil, val < 0 ? 0 : 1, val < 0 ? 0 : val);
+	if (rc) {
+		wil_err(wil, "RBUFCAP config failed: %d\n", rc);
+		return rc;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_rbufcap = {
+	.write = wil_write_file_rbufcap,
+	.open  = simple_open,
+};
+
 /* block ack control, write:
  * - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
  * - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
@@ -1364,7 +1402,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
 		rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
 			      &cmd, sizeof(cmd),
 			      WMI_NOTIFY_REQ_DONE_EVENTID, &reply,
-			      sizeof(reply), 20);
+			      sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
 		/* if reply is all-0, ignore this CID */
 		if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt)))
 			continue;
@@ -1413,7 +1451,7 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t)
 {
 	switch (t) {
 	case 0:
-	case ~(u32)0:
+	case WMI_INVALID_TEMPERATURE:
 		seq_printf(s, "%s N/A\n", prefix);
 	break;
 	default:
@@ -1426,17 +1464,41 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t)
 static int wil_temp_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	s32 t_m, t_r;
-	int rc = wmi_get_temperature(wil, &t_m, &t_r);
+	int rc, i;
 
-	if (rc) {
-		seq_puts(s, "Failed\n");
-		return 0;
+	if (test_bit(WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF,
+		     wil->fw_capabilities)) {
+		struct wmi_temp_sense_all_done_event sense_all_evt;
+
+		wil_dbg_misc(wil,
+			     "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is supported");
+		rc = wmi_get_all_temperatures(wil, &sense_all_evt);
+		if (rc) {
+			seq_puts(s, "Failed\n");
+			return 0;
+		}
+		print_temp(s, "T_mac   =",
+			   le32_to_cpu(sense_all_evt.baseband_t1000));
+		seq_printf(s, "Connected RFs [0x%08x]\n",
+			   sense_all_evt.rf_bitmap);
+		for (i = 0; i < WMI_MAX_XIF_PORTS_NUM; i++) {
+			seq_printf(s, "RF[%d]   = ", i);
+			print_temp(s, "",
+				   le32_to_cpu(sense_all_evt.rf_t1000[i]));
+		}
+	} else {
+		s32 t_m, t_r;
+
+		wil_dbg_misc(wil,
+			     "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is not supported");
+		rc = wmi_get_temperature(wil, &t_m, &t_r);
+		if (rc) {
+			seq_puts(s, "Failed\n");
+			return 0;
+		}
+		print_temp(s, "T_mac   =", t_m);
+		print_temp(s, "T_radio =", t_r);
 	}
-
-	print_temp(s, "T_mac   =", t_m);
-	print_temp(s, "T_radio =", t_r);
-
 	return 0;
 }
 
@@ -2220,6 +2282,29 @@ static const struct file_operations fops_led_cfg = {
 	.open  = simple_open,
 };
 
+int wil_led_blink_set(struct wil6210_priv *wil, const char *buf)
+{
+	int rc;
+
+	/* "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med>
+	 * <blink_on_fast> <blink_off_fast>"
+	 */
+	rc = sscanf(buf, "%u %u %u %u %u %u",
+		    &led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+		    &led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+		    &led_blink_time[WIL_LED_TIME_MED].on_ms,
+		    &led_blink_time[WIL_LED_TIME_MED].off_ms,
+		    &led_blink_time[WIL_LED_TIME_FAST].on_ms,
+		    &led_blink_time[WIL_LED_TIME_FAST].off_ms);
+
+	if (rc < 0)
+		return rc;
+	if (rc < 6)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* led_blink_time, write:
  * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>
  */
@@ -2227,6 +2312,7 @@ static ssize_t wil_write_led_blink_time(struct file *file,
 					const char __user *buf,
 					size_t len, loff_t *ppos)
 {
+	struct wil6210_priv *wil = file->private_data;
 	int rc;
 	char *kbuf = kmalloc(len + 1, GFP_KERNEL);
 
@@ -2240,19 +2326,11 @@ static ssize_t wil_write_led_blink_time(struct file *file,
 	}
 
 	kbuf[len] = '\0';
-	rc = sscanf(kbuf, "%d %d %d %d %d %d",
-		    &led_blink_time[WIL_LED_TIME_SLOW].on_ms,
-		    &led_blink_time[WIL_LED_TIME_SLOW].off_ms,
-		    &led_blink_time[WIL_LED_TIME_MED].on_ms,
-		    &led_blink_time[WIL_LED_TIME_MED].off_ms,
-		    &led_blink_time[WIL_LED_TIME_FAST].on_ms,
-		    &led_blink_time[WIL_LED_TIME_FAST].off_ms);
+	rc = wil_led_blink_set(wil, kbuf);
 	kfree(kbuf);
 
 	if (rc < 0)
 		return rc;
-	if (rc < 6)
-		return -EINVAL;
 
 	return len;
 }
@@ -2508,6 +2586,7 @@ static const struct {
 	{"tx_latency",	0644,		&fops_tx_latency},
 	{"link_stats",	0644,		&fops_link_stats},
 	{"link_stats_global",	0644,	&fops_link_stats_global},
+	{"rbufcap",	0244,		&fops_rbufcap},
 };
 
 static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
diff --git a/drivers/net/wireless/ath/wil6210/ipa.c b/drivers/net/wireless/ath/wil6210/ipa.c
index 1148d83..0e825dd 100644
--- a/drivers/net/wireless/ath/wil6210/ipa.c
+++ b/drivers/net/wireless/ath/wil6210/ipa.c
@@ -23,6 +23,9 @@
 #define WIL_IPA_RX_BUFF_SIZE (8 * 1024)
 #define WIL_IPA_TX_BUFF_SIZE (2 * 1024)
 
+#define WIL_IPA_DEFAULT_OUTSTANDING_HIGH 128
+#define WIL_IPA_DEFAULT_OUTSTANDING_LOW 64
+
 u8 ipa_offload;
 module_param(ipa_offload, byte, 0444);
 MODULE_PARM_DESC(ipa_offload, " Enable IPA offload, default - disabled");
@@ -138,13 +141,30 @@ static void wil_ipa_notify_cb(void *priv, enum ipa_dp_evt_type evt,
 			      unsigned long data)
 {
 	struct wil_ipa *ipa = (struct wil_ipa *)priv;
+	struct wil6210_priv *wil = ipa->wil;
+	struct net_device *ndev = wil->main_ndev;
 	struct sk_buff *skb;
+	int outs;
 
 	switch (evt) {
 	case IPA_RECEIVE:
 		skb = (struct sk_buff *)data;
 		wil_ipa_rx(ipa, skb);
 		break;
+	case IPA_WRITE_DONE:
+		skb = (struct sk_buff *)data;
+		outs = atomic_dec_return(&ipa->outstanding_pkts);
+		wil_dbg_txrx(wil, "ipa tx complete len %d, outstanding %d",
+			     skb->len, outs);
+		if (netif_queue_stopped(ndev) &&
+		    outs <= WIL_IPA_DEFAULT_OUTSTANDING_LOW) {
+			wil_dbg_txrx(wil, "outstanding low reached (%d)\n",
+				     WIL_IPA_DEFAULT_OUTSTANDING_LOW);
+			netif_wake_queue(ndev);
+		}
+
+		dev_kfree_skb_any(skb);
+		break;
 	default:
 		wil_dbg_misc(ipa->wil, "unhandled ipa evt %d\n", evt);
 		break;
@@ -728,6 +748,7 @@ int wil_ipa_tx(void *ipa_handle, struct wil_ring *ring, struct sk_buff *skb)
 	int cid, rc;
 	const u8 *da;
 	unsigned int len = skb->len;
+	int outs;
 
 	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
 			  skb->data, skb_headlen(skb), false);
@@ -752,6 +773,14 @@ int wil_ipa_tx(void *ipa_handle, struct wil_ring *ring, struct sk_buff *skb)
 		return rc;
 	/* skb could be freed after this point */
 
+	outs = atomic_inc_return(&ipa->outstanding_pkts);
+	wil_dbg_txrx(wil, "ipa tx outstanding %d", outs);
+	if (outs >= WIL_IPA_DEFAULT_OUTSTANDING_HIGH) {
+		wil_dbg_txrx(wil, "outstanding high reached (%d)\n",
+			     WIL_IPA_DEFAULT_OUTSTANDING_HIGH);
+		netif_stop_queue(ndev);
+	}
+
 	stats = &wil->sta[cid].stats;
 	stats->tx_packets++;
 	stats->tx_bytes += len;
@@ -927,6 +956,8 @@ void *wil_ipa_init(struct wil6210_priv *wil)
 	if (rc)
 		goto err;
 
+	atomic_set(&ipa->outstanding_pkts, 0);
+
 	return ipa;
 
 err:
diff --git a/drivers/net/wireless/ath/wil6210/ipa.h b/drivers/net/wireless/ath/wil6210/ipa.h
index 6c563d2..5b9fa84 100644
--- a/drivers/net/wireless/ath/wil6210/ipa.h
+++ b/drivers/net/wireless/ath/wil6210/ipa.h
@@ -52,6 +52,7 @@ struct wil_ipa {
 	struct wil_ipa_conn conn[WIL6210_MAX_CID];
 	struct wil_ipa_rx_buf rx_buf; /* contiguous memory split into rx bufs */
 	struct msi_msg orig_msi_msg;
+	atomic_t outstanding_pkts;
 };
 
 static inline bool wil_ipa_offload(void) {return ipa_offload; }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 7ba793b..1c2227c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -27,7 +27,7 @@ bool debug_fw; /* = false; */
 module_param(debug_fw, bool, 0444);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
-static u8 oob_mode;
+u8 oob_mode;
 module_param(oob_mode, byte, 0444);
 MODULE_PARM_DESC(oob_mode,
 		 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
@@ -73,9 +73,9 @@ static const struct kernel_param_ops mtu_max_ops = {
 module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
 MODULE_PARM_DESC(mtu_max, " Max MTU value.");
 
-static uint rx_ring_order;
-static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
-static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
+uint rx_ring_order;
+uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
 
 static int ring_order_set(const char *val, const struct kernel_param *kp)
 {
@@ -297,15 +297,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	/* crypto context */
 	memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
 	memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+
+	if (wil->ipa_handle)
+		wil_ipa_disconn_client(wil->ipa_handle, cid);
+
 	/* release vrings */
 	for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) {
 		if (wil->ring2cid_tid[i][0] == cid)
 			wil_ring_fini_tx(wil, i);
 	}
 
-	if (wil->ipa_handle)
-		wil_ipa_disconn_client(wil->ipa_handle, cid);
-
 	/* statistics */
 	memset(&sta->stats, 0, sizeof(sta->stats));
 	sta->stats.tx_latency_min_us = U32_MAX;
@@ -1599,6 +1600,7 @@ int wil_vr_update_profile(struct wil6210_priv *wil, u8 profile)
 
 static void wil_pre_fw_config(struct wil6210_priv *wil)
 {
+	wil_clear_fw_log_addr(wil);
 	/* Mark FW as loaded from host */
 	wil_s(wil, RGF_USER_USAGE_6, 1);
 
@@ -1656,6 +1658,20 @@ static int wil_restore_vifs(struct wil6210_priv *wil)
 }
 
 /*
+ * Clear FW and ucode log start addr to indicate FW log is not ready. The host
+ * driver clears the addresses before FW starts and FW initializes the address
+ * when it is ready to send logs.
+ */
+void wil_clear_fw_log_addr(struct wil6210_priv *wil)
+{
+	/* FW log addr */
+	wil_w(wil, RGF_USER_USAGE_1, 0);
+	/* ucode log addr */
+	wil_w(wil, RGF_USER_USAGE_2, 0);
+	wil_dbg_misc(wil, "Cleared FW and ucode log address");
+}
+
+/*
  * We reset all the structures, and we reset the UMAC.
  * After calling this routine, you're expected to reload
  * the firmware.
@@ -1860,6 +1876,13 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 			wmi_set_snr_thresh(wil, wil->snr_thresh.omni,
 					   wil->snr_thresh.direct);
 
+		if (wil->ftm_txrx_offset.enabled) {
+			struct wil_ftm_offsets *ftm = &wil->ftm_txrx_offset;
+
+			wmi_set_tof_tx_rx_offset(wil, ftm->tx_offset,
+						 ftm->rx_offset);
+		}
+
 		if (wil->platform_ops.notify) {
 			rc = wil->platform_ops.notify(wil->platform_handle,
 						      WIL_PLATFORM_EVT_FW_RDY);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ebac98f..97d5933 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -10,6 +10,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "ipa.h"
+#include "config.h"
 
 static bool alt_ifname; /* = false; */
 module_param(alt_ifname, bool, 0444);
@@ -392,6 +393,10 @@ void *wil_if_alloc(struct device *dev)
 		goto out_cfg;
 	}
 
+	/* read and parse ini file */
+	wil_parse_config_ini(wil);
+	wil_wiphy_init(wil);
+
 	wil_dbg_misc(wil, "if_alloc\n");
 
 	vif = wil_vif_alloc(wil, ifname, NET_NAME_UNKNOWN,
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 9998a01..f2b1564 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -14,7 +14,7 @@
 #include <linux/pm_runtime.h>
 #include "ipa.h"
 
-static int n_msi = 3;
+int n_msi = 3;
 module_param(n_msi, int, 0444);
 MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - single, or 3 - (default) ");
 
@@ -521,6 +521,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	/* rollback to bus_disable */
 
+	wil_clear_fw_log_addr(wil);
 	rc = wil_if_add(wil);
 	if (rc) {
 		wil_err(wil, "wil_if_add failed: %d\n", rc);
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 9495e66..5bdf8ff 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -305,7 +305,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	u16 agg_timeout = le16_to_cpu(ba_timeout);
 	u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
 	struct wil_sta_info *sta;
-	u16 agg_wsize = 0;
+	u16 agg_wsize;
 	/* bit 0: A-MSDU supported
 	 * bit 1: policy (should be 0 for us)
 	 * bits 2..5: TID
@@ -317,7 +317,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 		test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
 		wil->amsdu_en && (param_set & BIT(0));
 	int ba_policy = param_set & BIT(1);
-	u16 status = WLAN_STATUS_SUCCESS;
 	u16 ssn = seq_ctrl >> 4;
 	struct wil_tid_ampdu_rx *r;
 	int rc = 0;
@@ -344,27 +343,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 		    agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
 
 	/* apply policies */
-	if (ba_policy) {
-		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
-		status = WLAN_STATUS_INVALID_QOS_PARAM;
-	}
-	if (status == WLAN_STATUS_SUCCESS) {
-		if (req_agg_wsize == 0) {
-			wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
-				     wil->max_agg_wsize);
-			agg_wsize = wil->max_agg_wsize;
-		} else {
-			agg_wsize = min_t(u16,
-					  wil->max_agg_wsize, req_agg_wsize);
-		}
+	if (req_agg_wsize == 0) {
+		wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+			     wil->max_agg_wsize);
+		agg_wsize = wil->max_agg_wsize;
+	} else {
+		agg_wsize = min_t(u16, wil->max_agg_wsize, req_agg_wsize);
 	}
 
 	rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
-					     status, agg_amsdu, agg_wsize,
-					     agg_timeout);
-	if (rc || (status != WLAN_STATUS_SUCCESS)) {
-		wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
-			status);
+					     WLAN_STATUS_SUCCESS, agg_amsdu,
+					     agg_wsize, agg_timeout);
+	if (rc) {
+		wil_err(wil, "do not apply ba, rc(%d)\n", rc);
 		goto out;
 	}
 
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index 7f3e658..ed1081c 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -41,41 +41,34 @@ ftm_txrx_offset_show(struct device *dev,
 	return len;
 }
 
+int wil_ftm_offset_set(struct wil6210_priv *wil, const char *buf)
+{
+	wil->ftm_txrx_offset.enabled = 0;
+	if (sscanf(buf, "%u %u", &wil->ftm_txrx_offset.tx_offset,
+		   &wil->ftm_txrx_offset.rx_offset) != 2)
+		return -EINVAL;
+
+	wil->ftm_txrx_offset.enabled = 1;
+	return 0;
+}
+
 static ssize_t
 ftm_txrx_offset_store(struct device *dev,
 		      struct device_attribute *attr,
 		      const char *buf, size_t count)
 {
 	struct wil6210_priv *wil = dev_get_drvdata(dev);
-	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
-	struct wmi_tof_set_tx_rx_offset_cmd cmd;
-	struct {
-		struct wmi_cmd_hdr wmi;
-		struct wmi_tof_set_tx_rx_offset_event evt;
-	} __packed reply;
-	unsigned int tx_offset, rx_offset;
 	int rc;
 
-	if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2)
-		return -EINVAL;
-
-	if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
-		return -EOPNOTSUPP;
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.tx_offset = cpu_to_le32(tx_offset);
-	cmd.rx_offset = cpu_to_le32(rx_offset);
-	memset(&reply, 0, sizeof(reply));
-	rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, vif->mid,
-		      &cmd, sizeof(cmd), WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
-		      &reply, sizeof(reply), 100);
+	rc = wil_ftm_offset_set(wil, buf);
 	if (rc < 0)
 		return rc;
-	if (reply.evt.status) {
-		wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
-			reply.evt.status);
-		return -EIO;
-	}
+
+	rc = wmi_set_tof_tx_rx_offset(wil, wil->ftm_txrx_offset.tx_offset,
+				      wil->ftm_txrx_offset.rx_offset);
+	if (rc < 0)
+		return rc;
+
 	return count;
 }
 
@@ -93,12 +86,9 @@ board_file_show(struct device *dev,
 	return strlen(buf);
 }
 
-static ssize_t
-board_file_store(struct device *dev,
-		 struct device_attribute *attr,
-		 const char *buf, size_t count)
+int wil_board_file_set(struct wil6210_priv *wil, const char *buf,
+		       size_t count)
 {
-	struct wil6210_priv *wil = dev_get_drvdata(dev);
 	size_t len;
 
 	mutex_lock(&wil->mutex);
@@ -120,6 +110,21 @@ board_file_store(struct device *dev,
 	}
 	mutex_unlock(&wil->mutex);
 
+	return 0;
+}
+
+static ssize_t
+board_file_store(struct device *dev,
+		 struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	int rc;
+
+	rc = wil_board_file_set(wil, buf, count);
+	if (rc < 0)
+		return rc;
+
 	return count;
 }
 
@@ -163,11 +168,9 @@ thermal_throttling_show(struct device *dev, struct device_attribute *attr,
 	return len;
 }
 
-static ssize_t
-thermal_throttling_store(struct device *dev, struct device_attribute *attr,
-			 const char *buf, size_t count)
+int wil_tt_set(struct wil6210_priv *wil, const char *buf,
+	       size_t count)
 {
-	struct wil6210_priv *wil = dev_get_drvdata(dev);
 	int i, rc = -EINVAL;
 	char *token, *dupbuf, *tmp;
 	struct wmi_tt_data tt_data = {
@@ -223,16 +226,33 @@ thermal_throttling_store(struct device *dev, struct device_attribute *attr,
 			tt_data.rf_enabled = 1;
 	}
 
-	rc = wmi_set_tt_cfg(wil, &tt_data);
-	if (rc)
-		goto out;
+	wil->tt_data = tt_data;
+	wil->tt_data_set = true;
+	rc = 0;
 
-	rc = count;
 out:
 	kfree(tmp);
 	return rc;
 }
 
+static ssize_t
+thermal_throttling_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	int rc;
+
+	rc = wil_tt_set(wil, buf, count);
+	if (rc)
+		return rc;
+
+	rc = wmi_set_tt_cfg(wil, &wil->tt_data);
+	if (rc)
+		return rc;
+
+	return count;
+}
+
 static DEVICE_ATTR_RW(thermal_throttling);
 
 static ssize_t
@@ -364,6 +384,20 @@ snr_thresh_show(struct device *dev, struct device_attribute *attr,
 	return len;
 }
 
+int wil_snr_thresh_set(struct wil6210_priv *wil, const char *buf)
+{
+	wil->snr_thresh.enabled = 0;
+	/* to disable snr threshold, set both omni and direct to 0 */
+	if (sscanf(buf, "%hd %hd", &wil->snr_thresh.omni,
+		   &wil->snr_thresh.direct) != 2)
+		return -EINVAL;
+
+	if (wil->snr_thresh.omni != 0 || wil->snr_thresh.direct != 0)
+		wil->snr_thresh.enabled = 1;
+
+	return 0;
+}
+
 static ssize_t
 snr_thresh_store(struct device *dev,
 		 struct device_attribute *attr,
@@ -371,13 +405,13 @@ snr_thresh_store(struct device *dev,
 {
 	struct wil6210_priv *wil = dev_get_drvdata(dev);
 	int rc;
-	short omni, direct;
 
-	/* to disable snr threshold, set both omni and direct to 0 */
-	if (sscanf(buf, "%hd %hd", &omni, &direct) != 2)
-		return -EINVAL;
+	rc = wil_snr_thresh_set(wil, buf);
+	if (rc < 0)
+		return rc;
 
-	rc = wmi_set_snr_thresh(wil, omni, direct);
+	rc = wmi_set_snr_thresh(wil, wil->snr_thresh.omni,
+				wil->snr_thresh.direct);
 	if (!rc)
 		rc = count;
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index cce49e8a..3e90f4e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,8 +28,6 @@ bool rx_large_buf;
 module_param(rx_large_buf, bool, 0444);
 MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
 
-#define WIL6210_MAX_HEADROOM_SIZE	(256)
-
 ushort headroom_size; /* = 0; */
 static int headroom_size_set(const char *val, const struct kernel_param *kp)
 {
@@ -1064,7 +1062,8 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
 	if (!vif->privacy)
 		txdata->dot1x_open = true;
 	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
-		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		goto out_free;
 
@@ -1152,7 +1151,8 @@ static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
 	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
-		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		goto fail;
 
@@ -1232,7 +1232,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 		txdata->dot1x_open = true;
 	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
 		      &cmd, sizeof(cmd),
-		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		goto out_free;
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index aae4952..1b66756 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -16,6 +16,10 @@
 #include "trace.h"
 #include "ipa.h"
 
+/* Max number of entries (packets to complete) to update the hwtail of tx
+ * status ring. Should be power of 2
+ */
+#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
 #define WIL_EDMA_MAX_DATA_OFFSET (2)
 /* RX buffer size must be aligned to 4 bytes */
 #define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
@@ -223,10 +227,17 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
 }
 
 static inline
-void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
+				void *msg)
 {
-	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
-	       sring->elem_size);
+	struct wil_rx_status_compressed *_msg;
+
+	_msg = (struct wil_rx_status_compressed *)
+		(sring->va + (sring->elem_size * sring->swhead));
+	*dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
+	/* make sure dr_bit is read before the rest of status msg */
+	rmb();
+	memcpy(msg, (void *)_msg, sring->elem_size);
 }
 
 static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
@@ -606,8 +617,7 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
 		if (!sring->va)
 			continue;
 
-		wil_get_next_rx_status_msg(sring, msg);
-		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+		wil_get_next_rx_status_msg(sring, &dr_bit, msg);
 
 		/* Check if there are unhandled RX status messages */
 		if (dr_bit == sring->desc_rdy_pol)
@@ -916,8 +926,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
 	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
 
 again:
-	wil_get_next_rx_status_msg(sring, msg);
-	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+	wil_get_next_rx_status_msg(sring, &dr_bit, msg);
 
 	/* Completed handling all the ready status messages */
 	if (dr_bit != sring->desc_rdy_pol)
@@ -1169,12 +1178,15 @@ int wil_tx_desc_map_edma(union wil_tx_desc *desc, dma_addr_t pa, u32 len,
 }
 
 static inline void
-wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
 			   struct wil_ring_tx_status *msg)
 {
 	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
 		(sring->va + (sring->elem_size * sring->swhead));
 
+	*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
+	/* make sure dr_bit is read before the rest of status msg */
+	rmb();
 	*msg = *_msg;
 }
 
@@ -1195,7 +1207,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
 	struct wil_net_stats *stats;
 	struct wil_tx_enhanced_desc *_d;
 	unsigned int ring_id;
-	unsigned int num_descs;
+	unsigned int num_descs, num_statuses = 0;
 	int i;
 	u8 dr_bit; /* Descriptor Ready bit */
 	struct wil_ring_tx_status msg;
@@ -1203,8 +1215,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
 	int used_before_complete;
 	int used_new;
 
-	wil_get_next_tx_status_msg(sring, &msg);
-	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+	wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
 
 	/* Process completion messages while DR bit has the expected polarity */
 	while (dr_bit == sring->desc_rdy_pol) {
@@ -1315,18 +1326,23 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
 		}
 
 again:
+		num_statuses++;
+		if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
+			/* update HW tail to allow HW to push new statuses */
+			wil_w(wil, sring->hwtail, sring->swhead);
+
 		wil_sring_advance_swhead(sring);
 
-		wil_get_next_tx_status_msg(sring, &msg);
-		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+		wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
 	}
 
 	/* shall we wake net queues? */
 	if (desc_cnt)
 		wil_update_net_queues(wil, vif, NULL, false);
 
-	/* Update the HW tail ptr (RD ptr) */
-	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+	if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
+		/* Update the HW tail ptr (RD ptr) */
+		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
 
 	return desc_cnt;
 }
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index ce5c56d..e9ab926 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -13,7 +13,7 @@
 #define WIL_SRING_SIZE_ORDER_MAX	(WIL_RING_SIZE_ORDER_MAX)
 /* RX sring order should be bigger than RX ring order */
 #define WIL_RX_SRING_SIZE_ORDER_DEFAULT	(12)
-#define WIL_TX_SRING_SIZE_ORDER_DEFAULT	(12)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT	(14)
 #define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600)
 
 #define WIL_RX_DESC_RING_ID 0
@@ -403,12 +403,6 @@ static inline u8 wil_rx_status_get_tid(void *msg)
 		return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
 }
 
-static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
-{
-	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
-			    31, 31);
-}
-
 static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
 {
 	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index daf3717..5120b46 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -20,6 +20,10 @@
 #include "fw.h"
 
 extern bool no_fw_recovery;
+extern bool country_specific_board_file;
+extern bool ignore_reg_hints;
+extern bool debug_fw;
+extern u8 oob_mode;
 extern unsigned int mtu_max;
 extern unsigned short rx_ring_overflow_thrsh;
 extern int agg_wsize;
@@ -30,7 +34,12 @@ extern bool disable_ap_sme;
 extern bool ftm_mode;
 extern ushort headroom_size;
 extern bool drop_if_ring_full;
+extern int n_msi;
 extern uint max_assoc_sta;
+extern bool drop_if_ring_full;
+extern uint rx_ring_order;
+extern uint tx_ring_order;
+extern uint bcast_ring_order;
 
 struct wil6210_priv;
 struct wil6210_vif;
@@ -94,6 +103,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL_MAX_AMPDU_SIZE_128	(128 * 1024) /* FW/HW limit */
 #define WIL_MAX_AGG_WSIZE_64	(64) /* FW/HW limit */
 #define WIL6210_MAX_STATUS_RINGS	(8)
+#define WIL6210_MAX_HEADROOM_SIZE      (256)
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
 
 /* Hardware offload block adds the following:
  * 26 bytes - 3-address QoS data header
@@ -460,7 +471,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
  * wil_cid_valid - check cid is valid
  * @cid: CID value
  */
-static inline bool wil_cid_valid(u8 cid)
+static inline bool wil_cid_valid(int cid)
 {
 	return (cid >= 0 && cid < max_assoc_sta);
 }
@@ -922,6 +933,12 @@ struct wil_brd_info {
 	u32 file_max_size;
 };
 
+struct wil_ftm_offsets {
+	u8 enabled;
+	unsigned int tx_offset;
+	unsigned int rx_offset;
+};
+
 struct wil6210_priv {
 	struct pci_dev *pdev;
 	u32 bar_size;
@@ -1047,6 +1064,8 @@ struct wil6210_priv {
 		short direct;
 	} snr_thresh;
 
+	struct wil_ftm_offsets ftm_txrx_offset;
+
 	/* VR profile, VR is disabled on profile 0 */
 	u8 vr_profile;
 
@@ -1215,6 +1234,7 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 int wil_mem_access_lock(struct wil6210_priv *wil);
 void wil_mem_access_unlock(struct wil6210_priv *wil);
 
+void wil_wiphy_init(struct wil6210_priv *wil);
 struct wil6210_vif *
 wil_vif_alloc(struct wil6210_priv *wil, const char *name,
 	      unsigned char name_assign_type, enum nl80211_iftype iftype);
@@ -1275,6 +1295,9 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
 int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+			     struct wmi_temp_sense_all_done_event
+			     *sense_all_evt);
 int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
 		       bool del_sta);
 int wmi_addba(struct wil6210_priv *wil, u8 mid,
@@ -1295,6 +1318,8 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
 		      const u8 *mac, enum nl80211_iftype iftype);
 int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
 int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
+int wmi_set_tof_tx_rx_offset(struct wil6210_priv *wil, u32 tx_offset,
+			     u32 rx_offset);
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
 			 u8 dialog_token, __le16 ba_param_set,
 			 __le16 ba_timeout, __le16 ba_seq_ctrl);
@@ -1351,6 +1376,17 @@ static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {}
 
 int wil6210_sysfs_init(struct wil6210_priv *wil);
 void wil6210_sysfs_remove(struct wil6210_priv *wil);
+
+int wil_board_file_set(struct wil6210_priv *wil, const char *buf,
+		       size_t count);
+int wil_snr_thresh_set(struct wil6210_priv *wil, const char *buf);
+int wil_ftm_offset_set(struct wil6210_priv *wil, const char *buf);
+int wil_tt_set(struct wil6210_priv *wil, const char *buf,
+	       size_t count);
+int wil_qos_weights_set(struct wil6210_priv *wil, const char *buf,
+			size_t count);
+int wil_led_blink_set(struct wil6210_priv *wil, const char *buf);
+
 int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 		       struct station_info *sinfo);
 
@@ -1466,6 +1502,7 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil);
 int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
 int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
 		    u8 channel, u16 duration_ms);
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
 
 int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch);
 int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch);
@@ -1489,4 +1526,5 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
 void update_supported_bands(struct wil6210_priv *wil);
 int wmi_reset_spi_slave(struct wil6210_priv *wil);
 
+void wil_clear_fw_log_addr(struct wil6210_priv *wil);
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 02aba64..102f2bd4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(led_id,
 		 " 60G device led enablement. Set the led ID (0-2) to enable");
 
 #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
-#define WIL_WMI_CALL_GENERAL_TO_MS 100
 #define WIL_WMI_PCP_STOP_TO_MS 5000
 #define WIL_WMI_SPI_SLAVE_RESET_TO_MS 500
 
@@ -480,6 +479,10 @@ static const char *cmdid2name(u16 cmdid)
 		return "WMI_SET_VR_PROFILE_CMD";
 	case WMI_RESET_SPI_SLAVE_CMDID:
 		return "WMI_RESET_SPI_SLAVE_CMD";
+	case WMI_RBUFCAP_CFG_CMDID:
+		return "WMI_RBUFCAP_CFG_CMD";
+	case WMI_TEMP_SENSE_ALL_CMDID:
+		return "WMI_TEMP_SENSE_ALL_CMDID";
 	default:
 		return "Untracked CMD";
 	}
@@ -628,6 +631,10 @@ static const char *eventid2name(u16 eventid)
 		return "WMI_SET_VR_PROFILE_EVENT";
 	case WMI_RESET_SPI_SLAVE_EVENTID:
 		return "WMI_RESET_SPI_SLAVE_EVENT";
+	case WMI_RBUFCAP_CFG_EVENTID:
+		return "WMI_RBUFCAP_CFG_EVENT";
+	case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
+		return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
 	default:
 		return "Untracked EVENT";
 	}
@@ -1351,6 +1358,12 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 		cid = evt->cid;
 		tid = evt->tid;
 	}
+
+	if (!wil_cid_valid(cid)) {
+		wil_err(wil, "DELBA: Invalid CID %d\n", cid);
+		return;
+	}
+
 	wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n",
 		    vif->mid, cid, tid,
 		    evt->from_initiator ? "originator" : "recipient",
@@ -2128,7 +2141,8 @@ int wmi_echo(struct wil6210_priv *wil)
 	};
 
 	return wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd),
-			WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
+			WMI_ECHO_RSP_EVENTID, NULL, 0,
+			WIL_WMI_CALL_GENERAL_TO_MS);
 }
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -2187,7 +2201,7 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
 
 	rc = wmi_call(wil, WMI_LED_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
 		      WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
-		      100);
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		goto out;
 
@@ -2201,6 +2215,37 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
 	return rc;
 }
 
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
+{
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+	int rc;
+
+	struct wmi_rbufcap_cfg_cmd cmd = {
+		.enable = enable,
+		.rx_desc_threshold = cpu_to_le16(threshold),
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_rbufcap_cfg_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	rc = wmi_call(wil, WMI_RBUFCAP_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+		      WMI_RBUFCAP_CFG_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "RBUFCAP_CFG failed. status %d\n",
+			reply.evt.status);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
 int wmi_pcp_start(struct wil6210_vif *vif,
 		  int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go)
 {
@@ -2318,7 +2363,8 @@ int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid)
 	memset(&reply, 0, sizeof(reply));
 
 	rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0,
-		      WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20);
+		      WMI_GET_SSID_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2355,7 +2401,8 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
 	memset(&reply, 0, sizeof(reply));
 
 	rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0,
-		      WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+		      WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2451,7 +2498,8 @@ int wmi_stop_discovery(struct wil6210_vif *vif)
 	wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
 
 	rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
-		      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
+		      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 
 	if (rc)
 		wil_err(wil, "Failed to stop discovery\n");
@@ -2597,12 +2645,14 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
 	if (on) {
 		rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
 			      WMI_LISTEN_STARTED_EVENTID,
-			      &reply, sizeof(reply), 100);
+			      &reply, sizeof(reply),
+			      WIL_WMI_CALL_GENERAL_TO_MS);
 		if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
 			rc = -EINVAL;
 	} else {
 		rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
-			      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+			      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+			      WIL_WMI_CALL_GENERAL_TO_MS);
 	}
 
 	return rc;
@@ -2691,7 +2741,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
 	memset(&reply, 0, sizeof(reply));
 
 	rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd),
-		      WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
+		      WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2703,6 +2754,44 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
 	return 0;
 }
 
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+			     struct wmi_temp_sense_all_done_event
+			     *sense_all_evt)
+{
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+	int rc;
+	struct wmi_temp_sense_all_cmd cmd = {
+		.measure_baseband_en = true,
+		.measure_rf_en = true,
+		.measure_mode = TEMPERATURE_MEASURE_NOW,
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_temp_sense_all_done_event evt;
+	} __packed reply;
+
+	if (!sense_all_evt) {
+		wil_err(wil, "Invalid sense_all_evt value\n");
+		return -EINVAL;
+	}
+
+	memset(&reply, 0, sizeof(reply));
+	reply.evt.status = WMI_FW_STATUS_FAILURE;
+	rc = wmi_call(wil, WMI_TEMP_SENSE_ALL_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_TEMP_SENSE_ALL_DONE_EVENTID,
+		      &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
+		wil_err(wil, "Failed geting TEMP_SENSE_ALL\n");
+		return -EINVAL;
+	}
+
+	memcpy(sense_all_evt, &reply.evt, sizeof(reply.evt));
+	return 0;
+}
+
 int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
 		       bool del_sta)
 {
@@ -2805,7 +2894,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
 		.dialog_token = token,
 		.status_code = cpu_to_le16(status),
 		/* bit 0: A-MSDU supported
-		 * bit 1: policy (should be 0 for us)
+		 * bit 1: policy (controlled by FW)
 		 * bits 2..5: TID
 		 * bits 6..15: buffer size
 		 */
@@ -2835,7 +2924,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
 
 	rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, mid, &cmd, sizeof(cmd),
 		      WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
-		      100);
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2859,7 +2948,7 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
 		.dialog_token = token,
 		.status_code = cpu_to_le16(status),
 		/* bit 0: A-MSDU supported
-		 * bit 1: policy (should be 0 for us)
+		 * bit 1: policy (controlled by FW)
 		 * bits 2..5: TID
 		 * bits 6..15: buffer size
 		 */
@@ -2917,7 +3006,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
 	rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid,
 		      &cmd, sizeof(cmd),
 		      WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
-		      100);
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2954,7 +3043,7 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
 	rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid,
 		      &cmd, sizeof(cmd),
 		      WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
-		      100);
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -2984,7 +3073,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
 	memset(&reply, 0, sizeof(reply));
 	rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0,
 		      WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
-		      100);
+		      WIL_WMI_CALL_GENERAL_TO_MS);
 	if (rc)
 		return rc;
 
@@ -3059,9 +3148,6 @@ int wmi_set_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data)
 		return -EIO;
 	}
 
-	wil->tt_data = *tt_data;
-	wil->tt_data_set = true;
-
 	return 0;
 }
 
@@ -3092,6 +3178,39 @@ int wmi_get_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data)
 	return 0;
 }
 
+int wmi_set_tof_tx_rx_offset(struct wil6210_priv *wil, u32 tx_offset,
+			     u32 rx_offset)
+{
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+	struct wmi_tof_set_tx_rx_offset_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_tof_set_tx_rx_offset_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+	int rc;
+
+	if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.tx_offset = cpu_to_le32(tx_offset);
+	cmd.rx_offset = cpu_to_le32(rx_offset);
+	rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, vif->mid,
+		      &cmd, sizeof(cmd), WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
+		      &reply, sizeof(reply), 100);
+	if (rc < 0)
+		return rc;
+	if (reply.evt.status) {
+		wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
+			reply.evt.status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 void wmi_event_flush(struct wil6210_priv *wil)
 {
 	ulong flags;
@@ -3434,7 +3553,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
 		/* check if someone waits for this event */
 		if (wil->reply_id && wil->reply_id == id &&
 		    wil->reply_mid == mid) {
-			WARN_ON(wil->reply_buf);
+			if (wil->reply_buf) {
+				/* event received while wmi_call is waiting
+				 * with a buffer. Such event should be handled
+				 * in wmi_recv_cmd function. Handling the event
+				 * here means a previous wmi_call was timeout.
+				 * Drop the event and do not handle it.
+				 */
+				wil_err(wil,
+					"Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
+					id, eventid2name(id));
+				return;
+			}
 
 			wmi_evt_call_handler(vif, id, evt_data,
 					     len - sizeof(*wmi));
@@ -4050,6 +4180,7 @@ int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id,
 			.ring_size = cpu_to_le16(ring->size),
 			.ring_id = ring_id,
 		},
+		.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
 		.status_ring_id = sring_id,
 		.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
 	};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index e9b16fe..ceb0c5b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -24,6 +24,7 @@
 #define WMI_PROX_RANGE_NUM		(3)
 #define WMI_MAX_LOSS_DMG_BEACONS	(20)
 #define MAX_NUM_OF_SECTORS		(128)
+#define WMI_INVALID_TEMPERATURE		(0xFFFFFFFF)
 #define WMI_SCHED_MAX_ALLOCS_PER_CMD	(4)
 #define WMI_RF_DTYPE_LENGTH		(3)
 #define WMI_RF_ETYPE_LENGTH		(3)
@@ -53,6 +54,7 @@
 #define WMI_QOS_MAX_WEIGHT		50
 #define WMI_QOS_SET_VIF_PRIORITY	(0xFF)
 #define WMI_QOS_DEFAULT_PRIORITY	(WMI_QOS_NUM_OF_PRIORITY)
+#define WMI_MAX_XIF_PORTS_NUM		(8)
 
 /* Mailbox interface
  * used for commands and events
@@ -96,6 +98,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_TX_REQ_EXT			= 25,
 	WMI_FW_CAPABILITY_CHANNEL_4			= 26,
 	WMI_FW_CAPABILITY_IPA				= 27,
+	WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF		= 30,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -289,6 +292,7 @@ enum wmi_command_id {
 	WMI_SET_VRING_PRIORITY_WEIGHT_CMDID		= 0xA10,
 	WMI_SET_VRING_PRIORITY_CMDID			= 0xA11,
 	WMI_RBUFCAP_CFG_CMDID				= 0xA12,
+	WMI_TEMP_SENSE_ALL_CMDID			= 0xA13,
 	WMI_SET_MAC_ADDRESS_CMDID			= 0xF003,
 	WMI_ABORT_SCAN_CMDID				= 0xF007,
 	WMI_SET_PROMISCUOUS_MODE_CMDID			= 0xF041,
@@ -1426,12 +1430,7 @@ struct wmi_rf_xpm_write_cmd {
 	u8 data_bytes[0];
 } __packed;
 
-/* WMI_TEMP_SENSE_CMDID
- *
- * Measure MAC and radio temperatures
- *
- * Possible modes for temperature measurement
- */
+/* Possible modes for temperature measurement */
 enum wmi_temperature_measure_mode {
 	TEMPERATURE_USE_OLD_VALUE	= 0x01,
 	TEMPERATURE_MEASURE_NOW		= 0x02,
@@ -1964,6 +1963,14 @@ struct wmi_set_ap_slot_size_cmd {
 	__le32 slot_size;
 } __packed;
 
+/* WMI_TEMP_SENSE_ALL_CMDID */
+struct wmi_temp_sense_all_cmd {
+	u8 measure_baseband_en;
+	u8 measure_rf_en;
+	u8 measure_mode;
+	u8 reserved;
+} __packed;
+
 /* WMI Events
  * List of Events (target to host)
  */
@@ -2125,6 +2132,7 @@ enum wmi_event_id {
 	WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID		= 0x1A10,
 	WMI_SET_VRING_PRIORITY_EVENTID			= 0x1A11,
 	WMI_RBUFCAP_CFG_EVENTID				= 0x1A12,
+	WMI_TEMP_SENSE_ALL_DONE_EVENTID			= 0x1A13,
 	WMI_SET_CHANNEL_EVENTID				= 0x9000,
 	WMI_ASSOC_REQ_EVENTID				= 0x9001,
 	WMI_EAPOL_RX_EVENTID				= 0x9002,
@@ -2812,11 +2820,13 @@ struct wmi_fixed_scheduling_ul_config_event {
  */
 struct wmi_temp_sense_done_event {
 	/* Temperature times 1000 (actual temperature will be achieved by
-	 * dividing the value by 1000)
+	 * dividing the value by 1000). When temperature cannot be read from
+	 * device return WMI_INVALID_TEMPERATURE
 	 */
 	__le32 baseband_t1000;
 	/* Temperature times 1000 (actual temperature will be achieved by
-	 * dividing the value by 1000)
+	 * dividing the value by 1000). When temperature cannot be read from
+	 * device return WMI_INVALID_TEMPERATURE
 	 */
 	__le32 rf_t1000;
 } __packed;
@@ -4194,4 +4204,25 @@ struct wmi_set_vr_profile_event {
 	u8 reserved[3];
 } __packed;
 
+/* WMI_TEMP_SENSE_ALL_DONE_EVENTID
+ * Measure MAC and all radio temperatures
+ */
+struct wmi_temp_sense_all_done_event {
+	/* enum wmi_fw_status */
+	u8 status;
+	/* Bitmap of connected RFs */
+	u8 rf_bitmap;
+	u8 reserved[2];
+	/* Temperature times 1000 (actual temperature will be achieved by
+	 * dividing the value by 1000). When temperature cannot be read from
+	 * device return WMI_INVALID_TEMPERATURE
+	 */
+	__le32 rf_t1000[WMI_MAX_XIF_PORTS_NUM];
+	/* Temperature times 1000 (actual temperature will be achieved by
+	 * dividing the value by 1000). When temperature cannot be read from
+	 * device return WMI_INVALID_TEMPERATURE
+	 */
+	__le32 baseband_t1000;
+} __packed;
+
 #endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index e99e766..1cabae4 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
 	if (result < 0)
 		printk(KERN_ERR DRIVER_NAME
 		       ": usb_register failed (status %d)\n", result);
-
-	led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
+	else
+		led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
 	return result;
 }
 
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 6922cbb..5a0699f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -1834,7 +1834,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
 static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct lpphy_tx_gains gains, oldgains;
+	struct lpphy_tx_gains oldgains;
 	int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
 
 	lpphy_read_tx_pctl_mode_from_hardware(dev);
@@ -1848,9 +1848,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
 	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
 
 	if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
-		lpphy_papd_cal(dev, gains, 0, 1, 30);
+		lpphy_papd_cal(dev, oldgains, 0, 1, 30);
 	else
-		lpphy_papd_cal(dev, gains, 0, 1, 65);
+		lpphy_papd_cal(dev, oldgains, 0, 1, 65);
 
 	if (old_afe_ovr)
 		lpphy_set_tx_gains(dev, oldgains);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 6f3faaf..c7c520f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -3466,6 +3466,8 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
 	}
 
 	netinfo = brcmf_get_netinfo_array(pfn_result);
+	if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+		netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
 	memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
 	cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
 	cfg->wowl.nd->n_channels = 1;
@@ -5366,6 +5368,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 		conn_info->req_ie =
 		    kmemdup(cfg->extra_buf, conn_info->req_ie_len,
 			    GFP_KERNEL);
+		if (!conn_info->req_ie)
+			conn_info->req_ie_len = 0;
 	} else {
 		conn_info->req_ie_len = 0;
 		conn_info->req_ie = NULL;
@@ -5382,6 +5386,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 		conn_info->resp_ie =
 		    kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
 			    GFP_KERNEL);
+		if (!conn_info->resp_ie)
+			conn_info->resp_ie_len = 0;
 	} else {
 		conn_info->resp_ie_len = 0;
 		conn_info->resp_ie = NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 860a437..36a04c1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -464,7 +464,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
 	} else {
 		/* Process special event packets */
 		if (handle_event)
-			brcmf_fweh_process_skb(ifp->drvr, skb);
+			brcmf_fweh_process_skb(ifp->drvr, skb,
+					       BCMILCP_SUBTYPE_VENDOR_LONG);
 
 		brcmf_netif_rx(ifp, skb);
 	}
@@ -481,7 +482,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
 	if (brcmf_rx_hdrpull(drvr, skb, &ifp))
 		return;
 
-	brcmf_fweh_process_skb(ifp->drvr, skb);
+	brcmf_fweh_process_skb(ifp->drvr, skb, 0);
 	brcmu_pkt_buf_free_skb(skb);
 }
 
@@ -783,17 +784,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
 			 bool rtnl_locked)
 {
 	struct brcmf_if *ifp;
+	int ifidx;
 
 	ifp = drvr->iflist[bsscfgidx];
-	drvr->iflist[bsscfgidx] = NULL;
 	if (!ifp) {
 		brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx);
 		return;
 	}
 	brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
 		  ifp->ifidx);
-	if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
-		drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
+	ifidx = ifp->ifidx;
+
 	if (ifp->ndev) {
 		if (bsscfgidx == 0) {
 			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@@ -821,6 +822,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
 		brcmf_p2p_ifp_removed(ifp, rtnl_locked);
 		kfree(ifp);
 	}
+
+	drvr->iflist[bsscfgidx] = NULL;
+	if (drvr->if2bss[ifidx] == bsscfgidx)
+		drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
 }
 
 void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 816f80e..ebd66fe 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -211,7 +211,7 @@ enum brcmf_fweh_event_code {
  */
 #define BRCM_OUI				"\x00\x10\x18"
 #define BCMILCP_BCM_SUBTYPE_EVENT		1
-
+#define BCMILCP_SUBTYPE_VENDOR_LONG		32769
 
 /**
  * struct brcm_ethhdr - broadcom specific ether header.
@@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
 void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
 
 static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb, u16 stype)
 {
 	struct brcmf_event *event_packet;
-	u16 usr_stype;
+	u16 subtype, usr_stype;
 
 	/* only process events when protocol matches */
 	if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
@@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
 	if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
 		return;
 
-	/* check for BRCM oui match */
 	event_packet = (struct brcmf_event *)skb_mac_header(skb);
+
+	/* check subtype if needed */
+	if (unlikely(stype)) {
+		subtype = get_unaligned_be16(&event_packet->hdr.subtype);
+		if (subtype != stype)
+			return;
+	}
+
+	/* check for BRCM oui match */
 	if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0],
 		   sizeof(event_packet->hdr.oui)))
 		return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index f3cbf78..5a0a29c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -579,24 +579,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
 	return ifidx == *(int *)arg;
 }
 
-static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
-				int ifidx)
-{
-	bool (*matchfn)(struct sk_buff *, void *) = NULL;
-	struct sk_buff *skb;
-	int prec;
-
-	if (ifidx != -1)
-		matchfn = brcmf_fws_ifidx_match;
-	for (prec = 0; prec < q->num_prec; prec++) {
-		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
-		while (skb) {
-			brcmu_pkt_buf_free_skb(skb);
-			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
-		}
-	}
-}
-
 static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
 {
 	int i;
@@ -668,6 +650,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
 	return 0;
 }
 
+static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+				int ifidx)
+{
+	bool (*matchfn)(struct sk_buff *, void *) = NULL;
+	struct sk_buff *skb;
+	int prec;
+	u32 hslot;
+
+	if (ifidx != -1)
+		matchfn = brcmf_fws_ifidx_match;
+	for (prec = 0; prec < q->num_prec; prec++) {
+		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+		while (skb) {
+			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+			brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+						true);
+			brcmu_pkt_buf_free_skb(skb);
+			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+		}
+	}
+}
+
 static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
 					    u32 slot_id)
 {
@@ -2168,6 +2172,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
 	brcmf_fws_lock(fws);
 	ifp->fws_desc = NULL;
 	brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+	brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
+				  ifp->ifidx);
 	brcmf_fws_macdesc_deinit(entry);
 	brcmf_fws_cleanup(fws, ifp->ifidx);
 	brcmf_fws_unlock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 4e8397a..ee922b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1116,7 +1116,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
 
 	skb->protocol = eth_type_trans(skb, ifp->ndev);
 
-	brcmf_fweh_process_skb(ifp->drvr, skb);
+	brcmf_fweh_process_skb(ifp->drvr, skb, 0);
 
 exit:
 	brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index a4308c6..44ead0f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
 
 	struct usb_device *usbdev;
 	struct device *dev;
-	struct mutex dev_init_lock;
+	struct completion dev_init_done;
 
 	int ctl_in_pipe, ctl_out_pipe;
 	struct urb *ctl_urb; /* URB for control endpoint */
@@ -684,12 +684,18 @@ static int brcmf_usb_up(struct device *dev)
 
 static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
 {
+	int i;
+
 	if (devinfo->ctl_urb)
 		usb_kill_urb(devinfo->ctl_urb);
 	if (devinfo->bulk_urb)
 		usb_kill_urb(devinfo->bulk_urb);
-	brcmf_usb_free_q(&devinfo->tx_postq, true);
-	brcmf_usb_free_q(&devinfo->rx_postq, true);
+	if (devinfo->tx_reqs)
+		for (i = 0; i < devinfo->bus_pub.ntxq; i++)
+			usb_kill_urb(devinfo->tx_reqs[i].urb);
+	if (devinfo->rx_reqs)
+		for (i = 0; i < devinfo->bus_pub.nrxq; i++)
+			usb_kill_urb(devinfo->rx_reqs[i].urb);
 }
 
 static void brcmf_usb_down(struct device *dev)
@@ -1195,11 +1201,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 	if (ret)
 		goto error;
 
-	mutex_unlock(&devinfo->dev_init_lock);
+	complete(&devinfo->dev_init_done);
 	return;
 error:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
-	mutex_unlock(&devinfo->dev_init_lock);
+	complete(&devinfo->dev_init_done);
 	device_release_driver(dev);
 }
 
@@ -1267,7 +1273,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
 		if (ret)
 			goto fail;
 		/* we are done */
-		mutex_unlock(&devinfo->dev_init_lock);
+		complete(&devinfo->dev_init_done);
 		return 0;
 	}
 	bus->chip = bus_pub->devid;
@@ -1327,11 +1333,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
 	devinfo->usbdev = usb;
 	devinfo->dev = &usb->dev;
-	/* Take an init lock, to protect for disconnect while still loading.
+	/* Init completion, to protect for disconnect while still loading.
 	 * Necessary because of the asynchronous firmware load construction
 	 */
-	mutex_init(&devinfo->dev_init_lock);
-	mutex_lock(&devinfo->dev_init_lock);
+	init_completion(&devinfo->dev_init_done);
 
 	usb_set_intfdata(intf, devinfo);
 
@@ -1409,7 +1414,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	return 0;
 
 fail:
-	mutex_unlock(&devinfo->dev_init_lock);
+	complete(&devinfo->dev_init_done);
 	kfree(devinfo);
 	usb_set_intfdata(intf, NULL);
 	return ret;
@@ -1424,7 +1429,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
 	devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
 
 	if (devinfo) {
-		mutex_lock(&devinfo->dev_init_lock);
+		wait_for_completion(&devinfo->dev_init_done);
 		/* Make sure that devinfo still exists. Firmware probe routines
 		 * may have released the device and cleared the intfdata.
 		 */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index 8eff275..d493021 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 	struct brcmf_if *ifp;
 	const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
 	struct sk_buff *reply;
-	int ret, payload, ret_len;
+	unsigned int payload, ret_len;
 	void *dcmd_buf = NULL, *wr_pointer;
 	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+	int ret;
 
 	if (len < sizeof(*cmdhdr)) {
 		brcmf_err("vendor command too short: %d\n", len);
@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 			brcmf_err("oversize return buffer %d\n", ret_len);
 			ret_len = BRCMF_DCMD_MAXLEN;
 		}
-		payload = max(ret_len, len) + 1;
+		payload = max_t(unsigned int, ret_len, len) + 1;
 		dcmd_buf = vzalloc(payload);
 		if (NULL == dcmd_buf)
 			return -ENOMEM;
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 3d6b813..023cefc 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -8,4 +8,4 @@
 cnss2-y += pci.o
 cnss2-y += power.o
 cnss2-$(CONFIG_CNSS2_DEBUG) += genl.o
-cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o
+cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o ip_multimedia_subsystem_private_service_v01.o
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 9463962..99abd7a 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -9,6 +9,7 @@
 #include "pci.h"
 
 void *cnss_ipc_log_context;
+void *cnss_ipc_log_long_context;
 
 static int cnss_pin_connect_show(struct seq_file *s, void *data)
 {
@@ -100,6 +101,9 @@ static int cnss_stats_show_state(struct seq_file *s,
 		case CNSS_COEX_CONNECTED:
 			seq_puts(s, "COEX_CONNECTED");
 			continue;
+		case CNSS_IMS_CONNECTED:
+			seq_puts(s, "IMS_CONNECTED");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -552,6 +556,8 @@ static ssize_t cnss_control_params_debug_write(struct file *fp,
 		plat_priv->ctrl_params.qmi_timeout = val;
 	else if (strcmp(cmd, "bdf_type") == 0)
 		plat_priv->ctrl_params.bdf_type = val;
+	else if (strcmp(cmd, "time_sync_period") == 0)
+		plat_priv->ctrl_params.time_sync_period = val;
 	else
 		return -EINVAL;
 
@@ -601,6 +607,9 @@ static int cnss_show_quirks_state(struct seq_file *s,
 		case ENABLE_DAEMON_SUPPORT:
 			seq_puts(s, "DAEMON_SUPPORT");
 			continue;
+		case DISABLE_DRV:
+			seq_puts(s, "DISABLE_DRV");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -619,12 +628,15 @@ static int cnss_control_params_debug_show(struct seq_file *s, void *data)
 	seq_puts(s, "mhi_timeout: Timeout for MHI operation in milliseconds\n");
 	seq_puts(s, "qmi_timeout: Timeout for QMI message in milliseconds\n");
 	seq_puts(s, "bdf_type: Type of board data file to be downloaded\n");
+	seq_puts(s, "time_sync_period: Time period to do time sync with device in milliseconds\n");
 
 	seq_puts(s, "\nCurrent value:\n");
 	cnss_show_quirks_state(s, cnss_priv);
 	seq_printf(s, "mhi_timeout: %u\n", cnss_priv->ctrl_params.mhi_timeout);
 	seq_printf(s, "qmi_timeout: %u\n", cnss_priv->ctrl_params.qmi_timeout);
 	seq_printf(s, "bdf_type: %u\n", cnss_priv->ctrl_params.bdf_type);
+	seq_printf(s, "time_sync_period: %u\n",
+		   cnss_priv->ctrl_params.time_sync_period);
 
 	return 0;
 }
@@ -644,6 +656,51 @@ static const struct file_operations cnss_control_params_debug_fops = {
 	.llseek = seq_lseek,
 };
 
+static ssize_t cnss_dynamic_feature_write(struct file *fp,
+					  const char __user *user_buf,
+					  size_t count, loff_t *off)
+{
+	struct cnss_plat_data *plat_priv =
+		((struct seq_file *)fp->private_data)->private;
+	int ret = 0;
+	u64 val;
+
+	ret = kstrtou64_from_user(user_buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	plat_priv->dynamic_feature = val;
+	ret = cnss_wlfw_dynamic_feature_mask_send_sync(plat_priv);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static int cnss_dynamic_feature_show(struct seq_file *s, void *data)
+{
+	struct cnss_plat_data *cnss_priv = s->private;
+
+	seq_printf(s, "dynamic_feature: 0x%llx\n", cnss_priv->dynamic_feature);
+
+	return 0;
+}
+
+static int cnss_dynamic_feature_open(struct inode *inode,
+				     struct file *file)
+{
+	return single_open(file, cnss_dynamic_feature_show,
+			   inode->i_private);
+}
+
+static const struct file_operations cnss_dynamic_feature_fops = {
+	.read = seq_read,
+	.write = cnss_dynamic_feature_write,
+	.open = cnss_dynamic_feature_open,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+};
+
 #ifdef CONFIG_CNSS2_DEBUG
 static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
 {
@@ -659,6 +716,8 @@ static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
 			    &cnss_runtime_pm_debug_fops);
 	debugfs_create_file("control_params", 0600, root_dentry, plat_priv,
 			    &cnss_control_params_debug_fops);
+	debugfs_create_file("dynamic_feature", 0600, root_dentry, plat_priv,
+			    &cnss_dynamic_feature_fops);
 
 	return 0;
 }
@@ -704,7 +763,15 @@ int cnss_debug_init(void)
 	cnss_ipc_log_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
 						      "cnss", 0);
 	if (!cnss_ipc_log_context) {
-		cnss_pr_err("Unable to create IPC log context!\n");
+		cnss_pr_err("Unable to create IPC log context\n");
+		return -EINVAL;
+	}
+
+	cnss_ipc_log_long_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+							   "cnss-long", 0);
+	if (!cnss_ipc_log_long_context) {
+		cnss_pr_err("Unable to create IPC long log context\n");
+		ipc_log_context_destroy(cnss_ipc_log_context);
 		return -EINVAL;
 	}
 
@@ -713,6 +780,11 @@ int cnss_debug_init(void)
 
 void cnss_debug_deinit(void)
 {
+	if (cnss_ipc_log_long_context) {
+		ipc_log_context_destroy(cnss_ipc_log_long_context);
+		cnss_ipc_log_long_context = NULL;
+	}
+
 	if (cnss_ipc_log_context) {
 		ipc_log_context_destroy(cnss_ipc_log_context);
 		cnss_ipc_log_context = NULL;
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
index 51c74c5..1d7b488 100644
--- a/drivers/net/wireless/cnss2/debug.h
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -10,12 +10,18 @@
 #define CNSS_IPC_LOG_PAGES		32
 
 extern void *cnss_ipc_log_context;
+extern void *cnss_ipc_log_long_context;
 
 #define cnss_ipc_log_string(_x...) do {					\
 		if (cnss_ipc_log_context)				\
 			ipc_log_string(cnss_ipc_log_context, _x);	\
 	} while (0)
 
+#define cnss_ipc_log_long_string(_x...) do {				\
+		if (cnss_ipc_log_long_context)				\
+			ipc_log_string(cnss_ipc_log_long_context, _x);	\
+	} while (0)
+
 #define cnss_pr_err(_fmt, ...) do {					\
 		printk("%scnss: " _fmt, KERN_ERR, ##__VA_ARGS__);	\
 		cnss_ipc_log_string("%scnss: " _fmt, "", ##__VA_ARGS__);\
@@ -36,6 +42,12 @@ extern void *cnss_ipc_log_context;
 		cnss_ipc_log_string("%scnss: " _fmt, "", ##__VA_ARGS__);\
 	} while (0)
 
+#define cnss_pr_vdbg(_fmt, ...) do {					\
+		printk("%scnss: " _fmt, KERN_DEBUG, ##__VA_ARGS__);	\
+		cnss_ipc_log_long_string("%scnss: " _fmt, "",		\
+					 ##__VA_ARGS__);		\
+	} while (0)
+
 #ifdef CONFIG_CNSS2_DEBUG
 #define CNSS_ASSERT(_condition) do {					\
 		if (!(_condition)) {					\
diff --git a/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.c b/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.c
new file mode 100644
index 0000000..5b958e9
--- /dev/null
+++ b/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "ip_multimedia_subsystem_private_service_v01.h"
+
+struct qmi_elem_info
+ims_private_service_subscribe_for_indications_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+		ims_private_service_subscribe_for_indications_req_msg_v01,
+				mt_invite_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+		ims_private_service_subscribe_for_indications_req_msg_v01,
+				mt_invite),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+		ims_private_service_subscribe_for_indications_req_msg_v01,
+				wfc_call_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+		ims_private_service_subscribe_for_indications_req_msg_v01,
+				wfc_call_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info
+ims_private_service_subscribe_for_indications_rsp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+		ims_private_service_subscribe_for_indications_rsp_msg_v01,
+				resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ims_private_service_wfc_call_status_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				ims_private_service_wfc_call_status_ind_msg_v01,
+				wfc_call_active),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.h b/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.h
new file mode 100644
index 0000000..582c4ff
--- /dev/null
+++ b/drivers/net/wireless/cnss2/ip_multimedia_subsystem_private_service_v01.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef IP_MULTIMEDIA_SUBSYSTEM_PRIVATE_SERVICE_V01_H
+#define IP_MULTIMEDIA_SUBSYSTEM_PRIVATE_SERVICE_V01_H
+
+#define IMSPRIVATE_SERVICE_ID_V01 0x4D
+#define IMSPRIVATE_SERVICE_VERS_V01 0x01
+
+#define IMSPRIVATE_SERVICE_MAX_MSG_LEN 8
+
+#define QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_RSP_V01 0x003E
+#define QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01 0x003E
+#define QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01 0x0040
+
+struct ims_private_service_subscribe_for_indications_req_msg_v01 {
+	u8 mt_invite_valid;
+	u8 mt_invite;
+	u8 wfc_call_status_valid;
+	u8 wfc_call_status;
+};
+
+#define IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN 8
+extern struct
+qmi_elem_info ims_private_service_subscribe_for_indications_req_msg_v01_ei[];
+
+struct ims_private_service_subscribe_for_indications_rsp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_RSP_MSG_V01_MAX_MSG_LEN 7
+extern struct
+qmi_elem_info ims_private_service_subscribe_for_indications_rsp_msg_v01_ei[];
+
+struct ims_private_service_wfc_call_status_ind_msg_v01 {
+	u8 wfc_call_active;
+};
+
+#define IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct
+qmi_elem_info ims_private_service_wfc_call_status_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 82729f1..ec46aec 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -38,6 +38,7 @@
 #endif
 #define CNSS_QMI_TIMEOUT_DEFAULT	10000
 #define CNSS_BDF_TYPE_DEFAULT		CNSS_BDF_ELF
+#define CNSS_TIME_SYNC_PERIOD_DEFAULT	900000
 
 static struct cnss_plat_data *plat_env;
 
@@ -168,9 +169,11 @@ int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
 
 	switch (bandwidth) {
 	case CNSS_BUS_WIDTH_NONE:
+	case CNSS_BUS_WIDTH_IDLE:
 	case CNSS_BUS_WIDTH_LOW:
 	case CNSS_BUS_WIDTH_MEDIUM:
 	case CNSS_BUS_WIDTH_HIGH:
+	case CNSS_BUS_WIDTH_VERY_HIGH:
 		ret = msm_bus_scale_client_update_request
 			(bus_bw_info->bus_client, bandwidth);
 		if (!ret)
@@ -441,6 +444,8 @@ static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
 	else if (ret)
 		goto shutdown;
 
+	cnss_vreg_unvote_type(plat_priv, CNSS_VREG_PRIM);
+
 	return 0;
 
 shutdown:
@@ -654,7 +659,7 @@ int cnss_idle_restart(struct device *dev)
 
 	ret = cnss_driver_event_post(plat_priv,
 				     CNSS_DRIVER_EVENT_IDLE_RESTART,
-				     CNSS_EVENT_SYNC, NULL);
+				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
 	if (ret)
 		goto out;
 
@@ -708,7 +713,7 @@ int cnss_idle_shutdown(struct device *dev)
 skip_wait:
 	return cnss_driver_event_post(plat_priv,
 				      CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
-				      CNSS_EVENT_SYNC, NULL);
+				      CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
 }
 EXPORT_SYMBOL(cnss_idle_shutdown);
 
@@ -722,19 +727,32 @@ static int cnss_get_resources(struct cnss_plat_data *plat_priv)
 		goto out;
 	}
 
+	ret = cnss_get_clk(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to get clocks, err = %d\n", ret);
+		goto put_vreg;
+	}
+
 	ret = cnss_get_pinctrl(plat_priv);
 	if (ret) {
 		cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
-		goto out;
+		goto put_clk;
 	}
 
 	return 0;
+
+put_clk:
+	cnss_put_clk(plat_priv);
+put_vreg:
+	cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
 out:
 	return ret;
 }
 
 static void cnss_put_resources(struct cnss_plat_data *plat_priv)
 {
+	cnss_put_clk(plat_priv);
+	cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
 }
 
 static int cnss_modem_notifier_nb(struct notifier_block *nb,
@@ -1206,18 +1224,36 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 	return ret;
 }
 
-static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
+static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
+					void *data)
 {
-	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
-		return 0;
+	struct cnss_cal_info *cal_info = data;
 
-	plat_priv->cal_done = true;
+	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
+		goto out;
+
+	switch (cal_info->cal_status) {
+	case CNSS_CAL_DONE:
+		cnss_pr_dbg("Calibration completed successfully\n");
+		plat_priv->cal_done = true;
+		break;
+	case CNSS_CAL_TIMEOUT:
+		cnss_pr_dbg("Calibration timed out, force shutdown\n");
+		break;
+	default:
+		cnss_pr_err("Unknown calibration status: %u\n",
+			    cal_info->cal_status);
+		break;
+	}
+
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
 	cnss_release_antenna_sharing(plat_priv);
 	cnss_bus_dev_shutdown(plat_priv);
 	complete(&plat_priv->cal_complete);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
+out:
+	kfree(data);
 	return 0;
 }
 
@@ -1393,7 +1429,8 @@ static void cnss_driver_event_work(struct work_struct *work)
 			ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
-			ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
+			ret = cnss_cold_boot_cal_done_hdlr(plat_priv,
+							   event->data);
 			break;
 		case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
 			ret = cnss_bus_register_driver_hdlr(plat_priv,
@@ -1524,7 +1561,8 @@ static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
 	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
 	dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
 
-	return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+	return msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+						&dump_entry);
 }
 
 static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
@@ -1631,7 +1669,8 @@ static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
 	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
 	dump_entry.addr = virt_to_phys(dump_data);
 
-	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+	ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+						&dump_entry);
 	if (ret) {
 		cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
 		goto free_ramdump;
@@ -1867,6 +1906,7 @@ static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
 	plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
+	plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
 }
 
 static const struct platform_device_id cnss_platform_id_table[] = {
@@ -1929,6 +1969,7 @@ static int cnss_probe(struct platform_device *plat_dev)
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 	INIT_LIST_HEAD(&plat_priv->vreg_list);
+	INIT_LIST_HEAD(&plat_priv->clk_list);
 
 	cnss_get_cpr_info(plat_priv);
 	cnss_init_control_params(plat_priv);
@@ -1976,6 +2017,7 @@ static int cnss_probe(struct platform_device *plat_dev)
 		goto destroy_debugfs;
 
 	cnss_register_coex_service(plat_priv);
+	cnss_register_ims_service(plat_priv);
 
 	ret = cnss_genl_init();
 	if (ret < 0)
@@ -2017,6 +2059,7 @@ static int cnss_remove(struct platform_device *plat_dev)
 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
 
 	cnss_genl_exit();
+	cnss_unregister_ims_service(plat_priv);
 	cnss_unregister_coex_service(plat_priv);
 	cnss_misc_deinit(plat_priv);
 	cnss_debugfs_destroy(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 49e5043..b0ed7a5 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -38,6 +38,7 @@ struct cnss_vreg_cfg {
 	u32 max_uv;
 	u32 load_ua;
 	u32 delay_us;
+	u32 need_unvote;
 };
 
 struct cnss_vreg_info {
@@ -51,6 +52,19 @@ enum cnss_vreg_type {
 	CNSS_VREG_PRIM,
 };
 
+struct cnss_clk_cfg {
+	const char *name;
+	u32 freq;
+	u32 required;
+};
+
+struct cnss_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	struct cnss_clk_cfg cfg;
+	u32 enabled;
+};
+
 struct cnss_pinctrl_info {
 	struct pinctrl *pinctrl;
 	struct pinctrl_state *bootstrap_active;
@@ -187,6 +201,7 @@ enum cnss_driver_state {
 	CNSS_DEV_ERR_NOTIFY,
 	CNSS_DRIVER_DEBUG,
 	CNSS_COEX_CONNECTED,
+	CNSS_IMS_CONNECTED,
 };
 
 struct cnss_recovery_data {
@@ -222,6 +237,7 @@ enum cnss_debug_quirks {
 	ENABLE_PCI_LINK_DOWN_PANIC,
 	FBC_BYPASS,
 	ENABLE_DAEMON_SUPPORT,
+	DISABLE_DRV,
 };
 
 enum cnss_bdf_type {
@@ -231,11 +247,21 @@ enum cnss_bdf_type {
 	CNSS_BDF_DUMMY = 255,
 };
 
+enum cnss_cal_status {
+	CNSS_CAL_DONE,
+	CNSS_CAL_TIMEOUT,
+};
+
+struct cnss_cal_info {
+	enum cnss_cal_status cal_status;
+};
+
 struct cnss_control_params {
 	unsigned long quirks;
 	unsigned int mhi_timeout;
 	unsigned int qmi_timeout;
 	unsigned int bdf_type;
+	unsigned int time_sync_period;
 };
 
 struct cnss_cpr_info {
@@ -268,6 +294,7 @@ struct cnss_plat_data {
 	void *bus_priv;
 	enum cnss_dev_bus_type bus_type;
 	struct list_head vreg_list;
+	struct list_head clk_list;
 	struct cnss_pinctrl_info pinctrl_info;
 	struct cnss_subsys_info subsys_info;
 	struct cnss_ramdump_info ramdump_info;
@@ -320,6 +347,9 @@ struct cnss_plat_data {
 	u64 antenna;
 	u64 grant;
 	struct qmi_handle coex_qmi;
+	struct qmi_handle ims_qmi;
+	struct qmi_txn txn;
+	u64 dynamic_feature;
 };
 
 #ifdef CONFIG_ARCH_QCOM
@@ -354,9 +384,14 @@ int cnss_vreg_on_type(struct cnss_plat_data *plat_priv,
 		      enum cnss_vreg_type type);
 int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
 		       enum cnss_vreg_type type);
+int cnss_get_clk(struct cnss_plat_data *plat_priv);
+void cnss_put_clk(struct cnss_plat_data *plat_priv);
+int cnss_vreg_unvote_type(struct cnss_plat_data *plat_priv,
+			  enum cnss_vreg_type type);
 int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
 int cnss_power_on_device(struct cnss_plat_data *plat_priv);
 void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+bool cnss_is_device_powered_on(struct cnss_plat_data *plat_priv);
 int cnss_register_subsys(struct cnss_plat_data *plat_priv);
 void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
 int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index bedb72e..98fb025 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -52,8 +52,6 @@
 #define EMULATION_HW			0
 #endif
 
-#define TIME_SYNC_PERIOD_JF		msecs_to_jiffies(900000)
-
 static DEFINE_SPINLOCK(pci_link_down_lock);
 static DEFINE_SPINLOCK(pci_reg_window_lock);
 
@@ -100,9 +98,9 @@ static DEFINE_SPINLOCK(pci_reg_window_lock);
 #define QCA6390_CE_REG_INTERVAL			0x2000
 
 #define SHADOW_REG_COUNT			36
-#define QCA6390_PCIE_SHADOW_REG_VALUE_0		0x1E03024
-#define QCA6390_PCIE_SHADOW_REG_VALUE_34	0x1E030AC
-#define QCA6390_PCIE_SHADOW_REG_VALUE_35	0x1E030B0
+#define QCA6390_PCIE_SHADOW_REG_VALUE_0		0x8FC
+#define QCA6390_PCIE_SHADOW_REG_VALUE_34	0x984
+#define QCA6390_PCIE_SHADOW_REG_VALUE_35	0x988
 #define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3	0x1F80118
 #define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4	0x1F8011C
 #define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5	0x1F80120
@@ -208,10 +206,11 @@ static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
 {
 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
 
+	writel_relaxed(WINDOW_ENABLE_BIT | window,
+		       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET +
+		       pci_priv->bar);
+
 	if (window != pci_priv->remap_window) {
-		writel_relaxed(WINDOW_ENABLE_BIT | window,
-			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET +
-			       pci_priv->bar);
 		pci_priv->remap_window = window;
 		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
 			    WINDOW_ENABLE_BIT | window);
@@ -341,25 +340,93 @@ static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
 	return 0;
 }
 
+static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
+{
+	u16 link_status;
+	int ret;
+
+	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
+					&link_status);
+	if (ret)
+		return ret;
+
+	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
+
+	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
+	pci_priv->def_link_width =
+		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+
+	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
+		    pci_priv->def_link_speed, pci_priv->def_link_width);
+
+	return 0;
+}
+
+static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
+				    enum pci_link_status status)
+{
+	u16 link_speed, link_width;
+
+	cnss_pr_vdbg("Set PCI link status to: %u\n", status);
+
+	switch (status) {
+	case PCI_GEN1:
+		link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
+		link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+		break;
+	case PCI_GEN2:
+		link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
+		link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+		break;
+	case PCI_DEF:
+		link_speed = pci_priv->def_link_speed;
+		link_width = pci_priv->def_link_width;
+		if (!link_speed && !link_width) {
+			cnss_pr_err("PCI link speed or width is not valid\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		cnss_pr_err("Unknown PCI link status config: %u\n", status);
+		return -EINVAL;
+	}
+
+	return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
+					   link_speed, link_width);
+}
+
 static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
 {
 	int ret = 0;
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
+	enum msm_pcie_pm_opt pm_ops;
 
-	cnss_pr_dbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+	cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
 
-	ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
-				  MSM_PCIE_SUSPEND,
-				  pci_dev->bus->number,
-				  pci_dev, NULL,
-				  PM_OPTIONS_DEFAULT);
-	if (ret) {
-		cnss_pr_err("Failed to %s PCI link with default option, err = %d\n",
-			    link_up ? "resume" : "suspend", ret);
-		return ret;
+	if (link_up) {
+		pm_ops = MSM_PCIE_RESUME;
+	} else {
+		if (pci_priv->drv_connected_last) {
+			cnss_pr_vdbg("Use PCIe DRV suspend\n");
+			pm_ops = MSM_PCIE_DRV_SUSPEND;
+			cnss_set_pci_link_status(pci_priv, PCI_GEN1);
+		} else {
+			pm_ops = MSM_PCIE_SUSPEND;
+		}
 	}
 
-	return 0;
+	ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
+				  NULL, PM_OPTIONS_DEFAULT);
+	if (ret)
+		cnss_pr_err("Failed to %s PCI link with default option, err = %d\n",
+			    link_up ? "resume" : "suspend", ret);
+
+	if (pci_priv->drv_connected_last) {
+		if ((link_up && !ret) || (!link_up && ret))
+			cnss_set_pci_link_status(pci_priv, PCI_DEF);
+	}
+
+	return ret;
 }
 
 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
@@ -499,6 +566,245 @@ int cnss_pci_is_device_down(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_pci_is_device_down);
 
+static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		return "INIT";
+	case CNSS_MHI_DEINIT:
+		return "DEINIT";
+	case CNSS_MHI_POWER_ON:
+		return "POWER_ON";
+	case CNSS_MHI_POWER_OFF:
+		return "POWER_OFF";
+	case CNSS_MHI_FORCE_POWER_OFF:
+		return "FORCE_POWER_OFF";
+	case CNSS_MHI_SUSPEND:
+		return "SUSPEND";
+	case CNSS_MHI_RESUME:
+		return "RESUME";
+	case CNSS_MHI_TRIGGER_RDDM:
+		return "TRIGGER_RDDM";
+	case CNSS_MHI_RDDM_DONE:
+		return "RDDM_DONE";
+	default:
+		return "UNKNOWN";
+	}
+};
+
+static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
+					enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_DEINIT:
+	case CNSS_MHI_POWER_ON:
+		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_FORCE_POWER_OFF:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_POWER_OFF:
+	case CNSS_MHI_SUSPEND:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_RESUME:
+		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_TRIGGER_RDDM:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_RDDM_DONE:
+		return 0;
+	default:
+		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
+			    cnss_mhi_state_to_str(mhi_state), mhi_state);
+	}
+
+	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
+		    cnss_mhi_state_to_str(mhi_state), mhi_state,
+		    pci_priv->mhi_state);
+
+	return -EINVAL;
+}
+
+static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
+				       enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_DEINIT:
+		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_POWER_ON:
+		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_POWER_OFF:
+	case CNSS_MHI_FORCE_POWER_OFF:
+		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_SUSPEND:
+		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_RESUME:
+		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_TRIGGER_RDDM:
+		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_RDDM_DONE:
+		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+		break;
+	default:
+		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+	}
+}
+
+static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+				  enum cnss_mhi_state mhi_state)
+{
+	int ret = 0;
+
+	if (pci_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	if (mhi_state < 0) {
+		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
+		return -EINVAL;
+	}
+
+	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
+	if (ret)
+		goto out;
+
+	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
+		     cnss_mhi_state_to_str(mhi_state), mhi_state);
+
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
+		break;
+	case CNSS_MHI_DEINIT:
+		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
+		ret = 0;
+		break;
+	case CNSS_MHI_POWER_ON:
+		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
+		break;
+	case CNSS_MHI_POWER_OFF:
+		mhi_power_down(pci_priv->mhi_ctrl, true);
+		ret = 0;
+		break;
+	case CNSS_MHI_FORCE_POWER_OFF:
+		mhi_power_down(pci_priv->mhi_ctrl, false);
+		ret = 0;
+		break;
+	case CNSS_MHI_SUSPEND:
+		if (pci_priv->drv_connected_last)
+			ret = mhi_pm_fast_suspend(pci_priv->mhi_ctrl, true);
+		else
+			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
+		break;
+	case CNSS_MHI_RESUME:
+		if (pci_priv->drv_connected_last)
+			ret = mhi_pm_fast_resume(pci_priv->mhi_ctrl, true);
+		else
+			ret = mhi_pm_resume(pci_priv->mhi_ctrl);
+		break;
+	case CNSS_MHI_TRIGGER_RDDM:
+		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
+		break;
+	case CNSS_MHI_RDDM_DONE:
+		break;
+	default:
+		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		goto out;
+
+	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
+
+	return 0;
+
+out:
+	cnss_pr_err("Failed to set MHI state: %s(%d)\n",
+		    cnss_mhi_state_to_str(mhi_state), mhi_state);
+	return ret;
+}
+
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	plat_priv = pci_priv->plat_priv;
+	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+		return 0;
+
+	if (MHI_TIMEOUT_OVERWRITE_MS)
+		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
+	if (ret)
+		goto out;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+		return;
+
+	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+
+	if (!pci_priv->pci_link_down_ind)
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
+	else
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
+}
+
+static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+		return;
+
+	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+}
+
 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
 					 u64 *time_us)
 {
@@ -579,16 +885,21 @@ static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
 {
 	struct cnss_pci_data *pci_priv =
 		container_of(work, struct cnss_pci_data, time_sync_work.work);
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	unsigned int time_sync_period_ms =
+		plat_priv->ctrl_params.time_sync_period;
 
 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
 		return;
 
 	if (cnss_pci_pm_runtime_get_sync(pci_priv) < 0)
-		return;
+		goto runtime_pm_put;
 
 	cnss_pci_update_timestamp(pci_priv);
-	schedule_delayed_work(&pci_priv->time_sync_work, TIME_SYNC_PERIOD_JF);
+	schedule_delayed_work(&pci_priv->time_sync_work,
+			      msecs_to_jiffies(time_sync_period_ms));
 
+runtime_pm_put:
 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
 	cnss_pci_pm_runtime_put_autosuspend(pci_priv);
 }
@@ -772,23 +1083,25 @@ int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
 {
 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
-	gfp_t gfp = GFP_KERNEL;
 	u32 reg_offset;
 
+	if (in_interrupt() || irqs_disabled())
+		return;
+
 	if (cnss_pci_check_link_status(pci_priv))
 		return;
 
-	if (in_interrupt() || irqs_disabled())
-		gfp = GFP_ATOMIC;
-
 	if (!pci_priv->debug_reg) {
 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
 						   sizeof(*pci_priv->debug_reg)
-						   * array_size, gfp);
+						   * array_size, GFP_KERNEL);
 		if (!pci_priv->debug_reg)
 			return;
 	}
 
+	if (cnss_pci_force_wake_get(pci_priv))
+		return;
+
 	cnss_pr_dbg("Start to dump shadow registers\n");
 
 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
@@ -796,7 +1109,7 @@ static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
 		pci_priv->debug_reg[j].offset = reg_offset;
 		if (cnss_pci_reg_read(pci_priv, reg_offset,
 				      &pci_priv->debug_reg[j].val))
-			return;
+			goto force_wake_put;
 	}
 
 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
@@ -804,8 +1117,11 @@ static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
 		pci_priv->debug_reg[j].offset = reg_offset;
 		if (cnss_pci_reg_read(pci_priv, reg_offset,
 				      &pci_priv->debug_reg[j].val))
-			return;
+			goto force_wake_put;
 	}
+
+force_wake_put:
+	cnss_pci_force_wake_put(pci_priv);
 }
 
 #ifdef CONFIG_CNSS2_DEBUG
@@ -909,8 +1225,8 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
 
 	if (plat_priv->ramdump_info_v2.dump_data_valid ||
 	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
 		cnss_pci_clear_dump_info(pci_priv);
+		cnss_pci_deinit_mhi(pci_priv);
 	}
 
 	ret = cnss_power_on_device(plat_priv);
@@ -957,8 +1273,9 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
 	return 0;
 
 stop_mhi:
-	cnss_pci_stop_mhi(pci_priv);
+	cnss_pci_power_off_mhi(pci_priv);
 	cnss_suspend_pci_link(pci_priv);
+	cnss_pci_deinit_mhi(pci_priv);
 power_off:
 	cnss_power_off_device(plat_priv);
 out:
@@ -988,11 +1305,13 @@ static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
 		cnss_pci_collect_dump(pci_priv);
 	}
 
-	cnss_pci_stop_mhi(pci_priv);
-
+	cnss_pci_power_off_mhi(pci_priv);
 	ret = cnss_suspend_pci_link(pci_priv);
 	if (ret)
 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+	if (!plat_priv->ramdump_info_v2.dump_data_valid &&
+	    !test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state))
+		cnss_pci_deinit_mhi(pci_priv);
 
 	cnss_power_off_device(plat_priv);
 
@@ -1048,8 +1367,8 @@ static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
 			     dump_data->nentries);
 	kfree(ramdump_segs);
 
-	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
-	cnss_pci_clear_dump_info(plat_priv->bus_priv);
+	cnss_pci_clear_dump_info(pci_priv);
+	cnss_pci_deinit_mhi(pci_priv);
 
 	return ret;
 }
@@ -1179,6 +1498,7 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
 	struct cnss_pci_data *pci_priv;
 	unsigned int timeout;
+	struct cnss_cal_info *cal_info;
 
 	if (!plat_priv) {
 		cnss_pr_err("plat_priv is NULL\n");
@@ -1199,15 +1519,21 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
 		goto register_driver;
 
+	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+	if (!cal_info)
+		return -ENOMEM;
+
 	cnss_pr_dbg("Start to wait for calibration to complete\n");
 
 	timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev);
 	ret = wait_for_completion_timeout(&plat_priv->cal_complete,
-					  msecs_to_jiffies(timeout) << 2);
+					  msecs_to_jiffies(timeout));
 	if (!ret) {
 		cnss_pr_err("Timeout waiting for calibration to complete\n");
-		ret = -EAGAIN;
-		goto out;
+		cal_info->cal_status = CNSS_CAL_TIMEOUT;
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+				       0, cal_info);
 	}
 
 register_driver:
@@ -1216,7 +1542,6 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
 				     driver_ops);
 
-out:
 	return ret;
 }
 EXPORT_SYMBOL(cnss_wlan_register_driver);
@@ -1279,6 +1604,22 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
 	return 0;
 }
 
+static bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
+{
+	struct pci_dev *root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
+	struct device_node *root_of_node = root_port->dev.of_node;
+	bool drv_supported = false;
+
+	if (root_of_node->parent)
+		drv_supported = of_property_read_bool(root_of_node->parent,
+						      "qcom,drv-supported");
+
+	cnss_pr_dbg("PCIe DRV is %s\n",
+		    drv_supported ? "supported" : "not supported");
+
+	return drv_supported;
+}
+
 static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
 {
 	unsigned long flags;
@@ -1325,6 +1666,14 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
 			cnss_pci_pm_request_resume(pci_priv);
 		}
 		break;
+	case MSM_PCIE_EVENT_DRV_CONNECT:
+		cnss_pr_dbg("DRV subsystem is connected\n");
+		cnss_pci_set_drv_connected(pci_priv, 1);
+		break;
+	case MSM_PCIE_EVENT_DRV_DISCONNECT:
+		cnss_pr_dbg("DRV subsystem is disconnected\n");
+		cnss_pci_set_drv_connected(pci_priv, 0);
+		break;
 	default:
 		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
 	}
@@ -1338,6 +1687,12 @@ static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
 	pci_event = &pci_priv->msm_pci_event;
 	pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
 		MSM_PCIE_EVENT_WAKEUP;
+
+	if (cnss_pci_is_drv_supported(pci_priv))
+		pci_event->events = pci_event->events |
+			MSM_PCIE_EVENT_DRV_CONNECT |
+			MSM_PCIE_EVENT_DRV_DISCONNECT;
+
 	pci_event->user = pci_priv->pci_dev;
 	pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
 	pci_event->callback = cnss_pci_event_cb;
@@ -1361,6 +1716,7 @@ static int cnss_pci_suspend(struct device *dev)
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
@@ -1368,6 +1724,17 @@ static int cnss_pci_suspend(struct device *dev)
 	if (!pci_priv)
 		goto out;
 
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		goto out;
+
+	if (!cnss_is_device_powered_on(plat_priv))
+		goto out;
+
+	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+		pci_priv->drv_connected_last =
+			cnss_pci_get_drv_connected(pci_priv);
+
 	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->suspend) {
 		ret = driver_ops->suspend(pci_dev, state);
@@ -1385,16 +1752,18 @@ static int cnss_pci_suspend(struct device *dev)
 			goto resume_driver;
 		}
 
+		if (pci_priv->drv_connected_last)
+			goto skip_disable_pci;
+
 		pci_clear_master(pci_dev);
-		cnss_set_pci_config_space(pci_priv,
-					  SAVE_PCI_CONFIG_SPACE);
+		cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
 		pci_disable_device(pci_dev);
 
 		ret = pci_set_power_state(pci_dev, PCI_D3hot);
 		if (ret)
-			cnss_pr_err("Failed to set D3Hot, err = %d\n",
-				    ret);
+			cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
 
+skip_disable_pci:
 		if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
 			ret = -EAGAIN;
 			goto resume_mhi;
@@ -1416,6 +1785,7 @@ static int cnss_pci_suspend(struct device *dev)
 resume_driver:
 	if (driver_ops && driver_ops->resume)
 		driver_ops->resume(pci_dev);
+	pci_priv->drv_connected_last = 0;
 out:
 	return ret;
 }
@@ -1433,6 +1803,9 @@ static int cnss_pci_resume(struct device *dev)
 	if (pci_priv->pci_link_down_ind)
 		goto out;
 
+	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+		goto out;
+
 	if (pci_priv->pci_link_state == PCI_LINK_DOWN &&
 	    !pci_priv->disable_pc) {
 		if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
@@ -1443,6 +1816,9 @@ static int cnss_pci_resume(struct device *dev)
 		}
 		pci_priv->pci_link_state = PCI_LINK_UP;
 
+		if (pci_priv->drv_connected_last)
+			goto skip_enable_pci;
+
 		ret = pci_enable_device(pci_dev);
 		if (ret)
 			cnss_pr_err("Failed to enable PCI device, err = %d\n",
@@ -1451,8 +1827,9 @@ static int cnss_pci_resume(struct device *dev)
 		if (pci_priv->saved_state)
 			cnss_set_pci_config_space(pci_priv,
 						  RESTORE_PCI_CONFIG_SPACE);
-
 		pci_set_master(pci_dev);
+
+skip_enable_pci:
 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
 	}
 
@@ -1464,6 +1841,8 @@ static int cnss_pci_resume(struct device *dev)
 				    ret);
 	}
 
+	pci_priv->drv_connected_last = 0;
+
 	return 0;
 
 out:
@@ -1512,17 +1891,29 @@ static int cnss_pci_runtime_suspend(struct device *dev)
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		return -EAGAIN;
 
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -EAGAIN;
+
+	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+		return -EAGAIN;
+
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
 	}
 
-	cnss_pr_dbg("Runtime suspend start\n");
+	cnss_pr_vdbg("Runtime suspend start\n");
+
+	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+		pci_priv->drv_connected_last =
+			cnss_pci_get_drv_connected(pci_priv);
 
 	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
@@ -1531,7 +1922,10 @@ static int cnss_pci_runtime_suspend(struct device *dev)
 	else
 		ret = cnss_auto_suspend(dev);
 
-	cnss_pr_info("Runtime suspend status: %d\n", ret);
+	if (ret)
+		pci_priv->drv_connected_last = 0;
+
+	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
 
 	return ret;
 }
@@ -1546,12 +1940,15 @@ static int cnss_pci_runtime_resume(struct device *dev)
 	if (!pci_priv)
 		return -EAGAIN;
 
+	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+		return -EAGAIN;
+
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
 	}
 
-	cnss_pr_dbg("Runtime resume start\n");
+	cnss_pr_vdbg("Runtime resume start\n");
 
 	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
@@ -1560,14 +1957,17 @@ static int cnss_pci_runtime_resume(struct device *dev)
 	else
 		ret = cnss_auto_resume(dev);
 
-	cnss_pr_info("Runtime resume status: %d\n", ret);
+	if (!ret)
+		pci_priv->drv_connected_last = 0;
+
+	cnss_pr_vdbg("Runtime resume status: %d\n", ret);
 
 	return ret;
 }
 
 static int cnss_pci_runtime_idle(struct device *dev)
 {
-	cnss_pr_dbg("Runtime idle\n");
+	cnss_pr_vdbg("Runtime idle\n");
 
 	pm_request_autosuspend(dev);
 
@@ -1707,6 +2107,9 @@ int cnss_auto_suspend(struct device *dev)
 			goto out;
 		}
 
+		if (pci_priv->drv_connected_last)
+			goto skip_disable_pci;
+
 		pci_clear_master(pci_dev);
 		cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
 		pci_disable_device(pci_dev);
@@ -1715,6 +2118,7 @@ int cnss_auto_suspend(struct device *dev)
 		if (ret)
 			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
 
+skip_disable_pci:
 		if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
 			ret = -EAGAIN;
 			goto resume_mhi;
@@ -1765,6 +2169,9 @@ int cnss_auto_resume(struct device *dev)
 		}
 		pci_priv->pci_link_state = PCI_LINK_UP;
 
+		if (pci_priv->drv_connected_last)
+			goto skip_enable_pci;
+
 		ret = pci_enable_device(pci_dev);
 		if (ret)
 			cnss_pr_err("Failed to enable PCI device, err = %d\n",
@@ -1772,6 +2179,8 @@ int cnss_auto_resume(struct device *dev)
 
 		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
 		pci_set_master(pci_dev);
+
+skip_enable_pci:
 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
 	}
 
@@ -2038,6 +2447,7 @@ int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
 	if (!plat_priv)
 		return -ENODEV;
 
+	cnss_pci_pm_runtime_resume(pci_priv);
 	cnss_pci_dump_shadow_reg(pci_priv);
 
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
@@ -2103,7 +2513,6 @@ static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
 	cnss_pr_dbg("Initializing SMMU\n");
 
 	pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
-	pci_priv->smmu_mapping.domain = pci_priv->iommu_domain;
 	ret = of_property_read_string(of_node, "qcom,iommu-dma",
 				      &iommu_dma_type);
 	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
@@ -2145,20 +2554,8 @@ static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
 {
 	pci_priv->iommu_domain = NULL;
-	pci_priv->smmu_mapping.domain = NULL;
 }
 
-struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev)
-{
-	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
-
-	if (!pci_priv)
-		return NULL;
-
-	return &pci_priv->smmu_mapping;
-}
-EXPORT_SYMBOL(cnss_smmu_get_mapping);
-
 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
 {
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
@@ -2484,47 +2881,6 @@ static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
 		pci_disable_device(pci_dev);
 }
 
-static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl, void *priv)
-{
-	struct cnss_pci_data *pci_priv = priv;
-
-	return cnss_pci_pm_runtime_get(pci_priv);
-}
-
-static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl,
-					   void *priv)
-{
-	struct cnss_pci_data *pci_priv = priv;
-
-	cnss_pci_pm_runtime_put_noidle(pci_priv);
-}
-
-static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		return "INIT";
-	case CNSS_MHI_DEINIT:
-		return "DEINIT";
-	case CNSS_MHI_POWER_ON:
-		return "POWER_ON";
-	case CNSS_MHI_POWER_OFF:
-		return "POWER_OFF";
-	case CNSS_MHI_FORCE_POWER_OFF:
-		return "FORCE_POWER_OFF";
-	case CNSS_MHI_SUSPEND:
-		return "SUSPEND";
-	case CNSS_MHI_RESUME:
-		return "RESUME";
-	case CNSS_MHI_TRIGGER_RDDM:
-		return "TRIGGER_RDDM";
-	case CNSS_MHI_RDDM_DONE:
-		return "RDDM_DONE";
-	default:
-		return "UNKNOWN";
-	}
-};
-
 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -2705,6 +3061,21 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
 	plat_priv->ramdump_info_v2.dump_data_valid = false;
 }
 
+static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl, void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+
+	return cnss_pci_pm_runtime_get(pci_priv);
+}
+
+static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl,
+					   void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+
+	cnss_pci_pm_runtime_put_noidle(pci_priv);
+}
+
 static char *cnss_mhi_notify_status_to_str(enum MHI_CB status)
 {
 	switch (status) {
@@ -2781,7 +3152,10 @@ static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl, void *priv,
 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
 		return;
 	case MHI_CB_EE_RDDM:
+		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+		del_timer(&plat_priv->fw_boot_timer);
 		del_timer(&pci_priv->dev_rddm_timer);
+		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
 		cnss_reason = CNSS_REASON_RDDM;
 		break;
 	default:
@@ -2926,219 +3300,6 @@ static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
 	kfree(mhi_ctrl->irq);
 }
 
-static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
-					enum cnss_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_DEINIT:
-	case CNSS_MHI_POWER_ON:
-		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_FORCE_POWER_OFF:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_POWER_OFF:
-	case CNSS_MHI_SUSPEND:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_RESUME:
-		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_TRIGGER_RDDM:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_RDDM_DONE:
-		return 0;
-	default:
-		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
-			    cnss_mhi_state_to_str(mhi_state), mhi_state);
-	}
-
-	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
-		    cnss_mhi_state_to_str(mhi_state), mhi_state,
-		    pci_priv->mhi_state);
-
-	return -EINVAL;
-}
-
-static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
-				       enum cnss_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_DEINIT:
-		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_POWER_ON:
-		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_POWER_OFF:
-	case CNSS_MHI_FORCE_POWER_OFF:
-		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
-		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
-		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_SUSPEND:
-		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_RESUME:
-		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_TRIGGER_RDDM:
-		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_RDDM_DONE:
-		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
-		break;
-	default:
-		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
-	}
-}
-
-int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
-			   enum cnss_mhi_state mhi_state)
-{
-	int ret = 0;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (pci_priv->device_id == QCA6174_DEVICE_ID)
-		return 0;
-
-	if (mhi_state < 0) {
-		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
-		return -EINVAL;
-	}
-
-	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
-	if (ret)
-		goto out;
-
-	cnss_pr_dbg("Setting MHI state: %s(%d)\n",
-		    cnss_mhi_state_to_str(mhi_state), mhi_state);
-
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
-		break;
-	case CNSS_MHI_DEINIT:
-		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
-		ret = 0;
-		break;
-	case CNSS_MHI_POWER_ON:
-		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
-		break;
-	case CNSS_MHI_POWER_OFF:
-		mhi_power_down(pci_priv->mhi_ctrl, true);
-		ret = 0;
-		break;
-	case CNSS_MHI_FORCE_POWER_OFF:
-		mhi_power_down(pci_priv->mhi_ctrl, false);
-		ret = 0;
-		break;
-	case CNSS_MHI_SUSPEND:
-		ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
-		break;
-	case CNSS_MHI_RESUME:
-		ret = mhi_pm_resume(pci_priv->mhi_ctrl);
-		break;
-	case CNSS_MHI_TRIGGER_RDDM:
-		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
-		break;
-	case CNSS_MHI_RDDM_DONE:
-		break;
-	default:
-		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		goto out;
-
-	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
-
-	return 0;
-
-out:
-	cnss_pr_err("Failed to set MHI state: %s(%d)\n",
-		    cnss_mhi_state_to_str(mhi_state), mhi_state);
-	return ret;
-}
-
-int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	plat_priv = pci_priv->plat_priv;
-	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
-		return 0;
-
-	if (MHI_TIMEOUT_OVERWRITE_MS)
-		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
-
-	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
-	if (ret)
-		goto out;
-
-	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
-	if (ret)
-		goto out;
-
-	return 0;
-
-out:
-	return ret;
-}
-
-void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv)
-{
-	struct cnss_plat_data *plat_priv;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return;
-	}
-
-	plat_priv = pci_priv->plat_priv;
-	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
-		return;
-
-	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
-	if (!pci_priv->pci_link_down_ind)
-		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
-	else
-		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
-
-	if (plat_priv->ramdump_info_v2.dump_data_valid ||
-	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state))
-		return;
-
-	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
-}
-
 static int cnss_pci_probe(struct pci_dev *pci_dev,
 			  const struct pci_device_id *id)
 {
@@ -3219,6 +3380,7 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
 			cnss_pci_disable_msi(pci_priv);
 			goto disable_bus;
 		}
+		cnss_pci_get_link_status(pci_priv);
 		if (EMULATION_HW)
 			break;
 		ret = cnss_suspend_pci_link(pci_priv);
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index c824b0b..769ba66 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -25,6 +25,12 @@ enum cnss_mhi_state {
 	CNSS_MHI_RDDM_DONE,
 };
 
+enum pci_link_status {
+	PCI_GEN1,
+	PCI_GEN2,
+	PCI_DEF,
+};
+
 struct cnss_msi_user {
 	char *name;
 	int num_vectors;
@@ -60,9 +66,11 @@ struct cnss_pci_data {
 	struct pci_saved_state *default_state;
 	struct msm_pcie_register_event msm_pci_event;
 	atomic_t auto_suspended;
+	atomic_t drv_connected;
 	u8 drv_connected_last;
+	u16 def_link_speed;
+	u16 def_link_width;
 	u8 monitor_wake_intr;
-	struct dma_iommu_mapping smmu_mapping;
 	struct iommu_domain *iommu_domain;
 	u8 smmu_s1_enable;
 	dma_addr_t smmu_iova_start;
@@ -126,6 +134,20 @@ static inline int cnss_pci_get_auto_suspended(void *bus_priv)
 	return atomic_read(&pci_priv->auto_suspended);
 }
 
+static inline void cnss_pci_set_drv_connected(void *bus_priv, int val)
+{
+	struct cnss_pci_data *pci_priv = bus_priv;
+
+	atomic_set(&pci_priv->drv_connected, val);
+}
+
+static inline int cnss_pci_get_drv_connected(void *bus_priv)
+{
+	struct cnss_pci_data *pci_priv = bus_priv;
+
+	return atomic_read(&pci_priv->drv_connected);
+}
+
 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
@@ -134,10 +156,7 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
-int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
-			   enum cnss_mhi_state state);
 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
-void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
index 496a08f..e31c6c8 100644
--- a/drivers/net/wireless/cnss2/power.c
+++ b/drivers/net/wireless/cnss2/power.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
@@ -11,24 +12,29 @@
 #include "debug.h"
 
 static struct cnss_vreg_cfg cnss_vreg_list[] = {
-	{"vdd-wlan-core", 1300000, 1300000, 0, 0},
-	{"vdd-wlan-io", 1800000, 1800000, 0, 0},
-	{"vdd-wlan-xtal-aon", 0, 0, 0, 0},
-	{"vdd-wlan-xtal", 1800000, 1800000, 0, 2},
-	{"vdd-wlan", 0, 0, 0, 0},
-	{"vdd-wlan-ctrl1", 0, 0, 0, 0},
-	{"vdd-wlan-ctrl2", 0, 0, 0, 0},
-	{"vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
-	{"wlan-ant-switch", 1800000, 1800000, 0, 0},
-	{"wlan-soc-swreg", 1200000, 1200000, 0, 0},
-	{"vdd-wlan-aon", 950000, 950000, 0, 0},
-	{"vdd-wlan-dig", 950000, 952000, 0, 0},
-	{"vdd-wlan-rfa1", 1900000, 1900000, 0, 0},
-	{"vdd-wlan-rfa2", 1350000, 1350000, 0, 0},
-	{"vdd-wlan-en", 0, 0, 0, 10},
+	{"vdd-wlan-core", 1300000, 1300000, 0, 0, 0},
+	{"vdd-wlan-io", 1800000, 1800000, 0, 0, 0},
+	{"vdd-wlan-xtal-aon", 0, 0, 0, 0, 0},
+	{"vdd-wlan-xtal", 1800000, 1800000, 0, 2, 0},
+	{"vdd-wlan", 0, 0, 0, 0, 0},
+	{"vdd-wlan-ctrl1", 0, 0, 0, 0, 0},
+	{"vdd-wlan-ctrl2", 0, 0, 0, 0, 0},
+	{"vdd-wlan-sp2t", 2700000, 2700000, 0, 0, 0},
+	{"wlan-ant-switch", 1800000, 1800000, 0, 0, 0},
+	{"wlan-soc-swreg", 1200000, 1200000, 0, 0, 0},
+	{"vdd-wlan-aon", 950000, 950000, 0, 0, 0},
+	{"vdd-wlan-dig", 950000, 952000, 0, 0, 0},
+	{"vdd-wlan-rfa1", 1900000, 1900000, 0, 0, 0},
+	{"vdd-wlan-rfa2", 1350000, 1350000, 0, 0, 0},
+	{"vdd-wlan-en", 0, 0, 0, 10, 0},
+};
+
+static struct cnss_clk_cfg cnss_clk_list[] = {
+	{"rf_clk", 0, 0},
 };
 
 #define CNSS_VREG_INFO_SIZE		ARRAY_SIZE(cnss_vreg_list)
+#define CNSS_CLK_INFO_SIZE		ARRAY_SIZE(cnss_clk_list)
 #define MAX_PROP_SIZE			32
 
 #define BOOTSTRAP_GPIO			"qcom,enable-bootstrap-gpio"
@@ -74,11 +80,11 @@ static int cnss_get_vreg_single(struct cnss_plat_data *plat_priv,
 
 	vreg->reg = reg;
 
-	snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
+	snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-config",
 		 vreg->cfg.name);
 
 	prop = of_get_property(dev->of_node, prop_name, &len);
-	if (!prop || len != (4 * sizeof(__be32))) {
+	if (!prop || len != (5 * sizeof(__be32))) {
 		cnss_pr_dbg("Property %s %s, use default\n", prop_name,
 			    prop ? "invalid format" : "doesn't exist");
 	} else {
@@ -86,12 +92,13 @@ static int cnss_get_vreg_single(struct cnss_plat_data *plat_priv,
 		vreg->cfg.max_uv = be32_to_cpup(&prop[1]);
 		vreg->cfg.load_ua = be32_to_cpup(&prop[2]);
 		vreg->cfg.delay_us = be32_to_cpup(&prop[3]);
+		vreg->cfg.need_unvote = be32_to_cpup(&prop[4]);
 	}
 
-	cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
+	cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u, need_unvote: %u\n",
 		    vreg->cfg.name, vreg->cfg.min_uv,
 		    vreg->cfg.max_uv, vreg->cfg.load_ua,
-		    vreg->cfg.delay_us);
+		    vreg->cfg.delay_us, vreg->cfg.need_unvote);
 
 	return 0;
 }
@@ -158,6 +165,36 @@ static int cnss_vreg_on_single(struct cnss_vreg_info *vreg)
 	return ret;
 }
 
+static int cnss_vreg_unvote_single(struct cnss_vreg_info *vreg)
+{
+	int ret = 0;
+
+	if (!vreg->enabled) {
+		cnss_pr_dbg("Regulator %s is already disabled\n",
+			    vreg->cfg.name);
+		return 0;
+	}
+
+	cnss_pr_dbg("Removing vote for Regulator %s\n", vreg->cfg.name);
+
+	if (vreg->cfg.load_ua) {
+		ret = regulator_set_load(vreg->reg, 0);
+		if (ret < 0)
+			cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+				    vreg->cfg.name, ret);
+	}
+
+	if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+		ret = regulator_set_voltage(vreg->reg, 0,
+					    vreg->cfg.max_uv);
+		if (ret)
+			cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+				    vreg->cfg.name, ret);
+	}
+
+	return ret;
+}
+
 static int cnss_vreg_off_single(struct cnss_vreg_info *vreg)
 {
 	int ret = 0;
@@ -304,6 +341,22 @@ static int cnss_vreg_off(struct cnss_plat_data *plat_priv,
 	return 0;
 }
 
+static int cnss_vreg_unvote(struct cnss_plat_data *plat_priv,
+			    struct list_head *vreg_list)
+{
+	struct cnss_vreg_info *vreg;
+
+	list_for_each_entry_reverse(vreg, vreg_list, list) {
+		if (IS_ERR_OR_NULL(vreg->reg))
+			continue;
+
+		if (vreg->cfg.need_unvote)
+			cnss_vreg_unvote_single(vreg);
+	}
+
+	return 0;
+}
+
 int cnss_get_vreg_type(struct cnss_plat_data *plat_priv,
 		       enum cnss_vreg_type type)
 {
@@ -374,6 +427,229 @@ int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
 	return ret;
 }
 
+int cnss_vreg_unvote_type(struct cnss_plat_data *plat_priv,
+			  enum cnss_vreg_type type)
+{
+	int ret = 0;
+
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		ret = cnss_vreg_unvote(plat_priv, &plat_priv->vreg_list);
+		break;
+	default:
+		cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cnss_get_clk_single(struct cnss_plat_data *plat_priv,
+			       struct cnss_clk_info *clk_info)
+{
+	struct device *dev = &plat_priv->plat_dev->dev;
+	struct clk *clk;
+	int ret;
+
+	clk = devm_clk_get(dev, clk_info->cfg.name);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		if (clk_info->cfg.required)
+			cnss_pr_err("Failed to get clock %s, err = %d\n",
+				    clk_info->cfg.name, ret);
+		else
+			cnss_pr_dbg("Failed to get optional clock %s, err = %d\n",
+				    clk_info->cfg.name, ret);
+		return ret;
+	}
+
+	clk_info->clk = clk;
+	cnss_pr_dbg("Got clock: %s, freq: %u\n",
+		    clk_info->cfg.name, clk_info->cfg.freq);
+
+	return 0;
+}
+
+static void cnss_put_clk_single(struct cnss_plat_data *plat_priv,
+				struct cnss_clk_info *clk_info)
+{
+	struct device *dev = &plat_priv->plat_dev->dev;
+
+	cnss_pr_dbg("Put clock: %s\n", clk_info->cfg.name);
+	devm_clk_put(dev, clk_info->clk);
+}
+
+static int cnss_clk_on_single(struct cnss_clk_info *clk_info)
+{
+	int ret;
+
+	if (clk_info->enabled) {
+		cnss_pr_dbg("Clock %s is already enabled\n",
+			    clk_info->cfg.name);
+		return 0;
+	}
+
+	cnss_pr_dbg("Clock %s is being enabled\n", clk_info->cfg.name);
+
+	if (clk_info->cfg.freq) {
+		ret = clk_set_rate(clk_info->clk, clk_info->cfg.freq);
+		if (ret) {
+			cnss_pr_err("Failed to set frequency %u for clock %s, err = %d\n",
+				    clk_info->cfg.freq, clk_info->cfg.name,
+				    ret);
+			return ret;
+		}
+	}
+
+	ret = clk_prepare_enable(clk_info->clk);
+	if (ret) {
+		cnss_pr_err("Failed to enable clock %s, err = %d\n",
+			    clk_info->cfg.name, ret);
+		return ret;
+	}
+
+	clk_info->enabled = true;
+
+	return 0;
+}
+
+static int cnss_clk_off_single(struct cnss_clk_info *clk_info)
+{
+	if (!clk_info->enabled) {
+		cnss_pr_dbg("Clock %s is already disabled\n",
+			    clk_info->cfg.name);
+		return 0;
+	}
+
+	cnss_pr_dbg("Clock %s is being disabled\n", clk_info->cfg.name);
+
+	clk_disable_unprepare(clk_info->clk);
+	clk_info->enabled = false;
+
+	return 0;
+}
+
+int cnss_get_clk(struct cnss_plat_data *plat_priv)
+{
+	struct device *dev;
+	struct list_head *clk_list;
+	struct cnss_clk_info *clk_info;
+	int ret, i;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	dev = &plat_priv->plat_dev->dev;
+	clk_list = &plat_priv->clk_list;
+
+	if (!list_empty(clk_list)) {
+		cnss_pr_dbg("Clocks have already been updated\n");
+		return 0;
+	}
+
+	for (i = 0; i < CNSS_CLK_INFO_SIZE; i++) {
+		clk_info = devm_kzalloc(dev, sizeof(*clk_info), GFP_KERNEL);
+		if (!clk_info) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		memcpy(&clk_info->cfg, &cnss_clk_list[i],
+		       sizeof(clk_info->cfg));
+		ret = cnss_get_clk_single(plat_priv, clk_info);
+		if (ret != 0) {
+			if (clk_info->cfg.required) {
+				devm_kfree(dev, clk_info);
+				goto cleanup;
+			} else {
+				devm_kfree(dev, clk_info);
+				continue;
+			}
+		}
+		list_add_tail(&clk_info->list, clk_list);
+	}
+
+	return 0;
+
+cleanup:
+	while (!list_empty(clk_list)) {
+		clk_info = list_first_entry(clk_list, struct cnss_clk_info,
+					    list);
+		list_del(&clk_info->list);
+		if (IS_ERR_OR_NULL(clk_info->clk))
+			continue;
+		cnss_put_clk_single(plat_priv, clk_info);
+		devm_kfree(dev, clk_info);
+	}
+
+	return ret;
+}
+
+void cnss_put_clk(struct cnss_plat_data *plat_priv)
+{
+	struct device *dev;
+	struct list_head *clk_list;
+	struct cnss_clk_info *clk_info;
+
+	if (!plat_priv)
+		return;
+
+	dev = &plat_priv->plat_dev->dev;
+	clk_list = &plat_priv->clk_list;
+
+	while (!list_empty(clk_list)) {
+		clk_info = list_first_entry(clk_list, struct cnss_clk_info,
+					    list);
+		list_del(&clk_info->list);
+		if (IS_ERR_OR_NULL(clk_info->clk))
+			continue;
+		cnss_put_clk_single(plat_priv, clk_info);
+		devm_kfree(dev, clk_info);
+	}
+}
+
+static int cnss_clk_on(struct cnss_plat_data *plat_priv,
+		       struct list_head *clk_list)
+{
+	struct cnss_clk_info *clk_info;
+	int ret = 0;
+
+	list_for_each_entry(clk_info, clk_list, list) {
+		if (IS_ERR_OR_NULL(clk_info->clk))
+			continue;
+		ret = cnss_clk_on_single(clk_info);
+		if (ret)
+			break;
+	}
+
+	if (!ret)
+		return 0;
+
+	list_for_each_entry_continue_reverse(clk_info, clk_list, list) {
+		if (IS_ERR_OR_NULL(clk_info->clk))
+			continue;
+
+		cnss_clk_off_single(clk_info);
+	}
+
+	return ret;
+}
+
+static int cnss_clk_off(struct cnss_plat_data *plat_priv,
+			struct list_head *clk_list)
+{
+	struct cnss_clk_info *clk_info;
+
+	list_for_each_entry_reverse(clk_info, clk_list, list) {
+		if (IS_ERR_OR_NULL(clk_info->clk))
+			continue;
+
+		cnss_clk_off_single(clk_info);
+	}
+
+	return 0;
+}
+
 int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -499,14 +775,24 @@ int cnss_power_on_device(struct cnss_plat_data *plat_priv)
 		goto out;
 	}
 
+	ret = cnss_clk_on(plat_priv, &plat_priv->clk_list);
+	if (ret) {
+		cnss_pr_err("Failed to turn on clocks, err = %d\n", ret);
+		goto vreg_off;
+	}
+
 	ret = cnss_select_pinctrl_state(plat_priv, true);
 	if (ret) {
 		cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
-		goto vreg_off;
+		goto clk_off;
 	}
+
 	plat_priv->powered_on = true;
 
 	return 0;
+
+clk_off:
+	cnss_clk_off(plat_priv, &plat_priv->clk_list);
 vreg_off:
 	cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
 out:
@@ -521,10 +807,16 @@ void cnss_power_off_device(struct cnss_plat_data *plat_priv)
 	}
 
 	cnss_select_pinctrl_state(plat_priv, false);
+	cnss_clk_off(plat_priv, &plat_priv->clk_list);
 	cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
 	plat_priv->powered_on = false;
 }
 
+bool cnss_is_device_powered_on(struct cnss_plat_data *plat_priv)
+{
+	return plat_priv->powered_on;
+}
+
 void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
 {
 	unsigned long pin_status = 0;
@@ -548,7 +840,8 @@ int cnss_get_cpr_info(struct cnss_plat_data *plat_priv)
 	struct resource *res;
 	resource_size_t addr_len;
 	void __iomem *tcs_cmd_base_addr;
-	u32 s2f_addr = 0, s6a_addr = 0;
+	const char *cmd_db_name;
+	u32 cpr_pmic_addr = 0;
 	int ret = 0;
 
 	res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "tcs_cmd");
@@ -557,25 +850,27 @@ int cnss_get_cpr_info(struct cnss_plat_data *plat_priv)
 		goto out;
 	}
 
+	ret = of_property_read_string(plat_dev->dev.of_node,
+				      "qcom,cmd_db_name", &cmd_db_name);
+	if (ret) {
+		cnss_pr_dbg("CommandDB name is not present for CPR\n");
+		goto out;
+	}
+
 	ret = cmd_db_ready();
 	if (ret) {
 		cnss_pr_err("CommandDB is not ready\n");
 		goto out;
 	}
 
-	s2f_addr = cmd_db_read_addr("smpf2");
-	s6a_addr = cmd_db_read_addr("smpa6");
-
-	if (s2f_addr > 0) {
-		cpr_info->cpr_pmic_addr = s2f_addr;
-		cnss_pr_dbg("Get CPR PMIC address 0x%x from s2f\n",
-			    cpr_info->cpr_pmic_addr);
-	} else if (s6a_addr > 0) {
-		cpr_info->cpr_pmic_addr = s6a_addr;
-		cnss_pr_dbg("Get CPR PMIC address 0x%x from s6a\n",
-			    cpr_info->cpr_pmic_addr);
+	cpr_pmic_addr = cmd_db_read_addr(cmd_db_name);
+	if (cpr_pmic_addr > 0) {
+		cpr_info->cpr_pmic_addr = cpr_pmic_addr;
+		cnss_pr_dbg("Get CPR PMIC address 0x%x from %s\n",
+			    cpr_info->cpr_pmic_addr, cmd_db_name);
 	} else {
-		cnss_pr_err("CPR PMIC addresses are not available\n");
+		cnss_pr_err("CPR PMIC address is not available for %s\n",
+			    cmd_db_name);
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 6174e88..89e12659 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -24,6 +24,7 @@
 #define QMI_WLFW_TIMEOUT_MS		(plat_priv->ctrl_params.qmi_timeout)
 #define QMI_WLFW_TIMEOUT_JF		msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
 #define COEX_TIMEOUT			QMI_WLFW_TIMEOUT_JF
+#define IMS_TIMEOUT                     QMI_WLFW_TIMEOUT_JF
 
 #define QMI_WLFW_MAX_RECV_BUF_SIZE	SZ_8K
 
@@ -125,12 +126,21 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
 		goto out;
 	}
 
+	if (resp->fw_status_valid) {
+		if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) {
+			ret = -EALREADY;
+			goto qmi_registered;
+		}
+	}
+
 	kfree(req);
 	kfree(resp);
 	return 0;
 
 out:
 	CNSS_ASSERT(0);
+
+qmi_registered:
 	kfree(req);
 	kfree(resp);
 	return ret;
@@ -1300,6 +1310,137 @@ int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
 	return ret;
 }
 
+static int cnss_wlfw_wfc_call_status_send_sync(struct cnss_plat_data *plat_priv,
+					       u32 data_len, const void *data)
+{
+	struct wlfw_wfc_call_status_req_msg_v01 *req;
+	struct wlfw_wfc_call_status_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending WFC call status: state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->wfc_call_status_len = data_len;
+	memcpy(req->wfc_call_status, data, req->wfc_call_status_len);
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_wfc_call_status_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to initialize txn for WFC call status request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+			       QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
+			       WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_wfc_call_status_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send WFC call status request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Fail to wait for response of WFC call status request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("WFC call status request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
+	struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
+		    plat_priv->dynamic_feature,
+		    plat_priv->driver_state);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->mask_valid = 1;
+	req->mask = plat_priv->dynamic_feature;
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request
+		(&plat_priv->qmi_wlfw, NULL, &txn,
+		 QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
+		 WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
+		 wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
 {
 	cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1359,6 +1500,7 @@ static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
 {
 	struct cnss_plat_data *plat_priv =
 		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	struct cnss_cal_info *cal_info;
 
 	cnss_pr_dbg("Received QMI WLFW FW ready indication\n");
 
@@ -1367,8 +1509,13 @@ static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
 		return;
 	}
 
+	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+	if (!cal_info)
+		return;
+
+	cal_info->cal_status = CNSS_CAL_DONE;
 	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
-			       0, NULL);
+			       0, cal_info);
 }
 
 static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
@@ -1423,6 +1570,7 @@ static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
 {
 	struct cnss_plat_data *plat_priv =
 		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	struct cnss_cal_info *cal_info;
 
 	cnss_pr_dbg("Received QMI WLFW calibration done indication\n");
 
@@ -1431,8 +1579,13 @@ static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
 		return;
 	}
 
+	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+	if (!cal_info)
+		return;
+
+	cal_info->cal_status = CNSS_CAL_DONE;
 	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
-			       0, NULL);
+			       0, cal_info);
 }
 
 static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
@@ -1666,8 +1819,11 @@ int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data)
 		goto out;
 
 	ret = cnss_wlfw_ind_register_send_sync(plat_priv);
-	if (ret < 0)
+	if (ret < 0) {
+		if (ret == -EALREADY)
+			ret = 0;
 		goto out;
+	}
 
 	ret = cnss_wlfw_host_cap_send_sync(plat_priv);
 	if (ret < 0)
@@ -1955,3 +2111,189 @@ void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
 {
 	qmi_handle_release(&plat_priv->coex_qmi);
 }
+
+/* IMS Service */
+int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+	struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
+	struct qmi_txn *txn;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->wfc_call_status_valid = 1;
+	req->wfc_call_status = 1;
+
+	txn = &plat_priv->txn;
+	ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
+	if (ret < 0) {
+		cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request
+	(&plat_priv->ims_qmi, NULL, txn,
+	QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
+	IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
+	ims_private_service_subscribe_for_indications_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(txn);
+		cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
+			    ret);
+		goto out;
+	}
+
+	kfree(req);
+	return 0;
+
+out:
+	kfree(req);
+	return ret;
+}
+
+static void ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
+						 struct sockaddr_qrtr *sq,
+						 struct qmi_txn *txn,
+						 const void *data)
+{
+	const
+	struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
+		data;
+
+	cnss_pr_dbg("Received IMS subscribe indication response\n");
+
+	if (!txn) {
+		cnss_pr_err("spurious response\n");
+		return;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
+			    resp->resp.result, resp->resp.error);
+		txn->result = -resp->resp.result;
+	}
+}
+
+static void ims_wfc_call_status_ind_cb(struct qmi_handle *ims_qmi,
+				       struct sockaddr_qrtr *sq,
+				       struct qmi_txn *txn, const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
+	const
+	struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
+	u32 data_len = 0;
+
+	cnss_pr_dbg("Received IMS wfc call status indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	if (!ind_msg) {
+		cnss_pr_err("Invalid indication\n");
+		return;
+	}
+
+	data_len = sizeof(*ind_msg);
+	if (data_len > QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01) {
+		cnss_pr_err("Exceed maxinum data len:%u\n", data_len);
+		return;
+	}
+
+	cnss_wlfw_wfc_call_status_send_sync(plat_priv, data_len, ind_msg);
+}
+
+static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
+	{
+		.type = QMI_RESPONSE,
+		.msg_id =
+		QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
+		.ei =
+		ims_private_service_subscribe_for_indications_rsp_msg_v01_ei,
+		.decoded_size = sizeof(struct
+		ims_private_service_subscribe_for_indications_rsp_msg_v01),
+		.fn = ims_subscribe_for_indication_resp_cb
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
+		.ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
+		.fn = ims_wfc_call_status_ind_cb
+	},
+	{}
+};
+
+static int ims_new_server(struct qmi_handle *qmi,
+			  struct qmi_service *service)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi, struct cnss_plat_data, ims_qmi);
+	struct sockaddr_qrtr sq = { 0 };
+	int ret = 0;
+
+	cnss_pr_dbg("IMS server arrive: node %u port %u\n",
+		    service->node, service->port);
+
+	sq.sq_family = AF_QIPCRTR;
+	sq.sq_node = service->node;
+	sq.sq_port = service->port;
+	ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
+	if (ret < 0) {
+		cnss_pr_err("Fail to connect to remote service port\n");
+		return ret;
+	}
+
+	set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
+	cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	ret = ims_subscribe_for_indication_send_async(plat_priv);
+	return ret;
+}
+
+static void ims_del_server(struct qmi_handle *qmi,
+			   struct qmi_service *service)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi, struct cnss_plat_data, ims_qmi);
+
+	cnss_pr_dbg("IMS server exit\n");
+
+	clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
+}
+
+static struct qmi_ops ims_qmi_ops = {
+	.new_server = ims_new_server,
+	.del_server = ims_del_server,
+};
+
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
+{	int ret;
+
+	ret = qmi_handle_init(&plat_priv->ims_qmi,
+			      IMSPRIVATE_SERVICE_MAX_MSG_LEN,
+			      &ims_qmi_ops, qmi_ims_msg_handlers);
+	if (ret < 0)
+		return ret;
+
+	ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
+			     IMSPRIVATE_SERVICE_VERS_V01, 0);
+	return ret;
+}
+
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
+{
+	qmi_handle_release(&plat_priv->ims_qmi);
+}
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index 784aadc..a064660 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -31,6 +31,7 @@ struct cnss_qmi_event_qdss_trace_save_data {
 #ifdef CONFIG_CNSS2_QMI
 #include "wlan_firmware_service_v01.h"
 #include "coexistence_service_v01.h"
+#include "ip_multimedia_subsystem_private_service_v01.h"
 
 int cnss_qmi_init(struct cnss_plat_data *plat_priv);
 void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
@@ -57,11 +58,14 @@ int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
 			    u8 fw_log_mode);
 int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
 void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv);
 #else
 #define QMI_WLFW_TIMEOUT_MS		10000
 
@@ -164,6 +168,12 @@ int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
 }
 
 static inline
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+static inline
 int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
 {
 	return 0;
@@ -186,6 +196,16 @@ int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
 {
 	return 0;
 }
+
+static inline
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+static inline
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv) {}
+
 #endif /* CONFIG_CNSS2_QMI */
 
 #endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 30d263a..03a418e 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -3561,3 +3561,50 @@ struct qmi_elem_info wlfw_antenna_grant_resp_msg_v01_ei[] = {
 	},
 };
 
+struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				wlfw_wfc_call_status_req_msg_v01,
+				wfc_call_status_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				wlfw_wfc_call_status_req_msg_v01,
+				wfc_call_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				wlfw_wfc_call_status_resp_msg_v01,
+				resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index dc28221..dacdfdb 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -9,11 +9,13 @@
 #define WLFW_SERVICE_ID_V01 0x45
 #define WLFW_SERVICE_VERS_V01 0x01
 
+#define QMI_WLFW_WFC_CALL_STATUS_REQ_V01 0x0049
 #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
 #define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044
 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
 #define QMI_WLFW_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_WFC_CALL_STATUS_RESP_V01 0x0049
 #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
 #define QMI_WLFW_M3_INFO_REQ_V01 0x003C
@@ -86,6 +88,7 @@
 #define QMI_WLFW_MAX_NUM_CE_V01 12
 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
 #define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01 256
 #define QMI_WLFW_MAX_NUM_GPIO_V01 32
 #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
 #define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
@@ -931,4 +934,19 @@ struct wlfw_antenna_grant_resp_msg_v01 {
 #define WLFW_ANTENNA_GRANT_RESP_MSG_V01_MAX_MSG_LEN 7
 extern struct qmi_elem_info wlfw_antenna_grant_resp_msg_v01_ei[];
 
+struct wlfw_wfc_call_status_req_msg_v01 {
+	u32 wfc_call_status_len;
+	u8 wfc_call_status[QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01];
+};
+
+#define WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN 261
+extern struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[];
+
+struct wlfw_wfc_call_status_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WFC_CALL_STATUS_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[];
+
 #endif
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 45d6c86..bf0557f 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -73,6 +73,8 @@ static struct wcnss_prealloc wcnss_allocs[] = {
 	{0, 32 * 1024, NULL},
 	{0, 64 * 1024, NULL},
 	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
+	{0, 64 * 1024, NULL},
 };
 
 int wcnss_prealloc_init(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 36151e6..c04d934 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -140,6 +140,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 	.ht_params = &iwl5000_ht_params,
 	.led_mode = IWL_LED_BLINK,
 	.internal_wimax_coex = true,
+	.csr = &iwl_csr_v1,
 };
 
 #define IWL_DEVICE_5150						\
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index b53148f..036d1d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 
 /* iwl_mvm_create_skb Adds the rxb to a new skb */
-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
-			       u16 len, u8 crypt_len,
-			       struct iwl_rx_cmd_buffer *rxb)
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
@@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 	 * present before copying packet data.
 	 */
 	hdrlen += crypt_len;
+
+	if (WARN_ONCE(headlen < hdrlen,
+		      "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+		      hdrlen, len, crypt_len)) {
+		/*
+		 * We warn and trace because we want to be able to see
+		 * it in trace-cmd as well.
+		 */
+		IWL_DEBUG_RX(mvm,
+			     "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+			     hdrlen, len, crypt_len);
+		return -EINVAL;
+	}
+
 	skb_put_data(skb, hdr, hdrlen);
 	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
 
@@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
 				fraglen, rxb->truesize);
 	}
+
+	return 0;
 }
 
 /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
@@ -1425,7 +1441,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			rx_status->boottime_ns = ktime_get_boot_ns();
 	}
 
-	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+	if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+		kfree_skb(skb);
+		goto out;
+	}
+
 	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
 		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
 out:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index b2905f0..6dcd537 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1388,10 +1388,15 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+	struct iwl_rxq *rxq;
 	u32 r, i, count = 0;
 	bool emergency = false;
 
+	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
+		return;
+
+	rxq = &trans_pcie->rxq[queue];
+
 restart:
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a4906..57ad564 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
-		return err;
+		goto err_put;
 	}
 
 	mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
 	pci_release_regions(pdev);
  err_disable_dev:
 	pci_disable_device(pdev);
+err_put:
 	pci_dev_put(pdev);
 	return err;
 }
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 2d87ebb..47ec529 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4045,16 +4045,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
 
 		if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
 			dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+			kfree(hostcmd);
 			return -EFAULT;
 		}
 
 		/* process hostcmd response*/
 		skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
-		if (!skb)
+		if (!skb) {
+			kfree(hostcmd);
 			return -ENOMEM;
+		}
 		err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
 			      hostcmd->len, hostcmd->cmd);
 		if (err) {
+			kfree(hostcmd);
 			kfree_skb(skb);
 			return -EMSGSIZE;
 		}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index bfe84e5..f1522fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
 		rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
 			      rx_rate - 1 : rx_rate;
 
+	if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
+		rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
+
 	return rate_index;
 }
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d49fbd5..bfbe3aa0 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
 
 	adapter = card->adapter;
 
-	if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+	if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, WARN,
 			    "device already resumed\n");
 		return 0;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 8e4e9b6..ffc565a 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -441,6 +441,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
 #define MWL8K_CMD_UPDATE_STADB		0x1123
 #define MWL8K_CMD_BASTREAM		0x1125
 
+#define MWL8K_LEGACY_5G_RATE_OFFSET \
+	(ARRAY_SIZE(mwl8k_rates_24) - ARRAY_SIZE(mwl8k_rates_50))
+
 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
 {
 	u16 command = le16_to_cpu(cmd);
@@ -1016,8 +1019,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
 
 	if (rxd->channel > 14) {
 		status->band = NL80211_BAND_5GHZ;
-		if (!(status->encoding == RX_ENC_HT))
-			status->rate_idx -= 5;
+		if (!(status->encoding == RX_ENC_HT) &&
+		    status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+			status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
 	} else {
 		status->band = NL80211_BAND_2GHZ;
 	}
@@ -1124,8 +1128,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
 
 	if (rxd->channel > 14) {
 		status->band = NL80211_BAND_5GHZ;
-		if (!(status->encoding == RX_ENC_HT))
-			status->rate_idx -= 5;
+		if (!(status->encoding == RX_ENC_HT) &&
+		    status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+			status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
 	} else {
 		status->band = NL80211_BAND_2GHZ;
 	}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index a279a43..1d21424 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -672,7 +672,6 @@ enum rt2x00_state_flags {
 	CONFIG_CHANNEL_HT40,
 	CONFIG_POWERSAVING,
 	CONFIG_HT_DISABLED,
-	CONFIG_QOS_DISABLED,
 	CONFIG_MONITORING,
 
 	/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index fa2fd64..da52668 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 			rt2x00dev->intf_associated--;
 
 		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
 	}
 
 	/*
-	 * Check for access point which do not support 802.11e . We have to
-	 * generate data frames sequence number in S/W for such AP, because
-	 * of H/W bug.
-	 */
-	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
-	/*
 	 * When the erp information has changed, we should perform
 	 * additional configuration steps. For all other changes we are done.
 	 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 710e964..85e3201 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-		 * seqno on retransmited data (non-QOS) frames. To workaround
-		 * the problem let's generate seqno in software if QOS is
-		 * disabled.
+		 * seqno on retransmitted data (non-QOS) and management frames.
+		 * To workaround the problem let's generate seqno in software.
+		 * Except for beacons which are transmitted periodically by H/W
+		 * hence hardware has to assign seqno for them.
 		 */
-		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-		else
+		if (ieee80211_is_beacon(hdr->frame_control)) {
+			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 			/* H/W will generate sequence number */
 			return;
+		}
+
+		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 	}
 
 	/*
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ef9b502..a3189294 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -469,6 +469,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
 	/* <2> work queue */
 	rtlpriv->works.hw = hw;
 	rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+	if (unlikely(!rtlpriv->works.rtl_wq)) {
+		pr_err("Failed to allocate work queue\n");
+		return;
+	}
+
 	INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
 			  (void *)rtl_watchdog_wq_callback);
 	INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 6387451..b5f91c9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -622,6 +622,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 		      u1rsvdpageloc, 3);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index f3bff66..81ec0e6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -646,6 +646,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
 
 	if (cmd_send_packet)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 84a0d0e..a933490 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -766,6 +766,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 		      u1rsvdpageloc, 3);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index bf9859f..52f1087 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -470,6 +470,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 		      u1rsvdpageloc, 3);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 545115d..4dc9f4e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1699,6 +1699,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
 					rtlhal->oem_id = RT_CID_819X_LENOVO;
 					break;
 				}
+				break;
 			case 0x1025:
 				rtlhal->oem_id = RT_CID_819X_ACER;
 				break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index f2441fb..307c2bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -584,6 +584,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 		      u1rsvdpageloc, sizeof(u1rsvdpageloc));
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index d868a03..d7235f6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -1645,6 +1645,8 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 		      &reserved_page_packet_8812[0], totalpacketlen);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1781,6 +1783,8 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 		      &reserved_page_packet_8821[0], totalpacketlen);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
 
 	rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4e510cb..be59d66 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
  * @adapter: Pointer to the adapter structure.
  * @band: Operating band to be set.
  *
- * Return: None.
+ * Return: int - 0 on success, negative error on failure.
  */
-static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
 {
 	struct ieee80211_supported_band *sbands = &adapter->sbands[band];
 	void *channels = NULL;
 
 	if (band == NL80211_BAND_2GHZ) {
-		channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
-		memcpy(channels,
-		       rsi_2ghz_channels,
-		       sizeof(rsi_2ghz_channels));
+		channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
+				   GFP_KERNEL);
+		if (!channels)
+			return -ENOMEM;
 		sbands->band = NL80211_BAND_2GHZ;
 		sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
 		sbands->bitrates = rsi_rates;
 		sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
 	} else {
-		channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
-		memcpy(channels,
-		       rsi_5ghz_channels,
-		       sizeof(rsi_5ghz_channels));
+		channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
+				   GFP_KERNEL);
+		if (!channels)
+			return -ENOMEM;
 		sbands->band = NL80211_BAND_5GHZ;
 		sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
 		sbands->bitrates = &rsi_rates[4];
@@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
 	sbands->ht_cap.mcs.rx_mask[0] = 0xff;
 	sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 	/* sbands->ht_cap.mcs.rx_highest = 0x82; */
+	return 0;
 }
 
 /**
@@ -1985,11 +1986,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
 	wiphy->available_antennas_rx = 1;
 	wiphy->available_antennas_tx = 1;
 
-	rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+	status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+	if (status)
+		return status;
 	wiphy->bands[NL80211_BAND_2GHZ] =
 		&adapter->sbands[NL80211_BAND_2GHZ];
 	if (common->num_supp_bands > 1) {
-		rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
+		status = rsi_register_rates_channels(adapter,
+						     NL80211_BAND_5GHZ);
+		if (status)
+			return status;
 		wiphy->bands[NL80211_BAND_5GHZ] =
 			&adapter->sbands[NL80211_BAND_5GHZ];
 	}
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 90dc979f..c1608f0 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
 	mutex_init(&priv->wsm_cmd_mux);
 	mutex_init(&priv->conf_mutex);
 	priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+	if (!priv->workqueue) {
+		ieee80211_free_hw(hw);
+		return NULL;
+	}
+
 	sema_init(&priv->scan.lock, 1);
 	INIT_WORK(&priv->scan.work, cw1200_scan_work);
 	INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 0a9eac9..71e9b91 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
 
 	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
 		req->ie_len);
-	if (!frame.skb)
+	if (!frame.skb) {
+		mutex_unlock(&priv->conf_mutex);
+		up(&priv->scan.lock);
 		return -ENOMEM;
+	}
 
 	if (req->ie_len)
 		skb_put_data(frame.skb, req->ie, req->ie_len);
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 42f1d12..2c8f425 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -21,6 +21,7 @@
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
 #endif
+#include <linux/jiffies.h>
 
 struct nqx_platform_data {
 	unsigned int irq_gpio;
@@ -51,6 +52,7 @@ MODULE_DEVICE_TABLE(of, msm_match_table);
 #define NCI_RESET_NTF_LEN		13
 #define NCI_GET_VERSION_CMD_LEN		8
 #define NCI_GET_VERSION_RSP_LEN		12
+#define MAX_IRQ_WAIT_TIME		(90)	//in ms
 
 struct nqx_dev {
 	wait_queue_head_t	read_wq;
@@ -827,6 +829,9 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 
 reset_enable_gpio:
 	/* making sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
+	/* hardware dependent delay */
+	usleep_range(10000, 10100);
 	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
 	/* hardware dependent delay */
 	usleep_range(10000, 10100);
@@ -892,8 +897,13 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 		}
 		goto err_nfcc_reset_failed;
 	}
-	/* hardware dependent delay */
-	msleep(30);
+	nqx_enable_irq(nqx_dev);
+	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
+		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+	if (ret <= 0) {
+		nqx_disable_irq(nqx_dev);
+		goto err_nfcc_hw_check;
+	}
 
 	/* Read Response of RESET command */
 	ret = i2c_master_recv(client, nci_reset_rsp, NCI_RESET_RSP_LEN);
@@ -905,9 +915,13 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 			goto reset_enable_gpio;
 		goto err_nfcc_hw_check;
 	}
-
-	/* hardware dependent delay */
-	msleep(30);
+	nqx_enable_irq(nqx_dev);
+	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
+		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+	if (ret <= 0) {
+		nqx_disable_irq(nqx_dev);
+		goto err_nfcc_hw_check;
+	}
 
 	/* Read Notification of RESET command */
 	ret = i2c_master_recv(client, nci_reset_ntf, NCI_RESET_NTF_LEN);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 2b26f76..01acb6e 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, st95hf_id);
 
+static const struct of_device_id st95hf_spi_of_match[] = {
+	{ .compatible = "st,st95hf" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
 static int st95hf_probe(struct spi_device *nfc_spi_dev)
 {
 	int ret;
@@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = {
 	.driver = {
 		.name = "st95hf",
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(st95hf_spi_of_match),
 	},
 	.id_table = st95hf_id,
 	.probe = st95hf_probe,
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 795ad4f..e341498 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -190,14 +190,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
 		return NULL;
 
 	nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-	if (nd_btt->id < 0) {
-		kfree(nd_btt);
-		return NULL;
-	}
+	if (nd_btt->id < 0)
+		goto out_nd_btt;
 
 	nd_btt->lbasize = lbasize;
-	if (uuid)
+	if (uuid) {
 		uuid = kmemdup(uuid, 16, GFP_KERNEL);
+		if (!uuid)
+			goto out_put_id;
+	}
 	nd_btt->uuid = uuid;
 	dev = &nd_btt->dev;
 	dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -212,6 +213,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
 		return NULL;
 	}
 	return dev;
+
+out_put_id:
+	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+	kfree(nd_btt);
+	return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 1eeb7be..452ad37 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -623,6 +623,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 		return &guid_null;
 }
 
+static void reap_victim(struct nd_mapping *nd_mapping,
+		struct nd_label_ent *victim)
+{
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	u32 slot = to_slot(ndd, victim->label);
+
+	dev_dbg(ndd->dev, "free: %d\n", slot);
+	nd_label_free_slot(ndd, slot);
+	victim->label = NULL;
+}
+
 static int __pmem_label_update(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
 		int pos, unsigned long flags)
@@ -630,9 +641,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
 	struct nd_namespace_common *ndns = &nspm->nsio.common;
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	struct nd_label_ent *label_ent, *victim = NULL;
 	struct nd_namespace_label *nd_label;
 	struct nd_namespace_index *nsindex;
+	struct nd_label_ent *label_ent;
 	struct nd_label_id label_id;
 	struct resource *res;
 	unsigned long *free;
@@ -701,18 +712,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
 	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 		if (!label_ent->label)
 			continue;
-		if (memcmp(nspm->uuid, label_ent->label->uuid,
-					NSLABEL_UUID_LEN) != 0)
-			continue;
-		victim = label_ent;
-		list_move_tail(&victim->list, &nd_mapping->labels);
-		break;
-	}
-	if (victim) {
-		dev_dbg(ndd->dev, "free: %d\n", slot);
-		slot = to_slot(ndd, victim->label);
-		nd_label_free_slot(ndd, slot);
-		victim->label = NULL;
+		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
+				|| memcmp(nspm->uuid, label_ent->label->uuid,
+					NSLABEL_UUID_LEN) == 0)
+			reap_victim(nd_mapping, label_ent);
 	}
 
 	/* update index */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 54d7983..5dc3b40 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1248,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+		struct nd_label_ent *label_ent;
 		struct resource *res;
 
 		for_each_dpa_resource(ndd, res)
 			if (strcmp(res->name, old_label_id.id) == 0)
 				sprintf((void *) res->name, "%s",
 						new_label_id.id);
+
+		mutex_lock(&nd_mapping->lock);
+		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+			struct nd_namespace_label *nd_label = label_ent->label;
+			struct nd_label_id label_id;
+
+			if (!nd_label)
+				continue;
+			nd_label_gen_id(&label_id, nd_label->uuid,
+					__le32_to_cpu(nd_label->flags));
+			if (strcmp(old_label_id.id, label_id.id) == 0)
+				set_bit(ND_LABEL_REAP, &label_ent->flags);
+		}
+		mutex_unlock(&nd_mapping->lock);
 	}
 	kfree(*old_uuid);
  out:
@@ -2251,9 +2266,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
 	if (!nsblk->uuid)
 		goto blk_err;
 	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-	if (name[0])
+	if (name[0]) {
 		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
 				GFP_KERNEL);
+		if (!nsblk->alt_name)
+			goto blk_err;
+	}
 	res = nsblk_add_resource(nd_region, ndd, nsblk,
 			__le64_to_cpu(nd_label->dpa));
 	if (!res)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 98317e7..01e194a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -113,8 +113,12 @@ struct nd_percpu_lane {
 	spinlock_t lock;
 };
 
+enum nd_label_flags {
+	ND_LABEL_REAP,
+};
 struct nd_label_ent {
 	struct list_head list;
+	unsigned long flags;
 	struct nd_namespace_label *label;
 };
 
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1d432c5..a7ce2f1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
 
 	while (len) {
 		mem = kmap_atomic(page);
-		chunk = min_t(unsigned int, len, PAGE_SIZE);
+		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
 		memcpy_flushcache(pmem_addr, mem + off, chunk);
 		kunmap_atomic(mem);
 		len -= chunk;
 		off = 0;
 		page++;
-		pmem_addr += PAGE_SIZE;
+		pmem_addr += chunk;
 	}
 }
 
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 
 	while (len) {
 		mem = kmap_atomic(page);
-		chunk = min_t(unsigned int, len, PAGE_SIZE);
+		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
 		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
 		kunmap_atomic(mem);
 		if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 		len -= chunk;
 		off = 0;
 		page++;
-		pmem_addr += PAGE_SIZE;
+		pmem_addr += chunk;
 	}
 	return BLK_STS_OK;
 }
@@ -281,16 +281,22 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
 	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
 }
 
+/*
+ * Use the 'no check' versions of copy_from_iter_flushcache() and
+ * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
+ * checking, both file offset and device offset, is handled by
+ * dax_iomap_actor()
+ */
 static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	return copy_from_iter_flushcache(addr, bytes, i);
+	return _copy_from_iter_flushcache(addr, bytes, i);
 }
 
 static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	return copy_to_iter_mcsafe(addr, bytes, i);
+	return _copy_to_iter_mcsafe(addr, bytes, i);
 }
 
 static const struct dax_operations pmem_dax_ops = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2cdb303..abfb463 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1480,6 +1480,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
 	unsigned short bs = 1 << ns->lba_shift;
 
+	if (ns->lba_shift > PAGE_SHIFT) {
+		/* unsupported block size, set capacity to 0 later */
+		bs = (1 << 9);
+	}
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
 
@@ -1490,7 +1494,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	if (ns->ms && !ns->ext &&
 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
-	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
+	    ns->lba_shift > PAGE_SHIFT)
 		capacity = 0;
 
 	set_capacity(disk, capacity);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9375fa7..67dec88 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1844,7 +1844,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
 	memset(queue, 0, sizeof(*queue));
 	queue->ctrl = ctrl;
 	queue->qnum = idx;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 	queue->dev = ctrl->dev;
 
 	if (idx > 0)
@@ -1886,7 +1886,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2182,7 +2182,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
-	u32 csn;
 	int ret, opstate;
 
 	/*
@@ -2197,8 +2196,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-	csn = atomic_inc_return(&queue->csn);
-	cmdiu->csn = cpu_to_be32(csn);
 	cmdiu->data_len = cpu_to_be32(data_len);
 	switch (io_dir) {
 	case NVMEFC_FCP_WRITE:
@@ -2256,11 +2253,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (!(op->flags & FCOP_FLAGS_AEN))
 		blk_mq_start_request(op->rq);
 
+	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
+		/*
+		 * If the lld fails to send the command is there an issue with
+		 * the csn value?  If the command that fails is the Connect,
+		 * no - as the connection won't be live.  If it is a command
+		 * post-connect, it's possible a gap in csn may be created.
+		 * Does this matter?  As Linux initiators don't send fused
+		 * commands, no.  The gap would exist, but as there's nothing
+		 * that depends on csn order to be delivered on the target
+		 * side, it shouldn't hurt.  It would be difficult for a
+		 * target to even detect the csn gap as it has no idea when the
+		 * cmd with the csn was supposed to arrive.
+		 */
 		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index da8f5ad..260248fb 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -349,15 +349,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
 		struct nvme_ns *ns)
 {
-	enum nvme_ana_state old;
-
 	mutex_lock(&ns->head->lock);
-	old = ns->ana_state;
 	ns->ana_grpid = le32_to_cpu(desc->grpid);
 	ns->ana_state = desc->state;
 	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-	if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+	if (nvme_state_is_live(ns->ana_state))
 		nvme_mpath_set_live(ns);
 	mutex_unlock(&ns->head->lock);
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0939a4e..e4f167e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -880,8 +880,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
-			&ctrl->ctrl);
+	if (ctrl->ctrl.admin_tagset)
+		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
+			nvme_cancel_request, &ctrl->ctrl);
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
@@ -892,8 +893,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
-				&ctrl->ctrl);
+		if (ctrl->ctrl.tagset)
+			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
+				nvme_cancel_request, &ctrl->ctrl);
 		if (remove)
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5ec96a..776b7e9 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -921,6 +921,15 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
 		return __nvmet_host_allowed(subsys, hostnqn);
 }
 
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+			container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+	ctrl->ops->delete_ctrl(ctrl);
+}
+
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
 {
@@ -962,6 +971,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
+	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1076,21 +1086,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
 	kref_put(&ctrl->ref, nvmet_ctrl_free);
 }
 
-static void nvmet_fatal_error_handler(struct work_struct *work)
-{
-	struct nvmet_ctrl *ctrl =
-			container_of(work, struct nvmet_ctrl, fatal_err_work);
-
-	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
-	ctrl->ops->delete_ctrl(ctrl);
-}
-
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 		schedule_work(&ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 7c530c8..60dc568 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/idr.h>
 #include <linux/init.h>
+#include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/nvmem-provider.h>
@@ -32,12 +33,13 @@ struct nvmem_device {
 	int			stride;
 	int			word_size;
 	int			id;
-	int			users;
+	struct kref		refcnt;
 	size_t			size;
 	bool			read_only;
 	int			flags;
 	struct bin_attribute	eeprom;
 	struct device		*base_dev;
+	struct list_head	cells;
 	nvmem_reg_read_t	reg_read;
 	nvmem_reg_write_t	reg_write;
 	void *priv;
@@ -58,8 +60,8 @@ struct nvmem_cell {
 static DEFINE_MUTEX(nvmem_mutex);
 static DEFINE_IDA(nvmem_ida);
 
-static LIST_HEAD(nvmem_cells);
-static DEFINE_MUTEX(nvmem_cells_mutex);
+static DEFINE_MUTEX(nvmem_cell_mutex);
+static LIST_HEAD(nvmem_cell_tables);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key eeprom_lock_key;
@@ -282,48 +284,27 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
 	return to_nvmem_device(d);
 }
 
-static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
-{
-	struct nvmem_cell *p;
-
-	mutex_lock(&nvmem_cells_mutex);
-
-	list_for_each_entry(p, &nvmem_cells, node)
-		if (!strcmp(p->name, cell_id)) {
-			mutex_unlock(&nvmem_cells_mutex);
-			return p;
-		}
-
-	mutex_unlock(&nvmem_cells_mutex);
-
-	return NULL;
-}
-
 static void nvmem_cell_drop(struct nvmem_cell *cell)
 {
-	mutex_lock(&nvmem_cells_mutex);
+	mutex_lock(&nvmem_mutex);
 	list_del(&cell->node);
-	mutex_unlock(&nvmem_cells_mutex);
+	mutex_unlock(&nvmem_mutex);
 	kfree(cell);
 }
 
 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
 {
-	struct nvmem_cell *cell;
-	struct list_head *p, *n;
+	struct nvmem_cell *cell, *p;
 
-	list_for_each_safe(p, n, &nvmem_cells) {
-		cell = list_entry(p, struct nvmem_cell, node);
-		if (cell->nvmem == nvmem)
-			nvmem_cell_drop(cell);
-	}
+	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
+		nvmem_cell_drop(cell);
 }
 
 static void nvmem_cell_add(struct nvmem_cell *cell)
 {
-	mutex_lock(&nvmem_cells_mutex);
-	list_add_tail(&cell->node, &nvmem_cells);
-	mutex_unlock(&nvmem_cells_mutex);
+	mutex_lock(&nvmem_mutex);
+	list_add_tail(&cell->node, &cell->nvmem->cells);
+	mutex_unlock(&nvmem_mutex);
 }
 
 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
@@ -439,6 +420,110 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
 	return 0;
 }
 
+static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
+{
+	const struct nvmem_cell_info *info;
+	struct nvmem_cell_table *table;
+	struct nvmem_cell *cell;
+	int rval = 0, i;
+
+	mutex_lock(&nvmem_cell_mutex);
+	list_for_each_entry(table, &nvmem_cell_tables, node) {
+		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
+			for (i = 0; i < table->ncells; i++) {
+				info = &table->cells[i];
+
+				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+				if (!cell) {
+					rval = -ENOMEM;
+					goto out;
+				}
+
+				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
+								     info,
+								     cell);
+				if (rval) {
+					kfree(cell);
+					goto out;
+				}
+
+				nvmem_cell_add(cell);
+			}
+		}
+	}
+
+out:
+	mutex_unlock(&nvmem_cell_mutex);
+	return rval;
+}
+
+static struct nvmem_cell *
+nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+{
+	struct nvmem_cell *cell = NULL;
+	int i = 0;
+
+	mutex_lock(&nvmem_mutex);
+	list_for_each_entry(cell, &nvmem->cells, node) {
+		if (index == i++)
+			break;
+	}
+	mutex_unlock(&nvmem_mutex);
+
+	return cell;
+}
+
+static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+{
+	struct device_node *parent, *child;
+	struct device *dev = &nvmem->dev;
+	struct nvmem_cell *cell;
+	const __be32 *addr;
+	int len;
+
+	parent = dev->of_node;
+
+	for_each_child_of_node(parent, child) {
+		addr = of_get_property(child, "reg", &len);
+		if (!addr || (len < 2 * sizeof(u32))) {
+			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+			return -EINVAL;
+		}
+
+		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+		if (!cell)
+			return -ENOMEM;
+
+		cell->nvmem = nvmem;
+		cell->offset = be32_to_cpup(addr++);
+		cell->bytes = be32_to_cpup(addr);
+		cell->name = child->name;
+
+		addr = of_get_property(child, "bits", &len);
+		if (addr && len == (2 * sizeof(u32))) {
+			cell->bit_offset = be32_to_cpup(addr++);
+			cell->nbits = be32_to_cpup(addr);
+		}
+
+		if (cell->nbits)
+			cell->bytes = DIV_ROUND_UP(
+					cell->nbits + cell->bit_offset,
+					BITS_PER_BYTE);
+
+		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
+				cell->name, nvmem->stride);
+			/* Cells already added will be freed later. */
+			kfree(cell);
+			return -EINVAL;
+		}
+
+		nvmem_cell_add(cell);
+	}
+
+	return 0;
+}
+
 /**
  * nvmem_register() - Register a nvmem device for given nvmem_config.
  * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
@@ -467,6 +552,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 		return ERR_PTR(rval);
 	}
 
+	kref_init(&nvmem->refcnt);
+	INIT_LIST_HEAD(&nvmem->cells);
+
 	nvmem->id = rval;
 	nvmem->owner = config->owner;
 	if (!nvmem->owner && config->dev->driver)
@@ -522,8 +610,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 			goto err_teardown_compat;
 	}
 
+	rval = nvmem_add_cells_from_table(nvmem);
+	if (rval)
+		goto err_remove_cells;
+
+	rval = nvmem_add_cells_from_of(nvmem);
+	if (rval)
+		goto err_remove_cells;
+
 	return nvmem;
 
+err_remove_cells:
+	nvmem_device_remove_all_cells(nvmem);
 err_teardown_compat:
 	if (config->compat)
 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
@@ -536,6 +634,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 }
 EXPORT_SYMBOL_GPL(nvmem_register);
 
+static void nvmem_device_release(struct kref *kref)
+{
+	struct nvmem_device *nvmem;
+
+	nvmem = container_of(kref, struct nvmem_device, refcnt);
+
+	if (nvmem->flags & FLAG_COMPAT)
+		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+
+	nvmem_device_remove_all_cells(nvmem);
+	device_del(&nvmem->dev);
+	put_device(&nvmem->dev);
+}
+
 /**
  * nvmem_unregister() - Unregister previously registered nvmem device
  *
@@ -545,19 +657,7 @@ EXPORT_SYMBOL_GPL(nvmem_register);
  */
 int nvmem_unregister(struct nvmem_device *nvmem)
 {
-	mutex_lock(&nvmem_mutex);
-	if (nvmem->users) {
-		mutex_unlock(&nvmem_mutex);
-		return -EBUSY;
-	}
-	mutex_unlock(&nvmem_mutex);
-
-	if (nvmem->flags & FLAG_COMPAT)
-		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
-
-	nvmem_device_remove_all_cells(nvmem);
-	device_del(&nvmem->dev);
-	put_device(&nvmem->dev);
+	kref_put(&nvmem->refcnt, nvmem_device_release);
 
 	return 0;
 }
@@ -630,52 +730,32 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
 {
 	struct nvmem_device *nvmem = NULL;
 
+	if (!np)
+		return ERR_PTR(-ENOENT);
+
 	mutex_lock(&nvmem_mutex);
-
-	if (np) {
-		nvmem = of_nvmem_find(np);
-		if (!nvmem) {
-			mutex_unlock(&nvmem_mutex);
-			return ERR_PTR(-EPROBE_DEFER);
-		}
-	} else {
-		struct nvmem_cell *cell = nvmem_find_cell(cell_id);
-
-		if (cell) {
-			nvmem = cell->nvmem;
-			*cellp = cell;
-		}
-
-		if (!nvmem) {
-			mutex_unlock(&nvmem_mutex);
-			return ERR_PTR(-ENOENT);
-		}
-	}
-
-	nvmem->users++;
+	nvmem = of_nvmem_find(np);
 	mutex_unlock(&nvmem_mutex);
+	if (!nvmem)
+		return ERR_PTR(-EPROBE_DEFER);
 
 	if (!try_module_get(nvmem->owner)) {
 		dev_err(&nvmem->dev,
 			"could not increase module refcount for cell %s\n",
 			nvmem->name);
 
-		mutex_lock(&nvmem_mutex);
-		nvmem->users--;
-		mutex_unlock(&nvmem_mutex);
-
 		return ERR_PTR(-EINVAL);
 	}
 
+	kref_get(&nvmem->refcnt);
+
 	return nvmem;
 }
 
 static void __nvmem_device_put(struct nvmem_device *nvmem)
 {
 	module_put(nvmem->owner);
-	mutex_lock(&nvmem_mutex);
-	nvmem->users--;
-	mutex_unlock(&nvmem_mutex);
+	kref_put(&nvmem->refcnt, nvmem_device_release);
 }
 
 static struct nvmem_device *nvmem_find(const char *name)
@@ -845,10 +925,8 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
 					    const char *name)
 {
 	struct device_node *cell_np, *nvmem_np;
-	struct nvmem_cell *cell;
 	struct nvmem_device *nvmem;
-	const __be32 *addr;
-	int rval, len;
+	struct nvmem_cell *cell;
 	int index = 0;
 
 	/* if cell name exists, find index to the name */
@@ -868,54 +946,13 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
 	if (IS_ERR(nvmem))
 		return ERR_CAST(nvmem);
 
-	addr = of_get_property(cell_np, "reg", &len);
-	if (!addr || (len < 2 * sizeof(u32))) {
-		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
-			cell_np);
-		rval  = -EINVAL;
-		goto err_mem;
-	}
-
-	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	cell = nvmem_find_cell_by_index(nvmem, index);
 	if (!cell) {
-		rval = -ENOMEM;
-		goto err_mem;
+		__nvmem_device_put(nvmem);
+		return ERR_PTR(-ENOENT);
 	}
 
-	cell->nvmem = nvmem;
-	cell->offset = be32_to_cpup(addr++);
-	cell->bytes = be32_to_cpup(addr);
-	cell->name = cell_np->name;
-
-	addr = of_get_property(cell_np, "bits", &len);
-	if (addr && len == (2 * sizeof(u32))) {
-		cell->bit_offset = be32_to_cpup(addr++);
-		cell->nbits = be32_to_cpup(addr);
-	}
-
-	if (cell->nbits)
-		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
-					   BITS_PER_BYTE);
-
-	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
-			dev_err(&nvmem->dev,
-				"cell %s unaligned to nvmem stride %d\n",
-				cell->name, nvmem->stride);
-		rval  = -EINVAL;
-		goto err_sanity;
-	}
-
-	nvmem_cell_add(cell);
-
 	return cell;
-
-err_sanity:
-	kfree(cell);
-
-err_mem:
-	__nvmem_device_put(nvmem);
-
-	return ERR_PTR(rval);
 }
 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
 #endif
@@ -1021,7 +1058,6 @@ void nvmem_cell_put(struct nvmem_cell *cell)
 	struct nvmem_device *nvmem = cell->nvmem;
 
 	__nvmem_device_put(nvmem);
-	nvmem_cell_drop(cell);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_put);
 
@@ -1349,6 +1385,45 @@ int nvmem_device_write(struct nvmem_device *nvmem,
 }
 EXPORT_SYMBOL_GPL(nvmem_device_write);
 
+/**
+ * nvmem_add_cell_table() - register a table of cell info entries
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_add_cell_table(struct nvmem_cell_table *table)
+{
+	mutex_lock(&nvmem_cell_mutex);
+	list_add_tail(&table->node, &nvmem_cell_tables);
+	mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
+
+/**
+ * nvmem_del_cell_table() - remove a previously registered cell info table
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_del_cell_table(struct nvmem_cell_table *table)
+{
+	mutex_lock(&nvmem_cell_mutex);
+	list_del(&table->node);
+	mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
+
+/**
+ * nvmem_dev_name() - Get the name of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: name of the nvmem device.
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+	return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
 static int __init nvmem_init(void)
 {
 	return bus_register(&nvmem_bus_type);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 0c6e8b4..c60b465 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
 		break;
 
 	case DISPLAY_MODEL_LASI:
+		/* Skip LED registration when running on QEMU */
+		if (running_on_qemu)
+			return 1;
 		LED_DATA_REG = data_reg;
 		led_func_ptr = led_LASI_driver;
 		printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 9ba4d12..808a182 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1491,6 +1491,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
 	}
 }
 
+/*
+ * Remove the entries in the sysfs PCI slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+	struct hv_pci_dev *hpdev;
+
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		if (!hpdev->pci_slot)
+			continue;
+		pci_destroy_slot(hpdev->pci_slot);
+		hpdev->pci_slot = NULL;
+	}
+}
+
 /**
  * create_root_hv_pci_bus() - Expose a new root PCI bus
  * @hbus:	Root PCI bus, as understood by this driver
@@ -1766,6 +1781,10 @@ static void pci_devices_present_work(struct work_struct *work)
 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
 					 list_entry);
 		list_del(&hpdev->list_entry);
+
+		if (hpdev->pci_slot)
+			pci_destroy_slot(hpdev->pci_slot);
+
 		put_pcichild(hpdev);
 	}
 
@@ -1905,6 +1924,9 @@ static void hv_eject_device_work(struct work_struct *work)
 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
 			 VM_PKT_DATA_INBAND, 0);
 
+	/* For the get_pcichild() in hv_pci_eject_device() */
+	put_pcichild(hpdev);
+	/* For the two refs taken in new_pcichild_device() */
 	put_pcichild(hpdev);
 	put_pcichild(hpdev);
 	put_hvpcibus(hpdev->hbus);
@@ -2682,6 +2704,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 		pci_lock_rescan_remove();
 		pci_stop_root_bus(hbus->pci_bus);
 		pci_remove_root_bus(hbus->pci_bus);
+		hv_pci_remove_slots(hbus);
 		pci_unlock_rescan_remove();
 		hbus->state = hv_pcibus_removed;
 	}
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index a8ef120..8c27fd7 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.*/
 
-#include <asm/dma-iommu.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
@@ -13,7 +12,6 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/ipc_logging.h>
 #include <linux/irq.h>
@@ -153,12 +151,6 @@
 #define RATE_CHANGE_19P2MHZ (19200000)
 #define RATE_CHANGE_100MHZ (100000000)
 
-#define MSM_PCIE_IOMMU_PRESENT BIT(0)
-#define MSM_PCIE_IOMMU_S1_BYPASS BIT(1)
-#define MSM_PCIE_IOMMU_FAST BIT(2)
-#define MSM_PCIE_IOMMU_ATOMIC BIT(3)
-#define MSM_PCIE_IOMMU_FORCE_COHERENT BIT(4)
-
 #define MSM_PCIE_LTSSM_MASK (0x3f)
 
 #define MSM_PCIE_DRV_MAJOR_VERSION (1)
@@ -559,6 +551,12 @@ struct msm_pcie_irq_info_t {
 	uint32_t num;
 };
 
+/* bandwidth info structure */
+struct msm_pcie_bw_scale_info_t {
+	u32 cx_vreg_min;
+	u32 rate_change_freq;
+};
+
 /* phy info structure */
 struct msm_pcie_phy_info_t {
 	u32 offset;
@@ -670,12 +668,15 @@ struct msm_pcie_dev_t {
 
 	struct msm_pcie_vreg_info_t *cx_vreg;
 	struct msm_pcie_clk_info_t *rate_change_clk;
+	struct msm_pcie_bw_scale_info_t *bw_scale;
+	u32 bw_gen_max;
 
 	bool cfg_access;
 	spinlock_t cfg_lock;
 	unsigned long irqsave_flags;
 	struct mutex enumerate_lock;
 	struct mutex setup_lock;
+	struct mutex clk_lock;
 
 	struct irq_domain *irq_domain;
 	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
@@ -771,9 +772,6 @@ struct msm_pcie_dev_t {
 struct msm_root_dev_t {
 	struct msm_pcie_dev_t *pcie_dev;
 	struct pci_dev *pci_dev;
-	uint32_t iommu_cfg;
-	dma_addr_t iommu_base;
-	size_t iommu_size;
 };
 
 /* debug mask sys interface */
@@ -847,6 +845,7 @@ static struct pcie_drv_sta {
 	struct msm_pcie_dev_t *msm_pcie_dev;
 	struct rpmsg_device *rpdev;
 	struct work_struct drv_connect; /* connect worker */
+	u32 rate_change_vote; /* each bit corresponds to RC vote for 100MHz */
 	struct mutex drv_lock;
 } pcie_drv;
 
@@ -3182,6 +3181,12 @@ static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
 			msm_pcie_config_clock_mem(dev, info);
 
 		if (info->freq) {
+			if (!strcmp(info->name, "pcie_phy_refgen_clk")) {
+				mutex_lock(&dev->clk_lock);
+				pcie_drv.rate_change_vote |= BIT(dev->rc_idx);
+				mutex_unlock(&dev->clk_lock);
+			}
+
 			rc = clk_set_rate(info->hdl, info->freq);
 			if (rc) {
 				PCIE_ERR(dev,
@@ -3262,6 +3267,17 @@ static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
 		if (dev->clk[i].hdl)
 			clk_disable_unprepare(dev->clk[i].hdl);
 
+	if (dev->rate_change_clk) {
+		mutex_lock(&dev->clk_lock);
+
+		pcie_drv.rate_change_vote &= ~BIT(dev->rc_idx);
+		if (!pcie_drv.rate_change_vote)
+			clk_set_rate(dev->rate_change_clk->hdl,
+					RATE_CHANGE_19P2MHZ);
+
+		mutex_unlock(&dev->clk_lock);
+	}
+
 	if (dev->bus_client) {
 		PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
 			dev->rc_idx);
@@ -3733,6 +3749,29 @@ static int msm_pcie_get_reset(struct msm_pcie_dev_t *pcie_dev)
 	return 0;
 }
 
+static int msm_pcie_get_bw_scale(struct msm_pcie_dev_t *pcie_dev)
+{
+	int size = 0;
+	struct platform_device *pdev = pcie_dev->pdev;
+
+	of_get_property(pdev->dev.of_node, "qcom,bw-scale", &size);
+	if (size) {
+		pcie_dev->bw_scale = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+		if (!pcie_dev->bw_scale)
+			return -ENOMEM;
+
+		of_property_read_u32_array(pdev->dev.of_node, "qcom,bw-scale",
+				(u32 *)pcie_dev->bw_scale, size / sizeof(u32));
+
+		pcie_dev->bw_gen_max = size / sizeof(u32);
+	} else {
+		PCIE_DBG(pcie_dev, "RC%d: bandwidth scaling is not supported\n",
+			pcie_dev->rc_idx);
+	}
+
+	return 0;
+}
+
 static int msm_pcie_get_phy(struct msm_pcie_dev_t *pcie_dev)
 {
 	int ret, size = 0;
@@ -3945,6 +3984,10 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
 	if (ret)
 		return ret;
 
+	ret = msm_pcie_get_bw_scale(dev);
+	if (ret)
+		return ret;
+
 	ret = msm_pcie_get_phy(dev);
 	if (ret)
 		return ret;
@@ -3977,6 +4020,50 @@ static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
 	dev->rumi = NULL;
 }
 
+static void msm_pcie_scale_link_bandwidth(struct msm_pcie_dev_t *pcie_dev,
+					u16 target_link_speed)
+{
+	struct msm_pcie_bw_scale_info_t *bw_scale;
+	u32 index = target_link_speed - PCI_EXP_LNKCTL2_TLS_2_5GT;
+
+	if (!pcie_dev->bw_scale)
+		return;
+
+	if (index >= pcie_dev->bw_gen_max) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: invalid target link speed: %d\n",
+			pcie_dev->rc_idx, target_link_speed);
+		return;
+	}
+
+	bw_scale = &pcie_dev->bw_scale[index];
+
+	if (pcie_dev->cx_vreg)
+		regulator_set_voltage(pcie_dev->cx_vreg->hdl,
+					bw_scale->cx_vreg_min,
+					pcie_dev->cx_vreg->max_v);
+
+	if (pcie_dev->rate_change_clk) {
+		mutex_lock(&pcie_dev->clk_lock);
+
+		/* it is okay to always scale up */
+		clk_set_rate(pcie_dev->rate_change_clk->hdl,
+				RATE_CHANGE_100MHZ);
+
+		if (bw_scale->rate_change_freq == RATE_CHANGE_100MHZ)
+			pcie_drv.rate_change_vote |= BIT(pcie_dev->rc_idx);
+		else
+			pcie_drv.rate_change_vote &= ~BIT(pcie_dev->rc_idx);
+
+		/* scale down to 19.2MHz if no one needs 100MHz */
+		if (!pcie_drv.rate_change_vote)
+			clk_set_rate(pcie_dev->rate_change_clk->hdl,
+					RATE_CHANGE_19P2MHZ);
+
+		mutex_unlock(&pcie_dev->clk_lock);
+	}
+}
+
 static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
 {
 	int link_check_count = 0;
@@ -4044,6 +4131,39 @@ static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
 		return MSM_PCIE_ERROR;
 	}
 
+	if (dev->bw_scale) {
+		u32 index;
+		u32 current_link_speed;
+		struct msm_pcie_bw_scale_info_t *bw_scale;
+
+		/*
+		 * check if the link up GEN speed is less than the max/default
+		 * supported. If it is, scale down CX corner and rate change
+		 * clock accordingly.
+		 */
+		current_link_speed = readl_relaxed(dev->dm_core +
+						PCIE20_CAP_LINKCTRLSTATUS);
+		current_link_speed = ((current_link_speed >> 16) &
+					PCI_EXP_LNKSTA_CLS);
+
+		index = current_link_speed - PCI_EXP_LNKCTL2_TLS_2_5GT;
+		if (index >= dev->bw_gen_max) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: unsupported gen speed: %d\n",
+				dev->rc_idx, current_link_speed);
+			return 0;
+		}
+
+		bw_scale = &dev->bw_scale[index];
+
+		if (bw_scale->cx_vreg_min < dev->cx_vreg->min_v) {
+			msm_pcie_write_reg_field(dev->dm_core,
+				PCIE20_CAP + PCI_EXP_LNKCTL2,
+				PCI_EXP_LNKCAP_SLS, current_link_speed);
+			msm_pcie_scale_link_bandwidth(dev, current_link_speed);
+		}
+	}
+
 	return 0;
 }
 
@@ -6151,9 +6271,6 @@ static int msm_pcie_link_retrain(struct msm_pcie_dev_t *pcie_dev,
 	u32 cnt_max = 1000; /* 100ms timeout */
 	u32 link_status_lbms_mask = PCI_EXP_LNKSTA_LBMS << PCI_EXP_LNKCTL;
 
-	/* force link to L0 */
-	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL,  0, BIT(5));
-
 	cnt = 0;
 	/* confirm link is in L0 */
 	while (((readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) &
@@ -6186,34 +6303,34 @@ static int msm_pcie_link_retrain(struct msm_pcie_dev_t *pcie_dev,
 		usleep_range(100, 105);
 	}
 
-	/* re-enable link LPM */
-	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
-
 	return 0;
 }
 
-static void msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
-					u16 *target_link_width)
+static int msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
+					u16 target_link_width)
 {
-	switch (*target_link_width) {
+	u16 link_width;
+
+	switch (target_link_width) {
 	case PCI_EXP_LNKSTA_NLW_X1:
-		*target_link_width = LINK_WIDTH_X1;
+		link_width = LINK_WIDTH_X1;
 		break;
 	case PCI_EXP_LNKSTA_NLW_X2:
-		*target_link_width = LINK_WIDTH_X2;
+		link_width = LINK_WIDTH_X2;
 		break;
 	default:
 		PCIE_ERR(pcie_dev,
 			"PCIe: RC%d: unsupported link width request: %d\n",
-			pcie_dev->rc_idx, *target_link_width);
-		*target_link_width = 0;
-		return;
+			pcie_dev->rc_idx, target_link_width);
+		return -EINVAL;
 	}
 
 	msm_pcie_write_reg_field(pcie_dev->dm_core,
 				PCIE20_PORT_LINK_CTRL_REG,
 				LINK_WIDTH_MASK << LINK_WIDTH_SHIFT,
-				*target_link_width);
+				link_width);
+
+	return 0;
 }
 
 int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
@@ -6224,6 +6341,8 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 	u16 link_status;
 	u16 current_link_speed;
 	u16 current_link_width;
+	bool set_link_speed = true;
+	bool set_link_width = true;
 	int ret;
 
 	if (!pci_dev)
@@ -6239,207 +6358,59 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 	target_link_width <<= PCI_EXP_LNKSTA_NLW_SHIFT;
 
 	if (target_link_speed == current_link_speed)
-		target_link_speed = 0;
+		set_link_speed = false;
 
 	if (target_link_width == current_link_width)
-		target_link_width = 0;
+		set_link_width = false;
 
-	if (target_link_width)
-		msm_pcie_set_link_width(pcie_dev, &target_link_width);
-
-	if (!target_link_speed && !target_link_width)
+	if (!set_link_speed && !set_link_width)
 		return 0;
 
-	if (target_link_speed)
+	if (set_link_width) {
+		ret = msm_pcie_set_link_width(pcie_dev, target_link_width);
+		if (ret)
+			return ret;
+	}
+
+	if (set_link_speed)
 		msm_pcie_config_clear_set_dword(root_pci_dev,
 						root_pci_dev->pcie_cap +
 						PCI_EXP_LNKCTL2,
 						PCI_EXP_LNKSTA_CLS,
 						target_link_speed);
 
-	/* increase CX and rate change clk freq if target speed is Gen3 */
-	if (target_link_speed == PCI_EXP_LNKCTL2_TLS_8_0GT) {
-		if (pcie_dev->cx_vreg)
-			regulator_set_voltage(pcie_dev->cx_vreg->hdl,
-						RPMH_REGULATOR_LEVEL_NOM,
-						pcie_dev->cx_vreg->max_v);
+	/* disable link L1. Need to be in L0 for gen switch */
+	msm_pcie_config_l1(pcie_dev, root_pci_dev, false);
+	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL,  0, BIT(5));
 
-		if (pcie_dev->rate_change_clk)
-			clk_set_rate(pcie_dev->rate_change_clk->hdl,
-					RATE_CHANGE_100MHZ);
-	}
+	if (target_link_speed > current_link_speed)
+		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
 
 	ret = msm_pcie_link_retrain(pcie_dev, root_pci_dev);
 	if (ret)
-		return ret;
+		goto out;
 
-	/* decrease CX and rate change clk freq if link is in Gen1 */
 	pcie_capability_read_word(root_pci_dev, PCI_EXP_LNKSTA, &link_status);
-	if ((link_status & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKCTL2_TLS_2_5GT) {
-		if (pcie_dev->cx_vreg)
-			regulator_set_voltage(pcie_dev->cx_vreg->hdl,
-						RPMH_REGULATOR_LEVEL_LOW_SVS,
-						pcie_dev->cx_vreg->max_v);
-
-		if (pcie_dev->rate_change_clk)
-			clk_set_rate(pcie_dev->rate_change_clk->hdl,
-					RATE_CHANGE_19P2MHZ);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_pcie_set_link_bandwidth);
-
-static int msm_pci_iommu_parse_dt(struct msm_root_dev_t *root_dev)
-{
-	int ret;
-	struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev;
-	struct pci_dev *pci_dev = root_dev->pci_dev;
-	struct device_node *pci_of_node = pci_dev->dev.of_node;
-
-	ret = of_property_read_u32(pci_of_node, "qcom,iommu-cfg",
-				&root_dev->iommu_cfg);
-	if (ret) {
-		PCIE_DBG(pcie_dev, "PCIe: RC%d: no iommu-cfg present in DT\n",
-			pcie_dev->rc_idx);
-		return 0;
-	}
-
-	if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) {
-		root_dev->iommu_base = 0;
-		root_dev->iommu_size = PAGE_SIZE;
-	} else {
-		u64 iommu_range[2];
-
-		ret = of_property_count_elems_of_size(pci_of_node,
-							"qcom,iommu-range",
-							sizeof(iommu_range));
-		if (ret != 1) {
-			PCIE_ERR(pcie_dev,
-				"invalid entry for iommu address: %d\n",
-				ret);
-			return ret;
-		}
-
-		ret = of_property_read_u64_array(pci_of_node,
-						"qcom,iommu-range",
-						iommu_range, 2);
-		if (ret) {
-			PCIE_ERR(pcie_dev,
-				"failed to get iommu address: %d\n", ret);
-			return ret;
-		}
-
-		root_dev->iommu_base = (dma_addr_t)iommu_range[0];
-		root_dev->iommu_size = (size_t)iommu_range[1];
-	}
-
-	PCIE_DBG(pcie_dev,
-		"iommu-cfg: 0x%x iommu-base: %pad iommu-size: 0x%zx\n",
-		root_dev->iommu_cfg, &root_dev->iommu_base,
-		root_dev->iommu_size);
-
-	return 0;
-}
-
-static int msm_pci_iommu_init(struct msm_root_dev_t *root_dev)
-{
-	int ret;
-	struct dma_iommu_mapping *mapping;
-	struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev;
-	struct pci_dev *pci_dev = root_dev->pci_dev;
-
-	ret = msm_pci_iommu_parse_dt(root_dev);
-	if (ret)
-		return ret;
-
-	if (!(root_dev->iommu_cfg & MSM_PCIE_IOMMU_PRESENT))
-		return 0;
-
-	mapping = __depr_arm_iommu_create_mapping(&pci_bus_type,
-						root_dev->iommu_base,
-						root_dev->iommu_size);
-	if (IS_ERR_OR_NULL(mapping)) {
-		ret = PTR_ERR(mapping);
+	if ((link_status & PCI_EXP_LNKSTA_CLS) != target_link_speed ||
+		(link_status & PCI_EXP_LNKSTA_NLW) != target_link_width) {
 		PCIE_ERR(pcie_dev,
-			"PCIe: RC%d: Failed to create IOMMU mapping (%d)\n",
-			pcie_dev->rc_idx, ret);
-		return ret;
+			"PCIe: RC%d: failed to switch bandwidth: target speed: %d width: %d\n",
+			pcie_dev->rc_idx, target_link_speed,
+			target_link_width >> PCI_EXP_LNKSTA_NLW_SHIFT);
+		ret = -EIO;
+		goto out;
 	}
 
-	if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) {
-		int iommu_s1_bypass = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					DOMAIN_ATTR_S1_BYPASS,
-					&iommu_s1_bypass);
-		if (ret) {
-			PCIE_ERR(pcie_dev,
-				"PCIe: RC%d: failed to set attribute S1_BYPASS: %d\n",
-				pcie_dev->rc_idx, ret);
-			goto release_mapping;
-		}
-	}
-
-	if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FAST) {
-		int iommu_fast = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					DOMAIN_ATTR_FAST,
-					&iommu_fast);
-		if (ret) {
-			PCIE_ERR(pcie_dev,
-				"PCIe: RC%d: failed to set attribute FAST: %d\n",
-				pcie_dev->rc_idx, ret);
-			goto release_mapping;
-		}
-	}
-
-	if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_ATOMIC) {
-		int iommu_atomic = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					DOMAIN_ATTR_ATOMIC,
-					&iommu_atomic);
-		if (ret) {
-			PCIE_ERR(pcie_dev,
-				"PCIe: RC%d: failed to set attribute ATOMIC: %d\n",
-				pcie_dev->rc_idx, ret);
-			goto release_mapping;
-		}
-	}
-
-	if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FORCE_COHERENT) {
-		int iommu_force_coherent = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-				DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
-				&iommu_force_coherent);
-		if (ret) {
-			PCIE_ERR(pcie_dev,
-				"PCIe: RC%d: failed to set attribute FORCE_COHERENT: %d\n",
-				pcie_dev->rc_idx, ret);
-			goto release_mapping;
-		}
-	}
-
-	ret = __depr_arm_iommu_attach_device(&pci_dev->dev, mapping);
-	if (ret) {
-		PCIE_ERR(pcie_dev,
-			"RC%d: failed to iommu attach device (%d)\n",
-			pcie_dev->rc_idx, ret);
-		goto release_mapping;
-	}
-
-	PCIE_DBG(pcie_dev, "PCIe: RC%d: successful iommu attach\n",
-		pcie_dev->rc_idx);
-	return 0;
-
-release_mapping:
-	__depr_arm_iommu_release_mapping(mapping);
+	if (target_link_speed < current_link_speed)
+		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
+out:
+	/* re-enable link L1 */
+	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+	msm_pcie_config_l1(pcie_dev, root_pci_dev, true);
 
 	return ret;
 }
+EXPORT_SYMBOL(msm_pcie_set_link_bandwidth);
 
 int msm_pci_probe(struct pci_dev *pci_dev,
 		  const struct pci_device_id *device_id)
@@ -6461,10 +6432,6 @@ int msm_pci_probe(struct pci_dev *pci_dev,
 	root_dev->pci_dev = pci_dev;
 	dev_set_drvdata(&pci_dev->dev, root_dev);
 
-	ret = msm_pci_iommu_init(root_dev);
-	if (ret)
-		return ret;
-
 	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
 	if (ret) {
 		PCIE_ERR(pcie_dev, "DMA set mask failed (%d)\n", ret);
@@ -6476,6 +6443,7 @@ int msm_pci_probe(struct pci_dev *pci_dev,
 
 static struct pci_device_id msm_pci_device_id[] = {
 	{PCI_DEVICE(0x17cb, 0x0108)},
+	{PCI_DEVICE(0x17cb, 0x010b)},
 	{0},
 };
 
@@ -6726,6 +6694,7 @@ static int __init pcie_init(void)
 		msm_pcie_dev[i].cfg_access = true;
 		mutex_init(&msm_pcie_dev[i].enumerate_lock);
 		mutex_init(&msm_pcie_dev[i].setup_lock);
+		mutex_init(&msm_pcie_dev[i].clk_lock);
 		mutex_init(&msm_pcie_dev[i].recovery_lock);
 		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
 		spin_lock_init(&msm_pcie_dev[i].irq_lock);
@@ -7102,7 +7071,8 @@ static int msm_pcie_drv_resume(struct msm_pcie_dev_t *pcie_dev)
 		if (unlikely(drv_info->seq == MSM_PCIE_DRV_SEQ_RESV))
 			drv_info->seq = 0;
 
-		ret = rpmsg_send(rpdev->ept, drv_disable, sizeof(*drv_disable));
+		ret = rpmsg_trysend(rpdev->ept, drv_disable,
+					sizeof(*drv_disable));
 		if (!ret) {
 			ret = wait_for_completion_timeout(&drv_info->completion,
 					msecs_to_jiffies(drv_info->timeout_ms));
@@ -7164,7 +7134,7 @@ static int msm_pcie_drv_suspend(struct msm_pcie_dev_t *pcie_dev)
 	if (unlikely(drv_info->seq == MSM_PCIE_DRV_SEQ_RESV))
 		drv_info->seq = 0;
 
-	ret = rpmsg_send(rpdev->ept, drv_enable, sizeof(*drv_enable));
+	ret = rpmsg_trysend(rpdev->ept, drv_enable, sizeof(*drv_enable));
 	if (ret) {
 		PCIE_ERR(pcie_dev, "PCIe: RC%d: DRV: failed to send rpmsg\n",
 			pcie_dev->rc_idx);
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index c8febb0..6a4e435 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -46,6 +46,7 @@
 
 /* Transfer control */
 #define PCIETCTLR		0x02000
+#define  DL_DOWN		BIT(3)
 #define  CFINIT			1
 #define PCIETSTR		0x02004
 #define  DATA_LINK_ACTIVE	1
@@ -94,6 +95,7 @@
 #define MACCTLR			0x011058
 #define  SPEED_CHANGE		BIT(24)
 #define  SCRAMBLE_DISABLE	BIT(27)
+#define PMSR			0x01105c
 #define MACS2R			0x011078
 #define MACCGSPSETR		0x011084
 #define  SPCNGRSN		BIT(31)
@@ -1130,6 +1132,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	pcie = pci_host_bridge_priv(bridge);
 
 	pcie->dev = dev;
+	platform_set_drvdata(pdev, pcie);
 
 	err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
 	if (err)
@@ -1221,10 +1224,28 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	return err;
 }
 
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+	struct rcar_pcie *pcie = dev_get_drvdata(dev);
+
+	if (rcar_pci_read_reg(pcie, PMSR) &&
+	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+		return 0;
+
+	/* Re-establish the PCIe link */
+	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+	return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+	.resume_noirq = rcar_pcie_resume_noirq,
+};
+
 static struct platform_driver rcar_pcie_driver = {
 	.driver = {
 		.name = "rcar-pcie",
 		.of_match_table = rcar_pcie_of_match,
+		.pm = &rcar_pcie_pm_ops,
 		.suppress_bind_attrs = true,
 	},
 	.probe = rcar_pcie_probe,
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 314e135..30fbe2e 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -62,8 +62,8 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
  * arch/x86/platform/intel-mid/pwr.c.
  */
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_PENWELL),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+	ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
 	{}
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 30649ad..61f2ef2 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6135,8 +6135,7 @@ static int __init pci_setup(char *str)
 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
-				disable_acs_redir_param =
-					kstrdup(str + 18, GFP_KERNEL);
+				disable_acs_redir_param = str + 18;
 			} else {
 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
 						str);
@@ -6147,3 +6146,19 @@ static int __init pci_setup(char *str)
 	return 0;
 }
 early_param("pci", pci_setup);
+
+/*
+ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
+ * to data in the __initdata section which will be freed after the init
+ * sequence is complete. We can't allocate memory in pci_setup() because some
+ * architectures do not have any memory allocation service available during
+ * an early_param() call. So we allocate memory and copy the variable here
+ * before the init section is freed.
+ */
+static int __init pci_realloc_setup_params(void)
+{
+	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+	return 0;
+}
+pure_initcall(pci_realloc_setup_params);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6e0d152..ab25752f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -530,7 +530,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
 void pci_aer_clear_device_status(struct pci_dev *dev);
 #else
 static inline void pci_no_aer(void) { }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_init(struct pci_dev *d) { }
 static inline void pci_aer_exit(struct pci_dev *d) { }
 static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
 static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f78860c..1117b25 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -198,6 +198,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
 	link->clkpm_capable = (blacklist) ? 0 : capable;
 }
 
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+	struct pci_dev *parent = link->pdev;
+	unsigned long start_jiffies;
+	u16 reg16;
+
+	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+	reg16 |= PCI_EXP_LNKCTL_RL;
+	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	if (parent->clear_retrain_link) {
+		/*
+		 * Due to an erratum in some devices the Retrain Link bit
+		 * needs to be cleared again manually to allow the link
+		 * training to succeed.
+		 */
+		reg16 &= ~PCI_EXP_LNKCTL_RL;
+		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	}
+
+	/* Wait for link training end. Break out after waiting for timeout */
+	start_jiffies = jiffies;
+	for (;;) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+			break;
+		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+			break;
+		msleep(1);
+	}
+	return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
 /*
  * pcie_aspm_configure_common_clock: check if the 2 ends of a link
  *   could use common clock. If they are, configure them to use the
@@ -207,7 +239,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 {
 	int same_clock = 1;
 	u16 reg16, parent_reg, child_reg[8];
-	unsigned long start_jiffies;
 	struct pci_dev *child, *parent = link->pdev;
 	struct pci_bus *linkbus = parent->subordinate;
 	/*
@@ -265,21 +296,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 		reg16 &= ~PCI_EXP_LNKCTL_CCC;
 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
 
-	/* Retrain link */
-	reg16 |= PCI_EXP_LNKCTL_RL;
-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
-	/* Wait for link training end. Break out after waiting for timeout */
-	start_jiffies = jiffies;
-	for (;;) {
-		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
-		if (!(reg16 & PCI_EXP_LNKSTA_LT))
-			break;
-		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
-			break;
-		msleep(1);
-	}
-	if (!(reg16 & PCI_EXP_LNKSTA_LT))
+	if (pcie_retrain_link(link))
 		return;
 
 	/* Training failed. Restore common clock configurations */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4a4c16b..fa4c386 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -535,16 +535,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
 	kfree(to_pci_host_bridge(dev));
 }
 
-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+static void pci_init_host_bridge(struct pci_host_bridge *bridge)
 {
-	struct pci_host_bridge *bridge;
-
-	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
-	if (!bridge)
-		return NULL;
-
 	INIT_LIST_HEAD(&bridge->windows);
-	bridge->dev.release = pci_release_host_bridge_dev;
 
 	/*
 	 * We assume we can manage these PCIe features.  Some systems may
@@ -557,6 +550,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
 	bridge->native_shpc_hotplug = 1;
 	bridge->native_pme = 1;
 	bridge->native_ltr = 1;
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+{
+	struct pci_host_bridge *bridge;
+
+	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
+	if (!bridge)
+		return NULL;
+
+	pci_init_host_bridge(bridge);
+	bridge->dev.release = pci_release_host_bridge_dev;
 
 	return bridge;
 }
@@ -571,7 +576,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
 	if (!bridge)
 		return NULL;
 
-	INIT_LIST_HEAD(&bridge->windows);
+	pci_init_host_bridge(bridge);
 	bridge->dev.release = devm_pci_release_host_bridge_dev;
 
 	return bridge;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 37d897b..28c64f84 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2220,6 +2220,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
 
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130.  See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+	dev->clear_retrain_link = 1;
+	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
 static void fixup_rev1_53c810(struct pci_dev *dev)
 {
 	u32 class = dev->class;
@@ -3383,6 +3400,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
 
 /*
  * Root port on some Cavium CN8xxx chips do not successfully complete a bus
@@ -4878,6 +4896,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
 
 /* AMD Stoney platform GPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
 #endif /* CONFIG_PCI_ATS */
 
 /* Freescale PCIe doesn't support MSI in RC mode */
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 1bfeb16..14a541c 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1692,21 +1692,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
 	mutex_init(&cci_pmu->reserve_mutex);
 	atomic_set(&cci_pmu->active_events, 0);
-	cci_pmu->cpu = get_cpu();
 
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		put_cpu();
-		return ret;
-	}
-
+	cci_pmu->cpu = raw_smp_processor_id();
+	g_cci_pmu = cci_pmu;
 	cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
 				  "perf/arm/cci:online", NULL,
 				  cci_pmu_offline_cpu);
-	put_cpu();
-	g_cci_pmu = cci_pmu;
+
+	ret = cci_pmu_init(cci_pmu, pdev);
+	if (ret)
+		goto error_pmu_init;
+
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
+
+error_pmu_init:
+	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
+	g_cci_pmu = NULL;
+	return ret;
 }
 
 static int cci_pmu_remove(struct platform_device *pdev)
diff --git a/drivers/perf/qcom_llcc_pmu.c b/drivers/perf/qcom_llcc_pmu.c
index 6dd4eea..accab0b 100644
--- a/drivers/perf/qcom_llcc_pmu.c
+++ b/drivers/perf/qcom_llcc_pmu.c
@@ -16,7 +16,7 @@
 #include <linux/ktime.h>
 
 enum llcc_pmu_version {
-	LLCC_PMU_VER1,
+	LLCC_PMU_VER1 = 1,
 	LLCC_PMU_VER2,
 };
 
@@ -63,7 +63,7 @@ static void mon_disable(struct llcc_pmu *llccpmu, int cpu)
 		break;
 	case LLCC_PMU_VER2:
 		reg = readl_relaxed(MON_CFG(llccpmu));
-		reg &= (DISABLE << cpu);
+		reg &= ~(ENABLE << cpu);
 		writel_relaxed(reg, MON_CFG(llccpmu));
 		break;
 	}
@@ -87,6 +87,8 @@ static void mon_clear(struct llcc_pmu *llccpmu, int cpu)
 		reg = readl_relaxed(MON_CFG(llccpmu));
 		reg |= (ENABLE << clear_bit);
 		writel_relaxed(reg, MON_CFG(llccpmu));
+		reg &= ~(ENABLE << clear_bit);
+		writel_relaxed(reg, MON_CFG(llccpmu));
 		break;
 	}
 }
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 15c8fc2..1f8809b 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -550,6 +550,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
 	struct sun4i_usb_phy_data *data =
 		container_of(work, struct sun4i_usb_phy_data, detect.work);
 	struct phy *phy0 = data->phys[0].phy;
+	struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
 	bool force_session_end, id_notify = false, vbus_notify = false;
 	int id_det, vbus_det;
 
@@ -606,6 +607,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
 			mutex_unlock(&phy0->mutex);
 		}
 
+		/* Enable PHY0 passby for host mode only. */
+		sun4i_usb_phy_passby(phy, !id_det);
+
 		/* Re-route PHY0 if necessary */
 		if (data->cfg->phy0_dual_route)
 			sun4i_usb_phy0_reroute(data, id_det);
diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
index 8265152..718f872 100644
--- a/drivers/phy/motorola/Kconfig
+++ b/drivers/phy/motorola/Kconfig
@@ -13,7 +13,7 @@
 
 config PHY_MAPPHONE_MDM6600
 	tristate "Motorola Mapphone MDM6600 modem USB PHY driver"
-	depends on OF && USB_SUPPORT
+	depends on OF && USB_SUPPORT && GPIOLIB
 	select GENERIC_PHY
 	help
 	  Enable this for MDM6600 USB modem to work on Motorola phones
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 73a78f6..6bb62b3 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -161,7 +161,8 @@ struct ufs_qcom_phy {
  * @dbg_register_dump: pointer to a function that dumps phy registers for debug.
  */
 struct ufs_qcom_phy_specific_ops {
-	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
+	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B,
+			     bool is_g4);
 	void (*start_serdes)(struct ufs_qcom_phy *phy);
 	int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
 	void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
index b8cb819..a11b552 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
@@ -18,7 +18,7 @@
 
 static
 int ufs_qcom_phy_qmp_v4_lito_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
-					bool is_rate_B)
+					bool is_rate_B, bool is_g4)
 {
 
 	writel_relaxed(0x01, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c
index 219e93a..ed3b8b3 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c
@@ -18,10 +18,8 @@
 
 static
 int ufs_qcom_phy_qmp_v4_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
-					bool is_rate_B)
+					bool is_rate_B, bool is_g4)
 {
-	struct device_node *np = ufs_qcom_phy->dev->of_node;
-
 	writel_relaxed(0x01, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
 	/* Ensure PHY is in reset before writing PHY calibration data */
 	wmb();
@@ -31,14 +29,14 @@ int ufs_qcom_phy_qmp_v4_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 	 * 2. Write 2nd lane configuration if needed.
 	 * 3. Write Rate-B calibration overrides
 	 */
-	if (of_device_is_compatible(np, "qcom,ufs-phy-qmp-v4")) {
+	if (is_g4) {
 		ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A,
 				       ARRAY_SIZE(phy_cal_table_rate_A));
 		if (ufs_qcom_phy->lanes_per_direction == 2)
 			ufs_qcom_phy_write_tbl(ufs_qcom_phy,
 					phy_cal_table_2nd_lane,
 					ARRAY_SIZE(phy_cal_table_2nd_lane));
-	} else if (of_device_is_compatible(np, "qcom,ufs-phy-qmp-v4-card")) {
+	} else {
 		ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_no_g4,
 				       ARRAY_SIZE(phy_cal_table_rate_A_no_g4));
 		if (ufs_qcom_phy->lanes_per_direction == 2)
@@ -86,6 +84,34 @@ static int ufs_qcom_phy_qmp_v4_exit(struct phy *generic_phy)
 	return 0;
 }
 
+static inline
+void ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(struct ufs_qcom_phy *phy,
+						bool enable)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);
+	if (enable)
+		temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	else
+		temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	writel_relaxed(temp, phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);
+
+	if (phy->lanes_per_direction == 1)
+		goto out;
+
+	temp = readl_relaxed(phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
+	if (enable)
+		temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	else
+		temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	writel_relaxed(temp, phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
+
+out:
+	/* ensure register value is committed */
+	mb();
+}
+
 static
 void ufs_qcom_phy_qmp_v4_power_control(struct ufs_qcom_phy *phy,
 					 bool power_ctrl)
@@ -98,7 +124,9 @@ void ufs_qcom_phy_qmp_v4_power_control(struct ufs_qcom_phy *phy,
 		 * powered OFF.
 		 */
 		mb();
+		ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, true);
 	} else {
+		ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, false);
 		/* bring PHY out of analog power collapse */
 		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h
index 6892725..47fc7fc 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h
@@ -135,6 +135,7 @@
 #define QSERDES_RX0_AC_JTAG_ENABLE			RX_OFF(0, 0x68)
 #define QSERDES_RX0_UCDR_FO_GAIN			RX_OFF(0, 0x08)
 #define QSERDES_RX0_UCDR_SO_GAIN			RX_OFF(0, 0x14)
+#define QSERDES_RX0_RX_INTERFACE_MODE			RX_OFF(0, 0x134)
 
 #define QSERDES_RX1_SIGDET_LVL				RX_OFF(1, 0x120)
 #define QSERDES_RX1_SIGDET_CNTRL			RX_OFF(1, 0x11C)
@@ -175,8 +176,10 @@
 #define QSERDES_RX1_AC_JTAG_ENABLE			RX_OFF(1, 0x68)
 #define QSERDES_RX1_UCDR_FO_GAIN			RX_OFF(1, 0x08)
 #define QSERDES_RX1_UCDR_SO_GAIN			RX_OFF(1, 0x14)
+#define QSERDES_RX1_RX_INTERFACE_MODE			RX_OFF(1, 0x134)
 
 #define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+#define QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT	BIT(5)
 
 /*
  * This structure represents the v4 specific phy.
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qrbtc-sdm845.c b/drivers/phy/qualcomm/phy-qcom-ufs-qrbtc-sdm845.c
index 6b0592e..fe033c4 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qrbtc-sdm845.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qrbtc-sdm845.c
@@ -19,7 +19,7 @@
 
 static
 int ufs_qcom_phy_qrbtc_sdm845_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
-					bool is_rate_B)
+					bool is_rate_B, bool is_g4)
 {
 	int err;
 	int tbl_size_A, tbl_size_B;
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index f278492..d7de831 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -669,7 +669,8 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
 
-int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
+int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B,
+			       bool is_g4)
 {
 	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
 	int ret = 0;
@@ -680,7 +681,8 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 		ret = -ENOTSUPP;
 	} else {
 		ret = ufs_qcom_phy->phy_spec_ops->calibrate_phy(ufs_qcom_phy,
-								is_rate_B);
+								is_rate_B,
+								is_g4);
 		if (ret)
 			dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
 				__func__, ret);
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 68ce4a08..693acc1 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
 
 	val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
 	val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
-	val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
+	val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
 	ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
 
 	val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 302190d..0d7d379 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1368,6 +1368,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
 		if (!of_find_property(child, "gpio-controller", NULL)) {
 			dev_err(pctl->dev,
 				"No gpio-controller property for bank %u\n", i);
+			of_node_put(child);
 			ret = -ENODEV;
 			goto err;
 		}
@@ -1375,6 +1376,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
 		irq = irq_of_parse_and_map(child, 0);
 		if (irq < 0) {
 			dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+			of_node_put(child);
 			ret = irq;
 			goto err;
 		}
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 4353c4c..5193808 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -174,4 +174,13 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc LITO platform.
 
+config PINCTRL_BENGAL
+	tristate "Qualcomm Technologies Inc BENGAL pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc BENGAL platform.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 1cff637..3fb4f48 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -22,3 +22,4 @@
 obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
 obj-$(CONFIG_PINCTRL_KONA) += pinctrl-kona.o
 obj-$(CONFIG_PINCTRL_LITO) += pinctrl-lito.o
+obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o
diff --git a/drivers/pinctrl/qcom/pinctrl-bengal.c b/drivers/pinctrl/qcom/pinctrl-bengal.c
new file mode 100644
index 0000000..7b70942
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-bengal.c
@@ -0,0 +1,1535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {				\
+		.name = #fname,				\
+		.groups = fname##_groups,		\
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define NORTH
+#define SOUTH	0x00500000
+#define WEST	0x00100000
+#define EAST	0x00900000
+#define DUMMY	0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{						\
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},					\
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.egpio_enable = 12,		\
+		.egpio_present = 11,		\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+static const struct pinctrl_pin_desc bengal_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "SDC1_RCLK"),
+	PINCTRL_PIN(114, "SDC1_CLK"),
+	PINCTRL_PIN(115, "SDC1_CMD"),
+	PINCTRL_PIN(116, "SDC1_DATA"),
+	PINCTRL_PIN(117, "SDC2_CLK"),
+	PINCTRL_PIN(118, "SDC2_CMD"),
+	PINCTRL_PIN(119, "SDC2_DATA"),
+	PINCTRL_PIN(120, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int sdc1_rclk_pins[] = { 113 };
+static const unsigned int sdc1_clk_pins[] = { 114 };
+static const unsigned int sdc1_cmd_pins[] = { 115 };
+static const unsigned int sdc1_data_pins[] = { 116 };
+static const unsigned int sdc2_clk_pins[] = { 117 };
+static const unsigned int sdc2_cmd_pins[] = { 118 };
+static const unsigned int sdc2_data_pins[] = { 119 };
+static const unsigned int ufs_reset_pins[] = { 120 };
+
+enum bengal_functions {
+	msm_mux_ddr_bist,
+	msm_mux_m_voc,
+	msm_mux_gpio,
+	msm_mux_qup0,
+	msm_mux_phase_flag0,
+	msm_mux_qdss_gpio8,
+	msm_mux_atest_tsens,
+	msm_mux_mpm_pwr,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio9,
+	msm_mux_atest_tsens2,
+	msm_mux_phase_flag2,
+	msm_mux_qdss_gpio10,
+	msm_mux_dac_calib0,
+	msm_mux_atest_usb10,
+	msm_mux_phase_flag3,
+	msm_mux_qdss_gpio11,
+	msm_mux_dac_calib1,
+	msm_mux_atest_usb11,
+	msm_mux_qup1,
+	msm_mux_CRI_TRNG0,
+	msm_mux_phase_flag4,
+	msm_mux_dac_calib2,
+	msm_mux_atest_usb12,
+	msm_mux_CRI_TRNG1,
+	msm_mux_phase_flag5,
+	msm_mux_dac_calib3,
+	msm_mux_atest_usb13,
+	msm_mux_qup2,
+	msm_mux_phase_flag6,
+	msm_mux_dac_calib4,
+	msm_mux_atest_usb1,
+	msm_mux_qup3,
+	msm_mux_pbs_out,
+	msm_mux_PLL_BIST,
+	msm_mux_qdss_gpio,
+	msm_mux_tsense_pwm,
+	msm_mux_AGERA_PLL,
+	msm_mux_pbs0,
+	msm_mux_qdss_gpio0,
+	msm_mux_pbs1,
+	msm_mux_qdss_gpio1,
+	msm_mux_qup4,
+	msm_mux_tgu_ch0,
+	msm_mux_tgu_ch1,
+	msm_mux_qup5,
+	msm_mux_tgu_ch2,
+	msm_mux_phase_flag7,
+	msm_mux_qdss_gpio4,
+	msm_mux_dac_calib5,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag8,
+	msm_mux_qdss_gpio5,
+	msm_mux_dac_calib6,
+	msm_mux_phase_flag9,
+	msm_mux_qdss_gpio6,
+	msm_mux_dac_calib7,
+	msm_mux_phase_flag10,
+	msm_mux_qdss_gpio7,
+	msm_mux_dac_calib8,
+	msm_mux_SDC2_TB,
+	msm_mux_CRI_TRNG,
+	msm_mux_pbs2,
+	msm_mux_qdss_gpio2,
+	msm_mux_SDC1_TB,
+	msm_mux_pbs3,
+	msm_mux_qdss_gpio3,
+	msm_mux_cam_mclk,
+	msm_mux_pbs4,
+	msm_mux_adsp_ext,
+	msm_mux_pbs5,
+	msm_mux_cci_i2c,
+	msm_mux_prng_rosc,
+	msm_mux_pbs6,
+	msm_mux_phase_flag11,
+	msm_mux_dac_calib9,
+	msm_mux_atest_usb20,
+	msm_mux_pbs7,
+	msm_mux_phase_flag12,
+	msm_mux_dac_calib10,
+	msm_mux_atest_usb21,
+	msm_mux_CCI_TIMER1,
+	msm_mux_GCC_GP1,
+	msm_mux_pbs8,
+	msm_mux_phase_flag13,
+	msm_mux_dac_calib11,
+	msm_mux_atest_usb22,
+	msm_mux_cci_async,
+	msm_mux_CCI_TIMER0,
+	msm_mux_pbs9,
+	msm_mux_phase_flag14,
+	msm_mux_dac_calib12,
+	msm_mux_atest_usb23,
+	msm_mux_pbs10,
+	msm_mux_phase_flag15,
+	msm_mux_dac_calib13,
+	msm_mux_atest_usb2,
+	msm_mux_vsense_trigger,
+	msm_mux_qdss_cti,
+	msm_mux_CCI_TIMER2,
+	msm_mux_phase_flag16,
+	msm_mux_dac_calib14,
+	msm_mux_atest_char,
+	msm_mux_phase_flag17,
+	msm_mux_dac_calib15,
+	msm_mux_atest_char0,
+	msm_mux_GP_PDM0,
+	msm_mux_phase_flag18,
+	msm_mux_dac_calib16,
+	msm_mux_atest_char1,
+	msm_mux_CCI_TIMER3,
+	msm_mux_GP_PDM1,
+	msm_mux_phase_flag19,
+	msm_mux_dac_calib17,
+	msm_mux_atest_char2,
+	msm_mux_GP_PDM2,
+	msm_mux_phase_flag20,
+	msm_mux_dac_calib18,
+	msm_mux_atest_char3,
+	msm_mux_phase_flag21,
+	msm_mux_phase_flag22,
+	msm_mux_NAV_GPIO,
+	msm_mux_phase_flag23,
+	msm_mux_phase_flag24,
+	msm_mux_phase_flag25,
+	msm_mux_pbs14,
+	msm_mux_qdss_gpio14,
+	msm_mux_vfr_1,
+	msm_mux_pbs15,
+	msm_mux_qdss_gpio15,
+	msm_mux_PA_INDICATOR,
+	msm_mux_gsm1_tx,
+	msm_mux_SSBI_WTR1,
+	msm_mux_pll_bypassnl,
+	msm_mux_pll_reset,
+	msm_mux_phase_flag26,
+	msm_mux_ddr_pxi0,
+	msm_mux_gsm0_tx,
+	msm_mux_phase_flag27,
+	msm_mux_GCC_GP2,
+	msm_mux_qdss_gpio12,
+	msm_mux_ddr_pxi1,
+	msm_mux_GCC_GP3,
+	msm_mux_qdss_gpio13,
+	msm_mux_dbg_out,
+	msm_mux_uim2_data,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_uim2_present,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_dac_calib19,
+	msm_mux_mdp_vsync,
+	msm_mux_dac_calib20,
+	msm_mux_dac_calib21,
+	msm_mux_atest_bbrx1,
+	msm_mux_pbs11,
+	msm_mux_usb_phy,
+	msm_mux_atest_bbrx0,
+	msm_mux_mss_lte,
+	msm_mux_pbs12,
+	msm_mux_pbs13,
+	msm_mux_wlan1_adc0,
+	msm_mux_wlan1_adc1,
+	msm_mux_sd_write,
+	msm_mux_JITTER_BIST,
+	msm_mux_atest_gpsadc_dtest0_native,
+	msm_mux_atest_gpsadc_dtest1_native,
+	msm_mux_phase_flag28,
+	msm_mux_dac_calib22,
+	msm_mux_ddr_pxi2,
+	msm_mux_phase_flag29,
+	msm_mux_dac_calib23,
+	msm_mux_phase_flag30,
+	msm_mux_dac_calib24,
+	msm_mux_ddr_pxi3,
+	msm_mux_phase_flag31,
+	msm_mux_dac_calib25,
+	msm_mux_NA,
+};
+
+static const char * const ddr_bist_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const m_voc_groups[] = {
+	"gpio0",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112",
+};
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio0",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio0", "gpio24",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio0",
+};
+static const char * const mpm_pwr_groups[] = {
+	"gpio1",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio1",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio1", "gpio25",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio1",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio2",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio2", "gpio26",
+};
+static const char * const dac_calib0_groups[] = {
+	"gpio2",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio2",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio3",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio3", "gpio87",
+};
+static const char * const dac_calib1_groups[] = {
+	"gpio3",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio3",
+};
+static const char * const qup1_groups[] = {
+	"gpio4", "gpio5", "gpio69", "gpio70",
+};
+static const char * const CRI_TRNG0_groups[] = {
+	"gpio4",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio4",
+};
+static const char * const dac_calib2_groups[] = {
+	"gpio4",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio4",
+};
+static const char * const CRI_TRNG1_groups[] = {
+	"gpio5",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio5",
+};
+static const char * const dac_calib3_groups[] = {
+	"gpio5",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio5",
+};
+static const char * const qup2_groups[] = {
+	"gpio6", "gpio7", "gpio71", "gpio80",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio6",
+};
+static const char * const dac_calib4_groups[] = {
+	"gpio6",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio6",
+};
+static const char * const qup3_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const pbs_out_groups[] = {
+	"gpio8", "gpio9", "gpio52",
+};
+static const char * const PLL_BIST_groups[] = {
+	"gpio8", "gpio9",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio8", "gpio9", "gpio105", "gpio106",
+};
+static const char * const tsense_pwm_groups[] = {
+	"gpio8",
+};
+static const char * const AGERA_PLL_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const pbs0_groups[] = {
+	"gpio10",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio10", "gpio107",
+};
+static const char * const pbs1_groups[] = {
+	"gpio11",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio11", "gpio104",
+};
+static const char * const qup4_groups[] = {
+	"gpio12", "gpio13", "gpio96", "gpio97",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio12",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio13",
+};
+static const char * const qup5_groups[] = {
+	"gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio14",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio14", "gpio20",
+};
+static const char * const dac_calib5_groups[] = {
+	"gpio14",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio15",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio15",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio15", "gpio21",
+};
+static const char * const dac_calib6_groups[] = {
+	"gpio15",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio16",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio16", "gpio22",
+};
+static const char * const dac_calib7_groups[] = {
+	"gpio16",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio17",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio17", "gpio23",
+};
+static const char * const dac_calib8_groups[] = {
+	"gpio17",
+};
+static const char * const SDC2_TB_groups[] = {
+	"gpio18",
+};
+static const char * const CRI_TRNG_groups[] = {
+	"gpio18",
+};
+static const char * const pbs2_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio18", "gpio109",
+};
+static const char * const SDC1_TB_groups[] = {
+	"gpio19",
+};
+static const char * const pbs3_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio19", "gpio110",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio20", "gpio21", "gpio27", "gpio28",
+};
+static const char * const pbs4_groups[] = {
+	"gpio20",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio21",
+};
+static const char * const pbs5_groups[] = {
+	"gpio21",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio22", "gpio23", "gpio29", "gpio30",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio22", "gpio23",
+};
+static const char * const pbs6_groups[] = {
+	"gpio22",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio22",
+};
+static const char * const dac_calib9_groups[] = {
+	"gpio22",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio22",
+};
+static const char * const pbs7_groups[] = {
+	"gpio23",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio23",
+};
+static const char * const dac_calib10_groups[] = {
+	"gpio23",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio23",
+};
+static const char * const CCI_TIMER1_groups[] = {
+	"gpio24",
+};
+static const char * const GCC_GP1_groups[] = {
+	"gpio24", "gpio86",
+};
+static const char * const pbs8_groups[] = {
+	"gpio24",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio24",
+};
+static const char * const dac_calib11_groups[] = {
+	"gpio24",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio25",
+};
+static const char * const CCI_TIMER0_groups[] = {
+	"gpio25",
+};
+static const char * const pbs9_groups[] = {
+	"gpio25",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio25",
+};
+static const char * const dac_calib12_groups[] = {
+	"gpio25",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio25",
+};
+static const char * const pbs10_groups[] = {
+	"gpio26",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio26",
+};
+static const char * const dac_calib13_groups[] = {
+	"gpio26",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio26",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio26",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio27", "gpio28", "gpio72", "gpio73", "gpio96", "gpio97",
+};
+static const char * const CCI_TIMER2_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio29",
+};
+static const char * const dac_calib14_groups[] = {
+	"gpio29",
+};
+static const char * const atest_char_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio30",
+};
+static const char * const dac_calib15_groups[] = {
+	"gpio30",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio30",
+};
+static const char * const GP_PDM0_groups[] = {
+	"gpio31", "gpio95",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio31",
+};
+static const char * const dac_calib16_groups[] = {
+	"gpio31",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio31",
+};
+static const char * const CCI_TIMER3_groups[] = {
+	"gpio32",
+};
+static const char * const GP_PDM1_groups[] = {
+	"gpio32", "gpio96",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio32",
+};
+static const char * const dac_calib17_groups[] = {
+	"gpio32",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio32",
+};
+static const char * const GP_PDM2_groups[] = {
+	"gpio33", "gpio97",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio33",
+};
+static const char * const dac_calib18_groups[] = {
+	"gpio33",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio33",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio35",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio36",
+};
+static const char * const NAV_GPIO_groups[] = {
+	"gpio42", "gpio47", "gpio52", "gpio95", "gpio96", "gpio97", "gpio106",
+	"gpio107", "gpio108",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio43",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio44",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio45",
+};
+static const char * const pbs14_groups[] = {
+	"gpio47",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio47", "gpio94",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio48",
+};
+static const char * const pbs15_groups[] = {
+	"gpio48",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio48", "gpio95",
+};
+static const char * const PA_INDICATOR_groups[] = {
+	"gpio49",
+};
+static const char * const gsm1_tx_groups[] = {
+	"gpio53",
+};
+static const char * const SSBI_WTR1_groups[] = {
+	"gpio59", "gpio60",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio62",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio63",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio63",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio63", "gpio64",
+};
+static const char * const gsm0_tx_groups[] = {
+	"gpio64",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio64",
+};
+static const char * const GCC_GP2_groups[] = {
+	"gpio69", "gpio107",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio69", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio69", "gpio70",
+};
+static const char * const GCC_GP3_groups[] = {
+	"gpio70", "gpio106",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio70", "gpio91",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio71",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio72",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio73",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio74",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio75",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio76",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio77",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio78",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio79",
+};
+static const char * const dac_calib19_groups[] = {
+	"gpio80",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio81", "gpio96", "gpio97",
+};
+static const char * const dac_calib20_groups[] = {
+	"gpio81",
+};
+static const char * const dac_calib21_groups[] = {
+	"gpio82",
+};
+static const char * const atest_bbrx1_groups[] = {
+	"gpio86",
+};
+static const char * const pbs11_groups[] = {
+	"gpio87",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio89",
+};
+static const char * const atest_bbrx0_groups[] = {
+	"gpio89",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio90", "gpio91",
+};
+static const char * const pbs12_groups[] = {
+	"gpio90",
+};
+static const char * const pbs13_groups[] = {
+	"gpio91",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio94",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio95",
+};
+static const char * const sd_write_groups[] = {
+	"gpio96",
+};
+static const char * const JITTER_BIST_groups[] = {
+	"gpio96", "gpio97",
+};
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+	"gpio100",
+};
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+	"gpio101",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio102",
+};
+static const char * const dac_calib22_groups[] = {
+	"gpio102",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio102", "gpio103",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio103",
+};
+static const char * const dac_calib23_groups[] = {
+	"gpio103",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio104",
+};
+static const char * const dac_calib24_groups[] = {
+	"gpio104",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio104", "gpio105",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio105",
+};
+static const char * const dac_calib25_groups[] = {
+	"gpio105",
+};
+
+static const struct msm_function bengal_functions[] = {
+	FUNCTION(ddr_bist),
+	FUNCTION(m_voc),
+	FUNCTION(gpio),
+	FUNCTION(qup0),
+	FUNCTION(phase_flag0),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(atest_tsens),
+	FUNCTION(mpm_pwr),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(atest_tsens2),
+	FUNCTION(phase_flag2),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(dac_calib0),
+	FUNCTION(atest_usb10),
+	FUNCTION(phase_flag3),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(dac_calib1),
+	FUNCTION(atest_usb11),
+	FUNCTION(qup1),
+	FUNCTION(CRI_TRNG0),
+	FUNCTION(phase_flag4),
+	FUNCTION(dac_calib2),
+	FUNCTION(atest_usb12),
+	FUNCTION(CRI_TRNG1),
+	FUNCTION(phase_flag5),
+	FUNCTION(dac_calib3),
+	FUNCTION(atest_usb13),
+	FUNCTION(qup2),
+	FUNCTION(phase_flag6),
+	FUNCTION(dac_calib4),
+	FUNCTION(atest_usb1),
+	FUNCTION(qup3),
+	FUNCTION(pbs_out),
+	FUNCTION(PLL_BIST),
+	FUNCTION(qdss_gpio),
+	FUNCTION(tsense_pwm),
+	FUNCTION(AGERA_PLL),
+	FUNCTION(pbs0),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(pbs1),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qup4),
+	FUNCTION(tgu_ch0),
+	FUNCTION(tgu_ch1),
+	FUNCTION(qup5),
+	FUNCTION(tgu_ch2),
+	FUNCTION(phase_flag7),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(dac_calib5),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag8),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(dac_calib6),
+	FUNCTION(phase_flag9),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(dac_calib7),
+	FUNCTION(phase_flag10),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(dac_calib8),
+	FUNCTION(SDC2_TB),
+	FUNCTION(CRI_TRNG),
+	FUNCTION(pbs2),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(SDC1_TB),
+	FUNCTION(pbs3),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(cam_mclk),
+	FUNCTION(pbs4),
+	FUNCTION(adsp_ext),
+	FUNCTION(pbs5),
+	FUNCTION(cci_i2c),
+	FUNCTION(prng_rosc),
+	FUNCTION(pbs6),
+	FUNCTION(phase_flag11),
+	FUNCTION(dac_calib9),
+	FUNCTION(atest_usb20),
+	FUNCTION(pbs7),
+	FUNCTION(phase_flag12),
+	FUNCTION(dac_calib10),
+	FUNCTION(atest_usb21),
+	FUNCTION(CCI_TIMER1),
+	FUNCTION(GCC_GP1),
+	FUNCTION(pbs8),
+	FUNCTION(phase_flag13),
+	FUNCTION(dac_calib11),
+	FUNCTION(atest_usb22),
+	FUNCTION(cci_async),
+	FUNCTION(CCI_TIMER0),
+	FUNCTION(pbs9),
+	FUNCTION(phase_flag14),
+	FUNCTION(dac_calib12),
+	FUNCTION(atest_usb23),
+	FUNCTION(pbs10),
+	FUNCTION(phase_flag15),
+	FUNCTION(dac_calib13),
+	FUNCTION(atest_usb2),
+	FUNCTION(vsense_trigger),
+	FUNCTION(qdss_cti),
+	FUNCTION(CCI_TIMER2),
+	FUNCTION(phase_flag16),
+	FUNCTION(dac_calib14),
+	FUNCTION(atest_char),
+	FUNCTION(phase_flag17),
+	FUNCTION(dac_calib15),
+	FUNCTION(atest_char0),
+	FUNCTION(GP_PDM0),
+	FUNCTION(phase_flag18),
+	FUNCTION(dac_calib16),
+	FUNCTION(atest_char1),
+	FUNCTION(CCI_TIMER3),
+	FUNCTION(GP_PDM1),
+	FUNCTION(phase_flag19),
+	FUNCTION(dac_calib17),
+	FUNCTION(atest_char2),
+	FUNCTION(GP_PDM2),
+	FUNCTION(phase_flag20),
+	FUNCTION(dac_calib18),
+	FUNCTION(atest_char3),
+	FUNCTION(phase_flag21),
+	FUNCTION(phase_flag22),
+	FUNCTION(NAV_GPIO),
+	FUNCTION(phase_flag23),
+	FUNCTION(phase_flag24),
+	FUNCTION(phase_flag25),
+	FUNCTION(pbs14),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(vfr_1),
+	FUNCTION(pbs15),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(PA_INDICATOR),
+	FUNCTION(gsm1_tx),
+	FUNCTION(SSBI_WTR1),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(pll_reset),
+	FUNCTION(phase_flag26),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(gsm0_tx),
+	FUNCTION(phase_flag27),
+	FUNCTION(GCC_GP2),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(GCC_GP3),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(dbg_out),
+	FUNCTION(uim2_data),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(uim2_present),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(dac_calib19),
+	FUNCTION(mdp_vsync),
+	FUNCTION(dac_calib20),
+	FUNCTION(dac_calib21),
+	FUNCTION(atest_bbrx1),
+	FUNCTION(pbs11),
+	FUNCTION(usb_phy),
+	FUNCTION(atest_bbrx0),
+	FUNCTION(mss_lte),
+	FUNCTION(pbs12),
+	FUNCTION(pbs13),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(sd_write),
+	FUNCTION(JITTER_BIST),
+	FUNCTION(atest_gpsadc_dtest0_native),
+	FUNCTION(atest_gpsadc_dtest1_native),
+	FUNCTION(phase_flag28),
+	FUNCTION(dac_calib22),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(phase_flag29),
+	FUNCTION(dac_calib23),
+	FUNCTION(phase_flag30),
+	FUNCTION(dac_calib24),
+	FUNCTION(ddr_pxi3),
+	FUNCTION(phase_flag31),
+	FUNCTION(dac_calib25),
+};
+
+/* Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup bengal_groups[] = {
+	[0] = PINGROUP(0, WEST, qup0, m_voc, ddr_bist, NA, phase_flag0,
+		       qdss_gpio8, atest_tsens, NA, NA),
+	[1] = PINGROUP(1, WEST, qup0, mpm_pwr, ddr_bist, NA, phase_flag1,
+		       qdss_gpio9, atest_tsens2, NA, NA),
+	[2] = PINGROUP(2, WEST, qup0, ddr_bist, NA, phase_flag2, qdss_gpio10,
+		       dac_calib0, atest_usb10, NA, NA),
+	[3] = PINGROUP(3, WEST, qup0, ddr_bist, NA, phase_flag3, qdss_gpio11,
+		       dac_calib1, atest_usb11, NA, NA),
+	[4] = PINGROUP(4, WEST, qup1, CRI_TRNG0, NA, phase_flag4, dac_calib2,
+		       atest_usb12, NA, NA, NA),
+	[5] = PINGROUP(5, WEST, qup1, CRI_TRNG1, NA, phase_flag5, dac_calib3,
+		       atest_usb13, NA, NA, NA),
+	[6] = PINGROUP(6, WEST, qup2, NA, phase_flag6, dac_calib4, atest_usb1,
+		       NA, NA, NA, NA),
+	[7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA),
+	[8] = PINGROUP(8, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA,
+		       tsense_pwm, NA, NA),
+	[9] = PINGROUP(9, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, NA,
+		       NA, NA),
+	[10] = PINGROUP(10, EAST, qup3, AGERA_PLL, NA, pbs0, qdss_gpio0, NA,
+			NA, NA, NA),
+	[11] = PINGROUP(11, EAST, qup3, AGERA_PLL, NA, pbs1, qdss_gpio1, NA,
+			NA, NA, NA),
+	[12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA),
+	[13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA),
+	[14] = PINGROUP(14, WEST, qup5, tgu_ch2, NA, phase_flag7, qdss_gpio4,
+			dac_calib5, NA, NA, NA),
+	[15] = PINGROUP(15, WEST, qup5, tgu_ch3, NA, phase_flag8, qdss_gpio5,
+			dac_calib6, NA, NA, NA),
+	[16] = PINGROUP(16, WEST, qup5, NA, phase_flag9, qdss_gpio6,
+			dac_calib7, NA, NA, NA, NA),
+	[17] = PINGROUP(17, WEST, qup5, NA, phase_flag10, qdss_gpio7,
+			dac_calib8, NA, NA, NA, NA),
+	[18] = PINGROUP(18, EAST, SDC2_TB, CRI_TRNG, pbs2, qdss_gpio2, NA, NA,
+			NA, NA, NA),
+	[19] = PINGROUP(19, EAST, SDC1_TB, pbs3, qdss_gpio3, NA, NA, NA, NA,
+			NA, NA),
+	[20] = PINGROUP(20, EAST, cam_mclk, pbs4, qdss_gpio4, NA, NA, NA, NA,
+			NA, NA),
+	[21] = PINGROUP(21, EAST, cam_mclk, adsp_ext, pbs5, qdss_gpio5, NA, NA,
+			NA, NA, NA),
+	[22] = PINGROUP(22, EAST, cci_i2c, prng_rosc, NA, pbs6, phase_flag11,
+			qdss_gpio6, dac_calib9, atest_usb20, NA),
+	[23] = PINGROUP(23, EAST, cci_i2c, prng_rosc, NA, pbs7, phase_flag12,
+			qdss_gpio7, dac_calib10, atest_usb21, NA),
+	[24] = PINGROUP(24, EAST, CCI_TIMER1, GCC_GP1, NA, pbs8, phase_flag13,
+			qdss_gpio8, dac_calib11, atest_usb22, NA),
+	[25] = PINGROUP(25, EAST, cci_async, CCI_TIMER0, NA, pbs9,
+			phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA),
+	[26] = PINGROUP(26, EAST, NA, pbs10, phase_flag15, qdss_gpio10,
+			dac_calib13, atest_usb2, vsense_trigger, NA, NA),
+	[27] = PINGROUP(27, EAST, cam_mclk, qdss_cti, NA, NA, NA, NA, NA, NA,
+			NA),
+	[28] = PINGROUP(28, EAST, cam_mclk, CCI_TIMER2, qdss_cti, NA, NA, NA,
+			NA, NA, NA),
+	[29] = PINGROUP(29, EAST, cci_i2c, NA, phase_flag16, dac_calib14,
+			atest_char, NA, NA, NA, NA),
+	[30] = PINGROUP(30, EAST, cci_i2c, NA, phase_flag17, dac_calib15,
+			atest_char0, NA, NA, NA, NA),
+	[31] = PINGROUP(31, EAST, GP_PDM0, NA, phase_flag18, dac_calib16,
+			atest_char1, NA, NA, NA, NA),
+	[32] = PINGROUP(32, EAST, CCI_TIMER3, GP_PDM1, NA, phase_flag19,
+			dac_calib17, atest_char2, NA, NA, NA),
+	[33] = PINGROUP(33, EAST, GP_PDM2, NA, phase_flag20, dac_calib18,
+			atest_char3, NA, NA, NA, NA),
+	[34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[35] = PINGROUP(35, EAST, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA),
+	[36] = PINGROUP(36, EAST, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA),
+	[37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA),
+	[43] = PINGROUP(43, EAST, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA),
+	[44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA),
+	[45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, EAST, NA, NAV_GPIO, pbs14, qdss_gpio14, NA, NA, NA,
+			NA, NA),
+	[48] = PINGROUP(48, EAST, NA, vfr_1, NA, pbs15, qdss_gpio15, NA, NA,
+			NA, NA),
+	[49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[52] = PINGROUP(52, EAST, NA, NAV_GPIO, pbs_out, NA, NA, NA, NA, NA,
+			NA),
+	[53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
+	[54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
+	[60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
+	[61] = PINGROUP(61, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA),
+	[63] = PINGROUP(63, EAST, pll_reset, NA, phase_flag26, ddr_pxi0, NA,
+			NA, NA, NA, NA),
+	[64] = PINGROUP(64, EAST, gsm0_tx, NA, phase_flag27, ddr_pxi0, NA, NA,
+			NA, NA, NA),
+	[65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[69] = PINGROUP(69, WEST, qup1, GCC_GP2, qdss_gpio12, ddr_pxi1, NA, NA,
+			NA, NA, NA),
+	[70] = PINGROUP(70, WEST, qup1, GCC_GP3, qdss_gpio13, ddr_pxi1, NA, NA,
+			NA, NA, NA),
+	[71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA),
+	[72] = PINGROUP(72, SOUTH, uim2_data, qdss_cti, NA, NA, NA, NA, NA, NA,
+			NA),
+	[73] = PINGROUP(73, SOUTH, uim2_clk, NA, qdss_cti, NA, NA, NA, NA, NA,
+			NA),
+	[74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	[75] = PINGROUP(75, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	[77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	[78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	[79] = PINGROUP(79, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[80] = PINGROUP(80, WEST, qup2, dac_calib19, NA, NA, NA, NA, NA, NA,
+			NA),
+	[81] = PINGROUP(81, WEST, mdp_vsync, mdp_vsync, mdp_vsync, dac_calib20,
+			NA, NA, NA, NA, NA),
+	[82] = PINGROUP(82, WEST, qup0, dac_calib21, NA, NA, NA, NA, NA, NA,
+			NA),
+	[83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, WEST, qup0, GCC_GP1, atest_bbrx1, NA, NA, NA, NA,
+			NA, NA),
+	[87] = PINGROUP(87, EAST, pbs11, qdss_gpio11, NA, NA, NA, NA, NA, NA,
+			NA),
+	[88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, WEST, usb_phy, atest_bbrx0, NA, NA, NA, NA, NA, NA,
+			NA),
+	[90] = PINGROUP(90, EAST, mss_lte, pbs12, qdss_gpio12, NA, NA, NA, NA,
+			NA, NA),
+	[91] = PINGROUP(91, EAST, mss_lte, pbs13, qdss_gpio13, NA, NA, NA, NA,
+			NA, NA),
+	[92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[94] = PINGROUP(94, WEST, NA, qdss_gpio14, wlan1_adc0, NA, NA, NA, NA,
+			NA, NA),
+	[95] = PINGROUP(95, WEST, NAV_GPIO, GP_PDM0, qdss_gpio15, wlan1_adc1,
+			NA, NA, NA, NA, NA),
+	[96] = PINGROUP(96, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM1, sd_write,
+			JITTER_BIST, qdss_cti, qdss_cti, NA),
+	[97] = PINGROUP(97, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM2,
+			JITTER_BIST, qdss_cti, qdss_cti, NA, NA),
+	[98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[100] = PINGROUP(100, SOUTH, atest_gpsadc_dtest0_native, NA, NA, NA,
+			 NA, NA, NA, NA, NA),
+	[101] = PINGROUP(101, SOUTH, atest_gpsadc_dtest1_native, NA, NA, NA,
+			 NA, NA, NA, NA, NA),
+	[102] = PINGROUP(102, SOUTH, NA, phase_flag28, dac_calib22, ddr_pxi2,
+			 NA, NA, NA, NA, NA),
+	[103] = PINGROUP(103, SOUTH, NA, phase_flag29, dac_calib23, ddr_pxi2,
+			 NA, NA, NA, NA, NA),
+	[104] = PINGROUP(104, SOUTH, NA, phase_flag30, qdss_gpio1, dac_calib24,
+			 ddr_pxi3, NA, NA, NA, NA),
+	[105] = PINGROUP(105, SOUTH, NA, phase_flag31, qdss_gpio, dac_calib25,
+			 ddr_pxi3, NA, NA, NA, NA),
+	[106] = PINGROUP(106, SOUTH, NAV_GPIO, GCC_GP3, qdss_gpio, NA, NA, NA,
+			 NA, NA, NA),
+	[107] = PINGROUP(107, SOUTH, NAV_GPIO, GCC_GP2, qdss_gpio0, NA, NA, NA,
+			 NA, NA, NA),
+	[108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA),
+	[109] = PINGROUP(109, SOUTH, NA, qdss_gpio2, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[110] = PINGROUP(110, SOUTH, NA, qdss_gpio3, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[113] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x175000, 15, 0),
+	[114] = SDC_QDSD_PINGROUP(sdc1_clk, 0x175000, 13, 6),
+	[115] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x175000, 11, 3),
+	[116] = SDC_QDSD_PINGROUP(sdc1_data, 0x175000, 9, 0),
+	[117] = SDC_QDSD_PINGROUP(sdc2_clk, 0x173000, 14, 6),
+	[118] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x173000, 11, 3),
+	[119] = SDC_QDSD_PINGROUP(sdc2_data, 0x173000, 9, 0),
+	[120] = UFS_RESET(ufs_reset, 0x177000),
+};
+
+static const struct msm_pinctrl_soc_data bengal_pinctrl = {
+	.pins = bengal_pins,
+	.npins = ARRAY_SIZE(bengal_pins),
+	.functions = bengal_functions,
+	.nfunctions = ARRAY_SIZE(bengal_functions),
+	.groups = bengal_groups,
+	.ngroups = ARRAY_SIZE(bengal_groups),
+	.ngpios = 113,
+};
+
+static int bengal_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &bengal_pinctrl);
+}
+
+static const struct of_device_id bengal_pinctrl_of_match[] = {
+	{ .compatible = "qcom,bengal-pinctrl", },
+	{ },
+};
+
+static struct platform_driver bengal_pinctrl_driver = {
+	.driver = {
+		.name = "bengal-pinctrl",
+		.of_match_table = bengal_pinctrl_of_match,
+	},
+	.probe = bengal_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init bengal_pinctrl_init(void)
+{
+	return platform_driver_register(&bengal_pinctrl_driver);
+}
+arch_initcall(bengal_pinctrl_init);
+
+static void __exit bengal_pinctrl_exit(void)
+{
+	platform_driver_unregister(&bengal_pinctrl_driver);
+}
+module_exit(bengal_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI bengal pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bengal_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5399e13..3562bed 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -633,7 +633,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
 		val, val2);
 }
 
-static void msm_gpio_irq_mask(struct irq_data *d)
+static void _msm_gpio_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -643,13 +643,6 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
 	g = &pctrl->soc->groups[d->hwirq];
 
-	if (d->parent_data)
-		irq_chip_mask_parent(d);
-
-	/* Monitored by parent wakeup controller? */
-	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
-		return;
-
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	val = readl(pctrl->regs + g->intr_cfg_reg);
@@ -684,7 +677,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
-static void msm_gpio_irq_unmask(struct irq_data *d)
+static void _msm_gpio_irq_unmask(struct irq_data *d, bool status_clear)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -694,15 +687,19 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
 	g = &pctrl->soc->groups[d->hwirq];
 
-	if (d->parent_data)
-		irq_chip_unmask_parent(d);
-
-	/* Monitored by parent wakeup controller? Keep masked */
-	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
-		return;
-
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
+	if (status_clear) {
+		/*
+		 * clear the interrupt status bit before unmask to avoid
+		 * any erroneous interrupts that would have got latched
+		 * when the interrupt is not in use.
+		 */
+		val = readl(pctrl->regs + g->intr_status_reg);
+		val &= ~BIT(g->intr_status_bit);
+		writel(val, pctrl->regs + g->intr_status_reg);
+	}
+
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_raw_status_bit);
 	val |= BIT(g->intr_enable_bit);
@@ -713,6 +710,73 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data)
+		irq_chip_mask_parent(d);
+
+	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return;
+
+	_msm_gpio_irq_mask(d);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data)
+		irq_chip_unmask_parent(d);
+
+	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return;
+
+	_msm_gpio_irq_unmask(d, false);
+}
+
+static void msm_gpio_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data)
+		irq_chip_disable_parent(d);
+
+	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return;
+
+	_msm_gpio_irq_mask(d);
+}
+
+static void msm_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data) {
+		/*
+		 * Clear the interrupt that may be pending before we enable
+		 * the line.
+		 * This is especially a problem with the GPIOs routed to the
+		 * PDC. These GPIOs are direct-connect interrupts to the GIC.
+		 * Disabling the interrupt line at the PDC does not prevent
+		 * the interrupt from being latched at the GIC. The state at
+		 * GIC needs to be cleared before enabling.
+		 */
+		irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+		irq_chip_enable_parent(d);
+	}
+
+	if (test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return;
+
+	_msm_gpio_irq_unmask(d, true);
+}
+
 static void msm_gpio_irq_ack(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -975,11 +1039,12 @@ static int msm_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 	parent.fwspec.param_count = 2;
 	parent.fwspec.param[0] = GPIO_NO_WAKE_IRQ;
-	parent.fwspec.param[1] = type;
 	ret = of_irq_domain_map(fwspec, &parent.fwspec);
 	if (ret == -ENOMEM)
 		return ret;
 
+	/* Set something other than IRQ_TYPE_NONE to avoid GIC complaint. */
+	parent.fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
 	parent.fwspec.fwnode = domain->parent->fwnode;
 
 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent);
@@ -1001,6 +1066,10 @@ static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
 
 	fwspec.fwnode = of_node_to_fwnode(chip->of_node);
 	fwspec.param[0] = offset;
+	/*
+	 * Since we don't know the trigger type, let's create it with
+	 * IRQ_TYPE_NONE and let the driver override it in request_irq.
+	 */
 	fwspec.param[1] = IRQ_TYPE_NONE;
 	fwspec.param_count = 2;
 
@@ -1034,6 +1103,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
 
 	pctrl->irq_chip.name = "msmgpio";
 	pctrl->irq_chip.irq_eoi	= irq_chip_eoi_parent;
+	pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
+	pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
 	pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
 	pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
 	pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 44c6b75..85ddf49 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
 	}
 
 	clk_base = of_iomap(np, 0);
+	of_node_put(np);
 	if (!clk_base) {
 		pr_err("%s: failed to map clock registers\n", __func__);
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index caa44dd..3cb6930 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
 	}
 
 	zpctl->aux_base = of_iomap(np, 0);
+	of_node_put(np);
 	if (!zpctl->aux_base)
 		return -ENOMEM;
 
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 83867b7..0e3fc66 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2400,7 +2400,8 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
 
 	if (erindex < GSI_EVT_RING_MAX) {
 		ctx->evtr = &gsi_ctx->evtr[erindex];
-		atomic_inc(&ctx->evtr->chan_ref_cnt);
+		if (props->prot != GSI_CHAN_PROT_GCI)
+			atomic_inc(&ctx->evtr->chan_ref_cnt);
 		if (props->prot != GSI_CHAN_PROT_GCI &&
 			ctx->evtr->props.exclusive &&
 			atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
@@ -2523,6 +2524,38 @@ int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
 }
 EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
 
+int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi2_channel_scratch2_reg val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+
+	ctx->scratch.wdi2_new.endp_metadatareg_offset =
+				val.wdi.endp_metadatareg_offset;
+	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
+	val.wdi.update_ri_moderation_threshold =
+		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	mutex_unlock(&ctx->mlock);
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);
+
 static void __gsi_read_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch * val)
 {
@@ -3114,7 +3147,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
 	}
 	devm_kfree(gsi_ctx->dev, ctx->user_data);
 	ctx->allocated = false;
-	if (ctx->evtr)
+	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
 		atomic_dec(&ctx->evtr->chan_ref_cnt);
 	atomic_dec(&gsi_ctx->num_chan);
 
@@ -3343,8 +3376,8 @@ int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
 	 * idx is not completed yet and it is getting reused by a new TRE.
 	 */
 	ctx->stats.userdata_in_use++;
+	end = ctx->ring.max_num_elem + 1;
 	for (i = 0; i < GSI_VEID_MAX; i++) {
-		end = ctx->ring.max_num_elem + 1;
 		if (!ctx->user_data[end + i].valid) {
 			ctx->user_data[end + i].valid = true;
 			return end + i;
@@ -3353,7 +3386,7 @@ int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
 
 	/* TODO: Increase escape buffer size if we hit this */
 	GSIERR("user_data is full\n");
-	return -EPERM;
+	return 0xFFFF;
 }
 
 int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
@@ -3384,7 +3417,7 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
 	gci_tre.buf_len = xfer->len;
 	gci_tre.re_type = GSI_RE_COAL;
 	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
-	if (gci_tre.cookie < 0)
+	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
 		return -EPERM;
 
 	/* write the TRE to ring */
@@ -3698,6 +3731,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
 			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
 		atomic_set(&ctx->poll_mode, mode);
+		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
 		ctx->stats.callback_to_poll++;
@@ -3706,6 +3741,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
 		atomic_set(&ctx->poll_mode, mode);
+		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
@@ -3715,8 +3752,9 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		 * to losing an interrupt. For these versions an
 		 * explicit check is needed after enabling the interrupt
 		 */
-		if (gsi_ctx->per.ver == GSI_VER_2_2 ||
-		    gsi_ctx->per.ver == GSI_VER_2_5) {
+		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
+		    gsi_ctx->per.ver == GSI_VER_2_5) &&
+			!gsi_ctx->per.skip_ieob_mask_wa) {
 			u32 src = gsi_readl(gsi_ctx->base +
 				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
 					gsi_ctx->per.ee));
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index a84d2d0..28a2fc6 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -14,6 +14,7 @@
 #include <linux/ipa_uc_offload.h>
 #include <linux/pci.h>
 #include "ipa_api.h"
+#include "ipa_v3/ipa_i.h"
 
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
@@ -934,6 +935,25 @@ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
 EXPORT_SYMBOL(ipa_add_rt_rule);
 
 /**
+ * ipa_add_rt_rule_v2() - Add the specified routing rules to SW
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_v2, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_v2);
+
+/**
  * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
  * commit to IPA HW
  * @rules:	[inout] set of routing rules to add
@@ -954,6 +974,26 @@ int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
 EXPORT_SYMBOL(ipa_add_rt_rule_usr);
 
 /**
+ * ipa_add_rt_rule_usr_v2() - Add the specified routing rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr_v2, rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_usr_v2);
+
+/**
  * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
  * commit to IPA HW
  * @hdls:	[inout] set of routing rules to delete
@@ -1086,6 +1126,24 @@ int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
 EXPORT_SYMBOL(ipa_mdfy_rt_rule);
 
 /**
+ * ipa_mdfy_rt_rule_v2() - Modify the specified routing rules in
+ * SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule_v2, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_rt_rule_v2);
+
+/**
  * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
  * commit to IPA HW
  * @rules:	[inout] set of filtering rules to add
@@ -1105,6 +1163,25 @@ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
 EXPORT_SYMBOL(ipa_add_flt_rule);
 
 /**
+ * ipa_add_flt_rule_v2() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_v2, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_v2);
+
+/**
  * ipa_add_flt_rule_usr() - Add the specified filtering rules to
  * SW and optionally commit to IPA HW
  * @rules:		[inout] set of filtering rules to add
@@ -1125,6 +1202,28 @@ int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
 EXPORT_SYMBOL(ipa_add_flt_rule_usr);
 
 /**
+ * ipa_add_flt_rule_usr_v2() - Add the specified filtering rules
+ * to SW and optionally commit to IPA HW
+ * @rules:		[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2 *rules,
+	bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr_v2,
+		rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_usr_v2);
+
+/**
  * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
  * optionally commit to IPA HW
  *
@@ -1161,6 +1260,24 @@ int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
 EXPORT_SYMBOL(ipa_mdfy_flt_rule);
 
 /**
+ * ipa_mdfy_flt_rule_v2() - Modify the specified filtering rules
+ * in SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule_v2, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_flt_rule_v2);
+
+/**
  * ipa_commit_flt() - Commit the current SW filtering table of specified type to
  * IPA HW
  * @ip:	[in] the family of routing tables
@@ -1954,29 +2071,29 @@ int ipa_uc_reg_rdyCB(
 EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
 
 /**
-* ipa_wigig_uc_init() - get uc db and register uC
+* ipa_wigig_internal_init() - get uc db and register uC
 * ready CB if uC not ready, wigig only.
 * @inout:	[in/out] uc ready input/output parameters
 * from/to client
 * @int_notify: [in] wigig misc interrupt handler function
+* @uc_db_pa: [out] uC db physical address
 *
 * Returns:	0 on success, negative on failure
 *
 */
-
-int ipa_wigig_uc_init(
+int ipa_wigig_internal_init(
 	struct ipa_wdi_uc_ready_params *inout,
 	ipa_wigig_misc_int_cb int_notify,
 	phys_addr_t *uc_db_pa)
 {
 	int ret;
 
-	IPA_API_DISPATCH_RETURN(ipa_wigig_uc_init, inout,
+	IPA_API_DISPATCH_RETURN(ipa_wigig_internal_init, inout,
 		int_notify, uc_db_pa);
 
 	return ret;
 }
-EXPORT_SYMBOL(ipa_wigig_uc_init);
+EXPORT_SYMBOL(ipa_wigig_internal_init);
 
 /**
  * ipa_uc_dereg_rdyCB() - To de-register uC ready CB
@@ -2678,24 +2795,19 @@ enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
 EXPORT_SYMBOL(ipa_get_client_mapping);
 
 /**
- * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
- * the supplied pipe index.
- *
- * @pipe_idx:
- *
- * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
- * found.
+ * ipa_get_rm_resource_from_ep() - this function is part of the deprecated
+ * RM mechanism but is still used by some drivers so we kept the definition.
  */
+
 enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx)
 {
-	int ret;
-
-	IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx);
-
-	return ret;
+	IPAERR("IPA RM is not supported idx=%d\n", pipe_idx);
+	return -EFAULT;
 }
 EXPORT_SYMBOL(ipa_get_rm_resource_from_ep);
 
+
+
 /**
  * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
  *
@@ -3437,11 +3549,12 @@ EXPORT_SYMBOL(ipa_wigig_uc_msi_init);
 /**
  * ipa_conn_wigig_rx_pipe_i() - connect wigig rx pipe
  */
-int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent)
 {
 	int ret;
 
-	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_rx_pipe_i, in, out);
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_rx_pipe_i, in, out, parent);
 
 	return ret;
 }
@@ -3450,11 +3563,15 @@ EXPORT_SYMBOL(ipa_conn_wigig_rx_pipe_i);
 /**
  * ipa_conn_wigig_client_i() - connect a wigig client
  */
-int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
+int ipa_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv)
 {
 	int ret;
 
-	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_client_i, in, out);
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_client_i, in, out,
+		tx_notify, priv);
 
 	return ret;
 }
@@ -3529,18 +3646,6 @@ void ipa_deregister_client_callback(enum ipa_client_type client)
 }
 
 
-/**
- * ipa_pm_is_used() - Returns if IPA PM framework is used
- */
-bool ipa_pm_is_used(void)
-{
-	bool ret;
-
-	IPA_API_DISPATCH_RETURN(ipa_pm_is_used);
-
-	return ret;
-}
-
 static const struct dev_pm_ops ipa_pm_ops = {
 	.suspend_noirq = ipa_ap_suspend,
 	.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 19a1b88..62457e2 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -82,9 +82,14 @@ struct ipa_api_controller {
 
 	int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
 
+	int (*ipa_add_rt_rule_v2)(struct ipa_ioc_add_rt_rule_v2 *rules);
+
 	int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules,
 							bool user_only);
 
+	int (*ipa_add_rt_rule_usr_v2)(struct ipa_ioc_add_rt_rule_v2 *rules,
+							bool user_only);
+
 	int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
 
 	int (*ipa_commit_rt)(enum ipa_ip_type ip);
@@ -99,15 +104,24 @@ struct ipa_api_controller {
 
 	int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules);
 
+	int (*ipa_mdfy_rt_rule_v2)(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
+
 	int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
 
+	int (*ipa_add_flt_rule_v2)(struct ipa_ioc_add_flt_rule_v2 *rules);
+
 	int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules,
 								bool user_only);
 
+	int (*ipa_add_flt_rule_usr_v2)
+		(struct ipa_ioc_add_flt_rule_v2 *rules, bool user_only);
+
 	int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
 
 	int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
 
+	int (*ipa_mdfy_flt_rule_v2)(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
+
 	int (*ipa_commit_flt)(enum ipa_ip_type ip);
 
 	int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only);
@@ -326,8 +340,6 @@ struct ipa_api_controller {
 
 	enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
 
-	enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx);
-
 	bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
 
 	enum ipa_transport_type (*ipa_get_transport_type)(void);
@@ -420,18 +432,19 @@ struct ipa_api_controller {
 		struct ipa_smmu_out_params *out);
 	int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
 
-	bool (*ipa_pm_is_used)(void);
-
-	int (*ipa_wigig_uc_init)(
+	int (*ipa_wigig_internal_init)(
 		struct ipa_wdi_uc_ready_params *inout,
 		ipa_wigig_misc_int_cb int_notify,
 		phys_addr_t *uc_db_pa);
 
 	int (*ipa_conn_wigig_rx_pipe_i)(void *in,
-		struct ipa_wigig_conn_out_params *out);
+		struct ipa_wigig_conn_out_params *out,
+		struct dentry **parent);
 
 	int (*ipa_conn_wigig_client_i)(void *in,
-		struct ipa_wigig_conn_out_params *out);
+		struct ipa_wigig_conn_out_params *out,
+		ipa_notify_cb tx_notify,
+		void *priv);
 
 	int (*ipa_disconn_wigig_pipe_i)(enum ipa_client_type client,
 		struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index 1d8d529..ff70d64 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/debugfs.h>
@@ -112,7 +112,7 @@ enum ecm_ipa_state {
 };
 
 /**
- * enum ecm_ipa_operation - enumerations used to descibe the API operation
+ * enum ecm_ipa_operation - enumerations used to describe the API operation
  *
  * Those enums are used as input for the driver state machine.
  */
@@ -147,8 +147,6 @@ enum ecm_ipa_operation {
  * state is changed to RNDIS_IPA_CONNECTED_AND_UP
  * @ipa_to_usb_client: consumer client
  * @usb_to_ipa_client: producer client
- * @ipa_rm_resource_name_prod: IPA resource manager producer resource
- * @ipa_rm_resource_name_cons: IPA resource manager consumer resource
  * @pm_hdl: handle for IPA PM
  * @is_vlan_mode: does the driver need to work in VLAN mode?
  */
@@ -166,8 +164,6 @@ struct ecm_ipa_dev {
 	void (*device_ready_notify)(void);
 	enum ipa_client_type ipa_to_usb_client;
 	enum ipa_client_type usb_to_ipa_client;
-	enum ipa_rm_resource_name ipa_rm_resource_name_prod;
-	enum ipa_rm_resource_name ipa_rm_resource_name_cons;
 	u32 pm_hdl;
 	bool is_vlan_mode;
 };
@@ -186,15 +182,9 @@ static int ecm_ipa_rules_cfg
 static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
 static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_deregister_properties(void);
-static void ecm_ipa_rm_notify
-	(void *user_data, enum ipa_rm_event event, unsigned long data);
 static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
-static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
-static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
 static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
-static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
-static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
 static netdev_tx_t ecm_ipa_start_xmit
 	(struct sk_buff *skb, struct net_device *net);
 static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
@@ -242,7 +232,6 @@ static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
  *  - allocate the network device
  *  - set default values for driver internals
  *  - create debugfs folder and files
- *  - create IPA resource manager client
  *  - add header insertion rules for IPA driver (based on host/device
  *    Ethernet addresses given in input params)
  *  - register tx/rx properties to IPA driver (will be later used
@@ -446,44 +435,18 @@ int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
 	ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
 		      ecm_ipa_ctx->usb_to_ipa_client);
 
-	if (ipa_pm_is_used()) {
-		retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
-	} else {
-		ecm_ipa_ctx->ipa_rm_resource_name_cons =
-			ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
-		if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
-			ECM_IPA_ERROR(
-			"Error getting CONS RM resource from handle %d\n",
-				      ecm_ipa_ctx->ipa_rm_resource_name_cons);
-			return -EINVAL;
-		}
-		ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
-			      ecm_ipa_ctx->ipa_rm_resource_name_cons);
-
-		ecm_ipa_ctx->ipa_rm_resource_name_prod =
-			ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
-		if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
-			ECM_IPA_ERROR(
-			"Error getting PROD RM resource from handle %d\n",
-				      ecm_ipa_ctx->ipa_rm_resource_name_prod);
-			return -EINVAL;
-		}
-		ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
-			      ecm_ipa_ctx->ipa_rm_resource_name_prod);
-
-		retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
-	}
+	retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
 
 	if (retval) {
-		ECM_IPA_ERROR("fail on RM create\n");
-		goto fail_create_rm;
+		ECM_IPA_ERROR("fail register PM client\n");
+		return retval;
 	}
-	ECM_IPA_DEBUG("RM resource was created\n");
+	ECM_IPA_DEBUG("PM client registered\n");
 
 	retval = ecm_ipa_register_properties(ecm_ipa_ctx);
 	if (retval) {
 		ECM_IPA_ERROR("fail on properties set\n");
-		goto fail_create_rm;
+		goto fail_register_pm;
 	}
 	ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
 
@@ -537,11 +500,8 @@ int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
 
 fail:
 	ecm_ipa_deregister_properties();
-fail_create_rm:
-	if (ipa_pm_is_used())
-		ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
-	else
-		ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+fail_register_pm:
+	ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
 	return retval;
 }
 EXPORT_SYMBOL(ecm_ipa_connect);
@@ -592,10 +552,7 @@ static int ecm_ipa_open(struct net_device *net)
  *   in "send" state
  * - The driver internal state is in "UP" state.
  * - Filter Tx switch is turned off
- * - The IPA resource manager state for the driver producer client
- *   is "Granted" which implies that all the resources in the dependency
- *   graph are valid for data flow.
- * - outstanding high boundary did not reach.
+ * - Outstanding high boundary did not reach.
  *
  * In case all of the above conditions are met, the network driver will
  * send the packet by using the IPA API for Tx.
@@ -626,11 +583,11 @@ static netdev_tx_t ecm_ipa_start_xmit
 		return NETDEV_TX_BUSY;
 	}
 
-	ret = resource_request(ecm_ipa_ctx);
+	ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
 	if (ret) {
-		ECM_IPA_DEBUG("Waiting to resource\n");
+		ECM_IPA_DEBUG("Failed to activate PM client\n");
 		netif_stop_queue(net);
-		goto resource_busy;
+		goto fail_pm_activate;
 	}
 
 	if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >=
@@ -662,8 +619,8 @@ static netdev_tx_t ecm_ipa_start_xmit
 
 fail_tx_packet:
 out:
-	resource_release(ecm_ipa_ctx);
-resource_busy:
+	ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
+fail_pm_activate:
 	return status;
 }
 
@@ -803,10 +760,7 @@ int ecm_ipa_disconnect(void *priv)
 	netif_stop_queue(ecm_ipa_ctx->net);
 	ECM_IPA_DEBUG("queue stopped\n");
 
-	if (ipa_pm_is_used())
-		ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
-	else
-		ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+	ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
 
 	outstanding_dropped_pkts =
 		atomic_read(&ecm_ipa_ctx->outstanding_pkts);
@@ -831,8 +785,6 @@ EXPORT_SYMBOL(ecm_ipa_disconnect);
  * needed anymore, e.g: when the USB composition does not support ECM.
  * This function shall be called after the pipes were disconnected.
  * Detailed description:
- *  - delete the driver dependency defined for IPA resource manager and
- *   destroy the producer resource.
  *  -  remove the debugfs entries
  *  - deregister the network interface from Linux network stack
  *  - free all internal data structs
@@ -1110,99 +1062,11 @@ static void ecm_ipa_deregister_properties(void)
  * Returns negative errno, or zero on success
  */
 
-static void ecm_ipa_rm_notify
-	(void *user_data, enum ipa_rm_event event, unsigned long data)
-{
-	struct ecm_ipa_dev *ecm_ipa_ctx = user_data;
-
-	ECM_IPA_LOG_ENTRY();
-	if
-		(event == IPA_RM_RESOURCE_GRANTED &&
-			netif_queue_stopped(ecm_ipa_ctx->net)) {
-		ECM_IPA_DEBUG("Resource Granted - starting queue\n");
-		netif_start_queue(ecm_ipa_ctx->net);
-	} else {
-		ECM_IPA_DEBUG("Resource released\n");
-	}
-	ECM_IPA_LOG_EXIT();
-}
-
 static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net)
 {
 	return &net->stats;
 }
 
-static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
-	struct ipa_rm_create_params create_params = {0};
-	struct ipa_rm_perf_profile profile;
-	int result;
-
-	ECM_IPA_LOG_ENTRY();
-	create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
-	create_params.reg_params.user_data = ecm_ipa_ctx;
-	create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
-	result = ipa_rm_create_resource(&create_params);
-	if (result) {
-		ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
-		goto fail_rm_create;
-	}
-	ECM_IPA_DEBUG("rm client was created");
-
-	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
-	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile);
-
-	result = ipa_rm_inactivity_timer_init
-		(IPA_RM_RESOURCE_STD_ECM_PROD,
-		INACTIVITY_MSEC_DELAY);
-	if (result) {
-		ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
-		goto fail_it;
-	}
-	ECM_IPA_DEBUG("rm_it client was created");
-
-	result = ipa_rm_add_dependency_sync
-		(IPA_RM_RESOURCE_STD_ECM_PROD,
-		ecm_ipa_ctx->ipa_rm_resource_name_cons);
-	if (result && result != -EINPROGRESS)
-		ECM_IPA_ERROR
-		("unable to add ECM/USB dependency (%d)\n", result);
-
-	result = ipa_rm_add_dependency_sync
-		(ecm_ipa_ctx->ipa_rm_resource_name_prod,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (result && result != -EINPROGRESS)
-		ECM_IPA_ERROR
-		("unable to add USB/APPS dependency (%d)\n", result);
-
-	ECM_IPA_DEBUG("rm dependency was set\n");
-
-	ECM_IPA_LOG_EXIT();
-	return 0;
-
-fail_it:
-fail_rm_create:
-	return result;
-}
-
-static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
-	int result;
-
-	ECM_IPA_LOG_ENTRY();
-
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD,
-				 ecm_ipa_ctx->ipa_rm_resource_name_cons);
-	ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod,
-				 IPA_RM_RESOURCE_APPS_CONS);
-	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
-	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
-	if (result)
-		ECM_IPA_ERROR("resource deletion failed\n");
-
-	ECM_IPA_LOG_EXIT();
-}
-
 static void ecm_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
 	struct ecm_ipa_dev *ecm_ipa_ctx = p;
@@ -1246,23 +1110,6 @@ static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
 	ecm_ipa_ctx->pm_hdl = ~0;
 }
 
-static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
-	if (ipa_pm_is_used())
-		return ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
-
-	return ipa_rm_inactivity_timer_request_resource(
-		IPA_RM_RESOURCE_STD_ECM_PROD);
-}
-
-static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
-	if (ipa_pm_is_used())
-		ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
-	else
-		ipa_rm_inactivity_timer_release_resource(
-			IPA_RM_RESOURCE_STD_ECM_PROD);
-}
 
 /**
  * ecm_ipa_tx_complete_notify() - Rx notify
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 45e890f..134cc62 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -51,7 +51,6 @@
 #define IPA_MHI_FUNC_EXIT() \
 	IPA_MHI_DBG("EXIT\n")
 
-#define IPA_MHI_RM_TIMEOUT_MSEC 10000
 #define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
 
 #define IPA_MHI_SUSPEND_SLEEP_MIN 900
@@ -64,13 +63,6 @@
 #define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
 	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
 
-enum ipa_mhi_rm_state {
-	IPA_MHI_RM_STATE_RELEASED,
-	IPA_MHI_RM_STATE_REQUESTED,
-	IPA_MHI_RM_STATE_GRANTED,
-	IPA_MHI_RM_STATE_MAX
-};
-
 enum ipa_mhi_state {
 	IPA_MHI_STATE_INITIALIZED,
 	IPA_MHI_STATE_READY,
@@ -130,9 +122,6 @@ struct ipa_mhi_client_ctx {
 	spinlock_t state_lock;
 	mhi_client_cb cb_notify;
 	void *cb_priv;
-	struct completion rm_prod_granted_comp;
-	enum ipa_mhi_rm_state rm_cons_state;
-	struct completion rm_cons_comp;
 	bool trigger_wakeup;
 	bool wakeup_notified;
 	struct workqueue_struct *wq;
@@ -535,81 +524,6 @@ static void ipa_mhi_notify_wakeup(void)
 }
 
 /**
- * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
- *
- * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
- * In case IPA MHI is suspended, MHI CONS will be granted after resume.
- */
-static int ipa_mhi_rm_cons_request(void)
-{
-	unsigned long flags;
-	int res;
-
-	IPA_MHI_FUNC_ENTRY();
-
-	IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
-	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
-	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
-	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
-		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
-		res = 0;
-	} else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
-		ipa_mhi_notify_wakeup();
-		res = -EINPROGRESS;
-	} else if (ipa_mhi_client_ctx->state ==
-			IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
-		/* wakeup event will be trigger after suspend finishes */
-		ipa_mhi_client_ctx->trigger_wakeup = true;
-		res = -EINPROGRESS;
-	} else {
-		res = -EINPROGRESS;
-	}
-
-	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
-	IPA_MHI_DBG("EXIT with %d\n", res);
-	return res;
-}
-
-static int ipa_mhi_rm_cons_release(void)
-{
-	unsigned long flags;
-
-	IPA_MHI_FUNC_ENTRY();
-
-	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
-	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
-	complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
-	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
-
-	IPA_MHI_FUNC_EXIT();
-	return 0;
-}
-
-static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
-	unsigned long data)
-{
-	IPA_MHI_FUNC_ENTRY();
-
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
-		complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
-		break;
-
-	case IPA_RM_RESOURCE_RELEASED:
-		IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
-		break;
-
-	default:
-		IPA_MHI_ERR("unexpected event %d\n", event);
-		WARN_ON(1);
-		break;
-	}
-
-	IPA_MHI_FUNC_EXIT();
-}
-
-/**
  * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
  *
  * This function is called from IPA MHI workqueue to notify
@@ -699,14 +613,6 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
 		} else if (new_state == IPA_MHI_STATE_STARTED) {
 			ipa_mhi_client_ctx->wakeup_notified = false;
 			ipa_mhi_client_ctx->trigger_wakeup = false;
-			if (ipa_mhi_client_ctx->rm_cons_state ==
-				IPA_MHI_RM_STATE_REQUESTED) {
-				ipa_rm_notify_completion(
-					IPA_RM_RESOURCE_GRANTED,
-					IPA_RM_RESOURCE_MHI_CONS);
-				ipa_mhi_client_ctx->rm_cons_state =
-					IPA_MHI_RM_STATE_GRANTED;
-			}
 			res = 0;
 		}
 		break;
@@ -726,14 +632,6 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
 		} else if (new_state == IPA_MHI_STATE_STARTED) {
 			ipa_mhi_client_ctx->trigger_wakeup = false;
 			ipa_mhi_client_ctx->wakeup_notified = false;
-			if (ipa_mhi_client_ctx->rm_cons_state ==
-				IPA_MHI_RM_STATE_REQUESTED) {
-				ipa_rm_notify_completion(
-					IPA_RM_RESOURCE_GRANTED,
-					IPA_RM_RESOURCE_MHI_CONS);
-				ipa_mhi_client_ctx->rm_cons_state =
-					IPA_MHI_RM_STATE_GRANTED;
-			}
 			res = 0;
 		}
 		break;
@@ -781,48 +679,6 @@ static void ipa_mhi_uc_wakeup_request_cb(void)
 	IPA_MHI_FUNC_EXIT();
 }
 
-static int ipa_mhi_request_prod(void)
-{
-	int res;
-
-	IPA_MHI_FUNC_ENTRY();
-
-	reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
-	IPA_MHI_DBG("requesting mhi prod\n");
-	res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
-	if (res) {
-		if (res != -EINPROGRESS) {
-			IPA_MHI_ERR("failed to request mhi prod %d\n", res);
-			return res;
-		}
-		res = wait_for_completion_timeout(
-			&ipa_mhi_client_ctx->rm_prod_granted_comp,
-			msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
-		if (res == 0) {
-			IPA_MHI_ERR("timeout request mhi prod\n");
-			return -ETIME;
-		}
-	}
-
-	IPA_MHI_DBG("mhi prod granted\n");
-	IPA_MHI_FUNC_EXIT();
-	return 0;
-
-}
-
-static int ipa_mhi_release_prod(void)
-{
-	int res;
-
-	IPA_MHI_FUNC_ENTRY();
-
-	res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
-
-	IPA_MHI_FUNC_EXIT();
-	return res;
-
-}
-
 /**
  * ipa_mhi_start() - Start IPA MHI engine
  * @params: pcie addresses for MHI
@@ -873,38 +729,15 @@ int ipa_mhi_start(struct ipa_mhi_start_params *params)
 	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
 		ipa_mhi_client_ctx->event_context_array_addr);
 
-	if (ipa_pm_is_used()) {
-		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("failed activate client %d\n", res);
-			goto fail_pm_activate;
-		}
-		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("failed activate modem client %d\n", res);
-			goto fail_pm_activate_modem;
-		}
-	} else {
-		/* Add MHI <-> Q6 dependencies to IPA RM */
-		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-		if (res && res != -EINPROGRESS) {
-			IPA_MHI_ERR("failed to add dependency %d\n", res);
-			goto fail_add_mhi_q6_dep;
-		}
-
-		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_MHI_CONS);
-		if (res && res != -EINPROGRESS) {
-			IPA_MHI_ERR("failed to add dependency %d\n", res);
-			goto fail_add_q6_mhi_dep;
-		}
-
-		res = ipa_mhi_request_prod();
-		if (res) {
-			IPA_MHI_ERR("failed request prod %d\n", res);
-			goto fail_request_prod;
-		}
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed activate client %d\n", res);
+		goto fail_pm_activate;
+	}
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed activate modem client %d\n", res);
+		goto fail_pm_activate_modem;
 	}
 
 	/* gsi params */
@@ -932,22 +765,9 @@ int ipa_mhi_start(struct ipa_mhi_start_params *params)
 	return 0;
 
 fail_init_engine:
-	if (!ipa_pm_is_used())
-		ipa_mhi_release_prod();
-fail_request_prod:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_MHI_CONS);
-fail_add_q6_mhi_dep:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-fail_add_mhi_q6_dep:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
 fail_pm_activate_modem:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
 fail_pm_activate:
 	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
 	return res;
@@ -1724,31 +1544,6 @@ int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
 	return res;
 }
 
-static int ipa_mhi_wait_for_cons_release(void)
-{
-	unsigned long flags;
-	int res;
-
-	IPA_MHI_FUNC_ENTRY();
-	reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
-	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
-	if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
-		spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
-		return 0;
-	}
-	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
-
-	res = wait_for_completion_timeout(
-		&ipa_mhi_client_ctx->rm_cons_comp,
-		msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
-	if (res == 0) {
-		IPA_MHI_ERR("timeout release mhi cons\n");
-		return -ETIME;
-	}
-	IPA_MHI_FUNC_EXIT();
-	return 0;
-}
-
 static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
 	int max_channels)
 {
@@ -2179,35 +1974,20 @@ int ipa_mhi_suspend(bool force)
 
 	/*
 	 * hold IPA clocks and release them after all
-	 * IPA RM resource are released to make sure tag process will not start
+	 * IPA PM clients are deactivated to make sure tag process
+	 * will not start
 	 */
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	if (ipa_pm_is_used()) {
-		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("fail to deactivate client %d\n", res);
-			goto fail_deactivate_pm;
-		}
-		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("fail to deactivate client %d\n", res);
-			goto fail_deactivate_modem_pm;
-		}
-	} else {
-		IPA_MHI_DBG("release prod\n");
-		res = ipa_mhi_release_prod();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
-			goto fail_release_prod;
-		}
-
-		IPA_MHI_DBG("wait for cons release\n");
-		res = ipa_mhi_wait_for_cons_release();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n");
-			goto fail_release_cons;
-		}
+	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to deactivate client %d\n", res);
+		goto fail_deactivate_pm;
+	}
+	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to deactivate client %d\n", res);
+		goto fail_deactivate_modem_pm;
 	}
 	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
 
@@ -2225,14 +2005,9 @@ int ipa_mhi_suspend(bool force)
 	return 0;
 
 fail_release_cons:
-	if (!ipa_pm_is_used())
-		ipa_mhi_request_prod();
-fail_release_prod:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
 fail_deactivate_modem_pm:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
 fail_deactivate_pm:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 fail_suspend_ul_channel:
@@ -2262,7 +2037,7 @@ int ipa_mhi_suspend(bool force)
  * When this function returns device can move to M0 state.
  * This function is doing the following:
  *	- Send command to uC/GSI to resume corresponding MHI channel
- *	- Request MHI_PROD in IPA RM
+ *	- Activate PM clients
  *	- Resume data to IPA
  *
  * Return codes: 0	  : success
@@ -2271,7 +2046,6 @@ int ipa_mhi_suspend(bool force)
 int ipa_mhi_resume(void)
 {
 	int res;
-	bool dl_channel_resumed = false;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -2281,40 +2055,16 @@ int ipa_mhi_resume(void)
 		return res;
 	}
 
-	if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
-		/* resume all DL channels */
-		res = ipa_mhi_resume_channels(false,
-				ipa_mhi_client_ctx->dl_channels,
-				IPA_MHI_MAX_DL_CHANNELS);
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
-				res);
-			goto fail_resume_dl_channels;
-		}
-		dl_channel_resumed = true;
-
-		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
-			IPA_RM_RESOURCE_MHI_CONS);
-		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to activate client %d\n", res);
+		goto fail_pm_activate;
 	}
 
-	if (ipa_pm_is_used()) {
-		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("fail to activate client %d\n", res);
-			goto fail_pm_activate;
-		}
-		ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
-		if (res) {
-			IPA_MHI_ERR("fail to activate client %d\n", res);
-			goto fail_pm_activate_modem;
-		}
-	} else {
-		res = ipa_mhi_request_prod();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
-			goto fail_request_prod;
-		}
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to activate client %d\n", res);
+		goto fail_pm_activate_modem;
 	}
 
 	/* resume all UL channels */
@@ -2326,15 +2076,13 @@ int ipa_mhi_resume(void)
 		goto fail_resume_ul_channels;
 	}
 
-	if (!dl_channel_resumed) {
-		res = ipa_mhi_resume_channels(false,
-					ipa_mhi_client_ctx->dl_channels,
-					IPA_MHI_MAX_DL_CHANNELS);
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
-				res);
-			goto fail_resume_dl_channels2;
-		}
+	res = ipa_mhi_resume_channels(false,
+				ipa_mhi_client_ctx->dl_channels,
+				IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+			res);
+		goto fail_resume_dl_channels;
 	}
 
 	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
@@ -2352,22 +2100,14 @@ int ipa_mhi_resume(void)
 fail_set_state:
 	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
 		IPA_MHI_MAX_DL_CHANNELS);
-fail_resume_dl_channels2:
+fail_resume_dl_channels:
 	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
 		IPA_MHI_MAX_UL_CHANNELS);
 fail_resume_ul_channels:
-	if (!ipa_pm_is_used())
-		ipa_mhi_release_prod();
-fail_request_prod:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
 fail_pm_activate_modem:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
 fail_pm_activate:
-	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
-		IPA_MHI_MAX_DL_CHANNELS);
-fail_resume_dl_channels:
 	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
 	return res;
 }
@@ -2449,74 +2189,6 @@ static void ipa_mhi_debugfs_destroy(void)
 	debugfs_remove_recursive(dent);
 }
 
-static void ipa_mhi_delete_rm_resources(void)
-{
-	int res;
-
-	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED  &&
-		ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
-
-		IPA_MHI_DBG("release prod\n");
-		res = ipa_mhi_release_prod();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n",
-				res);
-			goto fail;
-		}
-		IPA_MHI_DBG("wait for cons release\n");
-		res = ipa_mhi_wait_for_cons_release();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release%d\n",
-				res);
-			goto fail;
-		}
-
-		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
-			IPA_MHI_SUSPEND_SLEEP_MAX);
-
-		IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_MHI_CONS);
-		if (res) {
-			IPA_MHI_ERR(
-				"Error deleting dependency %d->%d, res=%d\n",
-				IPA_RM_RESOURCE_Q6_PROD,
-				IPA_RM_RESOURCE_MHI_CONS,
-				res);
-			goto fail;
-		}
-		IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-		if (res) {
-			IPA_MHI_ERR(
-				"Error deleting dependency %d->%d, res=%d\n",
-				IPA_RM_RESOURCE_MHI_PROD,
-				IPA_RM_RESOURCE_Q6_CONS,
-				res);
-			goto fail;
-		}
-	}
-
-	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-	if (res) {
-		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
-			IPA_RM_RESOURCE_MHI_PROD, res);
-		goto fail;
-	}
-
-	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
-	if (res) {
-		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
-			IPA_RM_RESOURCE_MHI_CONS, res);
-		goto fail;
-	}
-
-	return;
-fail:
-	ipa_assert();
-}
-
 static void ipa_mhi_deregister_pm(void)
 {
 	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
@@ -2563,11 +2235,7 @@ void ipa_mhi_destroy(void)
 		ipa_uc_mhi_cleanup();
 	}
 
-	if (ipa_pm_is_used())
-		ipa_mhi_deregister_pm();
-	else
-		ipa_mhi_delete_rm_resources();
-
+	ipa_mhi_deregister_pm();
 	ipa_dma_destroy();
 	ipa_mhi_debugfs_destroy();
 	destroy_workqueue(ipa_mhi_client_ctx->wq);
@@ -2653,60 +2321,6 @@ static int ipa_mhi_register_pm(void)
 	return res;
 }
 
-static int ipa_mhi_create_rm_resources(void)
-{
-	int res;
-	struct ipa_rm_create_params mhi_prod_params;
-	struct ipa_rm_create_params mhi_cons_params;
-	struct ipa_rm_perf_profile profile;
-
-	/* Create PROD in IPA RM */
-	memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
-	mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
-	mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
-	mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
-	res = ipa_rm_create_resource(&mhi_prod_params);
-	if (res) {
-		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
-		goto fail_create_rm_prod;
-	}
-
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = 1000;
-	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
-	if (res) {
-		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
-		goto fail_perf_rm_prod;
-	}
-
-	/* Create CONS in IPA RM */
-	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
-	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
-	mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
-	mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
-	mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
-	res = ipa_rm_create_resource(&mhi_cons_params);
-	if (res) {
-		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
-		goto fail_create_rm_cons;
-	}
-
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = 1000;
-	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
-	if (res) {
-		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
-		goto fail_perf_rm_cons;
-	}
-fail_perf_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
-fail_create_rm_cons:
-fail_perf_rm_prod:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-fail_create_rm_prod:
-	return res;
-}
-
 /**
  * ipa_mhi_init() - Initialize IPA MHI driver
  * @params: initialization params
@@ -2715,7 +2329,7 @@ static int ipa_mhi_create_rm_resources(void)
  * Driver. When this function returns device can move to READY state.
  * This function is doing the following:
  *	- Initialize MHI IPA internal data structures
- *	- Create IPA RM resources
+ *	- Register with PM
  *	- Initialize debugfs
  *
  * Return codes: 0	  : success
@@ -2763,10 +2377,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
 	ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
 	ipa_mhi_client_ctx->cb_notify = params->notify;
 	ipa_mhi_client_ctx->cb_priv = params->priv;
-	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
-	init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
 	spin_lock_init(&ipa_mhi_client_ctx->state_lock);
-	init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
 	ipa_mhi_client_ctx->msi = params->msi;
 	ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
 	ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
@@ -2789,14 +2400,11 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
 		goto fail_dma_init;
 	}
 
-	if (ipa_pm_is_used())
-		res = ipa_mhi_register_pm();
-	else
-		res = ipa_mhi_create_rm_resources();
+	res = ipa_mhi_register_pm();
 	if (res) {
-		IPA_MHI_ERR("failed to create RM resources\n");
+		IPA_MHI_ERR("failed to create PM resources\n");
 		res = -EFAULT;
-		goto fail_rm;
+		goto fail_pm;
 	}
 
 	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
@@ -2818,7 +2426,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 
-fail_rm:
+fail_pm:
 	ipa_dma_destroy();
 fail_dma_init:
 	destroy_workqueue(ipa_mhi_client_ctx->wq);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 45e4e39..807c75a 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa_uc_offload.h>
@@ -69,9 +69,6 @@ struct ipa_uc_offload_ctx {
 
 static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
 
-static int ipa_uc_ntn_cons_release(void);
-static int ipa_uc_ntn_cons_request(void);
-static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
 
 static int ipa_commit_partial_hdr(
 	struct ipa_ioc_add_hdr *hdr,
@@ -150,36 +147,6 @@ static void ipa_uc_offload_ntn_deregister_pm_client(
 	ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
 	ipa_pm_deregister(ntn_ctx->pm_hdl);
 }
-static int ipa_uc_offload_ntn_create_rm_resources(
-	struct ipa_uc_offload_ctx *ntn_ctx)
-{
-	int ret;
-	struct ipa_rm_create_params param;
-
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
-	param.reg_params.user_data = ntn_ctx;
-	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
-	param.floor_voltage = IPA_VOLTAGE_SVS;
-	ret = ipa_rm_create_resource(&param);
-	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
-		return -EFAULT;
-	}
-
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
-	param.request_resource = ipa_uc_ntn_cons_request;
-	param.release_resource = ipa_uc_ntn_cons_release;
-	ret = ipa_rm_create_resource(&param);
-	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-		return -EFAULT;
-	}
-
-	return 0;
-}
 
 static int ipa_uc_offload_ntn_reg_intf(
 	struct ipa_uc_offload_intf_params *inp,
@@ -197,12 +164,9 @@ static int ipa_uc_offload_ntn_reg_intf(
 
 	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
 					 inp->netdev_name);
-	if (ipa_pm_is_used())
-		ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
-	else
-		ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
+	ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
 	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
+		IPA_UC_OFFLOAD_ERR("fail to register PM client\n");
 		return -EFAULT;
 	}
 	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
@@ -282,12 +246,7 @@ static int ipa_uc_offload_ntn_reg_intf(
 fail:
 	kfree(hdr);
 fail_alloc:
-	if (ipa_pm_is_used()) {
-		ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
-	} else {
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-	}
+	ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
 	return ret;
 }
 
@@ -336,56 +295,6 @@ int ipa_uc_offload_reg_intf(
 }
 EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
 
-static int ipa_uc_ntn_cons_release(void)
-{
-	return 0;
-}
-
-static int ipa_uc_ntn_cons_request(void)
-{
-	int ret = 0;
-	struct ipa_uc_offload_ctx *ntn_ctx;
-
-	ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
-	if (!ntn_ctx) {
-		IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
-		ret = -EFAULT;
-	} else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
-		IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
-		ret = -EFAULT;
-	}
-
-	return ret;
-}
-
-static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
-		unsigned long data)
-{
-	struct ipa_uc_offload_ctx *offload_ctx;
-
-	offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
-	if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
-		  offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
-		IPA_UC_OFFLOAD_ERR("Invalid user data\n");
-		return;
-	}
-
-	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
-		IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
-
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		complete_all(&offload_ctx->ntn_completion);
-		break;
-
-	case IPA_RM_RESOURCE_RELEASED:
-		break;
-
-	default:
-		IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
-		break;
-	}
-}
 
 static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
 	struct ipa_ntn_setup_info *source)
@@ -456,34 +365,10 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
 		return -EINVAL;
 	}
 
-	if (ipa_pm_is_used()) {
-		result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
-		if (result) {
-			IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
-			return result;
-		}
-	} else {
-		result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-		if (result) {
-			IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n",
-				result);
-			return result;
-		}
-
-		result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-		if (result == -EINPROGRESS) {
-			if (wait_for_completion_timeout(&ntn_ctx->ntn_completion
-				, 10*HZ) == 0) {
-				IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n");
-				result = -EFAULT;
-				goto fail;
-			}
-		} else if (result != 0) {
-			IPA_UC_OFFLOAD_ERR("fail to request resource\n");
-			result = -EFAULT;
-			goto fail;
-		}
+	result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
+		return result;
 	}
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
@@ -514,9 +399,6 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
 	}
 
 fail:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
 	return result;
 }
 
@@ -565,42 +447,6 @@ int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
 }
 EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
 
-int ipa_set_perf_profile(struct ipa_perf_profile *profile)
-{
-	struct ipa_rm_perf_profile rm_profile;
-	enum ipa_rm_resource_name resource_name;
-
-	if (profile == NULL) {
-		IPA_UC_OFFLOAD_ERR("Invalid input\n");
-		return -EINVAL;
-	}
-
-	rm_profile.max_supported_bandwidth_mbps =
-		profile->max_supported_bw_mbps;
-
-	if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
-		resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
-	} else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
-		resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
-	} else {
-		IPA_UC_OFFLOAD_ERR("not supported\n");
-		return -EINVAL;
-	}
-
-	if (ipa_pm_is_used())
-		return ipa_pm_set_throughput(
-			ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
-			profile->max_supported_bw_mbps);
-
-	if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
-		IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
-		return -EFAULT;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_set_perf_profile);
-
 static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 {
 	int ipa_ep_idx_ul, ipa_ep_idx_dl;
@@ -612,28 +458,11 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 	}
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
-
-	if (ipa_pm_is_used()) {
-		ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
-		if (ret) {
-			IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
-				ret);
-			return -EFAULT;
-		}
-	} else {
-		ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-		if (ret) {
-			IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
-				ret);
-			return -EFAULT;
-		}
-
-		ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-		if (ret) {
-			IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
-			return -EFAULT;
-		}
+	ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
+			ret);
+		return -EFAULT;
 	}
 
 	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
@@ -695,19 +524,7 @@ static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
 	int len, result = 0;
 	struct ipa_ioc_del_hdr *hdr;
 
-	if (ipa_pm_is_used()) {
-		ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
-	} else {
-		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
-			IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
-			return -EFAULT;
-		}
-
-		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
-			IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
-			return -EFAULT;
-		}
-	}
+	ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
 
 	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
 	hdr = kzalloc(len, GFP_KERNEL);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index e0c9b4f..ab096de 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -14,7 +14,6 @@
 #include "../ipa_v3/ipa_i.h"
 #include "../ipa_rm_i.h"
 
-#define IPA_USB_RM_TIMEOUT_MSEC 10000
 #define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
 
 #define IPA_HOLB_TMR_EN 0x1
@@ -99,24 +98,6 @@ struct ipa3_usb_teth_prot_context {
 	void *user_data;
 };
 
-enum ipa3_usb_cons_state {
-	IPA_USB_CONS_GRANTED,
-	IPA_USB_CONS_RELEASED
-};
-
-struct ipa3_usb_rm_context {
-	struct ipa_rm_create_params prod_params;
-	struct ipa_rm_create_params cons_params;
-	bool prod_valid;
-	bool cons_valid;
-	struct completion prod_comp;
-	enum ipa3_usb_cons_state cons_state;
-	/* consumer was requested*/
-	bool cons_requested;
-	/* consumer was requested and released before it was granted*/
-	bool cons_requested_released;
-};
-
 struct ipa3_usb_pm_context {
 	struct ipa_pm_register_params reg_params;
 	struct work_struct *remote_wakeup_work;
@@ -157,10 +138,9 @@ struct ipa3_usb_teth_prot_conn_params {
 
 /**
  * Transport type - could be either data tethering or DPL
- * Each transport has it's own RM resources and statuses
+ * Each transport has it's own PM resources and statuses
  */
 struct ipa3_usb_transport_type_ctx {
-	struct ipa3_usb_rm_context rm_ctx;
 	struct ipa3_usb_pm_context pm_ctx;
 	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
 	void *user_data;
@@ -231,8 +211,6 @@ struct ipa3_usb_status_dbg_info {
 	const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
 	const char *teth_connected_prot;
 	const char *dpl_connected_prot;
-	const char *teth_cons_state;
-	const char *dpl_cons_state;
 };
 
 static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
@@ -326,7 +304,6 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
 	int state_legal = false;
 	enum ipa3_usb_state state;
 	bool rwakeup_pending;
-	struct ipa3_usb_rm_context *rm_ctx;
 
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
 	state = ipa3_usb_ctx->ttype_ctx[ttype].state;
@@ -404,25 +381,6 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
 			ipa3_usb_state_to_string(new_state));
 	}
 
-	if (!ipa_pm_is_used() &&
-		state_legal && (new_state == IPA_USB_CONNECTED)) {
-		rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-		if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
-			rm_ctx->cons_requested_released) {
-			rm_ctx->cons_requested = false;
-			rm_ctx->cons_requested_released =
-			false;
-		}
-		/* Notify RM that consumer is granted */
-		if (rm_ctx->cons_requested) {
-			ipa_rm_notify_completion(
-				IPA_RM_RESOURCE_GRANTED,
-				rm_ctx->cons_params.name);
-			rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
-			rm_ctx->cons_requested = false;
-		}
-	}
-
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 	return state_legal;
 }
@@ -542,42 +500,6 @@ void ipa3_usb_device_ready_notify_cb(void)
 	IPA_USB_DBG_LOW("exit\n");
 }
 
-static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
-		enum  ipa3_usb_transport_type ttype)
-{
-	struct ipa3_usb_rm_context *rm_ctx;
-
-	IPA_USB_DBG_LOW("entry\n");
-
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		IPA_USB_DBG(":%s granted\n",
-			ipa_rm_resource_str(rm_ctx->prod_params.name));
-		complete_all(&rm_ctx->prod_comp);
-		break;
-	case IPA_RM_RESOURCE_RELEASED:
-		IPA_USB_DBG(":%s released\n",
-			ipa_rm_resource_str(rm_ctx->prod_params.name));
-		complete_all(&rm_ctx->prod_comp);
-		break;
-	}
-	IPA_USB_DBG_LOW("exit\n");
-}
-
-static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
-			     unsigned long data)
-{
-	ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
-}
-
-static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data,
-		enum ipa_rm_event event, unsigned long data)
-{
-	ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
-}
-
 static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
 {
 	bool rwakeup_pending;
@@ -585,6 +507,7 @@ static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
 	enum ipa3_usb_transport_type ttype =
 		IPA_USB_TRANSPORT_TETH;
 
+
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
 	rwakeup_pending =
 		ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending;
@@ -618,116 +541,6 @@ static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 }
 
-static int ipa3_usb_cons_request_resource_cb_do(
-	enum ipa3_usb_transport_type ttype,
-	struct work_struct *remote_wakeup_work)
-{
-	struct ipa3_usb_rm_context *rm_ctx;
-	unsigned long flags;
-	int result;
-
-	IPA_USB_DBG_LOW("entry\n");
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	IPA_USB_DBG("state is %s\n",
-			ipa3_usb_state_to_string(
-				ipa3_usb_ctx->ttype_ctx[ttype].state));
-	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
-	case IPA_USB_CONNECTED:
-	case IPA_USB_SUSPENDED_NO_RWAKEUP:
-		rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
-		result = 0;
-		break;
-	case IPA_USB_SUSPEND_REQUESTED:
-		rm_ctx->cons_requested = true;
-		if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED)
-			result = 0;
-		else
-			result = -EINPROGRESS;
-		break;
-	case IPA_USB_SUSPENDED:
-		if (!rm_ctx->cons_requested) {
-			rm_ctx->cons_requested = true;
-			queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
-		}
-		result = -EINPROGRESS;
-		break;
-	default:
-		rm_ctx->cons_requested = true;
-		result = -EINPROGRESS;
-		break;
-	}
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-	IPA_USB_DBG_LOW("exit with %d\n", result);
-	return result;
-}
-
-static int ipa3_usb_cons_request_resource_cb(void)
-{
-	return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH,
-		&ipa3_usb_notify_remote_wakeup_work);
-}
-
-static int ipa3_usb_dpl_cons_request_resource_cb(void)
-{
-	return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL,
-		&ipa3_usb_dpl_notify_remote_wakeup_work);
-}
-
-static int ipa3_usb_cons_release_resource_cb_do(
-	enum ipa3_usb_transport_type ttype)
-{
-	unsigned long flags;
-	struct ipa3_usb_rm_context *rm_ctx;
-
-	IPA_USB_DBG_LOW("entry\n");
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	IPA_USB_DBG("state is %s\n",
-			ipa3_usb_state_to_string(
-			ipa3_usb_ctx->ttype_ctx[ttype].state));
-	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
-	case IPA_USB_SUSPENDED:
-		/* Proceed with the suspend if no DL/DPL data */
-		if (rm_ctx->cons_requested)
-			rm_ctx->cons_requested_released = true;
-		break;
-	case IPA_USB_SUSPEND_REQUESTED:
-		if (rm_ctx->cons_requested)
-			rm_ctx->cons_requested_released = true;
-		break;
-	case IPA_USB_STOPPED:
-	case IPA_USB_RESUME_IN_PROGRESS:
-	case IPA_USB_SUSPENDED_NO_RWAKEUP:
-		if (rm_ctx->cons_requested)
-			rm_ctx->cons_requested = false;
-		break;
-	case IPA_USB_CONNECTED:
-	case IPA_USB_INITIALIZED:
-		break;
-	default:
-		IPA_USB_ERR("received cons_release_cb in bad state: %s!\n",
-			ipa3_usb_state_to_string(
-				ipa3_usb_ctx->ttype_ctx[ttype].state));
-		WARN_ON(1);
-		break;
-	}
-
-	rm_ctx->cons_state = IPA_USB_CONS_RELEASED;
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-	IPA_USB_DBG_LOW("exit\n");
-	return 0;
-}
-
-static int ipa3_usb_cons_release_resource_cb(void)
-{
-	return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH);
-}
-
-static int ipa3_usb_dpl_cons_release_resource_cb(void)
-{
-	return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
-}
 
 static void ipa3_usb_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -857,76 +670,6 @@ static int ipa3_usb_deregister_pm(enum ipa3_usb_transport_type ttype)
 	return 0;
 }
 
-static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
-{
-	struct ipa3_usb_rm_context *rm_ctx;
-	int result = -EFAULT;
-	bool created = false;
-
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-
-	/* create PROD */
-	if (!rm_ctx->prod_valid) {
-		rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD :
-			IPA_RM_RESOURCE_USB_PROD;
-		rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS2;
-		rm_ctx->prod_params.reg_params.user_data = NULL;
-		rm_ctx->prod_params.reg_params.notify_cb =
-			IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			ipa3_usb_dpl_dummy_prod_notify_cb :
-			ipa3_usb_prod_notify_cb;
-		rm_ctx->prod_params.request_resource = NULL;
-		rm_ctx->prod_params.release_resource = NULL;
-		result = ipa_rm_create_resource(&rm_ctx->prod_params);
-		if (result) {
-			IPA_USB_ERR("Failed to create %s RM resource\n",
-				ipa_rm_resource_str(rm_ctx->prod_params.name));
-			return result;
-		}
-		rm_ctx->prod_valid = true;
-		created = true;
-		IPA_USB_DBG("Created %s RM resource\n",
-			ipa_rm_resource_str(rm_ctx->prod_params.name));
-	}
-
-	/* Create CONS */
-	if (!rm_ctx->cons_valid) {
-		rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			IPA_RM_RESOURCE_USB_DPL_CONS :
-			IPA_RM_RESOURCE_USB_CONS;
-		rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS2;
-		rm_ctx->cons_params.reg_params.user_data = NULL;
-		rm_ctx->cons_params.reg_params.notify_cb = NULL;
-		rm_ctx->cons_params.request_resource =
-			IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			ipa3_usb_dpl_cons_request_resource_cb :
-			ipa3_usb_cons_request_resource_cb;
-		rm_ctx->cons_params.release_resource =
-			IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			ipa3_usb_dpl_cons_release_resource_cb :
-			ipa3_usb_cons_release_resource_cb;
-		result = ipa_rm_create_resource(&rm_ctx->cons_params);
-		if (result) {
-			IPA_USB_ERR("Failed to create %s RM resource\n",
-				ipa_rm_resource_str(rm_ctx->cons_params.name));
-			goto create_cons_rsc_fail;
-		}
-		rm_ctx->cons_valid = true;
-		IPA_USB_DBG("Created %s RM resource\n",
-			ipa_rm_resource_str(rm_ctx->cons_params.name));
-	}
-
-	return 0;
-
-create_cons_rsc_fail:
-	if (created) {
-		rm_ctx->prod_valid = false;
-		ipa_rm_delete_resource(rm_ctx->prod_params.name);
-	}
-	return result;
-}
-
 static bool ipa3_usb_is_teth_switch_valid(enum ipa_usb_teth_prot new_teth)
 {
 	enum ipa_usb_teth_prot old_teth;
@@ -1018,14 +761,11 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
 		goto bad_params;
 	}
 
-	/* Create IPA RM USB resources */
+	/* Register with IPA PM */
 	teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot];
-	if (ipa_pm_is_used())
-		result = ipa3_usb_register_pm(ttype);
-	else
-		result = ipa3_usb_create_rm_resources(ttype);
+	result = ipa3_usb_register_pm(ttype);
 	if (result) {
-		IPA_USB_ERR("Failed creating IPA RM USB resources\n");
+		IPA_USB_ERR("Failed registering IPA PM\n");
 		goto bad_params;
 	}
 
@@ -1160,18 +900,7 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
 teth_prot_init_fail:
 	if ((IPA3_USB_IS_TTYPE_DPL(ttype))
 		|| (ipa3_usb_ctx->num_init_prot == 0)) {
-		if (ipa_pm_is_used()) {
-			ipa3_usb_deregister_pm(ttype);
-		} else {
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
-				false;
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
-				false;
-			ipa_rm_delete_resource(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
-			ipa_rm_delete_resource(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
-		}
+		ipa3_usb_deregister_pm(ttype);
 	}
 bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
@@ -1553,66 +1282,6 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
 	return 0;
 }
 
-static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
-{
-	int result;
-	struct ipa3_usb_rm_context *rm_ctx;
-	const char *rsrc_str;
-
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-	rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
-
-	IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
-	init_completion(&rm_ctx->prod_comp);
-	result = ipa_rm_request_resource(rm_ctx->prod_params.name);
-	if (result) {
-		if (result != -EINPROGRESS) {
-			IPA_USB_ERR("failed to request %s: %d\n",
-				rsrc_str, result);
-			return result;
-		}
-		result = wait_for_completion_timeout(&rm_ctx->prod_comp,
-				msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
-		if (result == 0) {
-			IPA_USB_ERR("timeout request %s\n", rsrc_str);
-			return -ETIME;
-		}
-	}
-
-	IPA_USB_DBG_LOW("%s granted\n", rsrc_str);
-	return 0;
-}
-
-static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
-{
-	int result;
-	struct ipa3_usb_rm_context *rm_ctx;
-	const char *rsrc_str;
-
-	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
-	rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
-
-	IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
-
-	init_completion(&rm_ctx->prod_comp);
-	result = ipa_rm_release_resource(rm_ctx->prod_params.name);
-	if (result) {
-		if (result != -EINPROGRESS) {
-			IPA_USB_ERR("failed to release %s: %d\n",
-				rsrc_str, result);
-			return result;
-		}
-		result = wait_for_completion_timeout(&rm_ctx->prod_comp,
-			msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
-		if (result == 0) {
-			IPA_USB_ERR("timeout release %s\n", rsrc_str);
-			return -ETIME;
-		}
-	}
-
-	IPA_USB_DBG_LOW("%s released\n", rsrc_str);
-	return 0;
-}
 
 static bool ipa3_usb_check_connect_params(
 	struct ipa_usb_xdci_connect_params_internal *params)
@@ -1660,41 +1329,6 @@ static int ipa3_usb_connect_teth_bridge(
 	return 0;
 }
 
-static int ipa3_usb_connect_dpl(void)
-{
-	int res = 0;
-
-	if (ipa_pm_is_used())
-		return 0;
-
-	/*
-	 * Add DPL dependency to RM dependency graph, first add_dependency call
-	 * is sync in order to make sure the IPA clocks are up before we
-	 * continue and notify the USB driver it may continue.
-	 */
-	res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
-				    IPA_RM_RESOURCE_Q6_CONS);
-	if (res < 0) {
-		IPA_USB_ERR("ipa_rm_add_dependency_sync() failed\n");
-		return res;
-	}
-
-	/*
-	 * this add_dependency call can't be sync since it will block until DPL
-	 * status is connected (which can happen only later in the flow),
-	 * the clocks are already up so the call doesn't need to block.
-	 */
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-				    IPA_RM_RESOURCE_USB_DPL_CONS);
-	if (res < 0 && res != -EINPROGRESS) {
-		IPA_USB_ERR("ipa_rm_add_dependency() failed\n");
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
-				IPA_RM_RESOURCE_Q6_CONS);
-		return res;
-	}
-
-	return 0;
-}
 
 static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
 {
@@ -1807,13 +1441,6 @@ static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
 
 		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
 			ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data;
-		result = ipa3_usb_connect_dpl();
-		if (result) {
-			IPA_USB_ERR("Failed connecting DPL result=%d\n",
-				result);
-			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
-			return result;
-		}
 		ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
 			IPA_USB_TETH_PROT_CONNECTED;
 		ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
@@ -1841,27 +1468,6 @@ static int ipa3_usb_disconnect_teth_bridge(void)
 	return 0;
 }
 
-static int ipa3_usb_disconnect_dpl(void)
-{
-	int res;
-
-	if (ipa_pm_is_used())
-		return 0;
-
-	/* Remove DPL RM dependency */
-	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
-				    IPA_RM_RESOURCE_Q6_CONS);
-	if (res)
-		IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n");
-
-	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-				 IPA_RM_RESOURCE_USB_DPL_CONS);
-	if (res)
-		IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n");
-
-	return 0;
-}
-
 static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
 {
 	int result = 0;
@@ -1919,9 +1525,6 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
 				ipa3_usb_teth_prot_to_string(teth_prot));
 			return -EPERM;
 		}
-		result = ipa3_usb_disconnect_dpl();
-		if (result)
-			break;
 		teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED;
 		IPA_USB_DBG("disconnected %s\n",
 			ipa3_usb_teth_prot_to_string(teth_prot));
@@ -1938,11 +1541,8 @@ static int ipa3_usb_xdci_connect_internal(
 	struct ipa_usb_xdci_connect_params_internal *params)
 {
 	int result = -EFAULT;
-	struct ipa_rm_perf_profile profile;
 	enum ipa3_usb_transport_type ttype;
 	struct ipa3_usb_teth_prot_conn_params *teth_prot_ptr;
-	struct ipa3_usb_rm_context *rm_ctx_ptr;
-	struct ipa3_usb_transport_type_ctx *t_ctx_ptr;
 
 	IPA_USB_DBG_LOW("entry\n");
 	if (params == NULL || !ipa3_usb_check_connect_params(params)) {
@@ -1960,7 +1560,6 @@ static int ipa3_usb_xdci_connect_internal(
 
 	teth_prot_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params;
 	teth_prot_ptr->ipa_to_usb_clnt_hdl = params->ipa_to_usb_clnt_hdl;
-	rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
 
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
 		teth_prot_ptr->usb_to_ipa_clnt_hdl =
@@ -1973,57 +1572,22 @@ static int ipa3_usb_xdci_connect_internal(
 		IPA_USB_ERR("failed setting xDCI EE scratch field\n");
 		return result;
 	}
-
-	if (ipa_pm_is_used()) {
-		/* perf profile is not set on  USB DPL pipe */
-		if (ttype != IPA_USB_TRANSPORT_DPL) {
-			result = ipa_pm_set_throughput(
-				ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
-				params->max_supported_bandwidth_mbps);
-			if (result) {
-				IPA_USB_ERR("failed to set perf profile\n");
-				return result;
-			}
-		}
-
-		result = ipa_pm_activate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	/* perf profile is not set on  USB DPL pipe */
+	if (ttype != IPA_USB_TRANSPORT_DPL) {
+		result = ipa_pm_set_throughput(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
+			params->max_supported_bandwidth_mbps);
 		if (result) {
-			IPA_USB_ERR("failed to activate pm\n");
+			IPA_USB_ERR("failed to set pm throughput\n");
 			return result;
 		}
-	} else {
-		/* Set RM PROD & CONS perf profile */
-		profile.max_supported_bandwidth_mbps =
-				params->max_supported_bandwidth_mbps;
-		result = ipa_rm_set_perf_profile(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
-			&profile);
+	}
 
-		t_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype];
-
-		if (result) {
-			IPA_USB_ERR("failed to set %s perf profile\n",
-				ipa_rm_resource_str(
-					t_ctx_ptr->rm_ctx.prod_params.name));
-			return result;
-		}
-
-		result = ipa_rm_set_perf_profile(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
-			&profile);
-
-		if (result) {
-			IPA_USB_ERR("failed to set %s perf profile\n",
-				ipa_rm_resource_str(
-					t_ctx_ptr->rm_ctx.cons_params.name));
-			return result;
-		}
-
-		/* Request PROD */
-		result = ipa3_usb_request_prod(ttype);
-		if (result)
-			return result;
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("failed to activate pm\n");
+		return result;
 	}
 
 	if (params->teth_prot != IPA_USB_DIAG) {
@@ -2050,8 +1614,8 @@ static int ipa3_usb_xdci_connect_internal(
 	if (ipa3_is_mhip_offload_enabled()) {
 		result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
 		if (result) {
-			IPA_USB_ERR("failed to connect MHIP channel\n");
-			goto connect_dl_fail;
+			IPA_USB_ERR("failed to enable MHIP channel\n");
+			goto connect_teth_prot_fail;
 		}
 	}
 
@@ -2059,7 +1623,7 @@ static int ipa3_usb_xdci_connect_internal(
 	result = ipa3_usb_connect_teth_prot(params->teth_prot);
 	if (result) {
 		IPA_USB_ERR("failed to connect teth protocol\n");
-		goto connect_teth_prot_fail;
+		goto connect_mhip_prot_fail;
 	}
 
 	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
@@ -2073,6 +1637,9 @@ static int ipa3_usb_xdci_connect_internal(
 
 state_change_connected_fail:
 	ipa3_usb_disconnect_teth_prot(params->teth_prot);
+connect_mhip_prot_fail:
+	if (ipa3_is_mhip_offload_enabled())
+		ipa_mpm_mhip_xdci_pipe_disable(params->teth_prot);
 connect_teth_prot_fail:
 	ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1);
 	ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl);
@@ -2084,35 +1651,19 @@ static int ipa3_usb_xdci_connect_internal(
 		ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
 	}
 connect_ul_fail:
-	if (ipa_pm_is_used())
 		ipa_pm_deactivate_sync(
 			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		ipa3_usb_release_prod(ttype);
 	return result;
 }
 
 #ifdef CONFIG_DEBUG_FS
 static char dbg_buff[IPA_USB_MAX_MSG_LEN];
 
-static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state)
-{
-	switch (state) {
-	case IPA_USB_CONS_GRANTED:
-		return "CONS_GRANTED";
-	case IPA_USB_CONS_RELEASED:
-		return "CONS_RELEASED";
-	}
-
-	return "UNSUPPORTED";
-}
-
 static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
 {
 	int res;
 	int i;
 	unsigned long flags;
-	struct ipa3_usb_rm_context *rm_ctx_ptr;
 
 	IPA_USB_DBG_LOW("entry\n");
 
@@ -2132,18 +1683,10 @@ static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
 	memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
 
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx;
 	status->teth_state = ipa3_usb_state_to_string(
 		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
 	status->dpl_state = ipa3_usb_state_to_string(
 		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
-	if (rm_ctx_ptr->cons_valid)
-		status->teth_cons_state = ipa3_usb_cons_state_to_string(
-			rm_ctx_ptr->cons_state);
-	rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx;
-	if (rm_ctx_ptr->cons_valid)
-		status->dpl_cons_state = ipa3_usb_cons_state_to_string(
-			rm_ctx_ptr->cons_state);
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 
 	for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) {
@@ -2233,18 +1776,6 @@ static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
 				(status.teth_connected_prot ||
 				status.dpl_connected_prot) ? "\n" : "None\n");
 		cnt += nbytes;
-
-		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
-				"USB Tethering Consumer State: %s\n",
-				status.teth_cons_state ?
-				status.teth_cons_state : "Invalid");
-		cnt += nbytes;
-
-		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
-				"DPL Consumer State: %s\n",
-				status.dpl_cons_state ? status.dpl_cons_state :
-				"Invalid");
-		cnt += nbytes;
 	}
 
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
@@ -2520,14 +2051,6 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		if (orig_state != IPA_USB_SUSPENDED) {
 			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
 				flags);
-			/* Stop UL MHIP channel */
-			if (ipa3_is_mhip_offload_enabled()) {
-				result = ipa_mpm_mhip_ul_data_stop(teth_prot);
-				if (result) {
-					IPA_USB_ERR("fail UL MHIPData stop\n");
-					goto bad_params;
-				}
-			}
 			/* Stop UL channel */
 			result = ipa3_xdci_disconnect(ul_clnt_hdl,
 				true,
@@ -2543,12 +2066,11 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	} else
 		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 
-	if (teth_prot == IPA_USB_RMNET) {
-		IPA_USB_DBG("USB suspend resetting dma mode\n");
-		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
-			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+	/* Stop UL/DL MHIP channels */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
 		if (result) {
-			IPA_USB_ERR("failed to reset dma mode\n");
+			IPA_USB_ERR("failed to disconnect MHIP pipe\n");
 			goto bad_params;
 		}
 	}
@@ -2557,14 +2079,6 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 			teth_prot);
 	if (result)
 		goto bad_params;
-	/* Stop UL/DL MHIP channels */
-	if (ipa3_is_mhip_offload_enabled()) {
-		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
-		if (result) {
-			IPA_USB_ERR("failed to disconnect MHIP channel\n");
-			goto bad_params;
-		}
-	}
 
 	/* Disconnect tethering protocol */
 	result = ipa3_usb_disconnect_teth_prot(teth_prot);
@@ -2572,13 +2086,10 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		goto bad_params;
 
 	if (orig_state != IPA_USB_SUSPENDED) {
-		if (ipa_pm_is_used())
-			result = ipa_pm_deactivate_sync(
-				ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-		else
-			result = ipa3_usb_release_prod(ttype);
+		result = ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 		if (result) {
-			IPA_USB_ERR("failed to release PROD\n");
+			IPA_USB_ERR("failed to deactivate PM\n");
 			goto bad_params;
 		}
 	}
@@ -2683,20 +2194,8 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
 		if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
 			IPA_USB_ERR(
 				"failed to change state to invalid\n");
-		if (ipa_pm_is_used()) {
-			ipa3_usb_deregister_pm(ttype);
-			ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
-		} else {
-			ipa_rm_delete_resource(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
-				false;
-			ipa_rm_delete_resource(
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
-			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
-				false;
-			ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
-		}
+		ipa3_usb_deregister_pm(ttype);
+		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
 	}
 
 	IPA_USB_DBG_LOW("exit\n");
@@ -2751,18 +2250,24 @@ static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		ipa3_usb_ctx->qmi_req_id++;
 	}
 
+	/* Stop MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP pipe\n");
+			goto start_ul;
+		}
+	}
+
 	/* Disconnect tethering protocol */
 	result = ipa3_usb_disconnect_teth_prot(teth_prot);
 	if (result)
-		goto start_ul;
+		goto enable_mhip;
 
-	if (ipa_pm_is_used())
-		result = ipa_pm_deactivate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		result = ipa3_usb_release_prod(ttype);
+	result = ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 	if (result) {
-		IPA_USB_ERR("failed to release PROD\n");
+		IPA_USB_ERR("failed to deactivate PM\n");
 		goto connect_teth;
 	}
 
@@ -2775,6 +2280,9 @@ static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 
 connect_teth:
 	(void)ipa3_usb_connect_teth_prot(teth_prot);
+enable_mhip:
+	if (ipa3_is_mhip_offload_enabled())
+		(void)ipa_mpm_mhip_xdci_pipe_enable(teth_prot);
 start_ul:
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
 		(void)ipa3_xdci_connect(ul_clnt_hdl);
@@ -2788,7 +2296,6 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup)
 {
 	int result = 0;
-	unsigned long flags;
 	enum ipa3_usb_transport_type ttype;
 
 	mutex_lock(&ipa3_usb_ctx->general_mutex);
@@ -2827,16 +2334,6 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		goto bad_params;
 	}
 
-	if (teth_prot == IPA_USB_RMNET) {
-		IPA_USB_DBG("USB suspend resetting dma mode\n");
-		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
-			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
-		if (result) {
-			IPA_USB_ERR("failed to reset dma mode\n");
-			goto bad_params;
-		}
-	}
-
 	/* Stop UL channel & suspend DL/DPL EP */
 	result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
 		true,
@@ -2847,57 +2344,23 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	}
 	ipa3_usb_ctx->qmi_req_id++;
 
-	if (ipa_pm_is_used())
-		result = ipa_pm_deactivate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		result = ipa3_usb_release_prod(ttype);
+	result = ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 	if (result) {
-		IPA_USB_ERR("failed to release PROD\n");
-		goto release_prod_fail;
+		IPA_USB_ERR("failed to deactivate PM IPA client\n");
+		goto pm_deactivate_fail;
 	}
 
-	/* Check if DL/DPL data pending */
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
-		IPA_USB_CONS_GRANTED &&
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
-
-		IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
-		queue_work(ipa3_usb_ctx->wq,
-			IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			&ipa3_usb_dpl_notify_remote_wakeup_work :
-			&ipa3_usb_notify_remote_wakeup_work);
-	}
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
 	/* Change state to SUSPENDED */
 	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
 		IPA_USB_ERR("failed to change state to suspended\n");
 
-	/* Check if DL/DPL data pending */
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
-		IPA_USB_DBG_LOW(
-			"DL/DPL data is pending, invoking remote wakeup\n");
-		queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ?
-			&ipa3_usb_dpl_notify_remote_wakeup_work :
-			&ipa3_usb_notify_remote_wakeup_work);
-	}
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-	/* Stop MHIP channel */
-	if (ipa3_is_mhip_offload_enabled()) {
-		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
-		if (result) {
-			IPA_USB_ERR("failed to disconnect MHIP channel\n");
-			goto release_prod_fail;
-		}
-	}
+
 	IPA_USB_DBG_LOW("exit\n");
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return 0;
 
-release_prod_fail:
+pm_deactivate_fail:
 	ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
 		IPA3_USB_IS_TTYPE_DPL(ttype));
 suspend_fail:
@@ -2923,12 +2386,9 @@ static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		IPA3_USB_IS_TTYPE_DPL(ttype) ?
 		"DPL channel":"Data Tethering channels");
 
-	/* Request USB_PROD */
-	if (ipa_pm_is_used())
-		result = ipa_pm_activate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		result = ipa3_usb_request_prod(ttype);
+	/* Activate PM */
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 	if (result)
 		goto fail_exit;
 
@@ -2955,15 +2415,25 @@ static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		goto stop_ul;
 	}
 
+	/* Start MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_enable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to enable MHIP pipe\n");
+			goto stop_dl;
+		}
+	}
 	/* Change state to CONNECTED */
 	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
 		IPA_USB_ERR("failed to change state to connected\n");
 		result = -EFAULT;
-		goto stop_dl;
+		goto stop_mhip;
 	}
 
 	return 0;
-
+stop_mhip:
+	if (ipa3_is_mhip_offload_enabled())
+		(void)ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
 stop_dl:
 	(void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
 stop_ul:
@@ -2975,11 +2445,8 @@ static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 disconn_teth:
 	(void)ipa3_usb_disconnect_teth_prot(teth_prot);
 release_prod:
-	if (ipa_pm_is_used())
-		(void)ipa_pm_deactivate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		(void)ipa3_usb_release_prod(ttype);
+	(void)ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 fail_exit:
 	return result;
 }
@@ -3015,8 +2482,7 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) {
 		result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl,
 			dl_clnt_hdl, teth_prot);
-		mutex_unlock(&ipa3_usb_ctx->general_mutex);
-		return result;
+		goto bad_params;
 	}
 
 	IPA_USB_DBG("Start resume sequence: %s\n",
@@ -3030,14 +2496,11 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		goto bad_params;
 	}
 
-	/* Request USB_PROD */
-	if (ipa_pm_is_used())
-		result = ipa_pm_activate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		result = ipa3_usb_request_prod(ttype);
+	/* Activate PM */
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
 	if (result)
-		goto prod_req_fail;
+		goto activate_pm_fail;
 
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
 		/* Start UL channel */
@@ -3078,12 +2541,9 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 			IPA_USB_ERR("Error stopping UL channel: %d\n", result);
 	}
 start_ul_fail:
-	if (ipa_pm_is_used())
-		ipa_pm_deactivate_sync(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
-	else
-		ipa3_usb_release_prod(ttype);
-prod_req_fail:
+	ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+activate_pm_fail:
 	/* Change state back to prev_state */
 	if (!ipa3_usb_set_state(prev_state, true, ttype))
 		IPA_USB_ERR("failed to change state back to %s\n",
@@ -3128,17 +2588,12 @@ static int __init ipa3_usb_init(void)
 	pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work;
 
 	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
-		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
-		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
-		init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp);
 		ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
 	}
 
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
 	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
 		ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
-		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state =
-			IPA_USB_CONS_RELEASED;
 	}
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 
@@ -3177,6 +2632,53 @@ static void ipa3_usb_exit(void)
 	kfree(ipa3_usb_ctx);
 }
 
+/**
+ * ipa3_get_usb_gsi_stats() - Query USB gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_usb_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL usb_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_USB_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+
+	return 0;
+}
+
+
 arch_initcall(ipa3_usb_init);
 module_exit(ipa3_usb_exit);
 
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index 3200a19..90082fc 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -379,44 +379,6 @@ int ipa_wdi_dereg_intf(const char *netdev_name)
 }
 EXPORT_SYMBOL(ipa_wdi_dereg_intf);
 
-static void ipa_wdi_rm_notify(void *user_data, enum ipa_rm_event event,
-		unsigned long data)
-{
-	if (!ipa_wdi_ctx) {
-		IPA_WDI_ERR("Invalid context\n");
-		return;
-	}
-
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		complete_all(&ipa_wdi_ctx->wdi_completion);
-		break;
-
-	case IPA_RM_RESOURCE_RELEASED:
-		break;
-
-	default:
-		IPA_WDI_ERR("Invalid RM Evt: %d", event);
-		break;
-	}
-}
-
-static int ipa_wdi_cons_release(void)
-{
-	return 0;
-}
-
-static int ipa_wdi_cons_request(void)
-{
-	int ret = 0;
-
-	if (!ipa_wdi_ctx) {
-		IPA_WDI_ERR("wdi ctx is not initialized\n");
-		ret = -EFAULT;
-	}
-
-	return ret;
-}
 
 static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -427,7 +389,6 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
 			struct ipa_wdi_conn_out_params *out)
 {
 	int i, j, ret = 0;
-	struct ipa_rm_create_params param;
 	struct ipa_pm_register_params pm_params;
 	struct ipa_wdi_in_params in_tx;
 	struct ipa_wdi_in_params in_rx;
@@ -463,46 +424,15 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
 		}
 	}
 
-	if (!ipa_pm_is_used()) {
-		memset(&param, 0, sizeof(param));
-		param.name = IPA_RM_RESOURCE_WLAN_PROD;
-		param.reg_params.user_data = ipa_wdi_ctx;
-		param.reg_params.notify_cb = ipa_wdi_rm_notify;
-		param.floor_voltage = IPA_VOLTAGE_SVS;
-		ret = ipa_rm_create_resource(&param);
-		if (ret) {
-			IPA_WDI_ERR("fail to create WLAN_PROD resource\n");
-			ret = -EFAULT;
-			goto fail_setup_sys_pipe;
-		}
-
-		memset(&param, 0, sizeof(param));
-		param.name = IPA_RM_RESOURCE_WLAN_CONS;
-		param.request_resource = ipa_wdi_cons_request;
-		param.release_resource = ipa_wdi_cons_release;
-		ret = ipa_rm_create_resource(&param);
-		if (ret) {
-			IPA_WDI_ERR("fail to create WLAN_CONS resource\n");
-			goto fail_create_rm_cons;
-		}
-
-		if (ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
-			IPA_RM_RESOURCE_APPS_CONS)) {
-			IPA_WDI_ERR("fail to add rm dependency\n");
-			ret = -EFAULT;
-			goto fail_add_dependency;
-		}
-	} else {
-		memset(&pm_params, 0, sizeof(pm_params));
-		pm_params.name = "wdi";
-		pm_params.callback = ipa_wdi_pm_cb;
-		pm_params.user_data = NULL;
-		pm_params.group = IPA_PM_GROUP_DEFAULT;
-		if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
-			IPA_WDI_ERR("fail to register ipa pm\n");
-			ret = -EFAULT;
-			goto fail_setup_sys_pipe;
-		}
+	memset(&pm_params, 0, sizeof(pm_params));
+	pm_params.name = "wdi";
+	pm_params.callback = ipa_wdi_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
+		IPA_WDI_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_setup_sys_pipe;
 	}
 
 	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
@@ -631,17 +561,8 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
 fail:
 	ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl);
 fail_connect_pipe:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-	else
-		ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
-fail_add_dependency:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
-fail_create_rm_cons:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
+	ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
+
 fail_setup_sys_pipe:
 	for (j = 0; j < i; j++)
 		ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]);
@@ -690,27 +611,9 @@ int ipa_wdi_disconn_pipes(void)
 		}
 	}
 
-	if (!ipa_pm_is_used()) {
-		if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
-					IPA_RM_RESOURCE_APPS_CONS)) {
-			IPA_WDI_ERR("fail to delete rm dependency\n");
-			return -EFAULT;
-		}
-
-		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
-			IPA_WDI_ERR("fail to delete WLAN_PROD resource\n");
-			return -EFAULT;
-		}
-
-		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS)) {
-			IPA_WDI_ERR("fail to delete WLAN_CONS resource\n");
-			return -EFAULT;
-		}
-	} else {
-		if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
-			IPA_WDI_ERR("fail to deregister ipa pm\n");
-			return -EFAULT;
-		}
+	if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
+		IPA_WDI_ERR("fail to deregister ipa pm\n");
+		return -EFAULT;
 	}
 
 	return 0;
@@ -759,24 +662,10 @@ int ipa_wdi_enable_pipes(void)
 		}
 	}
 
-	if (!ipa_pm_is_used()) {
-		ret = ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD);
-		if (ret == -EINPROGRESS) {
-			if (wait_for_completion_timeout(
-				&ipa_wdi_ctx->wdi_completion, 10*HZ) == 0) {
-				IPA_WDI_ERR("WLAN_PROD res req time out\n");
-				return -EFAULT;
-			}
-		} else if (ret != 0) {
-			IPA_WDI_ERR("fail to request resource\n");
-			return -EFAULT;
-		}
-	} else {
-		ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
-		if (ret) {
-			IPA_WDI_ERR("fail to activate ipa pm\n");
-			return -EFAULT;
-		}
+	ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
+	if (ret) {
+		IPA_WDI_ERR("fail to activate ipa pm\n");
+		return -EFAULT;
 	}
 
 	return 0;
@@ -825,18 +714,10 @@ int ipa_wdi_disable_pipes(void)
 		}
 	}
 
-	if (!ipa_pm_is_used()) {
-		ret = ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD);
-		if (ret != 0) {
-			IPA_WDI_ERR("fail to release resource\n");
-			return -EFAULT;
-		}
-	} else {
-		ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
-		if (ret) {
-			IPA_WDI_ERR("fail to deactivate ipa pm\n");
-			return -EFAULT;
-		}
+	ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
+	if (ret) {
+		IPA_WDI_ERR("fail to deactivate ipa pm\n");
+		return -EFAULT;
 	}
 
 	return 0;
@@ -845,39 +726,15 @@ EXPORT_SYMBOL(ipa_wdi_disable_pipes);
 
 int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile)
 {
-	struct ipa_rm_perf_profile rm_profile;
-	enum ipa_rm_resource_name resource_name;
-
 	if (profile == NULL) {
 		IPA_WDI_ERR("Invalid input\n");
 		return -EINVAL;
 	}
 
-	if (!ipa_pm_is_used()) {
-		rm_profile.max_supported_bandwidth_mbps =
-			profile->max_supported_bw_mbps;
-
-		if (profile->client == IPA_CLIENT_WLAN1_PROD ||
-			profile->client == IPA_CLIENT_WLAN2_PROD) {
-			resource_name = IPA_RM_RESOURCE_WLAN_PROD;
-		} else if (profile->client == IPA_CLIENT_WLAN1_CONS ||
-				   profile->client == IPA_CLIENT_WLAN2_CONS) {
-			resource_name = IPA_RM_RESOURCE_WLAN_CONS;
-		} else {
-			IPA_WDI_ERR("not supported\n");
-			return -EINVAL;
-		}
-
-		if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
-			IPA_WDI_ERR("fail to setup rm perf profile\n");
-			return -EFAULT;
-		}
-	} else {
-		if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
-			profile->max_supported_bw_mbps)) {
-			IPA_WDI_ERR("fail to setup pm perf profile\n");
-			return -EFAULT;
-		}
+	if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
+		profile->max_supported_bw_mbps)) {
+		IPA_WDI_ERR("fail to set pm throughput\n");
+		return -EFAULT;
 	}
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
index 4455bc1..6bcb502 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/ipa_wigig.h>
+#include <linux/debugfs.h>
 #include <linux/string.h>
 #include "../ipa_common_i.h"
 #include "../ipa_v3/ipa_pm.h"
@@ -37,9 +38,26 @@
 			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
-#define IPA_WIGIG_RX_PIPE_IDX	0
+#define IPA_WIGIG_ERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa( \
+		OFFLOAD_DRV_NAME " %s:%d " fmt, __func__,\
+		__LINE__, ## args);\
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
 #define IPA_WIGIG_TX_PIPE_NUM	4
-#define IPA_WIGIG_MAX_PIPES	5
+
+enum ipa_wigig_pipes_idx {
+	IPA_CLIENT_WIGIG_PROD_IDX = 0,
+	IPA_CLIENT_WIGIG1_CONS_IDX = 1,
+	IPA_CLIENT_WIGIG2_CONS_IDX = 2,
+	IPA_CLIENT_WIGIG3_CONS_IDX = 3,
+	IPA_CLIENT_WIGIG4_CONS_IDX = 4,
+	IPA_WIGIG_MAX_PIPES
+};
 
 struct ipa_wigig_intf_info {
 	char netdev_name[IPA_RESOURCE_NAME_MAX];
@@ -49,6 +67,21 @@ struct ipa_wigig_intf_info {
 	struct list_head link;
 };
 
+struct ipa_wigig_pipe_values {
+	uint8_t dir;
+	uint8_t tx_ring_id;
+	uint32_t desc_ring_HWHEAD;
+	uint32_t desc_ring_HWTAIL;
+	uint32_t status_ring_HWHEAD;
+	uint32_t status_ring_HWTAIL;
+};
+
+struct ipa_wigig_regs_save {
+	struct ipa_wigig_pipe_values pipes_val[IPA_WIGIG_MAX_PIPES];
+	u32 int_gen_tx_val;
+	u32 int_gen_rx_val;
+};
+
 struct ipa_wigig_context {
 	struct list_head head_intf_list;
 	struct mutex lock;
@@ -58,18 +91,36 @@ struct ipa_wigig_context {
 	phys_addr_t int_gen_tx_pa;
 	phys_addr_t int_gen_rx_pa;
 	phys_addr_t dma_ep_misc_pa;
-	struct ipa_wigig_pipe_setup_info_smmu pipes_smmu[IPA_WIGIG_MAX_PIPES];
+	ipa_notify_cb tx_notify;
+	void *priv;
+	union pipes {
+		struct ipa_wigig_pipe_setup_info flat[IPA_WIGIG_MAX_PIPES];
+		struct ipa_wigig_pipe_setup_info_smmu
+			smmu[IPA_WIGIG_MAX_PIPES];
+	} pipes;
 	struct ipa_wigig_rx_pipe_data_buffer_info_smmu rx_buff_smmu;
 	struct ipa_wigig_tx_pipe_data_buffer_info_smmu
 		tx_buff_smmu[IPA_WIGIG_TX_PIPE_NUM];
 	char clients_mac[IPA_WIGIG_TX_PIPE_NUM][IPA_MAC_ADDR_SIZE];
+	struct ipa_wigig_regs_save regs_save;
 	bool smmu_en;
 	bool shared_cb;
-	bool rx_connected;
+	u8 conn_pipes;
+	struct dentry *parent;
+	struct dentry *dent_conn_clients;
+	struct dentry *dent_smmu;
 };
 
 static struct ipa_wigig_context *ipa_wigig_ctx;
 
+#ifdef CONFIG_DEBUG_FS
+static int ipa_wigig_init_debugfs(struct dentry *parent);
+static inline void ipa_wigig_deinit_debugfs(void);
+#else
+static int ipa_wigig_init_debugfs(struct dentry *parent) { return 0; }
+static inline void ipa_wigig_deinit_debugfs(void) { }
+#endif
+
 int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
 	struct ipa_wigig_init_out_params *out)
 {
@@ -109,7 +160,8 @@ int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
 
 	inout.notify = in->notify;
 	inout.priv = in->priv;
-	if (ipa_wigig_uc_init(&inout, in->int_notify, &out->uc_db_pa)) {
+	if (ipa_wigig_internal_init(&inout, in->int_notify,
+		&out->uc_db_pa)) {
 		kfree(ipa_wigig_ctx);
 		ipa_wigig_ctx = NULL;
 		return -EFAULT;
@@ -143,6 +195,9 @@ int ipa_wigig_cleanup(void)
 	}
 
 	mutex_destroy(&ipa_wigig_ctx->lock);
+
+	ipa_wigig_deinit_debugfs();
+
 	kfree(ipa_wigig_ctx);
 	ipa_wigig_ctx = NULL;
 
@@ -557,6 +612,62 @@ static void ipa_wigig_pm_cb(void *p, enum ipa_pm_cb_event event)
 	IPA_WIGIG_DBG("received pm event %d\n", event);
 }
 
+static int ipa_wigig_store_pipe_info(struct ipa_wigig_pipe_setup_info *pipe,
+	unsigned int idx)
+{
+	IPA_WIGIG_DBG(
+		"idx %d: desc_ring HWHEAD_pa %pa, HWTAIL_pa %pa, status_ring HWHEAD_pa %pa, HWTAIL_pa %pa\n",
+		idx,
+		&pipe->desc_ring_HWHEAD_pa,
+		&pipe->desc_ring_HWTAIL_pa,
+		&pipe->status_ring_HWHEAD_pa,
+		&pipe->status_ring_HWTAIL_pa);
+
+	/* store regs */
+	ipa_wigig_ctx->pipes.flat[idx].desc_ring_HWHEAD_pa =
+		pipe->desc_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes.flat[idx].desc_ring_HWTAIL_pa =
+		pipe->desc_ring_HWTAIL_pa;
+
+	ipa_wigig_ctx->pipes.flat[idx].status_ring_HWHEAD_pa =
+		pipe->status_ring_HWHEAD_pa;
+
+	ipa_wigig_ctx->pipes.flat[idx].status_ring_HWTAIL_pa =
+		pipe->status_ring_HWTAIL_pa;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static u8 ipa_wigig_pipe_to_bit_val(int client)
+{
+	u8 shift_val;
+
+	switch (client) {
+	case IPA_CLIENT_WIGIG_PROD:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG_PROD_IDX;
+		break;
+	case IPA_CLIENT_WIGIG1_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG1_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG2_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG2_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG3_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG3_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG4_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG4_CONS_IDX;
+		break;
+	default:
+		IPA_WIGIG_ERR("invalid pipe %d\n", client);
+		return 1;
+	}
+
+	return shift_val;
+}
+
 int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
 	struct ipa_wigig_conn_out_params *out)
 {
@@ -613,13 +724,23 @@ int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
 		goto fail_msi;
 	}
 
-	if (ipa_conn_wigig_rx_pipe_i(in, out)) {
+	if (ipa_conn_wigig_rx_pipe_i(in, out, &ipa_wigig_ctx->parent)) {
 		IPA_WIGIG_ERR("fail to connect rx pipe\n");
 		ret = -EFAULT;
 		goto fail_connect_pipe;
 	}
 
-	ipa_wigig_ctx->rx_connected = true;
+	ipa_wigig_ctx->tx_notify = in->notify;
+	ipa_wigig_ctx->priv = in->priv;
+
+	if (ipa_wigig_ctx->parent)
+		ipa_wigig_init_debugfs(ipa_wigig_ctx->parent);
+
+	ipa_wigig_store_pipe_info(ipa_wigig_ctx->pipes.flat,
+		IPA_CLIENT_WIGIG_PROD_IDX);
+
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
 
 	IPA_WIGIG_DBG("exit\n");
 
@@ -663,7 +784,7 @@ static int ipa_wigig_client_to_idx(enum ipa_client_type client,
 	return 0;
 }
 
-static int ipa_wigig_clean_pipe_smmu_info(unsigned int idx)
+static int ipa_wigig_clean_pipe_info(unsigned int idx)
 {
 	IPA_WIGIG_DBG("cleaning pipe %d info\n", idx);
 
@@ -672,12 +793,19 @@ static int ipa_wigig_clean_pipe_smmu_info(unsigned int idx)
 		return -EINVAL;
 	}
 
-	sg_free_table(&ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base);
-	sg_free_table(&ipa_wigig_ctx->pipes_smmu[idx].status_ring_base);
+	if (ipa_wigig_ctx->smmu_en) {
+		sg_free_table(
+			&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
+		sg_free_table(
+			&ipa_wigig_ctx->pipes.smmu[idx].status_ring_base);
 
-	memset(ipa_wigig_ctx->pipes_smmu + idx,
-		0,
-		sizeof(ipa_wigig_ctx->pipes_smmu[idx]));
+		memset(ipa_wigig_ctx->pipes.smmu + idx,
+			0,
+			sizeof(ipa_wigig_ctx->pipes.smmu[idx]));
+	} else {
+		memset(ipa_wigig_ctx->pipes.flat + idx, 0,
+			sizeof(ipa_wigig_ctx->pipes.flat[idx]));
+	}
 
 	IPA_WIGIG_DBG("exit\n");
 
@@ -710,33 +838,43 @@ static int ipa_wigig_store_pipe_smmu_info
 {
 	int ret;
 
-	IPA_WIGIG_DBG("\n");
+	IPA_WIGIG_DBG(
+		"idx %d: desc_ring HWHEAD_pa %pa, HWTAIL_pa %pa, status_ring HWHEAD_pa %pa, HWTAIL_pa %pa, desc_ring_base 0x%llx, status_ring_base 0x%llx\n",
+		idx,
+		&pipe_smmu->desc_ring_HWHEAD_pa,
+		&pipe_smmu->desc_ring_HWTAIL_pa,
+		&pipe_smmu->status_ring_HWHEAD_pa,
+		&pipe_smmu->status_ring_HWTAIL_pa,
+		(unsigned long long)pipe_smmu->desc_ring_base_iova,
+		(unsigned long long)pipe_smmu->status_ring_base_iova);
 
 	/* store regs */
-	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_HWHEAD_pa =
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_HWHEAD_pa =
 		pipe_smmu->desc_ring_HWHEAD_pa;
-	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_HWTAIL_pa =
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_HWTAIL_pa =
 		pipe_smmu->desc_ring_HWTAIL_pa;
 
-	ipa_wigig_ctx->pipes_smmu[idx].status_ring_HWHEAD_pa =
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_HWHEAD_pa =
 		pipe_smmu->status_ring_HWHEAD_pa;
-	ipa_wigig_ctx->pipes_smmu[idx].status_ring_HWTAIL_pa =
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_HWTAIL_pa =
 		pipe_smmu->status_ring_HWTAIL_pa;
 
 	/* store rings IOVAs */
-	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base_iova =
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base_iova =
 		pipe_smmu->desc_ring_base_iova;
-	ipa_wigig_ctx->pipes_smmu[idx].status_ring_base_iova =
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_base_iova =
 		pipe_smmu->status_ring_base_iova;
 
 	/* copy sgt */
-	ret = ipa_wigig_clone_sg_table(&pipe_smmu->desc_ring_base,
-		&ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base);
+	ret = ipa_wigig_clone_sg_table(
+		&pipe_smmu->desc_ring_base,
+		&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
 	if (ret)
 		goto fail_desc;
 
-	ret = ipa_wigig_clone_sg_table(&pipe_smmu->status_ring_base,
-		&ipa_wigig_ctx->pipes_smmu[idx].status_ring_base);
+	ret = ipa_wigig_clone_sg_table(
+		&pipe_smmu->status_ring_base,
+		&ipa_wigig_ctx->pipes.smmu[idx].status_ring_base);
 	if (ret)
 		goto fail_stat;
 
@@ -744,9 +882,9 @@ static int ipa_wigig_store_pipe_smmu_info
 
 	return 0;
 fail_stat:
-	sg_free_table(&ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base);
-	memset(&ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base,
-		0, sizeof(ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base));
+	sg_free_table(&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
+	memset(&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base, 0,
+	       sizeof(ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base));
 fail_desc:
 	return ret;
 }
@@ -760,11 +898,239 @@ static int ipa_wigig_get_pipe_smmu_info(
 		return -EINVAL;
 	}
 
-	*pipe_smmu = &ipa_wigig_ctx->pipes_smmu[idx];
+	*pipe_smmu = &ipa_wigig_ctx->pipes.smmu[idx];
 
 	return 0;
 }
-static void  ipa_wigig_clean_rx_buff_smmu_info(void)
+
+static int ipa_wigig_get_pipe_info(
+	struct ipa_wigig_pipe_setup_info **pipe, unsigned int idx)
+{
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("exceeded pipe num %d >= %d\n", idx,
+			IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	*pipe = &ipa_wigig_ctx->pipes.flat[idx];
+
+	return 0;
+}
+
+static int ipa_wigig_get_regs_addr(
+	void __iomem **desc_ring_h, void __iomem **desc_ring_t,
+	void __iomem **status_ring_h, void __iomem **status_ring_t,
+	unsigned int idx)
+{
+	struct ipa_wigig_pipe_setup_info *pipe;
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+	int ret = 0;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_DBG("exceeded pipe num %d >= %d\n", idx,
+			IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_DBG("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_DBG(
+			"must connect rx pipe before connecting any client\n");
+		return -EINVAL;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		ret = ipa_wigig_get_pipe_smmu_info(&pipe_smmu, idx);
+		if (ret)
+			return -EINVAL;
+
+		*desc_ring_h =
+			ioremap(pipe_smmu->desc_ring_HWHEAD_pa, sizeof(u32));
+		if (!*desc_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_h;
+		}
+		*desc_ring_t =
+			ioremap(pipe_smmu->desc_ring_HWTAIL_pa, sizeof(u32));
+		if (!*desc_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_t;
+		}
+		*status_ring_h =
+			ioremap(pipe_smmu->status_ring_HWHEAD_pa, sizeof(u32));
+		if (!*status_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_status_h;
+		}
+		*status_ring_t =
+			ioremap(pipe_smmu->status_ring_HWTAIL_pa, sizeof(u32));
+		if (!*status_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_status_t;
+		}
+	} else {
+		ret = ipa_wigig_get_pipe_info(&pipe, idx);
+		if (ret)
+			return -EINVAL;
+
+		*desc_ring_h = ioremap(pipe->desc_ring_HWHEAD_pa, sizeof(u32));
+		if (!*desc_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_h;
+		}
+		*desc_ring_t = ioremap(pipe->desc_ring_HWTAIL_pa, sizeof(u32));
+		if (!*desc_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_t;
+		}
+		*status_ring_h =
+			ioremap(pipe->status_ring_HWHEAD_pa, sizeof(u32));
+		if (!*status_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_status_h;
+		}
+		*status_ring_t =
+			ioremap(pipe->status_ring_HWTAIL_pa, sizeof(u32));
+		if (!*status_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_status_t;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_map_status_t:
+	iounmap(*status_ring_h);
+fail_map_status_h:
+	iounmap(*desc_ring_t);
+fail_map_desc_t:
+	iounmap(*desc_ring_h);
+fail_map_desc_h:
+	IPA_WIGIG_DBG("couldn't get regs information idx %d\n", idx);
+	return ret;
+}
+
+int ipa_wigig_save_regs(void)
+{
+	void __iomem *desc_ring_h = NULL, *desc_ring_t = NULL,
+		*status_ring_h = NULL, *status_ring_t = NULL,
+		*int_gen_rx_pa = NULL, *int_gen_tx_pa = NULL;
+	uint32_t readval;
+	u8 pipe_connected;
+	int i, ret = 0;
+
+	IPA_WIGIG_DBG("Start collecting pipes information\n");
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_ERR(
+			"must connect rx pipe before connecting any client\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < IPA_WIGIG_MAX_PIPES; i++) {
+		pipe_connected = (ipa_wigig_ctx->conn_pipes & (0x1 << i));
+		if (pipe_connected) {
+			ret = ipa_wigig_get_regs_addr(
+				&desc_ring_h, &desc_ring_t,
+				&status_ring_h, &status_ring_t, i);
+
+			if (ret) {
+				IPA_WIGIG_ERR(
+					"couldn't get registers information on client %d\n",
+					i);
+				return -EINVAL;
+			}
+
+			IPA_WIGIG_DBG("collecting pipe info of index %d\n", i);
+			if (i == IPA_CLIENT_WIGIG_PROD_IDX) {
+				ipa_wigig_ctx->regs_save.pipes_val[i].dir = 0;
+			} else {
+				ipa_wigig_ctx->regs_save.pipes_val[i].dir = 1;
+				/* TX ids start from 2 */
+				ipa_wigig_ctx->regs_save.pipes_val[i]
+					.tx_ring_id = i + 1;
+			}
+
+			readval = readl_relaxed(desc_ring_h);
+			ipa_wigig_ctx->regs_save.pipes_val[i].desc_ring_HWHEAD =
+				readval;
+			readval = readl_relaxed(desc_ring_t);
+			ipa_wigig_ctx->regs_save.pipes_val[i].desc_ring_HWTAIL =
+				readval;
+			readval = readl_relaxed(status_ring_h);
+			ipa_wigig_ctx->regs_save.pipes_val[i]
+				.status_ring_HWHEAD = readval;
+			readval = readl_relaxed(status_ring_t);
+			ipa_wigig_ctx->regs_save.pipes_val[i]
+				.status_ring_HWTAIL = readval;
+			/* unmap all regs */
+			iounmap(desc_ring_h);
+			iounmap(desc_ring_t);
+			iounmap(status_ring_h);
+			iounmap(status_ring_t);
+		}
+	}
+	int_gen_rx_pa = ioremap(ipa_wigig_ctx->int_gen_rx_pa, sizeof(u32));
+	if (!int_gen_rx_pa) {
+		IPA_WIGIG_ERR("couldn't ioremap gen rx address\n");
+		ret = -EINVAL;
+		goto fail_map_gen_rx;
+	}
+	int_gen_tx_pa = ioremap(ipa_wigig_ctx->int_gen_tx_pa, sizeof(u32));
+	if (!int_gen_tx_pa) {
+		IPA_WIGIG_ERR("couldn't ioremap gen tx address\n");
+		ret = -EINVAL;
+		goto fail_map_gen_tx;
+	}
+
+	IPA_WIGIG_DBG("collecting int_gen_rx_pa info\n");
+	readval = readl_relaxed(int_gen_rx_pa);
+	ipa_wigig_ctx->regs_save.int_gen_rx_val = readval;
+
+	IPA_WIGIG_DBG("collecting int_gen_tx_pa info\n");
+	readval = readl_relaxed(int_gen_tx_pa);
+	ipa_wigig_ctx->regs_save.int_gen_tx_val = readval;
+
+	IPA_WIGIG_DBG("Finish collecting pipes info\n");
+	IPA_WIGIG_DBG("exit\n");
+
+	iounmap(int_gen_tx_pa);
+fail_map_gen_tx:
+	iounmap(int_gen_rx_pa);
+fail_map_gen_rx:
+	return ret;
+}
+
+static void ipa_wigig_clean_rx_buff_smmu_info(void)
 {
 	IPA_WIGIG_DBG("clearing rx buff smmu info\n");
 
@@ -774,9 +1140,6 @@ static void  ipa_wigig_clean_rx_buff_smmu_info(void)
 		sizeof(ipa_wigig_ctx->rx_buff_smmu));
 
 	IPA_WIGIG_DBG("\n");
-
-	return;
-
 }
 
 static int ipa_wigig_store_rx_buff_smmu_info(
@@ -926,7 +1289,7 @@ static int ipa_wigig_store_rx_smmu_info
 	IPA_WIGIG_DBG("\n");
 
 	ret = ipa_wigig_store_pipe_smmu_info(&in->pipe_smmu,
-		IPA_WIGIG_RX_PIPE_IDX);
+		IPA_CLIENT_WIGIG_PROD_IDX);
 	if (ret)
 		return ret;
 
@@ -941,7 +1304,7 @@ static int ipa_wigig_store_rx_smmu_info
 	return 0;
 
 fail_buff:
-	ipa_wigig_clean_pipe_smmu_info(IPA_WIGIG_RX_PIPE_IDX);
+	ipa_wigig_clean_pipe_info(IPA_CLIENT_WIGIG_PROD_IDX);
 	return ret;
 }
 
@@ -973,7 +1336,7 @@ static int ipa_wigig_store_client_smmu_info
 	return 0;
 
 fail_buff:
-	ipa_wigig_clean_pipe_smmu_info(IPA_WIGIG_RX_PIPE_IDX);
+	ipa_wigig_clean_pipe_info(IPA_CLIENT_WIGIG_PROD_IDX);
 	return ret;
 }
 
@@ -983,7 +1346,8 @@ static int ipa_wigig_get_rx_smmu_info(
 {
 	int ret;
 
-	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu, IPA_WIGIG_RX_PIPE_IDX);
+	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu,
+		IPA_CLIENT_WIGIG_PROD_IDX);
 	if (ret)
 		return ret;
 
@@ -1022,7 +1386,7 @@ static int ipa_wigig_clean_smmu_info(enum ipa_client_type client)
 	int ret;
 
 	if (client == IPA_CLIENT_WIGIG_PROD) {
-		ret = ipa_wigig_clean_pipe_smmu_info(IPA_WIGIG_RX_PIPE_IDX);
+		ret = ipa_wigig_clean_pipe_info(IPA_CLIENT_WIGIG_PROD_IDX);
 		if (ret)
 			return ret;
 		if (!ipa_wigig_ctx->shared_cb)
@@ -1034,7 +1398,7 @@ static int ipa_wigig_clean_smmu_info(enum ipa_client_type client)
 		if (ret)
 			return ret;
 
-		ret = ipa_wigig_clean_pipe_smmu_info(idx);
+		ret = ipa_wigig_clean_pipe_info(idx);
 		if (ret)
 			return ret;
 
@@ -1108,19 +1472,26 @@ int ipa_wigig_conn_rx_pipe_smmu(
 		goto fail_msi;
 	}
 
-	if (ipa_conn_wigig_rx_pipe_i(in, out)) {
+	if (ipa_conn_wigig_rx_pipe_i(in, out, &ipa_wigig_ctx->parent)) {
 		IPA_WIGIG_ERR("fail to connect rx pipe\n");
 		ret = -EFAULT;
 		goto fail_connect_pipe;
 	}
 
+	if (ipa_wigig_ctx->parent)
+		ipa_wigig_init_debugfs(ipa_wigig_ctx->parent);
+
 	if (ipa_wigig_store_rx_smmu_info(in)) {
 		IPA_WIGIG_ERR("fail to store smmu data for rx pipe\n");
 		ret = -EFAULT;
 		goto fail_smmu_store;
 	}
 
-	ipa_wigig_ctx->rx_connected = true;
+	ipa_wigig_ctx->tx_notify = in->notify;
+	ipa_wigig_ctx->priv = in->priv;
+
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
 
 	IPA_WIGIG_DBG("exit\n");
 
@@ -1201,6 +1572,7 @@ int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 	struct ipa_wigig_conn_out_params *out)
 {
 	char dev_name[IPA_RESOURCE_NAME_MAX];
+	unsigned int idx;
 
 	IPA_WIGIG_DBG("\n");
 
@@ -1214,7 +1586,8 @@ int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 		return -EPERM;
 	}
 
-	if (!ipa_wigig_ctx->rx_connected) {
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
 		IPA_WIGIG_ERR(
 			"must connect rx pipe before connecting any client\n"
 		);
@@ -1236,7 +1609,8 @@ int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 		return -EFAULT;
 	}
 
-	if (ipa_conn_wigig_client_i(in, out)) {
+	if (ipa_conn_wigig_client_i(in, out, ipa_wigig_ctx->tx_notify,
+		ipa_wigig_ctx->priv)) {
 		IPA_WIGIG_ERR(
 			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
 		, in->client_mac[0], in->client_mac[1], in->client_mac[2]
@@ -1244,6 +1618,13 @@ int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 		return -EFAULT;
 	}
 
+	if (ipa_wigig_client_to_idx(out->client, &idx)) {
+		IPA_WIGIG_ERR("couldn't acquire idx\n");
+		goto fail_convert_client_to_idx;
+	}
+
+	ipa_wigig_store_pipe_info(&in->pipe, idx);
+
 	if (ipa_wigig_send_msg(WIGIG_CLIENT_CONNECT,
 		dev_name,
 		in->client_mac, out->client, false)) {
@@ -1251,13 +1632,20 @@ int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 		goto fail_sendmsg;
 	}
 
+	/* update connected clients */
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(out->client);
+
 	ipa_wigig_store_client_mac(out->client, in->client_mac);
 
 	IPA_WIGIG_DBG("exit\n");
 	return 0;
+
 fail_sendmsg:
+	ipa_wigig_clean_pipe_info(idx);
+fail_convert_client_to_idx:
 	ipa_disconn_wigig_pipe_i(out->client, NULL, NULL);
-	return -EFAULT;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(ipa_wigig_conn_client);
 
@@ -1280,7 +1668,8 @@ int ipa_wigig_conn_client_smmu(
 		return -EPERM;
 	}
 
-	if (!ipa_wigig_ctx->rx_connected) {
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
 		IPA_WIGIG_ERR(
 			"must connect rx pipe before connecting any client\n"
 		);
@@ -1303,7 +1692,8 @@ int ipa_wigig_conn_client_smmu(
 		return -EFAULT;
 	}
 
-	if (ipa_conn_wigig_client_i(in, out)) {
+	if (ipa_conn_wigig_client_i(in, out, ipa_wigig_ctx->tx_notify,
+		ipa_wigig_ctx->priv)) {
 		IPA_WIGIG_ERR(
 			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
 			, in->client_mac[0], in->client_mac[1]
@@ -1324,6 +1714,10 @@ int ipa_wigig_conn_client_smmu(
 	if (ret)
 		goto fail_smmu;
 
+	/* update connected clients */
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(out->client);
+
 	ipa_wigig_store_client_mac(out->client, in->client_mac);
 
 	IPA_WIGIG_DBG("exit\n");
@@ -1352,7 +1746,7 @@ static inline int ipa_wigig_validate_client_type(enum ipa_client_type client)
 	case IPA_CLIENT_WIGIG4_CONS:
 		break;
 	default:
-		IPA_WIGIG_ERR("invalid client type %d\n", client);
+		IPA_WIGIG_ERR_RL("invalid client type %d\n", client);
 		return -EINVAL;
 	}
 
@@ -1439,7 +1833,9 @@ int ipa_wigig_disconn_pipe(enum ipa_client_type client)
 			WARN_ON(1);
 		}
 
-		ipa_wigig_ctx->rx_connected = false;
+		ipa_wigig_ctx->conn_pipes &=
+			~ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
+		WARN_ON(ipa_wigig_ctx->conn_pipes);
 	} else {
 		/*
 		 * wigig clients are disconnected with legacy message since
@@ -1539,3 +1935,156 @@ int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb)
 	return 0;
 }
 EXPORT_SYMBOL(ipa_wigig_tx_dp);
+
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MAX_MSG_LEN 4096
+
+static ssize_t ipa_wigig_read_conn_clients(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int i;
+	int nbytes = 0;
+	u8 pipe_connected;
+	char *dbg_buff;
+	ssize_t ret;
+
+	dbg_buff = kzalloc(IPA_MAX_MSG_LEN, GFP_KERNEL);
+	if (!dbg_buff)
+		return -ENOMEM;
+
+	if (!ipa_wigig_ctx) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"IPA WIGIG not initialized\n");
+		goto finish;
+	}
+
+	if (!ipa_wigig_ctx->conn_pipes) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"no WIGIG pipes connected\n");
+		goto finish;
+	}
+
+	for (i = 0; i < IPA_WIGIG_MAX_PIPES; i++) {
+		pipe_connected = (ipa_wigig_ctx->conn_pipes & (0x1 << i));
+		switch (i) {
+		case 0:
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"IPA_CLIENT_WIGIG_PROD");
+			break;
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"IPA_CLIENT_WIGIG%d_CONS",
+				i);
+			break;
+		default:
+			IPA_WIGIG_ERR("invalid pipe %d\n", i);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"invalid pipe %d",
+				i);
+			break;
+		}
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			" %s connected\n", pipe_connected ? "is" : "not");
+	}
+
+finish:
+	ret = simple_read_from_buffer(
+		ubuf, count, ppos, dbg_buff, nbytes);
+	kfree(dbg_buff);
+	return ret;
+}
+
+static ssize_t ipa_wigig_read_smmu_status(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	char *dbg_buff;
+	ssize_t ret;
+
+	dbg_buff = kzalloc(IPA_MAX_MSG_LEN, GFP_KERNEL);
+	if (!dbg_buff)
+		return -ENOMEM;
+
+	if (!ipa_wigig_ctx) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"IPA WIGIG not initialized\n");
+		goto finish;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"SMMU enabled\n");
+
+		if (ipa_wigig_ctx->shared_cb) {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"CB shared\n");
+		} else {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"CB not shared\n");
+		}
+	} else {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"SMMU in S1 bypass\n");
+	}
+finish:
+	ret = simple_read_from_buffer(
+		ubuf, count, ppos, dbg_buff, nbytes);
+	kfree(dbg_buff);
+	return ret;
+}
+static const struct file_operations ipa_wigig_conn_clients_ops = {
+	.read = ipa_wigig_read_conn_clients,
+};
+
+static const struct file_operations ipa_wigig_smmu_ops = {
+	.read = ipa_wigig_read_smmu_status,
+};
+
+static inline void ipa_wigig_deinit_debugfs(void)
+{
+	debugfs_remove(ipa_wigig_ctx->dent_conn_clients);
+	debugfs_remove(ipa_wigig_ctx->dent_smmu);
+}
+
+static int ipa_wigig_init_debugfs(struct dentry *parent)
+{
+	const mode_t read_only_mode = 0444;
+
+	ipa_wigig_ctx->dent_conn_clients =
+		debugfs_create_file("conn_clients", read_only_mode, parent,
+				NULL, &ipa_wigig_conn_clients_ops);
+	if (IS_ERR_OR_NULL(ipa_wigig_ctx->dent_conn_clients)) {
+		IPA_WIGIG_ERR("fail to create file %s\n", "conn_clients");
+		goto fail_conn_clients;
+	}
+
+	ipa_wigig_ctx->dent_smmu =
+		debugfs_create_file("smmu", read_only_mode, parent, NULL,
+				&ipa_wigig_smmu_ops);
+	if (IS_ERR_OR_NULL(ipa_wigig_ctx->dent_smmu)) {
+		IPA_WIGIG_ERR("fail to create file %s\n", "smmu");
+		goto fail_smmu;
+	}
+
+	return 0;
+fail_smmu:
+	debugfs_remove(ipa_wigig_ctx->dent_conn_clients);
+fail_conn_clients:
+	return -EFAULT;
+}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index 29cdb59..efdf97b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -117,7 +117,6 @@ struct stats {
  * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
  * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
  * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
- * @rm_comp: completion object for IP RM
  * @wakeup_request: client callback to wakeup
  */
 struct odu_bridge_ctx {
@@ -144,7 +143,6 @@ struct odu_bridge_ctx {
 	u32 ipa_sys_desc_size;
 	void *logbuf;
 	void *logbuf_low;
-	struct completion rm_comp;
 	void (*wakeup_request)(void *cl_priv);
 	u32 pm_hdl;
 };
@@ -268,24 +266,6 @@ static int odu_bridge_connect_bridge(void)
 	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
 	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
 
-	if (!ipa_pm_is_used()) {
-		/* Build IPA Resource manager dependency graph */
-		ODU_BRIDGE_DBG_LOW("build dependency graph\n");
-		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-					IPA_RM_RESOURCE_Q6_CONS);
-		if (res && res != -EINPROGRESS) {
-			ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
-			goto fail_add_dependency_1;
-		}
-
-		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-					IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-		if (res && res != -EINPROGRESS) {
-			ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
-			goto fail_add_dependency_2;
-		}
-	}
-
 	/* configure RX (ODU->IPA) EP */
 	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
 	odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
@@ -343,14 +323,6 @@ static int odu_bridge_connect_bridge(void)
 	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
 	odu_bridge_ctx->odu_prod_hdl = 0;
 fail_odu_prod:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-				IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-fail_add_dependency_2:
-	if (!ipa_pm_is_used())
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-				IPA_RM_RESOURCE_Q6_CONS);
-fail_add_dependency_1:
 	return res;
 }
 
@@ -396,27 +368,13 @@ static int odu_bridge_disconnect_bridge(void)
 		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
 	odu_bridge_ctx->odu_emb_cons_hdl = 0;
 
-	if (!ipa_pm_is_used()) {
-		/* Delete IPA Resource manager dependency graph */
-		ODU_BRIDGE_DBG("deleting dependency graph\n");
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-		if (res && res != -EINPROGRESS)
-			ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
-
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-		if (res && res != -EINPROGRESS)
-			ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
-	}
-
 	return 0;
 }
 
 /**
  * odu_bridge_disconnect() - Disconnect odu bridge
  *
- * Disconnect all pipes and deletes IPA RM dependencies on bridge mode
+ * Disconnect all pipes
  *
  * Return codes: 0- success, error otherwise
  */
@@ -464,8 +422,6 @@ EXPORT_SYMBOL(odu_bridge_disconnect);
  * odu_bridge_connect() - Connect odu bridge.
  *
  * Call to the mode-specific connect function for connection IPA pipes
- * and adding IPA RM dependencies
-
  * Return codes: 0: success
  *		-EINVAL: invalid parameters
  *		-EPERM: Operation not permitted as the bridge is already
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index f8b70ee..fd2eab5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/atomic.h>
@@ -31,7 +31,6 @@
 #define DEBUGFS_DIR_NAME "rndis_ipa"
 #define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
 #define NETDEV_NAME "rndis"
-#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD
 #define IPV4_HDR_NAME "rndis_eth_ipv4"
 #define IPV6_HDR_NAME "rndis_eth_ipv6"
 #define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
@@ -160,7 +159,6 @@ enum rndis_ipa_operation {
  * @rx_dropped: number of filtered out Rx packets
  * @rx_dump_enable: dump all Rx packets
  * @icmp_filter: allow all ICMP packet to pass through the filters
- * @rm_enable: flag that enable/disable Resource manager request prior to Tx
  * @deaggregation_enable: enable/disable IPA HW deaggregation logic
  * @during_xmit_error: flags that indicate that the driver is in a middle
  *  of error handling in Tx path
@@ -195,7 +193,6 @@ struct rndis_ipa_dev {
 	u32 rx_dropped;
 	bool rx_dump_enable;
 	bool icmp_filter;
-	bool rm_enable;
 	bool deaggregation_enable;
 	bool during_xmit_error;
 	struct dentry *directory;
@@ -256,18 +253,10 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
 static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
 static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode);
 static int rndis_ipa_deregister_properties(char *netdev_name);
-static void rndis_ipa_rm_notify
-	(void *user_data, enum ipa_rm_event event,
-	unsigned long data);
-static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
-static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
 static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
 static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
 static bool rx_filter(struct sk_buff *skb);
 static bool tx_filter(struct sk_buff *skb);
-static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
-static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx);
-static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx);
 static netdev_tx_t rndis_ipa_start_xmit
 	(struct sk_buff *skb, struct net_device *net);
 static int rndis_ipa_debugfs_atomic_open
@@ -548,7 +537,6 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
 	rndis_ipa_ctx->tx_filter = false;
 	rndis_ipa_ctx->rx_filter = false;
 	rndis_ipa_ctx->icmp_filter = true;
-	rndis_ipa_ctx->rm_enable = true;
 	rndis_ipa_ctx->tx_dropped = 0;
 	rndis_ipa_ctx->rx_dropped = 0;
 	rndis_ipa_ctx->tx_dump_enable = false;
@@ -749,15 +737,12 @@ int rndis_ipa_pipe_connect_notify(
 		return -EINVAL;
 	}
 
-	if (ipa_pm_is_used())
-		result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
-	else
-		result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
+	result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
 	if (result) {
-		RNDIS_IPA_ERROR("fail on RM create\n");
-		goto fail_create_rm;
+		RNDIS_IPA_ERROR("fail on PM register\n");
+		goto fail_register_pm;
 	}
-	RNDIS_IPA_DEBUG("RM resource was created\n");
+	RNDIS_IPA_DEBUG("PM client was registered\n");
 
 	rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
 	rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
@@ -835,11 +820,8 @@ int rndis_ipa_pipe_connect_notify(
 	return 0;
 
 fail:
-	if (ipa_pm_is_used())
-		rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
-	else
-		rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
-fail_create_rm:
+	rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
+fail_register_pm:
 	return result;
 }
 EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify);
@@ -957,11 +939,11 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
-	ret = resource_request(rndis_ipa_ctx);
+	ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
 	if (ret) {
-		RNDIS_IPA_DEBUG("Waiting to resource\n");
+		RNDIS_IPA_DEBUG("Failed activate PM client\n");
 		netif_stop_queue(net);
-		goto resource_busy;
+		goto fail_pm_activate;
 	}
 
 	if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >=
@@ -990,8 +972,8 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 fail_tx_packet:
 	rndis_ipa_xmit_error(skb);
 out:
-	resource_release(rndis_ipa_ctx);
-resource_busy:
+	ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
+fail_pm_activate:
 	RNDIS_IPA_DEBUG
 		("packet Tx done - %s\n",
 		(status == NETDEV_TX_OK) ? "OK" : "FAIL");
@@ -1080,50 +1062,6 @@ static void rndis_ipa_tx_timeout(struct net_device *net)
 }
 
 /**
- * rndis_ipa_rm_notify() - callback supplied to IPA resource manager
- *   for grant/release events
- * user_data: the driver context supplied to IPA resource manager during call
- *  to ipa_rm_create_resource().
- * event: the event notified to us by IPA resource manager (Release/Grant)
- * data: reserved field supplied by IPA resource manager
- *
- * This callback shall be called based on resource request/release sent
- * to the IPA resource manager.
- * In case the queue was stopped during EINPROGRESS for Tx path and the
- * event received is Grant then the queue shall be restarted.
- * In case the event notified is a release notification the netdev discard it.
- */
-static void rndis_ipa_rm_notify(
-	void *user_data, enum ipa_rm_event event,
-	unsigned long data)
-{
-	struct rndis_ipa_dev *rndis_ipa_ctx = user_data;
-
-	RNDIS_IPA_LOG_ENTRY();
-
-	if (event == IPA_RM_RESOURCE_RELEASED) {
-		RNDIS_IPA_DEBUG("Resource Released\n");
-		return;
-	}
-
-	if (event != IPA_RM_RESOURCE_GRANTED) {
-		RNDIS_IPA_ERROR
-			("Unexceoted event receieved from RM (%d\n)", event);
-		return;
-	}
-	RNDIS_IPA_DEBUG("Resource Granted\n");
-
-	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
-		RNDIS_IPA_DEBUG("starting queue\n");
-		netif_start_queue(rndis_ipa_ctx->net);
-	} else {
-		RNDIS_IPA_DEBUG("queue already awake\n");
-	}
-
-	RNDIS_IPA_LOG_EXIT();
-}
-
-/**
  * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from
  *  tethered PC (USB->IPA).
  *  is USB->IPA->Apps-processor
@@ -1332,15 +1270,12 @@ int rndis_ipa_pipe_disconnect_notify(void *private)
 	rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
 	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
 
-	if (ipa_pm_is_used())
-		retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
-	else
-		retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
+	retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
 	if (retval) {
-		RNDIS_IPA_ERROR("Fail to clean RM\n");
+		RNDIS_IPA_ERROR("Fail to deregister PM\n");
 		return retval;
 	}
-	RNDIS_IPA_DEBUG("RM was successfully destroyed\n");
+	RNDIS_IPA_DEBUG("PM was successfully deregistered\n");
 
 	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
 	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
@@ -1808,86 +1743,7 @@ static int  rndis_ipa_deregister_properties(char *netdev_name)
 	return 0;
 }
 
-/**
- * rndis_ipa_create_rm_resource() -creates the resource representing
- *  this Netdev and supply notification callback for resource event
- *  such as Grant/Release
- * @rndis_ipa_ctx: this driver context
- *
- * In order make sure all needed resources are available during packet
- * transmit this Netdev shall use Request/Release mechanism of
- * the IPA resource manager.
- * This mechanism shall iterate over a dependency graph and make sure
- * all dependent entities are ready to for packet Tx
- * transfer (Apps->IPA->USB).
- * In this function the resource representing the Netdev is created
- * in addition to the basic dependency between the Netdev and the USB client.
- * Hence, USB client, is a dependency for the Netdev and may be notified in
- * case of packet transmit from this Netdev to tethered Host.
- * As implied from the "may" in the above sentence there is a scenario where
- * the USB is not notified. This is done thanks to the IPA resource manager
- * inactivity timer.
- * The inactivity timer allow the Release requests to be delayed in order
- * prevent ping-pong with the USB and other dependencies.
- */
-static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	struct ipa_rm_create_params create_params = {0};
-	struct ipa_rm_perf_profile profile;
-	int result;
 
-	RNDIS_IPA_LOG_ENTRY();
-
-	create_params.name = DRV_RESOURCE_ID;
-	create_params.reg_params.user_data = rndis_ipa_ctx;
-	create_params.reg_params.notify_cb = rndis_ipa_rm_notify;
-	result = ipa_rm_create_resource(&create_params);
-	if (result) {
-		RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n");
-		goto fail_rm_create;
-	}
-	RNDIS_IPA_DEBUG("RM client was created\n");
-
-	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
-	ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile);
-
-	result = ipa_rm_inactivity_timer_init
-		(DRV_RESOURCE_ID,
-		INACTIVITY_MSEC_DELAY);
-	if (result) {
-		RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
-		goto fail_inactivity_timer;
-	}
-
-	RNDIS_IPA_DEBUG("rm_it client was created\n");
-
-	result = ipa_rm_add_dependency_sync
-		(DRV_RESOURCE_ID,
-		IPA_RM_RESOURCE_USB_CONS);
-
-	if (result && result != -EINPROGRESS)
-		RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n",
-				result);
-	else
-		RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n");
-
-	result = ipa_rm_add_dependency_sync
-		(IPA_RM_RESOURCE_USB_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (result && result != -EINPROGRESS)
-		RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
-				result);
-	else
-		RNDIS_IPA_DEBUG("USB/APPS dependency was set\n");
-
-	RNDIS_IPA_LOG_EXIT();
-
-	return 0;
-
-fail_inactivity_timer:
-fail_rm_create:
-	return result;
-}
 
 static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -1912,64 +1768,6 @@ static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
 	RNDIS_IPA_LOG_EXIT();
 }
 
-/**
- * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
- * the resource done on rndis_ipa_create_rm_resource()
- * @rndis_ipa_ctx: this driver context
- *
- * This function shall delete the dependency create between
- * the Netdev to the USB.
- * In addition the inactivity time shall be destroy and the resource shall
- * be deleted.
- */
-static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	int result;
-
-	RNDIS_IPA_LOG_ENTRY();
-
-	result = ipa_rm_delete_dependency
-		(DRV_RESOURCE_ID,
-		IPA_RM_RESOURCE_USB_CONS);
-	if (result && result != -EINPROGRESS) {
-		RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n");
-		goto bail;
-	}
-	RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n");
-
-	result = ipa_rm_delete_dependency
-		(IPA_RM_RESOURCE_USB_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (result == -EINPROGRESS) {
-		RNDIS_IPA_DEBUG("RM dependency deletion is in progress");
-	} else if (result) {
-		RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n");
-		goto bail;
-	} else {
-		RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n");
-	}
-
-	result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID);
-	if (result) {
-		RNDIS_IPA_ERROR("Fail to destroy inactivity timern");
-		goto bail;
-	}
-	RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroy\n");
-
-	result = ipa_rm_delete_resource(DRV_RESOURCE_ID);
-	if (result) {
-		RNDIS_IPA_ERROR("resource deletion failed\n");
-		goto bail;
-	}
-	RNDIS_IPA_DEBUG
-		("Netdev RM resource was deleted (resid:%d)\n",
-		DRV_RESOURCE_ID);
-
-	RNDIS_IPA_LOG_EXIT();
-
-bail:
-	return result;
-}
 
 static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
 {
@@ -1998,52 +1796,6 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
 	return 0;
 }
 
-/**
- * resource_request() - request for the Netdev resource
- * @rndis_ipa_ctx: main driver context
- *
- * This function shall send the IPA resource manager inactivity time a request
- * to Grant the Netdev producer.
- * In case the resource is already Granted the function shall return immediately
- * and "pet" the inactivity timer.
- * In case the resource was not already Granted this function shall
- * return EINPROGRESS and the Netdev shall stop the send queue until
- * the IPA resource manager notify it that the resource is
- * granted (done in a differ context)
- */
-static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	int result = 0;
-
-	if (!rm_enabled(rndis_ipa_ctx))
-		return result;
-
-	if (ipa_pm_is_used())
-		return ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
-
-	return ipa_rm_inactivity_timer_request_resource(
-			DRV_RESOURCE_ID);
-
-}
-
-/**
- * resource_release() - release the Netdev resource
- * @rndis_ipa_ctx: main driver context
- *
- * start the inactivity timer count down.by using the IPA resource
- * manager inactivity time.
- * The actual resource release shall occur only if no request shall be done
- * during the INACTIVITY_MSEC_DELAY.
- */
-static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	if (!rm_enabled(rndis_ipa_ctx))
-		return;
-	if (ipa_pm_is_used())
-		ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
-	else
-		ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
-}
 
 /**
  * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
@@ -2131,19 +1883,6 @@ static bool tx_filter(struct sk_buff *skb)
 	return true;
 }
 
-/**
- * rm_enabled() - allow the use of resource manager Request/Release to
- *  be bypassed
- * @rndis_ipa_ctx: main driver context
- *
- * By disabling the resource manager flag the Request for the Netdev resource
- * shall be bypassed and the packet shall be sent.
- * accordingly, Release request shall be bypass as well.
- */
-static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	return rndis_ipa_ctx->rm_enable;
-}
 
 /**
  * rndis_ipa_ep_registers_cfg() - configure the USB endpoints
@@ -2419,14 +2158,6 @@ static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx)
 		goto fail_file;
 	}
 
-	file = debugfs_create_bool
-		("rm_enable", flags_read_write,
-		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable);
-	if (!file) {
-		RNDIS_IPA_ERROR("could not create debugfs rm file\n");
-		goto fail_file;
-	}
-
 	file = debugfs_create_u32
 		("outstanding_high", flags_read_write,
 		rndis_ipa_ctx->directory,
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 9276537..3d71e39 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -428,8 +428,6 @@ int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 const char *ipa_get_version_string(enum ipa_hw_type ver);
 int ipa_start_gsi_channel(u32 clnt_hdl);
 
-bool ipa_pm_is_used(void);
-
 int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr,
 		struct sg_table *in_sgt_ptr);
 int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
@@ -437,14 +435,17 @@ int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
 int ipa_ut_module_init(void);
 void ipa_ut_module_exit(void);
 
-int ipa_wigig_uc_init(
+int ipa_wigig_internal_init(
 	struct ipa_wdi_uc_ready_params *inout,
 	ipa_wigig_misc_int_cb int_notify,
 	phys_addr_t *uc_db_pa);
 
-int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out);
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent);
 
-int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out);
+int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv);
 
 int ipa_wigig_uc_msi_init(
 	bool init,
@@ -466,6 +467,8 @@ int ipa_wigig_send_msg(int msg_type,
 	const char *netdev_name, u8 *mac,
 	enum ipa_client_type client, bool to_wigig);
 
+int ipa_wigig_save_regs(void);
+
 void ipa_register_client_callback(int (*client_cb)(bool is_lock),
 			bool (*teth_port_state)(void), u32 ipa_ep_idx);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
index 1e2a6c4..56c9877 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
@@ -696,7 +696,7 @@ static struct reg_access_funcs_s *get_access_funcs(u32 addr)
 
 	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
 		if (addr >= mem_access_map[i].addr_range_begin &&
-			addr <= mem_access_map[i].addr_range_end) {
+			addr <  mem_access_map[i].addr_range_end) {
 			return mem_access_map[i].access[asub];
 		}
 	}
@@ -879,36 +879,40 @@ void ipa_save_registers(void)
 		if (!ipa_reg_save.gsi.ch_cntxt.a7[
 				i].gsi_map_ee_n_ch_k_vp_table.valid)
 			continue;
+
 		ipa_reg_save.gsi.ch_cntxt.a7[
 			i].mcs_channel_scratch.scratch4.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+
 		ipa_reg_save.gsi.ch_cntxt.a7[
 			i].mcs_channel_scratch.scratch5.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
 	}
 
 	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
 		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
 			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
-		u32 n = phys_ch_idx*IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
 
 		if (!ipa_reg_save.gsi.ch_cntxt.uc[
 				i].gsi_map_ee_n_ch_k_vp_table.valid)
 			continue;
+
 		ipa_reg_save.gsi.ch_cntxt.uc[
 			i].mcs_channel_scratch.scratch4.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+
 		ipa_reg_save.gsi.ch_cntxt.uc[
 			i].mcs_channel_scratch.scratch5.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
 	}
 
 	/*
@@ -989,6 +993,12 @@ void ipa_save_registers(void)
 			ipa_reg_save.ipa.ipa_gsi_ptr[i] =
 				in_dword(IPA_GSI_ADDR + (i * sizeof(u32)));
 		}
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_iu_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_sram_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_mbox_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_hram_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_seq_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_gsi_ptr);
 	}
 
 	ipa_reg_save_anomaly_check();
@@ -1483,6 +1493,7 @@ int ipa_reg_save_init(u32 value)
 		 i++)
 		*(ipa_regs_to_save_array[num_regs + i].dst_addr) = 0x0;
 
+	ipa_reg_save.ipa.ipa_gsi_ptr  = NULL;
 	ipa_reg_save.ipa.ipa_seq_ptr  = NULL;
 	ipa_reg_save.ipa.ipa_hram_ptr = NULL;
 	ipa_reg_save.ipa.ipa_mbox_ptr = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
index 873d397..5061cfa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
@@ -13,16 +13,6 @@
 #include "ipa_pkt_cntxt.h"
 #include "ipa_hw_common_ex.h"
 
-/*
- * The following macros are used to peek and poke register values and
- * are required by some of the macros and include files that follow...
- */
-#define my_in_dword(addr) \
-	(readl(addr))
-
-#define my_out_dword(addr, val) \
-	({ __iowmb(); writel_relaxed((val), (addr)); })
-
 #define IPA_0_IPA_WRAPPER_BASE 0 /* required by following includes */
 
 #include "ipa_hwio.h"
@@ -56,6 +46,10 @@
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1   (0xC)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2   (0xD)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3   (0xE)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4   (0xF)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5   (0x10)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6   (0x11)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7   (0x12)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0   (0x13)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1   (0x14)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2   (0x15)
@@ -82,6 +76,16 @@
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2 (0x35)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3 (0x36)
 #define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR     (0x3A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0  (0x3C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1  (0x3D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2    (0x1D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1   (0x3E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2   (0x3F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5   (0x40)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5    (0x41)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3   (0x42)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0   (0x43)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8   (0x44)
 
 #define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
 #define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
@@ -90,6 +94,9 @@
 
 #define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
 
+#define IPA_GSI_OFFSET_WORDS_SCRATCH4            6
+#define IPA_GSI_OFFSET_WORDS_SCRATCH5            7
+
 #define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK 0x7E000
 #define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT    13
 
@@ -945,6 +952,10 @@ static u32 ipa_reg_save_gsi_ch_test_bus_selector_array[] = {
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2,
@@ -971,6 +982,16 @@ static u32 ipa_reg_save_gsi_ch_test_bus_selector_array[] = {
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3,
 	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8,
 };
 
 /*
@@ -1292,7 +1313,7 @@ struct regs_save_hierarchy_s {
 static inline u32
 act_read(void __iomem *addr)
 {
-	u32 val = my_in_dword(addr);
+	u32 val = ioread32(addr);
 
 	return val;
 }
@@ -1303,7 +1324,7 @@ act_read(void __iomem *addr)
 static inline void
 act_write(void __iomem *addr, u32 val)
 {
-	my_out_dword(addr, val);
+	iowrite32(val, addr);
 }
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d606dae..ba3b24e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -121,6 +121,16 @@ static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
 static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
 	ipa_dec_clients_disable_clks_on_wq);
 
+static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg);
+static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg);
+static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
+static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_query(unsigned long arg);
+
 static struct ipa3_plat_drv_res ipa3_res = {0, };
 
 static struct clk *ipa3_clk;
@@ -203,6 +213,10 @@ int ipa3_active_clients_log_print_table(char *buf, int size)
 	cnt += scnprintf(buf + cnt, size - cnt,
 			"\nTotal active clients count: %d\n",
 			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+
+	if (ipa3_is_mhip_offload_enabled())
+		cnt += ipa_mpm_panic_handler(buf + cnt, size - cnt);
+
 	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
 		flags);
 
@@ -246,6 +260,24 @@ static int ipa3_clean_modem_rule(void)
 	return val;
 }
 
+static int ipa3_clean_mhip_dl_rule(void)
+{
+	struct ipa_remove_offload_connection_req_msg_v01 req;
+
+	memset(&req, 0, sizeof(struct
+		ipa_remove_offload_connection_req_msg_v01));
+
+	req.clean_all_rules_valid = true;
+	req.clean_all_rules = true;
+
+	if (ipa3_qmi_rmv_offload_request_send(&req)) {
+		IPAWANDBG("clean dl rule cache failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
 static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
 		unsigned long event, void *ptr)
 {
@@ -647,6 +679,805 @@ static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
 	return 0;
 }
 
+static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_rt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_v2 *)header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size >
+		sizeof(struct ipa_rt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_i) * pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size,
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_usr_v2(
+		(struct ipa_ioc_add_rt_rule_v2 *)header, true)) {
+		IPAERR_RL("ipa3_add_rt_rule_usr_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size,
+			kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header,
+			(const void __user *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule_ext_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size >
+		sizeof(struct ipa_rt_rule_add_ext_i))) {
+		IPAERR_RL("unexpected rule_add_ext_size %d\n",
+		((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_ext_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i *
+			sizeof(struct ipa_rt_rule_add_ext_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size,
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_ext_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_ext_v2(
+		(struct ipa_ioc_add_rt_rule_ext_v2 *)header)) {
+		IPAERR_RL("ipa3_add_rt_rule_ext_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size,
+			kptr + i *
+			sizeof(struct ipa_rt_rule_add_ext_i),
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_rt_rule_after_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size >
+		sizeof(struct ipa_rt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size,
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_after_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_after_v2(
+		(struct ipa_ioc_add_rt_rule_after_v2 *)header)) {
+		IPAERR_RL("ipa3_add_rt_rule_after_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size,
+			kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_mdfy_rt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size >
+		sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("unexpected rule_mdfy_size %d\n",
+		((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_mdfy_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
+			(void *)param + i *
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size,
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_mdfy_rt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_mdfy_rt_rule_v2((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)) {
+		IPAERR_RL("ipa3_mdfy_rt_rule_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size,
+			kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_flt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_flt_rule_v2 *)header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size >
+		sizeof(struct ipa_flt_rule_add_i))) {
+		IPAERR_RL("unexpected flt_rule_size %d\n",
+		((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size,
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_flt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_flt_rule_usr_v2((struct ipa_ioc_add_flt_rule_v2 *)
+			header, true)) {
+		IPAERR_RL("ipa3_add_flt_rule_usr_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size,
+			kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_flt_rule_after_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_flt_rule_after_v2 *)
+		 header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size >
+		sizeof(struct ipa_flt_rule_add_i))) {
+		IPAERR_RL("unexpected flt_rule_size %d\n",
+		((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size,
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_flt_rule_after_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_flt_rule_after_v2(
+		(struct ipa_ioc_add_flt_rule_after_v2 *)header)) {
+		IPAERR_RL("ipa3_add_flt_rule_after_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size,
+			kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_mdfy_flt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		 header)->num_rules;
+	if (unlikely(((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size >
+		sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("unexpected rule_mdfy_size %d\n",
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_mdfy_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
+			(void *)param + i *
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size,
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size);
+	/* point rules at kptr via the mdfy_flt struct (not add_flt_after) */
+	((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_mdfy_flt_rule_v2
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)header)) {
+		IPAERR_RL("ipa3_mdfy_flt_rule_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size,
+			kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg)
+{
+	int retval = 0;
+	u8 header[128] = { 0 };
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
+		IPAERR("copy_from_user fails\n");
+		return -EFAULT;
+	}
+	if (((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hw_counter.num_counters >
+		IPA_FLT_RT_HW_COUNTER ||
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->sw_counter.num_counters >
+		IPA_FLT_RT_SW_COUNTER) {
+		IPAERR("failed: wrong sw/hw num_counters\n");
+		return -EPERM;
+	}
+	if (((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hw_counter.num_counters == 0 &&
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->sw_counter.num_counters == 0) {
+		IPAERR("failed: both sw/hw num_counters 0\n");
+		return -EPERM;
+	}
+	retval = ipa3_alloc_counter_id
+		((struct ipa_ioc_flt_rt_counter_alloc *)header);
+	if (retval < 0) {
+		IPAERR("ipa3_alloc_counter_id failed\n");
+		return retval;
+	}
+	if (copy_to_user((void __user *)arg, header,
+		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
+		IPAERR("copy_to_user fails\n");
+		ipa3_counter_remove_hdl(
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hdl);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_flt_rt_query))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_flt_rt_query *)
+		header)->end_id - ((struct ipa_ioc_flt_rt_query *)
+		header)->start_id + 1;
+	if (pre_entry <= 0 || pre_entry > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("IPA_IOC_FNR_COUNTER_QUERY failed: num %d\n",
+			pre_entry);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	if (((struct ipa_ioc_flt_rt_query *)header)->stats_size
+		> sizeof(struct ipa_flt_rt_stats)) {
+		IPAERR_RL("unexpected stats_size %d\n",
+		((struct ipa_ioc_flt_rt_query *)header)->stats_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_flt_rt_query *)
+		header)->stats_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rt_stats) * pre_entry;
+	uptr = ((struct ipa_ioc_flt_rt_query *)
+		header)->stats;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL stats\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rt_stats),
+			(void *)param + i *
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size,
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_flt_rt_query *)
+		header)->stats = (u64)kptr;
+	retval = ipa_get_flt_rt_stats
+		((struct ipa_ioc_flt_rt_query *)header);
+	if (retval < 0) {
+		IPAERR("ipa_get_flt_rt_stats failed\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size,
+			kptr + i * sizeof(struct ipa_flt_rt_stats),
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
 static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int retval = 0;
@@ -661,12 +1492,12 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	struct ipa_ioc_v4_nat_del nat_del;
 	struct ipa_ioc_nat_ipv6ct_table_del table_del;
 	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
-	struct ipa_ioc_rm_dependency rm_depend;
 	struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
 	struct ipa_ioc_get_vlan_mode vlan_mode;
 	struct ipa_ioc_wigig_fst_switch fst_switch;
 	size_t sz;
 	int pre_entry;
+	int hdl;
 
 	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
 
@@ -1497,31 +2328,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	case IPA_IOC_RM_ADD_DEPENDENCY:
-		/* deprecate if IPA PM is used */
-		if (ipa3_ctx->use_ipa_pm)
-			return -EINVAL;
+		/* IPA RM is deprecated because IPA PM is used */
+		IPAERR("using obsolete command: IPA_IOC_RM_ADD_DEPENDENCY");
+		return -EINVAL;
 
-		if (copy_from_user(&rm_depend, (const void __user *)arg,
-			sizeof(struct ipa_ioc_rm_dependency))) {
-			retval = -EFAULT;
-			break;
-		}
-		retval = ipa_rm_add_dependency_from_ioctl(
-			rm_depend.resource_name, rm_depend.depends_on_name);
-		break;
 	case IPA_IOC_RM_DEL_DEPENDENCY:
-		/* deprecate if IPA PM is used */
-		if (ipa3_ctx->use_ipa_pm)
-			return -EINVAL;
+		/* IPA RM is deprecated because IPA PM is used */
+		IPAERR("using obsolete command: IPA_IOC_RM_DEL_DEPENDENCY");
+		return -EINVAL;
 
-		if (copy_from_user(&rm_depend, (const void __user *)arg,
-			sizeof(struct ipa_ioc_rm_dependency))) {
-			retval = -EFAULT;
-			break;
-		}
-		retval = ipa_rm_delete_dependency_from_ioctl(
-			rm_depend.resource_name, rm_depend.depends_on_name);
-		break;
 	case IPA_IOC_GENERATE_FLT_EQ:
 		{
 			struct ipa_ioc_generate_flt_eq flt_eq;
@@ -1756,7 +2571,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		memset(&nat_del, 0, sizeof(nat_del));
 		nat_del.table_index = 0;
 		retval = ipa3_nat_del_cmd(&nat_del);
-		retval = ipa3_clean_modem_rule();
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			retval = ipa3_clean_mhip_dl_rule();
+		else
+			retval = ipa3_clean_modem_rule();
+		ipa3_counter_id_remove_all();
 		break;
 
 	case IPA_IOC_QUERY_WLAN_CLIENT:
@@ -1780,6 +2599,53 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 
+	case IPA_IOC_ADD_RT_RULE_V2:
+		retval = ipa3_ioctl_add_rt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_EXT_V2:
+		retval = ipa3_ioctl_add_rt_rule_ext_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_AFTER_V2:
+		retval = ipa3_ioctl_add_rt_rule_after_v2(arg);
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE_V2:
+		retval = ipa3_ioctl_mdfy_rt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_V2:
+		retval = ipa3_ioctl_add_flt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_AFTER_V2:
+		retval = ipa3_ioctl_add_flt_rule_after_v2(arg);
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE_V2:
+		retval = ipa3_ioctl_mdfy_flt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_ALLOC:
+		retval = ipa3_ioctl_fnr_counter_alloc(arg);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_DEALLOC:
+		hdl = (int)arg;
+		if (hdl < 0) {
+			IPAERR("IPA_FNR_COUNTER_DEALLOC failed: hdl %d\n",
+				hdl);
+			retval = -EPERM;
+			break;
+		}
+		ipa3_counter_remove_hdl(hdl);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_QUERY:
+		retval = ipa3_ioctl_fnr_counter_query(arg);
+		break;
+
 	case IPA_IOC_WIGIG_FST_SWITCH:
 		IPADBG("Got IPA_IOCTL_WIGIG_FST_SWITCH\n");
 		if (copy_from_user(&fst_switch, (const void __user *)arg,
@@ -2088,6 +2954,12 @@ static void ipa3_q6_avoid_holb(void)
 			ipahal_write_reg_n_fields(
 				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 				ep_idx, &ep_holb);
+
+			/* IPA4.5 issue requires HOLB_EN to be written twice */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+				ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					ep_idx, &ep_holb);
 		}
 	}
 }
@@ -2539,6 +3411,19 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 	return retval;
 }
 
+/*
+ * ipa3_update_ssr_state() - updating current SSR state
+ * @is_ssr:	[in] Current SSR state
+ */
+
+void ipa3_update_ssr_state(bool is_ssr)
+{
+	if (is_ssr)
+		atomic_set(&ipa3_ctx->is_ssr, 1);
+	else
+		atomic_set(&ipa3_ctx->is_ssr, 0);
+}
+
 /**
  * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
  *                    in IPA HW. This is performed in case of SSR.
@@ -2552,6 +3437,7 @@ void ipa3_q6_pre_shutdown_cleanup(void)
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
+	ipa3_update_ssr_state(true);
 	if (!ipa3_ctx->ipa_endp_delay_wa)
 		ipa3_q6_pipe_delay(true);
 	ipa3_q6_avoid_holb();
@@ -3646,8 +4532,8 @@ void ipa3_enable_clks(void)
 	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
 	    ipa3_get_bus_vote()))
 		WARN(1, "bus scaling failed");
-	atomic_set(&ipa3_ctx->ipa_clk_vote, 1);
 	ipa3_ctx->ctrl->ipa3_enable_clks();
+	atomic_set(&ipa3_ctx->ipa_clk_vote, 1);
 }
 
 
@@ -3680,8 +4566,7 @@ void ipa3_disable_clks(void)
 
 	ipa3_ctx->ctrl->ipa3_disable_clks();
 
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_set_clock_index(0);
+	ipa_pm_set_clock_index(0);
 
 	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
 		WARN(1, "bus scaling failed");
@@ -4180,14 +5065,12 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
 				void *private_data,
 				void *interrupt_data)
 {
-	enum ipa_rm_resource_name resource;
 	u32 suspend_data =
 		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
 	u32 bmsk = 1;
 	u32 i = 0;
 	int res;
 	struct ipa_ep_cfg_holb holb_cfg;
-	struct mutex *pm_mutex_ptr = &ipa3_ctx->transport_pm.transport_pm_mutex;
 	u32 pipe_bitmask = 0;
 
 	IPADBG("interrupt=%d, interrupt_data=%u\n",
@@ -4196,55 +5079,13 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
 
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
 		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
-			if (ipa3_ctx->use_ipa_pm) {
 				pipe_bitmask |= bmsk;
-				continue;
-			}
-			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
-				/*
-				 * pipe will be unsuspended as part of
-				 * enabling IPA clocks
-				 */
-				mutex_lock(pm_mutex_ptr);
-				if (!atomic_read(
-					&ipa3_ctx->transport_pm.dec_clients)
-					) {
-					IPA_ACTIVE_CLIENTS_INC_EP(
-						ipa3_ctx->ep[i].client);
-					IPADBG_LOW("Pipes un-suspended.\n");
-					IPADBG_LOW("Enter poll mode.\n");
-					atomic_set(
-					&ipa3_ctx->transport_pm.dec_clients,
-					1);
-					/*
-					 * acquire wake lock as long as suspend
-					 * vote is held
-					 */
-					ipa3_inc_acquire_wakelock();
-					ipa3_process_irq_schedule_rel();
-				}
-				mutex_unlock(pm_mutex_ptr);
-			} else {
-				resource = ipa3_get_rm_resource_from_ep(i);
-				res =
-				ipa_rm_request_resource_with_timer(resource);
-				if (res == -EPERM &&
-					IPA_CLIENT_IS_CONS(
-					   ipa3_ctx->ep[i].client)) {
-					holb_cfg.en = 1;
-					res = ipa3_cfg_ep_holb_by_client(
-					   ipa3_ctx->ep[i].client, &holb_cfg);
-					WARN(res, "holb en failed\n");
-				}
-			}
 		}
 	}
-	if (ipa3_ctx->use_ipa_pm) {
-		res = ipa_pm_handle_suspend(pipe_bitmask);
-		if (res) {
-			IPAERR("ipa_pm_handle_suspend failed %d\n", res);
-			return;
-		}
+	res = ipa_pm_handle_suspend(pipe_bitmask);
+	if (res) {
+		IPAERR("ipa_pm_handle_suspend failed %d\n", res);
+		return;
 	}
 }
 
@@ -4458,6 +5299,7 @@ static int ipa3_panic_notifier(struct notifier_block *this,
 	if (atomic_read(&ipa3_ctx->ipa_clk_vote)) {
 		ipahal_print_all_regs(false);
 		ipa_save_registers();
+		ipa_wigig_save_regs();
 	}
 
 	return NOTIFY_DONE;
@@ -4792,6 +5634,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
 		gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
 	}
+	gsi_props.skip_ieob_mask_wa = resource_p->skip_ieob_mask_wa;
 
 	result = gsi_register_device(&gsi_props,
 		&ipa3_ctx->gsi_dev_hdl);
@@ -5282,14 +6125,14 @@ static bool ipa_is_mem_dump_allowed(void)
  * Initialize the filter block by committing IPV4 and IPV6 default rules
  * Create empty routing table in system memory(no committing)
  * Create a char-device for IPA
- * Initialize IPA RM (resource manager)
+ * Initialize IPA PM (power manager)
  * Configure GSI registers (in GSI case)
  */
 static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		struct platform_device *ipa_pdev)
 {
 	int result = 0;
-	int i;
+	int i, j;
 	struct ipa3_rt_tbl_set *rset;
 	struct ipa_active_client_logging_info log_info;
 	struct cdev *cdev;
@@ -5311,6 +6154,15 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	for (i = 0; i < IPA_SMMU_CB_MAX; i++)
 		ipa3_ctx->s1_bypass_arr[i] = true;
 
+	/* initialize the gsi protocol info for uC debug stats */
+	for (i = 0; i < IPA_HW_PROTOCOL_MAX; i++) {
+		ipa3_ctx->gsi_info[i].protocol = i;
+		/* initialize all to be not started */
+		for (j = 0; j < MAX_CH_STATS_SUPPORTED; j++)
+			ipa3_ctx->gsi_info[i].ch_id_info[j].ch_id =
+				0xFF;
+	}
+
 	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
 	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
 	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
@@ -5329,7 +6181,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
 	ipa3_ctx->ee = resource_p->ee;
 	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
-	ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
 	ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
 	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
@@ -5631,6 +6482,12 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
 	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
 	idr_init(&rset->rule_ids);
+	idr_init(&ipa3_ctx->flt_rt_counters.hdl);
+	spin_lock_init(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	memset(&ipa3_ctx->flt_rt_counters.used_hw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_hw));
+	memset(&ipa3_ctx->flt_rt_counters.used_sw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_sw));
 
 	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
 	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
@@ -5677,30 +6534,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
 
 	/* Initialize Power Management framework */
-	if (ipa3_ctx->use_ipa_pm) {
-		result = ipa_pm_init(&ipa3_res.pm_init);
-		if (result) {
-			IPAERR("IPA PM initialization failed (%d)\n", -result);
-			result = -ENODEV;
-			goto fail_ipa_rm_init;
-		}
-		IPADBG("IPA resource manager initialized");
-	} else {
-		result = ipa_rm_initialize();
-		if (result) {
-			IPAERR("RM initialization failed (%d)\n", -result);
-			result = -ENODEV;
-			goto fail_ipa_rm_init;
-		}
-		IPADBG("IPA resource manager initialized");
-
-		result = ipa3_create_apps_resource();
-		if (result) {
-			IPAERR("Failed to create APPS_CONS resource\n");
-			result = -ENODEV;
-			goto fail_create_apps_resource;
-		}
+	result = ipa_pm_init(&ipa3_res.pm_init);
+	if (result) {
+		IPAERR("IPA PM initialization failed (%d)\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_pm_init;
 	}
+	IPADBG("IPA power manager initialized\n");
 
 	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
 
@@ -5770,14 +6610,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 fail_gsi_pre_fw_load_init:
 	ipa3_dma_shutdown();
 fail_ipa_dma_setup:
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_destroy();
-	else
-		ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
-fail_create_apps_resource:
-	if (!ipa3_ctx->use_ipa_pm)
-		ipa_rm_exit();
-fail_ipa_rm_init:
+	ipa_pm_destroy();
+fail_ipa_pm_init:
 	device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
@@ -5849,12 +6683,6 @@ static int get_ipa_dts_pm_info(struct platform_device *pdev,
 	int result;
 	int i, j;
 
-	ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
-		"qcom,use-ipa-pm");
-	IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
-	if (!ipa_drv_res->use_ipa_pm)
-		return 0;
-
 	result = of_property_read_u32(pdev->dev.of_node,
 		"qcom,msm-bus,num-cases",
 		&ipa_drv_res->pm_init.threshold_size);
@@ -5902,7 +6730,7 @@ static int get_ipa_dts_pm_info(struct platform_device *pdev,
 
 		result = of_property_read_string_index(pdev->dev.of_node,
 			"qcom,scaling-exceptions",
-			i * ipa_drv_res->pm_init.threshold_size,
+			i * (ipa_drv_res->pm_init.threshold_size + 1),
 			&ex[i].usecase);
 		if (result) {
 			IPAERR("failed to read qcom,scaling-exceptions");
@@ -5915,7 +6743,8 @@ static int get_ipa_dts_pm_info(struct platform_device *pdev,
 			result = of_property_read_string_index(
 				pdev->dev.of_node,
 				"qcom,scaling-exceptions",
-				i * ipa_drv_res->pm_init.threshold_size + j + 1,
+				i * (ipa_drv_res->pm_init.threshold_size + 1)
+				+ j + 1,
 				&str);
 			if (result) {
 				IPAERR("failed to read qcom,scaling-exceptions"
@@ -5965,6 +6794,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
 	ipa_drv_res->ipa_fltrt_not_hashable = false;
 	ipa_drv_res->ipa_endp_delay_wa = false;
+	ipa_drv_res->skip_ieob_mask_wa = false;
 
 	/* Get IPA HW Version */
 	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
@@ -6091,6 +6921,12 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	IPADBG(": use_bw_vote = %s\n",
 			ipa_drv_res->use_bw_vote
 			? "True" : "False");
+	ipa_drv_res->skip_ieob_mask_wa =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,skip-ieob-mask-wa");
+	IPADBG(": skip ieob mask wa = %s\n",
+			ipa_drv_res->skip_ieob_mask_wa
+			? "True" : "False");
 
 	ipa_drv_res->skip_uc_pipe_reset =
 		of_property_read_bool(pdev->dev.of_node,
@@ -6383,7 +7219,8 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
 	}
 
 	IPADBG("WLAN CB PROBE mapping retrieved\n");
-
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+							"dma-coherent");
 	cb->dev   = dev;
 	cb->valid = true;
 
@@ -6491,6 +7328,8 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 
 	IPADBG("UC CB PROBE mapping retrieved\n");
 
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
 	cb->dev   = dev;
 	cb->valid = true;
 
@@ -6581,6 +7420,8 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 
 	IPADBG("AP CB PROBE mapping retrieved\n");
 
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
 	cb->dev   = dev;
 	cb->valid = true;
 
@@ -6715,7 +7556,8 @@ static int ipa_smmu_11ad_cb_probe(struct device *dev)
 		IPAERR("could not get iommu domain\n");
 		return -EINVAL;
 	}
-
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+							"dma-coherent");
 	cb->dev   = dev;
 	cb->valid = true;
 
@@ -6982,16 +7824,8 @@ int ipa3_ap_suspend(struct device *dev)
 		}
 	}
 
-	if (ipa3_ctx->use_ipa_pm) {
-		ipa_pm_deactivate_all_deferred();
-	} else {
-		/*
-		 * Release transport IPA resource without waiting
-		 * for inactivity timer
-		 */
-		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
-		ipa3_transport_release_resource(NULL);
-	}
+	ipa_pm_deactivate_all_deferred();
+
 	IPADBG("Exit\n");
 
 	return 0;
@@ -7095,24 +7929,28 @@ int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
 int ipa3_iommu_map(struct iommu_domain *domain,
 	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
 {
-	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
-	struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+	struct ipa_smmu_cb_ctx *cb = NULL;
 
 	IPADBG_LOW("domain =0x%pK iova 0x%lx\n", domain, iova);
 	IPADBG_LOW("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
 
 	/* make sure no overlapping */
 	if (domain == ipa3_get_smmu_domain()) {
-		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+		if (iova >= cb->va_start && iova < cb->va_end) {
 			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
 			ipa_assert();
 			return -EFAULT;
 		}
-	} else if (domain == ipa3_get_wlan_smmu_domain() ||
-		domain == ipa3_get_11ad_smmu_domain()) {
-		/* wlan\11ad is one time map */
+	} else if (domain == ipa3_get_wlan_smmu_domain()) {
+		/* wlan is one time map */
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+	} else if (domain == ipa3_get_11ad_smmu_domain()) {
+		/* 11ad is one time map */
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
 	} else if (domain == ipa3_get_uc_smmu_domain()) {
-		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+		if (iova >= cb->va_start && iova < cb->va_end) {
 			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
 			ipa_assert();
 			return -EFAULT;
@@ -7123,6 +7961,18 @@ int ipa3_iommu_map(struct iommu_domain *domain,
 		return -EFAULT;
 	}
 
+	if (cb == NULL) {
+		IPAERR("Unexpected cb turning NULL for domain 0x%pK\n", domain);
+		ipa_assert();
+	}
+
+	/*
+	 * IOMMU_CACHE is needed to make the entries cachable
+	 * if cache coherency is enabled in dtsi.
+	 */
+	if (cb->is_cache_coherent)
+		prot |= IOMMU_CACHE;
+
 	return iommu_map(domain, iova, paddr, size, prot);
 }
 
@@ -7148,7 +7998,8 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
 
 	switch (in->smmu_client) {
 	case IPA_SMMU_WLAN_CLIENT:
-		if (ipa3_ctx->ipa_wdi3_over_gsi)
+		if (ipa3_ctx->ipa_wdi3_over_gsi ||
+			ipa3_ctx->ipa_wdi2_over_gsi)
 			is_smmu_enable =
 				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] ||
 				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 0576d51..6b56ab6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -30,6 +30,7 @@
 
 static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
 	bool *is_empty);
+static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl);
 
 int ipa3_enable_data_path(u32 clnt_hdl)
 {
@@ -248,6 +249,69 @@ static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
 		return true;
 }
 
+static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl)
+{
+	struct IpaHwOffloadStatsAllocCmdData_t *gsi_info;
+	struct ipa3_ep_context *ep;
+	enum ipa_client_type client_type;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameters.\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		switch (client_type) {
+		case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[2].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[2].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[3].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[3].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		default:
+			IPADBG("client_type %d not supported\n",
+				client_type);
+		}
+	}
+}
+
 int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
 	enum ipa_smmu_cb_type cb_type)
 {
@@ -750,6 +814,7 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
 		IPAERR("Error starting channel: %d\n", gsi_res);
 		goto write_chan_scratch_fail;
 	}
+	ipa3_start_gsi_debug_monitor(clnt_hdl);
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 
@@ -1230,6 +1295,12 @@ int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
 		ipahal_write_reg_n_fields(
 			IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 			pipe_idx, &ep_holb);
+
+		/* IPA4.5 issue requires HOLB_EN to be written twice */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+				pipe_idx, &ep_holb);
 	}
 	client_lock_unlock_cb(client, false);
 	return 0;
@@ -1505,6 +1576,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 
 start_dl_and_exit:
 	gsi_start_channel(dl_ep->gsi_chan_hdl);
+	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);
 unsuspend_dl_and_exit:
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
 		/* Unsuspend the DL EP */
@@ -1522,6 +1594,7 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
 	struct ipa3_ep_context *ep;
 	int result = -EFAULT;
 	enum gsi_status gsi_res;
+	enum ipa_client_type client_type;
 
 	IPADBG("entry\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes  ||
@@ -1531,18 +1604,19 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
 	}
 
 	ep = &ipa3_ctx->ep[clnt_hdl];
-
+	client_type = ipa3_get_client_mapping(clnt_hdl);
 	if (!ep->keep_ipa_awake)
-		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+		IPA_ACTIVE_CLIENTS_INC_EP(client_type);
 
 	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
 	if (gsi_res != GSI_STATUS_SUCCESS) {
 		IPAERR("Error starting channel: %d\n", gsi_res);
 		goto start_chan_fail;
 	}
+	ipa3_start_gsi_debug_monitor(clnt_hdl);
 
 	if (!ep->keep_ipa_awake)
-		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+		IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
 
 	IPADBG("exit\n");
 	return 0;
@@ -1587,12 +1661,14 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
 	gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
 	if (gsi_res != GSI_STATUS_SUCCESS)
 		IPAERR("Error starting DL channel: %d\n", gsi_res);
+	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);
 
 	/* Start UL channel */
 	if (!is_dpl) {
 		gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
 		if (gsi_res != GSI_STATUS_SUCCESS)
 			IPAERR("Error starting UL channel: %d\n", gsi_res);
+		ipa3_start_gsi_debug_monitor(ul_clnt_hdl);
 	}
 
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3a5bed3..adb0c99 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -12,6 +12,7 @@
 #include "../ipa_rm_i.h"
 #include "ipahal/ipahal_nat.h"
 #include "ipa_odl.h"
+#include "ipa_qmi_service.h"
 
 #define IPA_MAX_ENTRY_STRING_LEN 500
 #define IPA_MAX_MSG_LEN 4096
@@ -74,6 +75,8 @@ const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
 	__stringify(IPA_GSB_DISCONNECT),
 	__stringify(IPA_COALESCE_ENABLE),
 	__stringify(IPA_COALESCE_DISABLE),
+	__stringify_1(WIGIG_CLIENT_CONNECT),
+	__stringify_1(WIGIG_FST_SWITCH),
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
@@ -317,17 +320,41 @@ static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
 {
 	s8 option = 0;
 	int ret;
+	uint32_t bw_mbps = 0;
 
 	ret = kstrtos8_from_user(buf, count, 0, &option);
 	if (ret)
 		return ret;
 
-	if (option == 1)
-		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	else if (option == 0)
+	switch (option) {
+	case 0:
 		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-	else
+		bw_mbps = 0;
+		break;
+	case 1:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 0;
+		break;
+	case 2:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 700;
+		break;
+	case 3:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 3000;
+		break;
+	case 4:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 7000;
+		break;
+	default:
+		pr_err("Not support this vote (%d)\n", option);
 		return -EFAULT;
+	}
+	if (ipa3_vote_for_bus_bw(&bw_mbps)) {
+		IPAERR("Failed to vote for bus BW (%u)\n", bw_mbps);
+		return -EFAULT;
+	}
 
 	return count;
 }
@@ -707,6 +734,9 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
 				pr_err("rule_id:%u max_prio:%u prio:%u ",
 					entry->rule_id, entry->rule.max_prio,
 					entry->prio);
+				pr_err("enable_stats:%u counter_id:%u\n",
+					entry->rule.enable_stats,
+					entry->rule.cnt_idx);
 				pr_err("hashable:%u retain_hdr:%u ",
 					entry->rule.hashable,
 					entry->rule.retain_hdr);
@@ -729,6 +759,9 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
 				pr_err("rule_id:%u max_prio:%u prio:%u ",
 					entry->rule_id, entry->rule.max_prio,
 					entry->prio);
+				pr_err("enable_stats:%u counter_id:%u\n",
+					entry->rule.enable_stats,
+					entry->rule.cnt_idx);
 				pr_err("hashable:%u retain_hdr:%u ",
 					entry->rule.hashable,
 					entry->rule.retain_hdr);
@@ -803,9 +836,9 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
 					rules[rl].hdr_ofst,
 					rules[rl].eq_attrib.rule_eq_bitmap);
 
-			pr_err("rule_id:%u prio:%u retain_hdr:%u ",
-				rules[rl].id, rules[rl].priority,
-				rules[rl].retain_hdr);
+			pr_err("rule_id:%u cnt_id:%hhu prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority, rules[rl].retain_hdr);
 			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
 			if (res) {
 				IPAERR_RL("failed read attrib eq\n");
@@ -838,9 +871,9 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
 					rules[rl].hdr_ofst,
 					rules[rl].eq_attrib.rule_eq_bitmap);
 
-			pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
-				rules[rl].id, rules[rl].priority,
-				rules[rl].retain_hdr);
+			pr_err("rule_id:%u cnt_id:%hhu prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority, rules[rl].retain_hdr);
 			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
 			if (res) {
 				IPAERR_RL("failed read attrib eq\n");
@@ -952,6 +985,9 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
 			pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
 				entry->rule.hashable, entry->rule_id,
 				entry->rule.max_prio, entry->prio);
+			pr_err("enable_stats:%u counter_id:%u\n",
+					entry->rule.enable_stats,
+					entry->rule.cnt_idx);
 			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 				pr_err("pdn index %d, set metadata %d ",
 					entry->rule.pdn_idx,
@@ -1019,8 +1055,9 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
 				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
 			pr_err("attrib_mask:%08x retain_hdr:%d ",
 				bitmap, rules[rl].rule.retain_hdr);
-			pr_err("rule_id:%u prio:%u ",
-				rules[rl].id, rules[rl].priority);
+			pr_err("rule_id:%u cnt_id:%hhu prio:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority);
 			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 				pr_err("pdn: %u, set_metadata: %u ",
 					rules[rl].rule.pdn_idx,
@@ -1050,8 +1087,9 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
 				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
 			pr_err("attrib_mask:%08x retain_hdr:%d ",
 				bitmap, rules[rl].rule.retain_hdr);
-			pr_err("rule_id:%u  prio:%u ",
-				rules[rl].id, rules[rl].priority);
+			pr_err("rule_id:%u cnt_id:%hhu prio:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority);
 			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 				pr_err("pdn: %u, set_metadata: %u ",
 					rules[rl].rule.pdn_idx,
@@ -1840,40 +1878,11 @@ static ssize_t ipa3_read_ipv6ct(struct file *file,
 	return 0;
 }
 
-static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
-		size_t count, loff_t *ppos)
-{
-	int result, cnt = 0;
-
-	/* deprecate if IPA PM is used */
-	if (ipa3_ctx->use_ipa_pm) {
-		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-			"IPA RM is disabled\n");
-		goto ret;
-	}
-
-	result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
-	if (result < 0) {
-		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-				"Error in printing RM stat %d\n", result);
-		goto ret;
-	}
-	cnt += result;
-ret:
-	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
-}
-
 static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
 	int result, cnt = 0;
 
-	if (!ipa3_ctx->use_ipa_pm) {
-		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-			"IPA PM is disabled\n");
-		goto ret;
-	}
-
 	result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN);
 	if (result < 0) {
 		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
@@ -1890,12 +1899,6 @@ static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf,
 {
 	int result, cnt = 0;
 
-	if (!ipa3_ctx->use_ipa_pm) {
-		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-			"IPA PM is disabled\n");
-		goto ret;
-	}
-
 	result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN);
 	if (result < 0) {
 		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
@@ -1917,6 +1920,256 @@ static ssize_t ipa3_read_ipahal_regs(struct file *file, char __user *ubuf,
 	return 0;
 }
 
+static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa3_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+
+	if (!ipa3_get_wdi_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read WDI GSI stats\n");
+		cnt += nbytes;
+	}
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa3_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_wdi3_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read WDI GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_11ad_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	return 0;
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	return 0;
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa3_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_mhip_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS RingUtilCount=%u\n",
+			stats.ring[3].ringFull,
+			stats.ring[3].ringEmpty,
+			stats.ring[3].ringUsageHigh,
+			stats.ring[3].ringUsageLow,
+			stats.ring[3].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD RingUtilCount=%u\n",
+			stats.ring[2].ringFull,
+			stats.ring[2].ringEmpty,
+			stats.ring[2].ringUsageHigh,
+			stats.ring[2].ringUsageLow,
+			stats.ring[2].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Fail to read WDI GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa3_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_usb_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Fail to read WDI GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
 static void ipa_dump_status(struct ipahal_pkt_status *status)
 {
 	IPA_DUMP_STATUS_FIELD(status_opcode);
@@ -2147,10 +2400,6 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
 			.read = ipa3_read_ipv6ct,
 		}
 	}, {
-		"rm_stats", IPA_READ_ONLY_MODE, NULL, {
-			.read = ipa3_rm_read_stats,
-		}
-	}, {
 		"pm_stats", IPA_READ_ONLY_MODE, NULL, {
 			.read = ipa3_pm_read_stats,
 		}
@@ -2170,6 +2419,30 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
 		"ipa_dump_regs", IPA_READ_ONLY_MODE, NULL, {
 			.read = ipa3_read_ipahal_regs,
 		}
+	}, {
+		"wdi_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wdi_gsi_stats,
+		}
+	}, {
+		"wdi3_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wdi3_gsi_stats,
+		}
+	}, {
+		"11ad_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_11ad_gsi_stats,
+		}
+	}, {
+		"aqc_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_aqc_gsi_stats,
+		}
+	}, {
+		"mhip_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_mhip_gsi_stats,
+		}
+	}, {
+		"usb_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_usb_gsi_stats,
+		}
 	}
 };
 
@@ -2251,6 +2524,8 @@ void ipa3_debugfs_init(void)
 
 	ipa_debugfs_init_stats(dent);
 
+	ipa3_wigig_init_debugfs_i(dent);
+
 	return;
 
 fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_defs.h
new file mode 100644
index 0000000..9df2bc4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_defs.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_DEFS_H_
+#define _IPA_DEFS_H_
+#include <linux/ipa.h>
+
+/**
+ * struct ipa_rt_rule_i - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @coalesce: bool to decide whether packets should be coalesced or not
+ * @enable_stats: is true when we want to enable stats for this
+ * rt rule.
+ * @cnt_idx: if enable_stats is 1 and cnt_idx is 0, then cnt_idx
+ * will be assigned by ipa driver.
+ */
+struct ipa_rt_rule_i {
+	enum ipa_client_type dst;
+	u32 hdr_hdl;
+	u32 hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	u8 max_prio;
+	u8 hashable;
+	u8 retain_hdr;
+	u8 coalesce;
+	u8 enable_stats;
+	u8 cnt_idx;
+};
+
+/**
+ * struct ipa_flt_rule_i - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source\destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ * @enable_stats: is true when we want to enable stats for this
+ * flt rule.
+ * @cnt_idx: if 0 means disable, otherwise use for index.
+ * will be assigned by ipa driver.
+ */
+struct ipa_flt_rule_i {
+	u8 retain_hdr;
+	u8 to_uc;
+	enum ipa_flt_action action;
+	u32 rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rt_tbl_idx;
+	u8 eq_attrib_type;
+	u8 max_prio;
+	u8 hashable;
+	u16 rule_id;
+	u8 set_metadata;
+	u8 pdn_idx;
+	u8 enable_stats;
+	u8 cnt_idx;
+};
+
+#endif /* _IPA_DEFS_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index ca78a5b..553dfb6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -111,8 +111,6 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 	u32 ring_size, gfp_t mem_flag);
 static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
-static int ipa_setup_coal_def_pipe(struct ipa_sys_connect_params *sys_in,
-	struct ipa3_ep_context *ep_coalescing);
 static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
 static int ipa_populate_tag_field(struct ipa3_desc *desc,
 		struct ipa3_tx_pkt_wrapper *tx_pkt,
@@ -795,10 +793,7 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
 	int cnt;
 	int ret;
 
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_activate_sync(sys->pm_hdl);
-	else
-		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_pm_activate_sync(sys->pm_hdl);
 start_poll:
 	inactive_cycles = 0;
 	do {
@@ -827,10 +822,7 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
 	if (ret == -GSI_STATUS_PENDING_IRQ)
 		goto start_poll;
 
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_deferred_deactivate(sys->pm_hdl);
-	else
-		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	ipa_pm_deferred_deactivate(sys->pm_hdl);
 }
 
 static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
@@ -884,6 +876,11 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
 				SUSPEND_MAX_SLEEP_RX);
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN");
+		} else if (sys->ep->client == IPA_CLIENT_ODL_DPL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_ODL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
 		} else
 			IPAERR("Unexpected event %d\n for client %d\n",
 				event, sys->ep->client);
@@ -912,7 +909,7 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
 int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
-	int i, ipa_ep_idx;
+	int i, ipa_ep_idx, wan_handle;
 	int result = -EINVAL;
 	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
 	struct ipahal_reg_coal_evict_lru evict_lru;
@@ -993,8 +990,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
 
 		/* create IPA PM resources for handling polling mode */
-		if (ipa3_ctx->use_ipa_pm &&
-			IPA_CLIENT_IS_CONS(sys_in->client)) {
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
 			pm_reg.name = ipa_clients_strings[sys_in->client];
 			pm_reg.callback = ipa_pm_sys_pipe_cb;
 			pm_reg.user_data = ep->sys;
@@ -1086,10 +1082,6 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		atomic_set(&ep->sys->repl->pending, 0);
 		ep->sys->repl->capacity = ep->sys->rx_pool_sz + 1;
 
-		/*double for wan_coal since it will be shared between 2 pipes */
-		if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
-			ep->sys->repl->capacity *= 2;
-
 		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
 				sizeof(void *), GFP_KERNEL);
 		if (!ep->sys->repl->cache) {
@@ -1154,7 +1146,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 
 		sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
 		sys_in->ipa_ep_cfg = ep_cfg_copy;
-		result = ipa_setup_coal_def_pipe(sys_in, ep);
+		result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
 		if (result) {
 			IPAERR("failed to setup default coalescing pipe\n");
 			goto fail_repl;
@@ -1170,8 +1162,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	ep->sys->repl->capacity = 0;
 	kfree(ep->sys->repl);
 fail_gen2:
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_deregister(ep->sys->pm_hdl);
+	ipa_pm_deregister(ep->sys->pm_hdl);
 fail_pm:
 	destroy_workqueue(ep->sys->repl_wq);
 fail_wq2:
@@ -1186,137 +1177,6 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 }
 
 /**
- * ipa3_setup_coal_def_pipe() - Setup a crippled default pipe in addition to the
- * coalescing pipe.
- *
- * @sys_in:	[in] input needed to setup the pipe and configure EP
- * @ep_coalescing [in] the ep context of the coal pipe
- *
- *  - configure the end-point registers with the supplied
- *    parameters from the user.
- *  - Creates a GPI connection with IPA.
- *  - allocate descriptor FIFO
- *
- * Returns:	0 on success, negative on failure
- */
-static int ipa_setup_coal_def_pipe(struct ipa_sys_connect_params *sys_in,
-	struct ipa3_ep_context *ep_coalescing)
-{
-	struct ipa3_ep_context *ep;
-	int result = -EINVAL;
-	int ipa_ep_idx;
-
-	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
-
-	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
-		IPAERR("failed to get idx");
-		goto fail_gen;
-	}
-
-	ep = &ipa3_ctx->ep[ipa_ep_idx];
-	if (ep->valid == 1) {
-		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
-		goto fail_gen;
-	}
-
-	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
-
-	if (!ep->sys) {
-		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
-		if (!ep->sys) {
-			IPAERR("failed to sys ctx for client %d\n",
-				IPA_CLIENT_APPS_WAN_CONS);
-			result = -ENOMEM;
-			goto fail_wq;
-		}
-
-		ep->sys->ep = ep;
-		ep->sys->wq = ep_coalescing->sys->wq;
-		ep->sys->repl_wq = ep_coalescing->sys->repl_wq;
-
-		spin_lock_init(&ep->sys->spinlock);
-		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
-			HRTIMER_MODE_REL);
-		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
-	} else {
-		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
-	}
-
-	ep->skip_ep_cfg = ep_coalescing->skip_ep_cfg;
-
-	if (ipa3_assign_policy(sys_in, ep->sys)) {
-		IPAERR("failed to sys ctx for client %d\n",
-			IPA_CLIENT_APPS_WAN_CONS);
-		result = -ENOMEM;
-		goto fail_wq;
-	}
-
-	ep->valid = 1;
-	ep->client = sys_in->client;
-	ep->client_notify = ep_coalescing->client_notify;
-	ep->priv = ep_coalescing->priv;
-	ep->keep_ipa_awake = ep_coalescing->keep_ipa_awake;
-	atomic_set(&ep->avail_fifo_desc,
-		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));
-
-	if (!ep->skip_ep_cfg) {
-		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
-			IPAERR("fail to configure EP.\n");
-			goto fail_wq;
-		}
-
-		if (ep->status.status_en) {
-			IPAERR("status should be disabled for this EP.\n");
-			goto fail_wq;
-		}
-
-		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
-			IPAERR("fail to configure status of EP.\n");
-			goto fail_wq;
-		}
-		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
-	} else {
-		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
-	}
-
-	result = ipa_gsi_setup_coal_def_channel(sys_in, ep, ep_coalescing);
-	if (result) {
-		IPAERR("Failed to setup default coal GSI channel\n");
-		goto fail_wq;
-	}
-
-	ep->sys->repl = ep_coalescing->sys->repl;
-	ipa3_replenish_rx_cache(ep->sys);
-
-	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
-
-	result = ipa3_enable_data_path(ipa_ep_idx);
-	if (result) {
-		IPAERR("enable data path failed res=%d ep=%d.\n", result,
-			ipa_ep_idx);
-		goto fail_wq;
-	}
-
-	result = gsi_start_channel(ep->gsi_chan_hdl);
-	if (result != GSI_STATUS_SUCCESS)
-		goto fail_start_channel;
-
-	IPADBG("client %d (ep: %d) connected sys=%pK\n", ep->client,
-			ipa_ep_idx, ep->sys);
-
-	return 0;
-
-/* the rest of the fails are handled by ipa3_setup_sys_pipe */
-fail_start_channel:
-	ipa3_disable_data_path(ipa_ep_idx);
-fail_wq:
-	kfree(ep->sys);
-	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
-fail_gen:
-	return result;
-}
-
-/**
  * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
  * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
  *
@@ -1511,6 +1371,17 @@ static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
 		ipa_assert();
 		return result;
 	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+
+	flush_workqueue(ep->sys->wq);
+
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa3_cleanup_rx(ep->sys);
+
 	ep->valid = 0;
 
 	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1628,8 +1499,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 	sys = ipa3_ctx->ep[src_ep_idx].sys;
 
 	if (!sys || !sys->ep->valid) {
-		IPAERR_RL("pipe not valid\n");
-		goto fail_gen;
+		IPAERR_RL("pipe %d not valid\n", src_ep_idx);
+		goto fail_pipe_not_valid;
 	}
 
 	num_frags = skb_shinfo(skb)->nr_frags;
@@ -1797,6 +1668,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		kfree(desc);
 fail_gen:
 	return -EFAULT;
+fail_pipe_not_valid:
+	return -EPIPE;
 }
 
 static void ipa3_wq_handle_rx(struct work_struct *work)
@@ -1806,10 +1679,7 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
 	sys = container_of(work, struct ipa3_sys_context, work);
 
 	if (sys->napi_obj) {
-		if (!ipa3_ctx->use_ipa_pm)
-			IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
-		else
-			ipa_pm_activate_sync(sys->pm_hdl);
+		ipa_pm_activate_sync(sys->pm_hdl);
 		napi_schedule(sys->napi_obj);
 	} else
 		ipa3_handle_rx(sys);
@@ -2389,6 +2259,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 	 * provided to gsi
 	 */
 
+	spin_lock_bh(&sys->spinlock);
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->rcycl_list, link) {
 		list_del(&rx_pkt->link);
@@ -2397,6 +2268,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 		sys->free_skb(rx_pkt->data.skb);
 		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
+	spin_unlock_bh(&sys->spinlock);
 
 	if (sys->repl) {
 		head = atomic_read(&sys->repl->head_idx);
@@ -3034,6 +2906,13 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
 		return NULL;
 	}
 
+	/* Assert when WAN consumer channel receives EOB event */
+	if (notify->evt_id == GSI_CHAN_EVT_EOB &&
+		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+		IPAERR("EOB event received on WAN consumer channel\n");
+		ipa_assert();
+	}
+
 	head = &rx_pkt->sys->pending_pkts[notify->veid];
 
 	INIT_LIST_HEAD(&rx_pkt->link);
@@ -3248,16 +3127,12 @@ static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in,
 	sys->ep->status.status_en = false;
 	sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(adjusted_sz);
 
-	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
-		*aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
-			IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(sys->rx_buff_sz) :
-			IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(*aggr_byte_limit);
+	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
 		in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
-	} else {
-		*aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
-			IPA_ADJUST_AGGR_BYTE_LIMIT(sys->rx_buff_sz) :
-			IPA_ADJUST_AGGR_BYTE_LIMIT(*aggr_byte_limit);
-	}
+
+	*aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
+		IPA_ADJUST_AGGR_BYTE_LIMIT(sys->rx_buff_sz) :
+		IPA_ADJUST_AGGR_BYTE_LIMIT(*aggr_byte_limit);
 
 	IPADBG("set aggr_limit %lu\n", (unsigned long) *aggr_byte_limit);
 }
@@ -3901,29 +3776,15 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
 	 * pm deactivate is done in wq context
 	 * or after NAPI poll
 	 */
-	if (ipa3_ctx->use_ipa_pm) {
-		clk_off = ipa_pm_activate(sys->pm_hdl);
-		if (!clk_off && sys->napi_obj) {
-			napi_schedule(sys->napi_obj);
-			return;
-		}
-		queue_work(sys->wq, &sys->work);
+
+	clk_off = ipa_pm_activate(sys->pm_hdl);
+	if (!clk_off && sys->napi_obj) {
+		napi_schedule(sys->napi_obj);
 		return;
 	}
-
-	if (sys->napi_obj) {
-		struct ipa_active_client_logging_info log;
-
-		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
-		clk_off = ipa3_inc_client_enable_clks_no_block(
-			&log);
-		if (!clk_off) {
-			napi_schedule(sys->napi_obj);
-			return;
-		}
-	}
-
 	queue_work(sys->wq, &sys->work);
+	return;
+
 }
 
 static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
@@ -4039,7 +3900,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 	u32 ring_size;
 	int result;
 	gfp_t mem_flag = GFP_KERNEL;
-
+	u32 coale_ep_idx;
 
 	if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
 		in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
@@ -4050,6 +3911,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 		IPAERR("EP context is empty\n");
 		return -EINVAL;
 	}
+	coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 	/*
 	 * GSI ring length is calculated based on the desc_fifo_sz
 	 * which was meant to define the BAM desc fifo. GSI descriptors
@@ -4068,8 +3930,19 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 		}
 		ipa3_ctx->gsi_evt_comm_ring_rem -= (ring_size);
 		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+	} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
+			coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[coale_ep_idx].valid == 1) {
+		IPADBG("Wan consumer pipe configured\n");
+		result = ipa_gsi_setup_coal_def_channel(in, ep,
+					&ipa3_ctx->ep[coale_ep_idx]);
+		if (result) {
+			IPAERR("Failed to setup default coal GSI channel\n");
+			goto fail_setup_event_ring;
+		}
+		return result;
 	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
-		IPA_CLIENT_IS_CONS(ep->client)) {
+			IPA_CLIENT_IS_CONS(ep->client)) {
 		result = ipa_gsi_setup_event_ring(ep, ring_size, mem_flag);
 		if (result)
 			goto fail_setup_event_ring;
@@ -4473,13 +4346,8 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 		if (ret == -GSI_STATUS_PENDING_IRQ &&
 				napi_reschedule(ep->sys->napi_obj))
 			goto start_poll;
-
-		if (ipa3_ctx->use_ipa_pm)
-			ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
-		else
-			ipa3_dec_client_disable_clks_no_block(&log);
+		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
 	}
-
 	return cnt;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 03ebdd11..2b21b85 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -71,7 +71,8 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
 
 	gen_params.priority = entry->prio;
 	gen_params.id = entry->rule_id;
-	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
+	gen_params.rule = (const struct ipa_flt_rule_i *)&entry->rule;
+	gen_params.cnt_idx = entry->cnt_idx;
 
 	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
 	if (res) {
@@ -776,9 +777,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	return rc;
 }
 
-static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
+static int __ipa_validate_flt_rule(const struct ipa_flt_rule_i *rule,
 		struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
 {
+	int index;
+
 	if (rule->action != IPA_PASS_TO_EXCEPTION) {
 		if (!rule->eq_attrib_type) {
 			if (!rule->rt_tbl_hdl) {
@@ -838,6 +841,30 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
 		}
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (rule->enable_stats && rule->cnt_idx) {
+			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu out of range\n",
+					rule->cnt_idx);
+				goto error;
+			}
+			index = rule->cnt_idx - 1;
+			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu not alloc by driver\n",
+					rule->cnt_idx);
+				goto error;
+			}
+		}
+	} else {
+		if (rule->enable_stats) {
+			IPAERR_RL(
+				"enable_stats won't support on ipa_hw_type %d\n",
+				ipa3_ctx->ipa_hw_type);
+			goto error;
+		}
+	}
 	return 0;
 
 error:
@@ -845,7 +872,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
 }
 
 static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
-		const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
+		const struct ipa_flt_rule_i *rule, struct ipa3_rt_tbl *rt_tbl,
 		struct ipa3_flt_tbl *tbl, bool user)
 {
 	int id;
@@ -870,7 +897,10 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
 	}
 	(*entry)->rule_id = id;
 	(*entry)->ipacm_installed = user;
-
+	if (rule->enable_stats)
+		(*entry)->cnt_idx = rule->cnt_idx;
+	else
+		(*entry)->cnt_idx = 0;
 	return 0;
 
 rule_id_fail:
@@ -906,7 +936,7 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
 }
 
 static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
-			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      const struct ipa_flt_rule_i *rule, u8 add_rear,
 			      u32 *rule_hdl, bool user)
 {
 	struct ipa3_flt_entry *entry;
@@ -945,7 +975,7 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
 }
 
 static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
-				const struct ipa_flt_rule *rule,
+				const struct ipa_flt_rule_i *rule,
 				u32 *rule_hdl,
 				enum ipa_ip_type ip,
 				struct ipa3_flt_entry **add_after_entry)
@@ -1030,7 +1060,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
 	return 0;
 }
 
-static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy_i *frule,
 		enum ipa_ip_type ip)
 {
 	struct ipa3_flt_entry *entry;
@@ -1059,6 +1089,10 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
 		entry->rt_tbl->ref_cnt++;
 	entry->hw_len = 0;
 	entry->prio = 0;
+	if (frule->rule.enable_stats)
+		entry->cnt_idx = frule->rule.cnt_idx;
+	else
+		entry->cnt_idx = 0;
 
 	return 0;
 
@@ -1085,7 +1119,7 @@ static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
 }
 
 static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
-				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 const struct ipa_flt_rule_i *rule, u8 add_rear,
 				 u32 *rule_hdl, bool user)
 {
 	struct ipa3_flt_tbl *tbl;
@@ -1111,6 +1145,68 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
 	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
 }
 
+static void __ipa_convert_flt_rule_in(struct ipa_flt_rule rule_in,
+	struct ipa_flt_rule_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule) >
+			sizeof(struct ipa_flt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_i),
+			sizeof(struct ipa_flt_rule));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_i));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
+}
+
+static void __ipa_convert_flt_rule_out(struct ipa_flt_rule_i rule_in,
+	struct ipa_flt_rule *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule) >
+			sizeof(struct ipa_flt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_i),
+			sizeof(struct ipa_flt_rule));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
+}
+
+static void __ipa_convert_flt_mdfy_in(struct ipa_flt_rule_mdfy rule_in,
+	struct ipa_flt_rule_mdfy_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
+			sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_mdfy),
+			sizeof(struct ipa_flt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy_i));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_flt_rule));
+	rule_out->rule_hdl = rule_in.rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
+static void __ipa_convert_flt_mdfy_out(struct ipa_flt_rule_mdfy_i rule_in,
+	struct ipa_flt_rule_mdfy *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
+			sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_mdfy),
+			sizeof(struct ipa_flt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_flt_rule));
+	rule_out->rule_hdl = rule_in.rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
 /**
  * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
  * commit to IPA HW
@@ -1126,6 +1222,21 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
 }
 
 /**
+ * ipa3_add_flt_rule_v2() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
+{
+	return ipa3_add_flt_rule_usr_v2(rules, false);
+}
+
+
+/**
  * ipa3_add_flt_rule_usr() - Add the specified filtering rules to
  * SW and optionally commit to IPA HW
  * @rules:	[inout] set of filtering rules to add
@@ -1139,6 +1250,75 @@ int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
 {
 	int i;
 	int result;
+	struct ipa_flt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (!rules->global) {
+			/* if hashing not supported, all table entry
+			 * are non-hash tables
+			 */
+			if (ipa3_ctx->ipa_fltrt_not_hashable)
+				rules->rules[i].rule.hashable = false;
+			__ipa_convert_flt_rule_in(
+				rules->rules[i].rule, &rule);
+			result = __ipa_add_ep_flt_rule(rules->ip,
+					rules->ep,
+					&rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl,
+					user_only);
+			__ipa_convert_flt_rule_out(rule,
+				&rules->rules[i].rule);
+		} else
+			result = -1;
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->global) {
+		IPAERR_RL("no support for global filter rules\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_add_flt_rule_usr_v2() - Add the specified filtering
+ * rules to SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2
+	*rules, bool user_only)
+{
+	int i;
+	int result;
 
 	if (rules == NULL || rules->num_rules == 0 ||
 			rules->ip >= IPA_IP_MAX) {
@@ -1153,22 +1333,27 @@ int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
 			 * are non-hash tables
 			 */
 			if (ipa3_ctx->ipa_fltrt_not_hashable)
-				rules->rules[i].rule.hashable = false;
+				((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].rule.hashable = false;
 			result = __ipa_add_ep_flt_rule(rules->ip,
-				rules->ep,
-				&rules->rules[i].rule,
-				rules->rules[i].at_rear,
-				&rules->rules[i].flt_rule_hdl,
-				user_only);
-		} else {
+					rules->ep,
+					&(((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].rule),
+					((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].flt_rule_hdl),
+					user_only);
+		} else
 			result = -1;
-		}
 
 		if (result) {
 			IPAERR_RL("failed to add flt rule %d\n", i);
-			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
 		} else {
-			rules->rules[i].status = 0;
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = 0;
 		}
 	}
 
@@ -1205,6 +1390,7 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
 	struct ipa3_flt_tbl *tbl;
 	int ipa_ep_idx;
 	struct ipa3_flt_entry *entry;
+	struct ipa_flt_rule_i rule;
 
 	if (rules == NULL || rules->num_rules == 0 ||
 			rules->ip >= IPA_IP_MAX) {
@@ -1224,7 +1410,7 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
 		goto bail;
 	}
 
-	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES || ipa_ep_idx < 0) {
 		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
 		result = -EINVAL;
 		goto bail;
@@ -1272,12 +1458,19 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
 			rules->rules[i].rule.hashable = false;
+
+		__ipa_convert_flt_rule_in(
+				rules->rules[i].rule, &rule);
+
 		result = __ipa_add_flt_rule_after(tbl,
-				&rules->rules[i].rule,
+				&rule,
 				&rules->rules[i].flt_rule_hdl,
 				rules->ip,
 				&entry);
 
+		__ipa_convert_flt_rule_out(rule,
+				&rules->rules[i].rule);
+
 		if (result) {
 			IPAERR_RL("failed to add flt rule %d\n", i);
 			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
@@ -1300,6 +1493,122 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
 }
 
 /**
+ * ipa3_add_flt_rule_after_v2() - Add the specified filtering
+ *  rules to SW after the rule which its handle is given and
+ *  optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
+	*rules)
+{
+	int i;
+	int result;
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+	struct ipa3_flt_entry *entry;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (rules->ep >= IPA_CLIENT_MAX) {
+		IPAERR_RL("bad parms ep=%d\n", rules->ep);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx < 0) {
+		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u flt hdl id = %d\n",
+			entry->cookie, rules->add_after_hdl);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given entry does not match the table\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->sticky_rear)
+		if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR_RL("cannot add rule at end of a sticky table");
+			result = -EINVAL;
+			goto bail;
+		}
+
+	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+			rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * we add all rules one after the other, if one insertion fails, it cuts
+	 * the chain (all following will receive fail status) following calls to
+	 * __ipa_add_flt_rule_after will fail (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
+		result = __ipa_add_flt_rule_after(tbl,
+				&(((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].rule),
+				&(((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].flt_rule_hdl),
+				rules->ip,
+				&entry);
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			IPAERR("failed to commit flt rules\n");
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
  * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
  * optionally commit to IPA HW
  *
@@ -1351,6 +1660,7 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
 {
 	int i;
 	int result;
+	struct ipa_flt_rule_mdfy_i rule;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
 		IPAERR_RL("bad parm\n");
@@ -1362,11 +1672,13 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
 			hdls->rules[i].rule.hashable = false;
-		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+		__ipa_convert_flt_mdfy_in(hdls->rules[i], &rule);
+		if (__ipa_mdfy_flt_rule(&rule, hdls->ip)) {
 			IPAERR_RL("failed to mdfy flt rule %i\n", i);
 			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
+			__ipa_convert_flt_mdfy_out(rule, &hdls->rules[i]);
 		}
 	}
 
@@ -1382,6 +1694,52 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
 	return result;
 }
 
+/**
+ * ipa3_mdfy_flt_rule_v2() - Modify the specified filtering
+ * rules in SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].rule.hashable = false;
+		if (__ipa_mdfy_flt_rule(&(((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i]), hdls->ip)) {
+			IPAERR_RL("failed to mdfy flt rule %i\n", i);
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
 
 /**
  * ipa3_commit_flt() - Commit the current SW filtering table of specified type
@@ -1492,7 +1850,7 @@ void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
 {
 	struct ipa3_flt_tbl *tbl;
 	struct ipa3_ep_context *ep;
-	struct ipa_flt_rule rule;
+	struct ipa_flt_rule_i rule;
 
 	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
 		IPAERR("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index 422f200..8a3ab7b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -32,21 +32,64 @@ int ipa_hw_stats_init(void)
 		return -ENOMEM;
 	}
 	/* enable prod mask */
-	teth_stats_init->prod_mask = (
-		IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) |
-		IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD) |
-		IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD));
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		teth_stats_init->prod_mask = (
+			IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
+		else
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
 
-	if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
-		if (ep_index == -1) {
-			IPAERR("Invalid client.\n");
-			kfree(teth_stats_init);
-			return -EINVAL;
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD)) {
+			ep_index = ipa3_get_ep_mapping(
+				IPA_CLIENT_MHI_PRIME_TETH_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
 		}
-		teth_stats_init->dst_ep_mask[ep_index] =
-			(IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS) |
-			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS));
+	} else {
+		teth_stats_init->prod_mask = (
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
+
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
+		else
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
+
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) {
+			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
+		}
 	}
 
 	if (IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD)) {
@@ -56,8 +99,16 @@ int ipa_hw_stats_init(void)
 			kfree(teth_stats_init);
 			return -EINVAL;
 		}
-		teth_stats_init->dst_ep_mask[ep_index] =
-			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(
+					IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
 	}
 
 	if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD)) {
@@ -67,12 +118,45 @@ int ipa_hw_stats_init(void)
 			kfree(teth_stats_init);
 			return -EINVAL;
 		}
-		teth_stats_init->dst_ep_mask[ep_index] =
-			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
 	}
 
+	if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD)) {
+		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		if (ep_index == -1) {
+			IPAERR("Invalid client.\n");
+			kfree(teth_stats_init);
+			return -EINVAL;
+		}
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	}
+
+
 	ret = ipa_init_teth_stats(teth_stats_init);
+	if (ret != 0)
+		IPAERR("init teth stats fails\n");
 	kfree(teth_stats_init);
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ret = ipa_init_flt_rt_stats();
+		if (ret != 0)
+			IPAERR("init flt rt stats fails\n");
+	}
 	return ret;
 }
 
@@ -771,122 +855,34 @@ int ipa_reset_all_teth_stats(void)
 	return 0;
 }
 
-int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
-	u16 rule_id)
-{
-	int rule_idx, rule_bit;
-	u32 *bmsk_ptr;
-
-	if (!ipa3_ctx->hw_stats.enabled)
-		return 0;
-
-	if (ip < 0 || ip >= IPA_IP_MAX) {
-		IPAERR("wrong ip type %d\n", ip);
-		return -EINVAL;
-	}
-
-	rule_idx = rule_id / 32;
-	rule_bit = rule_id % 32;
-
-	if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
-		IPAERR("invalid rule_id %d\n", rule_id);
-		return -EINVAL;
-	}
-
-	if (ip == IPA_IP_v4 && filtering)
-		bmsk_ptr =
-			ipa3_ctx->hw_stats.flt_rt.flt_v4_init.rule_id_bitmask;
-	else if (ip == IPA_IP_v4)
-		bmsk_ptr =
-			ipa3_ctx->hw_stats.flt_rt.rt_v4_init.rule_id_bitmask;
-	else if (ip == IPA_IP_v6 && filtering)
-		bmsk_ptr =
-			ipa3_ctx->hw_stats.flt_rt.flt_v6_init.rule_id_bitmask;
-	else
-		bmsk_ptr =
-			ipa3_ctx->hw_stats.flt_rt.rt_v6_init.rule_id_bitmask;
-
-	bmsk_ptr[rule_idx] |= (1 << rule_bit);
-
-	return 0;
-}
-
-int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
+int ipa_init_flt_rt_stats(void)
 {
 	struct ipahal_stats_init_pyld *pyld;
-	int smem_ofst, smem_size, stats_base, start_id_ofst, end_id_ofst;
-	int start_id, end_id;
-	struct ipahal_stats_init_flt_rt *init;
+	int smem_ofst, smem_size;
+	int stats_base_flt_v4, stats_base_flt_v6;
+	int stats_base_rt_v4, stats_base_rt_v6;
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
-	struct ipahal_imm_cmd_register_write flt_rt_base = {0};
-	struct ipahal_imm_cmd_pyld *flt_rt_base_pyld;
-	struct ipahal_imm_cmd_register_write flt_rt_start_id = {0};
-	struct ipahal_imm_cmd_pyld *flt_rt_start_id_pyld;
-	struct ipahal_imm_cmd_register_write flt_rt_end_id = { 0 };
-	struct ipahal_imm_cmd_pyld *flt_rt_end_id_pyld;
-	struct ipa3_desc desc[4] = { {0} };
+	struct ipahal_imm_cmd_register_write flt_v4_base = {0};
+	struct ipahal_imm_cmd_pyld *flt_v4_base_pyld;
+	struct ipahal_imm_cmd_register_write flt_v6_base = {0};
+	struct ipahal_imm_cmd_pyld *flt_v6_base_pyld;
+	struct ipahal_imm_cmd_register_write rt_v4_base = {0};
+	struct ipahal_imm_cmd_pyld *rt_v4_base_pyld;
+	struct ipahal_imm_cmd_register_write rt_v6_base = {0};
+	struct ipahal_imm_cmd_pyld *rt_v6_base_pyld;
+	struct ipa3_desc desc[5] = { {0} };
 	dma_addr_t dma_address;
 	int ret;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
 
-	if (ip == IPA_IP_v4 && filtering) {
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
-		smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
-		smem_size = IPA_MEM_PART(stats_flt_v4_size);
-		stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
-		start_id_ofst =
-			ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_START_ID);
-		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_END_ID);
-	} else if (ip == IPA_IP_v4) {
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
-		smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
-		smem_size = IPA_MEM_PART(stats_rt_v4_size);
-		stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
-		start_id_ofst =
-			ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_START_ID);
-		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_END_ID);
-	} else if (ip == IPA_IP_v6 && filtering) {
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
-		smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
-		smem_size = IPA_MEM_PART(stats_flt_v6_size);
-		stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
-		start_id_ofst =
-			ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_START_ID);
-		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_END_ID);
-	} else {
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
-		smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
-		smem_size = IPA_MEM_PART(stats_rt_v6_size);
-		stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);
-		start_id_ofst =
-			ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_START_ID);
-		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_END_ID);
-	}
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
+	smem_size = IPA_MEM_PART(stats_fnr_size);
 
-	for (start_id = 0; start_id < IPAHAL_MAX_RULE_ID_32; start_id++) {
-		if (init->rule_id_bitmask[start_id])
-			break;
-	}
-
-	if (start_id == IPAHAL_MAX_RULE_ID_32) {
-		IPAERR("empty rule ids\n");
-		return -EINVAL;
-	}
-
-	/* every rule_id_bitmask contains 32 rules */
-	start_id *= 32;
-
-	for (end_id = IPAHAL_MAX_RULE_ID_32 - 1; end_id >= 0; end_id--) {
-		if (init->rule_id_bitmask[end_id])
-			break;
-	}
-	end_id = (end_id + 1) * 32 - 1;
-
-	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR, init,
-		false);
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR,
+		(void *)(uintptr_t)(IPA_MAX_FLT_RT_CNT_INDEX), false);
 	if (!pyld) {
 		IPAERR("failed to generate pyld\n");
 		return -EPERM;
@@ -909,59 +905,89 @@ int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
 		goto destroy_init_pyld;
 	}
 
+	stats_base_flt_v4 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
+	stats_base_flt_v6 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
+	stats_base_rt_v4 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
+	stats_base_rt_v6 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);
+
 	/* setting the registers and init the stats pyld are done atomically */
-	flt_rt_start_id.skip_pipeline_clear = false;
-	flt_rt_start_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
-	flt_rt_start_id.offset = start_id_ofst;
-	flt_rt_start_id.value = start_id;
-	flt_rt_start_id.value_mask = 0x3FF;
-	flt_rt_start_id_pyld = ipahal_construct_imm_cmd(
-		IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_start_id, false);
-	if (!flt_rt_start_id_pyld) {
+	/* set IPA_STAT_FILTER_IPV4_BASE */
+	flt_v4_base.skip_pipeline_clear = false;
+	flt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_v4_base.offset = stats_base_flt_v4;
+	flt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	flt_v4_base.value_mask = ~0;
+	flt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&flt_v4_base, false);
+	if (!flt_v4_base_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
 		goto unmap;
 	}
-	desc[0].opcode = flt_rt_start_id_pyld->opcode;
-	desc[0].pyld = flt_rt_start_id_pyld->data;
-	desc[0].len = flt_rt_start_id_pyld->len;
+	desc[0].opcode = flt_v4_base_pyld->opcode;
+	desc[0].pyld = flt_v4_base_pyld->data;
+	desc[0].len = flt_v4_base_pyld->len;
 	desc[0].type = IPA_IMM_CMD_DESC;
 
-	flt_rt_end_id.skip_pipeline_clear = false;
-	flt_rt_end_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
-	flt_rt_end_id.offset = end_id_ofst;
-	flt_rt_end_id.value = end_id;
-	flt_rt_end_id.value_mask = 0x3FF;
-	flt_rt_end_id_pyld = ipahal_construct_imm_cmd(
-		IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_end_id, false);
-	if (!flt_rt_end_id_pyld) {
+	/* set IPA_STAT_FILTER_IPV6_BASE */
+	flt_v6_base.skip_pipeline_clear = false;
+	flt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_v6_base.offset = stats_base_flt_v6;
+	flt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	flt_v6_base.value_mask = ~0;
+	flt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&flt_v6_base, false);
+	if (!flt_v6_base_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto destroy_flt_rt_start_id;
+		goto destroy_flt_v4_base;
 	}
-	desc[1].opcode = flt_rt_end_id_pyld->opcode;
-	desc[1].pyld = flt_rt_end_id_pyld->data;
-	desc[1].len = flt_rt_end_id_pyld->len;
+	desc[1].opcode = flt_v6_base_pyld->opcode;
+	desc[1].pyld = flt_v6_base_pyld->data;
+	desc[1].len = flt_v6_base_pyld->len;
 	desc[1].type = IPA_IMM_CMD_DESC;
 
-	flt_rt_base.skip_pipeline_clear = false;
-	flt_rt_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
-	flt_rt_base.offset = stats_base;
-	flt_rt_base.value = ipa3_ctx->smem_restricted_bytes +
+
+	/* set IPA_STAT_ROUTER_IPV4_BASE */
+	rt_v4_base.skip_pipeline_clear = false;
+	rt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	rt_v4_base.offset = stats_base_rt_v4;
+	rt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
 		smem_ofst;
-	flt_rt_base.value_mask = ~0;
-	flt_rt_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
-		&flt_rt_base, false);
-	if (!flt_rt_base_pyld) {
+	rt_v4_base.value_mask = ~0;
+	rt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&rt_v4_base, false);
+	if (!rt_v4_base_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto destroy_flt_rt_end_id;
+		goto destroy_flt_v6_base;
 	}
-	desc[2].opcode = flt_rt_base_pyld->opcode;
-	desc[2].pyld = flt_rt_base_pyld->data;
-	desc[2].len = flt_rt_base_pyld->len;
+	desc[2].opcode = rt_v4_base_pyld->opcode;
+	desc[2].pyld = rt_v4_base_pyld->data;
+	desc[2].len = rt_v4_base_pyld->len;
 	desc[2].type = IPA_IMM_CMD_DESC;
 
+	/* set IPA_STAT_ROUTER_IPV6_BASE */
+	rt_v6_base.skip_pipeline_clear = false;
+	rt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	rt_v6_base.offset = stats_base_rt_v6;
+	rt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	rt_v6_base.value_mask = ~0;
+	rt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&rt_v6_base, false);
+	if (!rt_v6_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_rt_v4_base;
+	}
+	desc[3].opcode = rt_v6_base_pyld->opcode;
+	desc[3].pyld = rt_v6_base_pyld->data;
+	desc[3].len = rt_v6_base_pyld->len;
+	desc[3].type = IPA_IMM_CMD_DESC;
+
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
 	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -974,14 +1000,14 @@ int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
 	if (!cmd_pyld) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		ret = -ENOMEM;
-		goto destroy_flt_rt_base;
+		goto destroy_rt_v6_base;
 	}
-	desc[3].opcode = cmd_pyld->opcode;
-	desc[3].pyld = cmd_pyld->data;
-	desc[3].len = cmd_pyld->len;
-	desc[3].type = IPA_IMM_CMD_DESC;
+	desc[4].opcode = cmd_pyld->opcode;
+	desc[4].pyld = cmd_pyld->data;
+	desc[4].len = cmd_pyld->len;
+	desc[4].type = IPA_IMM_CMD_DESC;
 
-	ret = ipa3_send_cmd(4, desc);
+	ret = ipa3_send_cmd(5, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -991,12 +1017,14 @@ int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
 
 destroy_imm:
 	ipahal_destroy_imm_cmd(cmd_pyld);
-destroy_flt_rt_base:
-	ipahal_destroy_imm_cmd(flt_rt_base_pyld);
-destroy_flt_rt_end_id:
-	ipahal_destroy_imm_cmd(flt_rt_end_id_pyld);
-destroy_flt_rt_start_id:
-	ipahal_destroy_imm_cmd(flt_rt_start_id_pyld);
+destroy_rt_v6_base:
+	ipahal_destroy_imm_cmd(rt_v6_base_pyld);
+destroy_rt_v4_base:
+	ipahal_destroy_imm_cmd(rt_v4_base_pyld);
+destroy_flt_v6_base:
+	ipahal_destroy_imm_cmd(flt_v6_base_pyld);
+destroy_flt_v4_base:
+	ipahal_destroy_imm_cmd(flt_v4_base_pyld);
 unmap:
 	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
 destroy_init_pyld:
@@ -1004,55 +1032,17 @@ int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
 	return ret;
 }
 
-int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering)
-{
-	struct ipahal_stats_init_flt_rt *init;
-	int i;
-
-	if (!ipa3_ctx->hw_stats.enabled)
-		return 0;
-
-	if (ip < 0 || ip >= IPA_IP_MAX) {
-		IPAERR("wrong ip type %d\n", ip);
-		return -EINVAL;
-	}
-
-	if (ip == IPA_IP_v4 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
-	else if (ip == IPA_IP_v4)
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
-	else if (ip == IPA_IP_v6 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
-	else
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
-
-	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
-		init->rule_id_bitmask[i] = 0;
-
-	return 0;
-}
-
-static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
-	u16 rule_id, struct ipa_flt_rt_stats *out)
+static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 {
 	int ret;
 	int smem_ofst;
-	bool clear = false;
-	struct ipahal_stats_get_offset_flt_rt *get_offset;
+	bool clear = query->reset;
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
 	struct ipahal_stats_offset offset = { 0 };
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
 	struct ipa_mem_buffer mem;
 	struct ipa3_desc desc = { 0 };
-	struct ipahal_stats_flt_rt stats;
-
-	if (rule_id >= IPAHAL_MAX_RULE_ID_32 * 32) {
-		IPAERR("invalid rule_id %d\n", rule_id);
-		return -EINVAL;
-	}
-
-	if (out == NULL)
-		clear = true;
 
 	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
 	if (!get_offset) {
@@ -1060,21 +1050,10 @@ static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
 		return -ENOMEM;
 	}
 
-	if (ip == IPA_IP_v4 && filtering) {
-		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
-		smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
-	} else if (ip == IPA_IP_v4) {
-		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
-		smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
-	} else if (ip == IPA_IP_v6 && filtering) {
-		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
-		smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
-	} else {
-		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
-		smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
-	}
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
 
-	get_offset->rule_id = rule_id;
+	get_offset->start_id = query->start_id;
+	get_offset->end_id = query->end_id;
 
 	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
 		&offset);
@@ -1083,7 +1062,7 @@ static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
 		goto free_offset;
 	}
 
-	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+	IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
 
 	if (offset.size == 0) {
 		ret = 0;
@@ -1127,15 +1106,138 @@ static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
 	}
 
 	ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR,
-		&get_offset->init, mem.base, &stats);
+		NULL, mem.base, query);
 	if (ret) {
 		IPAERR("failed to parse stats (error %d)\n", ret);
 		goto destroy_imm;
 	}
+	ret = 0;
 
-	if (out) {
-		out->num_pkts = stats.num_packets;
-		out->num_pkts_hash = stats.num_packets_hash;
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+free_offset:
+	kfree(get_offset);
+	return ret;
+}
+
+int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
+{
+	if (!ipa3_ctx->hw_stats.enabled) {
+		IPAERR("hw_stats is not enabled\n");
+		return 0;
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("FnR stats not supported in %d hw_type\n",
+			ipa3_ctx->ipa_hw_type);
+		return 0;
+	}
+
+	if (query->start_id == 0 || query->end_id == 0) {
+		IPAERR("Invalid start_id/end_id, must be not 0\n");
+		IPAERR("start_id %d, end_id %d\n",
+			query->start_id, query->end_id);
+		return -EINVAL;
+	}
+
+	if (query->start_id > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("start_cnt_id %d out of range\n", query->start_id);
+		return -EINVAL;
+	}
+
+	if (query->end_id > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("end_cnt_id %d out of range\n", query->end_id);
+		return -EINVAL;
+	}
+
+	if (query->end_id < query->start_id) {
+		IPAERR("end_id %d < start_id %d\n",
+			query->end_id, query->start_id);
+		return -EINVAL;
+	}
+
+	if (query->stats_size > sizeof(struct ipa_flt_rt_stats)) {
+		IPAERR("stats_size %d > ipa_flt_rt_stats %zu\n",
+			query->stats_size, sizeof(struct ipa_flt_rt_stats));
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(query);
+}
+
+
+static int __ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
+{
+	int ret;
+	int smem_ofst;
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+
+	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
+	if (!get_offset) {
+		IPADBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
+
+	get_offset->start_id = index;
+	get_offset->end_id = index;
+
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		goto free_offset;
+	}
+
+	IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0) {
+		ret = 0;
+		goto free_offset;
+	}
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		ret = -ENOMEM; goto free_offset;
+	}
+	ipahal_set_flt_rt_sw_stats(mem.base, stats);
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
 	}
 
 	ret = 0;
@@ -1147,68 +1249,33 @@ static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
 free_offset:
 	kfree(get_offset);
 	return ret;
-
 }
 
-
-int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
-	struct ipa_flt_rt_stats *out)
+int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
 {
-	if (!ipa3_ctx->hw_stats.enabled)
+	if (!ipa3_ctx->hw_stats.enabled) {
+		IPAERR("hw_stats is not enabled\n");
 		return 0;
+	}
 
-	if (ip < 0 || ip >= IPA_IP_MAX) {
-		IPAERR("wrong ip type %d\n", ip);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("FnR stats not supported in %d hw_type\n",
+			ipa3_ctx->ipa_hw_type);
+		return 0;
+	}
+
+	if (index > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("index %d out of range\n", index);
 		return -EINVAL;
 	}
 
-	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, out);
-}
-
-int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id)
-{
-	if (!ipa3_ctx->hw_stats.enabled)
-		return 0;
-
-	if (ip < 0 || ip >= IPA_IP_MAX) {
-		IPAERR("wrong ip type %d\n", ip);
+	if (index <= IPA_FLT_RT_HW_COUNTER) {
+		IPAERR("index %d invalid, only support sw counter set\n",
+			index);
 		return -EINVAL;
 	}
 
-	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, NULL);
-}
-
-int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering)
-{
-	struct ipahal_stats_init_flt_rt *init;
-	int i;
-
-	if (!ipa3_ctx->hw_stats.enabled)
-		return 0;
-
-	if (ip < 0 || ip >= IPA_IP_MAX) {
-		IPAERR("wrong ip type %d\n", ip);
-		return -EINVAL;
-	}
-
-	if (ip == IPA_IP_v4 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
-	else if (ip == IPA_IP_v4)
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
-	else if (ip == IPA_IP_v6 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
-	else
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
-
-	for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) {
-		int idx = i / 32;
-		int bit = i % 32;
-
-		if (init->rule_id_bitmask[idx] & (1 << bit))
-			__ipa_get_flt_rt_stats(ip, filtering, i, NULL);
-	}
-
-	return 0;
+	return __ipa_set_flt_rt_stats(index, stats);
 }
 
 int ipa_init_drop_stats(u32 pipe_bitmask)
@@ -1692,14 +1759,27 @@ static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
-static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip,
-	bool filtering, struct file *file,
+static ssize_t ipa_debugfs_control_flt_rt_stats(struct file *file,
 	const char __user *ubuf, size_t count, loff_t *ppos)
 {
+	struct ipa_ioc_flt_rt_query *query;
 	unsigned long missing;
-	u16 rule_id = 0;
+	int pyld_size = 0;
 	int ret;
 
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
+		GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)(uintptr_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&ipa3_ctx->lock);
 	if (count >= sizeof(dbg_buff)) {
 		ret = -EFAULT;
@@ -1713,76 +1793,81 @@ static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip,
 	}
 
 	dbg_buff[count] = '\0';
-	if (strcmp(dbg_buff, "start\n") == 0) {
-		ipa_flt_rt_stats_start(ip, filtering);
-	} else if (strcmp(dbg_buff, "clear\n") == 0) {
-		ipa_flt_rt_stats_clear_rule_ids(ip, filtering);
-	} else if (strcmp(dbg_buff, "reset\n") == 0) {
-		ipa_reset_all_flt_rt_stats(ip, filtering);
+	if (strcmp(dbg_buff, "reset\n") == 0) {
+		query->reset = 1;
+		query->start_id = 1;
+		query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
+		ipa_get_flt_rt_stats(query);
 	} else {
-		if (kstrtou16(dbg_buff, 0, &rule_id)) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		ipa_flt_rt_stats_add_rule_id(ip, filtering, rule_id);
+		IPAERR("unsupport flt_rt command\n");
 	}
 
 	ret = count;
 bail:
+	kfree((void *)(uintptr_t)(query->stats));
+	kfree(query);
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
 
-static ssize_t ipa_debugfs_print_flt_rt_stats(enum ipa_ip_type ip,
-	bool filtering, struct file *file,
+static ssize_t ipa_debugfs_print_flt_rt_stats(struct file *file,
 	char __user *ubuf, size_t count, loff_t *ppos)
 {
 	int nbytes = 0;
-	struct ipahal_stats_init_flt_rt *init;
-	struct ipa_flt_rt_stats out;
 	int i;
 	int res;
+	int pyld_size = 0;
+	struct ipa_ioc_flt_rt_query *query;
 
-	if (ip == IPA_IP_v4 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
-	else if (ip == IPA_IP_v4)
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
-	else if (ip == IPA_IP_v6 && filtering)
-		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
-	else
-		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
-
-	mutex_lock(&ipa3_ctx->lock);
-	for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) {
-		int idx = i / 32;
-		int bit = i % 32;
-
-		if (init->rule_id_bitmask[idx] & (1 << bit)) {
-			res = ipa_get_flt_rt_stats(ip, filtering, i, &out);
-			if (res) {
-				mutex_unlock(&ipa3_ctx->lock);
-				return res;
-			}
-
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"rule_id: %d\n", i);
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"num_pkts: %d\n",
-				out.num_pkts);
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"num_pkts_hash: %d\n",
-				out.num_pkts_hash);
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"\n");
-		}
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
+		GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	query->start_id = 1;
+	query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
+	query->reset = true;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)(uintptr_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
 	}
-
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_flt_rt_stats(query);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree((void *)(uintptr_t)(query->stats));
+		kfree(query);
+		return res;
+	}
+	for (i = 0; i < IPA_MAX_FLT_RT_CNT_INDEX; i++) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"cnt_id: %d\n", i + 1);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_pkts: %d\n",
+			((struct ipa_flt_rt_stats *)(uintptr_t)
+			query->stats)[i].num_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_pkts_hash: %d\n",
+			((struct ipa_flt_rt_stats *)(uintptr_t)
+			query->stats)[i].num_pkts_hash);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_bytes: %lld\n",
+			((struct ipa_flt_rt_stats *)(uintptr_t)
+			query->stats)[i].num_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
 	mutex_unlock(&ipa3_ctx->lock);
-
+	kfree((void *)(uintptr_t)(query->stats));
+	kfree(query);
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
@@ -1870,62 +1955,6 @@ static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
-static ssize_t ipa_debugfs_control_flt_v4_stats(struct file *file,
-	const char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_control_flt_v6_stats(struct file *file,
-	const char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_control_rt_v4_stats(struct file *file,
-	const char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_control_rt_v6_stats(struct file *file,
-	const char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_print_flt_v4_stats(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_print_flt_v6_stats(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_print_rt_v4_stats(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
-		count, ppos);
-}
-
-static ssize_t ipa_debugfs_print_rt_v6_stats(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos)
-{
-	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
-		count, ppos);
-}
-
 static const struct file_operations ipa3_quota_ops = {
 	.read = ipa_debugfs_print_quota_stats,
 	.write = ipa_debugfs_reset_quota_stats,
@@ -1936,24 +1965,9 @@ static const struct file_operations ipa3_tethering_ops = {
 	.write = ipa_debugfs_reset_tethering_stats,
 };
 
-static const struct file_operations ipa3_flt_v4_ops = {
-	.read = ipa_debugfs_print_flt_v4_stats,
-	.write = ipa_debugfs_control_flt_v4_stats,
-};
-
-static const struct file_operations ipa3_flt_v6_ops = {
-	.read = ipa_debugfs_print_flt_v6_stats,
-	.write = ipa_debugfs_control_flt_v6_stats,
-};
-
-static const struct file_operations ipa3_rt_v4_ops = {
-	.read = ipa_debugfs_print_rt_v4_stats,
-	.write = ipa_debugfs_control_rt_v4_stats,
-};
-
-static const struct file_operations ipa3_rt_v6_ops = {
-	.read = ipa_debugfs_print_rt_v6_stats,
-	.write = ipa_debugfs_control_rt_v6_stats,
+static const struct file_operations ipa3_flt_rt_ops = {
+	.read = ipa_debugfs_print_flt_rt_stats,
+	.write = ipa_debugfs_control_flt_rt_stats,
 };
 
 static const struct file_operations ipa3_drop_ops = {
@@ -1998,31 +2012,10 @@ int ipa_debugfs_init_stats(struct dentry *parent)
 		goto fail;
 	}
 
-	file = debugfs_create_file("flt_v4", read_write_mode, dent, NULL,
-		&ipa3_flt_v4_ops);
+	file = debugfs_create_file("flt_rt", read_write_mode, dent, NULL,
+		&ipa3_flt_rt_ops);
 	if (IS_ERR_OR_NULL(file)) {
-		IPAERR("fail to create file %s\n", "flt_v4");
-		goto fail;
-	}
-
-	file = debugfs_create_file("flt_v6", read_write_mode, dent, NULL,
-		&ipa3_flt_v6_ops);
-	if (IS_ERR_OR_NULL(file)) {
-		IPAERR("fail to create file %s\n", "flt_v6");
-		goto fail;
-	}
-
-	file = debugfs_create_file("rt_v4", read_write_mode, dent, NULL,
-		&ipa3_rt_v4_ops);
-	if (IS_ERR_OR_NULL(file)) {
-		IPAERR("fail to create file %s\n", "rt_v4");
-		goto fail;
-	}
-
-	file = debugfs_create_file("rt_v6", read_write_mode, dent, NULL,
-		&ipa3_rt_v6_ops);
-	if (IS_ERR_OR_NULL(file)) {
-		IPAERR("fail to create file %s\n", "rt_v6");
+		IPAERR("fail to create file flt_rt\n");
 		goto fail;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 008e90a..a9f4616 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -31,8 +31,10 @@
 #include "../ipa_common_i.h"
 #include "ipa_uc_offload_i.h"
 #include "ipa_pm.h"
+#include "ipa_defs.h"
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
 
 #define IPA_DEV_NAME_MAX_LEN 15
 #define DRV_NAME "ipa"
@@ -115,6 +117,13 @@
 		} \
 	} while (0)
 
+#define IPALOG_VnP_ADDRS(ptr) \
+	do { \
+		phys_addr_t b = (phys_addr_t) virt_to_phys(ptr); \
+		IPAERR("%s: VIRT: %pK PHYS: %pa\n", \
+			   #ptr, ptr, &b); \
+	} while (0)
+
 /* round addresses for closes page per SMMU requirements */
 #define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
 	do { \
@@ -251,7 +260,7 @@ enum {
 
 #define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
 
-#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 4096
 
 #define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
 #define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
@@ -451,6 +460,96 @@ struct ipa_smmu_cb_ctx {
 	u32 va_size;
 	u32 va_end;
 	bool shared;
+	bool is_cache_coherent;
+};
+
+/**
+ * struct ipa_flt_rule_add_i - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add   operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add_i {
+	u8 at_rear;
+	u32 flt_rule_hdl;
+	int status;
+	struct ipa_flt_rule_i rule;
+};
+
+/**
+ * struct ipa_flt_rule_mdfy_i - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be modified
+ * @flt_rule_hdl: handle to rule
+ * @status:	output parameter, status of filtering rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy_i {
+	u32 rule_hdl;
+	int status;
+	struct ipa_flt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_add_i - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add_i {
+	u8 at_rear;
+	u32 rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_mdfy_i - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be modified
+ * @rt_rule_hdl: handle to rule which supposed to modify
+ * @status:	output parameter, status of routing rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy_i {
+	u32 rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_add_ext_i - routing rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext_i {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+	struct ipa_rt_rule_i rule;
 };
 
 /**
@@ -465,18 +564,20 @@ struct ipa_smmu_cb_ctx {
  * @prio: rule 10bit priority which defines the order of the rule
  *  among other rules at the same integrated table
  * @rule_id: rule 10bit ID to be returned in packet status
+ * @cnt_idx: stats counter index
  * @ipacm_installed: indicate if installed by ipacm
  */
 struct ipa3_flt_entry {
 	struct list_head link;
 	u32 cookie;
-	struct ipa_flt_rule rule;
+	struct ipa_flt_rule_i rule;
 	struct ipa3_flt_tbl *tbl;
 	struct ipa3_rt_tbl *rt_tbl;
 	u32 hw_len;
 	int id;
 	u16 prio;
 	u16 rule_id;
+	u8 cnt_idx;
 	bool ipacm_installed;
 };
 
@@ -666,12 +767,13 @@ struct ipa3_flt_tbl {
  *  among other rules at the integrated same table
  * @rule_id: rule 10bit ID to be returned in packet status
  * @rule_id_valid: indicate if rule_id_valid valid or not?
+ * @cnt_idx: stats counter index
  * @ipacm_installed: indicate if installed by ipacm
  */
 struct ipa3_rt_entry {
 	struct list_head link;
 	u32 cookie;
-	struct ipa_rt_rule rule;
+	struct ipa_rt_rule_i rule;
 	struct ipa3_rt_tbl *tbl;
 	struct ipa3_hdr_entry *hdr;
 	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
@@ -680,6 +782,7 @@ struct ipa3_rt_entry {
 	u16 prio;
 	u16 rule_id;
 	u16 rule_id_valid;
+	u8 cnt_idx;
 	bool ipacm_installed;
 };
 
@@ -1173,6 +1276,36 @@ struct ipa3_stats {
 	u32 tx_non_linear;
 };
 
+/* offset for each stats */
+#define IPA3_UC_DEBUG_STATS_RINGFULL_OFF (0)
+#define IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF (4)
+#define IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF (8)
+#define IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF (12)
+#define IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF (16)
+#define IPA3_UC_DEBUG_STATS_OFF (20)
+
+/**
+ * struct ipa3_uc_dbg_ring_stats - uC dbg stats info for each
+ * offloading protocol
+ * @ring: ring stats for each channel
+ */
+struct ipa3_uc_dbg_ring_stats {
+	struct IpaHwRingStats_t ring[MAX_CH_STATS_SUPPORTED];
+};
+
+/**
+ * struct ipa3_uc_dbg_stats - uC dbg stats for offloading
+ * protocols
+ * @uc_dbg_stats_ofst: offset to SRAM base
+ * @uc_dbg_stats_size: stats size for all channels
+ * @uc_dbg_stats_mmio: mmio offset
+ */
+struct ipa3_uc_dbg_stats {
+	u32 uc_dbg_stats_ofst;
+	u16 uc_dbg_stats_size;
+	void __iomem *uc_dbg_stats_mmio;
+};
+
 struct ipa3_active_clients {
 	struct mutex mutex;
 	atomic_t cnt;
@@ -1320,7 +1453,30 @@ struct ipa3_wdi2_ctx {
 	u32 rdy_comp_ring_size;
 	u32 *rdy_ring_rp_va;
 	u32 *rdy_comp_ring_wp_va;
+	struct ipa3_uc_dbg_stats dbg_stats;
 };
+
+/**
+ * struct ipa3_wdi3_ctx - IPA wdi3 context
+ */
+struct ipa3_wdi3_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_usb_ctx - IPA usb context
+ */
+struct ipa3_usb_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_mhip_ctx - IPA mhip context
+ */
+struct ipa3_mhip_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
 /**
  * struct ipa3_transport_pm - transport power management related members
  * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
@@ -1444,6 +1600,20 @@ enum ipa_client_cb_type {
 };
 
 /**
+ * struct ipa_flt_rt_counter - IPA flt rt counters management
+ * @hdl: idr structure to manage hdl per request
+ * @used_hw: boolean array to track used hw counters
+ * @used_sw: boolean array to track used sw counters
+ * @hdl_lock: spinlock for flt_rt handle
+ */
+struct ipa_flt_rt_counter {
+	struct idr hdl;
+	bool used_hw[IPA_FLT_RT_HW_COUNTER];
+	bool used_sw[IPA_FLT_RT_SW_COUNTER];
+	spinlock_t hdl_lock;
+};
+
+/**
  * struct ipa3_char_device_context - IPA character device
  * @class: pointer to the struct class
  * @dev_num: device number
@@ -1550,6 +1720,9 @@ struct ipa3_pc_mbox_data {
  * @init_completion_obj: Completion object to be used in case IPA driver hasn't
  * @mhi_evid_limits: MHI event rings start and end ids
  *  finished initializing. Example of use - IOCTLs to /dev/ipa
+ * @flt_rt_counters: the counters usage info for flt rt stats
+ * @wdi3_ctx: IPA wdi3 context
+ * @gsi_info: channel/protocol info for GSI offloading uC stats
  * IPA context - holds all relevant info about IPA driver and its state
  */
 struct ipa3_context {
@@ -1689,10 +1862,10 @@ struct ipa3_context {
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 	struct ipa_dma_task_info dma_task_info;
 	struct ipa_hw_stats hw_stats;
+	struct ipa_flt_rt_counter flt_rt_counters;
 	struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE];
 	int num_ipa_cne_evt_req;
 	struct mutex ipa_cne_evt_lock;
-	bool use_ipa_pm;
 	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
 	bool wdi_over_pcie;
 	u32 entire_ipa_block_size;
@@ -1705,10 +1878,16 @@ struct ipa3_context {
 	void __iomem *reg_collection_base;
 	struct ipa3_wdi2_ctx wdi2_ctx;
 	struct ipa3_pc_mbox_data pc_mbox;
+	struct ipa3_wdi3_ctx wdi3_ctx;
+	struct ipa3_usb_ctx usb_ctx;
+	struct ipa3_mhip_ctx mhip_ctx;
 	atomic_t ipa_clk_vote;
 	int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
 	bool fw_loaded;
 	bool (*get_teth_port_state[IPA_MAX_CLNT])(void);
+	atomic_t is_ssr;
+	struct IpaHwOffloadStatsAllocCmdData_t
+		gsi_info[IPA_HW_PROTOCOL_MAX];
 };
 
 struct ipa3_plat_drv_res {
@@ -1745,7 +1924,6 @@ struct ipa3_plat_drv_res {
 	bool ipa_mhi_dynamic_config;
 	u32 ipa_tz_unlock_reg_num;
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
-	bool use_ipa_pm;
 	struct ipa_pm_init_params pm_init;
 	bool wdi_over_pcie;
 	u32 entire_ipa_block_size;
@@ -1755,6 +1933,7 @@ struct ipa3_plat_drv_res {
 	bool do_ram_collection_on_crash;
 	u32 secure_debug_check_action;
 	bool ipa_endp_delay_wa;
+	bool skip_ieob_mask_wa;
 };
 
 /**
@@ -2132,13 +2311,23 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
  */
 int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
 
+int ipa3_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules);
+
 int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
 	bool user_only);
 
+int ipa3_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules,
+	bool user_only);
+
 int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
 
+int ipa3_add_rt_rule_ext_v2(struct ipa_ioc_add_rt_rule_ext_v2 *rules);
+
 int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
 
+int ipa3_add_rt_rule_after_v2(struct ipa_ioc_add_rt_rule_after_v2
+	*rules);
+
 int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
 
 int ipa3_commit_rt(enum ipa_ip_type ip);
@@ -2153,20 +2342,32 @@ int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
 
 int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
 
+int ipa3_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
+
 /*
  * Filtering
  */
 int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
 
+int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules);
+
 int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
 	bool user_only);
 
+int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2 *rules,
+	bool user_only);
+
 int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
 
+int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
+	*rules);
+
 int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
 
 int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
 
+int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
+
 int ipa3_commit_flt(enum ipa_ip_type ip);
 
 int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only);
@@ -2269,6 +2470,9 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl);
 int ipa3_resume_wdi_pipe(u32 clnt_hdl);
 int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl);
 int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa3_get_wdi_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
+int ipa3_get_usb_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
 int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
@@ -2287,9 +2491,13 @@ int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 
 int ipa3_conn_wigig_rx_pipe_i(void *in,
-	struct ipa_wigig_conn_out_params *out);
+	struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent);
 
-int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out);
+int ipa3_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv);
 
 int ipa3_wigig_uc_msi_init(bool init,
 	phys_addr_t periph_baddr_pa,
@@ -2306,6 +2514,8 @@ int ipa3_enable_wigig_pipe_i(enum ipa_client_type client);
 
 int ipa3_disable_wigig_pipe_i(enum ipa_client_type client);
 
+int ipa3_wigig_init_debugfs_i(struct dentry *dent);
+
 /*
  * To retrieve doorbell physical address of
  * wlan pipes
@@ -2434,8 +2644,6 @@ void ipa_init_ep_flt_bitmap(void);
 
 bool ipa_is_ep_support_flt(int pipe_idx);
 
-enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx);
-
 bool ipa3_get_modem_cfg_emb_pipe_flt(void);
 
 u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
@@ -2549,6 +2757,9 @@ int ipa3_enable_data_path(u32 clnt_hdl);
 int ipa3_disable_data_path(u32 clnt_hdl);
 int ipa3_disable_gsi_data_path(u32 clnt_hdl);
 int ipa3_alloc_rule_id(struct idr *rule_ids);
+int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter);
+void ipa3_counter_remove_hdl(int hdl);
+void ipa3_counter_id_remove_all(void);
 int ipa3_id_alloc(void *ptr);
 void *ipa3_id_find(u32 id);
 void ipa3_id_remove(u32 id);
@@ -2579,6 +2790,7 @@ int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
 void ipa3_q6_pre_shutdown_cleanup(void);
 void ipa3_q6_post_shutdown_cleanup(void);
 void ipa3_q6_pre_powerup_cleanup(void);
+void ipa3_update_ssr_state(bool is_ssr);
 int ipa3_init_q6_smem(void);
 
 int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
@@ -2618,12 +2830,15 @@ int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
 int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
 int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
 int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n);
+int ipa3_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
+int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
 void ipa3_tag_destroy_imm(void *user1, int user2);
 const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	(enum ipa_client_type client);
 
 int ipa3_wigig_init_i(void);
-int ipa3_wigig_uc_init(
+int ipa3_wigig_internal_init(
 	struct ipa_wdi_uc_ready_params *inout,
 	ipa_wigig_misc_int_cb int_notify,
 	phys_addr_t *uc_db_pa);
@@ -2637,13 +2852,10 @@ struct ipa_teth_stats_endpoints {
 	u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT];
 };
 
-struct ipa_flt_rt_stats {
-	u32 num_pkts;
-	u32 num_pkts_hash;
-};
-
 int ipa_hw_stats_init(void);
 
+int ipa_init_flt_rt_stats(void);
+
 int ipa_debugfs_init_stats(struct dentry *parent);
 
 int ipa_init_quota_stats(u32 pipe_bitmask);
@@ -2675,19 +2887,9 @@ int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod);
 
 int ipa_reset_all_teth_stats(void);
 
-int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
-	u16 rule_id);
+int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query);
 
-int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering);
-
-int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering);
-
-int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
-	struct ipa_flt_rt_stats *out);
-
-int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id);
-
-int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering);
+int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats);
 
 u32 ipa3_get_num_pipes(void);
 struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type);
@@ -2784,11 +2986,12 @@ int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
 #ifdef CONFIG_IPA3_MHI_PRIME_MANAGER
 int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot prot);
 int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot);
-int ipa_mpm_notify_wan_state(void);
-int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state);
 int ipa3_is_mhip_offload_enabled(void);
 int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
 	enum ipa_client_type dst_pipe);
+int ipa_mpm_panic_handler(char *buf, int size);
+int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
 #else
 static inline int ipa_mpm_mhip_xdci_pipe_enable(
 	enum ipa_usb_teth_prot prot)
@@ -2800,12 +3003,8 @@ static inline int ipa_mpm_mhip_xdci_pipe_disable(
 {
 	return 0;
 }
-static inline int ipa_mpm_notify_wan_state(void)
-{
-	return 0;
-}
-static inline int ipa_mpm_mhip_ul_data_stop(
-	enum ipa_usb_teth_prot xdci_teth_prot)
+static inline int ipa_mpm_notify_wan_state(
+	struct wan_ioctl_notify_wan_state *state)
 {
 	return 0;
 }
@@ -2818,7 +3017,15 @@ static inline int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
 {
 	return 0;
 }
+static inline int ipa_mpm_panic_handler(char *buf, int size)
+{
+	return 0;
+}
 
+static inline int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+{
+	return 0;
+}
 #endif /* CONFIG_IPA3_MHI_PRIME_MANAGER */
 
 static inline void *alloc_and_init(u32 size, u32 init_val)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index d93ffc9..6cd91a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -381,12 +381,9 @@ static int __imp_configure_mhi_device(
 			ridx++;
 			resp->alloc_resp_arr_len = ridx;
 			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-			/* return INCOMPATIBLE_STATE if mhi not active */
-			if (mhi_is_active(imp_ctx->md.mhi_dev))
-				resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01;
-			else
-				resp->resp.error =
-					IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+			/* return INCOMPATIBLE_STATE in any case */
+			resp->resp.error =
+				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 			return -EINVAL;
 		}
 
@@ -549,11 +546,8 @@ struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
 			.is_success = 0;
 		resp->alloc_resp_arr_len++;
 		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-		/* return INCOMPATIBLE_STATE if mhi not active */
-		if (mhi_is_active(imp_ctx->md.mhi_dev))
-			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
-		else
-			resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+		/* return INCOMPATIBLE_STATE in any case */
+		resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 		goto fail_smmu;
 	}
 
@@ -613,7 +607,7 @@ struct ipa_mhi_clk_vote_resp_msg_v01
 	IMP_DBG_LOW("vote %d\n", vote);
 	memset(resp, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
 	resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-	resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+	resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 
 	mutex_lock(&imp_ctx->mutex);
 	if (imp_ctx->state != IMP_STARTED) {
@@ -640,11 +634,8 @@ struct ipa_mhi_clk_vote_resp_msg_v01
 		if (ret) {
 			IMP_ERR("mhi_sync_get failed %d\n", ret);
 			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-			/* return INCOMPATIBLE_STATE if mhi not active */
-			if (mhi_is_active(imp_ctx->md.mhi_dev))
-				resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01;
-			else
-				resp->resp.error =
+			/* return INCOMPATIBLE_STATE in any case */
+			resp->resp.error =
 					IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 			return resp;
 		}
@@ -686,7 +677,8 @@ static void imp_mhi_shutdown(void)
 
 	IMP_FUNC_ENTRY();
 
-	if (imp_ctx->state == IMP_STARTED) {
+	if (imp_ctx->state == IMP_STARTED ||
+		imp_ctx->state == IMP_READY) {
 		req.cleanup_valid = true;
 		req.cleanup = true;
 		ipa3_qmi_send_mhi_cleanup_request(&req);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index bb3b1a5..17b1786 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -59,9 +59,7 @@
 
 #define IPA_MPM_MHI_HOST_UL_CHANNEL 4
 #define IPA_MPM_MHI_HOST_DL_CHANNEL  5
-#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */
-#define DEFAULT_AGGR_PKT_LIMIT 0
-#define TETH_AGGR_TIME_LIMIT 10000
+#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
 #define TETH_AGGR_BYTE_LIMIT 24
 #define TETH_AGGR_DL_BYTE_LIMIT 16
 #define TRE_BUFF_SIZE 32768
@@ -108,11 +106,6 @@ enum ipa_mpm_mhip_client_type {
 	IPA_MPM_MHIP_NONE,
 };
 
-enum ipa_mpm_start_stop_type {
-	STOP,
-	START,
-};
-
 enum ipa_mpm_clk_vote_type {
 	CLK_ON,
 	CLK_OFF,
@@ -133,13 +126,22 @@ enum mhip_smmu_domain_type {
 	MHIP_SMMU_DOMAIN_NONE,
 };
 
+enum ipa_mpm_start_stop_type {
+	MPM_MHIP_STOP,
+	MPM_MHIP_START,
+};
 /* each pair of UL/DL channels are defined below */
 static const struct mhi_device_id mhi_driver_match_table[] = {
-	{ .chan = "IP_HW_MHIP_0" }, // for rmnet pipes
-	{ .chan = "IP_HW_MHIP_1" }, // for MHIP teth pipes - rndis/wifi
-	{ .chan = "IP_HW_ADPL" }, // DPL/ODL DL pipe
+	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
+	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
+	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
 };
 
+static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
+	__stringify(IPA_MPM_MHIP_TETH),
+	__stringify(IPA_MPM_MHIP_USB_RMNET),
+	__stringify(IPA_MPM_MHIP_USB_DPL),
+};
 /*
  * MHI PRIME GSI Descriptor format that Host IPA uses.
  */
@@ -273,7 +275,7 @@ static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
 static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
 	.mode = {
 		.mode = IPA_DMA,
-		.dst = IPA_CLIENT_USB_CONS,
+		.dst = IPA_CLIENT_USB_DPL_CONS,
 	},
 };
 
@@ -292,6 +294,7 @@ struct ipa_mpm_dev_info {
 	struct ipa_mpm_iova_addr data;
 	u32 chdb_base;
 	u32 erdb_base;
+	bool is_cache_coherent;
 };
 
 struct ipa_mpm_event_props {
@@ -334,6 +337,11 @@ enum ipa_mpm_mhip_chan {
 	IPA_MPM_MHIP_CHAN_BOTH,
 };
 
+struct ipa_mpm_clk_cnt_type {
+	atomic_t pcie_clk_cnt;
+	atomic_t ipa_clk_cnt;
+};
+
 struct producer_rings {
 	struct mhi_p_desc *tr_va;
 	struct mhi_p_desc *er_va;
@@ -358,19 +366,31 @@ struct ipa_mpm_mhi_driver {
 	struct ipa_mpm_channel dl_cons;
 	enum ipa_mpm_mhip_client_type mhip_client;
 	enum ipa_mpm_teth_state teth_state;
-	struct mutex mutex;
 	bool init_complete;
+	/* General MPM mutex to protect concurrent update of MPM GSI states */
+	struct mutex mutex;
+	/*
+	 * Mutex to protect IPA clock vote/unvote to make sure IPA isn't double
+	 * devoted for concurrency scenarios such as SSR and LPM mode CB
+	 * concurrency.
+	 */
 	struct mutex lpm_mutex;
+	/*
+	 * Mutex to protect mhi_dev update/ access, for concurrency such as
+	 * 5G SSR and USB disconnect/connect.
+	 */
+	struct mutex mhi_mutex;
 	bool in_lpm;
+	struct ipa_mpm_clk_cnt_type clk_cnt;
 };
 
 struct ipa_mpm_context {
 	struct ipa_mpm_dev_info dev_info;
 	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
 	struct mutex mutex;
-	atomic_t ipa_clk_ref_cnt;
-	atomic_t pcie_clk_ref_cnt;
 	atomic_t probe_cnt;
+	atomic_t pcie_clk_total_cnt;
+	atomic_t ipa_clk_total_cnt;
 	struct device *parent_pdev;
 	struct ipa_smmu_cb_ctx carved_smmu_cb;
 	struct device *mhi_parent_dev;
@@ -396,12 +416,13 @@ static void ipa_mpm_change_teth_state(int probe_id,
 static void ipa_mpm_change_gsi_state(int probe_id,
 	enum ipa_mpm_mhip_chan mhip_chan,
 	enum ipa_mpm_gsi_state next_state);
-static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
 	enum ipa_mpm_start_stop_type start);
 static int ipa_mpm_probe(struct platform_device *pdev);
 static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 	int probe_id);
-static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote);
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id);
 static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 	enum ipa_mpm_mhip_chan mhip_chan,
 	int probe_id,
@@ -478,6 +499,12 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
 	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
 	int ret = 0;
 
+	/* check cache coherent */
+	if (ipa_mpm_ctx->dev_info.is_cache_coherent)  {
+		IPA_MPM_DBG(" enable cache coherent\n");
+		prot |= IOMMU_CACHE;
+	}
+
 	if (carved_iova >= cb->va_end) {
 		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
 		ipa_assert();
@@ -630,6 +657,12 @@ static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
 	u32 iova = 0;
 	u64 offset = 0;
 
+	/* check cache coherent */
+	if (ipa_mpm_ctx->dev_info.is_cache_coherent)  {
+		IPA_MPM_DBG(" enable cache coherent\n");
+		prot |= IOMMU_CACHE;
+	}
+
 	if (carved_iova >= cb->va_end) {
 		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
 		ipa_assert();
@@ -978,7 +1011,6 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 		ipa_mpm_change_gsi_state(mhi_idx,
 			IPA_MPM_MHIP_CHAN_UL,
 			GSI_ALLOCATED);
-
 	result = ipa3_start_gsi_channel(ipa_ep_idx);
 	if (result) {
 		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
@@ -1056,15 +1088,9 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 		return;
 	}
 
-	/* Release channel */
-	if (mhi_idx == IPA_MPM_MHIP_CH_ID_2) {
-		/* Note: DPL not supported yet */
-		IPA_MPM_ERR("DPL not supported yet. returning\n");
-		return;
-	}
-
-	ipa3_set_reset_client_prod_pipe_delay(true,
-					IPA_CLIENT_USB_PROD);
+	/* For the uplink channels, enable HOLB. */
+	if (IPA_CLIENT_IS_CONS(mhip_client))
+		ipa3_disable_data_path(ipa_ep_idx);
 
 	/* Release channel */
 	result = ipa3_stop_gsi_channel(ipa_ep_idx);
@@ -1257,9 +1283,7 @@ static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
 		IPA_MPM_ERR("mhi_device_configure failed\n");
 		return -EINVAL;
 	}
-
 	IPA_MPM_FUNC_EXIT();
-
 	return 0;
 }
 
@@ -1270,21 +1294,28 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 	IPA_MPM_FUNC_ENTRY();
 
 	get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
-	if (mhip_idx == IPA_MPM_MHIP_CH_ID_2) {
-		IPA_MPM_ERR("DPL - return\n");
-		return;
+
+	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2) {
+		/* For DPL, stop only DL channel */
+		ipa_mpm_start_stop_ul_mhip_data_path(mhip_idx, MPM_MHIP_STOP);
+		ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
 	}
 
-	ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
 	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
 
-
 	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
-		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
+		/* while in modem shutdown scenarios such as SSR, no explicit
+		 * PCIe vote is needed.
+		 */
 		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
 	}
 	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
+	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 	IPA_MPM_FUNC_EXIT();
 }
 
@@ -1307,55 +1338,81 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		return -EINVAL;
 	}
 
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
 		IPA_MPM_ERR("MHI not initialized yet\n");
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		return 0;
 	}
+
+	IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+		vote, probe_id,
+		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
+
 	if (vote == CLK_ON) {
-		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 0)) {
-			result = mhi_device_get_sync(
-					ipa_mpm_ctx->md[probe_id].mhi_dev,
-					MHI_VOTE_BUS);
-			if (result) {
-				IPA_MPM_ERR("mhi_sync_get failed %d\n",
-					result);
-				return result;
-			}
-			IPA_MPM_DBG("PCIE clock now ON\n");
+		result = mhi_device_get_sync(
+			ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
+		if (result) {
+			IPA_MPM_ERR("mhi_sync_get failed %d for probe_id %d\n",
+				result, probe_id);
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			return result;
 		}
-		atomic_inc(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+
+		IPA_MPM_DBG("probe_id %d PCIE clock now ON\n", probe_id);
+		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
+		atomic_inc(&ipa_mpm_ctx->pcie_clk_total_cnt);
 	} else {
-		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 1)) {
-			mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev,
-				       MHI_VOTE_BUS);
-			IPA_MPM_DBG("PCIE clock off ON\n");
+		if ((atomic_read(
+			&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
+								== 0)) {
+			IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
+				probe_id);
+			WARN_ON(1);
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			return 0;
 		}
-		atomic_dec(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
+		IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
+		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
+		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
 	}
 
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	return result;
 }
 
 /*
  * Turning on/OFF IPA Clock is done only once- for all clients
  */
-static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote)
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id)
 {
 	if (vote > CLK_OFF)
 		return;
 
+	IPA_MPM_ERR("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+		vote, probe_id,
+		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));
+
 	if (vote == CLK_ON) {
-		if ((!atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt))) {
-			IPA_ACTIVE_CLIENTS_INC_SPECIAL("ipa_mpm");
-			IPA_MPM_DBG("IPA clock now ON\n");
-		}
-		atomic_inc(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
+		IPA_MPM_DBG("IPA clock now ON for probe_id %d\n", probe_id);
+		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
+		atomic_inc(&ipa_mpm_ctx->ipa_clk_total_cnt);
 	} else {
-		if ((atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt) == 1)) {
-			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("ipa_mpm");
-			IPA_MPM_DBG("IPA clock now OFF\n");
+		if ((atomic_read
+			(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
+								== 0)) {
+			IPA_MPM_DBG("probe_id %d IPA clock count < 0\n",
+				probe_id);
+			WARN_ON(1);
+			return;
 		}
-		atomic_dec(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
+		IPA_MPM_DBG("probe_id %d IPA clock off\n", probe_id);
+		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
+		atomic_dec(&ipa_mpm_ctx->ipa_clk_total_cnt);
 	}
 }
 
@@ -1421,7 +1478,7 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 		}
 	}
 
-	is_start = (start_stop == START) ? true : false;
+	is_start = (start_stop == MPM_MHIP_START) ? true : false;
 
 	if (is_start) {
 		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
@@ -1510,15 +1567,17 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 	return MHIP_STATUS_FAIL;
 }
 
-int ipa_mpm_notify_wan_state(void)
+int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 {
 	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
 	int i;
 	static enum mhip_status_type status;
 	int ret = 0;
-	enum ipa_client_type ul_chan, dl_chan;
 	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
 
+	if (!state)
+		return -EPERM;
+
 	if (!ipa3_is_mhip_offload_enabled())
 		return -EPERM;
 
@@ -1535,53 +1594,85 @@ int ipa_mpm_notify_wan_state(void)
 	}
 
 	IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
-	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
 
-	/* Start UL MHIP channel for offloading the tethering connection */
-	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
-
-	if (ret) {
-		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
-		return ret;
-	}
-
-	status = ipa_mpm_start_stop_mhip_chan(
-				IPA_MPM_MHIP_CHAN_UL, probe_id, START);
-	switch (status) {
-	case MHIP_STATUS_SUCCESS:
-	case MHIP_STATUS_NO_OP:
-		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
-		ret = ipa_mpm_start_stop_mhip_data_path(probe_id, START);
-
+	if (state->up) {
+		/* Start UL MHIP channel for offloading tethering connection */
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
 		if (ret) {
-			IPA_MPM_ERR("Couldnt start UL GSI channel");
-			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			IPA_MPM_ERR("Error clocking on PCIe clk, err = %d\n",
+				ret);
 			return ret;
 		}
-
-		if (status == MHIP_STATUS_NO_OP) {
-			/* Channels already have been started,
-			 * we can devote for pcie clocks
-			 */
+		status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_UL, probe_id, MPM_MHIP_START);
+		switch (status) {
+		case MHIP_STATUS_SUCCESS:
+			ipa_mpm_ctx->md[probe_id].teth_state =
+						IPA_MPM_TETH_CONNECTED;
+			ret = ipa_mpm_start_stop_ul_mhip_data_path(
+						probe_id, MPM_MHIP_START);
+			if (ret) {
+				IPA_MPM_ERR("err UL chan start\n");
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+				return ret;
+			}
+			break;
+		case MHIP_STATUS_EP_NOT_READY:
+		case MHIP_STATUS_NO_OP:
+			IPA_MPM_DBG("UL chan already start, status = %d\n",
+					status);
+			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			return ret;
+		case MHIP_STATUS_FAIL:
+		case MHIP_STATUS_BAD_STATE:
+		case MHIP_STATUS_EP_NOT_FOUND:
+			IPA_MPM_ERR("UL chan start err =%d\n", status);
+			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			ipa_assert();
+			return -EFAULT;
+		default:
+			IPA_MPM_ERR("Err not found\n");
 			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			ret = -EFAULT;
+			break;
 		}
-		break;
-	case MHIP_STATUS_EP_NOT_READY:
-		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
-		break;
-	case MHIP_STATUS_FAIL:
-	case MHIP_STATUS_BAD_STATE:
-	case MHIP_STATUS_EP_NOT_FOUND:
-		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
+	} else {
+		status = ipa_mpm_start_stop_mhip_chan(
+					IPA_MPM_MHIP_CHAN_UL, probe_id,
+					MPM_MHIP_STOP);
+		switch (status) {
+		case MHIP_STATUS_SUCCESS:
+			ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+			ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
+							MPM_MHIP_STOP);
+			break;
+		case MHIP_STATUS_NO_OP:
+		case MHIP_STATUS_EP_NOT_READY:
+			IPA_MPM_DBG("UL chan already stop, status = %d\n",
+					status);
+			break;
+		case MHIP_STATUS_FAIL:
+		case MHIP_STATUS_BAD_STATE:
+		case MHIP_STATUS_EP_NOT_FOUND:
+			IPA_MPM_ERR("UL chan cant be stopped err =%d\n",
+				status);
+			ipa_assert();
+			return -EFAULT;
+		default:
+			IPA_MPM_ERR("Err not found\n");
+			return -EFAULT;
+		}
+		/* Stop UL MHIP channel for offloading tethering connection */
 		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-		return -EFAULT;
-	default:
-		IPA_MPM_ERR("Err not found\n");
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-		ret = -EFAULT;
-		break;
-	}
 
+		if (ret) {
+			IPA_MPM_ERR("Error clocking on PCIe clk, err = %d\n",
+				ret);
+			return ret;
+		}
+		ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+	}
 	return ret;
 }
 
@@ -1667,7 +1758,7 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
 		IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
 }
 
-static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
 	enum ipa_mpm_start_stop_type start)
 {
 	int ipa_ep_idx;
@@ -1759,17 +1850,11 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 
 	if (ipa_mpm_ctx->md[probe_id].init_complete) {
 		IPA_MPM_ERR("Probe initialization already done, returning\n");
-		return -EPERM;
+		return 0;
 	}
 
 	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);
 
-	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
-		/* NOTE :: DPL not supported yet , remove later */
-		IPA_MPM_DBG("DPL not supported yet - returning for DPL..\n");
-		return 0;
-	}
-
 	get_ipa3_client(probe_id, &ul_prod, &dl_cons);
 
 	/* Vote for IPA clock for first time in initialization seq.
@@ -1785,7 +1870,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 
 	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
 	mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
-	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
 	ipa_mpm_ctx->md[probe_id].in_lpm = false;
 	mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
 	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
@@ -1804,34 +1889,33 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	 * IPA Device PROD TRE -> IPA HOST CONS EV
 	 * IPA Device PROD EV ->  IPA HOST CONS TRE
 	 */
-	if (probe_id != IPA_MPM_MHIP_CH_ID_2) {
-		if (ul_prod != IPA_CLIENT_MAX) {
-			/* store UL properties */
-			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
-			/* Store Channel properties */
-			ch->chan_props.id = mhi_dev->ul_chan_id;
-			ch->chan_props.device_db =
-				ipa_mpm_ctx->dev_info.chdb_base +
-				ch->chan_props.id * 8;
-			/* Fill Channel Conext to be sent to Device side */
-			ch->chan_props.ch_ctx.chtype =
-				IPA_MPM_MHI_HOST_UL_CHANNEL;
-			ch->chan_props.ch_ctx.erindex =
-				mhi_dev->ul_event_id;
-			ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
-				GSI_EVT_RING_RE_SIZE_16B;
-			/* Store Event properties */
-			ch->evt_props.ev_ctx.update_rp_modc = 0;
-			ch->evt_props.ev_ctx.update_rp_intmodt = 0;
-			ch->evt_props.ev_ctx.ertype = 1;
-			ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
-				GSI_EVT_RING_RE_SIZE_16B;
-			ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
-			ch->evt_props.device_db =
-				ipa_mpm_ctx->dev_info.erdb_base +
-				ch->chan_props.ch_ctx.erindex * 8;
-		}
+	if (ul_prod != IPA_CLIENT_MAX) {
+		/* store UL properties */
+		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+		/* Store Channel properties */
+		ch->chan_props.id = mhi_dev->ul_chan_id;
+		ch->chan_props.device_db =
+			ipa_mpm_ctx->dev_info.chdb_base +
+			ch->chan_props.id * 8;
+		/* Fill Channel Context to be sent to Device side */
+		ch->chan_props.ch_ctx.chtype =
+			IPA_MPM_MHI_HOST_UL_CHANNEL;
+		ch->chan_props.ch_ctx.erindex =
+			mhi_dev->ul_event_id;
+		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		/* Store Event properties */
+		ch->evt_props.ev_ctx.update_rp_modc = 0;
+		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+		ch->evt_props.ev_ctx.ertype = 1;
+		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+		ch->evt_props.device_db =
+			ipa_mpm_ctx->dev_info.erdb_base +
+			ch->chan_props.ch_ctx.erindex * 8;
 	}
+
 	if (dl_cons != IPA_CLIENT_MAX) {
 		/* store DL channel properties */
 		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
@@ -1859,7 +1943,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 			ch->chan_props.ch_ctx.erindex * 8;
 	}
 	/* connect Host GSI pipes with MHI' protocol */
-	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
+	if (ul_prod != IPA_CLIENT_MAX)  {
 		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
 			probe_id, &ul_out_params);
 		if (ret) {
@@ -1868,31 +1952,34 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 			goto fail_gsi_setup;
 		}
 	}
-	ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons, probe_id, &dl_out_params);
-	if (ret) {
-		IPA_MPM_ERR("connecting MPM client = %d failed\n",
-			dl_cons);
-		goto fail_gsi_setup;
-	}
-	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
-		if (ul_prod != IPA_CLIENT_MAX) {
-			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
-			ch->evt_props.ev_ctx.update_rp_addr =
-				ipa_mpm_smmu_map_doorbell(
-					MHIP_SMMU_DOMAIN_PCIE,
-					ul_out_params.db_reg_phs_addr_lsb);
-			if (ch->evt_props.ev_ctx.update_rp_addr == 0)
-				ipa_assert();
-			ipa_mpm_ctx->md[probe_id].ul_prod.db_device_iova =
-				ch->evt_props.ev_ctx.update_rp_addr;
 
-			ret = __ipa_mpm_configure_mhi_device(
-					ch, probe_id, DMA_TO_HIPA);
-			if (ret) {
-				IPA_MPM_ERR("configure_mhi_dev fail %d\n",
-						ret);
-				goto fail_smmu;
-			}
+	if (dl_cons != IPA_CLIENT_MAX) {
+		ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons,
+			probe_id, &dl_out_params);
+		if (ret) {
+			IPA_MPM_ERR("connecting MPM client = %d failed\n",
+				dl_cons);
+			goto fail_gsi_setup;
+		}
+	}
+
+	if (ul_prod != IPA_CLIENT_MAX) {
+		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+		ch->evt_props.ev_ctx.update_rp_addr =
+			ipa_mpm_smmu_map_doorbell(
+				MHIP_SMMU_DOMAIN_PCIE,
+				ul_out_params.db_reg_phs_addr_lsb);
+		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+			ipa_assert();
+		ipa_mpm_ctx->md[probe_id].ul_prod.db_device_iova =
+			ch->evt_props.ev_ctx.update_rp_addr;
+
+		ret = __ipa_mpm_configure_mhi_device(
+				ch, probe_id, DMA_TO_HIPA);
+		if (ret) {
+			IPA_MPM_ERR("configure_mhi_dev fail %d\n",
+					ret);
+			goto fail_smmu;
 		}
 	}
 
@@ -1906,7 +1993,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
 			ipa_assert();
 
-	ipa_mpm_ctx->md[probe_id].dl_cons.db_device_iova =
+		ipa_mpm_ctx->md[probe_id].dl_cons.db_device_iova =
 			ch->evt_props.ev_ctx.update_rp_addr;
 
 		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
@@ -1920,7 +2007,17 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
 	if (ret) {
 		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
-		goto fail_smmu;
+		WARN_ON(1);
+		/*
+		 * WA to handle prepare_for_tx failures.
+		 * Though prepare for transfer fails, indicate success
+		 * to MHI driver. remove_cb will be called eventually when
+		 * Device side comes from where pending cleanup happens.
+		 */
+		atomic_inc(&ipa_mpm_ctx->probe_cnt);
+		ipa_mpm_ctx->md[probe_id].init_complete = true;
+		IPA_MPM_FUNC_EXIT();
+		return 0;
 	}
 
 	/*
@@ -2061,14 +2158,16 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	 */
 	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
 	case IPA_MPM_TETH_INIT:
-		/* No teth started yet, disable UL channel */
-		ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
-						probe_id, STOP);
-
-		/* Disable data path */
-		if (ipa_mpm_start_stop_mhip_data_path(probe_id, STOP)) {
-			IPA_MPM_ERR("MHIP Enable data path failed\n");
-			goto fail_start_channel;
+		if (ul_prod != IPA_CLIENT_MAX) {
+			/* No teth started yet, disable UL channel */
+			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+						probe_id, MPM_MHIP_STOP);
+			/* Disable data path */
+			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
+				MPM_MHIP_STOP)) {
+				IPA_MPM_ERR("MHIP Enable data path failed\n");
+				goto fail_start_channel;
+			}
 		}
 		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
 		break;
@@ -2078,14 +2177,18 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
 
 		/* Enable data path */
-		if (ipa_mpm_start_stop_mhip_data_path(probe_id, START)) {
-			IPA_MPM_ERR("MHIP Enable data path failed\n");
-			goto fail_start_channel;
+		if (ul_prod != IPA_CLIENT_MAX) {
+			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
+				MPM_MHIP_START)) {
+				IPA_MPM_ERR("MHIP Enable data path failed\n");
+				goto fail_start_channel;
+			}
 		}
-
 		/* Lift the delay for rmnet USB prod pipe */
-		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
-		ipa3_xdci_ep_delay_rm(pipe_idx);
+		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
+			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+			ipa3_xdci_ep_delay_rm(pipe_idx);
+		}
 		break;
 	default:
 		IPA_MPM_DBG("No op for UL channel, in teth state = %d",
@@ -2094,6 +2197,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	}
 
 	atomic_inc(&ipa_mpm_ctx->probe_cnt);
+	ipa_mpm_ctx->md[probe_id].init_complete = true;
 	IPA_MPM_FUNC_EXIT();
 	return 0;
 
@@ -2103,7 +2207,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
 		IPA_MPM_DBG("SMMU failed\n");
 	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
 	ipa_assert();
 	return ret;
 }
@@ -2142,7 +2246,7 @@ static void ipa_mpm_init_mhip_channel_info(void)
 	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
 		IPA_CLIENT_MAX;
 	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
-	IPA_MPM_MHIP_USB_DPL;
+		IPA_MPM_MHIP_USB_DPL;
 }
 
 static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
@@ -2161,7 +2265,9 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
 		return;
 	}
 
+	IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
 	ipa_mpm_mhip_shutdown(mhip_idx);
+
 	atomic_dec(&ipa_mpm_ctx->probe_cnt);
 
 	if (atomic_read(&ipa_mpm_ctx->probe_cnt) == 0) {
@@ -2169,7 +2275,13 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
 		ipa_mpm_ctx->mhi_parent_dev = NULL;
 		ipa_mpm_ctx->carved_smmu_cb.next_addr =
 			ipa_mpm_ctx->carved_smmu_cb.va_start;
-		atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
+		atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
+		for (mhip_idx = 0;
+			mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+			atomic_set(
+				&ipa_mpm_ctx->md[mhip_idx].clk_cnt.pcie_clk_cnt,
+				0);
+		}
 	}
 
 	IPA_MPM_FUNC_EXIT();
@@ -2200,9 +2312,9 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
 			status = ipa_mpm_start_stop_mhip_chan(
 				IPA_MPM_MHIP_CHAN_DL,
-							mhip_idx, STOP);
-		IPA_MPM_DBG("status = %d\n", status);
-		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+				mhip_idx, MPM_MHIP_STOP);
+			IPA_MPM_DBG("status = %d\n", status);
+			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
 			ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
 		} else {
 			IPA_MPM_DBG("Already in lpm\n");
@@ -2210,17 +2322,22 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		break;
 	case MHI_CB_LPM_EXIT:
 		if (ipa_mpm_ctx->md[mhip_idx].in_lpm) {
-			ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+			ipa_mpm_vote_unvote_ipa_clk(CLK_ON, mhip_idx);
 			status = ipa_mpm_start_stop_mhip_chan(
 				IPA_MPM_MHIP_CHAN_DL,
-				mhip_idx, START);
+				mhip_idx, MPM_MHIP_START);
 			IPA_MPM_DBG("status = %d\n", status);
 			ipa_mpm_ctx->md[mhip_idx].in_lpm = false;
 		} else {
 			IPA_MPM_DBG("Already out of lpm\n");
 		}
 		break;
-	default:
+	case MHI_CB_EE_RDDM:
+	case MHI_CB_PENDING_DATA:
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_FATAL_ERROR:
+	case MHI_CB_BW_REQ:
+	case MHI_CB_EE_MISSION_MODE:
 		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
 		break;
 	}
@@ -2317,41 +2434,47 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	}
 
 	IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
-		xdci_teth_prot, mhip_client, probe_id);
+			xdci_teth_prot, mhip_client, probe_id);
 
 	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
 
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+	if (ret) {
+		IPA_MPM_ERR("Error clocking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
 	switch (mhip_client) {
 	case IPA_MPM_MHIP_USB_RMNET:
 		ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
 			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
 		break;
-	case IPA_MPM_MHIP_TETH:
 	case IPA_MPM_MHIP_USB_DPL:
-		IPA_MPM_DBG("Teth connecting for prot %d\n", mhip_client);
+		IPA_MPM_DBG("connecting DPL prot %d\n", mhip_client);
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
 		return 0;
 	default:
-		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
-		ret = 0;
-		break;
+		IPA_MPM_DBG("mhip_client = %d not processed\n", mhip_client);
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (ret) {
+			IPA_MPM_ERR("Error unvoting on PCIe clk, err = %d\n",
+					ret);
+			return ret;
+		}
+		return 0;
 	}
 
-	/* Start UL MHIP channel for offloading the tethering connection */
-	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
-
-	if (ret) {
-		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
-		return ret;
-	}
-
-	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
-						probe_id, START);
-
+	if (mhip_client != IPA_MPM_MHIP_USB_DPL)
+		/* Start UL MHIP channel for offloading teth connection */
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+							probe_id,
+							MPM_MHIP_START);
 	switch (status) {
 	case MHIP_STATUS_SUCCESS:
 	case MHIP_STATUS_NO_OP:
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
-		ipa_mpm_start_stop_mhip_data_path(probe_id, START);
+		ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
+						MPM_MHIP_START);
 
 		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
 
@@ -2383,43 +2506,6 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	return ret;
 }
 
-int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot)
-{
-	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
-	int i;
-	enum ipa_mpm_mhip_client_type mhip_client;
-	int ret = 0;
-
-	if (ipa_mpm_ctx == NULL) {
-		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
-		return 0;
-	}
-
-	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
-
-	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
-		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
-			probe_id = i;
-			break;
-		}
-	}
-
-	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
-		IPA_MPM_ERR("Invalid probe_id\n");
-		return 0;
-	}
-
-	IPA_MPM_DBG("Map xdci prot %d to mhip_client = %d probe_id = %d\n",
-		xdci_teth_prot, mhip_client, probe_id);
-
-	ret = ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
-
-	if (ret)
-		IPA_MPM_ERR("Error stopping UL path, err = %d\n", ret);
-
-	return ret;
-}
-
 int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 {
 	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
@@ -2447,31 +2533,44 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 		return 0;
 	}
 
-	IPA_MPM_DBG("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
+	IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
 			xdci_teth_prot, mhip_client, probe_id);
 
 	switch (mhip_client) {
 	case IPA_MPM_MHIP_USB_RMNET:
-	case IPA_MPM_MHIP_TETH:
-		IPA_MPM_DBG("Teth Disconnecting for prot %d\n", mhip_client);
+		ret = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		if (ret) {
+			IPA_MPM_ERR("failed to reset dma mode\n");
+			return ret;
+		}
 		break;
-	case IPA_MPM_MHIP_USB_DPL:
-		IPA_MPM_DBG("Teth Disconnecting for DPL, return\n");
+	case IPA_MPM_MHIP_TETH:
+		IPA_MPM_DBG("Rndis Disconnect, wait for wan_state ioctl\n");
 		return 0;
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("Teth Disconnecting for DPL\n");
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (ret)
+			IPA_MPM_ERR("Error clocking off PCIe clk, err = %d\n",
+				ret);
+		ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+		return ret;
 	default:
 		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
 		return 0;
 	}
 
 	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
-		probe_id, STOP);
+		probe_id, MPM_MHIP_STOP);
 
 	switch (status) {
 	case MHIP_STATUS_SUCCESS:
 	case MHIP_STATUS_NO_OP:
 	case MHIP_STATUS_EP_NOT_READY:
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
-		ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
+		ipa_mpm_start_stop_ul_mhip_data_path(probe_id, MPM_MHIP_STOP);
 		break;
 	case MHIP_STATUS_FAIL:
 	case MHIP_STATUS_BAD_STATE:
@@ -2521,6 +2620,8 @@ static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
 		ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
 		smmu_out.smmu_enable;
 
+	/* get cache_coherent enable or not */
+	ipa_mpm_ctx->dev_info.is_cache_coherent = ap_cb->is_cache_coherent;
 	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
 		carved_iova_ap_mapping, 2)) {
 		IPA_MPM_ERR("failed to read of_node %s\n",
@@ -2539,8 +2640,8 @@ static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
 	cb->va_size = carved_iova_ap_mapping[1];
 	cb->va_end = cb->va_start + cb->va_size;
 
-	if (cb->va_start >= ap_cb->va_start && cb->va_start < ap_cb->va_end) {
-		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%x\n",
+	if (cb->va_end >= ap_cb->va_start) {
+		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
 				cb->va_start);
 		ipa_assert();
 		return -EFAULT;
@@ -2590,6 +2691,7 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 
 	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
 		mutex_init(&ipa_mpm_ctx->md[i].mutex);
+		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
 		mutex_init(&ipa_mpm_ctx->md[i].lpm_mutex);
 	}
 
@@ -2619,12 +2721,14 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 		goto fail_probe;
 	}
 
-	atomic_set(&ipa_mpm_ctx->ipa_clk_ref_cnt, 0);
-	atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
 
 	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
 		ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
 		ipa_mpm_ctx->md[idx].dl_cons.gsi_state = GSI_INIT;
+		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.ipa_clk_cnt, 0);
+		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.pcie_clk_cnt, 0);
 	}
 
 	ret = mhi_driver_register(&mhi_driver);
@@ -2694,6 +2798,76 @@ int ipa3_is_mhip_offload_enabled(void)
 		return 1;
 }
 
+int ipa_mpm_panic_handler(char *buf, int size)
+{
+	int i;
+	int cnt = 0;
+
+	cnt = scnprintf(buf, size,
+			"\n---- MHIP Active Clients Table ----\n");
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"Total PCIe active clients count: %d\n",
+			atomic_read(&ipa_mpm_ctx->pcie_clk_total_cnt));
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"Total IPA active clients count: %d\n",
+			atomic_read(&ipa_mpm_ctx->ipa_clk_total_cnt));
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		cnt += scnprintf(buf + cnt, size - cnt,
+			"client id: %d ipa vote cnt: %d pcie vote cnt: %d\n", i,
+			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.ipa_clk_cnt),
+			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.pcie_clk_cnt));
+	}
+	return cnt;
+}
+
+/**
+ * ipa3_get_mhip_gsi_stats() - Query MHIP gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL mhip_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+
+	return 0;
+}
+
+
 late_initcall(ipa_mpm_init);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MHI Proxy Manager Driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
index 7202c25..f6173cea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -318,6 +318,30 @@ int ipa_setup_odl_pipe(void)
 
 }
 
+/**
+ * ipa3_odl_register_pm - Register odl client for PM
+ *
+ * This function will register 1 client with IPA PM to represent odl
+ * in clock scaling calculation:
+ *	- "ODL" - this client will be activated when pipe connected
+ */
+static int ipa3_odl_register_pm(void)
+{
+	int result = 0;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = "ODL";
+	pm_reg.group = IPA_PM_GROUP_DEFAULT;
+	pm_reg.skip_clk_vote = true;
+	result = ipa_pm_register(&pm_reg, &ipa3_odl_ctx->odl_pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return result;
+}
+
 int ipa3_odl_pipe_open(void)
 {
 	int ret = 0;
@@ -365,7 +389,12 @@ static int ipa_adpl_open(struct inode *inode, struct file *filp)
 	int ret = 0;
 
 	IPADBG("Called the function :\n");
-	if (ipa3_odl_ctx->odl_state.odl_init) {
+	if (ipa3_odl_ctx->odl_state.odl_init &&
+				!ipa3_odl_ctx->odl_state.adpl_open) {
+		/* Activate ipa_pm*/
+		ret = ipa_pm_activate_sync(ipa3_odl_ctx->odl_pm_hdl);
+		if (ret)
+			IPAERR("failed to activate pm\n");
 		ipa3_odl_ctx->odl_state.adpl_open = true;
 		ret = ipa3_odl_pipe_open();
 	} else {
@@ -379,8 +408,13 @@ static int ipa_adpl_open(struct inode *inode, struct file *filp)
 
 static int ipa_adpl_release(struct inode *inode, struct file *filp)
 {
+	int ret = 0;
+	/* Deactivate ipa_pm */
+	ret = ipa_pm_deactivate_sync(ipa3_odl_ctx->odl_pm_hdl);
+	if (ret)
+		IPAERR("failed to activate pm\n");
 	ipa3_odl_pipe_cleanup(false);
-	return 0;
+	return ret;
 }
 
 void ipa3_odl_pipe_cleanup(bool is_ssr)
@@ -414,6 +448,8 @@ void ipa3_odl_pipe_cleanup(bool is_ssr)
 	/*Assume DIAG will not close this node in SSR case*/
 	if (is_ssr)
 		ipa3_odl_ctx->odl_state.adpl_open = true;
+	else
+		ipa3_odl_ctx->odl_state.adpl_open = false;
 
 	ipa3_odl_ctx->odl_state.odl_disconnected = true;
 	ipa3_odl_ctx->odl_state.odl_ep_setup = false;
@@ -665,6 +701,13 @@ int ipa_odl_init(void)
 	}
 
 	ipa3_odl_ctx->odl_state.odl_init = true;
+
+	/* register ipa_pm */
+	result = ipa3_odl_register_pm();
+	if (result) {
+		IPAWANERR("ipa3_odl_register_pm failed, ret: %d\n",
+				result);
+	}
 	return 0;
 cdev1_add_fail:
 	device_destroy(odl_cdev[1].class, odl_cdev[1].dev_num);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h
index 64bbba5..5049001 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA3_ODL_H_
@@ -56,6 +56,7 @@ struct ipa_odl_context {
 	struct odl_state_bit_mask odl_state;
 	bool odl_ctl_msg_wq_flag;
 	struct ipa3_odlstats stats;
+	u32 odl_pm_hdl;
 };
 
 struct ipa3_push_msg_odl {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index afc2b7b..c73f32c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -361,7 +361,7 @@ static int do_clk_scaling(void)
 	mutex_unlock(&ipa_pm_ctx->client_mutex);
 
 	for (i = 0; i < clk_scaling->threshold_size; i++) {
-		if (tput > clk_scaling->current_threshold[i])
+		if (tput >= clk_scaling->current_threshold[i])
 			new_th_idx++;
 	}
 
@@ -526,15 +526,19 @@ static int find_next_open_array_element(const char *name)
  */
 static int add_client_to_exception_list(u32 hdl)
 {
-	int i;
+	int i, len = 0;
 	struct ipa_pm_exception_list *exception;
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);
+	len = strlen(ipa_pm_ctx->clients[hdl]->name);
 	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
 		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
 		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
-			strlen(exception->clients))) {
+			len) && (strlen(exception->clients)
+			== len)) {
 			exception->pending--;
+			IPA_PM_DBG("Pending: %d\n",
+			exception->pending);
 
 			if (exception->pending < 0) {
 				WARN_ON(1);
@@ -568,6 +572,8 @@ static int remove_client_from_exception_list(u32 hdl)
 		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
 		if (exception->bitmask & (1 << hdl)) {
 			exception->pending++;
+			IPA_PM_DBG("Pending: %d\n",
+			exception->pending);
 			exception->bitmask &= ~(1 << hdl);
 		}
 	}
@@ -651,6 +657,7 @@ int ipa_pm_init(struct ipa_pm_init_params *params)
 				clk_scaling->exception_list[i].pending++;
 		}
 
+		/* for the first client */
 		clk_scaling->exception_list[i].pending++;
 		IPA_PM_DBG("Pending: %d\n",
 			clk_scaling->exception_list[i].pending);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index 8cca2ef..baf7b01 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -12,7 +12,7 @@
 #define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/
 #define IPA_PM_MAX_EX_CL 64
 #define IPA_PM_THRESHOLD_MAX 5
-#define IPA_PM_EXCEPTION_MAX 2
+#define IPA_PM_EXCEPTION_MAX 5
 #define IPA_PM_DEFERRED_TIMEOUT 10
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 3dca52b..652d82a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -313,7 +313,7 @@ static void ipa3_handle_mhi_alloc_channel_req(struct qmi_handle *qmi_handle,
 
 	resp = imp_handle_allocate_channel_req(ch_alloc_req);
 	if (!resp) {
-		IPAWANERR("imp handle allocate channel req fails");
+		IPAWANERR("imp handle allocate channel req fails\n");
 		return;
 	}
 
@@ -338,18 +338,62 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle,
 	const void *decoded_msg)
 {
 	struct ipa_mhi_clk_vote_req_msg_v01 *vote_req;
-	struct ipa_mhi_clk_vote_resp_msg_v01 *resp = NULL;
+	struct ipa_mhi_clk_vote_resp_msg_v01 *resp = NULL, resp2;
 	int rc;
+	uint32_t bw_mbps = 0;
 
 	vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg;
 	IPAWANDBG("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01(%d)\n",
 		vote_req->mhi_vote);
-	resp = imp_handle_vote_req(vote_req->mhi_vote);
-	if (!resp) {
-		IPAWANERR("imp handle allocate channel req fails");
-		return;
+
+	memset(&resp2, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
+
+	/* for mpm used for ipa clk voting */
+	if (ipa3_is_apq()) {
+		IPAWANDBG("Throughput(%d:%d) clk-rate(%d:%d)\n",
+			vote_req->tput_value_valid,
+			vote_req->tput_value,
+			vote_req->clk_rate_valid,
+			vote_req->clk_rate);
+		if (vote_req->clk_rate_valid) {
+			switch (vote_req->clk_rate) {
+			case QMI_IPA_CLOCK_RATE_LOW_SVS_V01:
+				bw_mbps = 0;
+				break;
+			case QMI_IPA_CLOCK_RATE_SVS_V01:
+				bw_mbps = 350;
+				break;
+			case QMI_IPA_CLOCK_RATE_NOMINAL_V01:
+				bw_mbps = 690;
+				break;
+			case QMI_IPA_CLOCK_RATE_TURBO_V01:
+				bw_mbps = 1200;
+				break;
+			default:
+				IPAWANERR("Note supported clk_rate (%d)\n",
+				vote_req->clk_rate);
+				bw_mbps = 0;
+				resp2.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+				resp2.resp.error =
+					IPA_QMI_ERR_NOT_SUPPORTED_V01;
+				break;
+			}
+			if (ipa3_vote_for_bus_bw(&bw_mbps)) {
+				IPAWANERR("Failed to vote BW (%u)\n", bw_mbps);
+				resp2.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+				resp2.resp.error =
+					IPA_QMI_ERR_NOT_SUPPORTED_V01;
+			}
+			resp = &resp2;
+		}
+	} else {
+		resp = imp_handle_vote_req(vote_req->mhi_vote);
+		if (!resp) {
+			IPAWANERR("imp handle allocate channel req fails");
+			return;
+		}
+		IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n");
 	}
-	IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n");
 
 	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
 		resp->resp.result, resp->resp.error);
@@ -1033,6 +1077,15 @@ int ipa3_qmi_rmv_offload_request_send(
 	req_desc.msg_id = QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01;
 	req_desc.ei_array = ipa_remove_offload_connection_req_msg_v01_ei;
 
+	/* clean the DL rules in the cache if flag is set */
+	if (req->clean_all_rules) {
+		for (i = 0; i < QMI_IPA_MAX_FILTERS_V01; i++)
+			if (ipa3_qmi_ctx->ipa_offload_cache[i].valid)
+				ipa3_qmi_ctx->ipa_offload_cache[i].valid =
+				false;
+	}
+
+
 	memset(&resp, 0, sizeof(struct
 		ipa_remove_offload_connection_resp_msg_v01));
 	resp_desc.max_msg_len =
@@ -1280,13 +1333,25 @@ int ipa3_qmi_filter_notify_send(
 		return -EINVAL;
 	}
 
+	if (req->rule_id_ex_len == 0) {
+		IPAWANDBG(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+	} else if (req->rule_id_ex_len > QMI_IPA_MAX_FILTERS_EX2_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->rule_id_ex_len);
+		return -EINVAL;
+	}
+
 	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
 			req->source_pipe_index, req->install_status);
 		return -EINVAL;
-	} else if (req->rule_id_valid != 1) {
-		IPAWANERR(" UL filter rule for pipe %d rule_id_valid = %d\n",
-			req->source_pipe_index, req->rule_id_valid);
+	} else if ((req->rule_id_valid != 1) &&
+		(req->rule_id_ex_valid != 1)) {
+		IPAWANERR(" UL filter rule for pipe %d rule_id_valid = %d/%d\n",
+			req->source_pipe_index, req->rule_id_valid,
+			req->rule_id_ex_valid);
 		return -EINVAL;
 	} else if (req->source_pipe_index >= ipa3_ctx->ipa_num_pipes) {
 		IPAWANDBG(
@@ -1419,7 +1484,7 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
 		return;
 
 	IPAWANDBG("Q6 QMI service available now\n");
-	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+	if (ipa3_is_apq()) {
 		ipa3_qmi_modem_init_fin = true;
 		IPAWANDBG("QMI-client complete, ipa3_qmi_modem_init_fin : %d\n",
 			ipa3_qmi_modem_init_fin);
@@ -1812,6 +1877,8 @@ int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
 {
 	int ret;
 
+	IPAWANDBG("Bus BW is %d\n", *bw_mbps);
+
 	if (bw_mbps == NULL) {
 		IPAWANERR("Bus BW is invalid\n");
 		return -EINVAL;
@@ -2162,6 +2229,22 @@ int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req)
 		resp.resp.error, "ipa_mhi_cleanup_req_msg");
 }
 
+int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req)
+{
+	IPAWANDBG("Sending QMI_IPA_ENDP_DESC_INDICATION_V01\n");
+
+	if (unlikely(!ipa3_svc_handle))
+		return -ETIMEDOUT;
+
+	return qmi_send_indication(ipa3_svc_handle,
+		&ipa3_qmi_ctx->client_sq,
+		QMI_IPA_ENDP_DESC_INDICATION_V01,
+		IPA_ENDP_DESC_INDICATION_MSG_V01_MAX_MSG_LEN,
+		ipa_endp_desc_indication_msg_v01_ei,
+		req);
+}
+
 void ipa3_qmi_init(void)
 {
 	mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index dacce0d..c1fb534 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -109,6 +109,9 @@ struct ipa3_qmi_context {
 	int num_ipa_offload_connection;
 	struct ipa_offload_connection_val
 		ipa_offload_cache[QMI_IPA_MAX_FILTERS_V01];
+	uint8_t ul_firewall_indices_list_valid;
+	uint32_t ul_firewall_indices_list_len;
+	uint32_t ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
 };
 
 struct ipa3_rmnet_mux_val {
@@ -333,6 +336,9 @@ int ipa3_qmi_get_per_client_packet_stats(
 int ipa3_qmi_send_mhi_ready_indication(
 	struct ipa_mhi_ready_indication_msg_v01 *req);
 
+int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req);
+
 int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req);
 
 void ipa3_qmi_init(void);
@@ -475,6 +481,12 @@ static inline int ipa3_qmi_send_mhi_ready_indication(
 	return -EPERM;
 }
 
+static int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req)
+{
+	return -EPERM;
+}
+
 static inline int ipa3_qmi_send_mhi_cleanup_request(
 	struct ipa_mhi_cleanup_req_msg_v01 *req)
 {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index 14ca0566..bf4fffe 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1350,6 +1350,26 @@ struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
 				ipa_mhi_ready_ind_valid),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+				endpoint_desc_ind_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+				endpoint_desc_ind),
+	},
+	{
 		.data_type	= QMI_UNSIGNED_1_BYTE,
 		.elem_len	= 1,
 		.elem_size	= sizeof(u8),
@@ -1912,6 +1932,36 @@ struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
 		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -2244,6 +2294,36 @@ struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
 			dst_pipe_id),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_EX2_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -3389,6 +3469,36 @@ struct qmi_elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[] = {
 		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -4370,6 +4480,46 @@ struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[] = {
 		mhi_vote),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			tput_value_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			tput_value),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			clk_rate_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			clk_rate),
+	},
+	{
 		.data_type = QMI_EOTI,
 		.array_type = NO_ARRAY,
 		.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -4781,6 +4931,46 @@ struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[] = {
 		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			default_mhi_path_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			default_mhi_path),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -4874,6 +5064,26 @@ struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[] = {
 			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			clean_all_rules_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			clean_all_rules),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 7c57144..b39b5a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -118,7 +118,8 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
 
 	gen_params.priority = entry->prio;
 	gen_params.id = entry->rule_id;
-	gen_params.rule = (const struct ipa_rt_rule *)&entry->rule;
+	gen_params.rule = (const struct ipa_rt_rule_i *)&entry->rule;
+	gen_params.cnt_idx = entry->cnt_idx;
 
 	res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
 	if (res)
@@ -937,10 +938,12 @@ static int __ipa_rt_validate_rule_id(u16 rule_id)
 
 	return 0;
 }
-static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
+static int __ipa_rt_validate_hndls(const struct ipa_rt_rule_i *rule,
 				struct ipa3_hdr_entry **hdr,
 				struct ipa3_hdr_proc_ctx_entry **proc_ctx)
 {
+	int index;
+
 	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
 		IPAERR_RL("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
 		return -EPERM;
@@ -967,11 +970,35 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
 		return -EPERM;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (rule->enable_stats && rule->cnt_idx) {
+			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu out of range\n",
+					rule->cnt_idx);
+				return -EPERM;
+			}
+			index = rule->cnt_idx - 1;
+			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu not alloc by driver\n",
+					rule->cnt_idx);
+				return -EPERM;
+			}
+		}
+	} else {
+		if (rule->enable_stats) {
+			IPAERR_RL(
+				"enable_stats won't support on ipa_hw_type %d\n",
+				ipa3_ctx->ipa_hw_type);
+			return -EPERM;
+		}
+	}
 	return 0;
 }
 
 static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
-		const struct ipa_rt_rule *rule,
+		const struct ipa_rt_rule_i *rule,
 		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
 		struct ipa3_hdr_proc_ctx_entry *proc_ctx,
 		u16 rule_id, bool user)
@@ -1007,6 +1034,10 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
 		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
 		(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
 
+	if (rule->enable_stats)
+		(*entry)->cnt_idx = rule->cnt_idx;
+	else
+		(*entry)->cnt_idx = 0;
 	return 0;
 
 alloc_rule_id_fail:
@@ -1050,7 +1081,7 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
 }
 
 static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
-		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+		const struct ipa_rt_rule_i *rule, u8 at_rear, u32 *rule_hdl,
 		u16 rule_id, bool user)
 {
 	struct ipa3_rt_tbl *tbl;
@@ -1099,7 +1130,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
 }
 
 static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
-		const struct ipa_rt_rule *rule, u32 *rule_hdl,
+		const struct ipa_rt_rule_i *rule, u32 *rule_hdl,
 		struct ipa3_rt_entry **add_after_entry)
 {
 	struct ipa3_rt_entry *entry;
@@ -1132,6 +1163,68 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
 	return -EPERM;
 }
 
+static void __ipa_convert_rt_rule_in(struct ipa_rt_rule rule_in,
+	struct ipa_rt_rule_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule) >
+			sizeof(struct ipa_rt_rule_i))) {
+		IPAERR_RL("invalid size in: %d size out: %d\n",
+			sizeof(struct ipa_rt_rule),
+			sizeof(struct ipa_rt_rule_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_i));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_rt_rule));
+}
+
+static void __ipa_convert_rt_rule_out(struct ipa_rt_rule_i rule_in,
+	struct ipa_rt_rule *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule) >
+			sizeof(struct ipa_rt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule),
+			sizeof(struct ipa_rt_rule_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_rt_rule));
+}
+
+static void __ipa_convert_rt_mdfy_in(struct ipa_rt_rule_mdfy rule_in,
+	struct ipa_rt_rule_mdfy_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule_mdfy) >
+			sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule_mdfy),
+			sizeof(struct ipa_rt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_mdfy_i));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_rt_rule));
+	rule_out->rt_rule_hdl = rule_in.rt_rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
+static void __ipa_convert_rt_mdfy_out(struct ipa_rt_rule_mdfy_i rule_in,
+	struct ipa_rt_rule_mdfy *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule_mdfy) >
+			sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule_mdfy),
+			sizeof(struct ipa_rt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_mdfy));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_rt_rule));
+	rule_out->rt_rule_hdl = rule_in.rt_rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
 /**
  * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
  * commit to IPA HW
@@ -1148,6 +1241,21 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
 }
 
 /**
+ * ipa3_add_rt_rule_v2() - Add the specified routing rules to SW
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+
+int ipa3_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
+{
+	return ipa3_add_rt_rule_usr_v2(rules, false);
+}
+
+/**
  * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
  * commit to IPA HW
  * @rules:		[inout] set of routing rules to add
@@ -1162,6 +1270,7 @@ int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
 {
 	int i;
 	int ret;
+	struct ipa_rt_rule_i rule;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
 		IPAERR_RL("bad param\n");
@@ -1174,8 +1283,9 @@ int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
 			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(rules->rules[i].rule, &rule);
 		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
-					&rules->rules[i].rule,
+					&rule,
 					rules->rules[i].at_rear,
 					&rules->rules[i].rt_rule_hdl,
 					0,
@@ -1183,6 +1293,7 @@ int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
 			IPAERR_RL("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
 			rules->rules[i].status = 0;
 		}
 	}
@@ -1200,6 +1311,66 @@ int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
 }
 
 /**
+ * ipa3_add_rt_rule_usr_v2() - Add the specified routing rules
+ * to SW and optionally commit to IPA HW
+ * @rules:		[inout] set of routing rules to add
+ * @user_only:	[in] indicate installed by userspace module
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+
+int ipa3_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules,
+	bool user_only)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rule),
+					((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rt_rule_hdl),
+					0,
+					user_only)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			((struct ipa_rt_rule_add_i *)rules->rules)[i].status
+				= IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+
+/**
  * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
  * and optionally commit to IPA HW
  * @rules:	[inout] set of routing rules to add
@@ -1212,9 +1383,10 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
 {
 	int i;
 	int ret;
+	struct ipa_rt_rule_i rule;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
-		IPAERR_RL("bad parm\n");
+		IPAERR_RL("bad param\n");
 		return -EINVAL;
 	}
 
@@ -1223,14 +1395,17 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
 			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(
+				rules->rules[i].rule, &rule);
 		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
-					&rules->rules[i].rule,
+					&rule,
 					rules->rules[i].at_rear,
 					&rules->rules[i].rt_rule_hdl,
 					rules->rules[i].rule_id, true)) {
 			IPAERR_RL("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
 			rules->rules[i].status = 0;
 		}
 	}
@@ -1248,6 +1423,61 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
 }
 
 /**
+ * ipa3_add_rt_rule_ext_v2() - Add the specified routing rules
+ * to SW with rule id and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext_v2(struct ipa_ioc_add_rt_rule_ext_v2 *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].rule.hashable = false;
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&(((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rule),
+					((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rt_rule_hdl),
+					((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rule_id, true)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
  * ipa3_add_rt_rule_after() - Add the given routing rules after the
  * specified rule to SW and optionally commit to IPA HW
  * @rules:	[inout] set of routing rules to add + handle where to add
@@ -1262,6 +1492,117 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
 	int ret = 0;
 	struct ipa3_rt_tbl *tbl = NULL;
 	struct ipa3_rt_entry *entry = NULL;
+	struct ipa_rt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+		IPAERR_RL("failed finding rt tbl name = %s\n",
+			rules->rt_tbl_name);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (!tbl->rule_cnt) {
+		IPAERR_RL("tbl->rule_cnt == 0");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (!entry) {
+		IPAERR_RL("failed finding rule %d in rt tbls\n",
+			rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u rule %d in rt tbls\n",
+			entry->cookie, rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given rt rule does not match the table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * do not allow any rule to be added at "default" routing
+	 * table
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+		(tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * we add all rules one after the other, if one insertion fails, it cuts
+	 * the chain (all following will receive fail status) following calls to
+	 * __ipa_add_rt_rule_after will fail (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(
+				rules->rules[i].rule, &rule);
+		if (__ipa_add_rt_rule_after(tbl,
+					&rule,
+					&rules->rules[i].rt_rule_hdl,
+					&entry)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			IPAERR_RL("failed to commit\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+	goto bail;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_after_v2() - Add the given routing rules
+ * after the specified rule to SW and optionally commit to IPA
+ * HW
+ * @rules:	[inout] set of routing rules to add + handle where to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after_v2(struct ipa_ioc_add_rt_rule_after_v2
+	*rules)
+{
+	int i;
+	int ret = 0;
+	struct ipa3_rt_tbl *tbl = NULL;
+	struct ipa3_rt_entry *entry = NULL;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
 		IPAERR_RL("bad param\n");
@@ -1325,15 +1666,20 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
 	for (i = 0; i < rules->num_rules; i++) {
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
-			rules->rules[i].rule.hashable = false;
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
 		if (__ipa_add_rt_rule_after(tbl,
-					&rules->rules[i].rule,
-					&rules->rules[i].rt_rule_hdl,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rule),
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rt_rule_hdl),
 					&entry)) {
 			IPAERR_RL("failed to add rt rule %d\n", i);
-			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
-			rules->rules[i].status = 0;
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = 0;
 		}
 	}
 
@@ -1767,7 +2113,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
 }
 
 
-static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule)
 {
 	struct ipa3_rt_entry *entry;
 	struct ipa3_hdr_entry *hdr = NULL;
@@ -1840,7 +2186,10 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
 
 	entry->hw_len = 0;
 	entry->prio = 0;
-
+	if (rtrule->rule.enable_stats)
+		entry->cnt_idx = rtrule->rule.cnt_idx;
+	else
+		entry->cnt_idx = 0;
 	return 0;
 
 error:
@@ -1859,6 +2208,7 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
 {
 	int i;
 	int result;
+	struct ipa_rt_rule_mdfy_i rule;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
 		IPAERR_RL("bad param\n");
@@ -1870,11 +2220,60 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
 		/* if hashing not supported, all tables are non-hash tables*/
 		if (ipa3_ctx->ipa_fltrt_not_hashable)
 			hdls->rules[i].rule.hashable = false;
-		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+		__ipa_convert_rt_mdfy_in(hdls->rules[i], &rule);
+		if (__ipa_mdfy_rt_rule(&rule)) {
 			IPAERR_RL("failed to mdfy rt rule %i\n", i);
 			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
+			__ipa_convert_rt_mdfy_out(rule, &hdls->rules[i]);
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_rt_rule_v2() - Modify the specified routing rules
+ * in SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables*/
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].rule.hashable = false;
+		if (__ipa_mdfy_rt_rule(&(((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i]))) {
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].status = 0;
 		}
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 2ade494..5df8dcb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -12,6 +12,9 @@
 #define IPA_UC_POLL_SLEEP_USEC 100
 #define IPA_UC_POLL_MAX_RETRY 10000
 
+#define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
+#define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
+
 /**
  * Mailbox register to Interrupt HWP for CPU cmd
  * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0
@@ -115,7 +118,7 @@ struct IpaHwRegWriteCmdData_t {
  * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
  * @originalCmdOp : The original command opcode
  * @status : 0 for success indication, otherwise failure
- * @reserved : Reserved
+ * @responseData : 16b responseData
  *
  * Parameters are sent as 32b immediate parameters.
  */
@@ -123,7 +126,7 @@ union IpaHwCpuCmdCompletedResponseData_t {
 	struct IpaHwCpuCmdCompletedResponseParams_t {
 		u32 originalCmdOp:8;
 		u32 status:8;
-		u32 reserved:16;
+		u32 responseData:16;
 	} __packed params;
 	u32 raw32b;
 } __packed;
@@ -214,6 +217,55 @@ const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
 	return str;
 }
 
+static void ipa3_uc_save_dbg_stats(u32 size)
+{
+	u8 protocol_id;
+	u32 addr_offset;
+	void __iomem *mmio;
+
+	protocol_id = IPA_UC_DBG_STATS_GET_PROT_ID(
+		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
+	addr_offset = IPA_UC_DBG_STATS_GET_OFFSET(
+		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
+	mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		addr_offset, sizeof(struct IpaHwRingStats_t) *
+		MAX_CH_STATS_SUPPORTED);
+	if (mmio == NULL) {
+		IPAERR("unexpected NULL mmio\n");
+		return;
+	}
+	switch (protocol_id) {
+	case IPA_HW_PROTOCOL_AQC:
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size = size;
+		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
+		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_size = size;
+		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
+		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	case IPA_HW_PROTOCOL_MHIP:
+		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_size = size;
+		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
+		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		break;
+	case IPA_HW_PROTOCOL_USB:
+		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_size = size;
+		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
+		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		break;
+	default:
+		IPAERR("unknown protocols %d\n", protocol_id);
+	}
+}
+
 static void ipa3_log_evt_hdlr(void)
 {
 	int i;
@@ -513,6 +565,10 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
 		if (uc_rsp.params.originalCmdOp ==
 		    ipa3_ctx->uc_ctx.pending_cmd) {
 			ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+			if (uc_rsp.params.originalCmdOp ==
+				IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC)
+				ipa3_uc_save_dbg_stats(
+					uc_rsp.params.responseData);
 			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
 		} else {
 			IPAERR("Expected cmd=%u rcvd cmd=%u\n",
@@ -947,3 +1003,99 @@ int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n)
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
 	return res;
 }
+
+int ipa3_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	struct IpaHwOffloadStatsAllocCmdData_t *cmd_data;
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		result = -ENOMEM;
+		return result;
+	}
+	cmd_data = (struct IpaHwOffloadStatsAllocCmdData_t *)cmd.base;
+	memcpy(cmd_data, &cmdinfo,
+		sizeof(struct IpaHwOffloadStatsAllocCmdData_t));
+	command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to alloc offload stats\n");
+		goto cleanup;
+	}
+	result = 0;
+cleanup:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("exit\n");
+	return result;
+}
+
+int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	struct IpaHwOffloadStatsDeAllocCmdData_t *cmd_data;
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		result = -ENOMEM;
+		return result;
+	}
+	cmd_data = (struct IpaHwOffloadStatsDeAllocCmdData_t *)
+		cmd.base;
+	cmd_data->protocol = protocol;
+	command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to dealloc offload stats\n");
+		goto cleanup;
+	}
+	switch (protocol) {
+	case IPA_HW_PROTOCOL_AQC:
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		iounmap(ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	default:
+		IPAERR("unknown protocols %d\n", protocol);
+	}
+	result = 0;
+cleanup:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("exit\n");
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index ea54936..4d296ec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA_UC_OFFLOAD_I_H_
@@ -19,6 +19,17 @@
 #define IPA_NTN_TX_DIR 1
 #define IPA_NTN_RX_DIR 2
 
+#define MAX_CH_STATS_SUPPORTED 5
+#define DIR_CONSUMER 0
+#define DIR_PRODUCER 1
+
+#define MAX_AQC_CHANNELS 2
+#define MAX_11AD_CHANNELS 5
+#define MAX_WDI2_CHANNELS 2
+#define MAX_WDI3_CHANNELS 2
+#define MAX_MHIP_CHANNELS 4
+#define MAX_USB_CHANNELS 2
+
 /**
  *  @brief   Enum value determined based on the feature it
  *           corresponds to
@@ -64,14 +75,20 @@ enum ipa3_hw_features {
 * @IPA_HW_PROTOCOL_AQC : protocol related to AQC operation in IPA HW
 * @IPA_HW_PROTOCOL_11ad: protocol related to 11ad operation in IPA HW
 * @IPA_HW_PROTOCOL_WDI : protocol related to WDI operation in IPA HW
+* @IPA_HW_PROTOCOL_WDI3: protocol related to WDI3 operation in IPA HW
 * @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
+* @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
+* @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
 */
 enum ipa4_hw_protocol {
 	IPA_HW_PROTOCOL_COMMON = 0x0,
 	IPA_HW_PROTOCOL_AQC = 0x1,
 	IPA_HW_PROTOCOL_11ad = 0x2,
 	IPA_HW_PROTOCOL_WDI = 0x3,
+	IPA_HW_PROTOCOL_WDI3 = 0x4,
 	IPA_HW_PROTOCOL_ETH = 0x5,
+	IPA_HW_PROTOCOL_MHIP = 0x6,
+	IPA_HW_PROTOCOL_USB = 0x7,
 	IPA_HW_PROTOCOL_MAX
 };
 
@@ -151,6 +168,7 @@ enum ipa3_hw_errors {
  * @warningCounter : The warnings counter. The counter carries information
  *						regarding non fatal errors in HW
  * @interfaceVersionCommon : The Common interface version as reported by HW
+ * @responseParams_1: offset addr for uC stats
  *
  * The shared memory is used for communication between IPA HW and CPU.
  */
@@ -174,6 +192,7 @@ struct IpaHwSharedMemCommonMapping_t {
 	u16 reserved_23_22;
 	u16 interfaceVersionCommon;
 	u16 reserved_27_26;
+	u32 responseParams_1;
 } __packed;
 
 /**
@@ -419,11 +438,15 @@ struct Ipa3HwStatsNTNInfoData_t {
  * enum ipa_cpu_2_hw_offload_commands -  Values that represent
  * the offload commands from CPU
  * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
- *				Offload protocol's Tx/Rx Path
+ * Offload protocol's Tx/Rx Path
  * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
- *				Offload protocol's Tx/ Rx Path
+ * Offload protocol's Tx/ Rx Path
  * @IPA_CPU_2_HW_CMD_PERIPHERAL_INIT :Command to initialize peripheral
  * @IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT : Command to deinitialize peripheral
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC: Command to start the
+ * uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
+ * uC stats calculation for a particular protocol
  */
 enum ipa_cpu_2_hw_offload_commands {
 	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
@@ -434,8 +457,47 @@ enum ipa_cpu_2_hw_offload_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
 	IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
 };
 
+/**
+ * struct IpaOffloadStatschannel_info - channel info for uC
+ * stats
+ * @dir: Direction of the channel ID: DIR_CONSUMER = 0,
+ * DIR_PRODUCER = 1
+ * @ch_id: Channel id of the IPA endpoint for which stats need
+ * to be calculated; 0xFF means an invalid channel, or disables
+ * stats on a channel that already has stats enabled
+ */
+struct IpaOffloadStatschannel_info {
+	uint8_t dir;
+	uint8_t ch_id;
+} __packed;
+
+/**
+ * struct IpaHwOffloadStatsAllocCmdData_t - protocol info for uC
+ * stats start
+ * @protocol: Enum that indicates the protocol type
+ * @ch_id_info: Channel id of the IPA endpoint for which stats
+ * need to be calculated
+ */
+struct IpaHwOffloadStatsAllocCmdData_t {
+	uint32_t protocol;
+	struct IpaOffloadStatschannel_info
+		ch_id_info[MAX_CH_STATS_SUPPORTED];
+} __packed;
+
+/**
+ * struct IpaHwOffloadStatsDeAllocCmdData_t - protocol info for
+ * uC stats stop
+ * @protocol: Enum that indicates the protocol type
+ */
+struct IpaHwOffloadStatsDeAllocCmdData_t {
+	uint32_t protocol;
+} __packed;
 
 /**
  * enum ipa3_hw_offload_channel_states - Values that represent
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e63fbde..983ff16 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -411,6 +411,52 @@ static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
 }
 
 /**
+ * ipa3_get_wdi_gsi_stats() - Query WDI gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad NULL parms for wdi_gsi_stats\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_WDI2_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
  * ipa3_get_wdi_stats() - Query WDI statistics from uc
  * @stats:	[inout] stats blob from client populated by driver
  *
@@ -1129,6 +1175,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 	union __packed gsi_channel_scratch gsi_scratch;
 	phys_addr_t pa;
 	unsigned long va;
+	unsigned long wifi_rx_ri_addr = 0;
 	u32 gsi_db_reg_phs_addr_lsb;
 	u32 gsi_db_reg_phs_addr_msb;
 
@@ -1306,16 +1353,11 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 					NULL,
 					4,
 					false,
-					&va)) {
+					&wifi_rx_ri_addr)) {
 			IPAERR("fail to create gsi RX rng RP\n");
 			result = -ENOMEM;
 			goto gsi_timeout;
 		}
-		gsi_scratch.wdi.wifi_rx_ri_addr_low =
-			va & 0xFFFFFFFF;
-		gsi_scratch.wdi.wifi_rx_ri_addr_high =
-			(va & 0xFFFFF00000000) >> 32;
-
 		len = in->smmu_enabled ?
 			in->u.ul_smmu.rdy_comp_ring_size :
 			in->u.ul.rdy_comp_ring_size;
@@ -1354,13 +1396,6 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 			goto gsi_timeout;
 		}
 		gsi_evt_ring_props.rp_update_addr = va;
-		gsi_scratch.wdi.wdi_rx_vdev_id = 0xff;
-		gsi_scratch.wdi.wdi_rx_fw_desc = 0xff;
-		gsi_scratch.wdi.endp_metadatareg_offset =
-					ipahal_get_reg_mn_ofst(
-					IPA_ENDP_INIT_HDR_METADATA_n, 0,
-							ipa_ep_idx)/4;
-		gsi_scratch.wdi.qmap_id = 0;
 	}
 
 	ep->valid = 1;
@@ -1418,11 +1453,44 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 
 	num_ring_ele = ep->gsi_mem_info.evt_ring_len/gsi_evt_ring_props.re_size;
 	IPAERR("UPDATE_RI_MODERATION_THRESHOLD: %d\n", num_ring_ele);
-	gsi_scratch.wdi.update_ri_moderation_threshold =
-		min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele);
-	gsi_scratch.wdi.update_ri_moderation_counter = 0;
-	gsi_scratch.wdi.wdi_rx_tre_proc_in_progress = 0;
-	gsi_scratch.wdi.resv1 = 0;
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+			gsi_scratch.wdi.wifi_rx_ri_addr_low =
+				wifi_rx_ri_addr & 0xFFFFFFFF;
+			gsi_scratch.wdi.wifi_rx_ri_addr_high =
+				(wifi_rx_ri_addr & 0xFFFFF00000000) >> 32;
+			gsi_scratch.wdi.wdi_rx_vdev_id = 0xff;
+			gsi_scratch.wdi.wdi_rx_fw_desc = 0xff;
+			gsi_scratch.wdi.endp_metadatareg_offset =
+						ipahal_get_reg_mn_ofst(
+						IPA_ENDP_INIT_HDR_METADATA_n, 0,
+								ipa_ep_idx)/4;
+			gsi_scratch.wdi.qmap_id = 0;
+		}
+		gsi_scratch.wdi.update_ri_moderation_threshold =
+			min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele);
+		gsi_scratch.wdi.update_ri_moderation_counter = 0;
+		gsi_scratch.wdi.wdi_rx_tre_proc_in_progress = 0;
+	} else {
+		if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+			gsi_scratch.wdi2_new.wifi_rx_ri_addr_low =
+				wifi_rx_ri_addr & 0xFFFFFFFF;
+			gsi_scratch.wdi2_new.wifi_rx_ri_addr_high =
+				(wifi_rx_ri_addr & 0xFFFFF00000000) >> 32;
+			gsi_scratch.wdi2_new.wdi_rx_vdev_id = 0xff;
+			gsi_scratch.wdi2_new.wdi_rx_fw_desc = 0xff;
+			gsi_scratch.wdi2_new.endp_metadatareg_offset =
+						ipahal_get_reg_mn_ofst(
+						IPA_ENDP_INIT_HDR_METADATA_n, 0,
+								ipa_ep_idx)/4;
+			gsi_scratch.wdi2_new.qmap_id = 0;
+		}
+		gsi_scratch.wdi2_new.update_ri_moderation_threshold =
+			min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele);
+		gsi_scratch.wdi2_new.update_ri_moderation_counter = 0;
+		gsi_scratch.wdi2_new.wdi_rx_tre_proc_in_progress = 0;
+	}
+
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl,
 			gsi_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
@@ -2045,7 +2113,9 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl)
 		ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
 	else
 		IPADBG("uc_wdi_ctx.stats_notify already null\n");
-
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7)
+		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI);
 	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
 
 fail_dealloc_channel:
@@ -2399,6 +2469,7 @@ int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	struct gsi_chan_info chan_info;
 	union __packed gsi_channel_scratch gsi_scratch;
+	struct IpaHwOffloadStatsAllocCmdData_t *pcmd_t = NULL;
 
 	IPADBG("ep=%d\n", clnt_hdl);
 	ep = &ipa3_ctx->ep[clnt_hdl];
@@ -2422,6 +2493,24 @@ int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
 		IPAERR("gsi_start_channel failed %d\n", result);
 		ipa_assert();
 	}
+	pcmd_t = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI];
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(ep->client)) {
+			pcmd_t->ch_id_info[0].ch_id
+				= ep->gsi_chan_hdl;
+			pcmd_t->ch_id_info[0].dir
+				= DIR_PRODUCER;
+		} else {
+			pcmd_t->ch_id_info[1].ch_id
+				= ep->gsi_chan_hdl;
+			pcmd_t->ch_id_info[1].dir
+				= DIR_CONSUMER;
+		}
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI]);
+	}
 	gsi_query_channel_info(ep->gsi_chan_hdl, &chan_info);
 	gsi_read_channel_scratch(ep->gsi_chan_hdl, &gsi_scratch);
 	IPADBG("ch=%lu channel base = 0x%llx , event base 0x%llx\n",
@@ -2516,6 +2605,7 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
 	int retry_cnt = 0;
 	struct gsi_chan_info chan_info;
 	union __packed gsi_channel_scratch gsi_scratch;
+	struct IpaHwOffloadStatsAllocCmdData_t *pcmd_t = NULL;
 
 	ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
 	if (ipa_ep_idx < 0) {
@@ -2580,7 +2670,24 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
 				gsi_scratch.data.word3);
 		IPADBG("Scratch 3 = %x\n", gsi_scratch.data.word4);
 	}
-
+	pcmd_t = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI];
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(ep->client)) {
+			pcmd_t->ch_id_info[0].ch_id
+				= 0xff;
+			pcmd_t->ch_id_info[0].dir
+				= DIR_PRODUCER;
+		} else {
+			pcmd_t->ch_id_info[1].ch_id
+				= 0xff;
+			pcmd_t->ch_id_info[1].dir
+				= DIR_CONSUMER;
+		}
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI]);
+	}
 	if (disable_force_clear)
 		ipa3_disable_force_clear(clnt_hdl);
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -2738,17 +2845,29 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
-	union __packed gsi_wdi_channel_scratch3_reg gsi_scratch;
+	union __packed gsi_wdi_channel_scratch3_reg gsi_scratch3;
+	union __packed gsi_wdi2_channel_scratch2_reg gsi_scratch2;
 
-	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
 	ep = &ipa3_ctx->ep[clnt_hdl];
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
-	gsi_scratch.wdi.qmap_id = qmap_id;
-	gsi_scratch.wdi.endp_metadatareg_offset = ipahal_get_reg_mn_ofst(
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_7) {
+		memset(&gsi_scratch3, 0, sizeof(gsi_scratch3));
+		gsi_scratch3.wdi.qmap_id = qmap_id;
+		gsi_scratch3.wdi.endp_metadatareg_offset =
+			ipahal_get_reg_mn_ofst(
 				IPA_ENDP_INIT_HDR_METADATA_n, 0, clnt_hdl)/4;
-
-	result = gsi_write_channel_scratch3_reg(ep->gsi_chan_hdl, gsi_scratch);
+		result = gsi_write_channel_scratch3_reg(ep->gsi_chan_hdl,
+								gsi_scratch3);
+	} else {
+		memset(&gsi_scratch2, 0, sizeof(gsi_scratch2));
+		gsi_scratch2.wdi.qmap_id = qmap_id;
+		gsi_scratch2.wdi.endp_metadatareg_offset =
+			ipahal_get_reg_mn_ofst(
+				IPA_ENDP_INIT_HDR_METADATA_n, 0, clnt_hdl)/4;
+		result = gsi_write_channel_scratch2_reg(ep->gsi_chan_hdl,
+								gsi_scratch2);
+	}
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("gsi_write_channel_scratch failed %d\n",
 			result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index bede437..7c80164 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2167,7 +2167,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
+			{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
 	[IPA_4_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			false,
@@ -2179,13 +2179,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 10, 13, 8, 19, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+			{ 3, 5, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_PROD]	  = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
+			{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_5][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			true,
@@ -2241,7 +2241,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_5][IPA_CLIENT_USB_CONS]            = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			false,
@@ -2271,7 +2271,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_CONS]       = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			false,
@@ -2283,7 +2283,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 30, 6, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_CONS]	  = {
 			true, IPA_v4_5_GROUP_UL_DL,
 			false,
@@ -2390,13 +2390,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 8, 8, 16, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } },
+			{ 4, 8, 8, 16, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_5_MHI][IPA_CLIENT_MHI_PROD]		= {
 			true, IPA_v4_5_MHI_GROUP_PCIE,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
-			{ 1, 0, 16, 20, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
+			{ 1, 0, 16, 20, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
 	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]	= {
 			true, IPA_v4_5_MHI_GROUP_DMA,
 			false,
@@ -2463,7 +2463,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true, IPA_v4_5_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 29, 9, 9, 9, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]	= {
 			true, IPA_v4_5_MHI_GROUP_DMA,
@@ -2691,13 +2691,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 	[IPA_4_7][IPA_CLIENT_WLAN1_PROD]          = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			true,
-			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 3, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 3, 3, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_7][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			true,
-			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_7][IPA_CLIENT_APPS_LAN_PROD]	  = {
@@ -2705,13 +2705,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 4, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 4, 4, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_7][IPA_CLIENT_APPS_WAN_PROD]	  = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			true,
-			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 2, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
+			{ 2, 2, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
 	[IPA_4_7][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
@@ -2723,7 +2723,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } },
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_7][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
@@ -2735,7 +2735,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 5 } },
+			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
 	/* Only for test purpose */
 	[IPA_4_7][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_7_GROUP_UL_DL,
@@ -2760,20 +2760,20 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 3, 8, 16, IPA_EE_AP } },
+			{ 2, 2, 16, 32, IPA_EE_AP } },
 	[IPA_4_7][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 4, 8, 16, IPA_EE_AP } },
+			{ 1, 1, 8, 16, IPA_EE_AP } },
 
 	[IPA_4_7][IPA_CLIENT_WLAN1_CONS]          = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 18, 9, 8, 13, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 18, 9, 8, 13, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_7][IPA_CLIENT_USB_CONS]            = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
@@ -2809,7 +2809,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 15, 6, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 15, 6, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
 	[IPA_4_7][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
@@ -2859,7 +2859,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 18, 9, 8, 13, IPA_EE_AP } },
+			{ 21, 12, 9, 9, IPA_EE_AP } },
 	[IPA_4_7][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_7_GROUP_UL_DL,
 			false,
@@ -3104,21 +3104,19 @@ static struct ipa3_mem_partition ipa_4_5_mem_part = {
 	.apps_hdr_size			= 0x200,
 	.apps_hdr_size_ddr		= 0x800,
 	.modem_hdr_proc_ctx_ofst	= 0xad0,
-	.modem_hdr_proc_ctx_size	= 0xac0,
-	.apps_hdr_proc_ctx_ofst		= 0x1590,
+	.modem_hdr_proc_ctx_size	= 0xb20,
+	.apps_hdr_proc_ctx_ofst		= 0x15f0,
 	.apps_hdr_proc_ctx_size		= 0x200,
 	.apps_hdr_proc_ctx_size_ddr	= 0x0,
-	.nat_tbl_ofst			= 0x17a0,
+	.nat_tbl_ofst			= 0x1800,
 	.nat_tbl_size			= 0x800,
-	.nat_index_tbl_ofst		= 0x1fa0,
+	.nat_index_tbl_ofst		= 0x2000,
 	.nat_index_tbl_size		= 0x100,
-	.nat_exp_tbl_ofst		= 0x20a0,
+	.nat_exp_tbl_ofst		= 0x2100,
 	.nat_exp_tbl_size		= 0x400,
-	.pdn_config_ofst		= 0x24a8,
-	.pdn_config_size		= 0x50,
-	.stats_quota_ofst		= 0x2500,
+	.stats_quota_ofst		= 0x2510,
 	.stats_quota_size		= 0x78,
-	.stats_tethering_ofst		= 0x2578,
+	.stats_tethering_ofst		= 0x2588,
 	.stats_tethering_size		= 0x238,
 	.stats_flt_v4_ofst		= 0,
 	.stats_flt_v4_size		= 0,
@@ -3128,13 +3126,13 @@ static struct ipa3_mem_partition ipa_4_5_mem_part = {
 	.stats_rt_v4_size		= 0,
 	.stats_rt_v6_ofst		= 0,
 	.stats_rt_v6_size		= 0,
-	.stats_fnr_ofst			= 0x27b0,
+	.stats_fnr_ofst			= 0x27c0,
 	.stats_fnr_size			= 0x800,
-	.stats_drop_ofst		= 0x2fb0,
+	.stats_drop_ofst		= 0x2fc0,
 	.stats_drop_size		= 0x20,
 	.modem_comp_decomp_ofst		= 0x0,
 	.modem_comp_decomp_size		= 0x0,
-	.modem_ofst			= 0x2fd8,
+	.modem_ofst			= 0x2fe8,
 	.modem_size			= 0x800,
 	.apps_v4_flt_hash_ofst	= 0x2718,
 	.apps_v4_flt_hash_size	= 0x0,
@@ -3154,7 +3152,9 @@ static struct ipa3_mem_partition ipa_4_5_mem_part = {
 	.apps_v6_rt_nhash_size	= 0x0,
 	.uc_descriptor_ram_ofst	= 0x3800,
 	.uc_descriptor_ram_size	= 0x1000,
-	.end_ofst		= 0x4800,
+	.pdn_config_ofst	= 0x4800,
+	.pdn_config_size	= 0x50,
+	.end_ofst		= 0x4850,
 };
 
 static struct ipa3_mem_partition ipa_4_7_mem_part = {
@@ -3912,7 +3912,7 @@ static void ipa_cfg_qtime(void)
 	memset(&gran_cfg, 0, sizeof(gran_cfg));
 	gran_cfg.gran_0 = IPA_TIMERS_TIME_GRAN_100_USEC;
 	gran_cfg.gran_1 = IPA_TIMERS_TIME_GRAN_1_MSEC;
-	gran_cfg.gran_2 = IPA_TIMERS_TIME_GRAN_10_USEC;
+	gran_cfg.gran_2 = IPA_TIMERS_TIME_GRAN_1_MSEC;
 	val = ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG);
 	IPADBG("timer pulse granularity before cfg: 0x%x\n", val);
 	ipahal_write_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg);
@@ -4193,48 +4193,6 @@ bool ipa3_get_client_uplink(int pipe_idx)
 	return ipa3_ctx->ipacm_client[pipe_idx].uplink;
 }
 
-/**
- * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
- * the supplied pipe index.
- *
- * @pipe_idx:
- *
- * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
- * found.
- */
-enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
-{
-	int i;
-	int j;
-	enum ipa_client_type client;
-	struct ipa3_client_names clients;
-	bool found = false;
-
-	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
-		IPAERR("Bad pipe index!\n");
-		return -EINVAL;
-	}
-
-	client = ipa3_ctx->ep[pipe_idx].client;
-
-	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
-		memset(&clients, 0, sizeof(clients));
-		ipa3_get_clients_from_rm_resource(i, &clients);
-		for (j = 0; j < clients.length; j++) {
-			if (clients.names[j] == client) {
-				found = true;
-				break;
-			}
-		}
-		if (found)
-			break;
-	}
-
-	if (!found)
-		return -EFAULT;
-
-	return i;
-}
 
 /**
  * ipa3_get_client_mapping() - provide client mapping
@@ -5271,6 +5229,11 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
 	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
 		ep_holb);
 
+	/* IPA4.5 issue requires HOLB_EN to be written twice */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			clnt_hdl, ep_holb);
+
 	/* Configure timer */
 	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) {
 		ipa3_cal_ep_holb_scale_base_val(ep_holb->tmr_val,
@@ -6049,10 +6012,219 @@ int ipa3_alloc_rule_id(struct idr *rule_ids)
 	 * Distinction by high bit: Modem Ids are high bit asserted.
 	 */
 	return idr_alloc(rule_ids, NULL,
-		ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
+		ipahal_get_low_rule_id(),
+		ipahal_get_rule_id_hi_bit(),
 		GFP_KERNEL);
 }
 
+static int __ipa3_alloc_counter_hdl
+	(struct ipa_ioc_flt_rt_counter_alloc *counter)
+{
+	int id;
+
+	/* assign a handle using idr to this counter block */
+	id = idr_alloc(&ipa3_ctx->flt_rt_counters.hdl, counter,
+		ipahal_get_low_hdl_id(), ipahal_get_high_hdl_id(),
+		GFP_ATOMIC);
+
+	return id;
+}
+
+int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter)
+{
+	int i, unused_cnt, unused_max, unused_start_id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+
+	/* allocate hw counters */
+	counter->hw_counter.start_id = 0;
+	counter->hw_counter.end_id = 0;
+	unused_cnt = 0;
+	unused_max = 0;
+	unused_start_id = 0;
+	if (counter->hw_counter.num_counters == 0)
+		goto sw_counter_alloc;
+	/* find the start id which can be used for the block */
+	for (i = 0; i < IPA_FLT_RT_HW_COUNTER; i++) {
+		if (!ipa3_ctx->flt_rt_counters.used_hw[i])
+			unused_cnt++;
+		else {
+			/* track largest free run (1-based id) for allow_less */
+			if (unused_cnt > unused_max) {
+				unused_start_id = i - unused_cnt + 1;
+				unused_max = unused_cnt;
+			}
+			unused_cnt = 0;
+		}
+		/* find it, break and use this 1st possible block */
+		if (unused_cnt == counter->hw_counter.num_counters) {
+			counter->hw_counter.start_id = i - unused_cnt + 2;
+			counter->hw_counter.end_id = i + 1;
+			break;
+		}
+	}
+	if (counter->hw_counter.start_id == 0) {
+		/* if not able to find such a block but allow less */
+		if (counter->hw_counter.allow_less && unused_max) {
+			/* give the max possible unused blocks */
+			counter->hw_counter.num_counters = unused_max;
+			counter->hw_counter.start_id = unused_start_id;
+			counter->hw_counter.end_id =
+				unused_start_id + unused_max - 1;
+		} else {
+			/* not able to find such a block */
+			counter->hw_counter.num_counters = 0;
+			counter->hw_counter.start_id = 0;
+			counter->hw_counter.end_id = 0;
+			goto err;
+		}
+	}
+
+sw_counter_alloc:
+	/* allocate sw counters */
+	counter->sw_counter.start_id = 0;
+	counter->sw_counter.end_id = 0;
+	unused_cnt = 0;
+	unused_max = 0;
+	unused_start_id = 0;
+	if (counter->sw_counter.num_counters == 0)
+		goto mark_hw_cnt;
+	/* find the start id which can be used for the block */
+	for (i = 0; i < IPA_FLT_RT_SW_COUNTER; i++) {
+		if (!ipa3_ctx->flt_rt_counters.used_sw[i])
+			unused_cnt++;
+		else {
+			/* track largest free run (1-based id) for allow_less */
+			if (unused_cnt > unused_max) {
+				unused_start_id = i - unused_cnt +
+					1 + IPA_FLT_RT_HW_COUNTER;
+				unused_max = unused_cnt;
+			}
+			unused_cnt = 0;
+		}
+		/* find it, break and use this 1st possible block */
+		if (unused_cnt == counter->sw_counter.num_counters) {
+			counter->sw_counter.start_id = i - unused_cnt +
+				2 + IPA_FLT_RT_HW_COUNTER;
+			counter->sw_counter.end_id =
+				i + 1 + IPA_FLT_RT_HW_COUNTER;
+			break;
+		}
+	}
+	if (counter->sw_counter.start_id == 0) {
+		/* if not able to find such a block but allow less */
+		if (counter->sw_counter.allow_less && unused_max) {
+			/* give the max possible unused blocks */
+			counter->sw_counter.num_counters = unused_max;
+			counter->sw_counter.start_id = unused_start_id;
+			counter->sw_counter.end_id =
+				unused_start_id + unused_max - 1;
+		} else {
+			/* not able to find such a block */
+			counter->sw_counter.num_counters = 0;
+			counter->sw_counter.start_id = 0;
+			counter->sw_counter.end_id = 0;
+			goto err;
+		}
+	}
+
+mark_hw_cnt:
+	/* add hw counters, set used to 1 */
+	if (counter->hw_counter.num_counters == 0)
+		goto mark_sw_cnt;
+	unused_start_id = counter->hw_counter.start_id;
+	if (unused_start_id < 1 ||
+		unused_start_id > IPA_FLT_RT_HW_COUNTER) {
+		IPAERR("unexpected hw_counter start id %d\n",
+			   unused_start_id);
+		goto err;
+	}
+	for (i = 0; i < counter->hw_counter.num_counters; i++)
+		ipa3_ctx->flt_rt_counters.used_hw[unused_start_id + i - 1]
+			= true;
+mark_sw_cnt:
+	/* add sw counters, set used to 1 */
+	if (counter->sw_counter.num_counters == 0)
+		goto done;
+	unused_start_id = counter->sw_counter.start_id
+		- IPA_FLT_RT_HW_COUNTER;
+	if (unused_start_id < 1 ||
+		unused_start_id > IPA_FLT_RT_SW_COUNTER) {
+		IPAERR("unexpected sw_counter start id %d\n",
+			   unused_start_id);
+		goto err;
+	}
+	for (i = 0; i < counter->sw_counter.num_counters; i++)
+		ipa3_ctx->flt_rt_counters.used_sw[unused_start_id + i - 1]
+			= true;
+done:
+	/* get a handle from idr for dealloc */
+	counter->hdl = __ipa3_alloc_counter_hdl(counter);
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	idr_preload_end();
+	return 0;
+
+err:
+	counter->hdl = -1;
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	idr_preload_end();
+	return -ENOMEM;
+}
+
+void ipa3_counter_remove_hdl(int hdl)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter;
+	int offset = 0;
+
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	counter = idr_find(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+	if (counter == NULL) {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	/* release the counters owned by this hdl (mark them unused) */
+	offset = counter->hw_counter.start_id - 1;
+	if (offset >= 0 && offset + counter->hw_counter.num_counters
+		<= IPA_FLT_RT_HW_COUNTER) {
+		memset(ipa3_ctx->flt_rt_counters.used_hw + offset,
+			   0, counter->hw_counter.num_counters * sizeof(bool));
+	} else {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	offset = counter->sw_counter.start_id - 1 - IPA_FLT_RT_HW_COUNTER;
+	if (offset >= 0 && offset + counter->sw_counter.num_counters
+		<= IPA_FLT_RT_SW_COUNTER) {
+		memset(ipa3_ctx->flt_rt_counters.used_sw + offset,
+		   0, counter->sw_counter.num_counters * sizeof(bool));
+	} else {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	/* remove the handle */
+	idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+err:
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+}
+
+void ipa3_counter_id_remove_all(void)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter;
+	int hdl;
+
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	/* remove all counters, set used back to 0 */
+	memset(&ipa3_ctx->flt_rt_counters.used_hw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_hw));
+	memset(&ipa3_ctx->flt_rt_counters.used_sw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_sw));
+	/* remove all handles */
+	idr_for_each_entry(&ipa3_ctx->flt_rt_counters.hdl, counter, hdl)
+		idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+}
+
 int ipa3_id_alloc(void *ptr)
 {
 	int id;
@@ -6610,11 +6782,6 @@ int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
 	return 0;
 }
 
-static bool ipa3_pm_is_used(void)
-{
-	return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false;
-}
-
 int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	struct ipa_api_controller *api_ctrl)
 {
@@ -6655,7 +6822,9 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
 	api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
 	api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+	api_ctrl->ipa_add_rt_rule_v2 = ipa3_add_rt_rule_v2;
 	api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr;
+	api_ctrl->ipa_add_rt_rule_usr_v2 = ipa3_add_rt_rule_usr_v2;
 	api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
 	api_ctrl->ipa_commit_rt = ipa3_commit_rt;
 	api_ctrl->ipa_reset_rt = ipa3_reset_rt;
@@ -6663,10 +6832,14 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
 	api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
 	api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
+	api_ctrl->ipa_mdfy_rt_rule_v2 = ipa3_mdfy_rt_rule_v2;
 	api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+	api_ctrl->ipa_add_flt_rule_v2 = ipa3_add_flt_rule_v2;
 	api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr;
+	api_ctrl->ipa_add_flt_rule_usr_v2 = ipa3_add_flt_rule_usr_v2;
 	api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
 	api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule_v2 = ipa3_mdfy_flt_rule_v2;
 	api_ctrl->ipa_commit_flt = ipa3_commit_flt;
 	api_ctrl->ipa_reset_flt = ipa3_reset_flt;
 	api_ctrl->ipa_allocate_nat_device = ipa3_allocate_nat_device;
@@ -6760,7 +6933,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
 	api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
 	api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
-	api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
 	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
 		ipa3_get_modem_cfg_emb_pipe_flt;
 	api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
@@ -6802,8 +6974,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
 	api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
 	api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode;
-	api_ctrl->ipa_pm_is_used = ipa3_pm_is_used;
-	api_ctrl->ipa_wigig_uc_init = ipa3_wigig_uc_init;
+	api_ctrl->ipa_wigig_internal_init = ipa3_wigig_internal_init;
 	api_ctrl->ipa_conn_wigig_rx_pipe_i = ipa3_conn_wigig_rx_pipe_i;
 	api_ctrl->ipa_conn_wigig_client_i = ipa3_conn_wigig_client_i;
 	api_ctrl->ipa_disconn_wigig_pipe_i = ipa3_disconn_wigig_pipe_i;
@@ -7336,6 +7507,8 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
 	int res = 0;
 	int i;
 	struct ipa3_ep_context *ep;
+	enum ipa_client_type client_type;
+	struct IpaHwOffloadStatsAllocCmdData_t *gsi_info;
 
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 		ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -7344,8 +7517,54 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
 	}
 
 	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
 	memset(&mem, 0, sizeof(mem));
 
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		switch (client_type) {
+		case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[0].ch_id = 0xff;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[1].ch_id = 0xff;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[2].ch_id = 0xff;
+			gsi_info->ch_id_info[2].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[3].ch_id = 0xff;
+			gsi_info->ch_id_info[3].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[0].ch_id = 0xff;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[1].ch_id = 0xff;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		default:
+			IPADBG("client_type %d not supported\n",
+				client_type);
+		}
+	}
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		IPADBG("Calling gsi_stop_channel ch:%lu\n",
 			ep->gsi_chan_hdl);
@@ -7480,7 +7699,11 @@ void ipa3_suspend_apps_pipes(bool suspend)
 					IPAERR("failed to stop WAN channel\n");
 					ipa_assert();
 				}
-			} else {
+			} else if (!atomic_read(&ipa3_ctx->is_ssr)) {
+				/* If SSR has already started, there is no
+				 * need to start the WAN channel: SSR will
+				 * stop and reset the channel anyway.
+				 */
 				res = gsi_start_channel(ep->gsi_chan_hdl);
 				if (res) {
 					IPAERR("failed to start WAN channel\n");
@@ -7493,6 +7716,49 @@ void ipa3_suspend_apps_pipes(bool suspend)
 		if (suspend)
 			ipa3_gsi_poll_after_suspend(ep);
 	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
+	/* Considering the case for SSR. */
+	if (ipa_ep_idx == -1) {
+		IPADBG("Invalid mapping for IPA_CLIENT_ODL_DPL_CONS\n");
+		return;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+			ipa_ep_idx);
+		/*
+		 * move the channel to callback mode.
+		 * This needs to happen before starting the channel to make
+		 * sure we don't lose any interrupt
+		 */
+		if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
+			gsi_config_channel_mode(ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			if (suspend) {
+				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop ODL channel\n");
+					ipa_assert();
+				}
+			} else if (!atomic_read(&ipa3_ctx->is_ssr)) {
+				/* If SSR has already started, there is no
+				 * need to start the ODL channel: SSR will
+				 * stop and reset the channel anyway.
+				 */
+				res = gsi_start_channel(ep->gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start ODL channel\n");
+					ipa_assert();
+				}
+			}
+		} else {
+			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		}
+		if (suspend)
+			ipa3_gsi_poll_after_suspend(ep);
+	}
 }
 
 int ipa3_allocate_dma_task_for_gsi(void)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index cd1e048..f213178 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -694,6 +694,8 @@ int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 		goto exit;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);
 	ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
 	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
 	IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
@@ -723,39 +725,55 @@ int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
 
+	/* enable data path */
+	result = ipa3_enable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d\n", result,
+			ipa_ep_idx_rx);
+		goto exit;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d\n", result,
+			ipa_ep_idx_tx);
+		goto fail_enable_path1;
+	}
+
 	/* start gsi tx channel */
 	result = gsi_start_channel(ep_tx->gsi_chan_hdl);
 	if (result) {
 		IPAERR("failed to start gsi tx channel\n");
-		result = -EFAULT;
-		goto exit;
+		goto fail_enable_path2;
 	}
 
 	/* start gsi rx channel */
 	result = gsi_start_channel(ep_rx->gsi_chan_hdl);
 	if (result) {
 		IPAERR("failed to start gsi rx channel\n");
-		result = -EFAULT;
-		goto exit;
+		goto fail_start_channel1;
 	}
-
-	/* enable data path */
-	result = ipa3_enable_data_path(ipa_ep_idx_rx);
-	if (result) {
-		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_rx);
-		result = -EFAULT;
-		goto exit;
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
+			= ep_rx->gsi_chan_hdl;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
+			= DIR_PRODUCER;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
+			= ep_tx->gsi_chan_hdl;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
+			= DIR_CONSUMER;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
 	}
+	goto exit;
 
-	result = ipa3_enable_data_path(ipa_ep_idx_tx);
-	if (result) {
-		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_tx);
-		result = -EFAULT;
-		goto exit;
-	}
-
+fail_start_channel1:
+	gsi_stop_channel(ep_tx->gsi_chan_hdl);
+fail_enable_path2:
+	ipa3_disable_data_path(ipa_ep_idx_tx);
+fail_enable_path1:
+	ipa3_disable_data_path(ipa_ep_idx_rx);
 exit:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
 	return result;
@@ -833,7 +851,19 @@ int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 		result = -EFAULT;
 		goto fail;
 	}
-
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
+			= DIR_PRODUCER;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
+			= DIR_CONSUMER;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
+	}
 	if (disable_force_clear)
 		ipa3_disable_force_clear(ipa_ep_idx_rx);
 
@@ -888,3 +918,49 @@ int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
 }
+
+/**
+ * ipa3_get_wdi3_gsi_stats() - Query WDI3 gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad NULL parms for wdi3_gsi_stats\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index 64de92b..52546bd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -6,6 +6,7 @@
 #include "ipa_i.h"
 #include <linux/if_ether.h>
 #include <linux/log2.h>
+#include <linux/debugfs.h>
 #include <linux/ipa_wigig.h>
 
 #define IPA_WIGIG_DESC_RING_EL_SIZE	32
@@ -27,6 +28,25 @@
 #define W11AD_TO_GSI_DB_m 1
 #define W11AD_TO_GSI_DB_n 1
 
+static LIST_HEAD(smmu_reg_addr_list);
+static LIST_HEAD(smmu_ring_addr_list);
+static DEFINE_MUTEX(smmu_lock);
+struct dentry *wigig_dent;
+
+struct ipa_wigig_smmu_reg_addr {
+	struct list_head link;
+	phys_addr_t phys_addr;
+	enum ipa_smmu_cb_type cb_type;
+	u8 count;
+};
+
+struct ipa_wigig_smmu_ring_addr {
+	struct list_head link;
+	u64 iova;
+	enum ipa_smmu_cb_type cb_type;
+	u8 count;
+};
+
 
 static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
 	unsigned long val, void *data)
@@ -68,7 +88,7 @@ int ipa3_wigig_init_i(void)
 	return 0;
 }
 
-int ipa3_wigig_uc_init(
+int ipa3_wigig_internal_init(
 	struct ipa_wdi_uc_ready_params *inout,
 	ipa_wigig_misc_int_cb int_notify,
 	phys_addr_t *uc_db_pa)
@@ -206,6 +226,132 @@ static int ipa3_wigig_smmu_map_buffers(bool Rx,
 	return result;
 }
 
+static int ipa3_wigig_smmu_map_reg(phys_addr_t phys_addr, bool map,
+	enum ipa_smmu_cb_type cb_type)
+{
+	struct ipa_wigig_smmu_reg_addr *entry;
+	struct ipa_wigig_smmu_reg_addr *next;
+	int result = 0;
+
+	IPADBG("addr %pa, %s\n", &phys_addr, map ? "map" : "unmap");
+	mutex_lock(&smmu_lock);
+	list_for_each_entry_safe(entry, next, &smmu_reg_addr_list, link) {
+		if ((entry->phys_addr == phys_addr) &&
+			(entry->cb_type == cb_type)) {
+			IPADBG("cb %d, page %pa already mapped, ", cb_type,
+				&phys_addr);
+			if (map) {
+				entry->count++;
+				IPADBG("inc to %d\n", (entry->count));
+			} else {
+				--entry->count;
+				IPADBG("dec to %d\n", entry->count);
+				if (!(entry->count)) {
+					IPADBG("unmap and delete\n");
+					result = ipa3_smmu_map_peer_reg(
+						phys_addr, map, cb_type);
+					if (result) {
+						IPAERR("failed to unmap %pa\n",
+							&phys_addr);
+						goto finish;
+					}
+					list_del(&entry->link);
+					kfree(entry);
+				}
+			}
+			goto finish;
+		}
+	}
+	IPADBG("new page found %pa, map and add to list CB %d\n", &phys_addr,
+		cb_type);
+	result = ipa3_smmu_map_peer_reg(phys_addr, map, cb_type);
+	if (result) {
+		IPAERR("failed to map %pa\n", &phys_addr);
+		goto finish;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		IPAERR("couldn't allocate for %pa\n", &phys_addr);
+		ipa3_smmu_map_peer_reg(phys_addr, !map, cb_type);
+		result = -ENOMEM;
+		goto finish;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->phys_addr = phys_addr;
+	entry->cb_type = cb_type;
+	entry->count = 1;
+	list_add(&entry->link, &smmu_reg_addr_list);
+
+finish:
+	mutex_unlock(&smmu_lock);
+	IPADBG("exit\n");
+	return result;
+}
+
+static int ipa3_wigig_smmu_map_ring(u64 iova, u32 size, bool map,
+	struct sg_table *sgt, enum ipa_smmu_cb_type cb_type)
+{
+	struct ipa_wigig_smmu_ring_addr *entry;
+	struct ipa_wigig_smmu_ring_addr *next;
+	int result = 0;
+
+	IPADBG("iova %llX, %s\n", iova, map ? "map" : "unmap");
+	mutex_lock(&smmu_lock);
+	list_for_each_entry_safe(entry, next, &smmu_ring_addr_list, link) {
+		if ((entry->iova == iova) &&
+			(entry->cb_type == cb_type)) {
+			IPADBG("cb %d, page 0x%llX already mapped, ", cb_type,
+				iova);
+			if (map) {
+				entry->count++;
+				IPADBG("inc to %d\n", (entry->count));
+			} else {
+				--entry->count;
+				IPADBG("dec to %d\n", entry->count);
+				if (!(entry->count)) {
+					IPADBG("unmap and delete\n");
+					result = ipa3_smmu_map_peer_buff(
+						iova, size, map, sgt, cb_type);
+					if (result) {
+						IPAERR(
+							"failed to unmap 0x%llX\n",
+							iova);
+						goto finish;
+					}
+					list_del(&entry->link);
+					kfree(entry);
+				}
+			}
+			goto finish;
+		}
+	}
+	IPADBG("new page found 0x%llX, map and add to list\n", iova);
+	result = ipa3_smmu_map_peer_buff(iova, size, map, sgt, cb_type);
+	if (result) {
+		IPAERR("failed to map 0x%llX\n", iova);
+		goto finish;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		IPAERR("couldn't allocate for 0x%llX\n", iova);
+		ipa3_smmu_map_peer_buff(iova, size, !map, sgt, cb_type);
+		result = -ENOMEM;
+		goto finish;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->iova = iova;
+	entry->cb_type = cb_type;
+	entry->count = 1;
+	list_add(&entry->link, &smmu_ring_addr_list);
+
+finish:
+	mutex_unlock(&smmu_lock);
+	IPADBG("exit\n");
+	return result;
+}
+
 static int ipa3_wigig_smmu_map_channel(bool Rx,
 	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
 	void *buff,
@@ -239,7 +385,10 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 	}
 
 	if (Rx) {
-		result = ipa3_smmu_map_peer_reg(
+		IPADBG("RX %s status_ring_HWHEAD_pa %pa uC CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->status_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
 			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
 			map,
 			IPA_SMMU_CB_UC);
@@ -251,9 +400,12 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 			goto fail;
 		}
 	} else {
-
-		result = ipa3_smmu_map_peer_reg(
-			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
+		IPADBG("TX %s status_ring_HWHEAD_pa %pa AP CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->status_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->status_ring_HWHEAD_pa,
+				PAGE_SIZE),
 			map,
 			IPA_SMMU_CB_AP);
 		if (result) {
@@ -264,8 +416,12 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 			goto fail;
 		}
 
-		result = ipa3_smmu_map_peer_reg(
-			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
+		IPADBG("TX %s desc_ring_HWHEAD_pa %pa uC CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->desc_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->desc_ring_HWHEAD_pa,
+				PAGE_SIZE),
 			map,
 			IPA_SMMU_CB_UC);
 		if (result) {
@@ -276,7 +432,10 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 		}
 	}
 
-	result = ipa3_smmu_map_peer_reg(
+	IPADBG("%s status_ring_HWTAIL_pa %pa AP CB\n",
+		map ? "map" : "unmap",
+		&pipe_smmu->status_ring_HWTAIL_pa);
+	result = ipa3_wigig_smmu_map_reg(
 		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
 		map,
 		IPA_SMMU_CB_AP);
@@ -288,7 +447,10 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 		goto fail_status_HWTAIL;
 	}
 
-	result = ipa3_smmu_map_peer_reg(
+	IPADBG("%s desc_ring_HWTAIL_pa %pa AP CB\n",
+		map ? "map" : "unmap",
+		&pipe_smmu->desc_ring_HWTAIL_pa);
+	result = ipa3_wigig_smmu_map_reg(
 		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
 		map,
 		IPA_SMMU_CB_AP);
@@ -300,7 +462,10 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 	}
 
 	/* rings */
-	result = ipa3_smmu_map_peer_buff(
+	IPADBG("%s desc_ring_base_iova %llX AP CB\n",
+		map ? "map" : "unmap",
+		pipe_smmu->desc_ring_base_iova);
+	result = ipa3_wigig_smmu_map_ring(
 		pipe_smmu->desc_ring_base_iova,
 		pipe_smmu->desc_ring_size,
 		map,
@@ -313,7 +478,10 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 		goto fail_desc_ring;
 	}
 
-	result = ipa3_smmu_map_peer_buff(
+	IPADBG("%s status_ring_base_iova %llX AP CB\n",
+		map ? "map" : "unmap",
+		pipe_smmu->status_ring_base_iova);
+	result = ipa3_wigig_smmu_map_ring(
 		pipe_smmu->status_ring_base_iova,
 		pipe_smmu->status_ring_size,
 		map,
@@ -340,33 +508,33 @@ static int ipa3_wigig_smmu_map_channel(bool Rx,
 	IPADBG("exit\n");
 	return 0;
 fail_buffers:
-	ipa3_smmu_map_peer_buff(
+	ipa3_wigig_smmu_map_ring(
 		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
 		!map, &pipe_smmu->status_ring_base, IPA_SMMU_CB_AP);
 fail_status_ring:
-	ipa3_smmu_map_peer_buff(
+	ipa3_wigig_smmu_map_ring(
 		pipe_smmu->desc_ring_base_iova,	pipe_smmu->desc_ring_size,
 		!map, &pipe_smmu->desc_ring_base, IPA_SMMU_CB_AP);
 fail_desc_ring:
-	ipa3_smmu_map_peer_reg(
+	ipa3_wigig_smmu_map_reg(
 		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
 		!map, IPA_SMMU_CB_AP);
 fail_desc_HWTAIL:
-	ipa3_smmu_map_peer_reg(
+	ipa3_wigig_smmu_map_reg(
 		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
 		!map, IPA_SMMU_CB_AP);
 fail_status_HWTAIL:
 	if (Rx)
-		ipa3_smmu_map_peer_reg(
+		ipa3_wigig_smmu_map_reg(
 			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
 			!map, IPA_SMMU_CB_UC);
 	else
-		ipa3_smmu_map_peer_reg(
+		ipa3_wigig_smmu_map_reg(
 			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
 			!map, IPA_SMMU_CB_UC);
 fail_desc_HWHEAD:
 	if (!Rx)
-		ipa3_smmu_map_peer_reg(
+		ipa3_wigig_smmu_map_reg(
 			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
 			!map, IPA_SMMU_CB_AP);
 fail:
@@ -421,6 +589,11 @@ static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
 	ipa_assert();
 }
 
+static uint16_t int_modt = 15;
+static uint8_t int_modc = 200;
+static uint8_t tx_hwtail_mod_threshold = 200;
+static uint8_t rx_hwtail_mod_threshold = 200;
+
 static int ipa3_wigig_config_gsi(bool Rx,
 	bool smmu_en,
 	void *pipe_info,
@@ -450,8 +623,8 @@ static int ipa3_wigig_config_gsi(bool Rx,
 	evt_props.exclusive = true;
 	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
 	evt_props.user_data = NULL;
-	evt_props.int_modc = 200;
-	evt_props.int_modt = 15;
+	evt_props.int_modc = int_modc;
+	evt_props.int_modt = int_modt;
 	evt_props.ring_base_vaddr = NULL;
 
 	if (smmu_en) {
@@ -480,7 +653,8 @@ static int ipa3_wigig_config_gsi(bool Rx,
 		union __packed gsi_evt_scratch evt_scratch;
 
 		memset(&evt_scratch, 0, sizeof(evt_scratch));
-		evt_scratch.w11ad.update_status_hwtail_mod_threshold = 200;
+		evt_scratch.w11ad.update_status_hwtail_mod_threshold =
+			rx_hwtail_mod_threshold;
 		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
 			evt_scratch);
 		if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -626,7 +800,8 @@ static int ipa3_wigig_config_gsi(bool Rx,
 			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
 				ilog2(tx_dbuff->data_buffer_size);
 		}
-		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold = 200;
+		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold =
+			tx_hwtail_mod_threshold;
 		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
 			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
 		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
@@ -760,7 +935,8 @@ static int ipa3_wigig_config_uc(bool init,
 	return result;
 }
 
-int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
+int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent)
 {
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
@@ -777,6 +953,8 @@ int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
 
 	IPADBG("\n");
 
+	*parent = wigig_dent;
+
 	ipa_ep_idx = ipa_get_ep_mapping(rx_client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
 		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
@@ -965,7 +1143,10 @@ int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
 	return result;
 }
 
-int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
+int ipa3_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv)
 {
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
@@ -1107,8 +1288,8 @@ int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
 		goto fail;
 	}
 
-	ep->client_notify = NULL;
-	ep->priv = NULL;
+	ep->client_notify = tx_notify;
+	ep->priv = priv;
 
 	memset(&ep_cfg, 0, sizeof(ep_cfg));
 	ep_cfg.nat.nat_en = IPA_DST_NAT;
@@ -1245,6 +1426,15 @@ int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
 		goto fail;
 	}
 
+	/* only gsi ch number and dir are necessary */
+	result = ipa3_wigig_config_uc(
+		false, rx, 0,
+		ep_gsi->ipa_gsi_chan_num, 0);
+	if (result) {
+		IPAERR("failed uC channel teardown %d\n", result);
+		WARN_ON(1);
+	}
+
 	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
 	if (is_smmu_enabled) {
 		if (!pipe_smmu || !dbuff) {
@@ -1264,21 +1454,24 @@ int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
 				goto fail;
 			}
 		}
+
+		if (rx) {
+			if (!list_empty(&smmu_reg_addr_list)) {
+				IPAERR("smmu_reg_addr_list not empty\n");
+				WARN_ON(1);
+			}
+
+			if (!list_empty(&smmu_ring_addr_list)) {
+				IPAERR("smmu_ring_addr_list not empty\n");
+				WARN_ON(1);
+			}
+		}
 	} else if (pipe_smmu || dbuff) {
 		IPAERR("smmu input is not null %pK %pK\n",
 			pipe_smmu, dbuff);
 		WARN_ON(1);
 	}
 
-	/* only gsi ch number and dir are necessary */
-	result = ipa3_wigig_config_uc(
-		false, rx, 0,
-		ep_gsi->ipa_gsi_chan_num, 0);
-	if (result) {
-		IPAERR("failed uC channel teardown %d\n", result);
-		WARN_ON(1);
-	}
-
 	memset(ep, 0, sizeof(struct ipa3_ep_context));
 
 	ep->gsi_offload_state = 0;
@@ -1666,3 +1859,56 @@ int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
 	ipa_assert();
 	return res;
 }
+
+#ifndef CONFIG_DEBUG_FS
+int ipa3_wigig_init_debugfs_i(struct dentry *parent) { return 0; }
+#else
+int ipa3_wigig_init_debugfs_i(struct dentry *parent)
+{
+	const mode_t read_write_mode = 0664;
+	struct dentry *file = NULL;
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("ipa_wigig", parent);
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("fail to create folder in debug_fs\n");
+		return -EFAULT;
+	}
+
+	wigig_dent = dent;
+
+	file = debugfs_create_u8("modc", read_write_mode, dent,
+		&int_modc);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file modc\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u16("modt", read_write_mode, dent,
+		&int_modt);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file modt\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u8("rx_mod_th", read_write_mode, dent,
+		&rx_hwtail_mod_threshold);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file rx_mod_th\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u8("tx_mod_th", read_write_mode, dent,
+		&tx_hwtail_mod_threshold);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file tx_mod_th\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	debugfs_remove_recursive(dent);
+	wigig_dent = NULL;
+	return -EFAULT;
+}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 942fa521..25c9416 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -6,7 +6,7 @@
 #ifndef _IPAHAL_H_
 #define _IPAHAL_H_
 
-#include <linux/msm_ipa.h>
+#include "../ipa_defs.h"
 #include "../../ipa_common_i.h"
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 735e7ea..4f6332a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -155,10 +155,14 @@ static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
 		struct ipa_ipfltri_rule_eq *eq_atrb);
 static int ipa_rt_parse_hw_rule(u8 *addr,
 		struct ipahal_rt_rule_entry *rule);
+static int ipa_rt_parse_hw_rule_ipav4_5(u8 *addr,
+		struct ipahal_rt_rule_entry *rule);
 static int ipa_flt_parse_hw_rule(u8 *addr,
 		struct ipahal_flt_rule_entry *rule);
 static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
 		struct ipahal_flt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_flt_rule_entry *rule);
 
 #define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
 	(ARRAY_SIZE(__eq_array) <= (__eq_index))
@@ -268,6 +272,74 @@ static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
 	return 0;
 }
 
+static int ipa_rt_gen_hw_rule_ipav4_5(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_5_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_5_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+	rule_hdr->u.hdr.stats_cnt_idx_lsb = params->cnt_idx & 0x3F;
+	rule_hdr->u.hdr.stats_cnt_idx_msb = (params->cnt_idx & 0xC0) >> 6;
+
+	buf += sizeof(struct ipa4_5_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
 static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
 	u32 *hw_len, u8 *buf)
 {
@@ -436,6 +508,95 @@ static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params,
 	return 0;
 }
 
+static int ipa_flt_gen_hw_rule_ipav4_5(
+	struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_5_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_5_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+
+	ipa_assert_on(params->rule->pdn_idx & ~0xF);
+	rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx;
+	rule_hdr->u.hdr.set_metadata = params->rule->set_metadata;
+	rule_hdr->u.hdr.rsvd2 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+	rule_hdr->u.hdr.stats_cnt_idx_lsb = params->cnt_idx & 0x3F;
+	rule_hdr->u.hdr.stats_cnt_idx_msb = (params->cnt_idx & 0xC0) >> 6;
+
+	buf += sizeof(struct ipa4_5_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id,
+		rule_hdr->u.hdr.pdn_idx,
+		rule_hdr->u.hdr.set_metadata);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
 /*
  * This array contains the FLT/RT info for IPAv3 and later.
  * All the information on IPAv3 are statically defined below.
@@ -599,11 +760,11 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
 		ipa_fltrt_create_flt_bitmap,
 		ipa_fltrt_create_tbl_addr,
 		ipa_fltrt_parse_tbl_addr,
-		ipa_rt_gen_hw_rule,
-		ipa_flt_gen_hw_rule_ipav4,
+		ipa_rt_gen_hw_rule_ipav4_5,
+		ipa_flt_gen_hw_rule_ipav4_5,
 		ipa_flt_generate_eq,
-		ipa_rt_parse_hw_rule,
-		ipa_flt_parse_hw_rule_ipav4,
+		ipa_rt_parse_hw_rule_ipav4_5,
+		ipa_flt_parse_hw_rule_ipav4_5,
 		{
 			[IPA_TOS_EQ]			= 0xFF,
 			[IPA_PROTOCOL_EQ]		= 1,
@@ -3049,6 +3210,46 @@ static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
 		atrb, &rule->rule_size);
 }
 
+static int ipa_rt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa4_5_rt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_5_rt_rule_hw_hdr *)addr;
+	atrb = &rule->eq_attrib;
+
+	IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+	if (rule_hdr->u.hdr.proc_ctx) {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+	}
+	rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->cnt_idx = rule_hdr->u.hdr.stats_cnt_idx_lsb |
+		(rule_hdr->u.hdr.stats_cnt_idx_msb) << 6;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
 static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
 {
 	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
@@ -3144,6 +3345,57 @@ static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
 		atrb, &rule->rule_size);
 }
 
+static int ipa_flt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa4_5_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_5_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON_RATELIMIT_IPA(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+	rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx;
+	rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata;
+	rule->cnt_idx = rule_hdr->u.hdr.stats_cnt_idx_lsb |
+		(rule_hdr->u.hdr.stats_cnt_idx_msb) << 6;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
 /*
  * ipahal_fltrt_init() - Build the FLT/RT information table
  *  See ipahal_fltrt_objs[] comments
@@ -3430,6 +3682,33 @@ u32 ipahal_get_low_rule_id(void)
 }
 
 /*
+ * Is the given counter id valid
+ */
+bool ipahal_is_rule_cnt_id_valid(u8 cnt_id)
+{
+	if (cnt_id < 0 || cnt_id > IPA_FLT_RT_HW_COUNTER)
+		return false;
+	return true;
+}
+
+
+/*
+ * low value possible for counter hdl id
+ */
+u32 ipahal_get_low_hdl_id(void)
+{
+	return IPA4_5_LOW_CNT_ID;
+}
+
+/*
+ * max counter hdl id for stats
+ */
+u32 ipahal_get_high_hdl_id(void)
+{
+	return IPA_MAX_FLT_RT_CNT_INDEX;
+}
+
+/*
  * ipahal_rt_generate_empty_img() - Generate empty route image
  *  Creates routing header buffer for the given tables number.
  *  For each table, make it point to the empty table on DDR.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
index b084b1e..f18700d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_FLTRT_H_
@@ -56,6 +56,7 @@ enum ipahal_rt_rule_hdr_type {
  * @hdr_ofst: Offset of the header in the header table
  * @priority: Rule priority
  * @id: Rule ID
+ * @cnt_idx: Stats counter index
  * @rule: Rule info
  */
 struct ipahal_rt_rule_gen_params {
@@ -66,7 +67,8 @@ struct ipahal_rt_rule_gen_params {
 	u32 hdr_ofst;
 	u32 priority;
 	u32 id;
-	const struct ipa_rt_rule *rule;
+	u8 cnt_idx;
+	const struct ipa_rt_rule_i *rule;
 };
 
 /*
@@ -78,6 +80,7 @@ struct ipahal_rt_rule_gen_params {
  * @priority: Rule priority
  * @retain_hdr: to retain the removed header in header removal
  * @id: Rule ID
+ * @cnt_idx: stats counter index
  * @eq_attrib: Equations and their params in the rule
  * @rule_size: Rule size in memory
  */
@@ -89,6 +92,7 @@ struct ipahal_rt_rule_entry {
 	u32 priority;
 	bool retain_hdr;
 	u32 id;
+	u8 cnt_idx;
 	struct ipa_ipfltri_rule_eq eq_attrib;
 	u32 rule_size;
 };
@@ -99,6 +103,7 @@ struct ipahal_rt_rule_entry {
  * @rt_tbl_idx: Routing table the rule pointing to
  * @priority: Rule priority
  * @id: Rule ID
+ * @cnt_idx: Stats counter index
  * @rule: Rule info
  */
 struct ipahal_flt_rule_gen_params {
@@ -106,7 +111,8 @@ struct ipahal_flt_rule_gen_params {
 	u32 rt_tbl_idx;
 	u32 priority;
 	u32 id;
-	const struct ipa_flt_rule *rule;
+	u8 cnt_idx;
+	const struct ipa_flt_rule_i *rule;
 };
 
 /*
@@ -114,12 +120,14 @@ struct ipahal_flt_rule_gen_params {
  * @rule: Rule info
  * @priority: Rule priority
  * @id: Rule ID
+ * @cnt_idx: stats counter index
  * @rule_size: Rule size in memory
  */
 struct ipahal_flt_rule_entry {
-	struct ipa_flt_rule rule;
+	struct ipa_flt_rule_i rule;
 	u32 priority;
 	u32 id;
+	u8 cnt_idx;
 	u32 rule_size;
 };
 
@@ -157,6 +165,22 @@ u32 ipahal_get_rule_id_hi_bit(void);
 u32 ipahal_get_low_rule_id(void);
 
 /*
+ * low value possible for counter hdl id
+ */
+u32 ipahal_get_low_hdl_id(void);
+
+/*
+ * max counter hdl id for stats
+ */
+u32 ipahal_get_high_hdl_id(void);
+
+/* used for query check and associated with rt/flt rules */
+bool ipahal_is_rule_cnt_id_valid(u8 cnt_id);
+
+/* max rule id for stats */
+bool ipahal_get_max_stats_rule_id(void);
+
+/*
  * ipahal_rt_generate_empty_img() - Generate empty route image
  *  Creates routing header buffer for the given tables number.
  * For each table, make it point to the empty table on DDR.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
index 0062ff1..c8a6a30 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -60,6 +60,11 @@ enum ipa_fltrt_equations {
 #define IPA3_0_RULE_ID_BIT_LEN (10)
 #define IPA3_0_LOW_RULE_ID (1)
 
+/*
+ * COUNTER ID, LOW COUNTER ID.
+ */
+#define IPA4_5_LOW_CNT_ID (1)
+
 /**
  * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
  * @word: routing rule header properties
@@ -98,6 +103,45 @@ struct ipa3_0_rt_rule_hw_hdr {
 };
 
 /**
+ * struct ipa4_5_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is referenced header is lcl or sys memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @stats_cnt_idx_msb: stats cnt index msb
+ * @rsvd2: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @stats_cnt_idx_lsb: stats cnt index lsb
+ */
+struct ipa4_5_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 stats_cnt_idx_msb : 2;
+			u64 rsvd2 : 3;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 stats_cnt_idx_lsb : 6;
+		} hdr;
+	} u;
+};
+
+/**
  * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
  * @word: filtering rule properties
  * @en_rule: enable rule
@@ -168,6 +212,45 @@ struct ipa4_0_flt_rule_hw_hdr {
 	} u;
 };
 
+/**
+ * struct ipa4_5_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @pdn_idx: in case of go to src nat action possible to input the pdn index to
+ *  the NAT block
+ * @set_metadata: enable metadata replacement in the NAT block
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @stats_cnt_idx_msb: stats cnt index msb
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @stats_cnt_idx_lsb: stats cnt index lsb
+ */
+struct ipa4_5_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule : 16;
+			u64 action : 5;
+			u64 rt_tbl_idx : 5;
+			u64 retain_hdr : 1;
+			u64 pdn_idx : 4;
+			u64 set_metadata : 1;
+			u64 priority : 10;
+			u64 stats_cnt_idx_msb : 2;
+			u64 rsvd2 : 4;
+			u64 rule_id : 10;
+			u64 stats_cnt_idx_lsb : 6;
+		} hdr;
+	} u;
+};
+
 int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
 void ipahal_fltrt_destroy(void);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
index 9ab1db0..3edca59 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
+#include "ipahal.h"
 #include "ipahal_hw_stats.h"
 #include "ipahal_hw_stats_i.h"
 #include "ipahal_i.h"
@@ -220,6 +221,70 @@ static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats,
 	return 0;
 }
 
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt_v4_5(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	int num = (int)(params);
+
+	if (num > IPA_MAX_FLT_RT_CNT_INDEX ||
+		num <= 0) {
+		IPAHAL_ERR("num %d not valid\n", num);
+		return NULL;
+	}
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		num *
+		sizeof(struct ipahal_stats_flt_rt_v4_5_hw),
+		is_atomic_ctx);
+	if (!pyld)
+		return NULL;
+	pyld->len = num *
+		sizeof(struct ipahal_stats_flt_rt_v4_5_hw);
+	return pyld;
+}
+
+static int ipahal_get_offset_flt_rt_v4_5(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *in =
+		(struct ipahal_stats_get_offset_flt_rt_v4_5 *)params;
+	int num;
+
+	out->offset = (in->start_id - 1) *
+		sizeof(struct ipahal_stats_flt_rt_v4_5);
+	num = in->end_id - in->start_id + 1;
+	out->size = num * sizeof(struct ipahal_stats_flt_rt_v4_5);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_flt_rt_v4_5(void *init_params,
+	void *raw_stats, void *parsed_stats)
+{
+	struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
+	struct ipa_ioc_flt_rt_query *query =
+		(struct ipa_ioc_flt_rt_query *)parsed_stats;
+	int num, i;
+
+	num = query->end_id - query->start_id + 1;
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < num; i++) {
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_bytes =
+			raw_hw[i].num_bytes;
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_pkts_hash =
+			raw_hw[i].num_packets_hash;
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_pkts =
+			raw_hw[i].num_packets;
+	}
+
+	return 0;
+}
+
+
 static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt(
 	void *params, bool is_atomic_ctx)
 {
@@ -439,6 +504,26 @@ static struct ipahal_hw_stats_obj
 		ipahal_get_offset_drop,
 		ipahal_parse_stats_drop
 	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_QUOTA] = {
+		ipahal_generate_init_pyld_quota,
+		ipahal_get_offset_quota,
+		ipahal_parse_stats_quota
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_FNR] = {
+		ipahal_generate_init_pyld_flt_rt_v4_5,
+		ipahal_get_offset_flt_rt_v4_5,
+		ipahal_parse_stats_flt_rt_v4_5
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_TETHERING] = {
+		ipahal_generate_init_pyld_tethering,
+		ipahal_get_offset_tethering,
+		ipahal_parse_stats_tethering
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_DROP] = {
+		ipahal_generate_init_pyld_drop,
+		ipahal_get_offset_drop,
+		ipahal_parse_stats_drop
+	},
 };
 
 int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type)
@@ -518,9 +603,6 @@ struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
 		return NULL;
 	}
 
-	if (WARN(!params, "Null arg\n"))
-		return NULL;
-
 	hw_obj_ptr = &ipahal_hw_stats_objs[ipahal_ctx->hw_type][type];
 	return hw_obj_ptr->generate_init_pyld(params, is_atomic_ctx);
 }
@@ -538,3 +620,15 @@ int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
 	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats(
 		init_params, raw_stats, parsed_stats);
 }
+
+void ipahal_set_flt_rt_sw_stats(void *raw_stats,
+	struct ipa_flt_rt_stats sw_stats)
+{
+	struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
+
+	IPAHAL_DBG_LOW("\n");
+	raw_hw->num_bytes = sw_stats.num_bytes;
+	raw_hw->num_packets_hash = sw_stats.num_pkts_hash;
+	raw_hw->num_packets = sw_stats.num_pkts;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
index 094f21b..1d42bd4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_HW_STATS_H_
@@ -152,6 +152,28 @@ struct ipahal_stats_flt_rt {
 };
 
 /*
+ * struct ipahal_stats_flt_rt_v4_5 - flt_rt statistics
+ * @num_packets: Total number of packets hit this rule
+ * @num_packets_hash: Total number of packets hit this rule in hash table
+ * @num_bytes: Total number of bytes hit this rule
+ */
+struct ipahal_stats_flt_rt_v4_5 {
+	u32 num_packets;
+	u32 num_packets_hash;
+	u64 num_bytes;
+};
+
+/*
+ * struct ipahal_stats_get_offset_flt_rt_v4_5 - Get offset parameters for flt_rt
+ * @start_id: start_id to get the offset
+ * @end_id: end_id to get the offset
+ */
+struct ipahal_stats_get_offset_flt_rt_v4_5 {
+	u8 start_id;
+	u8 end_id;
+};
+
+/*
  * struct ipahal_stats_init_drop - Initializations parameters for Drop
  * @enabled_bitmask: bit mask of pipes to be monitored
  */
@@ -238,4 +260,14 @@ int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
 	void *raw_stats, void *parsed_stats);
 
 
+/*
+ * ipahal_set_flt_rt_sw_stats - set sw counter stats for FnR
+ * @raw_stats: stats write to IPA SRAM
+ * @sw_stats: FnR sw stats to be written
+ *
+ * Return: None
+ */
+void ipahal_set_flt_rt_sw_stats(void *raw_stats,
+	struct ipa_flt_rt_stats sw_stats);
+
 #endif /* _IPAHAL_HW_STATS_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
index 530cb51..9cd35f6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_HW_STATS_I_H_
@@ -40,6 +40,12 @@ struct ipahal_stats_flt_rt_hw {
 	u64 num_packets:32;
 };
 
+struct ipahal_stats_flt_rt_v4_5_hw {
+	u64 num_packets_hash:32;
+	u64 num_packets:32;
+	u64 num_bytes;
+};
+
 struct ipahal_stats_drop_hw {
 	u64 drop_byte_cnt:40;
 	u64 drop_packet_cnt:24;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 99e3a15..6bf24c86 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -3021,6 +3021,36 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 		ipareg_construct_timers_xo_clk_div_cfg,
 		ipareg_parse_timers_xo_clk_div_cfg,
 		0x000000250, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STAT_QUOTA_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000700, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_QUOTA_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000708, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_TETHERING_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000710, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_TETHERING_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000718, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000720, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000724, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000728, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000072C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_DROP_CNT_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000750, 0x4, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STAT_DROP_CNT_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000758, 0x4, 0, 0, 1},
 	[IPA_HW_v4_5][IPA_ENDP_INIT_SEQ_n] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
 		0x0000083C, 0x70, 0, 12, 1},
@@ -3484,7 +3514,7 @@ u32 ipahal_get_reg_base(void)
 void ipahal_get_aggr_force_close_valmask(int ep_idx,
 	struct ipahal_reg_valmask *valmask)
 {
-	u32 shft;
+	u32 shft = 0;
 	u32 bmsk = 0;
 
 	if (!valmask) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index dd691c0..d1f23c5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -42,7 +42,6 @@
 #define WWAN_METADATA_SHFT 24
 #define WWAN_METADATA_MASK 0xFF000000
 #define WWAN_DATA_LEN 9216
-#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
 #define HEADROOM_FOR_QMAP   8 /* for mux header */
 #define TAILROOM            0 /* for padding by mux layer */
 #define MAX_NUM_OF_MUX_CHANNEL  15 /* max mux channels */
@@ -137,7 +136,6 @@ struct rmnet_ipa3_context {
 	int rmnet_index;
 	bool egress_set;
 	bool a7_ul_flt_set;
-	struct workqueue_struct *rm_q6_wq;
 	atomic_t is_initialized;
 	atomic_t is_ssr;
 	void *lcl_mdm_subsys_notify_handle;
@@ -681,6 +679,41 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
 			}
 		}
 	}
+
+	if (rule_req->ul_firewall_indices_list_valid) {
+		IPAWANDBG("Receive ul_firewall_indices_list_len = (%d)",
+			rule_req->ul_firewall_indices_list_len);
+
+		if (rule_req->ul_firewall_indices_list_len >
+			rmnet_ipa3_ctx->num_q6_rules) {
+			IPAWANERR("UL rule indices are not valid: (%d/%d)\n",
+					rule_req->xlat_filter_indices_list_len,
+					rmnet_ipa3_ctx->num_q6_rules);
+			goto failure;
+		}
+
+		ipa3_qmi_ctx->ul_firewall_indices_list_valid = 1;
+		ipa3_qmi_ctx->ul_firewall_indices_list_len =
+			rule_req->ul_firewall_indices_list_len;
+
+		for (i = 0; i < rule_req->ul_firewall_indices_list_len; i++) {
+			ipa3_qmi_ctx->ul_firewall_indices_list[i] =
+				rule_req->ul_firewall_indices_list[i];
+		}
+
+		for (i = 0; i < rule_req->ul_firewall_indices_list_len; i++) {
+			if (rule_req->ul_firewall_indices_list[i]
+				>= rmnet_ipa3_ctx->num_q6_rules) {
+				IPAWANERR("UL rule idx is wrong: %d\n",
+					rule_req->ul_firewall_indices_list[i]);
+				goto failure;
+			} else {
+				ipa3_qmi_ctx->q6_ul_filter_rule
+				[rule_req->ul_firewall_indices_list[i]]
+				.replicate_needed = 1;
+			}
+		}
+	}
 	goto success;
 
 failure:
@@ -1252,29 +1285,25 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 send:
-	/* IPA_RM checking start */
-	if (ipa3_ctx->use_ipa_pm) {
-		/* activate the modem pm for clock scaling */
-		ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl);
-		ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl);
-	} else {
-		ret = ipa_rm_inactivity_timer_request_resource(
-			IPA_RM_RESOURCE_WWAN_0_PROD);
-	}
+	/* IPA_PM checking start */
+	/* activate the modem pm for clock scaling */
+	ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl);
+	ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl);
+
 	if (ret == -EINPROGRESS) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&wwan_ptr->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 	if (ret) {
-		IPAWANERR("[%s] fatal: ipa rm timer req resource failed %d\n",
+		IPAWANERR("[%s] fatal: ipa pm activate failed %d\n",
 		       dev->name, ret);
 		dev_kfree_skb_any(skb);
 		dev->stats.tx_dropped++;
 		spin_unlock_irqrestore(&wwan_ptr->lock, flags);
-		return -EFAULT;
+		return NETDEV_TX_OK;
 	}
-	/* IPA_RM checking end */
+	/* IPA_PM checking end */
 
 	/*
 	 * both data packets and command will be routed to
@@ -1282,6 +1311,14 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
 	if (ret) {
+		if (ret == -EPIPE) {
+			IPAWANERR_RL("[%s] fatal: pipe is not valid\n",
+				dev->name);
+			dev_kfree_skb_any(skb);
+			dev->stats.tx_dropped++;
+			spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+			return NETDEV_TX_OK;
+		}
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -1292,13 +1329,9 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 	ret = NETDEV_TX_OK;
 out:
 	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
-		if (ipa3_ctx->use_ipa_pm) {
-			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
-			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
-		} else {
-			ipa_rm_inactivity_timer_release_resource(
-				IPA_RM_RESOURCE_WWAN_0_PROD);
-		}
+		ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
+		ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
+
 	}
 	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
 	return ret;
@@ -1357,13 +1390,9 @@ static void apps_ipa_tx_complete_notify(void *priv,
 	}
 
 	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
-		if (ipa3_ctx->use_ipa_pm) {
-			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
-			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
-		} else {
-			ipa_rm_inactivity_timer_release_resource(
-			IPA_RM_RESOURCE_WWAN_0_PROD);
-		}
+		ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
+		ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
+
 	}
 	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
 	dev_kfree_skb_any(skb);
@@ -1419,6 +1448,26 @@ static void apps_ipa_packet_receive_notify(void *priv,
 	}
 }
 
+/* Send RSC endpoint info to modem using QMI indication message */
+
+static int ipa_send_rsc_pipe_ind_to_modem(void)
+{
+	struct ipa_endp_desc_indication_msg_v01 req;
+	struct ipa_ep_id_type_v01 *ep_info;
+
+	memset(&req, 0, sizeof(struct ipa_endp_desc_indication_msg_v01));
+	req.ep_info_len = 1;
+	req.ep_info_valid = true;
+	req.num_eps_valid = true;
+	req.num_eps = 1;
+	ep_info = &req.ep_info[req.ep_info_len - 1];
+	ep_info->ep_id = rmnet_ipa3_ctx->ipa3_to_apps_hdl;
+	ep_info->ic_type = DATA_IC_TYPE_AP_V01;
+	ep_info->ep_type = DATA_EP_DESC_TYPE_RSC_PROD_V01;
+	ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
+	return ipa3_qmi_send_rsc_pipe_indication(&req);
+}
+
 static int handle3_ingress_format(struct net_device *dev,
 			struct rmnet_ioctl_extended_s *in)
 {
@@ -1520,6 +1569,9 @@ static int handle3_ingress_format(struct net_device *dev,
 	if (ret)
 		ipa3_del_a7_qmap_hdr();
 
+	/* Sending QMI indication message to share RSC pipe details */
+	if (dev->features & NETIF_F_GRO_HW)
+		ipa_send_rsc_pipe_ind_to_modem();
 end:
 	if (ret)
 		IPAWANERR("failed to configure ingress\n");
@@ -2044,68 +2096,6 @@ static void ipa3_wwan_setup(struct net_device *dev)
 	dev->watchdog_timeo = 1000;
 }
 
-/* IPA_RM related functions start*/
-static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
-		ipa3_q6_prod_rm_request_resource);
-static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
-		ipa3_q6_prod_rm_release_resource);
-
-static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
-{
-	int ret = 0;
-
-	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
-	if (ret < 0 && ret != -EINPROGRESS) {
-		IPAWANERR("ipa_rm_request_resource failed %d\n", ret);
-		return;
-	}
-}
-
-static int ipa3_q6_rm_request_resource(void)
-{
-	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
-	   &ipa3_q6_con_rm_request, 0);
-	return 0;
-}
-
-static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
-{
-	int ret = 0;
-
-	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
-	if (ret < 0 && ret != -EINPROGRESS) {
-		IPAWANERR("ipa_rm_release_resource failed %d\n", ret);
-		return;
-	}
-}
-
-
-static int ipa3_q6_rm_release_resource(void)
-{
-	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
-	   &ipa3_q6_con_rm_release, 0);
-	return 0;
-}
-
-
-static void ipa3_q6_rm_notify_cb(void *user_data,
-		enum ipa_rm_event event,
-		unsigned long data)
-{
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		IPAWANDBG_LOW("Q6_PROD GRANTED CB\n");
-		break;
-	case IPA_RM_RESOURCE_RELEASED:
-		IPAWANDBG_LOW("Q6_PROD RELEASED CB\n");
-		break;
-	default:
-		return;
-	}
-}
-
 /**
  * rmnet_ipa_send_coalesce_notification
  * (uint8_t qmap_id, bool enable, bool tcp, bool udp)
@@ -2150,16 +2140,36 @@ static int rmnet_ipa_send_coalesce_notification(uint8_t qmap_id,
 
 int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state)
 {
+	uint32_t bw_mbps = 0;
+	int ret = 0;
+
 	if (!state)
 		return -EINVAL;
 
-	if (!ipa_pm_is_used())
-		return 0;
-
-	if (state->up)
-		return ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
-	else
-		return ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
+	if (state->up) {
+		if (rmnet_ipa3_ctx->ipa_config_is_apq) {
+			bw_mbps = 5200;
+			ret = ipa3_vote_for_bus_bw(&bw_mbps);
+			if (ret) {
+				IPAERR("Failed to vote for bus BW (%u)\n",
+							bw_mbps);
+				return ret;
+			}
+		}
+		ret = ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
+	} else {
+		if (rmnet_ipa3_ctx->ipa_config_is_apq) {
+			bw_mbps = 0;
+			ret = ipa3_vote_for_bus_bw(&bw_mbps);
+			if (ret) {
+				IPAERR("Failed to vote for bus BW (%u)\n",
+							bw_mbps);
+				return ret;
+			}
+		}
+		ret = ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
+	}
+	return ret;
 }
 
 /**
@@ -2206,114 +2216,16 @@ static void ipa3_q6_deregister_pm(void)
 
 int ipa3_wwan_set_modem_perf_profile(int throughput)
 {
-	struct ipa_rm_perf_profile profile;
 	int ret;
-	int tether_bridge_handle = 0;
 
-	if (ipa3_ctx->use_ipa_pm) {
-		/* query rmnet-tethering handle */
-		tether_bridge_handle = ipa3_teth_bridge_get_pm_hdl();
-		if (tether_bridge_handle > 0) {
-			/* only update with valid handle*/
-			ret = ipa_pm_set_throughput(tether_bridge_handle,
-				throughput);
-		}
-		/* for TETH MODEM on softap/rndis */
-		ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_teth_pm_hdl,
-			throughput);
-	} else {
-		memset(&profile, 0, sizeof(profile));
-		profile.max_supported_bandwidth_mbps = throughput;
-		ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
-			&profile);
-	}
+	IPAWANDBG("throughput: %d\n", throughput);
+	/* for TETH MODEM on softap/rndis */
+	ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_teth_pm_hdl,
+	throughput);
 
 	return ret;
 }
 
-static int ipa3_q6_initialize_rm(void)
-{
-	struct ipa_rm_create_params create_params;
-	struct ipa_rm_perf_profile profile;
-	int result;
-
-	/* Initialize IPA_RM workqueue */
-	rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
-	if (!rmnet_ipa3_ctx->rm_q6_wq)
-		return -ENOMEM;
-
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
-	create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
-	result = ipa_rm_create_resource(&create_params);
-	if (result)
-		goto create_rsrc_err1;
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
-	create_params.release_resource = &ipa3_q6_rm_release_resource;
-	create_params.request_resource = &ipa3_q6_rm_request_resource;
-	result = ipa_rm_create_resource(&create_params);
-	if (result)
-		goto create_rsrc_err2;
-	/* add dependency*/
-	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-	if (result)
-		goto add_dpnd_err;
-	/* setup Performance profile */
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = 100;
-	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
-			&profile);
-	if (result)
-		goto set_perf_err;
-	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
-			&profile);
-	if (result)
-		goto set_perf_err;
-	return result;
-
-set_perf_err:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-add_dpnd_err:
-	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
-	if (result < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-			IPA_RM_RESOURCE_Q6_CONS, result);
-create_rsrc_err2:
-	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
-	if (result < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-			IPA_RM_RESOURCE_Q6_PROD, result);
-create_rsrc_err1:
-	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
-	return result;
-}
-
-void ipa3_q6_deinitialize_rm(void)
-{
-	int ret;
-
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_APPS_CONS);
-	if (ret < 0)
-		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
-			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
-			ret);
-	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
-	if (ret < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-			IPA_RM_RESOURCE_Q6_CONS, ret);
-	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
-	if (ret < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-			IPA_RM_RESOURCE_Q6_PROD, ret);
-
-	if (rmnet_ipa3_ctx->rm_q6_wq)
-		destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
-}
-
 static void ipa3_wake_tx_queue(struct work_struct *work)
 {
 	if (IPA_NETDEV()) {
@@ -2325,56 +2237,21 @@ static void ipa3_wake_tx_queue(struct work_struct *work)
 }
 
 /**
- * ipa3_rm_resource_granted() - Called upon
- * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped.
+ * ipa3_pm_resource_granted() - Called upon
+ * IPA_PM_RESOURCE_GRANTED event. Wakes up the tx workqueue.
  *
  * @work: work object supplied ny workqueue
  *
  * Return codes:
  * None
  */
-static void ipa3_rm_resource_granted(void *dev)
+static void ipa3_pm_resource_granted(void *dev)
 {
 	IPAWANDBG_LOW("Resource Granted - starting queue\n");
 	schedule_work(&ipa3_tx_wakequeue_work);
 }
 
-/**
- * ipa3_rm_notify() - Callback function for RM events. Handles
- * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
- * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
- * workqueue.
- *
- * @dev: network device
- * @event: IPA RM event
- * @data: Additional data provided by IPA RM
- *
- * Return codes:
- * None
- */
-static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
-			  unsigned long data)
-{
-	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
-
-	pr_debug("%s: event %d\n", __func__, event);
-	switch (event) {
-	case IPA_RM_RESOURCE_GRANTED:
-		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
-			complete_all(&wwan_ptr->resource_granted_completion);
-			break;
-		}
-		ipa3_rm_resource_granted(dev);
-		break;
-	case IPA_RM_RESOURCE_RELEASED:
-		break;
-	default:
-		pr_err("%s: unknown event %d\n", __func__, event);
-		break;
-	}
-}
-
-/* IPA_RM related functions end*/
+/* IPA_PM related functions end*/
 
 static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
 			   unsigned long code,
@@ -2465,7 +2342,7 @@ static void ipa_pm_wwan_pm_cb(void *p, enum ipa_pm_cb_event event)
 			complete_all(&wwan_ptr->resource_granted_completion);
 			break;
 		}
-		ipa3_rm_resource_granted(dev);
+		ipa3_pm_resource_granted(dev);
 		break;
 	default:
 		pr_err("%s: unknown event %d\n", __func__, event);
@@ -2497,76 +2374,6 @@ static void ipa3_wwan_deregister_netdev_pm_client(void)
 	ipa_pm_deregister(rmnet_ipa3_ctx->pm_hdl);
 }
 
-static int ipa3_wwan_create_wwan_rm_resource(struct net_device *dev)
-{
-	struct ipa_rm_create_params ipa_rm_params;
-	struct ipa_rm_perf_profile profile;
-	int ret;
-
-	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
-	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
-	ipa_rm_params.reg_params.user_data = dev;
-	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
-	ret = ipa_rm_create_resource(&ipa_rm_params);
-	if (ret) {
-		pr_err("%s: unable to create resourse %d in IPA RM\n",
-			__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
-		return ret;
-	}
-	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_INACTIVITY_TIMER);
-	if (ret) {
-		pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
-			__func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
-		goto timer_init_err;
-	}
-	/* add dependency */
-	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (ret)
-		goto add_dpnd_err;
-	/* setup Performance profile */
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
-		&profile);
-	if (ret)
-		goto set_perf_err;
-
-	return 0;
-
-set_perf_err:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-add_dpnd_err:
-	ipa_rm_inactivity_timer_destroy(
-		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
-timer_init_err:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	return ret;
-}
-
-static void ipa3_wwan_delete_wwan_rm_resource(void)
-{
-	int ret;
-
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (ret < 0)
-		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
-		ret);
-	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
-	if (ret < 0)
-		IPAWANERR(
-		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
-	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	if (ret < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
-}
-
 /**
  * ipa3_wwan_probe() - Initialized the module and registers as a
  * network interface to the network stack
@@ -2677,22 +2484,16 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
 		&rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
 
 	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
-		/* IPA_RM configuration starts */
-		if (ipa3_ctx->use_ipa_pm)
-			ret = ipa3_q6_register_pm();
-		else
-			ret = ipa3_q6_initialize_rm();
+		/* IPA_PM configuration starts */
+		ret = ipa3_q6_register_pm();
 		if (ret) {
-			IPAWANERR("ipa3_q6_initialize_rm failed, ret: %d\n",
+			IPAWANERR("ipa3_q6_register_pm failed, ret: %d\n",
 					ret);
 			goto q6_init_err;
 		}
 	}
 
-	if (ipa3_ctx->use_ipa_pm)
-		ret = ipa3_wwan_register_netdev_pm_client(dev);
-	else
-		ret = ipa3_wwan_create_wwan_rm_resource(dev);
+	ret = ipa3_wwan_register_netdev_pm_client(dev);
 	if (ret) {
 		IPAWANERR("fail to create/register pm resources\n");
 		goto fail_pm;
@@ -2735,6 +2536,7 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
 	}
 	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
 	atomic_set(&rmnet_ipa3_ctx->ap_suspend, 0);
+	ipa3_update_ssr_state(false);
 
 	IPAWANERR("rmnet_ipa completed initialization\n");
 	return 0;
@@ -2743,18 +2545,11 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
 		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
 	unregister_netdev(dev);
 set_perf_err:
-	if (ipa3_ctx->use_ipa_pm)
-		ipa3_wwan_deregister_netdev_pm_client();
-	else
-		ipa3_wwan_delete_wwan_rm_resource();
+
+	ipa3_wwan_deregister_netdev_pm_client();
 fail_pm:
-	if (ipa3_ctx->use_ipa_pm) {
-		if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
-			ipa3_q6_deregister_pm();
-	} else {
-		if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
-			ipa3_q6_deinitialize_rm();
-	}
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+		ipa3_q6_deregister_pm();
 q6_init_err:
 	free_netdev(dev);
 	rmnet_ipa3_ctx->wwan_priv = NULL;
@@ -2785,11 +2580,9 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
 	if (ipa3_rmnet_res.ipa_napi_enable)
 		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
 	mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
+	IPAWANINFO("rmnet_ipa unregister_netdev\n");
 	unregister_netdev(IPA_NETDEV());
-	if (ipa3_ctx->use_ipa_pm)
-		ipa3_wwan_deregister_netdev_pm_client();
-	else
-		ipa3_wwan_delete_wwan_rm_resource();
+	ipa3_wwan_deregister_netdev_pm_client();
 	cancel_work_sync(&ipa3_tx_wakequeue_work);
 	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
 	if (IPA_NETDEV())
@@ -2871,11 +2664,8 @@ static int rmnet_ipa_ap_suspend(struct device *dev)
 	netif_stop_queue(netdev);
 	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
 
-	IPAWANDBG("De-activating the PM/RM resource.\n");
-	if (ipa3_ctx->use_ipa_pm)
-		ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
-	else
-		ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	IPAWANDBG("De-activating the PM resource.\n");
+	ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
 	ret = 0;
 bail:
 	IPAWANDBG("Exit with %d\n", ret);
@@ -3599,8 +3389,9 @@ static int rmnet_ipa3_query_tethering_stats_modem(
 static int rmnet_ipa3_query_tethering_stats_hw(
 	struct wan_ioctl_query_tether_stats *data, bool reset)
 {
-	int rc = 0;
+	int rc = 0, index = 0;
 	struct ipa_quota_stats_all *con_stats;
+	enum ipa_client_type wlan_client;
 
 	/* qet HW-stats */
 	rc = ipa_get_teth_stats();
@@ -3616,17 +3407,36 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 		IPAWANERR("no memory\n");
 		return -ENOMEM;
 	}
-	rc = ipa_query_teth_stats(IPA_CLIENT_Q6_WAN_PROD, con_stats, reset);
-	if (rc) {
-		IPAERR("IPA_CLIENT_Q6_WAN_PROD query failed %d,\n", rc);
-		kfree(con_stats);
-		return rc;
+
+	if (rmnet_ipa3_ctx->ipa_config_is_apq) {
+		rc = ipa_query_teth_stats(IPA_CLIENT_MHI_PRIME_TETH_PROD,
+			con_stats, reset);
+		if (rc) {
+			IPAERR("MHI_PRIME_TETH_PROD query failed %d,\n", rc);
+			kfree(con_stats);
+			return rc;
+		}
+	} else {
+		rc = ipa_query_teth_stats(IPA_CLIENT_Q6_WAN_PROD,
+			con_stats, reset);
+		if (rc) {
+			IPAERR("IPA_CLIENT_Q6_WAN_PROD query failed %d,\n", rc);
+			kfree(con_stats);
+			return rc;
+		}
 	}
-	IPAWANDBG("wlan: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
-	con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_pkts,
-	con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_bytes,
-	con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_pkts,
-	con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_bytes);
+
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+		wlan_client = IPA_CLIENT_WLAN2_CONS;
+	else
+		wlan_client = IPA_CLIENT_WLAN1_CONS;
+
+	IPAWANDBG("wlan: v4_rx_p-b(%d,%lld) v6_rx_p-b(%d,%lld),client(%d)\n",
+	con_stats->client[wlan_client].num_ipv4_pkts,
+	con_stats->client[wlan_client].num_ipv4_bytes,
+	con_stats->client[wlan_client].num_ipv6_pkts,
+	con_stats->client[wlan_client].num_ipv6_bytes,
+	wlan_client);
 
 	IPAWANDBG("usb: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
 	con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts,
@@ -3636,16 +3446,16 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 
 	/* update the DL stats */
 	data->ipv4_rx_packets =
-		con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_pkts +
+		con_stats->client[wlan_client].num_ipv4_pkts +
 			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts;
 	data->ipv6_rx_packets =
-		con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_pkts +
+		con_stats->client[wlan_client].num_ipv6_pkts +
 			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts;
 	data->ipv4_rx_bytes =
-		con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_bytes +
+		con_stats->client[wlan_client].num_ipv4_bytes +
 			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes;
 	data->ipv6_rx_bytes =
-		con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_bytes +
+		con_stats->client[wlan_client].num_ipv6_bytes +
 			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes;
 
 	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
@@ -3663,46 +3473,63 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 		return rc;
 	}
 
+	if (rmnet_ipa3_ctx->ipa_config_is_apq)
+		index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+	else
+		index = IPA_CLIENT_Q6_WAN_CONS;
+
 	IPAWANDBG("usb: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes);
+	con_stats->client[index].num_ipv4_pkts,
+	con_stats->client[index].num_ipv4_bytes,
+	con_stats->client[index].num_ipv6_pkts,
+	con_stats->client[index].num_ipv6_bytes);
 
 	/* update the USB UL stats */
 	data->ipv4_tx_packets =
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts;
+		con_stats->client[index].num_ipv4_pkts;
 	data->ipv6_tx_packets =
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts;
+		con_stats->client[index].num_ipv6_pkts;
 	data->ipv4_tx_bytes =
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes;
+		con_stats->client[index].num_ipv4_bytes;
 	data->ipv6_tx_bytes =
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes;
+		con_stats->client[index].num_ipv6_bytes;
 
 	/* query WLAN UL stats */
 	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
-	rc = ipa_query_teth_stats(IPA_CLIENT_WLAN1_PROD, con_stats, reset);
+
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+		rc = ipa_query_teth_stats(IPA_CLIENT_WLAN2_PROD,
+			con_stats, reset);
+	else
+		rc = ipa_query_teth_stats(IPA_CLIENT_WLAN1_PROD,
+			con_stats, reset);
+
 	if (rc) {
-		IPAERR("IPA_CLIENT_WLAN1_PROD query failed %d\n", rc);
+		IPAERR("IPA_CLIENT_WLAN_PROD query failed %d\n", rc);
 		kfree(con_stats);
 		return rc;
 	}
 
-	IPAWANDBG("wlan: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts,
-	con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes);
+	if (rmnet_ipa3_ctx->ipa_config_is_apq)
+		index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+	else
+		index = IPA_CLIENT_Q6_WAN_CONS;
+
+	IPAWANDBG("wlan1: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+	con_stats->client[index].num_ipv4_pkts,
+	con_stats->client[index].num_ipv4_bytes,
+	con_stats->client[index].num_ipv6_pkts,
+	con_stats->client[index].num_ipv6_bytes);
 
 	/* update the wlan UL stats */
 	data->ipv4_tx_packets +=
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts;
+		con_stats->client[index].num_ipv4_pkts;
 	data->ipv6_tx_packets +=
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts;
+		con_stats->client[index].num_ipv6_pkts;
 	data->ipv4_tx_bytes +=
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes;
+		con_stats->client[index].num_ipv4_bytes;
 	data->ipv6_tx_bytes +=
-		con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes;
+		con_stats->client[index].num_ipv6_bytes;
 
 	IPAWANDBG("v4_tx_p(%lu) v6_tx_p(%lu) v4_tx_b(%lu) v6_tx_b(%lu)\n",
 		(unsigned long) data->ipv4_tx_packets,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 15eb7f8..bad741f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -382,7 +382,8 @@ static long ipa3_wan_ioctl(struct file *filp,
 			break;
 		}
 
-		if (ipa_mpm_notify_wan_state()) {
+		if (ipa_mpm_notify_wan_state(
+			(struct wan_ioctl_notify_wan_state *)param)) {
 			IPAWANERR("WAN_IOC_NOTIFY_WAN_STATE failed\n");
 			retval = -EPERM;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index d9e93ed..6bb8f96 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -124,7 +124,7 @@ int ipa3_teth_bridge_get_pm_hdl(void)
 		return -EINVAL;
 	}
 
-	TETH_DBG("Return rm-handle %d\n", ipa3_teth_ctx->modem_pm_hdl);
+	TETH_DBG("Return pm-handle %d\n", ipa3_teth_ctx->modem_pm_hdl);
 	TETH_DBG_FUNC_EXIT();
 	return ipa3_teth_ctx->modem_pm_hdl;
 }
@@ -137,20 +137,15 @@ int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
 	int res = 0;
 
 	TETH_DBG_FUNC_ENTRY();
-	if (ipa_pm_is_used()) {
-		res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
-		if (res) {
-			TETH_ERR("fail to deactivate modem %d\n", res);
-			return res;
-		}
-		res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
-		ipa3_teth_ctx->modem_pm_hdl = ~0;
-	} else {
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
-					IPA_RM_RESOURCE_Q6_CONS);
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-					IPA_RM_RESOURCE_USB_CONS);
+	res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
+
+	if (res) {
+		TETH_ERR("fail to deactivate modem %d\n", res);
+		return res;
 	}
+
+	res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
+	ipa3_teth_ctx->modem_pm_hdl = ~0;
 	TETH_DBG_FUNC_EXIT();
 
 	return res;
@@ -174,49 +169,21 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
 
 	TETH_DBG_FUNC_ENTRY();
 
-	if (ipa_pm_is_used()) {
-		reg_params.name = "MODEM (USB RMNET)";
-		reg_params.group = IPA_PM_GROUP_MODEM;
-		reg_params.skip_clk_vote = true;
-		res = ipa_pm_register(&reg_params,
-			&ipa3_teth_ctx->modem_pm_hdl);
-		if (res) {
-			TETH_ERR("fail to register with PM %d\n", res);
-			return res;
-		}
-
-		res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
-		goto bail;
+	reg_params.name = "MODEM (USB RMNET)";
+	reg_params.group = IPA_PM_GROUP_MODEM;
+	reg_params.skip_clk_vote = true;
+	res = ipa_pm_register(&reg_params,
+		&ipa3_teth_ctx->modem_pm_hdl);
+	if (res) {
+		TETH_ERR("fail to register with PM %d\n", res);
+		return res;
 	}
+	/* Vote for turbo in case of MHIP channels */
+	if (ipa3_is_apq())
+		res = ipa_pm_set_throughput(ipa3_teth_ctx->modem_pm_hdl,
+			5200);
+	res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
 
-	/* Build the dependency graph, first add_dependency call is sync
-	 * in order to make sure the IPA clocks are up before we continue
-	 * and notify the USB driver it may continue.
-	 */
-	res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
-				    IPA_RM_RESOURCE_Q6_CONS);
-	if (res < 0) {
-		TETH_ERR("ipa_rm_add_dependency() failed.\n");
-		goto bail;
-	}
-
-	/* this add_dependency call can't be sync since it will block until USB
-	 * status is connected (which can happen only after the tethering
-	 * bridge is connected), the clocks are already up so the call doesn't
-	 * need to block.
-	 */
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-				    IPA_RM_RESOURCE_USB_CONS);
-	if (res < 0 && res != -EINPROGRESS) {
-		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
-					IPA_RM_RESOURCE_Q6_CONS);
-		TETH_ERR("ipa_rm_add_dependency() failed.\n");
-		goto bail;
-	}
-
-	res = 0;
-
-bail:
 	TETH_DBG_FUNC_EXIT();
 	return res;
 }
diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
index b8b3607..cd0c293 100644
--- a/drivers/platform/msm/ipa/test/ipa_pm_ut.c
+++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa.h>
@@ -35,15 +35,13 @@ static int ipa_pm_ut_setup(void **ppriv)
 	/*decouple PM from RPM */
 	ipa3_ctx->enable_clock_scaling = false;
 
-	if (ipa3_ctx->use_ipa_pm) {
-		for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
-			ipa_pm_deactivate_sync(i);
-			ipa_pm_deregister(i);
-		}
-
-		ipa_pm_destroy();
+	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+		ipa_pm_deactivate_sync(i);
+		ipa_pm_deregister(i);
 	}
 
+	ipa_pm_destroy();
+
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
index 2339c00..c1e450d 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -1,18 +1,27 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_ut_framework.h"
+#include "../ipa_v3/ipa_i.h"
 #include <linux/netdevice.h>
 
 struct ipa_test_hw_stats_ctx {
 	u32 odu_prod_hdl;
 	u32 odu_cons_hdl;
 	u32 rt4_usb;
+	u32 rt4_usb_cnt_id;
 	u32 rt6_usb;
+	u32 rt6_usb_cnt_id;
 	u32 rt4_odu_cons;
+	u32 rt4_odu_cnt_id;
 	u32 rt6_odu_cons;
+	u32 rt6_odu_cnt_id;
+	u32 flt4_usb_cnt_id;
+	u32 flt6_usb_cnt_id;
+	u32 flt4_odu_cnt_id;
+	u32 flt6_odu_cnt_id;
 	atomic_t odu_pending;
 };
 
@@ -118,25 +127,83 @@ static int ipa_test_hw_stats_configure(void *priv)
 
 static int ipa_test_hw_stats_add_FnR(void *priv)
 {
-	struct ipa_ioc_add_rt_rule *rt_rule;
-	struct ipa_ioc_add_flt_rule *flt_rule;
+	struct ipa_ioc_add_rt_rule_v2 *rt_rule;
+	struct ipa_ioc_add_flt_rule_v2 *flt_rule;
 	struct ipa_ioc_get_rt_tbl rt_lookup;
+	struct ipa_ioc_flt_rt_counter_alloc *counter = NULL;
+	struct ipa_ioc_flt_rt_query *query;
+	int pyld_size = 0;
 	int ret;
-
-	rt_rule = kzalloc(sizeof(*rt_rule) + 1 * sizeof(struct ipa_rt_rule_add),
-		GFP_KERNEL);
+	rt_rule = kzalloc(sizeof(*rt_rule), GFP_KERNEL);
 	if (!rt_rule) {
 		IPA_UT_DBG("no mem\n");
 		return -ENOMEM;
 	}
+	rt_rule->rules = (uint64_t)kzalloc(1 *
+		sizeof(struct ipa_rt_rule_add_v2), GFP_KERNEL);
+	if (!rt_rule->rules) {
+		IPA_UT_DBG("no mem\n");
+		ret = -ENOMEM;
+		goto free_rt;
+	}
 
-	flt_rule = kzalloc(sizeof(*flt_rule) +
-		1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+	flt_rule = kzalloc(sizeof(*flt_rule), GFP_KERNEL);
 	if (!flt_rule) {
 		IPA_UT_DBG("no mem\n");
 		ret = -ENOMEM;
 		goto free_rt;
 	}
+	flt_rule->rules = (uint64_t)kzalloc(1 *
+		sizeof(struct ipa_flt_rule_add_v2), GFP_KERNEL);
+	if (!flt_rule->rules) {
+		ret = -ENOMEM;
+		goto free_flt;
+	}
+
+	counter = kzalloc(sizeof(struct ipa_ioc_flt_rt_counter_alloc),
+					  GFP_KERNEL);
+	if (!counter) {
+		ret = -ENOMEM;
+		goto free_flt;
+	}
+	counter->hw_counter.num_counters = 8;
+	counter->sw_counter.num_counters = 1;
+
+	/* allocate counters */
+	ret = ipa3_alloc_counter_id(counter);
+	if (ret < 0) {
+		IPA_UT_DBG("ipa3_alloc_counter_id fails\n");
+		ret = -ENOMEM;
+		goto free_counter;
+	}
+
+	/* initially clean all allocated counters */
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
+		GFP_KERNEL);
+	if (!query) {
+		ret = -ENOMEM;
+		goto free_counter;
+	}
+	query->start_id = counter->hw_counter.start_id;
+	query->end_id = counter->hw_counter.start_id +
+		counter->hw_counter.num_counters - 1;
+	query->reset = true;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		ret = -ENOMEM;
+		goto free_query;
+	}
+	ipa_get_flt_rt_stats(query);
+
+	query->start_id = counter->sw_counter.start_id;
+	query->end_id = counter->sw_counter.start_id +
+		counter->sw_counter.num_counters - 1;
+	query->reset = true;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	ipa_get_flt_rt_stats(query);
 
 	rt_rule->commit = 1;
 	rt_rule->ip = IPA_IP_v4;
@@ -145,23 +212,33 @@ static int ipa_test_hw_stats_add_FnR(void *priv)
 		IPA_RESOURCE_NAME_MAX);
 	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
 	rt_rule->num_rules = 1;
-	rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
-	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	rt_rule->rules[0].rule.attrib.dst_port = 5002;
-	rt_rule->rules[0].rule.hashable = true;
-	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.dst = IPA_CLIENT_USB_CONS;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.hashable = true;
+	ctx->rt4_usb_cnt_id = counter->hw_counter.start_id;
+	IPA_UT_INFO("rt4_usb_cnt_id %u\n", ctx->rt4_usb_cnt_id);
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.cnt_idx = ctx->rt4_usb_cnt_id;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_rt_rule_v2(rt_rule) || ((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	if (ipa_get_rt_tbl(&rt_lookup)) {
 		IPA_UT_ERR("failed to query V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	ctx->rt4_usb = rt_lookup.hdl;
 
-	memset(rt_rule, 0, sizeof(*rt_rule));
 	rt_rule->commit = 1;
 	rt_rule->ip = IPA_IP_v6;
 	rt_lookup.ip = rt_rule->ip;
@@ -169,23 +246,33 @@ static int ipa_test_hw_stats_add_FnR(void *priv)
 		IPA_RESOURCE_NAME_MAX);
 	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
 	rt_rule->num_rules = 1;
-	rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
-	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	rt_rule->rules[0].rule.attrib.dst_port = 5002;
-	rt_rule->rules[0].rule.hashable = true;
-	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.dst = IPA_CLIENT_USB_CONS;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.hashable = true;
+	ctx->rt6_usb_cnt_id = counter->hw_counter.start_id + 1;
+	IPA_UT_INFO("rt6_usb_cnt_id %u\n", ctx->rt6_usb_cnt_id);
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.cnt_idx = ctx->rt6_usb_cnt_id;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_rt_rule_v2(rt_rule) || ((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	if (ipa_get_rt_tbl(&rt_lookup)) {
 		IPA_UT_ERR("failed to query V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	ctx->rt6_usb = rt_lookup.hdl;
 
-	memset(rt_rule, 0, sizeof(*rt_rule));
 	rt_rule->commit = 1;
 	rt_rule->ip = IPA_IP_v4;
 	rt_lookup.ip = rt_rule->ip;
@@ -193,22 +280,33 @@ static int ipa_test_hw_stats_add_FnR(void *priv)
 		IPA_RESOURCE_NAME_MAX);
 	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
 	rt_rule->num_rules = 1;
-	rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
-	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	rt_rule->rules[0].rule.attrib.dst_port = 5002;
-	rt_rule->rules[0].rule.hashable = true;
-	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.hashable = true;
+	ctx->rt4_odu_cnt_id = counter->hw_counter.start_id + 2;
+	IPA_UT_INFO("rt4_odu_cnt_id %u\n", ctx->rt4_odu_cnt_id);
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.cnt_idx = ctx->rt4_odu_cnt_id;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_rt_rule_v2(rt_rule) || ((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	if (ipa_get_rt_tbl(&rt_lookup)) {
 		IPA_UT_ERR("failed to query V4 rules\n");
-		return -EFAULT;
+		ret = -EFAULT;
+		goto free_query;
 	}
 	ctx->rt4_odu_cons = rt_lookup.hdl;
 
-	memset(rt_rule, 0, sizeof(*rt_rule));
 	rt_rule->commit = 1;
 	rt_rule->ip = IPA_IP_v6;
 	rt_lookup.ip = rt_rule->ip;
@@ -216,19 +314,30 @@ static int ipa_test_hw_stats_add_FnR(void *priv)
 		IPA_RESOURCE_NAME_MAX);
 	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
 	rt_rule->num_rules = 1;
-	rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
-	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	rt_rule->rules[0].rule.attrib.dst_port = 5002;
-	rt_rule->rules[0].rule.hashable = true;
-	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.hashable = true;
+	ctx->rt6_odu_cnt_id = counter->hw_counter.start_id + 3;
+	IPA_UT_INFO("rt6_odu_cnt_id %u\n", ctx->rt6_odu_cnt_id);
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.cnt_idx = ctx->rt6_odu_cnt_id;
+	((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_rt_rule_v2(rt_rule) || ((struct ipa_rt_rule_add_v2 *)
+	rt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	if (ipa_get_rt_tbl(&rt_lookup)) {
 		IPA_UT_ERR("failed to query V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 	ctx->rt6_odu_cons = rt_lookup.hdl;
 
@@ -236,78 +345,387 @@ static int ipa_test_hw_stats_add_FnR(void *priv)
 	flt_rule->ip = IPA_IP_v4;
 	flt_rule->ep = IPA_CLIENT_USB_PROD;
 	flt_rule->num_rules = 1;
-	flt_rule->rules[0].at_rear = 1;
-	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
-	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	flt_rule->rules[0].rule.attrib.dst_port = 5002;
-	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons;
-	flt_rule->rules[0].rule.hashable = 1;
-	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].at_rear = 0;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.action = IPA_PASS_TO_ROUTING;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.hashable = 1;
+	ctx->flt4_usb_cnt_id = counter->hw_counter.start_id + 4;
+	IPA_UT_INFO("flt4_usb_cnt_id %u\n", ctx->flt4_usb_cnt_id);
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.cnt_idx = ctx->flt4_usb_cnt_id;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_flt_rule_v2(flt_rule) || ((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 
-	memset(flt_rule, 0, sizeof(*flt_rule));
 	flt_rule->commit = 1;
 	flt_rule->ip = IPA_IP_v6;
 	flt_rule->ep = IPA_CLIENT_USB_PROD;
 	flt_rule->num_rules = 1;
-	flt_rule->rules[0].at_rear = 1;
-	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
-	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	flt_rule->rules[0].rule.attrib.dst_port = 5002;
-	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons;
-	flt_rule->rules[0].rule.hashable = 1;
-	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].at_rear = 0;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.action = IPA_PASS_TO_ROUTING;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.hashable = 1;
+	ctx->flt6_usb_cnt_id = counter->hw_counter.start_id + 5;
+	IPA_UT_INFO("flt6_usb_cnt_id %u\n", ctx->flt6_usb_cnt_id);
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.cnt_idx = ctx->flt6_usb_cnt_id;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_flt_rule_v2(flt_rule) || ((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V6 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 
-	memset(flt_rule, 0, sizeof(*flt_rule));
 	flt_rule->commit = 1;
 	flt_rule->ip = IPA_IP_v4;
 	flt_rule->ep = IPA_CLIENT_ODU_PROD;
 	flt_rule->num_rules = 1;
-	flt_rule->rules[0].at_rear = 1;
-	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
-	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	flt_rule->rules[0].rule.attrib.dst_port = 5002;
-	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_usb;
-	flt_rule->rules[0].rule.hashable = 1;
-	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].at_rear = 0;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.action = IPA_PASS_TO_ROUTING;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.rt_tbl_hdl = ctx->rt4_usb;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.hashable = 1;
+	ctx->flt4_odu_cnt_id = counter->hw_counter.start_id + 6;
+	IPA_UT_INFO("flt4_odu_cnt_id %u\n", ctx->flt4_odu_cnt_id);
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.cnt_idx = ctx->flt4_odu_cnt_id;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_flt_rule_v2(flt_rule) || ((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V4 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
 
-	memset(flt_rule, 0, sizeof(*flt_rule));
 	flt_rule->commit = 1;
 	flt_rule->ip = IPA_IP_v6;
 	flt_rule->ep = IPA_CLIENT_ODU_PROD;
 	flt_rule->num_rules = 1;
-	flt_rule->rules[0].at_rear = 1;
-	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
-	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
-	flt_rule->rules[0].rule.attrib.dst_port = 5002;
-	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_usb;
-	flt_rule->rules[0].rule.hashable = 1;
-	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].at_rear = 0;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.action = IPA_PASS_TO_ROUTING;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.attrib.dst_port = 5002;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.rt_tbl_hdl = ctx->rt6_usb;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.hashable = 1;
+	ctx->flt6_odu_cnt_id = counter->hw_counter.start_id + 7;
+	IPA_UT_INFO("flt6_odu_cnt_id %u\n", ctx->flt6_odu_cnt_id);
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.cnt_idx = ctx->flt6_odu_cnt_id;
+	((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].rule.enable_stats = true;
+	if (ipa_add_flt_rule_v2(flt_rule) || ((struct ipa_flt_rule_add_v2 *)
+	flt_rule->rules)[0].status) {
 		IPA_UT_ERR("failed to install V6 rules\n");
 		ret = -EFAULT;
-		goto free_flt;
+		goto free_query;
 	}
-
 	IPA_UT_INFO(
 		"Rules added. Please start data transfer on ports 5001/5002\n");
 	ret = 0;
+
+free_query:
+	kfree((void *)(query->stats));
+	kfree(query);
+free_counter:
+	kfree(counter);
 free_flt:
+	kfree((void *)(flt_rule->rules));
 	kfree(flt_rule);
 free_rt:
+	kfree((void *)(rt_rule->rules));
 	kfree(rt_rule);
 	return ret;
+}
 
+static int ipa_test_hw_stats_query_FnR_one_by_one(void *priv)
+{
+	int ret;
+	struct ipa_ioc_flt_rt_query *query;
+	int pyld_size = 0;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query), GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+	/* query 1 by 1 */
+	IPA_UT_INFO("========query 1 by 1========\n");
+	query->start_id = ctx->rt4_usb_cnt_id;
+	query->end_id = ctx->rt4_usb_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"usb v4 route counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->rt4_usb_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->rt6_usb_cnt_id;
+	query->end_id = ctx->rt6_usb_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"usb v6 route counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->rt6_usb_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->rt4_odu_cnt_id;
+	query->end_id = ctx->rt4_odu_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"odu v4 route counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->rt4_odu_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->rt6_odu_cnt_id;
+	query->end_id = ctx->rt6_odu_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"odu v6 route counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->rt6_odu_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->flt4_usb_cnt_id;
+	query->end_id = ctx->flt4_usb_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"usb v4 filter counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->flt4_usb_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->flt6_usb_cnt_id;
+	query->end_id = ctx->flt6_usb_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"usb v6 filter counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->flt6_usb_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->flt4_odu_cnt_id;
+	query->end_id = ctx->flt4_odu_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"odu v4 filter counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->flt4_odu_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+
+	query->start_id = ctx->flt6_odu_cnt_id;
+	query->end_id = ctx->flt6_odu_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	IPA_UT_INFO(
+		"odu v6 filter counter %u pkt_cnt %u bytes cnt %llu\n",
+		ctx->flt6_odu_cnt_id, ((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_pkts,
+		((struct ipa_flt_rt_stats *)
+		query->stats)[0].num_bytes);
+	IPA_UT_INFO("================ done ============\n");
+
+	ret = 0;
+	kfree((void *)(query->stats)); kfree(query);
+	return ret;
+}
+
+static int ipa_test_hw_stats_query_FnR_one_shot(void *priv)
+{
+	int ret, i, start = 0;
+	struct ipa_ioc_flt_rt_query *query;
+	int pyld_size = 0;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query), GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	/* query all together */
+	IPA_UT_INFO("========query all together========\n");
+	query->start_id = ctx->rt4_usb_cnt_id;
+	query->end_id = ctx->flt6_odu_cnt_id;
+	ipa_get_flt_rt_stats(query);
+	start = 0;
+	for (i = ctx->rt4_usb_cnt_id;
+		i <= ctx->flt6_odu_cnt_id; i++) {
+		IPA_UT_INFO(
+			"counter %u pkt_cnt %u bytes cnt %llu\n",
+			i, ((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_pkts,
+			((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_bytes);
+		start++;
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	ret = 0;
+	kfree((void *)(query->stats));
+	kfree(query);
+	return ret;
+}
+
+static int ipa_test_hw_stats_query_FnR_clean(void *priv)
+{
+	int ret, i, start = 0;
+	struct ipa_ioc_flt_rt_query *query;
+	int pyld_size = 0;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query), GFP_KERNEL);
+	if (!query) {
+		IPA_UT_DBG("no mem\n");
+		return -ENOMEM;
+	}
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	/* query and reset */
+	IPA_UT_INFO("========query and reset========\n");
+	query->start_id = ctx->rt4_usb_cnt_id;
+	query->reset = true;
+	query->end_id = ctx->flt6_odu_cnt_id;
+	start = 0;
+	ipa_get_flt_rt_stats(query);
+	for (i = ctx->rt4_usb_cnt_id;
+		i <= ctx->flt6_odu_cnt_id; i++) {
+		IPA_UT_INFO(
+			"counter %u pkt_cnt %u bytes cnt %llu\n",
+			i, ((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_pkts,
+			((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_bytes);
+		start++;
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	ret = 0;
+	kfree((void *)(query->stats));
+	kfree(query);
+	return ret;
+}
+
+
+static int ipa_test_hw_stats_query_sw_stats(void *priv)
+{
+	int ret, i, start = 0;
+	struct ipa_ioc_flt_rt_query *query;
+	int pyld_size = 0;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query), GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	/* query all together */
+	IPA_UT_INFO("========query all SW counters========\n");
+	query->start_id = IPA_FLT_RT_HW_COUNTER + 1;
+	query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
+	ipa_get_flt_rt_stats(query);
+	start = 0;
+	for (i = IPA_FLT_RT_HW_COUNTER + 1;
+		i <= IPA_MAX_FLT_RT_CNT_INDEX; i++) {
+		IPA_UT_INFO(
+			"counter %u pkt_cnt %u bytes cnt %llu\n",
+			i, ((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_pkts,
+			((struct ipa_flt_rt_stats *)
+			query->stats)[start].num_bytes);
+		start++;
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	ret = 0;
+	kfree((void *)(query->stats));
+	kfree(query);
+	return ret;
+}
+
+static int ipa_test_hw_stats_set_sw_stats(void *priv)
+{
+	int ret = 0, i, start = 0;
+	struct ipa_flt_rt_stats stats = {0};
+
+	/* set sw counters */
+	IPA_UT_INFO("========set all SW counters========\n");
+	for (i = IPA_FLT_RT_HW_COUNTER + 1;
+		i <= IPA_MAX_FLT_RT_CNT_INDEX; i++) {
+		stats.num_bytes = start;
+		stats.num_pkts_hash = start + 10;
+		stats.num_pkts = start + 100;
+		IPA_UT_INFO(
+			"set counter %u pkt_cnt %u bytes cnt %llu\n",
+			i, stats.num_pkts, stats.num_bytes);
+		ipa_set_flt_rt_stats(i, stats);
+		start++;
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	return ret;
 }
 
 /* Suite definition block */
@@ -318,6 +736,26 @@ IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
 		ipa_test_hw_stats_configure, false, IPA_HW_v4_0, IPA_HW_MAX),
 
 	IPA_UT_ADD_TEST(add_rules, "Add FLT and RT rules",
-		ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_0, IPA_HW_MAX),
+		ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(query_stats_one_by_one, "Query one by one",
+		ipa_test_hw_stats_query_FnR_one_by_one, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(query_stats_one_shot, "Query one shot",
+		ipa_test_hw_stats_query_FnR_one_shot, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(query_stats_one_shot_clean, "Query and clean",
+		ipa_test_hw_stats_query_FnR_clean, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(query_sw_stats, "Query SW stats",
+		ipa_test_hw_stats_query_sw_stats, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(set_sw_stats, "Set SW stats to dummy values",
+		ipa_test_hw_stats_set_sw_stats, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
 
 } IPA_UT_DEFINE_SUITE_END(hw_stats);
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 5d0d4f2..acfecd1 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -119,9 +119,6 @@ struct geni_se_device {
 	bool vote_for_bw;
 };
 
-/* Offset of QUPV3 Hardware Version Register */
-#define QUPV3_HW_VER (0x4)
-
 #define HW_VER_MAJOR_MASK GENMASK(31, 28)
 #define HW_VER_MAJOR_SHFT 28
 #define HW_VER_MINOR_MASK GENMASK(27, 16)
@@ -338,14 +335,16 @@ static int geni_se_select_fifo_mode(void __iomem *base)
 			M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
 		common_geni_s_irq_en |= S_CMD_DONE_EN;
 	}
+
+	if (proto == I3C)
+		common_geni_m_irq_en |=  (M_GP_SYNC_IRQ_0_EN | M_SEC_IRQ_EN);
+
 	geni_dma_mode &= ~GENI_DMA_MODE_EN;
 
 	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
 	geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
 	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
 
-	if (proto == I3C)
-		geni_write_reg(0x3, base, GENI_I3C_IBI_LEGACY);
 	return 0;
 }
 
@@ -1575,6 +1574,8 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base,
 {
 	u32 m_cmd0 = 0;
 	u32 m_irq_status = 0;
+	u32 s_cmd0 = 0;
+	u32 s_irq_status = 0;
 	u32 geni_status = 0;
 	u32 geni_ios = 0;
 	u32 dma_rx_irq = 0;
@@ -1601,10 +1602,12 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base,
 	}
 	m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0);
 	m_irq_status = geni_read_reg(base, SE_GENI_M_IRQ_STATUS);
+	s_cmd0 = geni_read_reg(base, SE_GENI_S_CMD0);
+	s_irq_status = geni_read_reg(base, SE_GENI_S_IRQ_STATUS);
 	geni_status = geni_read_reg(base, SE_GENI_STATUS);
 	geni_ios = geni_read_reg(base, SE_GENI_IOS);
-	dma_rx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
-	dma_tx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
+	dma_tx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
+	dma_rx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
 	rx_fifo_status = geni_read_reg(base, SE_GENI_RX_FIFO_STATUS);
 	tx_fifo_status = geni_read_reg(base, SE_GENI_TX_FIFO_STATUS);
 	se_dma_dbg = geni_read_reg(base, SE_DMA_DEBUG_REG0);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 06978c1..3433986 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -532,7 +532,7 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
 		return;
 	}
 
-	dell_fill_request(&buffer, 0, 0x2, 0, 0);
+	dell_fill_request(&buffer, 0x2, 0, 0, 0);
 	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 	hwswitch = buffer.output[1];
 
@@ -563,7 +563,7 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
 		return ret;
 	status = buffer.output[1];
 
-	dell_fill_request(&buffer, 0, 0x2, 0, 0);
+	dell_fill_request(&buffer, 0x2, 0, 0, 0);
 	hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 	if (hwswitch_ret)
 		return hwswitch_ret;
@@ -648,7 +648,7 @@ static void dell_update_rfkill(struct work_struct *ignored)
 	if (ret != 0)
 		return;
 
-	dell_fill_request(&buffer, 0, 0x2, 0, 0);
+	dell_fill_request(&buffer, 0x2, 0, 0, 0);
 	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 
 	if (ret == 0 && (status & BIT(0)))
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index a473dc5..e89ad49 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_cpu_ids[] = {
 /*
  * Limit ourselves to Cherry Trail for now, until testing shows we
  * need to handle the INT0002 device on Baytrail too.
- *	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	 * Valleyview, Bay Trail *
+ *	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	 * Valleyview, Bay Trail *
  */
 	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
 	{}
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index d79fbf9..5ad4420 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -125,8 +125,8 @@ static const struct mid_pb_ddata mrfld_ddata = {
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
 
 static const struct x86_cpu_id mid_pb_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_PENWELL,		mfld_ddata),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,	mrfld_ddata),
+	ICPU(INTEL_FAM6_ATOM_SALTWELL_MID,		mfld_ddata),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,	mrfld_ddata),
 	{}
 };
 
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index e0dcdb3..088d1c2 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -185,7 +185,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
 	{"CNVI",                BIT(3)},
 	{"UFS0",                BIT(4)},
 	{"EMMC",                BIT(5)},
-	{"Res_6",               BIT(6)},
+	{"SPF",			BIT(6)},
 	{"SBR6",                BIT(7)},
 
 	{"SBR7",                BIT(0)},
@@ -682,7 +682,7 @@ static int __init pmc_core_probe(void)
 	 * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
 	 * in this case.
 	 */
-	if (!pci_dev_present(pmc_pci_ids))
+	if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
 		pmcdev->map = &cnp_reg_map;
 
 	if (lpit_read_residency_count_address(&slp_s0_addr))
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index 1423fa8..b998d7d 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -320,7 +320,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
 
 static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
 	TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
-	TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_apl_debugfs_conf),
+	TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
 	{}
 };
 
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 2f889d6..fcc6bee 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -192,7 +192,7 @@ static struct telemetry_plt_config telem_glk_config = {
 
 static const struct x86_cpu_id telemetry_cpu_ids[] = {
 	TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
-	TELEM_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_glk_config),
+	TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config),
 	{}
 };
 
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 8f018b3..c7039f5 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -17,6 +17,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+	{
+		.ident = "MPL CEC1x",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+		},
+	},
+	{ /*sentinel*/ }
+};
+
 static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
 			  const struct pmc_data *pmc_data)
 {
 	struct platform_device *clkdev;
 	struct pmc_clk_data *clk_data;
+	const struct dmi_system_id *d = dmi_first_match(critclk_systems);
 
 	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
 	if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
 
 	clk_data->base = pmc_regmap; /* offset is added by client */
 	clk_data->clks = pmc_data->clks;
+	if (d) {
+		clk_data->critical = true;
+		pr_info("%s critclks quirk enabled\n", d->ident);
+	}
 
 	clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
 					       PLATFORM_DEVID_NONE,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b205b03..b50f8f7 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
 			}
 			return AE_OK;
 		}
-	default:
-		dprintk("Resource %d isn't an IRQ nor an IO port\n",
-			resource->type);
 
 	case ACPI_RESOURCE_TYPE_END_TAG:
 		return AE_OK;
+
+	default:
+		dprintk("Resource %d isn't an IRQ nor an IO port\n",
+			resource->type);
+		return AE_CTRL_TERMINATE;
+
 	}
-	return AE_CTRL_TERMINATE;
 }
 
 static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index fde08a9..8f85bb4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -79,7 +79,7 @@
 #include <linux/jiffies.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
-#include <linux/pci_ids.h>
+#include <linux/pci.h>
 #include <linux/power_supply.h>
 #include <linux/thinkpad_acpi.h>
 #include <sound/core.h>
@@ -4496,6 +4496,74 @@ static void bluetooth_exit(void)
 	bluetooth_shutdown();
 }
 
+static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+	{
+		.ident = "ThinkPad E485",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+		},
+	},
+	{
+		.ident = "ThinkPad E585",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+		},
+	},
+	{
+		.ident = "ThinkPad A285 - 20MW",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+		},
+	},
+	{
+		.ident = "ThinkPad A285 - 20MX",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+		},
+	},
+	{
+		.ident = "ThinkPad A485 - 20MU",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+		},
+	},
+	{
+		.ident = "ThinkPad A485 - 20MV",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+		},
+	},
+	{}
+};
+
+static const struct pci_device_id fwbug_cards_ids[] __initconst = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
+	{}
+};
+
+
+static int __init have_bt_fwbug(void)
+{
+	/*
+	 * Some AMD based ThinkPads have a firmware bug that calling
+	 * "GBDC" will cause bluetooth on Intel wireless cards blocked
+	 */
+	if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+		vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+			FW_BUG "disable bluetooth subdriver for Intel cards\n");
+		return 1;
+	} else
+		return 0;
+}
+
 static int __init bluetooth_init(struct ibm_init_struct *iibm)
 {
 	int res;
@@ -4508,7 +4576,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
 
 	/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
 	   G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
-	tp_features.bluetooth = hkey_handle &&
+	tp_features.bluetooth = !have_bt_fwbug() && hkey_handle &&
 	    acpi_evalf(hkey_handle, &status, "GBDC", "qd");
 
 	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 735658e..c60659f 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
 	/* Register charger interrupts */
 	for (i = 0; i < CHRG_INTR_END; i++) {
 		pirq = platform_get_irq(info->pdev, i);
+		if (pirq < 0) {
+			dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+			return pirq;
+		}
 		info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
 		if (info->irq[i] < 0) {
 			dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 084c8ba..ab0b6e7 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -696,6 +696,26 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
  */
 static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
 	{
+		/* ACEPC T8 Cherry Trail Z8350 mini PC */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+			/* also match on somewhat unique bios-version */
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+		},
+	},
+	{
+		/* ACEPC T11 Cherry Trail Z8350 mini PC */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+			/* also match on somewhat unique bios-version */
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+		},
+	},
+	{
 		/* Intel Cherry Trail Compute Stick, Windows version */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 98ba078..3bae023 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
 	int avg_current;
 	u32 cc_lsb;
 
+	if (!divider)
+		return 0;
+
 	sample &= 0xffffff;		/* 24-bits, unsigned */
 	offset &= 0x7ff;		/* 10-bits, signed */
 
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 9bb0cb4..2ee04e0c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -456,6 +456,11 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(therm_icl_limit),
 	POWER_SUPPLY_ATTR(dc_reset),
 	POWER_SUPPLY_ATTR(voltage_max_limit),
+	POWER_SUPPLY_ATTR(real_capacity),
+	POWER_SUPPLY_ATTR(force_main_icl),
+	POWER_SUPPLY_ATTR(force_main_fcc),
+	POWER_SUPPLY_ATTR(comp_clamp_level),
+	POWER_SUPPLY_ATTR(adapter_cc_mode),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
@@ -553,15 +558,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 	char *prop_buf;
 	char *attrname;
 
-	dev_dbg(dev, "uevent\n");
-
 	if (!psy || !psy->desc) {
 		dev_dbg(dev, "No power supply yet\n");
 		return ret;
 	}
 
-	dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
 	ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
 	if (ret)
 		return ret;
@@ -597,8 +598,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 			goto out;
 		}
 
-		dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
 		ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
 		kfree(attrname);
 		if (ret)
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 51b5238..eed473e 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -65,4 +65,16 @@
 	  to determine the battery state-of-charge (SOC) and supports other
 	  battery management features.
 
+config HL6111R
+	bool "HL6111R driver"
+	depends on I2C && OF
+	select REGMAP_I2C
+	help
+	  Say Y here to enable the Halo Microelectronics HL6111R driver.
+	  HL6111R is a wireless charging power receiver IC that supports the
+	  A4WP wireless charging power delivery standard. It supports up to
+	  15W, and the output voltage can be programmed with variable step
+	  sizes. The HL6111R has voltage, current and temperature
+	  protection mechanisms, an I2C interface, and a PSNS output.
+
 endmenu
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 9f01da2..4132b7b 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_QPNP_QNOVO5)		+= qpnp-qnovo5.o battery.o pmic-voter.o
 obj-$(CONFIG_QPNP_FG_GEN4)		+= qpnp-fg-gen4.o fg-memif.o fg-util.o fg-alg.o pmic-voter.o
 obj-$(CONFIG_QPNP_QG)			+= qpnp-qg.o pmic-voter.o qg-util.o qg-soc.o qg-sdam.o qg-battery-profile.o qg-profile-lib.o fg-alg.o
+obj-$(CONFIG_HL6111R)			+= hl6111r.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 2331403..47f60f4 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -42,6 +42,7 @@
 #define ICL_LIMIT_VOTER			"ICL_LIMIT_VOTER"
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 #define FCC_VOTER			"FCC_VOTER"
+#define MAIN_FCC_VOTER			"MAIN_FCC_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -60,6 +61,8 @@ struct pl_data {
 	struct votable		*usb_icl_votable;
 	struct votable		*pl_enable_votable_indirect;
 	struct votable		*cp_ilim_votable;
+	struct votable		*cp_disable_votable;
+	struct votable		*fcc_main_votable;
 	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct work_struct	pl_taper_work;
@@ -128,6 +131,37 @@ enum {
 	RESTRICT_CHG_CURRENT,
 	FCC_STEPPING_IN_PROGRESS,
 };
+/*********
+ * HELPER*
+ *********/
+static bool is_cp_available(struct pl_data *chip)
+{
+	if (!chip->cp_master_psy)
+		chip->cp_master_psy =
+			power_supply_get_by_name("charge_pump_master");
+
+	return !!chip->cp_master_psy;
+}
+
+static bool cp_ilim_boost_enabled(struct pl_data *chip)
+{
+	union power_supply_propval pval = {-1, };
+
+	if (is_cp_available(chip))
+		power_supply_get_property(chip->cp_master_psy,
+				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
+
+	return pval.intval == POWER_SUPPLY_PL_OUTPUT_VPH;
+}
+
+static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
+{
+	if (!chip->cp_ilim_votable)
+		chip->cp_ilim_votable = find_votable("CP_ILIM");
+
+	if (!cp_ilim_boost_enabled(chip) && chip->cp_ilim_votable)
+		vote(chip->cp_ilim_votable, voter, true, ilim);
+}
 
 /*******
  * ICL *
@@ -489,10 +523,7 @@ static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
 	union power_supply_propval pval = {0, };
 	int rc;
 
-	if (!chip->cp_master_psy)
-		chip->cp_master_psy =
-			power_supply_get_by_name("charge_pump_master");
-	if (!chip->cp_master_psy)
+	if (!is_cp_available(chip))
 		goto out;
 
 	rc = power_supply_get_property(chip->cp_master_psy,
@@ -527,36 +558,26 @@ static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
 static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 			int parallel_fcc_ua)
 {
-	union power_supply_propval pval = {0, };
-	int rc;
-
 	if (!chip->fcc_step_size_ua) {
 		pr_err("Invalid fcc stepper step size, value 0\n");
 		return;
 	}
 
 	/* Read current FCC of main charger */
-	rc = power_supply_get_property(chip->main_psy,
-		POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
-	if (rc < 0) {
-		pr_err("Couldn't get main charger current fcc, rc=%d\n", rc);
-		return;
-	}
-	chip->main_fcc_ua = pval.intval;
-
-	chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ?
+	chip->main_fcc_ua = get_effective_result(chip->fcc_main_votable);
+	chip->main_step_fcc_dir = (main_fcc_ua > chip->main_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
-	chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) /
+	chip->main_step_fcc_count = abs((main_fcc_ua - chip->main_fcc_ua) /
 				chip->fcc_step_size_ua);
-	chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) %
-				chip->fcc_step_size_ua;
+	chip->main_step_fcc_residual = abs((main_fcc_ua - chip->main_fcc_ua) %
+				chip->fcc_step_size_ua);
 
 	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
 	chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
 				chip->slave_fcc_ua) / chip->fcc_step_size_ua);
-	chip->parallel_step_fcc_residual = (parallel_fcc_ua -
-				chip->slave_fcc_ua) % chip->fcc_step_size_ua;
+	chip->parallel_step_fcc_residual = abs((parallel_fcc_ua -
+				chip->slave_fcc_ua) % chip->fcc_step_size_ua);
 
 	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
 		|| chip->main_step_fcc_count || chip->main_step_fcc_residual)
@@ -663,11 +684,37 @@ static void pl_taper_work(struct work_struct *work)
 	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
 }
 
+static bool is_main_available(struct pl_data *chip)
+{
+	if (chip->main_psy)
+		return true;
+
+	chip->main_psy = power_supply_get_by_name("main");
+
+	return !!chip->main_psy;
+}
+
+static int pl_fcc_main_vote_callback(struct votable *votable, void *data,
+			int fcc_main_ua, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0,};
+
+	if (!is_main_available(chip))
+		return 0;
+
+	pval.intval = fcc_main_ua;
+	return  power_supply_set_property(chip->main_psy,
+			  POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+			  &pval);
+}
+
 static int pl_fcc_vote_callback(struct votable *votable, void *data,
 			int total_fcc_ua, const char *client)
 {
 	struct pl_data *chip = data;
 	int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+	union power_supply_propval pval = {0, };
 
 	if (total_fcc_ua < 0)
 		return 0;
@@ -675,8 +722,26 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 	if (!chip->main_psy)
 		return 0;
 
-	if (!chip->cp_ilim_votable)
-		chip->cp_ilim_votable = find_votable("CP_ILIM");
+	if (!chip->cp_disable_votable)
+		chip->cp_disable_votable = find_votable("CP_DISABLE");
+
+	if (chip->cp_disable_votable) {
+		if (cp_ilim_boost_enabled(chip)) {
+			power_supply_get_property(chip->cp_master_psy,
+					POWER_SUPPLY_PROP_MIN_ICL, &pval);
+			/*
+			 * With ILIM boost feature ILIM configuration is
+			 * independent of battery FCC, disable CP if FCC/2
+			 * falls below MIN_ICL supported by CP.
+			 */
+			if ((total_fcc_ua / 2) < pval.intval)
+				vote(chip->cp_disable_votable, FCC_VOTER,
+						true, 0);
+			else
+				vote(chip->cp_disable_votable, FCC_VOTER,
+						false, 0);
+		}
+	}
 
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
@@ -692,6 +757,13 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 	}
 
 	rerun_election(chip->pl_disable_votable);
+	/* When FCC changes, trigger psy changed event for CC mode */
+	if (!chip->cp_master_psy)
+		chip->cp_master_psy =
+			power_supply_get_by_name("charge_pump_master");
+
+	if (chip->cp_master_psy)
+		power_supply_changed(chip->cp_master_psy);
 
 	return 0;
 }
@@ -748,14 +820,7 @@ static void fcc_stepper_work(struct work_struct *work)
 		}
 
 		main_fcc = get_effective_result_locked(chip->fcc_votable);
-		pval.intval = main_fcc;
-		rc = power_supply_set_property(chip->main_psy,
-			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
-		if (rc < 0) {
-			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
-			goto out;
-		}
-
+		vote(chip->fcc_main_votable, FCC_STEPPER_VOTER, true, main_fcc);
 		goto stepper_exit;
 	}
 
@@ -816,22 +881,10 @@ static void fcc_stepper_work(struct work_struct *work)
 		}
 
 		/* Set main FCC */
-		pval.intval = main_fcc;
-		rc = power_supply_set_property(chip->main_psy,
-			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
-		if (rc < 0) {
-			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
-			goto out;
-		}
+		vote(chip->fcc_main_votable, FCC_STEPPER_VOTER, true, main_fcc);
 	} else {
 		/* Set main FCC */
-		pval.intval = main_fcc;
-		rc = power_supply_set_property(chip->main_psy,
-			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
-		if (rc < 0) {
-			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
-			goto out;
-		}
+		vote(chip->fcc_main_votable, FCC_STEPPER_VOTER, true, main_fcc);
 
 		/* Set parallel FCC */
 		if (chip->pl_psy) {
@@ -874,9 +927,7 @@ static void fcc_stepper_work(struct work_struct *work)
 	chip->main_fcc_ua = main_fcc;
 	chip->slave_fcc_ua = parallel_fcc;
 
-	if (chip->cp_ilim_votable)
-		vote(chip->cp_ilim_votable, FCC_VOTER, true,
-					chip->main_fcc_ua / 2);
+	cp_configure_ilim(chip, FCC_VOTER, chip->main_fcc_ua / 2);
 
 	if (reschedule_ms) {
 		schedule_delayed_work(&chip->fcc_stepper_work,
@@ -993,8 +1044,7 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
-	if (chip->cp_ilim_votable)
-		vote(chip->cp_ilim_votable, ICL_CHANGE_VOTER, true, icl_ua);
+	cp_configure_ilim(chip, ICL_CHANGE_VOTER, icl_ua);
 
 	return 0;
 }
@@ -1020,16 +1070,6 @@ static void pl_awake_work(struct work_struct *work)
 	vote(chip->pl_awake_votable, PL_VOTER, false, 0);
 }
 
-static bool is_main_available(struct pl_data *chip)
-{
-	if (chip->main_psy)
-		return true;
-
-	chip->main_psy = power_supply_get_by_name("main");
-
-	return !!chip->main_psy;
-}
-
 static bool is_batt_available(struct pl_data *chip)
 {
 	if (!chip->batt_psy)
@@ -1141,16 +1181,8 @@ static int pl_disable_vote_callback(struct votable *votable,
 			 *	Set slave ICL then main FCC.
 			 */
 			if (slave_fcc_ua > chip->slave_fcc_ua) {
-				pval.intval = master_fcc_ua;
-				rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-					&pval);
-				if (rc < 0) {
-					pr_err("Could not set main fcc, rc=%d\n",
-						rc);
-					return rc;
-				}
-
+				vote(chip->fcc_main_votable, MAIN_FCC_VOTER,
+							true, master_fcc_ua);
 				pval.intval = slave_fcc_ua;
 				rc = power_supply_set_property(chip->pl_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
@@ -1174,16 +1206,8 @@ static int pl_disable_vote_callback(struct votable *votable,
 				}
 
 				chip->slave_fcc_ua = slave_fcc_ua;
-
-				pval.intval = master_fcc_ua;
-				rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-					&pval);
-				if (rc < 0) {
-					pr_err("Could not set main fcc, rc=%d\n",
-						rc);
-					return rc;
-				}
+				vote(chip->fcc_main_votable, MAIN_FCC_VOTER,
+							true, master_fcc_ua);
 			}
 
 			/*
@@ -1245,18 +1269,9 @@ static int pl_disable_vote_callback(struct votable *votable,
 			}
 
 			/* main psy gets all share */
-			pval.intval = total_fcc_ua;
-			rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-			if (rc < 0) {
-				pr_err("Could not set main fcc, rc=%d\n", rc);
-				return rc;
-			}
-
-			if (chip->cp_ilim_votable)
-				vote(chip->cp_ilim_votable, FCC_VOTER, true,
-						total_fcc_ua / 2);
+			vote(chip->fcc_main_votable, MAIN_FCC_VOTER, true,
+								total_fcc_ua);
+			cp_configure_ilim(chip, FCC_VOTER, total_fcc_ua / 2);
 
 			/* reset parallel FCC */
 			chip->slave_fcc_ua = 0;
@@ -1648,6 +1663,9 @@ static void pl_config_init(struct pl_data *chip, int smb_version)
 		break;
 	case PMI632_SUBTYPE:
 		break;
+	case PM7250B_SUBTYPE:
+		chip->fcc_step_delay_ms = 100;
+		break;
 	default:
 		break;
 	}
@@ -1697,13 +1715,22 @@ int qcom_batt_init(int smb_version)
 	if (!chip->pl_ws)
 		goto cleanup;
 
+	chip->fcc_main_votable = create_votable("FCC_MAIN", VOTE_MIN,
+					pl_fcc_main_vote_callback,
+					chip);
+	if (IS_ERR(chip->fcc_main_votable)) {
+		rc = PTR_ERR(chip->fcc_main_votable);
+		chip->fcc_main_votable = NULL;
+		goto release_wakeup_source;
+	}
+
 	chip->fcc_votable = create_votable("FCC", VOTE_MIN,
 					pl_fcc_vote_callback,
 					chip);
 	if (IS_ERR(chip->fcc_votable)) {
 		rc = PTR_ERR(chip->fcc_votable);
 		chip->fcc_votable = NULL;
-		goto release_wakeup_source;
+		goto destroy_votable;
 	}
 
 	chip->fv_votable = create_votable("FV", VOTE_MIN,
@@ -1799,6 +1826,7 @@ int qcom_batt_init(int smb_version)
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
 	destroy_votable(chip->fcc_votable);
+	destroy_votable(chip->fcc_main_votable);
 	destroy_votable(chip->usb_icl_votable);
 release_wakeup_source:
 	wakeup_source_unregister(chip->pl_ws);
@@ -1826,6 +1854,7 @@ void qcom_batt_deinit(void)
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
 	destroy_votable(chip->fcc_votable);
+	destroy_votable(chip->fcc_main_votable);
 	wakeup_source_unregister(chip->pl_ws);
 	the_chip = NULL;
 	kfree(chip);
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 1a6b8db..77fc187 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -982,7 +982,10 @@ static int get_time_to_full_locked(struct ttf *ttf, int *val)
 
 	/* at least 10 samples are required to produce a stable IBATT */
 	if (ttf->ibatt.size < MAX_TTF_SAMPLES) {
-		*val = -1;
+		if (ttf->clear_ibatt)
+			*val = ttf->last_ttf;
+		else
+			*val = -1;
 		return 0;
 	}
 
@@ -998,6 +1001,7 @@ static int get_time_to_full_locked(struct ttf *ttf, int *val)
 		return rc;
 	}
 
+	ttf->clear_ibatt = false;
 	ibatt_avg = -ibatt_avg / MILLI_UNIT;
 	vbatt_avg /= MILLI_UNIT;
 
@@ -1113,7 +1117,7 @@ static int get_time_to_full_locked(struct ttf *ttf, int *val)
 
 			/* Calculate OCV for each window */
 			if (power_approx) {
-				i_step = pbatt_avg / max((u32)MILLI_UNIT,
+				i_step = pbatt_avg / max(MILLI_UNIT,
 					(step_chg_cfg[i].high_threshold /
 						MILLI_UNIT));
 			} else {
@@ -1303,6 +1307,7 @@ static void ttf_work(struct work_struct *work)
 			pr_debug("Clear Ibatt buffer, Ibatt_avg=%d Ibatt_now=%d\n",
 					ibatt_avg, ibatt_now);
 			ttf_circ_buf_clr(&ttf->ibatt);
+			ttf->clear_ibatt = true;
 		}
 
 		rc = get_time_to_full_locked(ttf, &ttf_now);
@@ -1312,7 +1317,7 @@ static void ttf_work(struct work_struct *work)
 		}
 
 		/* keep the wake lock and prime the IBATT and VBATT buffers */
-		if (ttf_now < 0) {
+		if (ttf_now < 0 || ttf->clear_ibatt) {
 			/* delay for one FG cycle */
 			schedule_delayed_work(&ttf->ttf_work,
 					msecs_to_jiffies(1000));
diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h
index ba7ebd5..43ccdea 100644
--- a/drivers/power/supply/qcom/fg-alg.h
+++ b/drivers/power/supply/qcom/fg-alg.h
@@ -117,6 +117,7 @@ struct ttf {
 	struct range_data	*step_chg_cfg;
 	bool			step_chg_cfg_valid;
 	bool			ocv_step_chg_cfg_valid;
+	bool			clear_ibatt;
 	int			step_chg_num_params;
 	int			mode;
 	int			last_ttf;
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index c771f90..12cc4f6 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -202,9 +202,13 @@ enum fg_sram_param_id {
 	FG_SRAM_KI_COEFF_LOW_DISCHG,
 	FG_SRAM_KI_COEFF_MED_DISCHG,
 	FG_SRAM_KI_COEFF_HI_DISCHG,
+	FG_SRAM_KI_COEFF_LO_MED_DCHG_THR,
+	FG_SRAM_KI_COEFF_MED_HI_DCHG_THR,
 	FG_SRAM_KI_COEFF_LOW_CHG,
 	FG_SRAM_KI_COEFF_MED_CHG,
 	FG_SRAM_KI_COEFF_HI_CHG,
+	FG_SRAM_KI_COEFF_LO_MED_CHG_THR,
+	FG_SRAM_KI_COEFF_MED_HI_CHG_THR,
 	FG_SRAM_KI_COEFF_FULL_SOC,
 	FG_SRAM_KI_COEFF_CUTOFF,
 	FG_SRAM_ESR_TIGHT_FILTER,
diff --git a/drivers/power/supply/qcom/hl6111r.c b/drivers/power/supply/qcom/hl6111r.c
new file mode 100644
index 0000000..6f0d297
--- /dev/null
+++ b/drivers/power/supply/qcom/hl6111r.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"HL6111R: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/debugfs.h>
+#include "hl6111r.h"
+
+static const struct regmap_config chip_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xFF,
+};
+
+struct hl6111r {
+	struct device		*dev;
+	struct regmap		*regmap;
+	struct power_supply	*psy;
+	struct dentry		*dfs_root;
+};
+
+struct vout_range {
+	int min_mv;
+	int step_mv;
+};
+
+struct cc_tuple {
+	int raw;
+	int val_ma;
+};
+
+struct cc_range {
+	struct cc_tuple min;
+	struct cc_tuple max;
+	int step_ma;
+};
+
+/* Utility functions */
+
+static int hl6111r_read(struct hl6111r *chip, u8 addr, u8 *val)
+{
+	int rc = 0;
+	unsigned int value = 0;
+
+	rc = regmap_read(chip->regmap, addr, &value);
+	if (rc < 0)
+		return rc;
+
+	*val = (u8)value;
+
+	pr_debug("read 0x%02x: 0x%02x\n", addr, *val);
+	return rc;
+}
+
+static int hl6111r_write(struct hl6111r *chip, u8 addr, u8 val)
+{
+	int rc;
+
+	rc = regmap_write(chip->regmap, addr, val);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("write 0x%02x: 0x%02x\n", addr, val);
+	return rc;
+}
+
+static int hl6111r_masked_write(struct hl6111r *chip, u8 addr, u8 mask, u8 val)
+{
+	pr_debug("mask %02x write 0x%02x: 0x%02x\n", mask, addr, (val & mask));
+	return regmap_update_bits(chip->regmap, addr, mask, val);
+}
+
+static int is_dc_online(bool *online)
+{
+	int rc;
+	struct power_supply *dc_psy;
+	union power_supply_propval pval = {0, };
+
+	dc_psy = power_supply_get_by_name("dc");
+	if (!dc_psy) {
+		pr_err_ratelimited("DC psy unavailable\n");
+		return -ENODEV;
+	}
+
+	rc = power_supply_get_property(dc_psy, POWER_SUPPLY_PROP_ONLINE,
+			&pval);
+	power_supply_put(dc_psy);	/* drop ref taken by get_by_name() */
+	if (rc < 0)
+		return rc;
+
+	*online = pval.intval;
+	pr_debug("%s\n", (*online ? "yes" : "no"));
+	return 0;
+}
+
+/* Callbacks for gettable properties */
+
+static int hl6111r_get_online(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = hl6111r_read(chip, LATCHED_STATUS_REG, &stat);
+	if (rc < 0)
+		return rc;
+
+	*val = stat & OUT_EN_L_BIT;
+
+	return rc;
+}
+
+static int hl6111r_get_voltage_now(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 raw = 0;
+
+	rc = hl6111r_read(chip, VOUT_NOW_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	*val = raw * VOUT_STEP_UV;
+
+	pr_debug("raw = 0x%02x, scaled = %d mV\n", raw, (*val / 1000));
+
+	return rc;
+}
+
+static int hl6111r_get_current_now(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 raw = 0;
+
+	rc = hl6111r_read(chip, IOUT_NOW_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	*val = raw * IOUT_NOW_STEP_UA;
+
+	pr_debug("raw = 0x%02x, scaled = %d mA\n", raw, (*val / 1000));
+	return rc;
+}
+
+static int hl6111r_get_voltage_avg(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 raw = 0;
+
+	rc = hl6111r_read(chip, VOUT_AVG_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	*val = raw * VOUT_STEP_UV;
+
+	pr_debug("raw = 0x%02x, scaled = %d mV\n", raw, (*val / 1000));
+	return rc;
+}
+
+static int hl6111r_get_current_avg(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 raw = 0;
+
+	rc = hl6111r_read(chip, IOUT_AVG_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	*val = raw * IOUT_AVG_STEP_UA;
+
+	pr_debug("raw = 0x%02x, scaled = %d mA\n", raw, (*val / 1000));
+	return rc;
+}
+
+#define IOUT_MIN_100_MA		100
+#define IOUT_MAX_2200_MA	2200
+#define IOUT_NO_LIMIT_RAW	0x1F
+#define IOUT_NO_LIMIT_VAL	0
+static const struct cc_range hl6111r_cc_range[] = {
+	{
+		.min = {0, IOUT_MIN_100_MA},
+		.max = {0x12, 1000},
+		.step_ma = 50,
+	},
+	{
+		.min = {0x13, 1100},
+		.max = {0x1E, IOUT_MAX_2200_MA},
+		.step_ma = 100,
+	},
+	{
+		/* IOUT_NO_LIMIT */
+		.min = {IOUT_NO_LIMIT_RAW, IOUT_NO_LIMIT_VAL},
+		.max = {IOUT_NO_LIMIT_RAW, IOUT_NO_LIMIT_VAL},
+		.step_ma = INT_MAX,
+	}
+};
+
+static int hl6111r_get_cc_current(struct hl6111r *chip, int *val)
+{
+	int rc, scaled_ma = 0, range = 0, step = 0;
+	u8 raw = 0;
+	const struct cc_range *r;
+
+	rc = hl6111r_read(chip, IOUT_LIM_SEL_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	raw >>= IOUT_LIM_SHIFT;
+
+	if (raw == IOUT_NO_LIMIT_RAW) {
+		/* IOUT_NO_LIMIT */
+		*val = IOUT_NO_LIMIT_VAL;
+		return 0;
+	}
+
+	range = raw / hl6111r_cc_range[1].min.raw;
+	step = raw % hl6111r_cc_range[1].min.raw;
+
+	if (range >= ARRAY_SIZE(hl6111r_cc_range))
+		range = ARRAY_SIZE(hl6111r_cc_range) - 1;
+
+	r = &hl6111r_cc_range[range];
+
+	/* Determine constant current output from range */
+	scaled_ma = r->min.val_ma + (step * r->step_ma);
+
+	/* Return value in uA */
+	*val = scaled_ma * 1000;
+
+	pr_debug("raw = 0x%02x, scaled = %d mA\n", raw, scaled_ma);
+	return rc;
+}
+
+static int hl6111r_get_temp(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 raw = 0;
+
+	rc = hl6111r_read(chip, DIE_TEMP_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	*val = 10 * DIE_TEMP_SCALED_DEG_C(raw);
+
+	pr_debug("raw = 0x%02x, scaled = %d deg C\n", raw, (*val / 10));
+	return rc;
+}
+
+static const struct vout_range hl6111r_vout_range[] = {
+	/* {Range's min value (mV), Range's step size (mV) */
+	{4940, 20},
+	{7410, 30},
+	{9880, 40},
+	{3952, 16}
+};
+
+static int hl6111r_get_vout_target(struct hl6111r *chip, int *val)
+{
+	int rc, vout_target_mv = 0;
+	u8 raw = 0, range, vout_target_raw;
+	bool dc_online = false;
+	const struct vout_range *r;
+
+	*val = 0;
+
+	rc = is_dc_online(&dc_online);
+	if (rc < 0)
+		return rc;
+	if (!dc_online)
+		return 0;
+
+	/* Read range selector register to determine range */
+	rc = hl6111r_read(chip, VOUT_RANGE_SEL_REG, &raw);
+	if (rc < 0)
+		return rc;
+
+	range = (raw & VOUT_RANGE_SEL_MASK) >> VOUT_RANGE_SEL_SHIFT;
+	r = &hl6111r_vout_range[range];
+
+	/* Use range information to calculate voltage */
+	rc = hl6111r_read(chip, VOUT_TARGET_REG, &vout_target_raw);
+	if (rc < 0)
+		return rc;
+
+	vout_target_mv = r->min_mv + (r->step_mv * vout_target_raw);
+
+	*val = (vout_target_mv * 1000);
+
+	return rc;
+}
+
+/* Callbacks for settable properties */
+
+#define HL6111R_MIN_VOLTAGE_UV	4940000
+#define HL6111R_MAX_VOLTAGE_UV	20080000
+static int hl6111r_set_vout_target(struct hl6111r *chip, const int val)
+{
+	int rc, vout_target_uv;
+	u8 vout_target_raw;
+	const struct vout_range *r;
+
+	if (val < HL6111R_MIN_VOLTAGE_UV || val > HL6111R_MAX_VOLTAGE_UV)
+		return -EINVAL;
+
+	/* Clamp to range 0 max (10.04 V) so the u8 raw value can't overflow */
+	vout_target_uv = min(val, 10040000);
+
+	/*
+	 * Next, write to range selector register to set the range.
+	 * Select only range 0 for now.
+	 *	Range 0: V_out = [4.94 V, 10.04V] in steps of 20mV
+	 */
+	rc = hl6111r_write(chip, VOUT_RANGE_SEL_REG, 0);
+	if (rc < 0)
+		return rc;
+
+	r = &hl6111r_vout_range[0];
+
+	vout_target_raw = ((vout_target_uv / 1000) - r->min_mv) / r->step_mv;
+
+	pr_debug("set = %d, raw = 0x%02x\n", vout_target_uv, vout_target_raw);
+	rc = hl6111r_write(chip, VOUT_TARGET_REG, vout_target_raw);
+	return rc;
+}
+
+static int hl6111r_set_cc_current(struct hl6111r *chip, const int val)
+{
+	u8 raw;
+	int rc, tmp_ma, range = 0;
+	const struct cc_range *r;
+	const int max_cc_ranges = ARRAY_SIZE(hl6111r_cc_range) - 1;
+
+	/* Minimum settable cc current = 100 mA */
+	tmp_ma = max(IOUT_MIN_100_MA, (val / 1000));
+
+	/*
+	 * Special case:
+	 *	if tmp_ma is 2200, range will be incorrectly set to 2 according
+	 *	to the range calculation. Correct range to 1 in this case.
+	 */
+	if (tmp_ma == IOUT_MAX_2200_MA)
+		range = 1;
+	else
+		/* Limit max range to 2 */
+		range = min(max_cc_ranges,
+				(tmp_ma / hl6111r_cc_range[1].min.val_ma));
+
+	r = &hl6111r_cc_range[range];
+
+	if (range == (ARRAY_SIZE(hl6111r_cc_range) - 1)) {
+		/* IOUT_NO_LIMIT */
+		raw = IOUT_NO_LIMIT_RAW;
+	} else {
+		raw = r->min.raw +
+			(((tmp_ma - r->min.val_ma) * (r->max.raw - r->min.raw))
+				/ (r->max.val_ma - r->min.val_ma));
+	}
+
+	pr_debug("cc_current = %d mA, unmasked raw = 0x%02x\n", tmp_ma, raw);
+
+	rc = hl6111r_masked_write(chip, IOUT_LIM_SEL_REG, IOUT_LIM_SEL_MASK,
+			(raw << IOUT_LIM_SHIFT));
+
+	return rc;
+}
+
+static enum power_supply_property hl6111r_psy_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+};
+
+static int hl6111r_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *pval)
+{
+	int rc, *val = &pval->intval;
+	struct hl6111r *chip = power_supply_get_drvdata(psy);
+	bool dc_online = false;
+
+	/* Check if DC PSY is online first */
+	rc = is_dc_online(&dc_online);
+	if (!dc_online || rc < 0) {
+		*val = 0;
+		return 0;
+	}
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = hl6111r_get_online(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = hl6111r_get_voltage_now(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = hl6111r_get_current_now(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = hl6111r_get_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		rc = hl6111r_get_voltage_avg(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		rc = hl6111r_get_current_avg(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		*val = HL6111R_MAX_VOLTAGE_UV;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		rc = hl6111r_get_cc_current(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+		rc = hl6111r_get_vout_target(chip, val);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0) {
+		pr_err_ratelimited("property %d unavailable: %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int hl6111r_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int hl6111r_set_prop(struct power_supply *psy,
+		enum power_supply_property prop,
+		const union power_supply_propval *pval)
+{
+	int rc;
+	const int *val = &pval->intval;
+	struct hl6111r *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+		rc = hl6111r_set_vout_target(chip, *val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		rc = hl6111r_set_cc_current(chip, *val);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc hl6111r_psy_desc = {
+	.name = "wireless",
+	.type = POWER_SUPPLY_TYPE_WIRELESS,
+	.properties = hl6111r_psy_props,
+	.num_properties = ARRAY_SIZE(hl6111r_psy_props),
+	.get_property = hl6111r_get_prop,
+	.set_property = hl6111r_set_prop,
+	.property_is_writeable = hl6111r_prop_is_writeable,
+};
+
+static ssize_t irect_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int rc, irect_ua = 0;
+	u8 raw = 0;
+	bool dc_online = false;
+	struct hl6111r *chip = dev_get_drvdata(dev);
+
+	rc = is_dc_online(&dc_online);
+	if (rc < 0 || !dc_online)
+		goto exit;
+
+	rc = hl6111r_read(chip, IRECT_REG, &raw);
+	if (rc < 0)
+		goto exit;
+
+	irect_ua = IRECT_SCALED_UA(raw);
+
+	pr_debug("raw = 0x%02x, scaled = %d mA\n", raw, (irect_ua / 1000));
+exit:
+	return scnprintf(buf, PAGE_SIZE, "%d\n", irect_ua);
+}
+static DEVICE_ATTR_RO(irect);
+
+static ssize_t vrect_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int rc, vrect_uv = 0;
+	u8 raw = 0;
+	bool dc_online = false;
+	struct hl6111r *chip = dev_get_drvdata(dev);
+
+	rc = is_dc_online(&dc_online);
+	if (rc < 0 || !dc_online)
+		goto exit;
+
+	rc = hl6111r_read(chip, VRECT_REG, &raw);
+	if (rc < 0)
+		goto exit;
+
+	vrect_uv = VRECT_SCALED_UV(raw);
+
+	pr_debug("raw = 0x%02x, scaled = %d mV\n",
+			raw, (vrect_uv / 1000));
+exit:
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vrect_uv);
+}
+static DEVICE_ATTR_RO(vrect);
+
+static struct attribute *hl6111r_attrs[] = {
+	&dev_attr_vrect.attr,
+	&dev_attr_irect.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(hl6111r);
+
+static int hl6111r_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+	int rc;
+	struct hl6111r *chip;
+	struct power_supply_config cfg = {0};
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL)
+		return -ENOMEM;
+	chip->dev = &i2c->dev;
+
+	i2c_set_clientdata(i2c, chip);
+
+	chip->regmap = devm_regmap_init_i2c(i2c, &chip_regmap);
+	if (IS_ERR(chip->regmap)) {
+		rc = PTR_ERR(chip->regmap);
+		dev_err(&i2c->dev, "regmap init failed: %d\n", rc);
+		goto cleanup;
+	}
+
+	/* Create PSY */
+	cfg.drv_data = chip;
+	cfg.of_node = chip->dev->of_node;
+
+	chip->psy = devm_power_supply_register(chip->dev, &hl6111r_psy_desc,
+			&cfg);
+
+	if (IS_ERR(chip->psy)) {
+		rc = PTR_ERR(chip->psy);	/* assign first: PTR_ERR is long */
+		dev_err(&i2c->dev, "psy registration failed: %d\n",
+				rc);
+		goto cleanup;
+	}
+
+	/* Create device attributes */
+	rc = sysfs_create_groups(&chip->dev->kobj, hl6111r_groups);
+	if (rc < 0)
+		goto cleanup;
+
+	pr_info("probe successful\n");
+
+	return 0;
+
+cleanup:
+	i2c_set_clientdata(i2c, NULL);
+	return rc;
+}
+
+static int hl6111r_remove(struct i2c_client *i2c)
+{
+	i2c_set_clientdata(i2c, NULL);
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "halo,hl6111r", },
+	{ }
+};
+
+static struct i2c_driver hl6111r_driver = {
+	.driver = {
+		.name = "hl6111r-driver",
+		.of_match_table = match_table,
+	},
+	.probe =    hl6111r_probe,
+	.remove =   hl6111r_remove,
+};
+
+module_i2c_driver(hl6111r_driver);
+
+MODULE_DESCRIPTION("HL6111R driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/hl6111r.h b/drivers/power/supply/qcom/hl6111r.h
new file mode 100644
index 0000000..cd4043e
--- /dev/null
+++ b/drivers/power/supply/qcom/hl6111r.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HL6111R_H__
+#define __HL6111R_H__
+
+/* Register definitions */
+
+#define LATCHED_STATUS_REG	0x00
+#define OUT_EN_L_BIT		BIT(0)
+
+#define VRECT_REG		0x01
+
+#define IRECT_REG		0x02
+
+#define DIE_TEMP_REG		0x03
+
+#define VOUT_TARGET_REG		0x0E
+
+#define IOUT_LIM_SEL_REG	0x28
+#define IOUT_LIM_SEL_MASK	GENMASK(7, 3)
+#define IOUT_LIM_SHIFT		3
+
+#define VOUT_RANGE_SEL_REG	0x30
+#define VOUT_RANGE_SEL_MASK	GENMASK(7, 6)
+#define VOUT_RANGE_SEL_SHIFT	6
+
+#define IOUT_NOW_REG		0x82
+#define IOUT_NOW_STEP_UA	9180
+
+#define VOUT_STEP_UV		93750
+
+#define VOUT_NOW_REG		0x83
+
+#define VOUT_AVG_REG		0x8E
+
+#define IOUT_AVG_REG		0x8F
+#define IOUT_AVG_STEP_UA	9171
+
+/* Macros for internal use (args parenthesized to avoid precedence bugs) */
+
+#define VRECT_SCALED_UV(raw)	((raw) * VOUT_STEP_UV)
+
+#define IRECT_SCALED_UA(raw)	((raw) * 1000 * 13)
+
+/*
+ * die_temp in deg C = (220.09 - raw) / 0.6316
+ *		     = (2200900 - (raw * 10000)) / 6316
+ */
+#define DIE_TEMP_SCALED_DEG_C(raw)	((2200900 - ((raw) * 10000)) / 6316)
+
+#endif
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index a141c53..abcc484 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -28,12 +28,14 @@ struct client_vote {
 
 struct votable {
 	const char		*name;
+	const char		*override_client;
 	struct list_head	list;
 	struct client_vote	votes[NUM_MAX_CLIENTS];
 	int			num_clients;
 	int			type;
 	int			effective_client_id;
 	int			effective_result;
+	int			override_result;
 	struct mutex		vote_lock;
 	void			*data;
 	int			(*callback)(struct votable *votable,
@@ -270,6 +272,9 @@ int get_effective_result_locked(struct votable *votable)
 	if (votable->force_active)
 		return votable->force_val;
 
+	if (votable->override_result != -EINVAL)
+		return votable->override_result;
+
 	return votable->effective_result;
 }
 
@@ -306,6 +311,9 @@ const char *get_effective_client_locked(struct votable *votable)
 	if (votable->force_active)
 		return DEBUG_FORCE_CLIENT;
 
+	if (votable->override_result != -EINVAL)
+		return votable->override_client;
+
 	return get_client_str(votable, votable->effective_client_id);
 }
 
@@ -416,7 +424,8 @@ int vote(struct votable *votable, const char *client_str, bool enabled, int val)
 			votable->name, effective_result,
 			get_client_str(votable, effective_id),
 			effective_id);
-		if (votable->callback && !votable->force_active)
+		if (votable->callback && !votable->force_active
+				&& (votable->override_result == -EINVAL))
 			rc = votable->callback(votable, votable->data,
 					effective_result,
 					get_client_str(votable, effective_id));
@@ -428,6 +437,57 @@ int vote(struct votable *votable, const char *client_str, bool enabled, int val)
 	return rc;
 }
 
+/**
+ * vote_override() -
+ *
+ * @votable:		The votable object
+ * @override_client:	The voting client that will override other clients'
+ *			votes that are already present. When force_active
+ *			and override votes are set on a votable, force_active's
+ *			client will have the higher priority and it's vote will
+ *			be the effective one.
+ * @enabled:		This provides a means for the override client to exclude
+ *			itself from election. This client's vote
+ *			(the next argument) will be considered only when
+ *			it has enabled its participation. When this is
+ *			set true, this will force a value on a MIN/MAX votable
+ *			irrespective of its current value.
+ * @val:		The vote value. This will be effective only if enabled
+ *			is set true.
+ * Returns:
+ *	The result of vote. 0 is returned if the vote
+ *	is successfully set by the overriding client, when enabled is set.
+ */
+int vote_override(struct votable *votable, const char *override_client,
+		  bool enabled, int val)
+{
+	int rc = 0;
+
+	lock_votable(votable);
+	if (votable->force_active) {
+		votable->override_result = enabled ? val : -EINVAL;
+		goto out;
+	}
+
+	if (enabled) {
+		rc = votable->callback(votable, votable->data,
+					val, override_client);
+		if (!rc) {
+			votable->override_client = override_client;
+			votable->override_result = val;
+		}
+	} else {
+		rc = votable->callback(votable, votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+		votable->override_result = -EINVAL;
+	}
+
+out:
+	unlock_votable(votable);
+	return rc;
+}
+
 int rerun_election(struct votable *votable)
 {
 	int rc = 0;
@@ -482,6 +542,8 @@ static int force_active_set(void *data, u64 val)
 {
 	struct votable *votable = data;
 	int rc = 0;
+	int effective_result;
+	const char *client;
 
 	lock_votable(votable);
 	votable->force_active = !!val;
@@ -494,9 +556,16 @@ static int force_active_set(void *data, u64 val)
 			votable->force_val,
 			DEBUG_FORCE_CLIENT);
 	} else {
-		rc = votable->callback(votable, votable->data,
-			votable->effective_result,
-			get_client_str(votable, votable->effective_client_id));
+		if (votable->override_result != -EINVAL) {
+			effective_result = votable->override_result;
+			client = votable->override_client;
+		} else {
+			effective_result = votable->effective_result;
+			client = get_client_str(votable,
+					votable->effective_client_id);
+		}
+		rc = votable->callback(votable, votable->data, effective_result,
+					client);
 	}
 out:
 	unlock_votable(votable);
@@ -604,6 +673,7 @@ struct votable *create_votable(const char *name,
 	votable->callback = callback;
 	votable->type = votable_type;
 	votable->data = data;
+	votable->override_result = -EINVAL;
 	mutex_init(&votable->vote_lock);
 
 	/*
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 99ebfa2..7e0b20c 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -36,6 +36,10 @@ struct qg_dt {
 	int			s2_vbat_low_fifo_length;
 	int			s2_acc_length;
 	int			s2_acc_intvl_ms;
+	int			sleep_s2_fifo_length;
+	int			sleep_s2_acc_length;
+	int			sleep_s2_acc_intvl_ms;
+	int			fast_chg_s2_fifo_length;
 	int			ocv_timer_expiry_min;
 	int			ocv_tol_threshold_uv;
 	int			s3_entry_fifo_length;
@@ -51,6 +55,8 @@ struct qg_dt {
 	int			esr_disable_soc;
 	int			esr_min_ibat_ua;
 	int			shutdown_soc_threshold;
+	int			min_sleep_time_secs;
+	int			sys_min_volt_mv;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
@@ -59,6 +65,8 @@ struct qg_dt {
 	bool			esr_discharge_enable;
 	bool			qg_ext_sense;
 	bool			use_s7_ocv;
+	bool			qg_sleep_config;
+	bool			qg_fast_chg_cfg;
 };
 
 struct qg_esr_data {
@@ -84,6 +92,7 @@ struct qpnp_qg {
 	struct work_struct	udata_work;
 	struct work_struct	scale_soc_work;
 	struct work_struct	qg_status_change_work;
+	struct delayed_work	qg_sleep_exit_work;
 	struct notifier_block	nb;
 	struct mutex		bus_lock;
 	struct mutex		data_lock;
@@ -134,9 +143,12 @@ struct qpnp_qg {
 	u32			charge_counter_uah;
 	u32			esr_avg;
 	u32			esr_last;
+	u32			s2_state;
+	u32			s2_state_mask;
 	ktime_t			last_user_update_time;
 	ktime_t			last_fifo_update_time;
 	unsigned long		last_maint_soc_update_time;
+	unsigned long		suspend_time;
 	struct iio_channel	*batt_therm_chan;
 	struct iio_channel	*batt_id_chan;
 
@@ -179,6 +191,13 @@ enum ocv_type {
 	PON_OCV_MAX,
 };
 
+enum s2_state {
+	S2_FAST_CHARGING = BIT(0),
+	S2_LOW_VBAT = BIT(1),
+	S2_SLEEP = BIT(2),
+	S2_DEFAULT = BIT(3),
+};
+
 enum debug_mask {
 	QG_DEBUG_PON		= BIT(0),
 	QG_DEBUG_PROFILE	= BIT(1),
diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h
index e2d2606..5caeb49 100644
--- a/drivers/power/supply/qcom/qg-defs.h
+++ b/drivers/power/supply/qcom/qg-defs.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QG_DEFS_H__
@@ -28,6 +28,8 @@
 #define PROFILE_IRQ_DISABLE		"NO_PROFILE_IRQ_DISABLE"
 #define QG_INIT_STATE_IRQ_DISABLE	"QG_INIT_STATE_IRQ_DISABLE"
 #define TTF_AWAKE_VOTER			"TTF_AWAKE_VOTER"
+#define SLEEP_EXIT_DATA_VOTER		"SLEEP_EXIT_DATA_VOTER"
+#define SLEEP_EXIT_VOTER		"SLEEP_EXIT_VOTER"
 
 #define V_RAW_TO_UV(V_RAW)		div_u64(194637ULL * (u64)V_RAW, 1000)
 #define FIFO_V_RESET_VAL		0x8000
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 264f948..d8d0c6e 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -159,6 +159,8 @@ static void get_next_update_time(struct qpnp_qg *chip)
 
 static bool is_scaling_required(struct qpnp_qg *chip)
 {
+	bool input_present = is_input_present(chip);
+
 	if (!chip->profile_loaded)
 		return false;
 
@@ -175,10 +177,16 @@ static bool is_scaling_required(struct qpnp_qg *chip)
 		return false;
 
 
-	if (chip->catch_up_soc > chip->msoc && !is_input_present(chip))
+	if (chip->catch_up_soc > chip->msoc && !input_present)
 		/* input is not present and SOC has increased */
 		return false;
 
+	if (chip->catch_up_soc > chip->msoc && input_present &&
+			(chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			chip->charge_status != POWER_SUPPLY_STATUS_FULL))
+		/* USB is present, but not charging */
+		return false;
+
 	return true;
 }
 
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index 9227c8d..4a23ab1 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -361,7 +361,7 @@ int qg_get_battery_temp(struct qpnp_qg *chip, int *temp)
 	}
 	pr_debug("batt_temp = %d\n", *temp);
 
-	return rc;
+	return 0;
 }
 
 int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua)
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index 9649f19..b3f63e2 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -92,12 +92,20 @@
 #define KI_COEFF_MED_DISCHG_OFFSET	0
 #define KI_COEFF_HI_DISCHG_WORD		26
 #define KI_COEFF_HI_DISCHG_OFFSET	1
+#define KI_COEFF_LO_MED_DCHG_THR_WORD	27
+#define KI_COEFF_LO_MED_DCHG_THR_OFFSET	0
+#define KI_COEFF_MED_HI_DCHG_THR_WORD	27
+#define KI_COEFF_MED_HI_DCHG_THR_OFFSET	1
 #define KI_COEFF_LOW_CHG_WORD		28
 #define KI_COEFF_LOW_CHG_OFFSET		0
 #define KI_COEFF_MED_CHG_WORD		28
 #define KI_COEFF_MED_CHG_OFFSET		1
 #define KI_COEFF_HI_CHG_WORD		29
 #define KI_COEFF_HI_CHG_OFFSET		0
+#define KI_COEFF_LO_MED_CHG_THR_WORD	29
+#define KI_COEFF_LO_MED_CHG_THR_OFFSET	1
+#define KI_COEFF_MED_HI_CHG_THR_WORD	30
+#define KI_COEFF_MED_HI_CHG_THR_OFFSET	0
 #define DELTA_BSOC_THR_WORD		30
 #define DELTA_BSOC_THR_OFFSET		1
 #define SLOPE_LIMIT_WORD		32
@@ -235,12 +243,16 @@ struct fg_dt_props {
 	int	ki_coeff_low_chg;
 	int	ki_coeff_med_chg;
 	int	ki_coeff_hi_chg;
+	int	ki_coeff_lo_med_chg_thr_ma;
+	int	ki_coeff_med_hi_chg_thr_ma;
 	int	ki_coeff_cutoff_gain;
 	int	ki_coeff_full_soc_dischg[2];
 	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_low_dischg[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_lo_med_dchg_thr_ma;
+	int	ki_coeff_med_hi_dchg_thr_ma;
 	int	slope_limit_coeffs[SLOPE_LIMIT_NUM_COEFFS];
 };
 
@@ -257,6 +269,7 @@ struct fg_gen4_chip {
 	struct votable		*cp_disable_votable;
 	struct votable		*parallel_current_en_votable;
 	struct votable		*mem_attn_irq_en_votable;
+	struct votable		*fv_votable;
 	struct work_struct	esr_calib_work;
 	struct work_struct	soc_scale_work;
 	struct alarm		esr_fast_cal_timer;
@@ -528,12 +541,24 @@ static struct fg_sram_param pm8150b_v2_sram_params[] = {
 	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
 		KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 61035, 0,
 		fg_encode_default, NULL),
+	PARAM(KI_COEFF_LO_MED_DCHG_THR, KI_COEFF_LO_MED_DCHG_THR_WORD,
+		KI_COEFF_LO_MED_DCHG_THR_OFFSET, 1, 1000, 15625, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_HI_DCHG_THR, KI_COEFF_MED_HI_DCHG_THR_WORD,
+		KI_COEFF_MED_HI_DCHG_THR_OFFSET, 1, 1000, 15625, 0,
+		fg_encode_default, NULL),
 	PARAM(KI_COEFF_LOW_CHG, KI_COEFF_LOW_CHG_WORD, KI_COEFF_LOW_CHG_OFFSET,
 		1, 1000, 61035, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_MED_CHG, KI_COEFF_MED_CHG_WORD, KI_COEFF_MED_CHG_OFFSET,
 		1, 1000, 61035, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_HI_CHG, KI_COEFF_HI_CHG_WORD, KI_COEFF_HI_CHG_OFFSET, 1,
 		1000, 61035, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_LO_MED_CHG_THR, KI_COEFF_LO_MED_CHG_THR_WORD,
+		KI_COEFF_LO_MED_CHG_THR_OFFSET, 1, 1000, 15625, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_HI_CHG_THR, KI_COEFF_MED_HI_CHG_THR_WORD,
+		KI_COEFF_MED_HI_CHG_THR_OFFSET, 1, 1000, 15625, 0,
+		fg_encode_default, NULL),
 	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192,
 		1000000, 0, fg_encode_default, NULL),
 	PARAM(BATT_TEMP_COLD, BATT_TEMP_CONFIG_WORD, BATT_TEMP_COLD_OFFSET, 1,
@@ -860,7 +885,7 @@ static int fg_gen4_get_cell_impedance(struct fg_gen4_chip *chip, int *val)
 {
 	struct fg_dev *fg = &chip->fg;
 	int rc, esr_uohms, temp, vbat_term_mv, v_delta, rprot_uohms = 0;
-	int rslow_uohms;
+	int rslow_uohms, fv_uv = fg->bp.float_volt_uv;
 
 	rc = fg_get_sram_prop(fg, FG_SRAM_ESR_ACT, &esr_uohms);
 	if (rc < 0) {
@@ -879,8 +904,21 @@ static int fg_gen4_get_cell_impedance(struct fg_gen4_chip *chip, int *val)
 	if (!chip->dt.five_pin_battery)
 		goto out;
 
-	if (fg->charge_type != POWER_SUPPLY_CHARGE_TYPE_TAPER ||
-		fg->bp.float_volt_uv <= 0)
+	if (fg->charge_type != POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto out;
+
+	if ((fg->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) &&
+		(fg->health != POWER_SUPPLY_HEALTH_GOOD)) {
+		if (!chip->fv_votable)
+			chip->fv_votable = find_votable("FV");
+
+		if (!chip->fv_votable)
+			goto out;
+
+		fv_uv = get_effective_result(chip->fv_votable);
+	}
+
+	if (fv_uv <= 0)
 		goto out;
 
 	rc = fg_get_battery_voltage(fg, &vbat_term_mv);
@@ -893,7 +931,7 @@ static int fg_gen4_get_cell_impedance(struct fg_gen4_chip *chip, int *val)
 		goto out;
 	}
 
-	v_delta = abs(temp - fg->bp.float_volt_uv);
+	v_delta = abs(temp - fv_uv);
 
 	rc = fg_get_sram_prop(fg, FG_SRAM_IBAT_FINAL, &temp);
 	if (rc < 0) {
@@ -959,6 +997,11 @@ static int fg_gen4_get_prop_capacity(struct fg_dev *fg, int *val)
 	return 0;
 }
 
+static int fg_gen4_get_prop_real_capacity(struct fg_dev *fg, int *val)
+{
+	return fg_get_msoc(fg, val);
+}
+
 static int fg_gen4_get_prop_capacity_raw(struct fg_gen4_chip *chip, int *val)
 {
 	struct fg_dev *fg = &chip->fg;
@@ -1381,8 +1424,8 @@ static void fg_gen4_update_rslow_coeff(struct fg_dev *fg, int batt_temp)
 	}
 }
 
-#define KI_COEFF_FULL_SOC_NORM_DEFAULT		733
-#define KI_COEFF_FULL_SOC_LOW_DEFAULT		184
+#define KI_COEFF_FULL_SOC_NORM_DEFAULT	2442
+#define KI_COEFF_FULL_SOC_LOW_DEFAULT	2442
 static int fg_gen4_adjust_ki_coeff_full_soc(struct fg_gen4_chip *chip,
 						int batt_temp)
 {
@@ -1390,15 +1433,13 @@ static int fg_gen4_adjust_ki_coeff_full_soc(struct fg_gen4_chip *chip,
 	int rc, ki_coeff_full_soc_norm, ki_coeff_full_soc_low;
 	u8 val;
 
-	if (batt_temp < 0) {
+	if ((batt_temp < 0) ||
+		(fg->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)) {
 		ki_coeff_full_soc_norm = 0;
 		ki_coeff_full_soc_low = 0;
-	} else if (fg->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+	} else if (fg->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
 		ki_coeff_full_soc_norm = chip->dt.ki_coeff_full_soc_dischg[0];
 		ki_coeff_full_soc_low = chip->dt.ki_coeff_full_soc_dischg[1];
-	} else {
-		ki_coeff_full_soc_norm = KI_COEFF_FULL_SOC_NORM_DEFAULT;
-		ki_coeff_full_soc_low = KI_COEFF_FULL_SOC_LOW_DEFAULT;
 	}
 
 	if (chip->ki_coeff_full_soc[0] == ki_coeff_full_soc_norm &&
@@ -1430,9 +1471,63 @@ static int fg_gen4_adjust_ki_coeff_full_soc(struct fg_gen4_chip *chip,
 	return 0;
 }
 
-#define KI_COEFF_LOW_DISCHG_DEFAULT	428
-#define KI_COEFF_MED_DISCHG_DEFAULT	245
-#define KI_COEFF_HI_DISCHG_DEFAULT	123
+static int fg_gen4_set_ki_coeff_dischg(struct fg_dev *fg, int ki_coeff_low,
+		int ki_coeff_med, int ki_coeff_hi)
+{
+	int rc;
+	u8 val;
+
+	if (ki_coeff_low >= 0) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_DISCHG, ki_coeff_low,
+			&val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_low, rc=%d\n", rc);
+			return rc;
+		}
+		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_low %d\n", ki_coeff_low);
+	}
+
+	if (ki_coeff_med >= 0) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med,
+			&val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+			return rc;
+		}
+		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_med %d\n", ki_coeff_med);
+	}
+
+	if (ki_coeff_hi >= 0) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi,
+			&val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+			return rc;
+		}
+		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_hi %d\n", ki_coeff_hi);
+	}
+
+	return 0;
+}
+
+#define KI_COEFF_LOW_DISCHG_DEFAULT	122
+#define KI_COEFF_MED_DISCHG_DEFAULT	62
+#define KI_COEFF_HI_DISCHG_DEFAULT	0
 static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg)
 {
 	struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg);
@@ -1440,7 +1535,6 @@ static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg)
 	int ki_coeff_low = KI_COEFF_LOW_DISCHG_DEFAULT;
 	int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
 	int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
-	u8 val;
 
 	if (!chip->ki_coeff_dischg_en)
 		return 0;
@@ -1461,50 +1555,10 @@ static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg)
 		}
 	}
 
-	if (ki_coeff_low > 0) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_DISCHG, ki_coeff_low,
-			&val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_low, rc=%d\n", rc);
-			return rc;
-		}
-		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_low %d\n", ki_coeff_low);
-	}
-
-	if (ki_coeff_med > 0) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med,
-			&val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
-			return rc;
-		}
-		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_med %d\n", ki_coeff_med);
-	}
-
-	if (ki_coeff_hi > 0) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi,
-			&val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
-			return rc;
-		}
-		fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_hi %d\n", ki_coeff_hi);
-	}
+	rc = fg_gen4_set_ki_coeff_dischg(fg,
+			ki_coeff_low, ki_coeff_med, ki_coeff_hi);
+	if (rc < 0)
+		return rc;
 
 	return 0;
 }
@@ -3917,6 +3971,9 @@ static void status_change_work(struct work_struct *work)
 	if (!chip->cp_disable_votable)
 		chip->cp_disable_votable = find_votable("CP_DISABLE");
 
+	if (!chip->fv_votable)
+		chip->fv_votable = find_votable("FV");
+
 	if (!batt_psy_initialized(fg)) {
 		fg_dbg(fg, FG_STATUS, "Charger not available?!\n");
 		goto out;
@@ -4202,6 +4259,9 @@ static int fg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = fg_gen4_get_prop_capacity(fg, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_REAL_CAPACITY:
+		rc = fg_gen4_get_prop_real_capacity(fg, &pval->intval);
+		break;
 	case POWER_SUPPLY_PROP_CAPACITY_RAW:
 		rc = fg_gen4_get_prop_capacity_raw(chip, &pval->intval);
 		break;
@@ -4445,6 +4505,7 @@ static int fg_property_is_writeable(struct power_supply *psy,
 
 static enum power_supply_property fg_psy_props[] = {
 	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_REAL_CAPACITY,
 	POWER_SUPPLY_PROP_CAPACITY_RAW,
 	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_TEMP,
@@ -4914,12 +4975,145 @@ static int fg_gen4_batt_temp_config(struct fg_gen4_chip *chip)
 	return rc;
 }
 
+static int fg_gen4_init_ki_coeffts(struct fg_gen4_chip *chip)
+{
+	int rc;
+	u8 val;
+	struct fg_dev *fg = &chip->fg;
+
+	if (chip->dt.ki_coeff_low_chg != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_CHG,
+			chip->dt.ki_coeff_low_chg, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_low_chg, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_med_chg != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_CHG,
+			chip->dt.ki_coeff_med_chg, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_med_chg, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_hi_chg != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_CHG,
+			chip->dt.ki_coeff_hi_chg, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_hi_chg, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_lo_med_chg_thr_ma != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LO_MED_CHG_THR,
+			chip->dt.ki_coeff_lo_med_chg_thr_ma, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_LO_MED_CHG_THR].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_LO_MED_CHG_THR].addr_byte,
+			&val, fg->sp[FG_SRAM_KI_COEFF_LO_MED_CHG_THR].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_lo_med_chg_thr_ma, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_med_hi_chg_thr_ma != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_HI_CHG_THR,
+			chip->dt.ki_coeff_med_hi_chg_thr_ma, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_MED_HI_CHG_THR].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_MED_HI_CHG_THR].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_MED_HI_CHG_THR].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_med_hi_chg_thr_ma, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_lo_med_dchg_thr_ma != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LO_MED_DCHG_THR,
+			chip->dt.ki_coeff_lo_med_dchg_thr_ma, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_LO_MED_DCHG_THR].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_LO_MED_DCHG_THR].addr_byte,
+			&val, fg->sp[FG_SRAM_KI_COEFF_LO_MED_DCHG_THR].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_lo_med_dchg_thr_ma, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_med_hi_dchg_thr_ma != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_HI_DCHG_THR,
+			chip->dt.ki_coeff_med_hi_dchg_thr_ma, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_MED_HI_DCHG_THR].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_MED_HI_DCHG_THR].addr_byte,
+			&val, fg->sp[FG_SRAM_KI_COEFF_MED_HI_DCHG_THR].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_med_hi_dchg_thr_ma, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.ki_coeff_cutoff_gain != -EINVAL) {
+		fg_encode(fg->sp, FG_SRAM_KI_COEFF_CUTOFF,
+			  chip->dt.ki_coeff_cutoff_gain, &val);
+		rc = fg_sram_write(fg,
+			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].addr_word,
+			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].addr_byte, &val,
+			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].len,
+			FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing ki_coeff_cutoff_gain, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = fg_gen4_set_ki_coeff_dischg(fg, KI_COEFF_LOW_DISCHG_DEFAULT,
+		KI_COEFF_MED_DISCHG_DEFAULT, KI_COEFF_HI_DISCHG_DEFAULT);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
 #define VBATT_TAU_DEFAULT	3
 static int fg_gen4_hw_init(struct fg_gen4_chip *chip)
 {
 	struct fg_dev *fg = &chip->fg;
-	int rc;
 	u8 buf[4], val;
+	int rc;
 
 	rc = fg_read(fg, ADC_RR_INT_RT_STS(fg), &val, 1);
 	if (rc < 0) {
@@ -5054,64 +5248,9 @@ static int fg_gen4_hw_init(struct fg_gen4_chip *chip)
 		}
 	}
 
-	if (chip->dt.ki_coeff_low_chg != -EINVAL) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_CHG,
-			chip->dt.ki_coeff_low_chg, &val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_low_chg, rc=%d\n",
-				rc);
-			return rc;
-		}
-	}
-
-	if (chip->dt.ki_coeff_med_chg != -EINVAL) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_CHG,
-			chip->dt.ki_coeff_med_chg, &val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_MED_CHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_med_chg, rc=%d\n",
-				rc);
-			return rc;
-		}
-	}
-
-	if (chip->dt.ki_coeff_hi_chg != -EINVAL) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_CHG,
-			chip->dt.ki_coeff_hi_chg, &val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_HI_CHG].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_hi_chg, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (chip->dt.ki_coeff_cutoff_gain != -EINVAL) {
-		fg_encode(fg->sp, FG_SRAM_KI_COEFF_CUTOFF,
-			  chip->dt.ki_coeff_cutoff_gain, &val);
-		rc = fg_sram_write(fg,
-			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].addr_word,
-			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].addr_byte, &val,
-			fg->sp[FG_SRAM_KI_COEFF_CUTOFF].len,
-			FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing ki_coeff_cutoff_gain, rc=%d\n",
-				rc);
-			return rc;
-		}
-	}
+	rc = fg_gen4_init_ki_coeffts(chip);
+	if (rc < 0)
+		return rc;
 
 	rc = fg_gen4_esr_calib_config(chip);
 	if (rc < 0)
@@ -5173,6 +5312,9 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg)
 	struct device_node *node = fg->dev->of_node;
 	int rc, i;
 
+	chip->dt.ki_coeff_full_soc_dischg[0] = KI_COEFF_FULL_SOC_NORM_DEFAULT;
+	chip->dt.ki_coeff_full_soc_dischg[1] = KI_COEFF_FULL_SOC_LOW_DEFAULT;
+
 	if (of_find_property(node, "qcom,ki-coeff-full-dischg", NULL)) {
 		rc = fg_parse_dt_property_u32_array(node,
 			"qcom,ki-coeff-full-dischg",
@@ -5189,22 +5331,44 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg)
 		}
 	}
 
-	chip->dt.ki_coeff_low_chg = -EINVAL;
+	chip->dt.ki_coeff_low_chg = 183;
 	of_property_read_u32(node, "qcom,ki-coeff-low-chg",
 		&chip->dt.ki_coeff_low_chg);
 
-	chip->dt.ki_coeff_med_chg = -EINVAL;
+	chip->dt.ki_coeff_med_chg = 62;
 	of_property_read_u32(node, "qcom,ki-coeff-med-chg",
 		&chip->dt.ki_coeff_med_chg);
 
-	chip->dt.ki_coeff_hi_chg = -EINVAL;
+	chip->dt.ki_coeff_hi_chg = 0;
 	of_property_read_u32(node, "qcom,ki-coeff-hi-chg",
 		&chip->dt.ki_coeff_hi_chg);
 
+	chip->dt.ki_coeff_lo_med_chg_thr_ma = 1000;
+	of_property_read_u32(node, "qcom,ki-coeff-chg-low-med-thresh-ma",
+		&chip->dt.ki_coeff_lo_med_chg_thr_ma);
+
+	chip->dt.ki_coeff_med_hi_chg_thr_ma = 1500;
+	of_property_read_u32(node, "qcom,ki-coeff-chg-med-hi-thresh-ma",
+		&chip->dt.ki_coeff_med_hi_chg_thr_ma);
+
+	chip->dt.ki_coeff_lo_med_dchg_thr_ma = 50;
+	of_property_read_u32(node, "qcom,ki-coeff-dischg-low-med-thresh-ma",
+		&chip->dt.ki_coeff_lo_med_dchg_thr_ma);
+
+	chip->dt.ki_coeff_med_hi_dchg_thr_ma = 100;
+	of_property_read_u32(node, "qcom,ki-coeff-dischg-med-hi-thresh-ma",
+		&chip->dt.ki_coeff_med_hi_dchg_thr_ma);
+
 	chip->dt.ki_coeff_cutoff_gain = -EINVAL;
 	of_property_read_u32(node, "qcom,ki-coeff-cutoff",
 		&chip->dt.ki_coeff_cutoff_gain);
 
+	for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+		chip->dt.ki_coeff_low_dischg[i] = KI_COEFF_LOW_DISCHG_DEFAULT;
+		chip->dt.ki_coeff_med_dischg[i] = KI_COEFF_MED_DISCHG_DEFAULT;
+		chip->dt.ki_coeff_hi_dischg[i] = KI_COEFF_HI_DISCHG_DEFAULT;
+	}
+
 	if (!of_find_property(node, "qcom,ki-coeff-soc-dischg", NULL) ||
 		(!of_find_property(node, "qcom,ki-coeff-low-dischg", NULL) &&
 		!of_find_property(node, "qcom,ki-coeff-med-dischg", NULL) &&
@@ -5762,7 +5926,7 @@ static int fg_gen4_probe(struct platform_device *pdev)
 {
 	struct fg_gen4_chip *chip;
 	struct fg_dev *fg;
-	struct power_supply_config fg_psy_cfg;
+	struct power_supply_config fg_psy_cfg = {};
 	int rc, msoc, volt_uv, batt_temp;
 
 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -5899,9 +6063,7 @@ static int fg_gen4_probe(struct platform_device *pdev)
 
 	/* Register the power supply */
 	fg_psy_cfg.drv_data = fg;
-	fg_psy_cfg.of_node = NULL;
-	fg_psy_cfg.supplied_to = NULL;
-	fg_psy_cfg.num_supplicants = 0;
+	fg_psy_cfg.of_node = fg->dev->of_node;
 	fg->fg_psy = devm_power_supply_register(fg->dev, &fg_psy_desc,
 			&fg_psy_cfg);
 	if (IS_ERR(fg->fg_psy)) {
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index f441b59..470d67c 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -20,6 +20,7 @@
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/uaccess.h>
 #include <linux/pmic-voter.h>
 #include <linux/iio/consumer.h>
@@ -89,6 +90,8 @@ static struct attribute *qg_attrs[] = {
 };
 ATTRIBUTE_GROUPS(qg);
 
+static int qg_process_rt_fifo(struct qpnp_qg *chip);
+
 static bool is_battery_present(struct qpnp_qg *chip)
 {
 	u8 reg = 0;
@@ -179,7 +182,7 @@ static int qg_update_fifo_length(struct qpnp_qg *chip, u8 length)
 		pr_err("Failed to write S2 FIFO length, rc=%d\n", rc);
 
 	/* update the S3 FIFO length, when S2 length is updated */
-	if (length > 3)
+	if (length > 3 && !chip->dt.qg_sleep_config)
 		s3_entry_fifo_length = (chip->dt.s3_entry_fifo_length > 0) ?
 			chip->dt.s3_entry_fifo_length : DEFAULT_S3_FIFO_LENGTH;
 	else	/* Use S3 length as 1 for any S2 length <= 3 */
@@ -308,6 +311,111 @@ static int qg_store_soc_params(struct qpnp_qg *chip)
 	return rc;
 }
 
+static int qg_config_s2_state(struct qpnp_qg *chip,
+		enum s2_state requested_state, bool state_enable,
+		bool process_fifo)
+{
+	int rc, acc_interval, acc_length;
+	u8 fifo_length, reg = 0, state = S2_DEFAULT;
+
+	if ((chip->s2_state_mask & requested_state) && (state_enable == true))
+		return 0; /* No change in state */
+
+	if (!(chip->s2_state_mask & requested_state) && (state_enable == false))
+		return 0; /* No change in state */
+
+	if (state_enable)
+		chip->s2_state_mask |= requested_state;
+	else
+		chip->s2_state_mask &= ~requested_state;
+
+	/* define the priority of the states */
+	if (chip->s2_state_mask & S2_FAST_CHARGING)
+		state = S2_FAST_CHARGING;
+	else if (chip->s2_state_mask & S2_LOW_VBAT)
+		state = S2_LOW_VBAT;
+	else if (chip->s2_state_mask & S2_SLEEP)
+		state = S2_SLEEP;
+	else
+		state = S2_DEFAULT;
+
+	if (state == chip->s2_state)
+		return 0;
+
+	switch (state) {
+	case S2_FAST_CHARGING:
+		fifo_length = chip->dt.fast_chg_s2_fifo_length;
+		acc_interval = chip->dt.s2_acc_intvl_ms;
+		acc_length = chip->dt.s2_acc_length;
+		break;
+	case S2_LOW_VBAT:
+		fifo_length = chip->dt.s2_vbat_low_fifo_length;
+		acc_interval = chip->dt.s2_acc_intvl_ms;
+		acc_length = chip->dt.s2_acc_length;
+		break;
+	case S2_SLEEP:
+		fifo_length = chip->dt.sleep_s2_fifo_length;
+		acc_interval = chip->dt.sleep_s2_acc_intvl_ms;
+		acc_length = chip->dt.sleep_s2_acc_length;
+		break;
+	case S2_DEFAULT:
+		fifo_length = chip->dt.s2_fifo_length;
+		acc_interval = chip->dt.s2_acc_intvl_ms;
+		acc_length = chip->dt.s2_acc_length;
+		break;
+	default:
+		pr_err("Invalid S2 state %d\n", state);
+		return -EINVAL;
+	}
+
+	rc = qg_master_hold(chip, true);
+	if (rc < 0) {
+		pr_err("Failed to hold master, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (process_fifo) {
+		rc = qg_process_rt_fifo(chip);
+		if (rc < 0) {
+			pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
+			goto done;
+		}
+	}
+
+	rc = qg_update_fifo_length(chip, fifo_length);
+	if (rc < 0) {
+		pr_err("Failed to update S2 fifo-length, rc=%d\n", rc);
+		goto done;
+	}
+
+	reg = acc_interval / 10;
+	rc = qg_write(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL3_REG,
+					&reg, 1);
+	if (rc < 0) {
+		pr_err("Failed to update S2 acc intrvl, rc=%d\n", rc);
+		goto done;
+	}
+
+	reg = ilog2(acc_length) - 1;
+	rc = qg_masked_write(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL2_REG,
+					NUM_OF_ACCUM_MASK, reg);
+	if (rc < 0) {
+		pr_err("Failed to update S2 ACC length, rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->s2_state = state;
+
+	qg_dbg(chip, QG_DEBUG_STATUS, "S2 New state=%x  fifo_length=%d interval=%d acc_length=%d\n",
+				state, fifo_length, acc_interval, acc_length);
+
+done:
+	qg_master_hold(chip, false);
+	/* FIFO restarted */
+	chip->last_fifo_update_time = ktime_get_boottime();
+	return rc;
+}
+
 static int qg_process_fifo(struct qpnp_qg *chip, u32 fifo_length)
 {
 	int rc = 0, i, j = 0, temp;
@@ -342,9 +450,10 @@ static int qg_process_fifo(struct qpnp_qg *chip, u32 fifo_length)
 
 	/*
 	 * If there is pending data from suspend, append the new FIFO
-	 * data to it.
+	 * data to it. Only do this if we can accommodate 8 FIFOs
 	 */
-	if (chip->suspend_data) {
+	if (chip->suspend_data &&
+		(chip->kdata.fifo_length < (MAX_FIFO_LENGTH / 2))) {
 		j = chip->kdata.fifo_length; /* append the data */
 		chip->suspend_data = false;
 		qg_dbg(chip, QG_DEBUG_FIFO,
@@ -413,7 +522,7 @@ static int qg_process_accumulator(struct qpnp_qg *chip)
 		return rc;
 	}
 
-	if (!count) {
+	if (count < 10) { /* count == 0 or too few samples: ignore */
 		pr_debug("No ACCUMULATOR data!\n");
 		return 0;
 	}
@@ -445,6 +554,8 @@ static int qg_process_accumulator(struct qpnp_qg *chip)
 	chip->kdata.fifo[index].interval = sample_interval;
 	chip->kdata.fifo[index].count = count;
 	chip->kdata.fifo_length++;
+	if (chip->kdata.fifo_length == MAX_FIFO_LENGTH)
+		chip->kdata.fifo_length = MAX_FIFO_LENGTH - 1;
 
 	if (chip->kdata.fifo_length == 1)	/* Only accumulator data */
 		chip->kdata.seq_no = chip->seq_no++ % U32_MAX;
@@ -489,7 +600,7 @@ static int qg_process_rt_fifo(struct qpnp_qg *chip)
 static int process_rt_fifo_data(struct qpnp_qg *chip, bool update_smb)
 {
 	int rc = 0;
-	ktime_t now = ktime_get();
+	ktime_t now = ktime_get_boottime();
 	s64 time_delta;
 
 	/*
@@ -534,7 +645,7 @@ static int process_rt_fifo_data(struct qpnp_qg *chip, bool update_smb)
 			goto done;
 		}
 		/* FIFOs restarted */
-		chip->last_fifo_update_time = ktime_get();
+		chip->last_fifo_update_time = ktime_get_boottime();
 
 		/* signal the read thread */
 		chip->data_ready = true;
@@ -556,7 +667,7 @@ static int process_rt_fifo_data(struct qpnp_qg *chip, bool update_smb)
 static int qg_vbat_low_wa(struct qpnp_qg *chip)
 {
 	int rc, i, temp = 0;
-	u32 vbat_low_uv = 0, fifo_length = 0;
+	u32 vbat_low_uv = 0;
 
 	if ((chip->wa_flags & QG_VBAT_LOW_WA) && chip->vbat_low) {
 		rc = qg_get_battery_temp(chip, &temp);
@@ -586,37 +697,11 @@ static int qg_vbat_low_wa(struct qpnp_qg *chip)
 		}
 	}
 
-	rc = get_fifo_length(chip, &fifo_length, false);
-	if (rc < 0) {
-		pr_err("Failed to get FIFO length, rc=%d\n", rc);
-		return rc;
-	}
-
-	if (chip->vbat_low && fifo_length == chip->dt.s2_vbat_low_fifo_length)
-		return 0;
-
-	if (!chip->vbat_low && fifo_length == chip->dt.s2_fifo_length)
-		return 0;
-
-	rc = qg_master_hold(chip, true);
-	if (rc < 0) {
-		pr_err("Failed to hold master, rc=%d\n", rc);
-		goto done;
-	}
-
-	fifo_length = chip->vbat_low ? chip->dt.s2_vbat_low_fifo_length :
-					chip->dt.s2_fifo_length;
-
-	rc = qg_update_fifo_length(chip, fifo_length);
+	rc = qg_config_s2_state(chip, S2_LOW_VBAT,
+			chip->vbat_low ? true : false, false);
 	if (rc < 0)
-		goto done;
+		pr_err("Failed to configure for VBAT_LOW rc=%d\n", rc);
 
-	qg_dbg(chip, QG_DEBUG_STATUS, "FIFO length updated to %d vbat_low=%d\n",
-					fifo_length, chip->vbat_low);
-done:
-	qg_master_hold(chip, false);
-	/* FIFOs restarted */
-	chip->last_fifo_update_time = ktime_get();
 	return rc;
 }
 
@@ -687,6 +772,22 @@ static int qg_vbat_thresholds_config(struct qpnp_qg *chip)
 	return rc;
 }
 
+static int qg_fast_charge_config(struct qpnp_qg *chip)
+{
+	int rc = 0;
+
+	if (!chip->dt.qg_fast_chg_cfg)
+		return 0;
+
+	rc = qg_config_s2_state(chip, S2_FAST_CHARGING,
+			(chip->charge_status == POWER_SUPPLY_STATUS_CHARGING)
+			? true : false, false);
+	if (rc < 0)
+		pr_err("Failed to configure S2_FAST_CHARGING rc=%d\n", rc);
+
+	return rc;
+}
+
 static void qg_retrieve_esr_params(struct qpnp_qg *chip)
 {
 	u32 data = 0;
@@ -1032,7 +1133,7 @@ static int qg_esr_estimate(struct qpnp_qg *chip)
 		goto done;
 	}
 	/* FIFOs restarted */
-	chip->last_fifo_update_time = ktime_get();
+	chip->last_fifo_update_time = ktime_get_boottime();
 
 	if (chip->esr_avg) {
 		chip->kdata.param[QG_ESR].data = chip->esr_avg;
@@ -1119,7 +1220,7 @@ static void process_udata_work(struct work_struct *work)
 #define MAX_FIFO_DELTA_PERCENT		10
 static irqreturn_t qg_fifo_update_done_handler(int irq, void *data)
 {
-	ktime_t now = ktime_get();
+	ktime_t now = ktime_get_boottime();
 	int rc, hw_delta_ms = 0, margin_ms = 0;
 	u32 fifo_length = 0;
 	s64 time_delta_ms = 0;
@@ -1150,6 +1251,10 @@ static irqreturn_t qg_fifo_update_done_handler(int irq, void *data)
 	if (rc < 0)
 		pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc);
 
+	rc = qg_fast_charge_config(chip);
+	if (rc < 0)
+		pr_err("Failed to apply fast-charge config rc=%d\n", rc);
+
 	rc = qg_vbat_low_wa(chip);
 	if (rc < 0) {
 		pr_err("Failed to apply VBAT LOW WA, rc=%d\n", rc);
@@ -1580,6 +1685,15 @@ static int qg_get_battery_capacity(struct qpnp_qg *chip, int *soc)
 	return 0;
 }
 
+static int qg_get_battery_capacity_real(struct qpnp_qg *chip, int *soc)
+{
+	mutex_lock(&chip->soc_lock);
+	*soc = chip->msoc;
+	mutex_unlock(&chip->soc_lock);
+
+	return 0;
+}
+
 static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
 {
 	int rc, cc_soc = 0;
@@ -1600,6 +1714,54 @@ static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
 	return 0;
 }
 
+static int qg_get_power(struct qpnp_qg *chip, int *val, bool average)
+{
+	int rc, v_min, v_ocv, rbatt = 0, esr = 0;
+	s64 power;
+
+	if (is_debug_batt_id(chip)) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	v_min = chip->dt.sys_min_volt_mv * 1000;
+
+	rc = qg_sdam_read(SDAM_OCV_UV, &v_ocv);
+	if (rc < 0) {
+		pr_err("Failed to read OCV rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qg_sdam_read(SDAM_RBAT_MOHM, &rbatt);
+	if (rc < 0) {
+		pr_err("Failed to read T_RBAT rc=%d\n", rc);
+		return rc;
+	}
+
+	rbatt *= 1000;	/* uohms */
+	esr = chip->esr_last * 1000;
+
+	if (rbatt <= 0 || esr <= 0) {
+		pr_debug("Invalid rbatt/esr rbatt=%d esr=%d\n", rbatt, esr);
+		*val = -EINVAL;
+		return 0;
+	}
+
+	power = (s64)v_min * (v_ocv - v_min);
+
+	if (average)
+		power = div_s64(power, rbatt);
+	else
+		power = div_s64(power, esr);
+
+	*val = power;
+
+	qg_dbg(chip, QG_DEBUG_STATUS, "v_min=%d v_ocv=%d rbatt=%d esr=%d power=%lld\n",
+			v_min, v_ocv, rbatt, esr, power);
+
+	return 0;
+}
+
 static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
 {
 	union power_supply_propval prop = {0, };
@@ -1833,6 +1995,12 @@ static int qg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = qg_get_battery_capacity(chip, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_CAPACITY_RAW:
+		pval->intval = chip->sys_soc;
+		break;
+	case POWER_SUPPLY_PROP_REAL_CAPACITY:
+		rc = qg_get_battery_capacity_real(chip, &pval->intval);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		rc = qg_get_battery_voltage(chip, &pval->intval);
 		break;
@@ -1927,6 +2095,12 @@ static int qg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
 		rc = qg_get_vbat_avg(chip, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_POWER_NOW:
+		rc = qg_get_power(chip, &pval->intval, false);
+		break;
+	case POWER_SUPPLY_PROP_POWER_AVG:
+		rc = qg_get_power(chip, &pval->intval, true);
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -1953,6 +2127,8 @@ static int qg_property_is_writeable(struct power_supply *psy,
 
 static enum power_supply_property qg_psy_props[] = {
 	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_REAL_CAPACITY,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -1981,6 +2157,8 @@ static enum power_supply_property qg_psy_props[] = {
 	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_FG_RESET,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_POWER_AVG,
+	POWER_SUPPLY_PROP_POWER_NOW,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -2236,6 +2414,33 @@ static int qg_battery_status_update(struct qpnp_qg *chip)
 	return rc;
 }
 
+static void qg_sleep_exit_work(struct work_struct *work)
+{
+	int rc;
+	struct qpnp_qg *chip = container_of(work,
+			struct qpnp_qg, qg_sleep_exit_work.work);
+
+	vote(chip->awake_votable, SLEEP_EXIT_VOTER, true, 0);
+
+	mutex_lock(&chip->data_lock);
+	/*
+	 * if this work is executing, the system has been active
+	 * for a while. So, force back the S2 active configuration
+	 */
+	qg_dbg(chip, QG_DEBUG_STATUS, "sleep_exit_work: exit S2_SLEEP\n");
+	rc = qg_config_s2_state(chip, S2_SLEEP, false, true);
+	if (rc < 0)
+		pr_err("Failed to exit S2_SLEEP rc=%d\n", rc);
+
+	vote(chip->awake_votable, SLEEP_EXIT_DATA_VOTER, true, 0);
+	/* signal the read thread */
+	chip->data_ready = true;
+	wake_up_interruptible(&chip->qg_wait_q);
+
+	mutex_unlock(&chip->data_lock);
+
+	vote(chip->awake_votable, SLEEP_EXIT_VOTER, false, 0);
+}
 
 static void qg_status_change_work(struct work_struct *work)
 {
@@ -2345,13 +2550,11 @@ static int qg_notifier_cb(struct notifier_block *nb,
 
 static int qg_init_psy(struct qpnp_qg *chip)
 {
-	struct power_supply_config qg_psy_cfg;
+	struct power_supply_config qg_psy_cfg = {};
 	int rc;
 
 	qg_psy_cfg.drv_data = chip;
-	qg_psy_cfg.of_node = NULL;
-	qg_psy_cfg.supplied_to = NULL;
-	qg_psy_cfg.num_supplicants = 0;
+	qg_psy_cfg.of_node = chip->dev->of_node;
 	chip->qg_psy = devm_power_supply_register(chip->dev,
 				&qg_psy_desc, &qg_psy_cfg);
 	if (IS_ERR_OR_NULL(chip->qg_psy)) {
@@ -2410,6 +2613,7 @@ static ssize_t qg_device_read(struct file *file, char __user *buf, size_t count,
 	vote(chip->awake_votable, FIFO_DONE_VOTER, false, 0);
 	vote(chip->awake_votable, FIFO_RT_DONE_VOTER, false, 0);
 	vote(chip->awake_votable, SUSPEND_DATA_VOTER, false, 0);
+	vote(chip->awake_votable, SLEEP_EXIT_DATA_VOTER, false, 0);
 
 	qg_dbg(chip, QG_DEBUG_DEVICE,
 		"QG device read complete Seq_no=%u Size=%ld\n",
@@ -2752,7 +2956,7 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip)
 	bool use_pon_ocv = true;
 	unsigned long rtc_sec = 0;
 	u32 ocv_uv = 0, soc = 0, pon_soc = 0, full_soc = 0, cutoff_soc = 0;
-	u32 shutdown[SDAM_MAX] = {0};
+	u32 shutdown[SDAM_MAX] = {0}, soc_raw = 0;
 	char ocv_type[20] = "NONE";
 
 	if (!chip->profile_loaded) {
@@ -2831,6 +3035,7 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip)
 	use_pon_ocv = false;
 	ocv_uv = shutdown[SDAM_OCV_UV];
 	soc = shutdown[SDAM_SOC];
+	soc_raw = shutdown[SDAM_SOC] * 100;
 	strlcpy(ocv_type, "SHUTDOWN_SOC", 20);
 	qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n");
 
@@ -2898,7 +3103,9 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip)
 			soc = DIV_ROUND_UP(((pon_soc - cutoff_soc) * 100),
 						(full_soc - cutoff_soc));
 			soc = CAP(0, 100, soc);
+			soc_raw = soc * 100;
 		} else {
+			soc_raw = pon_soc * 100;
 			soc = pon_soc;
 		}
 
@@ -2912,6 +3119,7 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip)
 		return rc;
 	}
 
+	chip->cc_soc = chip->sys_soc = soc_raw;
 	chip->last_adj_ssoc = chip->catch_up_soc = chip->msoc = soc;
 	chip->kdata.param[QG_PON_OCV_UV].data = ocv_uv;
 	chip->kdata.param[QG_PON_OCV_UV].valid = true;
@@ -3028,6 +3236,8 @@ static int qg_hw_init(struct qpnp_qg *chip)
 		}
 	}
 
+	chip->s2_state = S2_DEFAULT;
+	chip->s2_state_mask |= S2_DEFAULT;
 	/* signal the read thread */
 	chip->data_ready = true;
 	wake_up_interruptible(&chip->qg_wait_q);
@@ -3038,7 +3248,7 @@ static int qg_hw_init(struct qpnp_qg *chip)
 		pr_err("Failed to release master, rc=%d\n", rc);
 		return rc;
 	}
-	chip->last_fifo_update_time = ktime_get();
+	chip->last_fifo_update_time = ktime_get_boottime();
 
 	if (chip->dt.ocv_timer_expiry_min != -EINVAL) {
 		if (chip->dt.ocv_timer_expiry_min < 2)
@@ -3378,19 +3588,94 @@ static void qg_create_debugfs(struct qpnp_qg *chip)
 }
 #endif
 
-#define DEFAULT_VBATT_EMPTY_MV		3200
-#define DEFAULT_VBATT_EMPTY_COLD_MV	3000
-#define DEFAULT_VBATT_CUTOFF_MV		3400
-#define DEFAULT_VBATT_LOW_MV		3500
-#define DEFAULT_VBATT_LOW_COLD_MV	3800
-#define DEFAULT_ITERM_MA		100
 #define DEFAULT_S2_FIFO_LENGTH		5
 #define DEFAULT_S2_VBAT_LOW_LENGTH	2
 #define DEFAULT_S2_ACC_LENGTH		128
 #define DEFAULT_S2_ACC_INTVL_MS		100
-#define DEFAULT_DELTA_SOC		1
-#define DEFAULT_SHUTDOWN_SOC_SECS	360
-#define DEFAULT_COLD_TEMP_THRESHOLD	0
+#define DEFAULT_SLEEP_S2_FIFO_LENGTH	8
+#define DEFAULT_SLEEP_S2_ACC_LENGTH	256
+#define DEFAULT_SLEEP_S2_ACC_INTVL_MS	200
+#define DEFAULT_FAST_CHG_S2_FIFO_LENGTH	1
+static int qg_parse_s2_dt(struct qpnp_qg *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+	u32 temp;
+
+	/* S2 state params */
+	rc = of_property_read_u32(node, "qcom,s2-fifo-length", &temp);
+	if (rc < 0)
+		chip->dt.s2_fifo_length = DEFAULT_S2_FIFO_LENGTH;
+	else
+		chip->dt.s2_fifo_length = temp;
+
+	rc = of_property_read_u32(node, "qcom,s2-vbat-low-fifo-length", &temp);
+	if (rc < 0)
+		chip->dt.s2_vbat_low_fifo_length = DEFAULT_S2_VBAT_LOW_LENGTH;
+	else
+		chip->dt.s2_vbat_low_fifo_length = temp;
+
+	rc = of_property_read_u32(node, "qcom,s2-acc-length", &temp);
+	if (rc < 0)
+		chip->dt.s2_acc_length = DEFAULT_S2_ACC_LENGTH;
+	else
+		chip->dt.s2_acc_length = temp;
+
+	rc = of_property_read_u32(node, "qcom,s2-acc-interval-ms", &temp);
+	if (rc < 0)
+		chip->dt.s2_acc_intvl_ms = DEFAULT_S2_ACC_INTVL_MS;
+	else
+		chip->dt.s2_acc_intvl_ms = temp;
+
+	qg_dbg(chip, QG_DEBUG_PON, "DT: S2 FIFO length=%d low_vbat_length=%d acc_length=%d acc_interval=%d\n",
+		chip->dt.s2_fifo_length, chip->dt.s2_vbat_low_fifo_length,
+		chip->dt.s2_acc_length, chip->dt.s2_acc_intvl_ms);
+
+	if (of_property_read_bool(node, "qcom,qg-sleep-config")) {
+
+		chip->dt.qg_sleep_config = true;
+
+		rc = of_property_read_u32(node,
+				"qcom,sleep-s2-fifo-length", &temp);
+		if (rc < 0)
+			chip->dt.sleep_s2_fifo_length =
+					DEFAULT_SLEEP_S2_FIFO_LENGTH;
+		else
+			chip->dt.sleep_s2_fifo_length = temp;
+
+		rc = of_property_read_u32(node,
+				"qcom,sleep-s2-acc-length", &temp);
+		if (rc < 0)
+			chip->dt.sleep_s2_acc_length =
+					DEFAULT_SLEEP_S2_ACC_LENGTH;
+		else
+			chip->dt.sleep_s2_acc_length = temp;
+
+		rc = of_property_read_u32(node,
+				"qcom,sleep-s2-acc-intvl-ms", &temp);
+		if (rc < 0)
+			chip->dt.sleep_s2_acc_intvl_ms =
+					DEFAULT_SLEEP_S2_ACC_INTVL_MS;
+		else
+			chip->dt.sleep_s2_acc_intvl_ms = temp;
+	}
+
+	if (of_property_read_bool(node, "qcom,qg-fast-chg-config")) {
+
+		chip->dt.qg_fast_chg_cfg = true;
+
+		rc = of_property_read_u32(node,
+				"qcom,fast-chg-s2-fifo-length", &temp);
+		if (rc < 0)
+			chip->dt.fast_chg_s2_fifo_length =
+					DEFAULT_FAST_CHG_S2_FIFO_LENGTH;
+		else
+			chip->dt.fast_chg_s2_fifo_length = temp;
+	}
+
+	return 0;
+}
+
 #define DEFAULT_CL_MIN_START_SOC	10
 #define DEFAULT_CL_MAX_START_SOC	15
 #define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
@@ -3400,11 +3685,100 @@ static void qg_create_debugfs(struct qpnp_qg *chip)
 #define DEFAULT_CL_MIN_LIM_DECIPERC	500
 #define DEFAULT_CL_MAX_LIM_DECIPERC	100
 #define DEFAULT_CL_DELTA_BATT_SOC	10
+static int qg_parse_cl_dt(struct qpnp_qg *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+	u32 temp;
+
+	if (chip->dt.cl_disable)
+		return 0;
+
+	chip->dt.cl_feedback_on = of_property_read_bool(node,
+					"qcom,cl-feedback-on");
+
+	rc = of_property_read_u32(node, "qcom,cl-min-start-soc", &temp);
+	if (rc < 0)
+		chip->cl->dt.min_start_soc = DEFAULT_CL_MIN_START_SOC;
+	else
+		chip->cl->dt.min_start_soc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-start-soc", &temp);
+	if (rc < 0)
+		chip->cl->dt.max_start_soc = DEFAULT_CL_MAX_START_SOC;
+	else
+		chip->cl->dt.max_start_soc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+	if (rc < 0)
+		chip->cl->dt.min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+	else
+		chip->cl->dt.min_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+	if (rc < 0)
+		chip->cl->dt.max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+	else
+		chip->cl->dt.max_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+	if (rc < 0)
+		chip->cl->dt.max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+	else
+		chip->cl->dt.max_cap_inc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+	if (rc < 0)
+		chip->cl->dt.max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+	else
+		chip->cl->dt.max_cap_dec = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+	if (rc < 0)
+		chip->cl->dt.min_cap_limit =
+					DEFAULT_CL_MIN_LIM_DECIPERC;
+	else
+		chip->cl->dt.min_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+	if (rc < 0)
+		chip->cl->dt.max_cap_limit =
+					DEFAULT_CL_MAX_LIM_DECIPERC;
+	else
+		chip->cl->dt.max_cap_limit = temp;
+
+	chip->cl->dt.min_delta_batt_soc = DEFAULT_CL_DELTA_BATT_SOC;
+	/* read from DT property and update, if value exists */
+	of_property_read_u32(node, "qcom,cl-min-delta-batt-soc",
+				&chip->cl->dt.min_delta_batt_soc);
+
+	chip->cl->dt.cl_wt_enable = of_property_read_bool(node,
+						"qcom,cl-wt-enable");
+
+	qg_dbg(chip, QG_DEBUG_PON, "DT: cl_min_start_soc=%d cl_max_start_soc=%d cl_min_temp=%d cl_max_temp=%d chip->cl->dt.cl_wt_enable=%d\n",
+		chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
+		chip->cl->dt.min_temp, chip->cl->dt.max_temp,
+		chip->cl->dt.cl_wt_enable);
+
+	return 0;
+}
+
+#define DEFAULT_VBATT_EMPTY_MV		3200
+#define DEFAULT_VBATT_EMPTY_COLD_MV	3000
+#define DEFAULT_VBATT_CUTOFF_MV		3400
+#define DEFAULT_VBATT_LOW_MV		3500
+#define DEFAULT_VBATT_LOW_COLD_MV	3800
+#define DEFAULT_ITERM_MA		100
+#define DEFAULT_DELTA_SOC		1
+#define DEFAULT_SHUTDOWN_SOC_SECS	360
+#define DEFAULT_COLD_TEMP_THRESHOLD	0
 #define DEFAULT_SHUTDOWN_TEMP_DIFF	60	/* 6 degC */
 #define DEFAULT_ESR_QUAL_CURRENT_UA	130000
 #define DEFAULT_ESR_QUAL_VBAT_UV	7000
 #define DEFAULT_ESR_DISABLE_SOC		1000
 #define ESR_CHG_MIN_IBAT_UA		(-450000)
+#define DEFAULT_SLEEP_TIME_SECS		1800 /* 30 mins */
+#define DEFAULT_SYS_MIN_VOLT_MV		2800
 static int qg_parse_dt(struct qpnp_qg *chip)
 {
 	int rc = 0;
@@ -3466,34 +3840,13 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 		return -EINVAL;
 	}
 
-	/* S2 state params */
-	rc = of_property_read_u32(node, "qcom,s2-fifo-length", &temp);
+	rc = qg_parse_s2_dt(chip);
 	if (rc < 0)
-		chip->dt.s2_fifo_length = DEFAULT_S2_FIFO_LENGTH;
-	else
-		chip->dt.s2_fifo_length = temp;
+		pr_err("Failed to parse S2 DT params rc=%d\n", rc);
 
-	rc = of_property_read_u32(node, "qcom,s2-vbat-low-fifo-length", &temp);
+	rc = qg_parse_cl_dt(chip);
 	if (rc < 0)
-		chip->dt.s2_vbat_low_fifo_length = DEFAULT_S2_VBAT_LOW_LENGTH;
-	else
-		chip->dt.s2_vbat_low_fifo_length = temp;
-
-	rc = of_property_read_u32(node, "qcom,s2-acc-length", &temp);
-	if (rc < 0)
-		chip->dt.s2_acc_length = DEFAULT_S2_ACC_LENGTH;
-	else
-		chip->dt.s2_acc_length = temp;
-
-	rc = of_property_read_u32(node, "qcom,s2-acc-interval-ms", &temp);
-	if (rc < 0)
-		chip->dt.s2_acc_intvl_ms = DEFAULT_S2_ACC_INTVL_MS;
-	else
-		chip->dt.s2_acc_intvl_ms = temp;
-
-	qg_dbg(chip, QG_DEBUG_PON, "DT: S2 FIFO length=%d low_vbat_length=%d acc_length=%d acc_interval=%d\n",
-		chip->dt.s2_fifo_length, chip->dt.s2_vbat_low_fifo_length,
-		chip->dt.s2_acc_length, chip->dt.s2_acc_intvl_ms);
+		pr_err("Failed to parse CL parameters rc=%d\n", rc);
 
 	/* OCV params */
 	rc = of_property_read_u32(node, "qcom,ocv-timer-expiry-min", &temp);
@@ -3641,77 +3994,22 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 	else
 		chip->dt.shutdown_soc_threshold = temp;
 
+	rc = of_property_read_u32(node, "qcom,qg-sys-min-voltage", &temp);
+	if (rc < 0)
+		chip->dt.sys_min_volt_mv = DEFAULT_SYS_MIN_VOLT_MV;
+	else
+		chip->dt.sys_min_volt_mv = temp;
+
 	chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
 
 	chip->dt.use_s7_ocv = of_property_read_bool(node, "qcom,qg-use-s7-ocv");
 
-	/* Capacity learning params*/
-	if (!chip->dt.cl_disable) {
-		chip->dt.cl_feedback_on = of_property_read_bool(node,
-						"qcom,cl-feedback-on");
+	rc = of_property_read_u32(node, "qcom,min-sleep-time-secs", &temp);
+	if (rc < 0)
+		chip->dt.min_sleep_time_secs = DEFAULT_SLEEP_TIME_SECS;
+	else
+		chip->dt.min_sleep_time_secs = temp;
 
-		rc = of_property_read_u32(node, "qcom,cl-min-start-soc", &temp);
-		if (rc < 0)
-			chip->cl->dt.min_start_soc = DEFAULT_CL_MIN_START_SOC;
-		else
-			chip->cl->dt.min_start_soc = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-max-start-soc", &temp);
-		if (rc < 0)
-			chip->cl->dt.max_start_soc = DEFAULT_CL_MAX_START_SOC;
-		else
-			chip->cl->dt.max_start_soc = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
-		if (rc < 0)
-			chip->cl->dt.min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
-		else
-			chip->cl->dt.min_temp = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
-		if (rc < 0)
-			chip->cl->dt.max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
-		else
-			chip->cl->dt.max_temp = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
-		if (rc < 0)
-			chip->cl->dt.max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
-		else
-			chip->cl->dt.max_cap_inc = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
-		if (rc < 0)
-			chip->cl->dt.max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
-		else
-			chip->cl->dt.max_cap_dec = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
-		if (rc < 0)
-			chip->cl->dt.min_cap_limit =
-						DEFAULT_CL_MIN_LIM_DECIPERC;
-		else
-			chip->cl->dt.min_cap_limit = temp;
-
-		rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
-		if (rc < 0)
-			chip->cl->dt.max_cap_limit =
-						DEFAULT_CL_MAX_LIM_DECIPERC;
-		else
-			chip->cl->dt.max_cap_limit = temp;
-
-		chip->cl->dt.min_delta_batt_soc = DEFAULT_CL_DELTA_BATT_SOC;
-		/* read from DT property and update, if value exists */
-		of_property_read_u32(node, "qcom,cl-min-delta-batt-soc",
-					&chip->cl->dt.min_delta_batt_soc);
-
-		chip->cl->dt.cl_wt_enable = of_property_read_bool(node,
-							"qcom,cl-wt-enable");
-
-		qg_dbg(chip, QG_DEBUG_PON, "DT: cl_min_start_soc=%d cl_max_start_soc=%d cl_min_temp=%d cl_max_temp=%d\n",
-			chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
-			chip->cl->dt.min_temp, chip->cl->dt.max_temp);
-	}
 	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
 			chip->dt.delta_soc, chip->dt.qg_ext_sense);
@@ -3730,6 +4028,7 @@ static int process_suspend(struct qpnp_qg *chip)
 		return 0;
 
 	cancel_delayed_work_sync(&chip->ttf->ttf_work);
+	cancel_delayed_work_sync(&chip->qg_sleep_exit_work);
 
 	chip->suspend_data = false;
 
@@ -3738,6 +4037,13 @@ static int process_suspend(struct qpnp_qg *chip)
 
 	/* ignore any suspend processing if we are charging */
 	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		/* Reset the sleep config if we are charging */
+		if (chip->dt.qg_sleep_config) {
+			qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Charging - Exit S2_SLEEP\n");
+			rc = qg_config_s2_state(chip, S2_SLEEP, false, true);
+			if (rc < 0)
+				pr_err("Failed to exit S2-sleep rc=%d\n", rc);
+		}
 		qg_dbg(chip, QG_DEBUG_PM, "Charging @ suspend - ignore processing\n");
 		return 0;
 	}
@@ -3755,12 +4061,23 @@ static int process_suspend(struct qpnp_qg *chip)
 		return rc;
 	}
 	sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK;
-	/*
-	 * If the real-time FIFO count is greater than
-	 * the the #fifo to enter sleep, save the FIFO data
-	 * and reset the fifo count.
-	 */
-	if (fifo_rt_length >= (chip->dt.s2_fifo_length - sleep_fifo_length)) {
+
+	if (chip->dt.qg_sleep_config) {
+		qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Forcing S2_SLEEP\n");
+		rc = qg_config_s2_state(chip, S2_SLEEP, true, true);
+		if (rc < 0)
+			pr_err("Failed to config S2_SLEEP rc=%d\n", rc);
+		if (chip->kdata.fifo_length > 0)
+			chip->suspend_data = true;
+	} else if (fifo_rt_length >=
+			(chip->dt.s2_fifo_length - sleep_fifo_length)) {
+		/*
+		 * If the real-time FIFO count is greater than
+		 * the the #fifo to enter sleep, save the FIFO data
+		 * and reset the fifo count. This avoids a guaranteed wakeup
+		 * due to the fifo_done event, as the current FIFO length is already
+		 * beyond the sleep length.
+		 */
 		rc = qg_master_hold(chip, true);
 		if (rc < 0) {
 			pr_err("Failed to hold master, rc=%d\n", rc);
@@ -3780,29 +4097,40 @@ static int process_suspend(struct qpnp_qg *chip)
 			return rc;
 		}
 		/* FIFOs restarted */
-		chip->last_fifo_update_time = ktime_get();
+		chip->last_fifo_update_time = ktime_get_boottime();
 
 		chip->suspend_data = true;
 	}
 
-	qg_dbg(chip, QG_DEBUG_PM, "FIFO rt_length=%d sleep_fifo_length=%d default_s2_count=%d suspend_data=%d\n",
+	get_rtc_time(&chip->suspend_time);
+
+	qg_dbg(chip, QG_DEBUG_PM, "FIFO rt_length=%d sleep_fifo_length=%d default_s2_count=%d suspend_data=%d time=%d\n",
 			fifo_rt_length, sleep_fifo_length,
-			chip->dt.s2_fifo_length, chip->suspend_data);
+			chip->dt.s2_fifo_length, chip->suspend_data,
+			chip->suspend_time);
 
 	return rc;
 }
 
+#define QG_SLEEP_EXIT_TIME_MS		15000 /* 15 secs */
 static int process_resume(struct qpnp_qg *chip)
 {
 	u8 status2 = 0, rt_status = 0;
 	u32 ocv_uv = 0, ocv_raw = 0;
 	int rc;
-	unsigned long rtc_sec = 0;
+	unsigned long rtc_sec = 0, sleep_time_secs = 0;
 
 	/* skip if profile is not loaded */
 	if (!chip->profile_loaded)
 		return 0;
 
+	get_rtc_time(&rtc_sec);
+	sleep_time_secs = rtc_sec - chip->suspend_time;
+
+	if (chip->dt.qg_sleep_config)
+		schedule_delayed_work(&chip->qg_sleep_exit_work,
+				msecs_to_jiffies(QG_SLEEP_EXIT_TIME_MS));
+
 	rc = qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status2, 1);
 	if (rc < 0) {
 		pr_err("Failed to read status2 register, rc=%d\n", rc);
@@ -3818,12 +4146,15 @@ static int process_resume(struct qpnp_qg *chip)
 
 		 /* Clear suspend data as there has been a GOOD OCV */
 		memset(&chip->kdata, 0, sizeof(chip->kdata));
-		get_rtc_time(&rtc_sec);
 		chip->kdata.fifo_time = (u32)rtc_sec;
 		chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv;
 		chip->kdata.param[QG_GOOD_OCV_UV].valid = true;
 		chip->suspend_data = false;
 
+		/* allow SOC jump if we have slept longer */
+		if (sleep_time_secs >= chip->dt.min_sleep_time_secs)
+			chip->force_soc = true;
+
 		qg_dbg(chip, QG_DEBUG_PM, "GOOD OCV @ resume good_ocv=%d uV\n",
 				ocv_uv);
 	}
@@ -3836,9 +4167,10 @@ static int process_resume(struct qpnp_qg *chip)
 	}
 	rt_status &= FIFO_UPDATE_DONE_INT_LAT_STS_BIT;
 
-	qg_dbg(chip, QG_DEBUG_PM, "FIFO_DONE_STS=%d suspend_data=%d good_ocv=%d\n",
+	qg_dbg(chip, QG_DEBUG_PM, "FIFO_DONE_STS=%d suspend_data=%d good_ocv=%d sleep_time=%d secs\n",
 				!!rt_status, chip->suspend_data,
-				chip->kdata.param[QG_GOOD_OCV_UV].valid);
+				chip->kdata.param[QG_GOOD_OCV_UV].valid,
+				sleep_time_secs);
 	/*
 	 * If this is not a wakeup from FIFO-done,
 	 * process the data immediately if - we have data from
@@ -3966,6 +4298,7 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, chip);
 	INIT_WORK(&chip->udata_work, process_udata_work);
 	INIT_WORK(&chip->qg_status_change_work, qg_status_change_work);
+	INIT_DELAYED_WORK(&chip->qg_sleep_exit_work, qg_sleep_exit_work);
 	mutex_init(&chip->bus_lock);
 	mutex_init(&chip->soc_lock);
 	mutex_init(&chip->data_lock);
@@ -4134,6 +4467,7 @@ static int qpnp_qg_remove(struct platform_device *pdev)
 	qg_batterydata_exit();
 	qg_soc_exit(chip);
 
+	cancel_delayed_work_sync(&chip->qg_sleep_exit_work);
 	cancel_work_sync(&chip->udata_work);
 	cancel_work_sync(&chip->qg_status_change_work);
 	sysfs_remove_groups(&chip->dev->kobj, qg_groups);
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index c69c47b..1bf6027 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -153,7 +153,7 @@ static struct smb_params smb5_pm8150b_params = {
 		.name   = "DC input current limit",
 		.reg    = DCDC_CFG_REF_MAX_PSNS_REG,
 		.min_u  = 0,
-		.max_u  = 1500000,
+		.max_u  = DCIN_ICL_MAX_UA,
 		.step_u = 50000,
 	},
 	.jeita_cc_comp_hot	= {
@@ -292,6 +292,11 @@ enum {
 	SMB_THERM,
 };
 
+static const struct clamp_config clamp_levels[] = {
+	{ {0x11C6, 0x11F9, 0x13F1}, {0x60, 0x2E, 0x90} },
+	{ {0x11C6, 0x11F9, 0x13F1}, {0x60, 0x2B, 0x9C} },
+};
+
 #define PMI632_MAX_ICL_UA	3000000
 #define PM6150_MAX_FCC_UA	3000000
 static int smb5_chg_config_init(struct smb5 *chip)
@@ -608,7 +613,7 @@ static int smb5_parse_dt_adc_channels(struct smb_charger *chg)
 
 static int smb5_parse_dt_currents(struct smb5 *chip, struct device_node *node)
 {
-	int rc = 0;
+	int rc = 0, tmp;
 	struct smb_charger *chg = &chip->chg;
 
 	rc = of_property_read_u32(node,
@@ -640,6 +645,12 @@ static int smb5_parse_dt_currents(struct smb5 *chip, struct device_node *node)
 	rc = of_property_read_u32(node, "qcom,chg-term-current-ma",
 			&chip->dt.term_current_thresh_hi_ma);
 
+	chg->wls_icl_ua = DCIN_ICL_MAX_UA;
+	rc = of_property_read_u32(node, "qcom,wls-current-max-ua",
+			&tmp);
+	if (!rc && tmp < DCIN_ICL_MAX_UA)
+		chg->wls_icl_ua = tmp;
+
 	return 0;
 }
 
@@ -701,6 +712,33 @@ static int smb5_parse_dt(struct smb5 *chip)
 	return 0;
 }
 
+static int smb5_set_prop_comp_clamp_level(struct smb_charger *chg,
+			     const union power_supply_propval *val)
+{
+	int rc = 0, i;
+	struct clamp_config clamp_config;
+	enum comp_clamp_levels level;
+
+	level = val->intval;
+	if (level >= MAX_CLAMP_LEVEL) {
+		pr_err("Invalid comp clamp level=%d\n", val->intval);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(clamp_config.reg); i++) {
+		rc = smblib_write(chg, clamp_levels[level].reg[i],
+			     clamp_levels[level].val[i]);
+		if (rc < 0)
+			dev_err(chg->dev,
+				"Failed to configure comp clamp settings for reg=0x%04x rc=%d\n",
+				   clamp_levels[level].reg[i], rc);
+	}
+
+	chg->comp_clamp_level = val->intval;
+
+	return rc;
+}
+
 /************************
  * USB PSY REGISTRATION *
  ************************/
@@ -732,6 +770,7 @@ static enum power_supply_property smb5_usb_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_LIMIT,
 	POWER_SUPPLY_PROP_SMB_EN_MODE,
 	POWER_SUPPLY_PROP_SMB_EN_REASON,
+	POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_MOISTURE_DETECTED,
 	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
@@ -875,6 +914,9 @@ static int smb5_usb_get_prop(struct power_supply *psy,
 		val->intval = get_client_vote(chg->usb_icl_votable,
 					THERMAL_THROTTLE_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_ADAPTER_CC_MODE:
+		val->intval = chg->adapter_cc_mode;
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -961,6 +1003,9 @@ static int smb5_usb_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_LIMIT:
 		smblib_set_prop_usb_voltage_max_limit(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_ADAPTER_CC_MODE:
+		chg->adapter_cc_mode = val->intval;
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -978,6 +1023,7 @@ static int smb5_usb_prop_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 	case POWER_SUPPLY_PROP_THERM_ICL_LIMIT:
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_LIMIT:
+	case POWER_SUPPLY_PROP_ADAPTER_CC_MODE:
 		return 1;
 	default:
 		break;
@@ -1128,6 +1174,10 @@ static enum power_supply_property smb5_usb_main_props[] = {
 	POWER_SUPPLY_PROP_FLASH_TRIGGER,
 	POWER_SUPPLY_PROP_TOGGLE_STAT,
 	POWER_SUPPLY_PROP_MAIN_FCC_MAX,
+	POWER_SUPPLY_PROP_IRQ_STATUS,
+	POWER_SUPPLY_PROP_FORCE_MAIN_FCC,
+	POWER_SUPPLY_PROP_FORCE_MAIN_ICL,
+	POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL,
 };
 
 static int smb5_usb_main_get_prop(struct power_supply *psy,
@@ -1173,6 +1223,20 @@ static int smb5_usb_main_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_MAIN_FCC_MAX:
 		val->intval = chg->main_fcc_max;
 		break;
+	case POWER_SUPPLY_PROP_IRQ_STATUS:
+		rc = smblib_get_irq_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
+		rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
+		val->intval = chg->comp_clamp_level;
+		break;
 	default:
 		pr_debug("get prop %d is not supported in usb-main\n", psp);
 		rc = -EINVAL;
@@ -1243,6 +1307,17 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 		chg->main_fcc_max = val->intval;
 		rerun_election(chg->fcc_votable);
 		break;
+	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
+		vote_override(chg->fcc_main_votable, CC_MODE_VOTER,
+				(val->intval < 0) ? false : true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
+		vote_override(chg->usb_icl_votable, CC_MODE_VOTER,
+				(val->intval < 0) ? false : true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
+		rc = smb5_set_prop_comp_clamp_level(chg, val);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -1260,6 +1335,9 @@ static int smb5_usb_main_prop_is_writeable(struct power_supply *psy,
 	switch (psp) {
 	case POWER_SUPPLY_PROP_TOGGLE_STAT:
 	case POWER_SUPPLY_PROP_MAIN_FCC_MAX:
+	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
+	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
+	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
 		rc = 1;
 		break;
 	default:
@@ -1346,6 +1424,7 @@ static int smb5_dc_get_prop(struct power_supply *psy,
 		break;
 	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
 		rc = smblib_get_prop_voltage_wls_output(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_DC_RESET:
 		val->intval = 0;
 		break;
@@ -1391,15 +1470,14 @@ static int smb5_dc_set_prop(struct power_supply *psy,
 static int smb5_dc_prop_is_writeable(struct power_supply *psy,
 		enum power_supply_property psp)
 {
-	int rc;
-
 	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+		return 1;
 	default:
-		rc = 0;
 		break;
 	}
 
-	return rc;
+	return 0;
 }
 
 static const struct power_supply_desc dc_psy_desc = {
@@ -1449,6 +1527,7 @@ static enum power_supply_property smb5_batt_props[] = {
 	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CURRENT_QNOVO,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -1540,6 +1619,9 @@ static int smb5_batt_get_prop(struct power_supply *psy,
 		val->intval = get_client_vote(chg->fcc_votable,
 					      BATT_PROFILE_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		val->intval = get_effective_result(chg->fcc_votable);
+		break;
 	case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
 		rc = smblib_get_prop_batt_iterm(chg, val);
 		break;
@@ -2090,13 +2172,15 @@ static int smb5_configure_iterm_thresholds(struct smb5 *chip)
 
 	switch (chip->dt.term_current_src) {
 	case ITERM_SRC_ADC:
-		rc = smblib_masked_write(chg, CHGR_ADC_TERM_CFG_REG,
-				TERM_BASED_ON_SYNC_CONV_OR_SAMPLE_CNT,
-				TERM_BASED_ON_SAMPLE_CNT);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't configure ADC_ITERM_CFG rc=%d\n",
-					rc);
-			return rc;
+		if (chip->chg.smb_version == PM8150B_SUBTYPE) {
+			rc = smblib_masked_write(chg, CHGR_ADC_TERM_CFG_REG,
+					TERM_BASED_ON_SYNC_CONV_OR_SAMPLE_CNT,
+					TERM_BASED_ON_SAMPLE_CNT);
+			if (rc < 0) {
+				dev_err(chg->dev, "Couldn't configure ADC_ITERM_CFG rc=%d\n",
+						rc);
+				return rc;
+			}
 		}
 		rc = smb5_configure_iterm_thresholds_adc(chip);
 		break;
@@ -2163,8 +2247,8 @@ static int smb5_init_dc_peripheral(struct smb_charger *chg)
 	if (chg->smb_version == PMI632_SUBTYPE)
 		return 0;
 
-	/* set DC icl_max 1A */
-	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, 1000000);
+	/* Set DCIN ICL to 100 mA */
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, DCIN_ICL_MIN_UA);
 	if (rc < 0) {
 		dev_err(chg->dev, "Couldn't set dc_icl rc=%d\n", rc);
 		return rc;
@@ -2827,7 +2911,8 @@ static struct smb_irq_info smb5_irqs[] = {
 	},
 	[DCIN_UV_IRQ] = {
 		.name		= "dcin-uv",
-		.handler	= default_irq_handler,
+		.handler	= dcin_uv_irq_handler,
+		.wake		= true,
 	},
 	[DCIN_OV_IRQ] = {
 		.name		= "dcin-ov",
@@ -2946,6 +3031,11 @@ static struct smb_irq_info smb5_irqs[] = {
 	[FLASH_EN_IRQ] = {
 		.name		= "flash-en",
 	},
+	/* SDAM */
+	[SDAM_STS_IRQ] = {
+		.name		= "sdam-sts",
+		.handler	= sdam_sts_change_irq_handler,
+	},
 };
 
 static int smb5_get_irq_index_byname(const char *irq_name)
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 771d0b3..bf913cd 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -19,6 +19,8 @@
 #include <linux/regmap.h>
 #include <linux/iio/consumer.h>
 
+#define MISC_CSIR_LSB_REG		0x9F1
+#define MISC_CSIR_MSB_REG		0x9F2
 #define CORE_STATUS1_REG		0x1006
 #define WIN_OV_BIT			BIT(0)
 #define WIN_UV_BIT			BIT(1)
@@ -75,6 +77,9 @@
 #define WINDOW_DETECTION_DELTA_X1P0	0
 #define WINDOW_DETECTION_DELTA_X1P5	1
 
+#define CORE_FTRIM_DIS_REG		0x1035
+#define TR_DIS_ILIM_DET_BIT		BIT(4)
+
 #define CORE_ATEST1_SEL_REG		0x10E2
 #define ATEST1_OUTPUT_ENABLE_BIT	BIT(7)
 #define ATEST1_SEL_MASK			GENMASK(6, 0)
@@ -90,8 +95,16 @@
 #define SRC_VOTER		"SRC_VOTER"
 #define SWITCHER_TOGGLE_VOTER	"SWITCHER_TOGGLE_VOTER"
 #define SOC_LEVEL_VOTER		"SOC_LEVEL_VOTER"
+#define HW_DISABLE_VOTER	"HW_DISABLE_VOTER"
+#define CC_MODE_VOTER		"CC_MODE_VOTER"
 
+#define CP_MASTER		0
+#define CP_SLAVE		1
 #define THERMAL_SUSPEND_DECIDEGC	1400
+#define MAX_ILIM_UA			3200000
+#define MAX_ILIM_DUAL_CP_UA		6400000
+#define CC_MODE_TAPER_DELTA_UA		200000
+#define DEFAULT_TAPER_DELTA_UA		100000
 
 #define smb1390_dbg(chip, reason, fmt, ...)				\
 	do {								\
@@ -115,6 +128,12 @@ enum {
 	NUM_IRQS,
 };
 
+enum isns_mode {
+	ISNS_MODE_OFF = 0,
+	ISNS_MODE_ACTIVE,
+	ISNS_MODE_STANDBY,
+};
+
 enum {
 	SWITCHER_EN = 0,
 	SMB_PIN_EN,
@@ -153,8 +172,10 @@ struct smb1390 {
 	struct votable		*ilim_votable;
 	struct votable		*fcc_votable;
 	struct votable		*fv_votable;
+	struct votable		*cp_awake_votable;
 
 	/* power supplies */
+	struct power_supply	*cps_psy;
 	struct power_supply	*usb_psy;
 	struct power_supply	*batt_psy;
 	struct power_supply	*dc_psy;
@@ -163,6 +184,7 @@ struct smb1390 {
 	int			irqs[NUM_IRQS];
 	bool			status_change_running;
 	bool			taper_work_running;
+	bool			smb_init_done;
 	struct smb1390_iio	iio;
 	int			irq_status;
 	int			taper_entry_fv;
@@ -174,6 +196,52 @@ struct smb1390 {
 	u32			min_ilim_ua;
 	u32			max_temp_alarm_degc;
 	u32			max_cutoff_soc;
+	u32			pl_output_mode;
+	u32			cp_role;
+	enum isns_mode		current_capability;
+	bool			batt_soc_validated;
+	int			cp_slave_thr_taper_ua;
+};
+
+struct smb_cfg {
+	u16	address;
+	u8	mask;
+	u8	val;
+};
+
+/* SMB1390 rev2/3 for dual charge */
+static const struct smb_cfg smb1390_dual[] = {
+	{0x1031, 0xff, 0x7A},
+	{0x1032, 0xff, 0x07},
+	{0x1035, 0xff, 0x63},
+	{0x1036, 0xff, 0x80},
+	{0x103A, 0xff, 0x44},
+};
+
+/* SMB1390 rev3, CSIR2500, for triple charge */
+static const struct smb_cfg smb1390_csir2500_triple[] = {
+	{0x1030, 0x80, 0x80},
+	{0x1031, 0xff, 0x72},
+	{0x1032, 0xff, 0x03},
+	{0x1033, 0x04, 0x04},
+	{0x1034, 0x80, 0x00},
+	{0x1035, 0xff, 0xE3},
+	{0x1036, 0xff, 0xA0},
+	{0x1037, 0xff, 0x80},
+	{0x1039, 0xff, 0x30},
+	{0x103A, 0xff, 0x40},
+	{0x103B, 0xff, 0x20},
+	{0x103E, 0xff, 0x00},
+};
+
+/* SMB1390 rev3, CSIR 2515 or 2519 for triple charge */
+static const struct smb_cfg smb1390_triple[] = {
+	{0x1031, 0xff, 0x72},
+	{0x1032, 0xff, 0x03},
+	{0x1035, 0xff, 0xE3},
+	{0x1036, 0xff, 0xA0},
+	{0x103A, 0xff, 0x40},
+	{0x1037, 0x04, 0x00},
 };
 
 struct smb_irq {
@@ -260,14 +328,93 @@ static bool is_psy_voter_available(struct smb1390 *chip)
 	return true;
 }
 
+static int smb1390_isns_mode_control(struct smb1390 *chip, enum isns_mode mode)
+{
+	int rc;
+	u8 val;
+
+	switch  (mode) {
+	case ISNS_MODE_ACTIVE:
+		val = ATEST1_OUTPUT_ENABLE_BIT | ISNS_INT_VAL;
+		break;
+	case ISNS_MODE_STANDBY:
+		val = ATEST1_OUTPUT_ENABLE_BIT;
+		break;
+	case ISNS_MODE_OFF:
+	default:
+		val = 0;
+		break;
+	}
+
+	rc = smb1390_masked_write(chip, CORE_ATEST1_SEL_REG,
+				ATEST1_OUTPUT_ENABLE_BIT | ATEST1_SEL_MASK,
+				val);
+	if (rc < 0)
+		pr_err("Couldn't set CORE_ATEST1_SEL_REG, rc = %d\n", rc);
+
+	return rc;
+}
+
+static bool smb1390_is_adapter_cc_mode(struct smb1390 *chip)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	if (!chip->usb_psy) {
+		chip->usb_psy = power_supply_get_by_name("usb");
+		if (!chip->usb_psy)
+			return false;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
+				&pval);
+	if (rc < 0) {
+		pr_err("Couldn't get PPS CC mode status rc=%d\n", rc);
+		return false;
+	}
+
+	return pval.intval;
+}
+
+static bool is_cps_available(struct smb1390 *chip)
+{
+	if (!chip->cps_psy)
+		chip->cps_psy = power_supply_get_by_name("cp_slave");
+	else
+		return true;
+
+	if (!chip->cps_psy)
+		return false;
+
+	return true;
+}
+
 static void cp_toggle_switcher(struct smb1390 *chip)
 {
+	int rc;
+
+	/*
+	 * Disable ILIM detection before toggling the switcher
+	 * to prevent any ILIM interrupt storm while toggling
+	 * the switcher.
+	 */
+	rc = regmap_update_bits(chip->regmap, CORE_FTRIM_DIS_REG,
+			TR_DIS_ILIM_DET_BIT, TR_DIS_ILIM_DET_BIT);
+	if (rc < 0)
+		pr_err("Couldn't disable ILIM rc=%d\n", rc);
+
 	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, true, 0);
 
 	/* Delay for toggling switcher */
 	usleep_range(20, 30);
 
 	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, false, 0);
+
+	rc = regmap_update_bits(chip->regmap, CORE_FTRIM_DIS_REG,
+			TR_DIS_ILIM_DET_BIT, 0);
+	if (rc < 0)
+		pr_err("Couldn't enable ILIM rc=%d\n", rc);
 }
 
 static int smb1390_get_cp_en_status(struct smb1390 *chip, int id, bool *enable)
@@ -298,6 +445,18 @@ static int smb1390_get_cp_en_status(struct smb1390 *chip, int id, bool *enable)
 	return rc;
 }
 
+static int smb1390_set_ilim(struct smb1390 *chip, int ilim_ua)
+{
+	int rc;
+
+	rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
+			CFG_ILIM_MASK, ilim_ua);
+	if (rc < 0)
+		pr_err("Failed to write ILIM Register, rc=%d\n", rc);
+
+	return rc;
+}
+
 static irqreturn_t default_irq_handler(int irq, void *data)
 {
 	struct smb1390 *chip = data;
@@ -403,7 +562,13 @@ static int smb1390_get_die_temp(struct smb1390 *chip,
 	return rc;
 }
 
-static int smb1390_get_isns(struct smb1390 *chip,
+static int smb1390_get_isns(int temp)
+{
+	/* ISNS = 2 * (1496 - 1390_therm_input * 0.00356) * 1000 uA */
+	return ((1496 * 1000 - div_s64((s64)temp * 3560, 1000)) * 2);
+}
+
+static int smb1390_get_isns_master(struct smb1390 *chip,
 			union power_supply_propval *val)
 {
 	int temp = 0;
@@ -423,12 +588,105 @@ static int smb1390_get_isns(struct smb1390 *chip,
 	if (!enable)
 		return -ENODATA;
 
+	/*
+	 * Since master and slave share temp_pin line
+	 * which is re-used to measure isns, configure the
+	 * master as follows:
+	 * 1. Put slave in standby mode
+	 * 2. Configure master to provide current reading
+	 * 3. Read current value
+	 * 4. Configure master back to report temperature
+	 */
 	mutex_lock(&chip->die_chan_lock);
-	rc = smb1390_masked_write(chip, CORE_ATEST1_SEL_REG,
-				ATEST1_OUTPUT_ENABLE_BIT | ATEST1_SEL_MASK,
-				ATEST1_OUTPUT_ENABLE_BIT | ISNS_INT_VAL);
+	if (is_cps_available(chip)) {
+		val->intval = ISNS_MODE_STANDBY;
+		rc = power_supply_set_property(chip->cps_psy,
+				POWER_SUPPLY_PROP_CURRENT_CAPABILITY, val);
+		if (rc < 0) {
+			pr_err("Couldn't change slave charging state rc=%d\n",
+				rc);
+			goto unlock;
+		}
+	}
+
+	rc = smb1390_isns_mode_control(chip, ISNS_MODE_ACTIVE);
 	if (rc < 0) {
-		pr_err("Couldn't set CORE_ATEST1_SEL_REG, rc = %d\n", rc);
+		pr_err("Failed to set master in Active mode, rc=%d\n", rc);
+		goto unlock;
+	}
+
+	rc = iio_read_channel_processed(chip->iio.die_temp_chan,
+			&temp);
+	if (rc < 0) {
+		pr_err("Couldn't read die_temp chan for isns, rc = %d\n", rc);
+		goto unlock;
+	}
+
+	rc = smb1390_isns_mode_control(chip, ISNS_MODE_OFF);
+	if (rc < 0)
+		pr_err("Couldn't set master to off mode, rc = %d\n", rc);
+
+	if (is_cps_available(chip)) {
+		val->intval = ISNS_MODE_OFF;
+		rc = power_supply_set_property(chip->cps_psy,
+				POWER_SUPPLY_PROP_CURRENT_CAPABILITY, val);
+		if (rc < 0)
+			pr_err("Couldn't change slave charging state rc=%d\n",
+				rc);
+	}
+
+unlock:
+	mutex_unlock(&chip->die_chan_lock);
+
+	if (rc >= 0)
+		val->intval = smb1390_get_isns(temp);
+
+	return rc;
+}
+
+static int smb1390_get_isns_slave(struct smb1390 *chip,
+			union power_supply_propval *val)
+{
+	int temp = 0;
+	int rc;
+	bool enable;
+
+	if (!is_cps_available(chip)) {
+		val->intval = 0;
+		return 0;
+	}
+	/*
+	 * If the SMB1390 chip is not enabled, an adc channel read may return
+	 * an erroneous value. Return an error to signal the read is not admissible.
+	 */
+	rc = smb1390_get_cp_en_status(chip, SMB_PIN_EN, &enable);
+	if (rc < 0) {
+		pr_err("Couldn't get SMB_PIN enable status, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!enable)
+		return -ENODATA;
+
+	/*
+	 * Since master and slave share temp_pin line
+	 * which is re-used to measure isns, configure the
+	 * slave as follows:
+	 * 1. Put slave in standby mode
+	 * 2. Configure slave in Active mode to provide current reading
+	 * 3. Read current value
+	 */
+	mutex_lock(&chip->die_chan_lock);
+	rc = smb1390_isns_mode_control(chip, ISNS_MODE_STANDBY);
+	if (rc < 0)
+		goto unlock;
+
+	val->intval = ISNS_MODE_ACTIVE;
+	rc = power_supply_set_property(chip->cps_psy,
+			POWER_SUPPLY_PROP_CURRENT_CAPABILITY, val);
+	if (rc < 0) {
+		pr_err("Couldn't change slave charging state rc=%d\n",
+			rc);
 		goto unlock;
 	}
 
@@ -439,18 +697,47 @@ static int smb1390_get_isns(struct smb1390 *chip,
 		goto unlock;
 	}
 
-	rc = smb1390_masked_write(chip, CORE_ATEST1_SEL_REG,
-				ATEST1_OUTPUT_ENABLE_BIT | ATEST1_SEL_MASK, 0);
+	val->intval = ISNS_MODE_OFF;
+	rc = power_supply_set_property(chip->cps_psy,
+			POWER_SUPPLY_PROP_CURRENT_CAPABILITY, val);
+	if (rc < 0)
+		pr_err("Couldn't change slave charging state rc=%d\n",
+			rc);
+
+	rc = smb1390_isns_mode_control(chip, ISNS_MODE_OFF);
 	if (rc < 0)
 		pr_err("Couldn't set CORE_ATEST1_SEL_REG, rc = %d\n", rc);
 
+
 unlock:
 	mutex_unlock(&chip->die_chan_lock);
 
-	/* ISNS = 2 * (1496 - 1390_therm_input * 0.00356) * 1000 uA */
 	if (rc >= 0)
-		val->intval = (1496 * 1000 - div_s64((s64)temp * 3560,
-							1000)) * 2;
+		val->intval = smb1390_get_isns(temp);
+
+	return rc;
+}
+
+static int smb1390_get_cp_ilim(struct smb1390 *chip,
+			       union power_supply_propval *val)
+{
+	int rc = 0, status;
+
+	if (is_cps_available(chip)) {
+		if (!chip->ilim_votable) {
+			chip->ilim_votable = find_votable("CP_ILIM");
+			if (!chip->ilim_votable)
+				return -EINVAL;
+		}
+
+		val->intval = get_effective_result(chip->ilim_votable);
+	} else {
+		rc = smb1390_read(chip, CORE_FTRIM_ILIM_REG, &status);
+		if (!rc)
+			val->intval =
+				((status & CFG_ILIM_MASK) * 100000)
+					+ 500000;
+	}
 
 	return rc;
 }
@@ -477,26 +764,86 @@ static int smb1390_is_batt_soc_valid(struct smb1390 *chip)
 	return true;
 }
 
+static int smb1390_triple_init_hw(struct smb1390 *chip)
+{
+	int i, rc = 0;
+	int csir_lsb = 0, csir_msb = 0;
+	u16 csir = 0;
+
+	smb1390_read(chip, MISC_CSIR_LSB_REG, &csir_lsb);
+	smb1390_read(chip, MISC_CSIR_MSB_REG, &csir_msb);
+	csir = ((csir_msb << 8) | csir_lsb);
+	smb1390_dbg(chip, PR_INFO, "CSIR register = 0x%04x\n", csir);
+
+	if (csir == 0x2500) {
+		for (i = 0; i < ARRAY_SIZE(smb1390_csir2500_triple); i++) {
+			rc = smb1390_masked_write(chip,
+				smb1390_csir2500_triple[i].address,
+				smb1390_csir2500_triple[i].mask,
+				smb1390_csir2500_triple[i].val);
+			if (rc < 0) {
+				pr_err("Failed to configure SMB1390 for triple chg config for address 0x%04x rc=%d\n",
+				       smb1390_csir2500_triple[i].address, rc);
+				return rc;
+			}
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(smb1390_triple); i++) {
+			rc = smb1390_masked_write(chip,
+				smb1390_triple[i].address,
+				smb1390_triple[i].mask,
+				smb1390_triple[i].val);
+			if (rc < 0) {
+				pr_err("Failed to configure SMB1390 for triple chg config for address 0x%04x rc=%d\n",
+				       smb1390_triple[i].address, rc);
+				return rc;
+			}
+		}
+	}
+
+	smb1390_dbg(chip, PR_INFO, "Configured SMB1390 charge pump for triple chg config\n");
+	chip->smb_init_done = true;
+	return rc;
+}
+
+static int smb1390_dual_init_hw(struct smb1390 *chip)
+{
+	int rc = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(smb1390_dual); i++) {
+		rc = smb1390_masked_write(chip,
+			smb1390_dual[i].address,
+			smb1390_dual[i].mask,
+			smb1390_dual[i].val);
+		if (rc < 0) {
+			pr_err("Failed to configure SMB1390 for dual chg config for address 0x%04x rc=%d\n",
+			       smb1390_dual[i].address, rc);
+			return rc;
+		}
+	}
+
+	smb1390_dbg(chip, PR_INFO, "Configured SMB1390 charge pump for Dual chg config\n");
+	return rc;
+}
+
 /* voter callbacks */
 static int smb1390_disable_vote_cb(struct votable *votable, void *data,
 				  int disable, const char *client)
 {
 	struct smb1390 *chip = data;
 	int rc = 0;
+	u8 mask, val;
 
 	if (!is_psy_voter_available(chip) || chip->suspended)
 		return -EAGAIN;
 
-	if (disable) {
-		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-				   CMD_EN_SWITCHER_BIT, 0);
-		if (rc < 0)
-			return rc;
-	} else {
-		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-				   CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
-		if (rc < 0)
-			return rc;
+	mask = CMD_EN_SWITCHER_BIT | CMD_EN_SL_BIT;
+	val = is_cps_available(chip) ? mask : CMD_EN_SWITCHER_BIT;
+	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, mask,
+				  disable ? 0 : val);
+	if (rc < 0) {
+		pr_err("Couldn't write CORE_CONTROL1_REG, rc=%d\n", rc);
+		return rc;
 	}
 
 	/* charging may have been disabled by ILIM; send uevent */
@@ -511,6 +858,7 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 			      int ilim_uA, const char *client)
 {
 	struct smb1390 *chip = data;
+	union power_supply_propval pval = {0, };
 	int rc = 0;
 
 	if (!is_psy_voter_available(chip) || chip->suspended)
@@ -522,20 +870,33 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 		return -EINVAL;
 	}
 
-	rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
-		CFG_ILIM_MASK,
-		DIV_ROUND_CLOSEST(max(ilim_uA, 500000) - 500000, 100000));
-	if (rc < 0) {
-		pr_err("Failed to write ILIM Register, rc=%d\n", rc);
-		return rc;
-	}
-
+	ilim_uA = min(ilim_uA, (is_cps_available(chip) ?
+				MAX_ILIM_DUAL_CP_UA : MAX_ILIM_UA));
 	/* ILIM less than min_ilim_ua, disable charging */
 	if (ilim_uA < chip->min_ilim_ua) {
 		smb1390_dbg(chip, PR_INFO, "ILIM %duA is too low to allow charging\n",
 			ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, true, 0);
 	} else {
+		if (is_cps_available(chip)) {
+			ilim_uA /= 2;
+			pval.intval = DIV_ROUND_CLOSEST(ilim_uA - 500000,
+					100000);
+			rc = power_supply_set_property(chip->cps_psy,
+					POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+					&pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave ilim  rc=%d\n",
+					rc);
+		}
+
+		rc = smb1390_set_ilim(chip,
+		      DIV_ROUND_CLOSEST(ilim_uA - 500000, 100000));
+		if (rc < 0) {
+			pr_err("Failed to set ILIM, rc=%d\n", rc);
+			return rc;
+		}
+
 		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
@@ -543,11 +904,27 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 	return rc;
 }
 
+static int smb1390_awake_vote_cb(struct votable *votable, void *data,
+				 int awake, const char *client)
+{
+	struct smb1390 *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->cp_ws);
+	else
+		__pm_relax(chip->cp_ws);
+
+	smb1390_dbg(chip, PR_INFO, "client: %s awake: %d\n", client, awake);
+
+	return 0;
+}
+
 static int smb1390_notifier_cb(struct notifier_block *nb,
 			       unsigned long event, void *data)
 {
 	struct smb1390 *chip = container_of(nb, struct smb1390, nb);
 	struct power_supply *psy = data;
+	int rc;
 	unsigned long flags;
 
 	if (event != PSY_EVENT_PROP_CHANGED)
@@ -555,19 +932,36 @@ static int smb1390_notifier_cb(struct notifier_block *nb,
 
 	if (strcmp(psy->desc->name, "battery") == 0
 				|| strcmp(psy->desc->name, "usb") == 0
-				|| strcmp(psy->desc->name, "main") == 0) {
+				|| strcmp(psy->desc->name, "main") == 0
+				|| strcmp(psy->desc->name, "cp_slave") == 0) {
 		spin_lock_irqsave(&chip->status_change_lock, flags);
+
 		if (!chip->status_change_running) {
 			chip->status_change_running = true;
 			pm_stay_awake(chip->dev);
 			schedule_work(&chip->status_change_work);
 		}
 		spin_unlock_irqrestore(&chip->status_change_lock, flags);
+
+		/*
+		 * If not already configured for triple chg, configure master
+		 * SMB1390 here for triple chg, if slave is detected.
+		 */
+		if (is_cps_available(chip) && !chip->smb_init_done) {
+			smb1390_dbg(chip, PR_INFO, "SMB1390 slave has registered, configure for triple charging\n");
+			rc = smb1390_triple_init_hw(chip);
+			if (rc < 0)
+				pr_err("Couldn't configure SMB1390 for triple-chg config rc=%d\n",
+					rc);
+		}
 	}
 
 	return NOTIFY_OK;
 }
 
+#define ILIM_NR			10
+#define ILIM_DR			8
+#define ILIM_FACTOR(ilim)	((ilim * ILIM_NR) / ILIM_DR)
 static void smb1390_status_change_work(struct work_struct *work)
 {
 	struct smb1390 *chip = container_of(work, struct smb1390,
@@ -578,8 +972,9 @@ static void smb1390_status_change_work(struct work_struct *work)
 	if (!is_psy_voter_available(chip))
 		goto out;
 
-	vote(chip->disable_votable, SOC_LEVEL_VOTER,
-			smb1390_is_batt_soc_valid(chip) ? false : true, 0);
+	if (!smb1390_is_adapter_cc_mode(chip))
+		vote(chip->disable_votable, SOC_LEVEL_VOTER,
+		     smb1390_is_batt_soc_valid(chip) ? false : true, 0);
 
 	rc = power_supply_get_property(chip->usb_psy,
 			POWER_SUPPLY_PROP_SMB_EN_MODE, &pval);
@@ -596,13 +991,15 @@ static void smb1390_status_change_work(struct work_struct *work)
 			goto out;
 		}
 
+		/* Check for SOC threshold only once before enabling CP */
 		vote(chip->disable_votable, SRC_VOTER, false, 0);
+		if (!chip->batt_soc_validated) {
+			vote(chip->disable_votable, SOC_LEVEL_VOTER,
+				smb1390_is_batt_soc_valid(chip) ?
+				false : true, 0);
+			chip->batt_soc_validated = true;
+		}
 
-		/*
-		 * ILIM is set based on the primary chargers AICL result. This
-		 * ensures VBUS does not collapse due to the current drawn via
-		 * MID.
-		 */
 		if (pval.intval == POWER_SUPPLY_CP_WIRELESS) {
 			vote(chip->ilim_votable, ICL_VOTER, false, 0);
 			rc = power_supply_get_property(chip->dc_psy,
@@ -611,16 +1008,31 @@ static void smb1390_status_change_work(struct work_struct *work)
 				pr_err("Couldn't get dc icl rc=%d\n", rc);
 			else
 				vote(chip->ilim_votable, WIRELESS_VOTER, true,
-								pval.intval);
-		} else { /* QC3 or PPS */
+						pval.intval);
+		} else {
 			vote(chip->ilim_votable, WIRELESS_VOTER, false, 0);
-			rc = power_supply_get_property(chip->usb_psy,
-				POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
-			if (rc < 0)
-				pr_err("Couldn't get usb icl rc=%d\n", rc);
-			else
-				vote(chip->ilim_votable, ICL_VOTER, true,
-								pval.intval);
+			if ((chip->pl_output_mode == POWER_SUPPLY_PL_OUTPUT_VPH)
+				&& (pval.intval == POWER_SUPPLY_CP_PPS)) {
+				rc = power_supply_get_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+					&pval);
+				if (rc < 0)
+					pr_err("Couldn't get PD CURRENT MAX rc=%d\n",
+							rc);
+				else
+					vote(chip->ilim_votable, ICL_VOTER,
+						true, ILIM_FACTOR(pval.intval));
+			} else {
+				rc = power_supply_get_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+					&pval);
+				if (rc < 0)
+					pr_err("Couldn't get usb aicl rc=%d\n",
+							rc);
+				else
+					vote(chip->ilim_votable, ICL_VOTER,
+							true, pval.intval);
+			}
 		}
 
 		/*
@@ -656,10 +1068,12 @@ static void smb1390_status_change_work(struct work_struct *work)
 			}
 		}
 	} else {
+		chip->batt_soc_validated = false;
 		vote(chip->disable_votable, SRC_VOTER, true, 0);
 		vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->fcc_votable, CP_VOTER, false, 0);
 		vote(chip->disable_votable, SOC_LEVEL_VOTER, true, 0);
+		vote_override(chip->ilim_votable, CC_MODE_VOTER, false, 0);
 	}
 
 out:
@@ -667,11 +1081,39 @@ static void smb1390_status_change_work(struct work_struct *work)
 	chip->status_change_running = false;
 }
 
+static int smb1390_validate_slave_chg_taper(struct smb1390 *chip, int fcc_uA)
+{
+	int rc = 0;
+	u8 mask;
+
+	/*
+	 * In Collapse mode, while in Taper, Disable the slave SMB1390
+	 * when FCC drops below a specified threshold.
+	 */
+	if (fcc_uA < (chip->cp_slave_thr_taper_ua) && is_cps_available(chip)) {
+		mask = CMD_EN_SWITCHER_BIT | CMD_EN_SL_BIT;
+		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
+						  mask, CMD_EN_SWITCHER_BIT);
+		if (rc < 0)
+			return rc;
+		/*
+		 * Set ILIM of master CP to Max value = 3.2A once slave is
+		 * disabled to prevent ILIM irq storm.
+		 */
+		smb1390_dbg(chip, PR_INFO, "Set Master ILIM to MAX, post Slave disable in taper, fcc=%d\n",
+									fcc_uA);
+		vote_override(chip->ilim_votable, CC_MODE_VOTER,
+						true, MAX_ILIM_DUAL_CP_UA);
+	}
+
+	return rc;
+}
+
 static void smb1390_taper_work(struct work_struct *work)
 {
 	struct smb1390 *chip = container_of(work, struct smb1390, taper_work);
 	union power_supply_propval pval = {0, };
-	int rc, fcc_uA;
+	int rc, fcc_uA, delta_fcc_uA;
 
 	if (!is_psy_voter_available(chip))
 		goto out;
@@ -695,11 +1137,21 @@ static void smb1390_taper_work(struct work_struct *work)
 		}
 
 		if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+			delta_fcc_uA =
+				(smb1390_is_adapter_cc_mode(chip) ?
+							CC_MODE_TAPER_DELTA_UA :
+							DEFAULT_TAPER_DELTA_UA);
 			fcc_uA = get_effective_result(chip->fcc_votable)
-								- 100000;
+								- delta_fcc_uA;
 			smb1390_dbg(chip, PR_INFO, "taper work reducing FCC to %duA\n",
 				fcc_uA);
 			vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
+			rc = smb1390_validate_slave_chg_taper(chip, fcc_uA);
+			if (rc < 0) {
+				pr_err("Couldn't Disable slave in Taper, rc=%d\n",
+				       rc);
+				goto out;
+			}
 
 			if (fcc_uA < (chip->min_ilim_ua * 2)) {
 				vote(chip->disable_votable, TAPER_END_VOTER,
@@ -725,10 +1177,13 @@ static enum power_supply_property smb1390_charge_pump_props[] = {
 	POWER_SUPPLY_PROP_CP_SWITCHER_EN,
 	POWER_SUPPLY_PROP_CP_DIE_TEMP,
 	POWER_SUPPLY_PROP_CP_ISNS,
+	POWER_SUPPLY_PROP_CP_ISNS_SLAVE,
 	POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER,
 	POWER_SUPPLY_PROP_CP_IRQ_STATUS,
 	POWER_SUPPLY_PROP_CP_ILIM,
 	POWER_SUPPLY_PROP_CHIP_VERSION,
+	POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE,
+	POWER_SUPPLY_PROP_MIN_ICL,
 };
 
 static int smb1390_get_prop(struct power_supply *psy,
@@ -794,7 +1249,10 @@ static int smb1390_get_prop(struct power_supply *psy,
 		}
 		break;
 	case POWER_SUPPLY_PROP_CP_ISNS:
-		rc = smb1390_get_isns(chip, val);
+		rc = smb1390_get_isns_master(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CP_ISNS_SLAVE:
+		rc = smb1390_get_isns_slave(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER:
 		val->intval = 0;
@@ -810,14 +1268,17 @@ static int smb1390_get_prop(struct power_supply *psy,
 			val->intval |= status;
 		break;
 	case POWER_SUPPLY_PROP_CP_ILIM:
-		rc = smb1390_read(chip, CORE_FTRIM_ILIM_REG, &status);
-		if (!rc)
-			val->intval = ((status & CFG_ILIM_MASK) * 100000)
-					+ 500000;
+		rc = smb1390_get_cp_ilim(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_CHIP_VERSION:
 		val->intval = chip->pmic_rev_id->rev4;
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE:
+		val->intval = chip->pl_output_mode;
+		break;
+	case POWER_SUPPLY_PROP_MIN_ICL:
+		val->intval = chip->min_ilim_ua;
+		break;
 	default:
 		smb1390_dbg(chip, PR_MISC, "charge pump power supply get prop %d not supported\n",
 			prop);
@@ -845,6 +1306,11 @@ static int smb1390_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CP_IRQ_STATUS:
 		chip->irq_status = val->intval;
 		break;
+	case POWER_SUPPLY_PROP_CP_ILIM:
+		if (chip->ilim_votable)
+			vote_override(chip->ilim_votable, CC_MODE_VOTER,
+							true, val->intval);
+		break;
 	default:
 		smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
 			prop);
@@ -861,6 +1327,9 @@ static int smb1390_prop_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CP_ENABLE:
 	case POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER:
 	case POWER_SUPPLY_PROP_CP_IRQ_STATUS:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+	case POWER_SUPPLY_PROP_CP_ILIM:
 		return 1;
 	default:
 		break;
@@ -931,6 +1400,14 @@ static int smb1390_parse_dt(struct smb1390 *chip)
 	of_property_read_u32(chip->dev->of_node, "qcom,max-cutoff-soc",
 			&chip->max_cutoff_soc);
 
+	/* Default parallel output configuration is VPH connection */
+	chip->pl_output_mode = POWER_SUPPLY_PL_OUTPUT_VPH;
+	of_property_read_u32(chip->dev->of_node, "qcom,parallel-output-mode",
+			&chip->pl_output_mode);
+
+	chip->cp_slave_thr_taper_ua = chip->min_ilim_ua * 3;
+	of_property_read_u32(chip->dev->of_node, "qcom,cp-slave-thr-taper-ua",
+			      &chip->cp_slave_thr_taper_ua);
 	return 0;
 }
 
@@ -942,6 +1419,8 @@ static void smb1390_release_channels(struct smb1390 *chip)
 
 static int smb1390_create_votables(struct smb1390 *chip)
 {
+	chip->cp_awake_votable = create_votable("CP_AWAKE",
+			VOTE_SET_ANY, smb1390_awake_vote_cb, chip);
 	chip->disable_votable = create_votable("CP_DISABLE",
 			VOTE_SET_ANY, smb1390_disable_vote_cb, chip);
 	if (IS_ERR(chip->disable_votable))
@@ -963,9 +1442,11 @@ static int smb1390_create_votables(struct smb1390 *chip)
 
 	/*
 	 * In case SMB1390 probe happens after FCC value has been configured,
-	 * update ilim vote to reflect FCC / 2 value.
+	 * update ilim vote to reflect FCC / 2 value, this is only applicable
+	 * when SMB1390 is directly connected to VBAT.
 	 */
-	if (chip->fcc_votable)
+	if ((chip->pl_output_mode != POWER_SUPPLY_PL_OUTPUT_VPH)
+			&& chip->fcc_votable)
 		vote(chip->ilim_votable, FCC_VOTER, true,
 			get_effective_result(chip->fcc_votable) / 2);
 
@@ -1014,6 +1495,23 @@ static int smb1390_init_hw(struct smb1390 *chip)
 	}
 	rc = smb1390_masked_write(chip, CORE_FTRIM_CTRL_REG,
 			TEMP_ALERT_LVL_MASK, val << TEMP_ALERT_LVL_SHIFT);
+	if (rc < 0) {
+		pr_err("Failed to write CORE_FTRIM_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * If the slave charger has registered, configure Master SMB1390 for
+	 * triple-chg config, else configure for dual. Later, if the slave
+	 * charger registers, re-configure for triple chg config from the
+	 * power-supply notifier.
+	 */
+	if (!chip->smb_init_done) {
+		if (is_cps_available(chip))
+			rc = smb1390_triple_init_hw(chip);
+		else
+			rc = smb1390_dual_init_hw(chip);
+	}
 
 	return rc;
 }
@@ -1114,14 +1612,23 @@ static void smb1390_create_debugfs(struct smb1390 *chip)
 }
 #endif
 
-static int smb1390_probe(struct platform_device *pdev)
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,smb1390-charger-psy",
+	  .data = (void *)CP_MASTER
+	},
+	{ .compatible = "qcom,smb1390-slave",
+	  .data = (void *)CP_SLAVE
+	},
+	{ },
+};
+
+static int smb1390_master_probe(struct smb1390 *chip)
 {
-	struct smb1390 *chip;
+	int rc;
 	struct device_node *revid_dev_node;
 	struct pmic_revid_data *pmic_rev_id;
-	int rc;
 
-	revid_dev_node = of_parse_phandle(pdev->dev.of_node,
+	revid_dev_node = of_parse_phandle(chip->dev->of_node,
 					  "qcom,pmic-revid", 0);
 	if (!revid_dev_node) {
 		pr_err("Missing qcom,pmic-revid property\n");
@@ -1139,23 +1646,9 @@ static int smb1390_probe(struct platform_device *pdev)
 		return -EPROBE_DEFER;
 	}
 
-	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
-	if (!chip)
-		return -ENOMEM;
-
-	chip->dev = &pdev->dev;
+	chip->pmic_rev_id = pmic_rev_id;
 	spin_lock_init(&chip->status_change_lock);
 	mutex_init(&chip->die_chan_lock);
-	chip->die_temp = -ENODATA;
-	chip->pmic_rev_id = pmic_rev_id;
-	chip->disabled = true;
-	platform_set_drvdata(pdev, chip);
-
-	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
-	if (!chip->regmap) {
-		pr_err("Couldn't get regmap\n");
-		return -EINVAL;
-	}
 
 	rc = smb1390_parse_dt(chip);
 	if (rc < 0) {
@@ -1182,6 +1675,16 @@ static int smb1390_probe(struct platform_device *pdev)
 		goto out_work;
 	}
 
+	smb1390_dbg(chip, PR_INFO, "Detected revid=0x%02x\n",
+			 chip->pmic_rev_id->rev4);
+	if (chip->pmic_rev_id->rev4 <= 0x02 && chip->pl_output_mode !=
+			POWER_SUPPLY_PL_OUTPUT_VPH) {
+		pr_err("Incompatible SMB1390 HW detected, Disabling the charge pump\n");
+		if (chip->disable_votable)
+			vote(chip->disable_votable, HW_DISABLE_VOTER,
+			     true, 0);
+	}
+
 	rc = smb1390_init_charge_pump_psy(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize charge pump psy rc=%d\n", rc);
@@ -1202,9 +1705,6 @@ static int smb1390_probe(struct platform_device *pdev)
 	}
 
 	smb1390_create_debugfs(chip);
-
-	pr_debug("smb1390 probed successfully chip_version=%d\n",
-			chip->pmic_rev_id->rev4);
 	return 0;
 
 out_notifier:
@@ -1218,10 +1718,176 @@ static int smb1390_probe(struct platform_device *pdev)
 	return rc;
 }
 
+static enum power_supply_property smb1390_cp_slave_props[] = {
+	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+};
+
+static int smb1390_slave_prop_is_writeable(struct power_supply *psy,
+				enum power_supply_property prop)
+{
+	return 0;
+}
+
+static int smb1390_cp_slave_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb1390 *chip = power_supply_get_drvdata(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+		val->intval = 0;
+		if (!chip->ilim_votable)
+			chip->ilim_votable = find_votable("CP_ILIM");
+		if (chip->ilim_votable)
+			val->intval =
+				get_effective_result_locked(chip->ilim_votable);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+		val->intval = (int)chip->current_capability;
+		break;
+	default:
+		smb1390_dbg(chip, PR_MISC, "SMB 1390 slave power supply get prop %d not supported\n",
+			psp);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smb1390_cp_slave_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb1390 *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+		rc = smb1390_set_ilim(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+		chip->current_capability = (enum isns_mode)val->intval;
+		rc = smb1390_isns_mode_control(chip, val->intval);
+		break;
+	default:
+		smb1390_dbg(chip, PR_MISC, "SMB 1390 slave power supply set prop %d not supported\n",
+			psp);
+		return -EINVAL;
+	}
+
+	return 0;
+};
+
+static const struct power_supply_desc cps_psy_desc = {
+	.name = "cp_slave",
+	.type = POWER_SUPPLY_TYPE_PARALLEL,
+	.properties = smb1390_cp_slave_props,
+	.num_properties = ARRAY_SIZE(smb1390_cp_slave_props),
+	.get_property = smb1390_cp_slave_get_prop,
+	.set_property = smb1390_cp_slave_set_prop,
+	.property_is_writeable = smb1390_slave_prop_is_writeable,
+};
+
+static int smb1390_init_cps_psy(struct smb1390 *chip)
+{
+	struct power_supply_config cps_cfg = {};
+
+	cps_cfg.drv_data = chip;
+	cps_cfg.of_node = chip->dev->of_node;
+	chip->cps_psy = devm_power_supply_register(chip->dev,
+						  &cps_psy_desc,
+						  &cps_cfg);
+	if (IS_ERR(chip->cps_psy)) {
+		pr_err("Couldn't register CP slave power supply\n");
+		return PTR_ERR(chip->cps_psy);
+	}
+
+	return 0;
+}
+
+static int smb1390_slave_probe(struct smb1390 *chip)
+{
+	int stat, rc;
+
+	/* a "hello" read to test the presence of the slave PMIC */
+	rc = smb1390_read(chip, CORE_STATUS1_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't find slave SMB1390\n");
+		return -EINVAL;
+	}
+
+	rc = smb1390_triple_init_hw(chip);
+	if (rc < 0)
+		return rc;
+
+	rc = smb1390_init_cps_psy(chip);
+	if (rc < 0)
+		pr_err("Couldn't initialize cps psy rc=%d\n", rc);
+
+	return rc;
+}
+
+static int smb1390_probe(struct platform_device *pdev)
+{
+	struct smb1390 *chip;
+	int rc;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->die_temp = -ENODATA;
+	chip->disabled = true;
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("Couldn't get regmap\n");
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, chip);
+	chip->cp_role = (int)of_device_get_match_data(chip->dev);
+	switch (chip->cp_role) {
+	case CP_MASTER:
+		rc = smb1390_master_probe(chip);
+		break;
+	case CP_SLAVE:
+		rc = smb1390_slave_probe(chip);
+		break;
+	default:
+		pr_err("Couldn't find a matching role %d\n", chip->cp_role);
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't probe SMB1390 %s rc=%d\n",
+			       chip->cp_role ? "Slave" : "Master", rc);
+		goto cleanup;
+	}
+
+	pr_info("smb1390 %s probed successfully\n", chip->cp_role ? "Slave" :
+		"Master");
+	return 0;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
 static int smb1390_remove(struct platform_device *pdev)
 {
 	struct smb1390 *chip = platform_get_drvdata(pdev);
 
+	if (chip->cp_role !=  CP_MASTER) {
+		platform_set_drvdata(pdev, NULL);
+		return 0;
+	}
+
 	power_supply_unreg_notifier(&chip->nb);
 
 	/* explicitly disable charging */
@@ -1232,9 +1898,24 @@ static int smb1390_remove(struct platform_device *pdev)
 	wakeup_source_unregister(chip->cp_ws);
 	smb1390_destroy_votables(chip);
 	smb1390_release_channels(chip);
+	platform_set_drvdata(pdev, NULL);
 	return 0;
 }
 
+static void smb1390_shutdown(struct platform_device *pdev)
+{
+	struct smb1390 *chip = platform_get_drvdata(pdev);
+	int rc;
+
+	power_supply_unreg_notifier(&chip->nb);
+	/* Disable SMB1390 */
+	smb1390_dbg(chip, PR_MISC, "Disabling SMB1390\n");
+	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
+					CMD_EN_SWITCHER_BIT, 0);
+	if (rc < 0)
+		pr_err("Couldn't disable chip rc=%d\n", rc);
+}
+
 static int smb1390_suspend(struct device *dev)
 {
 	struct smb1390 *chip = dev_get_drvdata(dev);
@@ -1248,8 +1929,17 @@ static int smb1390_resume(struct device *dev)
 	struct smb1390 *chip = dev_get_drvdata(dev);
 
 	chip->suspended = false;
-	rerun_election(chip->ilim_votable);
-	rerun_election(chip->disable_votable);
+
+	/* ILIM rerun is applicable for both master and slave */
+	if (!chip->ilim_votable)
+		chip->ilim_votable = find_votable("CP_ILIM");
+
+	if (chip->ilim_votable)
+		rerun_election(chip->ilim_votable);
+
+	/* Run disable votable for master only */
+	if (chip->cp_role == CP_MASTER)
+		rerun_election(chip->disable_votable);
 
 	return 0;
 }
@@ -1259,11 +1949,6 @@ static const struct dev_pm_ops smb1390_pm_ops = {
 	.resume		= smb1390_resume,
 };
 
-static const struct of_device_id match_table[] = {
-	{ .compatible = "qcom,smb1390-charger-psy", },
-	{ },
-};
-
 static struct platform_driver smb1390_driver = {
 	.driver	= {
 		.name		= "qcom,smb1390-charger-psy",
@@ -1272,6 +1957,7 @@ static struct platform_driver smb1390_driver = {
 	},
 	.probe	= smb1390_probe,
 	.remove	= smb1390_remove,
+	.shutdown	= smb1390_shutdown,
 };
 module_platform_driver(smb1390_driver);
 
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 6a4e82d..262f6dd 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -13,6 +13,7 @@
 #include <linux/iio/consumer.h>
 #include <linux/pmic-voter.h>
 #include <linux/of_batterydata.h>
+#include <linux/ktime.h>
 #include "smb5-lib.h"
 #include "smb5-reg.h"
 #include "battery.h"
@@ -951,6 +952,9 @@ static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
 {
 	int rc = 0;
 
+	if (chg->pr_swap_in_progress)
+		return 0;
+
 	/* fetch the DPDM regulator */
 	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
 				"dpdm-supply", NULL)) {
@@ -1420,6 +1424,29 @@ int smblib_toggle_smb_en(struct smb_charger *chg, int toggle)
 	return rc;
 }
 
+int smblib_get_irq_status(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 reg;
+
+	mutex_lock(&chg->irq_status_lock);
+	/* Report and clear cached status */
+	val->intval = chg->irq_status;
+	chg->irq_status = 0;
+
+	/* get real time status of pulse skip irq */
+	rc = smblib_read(chg, MISC_PBS_RT_STS_REG, &reg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't read MISC_PBS_RT_STS_REG rc=%d\n",
+				rc);
+	else
+		val->intval |= (reg & PULSE_SKIP_IRQ_BIT);
+	mutex_unlock(&chg->irq_status_lock);
+
+	return rc;
+}
+
 /****************************
  * uUSB Moisture Protection *
  ****************************/
@@ -2950,9 +2977,12 @@ int smblib_get_prop_dc_voltage_now(struct smb_charger *chg,
 	rc = power_supply_get_property(chg->wls_psy,
 				POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
 				val);
-	if (rc < 0)
+	if (rc < 0) {
 		dev_err(chg->dev, "Couldn't get POWER_SUPPLY_PROP_VOLTAGE_REGULATION, rc=%d\n",
 				rc);
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -2966,6 +2996,7 @@ int smblib_set_prop_dc_current_max(struct smb_charger *chg,
 	return smblib_set_charge_param(chg, &chg->param.dc_icl, val->intval);
 }
 
+#define DCIN_AICL_RERUN_DELAY_MS	5000
 int smblib_set_prop_voltage_wls_output(struct smb_charger *chg,
 				    const union power_supply_propval *val)
 {
@@ -2984,7 +3015,19 @@ int smblib_set_prop_voltage_wls_output(struct smb_charger *chg,
 		dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_VOLTAGE_REGULATION, rc=%d\n",
 				rc);
 
-	smblib_dbg(chg, PR_WLS, "Set WLS output voltage %d\n", val->intval);
+	smblib_dbg(chg, PR_WLS, "%d\n", val->intval);
+
+	/*
+	 * When WLS VOUT goes down, the power-constrained adaptor may be able
+	 * to supply more current, so allow it to do so.
+	 */
+	if ((val->intval > 0) && (val->intval < chg->last_wls_vout)) {
+		/* Rerun AICL once after 5 s */
+		alarm_start_relative(&chg->dcin_aicl_alarm,
+				ms_to_ktime(DCIN_AICL_RERUN_DELAY_MS));
+	}
+
+	chg->last_wls_vout = val->intval;
 
 	return rc;
 }
@@ -3909,12 +3952,16 @@ static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
 int smblib_set_prop_pd_current_max(struct smb_charger *chg,
 				    const union power_supply_propval *val)
 {
-	int rc;
+	int rc, icl;
 
-	if (chg->pd_active)
+	if (chg->pd_active) {
+		icl = get_client_vote(chg->usb_icl_votable, PD_VOTER);
 		rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
-	else
+		if (val->intval != icl)
+			power_supply_changed(chg->usb_psy);
+	} else {
 		rc = -EPERM;
+	}
 
 	return rc;
 }
@@ -4602,6 +4649,22 @@ irqreturn_t smb_en_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+irqreturn_t sdam_sts_change_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	mutex_lock(&chg->irq_status_lock);
+	chg->irq_status |= PULSE_SKIP_IRQ_BIT;
+	mutex_unlock(&chg->irq_status_lock);
+
+	power_supply_changed(chg->usb_main_psy);
+
+	return IRQ_HANDLED;
+}
+
 #define CHG_TERM_WA_ENTRY_DELAY_MS		300000		/* 5 min */
 #define CHG_TERM_WA_EXIT_DELAY_MS		60000		/* 1 min */
 static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
@@ -4609,7 +4672,8 @@ static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
 	union power_supply_propval pval = {0, };
 	int rc = 0;
 
-	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	rc = smblib_get_prop_from_bms(chg,
+				POWER_SUPPLY_PROP_REAL_CAPACITY, &pval);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read SOC value, rc=%d\n", rc);
 		return;
@@ -4623,6 +4687,8 @@ static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
 	 * to prevent overcharing.
 	 */
 	if ((batt_status == TERMINATE_CHARGE) && (pval.intval == 100)) {
+		chg->cc_soc_ref = 0;
+		chg->last_cc_soc = 0;
 		alarm_start_relative(&chg->chg_termination_alarm,
 				ms_to_ktime(CHG_TERM_WA_ENTRY_DELAY_MS));
 	} else if (pval.intval < 100) {
@@ -4631,6 +4697,7 @@ static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
 		 * we exit the TERMINATE_CHARGE state and soc drops below 100%
 		 */
 		chg->cc_soc_ref = 0;
+		chg->last_cc_soc = 0;
 	}
 }
 
@@ -4952,6 +5019,8 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
 						chg->chg_freq.freq_removal);
 
 	if (vbus_rising) {
+		cancel_delayed_work_sync(&chg->pr_swap_detach_work);
+		vote(chg->awake_votable, DETACH_DETECT_VOTER, false, 0);
 		rc = smblib_request_dpdm(chg, true);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
@@ -5532,6 +5601,12 @@ static void typec_src_removal(struct smb_charger *chg)
 	chg->voltage_max_uv = MICRO_5V;
 	chg->usbin_forced_max_uv = 0;
 
+	/* Reset all CC mode votes */
+	vote(chg->fcc_main_votable, MAIN_FCC_VOTER, false, 0);
+	chg->adapter_cc_mode = 0;
+	vote_override(chg->fcc_votable, CC_MODE_VOTER, false, 0);
+	vote_override(chg->usb_icl_votable, CC_MODE_VOTER, false, 0);
+
 	/* write back the default FLOAT charger configuration */
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
 				(u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
@@ -5783,6 +5858,151 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static void dcin_aicl(struct smb_charger *chg)
+{
+	int rc, icl, icl_save;
+	int input_present;
+
+	/*
+	 * Hold awake votable to prevent pm_relax being called prior to
+	 * completion of this work.
+	 */
+	vote(chg->awake_votable, DCIN_AICL_VOTER, true, 0);
+
+increment:
+	mutex_lock(&chg->dcin_aicl_lock);
+
+	rc = smblib_get_charge_param(chg, &chg->param.dc_icl, &icl);
+	if (rc < 0)
+		goto unlock;
+
+	if (icl == chg->wls_icl_ua) {
+		/* Upper limit reached; do nothing */
+		smblib_dbg(chg, PR_WLS, "hit max ICL: stop\n");
+		goto unlock;
+	}
+
+	icl = min(chg->wls_icl_ua, icl + DCIN_ICL_STEP_UA);
+	icl_save = icl;
+
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl);
+	if (rc < 0)
+		goto unlock;
+
+	mutex_unlock(&chg->dcin_aicl_lock);
+
+	smblib_dbg(chg, PR_WLS, "icl: %d mA\n", (icl / 1000));
+
+	/* Check to see if DC is still present before and after sleep */
+	rc = smblib_is_input_present(chg, &input_present);
+	if (!(input_present & INPUT_PRESENT_DC) || rc < 0)
+		goto unvote;
+
+	/*
+	 * Wait awhile to check for any DCIN_UVs (the UV handler reduces the
+	 * ICL). If the adaptor collapses, the ICL read after waking up will be
+	 * lesser, indicating that the AICL process is complete.
+	 */
+	msleep(500);
+
+	rc = smblib_is_input_present(chg, &input_present);
+	if (!(input_present & INPUT_PRESENT_DC) || rc < 0)
+		goto unvote;
+
+	mutex_lock(&chg->dcin_aicl_lock);
+
+	rc = smblib_get_charge_param(chg, &chg->param.dc_icl, &icl);
+	if (rc < 0)
+		goto unlock;
+
+	if (icl < icl_save) {
+		smblib_dbg(chg, PR_WLS, "done: icl: %d mA\n", (icl / 1000));
+		goto unlock;
+	}
+
+	mutex_unlock(&chg->dcin_aicl_lock);
+
+	goto increment;
+unlock:
+	mutex_unlock(&chg->dcin_aicl_lock);
+unvote:
+	vote(chg->awake_votable, DCIN_AICL_VOTER, false, 0);
+}
+
+static void dcin_aicl_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						dcin_aicl_work);
+	dcin_aicl(chg);
+}
+
+static enum alarmtimer_restart dcin_aicl_alarm_cb(struct alarm *alarm,
+							ktime_t now)
+{
+	struct smb_charger *chg = container_of(alarm, struct smb_charger,
+					dcin_aicl_alarm);
+
+	smblib_dbg(chg, PR_WLS, "rerunning DCIN AICL\n");
+
+	pm_stay_awake(chg->dev);
+	schedule_work(&chg->dcin_aicl_work);
+
+	return ALARMTIMER_NORESTART;
+}
+
+static void dcin_icl_decrement(struct smb_charger *chg)
+{
+	int rc, icl;
+	ktime_t now = ktime_get();
+
+	rc = smblib_get_charge_param(chg, &chg->param.dc_icl, &icl);
+	if (rc < 0) {
+		smblib_err(chg, "reading DCIN ICL failed: %d\n", rc);
+		return;
+	}
+
+	if (icl == DCIN_ICL_MIN_UA) {
+		/* Cannot possibly decrease ICL any further - do nothing */
+		smblib_dbg(chg, PR_WLS, "hit min ICL: stop\n");
+		return;
+	}
+
+	/* Reduce ICL by 100 mA if 3 UVs happen in a row */
+	if (ktime_us_delta(now, chg->dcin_uv_last_time) > (200 * 1000)) {
+		chg->dcin_uv_count = 0;
+	} else if (chg->dcin_uv_count == 3) {
+		icl -= DCIN_ICL_STEP_UA;
+
+		smblib_dbg(chg, PR_WLS, "icl: %d mA\n", (icl / 1000));
+		rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl);
+		if (rc < 0) {
+			smblib_err(chg, "setting DCIN ICL failed: %d\n", rc);
+			return;
+		}
+
+		chg->dcin_uv_count = 0;
+	}
+
+	chg->dcin_uv_last_time = now;
+}
+
+irqreturn_t dcin_uv_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	mutex_lock(&chg->dcin_aicl_lock);
+
+	chg->dcin_uv_count++;
+	smblib_dbg(chg, (PR_WLS | PR_INTERRUPT), "DCIN UV count: %d\n",
+			chg->dcin_uv_count);
+	dcin_icl_decrement(chg);
+
+	mutex_unlock(&chg->dcin_aicl_lock);
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -5808,6 +6028,18 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 	vbus_present = input_present & INPUT_PRESENT_USB;
 
 	if (dcin_present && !vbus_present) {
+		cancel_work_sync(&chg->dcin_aicl_work);
+
+		/* Reset DCIN ICL to 100 mA */
+		mutex_lock(&chg->dcin_aicl_lock);
+		rc = smblib_set_charge_param(chg, &chg->param.dc_icl,
+				DCIN_ICL_MIN_UA);
+		mutex_unlock(&chg->dcin_aicl_lock);
+		if (rc < 0)
+			return IRQ_HANDLED;
+
+		smblib_dbg(chg, (PR_WLS | PR_INTERRUPT), "reset: icl: 100 mA\n");
+
 		if (chg->sec_cp_present) {
 			pval.intval = wireless_vout;
 			rc = smblib_set_prop_voltage_wls_output(chg, &pval);
@@ -5832,6 +6064,8 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 				dev_err(chg->dev, "Couldn't set dc voltage to 5 V rc=%d\n",
 					rc);
 		}
+
+		schedule_work(&chg->dcin_aicl_work);
 	} else {
 		if (chg->cp_reason == POWER_SUPPLY_CP_WIRELESS) {
 			sec_charger = chg->sec_pl_present ?
@@ -5845,11 +6079,13 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 		}
 
 		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
+
+		chg->last_wls_vout = 0;
 	}
 
 	power_supply_changed(chg->dc_psy);
 
-	smblib_dbg(chg, PR_WLS, "dcin_present= %d, usbin_present= %d, cp_reason = %d\n",
+	smblib_dbg(chg, (PR_WLS | PR_INTERRUPT), "dcin_present= %d, usbin_present= %d, cp_reason = %d\n",
 			dcin_present, vbus_present, chg->cp_reason);
 
 	return IRQ_HANDLED;
@@ -6073,14 +6309,24 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
 	return 0;
 }
 
+#define DETACH_DETECT_DELAY_MS 20
 int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val)
 {
 	int rc;
 	u8 stat = 0, orientation;
 
+	smblib_dbg(chg, PR_MISC, "Requested PR_SWAP %d\n", val->intval);
 	chg->pr_swap_in_progress = val->intval;
 
+	/* check for cable removal during pr_swap */
+	if (!chg->pr_swap_in_progress) {
+		cancel_delayed_work_sync(&chg->pr_swap_detach_work);
+		vote(chg->awake_votable, DETACH_DETECT_VOTER, true, 0);
+		schedule_delayed_work(&chg->pr_swap_detach_work,
+				msecs_to_jiffies(DETACH_DETECT_DELAY_MS));
+	}
+
 	rc = smblib_masked_write(chg, TYPE_C_DEBOUNCE_OPTION_REG,
 			REDUCE_TCCDEBOUNCE_TO_2MS_BIT,
 			val->intval ? REDUCE_TCCDEBOUNCE_TO_2MS_BIT : 0);
@@ -6132,6 +6378,28 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 /***************
  * Work Queues *
  ***************/
+static void smblib_pr_swap_detach_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						pr_swap_detach_work.work);
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATE_MACHINE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read STATE_MACHINE_STS rc=%d\n", rc);
+		goto out;
+	}
+	smblib_dbg(chg, PR_REGISTER, "STATE_MACHINE_STS %x\n", stat);
+	if (!(stat & TYPEC_ATTACH_DETACH_STATE_BIT)) {
+		rc = smblib_request_dpdm(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+	}
+out:
+	vote(chg->awake_votable, DETACH_DETECT_VOTER, false, 0);
+}
+
 static void smblib_uusb_otg_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -6383,7 +6651,8 @@ static void smblib_chg_termination_work(struct work_struct *work)
 	if ((rc < 0) || !input_present)
 		goto out;
 
-	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	rc = smblib_get_prop_from_bms(chg,
+				POWER_SUPPLY_PROP_REAL_CAPACITY, &pval);
 	if ((rc < 0) || (pval.intval < 100)) {
 		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
 		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
@@ -6417,6 +6686,18 @@ static void smblib_chg_termination_work(struct work_struct *work)
 	}
 
 	/*
+	 * In BSM a sudden jump in CC_SOC is not expected. If seen, it's a
+	 * good_ocv or updated capacity, reject it.
+	 */
+	if (chg->last_cc_soc && pval.intval > (chg->last_cc_soc + 100)) {
+		/* CC_SOC has increased by 1% from last time */
+		chg->cc_soc_ref = pval.intval;
+		smblib_dbg(chg, PR_MISC, "cc_soc jumped(%d->%d), reset cc_soc_ref\n",
+				chg->last_cc_soc, pval.intval);
+	}
+	chg->last_cc_soc = pval.intval;
+
+	/*
 	 * Suspend/Unsuspend USB input to keep cc_soc within the 0.5% to 0.75%
 	 * overshoot range of the cc_soc value at termination, to prevent
 	 * overcharging.
@@ -6696,6 +6977,13 @@ static int smblib_create_votables(struct smb_charger *chg)
 		return rc;
 	}
 
+	chg->fcc_main_votable = find_votable("FCC_MAIN");
+	if (chg->fcc_main_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find FCC Main votable rc=%d\n", rc);
+		return rc;
+	}
+
 	chg->fv_votable = find_votable("FV");
 	if (chg->fv_votable == NULL) {
 		rc = -EINVAL;
@@ -6847,9 +7135,12 @@ int smblib_init(struct smb_charger *chg)
 	int rc = 0;
 
 	mutex_init(&chg->smb_lock);
+	mutex_init(&chg->irq_status_lock);
+	mutex_init(&chg->dcin_aicl_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->jeita_update_work, jeita_update_work);
+	INIT_WORK(&chg->dcin_aicl_work, dcin_aicl_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
@@ -6860,6 +7151,8 @@ int smblib_init(struct smb_charger *chg)
 	INIT_DELAYED_WORK(&chg->thermal_regulation_work,
 					smblib_thermal_regulation_work);
 	INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
+	INIT_DELAYED_WORK(&chg->pr_swap_detach_work,
+					smblib_pr_swap_detach_work);
 
 	if (chg->wa_flags & CHG_TERMINATION_WA) {
 		INIT_WORK(&chg->chg_termination_work,
@@ -6887,6 +7180,14 @@ int smblib_init(struct smb_charger *chg)
 		}
 	}
 
+	if (alarmtimer_get_rtcdev()) {
+		alarm_init(&chg->dcin_aicl_alarm, ALARM_REALTIME,
+				dcin_aicl_alarm_cb);
+	} else {
+		smblib_err(chg, "Failed to initialize dcin aicl alarm\n");
+		return -ENODEV;
+	}
+
 	chg->fake_capacity = -EINVAL;
 	chg->fake_input_current_limited = -EINVAL;
 	chg->fake_batt_status = -EINVAL;
@@ -6990,6 +7291,7 @@ int smblib_deinit(struct smb_charger *chg)
 		cancel_work_sync(&chg->bms_update_work);
 		cancel_work_sync(&chg->jeita_update_work);
 		cancel_work_sync(&chg->pl_update_work);
+		cancel_work_sync(&chg->dcin_aicl_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
 		cancel_delayed_work_sync(&chg->icl_change_work);
 		cancel_delayed_work_sync(&chg->pl_enable_work);
@@ -6999,6 +7301,7 @@ int smblib_deinit(struct smb_charger *chg)
 		cancel_delayed_work_sync(&chg->lpd_detach_work);
 		cancel_delayed_work_sync(&chg->thermal_regulation_work);
 		cancel_delayed_work_sync(&chg->usbov_dbc_work);
+		cancel_delayed_work_sync(&chg->pr_swap_detach_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
 		qcom_step_chg_deinit();
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 38ee099..8d15e0d 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -70,6 +70,10 @@ enum print_reason {
 #define USB_SUSPEND_VOTER		"USB_SUSPEND_VOTER"
 #define CHARGER_TYPE_VOTER		"CHARGER_TYPE_VOTER"
 #define HDC_IRQ_VOTER			"HDC_IRQ_VOTER"
+#define DETACH_DETECT_VOTER		"DETACH_DETECT_VOTER"
+#define CC_MODE_VOTER			"CC_MODE_VOTER"
+#define MAIN_FCC_VOTER			"MAIN_FCC_VOTER"
+#define DCIN_AICL_VOTER			"DCIN_AICL_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
@@ -88,6 +92,9 @@ enum print_reason {
 #define TYPEC_DEFAULT_CURRENT_UA	900000
 #define TYPEC_MEDIUM_CURRENT_UA		1500000
 #define TYPEC_HIGH_CURRENT_UA		3000000
+#define DCIN_ICL_MIN_UA			100000
+#define DCIN_ICL_MAX_UA			1500000
+#define DCIN_ICL_STEP_UA		100000
 
 enum smb_mode {
 	PARALLEL_MASTER = 0,
@@ -199,6 +206,7 @@ enum smb_irq_index {
 	FLASH_STATE_CHANGE_IRQ,
 	TORCH_REQ_IRQ,
 	FLASH_EN_IRQ,
+	SDAM_STS_IRQ,
 	/* END */
 	SMB_IRQ_MAX,
 };
@@ -216,6 +224,17 @@ enum chg_term_config_src {
 	ITERM_SRC_ANALOG
 };
 
+enum comp_clamp_levels {
+	CLAMP_LEVEL_DEFAULT = 0,
+	CLAMP_LEVEL_1,
+	MAX_CLAMP_LEVEL,
+};
+
+struct clamp_config {
+	u16 reg[3];
+	u16 val[3];
+};
+
 struct smb_irq_info {
 	const char			*name;
 	const irq_handler_t		handler;
@@ -362,6 +381,8 @@ struct smb_charger {
 	/* locks */
 	struct mutex		smb_lock;
 	struct mutex		ps_change_lock;
+	struct mutex		irq_status_lock;
+	struct mutex		dcin_aicl_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -380,6 +401,9 @@ struct smb_charger {
 	/* parallel charging */
 	struct parallel_params	pl;
 
+	/* CC Mode */
+	int	adapter_cc_mode;
+
 	/* regulators */
 	struct smb_regulator	*vbus_vreg;
 	struct smb_regulator	*vconn_vreg;
@@ -388,6 +412,7 @@ struct smb_charger {
 	/* votables */
 	struct votable		*dc_suspend_votable;
 	struct votable		*fcc_votable;
+	struct votable		*fcc_main_votable;
 	struct votable		*fv_votable;
 	struct votable		*usb_icl_votable;
 	struct votable		*awake_votable;
@@ -407,6 +432,7 @@ struct smb_charger {
 	struct work_struct	jeita_update_work;
 	struct work_struct	moisture_protection_work;
 	struct work_struct	chg_termination_work;
+	struct work_struct	dcin_aicl_work;
 	struct delayed_work	ps_change_timeout_work;
 	struct delayed_work	clear_hdc_work;
 	struct delayed_work	icl_change_work;
@@ -417,10 +443,12 @@ struct smb_charger {
 	struct delayed_work	lpd_detach_work;
 	struct delayed_work	thermal_regulation_work;
 	struct delayed_work	usbov_dbc_work;
+	struct delayed_work	pr_swap_detach_work;
 
 	struct alarm		lpd_recheck_timer;
 	struct alarm		moisture_protection_alarm;
 	struct alarm		chg_termination_alarm;
+	struct alarm		dcin_aicl_alarm;
 
 	/* secondary charger config */
 	bool			sec_pl_present;
@@ -503,8 +531,11 @@ struct smb_charger {
 	bool			aicl_max_reached;
 	int			charge_full_cc;
 	int			cc_soc_ref;
+	int			last_cc_soc;
 	int			usbin_forced_max_uv;
 	int			init_thermal_ua;
+	u32			comp_clamp_level;
+	int			wls_icl_ua;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -532,9 +563,12 @@ struct smb_charger {
 	u32			headroom_mode;
 	bool			flash_init_done;
 	bool			flash_active;
+	u32			irq_status;
 
 	/* wireless */
-	int			wireless_vout;
+	int			dcin_uv_count;
+	ktime_t			dcin_uv_last_time;
+	int			last_wls_vout;
 };
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -584,6 +618,7 @@ irqreturn_t usb_source_change_irq_handler(int irq, void *data);
 irqreturn_t icl_change_irq_handler(int irq, void *data);
 irqreturn_t typec_state_change_irq_handler(int irq, void *data);
 irqreturn_t typec_attach_detach_irq_handler(int irq, void *data);
+irqreturn_t dcin_uv_irq_handler(int irq, void *data);
 irqreturn_t dc_plugin_irq_handler(int irq, void *data);
 irqreturn_t high_duty_cycle_irq_handler(int irq, void *data);
 irqreturn_t switcher_power_ok_irq_handler(int irq, void *data);
@@ -592,7 +627,7 @@ irqreturn_t wdog_bark_irq_handler(int irq, void *data);
 irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
 irqreturn_t temp_change_irq_handler(int irq, void *data);
 irqreturn_t usbin_ov_irq_handler(int irq, void *data);
-
+irqreturn_t sdam_sts_change_irq_handler(int irq, void *data);
 int smblib_get_prop_input_suspend(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_batt_present(struct smb_charger *chg,
@@ -743,6 +778,8 @@ void smblib_hvdcp_hw_inov_enable(struct smb_charger *chg, bool enable);
 void smblib_hvdcp_exit_config(struct smb_charger *chg);
 void smblib_apsd_enable(struct smb_charger *chg, bool enable);
 int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
+int smblib_get_irq_status(struct smb_charger *chg,
+				union power_supply_propval *val);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index d4a46c8..f3582aa 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -15,6 +15,7 @@
 #define DCIN_BASE	0x1400
 #define TYPEC_BASE	0X1500
 #define MISC_BASE	0x1600
+#define MISC_PBS_BASE	0x7500
 
 #define PERPH_TYPE_OFFSET	0x04
 #define TYPE_MASK		GENMASK(7, 0)
@@ -541,4 +542,7 @@ enum {
 
 #define SMB_REG_H_THRESHOLD_MSB_REG		(MISC_BASE + 0XBC)
 
+/* SDAM regs */
+#define MISC_PBS_RT_STS_REG			(MISC_PBS_BASE + 0x10)
+#define PULSE_SKIP_IRQ_BIT			BIT(4)
 #endif /* __SMB5_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 63c47ea..1b0906f 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -152,7 +152,7 @@ static bool is_input_present(struct step_chg_info *chip)
 
 int read_range_data_from_node(struct device_node *node,
 		const char *prop_str, struct range_data *ranges,
-		u32 max_threshold, u32 max_value)
+		int max_threshold, u32 max_value)
 {
 	int rc = 0, i, length, per_tuple_length, tuples;
 
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index 9936e31..285528f 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -16,8 +16,8 @@ struct step_chg_jeita_param {
 };
 
 struct range_data {
-	u32 low_threshold;
-	u32 high_threshold;
+	int low_threshold;
+	int high_threshold;
 	u32 value;
 };
 
@@ -26,5 +26,5 @@ int qcom_step_chg_init(struct device *dev,
 void qcom_step_chg_deinit(void);
 int read_range_data_from_node(struct device_node *node,
 		const char *prop_str, struct range_data *ranges,
-		u32 max_threshold, u32 max_value);
+		int max_threshold, u32 max_value);
 #endif /* __STEP_CHG_H__ */
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 295d8dc..8cbfcce 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1164,13 +1164,13 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP,	rapl_defaults_core),
 	RAPL_CPU(INTEL_FAM6_CANNONLAKE_MOBILE,	rapl_defaults_core),
 
-	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1,	rapl_defaults_byt),
+	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT,	rapl_defaults_byt),
 	RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT,	rapl_defaults_cht),
-	RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD,	rapl_defaults_tng),
-	RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD,	rapl_defaults_ann),
+	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,	rapl_defaults_tng),
+	RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID,	rapl_defaults_ann),
 	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT,	rapl_defaults_core),
-	RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE,	rapl_defaults_core),
-	RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON,	rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X,	rapl_defaults_core),
 
 	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL,	rapl_defaults_hsw_server),
 	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM,	rapl_defaults_hsw_server),
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 9175161..c53a218 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
 	arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
 	arb->rstc.ops = &meson_audio_arb_rstc_ops;
 	arb->rstc.of_node = dev->of_node;
+	arb->rstc.owner = THIS_MODULE;
 
 	/*
 	 * Enable general :
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 01ffc0e..fbcf13b 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -414,7 +414,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
 	struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
 
 #ifdef VRTC_CALIBRATION
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&info->calib_work);
 	/* disable measurement */
 	pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
 #endif	/* VRTC_CALIBRATION */
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e544429..4d6bf93 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
 	struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
 	if (device_may_wakeup(dev))
-		enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+		return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
 	return 0;
 }
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
 	struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
 	if (device_may_wakeup(dev))
-		disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+		return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c..69b54e5 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
 	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
 	rtc->rtc_sync = false;
 
+	/*
+	 * TODO: some models have alarms on a minute boundary but still support
+	 * real hardware interrupts. Add this once the core supports it.
+	 */
+	if (config->rtc_data_start != RTC_SEC)
+		rtc->rtc_dev->uie_unsupported = 1;
+
 	irq_alarm = platform_get_irq_byname(pdev, "ALARM");
 	ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
 					da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 51ba414..3d7414e 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -377,7 +377,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
 	unsigned int byte;
-	int value = 0xff;	/* return 0xff for ignored values */
+	int value = -1;			/* return -1 for ignored values */
 
 	byte = readb(rtc->regbase + reg_off);
 	if (byte & AR_ENB) {
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index c5908cf..8e6c9b3 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev)
 	ret = device_init_wakeup(&pdev->dev, true);
 	if (rtc->data->has_wakeirq) {
 		rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
-		if (rtc->wakeirq_alarm <= 0)
-			ret = rtc->wakeirq_alarm;
-		else
+		if (rtc->wakeirq_alarm > 0) {
 			ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
 							    rtc->wakeirq_alarm);
+		} else {
+			ret = rtc->wakeirq_alarm;
+			if (rtc->wakeirq_alarm == -EPROBE_DEFER)
+				goto err;
+		}
 	}
 	if (ret)
 		dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 1538208..2f741f4 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev)
 	if (IS_ERR(pdata->csr_base))
 		return PTR_ERR(pdata->csr_base);
 
+	pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+	if (IS_ERR(pdata->rtc))
+		return PTR_ERR(pdata->rtc);
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
@@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-					 &xgene_rtc_ops, THIS_MODULE);
-	if (IS_ERR(pdata->rtc)) {
-		clk_disable_unprepare(pdata->clk);
-		return PTR_ERR(pdata->rtc);
-	}
-
 	/* HW does not support update faster than 1 seconds */
 	pdata->rtc->uie_unsupported = 1;
+	pdata->rtc->ops = &xgene_rtc_ops;
+
+	ret = rtc_register_device(pdata->rtc);
+	if (ret) {
+		clk_disable_unprepare(pdata->clk);
+		return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6e294b4..f89f9d0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
 
 raw:
-	block->blocks = (private->real_cyl *
+	block->blocks = ((unsigned long) private->real_cyl *
 			  private->rdc_data.trk_per_cyl *
 			  blk_per_trk);
 
 	dev_info(&device->cdev->dev,
-		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
+		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
 		 "%s\n", (block->bp_block >> 10),
-		 ((private->real_cyl *
+		 (((unsigned long) private->real_cyl *
 		   private->rdc_data.trk_per_cyl *
 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
 		 ((blk_per_trk * block->bp_block) >> 10),
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd2146b..e17364e 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -629,7 +629,7 @@ con3270_init(void)
 		     (void (*)(unsigned long)) con3270_read_tasklet,
 		     (unsigned long) condev->read);
 
-	raw3270_add_view(&condev->view, &con3270_fn, 1);
+	raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
 
 	INIT_LIST_HEAD(&condev->freemem);
 	for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 16a4e85..2f9905e 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 
 	init_waitqueue_head(&fp->wait);
 	fp->fs_pid = get_pid(task_pid(current));
-	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+			      RAW3270_VIEW_LOCK_BH);
 	if (rc) {
 		fs3270_free_view(&fp->view);
 		goto out;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f8cd293..63a41b1 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
  * Add view to device with minor "minor".
  */
 int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
 {
 	unsigned long flags;
 	struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
 		view->cols = rp->cols;
 		view->ascebc = rp->ascebc;
 		spin_lock_init(&view->lock);
+		lockdep_set_subclass(&view->lock, subclass);
 		list_add(&view->list, &rp->view_list);
 		rc = 0;
 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 114ca7c..3afaa35 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -150,6 +150,8 @@ struct raw3270_fn {
 struct raw3270_view {
 	struct list_head list;
 	spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ	0
+#define RAW3270_VIEW_LOCK_BH	1
 	atomic_t ref_count;
 	struct raw3270 *dev;
 	struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
 	unsigned char *ascebc;		/* ascii -> ebcdic table */
 };
 
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
 int raw3270_activate_view(struct raw3270_view *);
 void raw3270_del_view(struct raw3270_view *);
 void raw3270_deactivate_view(struct raw3270_view *);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5b8af27..81067f5 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
 		return PTR_ERR(tp);
 
 	rc = raw3270_add_view(&tp->view, &tty3270_fn,
-			      tty->index + RAW3270_FIRSTMINOR);
+			      tty->index + RAW3270_FIRSTMINOR,
+			      RAW3270_VIEW_LOCK_BH);
 	if (rc) {
 		tty3270_free_view(tp);
 		return rc;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 9811fd8..92eabbb 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -115,7 +115,7 @@ struct subchannel {
 	struct schib_config config;
 } __attribute__ ((aligned(8)));
 
-DECLARE_PER_CPU(struct irb, cio_irb);
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
 
 #define to_subchannel(n) container_of(n, struct subchannel, dev)
 
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index fabd979..7a06cdf 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
 	if (ret != -EBUSY)
 		goto out_unlock;
 
+	iretry = 255;
 	do {
-		iretry = 255;
 
 		ret = cio_cancel_halt_clear(sch, &iretry);
-		while (ret == -EBUSY) {
-			/*
-			 * Flush all I/O and wait for
-			 * cancel/halt/clear completion.
-			 */
-			private->completion = &completion;
-			spin_unlock_irq(sch->lock);
 
+		if (ret == -EIO) {
+			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
+			       sch->schid.ssid, sch->schid.sch_no);
+			break;
+		}
+
+		/*
+		 * Flush all I/O and wait for
+		 * cancel/halt/clear completion.
+		 */
+		private->completion = &completion;
+		spin_unlock_irq(sch->lock);
+
+		if (ret == -EBUSY)
 			wait_for_completion_timeout(&completion, 3*HZ);
 
-			spin_lock_irq(sch->lock);
-			private->completion = NULL;
-			flush_workqueue(vfio_ccw_work_q);
-			ret = cio_cancel_halt_clear(sch, &iretry);
-		};
-
+		private->completion = NULL;
+		flush_workqueue(vfio_ccw_work_q);
+		spin_lock_irq(sch->lock);
 		ret = cio_disable_subchannel(sch);
 	} while (ret == -EBUSY);
 out_unlock:
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f673e10..dc5ff47 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
 
 	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
 	    (private->state != VFIO_CCW_STATE_STANDBY)) {
-		if (!vfio_ccw_mdev_reset(mdev))
+		if (!vfio_ccw_sch_quiesce(private->sch))
 			private->state = VFIO_CCW_STATE_STANDBY;
 		/* The state will be NOT_OPER on error. */
 	}
 
+	cp_free(&private->cp);
 	private->mdev = NULL;
 	atomic_inc(&private->avail);
 
@@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
 	struct vfio_ccw_private *private =
 		dev_get_drvdata(mdev_parent_dev(mdev));
 
+	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+	    (private->state != VFIO_CCW_STATE_STANDBY)) {
+		if (!vfio_ccw_mdev_reset(mdev))
+			private->state = VFIO_CCW_STATE_STANDBY;
+		/* The state will be NOT_OPER on error. */
+	}
+
+	cp_free(&private->cp);
 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
 				 &private->nb);
 }
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 1b4001e..b163444 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -45,7 +45,8 @@ static debug_info_t *debug_info;
 
 static void __init pkey_debug_init(void)
 {
-	debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+	/* 5 arguments per dbf entry (including the format string ptr) */
+	debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
 	debug_register_view(debug_info, &debug_sprintf_view);
 	debug_set_level(debug_info, 3);
 }
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e685412..b2737bf 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -224,6 +224,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
 	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
 	if (mex->outputdatalength < mex->inputdatalength) {
+		func_code = 0;
 		rc = -EINVAL;
 		goto out;
 	}
@@ -298,6 +299,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
 	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
 	if (crt->outputdatalength < crt->inputdatalength) {
+		func_code = 0;
 		rc = -EINVAL;
 		goto out;
 	}
@@ -483,6 +485,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
 
 		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
 		if (!targets) {
+			func_code = 0;
 			rc = -ENOMEM;
 			goto out;
 		}
@@ -490,6 +493,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
 		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
 		if (copy_from_user(targets, uptr,
 				   target_num * sizeof(*targets))) {
+			func_code = 0;
 			rc = -EFAULT;
 			goto out_free;
 		}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 7617d21..f63c5c8 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 		if (priv->channel[direction] == NULL) {
 			if (direction == CTCM_WRITE)
 				channel_free(priv->channel[CTCM_READ]);
+			result = -ENODEV;
 			goto out_dev;
 		}
 		priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2d1f6a5..b265758 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -201,6 +201,12 @@ struct qeth_vnicc_info {
 	bool rx_bcast_enabled;
 };
 
+static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
+		enum qeth_ipa_setadp_cmd func)
+{
+	return (ipa->supported_funcs & func);
+}
+
 static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
 		enum qeth_ipa_funcs func)
 {
@@ -214,9 +220,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 }
 
 #define qeth_adp_supported(c, f) \
-	qeth_is_ipa_supported(&c->options.adp, f)
-#define qeth_adp_enabled(c, f) \
-	qeth_is_ipa_enabled(&c->options.adp, f)
+	qeth_is_adp_supported(&c->options.adp, f)
 #define qeth_is_supported(c, f) \
 	qeth_is_ipa_supported(&c->options.ipa4, f)
 #define qeth_is_enabled(c, f) \
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 56aacf3..461afc2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1370,7 +1370,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
 	card->qdio.no_out_queues = 4;
 }
 
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static int qeth_update_from_chp_desc(struct qeth_card *card)
 {
 	struct ccw_device *ccwdev;
 	struct channel_path_desc_fmt0 *chp_dsc;
@@ -1380,7 +1380,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
 	ccwdev = card->data.ccwdev;
 	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
 	if (!chp_dsc)
-		goto out;
+		return -ENOMEM;
 
 	card->info.func_level = 0x4100 + chp_dsc->desc;
 	if (card->info.type == QETH_CARD_TYPE_IQD)
@@ -1395,6 +1395,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
 	kfree(chp_dsc);
 	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+	return 0;
 }
 
 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -5090,7 +5091,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
-	qeth_update_from_chp_desc(card);
+	rc = qeth_update_from_chp_desc(card);
+	if (rc)
+		return rc;
 retry:
 	if (retries < 3)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -5768,7 +5771,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 	gdev->cdev[2]->handler = qeth_irq;
 
 	qeth_setup_card(card);
-	qeth_update_from_chp_desc(card);
+	rc = qeth_update_from_chp_desc(card);
+	if (rc)
+		goto err_chp_desc;
 
 	card->dev = qeth_alloc_netdev(card);
 	if (!card->dev) {
@@ -5806,6 +5811,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 	qeth_core_free_discipline(card);
 err_load:
 	free_netdev(card->dev);
+err_chp_desc:
 err_card:
 	qeth_core_free_card(card);
 err_dev:
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7f71ca0..9c5e801 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2586,12 +2586,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 	int rc;
 
+	hash_init(card->ip_htable);
+
 	if (gdev->dev.type == &qeth_generic_devtype) {
 		rc = qeth_l3_create_device_attributes(&gdev->dev);
 		if (rc)
 			return rc;
 	}
-	hash_init(card->ip_htable);
+
 	hash_init(card->ip_mc_htable);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f6c415d..5eb7aab 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 	list_for_each_entry(port, &adapter->port_list, list) {
 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
-		if (!port->d_id)
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fc_els_rscn *head;
 	struct fc_els_rscn_page *page;
 	u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 	no_entries = be16_to_cpu(head->rscn_plen) /
 		sizeof(struct fc_els_rscn_page);
 
+	if (no_entries > 1) {
+		/* handle failed ports */
+		unsigned long flags;
+		struct zfcp_port *port;
+
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list) {
+			if (port->d_id)
+				continue;
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+		}
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
+
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		page++;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39eb415..074760f 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2639,9 +2639,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 	return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+	return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-	if (unlikely(pci_channel_offline(dev->pdev)))
+	if (unlikely(aac_pci_offline(dev)))
 		return -1;
 
 	return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3236240..b7588de 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -673,7 +673,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 					return -ETIMEDOUT;
 				}
 
-				if (unlikely(pci_channel_offline(dev->pdev)))
+				if (unlikely(aac_pci_offline(dev)))
 					return -EFAULT;
 
 				if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -773,7 +773,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-		if (unlikely(pci_channel_offline(dev->pdev)))
+		if (unlikely(aac_pci_offline(dev)))
 			return -EFAULT;
 
 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 3d401d0..bdd177e 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
 	ahc = ahc_alloc(&aic7xxx_driver_template, name);
 	if (ahc == NULL)
 		return (ENOMEM);
+	ahc->dev = dev;
 	error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
 			       eisaBase);
 	if (error != 0) {
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e90..7f6e832 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -949,6 +949,7 @@ struct ahc_softc {
 	 * Platform specific device information.
 	 */
 	ahc_dev_softc_t		  dev_softc;
+	struct device		  *dev;
 
 	/*
 	 * Bus specific device information.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6be3ae..306d0bf 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -861,8 +861,8 @@ int
 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
 		 int flags, bus_dmamap_t *mapp)
 {
-	*vaddr = pci_alloc_consistent(ahc->dev_softc,
-				      dmat->maxsize, mapp);
+	/* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+	*vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
 	if (*vaddr == NULL)
 		return ENOMEM;
 	return 0;
@@ -872,8 +872,7 @@ void
 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
 		void* vaddr, bus_dmamap_t map)
 {
-	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
-			    vaddr, map);
+	dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
@@ -1124,8 +1123,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
 
 	host->transportt = ahc_linux_transport_template;
 
-	retval = scsi_add_host(host,
-			(ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+	retval = scsi_add_host(host, ahc->dev);
 	if (retval) {
 		printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
 		scsi_host_put(host);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0fc14da..717d8d1 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 	ahc->dev_softc = pci;
+	ahc->dev = &pci->dev;
 	error = ahc_pci_config(ahc, entry);
 	if (error != 0) {
 		ahc_free(ahc);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f..e09c7f3 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
 	}
 
 out:
-	if (req->nsge > 0)
+	if (req->nsge > 0) {
 		scsi_dma_unmap(cmnd);
+		if (req->dcopy && (host_status == DID_OK))
+			host_status = csio_scsi_copy_to_sgl(hw, req);
+	}
 
 	cmnd->result = (((host_status) << 16) | scsi_status);
 	cmnd->scsi_done(cmnd);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 1797e47..3d51a93 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2153,7 +2153,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
-		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a1551ab..231eb79 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
 	unsigned long flags;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+		complete(&task->slow_task->completion);
+	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->slow_task->completion);
 }
 
 static void smp_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
@@ -2041,6 +2040,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 	if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
 		phy->phy_state = PHY_EMPTY;
 		sas_unregister_devs_sas_addr(dev, phy_id, last);
+		/*
+		 * Even though the PHY is empty, for convenience we discover
+		 * the PHY to update the PHY info, like negotiated linkrate.
+		 */
+		sas_ex_phy_discover(dev, phy_id);
 		return res;
 	} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
 		   dev_type_flutter(type, phy->attached_dev_type)) {
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1a6ed9b..cb19b12 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -117,7 +117,7 @@ static ssize_t
 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
 /**
@@ -137,9 +137,9 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 
 	if (phba->hba_flag & HBA_FIP_SUPPORT)
-		return snprintf(buf, PAGE_SIZE, "1\n");
+		return scnprintf(buf, PAGE_SIZE, "1\n");
 	else
-		return snprintf(buf, PAGE_SIZE, "0\n");
+		return scnprintf(buf, PAGE_SIZE, "0\n");
 }
 
 static ssize_t
@@ -517,14 +517,15 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	if (phba->cfg_enable_bg)
+	if (phba->cfg_enable_bg) {
 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
-			return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+			return scnprintf(buf, PAGE_SIZE,
+					"BlockGuard Enabled\n");
 		else
-			return snprintf(buf, PAGE_SIZE,
+			return scnprintf(buf, PAGE_SIZE,
 					"BlockGuard Not Supported\n");
-	else
-			return snprintf(buf, PAGE_SIZE,
+	} else
+		return scnprintf(buf, PAGE_SIZE,
 					"BlockGuard Disabled\n");
 }
 
@@ -536,7 +537,7 @@ lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_guard_err_cnt);
 }
 
@@ -548,7 +549,7 @@ lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_apptag_err_cnt);
 }
 
@@ -560,7 +561,7 @@ lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_reftag_err_cnt);
 }
 
@@ -578,7 +579,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host *host = class_to_shost(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+	return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
 }
 
 /**
@@ -597,7 +598,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
 }
 
 /**
@@ -619,7 +620,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
 }
 
 /**
@@ -638,7 +639,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
 }
 
 /**
@@ -657,7 +658,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
 }
 
 /**
@@ -676,7 +677,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
 }
 
 /**
@@ -694,7 +695,7 @@ lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
 }
 
@@ -714,7 +715,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
 }
 
 /**
@@ -742,10 +743,10 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
 	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
 			       fwrev, phba->sli_rev);
 	else
-		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
 			       fwrev, phba->sli_rev, if_type, sli_family);
 
 	return len;
@@ -769,7 +770,7 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
 	lpfc_vpd_t *vp = &phba->vpd;
 
 	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-	return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
 }
 
 /**
@@ -790,10 +791,11 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
 	char fwrev[FW_REV_STR_SIZE];
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				phba->OptionROMVersion);
 
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
-	return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
 }
 
 /**
@@ -824,20 +826,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 	case LPFC_LINK_DOWN:
 	case LPFC_HBA_ERROR:
 		if (phba->hba_flag & LINK_DISABLED)
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Link Down - User disabled\n");
 		else
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Link Down\n");
 		break;
 	case LPFC_LINK_UP:
 	case LPFC_CLEAR_LA:
 	case LPFC_HBA_READY:
-		len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
 
 		switch (vport->port_state) {
 		case LPFC_LOCAL_CFG_LINK:
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"Configuring Link\n");
 			break;
 		case LPFC_FDISC:
@@ -847,38 +849,40 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 		case LPFC_NS_QRY:
 		case LPFC_BUILD_DISC_LIST:
 		case LPFC_DISC_AUTH:
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Discovery\n");
 			break;
 		case LPFC_VPORT_READY:
-			len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"Ready\n");
 			break;
 
 		case LPFC_VPORT_FAILED:
-			len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"Failed\n");
 			break;
 
 		case LPFC_VPORT_UNKNOWN:
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Unknown\n");
 			break;
 		}
 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"   Menlo Maint Mode\n");
 		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Public Loop\n");
 			else
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Private Loop\n");
 		} else {
 			if (vport->fc_flag & FC_FABRIC)
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Fabric\n");
 			else
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Point-2-Point\n");
 		}
 	}
@@ -903,15 +907,15 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba *phba = vport->phba;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		return snprintf(buf, PAGE_SIZE, "fc\n");
+		return scnprintf(buf, PAGE_SIZE, "fc\n");
 
 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
-			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
-			return snprintf(buf, PAGE_SIZE, "fc\n");
+			return scnprintf(buf, PAGE_SIZE, "fc\n");
 	}
-	return snprintf(buf, PAGE_SIZE, "unknown\n");
+	return scnprintf(buf, PAGE_SIZE, "unknown\n");
 }
 
 /**
@@ -931,7 +935,7 @@ lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			phba->sli4_hba.pc_sli4_params.oas_supported);
 }
 
@@ -989,7 +993,7 @@ lpfc_num_discovered_ports_show(struct device *dev,
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			vport->fc_map_cnt + vport->fc_unmap_cnt);
 }
 
@@ -1427,7 +1431,7 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
 }
 
 /**
@@ -1456,7 +1460,7 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
 	else
 		state = "online";
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", state);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
 }
 
 /**
@@ -1669,8 +1673,8 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1697,8 +1701,8 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1725,8 +1729,8 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1753,8 +1757,8 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1781,8 +1785,8 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1809,8 +1813,8 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1835,10 +1839,10 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 
 	if (!(phba->max_vpi))
-		return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+		return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
 	if (vport->port_type == LPFC_PHYSICAL_PORT)
-		return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
-	return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+		return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+	return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
 }
 
 /**
@@ -1860,7 +1864,7 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
 }
 
 /**
@@ -1964,7 +1968,7 @@ lpfc_fips_level_show(struct device *dev,  struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
 }
 
 /**
@@ -1983,7 +1987,7 @@ lpfc_fips_rev_show(struct device *dev,  struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
 }
 
 /**
@@ -2002,7 +2006,7 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+	return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
 			(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
 			(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
 				"" : "Not ");
@@ -2031,7 +2035,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
 	uint16_t max_nr_virtfn;
 
 	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }
 
 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
@@ -2091,7 +2095,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
-	return snprintf(buf, PAGE_SIZE, "%d\n",\
+	return scnprintf(buf, PAGE_SIZE, "%d\n",\
 			phba->cfg_##attr);\
 }
 
@@ -2119,7 +2123,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 	struct lpfc_hba   *phba = vport->phba;\
 	uint val = 0;\
 	val = phba->cfg_##attr;\
-	return snprintf(buf, PAGE_SIZE, "%#x\n",\
+	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
 			phba->cfg_##attr);\
 }
 
@@ -2255,7 +2259,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2280,7 +2284,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2551,7 +2555,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			(unsigned long long)phba->cfg_soft_wwpn);
 }
 
@@ -2648,7 +2652,7 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			(unsigned long long)phba->cfg_soft_wwnn);
 }
 
@@ -2714,7 +2718,7 @@ lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
 }
 
@@ -2782,7 +2786,7 @@ lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
 }
 
 /**
@@ -2845,7 +2849,7 @@ lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
 }
 
@@ -2916,7 +2920,7 @@ lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
 }
 
 /**
@@ -2980,7 +2984,7 @@ lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
 	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
 		return -EFAULT;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
 }
 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
 		   lpfc_oas_lun_status_show, NULL);
@@ -3132,7 +3136,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
 	if (oas_lun != NOT_OAS_ENABLED_LUN)
 		phba->cfg_oas_flags |= OAS_LUN_VALID;
 
-	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+	len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
 
 	return len;
 }
@@ -3266,7 +3270,7 @@ lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
 }
 
 static DEVICE_ATTR(iocb_hw, S_IRUGO,
@@ -3278,7 +3282,7 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			pring ? pring->txq_max : 0);
 }
 
@@ -3292,7 +3296,7 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			pring ? pring->txcmplq_max : 0);
 }
 
@@ -3328,7 +3332,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
+	return scnprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
 }
 
 /**
@@ -4830,19 +4834,19 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 
 	switch (phba->cfg_fcp_cpu_map) {
 	case 0:
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: No mapping (%d)\n",
 				phba->cfg_fcp_cpu_map);
 		return len;
 	case 1:
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: HBA centric mapping (%d): "
 				"%d online CPUs\n",
 				phba->cfg_fcp_cpu_map,
 				phba->sli4_hba.num_online_cpu);
 		break;
 	case 2:
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: Driver centric mapping (%d): "
 				"%d online CPUs\n",
 				phba->cfg_fcp_cpu_map,
@@ -4855,14 +4859,14 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 
 		/* margin should fit in this and the truncated message */
 		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d\n",
 					phba->sli4_hba.curr_disp_cpu,
 					cpup->channel_id, cpup->phys_id,
 					cpup->core_id);
 		else
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d IRQ %d\n",
 					phba->sli4_hba.curr_disp_cpu,
@@ -4875,7 +4879,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 		if (phba->sli4_hba.curr_disp_cpu <
 				phba->sli4_hba.num_present_cpu &&
 				(len >= (PAGE_SIZE - 64))) {
-			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			len += scnprintf(buf + len, PAGE_SIZE-len, "more...\n");
 			break;
 		}
 	}
@@ -6296,7 +6300,7 @@ lpfc_show_rport_##field (struct device *dev,				\
 {									\
 	struct fc_rport *rport = transport_class_to_rport(dev);		\
 	struct lpfc_rport_data *rdata = rport->hostdata;		\
-	return snprintf(buf, sz, format_string,				\
+	return scnprintf(buf, sz, format_string,			\
 		(rdata->target) ? cast rdata->target->field : 0);	\
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc89..d909d90 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1220,7 +1220,7 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
 	 * Name object.  NPIV is not in play so this integer
 	 * value is sufficient and unique per FC-ID.
 	 */
-	n = snprintf(symbol, size, "%d", vport->phba->brd_no);
+	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
 	return n;
 }
 
@@ -1234,26 +1234,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
 
 	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
 
-	n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+	n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+	n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " DV%s.",
+	n += scnprintf(symbol + n, size - n, " DV%s.",
 		      lpfc_release_version);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " HN:%s.",
+	n += scnprintf(symbol + n, size - n, " HN:%s.",
 		      init_utsname()->nodename);
 	if (size < n)
 		return n;
 
 	/* Note :- OS name is "Linux" */
-	n += snprintf(symbol + n, size - n, " OS:%s\n",
+	n += scnprintf(symbol + n, size - n, " OS:%s\n",
 		      init_utsname()->sysname);
 	return n;
 }
@@ -1762,6 +1762,9 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
+	/* This string MUST be consistent with other FC platforms
+	 * supported by Broadcom.
+	 */
 	strncpy(ae->un.AttrString,
 		"Emulex Corporation",
 		       sizeof(ae->un.AttrString));
@@ -2117,10 +2120,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 32);
 
-	ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-	ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-	ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
-	ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	if (vport->nvmei_support || vport->phba->nvmet_support)
+		ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
 	size = FOURBYTES + 32;
 	ad->AttrLen = cpu_to_be16(size);
 	ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
@@ -2425,9 +2429,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 32);
 
-	ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-	ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-	ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
 	size = FOURBYTES + 32;
 	ad->AttrLen = cpu_to_be16(size);
 	ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ca6c398..f1951c4 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -170,7 +170,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 	for (i = 0; i < index; i++) {
@@ -181,7 +181,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 
@@ -236,7 +236,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 	for (i = 0; i < index; i++) {
@@ -247,7 +247,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 
@@ -307,7 +307,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 
 	i = lpfc_debugfs_last_hbq;
 
-	len +=  snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+	len +=  scnprintf(buf+len, size-len, "HBQ %d Info\n", i);
 
 	hbqs =  &phba->hbqs[i];
 	posted = 0;
@@ -315,21 +315,21 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 		posted++;
 
 	hip =  lpfc_hbq_defs[i];
-	len +=  snprintf(buf+len, size-len,
+	len +=  scnprintf(buf+len, size-len,
 		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
 		hip->hbq_index, hip->profile, hip->rn,
 		hip->buffer_count, hip->init_count, hip->add_count, posted);
 
 	raw_index = phba->hbq_get[i];
 	getidx = le32_to_cpu(raw_index);
-	len +=  snprintf(buf+len, size-len,
+	len +=  scnprintf(buf+len, size-len,
 		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
 		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
 		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
 
 	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
 	for (j=0; j<hbqs->entry_count; j++) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 			"%03d: %08x %04x %05x ", j,
 			le32_to_cpu(hbqe->bde.addrLow),
 			le32_to_cpu(hbqe->bde.tus.w),
@@ -341,14 +341,16 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 		low = hbqs->hbqPutIdx - posted;
 		if (low >= 0) {
 			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
-				len +=  snprintf(buf+len, size-len, "Unused\n");
+				len +=  scnprintf(buf + len, size - len,
+						"Unused\n");
 				goto skipit;
 			}
 		}
 		else {
 			if ((j >= hbqs->hbqPutIdx) &&
 				(j < (hbqs->entry_count+low))) {
-				len +=  snprintf(buf+len, size-len, "Unused\n");
+				len +=  scnprintf(buf + len, size - len,
+						"Unused\n");
 				goto skipit;
 			}
 		}
@@ -358,7 +360,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
 			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
-				len +=  snprintf(buf+len, size-len,
+				len +=  scnprintf(buf+len, size-len,
 					"Buf%d: %p %06x\n", i,
 					hbq_buf->dbuf.virt, hbq_buf->tag);
 				found = 1;
@@ -367,7 +369,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 			i++;
 		}
 		if (!found) {
-			len +=  snprintf(buf+len, size-len, "No DMAinfo?\n");
+			len +=  scnprintf(buf+len, size-len, "No DMAinfo?\n");
 		}
 skipit:
 		hbqe++;
@@ -413,7 +415,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
 	off = 0;
 	spin_lock_irq(&phba->hbalock);
 
-	len +=  snprintf(buf+len, size-len, "HBA SLIM\n");
+	len +=  scnprintf(buf+len, size-len, "HBA SLIM\n");
 	lpfc_memcpy_from_slim(buffer,
 		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
 
@@ -427,7 +429,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
 
 	i = 1024;
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -471,11 +473,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 	off = 0;
 	spin_lock_irq(&phba->hbalock);
 
-	len +=  snprintf(buf+len, size-len, "SLIM Mailbox\n");
+	len +=  scnprintf(buf+len, size-len, "SLIM Mailbox\n");
 	ptr = (uint32_t *)phba->slim2p.virt;
 	i = sizeof(MAILBOX_t);
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -484,11 +486,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 		off += (8 * sizeof(uint32_t));
 	}
 
-	len +=  snprintf(buf+len, size-len, "SLIM PCB\n");
+	len +=  scnprintf(buf+len, size-len, "SLIM PCB\n");
 	ptr = (uint32_t *)phba->pcb;
 	i = sizeof(PCB_t);
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -501,7 +503,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 		for (i = 0; i < 4; i++) {
 			pgpp = &phba->port_gp[i];
 			pring = &psli->sli3_ring[i];
-			len +=  snprintf(buf+len, size-len,
+			len +=  scnprintf(buf+len, size-len,
 					 "Ring %d: CMD GetInx:%d "
 					 "(Max:%d Next:%d "
 					 "Local:%d flg:x%x)  "
@@ -518,7 +520,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 		word1 = readl(phba->CAregaddr);
 		word2 = readl(phba->HSregaddr);
 		word3 = readl(phba->HCregaddr);
-		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+		len +=  scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
 				 "HC:%08x\n", word0, word1, word2, word3);
 	}
 	spin_unlock_irq(&phba->hbalock);
@@ -557,12 +559,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
 	outio = 0;
 
-	len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
+	len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
 	spin_lock_irq(shost->host_lock);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		iocnt = 0;
 		if (!cnt) {
-			len +=  snprintf(buf+len, size-len,
+			len +=  scnprintf(buf+len, size-len,
 				"Missing Nodelist Entries\n");
 			break;
 		}
@@ -600,62 +602,62 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		default:
 			statep = "UNKNOWN";
 		}
-		len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+		len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
 				statep, ndlp->nlp_DID);
-		len += snprintf(buf+len, size-len,
+		len += scnprintf(buf+len, size-len,
 				"WWPN x%llx ",
 				wwn_to_u64(ndlp->nlp_portname.u.wwn));
-		len += snprintf(buf+len, size-len,
+		len += scnprintf(buf+len, size-len,
 				"WWNN x%llx ",
 				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
 		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-			len += snprintf(buf+len, size-len, "RPI:%03d ",
+			len += scnprintf(buf+len, size-len, "RPI:%03d ",
 					ndlp->nlp_rpi);
 		else
-			len += snprintf(buf+len, size-len, "RPI:none ");
-		len +=  snprintf(buf+len, size-len, "flag:x%08x ",
+			len += scnprintf(buf+len, size-len, "RPI:none ");
+		len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
 			ndlp->nlp_flag);
 		if (!ndlp->nlp_type)
-			len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 		if (ndlp->nlp_type & NLP_FC_NODE)
-			len += snprintf(buf+len, size-len, "FC_NODE ");
+			len += scnprintf(buf+len, size-len, "FC_NODE ");
 		if (ndlp->nlp_type & NLP_FABRIC) {
-			len += snprintf(buf+len, size-len, "FABRIC ");
+			len += scnprintf(buf+len, size-len, "FABRIC ");
 			iocnt = 0;
 		}
 		if (ndlp->nlp_type & NLP_FCP_TARGET)
-			len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+			len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
 				ndlp->nlp_sid);
 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
-			len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+			len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
 		if (ndlp->nlp_type & NLP_NVME_TARGET)
-			len += snprintf(buf + len,
+			len += scnprintf(buf + len,
 					size - len, "NVME_TGT sid:%d ",
 					NLP_NO_SID);
 		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
-			len += snprintf(buf + len,
+			len += scnprintf(buf + len,
 					size - len, "NVME_INITIATOR ");
-		len += snprintf(buf+len, size-len, "usgmap:%x ",
+		len += scnprintf(buf+len, size-len, "usgmap:%x ",
 			ndlp->nlp_usg_map);
-		len += snprintf(buf+len, size-len, "refcnt:%x",
+		len += scnprintf(buf+len, size-len, "refcnt:%x",
 			kref_read(&ndlp->kref));
 		if (iocnt) {
 			i = atomic_read(&ndlp->cmd_pending);
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					" OutIO:x%x Qdepth x%x",
 					i, ndlp->cmd_qdepth);
 			outio += i;
 		}
-		len +=  snprintf(buf+len, size-len, "\n");
+		len +=  scnprintf(buf+len, size-len, "\n");
 	}
 	spin_unlock_irq(shost->host_lock);
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"\nOutstanding IO x%x\n",  outio);
 
 	if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\nNVME Targetport Entry ...\n");
 
 		/* Port state is only one of two values for now. */
@@ -663,18 +665,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			statep = "REGISTERED";
 		else
 			statep = "INIT";
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"TGT WWNN x%llx WWPN x%llx State %s\n",
 				wwn_to_u64(vport->fc_nodename.u.wwn),
 				wwn_to_u64(vport->fc_portname.u.wwn),
 				statep);
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"    Targetport DID x%06x\n",
 				phba->targetport->port_id);
 		goto out_exit;
 	}
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 				"\nNVME Lport/Rport Entries ...\n");
 
 	localport = vport->localport;
@@ -689,11 +691,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	else
 		statep = "UNKNOWN ";
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"Lport DID x%06x PortState %s\n",
 			localport->port_id, statep);
 
-	len += snprintf(buf + len, size - len, "\tRport List:\n");
+	len += scnprintf(buf + len, size - len, "\tRport List:\n");
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
 		spin_lock(&phba->hbalock);
@@ -720,32 +722,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		}
 
 		/* Tab in to show lport ownership. */
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\t%s Port ID:x%06x ",
 				statep, nrport->port_id);
-		len += snprintf(buf + len, size - len, "WWPN x%llx ",
+		len += scnprintf(buf + len, size - len, "WWPN x%llx ",
 				nrport->port_name);
-		len += snprintf(buf + len, size - len, "WWNN x%llx ",
+		len += scnprintf(buf + len, size - len, "WWNN x%llx ",
 				nrport->node_name);
 
 		/* An NVME rport can have multiple roles. */
 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "INITIATOR ");
 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "TARGET ");
 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "DISCSRVC ");
 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
 					  FC_PORT_ROLE_NVME_TARGET |
 					  FC_PORT_ROLE_NVME_DISCOVERY))
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "UNKNOWN ROLE x%x",
 					 nrport->port_role);
 		/* Terminate the string. */
-		len +=  snprintf(buf + len, size - len, "\n");
+		len +=  scnprintf(buf + len, size - len, "\n");
 	}
 
 	spin_unlock_irq(shost->host_lock);
@@ -784,35 +786,35 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		if (!phba->targetport)
 			return len;
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\nNVME Targetport Statistics\n");
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Rcv %08x Drop %08x Abort %08x\n",
 				atomic_read(&tgtp->rcv_ls_req_in),
 				atomic_read(&tgtp->rcv_ls_req_drop),
 				atomic_read(&tgtp->xmt_ls_abort));
 		if (atomic_read(&tgtp->rcv_ls_req_in) !=
 		    atomic_read(&tgtp->rcv_ls_req_out)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Rcv LS: in %08x != out %08x\n",
 					atomic_read(&tgtp->rcv_ls_req_in),
 					atomic_read(&tgtp->rcv_ls_req_out));
 		}
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Xmt %08x Drop %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_ls_rsp),
 				atomic_read(&tgtp->xmt_ls_drop),
 				atomic_read(&tgtp->xmt_ls_rsp_cmpl));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: RSP Abort %08x xb %08x Err %08x\n",
 				atomic_read(&tgtp->xmt_ls_rsp_aborted),
 				atomic_read(&tgtp->xmt_ls_rsp_xb_set),
 				atomic_read(&tgtp->xmt_ls_rsp_error));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP: Rcv %08x Defer %08x Release %08x "
 				"Drop %08x\n",
 				atomic_read(&tgtp->rcv_fcp_cmd_in),
@@ -822,13 +824,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 
 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
 		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Rcv FCP: in %08x != out %08x\n",
 					atomic_read(&tgtp->rcv_fcp_cmd_in),
 					atomic_read(&tgtp->rcv_fcp_cmd_out));
 		}
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp: read %08x readrsp %08x "
 				"write %08x rsp %08x\n",
 				atomic_read(&tgtp->xmt_fcp_read),
@@ -836,31 +838,31 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&tgtp->xmt_fcp_write),
 				atomic_read(&tgtp->xmt_fcp_rsp));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
 				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
 				atomic_read(&tgtp->xmt_fcp_rsp_error),
 				atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
 				atomic_read(&tgtp->xmt_fcp_rsp_aborted),
 				atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
 				atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"ABORT: Xmt %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_fcp_abort),
 				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
 				atomic_read(&tgtp->xmt_abort_sol),
 				atomic_read(&tgtp->xmt_abort_unsol),
 				atomic_read(&tgtp->xmt_abort_rsp),
 				atomic_read(&tgtp->xmt_abort_rsp_error));
 
-		len +=  snprintf(buf + len, size - len, "\n");
+		len +=  scnprintf(buf + len, size - len, "\n");
 
 		cnt = 0;
 		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -871,7 +873,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		}
 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		if (cnt) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"ABORT: %d ctx entries\n", cnt);
 			spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 			list_for_each_entry_safe(ctxp, next_ctxp,
@@ -879,7 +881,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				    list) {
 				if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
 					break;
-				len += snprintf(buf + len, size - len,
+				len += scnprintf(buf + len, size - len,
 						"Entry: oxid %x state %x "
 						"flag %x\n",
 						ctxp->oxid, ctxp->state,
@@ -893,7 +895,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		tot += atomic_read(&tgtp->xmt_fcp_release);
 		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
 				"CTX Outstanding %08llx\n",
 				phba->sli4_hba.nvmet_xri_cnt,
@@ -911,10 +913,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		if (!lport)
 			return len;
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\nNVME Lport Statistics\n");
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Xmt %016x Cmpl %016x\n",
 				atomic_read(&lport->fc4NvmeLsRequests),
 				atomic_read(&lport->fc4NvmeLsCmpls));
@@ -938,20 +940,20 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			if (i >= 32)
 				continue;
 
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"FCP (%d): Rd %016llx Wr %016llx "
 					"IO %016llx ",
 					i, data1, data2, data3);
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Cmpl %016llx OutIO %016llx\n",
 					tot, ((data1 + data2 + data3) - tot));
 		}
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"Total FCP Cmpl %016llx Issue %016llx "
 				"OutIO %016llx\n",
 				totin, totout, totout - totin);
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS Xmt Err: Abrt %08x Err %08x  "
 				"Cmpl Err: xb %08x Err %08x\n",
 				atomic_read(&lport->xmt_ls_abort),
@@ -959,7 +961,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&lport->cmpl_ls_xb),
 				atomic_read(&lport->cmpl_ls_err));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Xmt Err: noxri %06x nondlp %06x "
 				"qdepth %06x wqerr %06x err %06x Abrt %06x\n",
 				atomic_read(&lport->xmt_fcp_noxri),
@@ -969,7 +971,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&lport->xmt_fcp_err),
 				atomic_read(&lport->xmt_fcp_abort));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Cmpl Err: xb %08x Err %08x\n",
 				atomic_read(&lport->cmpl_fcp_xb),
 				atomic_read(&lport->cmpl_fcp_err));
@@ -1001,58 +1003,58 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 
 	if (phba->nvmet_support == 0) {
 		/* NVME Initiator */
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"ktime %s: Total Samples: %lld\n",
 				(phba->ktime_on ?  "Enabled" : "Disabled"),
 				phba->ktime_data_samples);
 		if (phba->ktime_data_samples == 0)
 			return len;
 
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 1: Last NVME Cmd cmpl "
 			"done -to- Start of next NVME cnd (in driver)\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg1_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg1_min,
 			phba->ktime_seg1_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 2: Driver start of NVME cmd "
 			"-to- Firmware WQ doorbell\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg2_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg2_min,
 			phba->ktime_seg2_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 3: Firmware WQ doorbell -to- "
 			"MSI-X ISR cmpl\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg3_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg3_min,
 			phba->ktime_seg3_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 4: MSI-X ISR cmpl -to- "
 			"NVME cmpl done\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg4_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg4_min,
 			phba->ktime_seg4_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Total IO avg time: %08lld\n",
 			div_u64(phba->ktime_seg1_total +
@@ -1064,7 +1066,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 	}
 
 	/* NVME Target */
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"ktime %s: Total Samples: %lld %lld\n",
 			(phba->ktime_on ? "Enabled" : "Disabled"),
 			phba->ktime_data_samples,
@@ -1072,46 +1074,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 	if (phba->ktime_data_samples == 0)
 		return len;
 
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 1: MSI-X ISR Rcv cmd -to- "
 			"cmd pass to NVME Layer\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg1_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg1_min,
 			phba->ktime_seg1_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 2: cmd pass to NVME Layer- "
 			"-to- Driver rcv cmd OP (action)\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg2_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg2_min,
 			phba->ktime_seg2_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 3: Driver rcv cmd OP -to- "
 			"Firmware WQ doorbell: cmd\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg3_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg3_min,
 			phba->ktime_seg3_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 4: Firmware WQ doorbell: cmd "
 			"-to- MSI-X ISR for cmd cmpl\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg4_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg4_min,
 			phba->ktime_seg4_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 5: MSI-X ISR for cmd cmpl "
 			"-to- NVME layer passed cmd done\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg5_total,
 				phba->ktime_data_samples),
@@ -1119,10 +1121,10 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 			phba->ktime_seg5_max);
 
 	if (phba->ktime_status_samples == 0) {
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Total: cmd received by MSI-X ISR "
 				"-to- cmd completed on wire\n");
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"avg:%08lld min:%08lld "
 				"max %08lld\n",
 				div_u64(phba->ktime_seg10_total,
@@ -1132,46 +1134,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 		return len;
 	}
 
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 6: NVME layer passed cmd done "
 			"-to- Driver rcv rsp status OP\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg6_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg6_min,
 			phba->ktime_seg6_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 7: Driver rcv rsp status OP "
 			"-to- Firmware WQ doorbell: status\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg7_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg7_min,
 			phba->ktime_seg7_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 8: Firmware WQ doorbell: status"
 			" -to- MSI-X ISR for status cmpl\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg8_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg8_min,
 			phba->ktime_seg8_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 9: MSI-X ISR for status cmpl  "
 			"-to- NVME layer passed status done\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg9_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg9_min,
 			phba->ktime_seg9_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Total: cmd received by MSI-X ISR -to- "
 			"cmd completed on wire\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg10_total,
 				phba->ktime_status_samples),
@@ -1206,7 +1208,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		(phba->nvmeio_trc_size - 1);
 	skip = phba->nvmeio_trc_output_idx;
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"%s IO Trace %s: next_idx %d skip %d size %d\n",
 			(phba->nvmet_support ? "NVME" : "NVMET"),
 			(state ? "Enabled" : "Disabled"),
@@ -1228,18 +1230,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		if (!dtp->fmt)
 			continue;
 
-		len +=  snprintf(buf + len, size - len, dtp->fmt,
+		len +=  scnprintf(buf + len, size - len, dtp->fmt,
 			dtp->data1, dtp->data2, dtp->data3);
 
 		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
 			phba->nvmeio_trc_output_idx = 0;
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Complete\n");
 			goto out;
 		}
 
 		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Continue (%d of %d)\n",
 					phba->nvmeio_trc_output_idx,
 					phba->nvmeio_trc_size);
@@ -1257,18 +1259,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		if (!dtp->fmt)
 			continue;
 
-		len +=  snprintf(buf + len, size - len, dtp->fmt,
+		len +=  scnprintf(buf + len, size - len, dtp->fmt,
 			dtp->data1, dtp->data2, dtp->data3);
 
 		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
 			phba->nvmeio_trc_output_idx = 0;
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Complete\n");
 			goto out;
 		}
 
 		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Continue (%d of %d)\n",
 					phba->nvmeio_trc_output_idx,
 					phba->nvmeio_trc_size);
@@ -1276,7 +1278,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
 		}
 	}
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"Trace Done\n");
 out:
 	return len;
@@ -1308,39 +1310,39 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 
 	if (phba->nvmet_support == 0) {
 		/* NVME Initiator */
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"CPUcheck %s\n",
 				(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
 					"Enabled" : "Disabled"));
 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
 			if (i >= LPFC_CHECK_CPU_CNT)
 				break;
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"%02d: xmit x%08x cmpl x%08x\n",
 					i, phba->cpucheck_xmt_io[i],
 					phba->cpucheck_cmpl_io[i]);
 			tot_xmt += phba->cpucheck_xmt_io[i];
 			tot_cmpl += phba->cpucheck_cmpl_io[i];
 		}
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"tot:xmit x%08x cmpl x%08x\n",
 				tot_xmt, tot_cmpl);
 		return len;
 	}
 
 	/* NVME Target */
-	len += snprintf(buf + len, PAGE_SIZE - len,
+	len += scnprintf(buf + len, PAGE_SIZE - len,
 			"CPUcheck %s ",
 			(phba->cpucheck_on & LPFC_CHECK_NVMET_IO ?
 				"IO Enabled - " : "IO Disabled - "));
-	len += snprintf(buf + len, PAGE_SIZE - len,
+	len += scnprintf(buf + len, PAGE_SIZE - len,
 			"%s\n",
 			(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
 				"Rcv Enabled\n" : "Rcv Disabled\n"));
 	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
 		if (i >= LPFC_CHECK_CPU_CNT)
 			break;
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"%02d: xmit x%08x ccmpl x%08x "
 				"cmpl x%08x rcv x%08x\n",
 				i, phba->cpucheck_xmt_io[i],
@@ -1352,7 +1354,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 		tot_cmpl += phba->cpucheck_cmpl_io[i];
 		tot_ccmpl += phba->cpucheck_ccmpl_io[i];
 	}
-	len += snprintf(buf + len, PAGE_SIZE - len,
+	len += scnprintf(buf + len, PAGE_SIZE - len,
 			"tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n",
 			tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv);
 	return len;
@@ -1797,28 +1799,29 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
 	int cnt = 0;
 
 	if (dent == phba->debug_writeGuard)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
 	else if (dent == phba->debug_writeApp)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
 	else if (dent == phba->debug_writeRef)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
 	else if (dent == phba->debug_readGuard)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
 	else if (dent == phba->debug_readApp)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
 	else if (dent == phba->debug_readRef)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
 	else if (dent == phba->debug_InjErrNPortID)
-		cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+		cnt = scnprintf(cbuf, 32, "0x%06x\n",
+				phba->lpfc_injerr_nportid);
 	else if (dent == phba->debug_InjErrWWPN) {
 		memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
 		tmp = cpu_to_be64(tmp);
-		cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+		cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
 	} else if (dent == phba->debug_InjErrLBA) {
 		if (phba->lpfc_injerr_lba == (sector_t)(-1))
-			cnt = snprintf(cbuf, 32, "off\n");
+			cnt = scnprintf(cbuf, 32, "off\n");
 		else
-			cnt = snprintf(cbuf, 32, "0x%llx\n",
+			cnt = scnprintf(cbuf, 32, "0x%llx\n",
 				 (uint64_t) phba->lpfc_injerr_lba);
 	} else
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2624,17 +2627,17 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
 	switch (count) {
 	case SIZE_U8: /* byte (8 bits) */
 		pci_read_config_byte(pdev, where, &u8val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %02x\n", where, u8val);
 		break;
 	case SIZE_U16: /* word (16 bits) */
 		pci_read_config_word(pdev, where, &u16val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %04x\n", where, u16val);
 		break;
 	case SIZE_U32: /* double word (32 bits) */
 		pci_read_config_dword(pdev, where, &u32val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %08x\n", where, u32val);
 		break;
 	case LPFC_PCI_CFG_BROWSE: /* browse all */
@@ -2654,25 +2657,25 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
 	offset = offset_label;
 
 	/* Read PCI config space */
-	len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 			"%03x: ", offset_label);
 	while (index > 0) {
 		pci_read_config_dword(pdev, offset, &u32val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%08x ", u32val);
 		offset += sizeof(uint32_t);
 		if (offset >= LPFC_PCI_CFG_SIZE) {
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_CFG_SIZE-len, "\n");
 			break;
 		}
 		index -= sizeof(uint32_t);
 		if (!index)
-			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 					"\n");
 		else if (!(index % (8 * sizeof(uint32_t)))) {
 			offset_label += (8 * sizeof(uint32_t));
-			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 					"\n%03x: ", offset_label);
 		}
 	}
@@ -2943,7 +2946,7 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
 	if (acc_range == SINGLE_WORD) {
 		offset_run = offset;
 		u32val = readl(mem_mapped_bar + offset_run);
-		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 				"%05x: %08x\n", offset_run, u32val);
 	} else
 		goto baracc_browse;
@@ -2957,35 +2960,35 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
 	offset_run = offset_label;
 
 	/* Read PCI bar memory mapped space */
-	len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 			"%05x: ", offset_label);
 	index = LPFC_PCI_BAR_RD_SIZE;
 	while (index > 0) {
 		u32val = readl(mem_mapped_bar + offset_run);
-		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 				"%08x ", u32val);
 		offset_run += sizeof(uint32_t);
 		if (acc_range == LPFC_PCI_BAR_BROWSE) {
 			if (offset_run >= bar_size) {
-				len += snprintf(pbuffer+len,
+				len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 				break;
 			}
 		} else {
 			if (offset_run >= offset +
 			    (acc_range * sizeof(uint32_t))) {
-				len += snprintf(pbuffer+len,
+				len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 				break;
 			}
 		}
 		index -= sizeof(uint32_t);
 		if (!index)
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 		else if (!(index % (8 * sizeof(uint32_t)))) {
 			offset_label += (8 * sizeof(uint32_t));
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len,
 					"\n%05x: ", offset_label);
 		}
@@ -3158,19 +3161,19 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\t%s WQ info: ", wqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1,
 			(unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
 			qp->hba_index, qp->entry_repost);
-	len +=  snprintf(pbuffer + len,
+	len +=  scnprintf(pbuffer + len,
 			LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 	return len;
 }
@@ -3208,21 +3211,21 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t%s CQ info: ", cqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
 			"xabt:x%x wq:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
 			qp->hba_index, qp->entry_repost);
 
-	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+	len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
 	return len;
 }
@@ -3234,19 +3237,19 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
 	if (!qp || !datqp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\t%s RQ info: ", rqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
 			"posted:x%x rcv:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
 			qp->queue_id, qp->entry_count, qp->entry_size,
 			qp->host_index, qp->hba_index, qp->entry_repost);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
 			datqp->queue_id, datqp->entry_count,
@@ -3331,17 +3334,17 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
 			"cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
 			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
 			(unsigned long long)qp->q_cnt_4, qp->q_mode);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count, qp->entry_size,
 			qp->host_index, qp->hba_index, qp->entry_repost);
-	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+	len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
 	return len;
 }
@@ -3399,7 +3402,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			if (phba->cfg_fof == 0)
 				phba->lpfc_idiag_last_eq = 0;
 
-		len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+		len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 					"EQ %d out of %d HBA EQs\n",
 					x, phba->io_channel_irqs);
 
@@ -3512,7 +3515,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
 too_big:
-	len +=  snprintf(pbuffer + len,
+	len +=  scnprintf(pbuffer + len,
 		LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
 out:
 	spin_unlock_irq(&phba->hbalock);
@@ -3568,22 +3571,22 @@ lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
 		return 0;
 
 	esize = pque->entry_size;
-	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
 			"QE-INDEX[%04d]:\n", index);
 
 	offset = 0;
 	pentry = pque->qe[index].address;
 	while (esize > 0) {
-		len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
 				"%08x ", *pentry);
 		pentry++;
 		offset += sizeof(uint32_t);
 		esize -= sizeof(uint32_t);
 		if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_QUE_ACC_BUF_SIZE-len, "\n");
 	}
-	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+	len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
 
 	return len;
 }
@@ -3989,27 +3992,27 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
 
 	switch (drbregid) {
 	case LPFC_DRB_EQ:
-		len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"EQ-DRB-REG: 0x%08x\n",
 				readl(phba->sli4_hba.EQDBregaddr));
 		break;
 	case LPFC_DRB_CQ:
-		len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
+		len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
 				"CQ-DRB-REG: 0x%08x\n",
 				readl(phba->sli4_hba.CQDBregaddr));
 		break;
 	case LPFC_DRB_MQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"MQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.MQDBregaddr));
 		break;
 	case LPFC_DRB_WQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"WQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.WQDBregaddr));
 		break;
 	case LPFC_DRB_RQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"RQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.RQDBregaddr));
 		break;
@@ -4199,37 +4202,37 @@ lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
 
 	switch (ctlregid) {
 	case LPFC_CTL_PORT_SEM:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port SemReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_SEM_OFFSET));
 		break;
 	case LPFC_CTL_PORT_STA:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port StaReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_STA_OFFSET));
 		break;
 	case LPFC_CTL_PORT_CTL:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port CtlReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_CTL_OFFSET));
 		break;
 	case LPFC_CTL_PORT_ER1:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port Er1Reg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_ER1_OFFSET));
 		break;
 	case LPFC_CTL_PORT_ER2:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port Er2Reg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_ER2_OFFSET));
 		break;
 	case LPFC_CTL_PDEV_CTL:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"PDev CtlReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PDEV_CTL_OFFSET));
@@ -4422,13 +4425,13 @@ lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
 	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
 	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
 
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_dump_map: 0x%08x\n", mbx_dump_map);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_dump_cnt: %04d\n", mbx_dump_cnt);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_word_cnt: %04d\n", mbx_word_cnt);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
 
 	return len;
@@ -4577,35 +4580,35 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
 {
 	uint16_t ext_cnt, ext_size;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nAvailable Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available VPI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available VFI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available RPI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available XRI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
 	return len;
@@ -4629,55 +4632,55 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
 	uint16_t ext_cnt, ext_size;
 	int rc;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nAllocated Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated VPI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated VFI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated RPI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated XRI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
 	return len;
@@ -4701,49 +4704,49 @@ lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
 	struct lpfc_rsrc_blks *rsrc_blks;
 	int index;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nDriver Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tVPI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tVFI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tRPI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tXRI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
@@ -5137,11 +5140,11 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
 				if (i != 0)
 					pr_err("%s\n", line_buf);
 				len = 0;
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
-			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+			len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
 					"%08x ", (uint32_t)*pword);
 			pword++;
 		}
@@ -5204,11 +5207,11 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
 					pr_err("%s\n", line_buf);
 				len = 0;
 				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
-			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+			len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
 					"%08x ",
 					((uint32_t)*pword) & 0xffffffff);
 			pword++;
@@ -5227,18 +5230,18 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
 					pr_err("%s\n", line_buf);
 				len = 0;
 				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
 			for (j = 0; j < 4; j++) {
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%02x",
 						((uint8_t)*pbyte) & 0xff);
 				pbyte++;
 			}
-			len += snprintf(line_buf+len,
+			len += scnprintf(line_buf+len,
 					LPFC_MBX_ACC_LBUF_SZ-len, " ");
 		}
 		if ((i - 1) % 8)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 30efc7b..824de3e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -342,7 +342,7 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
 	pword = q->qe[idx].address;
 
 	len = 0;
-	len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+	len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
 	if (qe_word_cnt > 8)
 		printk(KERN_ERR "%s\n", line_buf);
 
@@ -353,11 +353,11 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
 			if (qe_word_cnt > 8) {
 				len = 0;
 				memset(line_buf, 0, LPFC_LBUF_SZ);
-				len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+				len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
 		}
-		len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+		len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
 				((uint32_t)*pword) & 0xffffffff);
 		pword++;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877..ccdd82b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -921,7 +921,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
-	/* Clean up any firmware default rpi's */
+
+	/* Clean up any SLI3 firmware default rpi's */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		goto skip_unreg_did;
+
 	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mb) {
 		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -933,6 +937,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
 		}
 	}
 
+ skip_unreg_did:
 	/* Setup myDID for link up if we are in pt2pt mode */
 	if (phba->pport->fc_flag & FC_PT2PT) {
 		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4855,6 +4860,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
 	LPFC_MBOXQ_t     *mbox;
 	int rc;
 
+	/* Unreg DID is an SLI3 operation. */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		return;
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
 		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index ca62117..099f707 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2482,15 +2482,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	if (!cstat)
 		return -ENOMEM;
 
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return ret;
+
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
-#if (IS_ENABLED(CONFIG_NVME_FC))
+
 	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 					 &vport->phba->pcidev->dev, &localport);
-#else
-	ret = -ENOMEM;
-#endif
 	if (!ret) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
 				 "6005 Successfully registered local "
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b59bba3..8776330 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3280,12 +3280,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
+		void *request;
 
 		st = _get_st_from_smid(ioc, smid);
 		if (!st) {
 			_base_recovery_check(ioc);
 			return;
 		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
 		mpt3sas_base_clear_st(ioc, st);
 		_base_recovery_check(ioc);
 		return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 622832e..73d661a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1474,11 +1474,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	struct scsi_cmnd *scmd = NULL;
 	struct scsiio_tracker *st;
+	Mpi25SCSIIORequest_t *mpi_request;
 
 	if (smid > 0  &&
 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
 		u32 unique_tag = smid - 1;
 
+		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+		/*
+		 * If SCSI IO request is outstanding at driver level then
+		 * DevHandle filed must be non-zero. If DevHandle is zero
+		 * then it means that this smid is free at driver level,
+		 * so return NULL.
+		 */
+		if (!mpi_request->DevHandle)
+			return scmd;
+
 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
 		if (scmd) {
 			st = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6bbc38b..a17c138 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
 		kref_put(&io_req->refcount, qedf_release_cmd);
+		return -EINVAL;
 	}
 
 	/* Obtain free SQE */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index d4821b9..4130b91 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1001,6 +1001,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 	qedi_ep = ep->dd_data;
 	qedi = qedi_ep->qedi;
 
+	if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+		goto ep_exit_recover;
+
 	flush_work(&qedi_ep->offload_work);
 
 	if (qedi_ep->conn) {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b99..f8f4d3ea 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -345,7 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,7 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29..88d8acf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3449,7 +3449,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 		ql_log(ql_log_fatal, vha, 0x00c8,
 		    "Failed to allocate memory for ha->msix_entries.\n");
 		ret = -ENOMEM;
-		goto msix_out;
+		goto free_irqs;
 	}
 	ha->flags.msix_enabled = 1;
 
@@ -3532,6 +3532,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 
 msix_out:
 	return ret;
+
+free_irqs:
+	pci_free_irq_vectors(ha->pdev);
+	goto msix_out;
 }
 
 int
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index d2888b3..9d7feb0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -684,7 +684,6 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
 	fc_port_t *t;
-	unsigned long flags;
 
 	switch (e->u.nack.type) {
 	case SRB_NACK_PRLI:
@@ -694,10 +693,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		if (t) {
 			ql_log(ql_log_info, vha, 0xd034,
 			    "%s create sess success %p", __func__, t);
-			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 			/* create sess has an extra kref */
 			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
-			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		}
 		break;
 	}
@@ -709,9 +706,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
 {
 	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
 	struct qla_hw_data *ha = fcport->vha->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
 	if (fcport->se_sess) {
 		ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -719,7 +713,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
 	} else {
 		qlt_unreg_sess(fcport);
 	}
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 /*
@@ -788,8 +781,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 		    fcport->port_name, sess->loop_id);
 		sess->local = 0;
 	}
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
 }
 
 /*
@@ -981,6 +975,8 @@ void qlt_free_session_done(struct work_struct *work)
 		sess->send_els_logo);
 
 	if (!IS_SW_RESV_ADDR(sess->d_id)) {
+		qla2x00_mark_device_lost(vha, sess, 0, 0);
+
 		if (sess->send_els_logo) {
 			qlt_port_logo_t logo;
 
@@ -1161,8 +1157,6 @@ void qlt_unreg_sess(struct fc_port *sess)
 	if (sess->se_sess)
 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-	qla2x00_mark_device_lost(vha, sess, 0, 0);
-
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 	sess->disc_state = DSC_DELETE_PEND;
 	sess->last_rscn_gen = sess->rscn_gen;
@@ -4135,9 +4129,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 	/*
 	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
 	 */
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	return;
 
 out_term:
@@ -4154,9 +4146,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 	target_free_tag(sess->se_sess, &cmd->se_cmd);
 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_do_work(struct work_struct *work)
@@ -4365,9 +4355,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	if (!cmd) {
 		ql_dbg(ql_dbg_io, vha, 0x3062,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
-		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 		ha->tgt.tgt_ops->put_sess(sess);
-		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 		return -EBUSY;
 	}
 
@@ -6105,17 +6093,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
 	}
 
 	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
 
 out_term2:
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
 	if (sess)
 		ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -6175,9 +6165,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
 
 	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 64e2d85..b8c1a73 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -350,7 +350,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
 	if (!sess)
 		return;
 
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
 }
 
@@ -365,8 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	target_sess_cmd_list_set_waiting(se_sess);
-	tcm_qla2xxx_put_sess(sess);
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	tcm_qla2xxx_put_sess(sess);
 }
 
 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -390,6 +390,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 			cmd->se_cmd.transport_state,
 			cmd->se_cmd.t_state,
 			cmd->se_cmd.se_cmd_flags);
+		transport_generic_request_failure(&cmd->se_cmd,
+			TCM_CHECK_CONDITION_ABORT_CMD);
 		return 0;
 	}
 	cmd->trc_flags |= TRC_XFR_RDY;
@@ -829,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
 
 static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
 {
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	target_sess_cmd_list_set_waiting(sess->se_sess);
 }
 
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 575445c..25c8ce5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3204,6 +3204,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
 	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
 	conn = cls_conn->dd_data;
 	qla_conn = conn->dd_data;
 	qla_conn->qla_ep = ep->dd_data;
@@ -5933,7 +5935,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
 		val = rd_nvram_byte(ha, sec_addr);
 		if (val & BIT_7)
 			ddb_index[1] = (val & 0x7f);
-
+		goto exit_boot_info;
 	} else if (is_qla80XX(ha)) {
 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
 					 &buf_dma, GFP_KERNEL);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd0..a08ff3b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
 	{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},		/* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf..c14006a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
 	{"NETAPP", "INF-01-00",		"rdac", },
 	{"LSI", "INF-01-00",		"rdac", },
 	{"ENGENIO", "INF-01-00",	"rdac", },
+	{"LENOVO", "DE_Series",		"rdac", },
 	{NULL, NULL,			NULL },
 };
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4b1fef7..ffc0d14 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2153,8 +2153,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 			ret = BLK_STS_DEV_RESOURCE;
 		break;
 	default:
+		if (unlikely(!scsi_device_online(sdev)))
+			scsi_req(req)->result = DID_NO_CONNECT << 16;
+		else
+			scsi_req(req)->result = DID_ERROR << 16;
 		/*
-		 * Make sure to release all allocated ressources when
+		 * Make sure to release all allocated resources when
 		 * we hit an error, as we will never see this command
 		 * again.
 		 */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8aa2527..e102edf 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2525,7 +2525,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
-	int disk_ro = get_disk_ro(sdkp->disk);
 	int old_wp = sdkp->write_prot;
 
 	set_disk_ro(sdkp->disk, 0);
@@ -2566,7 +2565,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
+		set_disk_ro(sdkp->disk, sdkp->write_prot);
 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
 				  sdkp->write_prot ? "on" : "off");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f03dc03..0c2ba07 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -664,13 +664,22 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index a2d05dc..c94cdd9 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1083,8 +1083,8 @@ static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
 
 static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
 {
-	if (pwr_mode->gear_rx < UFS_HS_G1 || pwr_mode->gear_rx > UFS_HS_G3 ||
-	    pwr_mode->gear_tx < UFS_HS_G1 || pwr_mode->gear_tx > UFS_HS_G3 ||
+	if (pwr_mode->gear_rx < UFS_HS_G1 || pwr_mode->gear_rx > UFS_HS_G4 ||
+	    pwr_mode->gear_tx < UFS_HS_G1 || pwr_mode->gear_tx > UFS_HS_G4 ||
 	    pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
 	    pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
 	    (pwr_mode->pwr_rx != FAST_MODE &&
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 452e19f..c2cee73 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -544,6 +544,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
 	ufshcd_set_variant(hba, host);
 
 	host->rst  = devm_reset_control_get(dev, "rst");
+	if (IS_ERR(host->rst)) {
+		dev_err(dev, "%s: failed to get reset control\n", __func__);
+		return PTR_ERR(host->rst);
+	}
 
 	ufs_hisi_set_pm_lvl(hba);
 
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 00954f3..1a3046f 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -235,7 +235,13 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
 			err = -ENOMEM;
 			goto out;
 		}
-		INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
+	}
+	if (ice_workqueue) {
+		if (!qcom_host->is_ice_cfg_work_set) {
+			INIT_WORK(&qcom_host->ice_cfg_work,
+					ufs_qcom_ice_cfg_work);
+			qcom_host->is_ice_cfg_work_set = true;
+		}
 	}
 
 out:
@@ -319,7 +325,6 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
 					}
 					qcom_host->work_pending = true;
 				}
-
 			} else {
 				if (err != -EBUSY)
 					dev_err(qcom_host->hba->dev,
@@ -553,7 +558,7 @@ int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req)
 	struct device *dev = qcom_host->hba->dev;
 
 	if (qcom_host->ice.vops->config_end) {
-		err = qcom_host->ice.vops->config_end(req);
+		err = qcom_host->ice.vops->config_end(qcom_host->ice.pdev, req);
 		if (err) {
 			dev_err(dev, "%s: error in ice_vops->config_end %d\n",
 				__func__, err);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index c0b50ac..2ce111e 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -272,12 +272,16 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
 							? true : false;
 
+	/* Use Rate-A for Gear4 */
+	if (hba->phy_init_g4)
+		is_rate_B = false;
+
 	/* Assert PHY reset and apply PHY calibration values */
 	ufs_qcom_assert_reset(hba);
 	/* provide 1ms delay to let the reset pulse propagate */
 	usleep_range(1000, 1100);
 
-	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
+	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B, hba->phy_init_g4);
 
 	if (ret) {
 		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
@@ -1408,7 +1412,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
 		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
 		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
-		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
+		if (hba->phy_init_g4)
+			ufs_qcom_cap.hs_rate = PA_HS_MODE_A;
+		else
+			ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
 		ufs_qcom_cap.desired_working_mode =
 					UFS_QCOM_LIMIT_DESIRED_MODE;
 
@@ -1932,6 +1939,9 @@ static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
 		if (ret)
 			goto free_groups;
 
+		host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
+		host->pm_qos.groups[i].req.cpus_affine =
+			host->pm_qos.groups[i].mask;
 		host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
 		host->pm_qos.groups[i].active_reqs = 0;
 		host->pm_qos.groups[i].host = host;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index b7690d3..011697a5 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -383,6 +383,7 @@ struct ufs_qcom_host {
 
 	spinlock_t ice_work_lock;
 	struct work_struct ice_cfg_work;
+	bool is_ice_cfg_work_set;
 	struct request *req_pending;
 	struct ufs_vreg *vddp_ref_clk;
 	bool work_pending;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index d790923..4da79b9 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -515,9 +515,9 @@ struct ufs_query_res {
 #define UFS_VREG_VCC_MAX_UV	   3600000 /* uV */
 #define UFS_VREG_VCC_1P8_MIN_UV    1700000 /* uV */
 #define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV	   1100000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV	   1140000 /* uV */
 #define UFS_VREG_VCCQ_MAX_UV	   1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV	   1750000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV	   1700000 /* uV */
 #define UFS_VREG_VCCQ2_MAX_UV	   1950000 /* uV */
 
 /*
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 65824020..ae0a5b3 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -70,6 +70,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 	struct ufs_clk_info *clki;
 	int len = 0;
 	size_t sz = 0;
+	char *str = NULL;
 
 	if (!np)
 		goto out;
@@ -131,7 +132,15 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 
 		clki->min_freq = clkfreq[i];
 		clki->max_freq = clkfreq[i+1];
-		clki->name = kstrdup(name, GFP_KERNEL);
+		str = devm_kzalloc(dev, strlen(name) + 1, GFP_KERNEL);
+		if (!str) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(str, name, strlen(name) + 1);
+		clki->name = str;
+
 		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
 				clki->min_freq, clki->max_freq, clki->name);
 		list_add_tail(&clki->list, &hba->clk_list_head);
@@ -149,6 +158,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 	struct ufs_vreg *vreg = NULL;
 	struct device_node *np = dev->of_node;
 	const __be32 *prop;
+	char *str = NULL;
 
 	if (!np) {
 		dev_err(dev, "%s: non DT initialization\n", __func__);
@@ -166,7 +176,11 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 	if (!vreg)
 		return -ENOMEM;
 
-	vreg->name = kstrdup(name, GFP_KERNEL);
+	str = devm_kzalloc(dev, strlen(name) + 1, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+	memcpy(str, name, strlen(name) + 1);
+	vreg->name = str;
 
 	/* if fixed regulator no need further initialization */
 	snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
@@ -334,6 +348,14 @@ static void ufshcd_parse_cmd_timeout(struct ufs_hba *hba)
 		hba->scsi_cmd_timeout = 0;
 }
 
+static void ufshcd_parse_force_g4_flag(struct ufs_hba *hba)
+{
+	if (device_property_read_bool(hba->dev, "force-g4"))
+		hba->force_g4 = true;
+	else
+		hba->force_g4 = false;
+}
+
 static void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba)
 {
 	struct device *dev = hba->dev;
@@ -489,6 +511,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 	ufshcd_parse_pm_levels(hba);
 	ufshcd_parse_gear_limits(hba);
 	ufshcd_parse_cmd_timeout(hba);
+	ufshcd_parse_force_g4_flag(hba);
 	err = ufshcd_parse_extcon_info(hba);
 	if (err)
 		goto dealloc_host;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5a2f5c7..99f4e05 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
 #include <linux/nls.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/suspend.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
@@ -375,6 +376,39 @@ static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
 	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
 }
 
+static bool ufshcd_is_card_present(struct ufs_hba *hba)
+{
+	if (ufshcd_is_card_online(hba))
+		/*
+		 * TODO: need better way to ensure that this delay is
+		 * more than extcon's debounce-ms
+		 */
+		msleep(300);
+
+	/*
+	 * Check if card was online and offline/removed now or
+	 * card was already offline.
+	 */
+	if (ufshcd_is_card_offline(hba))
+		return false;
+
+	return true;
+}
+
+static int ufshcd_card_get_extcon_state(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!hba->extcon)
+		return -EINVAL;
+
+	ret = extcon_get_state(hba->extcon, EXTCON_MECHANICAL);
+	if (ret < 0)
+		dev_err(hba->dev, "%s: Failed to check card Extcon state, ret=%d\n",
+				 __func__, ret);
+	return ret;
+}
+
 static inline enum ufs_pm_level
 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 					enum uic_link_state link_state)
@@ -476,6 +510,8 @@ static int ufshcd_config_vreg(struct device *dev,
 				struct ufs_vreg *vreg, bool on);
 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg);
 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg);
+static void ufshcd_register_pm_notifier(struct ufs_hba *hba);
+static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba);
 
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
@@ -5126,7 +5162,9 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
 
 out:
-	if (ret && !(hba->extcon && ufshcd_is_card_offline(hba))) {
+	if (ret) {
+		if (hba->extcon && !ufshcd_is_card_present(hba))
+			goto skip_dump;
 		ufsdbg_set_err_state(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
@@ -5135,6 +5173,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		BUG_ON(hba->crash_on_err);
 	}
 
+skip_dump:
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
@@ -5209,6 +5248,7 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
 	 */
 	hba->ufshcd_state = UFSHCD_STATE_ERROR;
 	hba->force_host_reset = true;
+	ufshcd_set_eh_in_progress(hba);
 	schedule_work(&hba->eh_work);
 
 	/* wait for the reset work to finish */
@@ -6951,33 +6991,18 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->extcon) {
-		if (ufshcd_is_card_online(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
-			/*
-			 * TODO: need better way to ensure that this delay is
-			 * more than extcon's debounce-ms
-			 */
-			msleep(300);
-			spin_lock_irqsave(hba->host->host_lock, flags);
-		}
-
-		/*
-		 * ignore error if card was online and offline/removed now or
-		 * card was already offline.
-		 */
-		if (ufshcd_is_card_offline(hba)) {
-			hba->saved_err = 0;
-			hba->saved_uic_err = 0;
-			hba->saved_ce_err = 0;
-			hba->auto_h8_err = false;
-			hba->force_host_reset = false;
-			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-			goto out;
-		}
+	if (hba->extcon && !ufshcd_is_card_present(hba)) {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->saved_err = 0;
+		hba->saved_uic_err = 0;
+		hba->saved_ce_err = 0;
+		hba->auto_h8_err = false;
+		hba->force_host_reset = false;
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+		goto out;
 	}
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufsdbg_set_err_state(hba);
 
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
@@ -7165,6 +7190,10 @@ static void ufshcd_rls_handler(struct work_struct *work)
 	u32 mode;
 
 	hba = container_of(work, struct ufs_hba, rls_work);
+
+	if (hba->extcon && !ufshcd_is_card_present(hba))
+		return;
+
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
@@ -8083,19 +8112,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
 		goto out;
 	}
 
-	if (hba->vreg_info.vcc)
+	if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vcc->max_uA,
 				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
 				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
 
-	if (hba->vreg_info.vccq)
+	if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vccq->max_uA,
 				icc_level,
 				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
 
-	if (hba->vreg_info.vccq2)
+	if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vccq2->max_uA,
 				icc_level,
@@ -8250,10 +8279,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 	buff_len = max_t(size_t, hba->desc_size.dev_desc,
 			 QUERY_DESC_MAX_SIZE + 1);
 	desc_buf = kmalloc(buff_len, GFP_KERNEL);
-	if (!desc_buf) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!desc_buf)
+		return -ENOMEM;
 
 	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
 	if (err) {
@@ -8656,15 +8683,14 @@ static int ufs_read_device_desc_data(struct ufs_hba *hba)
 	if (hba->desc_size.dev_desc) {
 		desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL);
 		if (!desc_buf) {
-			err = -ENOMEM;
 			dev_err(hba->dev,
 				"%s: Failed to allocate desc_buf\n", __func__);
-			return err;
+			return -ENOMEM;
 		}
 	}
 	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
 	if (err)
-		return err;
+		goto out;
 
 	/*
 	 * getting vendor (manufacturerID) and Bank Index in big endian
@@ -8679,8 +8705,28 @@ static int ufs_read_device_desc_data(struct ufs_hba *hba)
 	hba->dev_info.w_spec_version =
 		desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
 		desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+out:
+	kfree(desc_buf);
+	return err;
+}
 
-	return 0;
+static inline bool ufshcd_needs_reinit(struct ufs_hba *hba)
+{
+	bool reinit = false;
+
+	if (hba->dev_info.w_spec_version < 0x300 && hba->phy_init_g4) {
+		dev_warn(hba->dev, "%s: Using force-g4 setting for a non-g4 device, re-init\n",
+				  __func__);
+		hba->phy_init_g4 = false;
+		reinit = true;
+	} else if (hba->dev_info.w_spec_version >= 0x300 && !hba->phy_init_g4) {
+		dev_warn(hba->dev, "%s: Re-init UFS host to use proper PHY settings for the UFS device. This can be avoided by setting the force-g4 in DT\n",
+				  __func__);
+		hba->phy_init_g4 = true;
+		reinit = true;
+	}
+
+	return reinit;
 }
 
 /**
@@ -8695,6 +8741,12 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	int ret;
 	ktime_t start = ktime_get();
 
+reinit:
+	if (hba->extcon && (ufshcd_card_get_extcon_state(hba) <= 0)) {
+		ret = -ENOLINK;
+		goto out;
+	}
+
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
@@ -8734,6 +8786,32 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 		goto out;
 	}
 
+	if (ufshcd_needs_reinit(hba)) {
+		unsigned long flags;
+		int err;
+
+		err = ufshcd_vops_full_reset(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: full reset returned %d\n",
+				 __func__, err);
+
+		err = ufshcd_reset_device(hba);
+		if (err)
+			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+				 __func__, err);
+
+		/* Reset the host controller */
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		ufshcd_hba_stop(hba, false);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+		err = ufshcd_hba_enable(hba);
+		if (err)
+			goto out;
+
+		goto reinit;
+	}
+
 	ufs_fixup_device_setup(hba, &card);
 	ufshcd_tune_unipro_params(hba);
 
@@ -8857,6 +8935,7 @@ static void ufshcd_remove_device(struct ufs_hba *hba)
 	int sdev_count = 0, i;
 	unsigned long flags;
 
+	hba->card_removal_in_progress = 1;
 	ufshcd_hold_all(hba);
 	/* Reset the host controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -8883,10 +8962,14 @@ static void ufshcd_remove_device(struct ufs_hba *hba)
 		scsi_remove_device(sdev_cache[i]);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	/* Complete the flying async UIC command if there is one */
+	if (hba->uic_async_done)
+		complete(hba->uic_async_done);
 	hba->silence_err_logs = false;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	ufshcd_release_all(hba);
+	hba->card_removal_in_progress = 0;
 }
 
 static void ufshcd_card_detect_handler(struct work_struct *work)
@@ -8912,9 +8995,11 @@ static int ufshcd_card_detect_notifier(struct notifier_block *nb,
 {
 	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
 
-	if (event)
+	if (event) {
+		if (hba->card_removal_in_progress)
+			goto out;
 		ufshcd_set_card_online(hba);
-	else
+	} else
 		ufshcd_set_card_offline(hba);
 
 	if (ufshcd_is_card_offline(hba) && !hba->sdev_ufs_device)
@@ -9307,6 +9392,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 	if (!vreg)
 		return 0;
 
+	/*
+	 * "set_load" operation shall be required on those regulators
+	 * which specifically configured current limitation. Otherwise
+	 * zero max_uA may cause unexpected behavior when regulator is
+	 * enabled or set as high power mode.
+	 */
+	if (!vreg->max_uA)
+		return 0;
+
 	ret = regulator_set_load(vreg->reg, ua);
 	if (ret < 0) {
 		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
@@ -9358,15 +9452,18 @@ static int ufshcd_config_vreg(struct device *dev,
 		if (ret)
 			goto out;
 
-		min_uV = on ? vreg->min_uV : 0;
-		if (vreg->low_voltage_sup && !vreg->low_voltage_active)
-			min_uV = vreg->max_uV;
+		if (vreg->min_uV && vreg->max_uV) {
+			min_uV = on ? vreg->min_uV : 0;
+			if (vreg->low_voltage_sup && !vreg->low_voltage_active)
+				min_uV = vreg->max_uV;
 
-		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
-		if (ret) {
-			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+			if (ret) {
+				dev_err(dev,
+					"%s: %s set voltage failed, err=%d\n",
 					__func__, name, ret);
-			goto out;
+				goto out;
+			}
 		}
 	}
 out:
@@ -10026,6 +10123,54 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 		ufshcd_setup_hba_vreg(hba, true);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pm_notify(struct notifier_block *notify_block,
+			 unsigned long mode, void *unused)
+{
+	struct ufs_hba *hba = container_of(
+		notify_block, struct ufs_hba, pm_notify);
+	int ret = 0;
+
+	if (!hba->extcon)
+		return ret;
+
+	switch (mode) {
+	case PM_SUSPEND_PREPARE:
+		ret = ufshcd_extcon_unregister(hba);
+		if (ret)
+			break;
+		cancel_work_sync(&hba->card_detect_work);
+		break;
+	case PM_POST_SUSPEND:
+		ret = ufshcd_extcon_register(hba);
+		if (ret)
+			break;
+		extcon_sync(hba->extcon, EXTCON_MECHANICAL);
+	}
+
+	return ret;
+}
+
+static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
+{
+	hba->pm_notify.notifier_call = ufshcd_pm_notify;
+	register_pm_notifier(&hba->pm_notify);
+}
+
+static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba)
+{
+	unregister_pm_notifier(&hba->pm_notify);
+}
+#else
+static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
+{
+}
+
+static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * ufshcd_suspend - helper function for suspend operations
  * @hba: per adapter instance
@@ -10231,7 +10376,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	if (hba->extcon &&
 	    (ufshcd_is_card_offline(hba) ||
-	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
+	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) ||
+	     !ufshcd_card_get_extcon_state(hba)))
 		goto skip_dev_ops;
 
 	if (ufshcd_is_link_hibern8(hba)) {
@@ -10570,6 +10716,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	}
 	ufshcd_hba_exit(hba);
 	ufsdbg_remove_debugfs(hba);
+	ufshcd_unregister_pm_notifier(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -10785,6 +10932,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 			 __func__, err);
 
+	if (hba->force_g4)
+		hba->phy_init_g4 = true;
+
 	/* Host controller enable */
 	err = ufshcd_hba_enable(hba);
 	if (err) {
@@ -10844,6 +10994,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	ufs_sysfs_add_nodes(hba->dev);
 
+	ufshcd_register_pm_notifier(hba);
 	return 0;
 
 out_remove_scsi_host:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4ec21e3..05a5cfd 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -58,7 +58,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/reset.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
 #include <linux/devfreq.h>
 #include "unipro.h"
 
@@ -768,6 +768,8 @@ enum ufshcd_card_state {
  * @card_detect_nb: card detector notifier registered with @extcon
  * @card_detect_work: work to exectute the card detect function
  * @card_state: card state event, enum ufshcd_card_state defines possible states
+ * @card_removal_in_progress: to track card removal progress
+ * @pm_notify: used to register for PM events
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -1005,6 +1007,8 @@ struct ufs_hba {
 	struct notifier_block card_detect_nb;
 	struct work_struct card_detect_work;
 	atomic_t card_state;
+	int card_removal_in_progress;
+	struct notifier_block pm_notify;
 
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
@@ -1068,6 +1072,9 @@ struct ufs_hba {
 	struct ufs_desc_size desc_size;
 	atomic_t scsi_block_reqs_cnt;
 	bool restore_needed;
+
+	bool phy_init_g4;
+	bool force_g4;
 };
 
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index b2e111f..93e8457 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -1754,6 +1754,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	bool			rxreg_access = false;
 	bool			slim_mdm = false;
 	const char		*ext_modem_id = NULL;
+	char			ipc_err_log_name[30];
 
 	if (of_device_is_compatible(pdev->dev.of_node,
 				    "qcom,iommu-slim-ctrl-cb"))
@@ -1796,6 +1797,12 @@ static int ngd_slim_probe(struct platform_device *pdev)
 		goto err_nobulk;
 	}
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(&pdev->dev, "could not set 32 bit DMA mask\n");
+		goto err_nobulk;
+	}
+
 	/* typical txn numbers and size used in bulk operation */
 	dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
 	dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
@@ -1820,6 +1827,21 @@ static int ngd_slim_probe(struct platform_device *pdev)
 		SLIM_INFO(dev, "start logging for slim dev %s\n",
 				dev_name(dev->dev));
 	}
+
+	/* Create Error IPC log context */
+	memset(ipc_err_log_name, 0, sizeof(ipc_err_log_name));
+	scnprintf(ipc_err_log_name, sizeof(ipc_err_log_name), "%s%s",
+						dev_name(dev->dev), "_err");
+	dev->ipc_slimbus_log_err =
+		ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
+						ipc_err_log_name, 0);
+	if (!dev->ipc_slimbus_log_err)
+		dev_err(&pdev->dev,
+			"error creating ipc_error_logging context\n");
+	else
+		SLIM_INFO(dev, "start error logging for slim dev %s\n",
+							ipc_err_log_name);
+
 	ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to create dev. attr\n");
@@ -2071,9 +2093,9 @@ static int ngd_slim_runtime_resume(struct device *device)
 	int ret = 0;
 
 	mutex_lock(&dev->tx_lock);
-	if (dev->state >= MSM_CTRL_ASLEEP)
+	if ((dev->state >= MSM_CTRL_ASLEEP) && (dev->qmi.handle != NULL))
 		ret = ngd_slim_power_up(dev, false);
-	if (ret) {
+	if (ret || dev->qmi.handle == NULL) {
 		/* Did SSR cause this power up failure */
 		if (dev->state != MSM_CTRL_DOWN)
 			dev->state = MSM_CTRL_ASLEEP;
@@ -2095,7 +2117,18 @@ static int ngd_slim_runtime_suspend(struct device *device)
 	int ret = 0;
 
 	mutex_lock(&dev->tx_lock);
-	ret = ngd_slim_power_down(dev);
+	if (dev->qmi.handle != NULL) {
+		ret = ngd_slim_power_down(dev);
+	} else {
+		if (dev->state == MSM_CTRL_DOWN)
+			SLIM_INFO(dev, "SB rt suspend in SSR: %d\n",
+								dev->state);
+		else
+			SLIM_INFO(dev, "SB rt suspend bad state: %d\n",
+								dev->state);
+		mutex_unlock(&dev->tx_lock);
+		return ret;
+	}
 	if (ret && ret != -EBUSY)
 		SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
 	if (!ret || ret == -ETIMEDOUT)
@@ -2142,7 +2175,7 @@ static int ngd_slim_suspend(struct device *dev)
 			cdev->qmi.deferred_resp = false;
 		}
 	}
-	SLIM_INFO(cdev, "system suspend\n");
+	SLIM_INFO(cdev, "system suspend state: %d\n", cdev->state);
 	return ret;
 }
 
@@ -2176,7 +2209,7 @@ static int ngd_slim_resume(struct device *dev)
 	 * Even if it's not enabled, rely on 1st client transaction to do
 	 * clock/power on
 	 */
-	SLIM_INFO(cdev, "system resume\n");
+	SLIM_INFO(cdev, "system resume state: %d\n", cdev->state);
 	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index c80f11c..5baabc3 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -314,6 +314,7 @@ struct msm_slim_ctrl {
 	int			ipc_log_mask;
 	bool			sysfs_created;
 	void			*ipc_slimbus_log;
+	void			*ipc_slimbus_log_err;
 	void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf);
 	u32			current_rx_buf[10];
 	int			current_count;
@@ -369,6 +370,9 @@ enum {
 	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \
 		ipc_log_string(dev->ipc_slimbus_log, x); \
 	} \
+	if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log_err, x); \
+	} \
 } while (0)
 
 #define SLIM_INFO(dev, x...) do { \
@@ -376,26 +380,36 @@ enum {
 	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\
 		ipc_log_string(dev->ipc_slimbus_log, x); \
 	} \
+	if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log_err, x); \
+	} \
 } while (0)
 
 /* warnings and errors show up on console always */
 #define SLIM_WARN(dev, x...) do { \
-	pr_warn(x); \
-	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) { \
+		pr_warn(x); \
 		ipc_log_string(dev->ipc_slimbus_log, x); \
+	} \
+	if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log_err, x); \
+	} \
 } while (0)
 
 /* ERROR condition in the driver sets the hs_serial_debug_mask
  * to ERR_FATAL level, so that this message can be seen
- * in IPC logging. Further errors continue to log on the console
+ * in IPC logging. Further errors continue to log on the error IPC logging.
  */
 #define SLIM_ERR(dev, x...) do { \
-	pr_err(x); \
 	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \
+		pr_err(x); \
 		ipc_log_string(dev->ipc_slimbus_log, x); \
 		dev->default_ipc_log_mask = dev->ipc_log_mask; \
 		dev->ipc_log_mask = FATAL_LEV; \
 	} \
+	if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log_err, x); \
+	} \
 } while (0)
 
 #define SLIM_RST_LOGLVL(dev) { \
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 6a145d2..dcf3977 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -2897,7 +2897,8 @@ static void slim_chan_changes(struct slim_device *sb, bool revert)
  * This API does what commit flag in other scheduling APIs do.
  * -EXFULL is returned if there is no space in TDM to reserve the
  * bandwidth. -EBUSY is returned if reconfiguration request is already in
- * progress.
+ * progress. This API caller should take care of the mutex lock for
+ * ctrl->sched.m_reconf.
  */
 int slim_reconfigure_now(struct slim_device *sb)
 {
@@ -2913,7 +2914,6 @@ int slim_reconfigure_now(struct slim_device *sb)
 	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
 				NULL, NULL, sb->laddr);
 
-	mutex_lock(&ctrl->sched.m_reconf);
 	/*
 	 * If there are no pending changes from this client, avoid sending
 	 * the reconfiguration sequence
@@ -2943,7 +2943,6 @@ int slim_reconfigure_now(struct slim_device *sb)
 			}
 		}
 		if (list_empty(&sb->mark_removal)) {
-			mutex_unlock(&ctrl->sched.m_reconf);
 			pr_info("SLIM_CL: skip reconfig sequence\n");
 			return 0;
 		}
@@ -3148,14 +3147,12 @@ int slim_reconfigure_now(struct slim_device *sb)
 		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
 		sb->cur_msgsl = sb->pending_msgsl;
 		slim_chan_changes(sb, false);
-		mutex_unlock(&ctrl->sched.m_reconf);
 		return 0;
 	}
 
 revert_reconfig:
 	/* Revert channel changes */
 	slim_chan_changes(sb, true);
-	mutex_unlock(&ctrl->sched.m_reconf);
 	return ret;
 }
 EXPORT_SYMBOL(slim_reconfigure_now);
@@ -3264,9 +3261,9 @@ int slim_control_ch(struct slim_device *sb, u16 chanh,
 		if (nchan < SLIM_GRP_TO_NCHAN(chanh))
 			chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
 	} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
-	mutex_unlock(&ctrl->sched.m_reconf);
 	if (!ret && commit)
 		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&ctrl->sched.m_reconf);
 	mutex_unlock(&sb->sldev_reconf);
 	return ret;
 }
@@ -3300,8 +3297,11 @@ int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
 	dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
 						sb->cur_msgsl);
 	sb->pending_msgsl = sl;
-	if (commit)
+	if (commit) {
+		mutex_lock(&ctrl->sched.m_reconf);
 		ret = slim_reconfigure_now(sb);
+		mutex_unlock(&ctrl->sched.m_reconf);
+	}
 	mutex_unlock(&sb->sldev_reconf);
 	return ret;
 }
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e60d7b6..1a36bc8 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -713,6 +713,15 @@
 	  the low power modes that RPM enters. The drivers outputs the message
 	  via a sysfs node.
 
+config QTI_DDR_STATS_LOG
+	tristate "Qualcomm Technologies Inc (QTI) DDR Stats Driver"
+	depends on QCOM_RPMH
+	help
+	  This option enables a driver which reads DDR statistical information
+	  from AOP shared memory location such as DDR low power modes and DDR
+	  frequency residency and counts. The driver outputs information using
+	  sysfs.
+
 config MSM_JTAGV8
 	bool "Debug and ETM trace support across power collapse for ARMv8"
 	default y if CORESIGHT_SOURCE_ETM4X
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4e08680..64024f4 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -79,6 +79,7 @@
 obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
 obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
 obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o
+obj-$(CONFIG_QTI_DDR_STATS_LOG) += ddr_stats.o
 obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
 obj-$(CONFIG_QCOM_HYP_CORE_CTL) += hyp_core_ctl.o
 obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
diff --git a/drivers/soc/qcom/cpuss_dump.c b/drivers/soc/qcom/cpuss_dump.c
index 6745dd4..1495bc5 100644
--- a/drivers/soc/qcom/cpuss_dump.c
+++ b/drivers/soc/qcom/cpuss_dump.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -12,6 +12,388 @@
 #include <linux/slab.h>
 #include <soc/qcom/memory_dump.h>
 
+#define REG_DUMP_ID		0xEF
+
+#define INPUT_DATA_BY_HLOS		0x00C0FFEE
+#define FORMAT_VERSION_1		0x1
+#define CORE_REG_NUM_DEFAULT	0x1
+
+#define MAGIC_INDEX				0
+#define FORMAT_VERSION_INDEX	1
+#define SYS_REG_INPUT_INDEX		2
+#define OUTPUT_DUMP_INDEX		3
+#define PERCORE_INDEX			4
+#define SYSTEM_REGS_INPUT_INDEX	5
+
+struct cpuss_dump_drvdata {
+	void *dump_vaddr;
+	u32 size;
+	u32 core_reg_num;
+	u32 core_reg_used_num;
+	u32 core_reg_end_index;
+	u32 sys_reg_size;
+	u32 used_memory;
+	struct mutex mutex;
+};
+
+struct reg_dump_data {
+	uint32_t magic;
+	uint32_t version;
+	uint32_t system_regs_input_index;
+	uint32_t regdump_output_byte_offset;
+};
+
+/**
+ * update_reg_dump_table - update the register dump table
+ * @core_reg_num: the number of per-core registers
+ *
+ * This function calculates system_regs_input_index and
+ * regdump_output_byte_offset to store into the dump memory.
+ * It also updates members of drvdata by the parameter core_reg_num.
+ *
+ * Returns 0 on success, or -ENOMEM if there is not enough memory.
+ */
+static int update_reg_dump_table(struct device *dev, u32 core_reg_num)
+{
+	int ret = 0;
+	u32 system_regs_input_index = SYSTEM_REGS_INPUT_INDEX +
+			core_reg_num * 2;
+	u32 regdump_output_byte_offset = (system_regs_input_index + 1)
+			* sizeof(uint32_t);
+	struct reg_dump_data *p;
+	struct cpuss_dump_drvdata *drvdata = dev_get_drvdata(dev);
+
+	mutex_lock(&drvdata->mutex);
+
+	if (regdump_output_byte_offset >= drvdata->size ||
+			regdump_output_byte_offset / sizeof(uint32_t)
+			< system_regs_input_index + 1) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	drvdata->core_reg_num = core_reg_num;
+	drvdata->core_reg_used_num = 0;
+	drvdata->core_reg_end_index = PERCORE_INDEX;
+	drvdata->sys_reg_size = 0;
+	drvdata->used_memory = regdump_output_byte_offset;
+
+	memset(drvdata->dump_vaddr, 0xDE, drvdata->size);
+	p = (struct reg_dump_data *)drvdata->dump_vaddr;
+	p->magic = INPUT_DATA_BY_HLOS;
+	p->version = FORMAT_VERSION_1;
+	p->system_regs_input_index = system_regs_input_index;
+	p->regdump_output_byte_offset = regdump_output_byte_offset;
+	memset((uint32_t *)drvdata->dump_vaddr + PERCORE_INDEX, 0x0,
+			(system_regs_input_index - PERCORE_INDEX + 1)
+			* sizeof(uint32_t));
+
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static void init_register_dump(struct device *dev)
+{
+	update_reg_dump_table(dev, CORE_REG_NUM_DEFAULT);
+}
+
+static ssize_t core_reg_num_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cpuss_dump_drvdata *drvdata = dev_get_drvdata(dev);
+
+	mutex_lock(&drvdata->mutex);
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n", drvdata->core_reg_num);
+
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static ssize_t core_reg_num_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	unsigned int val;
+	struct cpuss_dump_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->core_reg_used_num || drvdata->sys_reg_size) {
+		dev_err(dev, "Couldn't set core_reg_num, register available in list\n");
+		ret = -EPERM;
+		goto err;
+	}
+	if (val == drvdata->core_reg_num) {
+		ret = 0;
+		goto err;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+
+	ret = update_reg_dump_table(dev, val);
+	if (ret) {
+		dev_err(dev, "Couldn't set core_reg_num, no enough memory\n");
+		return ret;
+	}
+
+	return size;
+
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(core_reg_num);
+
+/**
+ * This function shows configs of per-core and system registers.
+ */
+static ssize_t register_config_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	char local_buf[64];
+	int len = 0, count = 0;
+	int index, system_index_start, index_end;
+	uint32_t register_offset, length_in_bytes;
+	uint32_t length_in_words;
+	uint32_t *p;
+	struct cpuss_dump_drvdata *drvdata = dev_get_drvdata(dev);
+
+	buf[0] = '\0';
+
+	mutex_lock(&drvdata->mutex);
+
+	p = (uint32_t *)drvdata->dump_vaddr;
+
+	/* print per-core & system registers */
+	len = snprintf(local_buf, 64, "per-core registers:\n");
+	strlcat(buf, local_buf, PAGE_SIZE);
+	count += len;
+
+	system_index_start = *(p + SYS_REG_INPUT_INDEX);
+	index_end = system_index_start +
+			drvdata->sys_reg_size / sizeof(uint32_t) + 1;
+	for (index = PERCORE_INDEX; index < index_end;) {
+		if (index == system_index_start) {
+			len = snprintf(local_buf, 64, "system registers:\n");
+			if ((count + len) > PAGE_SIZE) {
+				dev_err(dev, "Couldn't write complete config\n");
+				break;
+			}
+
+			strlcat(buf, local_buf, PAGE_SIZE);
+			count += len;
+		}
+
+		register_offset = *(p + index);
+		if (register_offset == 0) {
+			index++;
+			continue;
+		}
+
+		if (register_offset & 0x3) {
+			length_in_words = register_offset & 0x3;
+			length_in_bytes = length_in_words << 2;
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x\n",
+				index, register_offset);
+			index++;
+		} else {
+			length_in_bytes = *(p + index + 1);
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x, length: 0x%x\n",
+				index, register_offset, length_in_bytes);
+			index += 2;
+		}
+
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "Couldn't write complete config\n");
+			break;
+		}
+
+		strlcat(buf, local_buf, PAGE_SIZE);
+		count += len;
+	}
+
+	mutex_unlock(&drvdata->mutex);
+	return count;
+}
+
+/**
+ * This function sets configs of per-core or system registers.
+ */
+static ssize_t register_config_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	uint32_t register_offset, length_in_bytes, per_core = 0;
+	uint32_t length_in_words;
+	int nval;
+	uint32_t num_cores;
+	u32 extra_memory;
+	u32 used_memory;
+	u32 system_reg_end_index;
+	uint32_t *p;
+	struct cpuss_dump_drvdata *drvdata = dev_get_drvdata(dev);
+
+	nval = sscanf(buf, "%x %x %u", &register_offset,
+				&length_in_bytes, &per_core);
+	if (nval != 2 && nval != 3)
+		return -EINVAL;
+	if (per_core > 1)
+		return -EINVAL;
+	if (register_offset & 0x3) {
+		dev_err(dev, "Invalid address, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes & 0x3) {
+		dev_err(dev, "Invalid length, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes == 0) {
+		dev_err(dev, "Invalid length of 0\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&drvdata->mutex);
+
+	p = (uint32_t *)drvdata->dump_vaddr;
+	length_in_words = length_in_bytes >> 2;
+	if (per_core) { /* per-core register */
+		if (drvdata->core_reg_used_num == drvdata->core_reg_num) {
+			dev_err(dev, "Couldn't add per-core config, out of range\n");
+			ret = -EINVAL;
+			goto err;
+		}
+
+		num_cores = num_possible_cpus();
+		extra_memory = length_in_bytes * num_cores;
+		used_memory = drvdata->used_memory + extra_memory;
+		if (extra_memory / num_cores < length_in_bytes ||
+				used_memory > drvdata->size ||
+				used_memory < drvdata->used_memory) {
+			dev_err(dev, "Couldn't add per-core reg config, no enough memory\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (length_in_words > 3) {
+			*(p + drvdata->core_reg_end_index) = register_offset;
+			*(p + drvdata->core_reg_end_index + 1) =
+					length_in_bytes;
+			drvdata->core_reg_end_index += 2;
+		} else {
+			*(p + drvdata->core_reg_end_index) = register_offset |
+					length_in_words;
+			drvdata->core_reg_end_index++;
+		}
+
+		drvdata->core_reg_used_num++;
+		drvdata->used_memory = used_memory;
+	} else { /* system register */
+		system_reg_end_index = *(p + SYS_REG_INPUT_INDEX) +
+				drvdata->sys_reg_size / sizeof(uint32_t);
+
+		if (length_in_words > 3) {
+			extra_memory = sizeof(uint32_t) * 2 + length_in_bytes;
+			used_memory = drvdata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > drvdata->size ||
+					used_memory < drvdata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset;
+			*(p + system_reg_end_index + 1) = length_in_bytes;
+			system_reg_end_index += 2;
+			drvdata->sys_reg_size += sizeof(uint32_t) * 2;
+		} else {
+			extra_memory = sizeof(uint32_t) + length_in_bytes;
+			used_memory = drvdata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > drvdata->size ||
+					used_memory < drvdata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset |
+					length_in_words;
+			system_reg_end_index++;
+			drvdata->sys_reg_size += sizeof(uint32_t);
+		}
+
+		drvdata->used_memory = used_memory;
+
+		*(p + system_reg_end_index) = 0x0;
+		*(p + OUTPUT_DUMP_INDEX) = (system_reg_end_index + 1)
+				* sizeof(uint32_t);
+	}
+
+	ret = size;
+
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(register_config);
+
+/**
+ * This function resets the register dump table.
+ */
+static ssize_t register_reset_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	unsigned int val;
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	init_register_dump(dev);
+
+	return size;
+}
+static DEVICE_ATTR_WO(register_reset);
+
+static const struct device_attribute *register_dump_attrs[] = {
+	&dev_attr_core_reg_num,
+	&dev_attr_register_config,
+	&dev_attr_register_reset,
+	NULL,
+};
+
+static int register_dump_create_files(struct device *dev,
+			const struct device_attribute **attrs)
+{
+	int ret = 0;
+	int i, j;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "Couldn't create sysfs attribute: %s\n",
+				attrs[i]->attr.name);
+			for (j = 0; j < i; j++)
+				device_remove_file(dev, attrs[j]);
+			break;
+		}
+	}
+	return ret;
+}
+
 static int cpuss_dump_probe(struct platform_device *pdev)
 {
 	struct device_node *child_node, *dump_node;
@@ -22,9 +404,11 @@ static int cpuss_dump_probe(struct platform_device *pdev)
 	struct msm_dump_entry dump_entry;
 	int ret;
 	u32 size, id;
+	struct cpuss_dump_drvdata *drvdata;
 
 	for_each_available_child_of_node(node, child_node) {
 		dump_node = of_parse_phandle(child_node, "qcom,dump-node", 0);
+		drvdata = NULL;
 
 		if (!dump_node) {
 			dev_err(&pdev->dev, "Unable to find node for %s\n",
@@ -54,13 +438,44 @@ static int cpuss_dump_probe(struct platform_device *pdev)
 			continue;
 		}
 
-		memset(dump_vaddr, 0x0, size);
+		if (id == REG_DUMP_ID) {
+			drvdata = devm_kzalloc(&pdev->dev,
+				sizeof(struct cpuss_dump_drvdata), GFP_KERNEL);
+			if (!drvdata) {
+				dma_free_coherent(&pdev->dev, size, dump_vaddr,
+						dump_addr);
+				continue;
+			}
+
+			drvdata->dump_vaddr = dump_vaddr;
+			drvdata->size = size;
+
+			ret = register_dump_create_files(&pdev->dev,
+					register_dump_attrs);
+			if (ret) {
+				dma_free_coherent(&pdev->dev, size, dump_vaddr,
+						dump_addr);
+				devm_kfree(&pdev->dev, drvdata);
+				continue;
+			}
+
+			mutex_init(&drvdata->mutex);
+			platform_set_drvdata(pdev, drvdata);
+
+			init_register_dump(&pdev->dev);
+		} else {
+			memset(dump_vaddr, 0x0, size);
+		}
 
 		dump_data = devm_kzalloc(&pdev->dev,
 				sizeof(struct msm_dump_data), GFP_KERNEL);
 		if (!dump_data) {
 			dma_free_coherent(&pdev->dev, size, dump_vaddr,
 					dump_addr);
+			if (drvdata) {
+				devm_kfree(&pdev->dev, drvdata);
+				platform_set_drvdata(pdev, NULL);
+			}
 			continue;
 		}
 
@@ -76,6 +491,10 @@ static int cpuss_dump_probe(struct platform_device *pdev)
 				id);
 			dma_free_coherent(&pdev->dev, size, dump_vaddr,
 					dump_addr);
+			if (drvdata) {
+				devm_kfree(&pdev->dev, drvdata);
+				platform_set_drvdata(pdev, NULL);
+			}
 			devm_kfree(&pdev->dev, dump_data);
 		}
 
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index a45bcc3..d1241fe5 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -600,6 +600,7 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 		ram_cfg_base = drvdata->ram_cfg;
 		ret = __dcc_ll_cfg(drvdata, list);
 		if (ret) {
+			dcc_writel(drvdata, 0, DCC_LL_LOCK(list));
 			dev_info(drvdata->dev, "DCC ram programming failed\n");
 			goto err;
 		}
diff --git a/drivers/soc/qcom/ddr_stats.c b/drivers/soc/qcom/ddr_stats.c
new file mode 100644
index 0000000..cb10342
--- /dev/null
+++ b/drivers/soc/qcom/ddr_stats.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+
+#define MAGIC_KEY1		0xA1157A75
+#define MAX_NUM_MODES		0x14
+#define MSM_ARCH_TIMER_FREQ	19200000
+
+#define GET_PDATA_OF_ATTR(attr) \
+	(container_of(attr, struct ddr_stats_kobj_attr, ka)->pd)
+
+struct ddr_stats_platform_data {
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+};
+
+struct stats_entry {
+	uint32_t name;
+	uint32_t count;
+	uint64_t duration;
+};
+
+struct ddr_stats_data {
+	uint32_t key;
+	uint32_t entry_count;
+	struct stats_entry entry[MAX_NUM_MODES];
+};
+
+struct ddr_stats_kobj_attr {
+	struct kobject *kobj;
+	struct kobj_attribute ka;
+	struct ddr_stats_platform_data *pd;
+};
+
+static inline u64 get_time_in_msec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	counter *= MSEC_PER_SEC;
+	return counter;
+}
+
+static ssize_t ddr_stats_append_data_to_buf(char *buf, int length, int *count,
+		struct stats_entry *data, u64 accumulated_duration)
+{
+	u32 cp_idx = 0, name, duration = 0;
+
+	if (accumulated_duration)
+		duration = (data->duration * 100) / accumulated_duration;
+
+	name = (data->name >> 8) & 0xFF;
+
+	if (name == 0x0) {
+		name = (data->name) & 0xFF;
+		*count = *count + 1;
+		return snprintf(buf, length,
+				"LPM %d:\tName:0x%x\tcount:%u\tTime(msec):%llu (~%d%%)\n",
+				*count, name, data->count,
+				data->duration, duration);
+	} else if (name == 0x1) {
+		cp_idx = data->name & 0x1F;
+		name = data->name >> 16;
+
+		if (!name || !data->count)
+			return 0;
+
+		return snprintf(buf, length,
+				"Freq %dMhz:\tCP IDX:%u\tcount:%u\tTime(msec):%llu (~%d%%)\n",
+				name, cp_idx, data->count,
+				data->duration, duration);
+	}
+
+	return 0;
+}
+
+static ssize_t ddr_stats_copy_stats(char *buf, int size, void __iomem *reg,
+							u32 entry_count)
+{
+	struct stats_entry data[MAX_NUM_MODES];
+	u64 accumulated_duration = 0;
+	int lpm_count = 0, i;
+	ssize_t length, op_length;
+
+	reg += offsetofend(struct ddr_stats_data, entry_count);
+
+	for (i = 0; i < entry_count; i++) {
+		data[i].count = readl_relaxed(reg + offsetof(
+					      struct stats_entry, count));
+
+		data[i].name = readl_relaxed(reg + offsetof(
+					     struct stats_entry, name));
+
+		data[i].duration = readq_relaxed(reg + offsetof(
+						 struct stats_entry, duration));
+
+		data[i].duration = get_time_in_msec(data[i].duration);
+		accumulated_duration += data[i].duration;
+		reg += sizeof(struct stats_entry);
+	}
+
+	for (i = 0, length = 0; i < entry_count; i++) {
+		op_length = ddr_stats_append_data_to_buf(buf + length,
+						size - length, &lpm_count,
+						&data[i], accumulated_duration);
+		if (op_length >= size - length)
+			return length;
+
+		length += op_length;
+	}
+
+	return length;
+}
+
+static ssize_t ddr_stats_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct ddr_stats_platform_data *pdata = NULL;
+	void __iomem *reg;
+	ssize_t length;
+	u32 key, entry_count;
+
+	pdata = GET_PDATA_OF_ATTR(attr);
+
+	reg = ioremap_nocache(pdata->phys_addr_base, pdata->phys_size);
+	if (!reg) {
+		pr_err("ERROR could not ioremap start=%pa, len=%u\n",
+		       &pdata->phys_addr_base, pdata->phys_size);
+		return 0;
+	}
+
+	key = readl_relaxed(reg + offsetof(struct ddr_stats_data, key));
+	if (key != MAGIC_KEY1) {
+		pr_err("Invalid key\n");
+		return 0;
+	}
+
+	entry_count = readl_relaxed(reg + offsetof(struct ddr_stats_data,
+				    entry_count));
+	if (entry_count > MAX_NUM_MODES) {
+		pr_err("Invalid entry count\n");
+		return 0;
+	}
+
+	length = ddr_stats_copy_stats(buf, PAGE_SIZE, reg, entry_count);
+	iounmap(reg);
+
+	return length;
+}
+
+static int ddr_stats_create_sysfs(struct platform_device *pdev,
+				struct ddr_stats_platform_data *pd)
+{
+	struct kobject *ddr_stats_kobj = NULL;
+	struct ddr_stats_kobj_attr *ddr_stats_ka = NULL;
+
+	ddr_stats_kobj = kobject_create_and_add("ddr", power_kobj);
+	if (!ddr_stats_kobj) {
+		pr_err("Cannot create ddr stats kobject\n");
+		return -ENODEV;
+	}
+
+	ddr_stats_ka = devm_kzalloc(&pdev->dev, sizeof(*ddr_stats_ka),
+				    GFP_KERNEL);
+	if (!ddr_stats_ka) {
+		kobject_put(ddr_stats_kobj);
+		return -ENOMEM;
+	}
+
+	ddr_stats_ka->kobj = ddr_stats_kobj;
+
+	sysfs_attr_init(&ddr_stats_ka->ka.attr);
+	ddr_stats_ka->pd = pd;
+	ddr_stats_ka->ka.attr.mode = 0444;
+	ddr_stats_ka->ka.attr.name = "residency";
+	ddr_stats_ka->ka.show = ddr_stats_show;
+	ddr_stats_ka->ka.store = NULL;
+
+	platform_set_drvdata(pdev, ddr_stats_ka);
+
+	return sysfs_create_file(ddr_stats_kobj, &ddr_stats_ka->ka.attr);
+}
+
+static int ddr_stats_probe(struct platform_device *pdev)
+{
+	struct ddr_stats_platform_data *pdata;
+	struct resource *res = NULL, *offset = NULL;
+	u32 offset_addr = 0;
+	void __iomem *phys_ptr = NULL;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "phys_addr_base");
+	if (!res)
+		return -ENODEV;
+
+	offset = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					      "offset_addr");
+	if (offset) {
+		/* Remap the ddr stats pointer */
+		phys_ptr = ioremap_nocache(offset->start, SZ_4);
+		if (!phys_ptr) {
+			pr_err("Failed to ioremap offset address\n");
+			return -ENODEV;
+		}
+		offset_addr = readl_relaxed(phys_ptr);
+		iounmap(phys_ptr);
+	}
+
+	if (!offset_addr)
+		return -ENODEV;
+
+	pdata->phys_addr_base  = res->start + offset_addr;
+	pdata->phys_size = resource_size(res);
+
+	return ddr_stats_create_sysfs(pdev, pdata);
+}
+
+static int ddr_stats_remove(struct platform_device *pdev)
+{
+	struct ddr_stats_kobj_attr *ddr_stats_ka;
+
+	ddr_stats_ka = (struct ddr_stats_kobj_attr *)
+			platform_get_drvdata(pdev);
+
+	sysfs_remove_file(ddr_stats_ka->kobj, &ddr_stats_ka->ka.attr);
+	kobject_put(ddr_stats_ka->kobj);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id ddr_stats_table[] = {
+	{ .compatible = "qcom,ddr-stats" },
+	{ },
+};
+
+static struct platform_driver ddr_stats_driver = {
+	.probe = ddr_stats_probe,
+	.remove = ddr_stats_remove,
+	.driver = {
+		.name = "ddr_stats",
+		.of_match_table = ddr_stats_table,
+	},
+};
+module_platform_driver(ddr_stats_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM DDR Statistics driver");
+MODULE_ALIAS("platform:msm_ddr_stats_log");
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 65b9251..05a491c 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -18,8 +18,6 @@
 #define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
 #define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
 
-#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
-
 #define DFC_MAX_QOS_ID_V01 2
 
 #define DFC_ACK_TYPE_DISABLE 1
@@ -77,11 +75,11 @@ static void dfc_svc_init(struct work_struct *work);
 
 #define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
 #define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
-#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 4
+#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
 #define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7
 
 #define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
-#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540
+#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024
 
 #define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
 #define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
@@ -100,6 +98,8 @@ struct dfc_bind_client_resp_msg_v01 {
 struct dfc_indication_register_req_msg_v01 {
 	u8 report_flow_status_valid;
 	u8 report_flow_status;
+	u8 report_tx_link_status_valid;
+	u8 report_tx_link_status;
 };
 
 struct dfc_indication_register_resp_msg_v01 {
@@ -311,6 +311,20 @@ struct dfc_flow_status_ind_msg_v01 {
 	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
 };
 
+struct dfc_bearer_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_tx_link_status_ind_msg_v01 {
+	u8 tx_status;
+	u8 bearer_info_valid;
+	u8 bearer_info_len;
+	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
+};
+
 struct dfc_get_flow_status_req_msg_v01 {
 	u8 bearer_id_list_valid;
 	u8 bearer_id_list_len;
@@ -326,7 +340,11 @@ struct dfc_get_flow_status_resp_msg_v01 {
 
 struct dfc_svc_ind {
 	struct list_head list;
-	struct dfc_flow_status_ind_msg_v01 dfc_info;
+	u16 msg_id;
+	union {
+		struct dfc_flow_status_ind_msg_v01 dfc_info;
+		struct dfc_tx_link_status_ind_msg_v01 tx_status;
+	} d;
 };
 
 static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
@@ -399,6 +417,28 @@ static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
 		.ei_array	= NULL,
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_tx_link_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_tx_link_status),
+		.ei_array	= NULL,
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -616,6 +656,111 @@ static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
 	},
 };
 
+static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum dfc_ip_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   ip_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   tx_status),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_bearer_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info),
+		.ei_array	= dfc_bearer_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static int
 dfc_bind_client_req(struct qmi_handle *dfc_handle,
 		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
@@ -702,6 +847,9 @@ dfc_indication_register_req(struct qmi_handle *dfc_handle,
 
 	req->report_flow_status_valid = 1;
 	req->report_flow_status = reg;
+	req->report_tx_link_status_valid = 1;
+	req->report_tx_link_status = reg;
+
 	ret = qmi_send_request(dfc_handle, ssctl, &txn,
 			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
 			       QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
@@ -834,30 +982,27 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
 	rmnet_map_tx_qmap_cmd(skb);
 }
 
-static int dfc_bearer_flow_ctl(struct net_device *dev,
-			       struct rmnet_bearer_map *bearer,
-			       struct qos_info *qos)
+int dfc_bearer_flow_ctl(struct net_device *dev,
+			struct rmnet_bearer_map *bearer,
+			struct qos_info *qos)
 {
-	struct rmnet_flow_map *itm;
 	int rc = 0, qlen;
 	int enable;
+	int i;
 
 	enable = bearer->grant_size ? 1 : 0;
 
-	list_for_each_entry(itm, &qos->flow_head, list) {
-		if (itm->bearer_id == bearer->bearer_id) {
-			/*
-			 * Do not flow disable ancillary q if ancillary is true
-			 */
-			if (bearer->tcp_bidir && enable == 0 &&
-					DFC_IS_ANCILLARY(itm->ip_type))
+	for (i = 0; i < MAX_MQ_NUM; i++) {
+		if (qos->mq[i].bearer == bearer) {
+			/* Do not flow disable ancillary q in tcp bidir */
+			if (qos->mq[i].ancillary &&
+			    bearer->tcp_bidir && !enable)
 				continue;
 
-			qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
-						    enable);
-			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-					 itm->flow_id, bearer->grant_size,
-					 qlen, itm->tcm_handle, enable);
+			qlen = qmi_rmnet_flow_control(dev, i, enable);
+			trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+					 bearer->grant_size,
+					 qlen, i, enable);
 			rc++;
 		}
 	}
@@ -875,9 +1020,9 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
 	struct rmnet_bearer_map *bearer_itm;
-	struct rmnet_flow_map *flow_itm;
 	int rc = 0, qlen;
 	bool enable;
+	int i;
 
 	enable = fc_info->num_bytes > 0 ? 1 : 0;
 
@@ -892,12 +1037,14 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 		bearer_itm->last_seq = fc_info->seq_num;
 	}
 
-	list_for_each_entry(flow_itm, &qos->flow_head, list) {
-		qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
-					      enable);
-		trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
-				 flow_itm->flow_id, fc_info->num_bytes,
-				 qlen, flow_itm->tcm_handle, enable);
+	for (i = 0; i < MAX_MQ_NUM; i++) {
+		bearer_itm = qos->mq[i].bearer;
+		if (!bearer_itm)
+			continue;
+		qlen = qmi_rmnet_flow_control(dev, i, enable);
+		trace_dfc_qmi_tc(dev->name, bearer_itm->bearer_id,
+				 fc_info->num_bytes,
+				 qlen, i, enable);
 		rc++;
 	}
 
@@ -928,6 +1075,10 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 			if (itm->rat_switch)
 				return 0;
 
+		/* If TX is OFF but we received grant, ignore it */
+		if (itm->tx_off  && fc_info->num_bytes > 0)
+			return 0;
+
 		if ((itm->grant_size == 0 && fc_info->num_bytes > 0) ||
 		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
 			action = true;
@@ -951,7 +1102,7 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
 				      struct dfc_svc_ind *svc_ind)
 {
-	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
+	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
 	struct net_device *dev;
 	struct qos_info *qos;
 	struct dfc_flow_status_info_type_v01 *flow_status;
@@ -996,6 +1147,11 @@ static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
 
 		spin_lock_bh(&qos->qos_lock);
 
+		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
+			spin_unlock_bh(&qos->qos_lock);
+			continue;
+		}
+
 		if (unlikely(flow_status->bearer_id == 0xFF))
 			dfc_all_bearer_flow_ctl(
 				dev, qos, ack_req, ancillary, flow_status);
@@ -1010,6 +1166,71 @@ static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
 	rcu_read_unlock();
 }
 
+static void dfc_update_tx_link_status(struct net_device *dev,
+				      struct qos_info *qos, u8 tx_status,
+				      struct dfc_bearer_info_type_v01 *binfo)
+{
+	struct rmnet_bearer_map *itm = NULL;
+
+	itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
+	if (!itm)
+		return;
+
+	if (itm->grant_size && !tx_status) {
+		itm->grant_size = 0;
+		itm->tcp_bidir = false;
+		dfc_bearer_flow_ctl(dev, itm, qos);
+	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
+		itm->grant_size = DEFAULT_GRANT;
+		itm->grant_thresh = DEFAULT_GRANT;
+		itm->seq = 0;
+		itm->ack_req = 0;
+		dfc_bearer_flow_ctl(dev, itm, qos);
+	}
+
+	itm->tx_off = !tx_status;
+}
+
+static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+					  struct dfc_svc_ind *svc_ind)
+{
+	struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct dfc_bearer_info_type_v01 *bearer_info;
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < ind->bearer_info_len; i++) {
+		bearer_info = &ind->bearer_info[i];
+
+		trace_dfc_tx_link_status_ind(dfc->index, i,
+					     ind->tx_status,
+					     bearer_info->mux_id,
+					     bearer_info->bearer_id);
+
+		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
+					  bearer_info->mux_id);
+		if (!dev)
+			goto clean_out;
+
+		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+		if (!qos)
+			continue;
+
+		spin_lock_bh(&qos->qos_lock);
+
+		dfc_update_tx_link_status(
+			dev, qos, ind->tx_status, bearer_info);
+
+		spin_unlock_bh(&qos->qos_lock);
+	}
+
+clean_out:
+	rcu_read_unlock();
+}
+
 static void dfc_qmi_ind_work(struct work_struct *work)
 {
 	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
@@ -1030,12 +1251,18 @@ static void dfc_qmi_ind_work(struct work_struct *work)
 			list_del(&svc_ind->list);
 		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
 
-		if (svc_ind) {
-			if (!dfc->restart_state)
+		if (!svc_ind)
+			break;
+
+		if (!dfc->restart_state) {
+			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
 				dfc_do_burst_flow_control(dfc, svc_ind);
-			kfree(svc_ind);
+			else if (svc_ind->msg_id ==
+					QMI_DFC_TX_LINK_STATUS_IND_V01)
+				dfc_handle_tx_link_status_ind(dfc, svc_ind);
 		}
-	} while (svc_ind != NULL);
+		kfree(svc_ind);
+	} while (1);
 
 	local_bh_enable();
 
@@ -1066,7 +1293,44 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
 		if (!svc_ind)
 			return;
 
-		memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
+		svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
+		memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));
+
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
+	}
+}
+
+static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
+				      struct sockaddr_qrtr *sq,
+				      struct qmi_txn *txn, const void *data)
+{
+	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
+						handle);
+	struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (qmi != &dfc->handle)
+		return;
+
+	ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
+	if (ind_msg->bearer_info_valid) {
+		if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
+			pr_err("%s() Invalid bearer info len: %d\n",
+			       __func__, ind_msg->bearer_info_len);
+			return;
+		}
+
+		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
+		if (!svc_ind)
+			return;
+
+		svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
+		memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));
 
 		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
 		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
@@ -1151,9 +1415,16 @@ static struct qmi_msg_handler qmi_indication_handler[] = {
 		.type = QMI_INDICATION,
 		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
 		.ei = dfc_flow_status_ind_v01_ei,
-		.decoded_size = QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
 		.fn = dfc_clnt_ind_cb,
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
+		.ei = dfc_tx_link_status_ind_v01_ei,
+		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
+		.fn = dfc_tx_link_status_ind_cb,
+	},
 	{},
 };
 
@@ -1236,22 +1507,28 @@ void dfc_qmi_client_exit(void *dfc_data)
 void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 			 int ip_type, u32 mark, unsigned int len)
 {
-	struct rmnet_bearer_map *bearer;
+	struct rmnet_bearer_map *bearer = NULL;
 	struct rmnet_flow_map *itm;
 	u32 start_grant;
 
 	spin_lock_bh(&qos->qos_lock);
 
-	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
-	if (unlikely(!itm))
-		goto out;
+	if (dfc_mode == DFC_MODE_MQ_NUM) {
+		/* Mark is mq num */
+		if (likely(mark < MAX_MQ_NUM))
+			bearer = qos->mq[mark].bearer;
+	} else {
+		/* Mark is flow_id */
+		itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
+		if (likely(itm))
+			bearer = itm->bearer;
+	}
 
-	bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
 	if (unlikely(!bearer))
 		goto out;
 
 	trace_dfc_flow_check(dev->name, bearer->bearer_id,
-			     len, bearer->grant_size);
+			     len, mark, bearer->grant_size);
 
 	if (!bearer->grant_size)
 		goto out;
@@ -1276,18 +1553,6 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 	spin_unlock_bh(&qos->qos_lock);
 }
 
-void dfc_qmi_wq_flush(struct qmi_info *qmi)
-{
-	struct dfc_qmi_data *dfc_data;
-	int i;
-
-	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		dfc_data = (struct dfc_qmi_data *)(qmi->dfc_clients[i]);
-		if (dfc_data)
-			flush_workqueue(dfc_data->dfc_wq);
-	}
-}
-
 void dfc_qmi_query_flow(void *dfc_data)
 {
 	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
@@ -1314,9 +1579,9 @@ void dfc_qmi_query_flow(void *dfc_data)
 	    resp->flow_status_len > DFC_MAX_BEARERS_V01)
 		goto done;
 
-	svc_ind->dfc_info.flow_status_valid = resp->flow_status_valid;
-	svc_ind->dfc_info.flow_status_len = resp->flow_status_len;
-	memcpy(&svc_ind->dfc_info.flow_status, resp->flow_status,
+	svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
+	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
+	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
 		sizeof(resp->flow_status[0]) * resp->flow_status_len);
 	dfc_do_burst_flow_control(data, svc_ind);
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 0d2933d..01c0ddb 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -34,6 +34,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/soc/qcom/qmi.h>
+#include <linux/sysfs.h>
 #include <soc/qcom/memory_dump.h>
 #include <soc/qcom/icnss.h>
 #include <soc/qcom/secure_buffer.h>
@@ -61,7 +62,7 @@
 #define ICNSS_QUIRKS_DEFAULT		BIT(FW_REJUVENATE_ENABLE)
 #define ICNSS_MAX_PROBE_CNT		2
 
-#define PROBE_TIMEOUT			5000
+#define PROBE_TIMEOUT                 15000
 
 static struct icnss_priv *penv;
 
@@ -78,40 +79,6 @@ void *icnss_ipc_log_long_context;
 #define ICNSS_EVENT_UNINTERRUPTIBLE		BIT(1)
 #define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE	(ICNSS_EVENT_UNINTERRUPTIBLE | \
 						 ICNSS_EVENT_SYNC)
-
-struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
-	[ICNSS_MSA_PERM_HLOS_ALL] = {
-		.vmids = {VMID_HLOS},
-		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
-		.nelems = 1,
-	},
-
-	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
-		.vmids = {VMID_MSS_MSA, VMID_WLAN},
-		.perms = {PERM_READ | PERM_WRITE,
-			PERM_READ | PERM_WRITE},
-		.nelems = 2,
-	},
-
-};
-
-struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
-	[ICNSS_MSA_PERM_HLOS_ALL] = {
-		.vmids = {VMID_HLOS},
-		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
-		.nelems = 1,
-	},
-
-	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
-		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE},
-		.perms = {PERM_READ | PERM_WRITE,
-			PERM_READ | PERM_WRITE,
-			PERM_READ | PERM_WRITE},
-		.nelems = 3,
-	},
-
-};
-
 static struct icnss_vreg_info icnss_vreg_info[] = {
 	{NULL, "vdd-cx-mx", 752000, 752000, 0, 0, false},
 	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
@@ -141,93 +108,17 @@ static const char * const icnss_pdr_cause[] = {
 	[ICNSS_HOST_ERROR] = "Host error",
 };
 
-static int icnss_assign_msa_perm(struct icnss_mem_region_info
-				 *mem_region, enum icnss_msa_perm new_perm)
+static ssize_t icnss_sysfs_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t count)
 {
-	int ret = 0;
-	phys_addr_t addr;
-	u32 size;
-	u32 i = 0;
-	u32 source_vmids[ICNSS_MAX_VMIDS] = {0};
-	u32 source_nelems;
-	u32 dest_vmids[ICNSS_MAX_VMIDS] = {0};
-	u32 dest_perms[ICNSS_MAX_VMIDS] = {0};
-	u32 dest_nelems;
-	enum icnss_msa_perm cur_perm = mem_region->perm;
-	struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list;
-
-	if (penv && penv->is_hyp_disabled) {
-		icnss_pr_err("hyperviser disabled");
-		return 0;
-	}
-
-	addr = mem_region->reg_addr;
-	size = mem_region->size;
-
-	if (mem_region->secure_flag) {
-		new_perm_list = &msa_perm_secure_list[new_perm];
-		old_perm_list = &msa_perm_secure_list[cur_perm];
-	} else {
-		new_perm_list = &msa_perm_list[new_perm];
-		old_perm_list = &msa_perm_list[cur_perm];
-	}
-
-	source_nelems = old_perm_list->nelems;
-	dest_nelems = new_perm_list->nelems;
-
-	for (i = 0; i < source_nelems; ++i)
-		source_vmids[i] = old_perm_list->vmids[i];
-
-	for (i = 0; i < dest_nelems; ++i) {
-		dest_vmids[i] = new_perm_list->vmids[i];
-		dest_perms[i] = new_perm_list->perms[i];
-	}
-
-	ret = hyp_assign_phys(addr, size, source_vmids, source_nelems,
-			      dest_vmids, dest_perms, dest_nelems);
-	if (ret) {
-		icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
-			     &addr, size, ret);
-		goto out;
-	}
-
-	icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n",
-		     source_nelems, source_vmids[0], source_vmids[1],
-		     source_vmids[2], source_vmids[3], dest_nelems,
-		     dest_vmids[0], dest_vmids[1], dest_vmids[2],
-		     dest_vmids[3]);
-out:
-	return ret;
+	atomic_set(&penv->is_shutdown, true);
+	icnss_pr_dbg("Received shutdown indication");
+	return count;
 }
 
-static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
-				     enum icnss_msa_perm new_perm)
-{
-	int ret;
-	int i;
-	enum icnss_msa_perm old_perm;
-
-	if (priv->nr_mem_region > WLFW_MAX_NUM_MEMORY_REGIONS) {
-		icnss_pr_err("Invalid memory region len %d\n",
-			     priv->nr_mem_region);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < priv->nr_mem_region; i++) {
-		old_perm = priv->mem_region[i].perm;
-		ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
-		if (ret)
-			goto err_unmap;
-		priv->mem_region[i].perm = new_perm;
-	}
-	return 0;
-
-err_unmap:
-	for (i--; i >= 0; i--)
-		icnss_assign_msa_perm(&priv->mem_region[i], old_perm);
-
-	return ret;
-}
+static struct kobj_attribute icnss_sysfs_attribute =
+__ATTR(shutdown, 0660, NULL, icnss_sysfs_store);
 
 static void icnss_pm_stay_awake(struct icnss_priv *priv)
 {
@@ -275,6 +166,10 @@ static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
 		return "PD_SERVICE_DOWN";
 	case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
 		return "FW_EARLY_CRASH_IND";
+	case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
+		return "IDLE_SHUTDOWN";
+	case ICNSS_DRIVER_EVENT_IDLE_RESTART:
+		return "IDLE_RESTART";
 	case ICNSS_DRIVER_EVENT_MAX:
 		return "EVENT_MAX";
 	}
@@ -946,6 +841,10 @@ static int icnss_driver_event_server_arrive(void *data)
 
 	ret = wlfw_ind_register_send_sync_msg(penv);
 	if (ret < 0) {
+		if (ret == -EALREADY) {
+			ret = 0;
+			goto qmi_registered;
+		}
 		ignore_assert = true;
 		goto err_power_on;
 	}
@@ -962,24 +861,16 @@ static int icnss_driver_event_server_arrive(void *data)
 		goto err_power_on;
 	}
 
-	if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
-		ret = icnss_assign_msa_perm_all(penv,
-				ICNSS_MSA_PERM_WLAN_HW_RW);
-		if (ret < 0)
-			goto err_power_on;
-		set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
-	}
-
 	ret = wlfw_msa_ready_send_sync_msg(penv);
 	if (ret < 0) {
 		ignore_assert = true;
-		goto err_setup_msa;
+		goto err_power_on;
 	}
 
 	ret = wlfw_cap_send_sync_msg(penv);
 	if (ret < 0) {
 		ignore_assert = true;
-		goto err_setup_msa;
+		goto err_power_on;
 	}
 
 	wlfw_dynamic_feature_mask_send_sync_msg(penv,
@@ -996,15 +887,13 @@ static int icnss_driver_event_server_arrive(void *data)
 
 	return ret;
 
-err_setup_msa:
-	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
-	clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
 err_power_on:
 	icnss_hw_power_off(penv);
 clear_server:
 	icnss_clear_server(penv);
 fail:
 	ICNSS_ASSERT(ignore_assert);
+qmi_registered:
 	return ret;
 }
 
@@ -1092,6 +981,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
 	icnss_call_driver_shutdown(priv);
 
 	clear_bit(ICNSS_PDR, &priv->state);
+	clear_bit(ICNSS_MODEM_CRASHED, &priv->state);
 	clear_bit(ICNSS_REJUVENATE, &priv->state);
 	clear_bit(ICNSS_PD_RESTART, &priv->state);
 	priv->early_crash_ind = false;
@@ -1250,6 +1140,32 @@ static int icnss_driver_event_unregister_driver(void *data)
 	return 0;
 }
 
+static int icnss_call_driver_remove(struct icnss_priv *priv)
+{
+	icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
+
+	clear_bit(ICNSS_FW_READY, &priv->state);
+
+	if (test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+		return 0;
+
+	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+		return 0;
+
+	if (!priv->ops || !priv->ops->remove)
+		return 0;
+
+	set_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
+	priv->ops->remove(&priv->pdev->dev);
+
+	clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
+	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
+
+	icnss_hw_power_off(priv);
+
+	return 0;
+}
+
 static int icnss_fw_crashed(struct icnss_priv *priv,
 			    struct icnss_event_pd_service_down_data *event_data)
 {
@@ -1322,6 +1238,51 @@ static int icnss_driver_event_early_crash_ind(struct icnss_priv *priv,
 	return 0;
 }
 
+static int icnss_driver_event_idle_shutdown(void *data)
+{
+	int ret = 0;
+
+	if (!penv->ops || !penv->ops->idle_shutdown)
+		return 0;
+
+	if (test_bit(ICNSS_MODEM_CRASHED, &penv->state) ||
+			test_bit(ICNSS_PDR, &penv->state) ||
+			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown callback\n");
+		ret = -EBUSY;
+	} else {
+		icnss_pr_dbg("Calling driver idle shutdown, state: 0x%lx\n",
+								penv->state);
+		icnss_block_shutdown(true);
+		ret = penv->ops->idle_shutdown(&penv->pdev->dev);
+		icnss_block_shutdown(false);
+	}
+
+	return ret;
+}
+
+static int icnss_driver_event_idle_restart(void *data)
+{
+	int ret = 0;
+
+	if (!penv->ops || !penv->ops->idle_restart)
+		return 0;
+
+	if (test_bit(ICNSS_MODEM_CRASHED, &penv->state) ||
+			test_bit(ICNSS_PDR, &penv->state) ||
+			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+		icnss_pr_err("SSR/PDR is already in-progress during idle restart callback\n");
+		ret = -EBUSY;
+	} else {
+		icnss_pr_dbg("Calling driver idle restart, state: 0x%lx\n",
+								penv->state);
+		icnss_block_shutdown(true);
+		ret = penv->ops->idle_restart(&penv->pdev->dev);
+		icnss_block_shutdown(false);
+	}
+
+	return ret;
+}
 
 static void icnss_driver_event_work(struct work_struct *work)
 {
@@ -1368,6 +1329,12 @@ static void icnss_driver_event_work(struct work_struct *work)
 			ret = icnss_driver_event_early_crash_ind(penv,
 								 event->data);
 			break;
+		case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
+			ret = icnss_driver_event_idle_shutdown(event->data);
+			break;
+		case ICNSS_DRIVER_EVENT_IDLE_RESTART:
+			ret = icnss_driver_event_idle_restart(event->data);
+			break;
 		default:
 			icnss_pr_err("Invalid Event type: %d", event->type);
 			kfree(event);
@@ -1423,17 +1390,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	if (code == SUBSYS_AFTER_SHUTDOWN &&
 	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
-		ret = icnss_assign_msa_perm_all(priv,
-						ICNSS_MSA_PERM_HLOS_ALL);
-		if (!ret) {
-			icnss_pr_info("Collecting msa0 segment dump\n");
-			icnss_msa0_ramdump(priv);
-			icnss_assign_msa_perm_all(priv,
-						  ICNSS_MSA_PERM_WLAN_HW_RW);
-		} else {
-			icnss_pr_err("Not able to Collect msa0 segment dump, Apps permissions not assigned %d\n",
-				     ret);
-		}
+		icnss_pr_info("Collecting msa0 segment dump\n");
+		icnss_msa0_ramdump(priv);
 		return NOTIFY_OK;
 	}
 
@@ -1442,11 +1400,20 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	priv->is_ssr = true;
 
+	if (notif->crashed)
+		set_bit(ICNSS_MODEM_CRASHED, &priv->state);
+
+	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
+	    atomic_read(&priv->is_shutdown)) {
+		atomic_set(&priv->is_shutdown, false);
+		icnss_call_driver_remove(priv);
+	}
+
 	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
 	    test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
 		if (!wait_for_completion_timeout(&priv->unblock_shutdown,
-						 PROBE_TIMEOUT))
-			icnss_pr_err("wlan driver probe timeout\n");
+				msecs_to_jiffies(PROBE_TIMEOUT)))
+			icnss_pr_err("modem block shutdown timeout\n");
 	}
 
 	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed) {
@@ -2156,20 +2123,6 @@ int icnss_get_irq(struct device *dev, int ce_id)
 }
 EXPORT_SYMBOL(icnss_get_irq);
 
-struct dma_iommu_mapping *icnss_smmu_get_mapping(struct device *dev)
-{
-	struct icnss_priv *priv = dev_get_drvdata(dev);
-
-	if (!priv) {
-		icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
-			     dev, priv);
-		return NULL;
-	}
-
-	return &priv->smmu_mapping;
-}
-EXPORT_SYMBOL(icnss_smmu_get_mapping);
-
 struct iommu_domain *icnss_smmu_get_domain(struct device *dev)
 {
 	struct icnss_priv *priv = dev_get_drvdata(dev);
@@ -2283,6 +2236,48 @@ int icnss_trigger_recovery(struct device *dev)
 }
 EXPORT_SYMBOL(icnss_trigger_recovery);
 
+int icnss_idle_shutdown(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %pK", dev);
+		return -EINVAL;
+	}
+
+	if (test_bit(ICNSS_MODEM_CRASHED, &priv->state) ||
+			test_bit(ICNSS_PDR, &priv->state) ||
+			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown\n");
+		return -EBUSY;
+	}
+
+	return icnss_driver_event_post(ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
+					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(icnss_idle_shutdown);
+
+int icnss_idle_restart(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Invalid drvdata: dev %pK", dev);
+		return -EINVAL;
+	}
+
+	if (test_bit(ICNSS_MODEM_CRASHED, &priv->state) ||
+			test_bit(ICNSS_PDR, &priv->state) ||
+			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+		icnss_pr_err("SSR/PDR is already in-progress during idle restart\n");
+		return -EBUSY;
+	}
+
+	return icnss_driver_event_post(ICNSS_DRIVER_EVENT_IDLE_RESTART,
+					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(icnss_idle_restart);
+
 static int icnss_get_vreg_info(struct device *dev,
 			       struct icnss_vreg_info *vreg_info)
 {
@@ -2515,6 +2510,15 @@ static void icnss_allow_recursive_recovery(struct device *dev)
 	icnss_pr_info("Recursive recovery allowed for WLAN\n");
 }
 
+static void icnss_disallow_recursive_recovery(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	priv->allow_recursive_recovery = false;
+
+	icnss_pr_info("Recursive recovery disallowed for WLAN\n");
+}
+
 static ssize_t icnss_fw_debug_write(struct file *fp,
 				    const char __user *user_buf,
 				    size_t count, loff_t *off)
@@ -2566,6 +2570,9 @@ static ssize_t icnss_fw_debug_write(struct file *fp,
 		case 4:
 			icnss_allow_recursive_recovery(&priv->pdev->dev);
 			break;
+		case 5:
+			icnss_disallow_recursive_recovery(&priv->pdev->dev);
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -2659,9 +2666,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 		case ICNSS_PD_RESTART:
 			seq_puts(s, "PD RESTART");
 			continue;
-		case ICNSS_MSA0_ASSIGNED:
-			seq_puts(s, "MSA0 ASSIGNED");
-			continue;
 		case ICNSS_WLFW_EXISTS:
 			seq_puts(s, "WLAN FW EXISTS");
 			continue;
@@ -2688,6 +2692,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 			continue;
 		case ICNSS_PDR:
 			seq_puts(s, "PDR TRIGGERED");
+			continue;
+		case ICNSS_MODEM_CRASHED:
+			seq_puts(s, "MODEM CRASHED");
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -3117,6 +3124,34 @@ static void icnss_debugfs_destroy(struct icnss_priv *priv)
 	debugfs_remove_recursive(priv->root_dentry);
 }
 
+static void icnss_sysfs_create(struct icnss_priv *priv)
+{
+	struct kobject *icnss_kobject;
+	int error = 0;
+
+	atomic_set(&priv->is_shutdown, false);
+
+	icnss_kobject = kobject_create_and_add("shutdown_wlan", kernel_kobj);
+	if (!icnss_kobject) {
+		icnss_pr_err("Unable to create kernel object");
+		return;
+	}
+
+	priv->icnss_kobject = icnss_kobject;
+
+	error = sysfs_create_file(icnss_kobject, &icnss_sysfs_attribute.attr);
+	if (error)
+		icnss_pr_err("Unable to create icnss sysfs file");
+}
+
+static void icnss_sysfs_destroy(struct icnss_priv *priv)
+{
+	struct kobject *icnss_kobject;
+
+	icnss_kobject = priv->icnss_kobject;
+	if (icnss_kobject)
+		kobject_put(icnss_kobject);
+}
 
 static int icnss_get_vbatt_info(struct icnss_priv *priv)
 {
@@ -3186,6 +3221,8 @@ static int icnss_probe(struct platform_device *pdev)
 
 	priv->vreg_info = icnss_vreg_info;
 
+	icnss_allow_recursive_recovery(dev);
+
 	if (of_property_read_bool(pdev->dev.of_node, "qcom,icnss-adc_tm")) {
 		ret = icnss_get_vbatt_info(priv);
 		if (ret == -EPROBE_DEFER)
@@ -3207,11 +3244,6 @@ static int icnss_probe(struct platform_device *pdev)
 			goto out;
 	}
 
-	if (of_property_read_bool(pdev->dev.of_node, "qcom,hyp_disabled"))
-		priv->is_hyp_disabled = true;
-
-	icnss_pr_dbg("Hypervisor disabled = %d\n", priv->is_hyp_disabled);
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
 	if (!res) {
 		icnss_pr_err("Memory base not found in DT\n");
@@ -3300,7 +3332,6 @@ static int icnss_probe(struct platform_device *pdev)
 	} else {
 		priv->iommu_domain =
 			iommu_get_domain_for_dev(&pdev->dev);
-		priv->smmu_mapping.domain = priv->iommu_domain;
 
 		res = platform_get_resource_byname(pdev,
 						   IORESOURCE_MEM,
@@ -3340,6 +3371,8 @@ static int icnss_probe(struct platform_device *pdev)
 
 	icnss_debugfs_create(priv);
 
+	icnss_sysfs_create(priv);
+
 	ret = device_init_wakeup(&priv->pdev->dev, true);
 	if (ret)
 		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
@@ -3357,7 +3390,6 @@ static int icnss_probe(struct platform_device *pdev)
 	destroy_workqueue(priv->event_wq);
 smmu_cleanup:
 	priv->iommu_domain = NULL;
-	priv->smmu_mapping.domain = NULL;
 out:
 	dev_set_drvdata(dev, NULL);
 
@@ -3372,6 +3404,8 @@ static int icnss_remove(struct platform_device *pdev)
 
 	icnss_debugfs_destroy(penv);
 
+	icnss_sysfs_destroy(penv);
+
 	complete_all(&penv->unblock_shutdown);
 
 	icnss_modem_ssr_unregister_notifier(penv);
@@ -3385,13 +3419,9 @@ static int icnss_remove(struct platform_device *pdev)
 		destroy_workqueue(penv->event_wq);
 
 	penv->iommu_domain = NULL;
-	penv->smmu_mapping.domain = NULL;
 
 	icnss_hw_power_off(penv);
 
-	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
-	clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
-
 	dev_set_drvdata(&pdev->dev, NULL);
 
 	return 0;
diff --git a/drivers/soc/qcom/icnss_private.h b/drivers/soc/qcom/icnss_private.h
index 8b487ea3..ba44305 100644
--- a/drivers/soc/qcom/icnss_private.h
+++ b/drivers/soc/qcom/icnss_private.h
@@ -9,6 +9,7 @@
 #include <linux/adc-tm-clients.h>
 #include <linux/iio/consumer.h>
 #include <asm/dma-iommu.h>
+#include <linux/kobject.h>
 
 #define icnss_ipc_log_string(_x...) do {				\
 	if (icnss_ipc_log_context)					\
@@ -112,6 +113,8 @@ enum icnss_driver_event_type {
 	ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
 	ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 	ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND,
+	ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
+	ICNSS_DRIVER_EVENT_IDLE_RESTART,
 	ICNSS_DRIVER_EVENT_MAX,
 };
 
@@ -145,7 +148,6 @@ enum icnss_driver_state {
 	ICNSS_SSR_REGISTERED,
 	ICNSS_PDR_REGISTERED,
 	ICNSS_PD_RESTART,
-	ICNSS_MSA0_ASSIGNED,
 	ICNSS_WLFW_EXISTS,
 	ICNSS_SHUTDOWN_DONE,
 	ICNSS_HOST_TRIGGERED_PDR,
@@ -155,6 +157,7 @@ enum icnss_driver_state {
 	ICNSS_MODE_ON,
 	ICNSS_BLOCK_SHUTDOWN,
 	ICNSS_PDR,
+	ICNSS_MODEM_CRASHED,
 };
 
 struct ce_irq_list {
@@ -272,25 +275,10 @@ struct wlfw_fw_version_info {
 	char fw_build_timestamp[WLFW_MAX_TIMESTAMP_LEN + 1];
 };
 
-enum icnss_msa_perm {
-	ICNSS_MSA_PERM_HLOS_ALL = 0,
-	ICNSS_MSA_PERM_WLAN_HW_RW = 1,
-	ICNSS_MSA_PERM_MAX,
-};
-
-#define ICNSS_MAX_VMIDS     4
-
 struct icnss_mem_region_info {
 	uint64_t reg_addr;
 	uint32_t size;
 	uint8_t secure_flag;
-	enum icnss_msa_perm perm;
-};
-
-struct icnss_msa_perm_list_t {
-	int vmids[ICNSS_MAX_VMIDS];
-	int perms[ICNSS_MAX_VMIDS];
-	int nelems;
 };
 
 struct icnss_priv {
@@ -303,7 +291,6 @@ struct icnss_priv {
 	u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
 	phys_addr_t mem_base_pa;
 	void __iomem *mem_base_va;
-	struct dma_iommu_mapping smmu_mapping;
 	struct iommu_domain *iommu_domain;
 	dma_addr_t smmu_iova_ipa_start;
 	size_t smmu_iova_ipa_len;
@@ -351,7 +338,6 @@ struct icnss_priv {
 	u8 requesting_sub_system;
 	u16 line_number;
 	struct mutex dev_lock;
-	bool is_hyp_disabled;
 	uint32_t fw_error_fatal_irq;
 	uint32_t fw_early_crash_irq;
 	struct completion unblock_shutdown;
@@ -362,6 +348,9 @@ struct icnss_priv {
 	bool vbatt_supported;
 	char function_name[WLFW_FUNCTION_NAME_LEN + 1];
 	bool is_ssr;
+	struct kobject *icnss_kobject;
+	atomic_t is_shutdown;
+
 };
 
 int icnss_call_driver_uevent(struct icnss_priv *priv,
diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c
index 19b7875..2bcf3d2 100644
--- a/drivers/soc/qcom/icnss_qmi.c
+++ b/drivers/soc/qcom/icnss_qmi.c
@@ -307,14 +307,23 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv)
 
 	priv->stats.ind_register_resp++;
 
+	if (resp->fw_status_valid &&
+	   (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01)) {
+		ret = -EALREADY;
+		icnss_pr_dbg("WLFW already registered\n");
+		goto qmi_registered;
+	}
+
 	kfree(resp);
 	kfree(req);
+
 	return 0;
 
 out:
+	priv->stats.ind_register_err++;
+qmi_registered:
 	kfree(resp);
 	kfree(req);
-	priv->stats.ind_register_err++;
 	return ret;
 }
 
diff --git a/drivers/soc/qcom/llcc-kona.c b/drivers/soc/qcom/llcc-kona.c
index dbbd79f..a155d63 100644
--- a/drivers/soc/qcom/llcc-kona.c
+++ b/drivers/soc/qcom/llcc-kona.c
@@ -51,22 +51,22 @@
 	}
 
 static struct llcc_slice_config kona_data[] =  {
-	SCT_ENTRY(LLCC_CPUSS,    1, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1),
+	SCT_ENTRY(LLCC_CPUSS,    1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1),
 	SCT_ENTRY(LLCC_VIDSC0,   2, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_CMPT,    10, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_GPUHTW,  11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPU,     12, 2048, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_GPU,     12, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
 	SCT_ENTRY(LLCC_MMUHWT,  13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1),
 	SCT_ENTRY(LLCC_CMPTDMA, 15, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_DISP,    16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_AUDHW,   22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_NPU,     23, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_NPU,     23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_WLNHW,   24, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_CVP,     28, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_CVP,     28, 256, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_APTCM,   30, 128, 3, 0, 0x0,  0x3, 1, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_WRTCH,   31, 256, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1),
-	SCT_ENTRY(LLCC_CVPFW,   17, 512, 3, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_CVPFW,   17, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 };
 
 static int kona_qcom_llcc_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/llcc_events.h b/drivers/soc/qcom/llcc_events.h
index 1c37cd0..1359ad6 100644
--- a/drivers/soc/qcom/llcc_events.h
+++ b/drivers/soc/qcom/llcc_events.h
@@ -15,6 +15,7 @@ enum event_port_select {
 	EVENT_PORT_TRP,
 	EVENT_PORT_DRP,
 	EVENT_PORT_PMGR,
+	EVENT_PORT_BEAC1,
 	EVENT_PORT_TENURE,
 	EVENT_PORT_TLAT,
 };
diff --git a/drivers/soc/qcom/llcc_perfmon.c b/drivers/soc/qcom/llcc_perfmon.c
index 2e68bba..6bae29d 100644
--- a/drivers/soc/qcom/llcc_perfmon.c
+++ b/drivers/soc/qcom/llcc_perfmon.c
@@ -19,7 +19,7 @@
 #include "llcc_perfmon.h"
 
 #define LLCC_PERFMON_NAME		"qcom_llcc_perfmon"
-#define LLCC_PERFMON_COUNTER_MAX	16
+#define MAX_CNTR			16
 #define MAX_NUMBER_OF_PORTS		8
 #define NUM_CHANNELS			16
 #define DELIM_CHAR			" "
@@ -45,7 +45,7 @@ struct llcc_perfmon_private;
  */
 struct event_port_ops {
 	void (*event_config)(struct llcc_perfmon_private *priv,
-			unsigned int type, unsigned int num, bool enable);
+			unsigned int type, unsigned int *num, bool enable);
 	void (*event_enable)(struct llcc_perfmon_private *priv, bool enable);
 	void (*event_filter_config)(struct llcc_perfmon_private *priv,
 			enum filter_type filter, unsigned long match,
@@ -55,12 +55,12 @@ struct event_port_ops {
 /**
  * struct llcc_perfmon_private	- llcc perfmon private
  * @llcc_map:		llcc register address space map
+ * @llcc_bcast_map:	llcc broadcast register address space map
  * @bank_off:		Offset of llcc banks
  * @num_banks:		Number of banks supported
  * @port_ops:		struct event_port_ops
  * @configured:		Mapping of configured event counters
- * @configured_counters:
- *			Count of configured counters.
+ * @configured_cntrs:	Count of configured counters.
  * @enables_port:	Port enabled for perfmon configuration
  * @filtered_ports:	Port filter enabled
  * @port_configd:	Number of perfmon port configuration supported
@@ -76,8 +76,8 @@ struct llcc_perfmon_private {
 	unsigned int bank_off[NUM_CHANNELS];
 	unsigned int num_banks;
 	struct event_port_ops *port_ops[MAX_NUMBER_OF_PORTS];
-	struct llcc_perfmon_counter_map configured[LLCC_PERFMON_COUNTER_MAX];
-	unsigned int configured_counters;
+	struct llcc_perfmon_counter_map configured[MAX_CNTR];
+	unsigned int configured_cntrs;
 	unsigned int enables_port;
 	unsigned int filtered_ports;
 	unsigned int port_configd;
@@ -113,18 +113,20 @@ static void llcc_bcast_modify(struct llcc_perfmon_private *llcc_priv,
 
 static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
 {
+	struct llcc_perfmon_counter_map *counter_map;
 	uint32_t val;
 	unsigned int i, j;
 
-	if (!llcc_priv->configured_counters)
+	if (!llcc_priv->configured_cntrs)
 		return;
 
 	llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
-	for (i = 0; i < llcc_priv->configured_counters; i++) {
+	for (i = 0; i < llcc_priv->configured_cntrs; i++) {
+		counter_map = &llcc_priv->configured[i];
 		for (j = 0; j < llcc_priv->num_banks; j++) {
 			regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j]
 					+ LLCC_COUNTER_n_VALUE(i), &val);
-			llcc_priv->configured[i].counter_dump[j] += val;
+			counter_map->counter_dump[j] += val;
 		}
 	}
 }
@@ -133,26 +135,39 @@ static ssize_t perfmon_counter_dump_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	struct llcc_perfmon_counter_map *counter_map;
 	unsigned int i, j;
 	unsigned long long total;
 	ssize_t cnt = 0;
 
-	if (llcc_priv->configured_counters == 0) {
+	if (llcc_priv->configured_cntrs == 0) {
 		pr_err("counters not configured\n");
 		return cnt;
 	}
 
 	perfmon_counter_dump(llcc_priv);
-	for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
-		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "Port %02d,",
-				llcc_priv->configured[i].port_sel);
-		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "Event %02d,",
-				llcc_priv->configured[i].event_sel);
-
+	for (i = 0; i < llcc_priv->configured_cntrs - 1; i++) {
 		total = 0;
-		for (j = 0; j < llcc_priv->num_banks; j++) {
-			total += llcc_priv->configured[i].counter_dump[j];
-			llcc_priv->configured[i].counter_dump[j] = 0;
+		counter_map = &llcc_priv->configured[i];
+		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Port %02d,Event %02d,",
+				counter_map->port_sel, counter_map->event_sel);
+
+		if ((counter_map->port_sel == EVENT_PORT_BEAC) &&
+				(llcc_priv->num_mc > 1)) {
+			/* DBX uses 2 counters for BEAC 0 & 1 */
+			i++;
+			for (j = 0; j < llcc_priv->num_banks; j++) {
+				total += counter_map->counter_dump[j];
+				counter_map->counter_dump[j] = 0;
+				total += counter_map[1].counter_dump[j];
+				counter_map[1].counter_dump[j] = 0;
+			}
+		} else {
+			for (j = 0; j < llcc_priv->num_banks; j++) {
+				total += counter_map->counter_dump[j];
+				counter_map->counter_dump[j] = 0;
+			}
 		}
 
 		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n",
@@ -161,9 +176,10 @@ static ssize_t perfmon_counter_dump_show(struct device *dev,
 
 	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "CYCLE COUNT, ,");
 	total = 0;
+	counter_map = &llcc_priv->configured[i];
 	for (j = 0; j < llcc_priv->num_banks; j++) {
-		total += llcc_priv->configured[i].counter_dump[j];
-		llcc_priv->configured[i].counter_dump[j] = 0;
+		total += counter_map->counter_dump[j];
+		counter_map->counter_dump[j] = 0;
 	}
 
 	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n", total);
@@ -179,19 +195,19 @@ static ssize_t perfmon_configure_store(struct device *dev,
 {
 	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
 	struct event_port_ops *port_ops;
-	unsigned int j = 0, k;
+	struct llcc_perfmon_counter_map *counter_map;
+	unsigned int j = 0, k, end_cntrs;
 	unsigned long port_sel, event_sel;
 	uint32_t val;
 	char *token, *delim = DELIM_CHAR;
 
 	mutex_lock(&llcc_priv->mutex);
-	if (llcc_priv->configured_counters) {
+	if (llcc_priv->configured_cntrs) {
 		pr_err("Counters configured already, remove & try again\n");
 		mutex_unlock(&llcc_priv->mutex);
 		return -EINVAL;
 	}
 
-	llcc_priv->configured_counters = 0;
 	token = strsep((char **)&buf, delim);
 
 	while (token != NULL) {
@@ -214,32 +230,35 @@ static ssize_t perfmon_configure_store(struct device *dev,
 			continue;
 		}
 
-		llcc_priv->configured[j].port_sel = port_sel;
-		llcc_priv->configured[j].event_sel = event_sel;
+		/* Last perfmon counter for cycle counter */
+		end_cntrs = 1;
+		if (port_sel == EVENT_PORT_BEAC)
+			end_cntrs = llcc_priv->num_mc;
+
+		if (j == (MAX_CNTR - end_cntrs))
+			break;
+
+		counter_map = &llcc_priv->configured[j];
+		counter_map->port_sel = port_sel;
+		counter_map->event_sel = event_sel;
 		for (k = 0; k < llcc_priv->num_banks; k++)
-			llcc_priv->configured[j].counter_dump[k] = 0;
+			counter_map->counter_dump[k] = 0;
 
 		port_ops = llcc_priv->port_ops[port_sel];
-		pr_info("counter %d configured for event %ld from port %ld\n",
-				j, event_sel, port_sel);
-		port_ops->event_config(llcc_priv, event_sel, j++, true);
-		if (!(llcc_priv->enables_port & (1 << port_sel)))
-			if (port_ops->event_enable)
-				port_ops->event_enable(llcc_priv, true);
+		port_ops->event_config(llcc_priv, event_sel, &j, true);
+		pr_info("counter %2d configured for event %2ld from port %ld\n",
+				j++, event_sel, port_sel);
+		if (((llcc_priv->enables_port & (1 << port_sel)) == 0) &&
+				(port_ops->event_enable))
+			port_ops->event_enable(llcc_priv, true);
 
 		llcc_priv->enables_port |= (1 << port_sel);
-
-		/* Last perfmon counter for cycle counter */
-		if (llcc_priv->configured_counters++ ==
-				(LLCC_PERFMON_COUNTER_MAX - 2))
-			break;
 	}
 
 	/* configure clock event */
 	val = COUNT_CLOCK_EVENT | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
-	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), val);
-
-	llcc_priv->configured_counters++;
+	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j++), val);
+	llcc_priv->configured_cntrs = j;
 	mutex_unlock(&llcc_priv->mutex);
 	return count;
 }
@@ -249,12 +268,13 @@ static ssize_t perfmon_remove_store(struct device *dev,
 {
 	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
 	struct event_port_ops *port_ops;
-	unsigned int j = 0, counter_remove = 0;
+	struct llcc_perfmon_counter_map *counter_map;
+	unsigned int j = 0, end_cntrs;
 	unsigned long port_sel, event_sel;
 	char *token, *delim = DELIM_CHAR;
 
 	mutex_lock(&llcc_priv->mutex);
-	if (!llcc_priv->configured_counters) {
+	if (!llcc_priv->configured_cntrs) {
 		pr_err("Counters not configured\n");
 		mutex_unlock(&llcc_priv->mutex);
 		return -EINVAL;
@@ -282,29 +302,32 @@ static ssize_t perfmon_remove_store(struct device *dev,
 			continue;
 		}
 
-		/* put dummy values */
-		llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS;
-		llcc_priv->configured[j].event_sel = 100;
-		port_ops = llcc_priv->port_ops[port_sel];
-		pr_info("removed counter %d for event %ld from port %ld\n",
-				j, event_sel, port_sel);
+		/* Last perfmon counter for cycle counter */
+		end_cntrs = 1;
+		if (port_sel == EVENT_PORT_BEAC)
+			end_cntrs = llcc_priv->num_mc;
 
-		port_ops->event_config(llcc_priv, event_sel, j++, false);
-		if (llcc_priv->enables_port & (1 << port_sel))
-			if (port_ops->event_enable)
-				port_ops->event_enable(llcc_priv, false);
+		if (j == (llcc_priv->configured_cntrs - end_cntrs))
+			break;
+
+		/* put dummy values */
+		counter_map = &llcc_priv->configured[j];
+		counter_map->port_sel = MAX_NUMBER_OF_PORTS;
+		counter_map->event_sel = 0;
+		port_ops = llcc_priv->port_ops[port_sel];
+		port_ops->event_config(llcc_priv, event_sel, &j, false);
+		pr_info("removed counter %2d for event %2ld from port %2ld\n",
+				j++, event_sel, port_sel);
+		if ((llcc_priv->enables_port & (1 << port_sel)) &&
+				(port_ops->event_enable))
+			port_ops->event_enable(llcc_priv, false);
 
 		llcc_priv->enables_port &= ~(1 << port_sel);
-
-		/* Last perfmon counter for cycle counter */
-		if (counter_remove++ == (LLCC_PERFMON_COUNTER_MAX - 2))
-			break;
 	}
 
 	/* remove clock event */
 	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), 0);
-
-	llcc_priv->configured_counters = 0;
+	llcc_priv->configured_cntrs = 0;
 	mutex_unlock(&llcc_priv->mutex);
 	return count;
 }
@@ -339,13 +362,12 @@ static ssize_t perfmon_filter_config_store(struct device *dev,
 	char *token, *delim = DELIM_CHAR;
 	enum filter_type filter = UNKNOWN;
 
-	if (llcc_priv->configured_counters) {
+	if (llcc_priv->configured_cntrs) {
 		pr_err("remove configured events and try\n");
 		return count;
 	}
 
 	mutex_lock(&llcc_priv->mutex);
-
 	token = strsep((char **)&buf, delim);
 	if (token != NULL)
 		filter = find_filter_type(token);
@@ -484,7 +506,7 @@ static ssize_t perfmon_start_store(struct device *dev,
 
 	mutex_lock(&llcc_priv->mutex);
 	if (start) {
-		if (!llcc_priv->configured_counters) {
+		if (!llcc_priv->configured_cntrs) {
 			pr_err("start failed. perfmon not configured\n");
 			mutex_unlock(&llcc_priv->mutex);
 			return -EINVAL;
@@ -505,14 +527,13 @@ static ssize_t perfmon_start_store(struct device *dev,
 		if (llcc_priv->expires)
 			hrtimer_cancel(&llcc_priv->hrtimer);
 
-		if (!llcc_priv->configured_counters)
+		if (!llcc_priv->configured_cntrs)
 			pr_err("stop failed. perfmon not configured\n");
 	}
 
 	mask_val = PERFMON_MODE_MONITOR_MODE_MASK |
 		PERFMON_MODE_MONITOR_EN_MASK;
 	llcc_bcast_modify(llcc_priv, PERFMON_MODE, val, mask_val);
-
 	mutex_unlock(&llcc_priv->mutex);
 	return count;
 }
@@ -555,7 +576,6 @@ static ssize_t perfmon_scid_status_show(struct device *dev,
 	for (i = 0; i < SCID_MAX; i++) {
 		total = 0;
 		offset = TRP_SCID_n_STATUS(i);
-
 		for (j = 0; j < llcc_priv->num_banks; j++) {
 			regmap_read(llcc_priv->llcc_map,
 					llcc_priv->bank_off[j] + offset, &val);
@@ -604,39 +624,49 @@ static struct attribute_group llcc_perfmon_group = {
 	.attrs	= llcc_perfmon_attrs,
 };
 
-static void perfmon_counter_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int port, unsigned int event_counter_num)
+static void perfmon_cntr_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int port, unsigned int counter_num, bool enable)
 {
-	uint32_t val;
+	uint32_t val = 0;
 
-	val = (port & PERFMON_PORT_SELECT_MASK) |
-		((event_counter_num << EVENT_SELECT_SHIFT) &
-		PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
-	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(event_counter_num),
-			val);
+	if (counter_num >= MAX_CNTR)
+		return;
+
+	if (enable)
+		val = (port & PERFMON_PORT_SELECT_MASK) |
+			((counter_num << EVENT_SELECT_SHIFT) &
+			PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE |
+			CLEAR_ON_DUMP;
+
+	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(counter_num), val);
 }
 
 static void feac_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
+	if (llcc_priv->version == REV_2)
+		mask_val = EVENT_SEL_MASK7;
+
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
 		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;
 
 	if (enable) {
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->version == REV_2)
+			val = (event_type << EVENT_SEL_SHIFT) &
+					EVENT_SEL_MASK7;
+
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
-
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_FEAC, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEAC, *counter_num, enable);
 }
 
 static void feac_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -748,10 +778,10 @@ static struct event_port_ops feac_port_ops = {
 };
 
 static void ferc_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
@@ -762,12 +792,11 @@ static void ferc_event_config(struct llcc_perfmon_private *llcc_priv,
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
 
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_FERC, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_FERC, *counter_num, enable);
 }
 
 static void ferc_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -810,10 +839,10 @@ static struct event_port_ops ferc_port_ops = {
 };
 
 static void fewc_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
@@ -824,12 +853,11 @@ static void fewc_event_config(struct llcc_perfmon_private *llcc_priv,
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
 
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_FEWC, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEWC, *counter_num, enable);
 }
 
 static void fewc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
@@ -857,17 +885,18 @@ static struct event_port_ops fewc_port_ops = {
 };
 
 static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 	uint32_t valcfg = 0, mask_valcfg;
 	unsigned int mc_cnt, offset;
+	struct llcc_perfmon_counter_map *counter_map;
 
 	mask_val = EVENT_SEL_MASK;
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) {
 		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;
-		if (llcc_priv->version == REV_0)
+		if (llcc_priv->version == REV_2)
 			mask_valcfg = BEAC_WR_BEAT_FILTER_SEL_MASK |
 				BEAC_WR_BEAT_FILTER_EN_MASK |
 				BEAC_RD_BEAT_FILTER_SEL_MASK |
@@ -877,9 +906,8 @@ static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
 	if (enable) {
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) {
-			val |= (FILTER_0 << FILTER_SEL_SHIFT) |
-				FILTER_EN;
-			if (llcc_priv->version == REV_0)
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+			if (llcc_priv->version == REV_2)
 				valcfg = (FILTER_0 <<
 					BEAC_WR_BEAT_FILTER_SEL_SHIFT) |
 					BEAC_WR_BEAT_FILTER_EN |
@@ -887,20 +915,35 @@ static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
 					BEAC_RD_BEAT_FILTER_SEL_SHIFT) |
 					BEAC_RD_BEAT_FILTER_EN;
 		}
-
-		counter_num = event_counter_num;
 	}
 
 	for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
-		offset = BEAC_PROF_EVENT_n_CFG(event_counter_num) +
+		offset = BEAC_PROF_EVENT_n_CFG(*counter_num + mc_cnt) +
 			mc_cnt * BEAC_INST_OFF;
 		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
 
 		offset = BEAC_PROF_CFG + mc_cnt * BEAC_INST_OFF;
 		llcc_bcast_modify(llcc_priv, offset, valcfg, mask_valcfg);
+
+		perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC, *counter_num,
+				enable);
+		/* DBX uses 2 counters for BEAC 0 & 1 */
+		if (mc_cnt == 1)
+			perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC1,
+					*counter_num + mc_cnt, enable);
 	}
 
-	perfmon_counter_config(llcc_priv, EVENT_PORT_BEAC, counter_num);
+	/* DBX uses 2 counters for BEAC 0 & 1 */
+	if (llcc_priv->num_mc > 1) {
+		counter_map = &llcc_priv->configured[(*counter_num)++];
+		if (enable) {
+			counter_map->port_sel = EVENT_PORT_BEAC;
+			counter_map->event_sel = event_type;
+		} else {
+			counter_map->port_sel = MAX_NUMBER_OF_PORTS;
+			counter_map->event_sel = 100;
+		}
+	}
 }
 
 static void beac_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -961,10 +1004,10 @@ static struct event_port_ops beac_port_ops = {
 };
 
 static void berc_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
@@ -974,13 +1017,11 @@ static void berc_event_config(struct llcc_perfmon_private *llcc_priv,
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
-
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_BERC, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_BERC, *counter_num, enable);
 }
 
 static void berc_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -1023,26 +1064,31 @@ static struct event_port_ops berc_port_ops = {
 };
 
 static void trp_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
+	if (llcc_priv->version == REV_2)
+		mask_val = EVENT_SEL_MASK7;
+
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
 		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;
 
 	if (enable) {
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->version == REV_2)
+			val = (event_type << EVENT_SEL_SHIFT) &
+					EVENT_SEL_MASK7;
+
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
-
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_TRP, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_TRP, *counter_num, enable);
 }
 
 static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
@@ -1052,11 +1098,18 @@ static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
 	uint32_t val = 0, mask_val;
 
 	if (filter == SCID) {
-		if (enable)
-			val = (match << TRP_SCID_MATCH_SHIFT) |
-				(mask << TRP_SCID_MASK_SHIFT);
+		if (llcc_priv->version == REV_2) {
+			if (enable)
+				val = (1 << match);
 
-		mask_val = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK;
+			mask_val = SCID_MULTI_MATCH_MASK;
+		} else {
+			if (enable)
+				val = (match << TRP_SCID_MATCH_SHIFT) |
+					(mask << TRP_SCID_MASK_SHIFT);
+
+			mask_val = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK;
+		}
 	} else if (filter == WAY_ID) {
 		if (enable)
 			val = (match << TRP_WAY_ID_MATCH_SHIFT) |
@@ -1074,7 +1127,12 @@ static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
 		return;
 	}
 
-	llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val, mask_val);
+	if ((llcc_priv->version == REV_2) && (filter == SCID))
+		llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG2, val,
+				mask_val);
+	else
+		llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val,
+				mask_val);
 }
 
 static struct event_port_ops  trp_port_ops = {
@@ -1083,26 +1141,31 @@ static struct event_port_ops  trp_port_ops = {
 };
 
 static void drp_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
+	if (llcc_priv->version == REV_2)
+		mask_val = EVENT_SEL_MASK7;
+
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
 		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;
 
 	if (enable) {
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->version == REV_2)
+			val = (event_type << EVENT_SEL_SHIFT) &
+					EVENT_SEL_MASK7;
+
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
-
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_DRP, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_DRP, *counter_num, enable);
 }
 
 static void drp_event_enable(struct llcc_perfmon_private *llcc_priv,
@@ -1123,10 +1186,10 @@ static struct event_port_ops drp_port_ops = {
 };
 
 static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv,
-		unsigned int event_type, unsigned int event_counter_num,
+		unsigned int event_type, unsigned int *counter_num,
 		bool enable)
 {
-	uint32_t val = 0, mask_val, counter_num = 0;
+	uint32_t val = 0, mask_val;
 
 	mask_val = EVENT_SEL_MASK;
 	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
@@ -1136,13 +1199,11 @@ static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv,
 		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
 		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
 			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
-
-		counter_num = event_counter_num;
 	}
 
-	llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(event_counter_num),
+	llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(*counter_num),
 			val, mask_val);
-	perfmon_counter_config(llcc_priv, EVENT_PORT_PMGR, counter_num);
+	perfmon_cntr_config(llcc_priv, EVENT_PORT_PMGR, *counter_num, enable);
 }
 
 static struct event_port_ops pmgr_port_ops = {
@@ -1191,17 +1252,22 @@ static int llcc_perfmon_probe(struct platform_device *pdev)
 
 	llcc_priv->llcc_map = llcc_driv_data->regmap;
 	llcc_priv->llcc_bcast_map = llcc_driv_data->bcast_regmap;
-
 	llcc_bcast_read(llcc_priv, LLCC_COMMON_STATUS0, &val);
 	llcc_priv->num_mc = (val & NUM_MC_MASK) >> NUM_MC_SHIFT;
+	/* Setting to 1, as some platforms it read as 0 */
+	if (llcc_priv->num_mc == 0)
+		llcc_priv->num_mc = 1;
+
 	llcc_priv->num_banks = (val & LB_CNT_MASK) >> LB_CNT_SHIFT;
 	for (val = 0; val < llcc_priv->num_banks; val++)
 		llcc_priv->bank_off[val] = BANK_OFFSET * val;
 
 	llcc_priv->version = REV_0;
 	llcc_bcast_read(llcc_priv, LLCC_COMMON_HW_INFO, &val);
-	if (val >= LLCC_VERSION)
+	if (val == LLCC_VERSION_1)
 		llcc_priv->version = REV_1;
+	else if (val == LLCC_VERSION_2)
+		llcc_priv->version = REV_2;
 
 	result = sysfs_create_group(&pdev->dev.kobj, &llcc_perfmon_group);
 	if (result) {
@@ -1222,6 +1288,8 @@ static int llcc_perfmon_probe(struct platform_device *pdev)
 	hrtimer_init(&llcc_priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	llcc_priv->hrtimer.function = llcc_perfmon_timer_handler;
 	llcc_priv->expires = 0;
+	pr_info("Revision %d, has %d memory controllers connected with LLCC\n",
+			llcc_priv->version, llcc_priv->num_mc);
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/llcc_perfmon.h b/drivers/soc/qcom/llcc_perfmon.h
index 816e4c7..4a3accd 100644
--- a/drivers/soc/qcom/llcc_perfmon.h
+++ b/drivers/soc/qcom/llcc_perfmon.h
@@ -12,49 +12,50 @@
 #define FEAC_PROF_FILTER_0_CFG3		(0x03700C)
 #define FEAC_PROF_FILTER_0_CFG5		(0x037014)
 #define FEAC_PROF_FILTER_0_CFG6		(0x037018)
-#define FEAC_PROF_EVENT_n_CFG(n)	(0x037060 + 4 * n)
+#define FEAC_PROF_EVENT_n_CFG(n)	(0x037060 + 4 * (n))
 #define FEAC_PROF_CFG			(0x0370A0)
 
 /* FERC */
 #define FERC_PROF_FILTER_0_CFG0		(0x03B000)
-#define FERC_PROF_EVENT_n_CFG(n)	(0x03B020 + 4 * n)
+#define FERC_PROF_EVENT_n_CFG(n)	(0x03B020 + 4 * (n))
 #define FERC_PROF_CFG			(0x03B060)
 
 /* FEWC */
 #define FEWC_PROF_FILTER_0_CFG0		(0x033000)
-#define FEWC_PROF_EVENT_n_CFG(n)	(0x033020 + 4 * n)
+#define FEWC_PROF_EVENT_n_CFG(n)	(0x033020 + 4 * (n))
 
 /* BEAC */
 #define BEAC_PROF_FILTER_0_CFG5		(0x049014)
-#define BEAC_PROF_EVENT_n_CFG(n)	(0x049040 + 4 * n)
+#define BEAC_PROF_EVENT_n_CFG(n)	(0x049040 + 4 * (n))
 #define BEAC_PROF_CFG			(0x049080)
 #define BEAC_INST_OFF			(0x4000)
 
 /* BERC */
 #define BERC_PROF_FILTER_0_CFG0		(0x039000)
-#define BERC_PROF_EVENT_n_CFG(n)	(0x039020 + 4 * n)
+#define BERC_PROF_EVENT_n_CFG(n)	(0x039020 + 4 * (n))
 #define BERC_PROF_CFG			(0x039060)
 
 /* TRP */
 #define TRP_PROF_FILTER_0_CFG1		(0x024004)
-#define TRP_PROF_EVENT_n_CFG(n)		(0x024020 + 4 * n)
-#define TRP_SCID_n_STATUS(n)		(0x000004 + 0x1000 * n)
+#define TRP_PROF_FILTER_0_CFG2		(0x024008)
+#define TRP_PROF_EVENT_n_CFG(n)		(0x024020 + 4 * (n))
+#define TRP_SCID_n_STATUS(n)		(0x000004 + 0x1000 * (n))
 
 /* DRP */
-#define DRP_PROF_EVENT_n_CFG(n)		(0x044010 + 4 * n)
+#define DRP_PROF_EVENT_n_CFG(n)		(0x044010 + 4 * (n))
 #define DRP_PROF_CFG			(0x044050)
 
 /* PMGR */
-#define PMGR_PROF_EVENT_n_CFG(n)	(0x03F000 + 4 * n)
+#define PMGR_PROF_EVENT_n_CFG(n)	(0x03F000 + 4 * (n))
 
-#define PERFMON_COUNTER_n_CONFIG(n)	(0x031020 + 4 * n)
+#define PERFMON_COUNTER_n_CONFIG(n)	(0x031020 + 4 * (n))
 #define PERFMON_MODE			(0x03100C)
 #define PERFMON_DUMP			(0x031010)
-#define BROADCAST_COUNTER_n_VALUE(n)	(0x031060 + 4 * n)
+#define BROADCAST_COUNTER_n_VALUE(n)	(0x031060 + 4 * (n))
 
-#define LLCC_COUNTER_n_VALUE(n)		(0x031060 + 4 * n)
+#define LLCC_COUNTER_n_VALUE(n)		(0x031060 + 4 * (n))
 
-#define EVENT_NUM_MAX			(64)
+#define EVENT_NUM_MAX			(128)
 #define SCID_MAX			(32)
 
 /* Perfmon */
@@ -122,6 +123,8 @@
 #define EVENT_SEL_SHIFT			(0)
 #define EVENT_SEL_MASK			GENMASK(EVENT_SEL_SHIFT + 5,\
 						EVENT_SEL_SHIFT)
+#define EVENT_SEL_MASK7			GENMASK(EVENT_SEL_SHIFT + 6,\
+						EVENT_SEL_SHIFT)
 
 #define CACHEALLOC_MASK_SHIFT		(16)
 #define CACHEALLOC_MASK_MASK		GENMASK(CACHEALLOC_MASK_SHIFT + 3, \
@@ -263,8 +266,10 @@
 					+ 13, \
 					TRP_SCID_STATUS_CURRENT_CAP_SHIFT)
 
-#define LLCC_VERSION			(0x01010100)
+#define LLCC_VERSION_1			(0x01010200)
+#define LLCC_VERSION_2			(0x02000000)
 #define REV_0				(0x0)
 #define REV_1				(0x1)
+#define REV_2				(0x2)
 #define BANK_OFFSET			(0x80000)
 #endif /* _SOC_QCOM_LLCC_PERFMON_H_ */
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index a78ce19..4b0785a 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -16,6 +16,8 @@
 #include <linux/of.h>
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #define AOP_MSG_ADDR_MASK		0xffffffff
 #define AOP_MSG_ADDR_HIGH_SHIFT		32
@@ -50,6 +52,43 @@ static struct mem_offline_mailbox {
 
 static struct section_stat *mem_info;
 
+static void clear_pgtable_mapping(phys_addr_t start, phys_addr_t end)
+{
+	unsigned long size = end - start;
+	unsigned long virt = (unsigned long)phys_to_virt(start);
+	unsigned long addr_end = virt + size;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset_k(virt);
+
+	while (virt < addr_end) {
+
+		/* Check if we have PUD section mapping */
+		pud = pud_offset(pgd, virt);
+		if (pud_sect(*pud)) {
+			pud_clear(pud);
+			virt += PUD_SIZE;
+			continue;
+		}
+
+		/* Check if we have PMD section mapping */
+		pmd = pmd_offset(pud, virt);
+		if (pmd_sect(*pmd)) {
+			pmd_clear(pmd);
+			virt += PMD_SIZE;
+			continue;
+		}
+
+		/* Clear mapping for page entry */
+		set_memory_valid(virt, 1, (int)false);
+		virt += PAGE_SIZE;
+	}
+
+	virt = (unsigned long)phys_to_virt(start);
+	flush_tlb_kernel_range(virt, addr_end);
+}
 void record_stat(unsigned long sec, ktime_t delay, int mode)
 {
 	unsigned int total_sec = end_section_nr - start_section_nr + 1;
@@ -137,13 +176,18 @@ static int mem_event_callback(struct notifier_block *self,
 		if (aop_send_msg(__pfn_to_phys(start), true))
 			pr_err("PASR: AOP online request addr:0x%llx failed\n",
 			       __pfn_to_phys(start));
+		if (!debug_pagealloc_enabled()) {
+			/* Create kernel page-tables */
+			create_pgtable_mapping(start_addr, end_addr);
+		}
 
 		break;
 	case MEM_ONLINE:
-		pr_info("mem-offline: Onlined memory block mem%lu\n", sec_nr);
 		delay = ktime_ms_delta(ktime_get(), cur);
 		record_stat(sec_nr, delay, MEMORY_ONLINE);
 		cur = 0;
+		pr_info("mem-offline: Onlined memory block mem%pK\n",
+			(void *)sec_nr);
 		break;
 	case MEM_GOING_OFFLINE:
 		pr_debug("mem-offline: MEM_GOING_OFFLINE : start = 0x%llx end = 0x%llx\n",
@@ -153,8 +197,10 @@ static int mem_event_callback(struct notifier_block *self,
 		cur = ktime_get();
 		break;
 	case MEM_OFFLINE:
-		pr_info("mem-offline: Offlined memory block mem%lu\n", sec_nr);
-
+		if (!debug_pagealloc_enabled()) {
+			/* Clear kernel page-tables */
+			clear_pgtable_mapping(start_addr, end_addr);
+		}
 		if (aop_send_msg(__pfn_to_phys(start), false))
 			pr_err("PASR: AOP offline request addr:0x%llx failed\n",
 			       __pfn_to_phys(start));
@@ -162,10 +208,15 @@ static int mem_event_callback(struct notifier_block *self,
 		delay = ktime_ms_delta(ktime_get(), cur);
 		record_stat(sec_nr, delay, MEMORY_OFFLINE);
 		cur = 0;
+		pr_info("mem-offline: Offlined memory block mem%pK\n",
+			(void *)sec_nr);
 		break;
 	case MEM_CANCEL_ONLINE:
 		pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%llx end = 0x%llx\n",
 				start_addr, end_addr);
+		if (aop_send_msg(__pfn_to_phys(start), false))
+			pr_err("PASR: AOP online request addr:0x%llx failed\n",
+			       __pfn_to_phys(start));
 		break;
 	default:
 		break;
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 7cc7dfb..459ebea 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -111,8 +111,8 @@ static int msm_dump_data_add_minidump(struct msm_dump_entry *entry)
 	return msm_minidump_add_region(&md_entry);
 }
 
-int msm_dump_data_register(enum msm_dump_table_ids id,
-			   struct msm_dump_entry *entry)
+static int register_dump_table_entry(enum msm_dump_table_ids id,
+					struct msm_dump_entry *entry)
 {
 	struct msm_dump_entry *e;
 	struct msm_dump_table *table;
@@ -131,13 +131,42 @@ int msm_dump_data_register(enum msm_dump_table_ids id,
 	table->num_entries++;
 
 	dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
-	if (msm_dump_data_add_minidump(entry))
-		pr_err("Failed to add entry in Minidump table\n");
-
 	return 0;
 }
+
+/**
+ * msm_dump_data_register - register to dump data and minidump framework
+ * @id: ID of the dump table.
+ * @entry: dump entry to be registered
+ * This api will register the entry passed to dump table and minidump table
+ */
+int msm_dump_data_register(enum msm_dump_table_ids id,
+			   struct msm_dump_entry *entry)
+{
+	int ret;
+
+	ret = register_dump_table_entry(id, entry);
+	if (!ret)
+		if (msm_dump_data_add_minidump(entry))
+			pr_err("Failed to add entry in Minidump table\n");
+
+	return ret;
+}
 EXPORT_SYMBOL(msm_dump_data_register);
 
+/**
+ * msm_dump_data_register_nominidump - register to dump data framework
+ * @id: ID of the dump table.
+ * @entry: dump entry to be registered
+ * This api will register the entry passed to dump table only
+ */
+int msm_dump_data_register_nominidump(enum msm_dump_table_ids id,
+			   struct msm_dump_entry *entry)
+{
+	return register_dump_table_entry(id, entry);
+}
+EXPORT_SYMBOL(msm_dump_data_register_nominidump);
+
 static int __init init_memory_dump(void)
 {
 	struct msm_dump_table *table;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index cba9a42..801be431 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -59,6 +59,8 @@ static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
 		return;
 
 	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	if (!search_node)
+		return;
 	INIT_LIST_HEAD(&search_node->node_list);
 	list_splice_init(edge_list, traverse_list);
 	list_splice_init(traverse_list, &search_node->node_list);
@@ -450,6 +452,8 @@ static int getpath(struct device *src_dev, int dest, const char *cl_name)
 		/* Keep tabs of the previous search list */
 		search_node = kzalloc(sizeof(struct bus_search_type),
 				 GFP_KERNEL);
+		if (!search_node)
+			return -ENOMEM;
 		INIT_LIST_HEAD(&search_node->node_list);
 		list_splice_init(&traverse_list,
 				 &search_node->node_list);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index cbd5ee9..d0afd29 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -571,7 +571,8 @@ int msm_bus_commit_data(struct list_head *clist)
 
 	list_for_each_entry_safe(node, node_tmp, clist, link) {
 		bcm_clist_add(node);
-		msm_bus_dev_sbm_config(&node->dev, false);
+		if (!init_time)
+			msm_bus_dev_sbm_config(&node->dev, false);
 	}
 
 	if (!cur_rsc) {
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 2d1e465..65089ad 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -24,9 +24,6 @@
 #define MODULE_NAME "qdss_bridge"
 #define INIT_STATUS -1
 
-/* Max number of objects needed */
-static int poolsize = 32;
-
 static struct class *mhi_class;
 static enum mhi_dev_state dev_state = INIT_STATUS;
 static enum mhi_ch curr_chan;
@@ -99,8 +96,12 @@ static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata)
 	void *buf;
 	struct qdss_request *usb_req;
 	int i;
+	struct mhi_device *mhi_dev = drvdata->mhi_dev;
 
-	for (i = 0; i < poolsize; i++) {
+	drvdata->nr_trbs = mhi_get_no_free_descriptors(mhi_dev,
+							DMA_FROM_DEVICE);
+
+	for (i = 0; i < drvdata->nr_trbs; i++) {
 		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry)
 			goto err;
@@ -454,7 +455,7 @@ static void usb_notifier(void *priv, unsigned int event,
 
 	switch (event) {
 	case USB_QDSS_CONNECT:
-		usb_qdss_alloc_req(ch, poolsize, 0);
+		usb_qdss_alloc_req(ch, drvdata->nr_trbs, 0);
 		mhi_queue_read(drvdata);
 		break;
 
@@ -708,12 +709,11 @@ static ssize_t mhi_uci_read(struct file *file,
 static int mhi_queue_inbound(struct qdss_bridge_drvdata *drvdata)
 {
 	struct mhi_device *mhi_dev = drvdata->mhi_dev;
-	int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
 	void *buf;
 	struct qdss_mhi_buf_tbl_t *entry;
 	int ret = -EIO, i;
 
-	for (i = 0; i < nr_trbs; i++) {
+	for (i = 0; i < drvdata->nr_trbs; i++) {
 		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry)
 			goto err;
@@ -879,7 +879,7 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
 			return -EINVAL;
 		if (!strcmp(id->chan, "QDSS"))
 			curr_chan = QDSS;
-		if (!strcmp(id->chan, "QDSS_HW"))
+		if (!strcmp(id->chan, "IP_HW_QDSS"))
 			curr_chan = QDSS_HW;
 	} else if (dev_state == MHI_STATE_RESET) {
 		if (strcmp(id->chan, str_mhi_curr_chan[curr_chan]))
@@ -891,7 +891,7 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
 		}
 		if (!strcmp(id->chan, "QDSS"))
 			curr_chan = QDSS;
-		if (!strcmp(id->chan, "QDSS_HW"))
+		if (!strcmp(id->chan, "IP_HW_QDSS"))
 			curr_chan = QDSS_HW;
 	}
 
@@ -962,8 +962,8 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
 }
 
 static const struct mhi_device_id qdss_mhi_match_table[] = {
-	{ .chan = "QDSS", .driver_data = 0x4000 },
-	{ .chan = "IP_HW_QDSS", .driver_data = 0x4000 },
+	{ .chan = "QDSS", .driver_data = 0x8000 },
+	{ .chan = "IP_HW_QDSS", .driver_data = 0x8000 },
 	{},
 };
 
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
index 81f096f..3a79eb2 100644
--- a/drivers/soc/qcom/qdss_bridge.h
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -38,6 +38,7 @@ enum mhi_ch {
 
 struct qdss_bridge_drvdata {
 	int alias;
+	int nr_trbs;
 	enum open_status opened;
 	struct completion completion;
 	size_t mtu;
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 047de99..8560882 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -23,7 +23,12 @@
 
 #define FLAG_DFC_MASK 0x000F
 #define FLAG_POWERSAVE_MASK 0x0010
-#define DFC_MODE_MULTIQ 2
+#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
+#define DFC_SUPPORTED_MODE(m) \
+	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+
+int dfc_mode;
+#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
 
 unsigned int rmnet_wq_frequency __read_mostly = 1000;
 
@@ -36,6 +41,10 @@ unsigned int rmnet_wq_frequency __read_mostly = 1000;
 static unsigned int qmi_rmnet_scale_factor = 5;
 #endif
 
+static int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi);
+
 struct qmi_elem_info data_ep_id_type_v01_ei[] = {
 	{
 		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
@@ -73,7 +82,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
 {
 	int i;
 
-	if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
+	if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
 		return NULL;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -128,6 +137,8 @@ qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
 		list_del(&bearer->list);
 		kfree(bearer);
 	}
+
+	memset(qos->mq, 0, sizeof(qos->mq));
 }
 
 struct rmnet_flow_map *
@@ -166,17 +177,17 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
 	itm->bearer_id = new_map->bearer_id;
 	itm->flow_id = new_map->flow_id;
 	itm->ip_type = new_map->ip_type;
-	itm->tcm_handle = new_map->tcm_handle;
+	itm->mq_idx = new_map->mq_idx;
 }
 
-int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
+int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
 {
 	struct netdev_queue *q;
 
-	if (unlikely(tcm_handle >= dev->num_tx_queues))
+	if (unlikely(mq_idx >= dev->num_tx_queues))
 		return 0;
 
-	q = netdev_get_tx_queue(dev, tcm_handle);
+	q = netdev_get_tx_queue(dev, mq_idx);
 	if (unlikely(!q))
 		return 0;
 
@@ -188,74 +199,113 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
 	return 0;
 }
 
+static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
+{
+	struct Qdisc *qdisc;
+
+	if (unlikely(txq >= dev->num_tx_queues))
+		return;
+
+	qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
+	if (qdisc) {
+		spin_lock_bh(qdisc_lock(qdisc));
+		qdisc_reset(qdisc);
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
+}
+
 static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			      struct qmi_info *qmi)
 {
 	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
 	struct rmnet_flow_map new_map, *itm;
 	struct rmnet_bearer_map *bearer;
+	struct tcmsg tmp_tcm;
+	struct mq_map *mq;
+	u32 mq_idx;
 
-	if (!qos_info)
+	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
 		return -EINVAL;
 
 	ASSERT_RTNL();
 
 	/* flow activate
 	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
-	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle
+	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
 	 */
 
 	new_map.bearer_id = tcm->tcm__pad1;
 	new_map.flow_id = tcm->tcm_parent;
 	new_map.ip_type = tcm->tcm_ifindex;
-	new_map.tcm_handle = tcm->tcm_handle;
+	new_map.mq_idx = tcm->tcm_handle;
 	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
-			    new_map.ip_type, new_map.tcm_handle, 1);
+			    new_map.ip_type, new_map.mq_idx, 1);
 
+again:
 	spin_lock_bh(&qos_info->qos_lock);
 
 	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
 				     new_map.ip_type);
 	if (itm) {
-		qmi_rmnet_update_flow_map(itm, &new_map);
+		pr_debug("%s: stale flow found\n", __func__);
+		tmp_tcm.tcm__pad1 = itm->bearer_id;
+		tmp_tcm.tcm_parent = itm->flow_id;
+		tmp_tcm.tcm_ifindex = itm->ip_type;
+		tmp_tcm.tcm_handle = itm->mq_idx;
+		spin_unlock_bh(&qos_info->qos_lock);
+		qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
+		goto again;
+	}
+
+	/* Create flow map */
+	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
+	if (!itm) {
+		spin_unlock_bh(&qos_info->qos_lock);
+		return -ENOMEM;
+	}
+
+	qmi_rmnet_update_flow_map(itm, &new_map);
+	list_add(&itm->list, &qos_info->flow_head);
+
+	/* Create or update bearer map */
+	bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
+	if (bearer) {
+		bearer->flow_ref++;
 	} else {
-		itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
-		if (!itm) {
+		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+		if (!bearer) {
 			spin_unlock_bh(&qos_info->qos_lock);
 			return -ENOMEM;
 		}
 
-		qmi_rmnet_update_flow_map(itm, &new_map);
-		list_add(&itm->list, &qos_info->flow_head);
+		bearer->bearer_id = new_map.bearer_id;
+		bearer->flow_ref = 1;
+		bearer->grant_size = qos_info->default_grant;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		qos_info->default_grant = DEFAULT_GRANT;
+		list_add(&bearer->list, &qos_info->bearer_head);
+	}
+	itm->bearer = bearer;
 
-		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
-		if (bearer) {
-			bearer->flow_ref++;
-		} else {
-			bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
-			if (!bearer) {
-				spin_unlock_bh(&qos_info->qos_lock);
-				return -ENOMEM;
-			}
+	/* Update mq map */
+	mq_idx = tcm->tcm_handle;
+	mq = &qos_info->mq[mq_idx];
+	if (!mq->bearer) {
+		mq->bearer = bearer;
+		mq->ancillary = IS_ANCILLARY(new_map.ip_type);
 
-			bearer->bearer_id = new_map.bearer_id;
-			bearer->flow_ref = 1;
-			bearer->grant_size = qos_info->default_grant;
-			bearer->grant_thresh =
-				qmi_rmnet_grant_per(bearer->grant_size);
-			qos_info->default_grant = DEFAULT_GRANT;
-			list_add(&bearer->list, &qos_info->bearer_head);
-		}
+		qmi_rmnet_flow_control(dev, mq_idx,
+				       bearer->grant_size > 0 ? 1 : 0);
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+				 bearer->grant_size, 0, mq_idx,
+				 bearer->grant_size > 0 ? 1 : 0);
 
-		qmi_rmnet_flow_control(dev, itm->tcm_handle,
-				bearer->grant_size > 0 ? 1 : 0);
-
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
-				 bearer->grant_size, 0, itm->tcm_handle, 1);
+	} else if (mq->bearer->bearer_id != new_map.bearer_id) {
+		pr_debug("%s: un-managered bearer %u\n",
+				__func__, new_map.bearer_id);
 	}
 
 	spin_unlock_bh(&qos_info->qos_lock);
-
 	return 0;
 }
 
@@ -266,6 +316,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
 	struct rmnet_flow_map new_map, *itm;
 	struct rmnet_bearer_map *bearer;
+	struct mq_map *mq;
+	u32 mq_idx;
 
 	if (!qos_info)
 		return -EINVAL;
@@ -287,31 +339,38 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 	if (itm) {
 		trace_dfc_flow_info(dev->name, new_map.bearer_id,
 				    new_map.flow_id, new_map.ip_type,
-				    itm->tcm_handle, 0);
-		list_del(&itm->list);
+				    itm->mq_idx, 0);
 
-		/* Enable flow to allow new call setup */
-		qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
-				 0, 0, itm->tcm_handle, 1);
-
-		/*clear bearer map*/
-		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
+		bearer = itm->bearer;
 		if (bearer && --bearer->flow_ref == 0) {
+			/* Remove the bearer from mq map */
+			for (mq_idx = 0; mq_idx < MAX_MQ_NUM; mq_idx++) {
+				mq = &qos_info->mq[mq_idx];
+				if (mq->bearer != bearer)
+					continue;
+
+				mq->bearer = NULL;
+				mq->ancillary = false;
+				qmi_rmnet_reset_txq(dev, mq_idx);
+				qmi_rmnet_flow_control(dev, mq_idx, 1);
+				trace_dfc_qmi_tc(dev->name,
+					new_map.bearer_id, 0, 0, mq_idx, 1);
+			}
+
+			/* Remove from bearer map */
 			list_del(&bearer->list);
 			kfree(bearer);
 		}
 
+		/* Remove from flow map */
+		list_del(&itm->list);
 		kfree(itm);
 	}
 
-	if (list_empty(&qos_info->flow_head)) {
+	if (list_empty(&qos_info->flow_head))
 		netif_tx_wake_all_queues(dev);
-		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
-	}
 
 	spin_unlock_bh(&qos_info->qos_lock);
-
 	return 0;
 }
 
@@ -388,7 +447,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 	svc.ep_type = tcm->tcm_info;
 	svc.iface_id = tcm->tcm_parent;
 
-	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
+	if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
 	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
 		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
 		if (rc < 0)
@@ -451,6 +510,8 @@ qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 		wda_qmi_client_exit(data);
 		qmi->wda_client = NULL;
 		qmi->wda_pending = NULL;
+	} else {
+		qmi_rmnet_flush_ps_wq();
 	}
 
 	__qmi_rmnet_delete_client(port, qmi, idx);
@@ -463,20 +524,21 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
 
 	switch (tcm->tcm_family) {
 	case NLMSG_FLOW_ACTIVATE:
-		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
+		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
 		    !qmi_rmnet_has_dfc_client(qmi))
 			return;
 
 		qmi_rmnet_add_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_FLOW_DEACTIVATE:
-		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
+		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
 			return;
 
 		qmi_rmnet_del_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_CLIENT_SETUP:
-		if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
+		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
+		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
 		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
 			return;
 
@@ -553,7 +615,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
 {
 	struct qos_info *qos;
 	struct rmnet_bearer_map *bearer;
-	bool do_wake = false;
+	bool do_wake;
 
 	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
 	if (!qos)
@@ -562,19 +624,18 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
 	spin_lock_bh(&qos->qos_lock);
 
 	list_for_each_entry(bearer, &qos->bearer_head, list) {
-		if (!bearer->grant_size)
-			do_wake = true;
+		if (bearer->tx_off)
+			continue;
+		do_wake = !bearer->grant_size;
 		bearer->grant_size = DEFAULT_GRANT;
 		bearer->grant_thresh = DEFAULT_GRANT;
 		bearer->seq = 0;
 		bearer->ack_req = 0;
 		bearer->tcp_bidir = false;
 		bearer->rat_switch = false;
-	}
 
-	if (do_wake) {
-		netif_tx_wake_all_queues(dev);
-		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
+		if (do_wake)
+			dfc_bearer_flow_ctl(dev, bearer, qos);
 	}
 
 	spin_unlock_bh(&qos->qos_lock);
@@ -630,6 +691,10 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 	if (!qos)
 		return 0;
 
+	/* If mark is mq num return it */
+	if (dfc_mode == DFC_MODE_MQ_NUM)
+		return mark;
+
 	switch (skb->protocol) {
 	/* TCPv4 ACKs */
 	case htons(ETH_P_IP):
@@ -664,7 +729,7 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 	if (unlikely(!itm))
 		goto done;
 
-	txq = itm->tcm_handle;
+	txq = itm->mq_idx;
 
 done:
 	spin_unlock_bh(&qos->qos_lock);
@@ -682,7 +747,7 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 {
 	struct qos_info *qos;
 
-	qos = kmalloc(sizeof(*qos), GFP_KERNEL);
+	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
 	if (!qos)
 		return NULL;
 
@@ -716,13 +781,14 @@ EXPORT_SYMBOL(qmi_rmnet_qos_exit);
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
 static struct workqueue_struct  *rmnet_ps_wq;
 static struct rmnet_powersave_work *rmnet_work;
+static bool rmnet_work_quit;
 static LIST_HEAD(ps_list);
 
 struct rmnet_powersave_work {
 	struct delayed_work work;
 	void *port;
-	u64 old_rx_pkts;
-	u64 old_tx_pkts;
+	u64 old_rx_bytes;
+	u64 old_tx_bytes;
 };
 
 void qmi_rmnet_ps_on_notify(void *port)
@@ -792,28 +858,25 @@ int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
 		return rc;
 	}
 
-	if (enable)
-		dfc_qmi_wq_flush(qmi);
-	else
-		qmi_rmnet_query_flows(qmi);
-
 	return 0;
 }
 EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
 
 static void qmi_rmnet_work_restart(void *port)
 {
-	if (!rmnet_ps_wq || !rmnet_work)
-		return;
-	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
+	rcu_read_lock();
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
+	rcu_read_unlock();
 }
 
 static void qmi_rmnet_check_stats(struct work_struct *work)
 {
 	struct rmnet_powersave_work *real_work;
 	struct qmi_info *qmi;
-	u64 rxd, txd;
+	s64 rxd, txd;
 	u64 rx, tx;
+	u64 rx_rate, tx_rate;
 	bool dl_msg_active;
 
 	real_work = container_of(to_delayed_work(work),
@@ -827,28 +890,37 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 		return;
 
 	if (qmi->ps_enabled) {
-		/* Register to get QMI DFC and DL marker */
-		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
-			/* If this failed need to retry quickly */
-			queue_delayed_work(rmnet_ps_wq,
-					   &real_work->work, HZ / 50);
-			return;
 
-		}
+		/* Ready to accept grant */
+		qmi->ps_ignore_grant = false;
+
+		/* Register to get QMI DFC and DL marker */
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0)
+			goto end;
+
 		qmi->ps_enabled = false;
 
+		/* Do a query when coming out of powersave */
+		qmi_rmnet_query_flows(qmi);
+
 		if (rmnet_get_powersave_notif(real_work->port))
 			qmi_rmnet_ps_off_notify(real_work->port);
 
-
 		goto end;
 	}
 
-	rmnet_get_packets(real_work->port, &rx, &tx);
-	rxd = rx - real_work->old_rx_pkts;
-	txd = tx - real_work->old_tx_pkts;
-	real_work->old_rx_pkts = rx;
-	real_work->old_tx_pkts = tx;
+	rmnet_get_stats(real_work->port, &rx, &tx);
+	rxd = rx - real_work->old_rx_bytes;
+	txd = tx - real_work->old_tx_bytes;
+	real_work->old_rx_bytes = rx;
+	real_work->old_tx_bytes = tx;
+
+	if (rxd >= 0 && txd >= 0) {
+		/* data rates in bits/s */
+		rx_rate = (rxd * HZ / PS_INTERVAL) << 3;
+		tx_rate = (txd * HZ / PS_INTERVAL) << 3;
+		rmnet_set_data_rates(real_work->port, rx_rate, tx_rate);
+	}
 
 	dl_msg_active = qmi->dl_msg_active;
 	qmi->dl_msg_active = false;
@@ -862,13 +934,14 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 			goto end;
 
 		/* Deregister to suppress QMI DFC and DL marker */
-		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
-			queue_delayed_work(rmnet_ps_wq,
-					   &real_work->work, PS_INTERVAL);
-			return;
-		}
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0)
+			goto end;
+
 		qmi->ps_enabled = true;
 
+		/* Ignore grant after going into powersave */
+		qmi->ps_ignore_grant = true;
+
 		/* Clear the bit before enabling flow so pending packets
 		 * can trigger the work again
 		 */
@@ -881,7 +954,10 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 		return;
 	}
 end:
-	queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
+	rcu_read_lock();
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
+	rcu_read_unlock();
 }
 
 static void qmi_rmnet_work_set_active(void *port, int status)
@@ -906,7 +982,7 @@ void qmi_rmnet_work_init(void *port)
 	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
 					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 
-	rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC);
+	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
 	if (!rmnet_work) {
 		destroy_workqueue(rmnet_ps_wq);
 		rmnet_ps_wq = NULL;
@@ -914,9 +990,10 @@ void qmi_rmnet_work_init(void *port)
 	}
 	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
 	rmnet_work->port = port;
-	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
-			  &rmnet_work->old_tx_pkts);
+	rmnet_get_stats(rmnet_work->port, &rmnet_work->old_rx_bytes,
+			&rmnet_work->old_tx_bytes);
 
+	rmnet_work_quit = false;
 	qmi_rmnet_work_set_active(rmnet_work->port, 1);
 	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
 }
@@ -939,6 +1016,10 @@ void qmi_rmnet_work_exit(void *port)
 {
 	if (!rmnet_ps_wq || !rmnet_work)
 		return;
+
+	rmnet_work_quit = true;
+	synchronize_rcu();
+
 	cancel_delayed_work_sync(&rmnet_work->work);
 	destroy_workqueue(rmnet_ps_wq);
 	qmi_rmnet_work_set_active(port, 0);
@@ -959,4 +1040,22 @@ void qmi_rmnet_set_dl_msg_active(void *port)
 	qmi->dl_msg_active = true;
 }
 EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);
+
+void qmi_rmnet_flush_ps_wq(void)
+{
+	if (rmnet_ps_wq)
+		flush_workqueue(rmnet_ps_wq);
+}
+
+bool qmi_rmnet_ignore_grant(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return false;
+
+	return qmi->ps_ignore_grant;
+}
+EXPORT_SYMBOL(qmi_rmnet_ignore_grant);
 #endif
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 7fe4862..1466822 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -12,18 +12,15 @@
 #define IP_VER_4 4
 #define IP_VER_6 6
 
+#define MAX_MQ_NUM 10
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
 #define DEFAULT_GRANT 1
 #define DFC_MAX_BEARERS_V01 16
 
-struct rmnet_flow_map {
-	struct list_head list;
-	u8 bearer_id;
-	u32 flow_id;
-	int ip_type;
-	u32 tcm_handle;
-};
+#define DFC_MODE_FLOW_ID 2
+#define DFC_MODE_MQ_NUM 3
+extern int dfc_mode;
 
 struct rmnet_bearer_map {
 	struct list_head list;
@@ -37,6 +34,16 @@ struct rmnet_bearer_map {
 	u16 last_seq;
 	bool tcp_bidir;
 	bool rat_switch;
+	bool tx_off;
+};
+
+struct rmnet_flow_map {
+	struct list_head list;
+	u8 bearer_id;
+	u32 flow_id;
+	int ip_type;
+	u32 mq_idx;
+	struct rmnet_bearer_map *bearer;
 };
 
 struct svc_info {
@@ -45,21 +52,22 @@ struct svc_info {
 	u32 iface_id;
 };
 
+struct mq_map {
+	struct rmnet_bearer_map *bearer;
+	bool ancillary;
+};
+
 struct qos_info {
 	u8 mux_id;
 	struct net_device *real_dev;
 	struct list_head flow_head;
 	struct list_head bearer_head;
+	struct mq_map mq[MAX_MQ_NUM];
 	u32 default_grant;
 	u32 tran_num;
 	spinlock_t qos_lock;
 };
 
-struct flow_info {
-	struct net_device *dev;
-	struct rmnet_flow_map *itm;
-};
-
 struct qmi_info {
 	int flag;
 	void *wda_client;
@@ -69,6 +77,7 @@ struct qmi_info {
 	unsigned long ps_work_active;
 	bool ps_enabled;
 	bool dl_msg_active;
+	bool ps_ignore_grant;
 };
 
 enum data_ep_type_enum_v01 {
@@ -109,11 +118,13 @@ void dfc_qmi_client_exit(void *dfc_data);
 void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 			 int ip_type, u32 mark, unsigned int len);
 
-int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
-
-void dfc_qmi_wq_flush(struct qmi_info *qmi);
+int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable);
 
 void dfc_qmi_query_flow(void *dfc_data);
+
+int dfc_bearer_flow_ctl(struct net_device *dev,
+			struct rmnet_bearer_map *bearer,
+			struct qos_info *qos);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -146,13 +157,16 @@ dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 }
 
 static inline void
-dfc_qmi_wq_flush(struct qmi_info *qmi)
+dfc_qmi_query_flow(void *dfc_data)
 {
 }
 
-static inline void
-dfc_qmi_query_flow(void *dfc_data)
+static inline int
+dfc_bearer_flow_ctl(struct net_device *dev,
+		    struct rmnet_bearer_map *bearer,
+		    struct qos_info *qos)
 {
+	return 0;
 }
 #endif
 
@@ -161,6 +175,7 @@ int
 wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
 void wda_qmi_client_exit(void *wda_data);
 int wda_set_powersave_mode(void *wda_data, u8 enable);
+void qmi_rmnet_flush_ps_wq(void);
 #else
 static inline int
 wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
@@ -176,5 +191,8 @@ static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
 {
 	return -EINVAL;
 }
+static inline void qmi_rmnet_flush_ps_wq(void)
+{
+}
 #endif
 #endif /*_RMNET_QMI_I_H*/
diff --git a/drivers/soc/qcom/qtee_shmbridge.c b/drivers/soc/qcom/qtee_shmbridge.c
index f021b7c..7971146 100644
--- a/drivers/soc/qcom/qtee_shmbridge.c
+++ b/drivers/soc/qcom/qtee_shmbridge.c
@@ -93,9 +93,21 @@ struct bridge_info {
 	int min_alloc_order;
 	struct gen_pool *genpool;
 };
-static struct bridge_info default_bridge;
-static bool qtee_shmbridge_enabled;
 
+struct bridge_list {
+	struct list_head head;
+	struct mutex lock;
+};
+
+struct bridge_list_entry {
+	struct list_head list;
+	phys_addr_t paddr;
+	uint64_t handle;
+};
+
+static struct bridge_info default_bridge;
+static struct bridge_list bridge_list_head;
+static bool qtee_shmbridge_enabled;
 
 /* enable shared memory bridge mechanism in HYP */
 static int32_t qtee_shmbridge_enable(bool enable)
@@ -114,7 +126,7 @@ static int32_t qtee_shmbridge_enable(bool enable)
 	if (ret) {
 		pr_err("Failed to enable shmbridge, rsp = %lld, ret = %d\n",
 			desc.ret[0], ret);
-		return -EINVAL;
+		return ret;
 	}
 	qtee_shmbridge_enabled = true;
 	pr_warn("shmbridge is enabled\n");
@@ -128,6 +140,56 @@ bool qtee_shmbridge_is_enabled(void)
 }
 EXPORT_SYMBOL(qtee_shmbridge_is_enabled);
 
+int32_t qtee_shmbridge_list_add_nolock(phys_addr_t paddr, uint64_t handle)
+{
+	struct bridge_list_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->handle = handle;
+	entry->paddr = paddr;
+	list_add_tail(&entry->list, &bridge_list_head.head);
+	return 0;
+}
+
+void qtee_shmbridge_list_del_nolock(uint64_t handle)
+{
+	struct bridge_list_entry *entry;
+
+	list_for_each_entry(entry, &bridge_list_head.head, list) {
+		if (entry->handle == handle) {
+			list_del(&entry->list);
+			kfree(entry);
+			break;
+		}
+	}
+}
+
+int32_t qtee_shmbridge_query_nolock(phys_addr_t paddr)
+{
+	struct bridge_list_entry *entry;
+
+	list_for_each_entry(entry, &bridge_list_head.head, list)
+		if (entry->paddr == paddr) {
+			pr_debug("A bridge on %llx exists\n", (uint64_t)paddr);
+			return -EEXIST;
+		}
+	return 0;
+}
+
+/* Check whether a bridge starting from paddr exists */
+int32_t qtee_shmbridge_query(phys_addr_t paddr)
+{
+	int32_t ret = 0;
+
+	mutex_lock(&bridge_list_head.lock);
+	ret = qtee_shmbridge_query_nolock(paddr);
+	mutex_unlock(&bridge_list_head.lock);
+	return ret;
+}
+EXPORT_SYMBOL(qtee_shmbridge_query);
+
 /* Register paddr & size as a bridge, return bridge handle */
 int32_t qtee_shmbridge_register(
 		phys_addr_t paddr,
@@ -145,12 +207,20 @@ int32_t qtee_shmbridge_register(
 	struct scm_desc desc = {0};
 	int i = 0;
 
+	if (!qtee_shmbridge_enabled)
+		return 0;
+
 	if (!handle || !ns_vmid_list || !ns_vm_perm_list ||
 				ns_vmid_num > MAXSHMVMS) {
 		pr_err("invalid input parameters\n");
 		return -EINVAL;
 	}
 
+	mutex_lock(&bridge_list_head.lock);
+	ret = qtee_shmbridge_query_nolock(paddr);
+	if (ret)
+		goto exit;
+
 	for (i = 0; i < ns_vmid_num; i++) {
 		ns_perms = UPDATE_NS_PERMS(ns_perms, ns_vm_perm_list[i]);
 		ns_vmids = UPDATE_NS_VMIDS(ns_vmids, ns_vmid_list[i]);
@@ -169,10 +239,15 @@ int32_t qtee_shmbridge_register(
 	if (ret || desc.ret[0]) {
 		pr_err("create shmbridge failed, ret = %d, status = %llx\n",
 				ret, desc.ret[0]);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 	*handle = desc.ret[1];
-	return 0;
+
+	ret = qtee_shmbridge_list_add_nolock(paddr, *handle);
+exit:
+	mutex_unlock(&bridge_list_head.lock);
+	return ret;
 }
 EXPORT_SYMBOL(qtee_shmbridge_register);
 
@@ -182,14 +257,22 @@ int32_t qtee_shmbridge_deregister(uint64_t handle)
 	int32_t ret = 0;
 	struct scm_desc desc = {0};
 
+	if (!qtee_shmbridge_enabled)
+		return 0;
+
+	mutex_lock(&bridge_list_head.lock);
 	desc.arginfo = TZ_SHM_BRIDGE_DELETE_PARAM_ID;
 	desc.args[0] = handle;
 	ret = scm_call2(TZ_SHM_BRIDGE_DELETE, &desc);
 	if (ret) {
-		pr_err("scm_call to delete shmbridge failed, ret = %d\n", ret);
-		return ret;
+		pr_err("Failed to del bridge %lld, ret = %d\n", handle, ret);
+		goto exit;
 	}
-	return 0;
+	qtee_shmbridge_list_del_nolock(handle);
+
+exit:
+	mutex_unlock(&bridge_list_head.lock);
+	return ret;
 }
 EXPORT_SYMBOL(qtee_shmbridge_deregister);
 
@@ -238,7 +321,7 @@ EXPORT_SYMBOL(qtee_shmbridge_allocate_shm);
 /* Free buffer that is sub-allocated from default kernel bridge */
 void qtee_shmbridge_free_shm(struct qtee_shm *shm)
 {
-	if (IS_ERR_OR_NULL(shm))
+	if (IS_ERR_OR_NULL(shm) || !shm->vaddr)
 		return;
 	gen_pool_free(default_bridge.genpool, (unsigned long)shm->vaddr,
 		      shm->size);
@@ -257,22 +340,48 @@ static int __init qtee_shmbridge_init(void)
 
 	if (default_bridge.vaddr) {
 		pr_warn("qtee shmbridge is already initialized\n");
-		goto exit;
+		return 0;
 	}
 
-	/* do not enable shm bridge mechanism for now*/
-	ret = qtee_shmbridge_enable(false);
-	if (ret)
-		goto exit;
-
 	/* allocate a contiguous buffer */
 	default_bridge.size = DEFAULT_BRIDGE_SIZE;
 	default_bridge.vaddr = kzalloc(default_bridge.size, GFP_KERNEL);
-	if (!default_bridge.vaddr) {
+	if (!default_bridge.vaddr)
+		return -ENOMEM;
+	default_bridge.paddr = virt_to_phys(default_bridge.vaddr);
+
+	/* create a general mem pool */
+	default_bridge.min_alloc_order = 3; /* 8 byte aligned */
+	default_bridge.genpool = gen_pool_create(
+					default_bridge.min_alloc_order, -1);
+	if (!default_bridge.genpool) {
+		pr_err("gen_pool_add_virt() failed\n");
 		ret = -ENOMEM;
+		goto exit_freebuf;
+	}
+
+	gen_pool_set_algo(default_bridge.genpool, gen_pool_best_fit, NULL);
+	ret = gen_pool_add_virt(default_bridge.genpool,
+			(uintptr_t)default_bridge.vaddr,
+				default_bridge.paddr, default_bridge.size, -1);
+	if (ret) {
+		pr_err("gen_pool_add_virt() failed, ret = %d\n", ret);
+		goto exit_destroy_pool;
+	}
+
+	mutex_init(&bridge_list_head.lock);
+	INIT_LIST_HEAD(&bridge_list_head.head);
+
+	/* do not enable shm bridge mechanism for now*/
+	ret = qtee_shmbridge_enable(false);
+	if (ret) {
+		if (ret == -EIO) {
+			/* keep the mem pool even shmbridge isn't supported */
+			pr_warn("shmbridge feature is not supported\n");
+			ret = 0;
+		}
 		goto exit;
 	}
-	default_bridge.paddr = virt_to_phys(default_bridge.vaddr);
 
 	/*register default bridge*/
 	ret = qtee_shmbridge_register(default_bridge.paddr,
@@ -282,37 +391,16 @@ static int __init qtee_shmbridge_init(void)
 	if (ret) {
 		pr_err("Failed to register default bridge, size %zu\n",
 			default_bridge.size);
-		goto exit_freebuf;
+		goto exit;
 	}
 
-	/* create a general mem pool */
-	default_bridge.min_alloc_order = 3; /* 8 byte aligned */
-	default_bridge.genpool = gen_pool_create(
-					default_bridge.min_alloc_order, -1);
-	if (!default_bridge.genpool) {
-		pr_err("gen_pool_add_virt() failed\n");
-		ret = -ENOMEM;
-		goto exit_dereg;
-	}
-
-	gen_pool_set_algo(default_bridge.genpool, gen_pool_best_fit, NULL);
-	ret = gen_pool_add_virt(default_bridge.genpool,
-			(uintptr_t)default_bridge.vaddr,
-				default_bridge.paddr, default_bridge.size, -1);
-	if (ret) {
-		pr_err("gen_pool_add_virt() failed\n");
-		goto exit_destroy_pool;
-	}
-
-	pr_warn("qtee shmbridge registered default bridge with size %d bytes\n",
+	pr_debug("qtee shmbridge registered default bridge with size %d bytes\n",
 			DEFAULT_BRIDGE_SIZE);
 
 	return 0;
 
 exit_destroy_pool:
 	gen_pool_destroy(default_bridge.genpool);
-exit_dereg:
-	qtee_shmbridge_deregister(default_bridge.handle);
 exit_freebuf:
 	kfree(default_bridge.vaddr);
 exit:
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index cd6e67a..7b17bbb 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -16,6 +16,7 @@
 #include <asm/compiler.h>
 
 #include <soc/qcom/scm.h>
+#include <soc/qcom/qtee_shmbridge.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/scm.h>
@@ -314,22 +315,23 @@ bool is_scm_armv8(void)
 static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
 {
 	int i, j;
+	int rc;
 	struct scm_extra_arg *argbuf;
 	int arglen = desc->arginfo & 0xf;
+	struct qtee_shm shm;
 	size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
 
 	desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
 
-	if (likely(arglen <= N_REGISTER_ARGS)) {
-		desc->extra_arg_buf = NULL;
+	if (likely(arglen <= N_REGISTER_ARGS))
 		return 0;
-	}
 
-	argbuf = kzalloc(argbuflen, flags);
-	if (!argbuf)
-		return -ENOMEM;
+	rc = qtee_shmbridge_allocate_shm(argbuflen, &shm);
+	if (rc)
+		return rc;
 
-	desc->extra_arg_buf = argbuf;
+	desc->shm = shm;
+	argbuf = shm.vaddr;
 
 	j = FIRST_EXT_ARG_IDX;
 	if (scm_version == SCM_ARMV8_64)
@@ -338,10 +340,10 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
 	else
 		for (i = 0; i < N_EXT_SCM_ARGS; i++)
 			argbuf->args32[i] = desc->args[j++];
-	desc->x5 = virt_to_phys(argbuf);
+
+	desc->x5 = shm.paddr;
 	__cpuc_flush_dcache_area(argbuf, argbuflen);
-	outer_flush_range(virt_to_phys(argbuf),
-			  virt_to_phys(argbuf) + argbuflen);
+	outer_flush_range(shm.paddr, shm.paddr + argbuflen);
 
 	return 0;
 }
@@ -402,7 +404,7 @@ static int __scm_call2(u32 fn_id, struct scm_desc *desc, bool retry)
 			x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
 
 	if (arglen > N_REGISTER_ARGS)
-		kfree(desc->extra_arg_buf);
+		qtee_shmbridge_free_shm(&desc->shm);
 	if (ret < 0)
 		return scm_remap_error(ret);
 	return 0;
@@ -488,7 +490,7 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
 			desc->ret[1], desc->ret[2]);
 
 	if (arglen > N_REGISTER_ARGS)
-		kfree(desc->extra_arg_buf);
+		qtee_shmbridge_free_shm(&desc->shm);
 	if (ret < 0)
 		return scm_remap_error(ret);
 	return ret;
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index dd1ac3b..1ab2680 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/highmem.h>
@@ -438,3 +438,14 @@ bool msm_secure_v2_is_supported(void)
 	return (scm_get_feat_version(FEATURE_ID_CP) >=
 			MAKE_CP_VERSION(1, 1, 0));
 }
+
+u32 msm_secure_get_vmid_perms(u32 vmid)
+{
+	if (vmid == VMID_CP_SEC_DISPLAY)
+		return PERM_READ;
+	else if (vmid == VMID_CP_CDSP)
+		return PERM_READ | PERM_WRITE | PERM_EXEC;
+	else
+		return PERM_READ | PERM_WRITE;
+}
+EXPORT_SYMBOL(msm_secure_get_vmid_perms);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index 0c1755f..5fd72b6 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -21,6 +21,7 @@
 #include <soc/qcom/scm.h>
 #include <asm/cacheflush.h>
 #include <soc/qcom/qseecomi.h>
+#include <soc/qcom/qtee_shmbridge.h>
 
 #include "smcinvoke_object.h"
 #include "../../misc/qseecom_kernel.h"
@@ -125,6 +126,10 @@
 #define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops)
 
 static DEFINE_MUTEX(g_smcinvoke_lock);
+#define NO_LOCK 0
+#define TAKE_LOCK 1
+#define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); }
+#define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); }
 static DEFINE_HASHTABLE(g_cb_servers, 8);
 static LIST_HEAD(g_mem_objs);
 static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START;
@@ -134,6 +139,7 @@ static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
 static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
 static int smcinvoke_open(struct inode *, struct file *);
 static int smcinvoke_release(struct inode *, struct file *);
+static int destroy_cb_server(uint16_t);
 
 static const struct file_operations g_smcinvoke_fops = {
 	.owner		= THIS_MODULE,
@@ -195,6 +201,7 @@ struct smcinvoke_cb_txn {
 	size_t cb_req_bytes;
 	struct file **filp_to_release;
 	struct hlist_node hash;
+	struct kref ref_cnt;
 };
 
 struct smcinvoke_server_info {
@@ -272,10 +279,8 @@ static  struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
 {
 	struct smcinvoke_mem_obj *mem_obj = NULL;
 
-	if (list_empty(&g_mem_objs)) {
-		pr_err("%s: mem obj %d not found\n", __func__, mem_obj_id);
+	if (list_empty(&g_mem_objs))
 		return NULL;
-	}
 
 	list_for_each_entry(mem_obj, &g_mem_objs, list) {
 		if ((is_mem_rgn_obj &&
@@ -373,8 +378,10 @@ static void free_pending_cbobj_locked(struct kref *kref)
 	server = obj->server;
 	kfree(obj);
 	if ((server->state == SMCINVOKE_SERVER_STATE_DEFUNCT) &&
-				list_empty(&server->pending_cbobjs))
+				list_empty(&server->pending_cbobjs)) {
+		hash_del(&server->hash);
 		kfree(server);
+	}
 }
 
 static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
@@ -445,6 +452,15 @@ static void release_tzhandles(const int32_t *tzhandles, size_t len)
 	mutex_unlock(&g_smcinvoke_lock);
 }
 
+static void delete_cb_txn(struct kref *kref)
+{
+	struct smcinvoke_cb_txn *cb_txn = container_of(kref,
+					struct smcinvoke_cb_txn, ref_cnt);
+
+	kfree(cb_txn->cb_req);
+	kfree(cb_txn);
+}
+
 static struct smcinvoke_cb_txn *find_cbtxn_locked(
 				struct smcinvoke_server_info *server,
 				uint32_t txn_id, int32_t state)
@@ -459,6 +475,7 @@ static struct smcinvoke_cb_txn *find_cbtxn_locked(
 	if (state == SMCINVOKE_REQ_PLACED) {
 		/* pick up 1st req */
 		hash_for_each(server->reqs_table, i, cb_txn, hash) {
+			kref_get(&cb_txn->ref_cnt);
 			hash_del(&cb_txn->hash);
 			return cb_txn;
 		}
@@ -466,6 +483,7 @@ static struct smcinvoke_cb_txn *find_cbtxn_locked(
 		hash_for_each_possible(
 				server->responses_table, cb_txn, hash, txn_id) {
 			if (cb_txn->txn_id == txn_id) {
+				kref_get(&cb_txn->ref_cnt);
 				hash_del(&cb_txn->hash);
 				return cb_txn;
 			}
@@ -612,14 +630,10 @@ static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
 		}
 	}
 out:
-	if (ret && *filp) {
-		fput(*filp);
-		*filp = NULL;
-	}
 	return ret;
 }
 
-static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int64_t *fd)
+static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd)
 {
 	int unused_fd = -1, ret = -EINVAL;
 	struct file *f = NULL;
@@ -663,7 +677,7 @@ static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int64_t *fd)
 }
 
 static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
-						int64_t *uhandle)
+				int32_t *uhandle, bool lock)
 {
 	int ret = -1;
 
@@ -674,14 +688,14 @@ static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
 		if (srvr_id != TZHANDLE_GET_SERVER(tzhandle))
 			goto out;
 		*uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle));
-		mutex_lock(&g_smcinvoke_lock);
-		ret = get_pending_cbobj_locked(srvr_id,
+		MUTEX_LOCK(lock)
+		ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
 						TZHANDLE_GET_OBJID(tzhandle));
-		mutex_unlock(&g_smcinvoke_lock);
+		MUTEX_UNLOCK(lock)
 	} else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) {
 		struct smcinvoke_mem_obj *mem_obj =  NULL;
 
-		mutex_lock(&g_smcinvoke_lock);
+		MUTEX_LOCK(lock)
 		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
 						SMCINVOKE_MEM_RGN_OBJ);
 
@@ -699,7 +713,7 @@ static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
 			ret = 0;
 		}
 exit_lock:
-		mutex_unlock(&g_smcinvoke_lock);
+		MUTEX_UNLOCK(lock)
 	} else if (TZHANDLE_IS_REMOTE(tzhandle)) {
 		/* if execution comes here => tzhandle is an unsigned int */
 		ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ,
@@ -826,7 +840,12 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	if (buf_len < sizeof(struct smcinvoke_tzcb_req))
 		return;
 
-	cb_req = buf;
+	cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
+	if (!cb_req) {
+		ret =  OBJECT_ERROR_KMEM;
+		goto out;
+	}
+
 	/* check whether it is to be served by kernel or userspace */
 	if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
 		return process_kernel_obj(buf, buf_len);
@@ -847,6 +866,7 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	cb_txn->cb_req = cb_req;
 	cb_txn->cb_req_bytes = buf_len;
 	cb_txn->filp_to_release = arr_filp;
+	kref_init(&cb_txn->ref_cnt);
 
 	mutex_lock(&g_smcinvoke_lock);
 	srvr_info = find_cb_server_locked(
@@ -860,9 +880,11 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
 	mutex_unlock(&g_smcinvoke_lock);
 	wake_up_interruptible(&srvr_info->req_wait_q);
-	wait_event(srvr_info->rsp_wait_q,
+	ret = wait_event_interruptible(srvr_info->rsp_wait_q,
 			(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
 			(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT));
+	if (ret)
+		pr_err("%s wait_event interrupted: ret = %d\n", __func__, ret);
 out:
 	/*
 	 * If we are here, either req is processed or not
@@ -870,19 +892,19 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	 * if not processed, we should set result with ret which should have
 	 * correct value that TZ/TA can understand
 	 */
+	mutex_lock(&g_smcinvoke_lock);
 	if (!cb_txn || (cb_txn->state != SMCINVOKE_REQ_PROCESSED)) {
 		cb_req->result = ret;
 		if (srvr_info &&
 		    srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT &&
 		    OBJECT_OP_METHODID(cb_req->hdr.op) == OBJECT_OP_RELEASE) {
-			mutex_lock(&g_smcinvoke_lock);
-			put_pending_cbobj_locked(
-				TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle),
-				TZHANDLE_GET_OBJID(cb_req->hdr.tzhandle));
-			mutex_unlock(&g_smcinvoke_lock);
+			release_tzhandle_locked(cb_req->hdr.tzhandle);
 		}
 	}
-	kfree(cb_txn);
+	hash_del(&cb_txn->hash);
+	memcpy(buf, cb_req, buf_len);
+	kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+	mutex_unlock(&g_smcinvoke_lock);
 }
 
 static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
@@ -931,7 +953,8 @@ static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
 		 * to server who serves it and that info comes from USpace.
 		 */
 		ret = get_uhandle_from_tzhandle(tz_args->handle,
-			args_buf[i].o.cb_server_fd, &(args_buf[i].o.fd));
+					TZHANDLE_GET_SERVER(tz_args->handle),
+				(int32_t *)&(args_buf[i].o.fd), NO_LOCK);
 		if (ret)
 			goto out;
 		tz_args++;
@@ -948,8 +971,10 @@ static bool is_inbound_req(int val)
 		val == QSEOS_RESULT_BLOCKED_ON_LISTENER);
 }
 
-static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
-				uint8_t *out_buf, size_t out_buf_len,
+static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
+				size_t in_buf_len,
+				uint8_t *out_buf, phys_addr_t out_paddr,
+				size_t out_buf_len,
 				struct smcinvoke_cmd_req *req,
 				union smcinvoke_arg *args_buf,
 				bool *tz_acked)
@@ -964,9 +989,9 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
 		return -EINVAL;
 
 	desc.arginfo = SMCINVOKE_INVOKE_PARAM_ID;
-	desc.args[0] = (uint64_t)virt_to_phys(in_buf);
+	desc.args[0] = (uint64_t)in_paddr;
 	desc.args[1] = in_buf_len;
-	desc.args[2] = (uint64_t)virt_to_phys(out_buf);
+	desc.args[2] = (uint64_t)out_paddr;
 	desc.args[3] = out_buf_len;
 	cmd = SMCINVOKE_INVOKE_CMD;
 	dmac_flush_range(in_buf, in_buf + in_buf_len);
@@ -1146,7 +1171,7 @@ static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
 
 	user_req->txn_id = cb_txn->txn_id;
 	if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id,
-					(int64_t *)&user_req->cbobj_id)) {
+				&user_req->cbobj_id, TAKE_LOCK)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1208,7 +1233,7 @@ static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
 		 * context
 		 */
 		ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id,
-							&(tmp_arg.o.fd));
+					(int32_t *)&(tmp_arg.o.fd), TAKE_LOCK);
 		if (ret) {
 			ret = -EINVAL;
 			goto out;
@@ -1233,6 +1258,7 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
 	union smcinvoke_tz_args *tz_args = tzcb_req->args;
 
+	release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
 	tzcb_req->result = user_req->result;
 	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
 		union smcinvoke_arg tmp_arg;
@@ -1262,12 +1288,17 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 			ret = -EFAULT;
 			goto out;
 		}
-		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd, 0, &arr_filp[i],
-							&(tz_args[i].handle));
+		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
+				tmp_arg.o.cb_server_fd, &arr_filp[i],
+						&(tz_args[i].handle));
 		if (ret)
 			goto out;
 		tzhandles_to_release[i] = tz_args[i].handle;
 	}
+	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
+		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
+			release_tzhandles(&tz_args[i].handle, 1);
+	}
 	ret = 0;
 out:
 	if (ret)
@@ -1324,7 +1355,7 @@ static long process_server_req(struct file *filp, unsigned int cmd,
 						 unsigned long arg)
 {
 	int ret = -1;
-	int64_t server_fd = -1;
+	int32_t server_fd = -1;
 	struct smcinvoke_server server_req = {0};
 	struct smcinvoke_server_info *server_info = NULL;
 
@@ -1359,12 +1390,9 @@ static long process_server_req(struct file *filp, unsigned int cmd,
 	ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER,
 				server_info->server_id, &server_fd);
 
-	if (ret) {
-		mutex_lock(&g_smcinvoke_lock);
-		hash_del(&server_info->hash);
-		mutex_unlock(&g_smcinvoke_lock);
-		kfree(server_info);
-	}
+	if (ret)
+		destroy_cb_server(server_info->server_id);
+
 	return server_fd;
 }
 
@@ -1419,11 +1447,10 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
 			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
 
 		if (OBJECT_OP_METHODID(user_args.op) == OBJECT_OP_RELEASE)
-			put_pending_cbobj_locked(
-			    TZHANDLE_GET_SERVER(cb_txn->cb_req->hdr.tzhandle),
-			    TZHANDLE_GET_OBJID(cb_txn->cb_req->hdr.tzhandle));
+			release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
 
 		cb_txn->state = SMCINVOKE_REQ_PROCESSED;
+		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
 		wake_up(&server_info->rsp_wait_q);
 		/*
 		 * if marshal_out fails, we should let userspace release
@@ -1439,8 +1466,11 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
 	do {
 		ret = wait_event_interruptible(server_info->req_wait_q,
 				!hash_empty(server_info->reqs_table));
-		if (ret)
+		if (ret) {
+			pr_err("%s wait_event interrupted: ret = %d\n",
+							__func__, ret);
 			goto out;
+		}
 
 		mutex_lock(&g_smcinvoke_lock);
 		cb_txn = find_cbtxn_locked(server_info,
@@ -1454,12 +1484,14 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
 			if (ret) {
 				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
 				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
+				kref_put(&cb_txn->ref_cnt, delete_cb_txn);
 				wake_up_interruptible(&server_info->rsp_wait_q);
 				continue;
 			}
 			mutex_lock(&g_smcinvoke_lock);
 			hash_add(server_info->responses_table, &cb_txn->hash,
 							cb_txn->txn_id);
+			kref_put(&cb_txn->ref_cnt, delete_cb_txn);
 			mutex_unlock(&g_smcinvoke_lock);
 			ret =  copy_to_user((void __user *)arg, &user_args,
 					sizeof(struct smcinvoke_accept));
@@ -1478,6 +1510,7 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
 	size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
 	union  smcinvoke_arg *args_buf = NULL;
 	struct smcinvoke_file_data *tzobj = filp->private_data;
+	static struct qtee_shm in_shm = {0}, out_shm = {0};
 	/*
 	 * Hold reference to remote object until invoke op is not
 	 * completed. Release once invoke is done.
@@ -1529,31 +1562,30 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
 	}
 
 	inmsg_size = compute_in_msg_size(&req, args_buf);
-	in_msg = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP,
-						get_order(inmsg_size));
-	if (!in_msg) {
+	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
+	if (ret) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(in_msg, 0, inmsg_size);
+	in_msg = in_shm.vaddr;
 
 	mutex_lock(&g_smcinvoke_lock);
 	outmsg_size = PAGE_ALIGN(g_max_cb_buf_size);
 	mutex_unlock(&g_smcinvoke_lock);
-	out_msg = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP,
-						get_order(outmsg_size));
-	if (!out_msg) {
+	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
+	if (ret) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(out_msg, 0, outmsg_size);
+	out_msg = out_shm.vaddr;
 
 	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
 			inmsg_size, filp_to_release, tzhandles_to_release);
 	if (ret)
 		goto out;
 
-	ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg, outmsg_size,
+	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
+					out_msg, out_shm.paddr, outmsg_size,
 					&req, args_buf, &tz_acked);
 
 	/*
@@ -1589,8 +1621,8 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
 	release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO);
 	if (ret)
 		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
-	free_pages((long)out_msg, get_order(outmsg_size));
-	free_pages((long)in_msg, get_order(inmsg_size));
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
 	kfree(args_buf);
 	return ret;
 }
@@ -1669,6 +1701,7 @@ static int smcinvoke_release(struct inode *nodp, struct file *filp)
 	struct smcinvoke_file_data *file_data = filp->private_data;
 	struct smcinvoke_cmd_req req = {0};
 	uint32_t tzhandle = 0;
+	struct qtee_shm in_shm = {0}, out_shm = {0};
 
 	if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) {
 		ret = destroy_cb_server(file_data->server_id);
@@ -1680,24 +1713,34 @@ static int smcinvoke_release(struct inode *nodp, struct file *filp)
 	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
 		goto out;
 
-	in_buf = (uint8_t *)__get_free_page(GFP_KERNEL | __GFP_COMP);
-	out_buf = (uint8_t *)__get_free_page(GFP_KERNEL | __GFP_COMP);
-	if (!in_buf || !out_buf) {
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
+	if (ret) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	in_buf = in_shm.vaddr;
+	out_buf = out_shm.vaddr;
 	hdr.tzhandle = tzhandle;
 	hdr.op = OBJECT_OP_RELEASE;
 	hdr.counts = 0;
 	*(struct smcinvoke_msg_hdr *)in_buf = hdr;
 
-	ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf,
+	ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
+		SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
 		SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, &release_handles);
+
+	process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
 out:
 	kfree(filp->private_data);
-	free_page((long)in_buf);
-	free_page((long)out_buf);
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
 
 	return ret;
 }
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index f98d7bf..4b378ea 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -318,6 +318,9 @@ static struct msm_soc_info cpu_of_id[] = {
 	/* Lito ID */
 	[400] = {MSM_CPU_LITO, "LITO"},
 
+	/* Bengal ID */
+	[417] = {MSM_CPU_BENGAL, "BENGAL"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1185,6 +1188,10 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 400;
 		strlcpy(dummy_socinfo.build_id, "lito - ",
 		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_bengal()) {
+		dummy_socinfo.id = 417;
+		strlcpy(dummy_socinfo.build_id, "bengal - ",
+		sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdmshrike()) {
 		dummy_socinfo.id = 340;
 		strlcpy(dummy_socinfo.build_id, "sdmshrike - ",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 11df576..c2b48dd 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -11,13 +11,14 @@
  *
  * It provides interface to userspace spcomlib.
  *
- * Userspace application shall use spcomlib for communication with SP. Userspace
- * application can be either client or server. spcomlib shall use write() file
- * operation to send data, and read() file operation to read data.
+ * Userspace application shall use spcomlib for communication with SP.
+ * Userspace application can be either client or server. spcomlib shall
+ * use write() file operation to send data, and read() file operation
+ * to read data.
  *
  * This driver uses RPMSG with glink-spss as a transport layer.
- * This driver exposes "/dev/<sp-channel-name>" file node for each rpmsg logical
- * channel.
+ * This driver exposes "/dev/<sp-channel-name>" file node for each rpmsg
+ * logical channel.
  * This driver exposes "/dev/spcom" file node for some debug/control command.
  * The predefined channel "/dev/sp_kernel" is used for loading SP application
  * from HLOS.
@@ -76,6 +77,9 @@
 /* SPCOM driver name */
 #define DEVICE_NAME	"spcom"
 
+/* maximum clients that can register over a single channel */
+#define SPCOM_MAX_CHANNEL_CLIENTS 2
+
 /* maximum ION buffers should be >= SPCOM_MAX_CHANNELS  */
 #define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
 
@@ -161,11 +165,16 @@ struct spcom_channel {
 	struct completion rx_done;
 	struct completion connect;
 
-	/*
-	 * Only one client or server per channel.
-	 * Only one rx/tx transaction at a time (request + response).
+	/**
+	 * Only one client or server per non-sharable channel.
+	 * SPCOM_MAX_CHANNEL_CLIENTS clients for sharable channel
+	 * Only one tx-rx transaction at a time (request + response).
 	 */
 	bool is_busy;
+	bool is_sharable;              /* channel's sharable property   */
+	u32 active_pid;                /* current tx-rx transaction pid */
+	uint8_t num_clients;           /* current number of clients     */
+	struct mutex shared_sync_lock;
 
 	u32 pid; /* debug only to find user space application */
 
@@ -239,7 +248,7 @@ static u32 spcom_soc2sp_rmb_reg_addr;
 static u32 spcom_soc2sp_rmb_sp_ssr_mask;
 
 /* static functions declaration */
-static int spcom_create_channel_chardev(const char *name);
+static int spcom_create_channel_chardev(const char *name, bool is_sharable);
 static struct spcom_channel *spcom_find_channel_by_name(const char *name);
 static int spcom_register_rpmsg_drv(struct spcom_channel *ch);
 static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch);
@@ -288,7 +297,7 @@ static int spcom_create_predefined_channels_chardev(void)
 
 		if (name[0] == 0)
 			break;
-		ret = spcom_create_channel_chardev(name);
+		ret = spcom_create_channel_chardev(name, false);
 		if (ret) {
 			pr_err("failed to create chardev [%s], ret [%d]\n",
 			       name, ret);
@@ -309,9 +318,12 @@ static int spcom_create_predefined_channels_chardev(void)
  * spcom_init_channel() - initialize channel state.
  *
  * @ch: channel state struct pointer
+ * @is_sharable: whether channel is sharable
  * @name: channel name
  */
-static int spcom_init_channel(struct spcom_channel *ch, const char *name)
+static int spcom_init_channel(struct spcom_channel *ch,
+			      bool is_sharable,
+			      const char *name)
 {
 	if (!ch || !name || !name[0]) {
 		pr_err("invalid parameters\n");
@@ -333,7 +345,10 @@ static int spcom_init_channel(struct spcom_channel *ch, const char *name)
 	ch->rpmsg_abort = false;
 	ch->rpmsg_rx_buf = NULL;
 	ch->comm_role_undefined = true;
-
+	ch->is_sharable = is_sharable;
+	ch->active_pid = 0;
+	ch->num_clients = 0;
+	mutex_init(&ch->shared_sync_lock);
 	return 0;
 }
 
@@ -529,7 +544,6 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
 {
 	int ret = 0;
 	struct spcom_user_create_channel_command *cmd = cmd_buf;
-	const char *ch_name;
 	const size_t maxlen = sizeof(cmd->ch_name);
 
 	if (cmd_size != sizeof(*cmd)) {
@@ -538,15 +552,14 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
 		return -EINVAL;
 	}
 
-	ch_name = cmd->ch_name;
 	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
 		pr_err("channel name is not NULL terminated\n");
 		return -EINVAL;
 	}
 
-	pr_debug("ch_name [%s]\n", ch_name);
+	pr_debug("ch_name [%s]\n", cmd->ch_name);
 
-	ret = spcom_create_channel_chardev(ch_name);
+	ret = spcom_create_channel_chardev(cmd->ch_name, cmd->is_sharable);
 
 	return ret;
 }
@@ -618,7 +631,7 @@ static int spcom_handle_restart_sp_command(void *cmd_buf, int cmd_size)
 		}
 
 		pr_debug("restart - Name: %s FW name: %s Depends on: %s\n",
-			desc_p->name, desc_p->fw_name, desc_p->depends_on);
+			desc_p->name, desc_p->fw_name, desc_p->pon_depends_on);
 		desc_powerup = desc_p->powerup;
 		/**
 		 * Overwrite the subsys PIL powerup function with an spcom
@@ -1151,9 +1164,21 @@ static int spcom_handle_write(struct spcom_channel *ch,
 
 	switch (cmd_id) {
 	case SPCOM_CMD_SEND:
+		if (ch->is_sharable) {
+			/* Channel shared, mutex protect TxRx */
+			mutex_lock(&ch->shared_sync_lock);
+			/* pid indicates the current active ch */
+			ch->active_pid = current_pid();
+		}
 		ret = spcom_handle_send_command(ch, buf, buf_size);
 		break;
 	case SPCOM_CMD_SEND_MODIFIED:
+		if (ch->is_sharable) {
+			/* Channel shared, mutex protect TxRx */
+			mutex_lock(&ch->shared_sync_lock);
+			/* pid indicates the current active ch */
+			ch->active_pid = current_pid();
+		}
 		ret = spcom_handle_send_modified_command(ch, buf, buf_size);
 		break;
 	case SPCOM_CMD_LOCK_ION_BUF:
@@ -1398,15 +1423,37 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
 			return ret;
 		}
 	}
-	/* only one client/server may use the channel */
+	/* max number of channel clients reached */
 	if (ch->is_busy) {
-		pr_err("channel [%s] is BUSY, already in use by pid [%d]\n",
-			name, ch->pid);
+		pr_err("channel [%s] is BUSY and has %d of clients, already in use by pid [%d]\n",
+			name, ch->num_clients, ch->pid);
 		mutex_unlock(&ch->lock);
 		return -EBUSY;
 	}
 
-	ch->is_busy = true;
+	/*
+	 * If the same active client tries to register again, this will fail.
+	 * Note: in the case of a shared channel with SPCOM_MAX_CHANNEL_CLIENTS > 2,
+	 * it is possible to register with the same pid if you are not the current
+	 * active client
+	 */
+	if (ch->pid == pid) {
+		pr_err("client is already registered with channel[%s]\n", name);
+		return -EINVAL;
+	}
+
+	if (ch->is_sharable) {
+		ch->num_clients++;
+		if (ch->num_clients >= SPCOM_MAX_CHANNEL_CLIENTS)
+			ch->is_busy = true;
+		else
+			ch->is_busy = false;
+	} else {
+		ch->num_clients = 1;
+		ch->is_busy = true;
+	}
+
+	/* pid has the last registered client's pid */
 	ch->pid = pid;
 	mutex_unlock(&ch->lock);
 
@@ -1460,8 +1507,31 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
 		return 0;
 	}
 
+	if (ch->num_clients > 1) {
+		/*
+		 * Shared client is trying to close channel,
+		 * release the sync_lock if applicable
+		 */
+		if (ch->active_pid == current_pid()) {
+			pr_debug("active_pid [%x] is releasing ch [%s] sync lock\n",
+				 ch->active_pid, name);
+			mutex_unlock(&ch->shared_sync_lock);
+			/* No longer the current active user of the channel */
+			ch->active_pid = 0;
+		}
+		ch->num_clients--;
+		ch->is_busy = false;
+		if (ch->num_clients > 0) {
+			mutex_unlock(&ch->lock);
+			return 0;
+		}
+	}
+
 	ch->is_busy = false;
 	ch->pid = 0;
+	ch->num_clients = 0;
+	ch->active_pid = 0;
+
 	if (ch->rpmsg_rx_buf) {
 		pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%zd\n",
 		       name, ch->actual_rx_size);
@@ -1471,7 +1541,6 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
 	ch->actual_rx_size = 0;
 	mutex_unlock(&ch->lock);
 	filp->private_data = NULL;
-
 	return ret;
 }
 
@@ -1515,6 +1584,12 @@ static ssize_t spcom_device_write(struct file *filp,
 		return -EINVAL;
 	}
 
+	if (size > SPCOM_MAX_COMMAND_SIZE) {
+		pr_err("size [%d] > max size [%d]\n",
+			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
+		return -EINVAL;
+	}
+
 	ch = filp->private_data;
 	if (!ch) {
 		if (strcmp(name, DEVICE_NAME) != 0) {
@@ -1529,12 +1604,6 @@ static ssize_t spcom_device_write(struct file *filp,
 			return -ENOTCONN;
 		}
 	}
-
-	if (size > SPCOM_MAX_COMMAND_SIZE) {
-		pr_err("size [%d] > max size [%d]\n",
-			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
-		return -EINVAL;
-	}
 	buf_size = size; /* explicit casting size_t to int */
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
@@ -1551,6 +1620,10 @@ static ssize_t spcom_device_write(struct file *filp,
 	if (ret) {
 		pr_err("handle command error [%d]\n", ret);
 		kfree(buf);
+		if (ch && ch->active_pid == current_pid()) {
+			mutex_unlock(&ch->shared_sync_lock);
+			ch->active_pid = 0;
+		}
 		return ret;
 	}
 
@@ -1576,6 +1649,7 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
 	uint32_t buf_size = 0;
+	u32 cur_pid = current_pid();
 
 	pr_debug("read file [%s], size = %d bytes\n", name, (int) size);
 
@@ -1604,34 +1678,47 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
 	}
 
 	buf = kzalloc(size, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	if (buf == NULL) {
+		ret =  -ENOMEM;
+		goto exit_err;
+	}
 
 	ret = spcom_handle_read(ch, buf, buf_size);
 	if (ret < 0) {
 		if (ret != -ERESTARTSYS)
 			pr_err("read error [%d]\n", ret);
-		kfree(buf);
-		return ret;
+		goto exit_err;
 	}
 	actual_size = ret;
 	if ((actual_size == 0) || (actual_size > size)) {
 		pr_err("invalid actual_size [%d]\n", actual_size);
-		kfree(buf);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto exit_err;
 	}
 
 	ret = copy_to_user(user_buff, buf, actual_size);
 	if (ret) {
 		pr_err("Unable to copy to user, err = %d\n", ret);
-		kfree(buf);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto exit_err;
 	}
 
 	kfree(buf);
 	pr_debug("ch [%s] ret [%d]\n", name, (int) actual_size);
 
+	if (ch->active_pid == cur_pid) {
+		mutex_unlock(&ch->shared_sync_lock);
+		ch->active_pid = 0;
+	}
 	return actual_size;
+
+exit_err:
+	kfree(buf);
+	if (ch->active_pid == cur_pid) {
+		mutex_unlock(&ch->shared_sync_lock);
+		ch->active_pid = 0;
+	}
+	return ret;
 }
 
 static inline int handle_poll(struct file *file,
@@ -1763,7 +1850,7 @@ static const struct file_operations fops = {
  * spcom_create_channel_chardev() - Create a channel char-dev node file
  * for user space interface
  */
-static int spcom_create_channel_chardev(const char *name)
+static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 {
 	int ret;
 	struct device *dev;
@@ -1779,7 +1866,7 @@ static int spcom_create_channel_chardev(const char *name)
 	ch = spcom_find_channel_by_name(name);
 	if (ch) {
 		pr_err("channel [%s] already exist\n", name);
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	ch = spcom_find_channel_by_name(""); /* find reserved channel */
@@ -1788,7 +1875,7 @@ static int spcom_create_channel_chardev(const char *name)
 		return -ENODEV;
 	}
 
-	ret = spcom_init_channel(ch, name);
+	ret = spcom_init_channel(ch, is_sharable, name);
 	if (ret < 0) {
 		pr_err("can't init channel %d\n", ret);
 		return ret;
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index 0f33a44..af8c225 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -29,11 +29,19 @@
 #include <linux/platform_device.h> /* platform_driver_register() */
 #include <linux/of.h>       /* of_property_count_strings() */
 #include <linux/io.h>       /* ioremap_nocache() */
+#include <linux/notifier.h>
+#include <linux/sizes.h>    /* SZ_4K */
+#include <linux/uaccess.h>  /* copy_from_user() */
 
+#include <soc/qcom/subsystem_notif.h>
 #include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/secure_buffer.h>     /* VMID_HLOS */
+
+#include <uapi/linux/ioctl.h>       /* ioctl() */
+#include <uapi/linux/spss_utils.h>  /* IOCTL to user space */
 
 /* driver name */
-#define DEVICE_NAME	"spss-utils"
+#define DEVICE_NAME	"spss_utils"
 
 enum spss_firmware_type {
 	SPSS_FW_TYPE_DEV = 'd',
@@ -51,10 +59,41 @@ static const char *firmware_name = "NA";
 static struct device *spss_dev;
 static u32 spss_debug_reg_addr; /* SP_SCSR_MBn_SP2CL_GPm(n,m) */
 static u32 spss_emul_type_reg_addr; /* TCSR_SOC_EMULATION_TYPE */
+static void *iar_notif_handle;
+static struct notifier_block *iar_nb;
+
+#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit */
+
+static u32 pil_addr;
+static u32 pil_size;
+static u32 cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* saved cmac */
+static u32 pbl_cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* pbl cmac */
+static u32 iar_state;
+static bool is_iar_enabled;
+static bool is_pbl_ce; /* Did SPU PBL perform Cryptographic Erase (CE)? */
+
+static void __iomem *cmac_mem;
+static size_t cmac_mem_size = SZ_4K; /* XPU align to 4KB */
+static phys_addr_t cmac_mem_addr;
 
 #define SPU_EMULATUION (BIT(0) | BIT(1))
 #define SPU_PRESENT_IN_EMULATION BIT(2)
 
+/**
+ * struct spss_utils_device - device state (char device info)
+ */
+struct spss_utils_device {
+	/* char device info */
+	struct cdev *cdev;
+	dev_t device_no;
+	struct class *driver_class;
+	struct device *class_dev;
+	struct platform_device *pdev;
+};
+
+/* Device State */
+static struct spss_utils_device *spss_utils_dev;
+
 /*==========================================================================*/
 /*		Device Sysfs */
 /*==========================================================================*/
@@ -65,6 +104,11 @@ static ssize_t firmware_name_show(struct device *dev,
 {
 	int ret;
 
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
 	if (firmware_name == NULL)
 		ret = scnprintf(buf, PAGE_SIZE, "%s\n", "unknown");
 	else
@@ -81,6 +125,11 @@ static ssize_t test_fuse_state_show(struct device *dev,
 {
 	int ret;
 
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
 	switch (firmware_type) {
 	case SPSS_FW_TYPE_DEV:
 		ret = scnprintf(buf, PAGE_SIZE, "%s", "dev");
@@ -108,7 +157,12 @@ static ssize_t spss_debug_reg_show(struct device *dev,
 	void __iomem *spss_debug_reg = NULL;
 	u32 val1, val2;
 
-	pr_debug("spss_debug_reg_addr [0x%x]\n", spss_debug_reg_addr);
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	pr_debug("spss_debug_reg_addr [0x%x].\n", spss_debug_reg_addr);
 
 	spss_debug_reg = ioremap_nocache(spss_debug_reg_addr, sizeof(u32)*2);
 
@@ -130,32 +184,287 @@ static ssize_t spss_debug_reg_show(struct device *dev,
 
 static DEVICE_ATTR_RO(spss_debug_reg);
 
+static ssize_t cmac_buf_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret = 0;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+		cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(cmac_buf);
+
+static ssize_t iar_state_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret = 0;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	/* show IAR-STATE from soc fuse */
+	ret = snprintf(buf, PAGE_SIZE, "0x%x\n", iar_state);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(iar_state);
+
+static ssize_t iar_enabled_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret = 0;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "0x%x\n", is_iar_enabled);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(iar_enabled);
+
+static ssize_t pbl_ce_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret = 0;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "0x%x\n", is_pbl_ce);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(pbl_ce);
+
+static ssize_t pbl_cmac_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret = 0;
+
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+	    pbl_cmac_buf[0], pbl_cmac_buf[1], pbl_cmac_buf[2], pbl_cmac_buf[3]);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(pbl_cmac);
+
+/*--------------------------------------------------------------------------*/
 static int spss_create_sysfs(struct device *dev)
 {
 	int ret;
 
 	ret = device_create_file(dev, &dev_attr_firmware_name);
 	if (ret < 0) {
-		pr_err("failed to create sysfs file for firmware_name\n");
+		pr_err("failed to create sysfs file for firmware_name.\n");
 		return ret;
 	}
 
 	ret = device_create_file(dev, &dev_attr_test_fuse_state);
 	if (ret < 0) {
-		pr_err("failed to create sysfs file for test_fuse_state\n");
-		device_remove_file(dev, &dev_attr_firmware_name);
-		return ret;
+		pr_err("failed to create sysfs file for test_fuse_state.\n");
+		goto remove_firmware_name;
 	}
 
 	ret = device_create_file(dev, &dev_attr_spss_debug_reg);
 	if (ret < 0) {
-		pr_err("failed to create sysfs file for spss_debug_reg\n");
-		device_remove_file(dev, &dev_attr_firmware_name);
-		device_remove_file(dev, &dev_attr_test_fuse_state);
-		return ret;
+		pr_err("failed to create sysfs file for spss_debug_reg.\n");
+		goto remove_test_fuse_state;
+	}
+
+	ret = device_create_file(dev, &dev_attr_cmac_buf);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for cmac_buf.\n");
+		goto remove_spss_debug_reg;
+	}
+
+	ret = device_create_file(dev, &dev_attr_iar_state);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for iar_state.\n");
+		goto remove_cmac_buf;
+	}
+
+	ret = device_create_file(dev, &dev_attr_iar_enabled);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for iar_enabled.\n");
+		goto remove_iar_state;
+	}
+
+	ret = device_create_file(dev, &dev_attr_pbl_ce);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for pbl_ce.\n");
+		goto remove_iar_enabled;
+	}
+
+	ret = device_create_file(dev, &dev_attr_pbl_cmac);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for pbl_cmac.\n");
+		goto remove_pbl_ce;
 	}
 
 	return 0;
+
+remove_pbl_ce:
+		device_remove_file(dev, &dev_attr_pbl_ce);
+remove_iar_enabled:
+		device_remove_file(dev, &dev_attr_iar_enabled);
+remove_iar_state:
+		device_remove_file(dev, &dev_attr_iar_state);
+remove_cmac_buf:
+		device_remove_file(dev, &dev_attr_cmac_buf);
+remove_spss_debug_reg:
+		device_remove_file(dev, &dev_attr_spss_debug_reg);
+remove_test_fuse_state:
+		device_remove_file(dev, &dev_attr_test_fuse_state);
+remove_firmware_name:
+		device_remove_file(dev, &dev_attr_firmware_name);
+
+	return ret;
+}
+
+/*==========================================================================*/
+/*  IOCTL */
+/*==========================================================================*/
+static long spss_utils_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	void *buf = (void *) arg;
+	unsigned char data[64] = {0};
+	size_t size = 0;
+
+	if (buf == NULL) {
+		pr_err("invalid ioctl arg\n");
+		return -EINVAL;
+	}
+
+	size = _IOC_SIZE(cmd);
+	if (size && (cmd & IOC_IN)) {
+		if (size > sizeof(data)) {
+			pr_err("cmd [0x%x] size [0x%zx] too large\n",
+				cmd, size);
+			return -EINVAL;
+		}
+
+		if (copy_from_user(data, (void __user *)arg, size)) {
+			pr_err("copy_from_user() failed, cmd [0x%x] size [0x%zx]\n",
+				cmd, size);
+			return -EFAULT;
+		}
+	}
+
+	switch (cmd) {
+	case SPSS_IOC_SET_FW_CMAC:
+		if (size != sizeof(cmac_buf)) {
+			pr_err("cmd [0x%x] invalid size [0x%zx]\n", cmd, size);
+			return -EINVAL;
+		}
+
+		memcpy(cmac_buf, data, sizeof(cmac_buf));
+		pr_info("cmac_buf: 0x%x,0x%x,0x%x,0x%x\n",
+			cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
+		break;
+
+	default:
+		pr_err("invalid ioctl cmd [0x%x]\n", cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct file_operations spss_utils_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = spss_utils_ioctl,
+	.compat_ioctl = spss_utils_ioctl,
+};
+
+static int spss_utils_create_chardev(struct device *dev)
+{
+	int ret;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	void *priv = (void *) spss_utils_dev;
+
+	spss_utils_dev->cdev =
+		kzalloc(sizeof(*spss_utils_dev->cdev), GFP_KERNEL);
+	if (!spss_utils_dev->cdev)
+		return -ENOMEM;
+
+	/* get device_no */
+	ret = alloc_chrdev_region(&spss_utils_dev->device_no, baseminor, count,
+				 DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		return ret;
+	}
+
+	spss_utils_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(spss_utils_dev->driver_class)) {
+		ret = PTR_ERR(spss_utils_dev->driver_class);
+		pr_err("class_create failed %d\n", ret);
+		goto exit_unreg_chrdev_region;
+	}
+
+	spss_utils_dev->class_dev =
+	    device_create(spss_utils_dev->driver_class, NULL,
+				  spss_utils_dev->device_no, priv,
+				  DEVICE_NAME);
+
+	if (IS_ERR(spss_utils_dev->class_dev)) {
+		ret = -ENOMEM;
+		pr_err("class_device_create failed %d\n", ret);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(spss_utils_dev->cdev, &spss_utils_fops);
+	spss_utils_dev->cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(spss_utils_dev->cdev,
+		       MKDEV(MAJOR(spss_utils_dev->device_no), 0),
+		       1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	pr_debug("char device created.\n");
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spss_utils_dev->driver_class, spss_utils_dev->device_no);
+exit_destroy_class:
+	class_destroy(spss_utils_dev->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(spss_utils_dev->device_no, 1);
+	return ret;
 }
 
 /*==========================================================================*/
@@ -176,6 +485,17 @@ static int spss_parse_dt(struct device_node *node)
 	u32 spss_fuse2_bit = 0;
 	u32 spss_fuse2_mask = 0;
 	void __iomem *spss_fuse2_reg = NULL;
+	/* IAR_FEATURE_ENABLED soc fuse */
+	u32 spss_fuse3_addr = 0;
+	u32 spss_fuse3_bit = 0;
+	u32 spss_fuse3_mask = 0;
+	void __iomem *spss_fuse3_reg = NULL;
+	/* IAR_STATE soc fuses */
+	u32 spss_fuse4_addr = 0;
+	u32 spss_fuse4_bit = 0;
+	u32 spss_fuse4_mask = 0;
+	void __iomem *spss_fuse4_reg = NULL;
+
 	u32 val1 = 0;
 	u32 val2 = 0;
 	void __iomem *spss_emul_type_reg = NULL;
@@ -310,9 +630,202 @@ static int spss_parse_dt(struct device_node *node)
 	}
 	iounmap(spss_emul_type_reg);
 
+	/* PIL-SPSS area */
+	ret = of_property_read_u32(node, "qcom,pil-addr",
+			     &pil_addr);
+	if (ret < 0) {
+		pr_err("can't get pil_addr\n");
+		return -EFAULT;
+	}
+	ret = of_property_read_u32(node, "qcom,pil-size",
+			     &pil_size);
+	if (ret < 0) {
+		pr_err("can't get pil_size\n");
+		return -EFAULT;
+	}
+
+	pr_info("pil_addr [0x%x].\n", pil_addr);
+	pr_info("pil_size [0x%x].\n", pil_size);
+
+	/* cmac buffer after spss firmware end */
+	cmac_mem_addr = pil_addr + pil_size;
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse3-addr",
+		&spss_fuse3_addr);
+	if (ret < 0) {
+		pr_err("can't get fuse3 addr.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse3-bit",
+		&spss_fuse3_bit);
+	if (ret < 0) {
+		pr_err("can't get fuse3 bit.\n");
+		return -EFAULT;
+	}
+
+	spss_fuse3_reg = ioremap_nocache(spss_fuse3_addr, sizeof(u32));
+
+	if (!spss_fuse3_reg) {
+		pr_err("can't map fuse3 addr.\n");
+		return -EFAULT;
+	}
+
+	/* read IAR_FEATURE_ENABLED from soc fuse */
+	val1 = readl_relaxed(spss_fuse3_reg);
+	spss_fuse3_mask = (1<<spss_fuse3_bit);
+	pr_info("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+		spss_fuse3_addr, val1, spss_fuse3_mask);
+	if (val1 & spss_fuse3_mask)
+		is_iar_enabled = true;
+	else
+		is_iar_enabled = false;
+
+	memset(cmac_buf, 0xA5, sizeof(cmac_buf));
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse4-addr",
+		&spss_fuse4_addr);
+	if (ret < 0) {
+		pr_err("can't get fuse4 addr.\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(node, "qcom,spss-fuse4-bit",
+		&spss_fuse4_bit);
+	if (ret < 0) {
+		pr_err("can't get fuse4 bit.\n");
+		return -EFAULT;
+	}
+
+	spss_fuse4_reg = ioremap_nocache(spss_fuse4_addr, sizeof(u32));
+
+	if (!spss_fuse4_reg) {
+		pr_err("can't map fuse4 addr.\n");
+		return -EFAULT;
+	}
+
+	val1 = readl_relaxed(spss_fuse4_reg);
+	spss_fuse4_mask = (0x07 << spss_fuse4_bit); /* 3 bits */
+	pr_info("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+	spss_fuse4_addr, val1, spss_fuse4_mask);
+	val1 = ((val1 & spss_fuse4_mask) >> spss_fuse4_bit) & 0x07;
+
+	iar_state = val1;
+
+	pr_info("iar_state [%d]\n", iar_state);
+
 	return 0;
 }
 
+static int spss_assign_mem_to_spss_and_hlos(phys_addr_t addr, size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[2] = {VMID_HLOS, VMID_CP_SPSS_HLOS_SHARED};
+	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
+	if (ret)
+		pr_err("hyp_assign_phys() failed, addr [%pa] size [%zx] ret [%d]\n",
+				&addr, size, ret);
+
+	return ret;
+}
+
+static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size)
+{
+	int ret;
+	u8 __iomem *reg = NULL;
+	int i;
+
+	if (cmac_mem == NULL) {
+		cmac_mem = ioremap_nocache(cmac_mem_addr, cmac_mem_size);
+		if (!cmac_mem) {
+			pr_err("can't map cmac_mem.\n");
+			return -EFAULT;
+		}
+	}
+
+	ret = spss_assign_mem_to_spss_and_hlos(cmac_mem_addr, cmac_mem_size);
+	if (ret)
+		return ret;
+
+	pr_debug("pil_addr [0x%x]\n", pil_addr);
+	pr_debug("pil_size [0x%x]\n", pil_size);
+	pr_debug("cmac_mem [%pK]\n", cmac_mem);
+	reg = cmac_mem;
+	pr_debug("reg [%pK]\n", reg);
+
+	for (i = 0; i < cmac_size/4; i++) {
+		writel_relaxed(cmac[i], reg + i*sizeof(u32));
+		pr_debug("cmac[%d] [0x%x]\n", i, cmac[i]);
+	}
+	reg += cmac_size;
+
+	for (i = 0; i < cmac_size/4; i++)
+		writel_relaxed(0, reg + i*sizeof(u32));
+
+	return 0;
+}
+
+static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size)
+{
+	u8 __iomem *reg = NULL;
+	int i;
+	u32 val;
+
+	if (cmac_mem == NULL)
+		return -EFAULT;
+
+	/* PBL calculated cmac after HLOS expected cmac */
+	reg = cmac_mem + cmac_size;
+	pr_debug("reg [%pK]\n", reg);
+
+	for (i = 0; i < cmac_size/4; i++) {
+		val = readl_relaxed(reg + i*sizeof(u32));
+		cmac[i] = val;
+		pr_debug("cmac[%d] [0x%x]\n", (int) i, (int) val);
+	}
+
+	return 0;
+}
+
+static int spss_utils_iar_callback(struct notifier_block *nb,
+				  unsigned long code,
+				  void *data)
+{
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("[SUBSYS_BEFORE_SHUTDOWN] event.\n");
+		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		pr_debug("[SUBSYS_AFTER_SHUTDOWN] event.\n");
+		break;
+	case SUBSYS_BEFORE_POWERUP:
+		pr_debug("[SUBSYS_BEFORE_POWERUP] event.\n");
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		pr_debug("[SUBSYS_AFTER_POWERUP] event.\n");
+		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
+		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
+			is_pbl_ce = true; /* cmacs not the same */
+		else
+			is_pbl_ce = false;
+		break;
+	case SUBSYS_BEFORE_AUTH_AND_RESET:
+		pr_debug("[SUBSYS_BEFORE_AUTH_AND_RESET] event.\n");
+		spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
+		break;
+	default:
+		pr_err("unknown code [0x%x].\n", (int) code);
+		break;
+
+	}
+
+	return NOTIFY_OK;
+}
+
 /**
  * spss_probe() - initialization sequence
  */
@@ -322,9 +835,29 @@ static int spss_probe(struct platform_device *pdev)
 	struct device_node *np = NULL;
 	struct device *dev = NULL;
 
+	if (!pdev) {
+		pr_err("invalid pdev.\n");
+		return -ENODEV;
+	}
+
 	np = pdev->dev.of_node;
+	if (!np) {
+		pr_err("invalid DT node.\n");
+		return -EINVAL;
+	}
+
+	spss_utils_dev = kzalloc(sizeof(*spss_utils_dev), GFP_KERNEL);
+	if (spss_utils_dev == NULL)
+		return -ENOMEM;
+
 	dev = &pdev->dev;
 	spss_dev = dev;
+
+	if (dev == NULL) {
+		pr_err("invalid dev.\n");
+		return -EINVAL;
+	}
+
 	platform_set_drvdata(pdev, dev);
 
 	ret = spss_parse_dt(np);
@@ -356,10 +889,29 @@ static int spss_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	ret = spss_utils_create_chardev(dev);
+	if (ret < 0)
+		return ret;
+
 	ret = spss_create_sysfs(dev);
 	if (ret < 0)
 		return ret;
 
+	pr_info("Initialization completed ok, firmware_name [%s].\n",
+		firmware_name);
+
+	iar_nb = kzalloc(sizeof(*iar_nb), GFP_KERNEL);
+	if (!iar_nb)
+		return -ENOMEM;
+
+	iar_nb->notifier_call = spss_utils_iar_callback;
+
+	iar_notif_handle = subsys_notif_register_notifier("spss", iar_nb);
+	if (IS_ERR_OR_NULL(iar_notif_handle)) {
+		pr_err("register fail for IAR notifier\n");
+		kfree(iar_nb);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index f9ac0a7..2d0c3b8 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -956,7 +956,7 @@ void *__subsystem_get(const char *name, const char *fw_name)
 		goto err_module;
 	}
 
-	subsys_d = subsystem_get(subsys->desc->depends_on);
+	subsys_d = subsystem_get(subsys->desc->pon_depends_on);
 	if (IS_ERR(subsys_d)) {
 		retval = subsys_d;
 		goto err_depends;
@@ -1035,6 +1035,10 @@ void subsystem_put(void *subsystem)
 	if (IS_ERR_OR_NULL(subsys))
 		return;
 
+	subsys_d = find_subsys_device(subsys->desc->poff_depends_on);
+	if (subsys_d)
+		subsystem_put(subsys_d);
+
 	track = subsys_get_track(subsys);
 	mutex_lock(&track->lock);
 	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
@@ -1048,11 +1052,6 @@ void subsystem_put(void *subsystem)
 	}
 	mutex_unlock(&track->lock);
 
-	subsys_d = find_subsys_device(subsys->desc->depends_on);
-	if (subsys_d) {
-		subsystem_put(subsys_d);
-		put_device(&subsys_d->dev);
-	}
 	module_put(subsys->owner);
 	put_device(&subsys->dev);
 	return;
@@ -1654,6 +1653,14 @@ static int subsys_parse_devicetree(struct subsys_desc *desc)
 		return PTR_ERR(order);
 	}
 
+	if (of_property_read_string(pdev->dev.of_node, "qcom,pon-depends-on",
+				&desc->pon_depends_on))
+		pr_debug("pon-depends-on not set for %s\n", desc->name);
+
+	if (of_property_read_string(pdev->dev.of_node, "qcom,poff-depends-on",
+				&desc->poff_depends_on))
+		pr_debug("poff-depends-on not set for %s\n", desc->name);
+
 	return 0;
 }
 
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index 353b07e..e84eb4e 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -4,6 +4,7 @@
 config SUNXI_SRAM
 	bool
 	default ARCH_SUNXI
+	select REGMAP_MMIO
 	help
 	  Say y here to enable the SRAM controller support. This
 	  device is responsible on mapping the SRAM in the sunXi SoCs
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 08dd3a3..5b6f365 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1427,7 +1427,7 @@ static int spi_imx_transfer(struct spi_device *spi,
 
 	/* flush rxfifo before transfer */
 	while (spi_imx->devtype_data->rx_available(spi_imx))
-		spi_imx->rx(spi_imx);
+		readl(spi_imx->base + MXC_CSPIRXDATA);
 
 	if (spi_imx->slave_mode)
 		return spi_imx_pio_transfer_slave(spi, transfer);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index b624f6f..729be74 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -876,10 +876,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
 
 	rate = min_t(int, ssp_clk, rate);
 
+	/*
+	 * Calculate the divisor for the SCR (Serial Clock Rate), avoiding
+	 * that the SSP transmission rate can be greater than the device rate
+	 */
 	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
-		return (ssp_clk / (2 * rate) - 1) & 0xff;
+		return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
 	else
-		return (ssp_clk / rate - 1) & 0xfff;
+		return (DIV_ROUND_UP(ssp_clk, rate) - 1)  & 0xfff;
 }
 
 static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index b37de1d..d611208 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -279,7 +279,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
 	/* Sets parity, interrupt mask */
 	rspi_write8(rspi, 0x00, RSPI_SPCR2);
 
-	/* Sets SPCMD */
+	/* Resets sequencer */
+	rspi_write8(rspi, 0, RSPI_SPSCR);
 	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
@@ -323,7 +324,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 	rspi_write8(rspi, 0x00, RSPI_SSLND);
 	rspi_write8(rspi, 0x00, RSPI_SPND);
 
-	/* Sets SPCMD */
+	/* Resets sequencer */
+	rspi_write8(rspi, 0, RSPI_SPSCR);
 	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
@@ -374,7 +376,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 	/* Sets buffer to allow normal operation */
 	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
 
-	/* Sets SPCMD */
+	/* Resets sequencer */
+	rspi_write8(rspi, 0, RSPI_SPSCR);
 	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
 	/* Sets RSPI mode */
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a76aced..a1888dc 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
 
 	spi_irq = platform_get_irq(pdev, 0);
 	tspi->irq = spi_irq;
-	ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
-			tegra_spi_isr_thread, IRQF_ONESHOT,
-			dev_name(&pdev->dev), tspi);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
-					tspi->irq);
-		goto exit_free_master;
-	}
 
 	tspi->clk = devm_clk_get(&pdev->dev, "spi");
 	if (IS_ERR(tspi->clk)) {
 		dev_err(&pdev->dev, "can not get clock\n");
 		ret = PTR_ERR(tspi->clk);
-		goto exit_free_irq;
+		goto exit_free_master;
 	}
 
 	tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
 	if (IS_ERR(tspi->rst)) {
 		dev_err(&pdev->dev, "can not get reset\n");
 		ret = PTR_ERR(tspi->rst);
-		goto exit_free_irq;
+		goto exit_free_master;
 	}
 
 	tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
 
 	ret = tegra_spi_init_dma_param(tspi, true);
 	if (ret < 0)
-		goto exit_free_irq;
+		goto exit_free_master;
 	ret = tegra_spi_init_dma_param(tspi, false);
 	if (ret < 0)
 		goto exit_rx_dma_free;
@@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
 		goto exit_pm_disable;
 	}
+
+	reset_control_assert(tspi->rst);
+	udelay(2);
+	reset_control_deassert(tspi->rst);
 	tspi->def_command1_reg  = SPI_M_S;
 	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
 	pm_runtime_put(&pdev->dev);
+	ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+				   tegra_spi_isr_thread, IRQF_ONESHOT,
+				   dev_name(&pdev->dev), tspi);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+			tspi->irq);
+		goto exit_pm_disable;
+	}
 
 	master->dev.of_node = pdev->dev.of_node;
 	ret = devm_spi_register_master(&pdev->dev, master);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
-		goto exit_pm_disable;
+		goto exit_free_irq;
 	}
 	return ret;
 
+exit_free_irq:
+	free_irq(spi_irq, tspi);
 exit_pm_disable:
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
@@ -1136,8 +1142,6 @@ static int tegra_spi_probe(struct platform_device *pdev)
 	tegra_spi_deinit_dma_param(tspi, false);
 exit_rx_dma_free:
 	tegra_spi_deinit_dma_param(tspi, true);
-exit_free_irq:
-	free_irq(spi_irq, tspi);
 exit_free_master:
 	spi_master_put(master);
 	return ret;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 97d1375..4389ab8 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1294,18 +1294,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
 				  dma->rx_buf_virt, dma->rx_buf_dma);
 }
 
-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
 			      struct pch_spi_data *data)
 {
 	struct pch_spi_dma_ctrl *dma;
+	int ret;
 
 	dma = &data->dma;
+	ret = 0;
 	/* Get Consistent memory for Tx DMA */
 	dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
 				PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+	if (!dma->tx_buf_virt)
+		ret = -ENOMEM;
+
 	/* Get Consistent memory for Rx DMA */
 	dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
 				PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+	if (!dma->rx_buf_virt)
+		ret = -ENOMEM;
+
+	return ret;
 }
 
 static int pch_spi_pd_probe(struct platform_device *plat_dev)
@@ -1382,7 +1391,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
 
 	if (use_dma) {
 		dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
-		pch_alloc_dma_buf(board_dat, data);
+		ret = pch_alloc_dma_buf(board_dat, data);
+		if (ret)
+			goto err_spi_register_master;
 	}
 
 	ret = spi_register_master(master);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9da0bc5..88a8a8e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -982,6 +982,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		if (max_tx || max_rx) {
 			list_for_each_entry(xfer, &msg->transfers,
 					    transfer_list) {
+				if (!xfer->len)
+					continue;
 				if (!xfer->tx_buf)
 					xfer->tx_buf = ctlr->dummy_tx;
 				if (!xfer->rx_buf)
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index f51f150..ffa379e 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
 	.resume		= ssb_host_pcmcia_resume,
 };
 
+static int pcmcia_init_failed;
+
 /*
  * These are not module init/exit functions!
  * The module_pcmcia_driver() helper cannot be used here.
  */
 int ssb_host_pcmcia_init(void)
 {
-	return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+	pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
+
+	return pcmcia_init_failed;
 }
 
 void ssb_host_pcmcia_exit(void)
 {
-	pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+	if (!pcmcia_init_failed)
+		pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
 }
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index f4308941..813407f4 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -67,3 +67,22 @@
 	  Choose this option to remove the SCHED_IDLE flag in case of defer
 	  free thereby increasing the priority of defer free thread.
 	  if you're not sure say Y here.
+
+config ION_POOL_AUTO_REFILL
+	bool "Refill the ION heap pools automatically"
+	depends on ION
+	help
+	  Choose this option to refill the ION system heap pools (non-secure)
+	  automatically when the pool pages count becomes lower than a set low mark.
+	  This refilling is done by worker thread which is invoked asynchronously
+	  when the pool count reaches below low mark.
+	  if you're not sure say Y here.
+
+config ION_POOL_FILL_MARK
+	int "ion pool fillmark size in MB"
+	depends on ION_POOL_AUTO_REFILL
+	range 16 256
+	default 100
+	help
+	  Set the fillmark of the pool in terms of mega bytes and the lowmark is
+	  ION_POOL_LOW_MARK_PERCENT of fillmark value.
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 8b29a76..0cce762 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -17,10 +17,12 @@
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
 #include <linux/miscdevice.h>
 #include <linux/bitops.h>
+#include <linux/vmstat.h>
 #include "ion_kernel.h"
 #include "../uapi/ion.h"
 #include "../uapi/msm_ion.h"
@@ -54,6 +56,19 @@
 #define MAKE_ION_ALLOC_DMA_READY 0
 #endif
 
+/* ION page pool marks in bytes */
+#ifdef CONFIG_ION_POOL_AUTO_REFILL
+#define ION_POOL_FILL_MARK (CONFIG_ION_POOL_FILL_MARK * SZ_1M)
+#define POOL_LOW_MARK_PERCENT	40UL
+#define ION_POOL_LOW_MARK ((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)
+#else
+#define ION_POOL_FILL_MARK 0UL
+#define ION_POOL_LOW_MARK 0UL
+#endif
+
+/* if low watermark of zones have reached, defer the refill in this window */
+#define ION_POOL_REFILL_DEFER_WINDOW_MS	10
+
 /**
  * struct ion_platform_heap - defines a heap in the given platform
  * @type:	type of the heap from ion_heap_type enum
@@ -400,6 +415,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
  * struct ion_page_pool - pagepool struct
  * @high_count:		number of highmem items in the pool
  * @low_count:		number of lowmem items in the pool
+ * @count:		total number of pages/items in the pool
  * @high_items:		list of highmem items
  * @low_items:		list of lowmem items
  * @mutex:		lock protecting this struct and especially the count
@@ -407,7 +423,8 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
  * @gfp_mask:		gfp_mask to use from alloc
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
- * @cached:            it's cached pool or not
+ * @cached:		it's cached pool or not
+ * @heap:		ion heap associated to this pool
  *
  * Allows you to keep a pool of pre allocated pages to use from your heap.
  * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -417,18 +434,22 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
 struct ion_page_pool {
 	int high_count;
 	int low_count;
+	atomic_t count;
 	bool cached;
 	struct list_head high_items;
 	struct list_head low_items;
+	ktime_t last_low_watermark_ktime;
 	/* Protect the pool */
 	struct mutex mutex;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
+	struct ion_heap heap;
 };
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 					   bool cached);
+void ion_page_pool_refill(struct ion_page_pool *pool);
 void ion_page_pool_destroy(struct ion_page_pool *pool);
 struct page *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
@@ -468,4 +489,23 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
 int ion_query_heaps(struct ion_heap_query *query);
 
+static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
+{
+	return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
+}
+
+static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
+{
+	return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
+}
+
+static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
+{
+	return atomic_read(&pool->count) < get_pool_lowmark(pool);
+}
+
+static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
+{
+	return atomic_read(&pool->count) >= get_pool_fillmark(pool);
+}
 #endif /* _ION_H */
diff --git a/drivers/staging/android/ion/ion_kernel.h b/drivers/staging/android/ion/ion_kernel.h
index c5d3d1e..443175e 100644
--- a/drivers/staging/android/ion/ion_kernel.h
+++ b/drivers/staging/android/ion/ion_kernel.h
@@ -1,13 +1,15 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ION_KERNEL_H
 #define _ION_KERNEL_H
 
 #include <linux/dma-buf.h>
+#include <linux/bitmap.h>
 #include "../uapi/ion.h"
+#include "../uapi/msm_ion.h"
 
 #ifdef CONFIG_ION
 
@@ -18,6 +20,16 @@
 struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
 			  unsigned int flags);
 
+static inline unsigned int ion_get_flags_num_vm_elems(unsigned int flags)
+{
+	unsigned long vm_flags = flags & ION_FLAGS_CP_MASK;
+
+	return ((unsigned int)bitmap_weight(&vm_flags, BITS_PER_LONG));
+}
+
+int ion_populate_vm_list(unsigned long flags, unsigned int *vm_list,
+			 int nelems);
+
 #else
 
 static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
@@ -26,5 +38,16 @@ static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
 	return -ENOMEM;
 }
 
+static inline unsigned int ion_get_flags_num_vm_elems(unsigned int flags)
+{
+	return 0;
+}
+
+static inline int ion_populate_vm_list(unsigned long flags,
+				       unsigned int *vm_list, int nelems)
+{
+	return -EINVAL;
+}
+
 #endif /* CONFIG_ION */
 #endif /* _ION_KERNEL_H */
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 3158035..ed0898f 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -12,6 +12,41 @@
 
 #include "ion.h"
 
+/* do a simple check to see if we are in any low memory situation */
+static bool pool_refill_ok(struct ion_page_pool *pool)
+{
+	struct zonelist *zonelist;
+	struct zoneref *z;
+	struct zone *zone;
+	int mark;
+	enum zone_type classzone_idx = gfp_zone(pool->gfp_mask);
+	s64 delta;
+
+	/* check if we are within the refill defer window */
+	delta = ktime_ms_delta(ktime_get(), pool->last_low_watermark_ktime);
+	if (delta < ION_POOL_REFILL_DEFER_WINDOW_MS)
+		return false;
+
+	zonelist = node_zonelist(numa_node_id(), pool->gfp_mask);
+	/*
+	 * make sure that if we allocate a pool->order page from buddy,
+	 * we don't put the zone watermarks go below the high threshold.
+	 * This makes sure there's no unwanted repetitive refilling and
+	 * reclaiming of buddy pages on the pool.
+	 */
+	for_each_zone_zonelist(zone, z, zonelist, classzone_idx) {
+		mark = high_wmark_pages(zone);
+		mark += 1 << pool->order;
+		if (!zone_watermark_ok_safe(zone, pool->order, mark,
+					    classzone_idx)) {
+			pool->last_low_watermark_ktime = ktime_get();
+			return false;
+		}
+	}
+
+	return true;
+}
+
 static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
 	return alloc_pages(pool->gfp_mask, pool->order);
@@ -34,11 +69,34 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 		pool->low_count++;
 	}
 
+	atomic_inc(&pool->count);
 	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
 			    (1 << (PAGE_SHIFT + pool->order)));
 	mutex_unlock(&pool->mutex);
 }
 
+void ion_page_pool_refill(struct ion_page_pool *pool)
+{
+	struct page *page;
+	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
+	struct device *dev = pool->heap.priv;
+
+	/* skip refilling order 0 pools */
+	if (!pool->order)
+		return;
+
+	while (!pool_fillmark_reached(pool) && pool_refill_ok(pool)) {
+		page = alloc_pages(gfp_refill, pool->order);
+		if (!page)
+			break;
+		if (!pool->cached)
+			ion_pages_sync_for_device(dev, page,
+						  PAGE_SIZE << pool->order,
+						  DMA_BIDIRECTIONAL);
+		ion_page_pool_add(pool, page);
+	}
+}
+
 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 {
 	struct page *page;
@@ -53,6 +111,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 		pool->low_count--;
 	}
 
+	atomic_dec(&pool->count);
 	list_del(&page->lru);
 	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
 			    -(1 << (PAGE_SHIFT + pool->order)));
@@ -165,12 +224,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 					   bool cached)
 {
-	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct ion_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 
 	if (!pool)
 		return NULL;
-	pool->high_count = 0;
-	pool->low_count = 0;
 	INIT_LIST_HEAD(&pool->low_items);
 	INIT_LIST_HEAD(&pool->high_items);
 	pool->gfp_mask = gfp_mask;
diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c
index 29bbc1a..e15c90b 100644
--- a/drivers/staging/android/ion/ion_secure_util.c
+++ b/drivers/staging/android/ion/ion_secure_util.c
@@ -70,8 +70,8 @@ static int get_vmid(unsigned long flags)
 	return vmid;
 }
 
-static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
-			    int nelems)
+int ion_populate_vm_list(unsigned long flags, unsigned int *vm_list,
+			 int nelems)
 {
 	unsigned int itr = 0;
 	int vmid;
@@ -148,14 +148,8 @@ int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
 		goto out;
 	}
 
-	for (i = 0; i < dest_nelems; i++) {
-		if (dest_vm_list[i] == VMID_CP_SEC_DISPLAY)
-			dest_perms[i] = PERM_READ;
-		else if (dest_vm_list[i] == VMID_CP_CDSP)
-			dest_perms[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
-		else
-			dest_perms[i] = PERM_READ | PERM_WRITE;
-	}
+	for (i = 0; i < dest_nelems; i++)
+		dest_perms[i] = msm_secure_get_vmid_perms(dest_vm_list[i]);
 
 	ret = hyp_assign_table(sgt, &source_vmid, 1,
 			       dest_vm_list, dest_perms, dest_nelems);
@@ -187,7 +181,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 				 GFP_KERNEL);
 	if (!source_vm_list)
 		return -ENOMEM;
-	ret = populate_vm_list(flags, source_vm_list, source_nelems);
+	ret = ion_populate_vm_list(flags, source_vm_list, source_nelems);
 	if (ret) {
 		pr_err("%s: Failed to get secure vmids\n", __func__);
 		goto out_free_source;
@@ -215,7 +209,7 @@ int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 		goto out;
 	}
 
-	ret = populate_vm_list(flags, dest_vm_list, dest_nelems);
+	ret = ion_populate_vm_list(flags, dest_vm_list, dest_nelems);
 	if (ret) {
 		pr_err("%s: Failed to get secure vmid(s)\n", __func__);
 		goto out_free_dest_vm;
@@ -263,19 +257,14 @@ int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags)
 	}
 
 	if ((flags & ~ION_FLAGS_CP_MASK) ||
-	    populate_vm_list(flags, vmids, nr)) {
+	    ion_populate_vm_list(flags, vmids, nr)) {
 		pr_err("%s: Failed to parse secure flags 0x%lx\n", __func__,
 		       flags);
 		goto out;
 	}
 
 	for (i = 0; i < nr; i++)
-		if (vmids[i] == VMID_CP_SEC_DISPLAY)
-			modes[i] = PERM_READ;
-		else if (vmids[i] == VMID_CP_CDSP)
-			modes[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
-		else
-			modes[i] = PERM_READ | PERM_WRITE;
+		modes[i] = msm_secure_get_vmid_perms(vmids[i]);
 
 	ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr);
 	if (ret)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index e067a54..eacd470 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -16,6 +16,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/sched/types.h>
+#include <linux/sched.h>
 #include <soc/qcom/secure_buffer.h>
 #include "ion_system_heap.h"
 #include "ion.h"
@@ -27,6 +29,9 @@ static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
 				     __GFP_NORETRY) & ~__GFP_RECLAIM;
 static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
 
+bool pool_auto_refill_en  __read_mostly =
+		IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);
+
 int order_to_index(unsigned int order)
 {
 	int i;
@@ -58,7 +63,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      unsigned long order,
 				      bool *from_pool)
 {
-	bool cached = ion_buffer_cached(buffer);
+	int cached = (int)ion_buffer_cached(buffer);
 	struct page *page;
 	struct ion_page_pool *pool;
 	int vmid = get_secure_vmid(buffer->flags);
@@ -73,6 +78,11 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 
 	page = ion_page_pool_alloc(pool, from_pool);
 
+	if (pool_auto_refill_en &&
+	    pool_count_below_lowmark(pool)) {
+		wake_up_process(heap->kworker[cached]);
+	}
+
 	if (IS_ERR(page))
 		return page;
 
@@ -469,7 +479,8 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 	if (!nr_to_scan)
 		only_scan = 1;
 
-	for (i = 0; i < NUM_ORDERS; i++) {
+	/* shrink the pools starting from lower order ones */
+	for (i = NUM_ORDERS - 1; i >= 0; i--) {
 		nr_freed = 0;
 
 		for (j = 0; j < VMID_LAST; j++) {
@@ -624,7 +635,8 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
  * nothing. If it succeeds you'll eventually need to use
  * ion_system_heap_destroy_pools to destroy the pools.
  */
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,
+					struct ion_page_pool **pools,
 					bool cached)
 {
 	int i;
@@ -638,6 +650,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
 		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
 		if (!pool)
 			goto err_create_pool;
+		pool->heap = sys_heap->heap;
 		pools[i] = pool;
 	}
 	return 0;
@@ -646,9 +659,70 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
 	return -ENOMEM;
 }
 
+static int ion_sys_heap_worker(void *data)
+{
+	struct ion_page_pool **pools = (struct ion_page_pool **)data;
+	int i;
+
+	for (;;) {
+		for (i = 0; i < NUM_ORDERS; i++) {
+			if (pool_count_below_lowmark(pools[i]))
+				ion_page_pool_refill(pools[i]);
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (unlikely(kthread_should_stop())) {
+			set_current_state(TASK_RUNNING);
+			break;
+		}
+		schedule();
+
+		set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
+static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
+					      bool cached)
+{
+	struct sched_attr attr = { 0 };
+	struct task_struct *thread;
+	int ret;
+	char *buf;
+	cpumask_t *cpumask;
+	DECLARE_BITMAP(bmap, nr_cpumask_bits);
+
+	attr.sched_nice = ION_KTHREAD_NICE_VAL;
+	buf = cached ? "cached" : "uncached";
+	/*
+	 * Affine the kthreads to min capacity CPUs
+	 * TODO: remove this hack once is_min_capability_cpu is available
+	 */
+	bitmap_fill(bmap, 0x4);
+	cpumask = to_cpumask(bmap);
+
+	thread = kthread_create(ion_sys_heap_worker, pools,
+				"ion-pool-%s-worker", buf);
+	if (IS_ERR(thread)) {
+		pr_err("%s: failed to create %s worker thread: %ld\n",
+		       __func__, buf, PTR_ERR(thread));
+		return thread;
+	}
+	ret = sched_setattr(thread, &attr);
+	if (ret) {
+		kthread_stop(thread);
+		pr_warn("%s: failed to set task priority for %s worker thread: ret = %d\n",
+			__func__, buf, ret);
+		return ERR_PTR(ret);
+	}
+	kthread_bind_mask(thread, cpumask);
+	return thread;
+}
+
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 {
 	struct ion_system_heap *heap;
+	int ret = -ENOMEM;
 	int i;
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
@@ -660,21 +734,39 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 
 	for (i = 0; i < VMID_LAST; i++)
 		if (is_secure_vmid_valid(i))
-			if (ion_system_heap_create_pools(heap->secure_pools[i],
+			if (ion_system_heap_create_pools(heap,
+							 heap->secure_pools[i],
 							 false))
 				goto destroy_secure_pools;
 
-	if (ion_system_heap_create_pools(heap->uncached_pools, false))
+	if (ion_system_heap_create_pools(heap, heap->uncached_pools, false))
 		goto destroy_secure_pools;
 
-	if (ion_system_heap_create_pools(heap->cached_pools, true))
+	if (ion_system_heap_create_pools(heap, heap->cached_pools, true))
 		goto destroy_uncached_pools;
 
+	if (pool_auto_refill_en) {
+		heap->kworker[ION_KTHREAD_UNCACHED] =
+				ion_create_kworker(heap->uncached_pools, false);
+		if (IS_ERR(heap->kworker[ION_KTHREAD_UNCACHED])) {
+			ret = PTR_ERR(heap->kworker[ION_KTHREAD_UNCACHED]);
+			goto destroy_pools;
+		}
+		heap->kworker[ION_KTHREAD_CACHED] =
+				ion_create_kworker(heap->cached_pools, true);
+		if (IS_ERR(heap->kworker[ION_KTHREAD_CACHED])) {
+			kthread_stop(heap->kworker[ION_KTHREAD_UNCACHED]);
+			ret = PTR_ERR(heap->kworker[ION_KTHREAD_CACHED]);
+			goto destroy_pools;
+		}
+	}
+
 	mutex_init(&heap->split_page_mutex);
 
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
-
+destroy_pools:
+	ion_system_heap_destroy_pools(heap->cached_pools);
 destroy_uncached_pools:
 	ion_system_heap_destroy_pools(heap->uncached_pools);
 destroy_secure_pools:
@@ -683,7 +775,7 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 			ion_system_heap_destroy_pools(heap->secure_pools[i]);
 	}
 	kfree(heap);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static int ion_system_contig_heap_allocate(struct ion_heap *heap,
diff --git a/drivers/staging/android/ion/ion_system_heap.h b/drivers/staging/android/ion/ion_system_heap.h
index c21ba85..45daa48 100644
--- a/drivers/staging/android/ion/ion_system_heap.h
+++ b/drivers/staging/android/ion/ion_system_heap.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #include <soc/qcom/secure_buffer.h>
 #include "ion.h"
@@ -20,10 +20,20 @@ static const unsigned int orders[] = {0};
 
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
+#define ION_KTHREAD_NICE_VAL 10
+
+enum ion_kthread_type {
+	ION_KTHREAD_UNCACHED,
+	ION_KTHREAD_CACHED,
+	ION_MAX_NUM_KTHREADS
+};
+
 struct ion_system_heap {
 	struct ion_heap heap;
 	struct ion_page_pool *uncached_pools[MAX_ORDER];
 	struct ion_page_pool *cached_pools[MAX_ORDER];
+	/* worker threads to refill the pool */
+	struct task_struct *kworker[ION_MAX_NUM_KTHREADS];
 	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
 	/* Prevents unnecessary page splitting */
 	struct mutex split_page_mutex;
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 6875372..d972588 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
 #
 config XIL_AXIS_FIFO
 	tristate "Xilinx AXI-Stream FIFO IP core driver"
+	depends on OF
 	default n
 	help
 	  This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 808ed92..1bb1cb6 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
 
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
 	if (!devpriv)
 		return -ENOMEM;
 
+	mutex_init(&devpriv->mut);
+	usb_set_intfdata(intf, devpriv);
+
 	ret = ni6501_find_endpoints(dev);
 	if (ret)
 		return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
 	if (ret)
 		return ret;
 
-	mutex_init(&devpriv->mut);
-	usb_set_intfdata(intf, devpriv);
-
 	ret = comedi_alloc_subdevices(dev, 2);
 	if (ret)
 		return ret;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 6234b64..65dc6c5 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 
 	devpriv->model = board->model;
 
+	sema_init(&devpriv->limit_sem, 8);
+
 	ret = vmk80xx_find_usb_endpoints(dev);
 	if (ret)
 		return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 	if (ret)
 		return ret;
 
-	sema_init(&devpriv->limit_sem, 8);
-
 	usb_set_intfdata(intf, devpriv);
 
 	if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 0529e56..ae5c028 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -520,7 +520,7 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
 
 	op = gb_operation_create(connection,
 				 GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
-				 sizeof(req), sizeof(*resp) + props_count *
+				 sizeof(*req), sizeof(*resp) + props_count *
 				 sizeof(struct gb_power_supply_props_desc),
 				 GFP_KERNEL);
 	if (!op)
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index df0499f..6857a4b 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -109,10 +109,10 @@
 #define AD7192_CH_AIN3		BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4		BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M	0x000  /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M	0x001  /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M	0x002  /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M	0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M	0x001  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M	0x002  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M	0x004  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M	0x008  /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP		0x100 /* Temp senseor */
 #define AD7193_CH_AIN2P_AIN2M	0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1		0x401 /* AIN1 - AINCOM */
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3f22d10..68866f5 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -47,6 +47,8 @@
 #define ADT7516_MSB_AIN3		0xA
 #define ADT7516_MSB_AIN4		0xB
 #define ADT7316_DA_DATA_BASE		0x10
+#define ADT7316_DA_10_BIT_LSB_SHIFT	6
+#define ADT7316_DA_12_BIT_LSB_SHIFT	4
 #define ADT7316_DA_MSB_DATA_REGS	4
 #define ADT7316_LSB_DAC_A		0x10
 #define ADT7316_MSB_DAC_A		0x11
@@ -1086,7 +1088,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
 		ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
 		if (data & 0x1)
 			ldac_config |= ADT7516_DAC_AB_IN_VREF;
-		else if (data & 0x2)
+		if (data & 0x2)
 			ldac_config |= ADT7516_DAC_CD_IN_VREF;
 	} else {
 		ret = kstrtou8(buf, 16, &data);
@@ -1408,7 +1410,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
 static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
 		int channel, char *buf)
 {
-	u16 data;
+	u16 data = 0;
 	u8 msb, lsb, offset;
 	int ret;
 
@@ -1433,7 +1435,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
 	if (ret)
 		return -EIO;
 
-	data = (msb << offset) + (lsb & ((1 << offset) - 1));
+	if (chip->dac_bits == 12)
+		data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
+	else if (chip->dac_bits == 10)
+		data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
+	data |= msb << offset;
 
 	return sprintf(buf, "%d\n", data);
 }
@@ -1441,7 +1447,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
 static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
 		int channel, const char *buf, size_t len)
 {
-	u8 msb, lsb, offset;
+	u8 msb, lsb, lsb_reg, offset;
 	u16 data;
 	int ret;
 
@@ -1459,9 +1465,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
 		return -EINVAL;
 
 	if (chip->dac_bits > 8) {
-		lsb = data & (1 << offset);
+		lsb = data & ((1 << offset) - 1);
+		if (chip->dac_bits == 12)
+			lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
+		else
+			lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
 		ret = chip->bus.write(chip->bus.client,
-			ADT7316_DA_DATA_BASE + channel * 2, lsb);
+			ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
 		if (ret)
 			return -EIO;
 	}
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 029c3bf..07774c0 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
 static IIO_DEV_ATTR_IPEAK(0644,
 		ade7854_read_32bit,
 		ade7854_write_32bit,
-		ADE7854_VPEAK);
+		ADE7854_IPEAK);
 static IIO_DEV_ATTR_APHCAL(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index aea449a..76818cc 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,7 +1,7 @@
 config VIDEO_DM365_VPFE
 	tristate "DM365 VPFE Media Controller Capture Driver"
 	depends on VIDEO_V4L2
-	depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST
+	depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
 	depends on VIDEO_V4L2_SUBDEV_API
 	depends on VIDEO_DAVINCI_VPBE_DISPLAY
 	select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index e22f123..d17ce1f 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
 /*
  * Parses the fwnode endpoint from the source pad of the entity
  * connected to this CSI. This will either be the entity directly
- * upstream from the CSI-2 receiver, or directly upstream from the
- * video mux. The endpoint is needed to determine the bus type and
- * bus config coming into the CSI.
+ * upstream from the CSI-2 receiver, directly upstream from the
+ * video mux, or directly upstream from the CSI itself. The endpoint
+ * is needed to determine the bus type and bus config coming into
+ * the CSI.
  */
 static int csi_get_upstream_endpoint(struct csi_priv *priv,
 				     struct v4l2_fwnode_endpoint *ep)
@@ -168,7 +169,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
 	if (!priv->src_sd)
 		return -EPIPE;
 
-	src = &priv->src_sd->entity;
+	sd = priv->src_sd;
+	src = &sd->entity;
 
 	if (src->function == MEDIA_ENT_F_VID_MUX) {
 		/*
@@ -182,6 +184,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
 			src = &sd->entity;
 	}
 
+	/*
+	 * If the source is neither the video mux nor the CSI-2 receiver,
+	 * get the source pad directly upstream from CSI itself.
+	 */
+	if (src->function != MEDIA_ENT_F_VID_MUX &&
+	    sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
+		src = &priv->sd.entity;
+
 	/* get source pad of entity directly upstream from src */
 	pad = imx_media_find_upstream_pad(priv->md, src, 0);
 	if (IS_ERR(pad))
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index acde372..1647da2 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -233,15 +233,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
 				  struct v4l2_subdev *csi)
 {
 	struct device_node *csi_np = csi->dev->of_node;
-	struct fwnode_handle *fwnode, *csi_ep;
-	struct v4l2_fwnode_link link;
 	struct device_node *ep;
-	int ret;
-
-	link.local_node = of_fwnode_handle(csi_np);
-	link.local_port = CSI_SINK_PAD;
 
 	for_each_child_of_node(csi_np, ep) {
+		struct fwnode_handle *fwnode, *csi_ep;
+		struct v4l2_fwnode_link link;
+		int ret;
+
+		memset(&link, 0, sizeof(link));
+
+		link.local_node = of_fwnode_handle(csi_np);
+		link.local_port = CSI_SINK_PAD;
+
 		csi_ep = of_fwnode_handle(ep);
 
 		fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 4569838..0b48677 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -546,7 +546,7 @@ static void __exit mod_exit(void)
 		destroy_cdev(c);
 		destroy_channel(c);
 	}
-	unregister_chrdev_region(comp.devno, 1);
+	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
 	ida_destroy(&comp.minor_id);
 	class_destroy(comp.class);
 }
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 52ad627..25a077f 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface)
 
 	INIT_LIST_HEAD(&iface->p->channel_list);
 	iface->p->dev_id = id;
-	snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+	strcpy(iface->p->name, iface->description);
 	iface->dev.init_name = iface->p->name;
 	iface->dev.bus = &mc.bus;
 	iface->dev.parent = &mc.dev;
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d335338..c8fa17c 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
 config PCI_MT7621
 	tristate "MediaTek MT7621 PCI Controller"
 	depends on RALINK
+	depends on PCI
 	select PCI_DRIVERS_GENERIC
 	help
 	  This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 2130d78..dd9b02d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -178,7 +178,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
 	pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-	rtw_alloc_hwxmits(padapter);
+	res = rtw_alloc_hwxmits(padapter);
+	if (res == _FAIL)
+		goto exit;
 	rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
 	for (i = 0; i < 4; i++)
@@ -1502,7 +1504,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 	return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
 	struct hw_xmit *hwxmits;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1511,6 +1513,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
 				     sizeof(struct hw_xmit), GFP_KERNEL);
+	if (!pxmitpriv->hwxmits)
+		return _FAIL;
 
 	hwxmits = pxmitpriv->hwxmits;
 
@@ -1518,6 +1522,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 	hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
 	hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
 	hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+	return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c..ba7e15f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index b1dfe9f..63bc811 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -159,17 +159,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-	u32 val;
-	void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj	*pcmd);
 	struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-	if (pcmd->rsp && pcmd->rspsz > 0)
-		memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-	pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-	if (!pcmd_callback)
-		r8712_free_cmd_obj(pcmd);
-	else
-		pcmd_callback(padapter, pcmd);
+	r8712_free_cmd_obj(pcmd);
 	return H2C_SUCCESS;
 }
 
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 9181bb6..a101a0a 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -152,7 +152,7 @@ enum rtl8712_h2c_cmd {
 static struct _cmd_callback	cmd_callback[] = {
 	{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
 	{GEN_CMD_CODE(_Write_MACREG), NULL},
-	{GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+	{GEN_CMD_CODE(_Read_BBREG), NULL},
 	{GEN_CMD_CODE(_Write_BBREG), NULL},
 	{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
 	{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index edb6781..16291de 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 		}
 	}
 
-	rtw_alloc_hwxmits(padapter);
+	res = rtw_alloc_hwxmits(padapter);
+	if (res == _FAIL)
+		goto exit;
 	rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
 	for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 	return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
 	struct hw_xmit *hwxmits;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
 
-	if (pxmitpriv->hwxmits == NULL) {
-		DBG_871X("alloc hwxmits fail!...\n");
-		return;
-	}
+	if (!pxmitpriv->hwxmits)
+		return _FAIL;
 
 	hwxmits = pxmitpriv->hwxmits;
 
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	}
 
-
+	return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index a75b668..021c72361 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -486,7 +486,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
 
 
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 
 
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed9..4cc77b2 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
 
 	rtlpriv->phydm.internal =
 		kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+	if (!rtlpriv->phydm.internal)
+		return 0;
 
 	_rtl_phydm_init_com_info(rtlpriv, ic, params);
 
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index a403966..c1ed52d 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -741,6 +741,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 		      u1_rsvd_page_loc, 3);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
 	       totalpacketlen);
 
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index e767209..c7c8ef6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -208,6 +208,9 @@ vchiq_platform_init_state(VCHIQ_STATE_T *state)
 	struct vchiq_2835_state *platform_state;
 
 	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+	if (!state->platform_state)
+		return VCHIQ_ERROR;
+
 	platform_state = (struct vchiq_2835_state *)state->platform_state;
 
 	platform_state->inited = 1;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 7642ced..63ce567 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -2537,6 +2537,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
 
 	status = vchiq_platform_init_state(state);
+	if (status != VCHIQ_SUCCESS)
+		return VCHIQ_ERROR;
 
 	/*
 		bring up slot handler thread
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 1e47511..d748527 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -45,7 +45,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 }
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
 		BYT_SOC_DTS_APIC_IRQ},
 	{}
 };
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index c4e03f0..6ac5230 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -503,6 +503,26 @@ static bool of_thermal_is_wakeable(struct thermal_zone_device *tz)
 	return data->is_wakeable;
 }
 
+static int of_thermal_set_polling_delay(struct thermal_zone_device *tz,
+				    int delay)
+{
+	struct __thermal_zone *data = tz->devdata;
+
+	data->polling_delay = delay;
+
+	return 0;
+}
+
+static int of_thermal_set_passive_delay(struct thermal_zone_device *tz,
+				    int delay)
+{
+	struct __thermal_zone *data = tz->devdata;
+
+	data->passive_delay = delay;
+
+	return 0;
+}
+
 static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
 		unsigned int trip_type_mask, int *low, int *high)
 {
@@ -630,6 +650,8 @@ static struct thermal_zone_device_ops of_thermal_ops = {
 	.unbind = of_thermal_unbind,
 
 	.is_wakeable = of_thermal_is_wakeable,
+	.set_polling_delay = of_thermal_set_polling_delay,
+	.set_passive_delay = of_thermal_set_passive_delay,
 };
 
 static struct thermal_zone_of_device_ops of_virt_ops = {
diff --git a/drivers/thermal/qcom/adc-tm-common.c b/drivers/thermal/qcom/adc-tm-common.c
index be89746..343a631 100644
--- a/drivers/thermal/qcom/adc-tm-common.c
+++ b/drivers/thermal/qcom/adc-tm-common.c
@@ -11,42 +11,82 @@
  * 1.875V reference and 100k pull-up.
  */
 static const struct adc_tm_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
-	{ 1831,	-40000 },
-	{ 1814,	-35000 },
-	{ 1791,	-30000 },
-	{ 1761,	-25000 },
-	{ 1723,	-20000 },
-	{ 1675,	-15000 },
-	{ 1616,	-10000 },
-	{ 1545,	-5000 },
-	{ 1463,	0 },
-	{ 1370,	5000 },
-	{ 1268,	10000 },
-	{ 1160,	15000 },
-	{ 1049,	20000 },
-	{ 937,	25000 },
-	{ 828,	30000 },
-	{ 726,	35000 },
-	{ 630,	40000 },
-	{ 544,	45000 },
-	{ 467,	50000 },
-	{ 399,	55000 },
-	{ 340,	60000 },
-	{ 290,	65000 },
-	{ 247,	70000 },
-	{ 209,	75000 },
-	{ 179,	80000 },
-	{ 153,	85000 },
-	{ 130,	90000 },
-	{ 112,	95000 },
-	{ 96,	100000 },
-	{ 82,	105000 },
-	{ 71,	110000 },
-	{ 62,	115000 },
-	{ 53,	120000 },
-	{ 46,	125000 },
+	{ 1831000,	-40000 },
+	{ 1814000,	-35000 },
+	{ 1791000,	-30000 },
+	{ 1761000,	-25000 },
+	{ 1723000,	-20000 },
+	{ 1675000,	-15000 },
+	{ 1616000,	-10000 },
+	{ 1545000,	-5000 },
+	{ 1463000,	0 },
+	{ 1370000,	5000 },
+	{ 1268000,	10000 },
+	{ 1160000,	15000 },
+	{ 1049000,	20000 },
+	{ 937000,	25000 },
+	{ 828000,	30000 },
+	{ 726000,	35000 },
+	{ 630000,	40000 },
+	{ 544000,	45000 },
+	{ 467000,	50000 },
+	{ 399000,	55000 },
+	{ 340000,	60000 },
+	{ 290000,	65000 },
+	{ 247000,	70000 },
+	{ 209000,	75000 },
+	{ 179000,	80000 },
+	{ 153000,	85000 },
+	{ 130000,	90000 },
+	{ 112000,	95000 },
+	{ 96000,	100000 },
+	{ 82000,	105000 },
+	{ 71000,	110000 },
+	{ 62000,	115000 },
+	{ 53000,	120000 },
+	{ 46000,	125000 },
 };
 
+static void adc_tm_map_voltage_temp(const struct adc_tm_map_pt *pts,
+				      size_t tablesize, int input, int *output)
+{
+	unsigned int descending = 1;
+	u32 i = 0;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].x < pts[1].x)
+			descending = 0;
+	}
+
+	while (i < tablesize) {
+		if ((descending) && (pts[i].x < input)) {
+			/* table entry is less than measured */
+			/* value and table is descending, stop */
+			break;
+		} else if ((!descending) &&
+				(pts[i].x > input)) {
+				/* table entry is greater than measured */
+			/* value and table is ascending, stop */
+			break;
+		}
+		i++;
+	}
+
+	if (i == 0) {
+		*output = pts[0].y;
+	} else if (i == tablesize) {
+		*output = pts[tablesize - 1].y;
+	} else {
+		/* result is between search_index and search_index-1 */
+		/* interpolate linearly */
+		*output = (((int32_t)((pts[i].y - pts[i - 1].y) *
+			(input - pts[i - 1].x)) /
+			(pts[i].x - pts[i - 1].x)) +
+			pts[i - 1].y);
+	}
+}
+
 static void adc_tm_map_temp_voltage(const struct adc_tm_map_pt *pts,
 		size_t tablesize, int input, int64_t *output)
 {
@@ -91,10 +131,27 @@ static void adc_tm_map_temp_voltage(const struct adc_tm_map_pt *pts,
 	}
 }
 
+int therm_fwd_scale(int64_t code, uint32_t adc_hc_vdd_ref_mv,
+			const struct adc_tm_data *data)
+{
+	int64_t volt = 0;
+	int result = 0;
+
+	volt = (s64) code * adc_hc_vdd_ref_mv;
+	volt = div64_s64(volt, (data->full_scale_code_volt));
+
+	adc_tm_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+				 ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+				 (int) volt, &result);
+
+	return result;
+}
+EXPORT_SYMBOL(therm_fwd_scale);
+
 void adc_tm_scale_therm_voltage_100k(struct adc_tm_config *param,
 				const struct adc_tm_data *data)
 {
-	uint32_t adc_hc_vdd_ref_mv = 1875;
+	int temp;
 
 	/* High temperature maps to lower threshold voltage */
 	adc_tm_map_temp_voltage(
@@ -104,7 +161,13 @@ void adc_tm_scale_therm_voltage_100k(struct adc_tm_config *param,
 
 	param->low_thr_voltage *= data->full_scale_code_volt;
 	param->low_thr_voltage = div64_s64(param->low_thr_voltage,
-						adc_hc_vdd_ref_mv);
+						ADC_HC_VDD_REF);
+
+	temp = therm_fwd_scale(param->low_thr_voltage,
+				ADC_HC_VDD_REF, data);
+
+	if (temp < param->high_thr_temp)
+		param->low_thr_voltage--;
 
 	/* Low temperature maps to higher threshold voltage */
 	adc_tm_map_temp_voltage(
@@ -114,7 +177,13 @@ void adc_tm_scale_therm_voltage_100k(struct adc_tm_config *param,
 
 	param->high_thr_voltage *= data->full_scale_code_volt;
 	param->high_thr_voltage = div64_s64(param->high_thr_voltage,
-						adc_hc_vdd_ref_mv);
+						ADC_HC_VDD_REF);
+
+	temp = therm_fwd_scale(param->high_thr_voltage,
+				ADC_HC_VDD_REF, data);
+
+	if (temp > param->low_thr_temp)
+		param->high_thr_voltage++;
 
 }
 EXPORT_SYMBOL(adc_tm_scale_therm_voltage_100k);
diff --git a/drivers/thermal/qcom/adc-tm.h b/drivers/thermal/qcom/adc-tm.h
index 63352ab..46f6db4 100644
--- a/drivers/thermal/qcom/adc-tm.h
+++ b/drivers/thermal/qcom/adc-tm.h
@@ -256,6 +256,9 @@ struct adc_tm_linear_graph {
 	s32 gnd;
 };
 
+int therm_fwd_scale(int64_t code, uint32_t adc_hc_vdd_ref_mv,
+				const struct adc_tm_data *data);
+
 void adc_tm_scale_therm_voltage_100k(struct adc_tm_config *param,
 				const struct adc_tm_data *data);
 
diff --git a/drivers/thermal/qcom/adc-tm5.c b/drivers/thermal/qcom/adc-tm5.c
index 318f25e..9570411 100644
--- a/drivers/thermal/qcom/adc-tm5.c
+++ b/drivers/thermal/qcom/adc-tm5.c
@@ -56,6 +56,10 @@
 #define ADC_TM_LOWER_MASK(n)			((n) & 0x000000ff)
 #define ADC_TM_UPPER_MASK(n)			(((n) & 0xffffff00) >> 8)
 
+#define ADC_TM_Mn_DATA0(n)			((n * 2) + 0xa0)
+#define ADC_TM_Mn_DATA1(n)			((n * 2) + 0xa1)
+#define ADC_TM_DATA_SHIFT			8
+
 static struct adc_tm_trip_reg_type adc_tm_ch_data[] = {
 	[ADC_TM_CHAN0] = {ADC_TM_M0_ADC_CH_SEL_CTL},
 	[ADC_TM_CHAN1] = {ADC_TM_M1_ADC_CH_SEL_CTL},
@@ -871,6 +875,8 @@ static irqreturn_t adc_tm5_handler(int irq, void *data)
 
 	while (i < chip->dt_channels) {
 		bool upper_set = false, lower_set = false;
+		u8 data_low = 0, data_high = 0;
+		u16 code = 0;
 		int temp;
 
 		if (!chip->sensor[i].non_thermal &&
@@ -886,6 +892,17 @@ static irqreturn_t adc_tm5_handler(int irq, void *data)
 				i++;
 				continue;
 			}
+			ret = adc_tm5_read_reg(chip, ADC_TM_Mn_DATA0(i),
+						&data_low, 1);
+			if (ret)
+				pr_err("adc_tm data_low read failed with %d\n",
+							ret);
+			ret = adc_tm5_read_reg(chip, ADC_TM_Mn_DATA1(i),
+						&data_high, 1);
+			if (ret)
+				pr_err("adc_tm data_high read failed with %d\n",
+							ret);
+			code = ((data_high << ADC_TM_DATA_SHIFT) | data_low);
 		}
 
 		spin_lock_irqsave(&chip->adc_tm_lock, flags);
@@ -920,7 +937,10 @@ static irqreturn_t adc_tm5_handler(int irq, void *data)
 			 * the appropriate trips.
 			 */
 			pr_debug("notifying of_thermal\n");
-			of_thermal_handle_trip(chip->sensor[i].tzd);
+			temp = therm_fwd_scale((int64_t)code,
+						ADC_HC_VDD_REF, chip->data);
+			of_thermal_handle_trip_temp(chip->sensor[i].tzd,
+						temp);
 		} else {
 			if (lower_set) {
 				ret = adc_tm5_reg_update(chip,
diff --git a/drivers/thermal/qcom/cpu_isolate.c b/drivers/thermal/qcom/cpu_isolate.c
index 74aa4d9..132141c 100644
--- a/drivers/thermal/qcom/cpu_isolate.c
+++ b/drivers/thermal/qcom/cpu_isolate.c
@@ -70,9 +70,11 @@ static int cpu_isolate_pm_notify(struct notifier_block *nb,
 				if (cpu_online(cpu) &&
 					!cpumask_test_and_set_cpu(cpu,
 					&cpus_isolated_by_thermal)) {
+					mutex_unlock(&cpu_isolate_lock);
 					if (sched_isolate_cpu(cpu))
 						cpumask_clear_cpu(cpu,
 						&cpus_isolated_by_thermal);
+					mutex_lock(&cpu_isolate_lock);
 				}
 				continue;
 			}
@@ -182,9 +184,11 @@ static int cpu_isolate_set_cur_state(struct thermal_cooling_device *cdev,
 		if (cpu_online(cpu) &&
 			(!cpumask_test_and_set_cpu(cpu,
 			&cpus_isolated_by_thermal))) {
+			mutex_unlock(&cpu_isolate_lock);
 			if (sched_isolate_cpu(cpu))
 				cpumask_clear_cpu(cpu,
 					&cpus_isolated_by_thermal);
+			mutex_lock(&cpu_isolate_lock);
 		}
 		cpumask_set_cpu(cpu, &cpus_in_max_cooling_level);
 		blocking_notifier_call_chain(&cpu_max_cooling_level_notifer,
@@ -194,6 +198,7 @@ static int cpu_isolate_set_cur_state(struct thermal_cooling_device *cdev,
 			cpu_dev = get_cpu_device(cpu);
 			if (!cpu_dev) {
 				pr_err("CPU:%d cpu dev error\n", cpu);
+				mutex_unlock(&cpu_isolate_lock);
 				return ret;
 			}
 			mutex_unlock(&cpu_isolate_lock);
@@ -203,7 +208,9 @@ static int cpu_isolate_set_cur_state(struct thermal_cooling_device *cdev,
 			return ret;
 		} else if (cpumask_test_and_clear_cpu(cpu,
 			&cpus_isolated_by_thermal)) {
+			mutex_unlock(&cpu_isolate_lock);
 			sched_unisolate_cpu(cpu);
+			mutex_lock(&cpu_isolate_lock);
 		}
 		cpumask_clear_cpu(cpu, &cpus_in_max_cooling_level);
 		blocking_notifier_call_chain(&cpu_max_cooling_level_notifer,
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 671677b..95155ac 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -529,6 +529,7 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 	int cpu, idx = 0;
 	cpumask_t mask = { CPU_BITS_NONE };
 	const __be32 *addr;
+	bool no_cdev_register = false;
 
 	for_each_possible_cpu(cpu) {
 		cpu_node = of_cpu_device_node_get(cpu);
@@ -590,6 +591,9 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	no_cdev_register = of_property_read_bool(dn,
+				"qcom,no-cooling-device-register");
+
 	addr = of_get_address(dn, 0, NULL, NULL);
 	if (!addr) {
 		pr_err("Property llm-base-addr not found\n");
@@ -618,8 +622,14 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 			affinity);
 	tzdev = thermal_zone_of_sensor_register(&pdev->dev, 0, hw,
 			&limits_sensor_ops);
-	if (IS_ERR_OR_NULL(tzdev))
-		return PTR_ERR(tzdev);
+	if (IS_ERR_OR_NULL(tzdev)) {
+		/*
+		 * Ignore error in case if thermal zone devicetree node is not
+		 * defined for this lmh hardware.
+		 */
+		if (!tzdev || PTR_ERR(tzdev) != -ENODEV)
+			return PTR_ERR(tzdev);
+	}
 
 	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
 	if (!hw->min_freq_reg) {
@@ -650,7 +660,7 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 	hw->is_irq_enabled = true;
 	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
 		lmh_dcvs_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT
-		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
+		| IRQF_NO_SUSPEND | IRQF_SHARED, hw->sensor_name, hw);
 	if (ret) {
 		pr_err("Error registering for irq. err:%d\n", ret);
 		ret = 0;
@@ -669,11 +679,14 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 	mutex_unlock(&lmh_dcvs_list_access);
 	lmh_debug_register(pdev);
 
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
-				limits_cpu_online, NULL);
-	if (ret < 0)
-		goto unregister_sensor;
-	ret = 0;
+	if (!no_cdev_register) {
+		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+					"lmh-dcvs/cdev:online",
+					limits_cpu_online, NULL);
+		if (ret < 0)
+			goto unregister_sensor;
+		ret = 0;
+	}
 
 	return ret;
 
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
index d1ea060..cd67695 100644
--- a/drivers/thermal/qcom/qmi_cooling.c
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
@@ -118,6 +118,10 @@ static struct qmi_dev_info device_clients[] = {
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
+		.dev_name = "wlan",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
 		.dev_name = "cpuv_restriction_cold",
 		.type = QMI_CDEV_MIN_LIMIT_TYPE,
 	},
diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c
index 98259a8..67c6af5 100644
--- a/drivers/thermal/qcom/qmi_sensors.c
+++ b/drivers/thermal/qcom/qmi_sensors.c
@@ -43,6 +43,19 @@ enum qmi_ts_sensor {
 	QMI_TS_MODEM_SKIN,
 	QMI_TS_QFE_PA_MDM,
 	QMI_TS_QFE_PA_WTR,
+	QMI_TS_STREAMER_0,
+	QMI_TS_MOD_MMW_0,
+	QMI_TS_MOD_MMW_1,
+	QMI_TS_MOD_MMW_2,
+	QMI_TS_MOD_MMW_3,
+	QMI_TS_RET_PA_0,
+	QMI_TS_WTR_PA_0,
+	QMI_TS_WTR_PA_1,
+	QMI_TS_WTR_PA_2,
+	QMI_TS_WTR_PA_3,
+	QMI_SYS_THERM1,
+	QMI_SYS_THERM2,
+	QMI_TS_TSENS_1,
 	QMI_TS_MAX_NR
 };
 
@@ -87,6 +100,19 @@ static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = {
 	{"xo_therm"},
 	{"qfe_pa_mdm"},
 	{"qfe_pa_wtr"},
+	{"qfe_mmw_streamer0"},
+	{"qfe_mmw0_mod"},
+	{"qfe_mmw1_mod"},
+	{"qfe_mmw2_mod"},
+	{"qfe_mmw3_mod"},
+	{"qfe_ret_pa0"},
+	{"qfe_wtr_pa0"},
+	{"qfe_wtr_pa1"},
+	{"qfe_wtr_pa2"},
+	{"qfe_wtr_pa3"},
+	{"sys_therm1"},
+	{"sys_therm2"},
+	{"modem_tsens1"},
 };
 
 static int32_t encode_qmi(int32_t val)
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 84faccd..d7037a7 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -391,6 +391,8 @@ polling_delay_store(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&tz->lock);
 	tz->polling_delay = delay;
+	if (tz->ops->set_polling_delay)
+		tz->ops->set_polling_delay(tz, delay);
 	mutex_unlock(&tz->lock);
 	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
@@ -418,6 +420,8 @@ passive_delay_store(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&tz->lock);
 	tz->passive_delay = delay;
+	if (tz->ops->set_passive_delay)
+		tz->ops->set_passive_delay(tz, delay);
 	mutex_unlock(&tz->lock);
 	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 28fc4ce..8490a1b6 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -476,6 +476,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
 		goto out;
 
 	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+	if (!sw->uuid) {
+		tb_sw_warn(sw, "cannot allocate memory for switch\n");
+		tb_switch_put(sw);
+		goto out;
+	}
 	sw->connection_id = connection_id;
 	sw->connection_key = connection_key;
 	sw->link = link;
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index 8fe913a..be3f8b5 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -551,6 +551,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
 
 	property->length = size / 4;
 	property->value.data = kzalloc(size, GFP_KERNEL);
+	if (!property->value.data) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
 	memcpy(property->value.data, buf, buflen);
 
 	list_add_tail(&property->list, &parent->properties);
@@ -581,7 +586,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
 		return -ENOMEM;
 
 	property->length = size / 4;
-	property->value.data = kzalloc(size, GFP_KERNEL);
+	property->value.text = kzalloc(size, GFP_KERNEL);
+	if (!property->value.text) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
 	strcpy(property->value.text, text);
 
 	list_add_tail(&property->list, &parent->properties);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index dd9ae6f..bc7efa6 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -9,15 +9,13 @@
 #include <linux/idr.h>
 #include <linux/nvmem-provider.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include "tb.h"
 
-/* Switch authorization from userspace is serialized by this lock */
-static DEFINE_MUTEX(switch_lock);
-
 /* Switch NVM support */
 
 #define NVM_DEVID		0x05
@@ -253,8 +251,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 	struct tb_switch *sw = priv;
 	int ret = 0;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	/*
 	 * Since writing the NVM image might require some special steps,
@@ -274,7 +272,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 	memcpy(sw->nvm->buf + offset, val, bytes);
 
 unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
@@ -363,10 +361,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 	}
 	nvm->non_active = nvm_dev;
 
-	mutex_lock(&switch_lock);
 	sw->nvm = nvm;
-	mutex_unlock(&switch_lock);
-
 	return 0;
 
 err_nvm_active:
@@ -383,10 +378,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
 {
 	struct tb_switch_nvm *nvm;
 
-	mutex_lock(&switch_lock);
 	nvm = sw->nvm;
 	sw->nvm = NULL;
-	mutex_unlock(&switch_lock);
 
 	if (!nvm)
 		return;
@@ -717,8 +710,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
 {
 	int ret = -EINVAL;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->authorized)
 		goto unlock;
@@ -761,7 +754,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
 	}
 
 unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 
@@ -818,15 +811,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
 	struct tb_switch *sw = tb_to_switch(dev);
 	ssize_t ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->key)
 		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
 	else
 		ret = sprintf(buf, "\n");
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 
@@ -843,8 +836,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 	else if (hex2bin(key, buf, sizeof(key)))
 		return -EINVAL;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->authorized) {
 		ret = -EBUSY;
@@ -859,7 +852,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 		}
 	}
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
@@ -905,8 +898,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 	bool val;
 	int ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	/* If NVMem devices are not yet added */
 	if (!sw->nvm) {
@@ -954,7 +947,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 	}
 
 exit_unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 
 	if (ret)
 		return ret;
@@ -968,8 +961,8 @@ static ssize_t nvm_version_show(struct device *dev,
 	struct tb_switch *sw = tb_to_switch(dev);
 	int ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->safe_mode)
 		ret = -ENODATA;
@@ -978,7 +971,7 @@ static ssize_t nvm_version_show(struct device *dev,
 	else
 		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
@@ -1296,13 +1289,14 @@ int tb_switch_configure(struct tb_switch *sw)
 	return tb_plug_events_active(sw, true);
 }
 
-static void tb_switch_set_uuid(struct tb_switch *sw)
+static int tb_switch_set_uuid(struct tb_switch *sw)
 {
 	u32 uuid[4];
-	int cap;
+	int cap, ret;
 
+	ret = 0;
 	if (sw->uuid)
-		return;
+		return ret;
 
 	/*
 	 * The newer controllers include fused UUID as part of link
@@ -1310,7 +1304,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
 	 */
 	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
 	if (cap > 0) {
-		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
+		ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
+		if (ret)
+			return ret;
 	} else {
 		/*
 		 * ICM generates UUID based on UID and fills the upper
@@ -1325,6 +1321,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
 	}
 
 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+	if (!sw->uuid)
+		ret = -ENOMEM;
+	return ret;
 }
 
 static int tb_switch_add_dma_port(struct tb_switch *sw)
@@ -1374,7 +1373,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
-		tb_switch_set_uuid(sw);
+		ret = tb_switch_set_uuid(sw);
+		if (ret)
+			return ret;
 		nvm_set_auth_status(sw, status);
 	}
 
@@ -1424,7 +1425,9 @@ int tb_switch_add(struct tb_switch *sw)
 		}
 		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-		tb_switch_set_uuid(sw);
+		ret = tb_switch_set_uuid(sw);
+		if (ret)
+			return ret;
 
 		for (i = 0; i <= sw->config.max_port_number; i++) {
 			if (sw->ports[i].disabled) {
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5067d69..7a0ee98 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -79,8 +79,7 @@ struct tb_switch_nvm {
  * @depth: Depth in the chain this switch is connected (ICM only)
  *
  * When the switch is being added or removed to the domain (other
- * switches) you need to have domain lock held. For switch authorization
- * internal switch_lock is enough.
+ * switches) you need to have domain lock held.
  */
 struct tb_switch {
 	struct device dev;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index db8bece..befe754 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -743,6 +743,7 @@ static void enumerate_services(struct tb_xdomain *xd)
 	struct tb_service *svc;
 	struct tb_property *p;
 	struct device *dev;
+	int id;
 
 	/*
 	 * First remove all services that are not available anymore in
@@ -771,7 +772,12 @@ static void enumerate_services(struct tb_xdomain *xd)
 			break;
 		}
 
-		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		if (id < 0) {
+			kfree(svc);
+			break;
+		}
+		svc->id = id;
 		svc->dev.bus = &tb_bus_type;
 		svc->dev.type = &tb_service_type;
 		svc->dev.parent = &xd->dev;
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 75155bd..31f53fa 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
 static int __init hvc_sbi_console_init(void)
 {
 	hvc_instantiate(0, 0, &hvc_sbi_ops);
-	add_preferred_console("hvc", 0, NULL);
 
 	return 0;
 }
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 3475e84..4c18bbf 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
 	ipw->common_memory = ioremap(p_dev->resource[2]->start,
 				resource_size(p_dev->resource[2]));
+	if (!ipw->common_memory) {
+		ret = -ENOMEM;
+		goto exit1;
+	}
 	if (!request_mem_region(p_dev->resource[2]->start,
 				resource_size(p_dev->resource[2]),
 				IPWIRELESS_PCCARD_NAME)) {
@@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
 	ipw->attr_memory = ioremap(p_dev->resource[3]->start,
 				resource_size(p_dev->resource[3]));
+	if (!ipw->attr_memory) {
+		ret = -ENOMEM;
+		goto exit3;
+	}
 	if (!request_mem_region(p_dev->resource[3]->start,
 				resource_size(p_dev->resource[3]),
 				IPWIRELESS_PCCARD_NAME)) {
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d..3bdd56a 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
 	struct clk		*clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-	return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
 					    int offset)
 {
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
 	.verify_port	= ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
 	.index		= -1,
 	.data		= &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-	if (!ar933x_uart_console_enabled())
-		return;
-
-	ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
 	.owner		= THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
 	baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
 	up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-	ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_console_ports[up->port.line] = up;
+#endif
 
 	ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
 	if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
 {
 	int ret;
 
-	if (ar933x_uart_console_enabled())
-		ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
 	ret = uart_register_driver(&ar933x_uart_driver);
 	if (ret)
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a1d56a6..4bef897 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -124,6 +124,16 @@
 
 #define DMA_RX_BUF_SIZE		(2048)
 #define UART_CONSOLE_RX_WM	(2)
+#define QUP_VER                 (0x20050000)
+
+struct msm_geni_serial_ver_info {
+	int hw_major_ver;
+	int hw_minor_ver;
+	int hw_step_ver;
+	int m_fw_ver;
+	int s_fw_ver;
+};
+
 struct msm_geni_serial_port {
 	struct uart_port uport;
 	char name[20];
@@ -160,6 +170,7 @@ struct msm_geni_serial_port {
 	int ioctl_count;
 	int edge_count;
 	bool manual_flow;
+	struct msm_geni_serial_ver_info ver_info;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -183,8 +194,8 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport,
 static void msm_geni_serial_stop_rx(struct uart_port *uport);
 static int msm_geni_serial_runtime_resume(struct device *dev);
 static int msm_geni_serial_runtime_suspend(struct device *dev);
-
-static atomic_t uart_line_id = ATOMIC_INIT(0);
+static int msm_geni_serial_get_ver_info(struct uart_port *uport);
+static int uart_line_id;
 
 #define GET_DEV_PORT(uport) \
 	container_of(uport, struct msm_geni_serial_port, uport)
@@ -192,6 +203,11 @@ static atomic_t uart_line_id = ATOMIC_INIT(0);
 static struct msm_geni_serial_port msm_geni_console_port;
 static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
 
+static int hw_version_info(void __iomem *base_addr)
+{
+	return geni_read_reg(base_addr, QUPV3_HW_VER);
+}
+
 static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
 {
 	if (cfg_flags & UART_CONFIG_TYPE)
@@ -312,7 +328,7 @@ static void wait_for_transfers_inflight(struct uart_port *uport)
 static int vote_clock_on(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-	int usage_count = atomic_read(&uport->dev->power.usage_count);
+	int usage_count;
 	int ret = 0;
 
 	ret = msm_geni_serial_power_on(uport);
@@ -321,15 +337,18 @@ static int vote_clock_on(struct uart_port *uport)
 		return ret;
 	}
 	port->ioctl_count++;
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
-		__func__, current->comm, port->ioctl_count, usage_count);
+	usage_count = atomic_read(&uport->dev->power.usage_count);
+	IPC_LOG_MSG(port->ipc_log_pwr,
+		"%s :%s ioctl:%d usage_count:%d edge-Count:%d\n",
+		__func__, current->comm, port->ioctl_count,
+		usage_count, port->edge_count);
 	return 0;
 }
 
 static int vote_clock_off(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-	int usage_count = atomic_read(&uport->dev->power.usage_count);
+	int usage_count;
 
 	if (!pm_runtime_enabled(uport->dev)) {
 		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
@@ -346,7 +365,8 @@ static int vote_clock_off(struct uart_port *uport)
 	wait_for_transfers_inflight(uport);
 	port->ioctl_count--;
 	msm_geni_serial_power_off(uport);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+	usage_count = atomic_read(&uport->dev->power.usage_count);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s:%s ioctl:%d usage_count:%d\n",
 		__func__, current->comm, port->ioctl_count, usage_count);
 	return 0;
 };
@@ -381,7 +401,8 @@ static void msm_geni_serial_break_ctl(struct uart_port *uport, int ctl)
 
 	if (!uart_console(uport) && device_pending_suspend(uport)) {
 		IPC_LOG_MSG(port->ipc_log_misc,
-				"%s.Device is suspended.\n", __func__);
+				"%s.Device is suspended, %s\n",
+				__func__, current->comm);
 		return;
 	}
 
@@ -404,9 +425,14 @@ static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
 {
 	u32 geni_ios = 0;
 	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	if (device_pending_suspend(uport))
+	if (!uart_console(uport) && device_pending_suspend(uport)) {
+		IPC_LOG_MSG(port->ipc_log_misc,
+				"%s.Device is suspended, %s\n",
+				__func__, current->comm);
 		return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+	}
 
 	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
 	if (!(geni_ios & IO2_DATA_IN))
@@ -428,7 +454,8 @@ static void msm_geni_serial_set_mctrl(struct uart_port *uport,
 
 	if (device_pending_suspend(uport)) {
 		IPC_LOG_MSG(port->ipc_log_misc,
-				"%s.Device is suspended.\n", __func__);
+			"%s.Device is suspended, %s: mctrl=0x%x\n",
+			 __func__, current->comm, mctrl);
 		return;
 	}
 	if (!(mctrl & TIOCM_RTS)) {
@@ -441,6 +468,10 @@ static void msm_geni_serial_set_mctrl(struct uart_port *uport,
 							SE_UART_MANUAL_RFR);
 	/* Write to flow control must complete before return to client*/
 	mb();
+	IPC_LOG_MSG(port->ipc_log_misc,
+			"%s:%s, mctrl=0x%x, manual_rfr=0x%x, flow=%s\n",
+			__func__, current->comm, mctrl, uart_manual_rfr,
+			(port->manual_flow ? "OFF" : "ON"));
 }
 
 static const char *msm_geni_serial_get_type(struct uart_port *uport)
@@ -455,7 +486,7 @@ static struct msm_geni_serial_port *get_port_from_line(int line,
 
 	if (is_console) {
 		if ((line < 0) || (line >= GENI_UART_CONS_PORTS))
-			port = ERR_PTR(-ENXIO);
+			return ERR_PTR(-ENXIO);
 		port = &msm_geni_console_port;
 	} else {
 		if ((line < 0) || (line >= GENI_UART_NR_PORTS))
@@ -870,6 +901,7 @@ static void msm_geni_serial_start_tx(struct uart_port *uport)
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	unsigned int geni_status;
 	unsigned int geni_ios;
+	static unsigned int ios_log_limit;
 
 	if (!uart_console(uport) && !pm_runtime_active(uport->dev)) {
 		IPC_LOG_MSG(msm_port->ipc_log_misc,
@@ -912,9 +944,11 @@ static void msm_geni_serial_start_tx(struct uart_port *uport)
 	return;
 check_flow_ctrl:
 	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
-	if (!(geni_ios & IO2_DATA_IN))
+	if (++ios_log_limit % 5 == 0) {
 		IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: ios: 0x%08x\n",
-							__func__, geni_ios);
+						__func__, geni_ios);
+		ios_log_limit = 0;
+	}
 exit_start_tx:
 	if (!uart_console(uport))
 		msm_geni_serial_power_off(uport);
@@ -1015,8 +1049,21 @@ static void start_rx_sequencer(struct uart_port *uport)
 	u32 geni_se_param = UART_PARAM_RFR_OPEN;
 
 	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
-	if (geni_status & S_GENI_CMD_ACTIVE)
+	if (geni_status & S_GENI_CMD_ACTIVE) {
+		if (port->xfer_mode == SE_DMA && !port->rx_dma) {
+			IPC_LOG_MSG(port->ipc_log_misc,
+				"%s: GENI: 0x%x\n", __func__, geni_status);
+			ret = geni_se_rx_dma_prep(port->wrapper_dev,
+				uport->membase, port->rx_buf, DMA_RX_BUF_SIZE,
+				&port->rx_dma);
+			if (ret) {
+				IPC_LOG_MSG(port->ipc_log_misc,
+					"%s: RX buff Fail %d\n", __func__, ret);
+				goto exit_start_rx_sequencer;
+			}
+		}
 		msm_geni_serial_stop_rx(uport);
+	}
 
 	/* Start RX with the RFR_OPEN to keep RFR in always ready state */
 	geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param);
@@ -1049,9 +1096,10 @@ static void start_rx_sequencer(struct uart_port *uport)
 	 * go through.
 	 */
 	mb();
-	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
 exit_start_rx_sequencer:
-	IPC_LOG_MSG(port->ipc_log_misc, "%s 0x%x\n", __func__, geni_status);
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x, dma_dbg:0x%x\n", __func__,
+		geni_status, geni_read_reg(uport->membase, SE_DMA_DEBUG_REG0));
 }
 
 static void msm_geni_serial_start_rx(struct uart_port *uport)
@@ -1149,6 +1197,8 @@ static void stop_rx_sequencer(struct uart_port *uport)
 						      DMA_RX_BUF_SIZE);
 		port->rx_dma = (dma_addr_t)NULL;
 	}
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x\n", __func__, geni_status);
 }
 
 static void msm_geni_serial_stop_rx(struct uart_port *uport)
@@ -1687,16 +1737,6 @@ static int msm_geni_serial_startup(struct uart_port *uport)
 		}
 	}
 
-	if (unlikely(get_se_proto(uport->membase) != UART)) {
-		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
-				 __func__, get_se_proto(uport->membase));
-		ret = -ENXIO;
-		goto exit_startup;
-	}
-	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: FW Ver:0x%x%x\n",
-		__func__,
-		get_se_m_fw(uport->membase), get_se_s_fw(uport->membase));
-
 	get_tx_fifo_size(msm_port);
 	if (!msm_port->port_setup) {
 		if (msm_geni_serial_port_setup(uport))
@@ -1786,7 +1826,6 @@ static void geni_serial_write_term_regs(struct uart_port *uport, u32 loopback,
 						SE_UART_TX_STOP_BIT_LEN);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
-
 	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
 }
 
@@ -1830,10 +1869,6 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 	unsigned long clk_rate;
 	unsigned long flags;
 
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
-	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
-
 	if (!uart_console(uport)) {
 		int ret = msm_geni_serial_power_on(uport);
 
@@ -1858,6 +1893,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 	if (clk_div <= 0)
 		goto exit_set_termios;
 
+	if (hw_version_info(uport->membase) >= QUP_VER)
+		clk_div *= 2;
 	uport->uartclk = clk_rate;
 	clk_set_rate(port->serial_rsc.se_clk, clk_rate);
 	ser_clk_cfg |= SER_CLK_EN;
@@ -2029,6 +2066,23 @@ static ssize_t xfer_mode_store(struct device *dev,
 
 static DEVICE_ATTR_RW(xfer_mode);
 
+static ssize_t ver_info_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+	ssize_t ret = 0;
+	int len = (sizeof(struct msm_geni_serial_ver_info) * 2);
+
+	ret = snprintf(buf, len, "FW ver=0x%x%x, HW ver=%d.%d.%d\n",
+		port->ver_info.m_fw_ver, port->ver_info.m_fw_ver,
+		port->ver_info.hw_major_ver, port->ver_info.hw_minor_ver,
+		port->ver_info.hw_step_ver);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(ver_info);
+
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static int __init msm_geni_console_setup(struct console *co, char *options)
 {
@@ -2134,6 +2188,8 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 		goto exit_geni_serial_earlyconsetup;
 	}
 
+	if (hw_version_info(uport->membase) >= QUP_VER)
+		clk_div *= 2;
 	s_clk_cfg |= SER_CLK_EN;
 	s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
 
@@ -2144,10 +2200,6 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 	msm_geni_serial_poll_cancel_tx(uport);
 	msm_geni_serial_abort_rx(uport);
 
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
-	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
-
 	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
 	geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
 					(DEF_FIFO_DEPTH_WORDS - 2));
@@ -2332,6 +2384,43 @@ static const struct of_device_id msm_geni_device_tbl[] = {
 	{},
 };
 
+static int msm_geni_serial_get_ver_info(struct uart_port *uport)
+{
+	int hw_ver, ret = 0;
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	se_geni_clks_on(&msm_port->serial_rsc);
+	/* Basic HW and FW info */
+	if (unlikely(get_se_proto(uport->membase) != UART)) {
+		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
+			 __func__, get_se_proto(uport->membase));
+		ret = -ENXIO;
+		goto exit_ver_info;
+	}
+
+	msm_port->ver_info.m_fw_ver = get_se_m_fw(uport->membase);
+	msm_port->ver_info.s_fw_ver = get_se_s_fw(uport->membase);
+	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: FW Ver:0x%x%x\n",
+		__func__,
+		msm_port->ver_info.m_fw_ver, msm_port->ver_info.s_fw_ver);
+
+	hw_ver = geni_se_qupv3_hw_version(msm_port->wrapper_dev,
+		&msm_port->ver_info.hw_major_ver,
+		&msm_port->ver_info.hw_minor_ver,
+		&msm_port->ver_info.hw_step_ver);
+	if (hw_ver)
+		dev_err(uport->dev, "%s:Err getting HW version %d\n",
+						__func__, hw_ver);
+	else
+		IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: HW Ver:%x.%x.%x\n",
+			__func__, msm_port->ver_info.hw_major_ver,
+			msm_port->ver_info.hw_minor_ver,
+			msm_port->ver_info.hw_step_ver);
+exit_ver_info:
+	se_geni_clks_off(&msm_port->serial_rsc);
+	return ret;
+}
+
 static int msm_geni_serial_probe(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -2357,19 +2446,21 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 	}
 
 	if (pdev->dev.of_node) {
-		if (drv->cons)
+		if (drv->cons) {
 			line = of_alias_get_id(pdev->dev.of_node, "serial");
-		else
+			if (line < 0)
+				line = 0;
+		} else {
 			line = of_alias_get_id(pdev->dev.of_node, "hsuart");
+			if (line < 0)
+				line = uart_line_id++;
+			else
+				uart_line_id++;
+		}
 	} else {
 		line = pdev->id;
 	}
 
-	if (line < 0)
-		line = atomic_inc_return(&uart_line_id) - 1;
-
-	if ((line < 0) || (line >= GENI_UART_NR_PORTS))
-		return -ENXIO;
 	is_console = (drv->cons ? true : false);
 	dev_port = get_port_from_line(line, is_console);
 	if (IS_ERR_OR_NULL(dev_port)) {
@@ -2478,13 +2569,22 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 	}
 	dev_port->serial_rsc.geni_gpio_active =
 		pinctrl_lookup_state(dev_port->serial_rsc.geni_pinctrl,
-							PINCTRL_DEFAULT);
-	if (IS_ERR_OR_NULL(dev_port->serial_rsc.geni_gpio_active)) {
-		dev_err(&pdev->dev, "No default config specified!\n");
-		ret = PTR_ERR(dev_port->serial_rsc.geni_gpio_active);
-		goto exit_geni_serial_probe;
-	}
+							PINCTRL_ACTIVE);
 
+	if (IS_ERR_OR_NULL(dev_port->serial_rsc.geni_gpio_active)) {
+		/*
+		 * Backward compatible: some chips may not have an ACTIVE
+		 * pinctrl state defined.
+		 */
+		dev_port->serial_rsc.geni_gpio_active =
+		pinctrl_lookup_state(dev_port->serial_rsc.geni_pinctrl,
+							PINCTRL_DEFAULT);
+		if (IS_ERR_OR_NULL(dev_port->serial_rsc.geni_gpio_active)) {
+			dev_err(&pdev->dev, "No default config specified!\n");
+			ret = PTR_ERR(dev_port->serial_rsc.geni_gpio_active);
+			goto exit_geni_serial_probe;
+		}
+	}
 	/*
 	 * For clients who setup an Inband wakeup, leave the GPIO pins
 	 * always connected to the core, else move the pins to their
@@ -2531,18 +2631,16 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 		pm_runtime_enable(&pdev->dev);
 	}
 
-	se_geni_clks_on(&dev_port->serial_rsc);
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
-	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
-	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
-	se_geni_clks_off(&dev_port->serial_rsc);
-
 	dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
 				line, uport->fifosize, is_console);
 	device_create_file(uport->dev, &dev_attr_loopback);
 	device_create_file(uport->dev, &dev_attr_xfer_mode);
+	device_create_file(uport->dev, &dev_attr_ver_info);
 	msm_geni_serial_debug_init(uport, is_console);
 	dev_port->port_setup = false;
+	ret = msm_geni_serial_get_ver_info(uport);
+	if (ret)
+		goto exit_geni_serial_probe;
 	return uart_add_one_port(drv, uport);
 
 exit_geni_serial_probe:
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 47b4115..55b178c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1481,7 +1481,7 @@ static int __init sc16is7xx_init(void)
 	ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-		return ret;
+		goto err_i2c;
 	}
 #endif
 
@@ -1489,10 +1489,18 @@ static int __init sc16is7xx_init(void)
 	ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-		return ret;
+		goto err_spi;
 	}
 #endif
 	return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+	i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+	uart_unregister_driver(&sc16is7xx_uart);
+	return ret;
 }
 module_init(sc16is7xx_init);
 
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index cbbf239..03fe3fb 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2497,14 +2497,16 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			 * center of the last stop bit in sampling clocks.
 			 */
 			int last_stop = bits * 2 - 1;
-			int deviation = min_err * srr * last_stop / 2 / baud;
+			int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+							  (int)(srr + 1),
+							  2 * (int)baud);
 
 			if (abs(deviation) >= 2) {
 				/* At least two sampling clocks off at the
 				 * last stop bit; we can increase the error
 				 * margin by shifting the sampling point.
 				 */
-				int shift = min(-8, max(7, deviation / 2));
+				int shift = clamp(deviation / 2, -8, 7);
 
 				hssrr |= (shift << HSCIF_SRHP_SHIFT) &
 					 HSCIF_SRHP_MASK;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 88312c6..0617e87 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
 static struct input_handler kbd_handler;
 static DEFINE_SPINLOCK(kbd_event_lock);
 static DEFINE_SPINLOCK(led_lock);
+static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
 static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];	/* keyboard key bitmap */
 static unsigned char shift_down[NR_SHIFT];		/* shift state counters.. */
 static bool dead_key_next;
@@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
 	char *p;
 	u_char *q;
 	u_char __user *up;
-	int sz;
+	int sz, fnw_sz;
 	int delta;
 	char *first_free, *fj, *fnw;
 	int i, j, k;
 	int ret;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_TTY_CONFIG))
 		perm = 0;
@@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
 			goto reterr;
 		}
 
+		fnw = NULL;
+		fnw_sz = 0;
+		/* race against other writers */
+		again:
+		spin_lock_irqsave(&func_buf_lock, flags);
 		q = func_table[i];
+
+		/* fj points to the next entry after 'q' */
 		first_free = funcbufptr + (funcbufsize - funcbufleft);
 		for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
 			;
@@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
 			fj = func_table[j];
 		else
 			fj = first_free;
-
+		/* buffer usage change caused by the new entry */
 		delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
+
 		if (delta <= funcbufleft) { 	/* it fits in current buf */
 		    if (j < MAX_NR_FUNC) {
+			/* make enough space for new entry at 'fj' */
 			memmove(fj + delta, fj, first_free - fj);
 			for (k = j; k < MAX_NR_FUNC; k++)
 			    if (func_table[k])
@@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
 		    sz = 256;
 		    while (sz < funcbufsize - funcbufleft + delta)
 		      sz <<= 1;
-		    fnw = kmalloc(sz, GFP_KERNEL);
-		    if(!fnw) {
-		      ret = -ENOMEM;
-		      goto reterr;
+		    if (fnw_sz != sz) {
+		      spin_unlock_irqrestore(&func_buf_lock, flags);
+		      kfree(fnw);
+		      fnw = kmalloc(sz, GFP_KERNEL);
+		      fnw_sz = sz;
+		      if (!fnw) {
+			ret = -ENOMEM;
+			goto reterr;
+		      }
+		      goto again;
 		    }
 
 		    if (!q)
 		      func_table[i] = fj;
+		    /* copy data before insertion point to new location */
 		    if (fj > funcbufptr)
 			memmove(fnw, funcbufptr, fj - funcbufptr);
 		    for (k = 0; k < j; k++)
 		      if (func_table[k])
 			func_table[k] = fnw + (func_table[k] - funcbufptr);
 
+		    /* copy data after insertion point to new location */
 		    if (first_free > fj) {
 			memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
 			for (k = j; k < MAX_NR_FUNC; k++)
@@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
 		    funcbufleft = funcbufleft - delta + sz - funcbufsize;
 		    funcbufsize = sz;
 		}
+		/* finally insert item itself */
 		strcpy(func_table[i], kbs->kb_string);
+		spin_unlock_irqrestore(&func_buf_lock, flags);
 		break;
 	}
 	ret = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index b9a9a07..f93b948 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1521,7 +1521,8 @@ static void csi_J(struct vc_data *vc, int vpar)
 			return;
 	}
 	scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-	update_region(vc, (unsigned long) start, count);
+	if (con_should_update(vc))
+		do_update_region(vc, (unsigned long) start, count);
 	vc->vc_need_wrap = 0;
 }
 
@@ -4154,8 +4155,6 @@ void do_blank_screen(int entering_gfx)
 		return;
 	}
 
-	if (blank_state != blank_normal_wait)
-		return;
 	blank_state = blank_off;
 
 	/* don't blank graphics */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 32da5a4..5b442bc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb)
 	struct acm *acm = rb->instance;
 	unsigned long flags;
 	int status = urb->status;
+	bool stopped = false;
+	bool stalled = false;
 
 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
 		rb->index, urb->actual_length, status);
 
-	set_bit(rb->index, &acm->read_urbs_free);
-
 	if (!acm->dev) {
 		dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
 		return;
@@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb)
 		break;
 	case -EPIPE:
 		set_bit(EVENT_RX_STALL, &acm->flags);
-		schedule_work(&acm->work);
-		return;
+		stalled = true;
+		break;
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		dev_dbg(&acm->data->dev,
 			"%s - urb shutting down with status: %d\n",
 			__func__, status);
-		return;
+		stopped = true;
+		break;
 	default:
 		dev_dbg(&acm->data->dev,
 			"%s - nonzero urb status received: %d\n",
@@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb)
 	}
 
 	/*
-	 * Unthrottle may run on another CPU which needs to see events
-	 * in the same order. Submission has an implict barrier
+	 * Make sure URB processing is done before marking as free to avoid
+	 * racing with unthrottle() on another CPU. Matches the barriers
+	 * implied by the test_and_clear_bit() in acm_submit_read_urb().
 	 */
 	smp_mb__before_atomic();
+	set_bit(rb->index, &acm->read_urbs_free);
+	/*
+	 * Make sure URB is marked as free before checking the throttled flag
+	 * to avoid racing with unthrottle() on another CPU. Matches the
+	 * smp_mb() in unthrottle().
+	 */
+	smp_mb__after_atomic();
+
+	if (stopped || stalled) {
+		if (stalled)
+			schedule_work(&acm->work);
+		return;
+	}
 
 	/* throttle device if requested by tty */
 	spin_lock_irqsave(&acm->read_lock, flags);
@@ -842,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
 	acm->throttle_req = 0;
 	spin_unlock_irq(&acm->read_lock);
 
+	/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
+	smp_mb();
+
 	if (was_throttled)
 		acm_submit_read_urbs(acm, GFP_KERNEL);
 }
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6529ba4..60c2ec4 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
 		pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
 
-	/* Undo any residual pm_autopm_get_interface_* calls */
-	for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
-		usb_autopm_put_interface_no_suspend(intf);
-	atomic_set(&intf->pm_usage_cnt, 0);
-
 	if (!error)
 		usb_autosuspend_device(udev);
 
@@ -1648,7 +1643,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put_sync(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1677,7 +1671,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1699,7 +1692,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	pm_runtime_put_noidle(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1730,8 +1722,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
 	status = pm_runtime_get_sync(&intf->dev);
 	if (status < 0)
 		pm_runtime_put_sync(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1765,8 +1755,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
 	status = pm_runtime_get(&intf->dev);
 	if (status < 0 && status != -EINPROGRESS)
 		pm_runtime_put_noidle(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1790,7 +1778,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_inc(&intf->pm_usage_cnt);
 	pm_runtime_get_noresume(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
@@ -1911,14 +1898,11 @@ int usb_runtime_idle(struct device *dev)
 	return -EBUSY;
 }
 
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 {
 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 	int ret = -EPERM;
 
-	if (enable && !udev->usb2_hw_lpm_allowed)
-		return 0;
-
 	if (hcd->driver->set_usb2_hw_lpm) {
 		ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
 		if (!ret)
@@ -1928,6 +1912,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 	return ret;
 }
 
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_capable ||
+	    !udev->usb2_hw_lpm_allowed ||
+	    udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
 #endif /* CONFIG_PM */
 
 struct bus_type usb_bus_type = {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1a86b5a..0f6e80d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3085,6 +3085,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
 {
 	struct usb_hcd *hcd = platform_get_drvdata(dev);
 
+	/* No need for pm_runtime_put(), we're shutting down */
+	pm_runtime_get_sync(&dev->dev);
+
 	if (hcd->driver->shutdown)
 		hcd->driver->shutdown(hcd);
 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0bd1fc3..27bed4a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3228,8 +3228,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	}
 
 	/* disable USB2 hardware LPM */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	if (usb_disable_ltm(udev)) {
 		dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
@@ -3267,8 +3266,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 		usb_enable_ltm(udev);
  err_ltm:
 		/* Try to enable USB2 hardware LPM again */
-		if (udev->usb2_hw_lpm_capable == 1)
-			usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 
 		if (udev->do_remote_wakeup)
 			(void) usb_disable_remote_wakeup(udev);
@@ -3554,8 +3552,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 		hub_port_logical_disconnect(hub, port1);
 	} else  {
 		/* Try to enable USB2 hardware LPM */
-		if (udev->usb2_hw_lpm_capable == 1)
-			usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 
 		/* Try to enable USB3 LTM */
 		usb_enable_ltm(udev);
@@ -4446,7 +4443,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
 	if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
 			connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
 		udev->usb2_hw_lpm_allowed = 1;
-		usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 	}
 }
 
@@ -5622,8 +5619,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
 	/* Disable USB2 hardware LPM.
 	 * It will be re-enabled by the enumeration process.
 	 */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	/* Disable LPM while we reset the device and reinstall the alt settings.
 	 * Device-initiated LPM, and system exit latency settings are cleared
@@ -5726,7 +5722,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
 
 done:
 	/* Now that the alt settings are re-installed, enable LTM and LPM. */
-	usb_set_usb2_hardware_lpm(udev, 1);
+	usb_enable_usb2_hardware_lpm(udev);
 	usb_unlocked_enable_lpm(udev);
 	usb_enable_ltm(udev);
 	usb_release_bos_descriptor(udev);
@@ -5841,7 +5837,10 @@ int usb_reset_device(struct usb_device *udev)
 					cintf->needs_binding = 1;
 			}
 		}
-		usb_unbind_and_rebind_marked_interfaces(udev);
+
+		/* If the reset failed, hub_wq will unbind drivers later */
+		if (ret == 0)
+			usb_unbind_and_rebind_marked_interfaces(udev);
 	}
 
 	usb_autosuspend_device(udev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index bfa5eda..4020ce8d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (size <= 0 || !buf || !index)
+	if (size <= 0 || !buf)
 		return -EINVAL;
 	buf[0] = 0;
+	if (index <= 0 || index >= 256)
+		return -EINVAL;
 	tbuf = kmalloc(256, GFP_NOIO);
 	if (!tbuf)
 		return -ENOMEM;
@@ -1243,8 +1245,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 			dev->actconfig->interface[i] = NULL;
 		}
 
-		if (dev->usb2_hw_lpm_enabled == 1)
-			usb_set_usb2_hardware_lpm(dev, 0);
+		usb_disable_usb2_hardware_lpm(dev);
 		usb_unlocked_disable_lpm(dev);
 		usb_disable_ltm(dev);
 
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index ea18284..7e88fdf 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
 
 	if (!ret) {
 		udev->usb2_hw_lpm_allowed = value;
-		ret = usb_set_usb2_hardware_lpm(udev, value);
+		if (value)
+			ret = usb_enable_usb2_hardware_lpm(udev);
+		else
+			ret = usb_disable_usb2_hardware_lpm(udev);
 	}
 
 	usb_unlock_device(udev);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index a009cad..8788c42 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
 extern int usb_runtime_suspend(struct device *dev);
 extern int usb_runtime_resume(struct device *dev);
 extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
 
 #else
 
@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
 	return 0;
 }
 
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
 {
 	return 0;
 }
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 220c0f9..03614ef 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -675,13 +675,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
 	unsigned int maxsize;
 
 	if (is_isoc)
-		maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
-					   DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
+					   MAX_DMA_DESC_NUM_HS_ISOC;
 	else
-		maxsize = DEV_DMA_NBYTES_LIMIT;
-
-	/* Above size of one descriptor was chosen, multiple it */
-	maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
 
 	return maxsize;
 }
@@ -864,7 +862,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
 
 	/* Update index of last configured entry in the chain */
 	hs_ep->next_desc++;
-	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
+	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
 		hs_ep->next_desc = 0;
 
 	return 0;
@@ -896,7 +894,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
 	}
 
 	/* Initialize descriptor chain by Host Busy status */
-	for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
+	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
 		desc = &hs_ep->desc_list[i];
 		desc->status = 0;
 		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
@@ -2083,7 +2081,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
 
 		hs_ep->compl_desc++;
-		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
+		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
 			hs_ep->compl_desc = 0;
 		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
 	}
@@ -3779,6 +3777,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	unsigned int i, val, size;
 	int ret = 0;
 	unsigned char ep_type;
+	int desc_num;
 
 	dev_dbg(hsotg->dev,
 		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
@@ -3825,11 +3824,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
 		__func__, epctrl, epctrl_reg);
 
+	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
+		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
+	else
+		desc_num = MAX_DMA_DESC_NUM_GENERIC;
+
 	/* Allocate DMA descriptor chain for non-ctrl endpoints */
 	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
 		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
-			MAX_DMA_DESC_NUM_GENERIC *
-			sizeof(struct dwc2_dma_desc),
+			desc_num * sizeof(struct dwc2_dma_desc),
 			&hs_ep->desc_list_dma, GFP_ATOMIC);
 		if (!hs_ep->desc_list) {
 			ret = -ENOMEM;
@@ -3971,7 +3974,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 
 error2:
 	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
-		dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+		dmam_free_coherent(hsotg->dev, desc_num *
 			sizeof(struct dwc2_dma_desc),
 			hs_ep->desc_list, hs_ep->desc_list_dma);
 		hs_ep->desc_list = NULL;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f11322a..82c761d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1210,7 +1210,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
 	u8			tx_max_burst_prd;
 
 	/* default to highest possible threshold */
-	lpm_nyet_threshold = 0xff;
+	lpm_nyet_threshold = 0xf;
 
 	/* default to -3.5dB de-emphasis */
 	tx_de_emphasis = 1;
@@ -1627,6 +1627,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 		spin_lock_irqsave(&dwc->lock, flags);
 		dwc3_gadget_suspend(dwc);
 		spin_unlock_irqrestore(&dwc->lock, flags);
+		synchronize_irq(dwc->irq_gadget);
 		dwc3_core_exit(dwc);
 		break;
 	case DWC3_GCTL_PRTCAP_HOST:
@@ -1659,6 +1660,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 			spin_lock_irqsave(&dwc->lock, flags);
 			dwc3_gadget_suspend(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
+			synchronize_irq(dwc->irq_gadget);
 		}
 
 		dwc3_otg_exit(dwc);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 9b27f20..55bb9f6 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -296,6 +296,7 @@ struct dwc3_msm {
 	unsigned int		max_power;
 	bool			charging_disabled;
 	enum dwc3_drd_state	drd_state;
+	enum bus_vote		default_bus_vote;
 	enum bus_vote		override_bus_vote;
 	u32			bus_perf_client;
 	struct msm_bus_scale_pdata	*bus_scale_table;
@@ -337,10 +338,13 @@ struct dwc3_msm {
 	enum usb_device_speed override_usb_speed;
 	u32			*gsi_reg;
 	int			gsi_reg_offset_cnt;
+	bool			gsi_io_coherency_disabled;
 
 	struct notifier_block	dpdm_nb;
 	struct regulator	*dpdm_reg;
 
+	u64			dummy_gsi_db;
+	dma_addr_t		dummy_gsi_db_dma;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
@@ -1002,6 +1006,14 @@ static void gsi_store_ringbase_dbl_info(struct usb_ep *ep,
 		ep->name, request->db_reg_phs_addr_lsb,
 		(unsigned long long)request->mapped_db_reg_phs_addr_lsb);
 
+	/*
+	 * Replace dummy doorbell address with real one as IPA connection
+	 * is setup now and GSI must be ready to handle doorbell updates.
+	 */
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_DBL_ADDR_H(mdwc->gsi_reg[DBL_ADDR_H], (n)),
+			~0x0, 0x0);
+
 	dwc3_msm_write_reg(mdwc->base,
 		GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)),
 		(u32)request->mapped_db_reg_phs_addr_lsb);
@@ -1099,7 +1111,7 @@ static void gsi_endxfer_for_ep(struct usb_ep *ep)
 }
 
 /**
- * Allocates and configures TRBs for GSI EPs.
+ * Allocates buffers and TRBs. Configures TRBs for GSI EPs.
  *
  * @usb_ep - pointer to usb_ep instance.
  * @request - pointer to GSI request.
@@ -1109,23 +1121,49 @@ static void gsi_endxfer_for_ep(struct usb_ep *ep)
 static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
 {
 	int i = 0;
-	dma_addr_t buffer_addr = req->dma;
+	size_t len;
+	unsigned long dma_attr;
+	dma_addr_t buffer_addr;
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3		*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
 	struct dwc3_trb *trb;
 	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
 					: (req->num_bufs + 2);
 	struct scatterlist *sg;
 	struct sg_table *sgt;
 
-	dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
+	if (mdwc->gsi_io_coherency_disabled)
+		dma_attr = DMA_ATTR_FORCE_NON_COHERENT;
+	else
+		dma_attr = DMA_ATTR_FORCE_COHERENT;
+
+	/* Allocate TRB buffers */
+
+	len = req->buf_len * req->num_bufs;
+	req->buf_base_addr = dma_alloc_attrs(dwc->sysdev, len, &req->dma,
+					GFP_KERNEL, dma_attr);
+	if (!req->buf_base_addr) {
+		dev_err(dwc->dev, "%s: buf_base_addr allocate failed %s\n",
+				dep->name);
+		return -ENOMEM;
+	}
+
+	dma_get_sgtable(dwc->sysdev, &req->sgt_data_buff, req->buf_base_addr,
+			req->dma, len);
+
+	buffer_addr = req->dma;
+
+	/* Allocate and configgure TRBs */
+
+	dep->trb_pool = dma_alloc_attrs(dwc->sysdev,
 				num_trbs * sizeof(struct dwc3_trb),
-				&dep->trb_pool_dma, GFP_KERNEL);
+				&dep->trb_pool_dma, GFP_KERNEL, dma_attr);
 
 	if (!dep->trb_pool) {
 		dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
 				dep->name);
-		return -ENOMEM;
+		goto free_trb_buffer;
 	}
 
 	dep->num_trbs = num_trbs;
@@ -1222,10 +1260,17 @@ static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
 	}
 
 	return 0;
+
+free_trb_buffer:
+	dma_free_attrs(dwc->sysdev, len, req->buf_base_addr, req->dma,
+			dma_attr);
+	req->buf_base_addr = NULL;
+	sg_free_table(&req->sgt_data_buff);
+	return -ENOMEM;
 }
 
 /**
- * Frees TRBs for GSI EPs.
+ * Frees TRBs and buffers for GSI EPs.
  *
  * @usb_ep - pointer to usb_ep instance.
  *
@@ -1234,20 +1279,32 @@ static void gsi_free_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	unsigned long dma_attr;
 
 	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
 		return;
 
+	if (mdwc->gsi_io_coherency_disabled)
+		dma_attr = DMA_ATTR_FORCE_NON_COHERENT;
+	else
+		dma_attr = DMA_ATTR_FORCE_COHERENT;
+
 	/*  Free TRBs and TRB pool for EP */
 	if (dep->trb_pool_dma) {
-		dma_free_coherent(dwc->sysdev,
+		dma_free_attrs(dwc->sysdev,
 			dep->num_trbs * sizeof(struct dwc3_trb),
-			dep->trb_pool,
-			dep->trb_pool_dma);
+			dep->trb_pool, dep->trb_pool_dma, dma_attr);
 		dep->trb_pool = NULL;
 		dep->trb_pool_dma = 0;
 	}
 	sg_free_table(&req->sgt_trb_xfer_ring);
+
+	/* free TRB buffers */
+	dma_free_attrs(dwc->sysdev, req->buf_len * req->num_bufs,
+		req->buf_base_addr, req->dma, dma_attr);
+	req->buf_base_addr = NULL;
+	sg_free_table(&req->sgt_data_buff);
 }
 /**
  * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
@@ -1263,9 +1320,22 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
 	struct dwc3_gadget_ep_cmd_params params;
 	const struct usb_endpoint_descriptor *desc = ep->desc;
 	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+	int n = ep->ep_intr_num - 1;
 	u32 reg;
 	int ret;
 
+	/* setup dummy doorbell as IPA connection isn't setup yet */
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_DBL_ADDR_H(mdwc->gsi_reg[DBL_ADDR_H], (n)),
+			~0x0, (u32)((u64)mdwc->dummy_gsi_db_dma >> 32));
+
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)),
+			~0x0, (u32)mdwc->dummy_gsi_db_dma);
+	dev_dbg(mdwc->dev, "Dummy DB Addr %pK: %llx %llx (LSB)\n",
+		&mdwc->dummy_gsi_db, mdwc->dummy_gsi_db_dma,
+		(u32)mdwc->dummy_gsi_db_dma);
+
 	memset(&params, 0x00, sizeof(params));
 
 	/* Configure GSI EP */
@@ -1797,24 +1867,6 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
 	return ret;
 }
 
-/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
-static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
-{
-	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
-		/* On older cores set XHCI_REV bit to specify revision 1.0 */
-		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
-					 BIT(2), 1);
-
-	/*
-	 * Enable master clock for RAMs to allow BAM to access RAMs when
-	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
-	 * are seen where RAM clocks get turned OFF in SS mode
-	 */
-	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
-		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
-
-}
-
 static void dwc3_msm_vbus_draw_work(struct work_struct *w)
 {
 	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
@@ -1824,6 +1876,55 @@ static void dwc3_msm_vbus_draw_work(struct work_struct *w)
 	dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
 }
 
+static void dwc3_gsi_event_buf_alloc(struct dwc3 *dwc)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_event_buffer *evt;
+	int i;
+
+	if (!mdwc->num_gsi_event_buffers)
+		return;
+
+	mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
+		sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
+		GFP_KERNEL);
+	if (!mdwc->gsi_ev_buff) {
+		dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
+		return;
+	}
+
+	for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
+
+		evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
+		if (!evt)
+			return;
+		evt->dwc	= dwc;
+		evt->length	= DWC3_EVENT_BUFFERS_SIZE;
+		evt->buf	= dma_alloc_coherent(dwc->sysdev,
+					DWC3_EVENT_BUFFERS_SIZE,
+					&evt->dma, GFP_KERNEL);
+		if (!evt->buf) {
+			dev_err(dwc->dev,
+				"can't allocate gsi_evt_buf(%d)\n", i);
+			return;
+		}
+		mdwc->gsi_ev_buff[i] = evt;
+	}
+	/*
+	 * Set-up dummy buffer to use as doorbell while IPA GSI
+	 * connection is in progress.
+	 */
+	mdwc->dummy_gsi_db_dma = dma_map_single(dwc->sysdev,
+					&mdwc->dummy_gsi_db,
+					sizeof(mdwc->dummy_gsi_db),
+					DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(dwc->sysdev, mdwc->dummy_gsi_db_dma)) {
+		dev_err(dwc->dev, "failed to map dummy doorbell buffer\n");
+		mdwc->dummy_gsi_db_dma = (dma_addr_t)NULL;
+	}
+}
+
 static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 							unsigned int value)
 {
@@ -1849,11 +1950,6 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 		/* restart USB which performs full reset and reconnect */
 		schedule_work(&mdwc->restart_usb_work);
 		break;
-	case DWC3_CONTROLLER_RESET_EVENT:
-		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
-		/* HS & SSPHYs get reset as part of core soft reset */
-		dwc3_msm_qscratch_reg_init(mdwc);
-		break;
 	case DWC3_CONTROLLER_POST_RESET_EVENT:
 		dev_dbg(mdwc->dev,
 				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");
@@ -1909,7 +2005,6 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 			dwc->gadget.is_selfpowered = val.intval;
 		else
 			dwc->gadget.is_selfpowered = 0;
-
 		break;
 	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
 		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
@@ -1928,35 +2023,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 		break;
 	case DWC3_GSI_EVT_BUF_ALLOC:
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
-
-		if (!mdwc->num_gsi_event_buffers)
-			break;
-
-		mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
-			sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
-			GFP_KERNEL);
-		if (!mdwc->gsi_ev_buff) {
-			dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
-			break;
-		}
-
-		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
-
-			evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
-			if (!evt)
-				break;
-			evt->dwc	= dwc;
-			evt->length	= DWC3_EVENT_BUFFERS_SIZE;
-			evt->buf	= dma_alloc_coherent(dwc->sysdev,
-						DWC3_EVENT_BUFFERS_SIZE,
-						&evt->dma, GFP_KERNEL);
-			if (!evt->buf) {
-				dev_err(dwc->dev,
-					"can't allocate gsi_evt_buf(%d)\n", i);
-				break;
-			}
-			mdwc->gsi_ev_buff[i] = evt;
-		}
+		dwc3_gsi_event_buf_alloc(dwc);
 		break;
 	case DWC3_GSI_EVT_BUF_SETUP:
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
@@ -2021,6 +2088,12 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 				dma_free_coherent(dwc->sysdev, evt->length,
 							evt->buf, evt->dma);
 		}
+		if (mdwc->dummy_gsi_db_dma) {
+			dma_unmap_single(dwc->sysdev, mdwc->dummy_gsi_db_dma,
+					 sizeof(mdwc->dummy_gsi_db),
+					 DMA_FROM_DEVICE);
+			mdwc->dummy_gsi_db_dma = (dma_addr_t)NULL;
+		}
 		break;
 	case DWC3_GSI_EVT_BUF_CLEAR:
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEAR\n");
@@ -2312,7 +2385,7 @@ static int dwc3_msm_update_bus_bw(struct dwc3_msm *mdwc, enum bus_vote bv)
 	 * from userspace.
 	 */
 	if (bv >= mdwc->bus_scale_table->num_usecases)
-		bv_index = BUS_VOTE_NOMINAL;
+		bv_index = mdwc->default_bus_vote;
 	else if (bv == BUS_VOTE_NONE)
 		bv_index = BUS_VOTE_NONE;
 
@@ -2542,7 +2615,7 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
 	if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH)
 		dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_SVS);
 	else
-		dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NOMINAL);
+		dwc3_msm_update_bus_bw(mdwc, mdwc->default_bus_vote);
 
 	/* Vote for TCXO while waking up USB HSPHY */
 	ret = clk_prepare_enable(mdwc->xo_clk);
@@ -2748,7 +2821,7 @@ static void dwc3_resume_work(struct work_struct *w)
 			dwc->maximum_speed = USB_SPEED_HIGH;
 
 		if (mdwc->override_usb_speed &&
-				mdwc->override_usb_speed < dwc->maximum_speed) {
+			mdwc->override_usb_speed <= dwc->maximum_speed) {
 			dwc->maximum_speed = mdwc->override_usb_speed;
 			dwc->gadget.max_speed = dwc->maximum_speed;
 			dbg_event(0xFF, "override_speed",
@@ -3338,7 +3411,7 @@ static ssize_t bus_vote_store(struct device *dev,
 			&& (mdwc->max_rh_port_speed == USB_SPEED_HIGH))
 			bv = BUS_VOTE_SVS;
 		else
-			bv = BUS_VOTE_NOMINAL;
+			bv = mdwc->default_bus_vote;
 
 		dwc3_msm_update_bus_bw(mdwc, bv);
 	}
@@ -3558,6 +3631,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 
 	mdwc->use_pdc_interrupts = of_property_read_bool(node,
 				"qcom,use-pdc-interrupts");
+
+	mdwc->gsi_io_coherency_disabled = of_property_read_bool(node,
+				"qcom,gsi-disable-io-coherency");
+
 	dwc3_set_notifier(&dwc3_msm_notify_event);
 
 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
@@ -3607,10 +3684,20 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 		goto put_dwc3;
 	}
 
+	/* use default as nominal bus voting */
+	mdwc->default_bus_vote = BUS_VOTE_NOMINAL;
+	ret = of_property_read_u32(node, "qcom,default-bus-vote",
+			&mdwc->default_bus_vote);
+
 	mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
 	if (mdwc->bus_scale_table) {
 		mdwc->bus_perf_client =
 			msm_bus_scale_register_client(mdwc->bus_scale_table);
+
+		/* default_bus_vote is out of range, use nominal bus voting */
+		if (mdwc->default_bus_vote >=
+				mdwc->bus_scale_table->num_usecases)
+			mdwc->default_bus_vote = BUS_VOTE_NOMINAL;
 	}
 
 	dwc = platform_get_drvdata(mdwc->dwc3);
@@ -3855,7 +3942,7 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb,
 			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
 				mdwc->core_clk_rate);
 			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
-			dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NOMINAL);
+			dwc3_msm_update_bus_bw(mdwc, mdwc->default_bus_vote);
 		}
 	}
 
@@ -4233,6 +4320,14 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
 
 	psy_type = get_psy_type(mdwc);
 	if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		/*
+		 * Do not notify charger driver for any current and
+		 * bail out if suspend happened with float cable
+		 * connected
+		 */
+		if (mA == 2)
+			return 0;
+
 		if (!mA)
 			pval.intval = -ETIMEDOUT;
 		else
@@ -4243,11 +4338,11 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
 	if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
 		return 0;
 
-	dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
 	/* Set max current limit in uA */
 	pval.intval = 1000 * mA;
 
 set_prop:
+	dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
 	ret = power_supply_set_property(mdwc->usb_psy,
 				POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
 	if (ret) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e..8cced36 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M		0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL			0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP			0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH		0x02ee
 #define PCI_DEVICE_ID_INTEL_GLK			0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP		0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
 	  (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4777bde..0ca09f2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -199,7 +199,7 @@ void dwc3_ep_inc_deq(struct dwc3_ep *dep)
 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
 	int		fifo_size, mdwidth, max_packet = 1024;
-	int		tmp, mult = 1, size;
+	int		tmp, mult = 1, fifo_0_start;
 
 	if (!dwc->needs_fifo_resize || !dwc->tx_fifo_size)
 		return 0;
@@ -234,13 +234,11 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
 	fifo_size = DIV_ROUND_UP(tmp, mdwidth);
 	dep->fifo_depth = fifo_size;
 
-	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
-	if (dwc3_is_usb31(dwc))
-		size = DWC31_GTXFIFOSIZ_TXFDEF(size);
-	else
-		size = DWC3_GTXFIFOSIZ_TXFDEF(size);
+	/* Check if TXFIFOs start at non-zero addr */
+	tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
+	fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
 
-	fifo_size |= (size + (dwc->last_fifo_depth << 16));
+	fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
 	if (dwc3_is_usb31(dwc))
 		dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEF(fifo_size);
 	else
@@ -360,8 +358,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 	return ret;
 }
 
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
-
 /**
  * dwc3_send_gadget_ep_cmd - issue an endpoint command
  * @dep: the endpoint to which the command is going to be issued
@@ -400,20 +396,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		}
 	}
 
-	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
-		int		needs_wakeup;
-
-		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
-				dwc->link_state == DWC3_LINK_STATE_U2 ||
-				dwc->link_state == DWC3_LINK_STATE_U3);
-
-		if (unlikely(needs_wakeup)) {
-			ret = __dwc3_gadget_wakeup(dwc);
-			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
-					ret);
-		}
-	}
-
 	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
 	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
 	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
@@ -842,6 +824,21 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 
 		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
 	}
+
+	if (dep->number == 1 && dwc->ep0state != EP0_SETUP_PHASE) {
+		unsigned int dir;
+
+		dbg_log_string("CTRLPEND", dwc->ep0state);
+		dir = !!dwc->ep0_expect_in;
+		if (dwc->ep0state == EP0_DATA_PHASE)
+			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+		else
+			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+		dwc->eps[0]->trb_enqueue = 0;
+		dwc->eps[1]->trb_enqueue = 0;
+	}
+
 	dbg_log_string("DONE for %s(%d)", dep->name, dep->number);
 }
 
@@ -1852,72 +1849,6 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
 	return __dwc3_gadget_get_frame(dwc);
 }
 
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
-{
-	int			retries;
-
-	int			ret;
-	u32			reg;
-
-	u8			link_state;
-	u8			speed;
-
-	/*
-	 * According to the Databook Remote wakeup request should
-	 * be issued only when the device is in early suspend state.
-	 *
-	 * We can check that via USB Link State bits in DSTS register.
-	 */
-	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-
-	speed = reg & DWC3_DSTS_CONNECTSPD;
-	if ((speed == DWC3_DSTS_SUPERSPEED) ||
-	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
-		return 0;
-
-	link_state = DWC3_DSTS_USBLNKST(reg);
-
-	switch (link_state) {
-	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
-	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
-	if (ret < 0) {
-		dev_err(dwc->dev, "failed to put link in Recovery\n");
-		return ret;
-	}
-
-	/* Recent versions do this automatically */
-	if (dwc->revision < DWC3_REVISION_194A) {
-		/* write zeroes to Link Change Request */
-		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
-		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-	}
-
-	/* poll until Link State changes to ON */
-	retries = 20000;
-
-	while (retries--) {
-		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-
-		/* in HS, means ON */
-		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
-			break;
-	}
-
-	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
-		dev_err(dwc->dev, "failed to send remote wakeup\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 #define DWC3_PM_RESUME_RETRIES		20    /* Max Number of retries */
 #define DWC3_PM_RESUME_DELAY		100   /* 100 msec */
 
@@ -2254,7 +2185,18 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
 	}
 
+	/* pull-up disable: clear pending events without queueing bh */
+	dwc->pullups_connected = is_on;
+
+	disable_irq(dwc->irq);
+
+	/* prevent pending bh to run later */
+	flush_work(&dwc->bh_work);
+
 	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->ep0state != EP0_SETUP_PHASE)
+		dbg_event(0xFF, "EP0 is not in SETUP phase\n", 0);
+
 	/*
 	 * If we are here after bus suspend notify otg state machine to
 	 * increment pm usage count of dwc to prevent pm_runtime_suspend
@@ -2265,6 +2207,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
+	enable_irq(dwc->irq);
 
 	pm_runtime_mark_last_busy(dwc->dev);
 	pm_runtime_put_autosuspend(dwc->dev);
@@ -3828,6 +3771,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
 	if (dwc->err_evt_seen)
 		return IRQ_HANDLED;
 
+	/* Controller is being halted, ignore the interrupts */
+	if (!dwc->pullups_connected) {
+		count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+		count &= DWC3_GEVNTCOUNT_MASK;
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+		dbg_event(0xFF, "NO_PULLUP", count);
+		return IRQ_HANDLED;
+	}
+
 	/*
 	 * With PCIe legacy interrupt, test shows that top-half irq handler can
 	 * be called again after HW interrupt deassertion. Check if bottom-half
@@ -4048,8 +4000,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
 	dwc3_disconnect_gadget(dwc);
 	__dwc3_gadget_stop(dwc);
 
-	synchronize_irq(dwc->irq_gadget);
-
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index aa6b399..22ca9c2 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -90,6 +90,7 @@ struct gadget_info {
 	struct usb_composite_driver composite;
 	struct usb_composite_dev cdev;
 	bool use_os_desc;
+	bool unbinding;
 	char b_vendor_code;
 	char qw_sign[OS_STRING_QW_SIGN_LEN];
 #ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -287,9 +288,12 @@ static int unregister_gadget(struct gadget_info *gi)
 	if (!gi->composite.gadget_driver.udc_name)
 		return -ENODEV;
 
+	gi->unbinding = true;
 	ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
 	if (ret)
 		return ret;
+
+	gi->unbinding = false;
 	kfree(gi->composite.gadget_driver.udc_name);
 	gi->composite.gadget_driver.udc_name = NULL;
 	return 0;
@@ -326,6 +330,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
 			gi->composite.gadget_driver.udc_name = NULL;
 			goto err;
 		}
+		schedule_work(&gi->work);
 	}
 	mutex_unlock(&gi->lock);
 	return len;
@@ -1569,7 +1574,8 @@ static void android_disconnect(struct usb_gadget *gadget)
 	acc_disconnect();
 #endif
 	gi->connected = 0;
-	schedule_work(&gi->work);
+	if (!gi->unbinding)
+		schedule_work(&gi->work);
 	composite_disconnect(gadget);
 }
 #endif
@@ -1589,7 +1595,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
 	.suspend	= composite_suspend,
 	.resume		= composite_resume,
 
-	.max_speed	= USB_SPEED_SUPER,
+	.max_speed	= USB_SPEED_SUPER_PLUS,
 	.driver = {
 		.owner          = THIS_MODULE,
 		.name		= "configfs-gadget",
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 586f79e..54d6ccc 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -662,7 +662,7 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
 		req = 0;
 		ret = wait_event_interruptible(dev->write_wq,
 			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
-		if (!dev->online || !dev->disconnected) {
+		if (!dev->online || dev->disconnected) {
 			pr_debug("acc_write dev->error\n");
 			r = -EIO;
 			break;
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
index bdf46c7..daf907a 100644
--- a/drivers/usb/gadget/function/f_ccid.c
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -2,7 +2,7 @@
 /*
  * f_ccid.c -- CCID function Driver
  *
- * Copyright (c) 2011, 2013, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2013, 2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -433,18 +433,11 @@ static void ccid_function_disable(struct usb_function *f)
 	struct f_ccid *ccid_dev = func_to_ccid(f);
 	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
 	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
-	struct usb_request *req;
 
 	/* Disable endpoints */
 	usb_ep_disable(ccid_dev->notify);
 	usb_ep_disable(ccid_dev->in);
 	usb_ep_disable(ccid_dev->out);
-	/* Free endpoint related requests */
-	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
-	if (!atomic_read(&bulk_dev->rx_req_busy))
-		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
-	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
-		ccid_request_free(req, ccid_dev->in);
 
 	ccid_dev->dtr_state = 0;
 	atomic_set(&ccid_dev->online, 0);
@@ -461,47 +454,7 @@ ccid_function_set_alt(struct usb_function *f, unsigned int intf,
 {
 	struct f_ccid *ccid_dev = func_to_ccid(f);
 	struct usb_composite_dev *cdev = f->config->cdev;
-	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
-	struct usb_request *req;
 	int ret = 0;
-	int i;
-
-	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
-			sizeof(struct usb_ccid_notification), GFP_ATOMIC);
-	if (IS_ERR(ccid_dev->notify_req)) {
-		pr_err("%s: unable to allocate memory for notify req\n",
-				__func__);
-		return PTR_ERR(ccid_dev->notify_req);
-	}
-	ccid_dev->notify_req->complete = ccid_notify_complete;
-	ccid_dev->notify_req->context = ccid_dev;
-
-	/* now allocate requests for our endpoints */
-	req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
-							GFP_ATOMIC);
-	if (IS_ERR(req)) {
-		pr_err("%s: unable to allocate memory for out req\n",
-				__func__);
-		ret = PTR_ERR(req);
-		goto free_notify;
-	}
-	req->complete = ccid_bulk_complete_out;
-	req->context = ccid_dev;
-	bulk_dev->rx_req = req;
-
-	for (i = 0; i < TX_REQ_MAX; i++) {
-		req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
-				GFP_ATOMIC);
-		if (IS_ERR(req)) {
-			pr_err("%s: unable to allocate memory for in req\n",
-					__func__);
-			ret = PTR_ERR(req);
-			goto free_bulk_out;
-		}
-		req->complete = ccid_bulk_complete_in;
-		req->context = ccid_dev;
-		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
-	}
 
 	/* choose the descriptors and enable endpoints */
 	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
@@ -509,13 +462,13 @@ ccid_function_set_alt(struct usb_function *f, unsigned int intf,
 		ccid_dev->notify->desc = NULL;
 		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
 				__func__, ccid_dev->notify->name, ret);
-		goto free_bulk_in;
+		return ret;
 	}
 	ret = usb_ep_enable(ccid_dev->notify);
 	if (ret) {
 		pr_err("%s: usb ep#%s enable failed, err#%d\n",
 				__func__, ccid_dev->notify->name, ret);
-		goto free_bulk_in;
+		return ret;
 	}
 	ccid_dev->notify->driver_data = ccid_dev;
 
@@ -555,19 +508,23 @@ ccid_function_set_alt(struct usb_function *f, unsigned int intf,
 disable_ep_notify:
 	usb_ep_disable(ccid_dev->notify);
 	ccid_dev->notify->driver_data = NULL;
-free_bulk_in:
-	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
-		ccid_request_free(req, ccid_dev->in);
-free_bulk_out:
-	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
-free_notify:
-	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
 	return ret;
 }
 
 static void ccid_function_unbind(struct usb_configuration *c,
 					struct usb_function *f)
 {
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
+
+	/* Free endpoint related requests */
+	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+	if (!atomic_read(&bulk_dev->rx_req_busy))
+		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+		ccid_request_free(req, ccid_dev->in);
+
 	usb_free_all_descriptors(f);
 }
 
@@ -577,7 +534,10 @@ static int ccid_function_bind(struct usb_configuration *c,
 	struct f_ccid *ccid_dev = func_to_ccid(f);
 	struct usb_ep *ep;
 	struct usb_composite_dev *cdev = c->cdev;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
 	int ret = -ENODEV;
+	int i;
 
 	ccid_dev->ifc_id = usb_interface_id(c, f);
 	if (ccid_dev->ifc_id < 0) {
@@ -632,20 +592,66 @@ static int ccid_function_bind(struct usb_configuration *c,
 	ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
 						ccid_ss_descs, ccid_ss_descs);
 	if (ret)
-		goto ep_auto_out_fail;
+		goto assign_desc_fail;
 
 	pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
 			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
 			ccid_dev->in->name, ccid_dev->out->name);
 
+	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
+			sizeof(struct usb_ccid_notification), GFP_KERNEL);
+	if (IS_ERR(ccid_dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+		goto notify_alloc_fail;
+	}
+	ccid_dev->notify_req->complete = ccid_notify_complete;
+	ccid_dev->notify_req->context = ccid_dev;
+
+	/* now allocate requests for our endpoints */
+	req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
+							GFP_KERNEL);
+	if (IS_ERR(req)) {
+		pr_err("%s: unable to allocate memory for out req\n",
+				__func__);
+		ret = PTR_ERR(req);
+		goto out_alloc_fail;
+	}
+	req->complete = ccid_bulk_complete_out;
+	req->context = ccid_dev;
+	bulk_dev->rx_req = req;
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
+				GFP_KERNEL);
+		if (IS_ERR(req)) {
+			pr_err("%s: unable to allocate memory for in req\n",
+					__func__);
+			ret = PTR_ERR(req);
+			goto in_alloc_fail;
+		}
+		req->complete = ccid_bulk_complete_in;
+		req->context = ccid_dev;
+		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+	}
+
 	return 0;
 
-ep_auto_out_fail:
+in_alloc_fail:
+	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+out_alloc_fail:
+	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+notify_alloc_fail:
+	usb_free_all_descriptors(f);
+assign_desc_fail:
 	ccid_dev->out->driver_data = NULL;
 	ccid_dev->out = NULL;
-ep_auto_in_fail:
+ep_auto_out_fail:
 	ccid_dev->in->driver_data = NULL;
 	ccid_dev->in = NULL;
+ep_auto_in_fail:
+	ccid_dev->notify->driver_data = NULL;
+	ccid_dev->notify = NULL;
 
 	return ret;
 }
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index dc01feb..450a44d 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -7,6 +7,12 @@
 #include "f_gsi.h"
 #include "rndis.h"
 
+struct usb_gsi_debugfs {
+	struct dentry *debugfs_root;
+};
+
+static struct usb_gsi_debugfs debugfs;
+
 static bool qti_packet_debug;
 module_param(qti_packet_debug, bool, 0644);
 MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
@@ -28,8 +34,6 @@ static DEFINE_IDA(gsi_ida);
 static void gsi_inst_clean(struct gsi_opts *opts);
 static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port);
 static int gsi_ctrl_send_notification(struct f_gsi *gsi);
-static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
-static void gsi_free_trb_buffer(struct f_gsi *gsi);
 static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags);
 static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
 
@@ -38,7 +42,7 @@ static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
 	bool remote_wakeup_allowed;
 	struct f_gsi *gsi = func_to_gsi(f);
 
-	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+	if (f->config->cdev->gadget->speed >= USB_SPEED_SUPER)
 		remote_wakeup_allowed = f->func_wakeup_allowed;
 	else
 		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
@@ -157,7 +161,7 @@ static int gsi_wakeup_host(struct f_gsi *gsi)
 	 * allowed to do so by the host. This is done in order to support non
 	 * fully USB 3.0 compatible hosts.
 	 */
-	if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended)) {
+	if ((gadget->speed >= USB_SPEED_SUPER) && (func->func_is_suspended)) {
 		log_event_dbg("%s: Calling usb_func_wakeup", __func__);
 		ret = usb_func_wakeup(func);
 	} else {
@@ -173,6 +177,212 @@ static int gsi_wakeup_host(struct f_gsi *gsi)
 	return ret;
 }
 
+static void gsi_rw_timer_func(struct timer_list *t)
+{
+	struct f_gsi *gsi = from_timer(gsi, t, gsi_rw_timer);
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("%s: gsi not connected.. bail-out\n", __func__);
+		gsi->debugfs_rw_timer_enable = 0;
+		return;
+	}
+
+	log_event_dbg("%s: calling gsi_wakeup_host\n", __func__);
+	gsi_wakeup_host(gsi);
+
+	if (gsi->debugfs_rw_timer_enable) {
+		log_event_dbg("%s: re-arm the timer\n", __func__);
+		mod_timer(&gsi->gsi_rw_timer,
+			jiffies + msecs_to_jiffies(gsi->gsi_rw_timer_interval));
+	}
+}
+
+static struct f_gsi *get_connected_gsi(void)
+{
+	struct f_gsi *connected_gsi;
+	bool gsi_connected = false;
+	int i;
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+		if (inst_status[i].opts)
+			connected_gsi = inst_status[i].opts->gsi;
+		else
+			continue;
+
+		if (connected_gsi && atomic_read(&connected_gsi->connected)) {
+			gsi_connected = true;
+			break;
+		}
+	}
+
+	if (!gsi_connected)
+		connected_gsi = NULL;
+
+	return connected_gsi;
+}
+
+#define DEFAULT_RW_TIMER_INTERVAL 500 /* in ms */
+static ssize_t usb_gsi_rw_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u8 input;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is Null.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &input);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (gsi->debugfs_rw_timer_enable == !!input) {
+		if (!!input)
+			log_event_dbg("%s: RW already enabled\n", __func__);
+		else
+			log_event_dbg("%s: RW already disabled\n", __func__);
+		goto err;
+	}
+
+	gsi->debugfs_rw_timer_enable = !!input;
+
+	if (gsi->debugfs_rw_timer_enable) {
+		mod_timer(&gsi->gsi_rw_timer, jiffies +
+			  msecs_to_jiffies(gsi->gsi_rw_timer_interval));
+		log_event_dbg("%s: timer initialized\n", __func__);
+	} else {
+		del_timer_sync(&gsi->gsi_rw_timer);
+		log_event_dbg("%s: timer deleted\n", __func__);
+	}
+
+err:
+	return count;
+}
+
+static int usb_gsi_rw_show(struct seq_file *s, void *unused)
+{
+
+	struct f_gsi *gsi;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	seq_printf(s, "%d\n", gsi->debugfs_rw_timer_enable);
+
+	return 0;
+}
+
+static int usb_gsi_rw_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw = {
+	.open = usb_gsi_rw_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static ssize_t usb_gsi_rw_timer_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u16 timer_val;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is NULL.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou16_from_user(ubuf, count, 0, &timer_val);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (timer_val <= 0 || timer_val >  10000) {
+		log_event_err("%s: value must be > 0 and < 10000.\n", __func__);
+		goto err;
+	}
+
+	gsi->gsi_rw_timer_interval = timer_val;
+err:
+	return count;
+}
+
+static int usb_gsi_rw_timer_show(struct seq_file *s, void *unused)
+{
+	struct f_gsi *gsi;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	seq_printf(s, "%ums\n", gsi->gsi_rw_timer_interval);
+
+	return 0;
+}
+
+static int usb_gsi_rw_timer_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_timer_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw_timer = {
+	.open = usb_gsi_rw_timer_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_timer_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int usb_gsi_debugfs_init(void)
+{
+	debugfs.debugfs_root = debugfs_create_dir("usb_gsi", NULL);
+	if (!debugfs.debugfs_root)
+		return -ENOMEM;
+
+	debugfs_create_file("remote_wakeup_enable", 0600,
+					debugfs.debugfs_root,
+					inst_status, &fops_usb_gsi_rw);
+	debugfs_create_file("remote_wakeup_interval", 0600,
+					debugfs.debugfs_root,
+					inst_status,
+					&fops_usb_gsi_rw_timer);
+	return 0;
+}
+
+static void usb_gsi_debugfs_exit(void)
+{
+	debugfs_remove_recursive(debugfs.debugfs_root);
+}
+
 /*
  * Callback for when when network interface is up
  * and userspace is ready to answer DHCP requests,  or remote wakeup
@@ -274,7 +484,9 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 	struct ipa_req_chan_out_params ipa_in_channel_out_params;
 	struct ipa_req_chan_out_params ipa_out_channel_out_params;
 
-	log_event_dbg("%s: USB GSI IN OPS", __func__);
+	log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
+		d_port->in_request.num_bufs, d_port->in_request.buf_len);
+
 	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
 		GSI_EP_OP_PREPARE_TRBS);
 	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
@@ -318,7 +530,9 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 		gsi_channel_info.depcmd_hi_addr;
 
 	if (d_port->out_ep) {
-		log_event_dbg("%s: USB GSI OUT OPS", __func__);
+		log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
+			d_port->out_request.num_bufs,
+			d_port->out_request.buf_len);
 		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
 			GSI_EP_OP_PREPARE_TRBS);
 		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
@@ -384,7 +598,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 	conn_params->teth_prot_params.max_packet_number_to_dev =
 		DEFAULT_MAX_PKT_PER_XFER;
 	conn_params->max_supported_bandwidth_mbps =
-		(cdev->gadget->speed == USB_SPEED_SUPER) ? 3600 : 400;
+		(cdev->gadget->speed >= USB_SPEED_SUPER) ? 3600 : 400;
 
 	memset(&ipa_in_channel_out_params, 0x0,
 				sizeof(ipa_in_channel_out_params));
@@ -509,9 +723,6 @@ static void ipa_disconnect_channel(struct gsi_data_port *d_port)
 	if (gsi->d_port.out_ep)
 		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
 							GSI_EP_OP_FREE_TRBS);
-
-	/* free buffers allocated with each TRB */
-	gsi_free_trb_buffer(gsi);
 }
 
 static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
@@ -651,7 +862,8 @@ static void ipa_work_handler(struct work_struct *w)
 			/* Configure EPs for GSI */
 			ret = gsi_ep_enable(gsi);
 			if (ret) {
-				log_event_err("%s:ep enable err %d", __func__);
+				log_event_err("%s:ep enable err %d", __func__,
+					ret);
 				usb_composite_setup_continue(gsi->d_port.cdev);
 				usb_gadget_autopm_put_async(d_port->gadget);
 				break;
@@ -670,13 +882,6 @@ static void ipa_work_handler(struct work_struct *w)
 				break;
 			}
 
-			/* allocate buffers used with each TRB */
-			ret = gsi_alloc_trb_buffer(gsi);
-			if (ret) {
-				log_event_err("%s: gsi_alloc_trb_failed\n",
-								__func__);
-				break;
-			}
 			ipa_connect_channels(d_port);
 			d_port->sm_state = STATE_WAIT_FOR_IPA_RDY;
 			log_event_dbg("%s: ST_INIT_EVT_SET_ALT",
@@ -815,13 +1020,6 @@ static void ipa_work_handler(struct work_struct *w)
 			usb_gadget_autopm_get(d_port->gadget);
 			log_event_dbg("%s: get = %d", __func__,
 				atomic_read(&gad_dev->power.usage_count));
-			/* allocate buffers used with each TRB */
-			ret = gsi_alloc_trb_buffer(gsi);
-			if (ret) {
-				log_event_err("%s: gsi_alloc_trb_failed\n",
-								__func__);
-				break;
-			}
 
 			ipa_connect_channels(d_port);
 			ipa_data_path_enable(d_port);
@@ -1446,7 +1644,7 @@ static const struct file_operations gsi_ctrl_dev_fops = {
 /* peak (theoretical) bulk transfer rate in bits-per-second */
 static unsigned int gsi_xfer_bitrate(struct usb_gadget *g)
 {
-	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+	if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER)
 		return 13 * 1024 * 8 * 1000 * 8;
 	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
 		return 13 * 512 * 8 * 1000 * 8;
@@ -2047,106 +2245,6 @@ static int gsi_get_alt(struct usb_function *f, unsigned int intf)
 	return -EINVAL;
 }
 
-static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
-{
-	u32 len_in = 0, len_out = 0;
-	int ret = 0;
-	struct device *dev;
-
-	log_event_dbg("allocate trb's buffer\n");
-
-	dev = gsi->d_port.gadget->dev.parent;
-	if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
-		log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
-			gsi->d_port.in_request.num_bufs,
-			gsi->d_port.in_request.buf_len);
-
-		len_in = gsi->d_port.in_request.buf_len *
-				gsi->d_port.in_request.num_bufs;
-		gsi->d_port.in_request.buf_base_addr =
-			dma_zalloc_coherent(dev->parent,
-			len_in, &gsi->d_port.in_request.dma, GFP_KERNEL);
-		if (!gsi->d_port.in_request.buf_base_addr) {
-			dev_err(&gsi->d_port.gadget->dev,
-					"IN buf_base_addr allocate failed %s\n",
-					gsi->function.name);
-			ret = -ENOMEM;
-			goto fail1;
-		}
-
-		dma_get_sgtable(dev->parent,
-			&gsi->d_port.in_request.sgt_data_buff,
-			gsi->d_port.in_request.buf_base_addr,
-			gsi->d_port.in_request.dma, len_in);
-	}
-
-	if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
-		log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
-			gsi->d_port.out_request.num_bufs,
-			gsi->d_port.out_request.buf_len);
-
-		len_out = gsi->d_port.out_request.buf_len *
-				gsi->d_port.out_request.num_bufs;
-		gsi->d_port.out_request.buf_base_addr =
-			dma_zalloc_coherent(dev->parent,
-			len_out, &gsi->d_port.out_request.dma, GFP_KERNEL);
-		if (!gsi->d_port.out_request.buf_base_addr) {
-			dev_err(&gsi->d_port.gadget->dev,
-					"OUT buf_base_addr allocate failed %s\n",
-					gsi->function.name);
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		dma_get_sgtable(dev->parent,
-			&gsi->d_port.out_request.sgt_data_buff,
-			gsi->d_port.out_request.buf_base_addr,
-			gsi->d_port.out_request.dma, len_out);
-	}
-
-	log_event_dbg("finished allocating trb's buffer\n");
-	return ret;
-
-fail:
-	if (len_in && gsi->d_port.in_request.buf_base_addr) {
-		dma_free_coherent(dev->parent, len_in,
-				gsi->d_port.in_request.buf_base_addr,
-				gsi->d_port.in_request.dma);
-		gsi->d_port.in_request.buf_base_addr = NULL;
-	}
-fail1:
-	return ret;
-}
-
-static void gsi_free_trb_buffer(struct f_gsi *gsi)
-{
-	u32 len;
-
-	log_event_dbg("freeing trb's buffer\n");
-
-	if (gsi->d_port.out_ep &&
-			gsi->d_port.out_request.buf_base_addr) {
-		len = gsi->d_port.out_request.buf_len *
-			gsi->d_port.out_request.num_bufs;
-		dma_free_coherent(gsi->d_port.gadget->dev.parent->parent, len,
-			gsi->d_port.out_request.buf_base_addr,
-			gsi->d_port.out_request.dma);
-		gsi->d_port.out_request.buf_base_addr = NULL;
-		sg_free_table(&gsi->d_port.out_request.sgt_data_buff);
-	}
-
-	if (gsi->d_port.in_ep &&
-			gsi->d_port.in_request.buf_base_addr) {
-		len = gsi->d_port.in_request.buf_len *
-			gsi->d_port.in_request.num_bufs;
-		dma_free_coherent(gsi->d_port.gadget->dev.parent->parent, len,
-			gsi->d_port.in_request.buf_base_addr,
-			gsi->d_port.in_request.dma);
-		gsi->d_port.in_request.buf_base_addr = NULL;
-		sg_free_table(&gsi->d_port.in_request.sgt_data_buff);
-	}
-}
-
 static int gsi_set_alt(struct usb_function *f, unsigned int intf,
 						unsigned int alt)
 {
@@ -2348,7 +2446,7 @@ static void gsi_resume(struct usb_function *f)
 	 * If the function is in USB3 Function Suspend state, resume is
 	 * canceled. In this case resume is done by a Function Resume request.
 	 */
-	if ((cdev->gadget->speed == USB_SPEED_SUPER) &&
+	if ((cdev->gadget->speed >= USB_SPEED_SUPER) &&
 		f->func_is_suspended)
 		return;
 
@@ -2962,6 +3060,7 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_gsi *gsi = func_to_gsi(f);
 
+	log_event_dbg("%s:id:%d: dwq start", __func__, gsi->prot_id);
 	/*
 	 * Use drain_workqueue to accomplish below conditions:
 	 * 1. Make sure that any running work completed
@@ -2971,6 +3070,8 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
 	 * with ipa driver shall not fail due to unexpected state.
 	 */
 	drain_workqueue(gsi->d_port.ipa_usb_wq);
+	log_event_dbg("%s:id:%d: dwq end", __func__, gsi->prot_id);
+
 	ipa_usb_deinit_teth_prot(gsi->prot_id);
 
 	if (gsi->prot_id == IPA_USB_RNDIS) {
@@ -3041,8 +3142,6 @@ static int gsi_bind_config(struct f_gsi *gsi)
 	gsi->function.func_suspend = gsi_func_suspend;
 	gsi->function.resume = gsi_resume;
 
-	INIT_DELAYED_WORK(&gsi->d_port.usb_ipa_w, ipa_work_handler);
-
 	return status;
 }
 
@@ -3067,6 +3166,8 @@ static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id)
 
 	init_waitqueue_head(&gsi->d_port.wait_for_ipa_ready);
 
+	INIT_DELAYED_WORK(&gsi->d_port.usb_ipa_w, ipa_work_handler);
+
 	gsi->d_port.in_channel_handle = -EINVAL;
 	gsi->d_port.out_channel_handle = -EINVAL;
 
@@ -3079,6 +3180,8 @@ static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id)
 		kfree(gsi);
 		goto error;
 	}
+	gsi->gsi_rw_timer_interval = DEFAULT_RW_TIMER_INTERVAL;
+	timer_setup(&gsi->gsi_rw_timer, gsi_rw_timer_func, 0);
 
 	return gsi;
 error:
@@ -3469,6 +3572,7 @@ static int fgsi_init(void)
 
 	major = MAJOR(dev);
 
+	usb_gsi_debugfs_init();
 	return usb_function_register(&gsiusb_func);
 }
 module_init(fgsi_init);
@@ -3485,5 +3589,6 @@ static void __exit fgsi_exit(void)
 	}
 
 	class_destroy(gsi_class);
+	usb_gsi_debugfs_exit();
 }
 module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 5c629e0..be1d2e1 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -18,6 +18,7 @@
 #include <linux/debugfs.h>
 #include <linux/ipa_usb.h>
 #include <linux/ipc_logging.h>
+#include <linux/timer.h>
 
 #define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
 #define GSI_MBIM_CTRL_NAME "android_mbim"
@@ -274,6 +275,12 @@ struct f_gsi {
 	struct gsi_ctrl_port c_port;
 	void *ipc_log_ctxt;
 	bool rmnet_dtr_status;
+
+	/* To test remote wakeup using debugfs */
+	struct timer_list gsi_rw_timer;
+	u8 debugfs_rw_timer_enable;
+	u16 gsi_rw_timer_interval;
+
 };
 
 static inline struct f_gsi *func_to_gsi(struct usb_function *f)
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 0e3a582..d7805e7 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -32,6 +32,7 @@
 #include <linux/file.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
+#include <linux/ipc_logging.h>
 
 #include <linux/usb.h>
 #include <linux/usb_usual.h>
@@ -42,6 +43,18 @@
 
 #include "configfs.h"
 
+#define NUM_PAGES	10 /* # of pages for ipc logging */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define mtp_log(fmt, ...) do { \
+	ipc_log_string(_mtp_ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__); \
+	dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#else
+#define mtp_log(fmt, ...) \
+	ipc_log_string(_mtp_ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__)
+#endif
+
 #define MTP_RX_BUFFER_INIT_SIZE    1048576
 #define MTP_TX_BUFFER_INIT_SIZE    1048576
 #define MTP_BULK_BUFFER_SIZE       16384
@@ -140,6 +153,8 @@ struct mtp_dev {
 	struct mutex  read_mutex;
 };
 
+static void *_mtp_ipc_log;
+
 static struct usb_interface_descriptor mtp_interface_desc = {
 	.bLength                = USB_DT_INTERFACE_SIZE,
 	.bDescriptorType        = USB_DT_INTERFACE,
@@ -499,32 +514,32 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
 	struct usb_ep *ep;
 	int i;
 
-	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
+	mtp_log("dev: %pK\n", dev);
 
 	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
 	if (!ep) {
-		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		mtp_log("usb_ep_autoconfig for ep_in failed\n");
 		return -ENODEV;
 	}
-	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	mtp_log("usb_ep_autoconfig for ep_in got %s\n", ep->name);
 	ep->driver_data = dev;		/* claim the endpoint */
 	dev->ep_in = ep;
 
 	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
 	if (!ep) {
-		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		mtp_log("usb_ep_autoconfig for ep_out failed\n");
 		return -ENODEV;
 	}
-	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	mtp_log("usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
 	ep->driver_data = dev;		/* claim the endpoint */
 	dev->ep_out = ep;
 
 	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
 	if (!ep) {
-		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		mtp_log("usb_ep_autoconfig for ep_intr failed\n");
 		return -ENODEV;
 	}
-	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	mtp_log("usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
 	ep->driver_data = dev;		/* claim the endpoint */
 	dev->ep_intr = ep;
 
@@ -592,10 +607,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	ssize_t r = count, xfer, len;
 	int ret = 0;
 
-	DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+	mtp_log("(%zu) state:%d\n", count, dev->state);
 
 	/* we will block until we're online */
-	DBG(cdev, "mtp_read: waiting for online state\n");
+	mtp_log("waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
 		dev->state != STATE_OFFLINE);
 	if (ret < 0) {
@@ -652,7 +667,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		r = -EIO;
 		goto done;
 	} else {
-		DBG(cdev, "rx %pK queue\n", req);
+		mtp_log("rx %pK queue\n", req);
 	}
 
 	/* wait for a request to complete */
@@ -678,7 +693,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		if (req->actual == 0)
 			goto requeue_req;
 
-		DBG(cdev, "rx %pK %d\n", req, req->actual);
+		mtp_log("rx %pK %d\n", req, req->actual);
 		xfer = (req->actual < count) ? req->actual : count;
 		r = xfer;
 		if (copy_to_user(buf, req->buf, xfer))
@@ -695,7 +710,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		dev->state = STATE_READY;
 	spin_unlock_irq(&dev->lock);
 
-	DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+	mtp_log("returning %zd state:%d\n", r, dev->state);
 	return r;
 }
 
@@ -703,14 +718,13 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
 	size_t count, loff_t *pos)
 {
 	struct mtp_dev *dev = fp->private_data;
-	struct usb_composite_dev *cdev = dev->cdev;
 	struct usb_request *req = 0;
 	ssize_t r = count;
 	unsigned xfer;
 	int sendZLP = 0;
 	int ret;
 
-	DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+	mtp_log("(%zu) state:%d\n", count, dev->state);
 
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED) {
@@ -738,7 +752,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
 			sendZLP = 0;
 
 		if (dev->state != STATE_BUSY) {
-			DBG(cdev, "mtp_write dev->error\n");
+			mtp_log("dev->error\n");
 			r = -EIO;
 			break;
 		}
@@ -749,8 +763,8 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
 			((req = mtp_req_get(dev, &dev->tx_idle))
 				|| dev->state != STATE_BUSY));
 		if (!req) {
-			DBG(cdev, "%s request NULL ret:%d state:%d\n",
-				__func__, ret, dev->state);
+			mtp_log("request NULL ret:%d state:%d\n",
+				ret, dev->state);
 			r = ret;
 			break;
 		}
@@ -767,7 +781,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
 		req->length = xfer;
 		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
 		if (ret < 0) {
-			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			mtp_log("xfer error %d\n", ret);
 			r = -EIO;
 			break;
 		}
@@ -789,7 +803,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
 		dev->state = STATE_READY;
 	spin_unlock_irq(&dev->lock);
 
-	DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+	mtp_log("returning %zd state:%d\n", r, dev->state);
 	return r;
 }
 
@@ -798,7 +812,6 @@ static void send_file_work(struct work_struct *data)
 {
 	struct mtp_dev *dev = container_of(data, struct mtp_dev,
 						send_file_work);
-	struct usb_composite_dev *cdev = dev->cdev;
 	struct usb_request *req = 0;
 	struct mtp_data_header *header;
 	struct file *filp;
@@ -815,7 +828,7 @@ static void send_file_work(struct work_struct *data)
 	offset = dev->xfer_file_offset;
 	count = dev->xfer_file_length;
 
-	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+	mtp_log("(%lld %lld)\n", offset, count);
 
 	if (dev->xfer_send_header) {
 		hdr_size = sizeof(struct mtp_data_header);
@@ -845,8 +858,7 @@ static void send_file_work(struct work_struct *data)
 			break;
 		}
 		if (!req) {
-			DBG(cdev,
-				"%s request NULL ret:%d state:%d\n", __func__,
+			mtp_log("request NULL ret:%d state:%d\n",
 				ret, dev->state);
 			r = ret;
 			break;
@@ -889,7 +901,7 @@ static void send_file_work(struct work_struct *data)
 		req->length = xfer;
 		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
 		if (ret < 0) {
-			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			mtp_log("xfer error %d\n", ret);
 			if (dev->state != STATE_OFFLINE)
 				dev->state = STATE_ERROR;
 			r = -EIO;
@@ -905,7 +917,7 @@ static void send_file_work(struct work_struct *data)
 	if (req)
 		mtp_req_put(dev, &dev->tx_idle, req);
 
-	DBG(cdev, "%s returning %d state:%d\n", __func__, r, dev->state);
+	mtp_log("returning %d state:%d\n", r, dev->state);
 	/* write the result */
 	dev->xfer_result = r;
 	smp_wmb();
@@ -916,7 +928,6 @@ static void receive_file_work(struct work_struct *data)
 {
 	struct mtp_dev *dev = container_of(data, struct mtp_dev,
 						receive_file_work);
-	struct usb_composite_dev *cdev = dev->cdev;
 	struct usb_request *read_req = NULL, *write_req = NULL;
 	struct file *filp;
 	loff_t offset;
@@ -931,9 +942,9 @@ static void receive_file_work(struct work_struct *data)
 	offset = dev->xfer_file_offset;
 	count = dev->xfer_file_length;
 
-	DBG(cdev, "receive_file_work(%lld)\n", count);
+	mtp_log("(%lld)\n", count);
 	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
-		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+		mtp_log("- count(%lld) not multiple of mtu(%d)\n",
 						count, dev->ep_out->maxpacket);
 
 	while (count > 0 || write_req) {
@@ -963,7 +974,7 @@ static void receive_file_work(struct work_struct *data)
 		}
 
 		if (write_req) {
-			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
+			mtp_log("rx %pK %d\n", write_req, write_req->actual);
 			start_time = ktime_get();
 			mutex_lock(&dev->read_mutex);
 			if (dev->state == STATE_OFFLINE) {
@@ -973,7 +984,7 @@ static void receive_file_work(struct work_struct *data)
 			}
 			ret = vfs_write(filp, write_req->buf, write_req->actual,
 				&offset);
-			DBG(cdev, "vfs_write %d\n", ret);
+			mtp_log("vfs_write %d\n", ret);
 			if (ret != write_req->actual) {
 				r = -EIO;
 				mutex_unlock(&dev->read_mutex);
@@ -1031,7 +1042,7 @@ static void receive_file_work(struct work_struct *data)
 				 * short packet is used to signal EOF for
 				 * sizes > 4 gig
 				 */
-				DBG(cdev, "got short packet\n");
+				mtp_log("got short packet\n");
 				count = 0;
 			}
 
@@ -1041,7 +1052,7 @@ static void receive_file_work(struct work_struct *data)
 		}
 	}
 
-	DBG(cdev, "receive_file_work returning %d\n", r);
+	mtp_log("returning %d\n", r);
 	/* write the result */
 	dev->xfer_result = r;
 	smp_wmb();
@@ -1053,7 +1064,7 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
 	int ret;
 	int length = event->length;
 
-	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+	mtp_log("(%zu)\n", event->length);
 
 	if (length < 0 || length > INTR_BUFFER_SIZE)
 		return -EINVAL;
@@ -1087,7 +1098,7 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
 	int ret = -EINVAL;
 
 	if (mtp_lock(&dev->ioctl_excl)) {
-		DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
+		mtp_log("ioctl returning EBUSY state:%d\n", dev->state);
 		return -EBUSY;
 	}
 
@@ -1155,7 +1166,7 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
 	spin_unlock_irq(&dev->lock);
 out:
 	mtp_unlock(&dev->ioctl_excl);
-	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	mtp_log("ioctl returning %d\n", ret);
 	return ret;
 }
 
@@ -1189,7 +1200,7 @@ static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
 		mtp_unlock(&dev->ioctl_excl);
 	break;
 	default:
-		DBG(dev->cdev, "unknown ioctl code: %d\n", code);
+		mtp_log("unknown ioctl code: %d\n", code);
 	}
 fail:
 	return ret;
@@ -1229,7 +1240,7 @@ static long compat_mtp_ioctl(struct file *fp, unsigned int code,
 		cmd = MTP_SEND_EVENT;
 		break;
 	default:
-		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+		mtp_log("unknown compat_ioctl code: %d\n", code);
 		ret = -ENOIOCTLCMD;
 		goto fail;
 	}
@@ -1319,8 +1330,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
 	u16	w_length = le16_to_cpu(ctrl->wLength);
 	unsigned long	flags;
 
-	VDBG(cdev, "mtp_ctrlrequest "
-			"%02x.%02x v%04x i%04x l%u\n",
+	mtp_log("%02x.%02x v%04x i%04x l%u\n",
 			ctrl->bRequestType, ctrl->bRequest,
 			w_value, w_index, w_length);
 
@@ -1335,7 +1345,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
 		memcpy(cdev->req->buf, mtp_os_string, value);
 	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
 		/* Handle MTP OS descriptor */
-		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+		mtp_log("vendor request: %d index: %d value: %d length: %d\n",
 			ctrl->bRequest, w_index, w_value, w_length);
 
 		if (ctrl->bRequest == 1
@@ -1353,12 +1363,12 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
 			}
 		}
 	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
-		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+		mtp_log("class request: %d index: %d value: %d length: %d\n",
 			ctrl->bRequest, w_index, w_value, w_length);
 
 		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
 				&& w_value == 0) {
-			DBG(cdev, "MTP_REQ_CANCEL\n");
+			mtp_log("MTP_REQ_CANCEL\n");
 
 			spin_lock_irqsave(&dev->lock, flags);
 			if (dev->state == STATE_BUSY) {
@@ -1380,7 +1390,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
 			status->wLength =
 				__constant_cpu_to_le16(sizeof(*status));
 
-			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			mtp_log("MTP_REQ_GET_DEVICE_STATUS\n");
 			spin_lock_irqsave(&dev->lock, flags);
 			/* device status is "busy" until we report
 			 * the cancelation to userspace
@@ -1404,7 +1414,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
 		cdev->req->length = value;
 		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
 		if (rc < 0)
-			ERROR(cdev, "%s: response queue error\n", __func__);
+			pr_err("%s: response queue error\n", __func__);
 	}
 	return value;
 }
@@ -1419,7 +1429,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
 	struct mtp_instance *fi_mtp;
 
 	dev->cdev = cdev;
-	DBG(cdev, "%s dev: %pK\n", __func__, dev);
+	mtp_log("dev: %pK\n", dev);
 
 	/* allocate interface ID(s) */
 	id = usb_interface_id(c, f);
@@ -1474,7 +1484,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
 	}
 
 	fi_mtp->func_inst.f = &dev->function;
-	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+	mtp_log("%s speed %s: IN/%s, OUT/%s\n",
 		gadget_is_superspeed(c->cdev->gadget) ? "super" :
 		(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
 		f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1490,6 +1500,7 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
 	int i;
 	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
 	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+	mtp_log("dev: %pK\n", dev);
 	mutex_lock(&dev->read_mutex);
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
 		mtp_request_free(req, dev->ep_in);
@@ -1514,7 +1525,7 @@ static int mtp_function_set_alt(struct usb_function *f,
 	struct usb_composite_dev *cdev = f->config->cdev;
 	int ret;
 
-	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+	mtp_log("%d alt: %d\n", intf, alt);
 
 	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
 	if (ret)
@@ -1554,9 +1565,8 @@ static int mtp_function_set_alt(struct usb_function *f,
 static void mtp_function_disable(struct usb_function *f)
 {
 	struct mtp_dev	*dev = func_to_mtp(f);
-	struct usb_composite_dev	*cdev = dev->cdev;
 
-	DBG(cdev, "mtp_function_disable\n");
+	mtp_log("\n");
 	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
 	spin_unlock_irq(&dev->lock);
@@ -1567,7 +1577,7 @@ static void mtp_function_disable(struct usb_function *f)
 	/* readers may be blocked waiting for us to go online */
 	wake_up(&dev->read_wq);
 
-	VDBG(cdev, "%s disabled\n", dev->function.name);
+	mtp_log("%s disabled\n", dev->function.name);
 }
 
 static int debug_mtp_read_stats(struct seq_file *s, void *unused)
@@ -1917,5 +1927,24 @@ static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
 	return function_alloc_mtp_ptp(fi, true);
 }
 
-DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
+DECLARE_USB_FUNCTION(mtp, mtp_alloc_inst, mtp_alloc);
+
+static int mtp_init(void)
+{
+	_mtp_ipc_log = ipc_log_context_create(NUM_PAGES, "usb_mtp", 0);
+	if (IS_ERR_OR_NULL(_mtp_ipc_log))
+		_mtp_ipc_log =  NULL;
+
+	return usb_function_register(&mtpusb_func);
+}
+module_init(mtp_init);
+
+static void __exit mtp_exit(void)
+{
+	ipc_log_context_destroy(_mtp_ipc_log);
+	usb_function_unregister(&mtpusb_func);
+}
+module_exit(mtp_exit);
+
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTP function driver");
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index baf72f9..213b525 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
-	if (driver->max_speed == USB_SPEED_UNKNOWN)
+	switch (g->speed) {
+	/* All the speeds we support */
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+		break;
+	default:
+		dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+				driver->max_speed);
 		return -EINVAL;
+	}
 
 	/*
 	 * SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
 		/* Bus speed is 500000 bytes/ms, so use a little less */
 		total = 490000;
 		break;
-	default:
+	default:	/* Can't happen */
 		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
-		return;
+		total = 0;
+		break;
 	}
 
 	/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1839,7 @@ static void dummy_timer(struct timer_list *t)
 
 		/* Used up this frame's bandwidth? */
 		if (total <= 0)
-			break;
+			continue;
 
 		/* find the gadget's ep for this request (if configured) */
 		address = usb_pipeendpoint (urb->pipe);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f312..c2011cd 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
 		return -EINVAL;
 	}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index b02ab2a..ee872ca 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
 	(void) readl(&ep->dev->pci->pcimstctl);
 
 	writel(BIT(DMA_START), &dma->dmastat);
-
-	if (!ep->is_in)
-		stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
 			writel(BIT(DMA_START), &dma->dmastat);
 			return;
 		}
+		stop_out_naking(ep);
 	}
 
 	tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
-		dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-								__func__);
+		ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 5b8a3d95..5cac83a 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3202,6 +3202,9 @@ static int __init u132_hcd_init(void)
 	printk(KERN_INFO "driver %s\n", hcd_name);
 	workqueue = create_singlethread_workqueue("u132");
 	retval = platform_driver_register(&u132_platform_driver);
+	if (retval)
+		destroy_workqueue(workqueue);
+
 	return retval;
 }
 
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index a6efb9a..5f7734c 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -601,7 +601,7 @@ static int usb251xb_probe(struct usb251xb *hub)
 							   dev);
 	int err;
 
-	if (np) {
+	if (np && of_id) {
 		err = usb251xb_get_ofdata(hub,
 					  (struct usb251xb_data *)of_id->data);
 		if (err) {
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6d9fd5f..7b306aa 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
 	usb_deregister_dev(interface, &yurex_class);
 
 	/* prevent more I/O from starting */
+	usb_poison_urb(dev->urb);
 	mutex_lock(&dev->io_mutex);
 	dev->interface = NULL;
 	mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index ad08895..c3dae7d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -66,7 +66,7 @@
 	depends on NOP_USB_XCEIV
 	depends on PHY_SUN4I_USB
 	depends on EXTCON
-	depends on GENERIC_PHY
+	select GENERIC_PHY
 	select SUNXI_SRAM
 
 config USB_MUSB_DAVINCI
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 8603a1f..f3ff59d 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2871,6 +2871,7 @@ static bool handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
 			break;
 		}
 		vconn_swap(pd);
+		break;
 	case MSG_GET_SOURCE_CAP_EXTENDED:
 		handle_get_src_cap_extended(pd);
 		break;
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 6749fe3..a358e2f 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -97,13 +97,6 @@ struct msm_hsphy {
 	struct mutex		phy_lock;
 	struct regulator_desc	dpdm_rdesc;
 	struct regulator_dev	*dpdm_rdev;
-
-	/* emulation targets specific */
-	void __iomem		*emu_phy_base;
-	int			*emu_init_seq;
-	int			emu_init_seq_len;
-	int			*emu_dcm_reset_seq;
-	int			emu_dcm_reset_seq_len;
 };
 
 static void msm_hsphy_enable_clocks(struct msm_hsphy *phy, bool on)
@@ -318,37 +311,6 @@ static void hsusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
 	}
 }
 
-static int msm_hsphy_emu_init(struct usb_phy *uphy)
-{
-	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
-	int ret;
-
-	dev_dbg(uphy->dev, "%s\n", __func__);
-
-	ret = msm_hsphy_enable_power(phy, true);
-	if (ret)
-		return ret;
-
-	msm_hsphy_enable_clocks(phy, true);
-	msm_hsphy_reset(phy);
-
-	if (phy->emu_init_seq) {
-		hsusb_phy_write_seq(phy->base,
-			phy->emu_init_seq,
-			phy->emu_init_seq_len, 10000);
-
-		/* Wait for 5ms as per QUSB2 RUMI sequence */
-		usleep_range(5000, 7000);
-
-		if (phy->emu_dcm_reset_seq)
-			hsusb_phy_write_seq(phy->emu_phy_base,
-					phy->emu_dcm_reset_seq,
-					phy->emu_dcm_reset_seq_len, 10000);
-	}
-
-	return 0;
-}
-
 static int msm_hsphy_init(struct usb_phy *uphy)
 {
 	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
@@ -604,8 +566,7 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 	struct msm_hsphy *phy;
 	struct device *dev = &pdev->dev;
 	struct resource *res;
-	int ret = 0, size = 0;
-
+	int ret = 0;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy) {
@@ -647,16 +608,6 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 				phy->phy_rcal_reg);
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-							"emu_phy_base");
-	if (res) {
-		phy->emu_phy_base = devm_ioremap_resource(dev, res);
-		if (IS_ERR(phy->emu_phy_base)) {
-			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
-			phy->emu_phy_base = NULL;
-		}
-	}
-
 	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(phy->ref_clk_src)) {
@@ -681,51 +632,6 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 	if (IS_ERR(phy->phy_reset))
 		return PTR_ERR(phy->phy_reset);
 
-	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
-	if (size) {
-		phy->emu_init_seq = devm_kzalloc(dev,
-						size, GFP_KERNEL);
-		if (phy->emu_init_seq) {
-			phy->emu_init_seq_len =
-				(size / sizeof(*phy->emu_init_seq));
-			if (phy->emu_init_seq_len % 2) {
-				dev_err(dev, "invalid emu_init_seq_len\n");
-				return -EINVAL;
-			}
-
-			of_property_read_u32_array(dev->of_node,
-				"qcom,emu-init-seq",
-				phy->emu_init_seq,
-				phy->emu_init_seq_len);
-		} else {
-			dev_dbg(dev,
-			"error allocating memory for emu_init_seq\n");
-		}
-	}
-
-	size = 0;
-	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
-	if (size) {
-		phy->emu_dcm_reset_seq = devm_kzalloc(dev,
-						size, GFP_KERNEL);
-		if (phy->emu_dcm_reset_seq) {
-			phy->emu_dcm_reset_seq_len =
-				(size / sizeof(*phy->emu_dcm_reset_seq));
-			if (phy->emu_dcm_reset_seq_len % 2) {
-				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
-				return -EINVAL;
-			}
-
-			of_property_read_u32_array(dev->of_node,
-				"qcom,emu-dcm-reset-seq",
-				phy->emu_dcm_reset_seq,
-				phy->emu_dcm_reset_seq_len);
-		} else {
-			dev_dbg(dev,
-			"error allocating memory for emu_dcm_reset_seq\n");
-		}
-	}
-
 	phy->no_rext_present = of_property_read_bool(dev->of_node,
 					"qcom,no-rext-present");
 
@@ -790,10 +696,7 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 	mutex_init(&phy->phy_lock);
 	platform_set_drvdata(pdev, phy);
 
-	if (phy->emu_init_seq)
-		phy->phy.init			= msm_hsphy_emu_init;
-	else
-		phy->phy.init			= msm_hsphy_init;
+	phy->phy.init			= msm_hsphy_init;
 	phy->phy.set_suspend		= msm_hsphy_set_suspend;
 	phy->phy.notify_connect		= msm_hsphy_notify_connect;
 	phy->phy.notify_disconnect	= msm_hsphy_notify_disconnect;
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index cfe9914..1deae76 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -128,6 +128,8 @@ struct msm_ssphy_qmp {
 	struct clk		*com_aux_clk;
 	struct clk		*cfg_ahb_clk;
 	struct clk		*pipe_clk;
+	struct clk		*pipe_clk_mux;
+	struct clk		*pipe_clk_ext_src;
 	struct reset_control	*phy_reset;
 	struct reset_control	*phy_phy_reset;
 	struct reset_control	*global_phy_reset;
@@ -898,6 +900,14 @@ static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev)
 		goto err;
 	}
 
+	phy->pipe_clk_mux = devm_clk_get(dev, "pipe_clk_mux");
+	if (IS_ERR(phy->pipe_clk_mux))
+		phy->pipe_clk_mux = NULL;
+
+	phy->pipe_clk_ext_src = devm_clk_get(dev, "pipe_clk_ext_src");
+	if (IS_ERR(phy->pipe_clk_ext_src))
+		phy->pipe_clk_ext_src = NULL;
+
 	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(phy->ref_clk_src))
 		phy->ref_clk_src = NULL;
@@ -941,6 +951,8 @@ static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on)
 		if (phy->cfg_ahb_clk)
 			clk_prepare_enable(phy->cfg_ahb_clk);
 
+		//select PHY pipe clock
+		clk_set_parent(phy->pipe_clk_mux, phy->pipe_clk_ext_src);
 		clk_prepare_enable(phy->pipe_clk);
 		phy->clk_enabled = true;
 	}
@@ -948,6 +960,9 @@ static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on)
 	if (phy->clk_enabled && !on) {
 		clk_disable_unprepare(phy->pipe_clk);
 
+		//select XO instead of PHY pipe clock
+		clk_set_parent(phy->pipe_clk_mux, phy->ref_clk_src);
+
 		if (phy->cfg_ahb_clk)
 			clk_disable_unprepare(phy->cfg_ahb_clk);
 
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 96036f8..087e5f1 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -556,9 +556,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
 
 static void f81232_close(struct usb_serial_port *port)
 {
+	struct f81232_private *port_priv = usb_get_serial_port_data(port);
+
 	f81232_port_disable(port);
 	usb_serial_generic_close(port);
 	usb_kill_urb(port->interrupt_in_urb);
+	flush_work(&port_priv->interrupt_work);
 }
 
 static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@@ -652,6 +655,40 @@ static int f81232_port_remove(struct usb_serial_port *port)
 	return 0;
 }
 
+static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
+{
+	struct usb_serial_port *port = serial->port[0];
+	struct f81232_private *port_priv = usb_get_serial_port_data(port);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+		usb_kill_urb(port->read_urbs[i]);
+
+	usb_kill_urb(port->interrupt_in_urb);
+
+	if (port_priv)
+		flush_work(&port_priv->interrupt_work);
+
+	return 0;
+}
+
+static int f81232_resume(struct usb_serial *serial)
+{
+	struct usb_serial_port *port = serial->port[0];
+	int result;
+
+	if (tty_port_initialized(&port->port)) {
+		result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+		if (result) {
+			dev_err(&port->dev, "submit interrupt urb failed: %d\n",
+					result);
+			return result;
+		}
+	}
+
+	return usb_serial_generic_resume(serial);
+}
+
 static struct usb_serial_driver f81232_device = {
 	.driver = {
 		.owner =	THIS_MODULE,
@@ -675,6 +712,8 @@ static struct usb_serial_driver f81232_device = {
 	.read_int_callback =	f81232_read_int_callback,
 	.port_probe =		f81232_port_probe,
 	.port_remove =		f81232_port_remove,
+	.suspend =		f81232_suspend,
+	.resume =		f81232_resume,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2274d96..0fff496 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -376,6 +376,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
 	struct usb_serial_port *port = urb->context;
 	unsigned char *data = urb->transfer_buffer;
 	unsigned long flags;
+	bool stopped = false;
 	int status = urb->status;
 	int i;
 
@@ -383,33 +384,51 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
 		if (urb == port->read_urbs[i])
 			break;
 	}
-	set_bit(i, &port->read_urbs_free);
 
 	dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
 							urb->actual_length);
 	switch (status) {
 	case 0:
+		usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
+							data);
+		port->serial->type->process_read_urb(urb);
 		break;
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		dev_dbg(&port->dev, "%s - urb stopped: %d\n",
 							__func__, status);
-		return;
+		stopped = true;
+		break;
 	case -EPIPE:
 		dev_err(&port->dev, "%s - urb stopped: %d\n",
 							__func__, status);
-		return;
+		stopped = true;
+		break;
 	default:
 		dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
 							__func__, status);
-		goto resubmit;
+		break;
 	}
 
-	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
-	port->serial->type->process_read_urb(urb);
+	/*
+	 * Make sure URB processing is done before marking as free to avoid
+	 * racing with unthrottle() on another CPU. Matches the barriers
+	 * implied by the test_and_clear_bit() in
+	 * usb_serial_generic_submit_read_urb().
+	 */
+	smp_mb__before_atomic();
+	set_bit(i, &port->read_urbs_free);
+	/*
+	 * Make sure URB is marked as free before checking the throttled flag
+	 * to avoid racing with unthrottle() on another CPU. Matches the
+	 * smp_mb() in unthrottle().
+	 */
+	smp_mb__after_atomic();
 
-resubmit:
+	if (stopped)
+		return;
+
 	/* Throttle the device if requested by tty */
 	spin_lock_irqsave(&port->lock, flags);
 	port->throttled = port->throttle_req;
@@ -484,6 +503,12 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
 	port->throttled = port->throttle_req = 0;
 	spin_unlock_irq(&port->lock);
 
+	/*
+	 * Matches the smp_mb__after_atomic() in
+	 * usb_serial_generic_read_bulk_callback().
+	 */
+	smp_mb();
+
 	if (was_throttled)
 		usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
 }
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 31b0244..cc794e2 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
 		break;
 	case RTS51X_STAT_IDLE:
 	case RTS51X_STAT_SS:
-		usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
 			usb_stor_dbg(us, "Ready to enter SS state\n");
 			rts51x_set_stat(chip, RTS51X_STAT_SS);
 			/* ignore mass storage interface's children */
 			pm_suspend_ignore_children(&us->pusb_intf->dev, true);
 			usb_autopm_put_interface_async(us->pusb_intf);
-			usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
-				     atomic_read(&us->pusb_intf->pm_usage_cnt),
+			usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
 				     atomic_read(&us->pusb_intf->dev.power.usage_count));
 		}
 		break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 	int ret;
 
 	if (working_scsi(srb)) {
-		usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "working scsi, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
 			ret = usb_autopm_get_interface(us->pusb_intf);
 			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
 		}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 101ebac..59d82b4 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -65,6 +65,7 @@ static const char* host_info(struct Scsi_Host *host)
 static int slave_alloc (struct scsi_device *sdev)
 {
 	struct us_data *us = host_to_us(sdev->host);
+	int maxp;
 
 	/*
 	 * Set the INQUIRY transfer length to 36.  We don't use any of
@@ -74,20 +75,17 @@ static int slave_alloc (struct scsi_device *sdev)
 	sdev->inquiry_len = 36;
 
 	/*
-	 * USB has unusual DMA-alignment requirements: Although the
-	 * starting address of each scatter-gather element doesn't matter,
-	 * the length of each element except the last must be divisible
-	 * by the Bulk maxpacket value.  There's currently no way to
-	 * express this by block-layer constraints, so we'll cop out
-	 * and simply require addresses to be aligned at 512-byte
-	 * boundaries.  This is okay since most block I/O involves
-	 * hardware sectors that are multiples of 512 bytes in length,
-	 * and since host controllers up through USB 2.0 have maxpacket
-	 * values no larger than 512.
-	 *
-	 * But it doesn't suffice for Wireless USB, where Bulk maxpacket
-	 * values can be as large as 2048.  To make that work properly
-	 * will require changes to the block layer.
+	 * USB has unusual scatter-gather requirements: the length of each
+	 * scatterlist element except the last must be divisible by the
+	 * Bulk maxpacket value.  Fortunately this value is always a
+	 * power of 2.  Inform the block layer about this requirement.
+	 */
+	maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
+	blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+	/*
+	 * Some host controllers may have alignment requirements.
+	 * We'll play it safe by requiring 512-byte alignment always.
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1f7b401..5b1d093 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,24 +796,33 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 {
 	struct uas_dev_info *devinfo =
 		(struct uas_dev_info *)sdev->host->hostdata;
+	int maxp;
 
 	sdev->hostdata = devinfo;
 
 	/*
-	 * USB has unusual DMA-alignment requirements: Although the
-	 * starting address of each scatter-gather element doesn't matter,
-	 * the length of each element except the last must be divisible
-	 * by the Bulk maxpacket value.  There's currently no way to
-	 * express this by block-layer constraints, so we'll cop out
-	 * and simply require addresses to be aligned at 512-byte
-	 * boundaries.  This is okay since most block I/O involves
-	 * hardware sectors that are multiples of 512 bytes in length,
-	 * and since host controllers up through USB 2.0 have maxpacket
-	 * values no larger than 512.
+	 * We have two requirements here. We must satisfy the requirements
+	 * of the physical HC and the demands of the protocol, as we
+	 * definitely want no additional memory allocation in this path
+	 * ruling out using bounce buffers.
 	 *
-	 * But it doesn't suffice for Wireless USB, where Bulk maxpacket
-	 * values can be as large as 2048.  To make that work properly
-	 * will require changes to the block layer.
+	 * For a transmission on USB to continue we must never send
+	 * a package that is smaller than maxpacket. Hence the length of each
+         * scatterlist element except the last must be divisible by the
+         * Bulk maxpacket value.
+	 * If the HC does not ensure that through SG,
+	 * the upper layer must do that. We must assume nothing
+	 * about the capabilities off the HC, so we use the most
+	 * pessimistic requirement.
+	 */
+
+	maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
+	blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+	/*
+	 * The protocol has no requirements on alignment in the strict sense.
+	 * Controllers may or may not have alignment restrictions.
+	 * As this is not exported, we use an extremely conservative guess.
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index 423208e..6770afd4 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
 	wcove->dev = &pdev->dev;
 	wcove->regmap = pmic->regmap;
 
-	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
-				  platform_get_irq(pdev, 0));
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		return irq;
+	}
+
+	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
 	if (irq < 0)
 		return irq;
 
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 97b09a42..dbfb2f2 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
-		/* validate packet size and number of packets */
-		unsigned int maxp, packets, bytes;
-
-		maxp = usb_endpoint_maxp(epd);
-		maxp *= usb_endpoint_maxp_mult(epd);
-		bytes = pdu->u.cmd_submit.transfer_buffer_length;
-		packets = DIV_ROUND_UP(bytes, maxp);
-
+		/* validate number of packets */
 		if (pdu->u.cmd_submit.number_of_packets < 0 ||
-		    pdu->u.cmd_submit.number_of_packets > packets) {
+		    pdu->u.cmd_submit.number_of_packets >
+		    USBIP_MAX_ISO_PACKETS) {
 			dev_err(&sdev->udev->dev,
 				"CMD_SUBMIT: isoc invalid num packets %d\n",
 				pdu->u.cmd_submit.number_of_packets);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index bf8afe9..8be857a 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
 #define USBIP_DIR_OUT	0x00
 #define USBIP_DIR_IN	0x01
 
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * compare for example the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
 /**
  * struct usbip_header_basic - data pertinent to every request
  * @command: the usbip request type
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cddb453..6cf00d9 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1443,11 +1443,11 @@ static void __init vfio_pci_fill_ids(void)
 		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
 				   subvendor, subdevice, class, class_mask, 0);
 		if (rc)
-			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask, rc);
 		else
-			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask);
 	}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 64cbc2d..c362757 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1110,12 +1117,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		goto out_unlock;
 	}
 
+	if (!iommu->dma_avail) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -1612,6 +1625,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b214a72..c163bc1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
 {
-	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	struct vhost_umem_node *tmp, *node;
 
+	if (!size)
+		return -EFAULT;
+
+	node = kmalloc(sizeof(*node), GFP_ATOMIC);
 	if (!node)
 		return -ENOMEM;
 
diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c
index 1550ddf..ccabc38 100644
--- a/drivers/video/backlight/qcom-spmi-wled.c
+++ b/drivers/video/backlight/qcom-spmi-wled.c
@@ -696,7 +696,7 @@ static void wled_get_ovp_delay(struct wled *wled, int *delay_time_us)
 	pr_debug("delay_time_us: %d\n", *delay_time_us);
 }
 
-#define AUTO_CALIB_BRIGHTNESS		200
+#define AUTO_CALIB_BRIGHTNESS		512
 static int wled_auto_calibrate(struct wled *wled)
 {
 	int rc = 0, i, delay_time_us;
@@ -1146,9 +1146,8 @@ static int wled5_setup(struct wled *wled)
 	if (rc < 0)
 		return rc;
 
-	rc = regmap_update_bits(wled->regmap,
-			wled->sink_addr + WLED_SINK_CURR_SINK_EN,
-			WLED_SINK_CURR_SINK_MASK, sink_en);
+	rc = regmap_write(wled->regmap,
+			wled->sink_addr + WLED_SINK_CURR_SINK_EN, sink_en);
 	if (rc < 0)
 		return rc;
 
@@ -1257,9 +1256,8 @@ static int wled4_setup(struct wled *wled)
 	if (rc < 0)
 		return rc;
 
-	rc = regmap_update_bits(wled->regmap,
-			wled->sink_addr + WLED_SINK_CURR_SINK_EN,
-			WLED_SINK_CURR_SINK_MASK, sink_en);
+	rc = regmap_write(wled->regmap,
+			wled->sink_addr + WLED_SINK_CURR_SINK_EN, sink_en);
 	if (rc < 0)
 		return rc;
 
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 68a1135..2811c4a 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
 	int size = len * sizeof(u16);
 	int ret = -ENOMEM;
 
+	flags |= __GFP_NOWARN;
+
 	if (cmap->len != len) {
 		fb_dealloc_cmap(cmap);
 		if (!len)
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 283d9307..ac04987 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
 	if (var->vmode & FB_VMODE_DOUBLE)
 		vtotal *= 2;
 
+	if (!htotal || !vtotal)
+		return;
+
 	hfreq = pixclock/htotal;
 	mode->refresh = hfreq/vtotal;
 }
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ba906876..9f39f0c 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
 	info->apertures->ranges[0].base = efifb_fix.smem_start;
 	info->apertures->ranges[0].size = size_remap;
 
-	if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+	if (efi_enabled(EFI_BOOT) &&
+	    !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
 		if ((efifb_fix.smem_start + efifb_fix.smem_len) >
 		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
 			pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
@@ -476,8 +477,12 @@ static int efifb_probe(struct platform_device *dev)
 		 * If the UEFI memory map covers the efifb region, we may only
 		 * remap it using the attributes the memory map prescribes.
 		 */
-		mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
-		mem_flags &= md.attribute;
+		md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
+				EFI_MEMORY_WT | EFI_MEMORY_WB;
+		if (md.attribute) {
+			mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+			mem_flags &= md.attribute;
+		}
 	}
 	if (mem_flags & EFI_MEMORY_WC)
 		info->screen_base = ioremap_wc(efifb_fix.smem_start,
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
index aad1cc4..c7ebf03 100644
--- a/drivers/video/fbdev/sm712.h
+++ b/drivers/video/fbdev/sm712.h
@@ -15,14 +15,10 @@
 
 #define FB_ACCEL_SMI_LYNX 88
 
-#define SCREEN_X_RES      1024
-#define SCREEN_Y_RES      600
-#define SCREEN_BPP        16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE	  0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE	  0x00800000
+#define SCREEN_X_RES          1024
+#define SCREEN_Y_RES_PC       768
+#define SCREEN_Y_RES_NETBOOK  600
+#define SCREEN_BPP            16
 
 #define dac_reg	(0x3c8)
 #define dac_val	(0x3c9)
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 502d0de..f1dcc67 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
 			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
 		},
 	},
+	{	/*  1024 x 768  16Bpp  60Hz */
+		1024, 768, 16, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
 	{	/*  mode#5: 1024 x 768  24Bpp  60Hz */
 		1024, 768, 24, 60,
 		/*  Init_MISC */
@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
 
 static int smtc_blank(int blank_mode, struct fb_info *info)
 {
+	struct smtcfb_info *sfb = info->par;
+
 	/* clear DPMS setting */
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
 		/* Screen On: HSync: On, VSync : On */
+
+		switch (sfb->chip_id) {
+		case 0x710:
+		case 0x712:
+			smtc_seqw(0x6a, 0x16);
+			smtc_seqw(0x6b, 0x02);
+			break;
+		case 0x720:
+			smtc_seqw(0x6a, 0x0d);
+			smtc_seqw(0x6b, 0x02);
+			break;
+		}
+
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
 		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
 		smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
 		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
-		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		break;
 	case FB_BLANK_NORMAL:
 		/* Screen Off: HSync: On, VSync : On   Soft blank */
-		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
 		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
 		/* Screen On: HSync: On, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
 		/* Screen On: HSync: Off, VSync : On */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_POWERDOWN:
 		/* Screen On: HSync: Off, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	default:
 		return -EINVAL;
@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
 
 		/* init SEQ register SR30 - SR75 */
 		for (i = 0; i < SIZE_SR30_SR75; i++)
-			if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
-			    (i + 0x30) != 0x6b)
+			if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
+			    (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
+			    (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
+			    (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
 				smtc_seqw(i + 0x30,
 					  vgamode[j].init_sr30_sr75[i]);
 
@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
 			smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
 
 		/* init CRTC register CR30 - CR4D */
-		for (i = 0; i < SIZE_CR30_CR4D; i++)
+		for (i = 0; i < SIZE_CR30_CR4D; i++) {
+			if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
+				/* side-effect, don't write to CR3B-CR3F */
+				continue;
 			smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+		}
 
 		/* init CRTC register CR90 - CRA7 */
 		for (i = 0; i < SIZE_CR90_CRA7; i++)
@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
 {
 	sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
 
+	if (sfb->chip_id == 0x720)
+		/* on SM720, the framebuffer starts at the 1 MB offset */
+		sfb->fb->fix.smem_start += 0x00200000;
+
+	/* XXX: is it safe for SM720 on Big-Endian? */
 	if (sfb->fb->var.bits_per_pixel == 32)
 		sfb->fb->fix.smem_start += big_addr;
 
@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
 	outb_p(0x11, 0x3c5);
 }
 
+static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
+{
+	u8 vram;
+
+	switch (sfb->chip_id) {
+	case 0x710:
+	case 0x712:
+		/*
+		 * Assume SM712 graphics chip has 4MB VRAM.
+		 *
+		 * FIXME: SM712 can have 2MB VRAM, which is used on earlier
+		 * laptops, such as IBM Thinkpad 240X. This driver would
+		 * probably crash on those machines. If anyone gets one of
+		 * those and is willing to help, run "git blame" and send me
+		 * an E-mail.
+		 */
+		return 0x00400000;
+	case 0x720:
+		outb_p(0x76, 0x3c4);
+		vram = inb_p(0x3c5) >> 6;
+
+		if (vram == 0x00)
+			return 0x00800000;  /* 8 MB */
+		else if (vram == 0x01)
+			return 0x01000000;  /* 16 MB */
+		else if (vram == 0x02)
+			return 0x00400000;  /* illegal, fallback to 4 MB */
+		else if (vram == 0x03)
+			return 0x00400000;  /* 4 MB */
+	}
+	return 0;  /* unknown hardware */
+}
+
+static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
+{
+	/* get mode parameter from smtc_scr_info */
+	if (smtc_scr_info.lfb_width != 0) {
+		sfb->fb->var.xres = smtc_scr_info.lfb_width;
+		sfb->fb->var.yres = smtc_scr_info.lfb_height;
+		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+		goto final;
+	}
+
+	/*
+	 * No parameter, default resolution is 1024x768-16.
+	 *
+	 * FIXME: earlier laptops, such as IBM Thinkpad 240X, has a 800x600
+	 * panel, also see the comments about Thinkpad 240X above.
+	 */
+	sfb->fb->var.xres = SCREEN_X_RES;
+	sfb->fb->var.yres = SCREEN_Y_RES_PC;
+	sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+
+#ifdef CONFIG_MIPS
+	/*
+	 * Loongson MIPS netbooks use 1024x600 LCD panels, which is the original
+	 * target platform of this driver, but nearly all old x86 laptops have
+	 * 1024x768. Lighting 768 panels using 600's timings would partially
+	 * garble the display, so we don't want that. But it's not possible to
+	 * distinguish them reliably.
+	 *
+	 * So we change the default to 768, but keep 600 as-is on MIPS.
+	 */
+	sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
+#endif
+
+final:
+	big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+}
+
 static int smtcfb_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
 	struct smtcfb_info *sfb;
 	struct fb_info *info;
-	u_long smem_size = 0x00800000;	/* default 8MB */
+	u_long smem_size;
 	int err;
 	unsigned long mmio_base;
 
@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
 
 	sm7xx_init_hw();
 
-	/* get mode parameter from smtc_scr_info */
-	if (smtc_scr_info.lfb_width != 0) {
-		sfb->fb->var.xres = smtc_scr_info.lfb_width;
-		sfb->fb->var.yres = smtc_scr_info.lfb_height;
-		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
-	} else {
-		/* default resolution 1024x600 16bit mode */
-		sfb->fb->var.xres = SCREEN_X_RES;
-		sfb->fb->var.yres = SCREEN_Y_RES;
-		sfb->fb->var.bits_per_pixel = SCREEN_BPP;
-	}
-
-	big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
 	/* Map address and memory detection */
 	mmio_base = pci_resource_start(pdev, 0);
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
 
+	smem_size = sm7xx_vram_probe(sfb);
+	dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
+					smem_size / 1048576);
+
 	switch (sfb->chip_id) {
 	case 0x710:
 	case 0x712:
 		sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
 		sfb->fb->fix.mmio_len = 0x00400000;
-		smem_size = SM712_VIDEOMEMORYSIZE;
 		sfb->lfb = ioremap(mmio_base, mmio_addr);
 		if (!sfb->lfb) {
 			dev_err(&pdev->dev,
@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
 	case 0x720:
 		sfb->fb->fix.mmio_start = mmio_base;
 		sfb->fb->fix.mmio_len = 0x00200000;
-		smem_size = SM722_VIDEOMEMORYSIZE;
-		sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+		sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
 		sfb->lfb = sfb->dp_regs + 0x00200000;
 		sfb->mmio = (smtc_regbaseaddress =
 		    sfb->dp_regs + 0x000c0000);
@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
 		goto failed_fb;
 	}
 
+	/* probe and decide resolution */
+	sm7xx_resolution_probe(sfb);
+
 	/* can support 32 bpp */
 	if (sfb->fb->var.bits_per_pixel == 15)
 		sfb->fb->var.bits_per_pixel = 16;
@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto failed;
 
-	smtcfb_setmode(sfb);
+	/*
+	 * The screen would be temporarily garbled when sm712fb takes over
+	 * vesafb or VGA text mode. Zero the framebuffer.
+	 */
+	memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
 
 	err = register_framebuffer(info);
 	if (err < 0)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 070026a..5a0d6fb 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
 	return 0;
 }
 
-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
-	       int width, int height, char *data)
+static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
 {
 	int i, ret;
 	char *cmd;
@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
 
 	start_cycles = get_cycles();
 
+	mutex_lock(&dlfb->render_mutex);
+
 	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
 	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
 	x = aligned_x;
 
 	if ((width <= 0) ||
 	    (x + width > dlfb->info->var.xres) ||
-	    (y + height > dlfb->info->var.yres))
-		return -EINVAL;
+	    (y + height > dlfb->info->var.yres)) {
+		ret = -EINVAL;
+		goto unlock_ret;
+	}
 
-	if (!atomic_read(&dlfb->usb_active))
-		return 0;
+	if (!atomic_read(&dlfb->usb_active)) {
+		ret = 0;
+		goto unlock_ret;
+	}
 
 	urb = dlfb_get_urb(dlfb);
-	if (!urb)
-		return 0;
+	if (!urb) {
+		ret = 0;
+		goto unlock_ret;
+	}
 	cmd = urb->transfer_buffer;
 
 	for (i = y; i < y + height ; i++) {
@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
 			*cmd++ = 0xAF;
 		/* Send partial buffer remaining before exiting */
 		len = cmd - (char *) urb->transfer_buffer;
-		ret = dlfb_submit_urb(dlfb, urb, len);
+		dlfb_submit_urb(dlfb, urb, len);
 		bytes_sent += len;
 	} else
 		dlfb_urb_completion(urb);
@@ -655,7 +662,55 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
 		    >> 10)), /* Kcycles */
 		   &dlfb->cpu_kcycles_used);
 
-	return 0;
+	ret = 0;
+
+unlock_ret:
+	mutex_unlock(&dlfb->render_mutex);
+	return ret;
+}
+
+static void dlfb_init_damage(struct dlfb_data *dlfb)
+{
+	dlfb->damage_x = INT_MAX;
+	dlfb->damage_x2 = 0;
+	dlfb->damage_y = INT_MAX;
+	dlfb->damage_y2 = 0;
+}
+
+static void dlfb_damage_work(struct work_struct *w)
+{
+	struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
+	int x, x2, y, y2;
+
+	spin_lock_irq(&dlfb->damage_lock);
+	x = dlfb->damage_x;
+	x2 = dlfb->damage_x2;
+	y = dlfb->damage_y;
+	y2 = dlfb->damage_y2;
+	dlfb_init_damage(dlfb);
+	spin_unlock_irq(&dlfb->damage_lock);
+
+	if (x < x2 && y < y2)
+		dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
+}
+
+static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
+{
+	unsigned long flags;
+	int x2 = x + width;
+	int y2 = y + height;
+
+	if (x >= x2 || y >= y2)
+		return;
+
+	spin_lock_irqsave(&dlfb->damage_lock, flags);
+	dlfb->damage_x = min(x, dlfb->damage_x);
+	dlfb->damage_x2 = max(x2, dlfb->damage_x2);
+	dlfb->damage_y = min(y, dlfb->damage_y);
+	dlfb->damage_y2 = max(y2, dlfb->damage_y2);
+	spin_unlock_irqrestore(&dlfb->damage_lock, flags);
+
+	schedule_work(&dlfb->damage_work);
 }
 
 /*
@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
 				(u32)info->var.yres);
 
 		dlfb_handle_damage(dlfb, 0, start, info->var.xres,
-			lines, info->screen_base);
+			lines);
 	}
 
 	return result;
@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
 
 	sys_copyarea(info, area);
 
-	dlfb_handle_damage(dlfb, area->dx, area->dy,
-			area->width, area->height, info->screen_base);
+	dlfb_offload_damage(dlfb, area->dx, area->dy,
+			area->width, area->height);
 }
 
 static void dlfb_ops_imageblit(struct fb_info *info,
@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
 
 	sys_imageblit(info, image);
 
-	dlfb_handle_damage(dlfb, image->dx, image->dy,
-			image->width, image->height, info->screen_base);
+	dlfb_offload_damage(dlfb, image->dx, image->dy,
+			image->width, image->height);
 }
 
 static void dlfb_ops_fillrect(struct fb_info *info,
@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
 
 	sys_fillrect(info, rect);
 
-	dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
-			      rect->height, info->screen_base);
+	dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
+			      rect->height);
 }
 
 /*
@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 	int bytes_identical = 0;
 	int bytes_rendered = 0;
 
+	mutex_lock(&dlfb->render_mutex);
+
 	if (!fb_defio)
-		return;
+		goto unlock_ret;
 
 	if (!atomic_read(&dlfb->usb_active))
-		return;
+		goto unlock_ret;
 
 	start_cycles = get_cycles();
 
 	urb = dlfb_get_urb(dlfb);
 	if (!urb)
-		return;
+		goto unlock_ret;
 
 	cmd = urb->transfer_buffer;
 
@@ -782,6 +839,8 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 	atomic_add(((unsigned int) ((end_cycles - start_cycles)
 		    >> 10)), /* Kcycles */
 		   &dlfb->cpu_kcycles_used);
+unlock_ret:
+	mutex_unlock(&dlfb->render_mutex);
 }
 
 static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
 		if (area.y > info->var.yres)
 			area.y = info->var.yres;
 
-		dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
-			   info->screen_base);
+		dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
 	}
 
 	return 0;
@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
 {
 	struct dlfb_data *dlfb = info->par;
 
+	cancel_work_sync(&dlfb->damage_work);
+
+	mutex_destroy(&dlfb->render_mutex);
+
 	if (info->cmap.len != 0)
 		fb_dealloc_cmap(&info->cmap);
 	if (info->monspecs.modedb)
@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
 			pix_framebuffer[i] = 0x37e6;
 	}
 
-	dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
-			   info->screen_base);
+	dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
 
 	return 0;
 }
@@ -1598,7 +1659,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
 	dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
 	if (!dlfb) {
 		dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__);
-		goto error;
+		return -ENOMEM;
 	}
 
 	INIT_LIST_HEAD(&dlfb->deferred_free);
@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
 	dlfb->ops = dlfb_ops;
 	info->fbops = &dlfb->ops;
 
+	mutex_init(&dlfb->render_mutex);
+	dlfb_init_damage(dlfb);
+	spin_lock_init(&dlfb->damage_lock);
+	INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
+
 	INIT_LIST_HEAD(&info->modelist);
 
 	if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
@@ -1703,7 +1769,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
 error:
 	if (dlfb->info) {
 		dlfb_ops_destroy(dlfb->info);
-	} else if (dlfb) {
+	} else {
 		usb_put_dev(dlfb->udev);
 		kfree(dlfb);
 	}
@@ -1730,12 +1796,10 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
 	/* this function will wait for all in-flight urbs to complete */
 	dlfb_free_urb_list(dlfb);
 
-	if (info) {
-		/* remove udlfb's sysfs interfaces */
-		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
-			device_remove_file(info->dev, &fb_device_attrs[i]);
-		device_remove_bin_file(info->dev, &edid_attr);
-	}
+	/* remove udlfb's sysfs interfaces */
+	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
+		device_remove_file(info->dev, &fb_device_attrs[i]);
+	device_remove_bin_file(info->dev, &edid_attr);
 
 	unregister_framebuffer(info);
 }
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 8ba726e..1bbd910 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -215,6 +215,9 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 	 * hypervisor.
 	 */
 	lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+	if (param.count == 0 ||
+	    param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
+		return -EINVAL;
 	num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* Allocate the buffers we need */
@@ -331,8 +334,8 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
 	struct fsl_hv_ioctl_prop param;
 	char __user *upath, *upropname;
 	void __user *upropval;
-	char *path = NULL, *propname = NULL;
-	void *propval = NULL;
+	char *path, *propname;
+	void *propval;
 	int ret = 0;
 
 	/* Get the parameters from the user. */
@@ -344,32 +347,30 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
 	upropval = (void __user *)(uintptr_t)param.propval;
 
 	path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
-	if (IS_ERR(path)) {
-		ret = PTR_ERR(path);
-		goto out;
-	}
+	if (IS_ERR(path))
+		return PTR_ERR(path);
 
 	propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
 	if (IS_ERR(propname)) {
 		ret = PTR_ERR(propname);
-		goto out;
+		goto err_free_path;
 	}
 
 	if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
 		ret = -EINVAL;
-		goto out;
+		goto err_free_propname;
 	}
 
 	propval = kmalloc(param.proplen, GFP_KERNEL);
 	if (!propval) {
 		ret = -ENOMEM;
-		goto out;
+		goto err_free_propname;
 	}
 
 	if (set) {
 		if (copy_from_user(propval, upropval, param.proplen)) {
 			ret = -EFAULT;
-			goto out;
+			goto err_free_propval;
 		}
 
 		param.ret = fh_partition_set_dtprop(param.handle,
@@ -388,7 +389,7 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
 			if (copy_to_user(upropval, propval, param.proplen) ||
 			    put_user(param.proplen, &p->proplen)) {
 				ret = -EFAULT;
-				goto out;
+				goto err_free_propval;
 			}
 		}
 	}
@@ -396,10 +397,12 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
 	if (put_user(param.ret, &p->ret))
 		ret = -EFAULT;
 
-out:
-	kfree(path);
+err_free_propval:
 	kfree(propval);
+err_free_propname:
 	kfree(propname);
+err_free_path:
+	kfree(path);
 
 	return ret;
 }
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 1475ed5..0afef60 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1263,6 +1263,20 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
 	return ret;
 }
 
+static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
+{
+	switch (type) {
+	case VMMDEV_HGCM_PARM_TYPE_32BIT:
+	case VMMDEV_HGCM_PARM_TYPE_64BIT:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 			       struct vbg_session *session, bool f32bit,
 			       struct vbg_ioctl_hgcm_call *call)
@@ -1298,6 +1312,23 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 	}
 	call->hdr.size_out = actual_size;
 
+	/* Validate parameter types */
+	if (f32bit) {
+		struct vmmdev_hgcm_function_parameter32 *parm =
+			VBG_IOCTL_HGCM_CALL_PARMS32(call);
+
+		for (i = 0; i < call->parm_count; i++)
+			if (!vbg_param_valid(parm[i].type))
+				return -EINVAL;
+	} else {
+		struct vmmdev_hgcm_function_parameter *parm =
+			VBG_IOCTL_HGCM_CALL_PARMS(call);
+
+		for (i = 0; i < call->parm_count; i++)
+			if (!vbg_param_valid(parm[i].type))
+				return -EINVAL;
+	}
+
 	/*
 	 * Validate the client id.
 	 */
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5..45b04bc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
 	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
 		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-	for (i = 0; i < vp_dev->msix_vectors; i++)
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	if (vp_dev->msix_affinity_masks) {
+		for (i = 0; i < vp_dev->msix_vectors; i++)
+			if (vp_dev->msix_affinity_masks[i])
+				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	}
 
 	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 0f4ecfc..a9fb775 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
 	/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
 	alt = 3;
 	err = usb_set_interface(dev->udev,
-		intf->altsetting[alt].desc.bInterfaceNumber, alt);
+		intf->cur_altsetting->desc.bInterfaceNumber, alt);
 	if (err) {
 		dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
 			"for %d interface: err=%d.\n", alt,
-			intf->altsetting[alt].desc.bInterfaceNumber, err);
+			intf->cur_altsetting->desc.bInterfaceNumber, err);
 		goto err_out_clear;
 	}
 
-	iface_desc = &intf->altsetting[alt];
+	iface_desc = intf->cur_altsetting;
 	if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
 		pr_info("Num endpoints=%d. It is not DS9490R.\n",
 			iface_desc->desc.bNumEndpoints);
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 0364d33..3516ce6 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
 	if (w1_reset_bus(dev))
 		return -1;
 
-	/* This will make only the last matched slave perform a skip ROM. */
-	w1_write_8(dev, W1_RESUME_CMD);
+	w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(w1_reset_resume_command);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e2010..0782ff3 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	if (xen_store_evtchn == 0)
 		return -ENOENT;
 
-	nonseekable_open(inode, filp);
-
-	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+	stream_open(inode, filp);
 
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e1cbdfd..1970693 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -50,8 +50,9 @@
  * @page: structure to page
  *
  */
-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
+static int v9fs_fid_readpage(void *data, struct page *page)
 {
+	struct p9_fid *fid = data;
 	struct inode *inode = page->mapping->host;
 	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
 	struct iov_iter to;
@@ -122,7 +123,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
 	if (ret == 0)
 		return ret;
 
-	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+	ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
+			filp->private_data);
 	p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
 	return ret;
 }
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 19c04caf..e00461a 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -253,6 +253,7 @@ static void afs_kill_pages(struct address_space *mapping,
 				first = page->index + 1;
 			lock_page(page);
 			generic_error_remove_page(mapping, page);
+			unlock_page(page);
 		}
 
 		__pagevec_release(&pv);
diff --git a/fs/aio.c b/fs/aio.c
index 45d5ef8d..911e230 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -161,9 +161,13 @@ struct kioctx {
 	unsigned		id;
 };
 
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
 struct fsync_iocb {
-	struct work_struct	work;
 	struct file		*file;
+	struct work_struct	work;
 	bool			datasync;
 };
 
@@ -171,14 +175,21 @@ struct poll_iocb {
 	struct file		*file;
 	struct wait_queue_head	*head;
 	__poll_t		events;
-	bool			woken;
+	bool			done;
 	bool			cancelled;
 	struct wait_queue_entry	wait;
 	struct work_struct	work;
 };
 
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
 struct aio_kiocb {
 	union {
+		struct file		*ki_filp;
 		struct kiocb		rw;
 		struct fsync_iocb	fsync;
 		struct poll_iocb	poll;
@@ -187,8 +198,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -902,7 +912,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 	local_irq_restore(flags);
 }
 
-static bool get_reqs_available(struct kioctx *ctx)
+static bool __get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
@@ -994,32 +1004,35 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static bool get_reqs_available(struct kioctx *ctx)
+{
+	if (__get_reqs_available(ctx))
+		return true;
+	user_refill_reqs_available(ctx);
+	return __get_reqs_available(ctx);
+}
+
 /* aio_get_req
  *	Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct aio_kiocb *req;
 
-	if (!get_reqs_available(ctx)) {
-		user_refill_reqs_available(ctx);
-		if (!get_reqs_available(ctx))
-			return NULL;
-	}
-
-	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 	if (unlikely(!req))
-		goto out_put;
+		return NULL;
 
 	percpu_ref_get(&ctx->reqs);
-	INIT_LIST_HEAD(&req->ki_list);
-	refcount_set(&req->ki_refcnt, 0);
 	req->ki_ctx = ctx;
+	INIT_LIST_HEAD(&req->ki_list);
+	refcount_set(&req->ki_refcnt, 2);
+	req->ki_eventfd = NULL;
 	return req;
-out_put:
-	put_reqs_available(ctx, 1);
-	return NULL;
 }
 
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -1050,19 +1063,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-	if (refcount_read(&iocb->ki_refcnt) == 0 ||
-	    refcount_dec_and_test(&iocb->ki_refcnt)) {
-		percpu_ref_put(&iocb->ki_ctx->reqs);
-		kmem_cache_free(kiocb_cachep, iocb);
-	}
+	if (iocb->ki_filp)
+		fput(iocb->ki_filp);
+	percpu_ref_put(&iocb->ki_ctx->reqs);
+	kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring	*ring;
@@ -1086,17 +1098,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	event->data = iocb->ki_user_data;
-	event->res = res;
-	event->res2 = res2;
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1138,7 +1147,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
-	iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+		aio_complete(iocb);
+		iocb_destroy(iocb);
+	}
 }
 
 /* aio_read_events_ring
@@ -1412,18 +1428,17 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 		file_end_write(kiocb->ki_filp);
 	}
 
-	fput(kiocb->ki_filp);
-	aio_complete(iocb, res, res2);
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
+	iocb_put(iocb);
 }
 
-static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 {
 	int ret;
 
-	req->ki_filp = fget(iocb->aio_fildes);
-	if (unlikely(!req->ki_filp))
-		return -EBADF;
 	req->ki_complete = aio_complete_rw;
+	req->private = NULL;
 	req->ki_pos = iocb->aio_offset;
 	req->ki_flags = iocb_flags(req->ki_filp);
 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
@@ -1438,7 +1453,6 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 		ret = ioprio_check_cap(iocb->aio_reqprio);
 		if (ret) {
 			pr_debug("aio ioprio check cap error: %d\n", ret);
-			fput(req->ki_filp);
 			return ret;
 		}
 
@@ -1448,11 +1462,13 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 
 	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
 	if (unlikely(ret))
-		fput(req->ki_filp);
-	return ret;
+		return ret;
+
+	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
+	return 0;
 }
 
-static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
 		bool vectored, bool compat, struct iov_iter *iter)
 {
 	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
@@ -1487,12 +1503,12 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 		ret = -EINTR;
 		/*FALLTHRU*/
 	default:
-		aio_complete_rw(req, ret, 0);
+		req->ki_complete(req, ret, 0);
 	}
 }
 
-static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+			bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1503,29 +1519,24 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
 	if (ret)
 		return ret;
 	file = req->ki_filp;
-
-	ret = -EBADF;
 	if (unlikely(!(file->f_mode & FMODE_READ)))
-		goto out_fput;
+		return -EBADF;
 	ret = -EINVAL;
 	if (unlikely(!file->f_op->read_iter))
-		goto out_fput;
+		return -EINVAL;
 
 	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
 	if (ret)
-		goto out_fput;
+		return ret;
 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret)
 		aio_rw_done(req, call_read_iter(file, req, &iter));
 	kfree(iovec);
-out_fput:
-	if (unlikely(ret))
-		fput(file);
 	return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+			 bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1537,16 +1548,14 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
 		return ret;
 	file = req->ki_filp;
 
-	ret = -EBADF;
 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
-		goto out_fput;
-	ret = -EINVAL;
+		return -EBADF;
 	if (unlikely(!file->f_op->write_iter))
-		goto out_fput;
+		return -EINVAL;
 
 	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
 	if (ret)
-		goto out_fput;
+		return ret;
 	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret) {
 		/*
@@ -1564,35 +1573,26 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
 		aio_rw_done(req, call_write_iter(file, req, &iter));
 	}
 	kfree(iovec);
-out_fput:
-	if (unlikely(ret))
-		fput(file);
 	return ret;
 }
 
 static void aio_fsync_work(struct work_struct *work)
 {
-	struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-	int ret;
+	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-	ret = vfs_fsync(req->file, req->datasync);
-	fput(req->file);
-	aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+	iocb_put(iocb);
 }
 
-static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
+static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
+		     bool datasync)
 {
 	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
 			iocb->aio_rw_flags))
 		return -EINVAL;
 
-	req->file = fget(iocb->aio_fildes);
-	if (unlikely(!req->file))
-		return -EBADF;
-	if (unlikely(!req->file->f_op->fsync)) {
-		fput(req->file);
+	if (unlikely(!req->file->f_op->fsync))
 		return -EINVAL;
-	}
 
 	req->datasync = datasync;
 	INIT_WORK(&req->work, aio_fsync_work);
@@ -1600,14 +1600,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
 	return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-	struct file *file = iocb->poll.file;
-
-	aio_complete(iocb, mangle_poll(mask), 0);
-	fput(file);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1633,9 +1625,11 @@ static void aio_poll_complete_work(struct work_struct *work)
 		return;
 	}
 	list_del_init(&iocb->ki_list);
+	iocb->ki_res.res = mangle_poll(mask);
+	req->done = true;
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	aio_poll_complete(iocb, mask);
+	iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1663,31 +1657,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	__poll_t mask = key_to_poll(key);
 	unsigned long flags;
 
-	req->woken = true;
-
 	/* for instances that support it check for an event match first: */
-	if (mask) {
-		if (!(mask & req->events))
-			return 0;
+	if (mask && !(mask & req->events))
+		return 0;
 
+	list_del_init(&req->wait.entry);
+
+	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 		/*
 		 * Try to complete the iocb inline if we can. Use
 		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
 		 * call this function with IRQs disabled and because IRQs
 		 * have to be disabled before ctx_lock is obtained.
 		 */
-		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-			list_del(&iocb->ki_list);
-			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-			list_del_init(&req->wait.entry);
-			aio_poll_complete(iocb, mask);
-			return 1;
-		}
+		list_del(&iocb->ki_list);
+		iocb->ki_res.res = mangle_poll(mask);
+		req->done = true;
+		spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+		iocb_put(iocb);
+	} else {
+		schedule_work(&req->work);
 	}
-
-	list_del_init(&req->wait.entry);
-	schedule_work(&req->work);
 	return 1;
 }
 
@@ -1714,11 +1704,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
 	struct kioctx *ctx = aiocb->ki_ctx;
 	struct poll_iocb *req = &aiocb->poll;
 	struct aio_poll_table apt;
+	bool cancel = false;
 	__poll_t mask;
 
 	/* reject any unknown events outside the normal event mask. */
@@ -1730,9 +1721,10 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 
 	INIT_WORK(&req->work, aio_poll_complete_work);
 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-	req->file = fget(iocb->aio_fildes);
-	if (unlikely(!req->file))
-		return -EBADF;
+
+	req->head = NULL;
+	req->done = false;
+	req->cancelled = false;
 
 	apt.pt._qproc = aio_poll_queue_proc;
 	apt.pt._key = req->events;
@@ -1743,83 +1735,79 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 	INIT_LIST_HEAD(&req->wait.entry);
 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&aiocb->ki_refcnt, 2);
-
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
-	if (unlikely(!req->head)) {
-		/* we did not manage to set up a waitqueue, done */
-		goto out;
-	}
-
 	spin_lock_irq(&ctx->ctx_lock);
-	spin_lock(&req->head->lock);
-	if (req->woken) {
-		/* wake_up context handles the rest */
-		mask = 0;
+	if (likely(req->head)) {
+		spin_lock(&req->head->lock);
+		if (unlikely(list_empty(&req->wait.entry))) {
+			if (apt.error)
+				cancel = true;
+			apt.error = 0;
+			mask = 0;
+		}
+		if (mask || apt.error) {
+			list_del_init(&req->wait.entry);
+		} else if (cancel) {
+			WRITE_ONCE(req->cancelled, true);
+		} else if (!req->done) { /* actually waiting for an event */
+			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+			aiocb->ki_cancel = aio_poll_cancel;
+		}
+		spin_unlock(&req->head->lock);
+	}
+	if (mask) { /* no async, we'd stolen it */
+		aiocb->ki_res.res = mangle_poll(mask);
 		apt.error = 0;
-	} else if (mask || apt.error) {
-		/* if we get an error or a mask we are done */
-		WARN_ON_ONCE(list_empty(&req->wait.entry));
-		list_del_init(&req->wait.entry);
-	} else {
-		/* actually waiting for an event */
-		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-		aiocb->ki_cancel = aio_poll_cancel;
 	}
-	spin_unlock(&req->head->lock);
 	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	if (unlikely(apt.error)) {
-		fput(req->file);
-		return apt.error;
-	}
-
 	if (mask)
-		aio_poll_complete(aiocb, mask);
-	iocb_put(aiocb);
-	return 0;
+		iocb_put(aiocb);
+	return apt.error;
 }
 
-static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 bool compat)
+static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
+			   struct iocb __user *user_iocb, bool compat)
 {
 	struct aio_kiocb *req;
-	struct iocb iocb;
 	ssize_t ret;
 
-	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
-		return -EFAULT;
-
 	/* enforce forwards compatibility on users */
-	if (unlikely(iocb.aio_reserved2)) {
+	if (unlikely(iocb->aio_reserved2)) {
 		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
 	/* prevent overflows */
 	if (unlikely(
-	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
-	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
-	    ((ssize_t)iocb.aio_nbytes < 0)
+	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
+	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
+	    ((ssize_t)iocb->aio_nbytes < 0)
 	   )) {
 		pr_debug("EINVAL: overflow check\n");
 		return -EINVAL;
 	}
 
-	req = aio_get_req(ctx);
-	if (unlikely(!req))
+	if (!get_reqs_available(ctx))
 		return -EAGAIN;
 
-	if (iocb.aio_flags & IOCB_FLAG_RESFD) {
+	ret = -EAGAIN;
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
+		goto out_put_reqs_available;
+
+	req->ki_filp = fget(iocb->aio_fildes);
+	ret = -EBADF;
+	if (unlikely(!req->ki_filp))
+		goto out_put_req;
+
+	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
 		 * instance of the file* now. The file descriptor must be
 		 * an eventfd() fd, and will be signaled for each completed
 		 * event using the eventfd_signal() function.
 		 */
-		req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
+		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
 		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
 			req->ki_eventfd = NULL;
@@ -1833,54 +1821,70 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		goto out_put_req;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb.aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
-	switch (iocb.aio_lio_opcode) {
+	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, &iocb, false, compat);
+		ret = aio_read(&req->rw, iocb, false, compat);
 		break;
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, &iocb, false, compat);
+		ret = aio_write(&req->rw, iocb, false, compat);
 		break;
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, &iocb, true, compat);
+		ret = aio_read(&req->rw, iocb, true, compat);
 		break;
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, &iocb, true, compat);
+		ret = aio_write(&req->rw, iocb, true, compat);
 		break;
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, &iocb, false);
+		ret = aio_fsync(&req->fsync, iocb, false);
 		break;
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, &iocb, true);
+		ret = aio_fsync(&req->fsync, iocb, true);
 		break;
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, &iocb);
+		ret = aio_poll(req, iocb);
 		break;
 	default:
-		pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
+		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
 		ret = -EINVAL;
 		break;
 	}
 
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
 	/*
 	 * If ret is 0, we'd either done aio_complete() ourselves or have
 	 * arranged for that to be done asynchronously.  Anything non-zero
 	 * means that we need to destroy req ourselves.
 	 */
-	if (ret)
-		goto out_put_req;
-	return 0;
+	if (!ret)
+		return 0;
+
 out_put_req:
-	put_reqs_available(ctx, 1);
-	percpu_ref_put(&ctx->reqs);
 	if (req->ki_eventfd)
 		eventfd_ctx_put(req->ki_eventfd);
-	kmem_cache_free(kiocb_cachep, req);
+	iocb_destroy(req);
+out_put_reqs_available:
+	put_reqs_available(ctx, 1);
 	return ret;
 }
 
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+			 bool compat)
+{
+	struct iocb iocb;
+
+	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
+		return -EFAULT;
+
+	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+}
+
 /* sys_io_submit:
  *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
  *	the number of iocbs queued.  May return -EINVAL if the aio_context
@@ -1973,24 +1977,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *	Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-	struct aio_kiocb *kiocb;
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	/* TODO: use a hash or array, this sucks. */
-	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb)
-			return kiocb;
-	}
-	return NULL;
-}
-
 /* sys_io_cancel:
  *	Attempts to cancel an iocb previously passed to io_submit.  If
  *	the operation is successfully cancelled, the resulting event is
@@ -2008,6 +1994,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2019,10 +2006,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		return -EINVAL;
 
 	spin_lock_irq(&ctx->ctx_lock);
-	kiocb = lookup_kiocb(ctx, iocb);
-	if (kiocb) {
-		ret = kiocb->ki_cancel(&kiocb->rw);
-		list_del_init(&kiocb->ki_list);
+	/* TODO: use a hash or array, this sucks. */
+	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+		if (kiocb->ki_res.obj == obj) {
+			ret = kiocb->ki_cancel(&kiocb->rw);
+			list_del_init(&kiocb->ki_list);
+			break;
+		}
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
 
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ae750b1..ac6c383 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -710,7 +710,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
  * read tree blocks and add keys where required.
  */
 static int add_missing_keys(struct btrfs_fs_info *fs_info,
-			    struct preftrees *preftrees)
+			    struct preftrees *preftrees, bool lock)
 {
 	struct prelim_ref *ref;
 	struct extent_buffer *eb;
@@ -735,12 +735,14 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
 			free_extent_buffer(eb);
 			return -EIO;
 		}
-		btrfs_tree_read_lock(eb);
+		if (lock)
+			btrfs_tree_read_lock(eb);
 		if (btrfs_header_level(eb) == 0)
 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 		else
 			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
-		btrfs_tree_read_unlock(eb);
+		if (lock)
+			btrfs_tree_read_unlock(eb);
 		free_extent_buffer(eb);
 		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 		cond_resched();
@@ -1225,7 +1227,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 
 	btrfs_release_path(path);
 
-	ret = add_missing_keys(fs_info, &preftrees);
+	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
 	if (ret)
 		goto out;
 
@@ -1286,11 +1288,14 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 					ret = -EIO;
 					goto out;
 				}
-				btrfs_tree_read_lock(eb);
-				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+				if (!path->skip_locking) {
+					btrfs_tree_read_lock(eb);
+					btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+				}
 				ret = find_extent_in_eb(eb, bytenr,
 							*extent_item_pos, &eie, ignore_offset);
-				btrfs_tree_read_unlock_blocking(eb);
+				if (!path->skip_locking)
+					btrfs_tree_read_unlock_blocking(eb);
 				free_extent_buffer(eb);
 				if (ret < 0)
 					goto out;
@@ -1452,8 +1457,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
  * callers (such as fiemap) which want to know whether the extent is
  * shared but do not need a ref count.
  *
- * This attempts to allocate a transaction in order to account for
- * delayed refs, but continues on even when the alloc fails.
+ * This attempts to attach to the running transaction in order to account for
+ * delayed refs, but continues on even when no running transaction exists.
  *
  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
  */
@@ -1476,13 +1481,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 	tmp = ulist_alloc(GFP_NOFS);
 	roots = ulist_alloc(GFP_NOFS);
 	if (!tmp || !roots) {
-		ulist_free(tmp);
-		ulist_free(roots);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
-	trans = btrfs_join_transaction(root);
+	trans = btrfs_attach_transaction(root);
 	if (IS_ERR(trans)) {
+		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
+			ret = PTR_ERR(trans);
+			goto out;
+		}
 		trans = NULL;
 		down_read(&fs_info->commit_root_sem);
 	} else {
@@ -1515,6 +1523,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 	} else {
 		up_read(&fs_info->commit_root_sem);
 	}
+out:
 	ulist_free(tmp);
 	ulist_free(roots);
 	return ret;
@@ -1904,14 +1913,20 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 			extent_item_objectid);
 
 	if (!search_commit_root) {
-		trans = btrfs_join_transaction(fs_info->extent_root);
-		if (IS_ERR(trans))
-			return PTR_ERR(trans);
-		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
-	} else {
-		down_read(&fs_info->commit_root_sem);
+		trans = btrfs_attach_transaction(fs_info->extent_root);
+		if (IS_ERR(trans)) {
+			if (PTR_ERR(trans) != -ENOENT &&
+			    PTR_ERR(trans) != -EROFS)
+				return PTR_ERR(trans);
+			trans = NULL;
+		}
 	}
 
+	if (trans)
+		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+	else
+		down_read(&fs_info->commit_root_sem);
+
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
 				   tree_mod_seq_elem.seq, &refs,
 				   &extent_item_pos, ignore_offset);
@@ -1943,7 +1958,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 
 	free_leaf_list(refs);
 out:
-	if (!search_commit_root) {
+	if (trans) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 		btrfs_end_transaction(trans);
 	} else {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 48ac8b7..79ac1eb 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2436,6 +2436,16 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 	if (tmp) {
 		/* first we do an atomic uptodate check */
 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
+			/*
+			 * Do extra check for first_key, eb can be stale due to
+			 * being cached, read from scrub, or have multiple
+			 * parents (shared tree blocks).
+			 */
+			if (btrfs_verify_level_key(fs_info, tmp,
+					parent_level - 1, &first_key, gen)) {
+				free_extent_buffer(tmp);
+				return -EUCLEAN;
+			}
 			*eb_ret = tmp;
 			return 0;
 		}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b4f61a3..b2dc613 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -408,9 +408,9 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
 	return ret;
 }
 
-static int verify_level_key(struct btrfs_fs_info *fs_info,
-			    struct extent_buffer *eb, int level,
-			    struct btrfs_key *first_key, u64 parent_transid)
+int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
+			   struct extent_buffer *eb, int level,
+			   struct btrfs_key *first_key, u64 parent_transid)
 {
 	int found_level;
 	struct btrfs_key found_key;
@@ -487,8 +487,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
 			if (verify_parent_transid(io_tree, eb,
 						   parent_transid, 0))
 				ret = -EIO;
-			else if (verify_level_key(fs_info, eb, level,
-						  first_key, parent_transid))
+			else if (btrfs_verify_level_key(fs_info, eb, level,
+						first_key, parent_transid))
 				ret = -EUCLEAN;
 			else
 				break;
@@ -995,13 +995,18 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
 	struct extent_buffer *buf = NULL;
 	struct inode *btree_inode = fs_info->btree_inode;
+	int ret;
 
 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
 	if (IS_ERR(buf))
 		return;
-	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-				 buf, WAIT_NONE, 0);
-	free_extent_buffer(buf);
+
+	ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf,
+			WAIT_NONE, 0);
+	if (ret < 0)
+		free_extent_buffer_stale(buf);
+	else
+		free_extent_buffer(buf);
 }
 
 int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
@@ -1021,12 +1026,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
 	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
 				       mirror_num);
 	if (ret) {
-		free_extent_buffer(buf);
+		free_extent_buffer_stale(buf);
 		return ret;
 	}
 
 	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
-		free_extent_buffer(buf);
+		free_extent_buffer_stale(buf);
 		return -EIO;
 	} else if (extent_buffer_uptodate(buf)) {
 		*eb = buf;
@@ -1080,7 +1085,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
 					     level, first_key);
 	if (ret) {
-		free_extent_buffer(buf);
+		free_extent_buffer_stale(buf);
 		return ERR_PTR(ret);
 	}
 	return buf;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 4cccba2..7a4a60f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -39,6 +39,9 @@ static inline u64 btrfs_sb_offset(int mirror)
 struct btrfs_device;
 struct btrfs_fs_devices;
 
+int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
+			   struct extent_buffer *eb, int level,
+			   struct btrfs_key *first_key, u64 parent_transid);
 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 				      u64 parent_transid, int level,
 				      struct btrfs_key *first_key);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c0db778..0cc800d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3911,8 +3911,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
 				    info->space_info_kobj, "%s",
 				    alloc_name(space_info->flags));
 	if (ret) {
-		percpu_counter_destroy(&space_info->total_bytes_pinned);
-		kfree(space_info);
+		kobject_put(&space_info->kobj);
 		return ret;
 	}
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ca4902c..e24c0a6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2059,6 +2059,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	u64 len;
 
 	/*
+	 * If the inode needs a full sync, make sure we use a full range to
+	 * avoid log tree corruption, due to hole detection racing with ordered
+	 * extent completion for adjacent ranges, and assertion failures during
+	 * hole detection.
+	 */
+	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+		     &BTRFS_I(inode)->runtime_flags)) {
+		start = 0;
+		end = LLONG_MAX;
+	}
+
+	/*
 	 * The range length can be represented by u64, we have to do the typecasts
 	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
 	 */
@@ -2565,10 +2577,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
 					  &cached_state);
-	if (ret) {
-		inode_unlock(inode);
+	if (ret)
 		goto out_only_mutex;
-	}
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -3151,6 +3161,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
 					cur_offset, last_byte - cur_offset);
 			if (ret < 0) {
+				cur_offset = last_byte;
 				free_extent_map(em);
 				break;
 			}
@@ -3200,7 +3211,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	/* Let go of our reservation. */
 	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
 		btrfs_free_reserved_data_space(inode, data_reserved,
-				alloc_start, alloc_end - cur_offset);
+				cur_offset, alloc_end - cur_offset);
 	extent_changeset_free(data_reserved);
 	return ret;
 }
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0526b6c..5d57ed6 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4289,27 +4289,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 		mutex_lock(&fs_info->cleaner_mutex);
 		ret = relocate_block_group(rc);
 		mutex_unlock(&fs_info->cleaner_mutex);
-		if (ret < 0) {
+		if (ret < 0)
 			err = ret;
-			goto out;
+
+		/*
+		 * We may have gotten ENOSPC after we already dirtied some
+		 * extents.  If writeout happens while we're relocating a
+		 * different block group we could end up hitting the
+		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
+		 * btrfs_reloc_cow_block.  Make sure we write everything out
+		 * properly so we don't trip over this problem, and then break
+		 * out of the loop if we hit an error.
+		 */
+		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
+			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+						       (u64)-1);
+			if (ret)
+				err = ret;
+			invalidate_mapping_pages(rc->data_inode->i_mapping,
+						 0, -1);
+			rc->stage = UPDATE_DATA_PTRS;
 		}
 
+		if (err < 0)
+			goto out;
+
 		if (rc->extents_found == 0)
 			break;
 
 		btrfs_info(fs_info, "found %llu extents", rc->extents_found);
 
-		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
-			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
-						       (u64)-1);
-			if (ret) {
-				err = ret;
-				goto out;
-			}
-			invalidate_mapping_pages(rc->data_inode->i_mapping,
-						 0, -1);
-			rc->stage = UPDATE_DATA_PTRS;
-		}
 	}
 
 	WARN_ON(rc->block_group->pinned > 0);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 65bda06..3228d3b 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -132,18 +132,19 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 		return -ENOMEM;
 
 	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-	if (ret < 0) {
+	if (ret < 0)
+		goto out;
+
+	if (ret > 0) {
+		btrfs_crit(fs_info,
+			"unable to find root key (%llu %u %llu) in tree %llu",
+			key->objectid, key->type, key->offset,
+			root->root_key.objectid);
+		ret = -EUCLEAN;
 		btrfs_abort_transaction(trans, ret);
 		goto out;
 	}
 
-	if (ret != 0) {
-		btrfs_print_leaf(path->nodes[0]);
-		btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
-			   key->objectid, key->type, key->offset);
-		BUG_ON(1);
-	}
-
 	l = path->nodes[0];
 	slot = path->slots[0];
 	ptr = btrfs_item_ptr_offset(l, slot);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 84cb6e5..635e419 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6583,6 +6583,38 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
 	return btrfs_commit_transaction(trans);
 }
 
+/*
+ * Make sure any existing delalloc is flushed for any root used by a send
+ * operation so that we do not miss any data and we do not race with writeback
+ * finishing and changing a tree while send is using the tree. This could
+ * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
+ * a send operation then uses the subvolume.
+ * After flushing delalloc ensure_commit_roots_uptodate() must be called.
+ */
+static int flush_delalloc_roots(struct send_ctx *sctx)
+{
+	struct btrfs_root *root = sctx->parent_root;
+	int ret;
+	int i;
+
+	if (root) {
+		ret = btrfs_start_delalloc_snapshot(root);
+		if (ret)
+			return ret;
+		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+	}
+
+	for (i = 0; i < sctx->clone_roots_cnt; i++) {
+		root = sctx->clone_roots[i].root;
+		ret = btrfs_start_delalloc_snapshot(root);
+		if (ret)
+			return ret;
+		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+	}
+
+	return 0;
+}
+
 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
 {
 	spin_lock(&root->root_item_lock);
@@ -6807,6 +6839,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
 			NULL);
 	sort_clone_roots = 1;
 
+	ret = flush_delalloc_roots(sctx);
+	if (ret)
+		goto out;
+
 	ret = ensure_commit_roots_uptodate(sctx);
 	if (ret)
 		goto out;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 3717c86..aefb016 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -811,7 +811,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
 	fs_devs->fsid_kobj.kset = btrfs_kset;
 	error = kobject_init_and_add(&fs_devs->fsid_kobj,
 				&btrfs_ktype, parent, "%pU", fs_devs->fsid);
-	return error;
+	if (error) {
+		kobject_put(&fs_devs->fsid_kobj);
+		return error;
+	}
+
+	return 0;
 }
 
 int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2f4f095..75051d3 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4121,6 +4121,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 							       *last_extent, 0,
 							       0, len, 0, len,
 							       0, 0, 0);
+				*last_extent += len;
 			}
 		}
 	}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 82928ce..7f3f64b 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
 	struct ceph_inode_info *dci = ceph_inode(dir);
+	unsigned hash;
 
 	switch (dci->i_dir_layout.dl_dir_hash) {
 	case 0:	/* for backward compat */
@@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 		return dn->d_name.hash;
 
 	default:
-		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+		spin_lock(&dn->d_lock);
+		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
 				     dn->d_name.name, dn->d_name.len);
+		spin_unlock(&dn->d_lock);
+		return hash;
 	}
 }
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4055ab4..3e518c2 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
+	kfree(ci->i_symlink);
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -561,7 +562,6 @@ void ceph_destroy_inode(struct inode *inode)
 		ceph_put_snap_realm(mdsc, realm);
 	}
 
-	kfree(ci->i_symlink);
 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
 		frag = rb_entry(n, struct ceph_inode_frag, node);
 		rb_erase(n, &ci->i_fragtree);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index bc43c82..bfcf11c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1290,6 +1290,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
 			ci->i_prealloc_cap_flush = NULL;
 		}
+
+               if (drop &&
+                  ci->i_wrbuffer_ref_head == 0 &&
+                  ci->i_wr_ref == 0 &&
+                  ci->i_dirty_caps == 0 &&
+                  ci->i_flushing_caps == 0) {
+                      ceph_put_snap_context(ci->i_head_snapc);
+                      ci->i_head_snapc = NULL;
+               }
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	while (!list_empty(&to_remove)) {
@@ -1945,10 +1954,39 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
 	return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+			     int *ppathlen)
+{
+	u32 len;
+	char *name;
+
+retry:
+	len = READ_ONCE(dentry->d_name.len);
+	name = kmalloc(len + 1, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_name.len != len) {
+		spin_unlock(&dentry->d_lock);
+		kfree(name);
+		goto retry;
+	}
+	memcpy(name, dentry->d_name.name, len);
+	spin_unlock(&dentry->d_lock);
+
+	name[len] = '\0';
+	*ppath = name;
+	*ppathlen = len;
+	return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 			     const char **ppath, int *ppathlen, u64 *pino,
-			     int *pfreepath)
+			     bool *pfreepath, bool parent_locked)
 {
+	int ret;
 	char *path;
 
 	rcu_read_lock();
@@ -1957,8 +1995,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
 		*pino = ceph_ino(dir);
 		rcu_read_unlock();
-		*ppath = dentry->d_name.name;
-		*ppathlen = dentry->d_name.len;
+		if (parent_locked) {
+			*ppath = dentry->d_name.name;
+			*ppathlen = dentry->d_name.len;
+		} else {
+			ret = clone_dentry_name(dentry, ppath, ppathlen);
+			if (ret)
+				return ret;
+			*pfreepath = true;
+		}
 		return 0;
 	}
 	rcu_read_unlock();
@@ -1966,13 +2011,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
 static int build_inode_path(struct inode *inode,
 			    const char **ppath, int *ppathlen, u64 *pino,
-			    int *pfreepath)
+			    bool *pfreepath)
 {
 	struct dentry *dentry;
 	char *path;
@@ -1988,7 +2033,7 @@ static int build_inode_path(struct inode *inode,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
@@ -1999,7 +2044,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 				  struct inode *rdiri, const char *rpath,
 				  u64 rino, const char **ppath, int *pathlen,
-				  u64 *ino, int *freepath)
+				  u64 *ino, bool *freepath, bool parent_locked)
 {
 	int r = 0;
 
@@ -2009,7 +2054,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 		     ceph_snap(rinode));
 	} else if (rdentry) {
 		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-					freepath);
+					freepath, parent_locked);
 		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
 		     *ppath);
 	} else if (rpath || rino) {
@@ -2035,7 +2080,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 	const char *path2 = NULL;
 	u64 ino1 = 0, ino2 = 0;
 	int pathlen1 = 0, pathlen2 = 0;
-	int freepath1 = 0, freepath2 = 0;
+	bool freepath1 = false, freepath2 = false;
 	int len;
 	u16 releases;
 	void *p, *end;
@@ -2043,16 +2088,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
 	ret = set_request_path_attr(req->r_inode, req->r_dentry,
 			      req->r_parent, req->r_path1, req->r_ino1.ino,
-			      &path1, &pathlen1, &ino1, &freepath1);
+			      &path1, &pathlen1, &ino1, &freepath1,
+			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
+					&req->r_req_flags));
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out;
 	}
 
+	/* If r_old_dentry is set, then assume that its parent is locked */
 	ret = set_request_path_attr(NULL, req->r_old_dentry,
 			      req->r_old_dentry_dir,
 			      req->r_path2, req->r_ino2.ino,
-			      &path2, &pathlen2, &ino2, &freepath2);
+			      &path2, &pathlen2, &ino2, &freepath2, true);
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out_free1;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f74193d..1f46b02 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	old_snapc = NULL;
 
 update_snapc:
-	if (ci->i_head_snapc) {
+       if (ci->i_wrbuffer_ref_head == 0 &&
+           ci->i_wr_ref == 0 &&
+           ci->i_dirty_caps == 0 &&
+           ci->i_flushing_caps == 0) {
+               ci->i_head_snapc = NULL;
+       } else {
 		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
 		dout(" new snapc is %p\n", new_snapc);
 	}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index eab1359..c5cf46e 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -819,6 +819,12 @@ static void ceph_umount_begin(struct super_block *sb)
 	return;
 }
 
+static int ceph_remount(struct super_block *sb, int *flags, char *data)
+{
+	sync_filesystem(sb);
+	return 0;
+}
+
 static const struct super_operations ceph_super_ops = {
 	.alloc_inode	= ceph_alloc_inode,
 	.destroy_inode	= ceph_destroy_inode,
@@ -826,6 +832,7 @@ static const struct super_operations ceph_super_ops = {
 	.drop_inode	= ceph_drop_inode,
 	.sync_fs        = ceph_sync_fs,
 	.put_super	= ceph_put_super,
+	.remount_fs	= ceph_remount,
 	.show_options   = ceph_show_options,
 	.statfs		= ceph_statfs,
 	.umount_begin   = ceph_umount_begin,
diff --git a/fs/char_dev.c b/fs/char_dev.c
index a279c58..8a63cfa 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -159,6 +159,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
 			ret = -EBUSY;
 			goto out;
 		}
+
+		if (new_min < old_min && new_max > old_max) {
+			ret = -EBUSY;
+			goto out;
+		}
+
 	}
 
 	cd->next = *cp;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 80f3358..6f227cc 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1263,6 +1263,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
 }
 
 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 
 #define CIFS_CACHE_READ_FLG	1
@@ -1763,6 +1764,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
 #endif /* CONFIG_CIFS_ACL */
 
 void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d847132..d6b4568 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -358,13 +358,31 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
 	return cifs_file;
 }
 
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference of file priv data
+ *
+ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	_cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference of file priv data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_for_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. If calling this function from the
+ * oplock break handler, you need to pass false.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+{
 	struct inode *inode = d_inode(cifs_file->dentry);
 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
 	struct TCP_Server_Info *server = tcon->ses->server;
@@ -411,7 +429,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
 	spin_unlock(&tcon->open_file_lock);
 
-	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+	oplock_break_cancelled = wait_oplock_handler ?
+		cancel_work_sync(&cifs_file->oplock_break) : false;
 
 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
 		struct TCP_Server_Info *server = tcon->ses->server;
@@ -4170,6 +4189,7 @@ void cifs_oplock_break(struct work_struct *work)
 							     cinode);
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
+	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
 	cifs_done_oplock_break(cinode);
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b59ebed4..1fadd314 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
 	if (rc == 0 || rc != -EBUSY)
 		goto do_rename_exit;
 
+	/* Don't fall back to using SMB on SMB 2+ mount */
+	if (server->vals->protocol_id != 0)
+		goto do_rename_exit;
+
 	/* open-file renames don't work across directories */
 	if (to_dentry->d_parent != from_dentry->d_parent)
 		goto do_rename_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 6926685..facc94e1 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -490,8 +490,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &pCifsInode->flags);
 
-				queue_work(cifsoplockd_wq,
-					   &netfile->oplock_break);
+				cifs_queue_oplock_break(netfile);
 				netfile->oplock_break_cancelled = false;
 
 				spin_unlock(&tcon->open_file_lock);
@@ -588,6 +587,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
 	spin_unlock(&cinode->writers_lock);
 }
 
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+	/*
+	 * Bump the handle refcount now while we hold the
+	 * open_file_lock to enforce the validity of it for the oplock
+	 * break handler. The matching put is done at the end of the
+	 * handler.
+	 */
+	cifsFileInfo_get(cfile);
+
+	queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
+
 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 {
 	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 58700d2..0a7ed2e 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 			clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 				  &cinode->flags);
 
-		queue_work(cifsoplockd_wq, &cfile->oplock_break);
+		cifs_queue_oplock_break(cfile);
 		kfree(lw);
 		return true;
 	}
@@ -719,8 +719,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &cinode->flags);
 				spin_unlock(&cfile->file_info_lock);
-				queue_work(cifsoplockd_wq,
-					   &cfile->oplock_break);
+
+				cifs_queue_oplock_break(cfile);
 
 				spin_unlock(&tcon->open_file_lock);
 				spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d4d7d61..0ccf8f9 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1906,6 +1906,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
 		       &resp_buftype);
+	if (!rc)
+		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 	if (!rc || !err_iov.iov_base) {
 		rc = -ENOENT;
 		goto free_path;
@@ -2346,26 +2348,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
 		       unsigned int epoch, bool *purge_cache)
 {
 	char message[5] = {0};
+	unsigned int new_oplock = 0;
 
 	oplock &= 0xFF;
 	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
 		return;
 
-	cinode->oplock = 0;
 	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_READ_FLG;
+		new_oplock |= CIFS_CACHE_READ_FLG;
 		strcat(message, "R");
 	}
 	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
+		new_oplock |= CIFS_CACHE_HANDLE_FLG;
 		strcat(message, "H");
 	}
 	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_WRITE_FLG;
+		new_oplock |= CIFS_CACHE_WRITE_FLG;
 		strcat(message, "W");
 	}
-	if (!cinode->oplock)
-		strcat(message, "None");
+	if (!new_oplock)
+		strncpy(message, "None", sizeof(message));
+
+	cinode->oplock = new_oplock;
 	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
 		 &cinode->vfs_inode);
 }
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 71f32d9..33afb63 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3273,8 +3273,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 	rqst.rq_nvec = 1;
 
 	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
-	cifs_small_buf_release(req);
-
 	rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
 
 	if (rc) {
@@ -3287,12 +3285,15 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 					    rc);
 		}
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+		cifs_small_buf_release(req);
 		return rc == -ENODATA ? 0 : rc;
 	} else
 		trace_smb3_read_done(xid, req->PersistentFileId,
 				    io_parms->tcon->tid, ses->Suid,
 				    io_parms->offset, io_parms->length);
 
+	cifs_small_buf_release(req);
+
 	*nbytes = le32_to_cpu(rsp->DataLength);
 	if ((*nbytes > CIFS_MAX_MSGSIZE) ||
 	    (*nbytes > io_parms->length)) {
@@ -3591,7 +3592,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
 	rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
 			    &resp_buftype, flags, &rsp_iov);
-	cifs_small_buf_release(req);
 	rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
 
 	if (rc) {
@@ -3609,6 +3609,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 				     io_parms->offset, *nbytes);
 	}
 
+	cifs_small_buf_release(req);
 	free_rsp_buf(resp_buftype, rsp);
 	return rc;
 }
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 02b7d91..f0de238 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -1,16 +1,16 @@
 config FS_ENCRYPTION
-	tristate "FS Encryption (Per-file encryption)"
+	bool "FS Encryption (Per-file encryption)"
 	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_CBC
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_CTR
 	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories.  This
 	  feature is similar to ecryptfs, but it is more memory
 	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  decrypted pages in the page cache.  Currently Ext4,
+	  F2FS and UBIFS make use of this feature.
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index f06a8c0..a4a55d5 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,9 +12,6 @@
 #ifndef _FSCRYPT_PRIVATE_H
 #define _FSCRYPT_PRIVATE_H
 
-#ifndef __FS_HAS_ENCRYPTION
-#define __FS_HAS_ENCRYPTION 1
-#endif
 #include <linux/fscrypt.h>
 #include <crypto/hash.h>
 #include <linux/pfk.h>
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 926e5df..56debb1 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -58,7 +58,7 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
 		return err;
 
 	if (!fscrypt_has_permitted_context(dir, inode))
-		return -EPERM;
+		return -EXDEV;
 
 	return 0;
 }
@@ -82,13 +82,13 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (IS_ENCRYPTED(new_dir) &&
 		    !fscrypt_has_permitted_context(new_dir,
 						   d_inode(old_dentry)))
-			return -EPERM;
+			return -EXDEV;
 
 		if ((flags & RENAME_EXCHANGE) &&
 		    IS_ENCRYPTED(old_dir) &&
 		    !fscrypt_has_permitted_context(old_dir,
 						   d_inode(new_dentry)))
-			return -EPERM;
+			return -EXDEV;
 	}
 	return 0;
 }
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index f490de9..bd7eaf9 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -151,8 +151,7 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
  * malicious offline violations of this constraint, while the link and rename
  * checks are needed to prevent online violations of this constraint.
  *
- * Return: 1 if permitted, 0 if forbidden.  If forbidden, the caller must fail
- * the filesystem operation with EPERM.
+ * Return: 1 if permitted, 0 if forbidden.
  */
 int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
 {
diff --git a/fs/dax.c b/fs/dax.c
index 09fa706..004c8ac 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		}
 
 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
-		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
-					    write);
+		result = vmf_insert_pfn_pmd(vmf, pfn, write);
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
@@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 		break;
 #ifdef CONFIG_FS_DAX_PMD
 	case PE_SIZE_PMD:
-		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
-			pfn, true);
+		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
 		break;
 #endif
 	default:
diff --git a/fs/dcache.c b/fs/dcache.c
index cb515f1..6e00223 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry)
 		}
 	}
 	/* if dentry was never visible to RCU, immediate free is OK */
-	if (!(dentry->d_flags & DCACHE_RCUACCESS))
+	if (dentry->d_flags & DCACHE_NORCU)
 		__d_free(&dentry->d_u.d_rcu);
 	else
 		call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -1694,7 +1694,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
 	if (!dentry)
 		return NULL;
-	dentry->d_flags |= DCACHE_RCUACCESS;
 	spin_lock(&parent->d_lock);
 	/*
 	 * don't need child lock because it is not subject
@@ -1719,7 +1718,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
 {
 	struct dentry *dentry = d_alloc_anon(parent->d_sb);
 	if (dentry) {
-		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
 		dentry->d_parent = dget(parent);
 	}
 	return dentry;
@@ -1732,10 +1731,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
  *
  * For a filesystem that just pins its dentries in memory and never
  * performs lookups at all, return an unhashed IS_ROOT dentry.
+ * This is used for pipes, sockets et.al. - the stuff that should
+ * never be anyone's children or parents.  Unlike all other
+ * dentries, these will not have RCU delay between dropping the
+ * last reference and freeing them.
  */
 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
 {
-	return __d_alloc(sb, name);
+	struct dentry *dentry = __d_alloc(sb, name);
+	if (likely(dentry))
+		dentry->d_flags |= DCACHE_NORCU;
+	return dentry;
 }
 EXPORT_SYMBOL(d_alloc_pseudo);
 
@@ -1899,12 +1905,10 @@ struct dentry *d_make_root(struct inode *root_inode)
 
 	if (root_inode) {
 		res = d_alloc_anon(root_inode->i_sb);
-		if (res) {
-			res->d_flags |= DCACHE_RCUACCESS;
+		if (res)
 			d_instantiate(res, root_inode);
-		} else {
+		else
 			iput(root_inode);
-		}
 	}
 	return res;
 }
@@ -2769,9 +2773,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
 		copy_name(dentry, target);
 		target->d_hash.pprev = NULL;
 		dentry->d_parent->d_lockref.count++;
-		if (dentry == old_parent)
-			dentry->d_flags |= DCACHE_RCUACCESS;
-		else
+		if (dentry != old_parent) /* wasn't IS_ROOT */
 			WARN_ON(!--old_parent->d_lockref.count);
 	} else {
 		target->d_parent = old_parent;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 41ef452..e5126fa 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
 	return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	if (S_ISLNK(inode->i_mode))
 		kfree(inode->i_link);
+	free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
 	.statfs		= simple_statfs,
 	.remount_fs	= debugfs_remount,
 	.show_options	= debugfs_show_options,
-	.evict_inode	= debugfs_evict_inode,
+	.destroy_inode	= debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1dcd800..5362449 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,7 +37,6 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 /*
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index dcf7ee9..3ed1939 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -101,10 +101,9 @@
 	depends on EXT4_FS
 	select FS_ENCRYPTION
 	help
-	  Enable encryption of ext4 files and directories.  This
-	  feature is similar to ecryptfs, but it is more memory
-	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  This kconfig symbol is deprecated; now it just selects
+	  FS_ENCRYPTION.  Use CONFIG_FS_ENCRYPTION=y in new config
+	  files
 
 config EXT4_FS_ENCRYPTION
 	bool "Ext4 FS Encryption"
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f93f988..0ccd51f 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,7 +111,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 	int dir_has_error = 0;
 	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
 
-	if (ext4_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_get_encryption_info(inode);
 		if (err && err != -ENOKEY)
 			return err;
@@ -138,7 +138,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 			return err;
 	}
 
-	if (ext4_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr);
 		if (err < 0)
 			return err;
@@ -245,7 +245,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 			offset += ext4_rec_len_from_disk(de->rec_len,
 					sb->s_blocksize);
 			if (le32_to_cpu(de->inode)) {
-				if (!ext4_encrypted_inode(inode)) {
+				if (!IS_ENCRYPTED(inode)) {
 					if (!dir_emit(ctx, de->name,
 					    de->name_len,
 					    le32_to_cpu(de->inode),
@@ -283,9 +283,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 done:
 	err = 0;
 errout:
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
 	fscrypt_fname_free_buffer(&fstr);
-#endif
 	brelse(bh);
 	return err;
 }
@@ -613,7 +611,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 
 static int ext4_dir_open(struct inode * inode, struct file * filp)
 {
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
 	return 0;
 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 746b80f..0e07137 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -40,7 +40,6 @@
 #include <linux/compat.h>
 #endif
 
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 #include <linux/compiler.h>
@@ -1337,7 +1336,7 @@ struct ext4_super_block {
 #define EXT4_MF_FS_ABORTED		0x0002	/* Fatal error detected */
 #define EXT4_MF_TEST_DUMMY_ENCRYPTION	0x0004
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
 						EXT4_MF_TEST_DUMMY_ENCRYPTION))
 #else
@@ -1673,6 +1672,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA	0x8000 /* data in inode */
 #define EXT4_FEATURE_INCOMPAT_ENCRYPT		0x10000
 
+extern void ext4_update_dynamic_rev(struct super_block *sb);
+
 #define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
 static inline bool ext4_has_feature_##name(struct super_block *sb) \
 { \
@@ -1681,6 +1682,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
 } \
 static inline void ext4_set_feature_##name(struct super_block *sb) \
 { \
+	ext4_update_dynamic_rev(sb); \
 	EXT4_SB(sb)->s_es->s_feature_compat |= \
 		cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
 } \
@@ -1698,6 +1700,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
 } \
 static inline void ext4_set_feature_##name(struct super_block *sb) \
 { \
+	ext4_update_dynamic_rev(sb); \
 	EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
 		cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
 } \
@@ -1715,6 +1718,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
 } \
 static inline void ext4_set_feature_##name(struct super_block *sb) \
 { \
+	ext4_update_dynamic_rev(sb); \
 	EXT4_SB(sb)->s_es->s_feature_incompat |= \
 		cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
 } \
@@ -2062,7 +2066,7 @@ struct ext4_filename {
 	const struct qstr *usr_fname;
 	struct fscrypt_str disk_name;
 	struct dx_hash_info hinfo;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	struct fscrypt_str crypto_buf;
 #endif
 };
@@ -2290,12 +2294,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
 					      struct ext4_group_desc *gdp);
 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
-static inline bool ext4_encrypted_inode(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
-}
-
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static inline int ext4_fname_setup_filename(struct inode *dir,
 			const struct qstr *iname,
 			int lookup, struct ext4_filename *fname)
@@ -2682,7 +2681,6 @@ do {									\
 
 #endif
 
-extern void ext4_update_dynamic_rev(struct super_block *sb);
 extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
 					__u32 compat);
 extern int ext4_update_rocompat_feature(handle_t *handle,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index df908ef..75a5309 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -411,7 +411,7 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
 	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
 	    !test_opt(inode->i_sb, DELALLOC))) {
 		/* We do not support data journalling for encrypted data */
-		if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
 			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
 		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
 	}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 72a361d..9b0ea2b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	__le32 border;
 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
 	int err = 0;
+	size_t ext_size = 0;
 
 	/* make decision: where to split? */
 	/* FIXME: now decision is simplest: at current extent */
@@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		le16_add_cpu(&neh->eh_entries, m);
 	}
 
+	/* zero out unused area in the extent block */
+	ext_size = sizeof(struct ext4_extent_header) +
+		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
+	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
 	ext4_extent_block_csum_set(inode, neh);
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
@@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 				sizeof(struct ext4_extent_idx) * m);
 			le16_add_cpu(&neh->eh_entries, m);
 		}
+		/* zero out unused area in the extent block */
+		ext_size = sizeof(struct ext4_extent_header) +
+		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
+		memset(bh->b_data + ext_size, 0,
+			inode->i_sb->s_blocksize - ext_size);
 		ext4_extent_block_csum_set(inode, neh);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
@@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	ext4_fsblk_t newblock, goal = 0;
 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 	int err = 0;
+	size_t ext_size = 0;
 
 	/* Try to prepend new index to old one */
 	if (ext_depth(inode))
@@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 		goto out;
 	}
 
+	ext_size = sizeof(EXT4_I(inode)->i_data);
 	/* move top-level index/leaf into new block */
-	memmove(bh->b_data, EXT4_I(inode)->i_data,
-		sizeof(EXT4_I(inode)->i_data));
+	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
+	/* zero out unused area in the extent block */
+	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
 
 	/* set size of new block */
 	neh = ext_block_hdr(bh);
@@ -3569,7 +3582,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
 			(inode->i_sb->s_blocksize_bits - 10);
 
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		max_zeroout = 0;
 
 	/*
@@ -4919,7 +4932,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	 * leave it disabled for encrypted inodes for now.  This is a
 	 * bug we should fix....
 	 */
-	if (ext4_encrypted_inode(inode) &&
+	if (IS_ENCRYPTED(inode) &&
 	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
 		     FALLOC_FL_ZERO_RANGE)))
 		return -EOPNOTSUPP;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 98ec11f..2c5baa5 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	}
 
 	ret = __generic_file_write_iter(iocb, from);
+	/*
+	 * Unaligned direct AIO must be the only IO in flight. Otherwise
+	 * overlapping aligned IO after unaligned might result in data
+	 * corruption.
+	 */
+	if (ret == -EIOCBQUEUED && unaligned_aio)
+		ext4_unwritten_wait(inode);
 	inode_unlock(inode);
 
 	if (ret > 0)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 091a18a..61f83fa 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 	if (unlikely(ext4_forced_shutdown(sbi)))
 		return ERR_PTR(-EIO);
 
-	if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+	if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
 	    !(i_flags & EXT4_EA_INODE_FL)) {
 		err = fscrypt_get_encryption_info(dir);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 56dc9c5..094aec6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -416,7 +416,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 {
 	int ret;
 
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 
 	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@@ -1151,7 +1151,7 @@ int do_journal_get_write_access(handle_t *handle,
 	return ret;
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 				  get_block_t *get_block)
 {
@@ -1216,7 +1216,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			decrypt = ext4_encrypted_inode(inode) &&
+			decrypt = IS_ENCRYPTED(inode) &&
 				S_ISREG(inode->i_mode) &&
 				!fscrypt_using_hardware_encryption(inode);
 			ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
@@ -1316,7 +1316,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	if (ext4_should_dioread_nolock(inode))
 		ret = ext4_block_write_begin(page, pos, len,
 					     ext4_get_block_unwritten);
@@ -3100,7 +3100,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ret = ext4_block_write_begin(page, pos, len,
 				     ext4_da_get_block_prep);
 #else
@@ -3767,8 +3767,8 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#if defined(CONFIG_EXT4_FS_ENCRYPTION)
-	WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+#if defined(CONFIG_FS_ENCRYPTION)
+	WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
 		&& !fscrypt_using_hardware_encryption(inode));
 #endif
 	ret = __blockdev_direct_IO(iocb, inode,
@@ -3881,8 +3881,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#if defined(CONFIG_EXT4_FS_ENCRYPTION)
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+#ifdef CONFIG_FS_ENCRYPTION
+	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
 		&& !fscrypt_using_hardware_encryption(inode))
 		return 0;
 #endif
@@ -4093,8 +4093,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 
 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
-		decrypt = S_ISREG(inode->i_mode) &&
-			ext4_encrypted_inode(inode) &&
+		decrypt = S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
 		    !fscrypt_using_hardware_encryption(inode);
 		ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
 		wait_on_buffer(bh);
@@ -4177,7 +4176,7 @@ static int ext4_block_truncate_page(handle_t *handle,
 	struct inode *inode = mapping->host;
 
 	/* If we are processing an encrypted inode during orphan list handling */
-	if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
+	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
 		return 0;
 
 	blocksize = inode->i_sb->s_blocksize;
@@ -4764,7 +4763,7 @@ static bool ext4_should_use_dax(struct inode *inode)
 		return false;
 	if (ext4_has_inline_data(inode))
 		return false;
-	if (ext4_encrypted_inode(inode))
+	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
 		return false;
 	return true;
 }
@@ -5114,7 +5113,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
 			ret = -EFSCORRUPTED;
 			goto bad_inode;
 		}
-		if (ext4_encrypted_inode(inode)) {
+		if (IS_ENCRYPTED(inode)) {
 			inode->i_op = &ext4_encrypted_symlink_inode_operations;
 			ext4_set_aops(inode);
 		} else if (ext4_inode_is_fast_symlink(inode)) {
@@ -5393,7 +5392,6 @@ static int ext4_do_update_inode(handle_t *handle,
 		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 		if (err)
 			goto out_brelse;
-		ext4_update_dynamic_rev(sb);
 		ext4_set_feature_large_file(sb);
 		ext4_handle_sync(handle);
 		err = ext4_handle_dirty_super(handle, sb);
@@ -5670,25 +5668,22 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 			up_write(&EXT4_I(inode)->i_data_sem);
 			ext4_journal_stop(handle);
 			if (error) {
-				if (orphan)
+				if (orphan && inode->i_nlink)
 					ext4_orphan_del(NULL, inode);
 				goto err_out;
 			}
 		}
-		if (!shrink)
+		if (!shrink) {
 			pagecache_isize_extended(inode, oldsize, inode->i_size);
-
-		/*
-		 * Blocks are going to be removed from the inode. Wait
-		 * for dio in flight.  Temporarily disable
-		 * dioread_nolock to prevent livelock.
-		 */
-		if (orphan) {
-			if (!ext4_should_journal_data(inode)) {
-				inode_dio_wait(inode);
-			} else
-				ext4_wait_for_tail_page_commit(inode);
+		} else {
+			/*
+			 * Blocks are going to be removed from the inode. Wait
+			 * for dio in flight.
+			 */
+			inode_dio_wait(inode);
 		}
+		if (orphan && ext4_should_journal_data(inode))
+			ext4_wait_for_tail_page_commit(inode);
 		down_write(&EXT4_I(inode)->i_mmap_sem);
 
 		rc = ext4_break_layouts(inode);
@@ -6044,7 +6039,7 @@ int ext4_expand_extra_isize(struct inode *inode,
 
 	ext4_write_lock_xattr(inode, &no_expand);
 
-	BUFFER_TRACE(iloc.bh, "get_write_access");
+	BUFFER_TRACE(iloc->bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, iloc->bh);
 	if (error) {
 		brelse(iloc->bh);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 5f24fdc..e70adf9 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -256,7 +256,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
 	return err;
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int uuid_is_zero(__u8 u[16])
 {
 	int	i;
@@ -977,7 +977,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (err == 0)
 			err = err2;
 		mnt_drop_write_file(filp);
-		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+		if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
 		    ext4_has_group_desc_csum(sb) &&
 		    test_opt(sb, INIT_INODE_TABLE))
 			err = ext4_register_li_request(sb, o_group);
@@ -1031,7 +1031,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
 	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		int err, err2;
 		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		handle_t *handle;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e29fce2..cc229f3 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1539,7 +1539,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
 		ex->fe_len += 1 << order;
 	}
 
-	if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
+	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
 		/* Should never happen! (but apparently sometimes does?!?) */
 		WARN_ON(1);
 		ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2f5be02..1083a9f 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -592,8 +592,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 		return -EOPNOTSUPP;
 	}
 
-	if (ext4_encrypted_inode(orig_inode) ||
-	    ext4_encrypted_inode(donor_inode)) {
+	if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
 		ext4_msg(orig_inode->i_sb, KERN_ERR,
 			 "Online defrag not supported for encrypted files");
 		return -EOPNOTSUPP;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 4f8de2b..666aff5 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -612,7 +612,7 @@ static struct stats dx_show_leaf(struct inode *dir,
 		{
 			if (show_names)
 			{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 				int len;
 				char *name;
 				struct fscrypt_str fname_crypto_str =
@@ -621,7 +621,7 @@ static struct stats dx_show_leaf(struct inode *dir,
 
 				name  = de->name;
 				len = de->name_len;
-				if (ext4_encrypted_inode(dir))
+				if (IS_ENCRYPTED(dir))
 					res = fscrypt_get_encryption_info(dir);
 				if (res) {
 					printk(KERN_WARNING "Error setting up"
@@ -871,12 +871,15 @@ static void dx_release(struct dx_frame *frames)
 {
 	struct dx_root_info *info;
 	int i;
+	unsigned int indirect_levels;
 
 	if (frames[0].bh == NULL)
 		return;
 
 	info = &((struct dx_root *)frames[0].bh->b_data)->info;
-	for (i = 0; i <= info->indirect_levels; i++) {
+	/* save local copy, "info" may be freed after brelse() */
+	indirect_levels = info->indirect_levels;
+	for (i = 0; i <= indirect_levels; i++) {
 		if (frames[i].bh == NULL)
 			break;
 		brelse(frames[i].bh);
@@ -984,9 +987,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	/* Check if the directory is encrypted */
-	if (ext4_encrypted_inode(dir)) {
+	if (IS_ENCRYPTED(dir)) {
 		err = fscrypt_get_encryption_info(dir);
 		if (err < 0) {
 			brelse(bh);
@@ -1015,7 +1018,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 			continue;
 		if (de->inode == 0)
 			continue;
-		if (!ext4_encrypted_inode(dir)) {
+		if (!IS_ENCRYPTED(dir)) {
 			tmp_str.name = de->name;
 			tmp_str.len = de->name_len;
 			err = ext4_htree_store_dirent(dir_file,
@@ -1047,7 +1050,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 	}
 errout:
 	brelse(bh);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	fscrypt_fname_free_buffer(&fname_crypto_str);
 #endif
 	return count;
@@ -1267,7 +1270,7 @@ static inline bool ext4_match(const struct ext4_filename *fname,
 
 	f.usr_fname = fname->usr_fname;
 	f.disk_name = fname->disk_name;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	f.crypto_buf = fname->crypto_buf;
 #endif
 	return fscrypt_match_name(&f, de->name, de->name_len);
@@ -1498,7 +1501,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 	ext4_lblk_t block;
 	int retval;
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	*res_dir = NULL;
 #endif
 	frame = dx_probe(fname, dir, NULL, frames);
@@ -1578,7 +1581,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
 					 ino);
 			return ERR_PTR(-EFSCORRUPTED);
 		}
-		if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+		if (!IS_ERR(inode) && IS_ENCRYPTED(dir) &&
 		    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 		    !fscrypt_has_permitted_context(dir, inode)) {
 			ext4_warning(inode->i_sb,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 44224a3..b69f9bb 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -66,7 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		struct page *data_page = NULL;
 #endif
 		struct buffer_head *bh, *head;
@@ -78,7 +78,7 @@ static void ext4_finish_bio(struct bio *bio)
 		if (!page)
 			continue;
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		if (!page->mapping) {
 			/* The bounce data pages are unmapped. */
 			data_page = page;
@@ -111,7 +111,7 @@ static void ext4_finish_bio(struct bio *bio)
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
 		if (!under_io) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 			if (data_page)
 				fscrypt_restore_control_page(data_page);
 #endif
@@ -480,8 +480,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
 	bh = head = page_buffers(page);
 
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
-	    nr_to_submit) {
+	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index aafca33..8fbb688 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -50,7 +50,7 @@
 
 static inline bool ext4_bio_encrypted(struct bio *bio)
 {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	return unlikely(bio->bi_private != NULL);
 #else
 	return false;
@@ -282,8 +282,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			struct fscrypt_ctx *ctx = NULL;
 			unsigned int flags = 0;
 
-			if (ext4_encrypted_inode(inode) &&
-			    S_ISREG(inode->i_mode)) {
+			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
 				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 				if (IS_ERR(ctx))
 					goto set_error_page;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e7ae26e..4d5c0fc 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -874,6 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
+		iloc.bh = NULL;
 		goto errout;
 	}
 	brelse(dind);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c2d5e4b..49452c8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -699,7 +699,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
 		save_error_info(sb, function, line);
 	}
-	if (test_opt(sb, ERRORS_PANIC)) {
+	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
 		if (EXT4_SB(sb)->s_journal &&
 		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
 			return;
@@ -1243,7 +1243,7 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
 	return try_to_free_buffers(page);
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
 {
 	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
@@ -1341,7 +1341,7 @@ static bool ext4_dummy_context(struct inode *inode)
 
 static inline bool ext4_is_encrypted(struct inode *inode)
 {
-	return ext4_encrypted_inode(inode);
+	return IS_ENCRYPTED(inode);
 }
 
 static const struct fscrypt_operations ext4_cryptops = {
@@ -1940,7 +1940,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 		*journal_ioprio =
 			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
 	} else if (token == Opt_test_dummy_encryption) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
 		ext4_msg(sb, KERN_WARNING,
 			 "Test dummy encryption mode enabled");
@@ -2267,7 +2267,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
 	le16_add_cpu(&es->s_mnt_count, 1);
 	ext4_update_tstamp(es, s_mtime);
-	ext4_update_dynamic_rev(sb);
 	if (sbi->s_journal)
 		ext4_set_feature_journal_needs_recovery(sb);
 
@@ -3522,6 +3521,37 @@ int ext4_calculate_overhead(struct super_block *sb)
 	return 0;
 }
 
+static void ext4_clamp_want_extra_isize(struct super_block *sb)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_super_block *es = sbi->s_es;
+
+	/* determine the minimum size of new large inodes, if present */
+	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
+	    sbi->s_want_extra_isize == 0) {
+		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+						     EXT4_GOOD_OLD_INODE_SIZE;
+		if (ext4_has_feature_extra_isize(sb)) {
+			if (sbi->s_want_extra_isize <
+			    le16_to_cpu(es->s_want_extra_isize))
+				sbi->s_want_extra_isize =
+					le16_to_cpu(es->s_want_extra_isize);
+			if (sbi->s_want_extra_isize <
+			    le16_to_cpu(es->s_min_extra_isize))
+				sbi->s_want_extra_isize =
+					le16_to_cpu(es->s_min_extra_isize);
+		}
+	}
+	/* Check if enough inode space is available */
+	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+							sbi->s_inode_size) {
+		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+						       EXT4_GOOD_OLD_INODE_SIZE;
+		ext4_msg(sb, KERN_INFO,
+			 "required extra inode space not available");
+	}
+}
+
 static void ext4_set_resv_clusters(struct super_block *sb)
 {
 	ext4_fsblk_t resv_clusters;
@@ -4185,7 +4215,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_op = &ext4_sops;
 	sb->s_export_op = &ext4_export_ops;
 	sb->s_xattr = ext4_xattr_handlers;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	sb->s_cop = &ext4_cryptops;
 #endif
 #ifdef CONFIG_QUOTA
@@ -4247,7 +4277,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 				 "data=, fs mounted w/o journal");
 			goto failed_mount_wq;
 		}
-		sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
+		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
 		clear_opt(sb, JOURNAL_CHECKSUM);
 		clear_opt(sb, DATA_FLAGS);
 		sbi->s_journal = NULL;
@@ -4396,30 +4426,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	} else if (ret)
 		goto failed_mount4a;
 
-	/* determine the minimum size of new large inodes, if present */
-	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
-	    sbi->s_want_extra_isize == 0) {
-		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
-						     EXT4_GOOD_OLD_INODE_SIZE;
-		if (ext4_has_feature_extra_isize(sb)) {
-			if (sbi->s_want_extra_isize <
-			    le16_to_cpu(es->s_want_extra_isize))
-				sbi->s_want_extra_isize =
-					le16_to_cpu(es->s_want_extra_isize);
-			if (sbi->s_want_extra_isize <
-			    le16_to_cpu(es->s_min_extra_isize))
-				sbi->s_want_extra_isize =
-					le16_to_cpu(es->s_min_extra_isize);
-		}
-	}
-	/* Check if enough inode space is available */
-	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-							sbi->s_inode_size) {
-		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
-						       EXT4_GOOD_OLD_INODE_SIZE;
-		ext4_msg(sb, KERN_INFO, "required extra inode space not"
-			 "available");
-	}
+	ext4_clamp_want_extra_isize(sb);
 
 	ext4_set_resv_clusters(sb);
 
@@ -5224,6 +5231,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}
 
+	ext4_clamp_want_extra_isize(sb);
+
 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
 	    test_opt(sb, JOURNAL_CHECKSUM)) {
 		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 9212a02..5e4e78f 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -224,7 +224,7 @@ static struct attribute *ext4_attrs[] = {
 EXT4_ATTR_FEATURE(lazy_itable_init);
 EXT4_ATTR_FEATURE(batched_discard);
 EXT4_ATTR_FEATURE(meta_bg_resize);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 EXT4_ATTR_FEATURE(encryption);
 #endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
@@ -233,7 +233,7 @@ static struct attribute *ext4_feat_attrs[] = {
 	ATTR_LIST(lazy_itable_init),
 	ATTR_LIST(batched_discard),
 	ATTR_LIST(meta_bg_resize),
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ATTR_LIST(encryption),
 #endif
 	ATTR_LIST(metadata_csum_seed),
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c0ba520..f73fc90 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
 		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
 		if (IS_ERR(bh)) {
 			ret = PTR_ERR(bh);
+			bh = NULL;
 			goto out;
 		}
 
@@ -1699,7 +1700,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 
 	/* No failures allowed past this point. */
 
-	if (!s->not_found && here->e_value_size && here->e_value_offs) {
+	if (!s->not_found && here->e_value_size && !here->e_value_inum) {
 		/* Remove the old value. */
 		void *first_val = s->base + min_offs;
 		size_t offs = le16_to_cpu(here->e_value_offs);
@@ -2907,6 +2908,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
 			if (error == -EIO)
 				EXT4_ERROR_INODE(inode, "block %llu read error",
 						 EXT4_I(inode)->i_file_acl);
+			bh = NULL;
 			goto cleanup;
 		}
 		error = ext4_xattr_check_block(inode, bh);
@@ -3063,6 +3065,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
 		if (IS_ERR(bh)) {
 			if (PTR_ERR(bh) == -ENOMEM)
 				return NULL;
+			bh = NULL;
 			EXT4_ERROR_INODE(inode, "block %lu read error",
 					 (unsigned long)ce->e_value);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 9a20ef42..1494326 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -3,6 +3,7 @@
 	depends on BLOCK
 	select CRYPTO
 	select CRYPTO_CRC32
+	select F2FS_FS_XATTR if FS_ENCRYPTION
 	help
 	  F2FS is based on Log-structured File System (LFS), which supports
 	  versatile "flash-friendly" features. The design has been focused on
@@ -73,13 +74,11 @@
 config F2FS_FS_ENCRYPTION
 	bool "F2FS Encryption"
 	depends on F2FS_FS
-	depends on F2FS_FS_XATTR
 	select FS_ENCRYPTION
 	help
-	  Enable encryption of f2fs files and directories.  This
-	  feature is similar to ecryptfs, but it is more memory
-	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  This kconfig symbol is deprecated; now it just selects
+	  FS_ENCRYPTION.  Use CONFIG_FS_ENCRYPTION=y in new config
+	  files.
 
 config F2FS_IO_TRACE
 	bool "F2FS IO tracer"
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 63e5995..217b290 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -285,7 +285,7 @@ static int f2fs_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
 	/* assert(atomic_read(acl->a_refcount) == 1); */
 
 	FOREACH_ACL_ENTRY(pa, acl, pe) {
-		switch(pa->e_tag) {
+		switch (pa->e_tag) {
 		case ACL_USER_OBJ:
 			pa->e_perm &= (mode >> 6) | ~S_IRWXO;
 			mode &= (pa->e_perm << 6) | ~S_IRWXU;
@@ -326,7 +326,7 @@ static int f2fs_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
 	}
 
 	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
-        return not_equiv;
+	return not_equiv;
 }
 
 static int f2fs_acl_create(struct inode *dir, umode_t *mode,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a98e1b0..9c07b71 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -66,7 +66,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
-		.is_meta = is_meta,
+		.is_por = !is_meta,
 	};
 	int err;
 
@@ -130,6 +130,30 @@ struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	return __get_meta_page(sbi, index, false);
 }
 
+static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
+							int type)
+{
+	struct seg_entry *se;
+	unsigned int segno, offset;
+	bool exist;
+
+	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
+		return true;
+
+	segno = GET_SEGNO(sbi, blkaddr);
+	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+	se = get_seg_entry(sbi, segno);
+
+	exist = f2fs_test_bit(offset, se->cur_valid_map);
+	if (!exist && type == DATA_GENERIC_ENHANCE) {
+		f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
+			"blkaddr:%u, sit bitmap:%d", blkaddr, exist);
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		WARN_ON(1);
+	}
+	return exist;
+}
+
 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type)
 {
@@ -151,15 +175,22 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 			return false;
 		break;
 	case META_POR:
-	case DATA_GENERIC:
 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
-			blkaddr < MAIN_BLKADDR(sbi))) {
-			if (type == DATA_GENERIC) {
-				f2fs_msg(sbi->sb, KERN_WARNING,
-					"access invalid blkaddr:%u", blkaddr);
-				WARN_ON(1);
-			}
+			blkaddr < MAIN_BLKADDR(sbi)))
 			return false;
+		break;
+	case DATA_GENERIC:
+	case DATA_GENERIC_ENHANCE:
+	case DATA_GENERIC_ENHANCE_READ:
+		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
+				blkaddr < MAIN_BLKADDR(sbi))) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"access invalid blkaddr:%u", blkaddr);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			WARN_ON(1);
+			return false;
+		} else {
+			return __is_bitmap_valid(sbi, blkaddr, type);
 		}
 		break;
 	case META_GENERIC:
@@ -189,7 +220,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 		.in_list = false,
-		.is_meta = (type != META_POR),
+		.is_por = (type == META_POR),
 	};
 	struct blk_plug plug;
 
@@ -644,6 +675,12 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
 		return 0;
 
+	if (bdev_read_only(sbi->sb->s_bdev)) {
+		f2fs_msg(sbi->sb, KERN_INFO, "write access "
+			"unavailable, skipping orphan cleanup");
+		return 0;
+	}
+
 	if (s_flags & SB_RDONLY) {
 		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
 		sbi->sb->s_flags &= ~SB_RDONLY;
@@ -758,13 +795,27 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 	}
 }
 
+static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
+						struct f2fs_checkpoint *ckpt)
+{
+	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
+	__u32 chksum;
+
+	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
+	if (chksum_ofs < CP_CHKSUM_OFFSET) {
+		chksum_ofs += sizeof(chksum);
+		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
+						F2FS_BLKSIZE - chksum_ofs);
+	}
+	return chksum;
+}
+
 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
 		struct f2fs_checkpoint **cp_block, struct page **cp_page,
 		unsigned long long *version)
 {
-	unsigned long blk_size = sbi->blocksize;
 	size_t crc_offset = 0;
-	__u32 crc = 0;
+	__u32 crc;
 
 	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
 	if (IS_ERR(*cp_page))
@@ -773,15 +824,27 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
 	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
 	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
-	if (crc_offset > (blk_size - sizeof(__le32))) {
+	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
+			crc_offset > CP_CHKSUM_OFFSET) {
 		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"invalid crc_offset: %zu", crc_offset);
 		return -EINVAL;
 	}
 
-	crc = cur_cp_crc(*cp_block);
-	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+	if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
+		if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
+			f2fs_put_page(*cp_page, 1);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"layout of large_nat_bitmap is deprecated, "
+				"run fsck to repair, chksum_offset: %zu",
+				crc_offset);
+			return -EINVAL;
+		}
+	}
+
+	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+	if (crc != cur_cp_crc(*cp_block)) {
 		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
 		return -EINVAL;
@@ -1009,13 +1072,11 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
 	if (inode) {
 		unsigned long cur_ino = inode->i_ino;
 
-		if (is_dir)
-			F2FS_I(inode)->cp_task = current;
+		F2FS_I(inode)->cp_task = current;
 
 		filemap_fdatawrite(inode->i_mapping);
 
-		if (is_dir)
-			F2FS_I(inode)->cp_task = NULL;
+		F2FS_I(inode)->cp_task = NULL;
 
 		iput(inode);
 		/* We need to give cpu to another writers. */
@@ -1267,10 +1328,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
 	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
 		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
-	/*
-	 * TODO: we count on fsck.f2fs to clear this flag until we figure out
-	 * missing cases which clear it incorrectly.
-	 */
+	else
+		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
 
 	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
 		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
@@ -1391,7 +1450,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
 	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
 
-	crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
+	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
 	*((__le32 *)((unsigned char *)ckpt +
 				le32_to_cpu(ckpt->checksum_offset)))
 				= cpu_to_le32(crc32);
@@ -1475,7 +1534,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
 	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
 	__set_cp_next_pack(sbi);
 
 	/*
@@ -1500,6 +1563,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	unsigned long long ckpt_ver;
 	int err = 0;
 
+	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
+		return -EROFS;
+
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 		if (cpc->reason != CP_PAUSE)
 			return 0;
@@ -1516,10 +1582,6 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		err = -EIO;
 		goto out;
 	}
-	if (f2fs_readonly(sbi->sb)) {
-		err = -EROFS;
-		goto out;
-	}
 
 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e0c862e..93132ac 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -228,12 +228,14 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
 	struct block_device *bdev = sbi->sb->s_bdev;
 	int i;
 
-	for (i = 0; i < sbi->s_ndevs; i++) {
-		if (FDEV(i).start_blk <= blk_addr &&
-					FDEV(i).end_blk >= blk_addr) {
-			blk_addr -= FDEV(i).start_blk;
-			bdev = FDEV(i).bdev;
-			break;
+	if (f2fs_is_multi_device(sbi)) {
+		for (i = 0; i < sbi->s_ndevs; i++) {
+			if (FDEV(i).start_blk <= blk_addr &&
+			    FDEV(i).end_blk >= blk_addr) {
+				blk_addr -= FDEV(i).start_blk;
+				bdev = FDEV(i).bdev;
+				break;
+			}
 		}
 	}
 	if (bio) {
@@ -247,6 +249,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	int i;
 
+	if (!f2fs_is_multi_device(sbi))
+		return 0;
+
 	for (i = 0; i < sbi->s_ndevs; i++)
 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
 			return i;
@@ -456,7 +461,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
 
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
 {
-	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
+	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
 }
 
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
@@ -485,7 +490,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	struct inode *inode = fio->page->mapping->host;
 
 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
-			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+			fio->is_por ? META_POR : (__is_meta_io(fio) ?
+			META_GENERIC : DATA_GENERIC_ENHANCE)))
 		return -EFAULT;
 
 	trace_f2fs_submit_page_bio(page, fio);
@@ -540,9 +546,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
 		spin_unlock(&io->io_lock);
 	}
 
-	if (__is_valid_data_blkaddr(fio->old_blkaddr))
-		verify_block_addr(fio, fio->old_blkaddr);
-	verify_block_addr(fio, fio->new_blkaddr);
+	verify_fio_blkaddr(fio);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 	inode = fio->page->mapping->host;
@@ -612,16 +616,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 	struct bio_post_read_ctx *ctx;
 	unsigned int post_read_steps = 0;
 
-	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
-		return ERR_PTR(-EFAULT);
-
 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
 	bio_set_op_attrs(bio, REQ_OP_READ,
-			 (f2fs_encrypted_inode(inode) ?
+			 (IS_ENCRYPTED(inode) ?
 			  REQ_NOENCRYPT :
 			  0));
 
@@ -646,8 +647,10 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 							block_t blkaddr)
 {
-	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct bio *bio;
 
+	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -662,8 +665,8 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 		return -EFAULT;
 	}
 	ClearPageError(page);
-	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-	__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+	inc_page_count(sbi, F2FS_RD_DATA);
+	__f2fs_submit_read_bio(sbi, bio, DATA);
 	return 0;
 }
 
@@ -791,6 +794,11 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE_READ)) {
+			err = -EFAULT;
+			goto put_err;
+		}
 		goto got_it;
 	}
 
@@ -804,6 +812,13 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 		err = -ENOENT;
 		goto put_err;
 	}
+	if (dn.data_blkaddr != NEW_ADDR &&
+			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+						dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE)) {
+		err = -EFAULT;
+		goto put_err;
+	}
 got_it:
 	if (PageUptodate(page)) {
 		unlock_page(page);
@@ -1146,12 +1161,12 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 
 	if (__is_valid_data_blkaddr(blkaddr) &&
-		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
 		err = -EFAULT;
 		goto sync_out;
 	}
 
-	if (is_valid_data_blkaddr(sbi, blkaddr)) {
+	if (__is_valid_data_blkaddr(blkaddr)) {
 		/* use out-place-update for driect IO under LFS mode */
 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
 							map->m_may_create) {
@@ -1528,7 +1543,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 
 	if (size) {
-		if (f2fs_encrypted_inode(inode))
+		if (IS_ENCRYPTED(inode))
 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
 
 		ret = fiemap_fill_next_extent(fieinfo, logical,
@@ -1561,6 +1576,130 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	return ret;
 }
 
+static int f2fs_read_single_page(struct inode *inode, struct page *page,
+					unsigned nr_pages,
+					struct f2fs_map_blocks *map,
+					struct bio **bio_ret,
+					sector_t *last_block_in_bio,
+					bool is_readahead)
+{
+	struct bio *bio = *bio_ret;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t last_block_in_file;
+	sector_t block_nr;
+	bool bio_encrypted;
+	u64 dun;
+	int ret = 0;
+
+	block_in_file = (sector_t)page->index;
+	last_block = block_in_file + nr_pages;
+	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+							blkbits;
+	if (last_block > last_block_in_file)
+		last_block = last_block_in_file;
+
+	/* just zeroing out page which is beyond EOF */
+	if (block_in_file >= last_block)
+		goto zero_out;
+	/*
+	 * Map blocks using the previous result first.
+	 */
+	if ((map->m_flags & F2FS_MAP_MAPPED) &&
+			block_in_file > map->m_lblk &&
+			block_in_file < (map->m_lblk + map->m_len))
+		goto got_it;
+
+	/*
+	 * Then do more f2fs_map_blocks() calls until we are
+	 * done with this page.
+	 */
+	map->m_lblk = block_in_file;
+	map->m_len = last_block - block_in_file;
+
+	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
+	if (ret)
+		goto out;
+got_it:
+	if ((map->m_flags & F2FS_MAP_MAPPED)) {
+		block_nr = map->m_pblk + block_in_file - map->m_lblk;
+		SetPageMappedToDisk(page);
+
+		if (!PageUptodate(page) && !cleancache_get_page(page)) {
+			SetPageUptodate(page);
+			goto confused;
+		}
+
+		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+						DATA_GENERIC_ENHANCE_READ)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	} else {
+zero_out:
+		zero_user_segment(page, 0, PAGE_SIZE);
+		if (!PageUptodate(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+		goto out;
+	}
+
+	/*
+	 * This page will go to BIO.  Do we need to send this
+	 * BIO off first?
+	 */
+	if (bio && (*last_block_in_bio != block_nr - 1 ||
+		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+submit_and_realloc:
+		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+
+	dun = PG_DUN(inode, page);
+	bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+	if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+
+	if (bio == NULL) {
+		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+				is_readahead ? REQ_RAHEAD : 0);
+		if (IS_ERR(bio)) {
+			ret = PTR_ERR(bio);
+			bio = NULL;
+			goto out;
+		}
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, bio, dun);
+	}
+
+	/*
+	 * If the page is under writeback, we need to wait for
+	 * its completion to see the correct decrypted data.
+	 */
+	f2fs_wait_on_block_writeback(inode, block_nr);
+
+	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+		goto submit_and_realloc;
+
+	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+	ClearPageError(page);
+	*last_block_in_bio = block_nr;
+	goto out;
+confused:
+	if (bio) {
+		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+	unlock_page(page);
+out:
+	*bio_ret = bio;
+	return ret;
+}
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -1577,15 +1716,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
 	struct inode *inode = mapping->host;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
-	sector_t block_in_file;
-	sector_t last_block;
-	sector_t last_block_in_file;
-	sector_t block_nr;
 	struct f2fs_map_blocks map;
-	bool bio_encrypted;
-	u64 dun;
+	int ret = 0;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1608,108 +1740,13 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 				goto next_page;
 		}
 
-		block_in_file = (sector_t)page->index;
-		last_block = block_in_file + nr_pages;
-		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
-								blkbits;
-		if (last_block > last_block_in_file)
-			last_block = last_block_in_file;
-
-		/* just zeroing out page which is beyond EOF */
-		if (block_in_file >= last_block)
-			goto zero_out;
-		/*
-		 * Map blocks using the previous result first.
-		 */
-		if ((map.m_flags & F2FS_MAP_MAPPED) &&
-				block_in_file > map.m_lblk &&
-				block_in_file < (map.m_lblk + map.m_len))
-			goto got_it;
-
-		/*
-		 * Then do more f2fs_map_blocks() calls until we are
-		 * done with this page.
-		 */
-		map.m_lblk = block_in_file;
-		map.m_len = last_block - block_in_file;
-
-		if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
-			goto set_error_page;
-got_it:
-		if ((map.m_flags & F2FS_MAP_MAPPED)) {
-			block_nr = map.m_pblk + block_in_file - map.m_lblk;
-			SetPageMappedToDisk(page);
-
-			if (!PageUptodate(page) && !cleancache_get_page(page)) {
-				SetPageUptodate(page);
-				goto confused;
-			}
-
-			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
-								DATA_GENERIC))
-				goto set_error_page;
-		} else {
-zero_out:
+		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
+					&last_block_in_bio, is_readahead);
+		if (ret) {
+			SetPageError(page);
 			zero_user_segment(page, 0, PAGE_SIZE);
-			if (!PageUptodate(page))
-				SetPageUptodate(page);
 			unlock_page(page);
-			goto next_page;
 		}
-
-		/*
-		 * This page will go to BIO.  Do we need to send this
-		 * BIO off first?
-		 */
-		if (bio && (last_block_in_bio != block_nr - 1 ||
-			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
-submit_and_realloc:
-			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-
-		dun = PG_DUN(inode, page);
-		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
-		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-
-		if (bio == NULL) {
-			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
-					is_readahead ? REQ_RAHEAD : 0);
-			if (IS_ERR(bio)) {
-				bio = NULL;
-				goto set_error_page;
-			}
-			if (bio_encrypted)
-				fscrypt_set_ice_dun(inode, bio, dun);
-		}
-
-		/*
-		 * If the page is under writeback, we need to wait for
-		 * its completion to see the correct decrypted data.
-		 */
-		f2fs_wait_on_block_writeback(inode, block_nr);
-
-		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
-			goto submit_and_realloc;
-
-		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-		ClearPageError(page);
-		last_block_in_bio = block_nr;
-		goto next_page;
-set_error_page:
-		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_SIZE);
-		unlock_page(page);
-		goto next_page;
-confused:
-		if (bio) {
-			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-		unlock_page(page);
 next_page:
 		if (pages)
 			put_page(page);
@@ -1717,7 +1754,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
 		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-	return 0;
+	return pages ? 0 : ret;
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
@@ -1813,7 +1850,7 @@ static inline bool check_inplace_update_policy(struct inode *inode,
 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
 			fio && fio->op == REQ_OP_WRITE &&
 			!(fio->op_flags & REQ_SYNC) &&
-			!f2fs_encrypted_inode(inode))
+			!IS_ENCRYPTED(inode))
 		return true;
 
 	/* this is only set during fdatasync */
@@ -1890,7 +1927,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
 
 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
-							DATA_GENERIC))
+						DATA_GENERIC_ENHANCE))
 			return -EFAULT;
 
 		ipu_force = true;
@@ -1917,7 +1954,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 got_it:
 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
-							DATA_GENERIC)) {
+						DATA_GENERIC_ENHANCE)) {
 		err = -EFAULT;
 		goto out_writepage;
 	}
@@ -1925,7 +1962,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 	 * If current allocation needs SSR,
 	 * it had better in-place writes for updated data.
 	 */
-	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
+	if (ipu_force ||
+		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
 					need_inplace_update(fio))) {
 		err = encrypt_one_page(fio);
 		if (err)
@@ -1943,9 +1981,10 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 									true);
 			if (PageWriteback(page))
 				end_page_writeback(page);
+		} else {
+			set_inode_flag(inode, FI_UPDATE_WRITE);
 		}
 		trace_f2fs_do_write_data_page(fio->page, IPU);
-		set_inode_flag(inode, FI_UPDATE_WRITE);
 		return err;
 	}
 
@@ -2107,7 +2146,8 @@ static int __write_data_page(struct page *page, bool *submitted,
 	}
 
 	unlock_page(page);
-	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
+	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
+					!F2FS_I(inode)->cp_task)
 		f2fs_balance_fs(sbi, need_balance_fs);
 
 	if (unlikely(f2fs_cp_error(sbi))) {
@@ -2578,6 +2618,11 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	} else {
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+				DATA_GENERIC_ENHANCE_READ)) {
+			err = -EFAULT;
+			goto fail;
+		}
 		err = f2fs_submit_page_read(inode, page, blkaddr);
 		if (err)
 			goto fail;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index d3eafe9..6dd1e4a 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -385,7 +385,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
 		if (err)
 			goto put_error;
 
-		if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
+		if ((IS_ENCRYPTED(dir) || dummy_encrypt) &&
 					f2fs_may_encrypt(inode)) {
 			err = fscrypt_inherit_context(dir, inode, page, false);
 			if (err)
@@ -399,7 +399,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
 
 	if (new_name) {
 		init_dent_inode(new_name, page);
-		if (f2fs_encrypted_inode(dir))
+		if (IS_ENCRYPTED(dir))
 			file_set_enc_name(inode);
 	}
 
@@ -824,7 +824,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 			goto out;
 		}
 
-		if (f2fs_encrypted_inode(d->inode)) {
+		if (IS_ENCRYPTED(d->inode)) {
 			int save_len = fstr->len;
 
 			err = fscrypt_fname_disk_to_usr(d->inode,
@@ -867,7 +867,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
 	int err = 0;
 
-	if (f2fs_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_get_encryption_info(inode);
 		if (err && err != -ENOKEY)
 			goto out;
@@ -929,7 +929,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 
 static int f2fs_dir_open(struct inode *inode, struct file *filp)
 {
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
 	return 0;
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 860a00af..b28baf4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -24,7 +24,6 @@
 #include <linux/quotaops.h>
 #include <crypto/hash.h>
 
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 #ifdef CONFIG_F2FS_CHECK_FS
@@ -211,7 +210,14 @@ enum {
 	META_SSA,
 	META_MAX,
 	META_POR,
-	DATA_GENERIC,
+	DATA_GENERIC,		/* check range only */
+	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
+	DATA_GENERIC_ENHANCE_READ,	/*
+					 * strong check on range and segment
+					 * bitmap but no warning due to race
+					 * condition of read on truncated area
+					 * by extent_cache
+					 */
 	META_GENERIC,
 };
 
@@ -1042,7 +1048,7 @@ struct f2fs_io_info {
 	bool submitted;		/* indicate IO submission */
 	int need_lock;		/* indicate we need to lock cp_rwsem */
 	bool in_list;		/* indicate fio is in io_list */
-	bool is_meta;		/* indicate borrow meta inode mapping or not */
+	bool is_por;		/* indicate IO is from recovery or not */
 	bool retry;		/* need to reallocate block address */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
@@ -1069,8 +1075,8 @@ struct f2fs_dev_info {
 	block_t start_blk;
 	block_t end_blk;
 #ifdef CONFIG_BLK_DEV_ZONED
-	unsigned int nr_blkz;			/* Total number of zones */
-	u8 *blkz_type;				/* Array of zones type */
+	unsigned int nr_blkz;		/* Total number of zones */
+	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
 #endif
 };
 
@@ -1141,7 +1147,7 @@ enum fsync_mode {
 	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
 };
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) \
 			(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
 #else
@@ -1367,6 +1373,17 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 }
 #endif
 
+/*
+ * Test if the mounted volume is a multi-device volume.
+ *   - For a single regular disk volume, sbi->s_ndevs is 0.
+ *   - For a single zoned disk volume, sbi->s_ndevs is 1.
+ *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
+ */
+static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
+{
+	return sbi->s_ndevs > 1;
+}
+
 /* For write statistics. Suppose sector size is 512 bytes,
  * and the return value is in kbytes. s is of struct f2fs_sb_info.
  */
@@ -1779,6 +1796,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 	return -ENOSPC;
 }
 
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 						struct inode *inode,
 						block_t count)
@@ -1787,13 +1805,21 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 
 	spin_lock(&sbi->stat_lock);
 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
-	f2fs_bug_on(sbi, inode->i_blocks < sectors);
 	sbi->total_valid_block_count -= (block_t)count;
 	if (sbi->reserved_blocks &&
 		sbi->current_reserved_blocks < sbi->reserved_blocks)
 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
 					sbi->current_reserved_blocks + count);
 	spin_unlock(&sbi->stat_lock);
+	if (unlikely(inode->i_blocks < sectors)) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
+			inode->i_ino,
+			(unsigned long long)inode->i_blocks,
+			(unsigned long long)sectors);
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		return;
+	}
 	f2fs_i_blocks_write(inode, count, false, true);
 }
 
@@ -1891,7 +1917,11 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
 		offset = (flag == SIT_BITMAP) ?
 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
-		return &ckpt->sit_nat_version_bitmap + offset;
+		/*
+		 * if large_nat_bitmap feature is enabled, leave checksum
+		 * protection for all nat/sit bitmaps.
+		 */
+		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
 	}
 
 	if (__cp_payload(sbi) > 0) {
@@ -2010,7 +2040,6 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
 
 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
-	f2fs_bug_on(sbi, !is_inode && !inode->i_blocks);
 
 	sbi->total_valid_node_count--;
 	sbi->total_valid_block_count--;
@@ -2020,10 +2049,19 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
 
 	spin_unlock(&sbi->stat_lock);
 
-	if (is_inode)
+	if (is_inode) {
 		dquot_free_inode(inode);
-	else
+	} else {
+		if (unlikely(inode->i_blocks == 0)) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+				inode->i_ino,
+				(unsigned long long)inode->i_blocks);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			return;
+		}
 		f2fs_i_blocks_write(inode, 1, false, true);
+	}
 }
 
 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
@@ -2161,7 +2199,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
 		get_pages(sbi, F2FS_DIO_WRITE))
 		return false;
 
-	if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
+	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
 		return false;
 
@@ -2547,7 +2585,14 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
 
 static inline unsigned int addrs_per_inode(struct inode *inode)
 {
-	return CUR_ADDRS_PER_INODE(inode) - get_inline_xattr_addrs(inode);
+	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
+				get_inline_xattr_addrs(inode);
+	return ALIGN_DOWN(addrs, 1);
+}
+
+static inline unsigned int addrs_per_block(struct inode *inode)
+{
+	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1);
 }
 
 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2560,7 +2605,9 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
 
 static inline int inline_xattr_size(struct inode *inode)
 {
-	return get_inline_xattr_addrs(inode) * sizeof(__le32);
+	if (f2fs_has_inline_xattr(inode))
+		return get_inline_xattr_addrs(inode) * sizeof(__le32);
+	return 0;
 }
 
 static inline int f2fs_has_inline_data(struct inode *inode)
@@ -2802,12 +2849,10 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
 
 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
 
-#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META &&	\
-				(!is_read_io((fio)->op) || (fio)->is_meta))
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
 
 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type);
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type)
 {
@@ -2826,15 +2871,6 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
 	return true;
 }
 
-static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
-						block_t blkaddr)
-{
-	if (!__is_valid_data_blkaddr(blkaddr))
-		return false;
-	verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
-	return true;
-}
-
 static inline void f2fs_set_page_private(struct page *page,
 						unsigned long data)
 {
@@ -3492,19 +3528,14 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
 /*
  * crypto support
  */
-static inline bool f2fs_encrypted_inode(struct inode *inode)
-{
-	return file_is_encrypt(inode);
-}
-
 static inline bool f2fs_encrypted_file(struct inode *inode)
 {
-	return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
+	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
 }
 
 static inline void f2fs_set_encrypted_inode(struct inode *inode)
 {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	file_set_encrypt(inode);
 	f2fs_set_inode_flags(inode);
 #endif
@@ -3516,7 +3547,8 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
  */
 static inline bool f2fs_post_read_required(struct inode *inode)
 {
-	return f2fs_encrypted_file(inode);
+	return (f2fs_encrypted_file(inode)
+			&& !fscrypt_using_hardware_encryption(inode));
 }
 
 #define F2FS_FEATURE_FUNCS(name, flagname) \
@@ -3537,16 +3569,12 @@ F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
 
 #ifdef CONFIG_BLK_DEV_ZONED
-static inline int get_blkz_type(struct f2fs_sb_info *sbi,
-			struct block_device *bdev, block_t blkaddr)
+static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+				    block_t blkaddr)
 {
 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
-	int i;
 
-	for (i = 0; i < sbi->s_ndevs; i++)
-		if (FDEV(i).bdev == bdev)
-			return FDEV(i).blkz_type[zno];
-	return -EINVAL;
+	return test_bit(zno, FDEV(devi).blkz_seq);
 }
 #endif
 
@@ -3555,9 +3583,23 @@ static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
 	return f2fs_sb_has_blkzoned(sbi);
 }
 
+static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
+{
+	return blk_queue_discard(bdev_get_queue(bdev)) ||
+	       bdev_is_zoned(bdev);
+}
+
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
 {
-	return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
+	int i;
+
+	if (!f2fs_is_multi_device(sbi))
+		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (f2fs_bdev_support_discard(FDEV(i).bdev))
+			return true;
+	return false;
 }
 
 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
@@ -3566,6 +3608,20 @@ static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
 					f2fs_hw_should_discard(sbi);
 }
 
+static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
+{
+	int i;
+
+	if (!f2fs_is_multi_device(sbi))
+		return bdev_read_only(sbi->sb->s_bdev);
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (bdev_read_only(FDEV(i).bdev))
+			return true;
+	return false;
+}
+
+
 static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
 {
 	clear_opt(sbi, ADAPTIVE);
@@ -3583,7 +3639,7 @@ static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
 
 static inline bool f2fs_may_encrypt(struct inode *inode)
 {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	umode_t mode = inode->i_mode;
 
 	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
@@ -3619,13 +3675,9 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int rw = iov_iter_rw(iter);
 
-	if ((f2fs_encrypted_file(inode)) &&
-		!fscrypt_using_hardware_encryption(inode))
-		return true;
-
 	if (f2fs_post_read_required(inode))
 		return true;
-	if (sbi->s_ndevs)
+	if (f2fs_is_multi_device(sbi))
 		return true;
 	/*
 	 * for blkzoned device, fallback direct IO to buffered IO, so
@@ -3672,4 +3724,4 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 	return false;
 }
 
-#endif
+#endif /* _LINUX_F2FS_H */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 012815d..45b45f3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -39,6 +39,8 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 	ret = filemap_fault(vmf);
 	up_read(&F2FS_I(inode)->i_mmap_sem);
 
+	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
+
 	return ret;
 }
 
@@ -356,7 +358,7 @@ static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
 	switch (whence) {
 	case SEEK_DATA:
 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-			is_valid_data_blkaddr(sbi, blkaddr))
+			__is_valid_data_blkaddr(blkaddr))
 			return true;
 		break;
 	case SEEK_HOLE:
@@ -422,7 +424,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 
 			if (__is_valid_data_blkaddr(blkaddr) &&
 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
-						blkaddr, DATA_GENERIC)) {
+					blkaddr, DATA_GENERIC_ENHANCE)) {
 				f2fs_put_dnode(&dn);
 				goto fail;
 			}
@@ -523,7 +525,8 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 		f2fs_set_data_blkaddr(dn);
 
 		if (__is_valid_data_blkaddr(blkaddr) &&
-			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+			!f2fs_is_valid_blkaddr(sbi, blkaddr,
+					DATA_GENERIC_ENHANCE))
 			continue;
 
 		f2fs_invalidate_blocks(sbi, blkaddr);
@@ -552,7 +555,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 
 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
 {
-	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
+	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
 }
 
 static int truncate_partial_data_page(struct inode *inode, u64 from,
@@ -582,7 +585,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 	zero_user(page, offset, PAGE_SIZE - offset);
 
 	/* An encrypted inode should have a key and truncate the last page. */
-	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
+	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
 	if (!cache_only)
 		set_page_dirty(page);
 	f2fs_put_page(page, 1);
@@ -709,7 +712,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
 		stat->attributes |= STATX_ATTR_APPEND;
 	if (flags & F2FS_COMPR_FL)
 		stat->attributes |= STATX_ATTR_COMPRESSED;
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		stat->attributes |= STATX_ATTR_ENCRYPTED;
 	if (flags & F2FS_IMMUTABLE_FL)
 		stat->attributes |= STATX_ATTR_IMMUTABLE;
@@ -1006,7 +1009,8 @@ static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
 	} else if (ret == -ENOENT) {
 		if (dn.max_level == 0)
 			return -ENOENT;
-		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
+									len);
 		blkaddr += done;
 		do_replace += done;
 		goto next;
@@ -1017,6 +1021,14 @@ static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
 		*blkaddr = datablock_addr(dn.inode,
 					dn.node_page, dn.ofs_in_node);
+
+		if (__is_valid_data_blkaddr(*blkaddr) &&
+			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
+					DATA_GENERIC_ENHANCE)) {
+			f2fs_put_dnode(&dn);
+			return -EFAULT;
+		}
+
 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
 
 			if (test_opt(sbi, LFS)) {
@@ -1157,7 +1169,7 @@ static int __exchange_data_block(struct inode *src_inode,
 	int ret;
 
 	while (len) {
-		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
 
 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
 					array_size(olen, sizeof(block_t)),
@@ -1558,7 +1570,7 @@ static long f2fs_fallocate(struct file *file, int mode,
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
-	if (f2fs_encrypted_inode(inode) &&
+	if (IS_ENCRYPTED(inode) &&
 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
 		return -EOPNOTSUPP;
 
@@ -1642,7 +1654,7 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	unsigned int flags = fi->i_flags;
 
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		flags |= F2FS_ENCRYPT_FL;
 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
 		flags |= F2FS_INLINE_DATA_FL;
@@ -2416,7 +2428,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
 		return -EINVAL;
 
-	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
 		return -EOPNOTSUPP;
 
 	if (src == dst) {
@@ -2573,10 +2585,10 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 							sizeof(range)))
 		return -EFAULT;
 
-	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
+	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
 			__is_large_section(sbi)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"Can't flush %u in %d for segs_per_sec %u != 1\n",
+			"Can't flush %u in %d for segs_per_sec %u != 1",
 				range.dev_num, sbi->s_ndevs,
 				sbi->segs_per_sec);
 		return -EINVAL;
@@ -2858,7 +2870,7 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
 
 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"%s: Enable GC = ino %lx after %x GC trials\n",
+			"%s: Enable GC = ino %lx after %x GC trials",
 			__func__, inode->i_ino,
 			fi->i_gc_failures[GC_FAILURE_PIN]);
 		clear_inode_flag(inode, FI_PIN_FILE);
@@ -3035,15 +3047,21 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(file);
 	ssize_t ret;
 
-	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
-		return -EIO;
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+		ret = -EIO;
+		goto out;
+	}
 
-	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
-		return -EINVAL;
+	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	if (!inode_trylock(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
-			return -EAGAIN;
+		if (iocb->ki_flags & IOCB_NOWAIT) {
+			ret = -EAGAIN;
+			goto out;
+		}
 		inode_lock(inode);
 	}
 
@@ -3056,19 +3074,16 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
 			set_inode_flag(inode, FI_NO_PREALLOC);
 
-		if ((iocb->ki_flags & IOCB_NOWAIT) &&
-			(iocb->ki_flags & IOCB_DIRECT)) {
-				if (!f2fs_overwrite_io(inode, iocb->ki_pos,
+		if ((iocb->ki_flags & IOCB_NOWAIT)) {
+			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
 						iov_iter_count(from)) ||
-					f2fs_has_inline_data(inode) ||
-					f2fs_force_buffered_io(inode,
-							iocb, from)) {
-						clear_inode_flag(inode,
-								FI_NO_PREALLOC);
-						inode_unlock(inode);
-						return -EAGAIN;
-				}
-
+				f2fs_has_inline_data(inode) ||
+				f2fs_force_buffered_io(inode, iocb, from)) {
+				clear_inode_flag(inode, FI_NO_PREALLOC);
+				inode_unlock(inode);
+				ret = -EAGAIN;
+				goto out;
+			}
 		} else {
 			preallocated = true;
 			target_size = iocb->ki_pos + iov_iter_count(from);
@@ -3077,7 +3092,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			if (err) {
 				clear_inode_flag(inode, FI_NO_PREALLOC);
 				inode_unlock(inode);
-				return err;
+				ret = err;
+				goto out;
 			}
 		}
 		ret = __generic_file_write_iter(iocb, from);
@@ -3091,7 +3107,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
 	}
 	inode_unlock(inode);
-
+out:
+	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
+					iov_iter_count(from), ret);
 	if (ret > 0)
 		ret = generic_write_sync(iocb, ret);
 	return ret;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 195cf0f..963fb45 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -591,7 +591,7 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
 		bidx = node_ofs - 5 - dec;
 	}
-	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
+	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
 }
 
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -656,6 +656,11 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE_READ))) {
+			err = -EFAULT;
+			goto put_page;
+		}
 		goto got_it;
 	}
 
@@ -665,8 +670,12 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 		goto put_page;
 	f2fs_put_dnode(&dn);
 
+	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
+		err = -ENOENT;
+		goto put_page;
+	}
 	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
-						DATA_GENERIC))) {
+						DATA_GENERIC_ENHANCE))) {
 		err = -EFAULT;
 		goto put_page;
 	}
@@ -1175,6 +1184,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_stop_checkpoint(sbi, false);
 			goto skip;
 		}
 
@@ -1346,7 +1356,7 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && !__is_large_section(sbi))
+	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 8422a13..d67622c 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -438,6 +438,14 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
 
+	/*
+	 * should retrieve reserved space which was used to keep
+	 * inline_dentry's structure for backward compatibility.
+	 */
+	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
+			!f2fs_has_inline_xattr(dir))
+		F2FS_I(dir)->i_inline_xattr_size = 0;
+
 	f2fs_i_depth_write(dir, 1);
 	if (i_size_read(dir) < PAGE_SIZE)
 		f2fs_i_size_write(dir, PAGE_SIZE);
@@ -519,6 +527,15 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
 
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
+
+	/*
+	 * should retrieve reserved space which was used to keep
+	 * inline_dentry's structure for backward compatibility.
+	 */
+	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
+			!f2fs_has_inline_xattr(dir))
+		F2FS_I(dir)->i_inline_xattr_size = 0;
+
 	kvfree(backup_dentry);
 	return 0;
 recover:
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index d44e6de..ccb0222 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -44,7 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
 		new_fl |= S_NOATIME;
 	if (flags & F2FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	if (f2fs_encrypted_inode(inode))
+	if (file_is_encrypt(inode))
 		new_fl |= S_ENCRYPTED;
 	inode_set_flags(inode, new_fl,
 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
@@ -73,7 +73,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
 
 	if (!__is_valid_data_blkaddr(addr))
 		return 1;
-	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
 		return -EFAULT;
 	return 0;
 }
@@ -177,8 +177,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
 
 	if (provided != calculated)
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"checksum invalid, ino = %x, %x vs. %x",
-			ino_of_node(page), provided, calculated);
+			"checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
+			page->index, ino_of_node(page), provided, calculated);
 
 	return provided == calculated;
 }
@@ -267,9 +267,10 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
 
 		if (ei->len &&
-			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
+						DATA_GENERIC_ENHANCE) ||
 			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
-							DATA_GENERIC))) {
+						DATA_GENERIC_ENHANCE))) {
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			f2fs_msg(sbi->sb, KERN_WARNING,
 				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
@@ -468,7 +469,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
 		inode_nohighmem(inode);
 	} else if (S_ISLNK(inode->i_mode)) {
-		if (f2fs_encrypted_inode(inode))
+		if (file_is_encrypt(inode))
 			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
 		else
 			inode->i_op = &f2fs_symlink_inode_operations;
@@ -488,6 +489,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
 	return inode;
 
 bad_inode:
+	f2fs_inode_synced(inode);
 	iget_failed(inode);
 	trace_f2fs_iget_exit(inode, ret);
 	return ERR_PTR(ret);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index f218d94..2fdcbe9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -76,7 +76,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 	set_inode_flag(inode, FI_NEW_INODE);
 
 	/* If the directory encrypted, then we should encrypt the inode. */
-	if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+	if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
 				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
@@ -143,7 +143,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 	return ERR_PTR(err);
 }
 
-static int is_extension_exist(const unsigned char *s, const char *sub)
+static inline int is_extension_exist(const unsigned char *s, const char *sub)
 {
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
@@ -477,7 +477,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
 		if (err)
 			goto out_iput;
 	}
-	if (f2fs_encrypted_inode(dir) &&
+	if (IS_ENCRYPTED(dir) &&
 	    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 	    !fscrypt_has_permitted_context(dir, inode)) {
 		f2fs_msg(inode->i_sb, KERN_WARNING,
@@ -804,7 +804,7 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
+	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
 		int err = fscrypt_get_encryption_info(dir);
 		if (err)
 			return err;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 78baf13..56fe5b97 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -454,7 +454,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 			new_blkaddr == NULL_ADDR);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 			new_blkaddr == NEW_ADDR);
-	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
+	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
 			new_blkaddr == NEW_ADDR);
 
 	/* increment version no as node is removed */
@@ -465,7 +465,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 
 	/* change address */
 	nat_set_blkaddr(e, new_blkaddr);
-	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
+	if (!__is_valid_data_blkaddr(new_blkaddr))
 		set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
@@ -526,6 +526,7 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 	struct f2fs_nat_entry ne;
 	struct nat_entry *e;
 	pgoff_t index;
+	block_t blkaddr;
 	int i;
 
 	ni->nid = nid;
@@ -569,6 +570,11 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 	node_info_from_raw_nat(ni, &ne);
 	f2fs_put_page(page, 1);
 cache:
+	blkaddr = le32_to_cpu(ne.block_addr);
+	if (__is_valid_data_blkaddr(blkaddr) &&
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
+		return -EFAULT;
+
 	/* cache nat entry */
 	cache_nat_entry(sbi, nid, &ne);
 	return 0;
@@ -600,9 +606,9 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)
 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
 {
 	const long direct_index = ADDRS_PER_INODE(dn->inode);
-	const long direct_blks = ADDRS_PER_BLOCK;
-	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
-	unsigned int skipped_unit = ADDRS_PER_BLOCK;
+	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
+	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
+	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
 	int cur_level = dn->cur_level;
 	int max_level = dn->max_level;
 	pgoff_t base = 0;
@@ -636,9 +642,9 @@ static int get_node_path(struct inode *inode, long block,
 				int offset[4], unsigned int noffset[4])
 {
 	const long direct_index = ADDRS_PER_INODE(inode);
-	const long direct_blks = ADDRS_PER_BLOCK;
+	const long direct_blks = ADDRS_PER_BLOCK(inode);
 	const long dptrs_per_blk = NIDS_PER_BLOCK;
-	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 	int n = 0;
 	int level = 0;
@@ -1179,8 +1185,14 @@ int f2fs_remove_inode_page(struct inode *inode)
 		f2fs_put_dnode(&dn);
 		return -EIO;
 	}
-	f2fs_bug_on(F2FS_I_SB(inode),
-			inode->i_blocks != 0 && inode->i_blocks != 8);
+
+	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
+		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+			"Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+			inode->i_ino,
+			(unsigned long long)inode->i_blocks);
+		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+	}
 
 	/* will put inode & node pages */
 	err = truncate_node(&dn);
@@ -1275,9 +1287,10 @@ static int read_node_page(struct page *page, int op_flags)
 	int err;
 
 	if (PageUptodate(page)) {
-#ifdef CONFIG_F2FS_CHECK_FS
-		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
-#endif
+		if (!f2fs_inode_chksum_verify(sbi, page)) {
+			ClearPageUptodate(page);
+			return -EBADMSG;
+		}
 		return LOCKED_PAGE;
 	}
 
@@ -1543,7 +1556,8 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	}
 
 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
-		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
+		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
+					DATA_GENERIC_ENHANCE)) {
 		up_read(&sbi->node_write);
 		goto redirty_out;
 	}
@@ -2078,6 +2092,9 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
 	if (unlikely(nid == 0))
 		return false;
 
+	if (unlikely(f2fs_check_nid_range(sbi, nid)))
+		return false;
+
 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
 	i->nid = nid;
 	i->state = FREE_NID;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e3883db..e04f82b 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -325,8 +325,10 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 			break;
 		}
 
-		if (!is_recoverable_dnode(page))
+		if (!is_recoverable_dnode(page)) {
+			f2fs_put_page(page, 1);
 			break;
+		}
 
 		if (!is_fsync_dnode(page))
 			goto next;
@@ -338,8 +340,10 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 			if (!check_only &&
 					IS_INODE(page) && is_dent_dnode(page)) {
 				err = f2fs_recover_inode_page(sbi, page);
-				if (err)
+				if (err) {
+					f2fs_put_page(page, 1);
 					break;
+				}
 				quota_inode = true;
 			}
 
@@ -355,6 +359,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 					err = 0;
 					goto next;
 				}
+				f2fs_put_page(page, 1);
 				break;
 			}
 		}
@@ -370,6 +375,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 				"%s: detect looped node chain, "
 				"blkaddr:%u, next:%u",
 				__func__, blkaddr, next_blkaddr_of_node(page));
+			f2fs_put_page(page, 1);
 			err = -EINVAL;
 			break;
 		}
@@ -380,7 +386,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 
 		f2fs_ra_meta_pages_cond(sbi, blkaddr);
 	}
-	f2fs_put_page(page, 1);
 	return err;
 }
 
@@ -546,7 +551,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 		goto err;
 
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
-	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
+
+	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
+			inode->i_ino, ofs_of_node(dn.node_page),
+			ofs_of_node(page));
+		err = -EFAULT;
+		goto err;
+	}
 
 	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
@@ -554,6 +567,18 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
 
+		if (__is_valid_data_blkaddr(src) &&
+			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
+			err = -EFAULT;
+			goto err;
+		}
+
+		if (__is_valid_data_blkaddr(dest) &&
+			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+			err = -EFAULT;
+			goto err;
+		}
+
 		/* skip recovering if dest is the same as src */
 		if (src == dest)
 			continue;
@@ -666,8 +691,10 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
 		 */
 		if (IS_INODE(page)) {
 			err = recover_inode(entry->inode, page);
-			if (err)
+			if (err) {
+				f2fs_put_page(page, 1);
 				break;
+			}
 		}
 		if (entry->last_dentry == blkaddr) {
 			err = recover_dentry(entry->inode, page, dir_list);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index aa7fe79..8dee063 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -580,7 +580,7 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
 	int ret = 0;
 	int i;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
 
 	for (i = 0; i < sbi->s_ndevs; i++) {
@@ -648,7 +648,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
 		return ret;
 	}
 
-	if (atomic_inc_return(&fcc->queued_flush) == 1 || sbi->s_ndevs > 1) {
+	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
+	    f2fs_is_multi_device(sbi)) {
 		ret = submit_flush_wait(sbi, ino);
 		atomic_dec(&fcc->queued_flush);
 
@@ -754,7 +755,7 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
 {
 	int ret = 0, i;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return 0;
 
 	for (i = 1; i < sbi->s_ndevs; i++) {
@@ -1367,9 +1368,12 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
 {
 	block_t lblkstart = blkstart;
 
+	if (!f2fs_bdev_support_discard(bdev))
+		return 0;
+
 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
 
-	if (sbi->s_ndevs) {
+	if (f2fs_is_multi_device(sbi)) {
 		int devi = f2fs_target_device_index(sbi, blkstart);
 
 		blkstart -= FDEV(devi).start_blk;
@@ -1732,42 +1736,36 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 	block_t lblkstart = blkstart;
 	int devi = 0;
 
-	if (sbi->s_ndevs) {
+	if (f2fs_is_multi_device(sbi)) {
 		devi = f2fs_target_device_index(sbi, blkstart);
+		if (blkstart < FDEV(devi).start_blk ||
+		    blkstart > FDEV(devi).end_blk) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x",
+				 blkstart);
+			return -EIO;
+		}
 		blkstart -= FDEV(devi).start_blk;
 	}
 
-	/*
-	 * We need to know the type of the zone: for conventional zones,
-	 * use regular discard if the drive supports it. For sequential
-	 * zones, reset the zone write pointer.
-	 */
-	switch (get_blkz_type(sbi, bdev, blkstart)) {
-
-	case BLK_ZONE_TYPE_CONVENTIONAL:
-		if (!blk_queue_discard(bdev_get_queue(bdev)))
-			return 0;
-		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
-	case BLK_ZONE_TYPE_SEQWRITE_REQ:
-	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+	/* For sequential zones, reset the zone write pointer */
+	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
 		sector = SECTOR_FROM_BLOCK(blkstart);
 		nr_sects = SECTOR_FROM_BLOCK(blklen);
 
 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
 				nr_sects != bdev_zone_sectors(bdev)) {
-			f2fs_msg(sbi->sb, KERN_INFO,
-				"(%d) %s: Unaligned discard attempted (block %x + %x)",
+			f2fs_msg(sbi->sb, KERN_ERR,
+				"(%d) %s: Unaligned zone reset attempted (block %x + %x)",
 				devi, sbi->s_ndevs ? FDEV(devi).path: "",
 				blkstart, blklen);
 			return -EIO;
 		}
 		trace_f2fs_issue_reset_zone(bdev, blkstart);
-		return blkdev_reset_zones(bdev, sector,
-					  nr_sects, GFP_NOFS);
-	default:
-		/* Unknown zone type: broken device ? */
-		return -EIO;
+		return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
 	}
+
+	/* For conventional zones, use regular discard if supported */
+	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
 }
 #endif
 
@@ -1775,8 +1773,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sbi) &&
-				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
@@ -2172,8 +2169,11 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 			 * before, we must track that to know how much space we
 			 * really have.
 			 */
-			if (f2fs_test_bit(offset, se->ckpt_valid_map))
+			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+				spin_lock(&sbi->stat_lock);
 				sbi->unusable_block_count++;
+				spin_unlock(&sbi->stat_lock);
+			}
 		}
 
 		if (f2fs_test_and_clear_bit(offset, se->discard_map))
@@ -2220,7 +2220,7 @@ bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
 	struct seg_entry *se;
 	bool is_cp = false;
 
-	if (!is_valid_data_blkaddr(sbi, blkaddr))
+	if (!__is_valid_data_blkaddr(blkaddr))
 		return true;
 
 	down_read(&sit_i->sentry_lock);
@@ -3089,7 +3089,7 @@ static void update_device_state(struct f2fs_io_info *fio)
 	struct f2fs_sb_info *sbi = fio->sbi;
 	unsigned int devidx;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return;
 
 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
@@ -3187,13 +3187,18 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 {
 	int err;
 	struct f2fs_sb_info *sbi = fio->sbi;
+	unsigned int segno;
 
 	fio->new_blkaddr = fio->old_blkaddr;
 	/* i/o temperature is needed for passing down write hints */
 	__get_segment_type(fio);
 
-	f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
-			GET_SEGNO(sbi, fio->new_blkaddr))->type));
+	segno = GET_SEGNO(sbi, fio->new_blkaddr);
+
+	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		return -EFAULT;
+	}
 
 	stat_inc_inplace_blocks(fio->sbi);
 
@@ -3336,7 +3341,7 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
 	if (!f2fs_post_read_required(inode))
 		return;
 
-	if (!is_valid_data_blkaddr(sbi, blkaddr))
+	if (!__is_valid_data_blkaddr(blkaddr))
 		return;
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 5c7ed04..429007b 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -82,7 +82,7 @@
 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)					\
-	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
+	((!__is_valid_data_blkaddr(blk_addr)) ?			\
 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)					\
@@ -656,14 +656,15 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
-static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
+static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 
-	if (__is_meta_io(fio))
-		verify_blkaddr(sbi, blk_addr, META_GENERIC);
-	else
-		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
+	if (__is_valid_data_blkaddr(fio->old_blkaddr))
+		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
+					META_GENERIC : DATA_GENERIC);
+	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
+					META_GENERIC : DATA_GENERIC_ENHANCE);
 }
 
 /*
@@ -672,7 +673,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
 static inline int check_block_count(struct f2fs_sb_info *sbi,
 		int segno, struct f2fs_sit_entry *raw_sit)
 {
-#ifdef CONFIG_F2FS_CHECK_FS
 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
@@ -699,7 +699,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		return -EINVAL;
 	}
-#endif
+
 	/* check segment usage, and check boundary of a given segment number */
 	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
 					|| segno > TOTAL_SEGS(sbi) - 1)) {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8737c5c..ac24cff6 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -757,7 +757,7 @@ static int parse_options(struct super_block *sb, char *options)
 			kvfree(name);
 			break;
 		case Opt_test_dummy_encryption:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 			if (!f2fs_sb_has_encrypt(sbi)) {
 				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
 				return -EINVAL;
@@ -1024,7 +1024,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
 	for (i = 0; i < sbi->s_ndevs; i++) {
 		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
 #ifdef CONFIG_BLK_DEV_ZONED
-		kvfree(FDEV(i).blkz_type);
+		kvfree(FDEV(i).blkz_seq);
 #endif
 	}
 	kvfree(sbi->devs);
@@ -1244,10 +1244,13 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_blocks = total_count - start_count;
 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
 						sbi->current_reserved_blocks;
+
+	spin_lock(&sbi->stat_lock);
 	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
 		buf->f_bfree = 0;
 	else
 		buf->f_bfree -= sbi->unusable_block_count;
+	spin_unlock(&sbi->stat_lock);
 
 	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
 		buf->f_bavail = buf->f_bfree -
@@ -1418,7 +1421,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 		seq_printf(seq, ",whint_mode=%s", "user-based");
 	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
 		seq_printf(seq, ",whint_mode=%s", "fs-based");
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	if (F2FS_OPTION(sbi).test_dummy_encryption)
 		seq_puts(seq, ",test_dummy_encryption");
 #endif
@@ -1522,9 +1525,15 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	mutex_lock(&sbi->gc_mutex);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
-	f2fs_write_checkpoint(sbi, &cpc);
+	err = f2fs_write_checkpoint(sbi, &cpc);
+	if (err)
+		goto out_unlock;
 
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
+out_unlock:
 	mutex_unlock(&sbi->gc_mutex);
 restore_flag:
 	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
@@ -2204,7 +2213,7 @@ static const struct super_operations f2fs_sops = {
 	.remount_fs	= f2fs_remount,
 };
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
 {
 	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
@@ -2301,7 +2310,7 @@ static const struct export_operations f2fs_export_ops = {
 static loff_t max_file_blocks(void)
 {
 	loff_t result = 0;
-	loff_t leaf_count = ADDRS_PER_BLOCK;
+	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
 
 	/*
 	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
@@ -2479,7 +2488,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 	/* Currently, support only 4KB page cache size */
 	if (F2FS_BLKSIZE != PAGE_SIZE) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid page_cache_size (%lu), supports only 4KB\n",
+			"Invalid page_cache_size (%lu), supports only 4KB",
 			PAGE_SIZE);
 		return 1;
 	}
@@ -2488,7 +2497,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
 	if (blocksize != F2FS_BLKSIZE) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid blocksize (%u), supports only 4KB\n",
+			"Invalid blocksize (%u), supports only 4KB",
 			blocksize);
 		return 1;
 	}
@@ -2496,7 +2505,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 	/* check log blocks per segment */
 	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid log blocks per segment (%u)\n",
+			"Invalid log blocks per segment (%u)",
 			le32_to_cpu(raw_super->log_blocks_per_seg));
 		return 1;
 	}
@@ -2617,7 +2626,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	unsigned int log_blocks_per_seg;
 	unsigned int segment_count_main;
 	unsigned int cp_pack_start_sum, cp_payload;
-	block_t user_block_count;
+	block_t user_block_count, valid_user_blocks;
+	block_t avail_node_count, valid_node_count;
 	int i, j;
 
 	total = le32_to_cpu(raw_super->segment_count);
@@ -2652,6 +2662,24 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 		return 1;
 	}
 
+	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
+	if (valid_user_blocks > user_block_count) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong valid_user_blocks: %u, user_block_count: %u",
+			valid_user_blocks, user_block_count);
+		return 1;
+	}
+
+	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
+	avail_node_count = sbi->total_node_count - sbi->nquota_files -
+						F2FS_RESERVED_NODE_NUM;
+	if (valid_node_count > avail_node_count) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong valid_node_count: %u, avail_node_count: %u",
+			valid_node_count, avail_node_count);
+		return 1;
+	}
+
 	main_segs = le32_to_cpu(raw_super->segment_count_main);
 	blocks_per_seg = sbi->blocks_per_seg;
 
@@ -2823,9 +2851,11 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
 		FDEV(devi).nr_blkz++;
 
-	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
-								GFP_KERNEL);
-	if (!FDEV(devi).blkz_type)
+	FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
+					BITS_TO_LONGS(FDEV(devi).nr_blkz)
+					* sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!FDEV(devi).blkz_seq)
 		return -ENOMEM;
 
 #define F2FS_REPORT_NR_ZONES   4096
@@ -2852,7 +2882,8 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 		}
 
 		for (i = 0; i < nr_zones; i++) {
-			FDEV(devi).blkz_type[n] = zones[i].type;
+			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
+				set_bit(n, FDEV(devi).blkz_seq);
 			sector += zones[i].len;
 			n++;
 		}
@@ -3135,7 +3166,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 #ifndef CONFIG_BLK_DEV_ZONED
 	if (f2fs_sb_has_blkzoned(sbi)) {
 		f2fs_msg(sb, KERN_ERR,
-			 "Zoned block device support is not enabled\n");
+			 "Zoned block device support is not enabled");
 		err = -EOPNOTSUPP;
 		goto free_sb_buf;
 	}
@@ -3159,10 +3190,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 
 #ifdef CONFIG_QUOTA
 	sb->dq_op = &f2fs_quota_operations;
-	if (f2fs_sb_has_quota_ino(sbi))
-		sb->s_qcop = &dquot_quotactl_sysfile_ops;
-	else
-		sb->s_qcop = &f2fs_quotactl_ops;
+	sb->s_qcop = &f2fs_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 
 	if (f2fs_sb_has_quota_ino(sbi)) {
@@ -3174,7 +3202,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
 	sb->s_op = &f2fs_sops;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	sb->s_cop = &f2fs_cryptops;
 #endif
 	sb->s_xattr = f2fs_xattr_handlers;
@@ -3380,10 +3408,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		 * mount should be failed, when device has readonly mode, and
 		 * previous checkpoint was not done by clean system shutdown.
 		 */
-		if (bdev_read_only(sb->s_bdev) &&
-				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
-			err = -EROFS;
-			goto free_meta;
+		if (f2fs_hw_is_readonly(sbi)) {
+			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+				err = -EROFS;
+				f2fs_msg(sb, KERN_ERR,
+					"Need to recover fsync data, but "
+					"write access unavailable");
+				goto free_meta;
+			}
+			f2fs_msg(sbi->sb, KERN_INFO, "write access "
+				"unavailable, skipping recovery");
+			goto reset_checkpoint;
 		}
 
 		if (need_fsck)
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 18c627e..729f46a 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -441,7 +441,7 @@ F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
 #endif
 #ifdef CONFIG_BLK_DEV_ZONED
@@ -503,7 +503,7 @@ static struct attribute *f2fs_attrs[] = {
 };
 
 static struct attribute *f2fs_feat_attrs[] = {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ATTR_LIST(encryption),
 #endif
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 848a785..e791741 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -202,12 +202,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index)
 	return handler;
 }
 
-static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
-					size_t len, const char *name)
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
+				void *last_base_addr, int index,
+				size_t len, const char *name)
 {
 	struct f2fs_xattr_entry *entry;
 
 	list_for_each_xattr(entry, base_addr) {
+		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+			return NULL;
+
 		if (entry->e_name_index != index)
 			continue;
 		if (entry->e_name_len != len)
@@ -297,20 +302,22 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				const char *name, struct f2fs_xattr_entry **xe,
 				void **base_addr, int *base_size)
 {
-	void *cur_addr, *txattr_addr, *last_addr = NULL;
+	void *cur_addr, *txattr_addr, *last_txattr_addr;
+	void *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
-	unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
 	unsigned int inline_size = inline_xattr_size(inode);
 	int err = 0;
 
-	if (!size && !inline_size)
+	if (!xnid && !inline_size)
 		return -ENODATA;
 
-	*base_size = inline_size + size + XATTR_PADDING_SIZE;
+	*base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
 	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
 	if (!txattr_addr)
 		return -ENOMEM;
 
+	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+
 	/* read from inline xattr */
 	if (inline_size) {
 		err = read_inline_xattr(inode, ipage, txattr_addr);
@@ -337,7 +344,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 	else
 		cur_addr = txattr_addr;
 
-	*xe = __find_xattr(cur_addr, index, len, name);
+	*xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+	if (!*xe) {
+		err = -EFAULT;
+		goto out;
+	}
 check:
 	if (IS_XATTR_LAST_ENTRY(*xe)) {
 		err = -ENODATA;
@@ -581,7 +592,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 			struct page *ipage, int flags)
 {
 	struct f2fs_xattr_entry *here, *last;
-	void *base_addr;
+	void *base_addr, *last_base_addr;
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	int found, newsize;
 	size_t len;
 	__u32 new_hsize;
@@ -605,8 +617,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	if (error)
 		return error;
 
+	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
 	/* find entry with wanted name. */
-	here = __find_xattr(base_addr, index, len, name);
+	here = __find_xattr(base_addr, last_base_addr, index, len, name);
+	if (!here) {
+		error = -EFAULT;
+		goto exit;
+	}
 
 	found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
 
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 9172ee0..a90920e 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -71,6 +71,8 @@ struct f2fs_xattr_entry {
 				entry = XATTR_NEXT_ENTRY(entry))
 #define VALID_XATTR_BLOCK_SIZE	(PAGE_SIZE - sizeof(struct node_footer))
 #define XATTR_PADDING_SIZE	(sizeof(__u32))
+#define XATTR_SIZE(x,i)		(((x) ? VALID_XATTR_BLOCK_SIZE : 0) +	\
+						(inline_xattr_size(i)))
 #define MIN_OFFSET(i)		XATTR_ALIGN(inline_xattr_size(i) +	\
 						VALID_XATTR_BLOCK_SIZE)
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 82ce6d4..9544e2f 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
 	isw->inode = inode;
 
-	atomic_inc(&isw_nr_in_flight);
-
 	/*
 	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 	 * the RCU protected stat update paths to grab the i_page
@@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+
+	atomic_inc(&isw_nr_in_flight);
+
 	goto out_unlock;
 
 out_free:
@@ -908,7 +909,11 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 void cgroup_writeback_umount(void)
 {
 	if (atomic_read(&isw_nr_in_flight)) {
-		synchronize_rcu();
+		/*
+		 * Use rcu_barrier() to wait for all pending callbacks to
+		 * ensure that all in-flight wb switches are in the workqueue.
+		 */
+		rcu_barrier();
 		flush_workqueue(isw_wq);
 	}
 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 414c534..8c6a82d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1996,10 +1996,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
 
 	ret = -EINVAL;
-	if (rem < len) {
-		pipe_unlock(pipe);
-		goto out;
-	}
+	if (rem < len)
+		goto out_free;
 
 	rem = len;
 	while (rem) {
@@ -2017,7 +2015,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
 			pipe->nrbufs--;
 		} else {
-			pipe_buf_get(pipe, ibuf);
+			if (!pipe_buf_get(pipe, ibuf))
+				goto out_free;
+
 			*obuf = *ibuf;
 			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
 			obuf->len = rem;
@@ -2040,11 +2040,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	ret = fuse_dev_do_write(fud, &cs, len);
 
 	pipe_lock(pipe);
+out_free:
 	for (idx = 0; idx < nbuf; idx++)
 		pipe_buf_release(pipe, &bufs[idx]);
 	pipe_unlock(pipe);
 
-out:
 	kvfree(bufs);
 	return ret;
 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index baa8fbc..f1108a5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -179,7 +179,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
 		file->f_op = &fuse_direct_io_file_operations;
 	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
 		invalidate_inode_pages2(inode->i_mapping);
-	if (ff->open_flags & FOPEN_NONSEEKABLE)
+	if (ff->open_flags & FOPEN_STREAM)
+		stream_open(inode, file);
+	else if (ff->open_flags & FOPEN_NONSEEKABLE)
 		nonseekable_open(inode, file);
 	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
 		struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1526,7 +1528,7 @@ __acquires(fc->lock)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
-	size_t crop = i_size_read(inode);
+	loff_t crop = i_size_read(inode);
 	struct fuse_req *req;
 
 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
@@ -2975,6 +2977,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 		}
 	}
 
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    offset + length > i_size_read(inode)) {
+		err = inode_newsize_ok(inode, offset + length);
+		if (err)
+			return err;
+	}
+
 	if (!(mode & FALLOC_FL_KEEP_SIZE))
 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9d566e6..ccdd8c8 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -140,6 +140,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
+	BUG_ON(atomic_read(&gl->gl_revokes));
 	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 	smp_mb();
 	wake_up_glock(gl);
@@ -183,15 +184,19 @@ static int demote_ok(const struct gfs2_glock *gl)
 
 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 {
+	if (!(gl->gl_ops->go_flags & GLOF_LRU))
+		return;
+
 	spin_lock(&lru_lock);
 
-	if (!list_empty(&gl->gl_lru))
-		list_del_init(&gl->gl_lru);
-	else
-		atomic_inc(&lru_count);
-
+	list_del(&gl->gl_lru);
 	list_add_tail(&gl->gl_lru, &lru_list);
-	set_bit(GLF_LRU, &gl->gl_flags);
+
+	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
+		set_bit(GLF_LRU, &gl->gl_flags);
+		atomic_inc(&lru_count);
+	}
+
 	spin_unlock(&lru_lock);
 }
 
@@ -201,7 +206,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 		return;
 
 	spin_lock(&lru_lock);
-	if (!list_empty(&gl->gl_lru)) {
+	if (test_bit(GLF_LRU, &gl->gl_flags)) {
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 		clear_bit(GLF_LRU, &gl->gl_flags);
@@ -1158,8 +1163,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
-	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
-	    (glops->go_flags & GLOF_LRU))
+	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
 		gfs2_glock_add_to_lru(gl);
 
 	trace_gfs2_glock_queue(gh, 0);
@@ -1455,6 +1459,7 @@ __acquires(&lru_lock)
 		if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
 			list_add(&gl->gl_lru, &lru_list);
+			set_bit(GLF_LRU, &gl->gl_flags);
 			atomic_inc(&lru_count);
 			continue;
 		}
@@ -1462,7 +1467,6 @@ __acquires(&lru_lock)
 			spin_unlock(&gl->gl_lockref.lock);
 			goto add_back_to_lru;
 		}
-		clear_bit(GLF_LRU, &gl->gl_flags);
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1497,6 +1501,7 @@ static long gfs2_scan_glock_lru(int nr)
 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
+			clear_bit(GLF_LRU, &gl->gl_flags);
 			freed++;
 			continue;
 		}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index ac7caa2..62edf8f 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -31,9 +31,10 @@
  * @delta is the difference between the current rtt sample and the
  * running average srtt. We add 1/8 of that to the srtt in order to
  * update the current srtt estimate. The variance estimate is a bit
- * more complicated. We subtract the abs value of the @delta from
- * the current variance estimate and add 1/4 of that to the running
- * total.
+ * more complicated. We subtract the current variance estimate from
+ * the abs value of the @delta and add 1/4 of that to the running
+ * total.  That's equivalent to 3/4 of the current variance
+ * estimate plus 1/4 of the abs of @delta.
  *
  * Note that the index points at the array entry containing the smoothed
  * mean value, and the variance is always in the following entry
@@ -49,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
 	s64 delta = sample - s->stats[index];
 	s->stats[index] += (delta >> 3);
 	index++;
-	s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
+	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
 }
 
 /**
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ee20ea42..cd85092 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -604,7 +604,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 	bd->bd_bh = NULL;
 	bd->bd_ops = &gfs2_revoke_lops;
 	sdp->sd_log_num_revoke++;
-	atomic_inc(&gl->gl_revokes);
+	if (atomic_inc_return(&gl->gl_revokes) == 1)
+		gfs2_glock_hold(gl);
 	set_bit(GLF_LFLUSH, &gl->gl_flags);
 	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
 }
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index f2567f9..8f99b39 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -662,8 +662,10 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
 		list_del_init(&bd->bd_list);
 		gl = bd->bd_gl;
-		atomic_dec(&gl->gl_revokes);
-		clear_bit(GLF_LFLUSH, &gl->gl_flags);
+		if (atomic_dec_return(&gl->gl_revokes) == 0) {
+			clear_bit(GLF_LFLUSH, &gl->gl_flags);
+			gfs2_glock_queue_put(gl);
+		}
 		kmem_cache_free(gfs2_bufdata_cachep, bd);
 	}
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b0eef00..5c12fed8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -426,9 +426,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			u32 hash;
 
 			index = page->index;
-			hash = hugetlb_fault_mutex_hash(h, current->mm,
-							&pseudo_vma,
-							mapping, index, 0);
+			hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			/*
@@ -625,8 +623,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		addr = index * hpage_size;
 
 		/* mutex taken here, fault path and hole punch */
-		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
-						index, addr);
+		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		/* See if already present in mapping to avoid alloc/free */
@@ -741,11 +738,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 					umode_t mode, dev_t dev)
 {
 	struct inode *inode;
-	struct resv_map *resv_map;
+	struct resv_map *resv_map = NULL;
 
-	resv_map = resv_map_alloc();
-	if (!resv_map)
-		return NULL;
+	/*
+	 * Reserve maps are only needed for inodes that can have associated
+	 * page allocations.
+	 */
+	if (S_ISREG(mode) || S_ISLNK(mode)) {
+		resv_map = resv_map_alloc();
+		if (!resv_map)
+			return NULL;
+	}
 
 	inode = new_inode(sb);
 	if (inode) {
@@ -780,8 +783,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 			break;
 		}
 		lockdep_annotate_inode_mutex_key(inode);
-	} else
-		kref_put(&resv_map->refs, resv_map_release);
+	} else {
+		if (resv_map)
+			kref_put(&resv_map->refs, resv_map_release);
+	}
 
 	return inode;
 }
diff --git a/fs/internal.h b/fs/internal.h
index 0e276fd..8d0ca52 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -78,9 +78,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
 
 extern void __init mnt_init(void);
 
-extern int __mnt_want_write(struct vfsmount *);
 extern int __mnt_want_write_file(struct file *);
-extern void __mnt_drop_write(struct vfsmount *);
 extern void __mnt_drop_write_file(struct file *);
 
 /*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 88f2a49..e9cf88f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1366,6 +1366,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
 	journal_superblock_t *sb = journal->j_superblock;
 	int ret;
 
+	/* Buffer got discarded which means block device got invalidated */
+	if (!buffer_mapped(bh))
+		return -EIO;
+
 	trace_jbd2_write_superblock(journal, write_flags);
 	if (!(journal->j_flags & JBD2_BARRIER))
 		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
@@ -2385,22 +2389,19 @@ static struct kmem_cache *jbd2_journal_head_cache;
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
 
-static int jbd2_journal_init_journal_head_cache(void)
+static int __init jbd2_journal_init_journal_head_cache(void)
 {
-	int retval;
-
-	J_ASSERT(jbd2_journal_head_cache == NULL);
+	J_ASSERT(!jbd2_journal_head_cache);
 	jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
 				sizeof(struct journal_head),
 				0,		/* offset */
 				SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
 				NULL);		/* ctor */
-	retval = 0;
 	if (!jbd2_journal_head_cache) {
-		retval = -ENOMEM;
 		printk(KERN_EMERG "JBD2: no memory for journal_head cache\n");
+		return -ENOMEM;
 	}
-	return retval;
+	return 0;
 }
 
 static void jbd2_journal_destroy_journal_head_cache(void)
@@ -2646,28 +2647,38 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
 
 struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
-static int __init jbd2_journal_init_handle_cache(void)
+static int __init jbd2_journal_init_inode_cache(void)
 {
-	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
-	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
-		return -ENOMEM;
-	}
+	J_ASSERT(!jbd2_inode_cache);
 	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
-	if (jbd2_inode_cache == NULL) {
-		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
-		kmem_cache_destroy(jbd2_handle_cache);
+	if (!jbd2_inode_cache) {
+		pr_emerg("JBD2: failed to create inode cache\n");
 		return -ENOMEM;
 	}
 	return 0;
 }
 
+static int __init jbd2_journal_init_handle_cache(void)
+{
+	J_ASSERT(!jbd2_handle_cache);
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
+	if (!jbd2_handle_cache) {
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void jbd2_journal_destroy_inode_cache(void)
+{
+	kmem_cache_destroy(jbd2_inode_cache);
+	jbd2_inode_cache = NULL;
+}
+
 static void jbd2_journal_destroy_handle_cache(void)
 {
 	kmem_cache_destroy(jbd2_handle_cache);
 	jbd2_handle_cache = NULL;
-	kmem_cache_destroy(jbd2_inode_cache);
-	jbd2_inode_cache = NULL;
 }
 
 /*
@@ -2678,21 +2689,27 @@ static int __init journal_init_caches(void)
 {
 	int ret;
 
-	ret = jbd2_journal_init_revoke_caches();
+	ret = jbd2_journal_init_revoke_record_cache();
+	if (ret == 0)
+		ret = jbd2_journal_init_revoke_table_cache();
 	if (ret == 0)
 		ret = jbd2_journal_init_journal_head_cache();
 	if (ret == 0)
 		ret = jbd2_journal_init_handle_cache();
 	if (ret == 0)
+		ret = jbd2_journal_init_inode_cache();
+	if (ret == 0)
 		ret = jbd2_journal_init_transaction_cache();
 	return ret;
 }
 
 static void jbd2_journal_destroy_caches(void)
 {
-	jbd2_journal_destroy_revoke_caches();
+	jbd2_journal_destroy_revoke_record_cache();
+	jbd2_journal_destroy_revoke_table_cache();
 	jbd2_journal_destroy_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
+	jbd2_journal_destroy_inode_cache();
 	jbd2_journal_destroy_transaction_cache();
 	jbd2_journal_destroy_slabs();
 }
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index a1143e5..69b9bc3 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -178,33 +178,41 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
 	return NULL;
 }
 
-void jbd2_journal_destroy_revoke_caches(void)
+void jbd2_journal_destroy_revoke_record_cache(void)
 {
 	kmem_cache_destroy(jbd2_revoke_record_cache);
 	jbd2_revoke_record_cache = NULL;
+}
+
+void jbd2_journal_destroy_revoke_table_cache(void)
+{
 	kmem_cache_destroy(jbd2_revoke_table_cache);
 	jbd2_revoke_table_cache = NULL;
 }
 
-int __init jbd2_journal_init_revoke_caches(void)
+int __init jbd2_journal_init_revoke_record_cache(void)
 {
 	J_ASSERT(!jbd2_revoke_record_cache);
-	J_ASSERT(!jbd2_revoke_table_cache);
-
 	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
 					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
-	if (!jbd2_revoke_record_cache)
-		goto record_cache_failure;
 
+	if (!jbd2_revoke_record_cache) {
+		pr_emerg("JBD2: failed to create revoke_record cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int __init jbd2_journal_init_revoke_table_cache(void)
+{
+	J_ASSERT(!jbd2_revoke_table_cache);
 	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
 					     SLAB_TEMPORARY);
-	if (!jbd2_revoke_table_cache)
-		goto table_cache_failure;
-	return 0;
-table_cache_failure:
-	jbd2_journal_destroy_revoke_caches();
-record_cache_failure:
+	if (!jbd2_revoke_table_cache) {
+		pr_emerg("JBD2: failed to create revoke_table cache\n");
 		return -ENOMEM;
+	}
+	return 0;
 }
 
 static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 914e725..e20a670 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -42,9 +42,11 @@ int __init jbd2_journal_init_transaction_cache(void)
 					0,
 					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
 					NULL);
-	if (transaction_cache)
-		return 0;
-	return -ENOMEM;
+	if (!transaction_cache) {
+		pr_emerg("JBD2: failed to create transaction cache\n");
+		return -ENOMEM;
+	}
+	return 0;
 }
 
 void jbd2_journal_destroy_transaction_cache(void)
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 7d8654a..f8fb89b 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -109,9 +109,9 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
 	return ret;
 }
 
-int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
+int jffs2_do_readpage_unlock(void *data, struct page *pg)
 {
-	int ret = jffs2_do_readpage_nolock(inode, pg);
+	int ret = jffs2_do_readpage_nolock(data, pg);
 	unlock_page(pg);
 	return ret;
 }
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index eab04ec..7fbe8a7 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -686,7 +686,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
 	struct page *pg;
 
 	pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
-			     (void *)jffs2_do_readpage_unlock, inode);
+			     jffs2_do_readpage_unlock, inode);
 	if (IS_ERR(pg))
 		return (void *)pg;
 
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index a2dbbb3..bd3d5f0d 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations;
 extern const struct inode_operations jffs2_file_inode_operations;
 extern const struct address_space_operations jffs2_file_address_operations;
 int jffs2_fsync(struct file *, loff_t, loff_t, int);
-int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
+int jffs2_do_readpage_unlock(void *data, struct page *pg);
 
 /* ioctl.c */
 long jffs2_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 389ea53..bccfc40b 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
 	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-	if (f->target) {
-		kfree(f->target);
-		f->target = NULL;
-	}
-
 	fds = f->dents;
 	while(fds) {
 		fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bb6ae38..05d892c 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
 static void jffs2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+	kfree(f->target);
+	kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 4ca0b5c..853a69e 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -650,11 +650,10 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
 	kn->id.generation = gen;
 
 	/*
-	 * set ino first. This barrier is paired with atomic_inc_not_zero in
+	 * set ino first. This RELEASE is paired with atomic_inc_not_zero in
 	 * kernfs_find_and_get_node_by_ino
 	 */
-	smp_mb__before_atomic();
-	atomic_set(&kn->count, 1);
+	atomic_set_release(&kn->count, 1);
 	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
 	RB_CLEAR_NODE(&kn->rb);
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 96d5f81..c092661 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -290,6 +290,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
 	struct nfs_client *clp;
 	const struct sockaddr *sap = data->addr;
 	struct nfs_net *nn = net_generic(data->net, nfs_net_id);
+	int error;
 
 again:
 	list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
@@ -302,9 +303,11 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
 		if (clp->cl_cons_state > NFS_CS_READY) {
 			refcount_inc(&clp->cl_count);
 			spin_unlock(&nn->nfs_client_lock);
-			nfs_wait_client_init_complete(clp);
+			error = nfs_wait_client_init_complete(clp);
 			nfs_put_client(clp);
 			spin_lock(&nn->nfs_client_lock);
+			if (error < 0)
+				return ERR_PTR(error);
 			goto again;
 		}
 
@@ -413,6 +416,8 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 		clp = nfs_match_client(cl_init);
 		if (clp) {
 			spin_unlock(&nn->nfs_client_lock);
+			if (IS_ERR(clp))
+				return clp;
 			if (new)
 				new->rpc_ops->free_client(new);
 			return nfs_found_client(cl_init, clp);
@@ -459,7 +464,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
 	case XPRT_TRANSPORT_RDMA:
 		if (retrans == NFS_UNSPEC_RETRANS)
 			to->to_retries = NFS_DEF_TCP_RETRANS;
-		if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+		if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
 			to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
 		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
 			to->to_initval = NFS_MAX_TCP_TIMEOUT;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e337a16..a4ea9ab 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -664,9 +664,9 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
  * We only need to convert from xdr once so future lookups are much simpler
  */
 static
-int nfs_readdir_filler(struct file *file, struct page* page)
+int nfs_readdir_filler(void *data, struct page* page)
 {
-	nfs_readdir_descriptor_t *desc = (nfs_readdir_descriptor_t *)file;
+	nfs_readdir_descriptor_t *desc = data;
 	struct inode	*inode = file_inode(desc->file);
 	int ret;
 
@@ -698,8 +698,8 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
 static
 struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
 {
-	return read_cache_page(desc->file->f_mapping,
-			desc->page_index, nfs_readdir_filler, desc);
+	return read_cache_page(desc->file->f_mapping, desc->page_index,
+			nfs_readdir_filler, desc);
 }
 
 /*
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index d175724..2478a69 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino,
 	status = filelayout_check_deviceid(lo, fl, gfp_flags);
 	if (status) {
 		pnfs_put_lseg(lseg);
-		lseg = ERR_PTR(status);
+		lseg = NULL;
 	}
 out:
 	return lseg;
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index fed06fd..94f98e1 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
 	};
 	ssize_t err, err2;
 
-	if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
-		return -EOPNOTSUPP;
-
 	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
 	if (IS_ERR(src_lock))
 		return PTR_ERR(src_lock);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 4288a6e..1348585 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,15 +133,11 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
 				    struct file *file_out, loff_t pos_out,
 				    size_t count, unsigned int flags)
 {
-	ssize_t ret;
-
+	if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
+		return -EOPNOTSUPP;
 	if (file_inode(file_in) == file_inode(file_out))
-		return -EINVAL;
-retry:
-	ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
-	if (ret == -EAGAIN)
-		goto retry;
-	return ret;
+		return -EOPNOTSUPP;
+	return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
 }
 
 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index d2f645d..3ba2087 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
 		/* Sustain the lease, even if it's empty.  If the clientid4
 		 * goes stale it's of no use for trunking discovery. */
 		nfs4_schedule_state_renewal(*result);
+
+		/* If the client state need to recover, do it. */
+		if (clp->cl_state)
+			nfs4_schedule_state_manager(clp);
 	}
 out:
 	return status;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6b666d1..6df9b85 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2052,7 +2052,8 @@ static int nfs23_validate_mount_data(void *options,
 		memcpy(sap, &data->addr, sizeof(data->addr));
 		args->nfs_server.addrlen = sizeof(data->addr);
 		args->nfs_server.port = ntohs(data->addr.sin_port);
-		if (!nfs_verify_server_address(sap))
+		if (sap->sa_family != AF_INET ||
+		    !nfs_verify_server_address(sap))
 			goto out_no_address;
 
 		if (!(data->flags & NFS_MOUNT_TCP))
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 220d5ba..25ba299 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -26,9 +26,9 @@
  * and straight-forward than readdir caching.
  */
 
-static int nfs_symlink_filler(struct file *file, struct page *page)
+static int nfs_symlink_filler(void *data, struct page *page)
 {
-	struct inode *inode = (struct inode *)file;
+	struct inode *inode = data;
 	int error;
 
 	error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
@@ -66,8 +66,8 @@ static const char *nfs_get_link(struct dentry *dentry,
 		err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
 		if (err)
 			return err;
-		page = read_cache_page(&inode->i_data, 0,
-					nfs_symlink_filler, inode);
+		page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
+				inode);
 		if (IS_ERR(page))
 			return ERR_CAST(page);
 	}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 601bf33..ebbb028 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -926,8 +926,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	if (minorversion) {
-		if (!nfsd41_cb_get_slot(clp, task))
+		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
 			return;
+		cb->cb_holds_slot = true;
 	}
 	rpc_call_start(task);
 }
@@ -954,6 +955,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 		return true;
 	}
 
+	if (!cb->cb_holds_slot)
+		goto need_restart;
+
 	switch (cb->cb_seq_status) {
 	case 0:
 		/*
@@ -992,6 +996,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 			cb->cb_seq_status);
 	}
 
+	cb->cb_holds_slot = false;
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	rpc_wake_up_next(&clp->cl_cb_waitq);
 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1199,6 +1204,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	cb->cb_need_restart = false;
+	cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 0b15dac..0f07ad6 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -70,6 +70,7 @@ struct nfsd4_callback {
 	int cb_seq_status;
 	int cb_status;
 	bool cb_need_restart;
+	bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 60702d6..30d150a4 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -85,13 +85,12 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 	inode->i_fop = &ns_file_operations;
 	inode->i_private = ns;
 
-	dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
+	dentry = d_alloc_anon(mnt->mnt_sb);
 	if (!dentry) {
 		iput(inode);
 		return ERR_PTR(-ENOMEM);
 	}
 	d_instantiate(dentry, inode);
-	dentry->d_flags |= DCACHE_RCUACCESS;
 	dentry->d_fsdata = (void *)ns->ops;
 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
 	if (d) {
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 4bf8d58..af2888d 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
 	u64 blkno;
 	struct dentry *parent;
 	struct inode *dir = d_inode(child);
+	int set;
 
 	trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
 			       (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
+	status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
+	if (status < 0) {
+		mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail;
+	}
+
 	status = ocfs2_inode_lock(dir, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
 		parent = ERR_PTR(status);
-		goto bail;
+		goto unlock_nfs_sync;
 	}
 
 	status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
@@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
 		goto bail_unlock;
 	}
 
+	status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
+	if (status < 0) {
+		if (status == -EINVAL) {
+			status = -ESTALE;
+		} else
+			mlog(ML_ERROR, "test inode bit failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
+	trace_ocfs2_get_dentry_test_bit(status, set);
+	if (!set) {
+		status = -ESTALE;
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
 
+unlock_nfs_sync:
+	ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
+
 bail:
 	trace_ocfs2_get_parent_end(parent);
 
diff --git a/fs/open.c b/fs/open.c
index 6c94ea0..4dbbacc 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1227,3 +1227,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
 }
 
 EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have a notion of position
+ * (file.f_pos is always 0). Contrary to file descriptors of other regular
+ * files, .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it could be
+ * directly used as file_operations.open .
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+	filp->f_mode |= FMODE_STREAM;
+	return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index c4a9892..076fd2d 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -878,14 +878,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
 	return true;
 }
 
-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
+int ovl_maybe_copy_up(struct dentry *dentry, int flags)
 {
 	int err = 0;
 
-	if (ovl_open_need_copy_up(dentry, file_flags)) {
+	if (ovl_open_need_copy_up(dentry, flags)) {
 		err = ovl_want_write(dentry);
 		if (!err) {
-			err = ovl_copy_up_flags(dentry, file_flags);
+			err = ovl_copy_up_flags(dentry, flags);
 			ovl_drop_write(dentry);
 		}
 	}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 2e4af5f..d0a8920 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -260,7 +260,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
 		 * hashed directory inode aliases.
 		 */
 		inode = ovl_get_inode(dentry->d_sb, &oip);
-		if (WARN_ON(IS_ERR(inode)))
+		if (IS_ERR(inode))
 			return PTR_ERR(inode);
 	} else {
 		WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index da7d785..179bcb7 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -116,11 +116,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
 
 static int ovl_open(struct inode *inode, struct file *file)
 {
-	struct dentry *dentry = file_dentry(file);
 	struct file *realfile;
 	int err;
 
-	err = ovl_open_maybe_copy_up(dentry, file->f_flags);
+	err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
 	if (err)
 		return err;
 
@@ -390,7 +389,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (ret)
 			return ret;
 
-		ret = ovl_copy_up_with_data(file_dentry(file));
+		ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
 		if (!ret) {
 			ret = ovl_real_ioctl(file, cmd, arg);
 
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b3c6126..97ed72a 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -832,7 +832,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
 	int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
 	bool is_dir, metacopy = false;
 	unsigned long ino = 0;
-	int err = -ENOMEM;
+	int err = oip->newinode ? -EEXIST : -ENOMEM;
 
 	if (!realinode)
 		realinode = d_inode(lowerdentry);
@@ -917,6 +917,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
 	return inode;
 
 out_err:
+	pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
 	inode = ERR_PTR(err);
 	goto out;
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 56e3c7a..82283bd 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -412,7 +412,7 @@ extern const struct file_operations ovl_file_operations;
 int ovl_copy_up(struct dentry *dentry);
 int ovl_copy_up_with_data(struct dentry *dentry);
 int ovl_copy_up_flags(struct dentry *dentry, int flags);
-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
+int ovl_maybe_copy_up(struct dentry *dentry, int flags);
 int ovl_copy_xattr(struct dentry *old, struct dentry *new);
 int ovl_set_attr(struct dentry *upper, struct kstat *stat);
 struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
diff --git a/fs/pipe.c b/fs/pipe.c
index c51750e..2a297bc 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -189,9 +189,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
  *	in the tee() system call, when we duplicate the buffers in one
  *	pipe into another.
  */
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-	get_page(buf->page);
+	return try_get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8b145d8..b011d70 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2741,6 +2741,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
 		rcu_read_unlock();
 		return -EACCES;
 	}
+	/* Prevent changes to overridden credentials. */
+	if (current_cred() != current_real_cred()) {
+		rcu_read_unlock();
+		return -EBUSY;
+	}
 	rcu_read_unlock();
 
 	if (count > PAGE_SIZE)
@@ -2948,6 +2953,116 @@ static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
 }
 #endif /* CONFIG_TASK_IO_ACCOUNTING */
 
+static ssize_t proc_sched_task_boost_read(struct file *file,
+			   char __user *buf, size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	int sched_boost;
+	size_t len;
+
+	if (!task)
+		return -ESRCH;
+	sched_boost = task->boost;
+	put_task_struct(task);
+	len = snprintf(buffer, sizeof(buffer), "%d\n", sched_boost);
+	return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t proc_sched_task_boost_write(struct file *file,
+		   const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	int sched_boost;
+	int err;
+
+	if (!task)
+		return -ESRCH;
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &sched_boost);
+	if (err)
+		goto out;
+	if (sched_boost < 0 || sched_boost > 2) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	task->boost = sched_boost;
+	if (sched_boost == 0)
+		task->boost_period = 0;
+out:
+	put_task_struct(task);
+	return err < 0 ? err : count;
+}
+
+static ssize_t proc_sched_task_boost_period_read(struct file *file,
+			   char __user *buf, size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	u64 sched_boost_period_ms = 0;
+	size_t len;
+
+	if (!task)
+		return -ESRCH;
+	sched_boost_period_ms = div64_ul(task->boost_period, 1000000UL);
+	put_task_struct(task);
+	len = snprintf(buffer, sizeof(buffer), "%llu\n", sched_boost_period_ms);
+	return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t proc_sched_task_boost_period_write(struct file *file,
+		   const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	unsigned int sched_boost_period;
+	int err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
+	if (err)
+		goto out;
+	if (task->boost == 0 && sched_boost_period) {
+		/* setting boost period without boost is invalid */
+		err = -EINVAL;
+		goto out;
+	}
+
+	task->boost_period = (u64)sched_boost_period * 1000 * 1000;
+	task->boost_expires = sched_clock() + task->boost_period;
+out:
+	put_task_struct(task);
+	return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_task_boost_enabled_operations = {
+	.read       = proc_sched_task_boost_read,
+	.write      = proc_sched_task_boost_write,
+	.llseek     = generic_file_llseek,
+};
+
+static const struct file_operations proc_task_boost_period_operations = {
+	.read		= proc_sched_task_boost_period_read,
+	.write		= proc_sched_task_boost_period_write,
+	.llseek		= generic_file_llseek,
+};
+
 #ifdef CONFIG_USER_NS
 static int proc_id_map_open(struct inode *inode, struct file *file,
 	const struct seq_operations *seq_ops)
@@ -3126,6 +3241,8 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_SCHED_WALT
 	REG("sched_init_task_load", 00644, proc_pid_sched_init_task_load_operations),
 	REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
+	REG("sched_boost", 0666,  proc_task_boost_enabled_operations),
+	REG("sched_boost_period_ms", 0666, proc_task_boost_period_operations),
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d653907..7325baa 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
 	if (--header->nreg)
 		return;
 
-	if (parent)
+	if (parent) {
 		put_links(header);
-	start_unregistering(header);
+		start_unregistering(header);
+	}
+
 	if (!--header->count)
 		kfree_rcu(header, rcu);
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6d41c70..fac4a09 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1202,6 +1202,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					count = -EINTR;
 					goto out_mm;
 				}
+				/*
+				 * Avoid to modify vma->vm_flags
+				 * without locked ops while the
+				 * coredump reads the vm_flags.
+				 */
+				if (!mmget_still_valid(mm)) {
+					/*
+					 * Silently return "count"
+					 * like if get_task_mm()
+					 * failed. FIXME: should this
+					 * function have returned
+					 * -ESRCH if get_task_mm()
+					 * failed like if
+					 * get_proc_task() fails?
+					 */
+					up_write(&mm->mmap_sem);
+					goto out_mm;
+				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
 					vm_write_begin(vma);
 					WRITE_ONCE(vma->vm_flags,
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 5fcb845..8cf2218 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
 	.kill_sb	= pstore_kill_sb,
 };
 
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
 {
 	int err;
 
-	pstore_choose_compression();
-
 	/* Create a convenient mount point for people to access pstore */
 	err = sysfs_create_mount_point(fs_kobj, "pstore");
 	if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
 out:
 	return err;
 }
-module_init(init_pstore_fs)
 
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
 {
 	unregister_filesystem(&pstore_fs_type);
 	sysfs_remove_mount_point(fs_kobj, "pstore");
 }
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index fb767e2..7062ea4 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -37,7 +37,8 @@ extern bool	pstore_is_mounted(void);
 extern void	pstore_record_init(struct pstore_record *record,
 				   struct pstore_info *psi);
 
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init	pstore_init_fs(void);
+void __exit	pstore_exit_fs(void);
 
 #endif
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 15e99d5a6..b821054 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
 
 static void allocate_buf_for_compression(void)
 {
+	struct crypto_comp *ctx;
+	int size;
+	char *buf;
+
+	/* Skip if not built-in or compression backend not selected yet. */
 	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
 		return;
 
+	/* Skip if no pstore backend yet or compression init already done. */
+	if (!psinfo || tfm)
+		return;
+
 	if (!crypto_has_comp(zbackend->name, 0, 0)) {
-		pr_err("No %s compression\n", zbackend->name);
+		pr_err("Unknown compression: %s\n", zbackend->name);
 		return;
 	}
 
-	big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
-	if (big_oops_buf_sz <= 0)
-		return;
-
-	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
-	if (!big_oops_buf) {
-		pr_err("allocate compression buffer error!\n");
+	size = zbackend->zbufsize(psinfo->bufsize);
+	if (size <= 0) {
+		pr_err("Invalid compression size for %s: %d\n",
+		       zbackend->name, size);
 		return;
 	}
 
-	tfm = crypto_alloc_comp(zbackend->name, 0, 0);
-	if (IS_ERR_OR_NULL(tfm)) {
-		kfree(big_oops_buf);
-		big_oops_buf = NULL;
-		pr_err("crypto_alloc_comp() failed!\n");
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Failed %d byte compression buffer allocation for: %s\n",
+		       size, zbackend->name);
 		return;
 	}
+
+	ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+	if (IS_ERR_OR_NULL(ctx)) {
+		kfree(buf);
+		pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+		       PTR_ERR(ctx));
+		return;
+	}
+
+	/* A non-NULL big_oops_buf indicates compression is available. */
+	tfm = ctx;
+	big_oops_buf_sz = size;
+	big_oops_buf = buf;
+
+	pr_info("Using compression: %s\n", zbackend->name);
 }
 
 static void free_buf_for_compression(void)
 {
-	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
 		crypto_free_comp(tfm);
 	kfree(big_oops_buf);
 	big_oops_buf = NULL;
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
 	for (step = zbackends; step->name; step++) {
 		if (!strcmp(compress, step->name)) {
 			zbackend = step;
-			pr_info("using %s compression\n", zbackend->name);
 			return;
 		}
 	}
 }
 
+static int __init pstore_init(void)
+{
+	int ret;
+
+	pstore_choose_compression();
+
+	/*
+	 * Check if any pstore backends registered earlier but did not
+	 * initialize compression because crypto was not ready. If so,
+	 * initialize compression now.
+	 */
+	allocate_buf_for_compression();
+
+	ret = pstore_init_fs();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+	pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
 module_param(compress, charp, 0444);
 MODULE_PARM_DESC(compress, "Pstore compression to use");
 
 module_param(backend, charp, 0444);
 MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index eb67bb7..44ed6b1 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -956,7 +956,7 @@ static int __init ramoops_init(void)
 
 	return ret;
 }
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
diff --git a/fs/read_write.c b/fs/read_write.c
index 3c5c731..40b9b07 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -564,12 +564,13 @@ EXPORT_SYMBOL(vfs_write);
 
 static inline loff_t file_pos_read(struct file *file)
 {
-	return file->f_pos;
+	return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
 }
 
 static inline void file_pos_write(struct file *file, loff_t pos)
 {
-	file->f_pos = pos;
+	if ((file->f_mode & FMODE_STREAM) == 0)
+		file->f_pos = pos;
 }
 
 ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 5e0c39a..1240ef2f 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -313,6 +313,8 @@ static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
 		seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
 	if (opts->nocache)
 		seq_printf(m, ",nocache");
+	if (opts->unshared_obb)
+		seq_printf(m, ",unshared_obb");
 
 	return 0;
 };
diff --git a/fs/splice.c b/fs/splice.c
index 29e92b5..485e409 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
 	.get = generic_pipe_buf_get,
 };
 
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
-				    struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+			     struct pipe_buffer *buf)
 {
 	return 1;
 }
@@ -1584,7 +1584,11 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
 			 * Get a reference to this pipe buffer,
 			 * so we can copy the contents over.
 			 */
-			pipe_buf_get(ipipe, ibuf);
+			if (!pipe_buf_get(ipipe, ibuf)) {
+				if (ret == 0)
+					ret = -EFAULT;
+				break;
+			}
 			*obuf = *ibuf;
 
 			/*
@@ -1658,7 +1662,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
 		 * Get a reference to this pipe buffer,
 		 * so we can copy the contents over.
 		 */
-		pipe_buf_get(ipipe, ibuf);
+		if (!pipe_buf_get(ipipe, ibuf)) {
+			if (ret == 0)
+				ret = -EFAULT;
+			break;
+		}
 
 		obuf = opipe->bufs + nbuf;
 		*obuf = *ibuf;
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index bbc7854..b9bc6e4 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -7,6 +7,7 @@
 	select CRYPTO if UBIFS_FS_ZLIB
 	select CRYPTO_LZO if UBIFS_FS_LZO
 	select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+	select UBIFS_FS_XATTR if FS_ENCRYPTION
 	depends on MTD_UBI
 	help
 	  UBIFS is a file system for flash devices which works on top of UBI.
@@ -62,17 +63,6 @@
 
 	  If unsure, say Y.
 
-config UBIFS_FS_ENCRYPTION
-	bool "UBIFS Encryption"
-	depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
-	select FS_ENCRYPTION
-	default n
-	help
-	  Enable encryption of UBIFS files and directories. This
-	  feature is similar to ecryptfs, but it is more memory
-	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
-
 config UBIFS_FS_SECURITY
 	bool "UBIFS Security Labels"
 	depends on UBIFS_FS && UBIFS_FS_XATTR
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile
index 6197d7e..079b13f 100644
--- a/fs/ubifs/Makefile
+++ b/fs/ubifs/Makefile
@@ -6,5 +6,5 @@
 ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
 ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o
 ubifs-y += misc.o
-ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
+ubifs-$(CONFIG_FS_ENCRYPTION) += crypto.o
 ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 0164bcc..0f9c362 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -185,7 +185,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		return err;
 	}
 	case FS_IOC_SET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 		err = ubifs_enable_encryption(c);
@@ -198,7 +198,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 #endif
 	}
 	case FS_IOC_GET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		return fscrypt_ioctl_get_policy(file, (void __user *)arg);
 #else
 		return -EOPNOTSUPP;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index bf17f58..3dffa59 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -647,7 +647,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
 		goto out;
 	}
 
-#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+#ifndef CONFIG_FS_ENCRYPTION
 	if (c->encrypted) {
 		ubifs_err(c, "file system contains encrypted files but UBIFS"
 			     " was built without crypto support.");
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index fec62e9..7c98ccc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2087,7 +2087,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_UBIFS_FS_XATTR
 	sb->s_xattr = ubifs_xattr_handlers;
 #endif
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	sb->s_cop = &ubifs_crypt_operations;
 #endif
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 4368cde..4d46bb8 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -40,7 +40,6 @@
 #include <linux/xattr.h>
 #include <linux/random.h>
 
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 #include "ubifs-media.h"
@@ -139,7 +138,7 @@
  */
 #define WORST_COMPR_FACTOR 2
 
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 #define UBIFS_CIPHER_BLOCK_SIZE FS_CRYPTO_BLOCK_SIZE
 #else
 #define UBIFS_CIPHER_BLOCK_SIZE 0
@@ -1829,7 +1828,7 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
 #include "misc.h"
 #include "key.h"
 
-#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+#ifndef CONFIG_FS_ENCRYPTION
 static inline int ubifs_encrypt(const struct inode *inode,
 				struct ubifs_data_node *dn,
 				unsigned int in_len, unsigned int *out_len,
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 1fd3011..7fd4802 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
 	case UFS_UID_44BSD:
 		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
 	case UFS_UID_EFT:
-		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
+		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
 			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
 		/* Fall through */
 	default:
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ab5568f..72ebc38 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -630,6 +630,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 
 		/* the various vma->vm_userfaultfd_ctx still points to it */
 		down_write(&mm->mmap_sem);
+		/* no task can run (and in turn coredump) yet */
+		VM_WARN_ON(!mmget_still_valid(mm));
 		for (vma = mm->mmap; vma; vma = vma->vm_next)
 			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
 				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
@@ -887,6 +889,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	 * taking the mmap_sem for writing.
 	 */
 	down_write(&mm->mmap_sem);
+	if (!mmget_still_valid(mm))
+		goto skip_mm;
 	prev = NULL;
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		cond_resched();
@@ -912,6 +916,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		vm_write_end(vma);
 	}
+skip_mm:
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 wakeup:
@@ -1340,6 +1345,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		goto out;
 
 	down_write(&mm->mmap_sem);
+	if (!mmget_still_valid(mm))
+		goto out_unlock;
 	vma = find_vma_prev(mm, start, &prev);
 	if (!vma)
 		goto out_unlock;
@@ -1530,6 +1537,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		goto out;
 
 	down_write(&mm->mmap_sem);
+	if (!mmget_still_valid(mm))
+		goto out_unlock;
 	vma = find_vma_prev(mm, start, &prev);
 	if (!vma)
 		goto out_unlock;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9393da9..8667538 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -66,9 +66,9 @@
  * RODATA_MAIN is not used because existing code already defines .rodata.x
  * sections to be brought in with rodata.
  */
-#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi
+#define TEXT_CFI_MAIN .text.[0-9a-zA-Z_]*.cfi
 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
@@ -493,8 +493,9 @@
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
 		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
-		*(.text..refcount)					\
 		*(TEXT_CFI_MAIN) 					\
+		*(.text..refcount)					\
+		*(.text..ftrace)					\
 		*(.ref.text)						\
 	MEM_KEEP(init.text*)						\
 	MEM_KEEP(exit.text*)						\
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
index a9867cf..7932623 100644
--- a/include/crypto/ice.h
+++ b/include/crypto/ice.h
@@ -7,6 +7,7 @@
 #define _QCOM_INLINE_CRYPTO_ENGINE_H_
 
 #include <linux/platform_device.h>
+#include <linux/cdev.h>
 
 struct request;
 
@@ -27,6 +28,48 @@ enum ice_crpto_key_mode {
 	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
 };
 
+#define QCOM_ICE_TYPE_NAME_LEN 8
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	int saved_vote;
+	bool is_max_bw_needed;
+	struct device_attribute max_bus_bw;
+};
+
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+	struct list_head	list;
+	struct device		*pdev;
+	struct cdev		cdev;
+	dev_t			device_no;
+	struct class		*driver_class;
+	void __iomem		*mmio;
+	struct resource		*res;
+	int			irq;
+	bool			is_ice_enabled;
+	bool			is_ice_disable_fuse_blown;
+	ice_error_cb		error_cb;
+	void			*host_controller_data; /* UFS/EMMC/other? */
+	struct list_head	clk_list_head;
+	u32			ice_hw_version;
+	bool			is_ice_clk_available;
+	char			ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+	struct regulator	*reg;
+	bool			is_regulator_available;
+	struct qcom_ice_bus_vote bus_vote;
+	ktime_t			ice_reset_start_time;
+	ktime_t			ice_reset_complete_time;
+	void             *key_table;
+};
+
 struct ice_crypto_setting {
 	enum ice_crpto_key_size		key_size;
 	enum ice_cryto_algo_mode	algo_mode;
@@ -54,20 +97,33 @@ enum ice_crypto_data_unit {
 	ICE_CRYPTO_DATA_UNIT_64_KB          = 7,
 };
 
-typedef void (*ice_error_cb)(void *, u32 error);
-
 struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
 struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
 
 #ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int enable_ice_setup(struct ice_device *ice_dev);
+int disable_ice_setup(struct ice_device *ice_dev);
 int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
 void qcom_ice_set_fde_flag(int flag);
+struct list_head *get_ice_dev_list(void);
 #else
+static inline int enable_ice_setup(struct ice_device *ice_dev)
+{
+	return 0;
+}
+static inline int disable_ice_setup(struct ice_device *ice_dev)
+{
+	return 0;
+}
 static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
 {
 	return 0;
 }
 static inline void qcom_ice_set_fde_flag(int flag) {}
+static inline struct list_head *get_ice_dev_list(void)
+{
+	return NULL;
+}
 #endif
 
 struct qcom_ice_variant_ops {
@@ -80,7 +136,8 @@ struct qcom_ice_variant_ops {
 	int	(*config_start)(struct platform_device *device_start,
 			struct request *req, struct ice_data_setting *setting,
 			bool start);
-	int	(*config_end)(struct request *req);
+	int	(*config_end)(struct platform_device *pdev,
+			struct request *req);
 	int	(*status)(struct platform_device *device_status);
 	void	(*debug)(struct platform_device *device_debug);
 };
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 354cd6f..a6853c6 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -39,6 +39,7 @@ struct drm_encoder;
 struct drm_property;
 struct drm_property_blob;
 struct drm_printer;
+struct drm_panel;
 struct edid;
 
 enum drm_connector_force {
@@ -205,6 +206,40 @@ enum drm_panel_orientation {
 	DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+/*
+ * This is a consolidated colorimetry list supported by HDMI and
+ * DP protocol standard. The respective connectors will register
+ * a property with the subset of this list (supported by that
+ * respective protocol). Userspace will set the colorspace through
+ * a colorspace property which will be created and exposed to
+ * userspace.
+ */
+
+/* For Default case, driver will set the colorspace */
+#define DRM_MODE_COLORIMETRY_DEFAULT			0
+/* CEA 861 Normal Colorimetry options */
+#define DRM_MODE_COLORIMETRY_NO_DATA			0
+#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC		1
+#define DRM_MODE_COLORIMETRY_BT709_YCC			2
+/* CEA 861 Extended Colorimetry Options */
+#define DRM_MODE_COLORIMETRY_XVYCC_601			3
+#define DRM_MODE_COLORIMETRY_XVYCC_709			4
+#define DRM_MODE_COLORIMETRY_SYCC_601			5
+#define DRM_MODE_COLORIMETRY_OPYCC_601			6
+#define DRM_MODE_COLORIMETRY_OPRGB			7
+#define DRM_MODE_COLORIMETRY_BT2020_CYCC		8
+#define DRM_MODE_COLORIMETRY_BT2020_RGB			9
+#define DRM_MODE_COLORIMETRY_BT2020_YCC			10
+/* Additional Colorimetry extension added as part of CTA 861.G */
+#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65		11
+#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER		12
+/* DP MSA Colorimetry Options */
+#define DRM_MODE_DP_COLORIMETRY_BT601_YCC		13
+#define DRM_MODE_DP_COLORIMETRY_BT709_YCC		14
+#define DRM_MODE_DP_COLORIMETRY_SRGB			15
+#define DRM_MODE_DP_COLORIMETRY_RGB_WIDE_GAMUT	16
+#define DRM_MODE_DP_COLORIMETRY_SCRGB			17
+
 /**
  * struct drm_display_info - runtime data about the connected sink
  *
@@ -449,6 +484,13 @@ struct drm_connector_state {
 	unsigned int content_protection;
 
 	/**
+	 * @colorspace: State variable for Connector property to request
+	 * colorspace change on Sink. This is most commonly used to switch
+	 * to wider color gamuts like BT2020.
+	 */
+	u32 colorspace;
+
+	/**
 	 * @writeback_job: Writeback job for writeback connectors
 	 *
 	 * Holds the framebuffer and out-fence for a writeback connector. As
@@ -916,6 +958,12 @@ struct drm_connector {
 	struct drm_property *content_protection_property;
 
 	/**
+	 * @colorspace_property: Connector property to set the suitable
+	 * colorspace supported by the sink.
+	 */
+	struct drm_property *colorspace_property;
+
+	/**
 	 * @path_blob_ptr:
 	 *
 	 * DRM blob property data for the DP MST path property. This should only
@@ -1011,6 +1059,7 @@ struct drm_connector {
 	 * @pt_scan_info: PT scan info obtained from the VCDB of EDID
 	 * @it_scan_info: IT scan info obtained from the VCDB of EDID
 	 * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+	 * @color_enc_fmt: Colorimetry encoding formats of sink
 	 * @hdr_eotf: Electro optical transfer function obtained from HDR block
 	 * @hdr_metadata_type_one: Metadata type one obtained from HDR block
 	 * @hdr_max_luminance: desired max luminance obtained from HDR block
@@ -1029,6 +1078,7 @@ struct drm_connector {
 	u8 pt_scan_info;
 	u8 it_scan_info;
 	u8 ce_scan_info;
+	u32 color_enc_fmt;
 	u32 hdr_eotf;
 	bool hdr_metadata_type_one;
 	u32 hdr_max_luminance;
@@ -1121,6 +1171,12 @@ struct drm_connector {
 	 * &drm_mode_config.connector_free_work.
 	 */
 	struct llist_node free_node;
+	/**
+	 * @panel:
+	 *
+	 * Can find the panel which connected to drm_connector.
+	 */
+	struct drm_panel *panel;
 };
 
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -1231,6 +1287,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 int drm_connector_attach_content_protection_property(
 		struct drm_connector *connector);
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_colorspace_property(struct drm_connector *connector);
 int drm_mode_create_content_type_property(struct drm_device *dev);
 void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
 					 const struct drm_connector_state *conn_state);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index a72efa0..a0fa620 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -27,6 +27,7 @@
 #include <drm/drm_atomic.h>
 
 struct drm_dp_mst_branch;
+struct drm_dp_mst_port;
 
 /**
  * struct drm_dp_vcpi - Virtual Channel Payload Identifier
@@ -42,6 +43,18 @@ struct drm_dp_vcpi {
 	int num_slots;
 };
 
+struct drm_dp_mst_dsc_dpcd_cache {
+	bool valid;
+	bool use_parent_dpcd;
+	u8 dsc_dpcd[16];
+};
+
+struct drm_dp_mst_dsc_info {
+	bool dsc_support;
+	struct drm_dp_mst_port *dsc_port;
+	struct drm_dp_mst_dsc_dpcd_cache dsc_dpcd_cache;
+};
+
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
@@ -62,6 +75,8 @@ struct drm_dp_vcpi {
  * @vcpi: Virtual Channel Payload info for this port.
  * @connector: DRM connector this port is connected to.
  * @mgr: topology manager this port lives under.
+ * @fec_capability: Tracks full path fec capability.
+ * @dsc_info: stores dpcd and configuration information.
  *
  * This structure represents an MST port endpoint on a device somewhere
  * in the MST topology.
@@ -98,6 +113,16 @@ struct drm_dp_mst_port {
 	 * audio-capable.
 	 */
 	bool has_audio;
+	/**
+	 * @fec_capability: Tracks full path fec capability as reported by
+	 * enum path resources.
+	 */
+	bool fec_capability;
+	/**
+	 * @dsc_info: stores dpcd and configuration information for the mst
+	 * port where dsc decoding will be enabled.
+	 */
+	struct drm_dp_mst_dsc_info dsc_info;
 };
 
 /**
@@ -291,6 +316,7 @@ struct drm_dp_port_number_req {
 
 struct drm_dp_enum_path_resources_ack_reply {
 	u8 port_number;
+	bool fec_capability;
 	u16 full_payload_bw_number;
 	u16 avail_payload_bw_number;
 };
@@ -588,6 +614,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 
 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_mst_port *port);
+
+bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port);
+
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
@@ -634,6 +664,14 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port, bool power_up);
 
+int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port,
+		struct drm_dp_mst_dsc_info *dsc_info);
+
+int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port,
+		struct drm_dp_mst_dsc_info *dsc_info);
+
 int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 			   struct drm_dp_mst_port *port,
 			   int offset, int size, u8 *bytes);
@@ -642,4 +680,8 @@ int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port,
 				 int offset, int size, u8 *bytes);
 
+int drm_dp_mst_get_max_sdp_streams_supported(
+		struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port);
+
 #endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8c3005b..ece64a1 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -221,6 +221,16 @@ struct detailed_timing {
 				    DRM_EDID_YCBCR420_DC_36 | \
 				    DRM_EDID_YCBCR420_DC_30)
 
+#define DRM_EDID_CLRMETRY_xvYCC_601   (1 << 0)
+#define DRM_EDID_CLRMETRY_xvYCC_709   (1 << 1)
+#define DRM_EDID_CLRMETRY_sYCC_601    (1 << 2)
+#define DRM_EDID_CLRMETRY_ADBYCC_601  (1 << 3)
+#define DRM_EDID_CLRMETRY_ADB_RGB     (1 << 4)
+#define DRM_EDID_CLRMETRY_BT2020_CYCC (1 << 5)
+#define DRM_EDID_CLRMETRY_BT2020_YCC  (1 << 6)
+#define DRM_EDID_CLRMETRY_BT2020_RGB  (1 << 7)
+#define DRM_EDID_CLRMETRY_DCI_P3      (1 << 15)
+
 /* ELD Header Block */
 #define DRM_ELD_HEADER_BLOCK_SIZE	4
 
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 7778147..7f48b7f 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -26,6 +26,23 @@
 
 #include <linux/errno.h>
 #include <linux/list.h>
+#include <linux/notifier.h>
+
+/* A hardware display blank change occurred */
+#define DRM_PANEL_EVENT_BLANK		0x01
+/* A hardware display blank early change occurred */
+#define DRM_PANEL_EARLY_EVENT_BLANK	0x02
+
+enum {
+	/* panel: power on */
+	DRM_PANEL_BLANK_UNBLANK,
+	/* panel: power off */
+	DRM_PANEL_BLANK_POWERDOWN,
+};
+
+struct drm_panel_notifier {
+	void *data;
+};
 
 struct device_node;
 struct drm_connector;
@@ -84,6 +101,7 @@ struct drm_panel_funcs {
  * @dev: parent device of the panel
  * @funcs: operations that can be performed on the panel
  * @list: panel entry in registry
+ * @nh: panel notifier list head
  */
 struct drm_panel {
 	struct drm_device *drm;
@@ -93,6 +111,7 @@ struct drm_panel {
 	const struct drm_panel_funcs *funcs;
 
 	struct list_head list;
+	struct blocking_notifier_head nh;
 };
 
 /**
@@ -194,6 +213,13 @@ void drm_panel_remove(struct drm_panel *panel);
 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
 int drm_panel_detach(struct drm_panel *panel);
 
+int drm_panel_notifier_register(struct drm_panel *panel,
+	struct notifier_block *nb);
+int drm_panel_notifier_unregister(struct drm_panel *panel,
+	struct notifier_block *nb);
+int drm_panel_notifier_call_chain(struct drm_panel *panel,
+	unsigned long val, void *v);
+
 #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
 struct drm_panel *of_drm_find_panel(const struct device_node *np);
 #else
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index b8ba588..bcc98bd 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -42,7 +42,7 @@ struct mipi_dbi {
 	struct spi_device *spi;
 	bool enabled;
 	struct mutex cmdlock;
-	int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
+	int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
 	const u8 *read_commands;
 	struct gpio_desc *dc;
 	u16 *tx_buf;
@@ -79,6 +79,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
 
 int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
 		      struct drm_clip_rect *clip, bool swap);
 /**
@@ -96,7 +97,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
 #define mipi_dbi_command(mipi, cmd, seq...) \
 ({ \
 	u8 d[] = { seq }; \
-	mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
+	mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
 })
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/include/dt-bindings/clock/qcom,gcc-kona.h b/include/dt-bindings/clock/qcom,gcc-kona.h
index 8969f12b..2e74c28 100644
--- a/include/dt-bindings/clock/qcom,gcc-kona.h
+++ b/include/dt-bindings/clock/qcom,gcc-kona.h
@@ -6,6 +6,11 @@
 #ifndef _DT_BINDINGS_CLK_QCOM_GCC_KONA_H
 #define _DT_BINDINGS_CLK_QCOM_GCC_KONA_H
 
+/* GCC HW clocks */
+#define CORE_BI_PLL_TEST_SE					0
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK			1
+#define USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK			2
+
 /* GCC clocks */
 #define GCC_AGGRE_NOC_PCIE_TBU_CLK				3
 #define GCC_AGGRE_UFS_CARD_AXI_CLK				4
@@ -202,18 +207,20 @@
 #define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				195
 #define GCC_USB3_PRIM_PHY_COM_AUX_CLK				196
 #define GCC_USB3_PRIM_PHY_PIPE_CLK				197
-#define GCC_USB3_SEC_CLKREF_EN					198
-#define GCC_USB3_SEC_PHY_AUX_CLK				199
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				200
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				201
-#define GCC_USB3_SEC_PHY_PIPE_CLK				202
-#define GCC_VIDEO_AHB_CLK					203
-#define GCC_VIDEO_AXI0_CLK					204
-#define GCC_VIDEO_AXI1_CLK					205
-#define GCC_VIDEO_XO_CLK					206
-#define GPLL0							207
-#define GPLL0_OUT_EVEN						208
-#define GPLL9							209
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC				198
+#define GCC_USB3_SEC_CLKREF_EN					199
+#define GCC_USB3_SEC_PHY_AUX_CLK				200
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				201
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				202
+#define GCC_USB3_SEC_PHY_PIPE_CLK				203
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC				204
+#define GCC_VIDEO_AHB_CLK					205
+#define GCC_VIDEO_AXI0_CLK					206
+#define GCC_VIDEO_AXI1_CLK					207
+#define GCC_VIDEO_XO_CLK					208
+#define GPLL0							209
+#define GPLL0_OUT_EVEN						210
+#define GPLL9							211
 
 /* GCC resets */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,gpucc-kona.h b/include/dt-bindings/clock/qcom,gpucc-kona.h
index 57704d9..f3bed6b2 100644
--- a/include/dt-bindings/clock/qcom,gpucc-kona.h
+++ b/include/dt-bindings/clock/qcom,gpucc-kona.h
@@ -22,6 +22,7 @@
 #define GPU_CC_GX_VSENSE_CLK					13
 #define GPU_CC_PLL1						14
 #define GPU_CC_SLEEP_CLK					15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				16
 
 #define CX_GDSC							0
 #define GX_GDSC							1
diff --git a/include/dt-bindings/clock/qcom,gpucc-lito.h b/include/dt-bindings/clock/qcom,gpucc-lito.h
index 47cdc35..ead1508 100644
--- a/include/dt-bindings/clock/qcom,gpucc-lito.h
+++ b/include/dt-bindings/clock/qcom,gpucc-lito.h
@@ -16,8 +16,7 @@
 #define GPU_CC_GX_GMU_CLK					8
 #define GPU_CC_GX_VSENSE_CLK					9
 #define GPU_CC_PLL1						10
-#define GPU_CC_RBCPR_CLK					11
-#define GPU_CC_RBCPR_CLK_SRC					12
-#define GPU_CC_SLEEP_CLK					13
+#define GPU_CC_SLEEP_CLK					11
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				12
 
 #endif
diff --git a/include/dt-bindings/thermal/qmi_thermal.h b/include/dt-bindings/thermal/qmi_thermal.h
new file mode 100644
index 0000000..b35c17b
--- /dev/null
+++ b/include/dt-bindings/thermal/qmi_thermal.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
+
+#ifndef _DT_BINDINGS_QCOM_QMI_THERMAL_H
+#define _DT_BINDINGS_QCOM_QMI_THERMAL_H
+
+#define QMI_PA			0
+#define QMI_PA_1		1
+#define QMI_PA_2		2
+#define QMI_QFE_PA_0		3
+#define QMI_QFE_WTR_0		4
+#define QMI_MODEM_TSENS		5
+#define QMI_QFE_MMW_0		6
+#define QMI_QFE_MMW_1		7
+#define QMI_QFE_MMW_2		8
+#define QMI_QFE_MMW_3		9
+#define QMI_XO_THERM		10
+#define QMI_QFE_PA_MDM		11
+#define QMI_QFE_PA_WTR		12
+#define QMI_QFE_MMW_STREAMER_0	13
+#define QMI_QFE_MMW_0_MOD	14
+#define QMI_QFE_MMW_1_MOD	15
+#define QMI_QFE_MMW_2_MOD	16
+#define QMI_QFE_MMW_3_MOD	17
+#define QMI_QFE_RET_PA_0	18
+#define QMI_QFE_WTR_PA_0	19
+#define QMI_QFE_WTR_PA_1	20
+#define QMI_QFE_WTR_PA_2	21
+#define QMI_QFE_WTR_PA_3	22
+#define QMI_SYS_THERM_1		23
+#define QMI_SYS_THERM_2		24
+#define QMI_MODEM_TSENS_1	25
+
+#define QMI_MODEM_INST_ID	0x0
+#define QMI_ADSP_INST_ID	0x1
+#define QMI_CDSP_INST_ID	0x43
+#define QMI_SLPI_INST_ID	0x53
+#define QMI_MODEM_NR_INST_ID	0x64
+
+#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 58f02ec..0801ef9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -265,7 +265,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
 {
 	if (count != 1) {
 		bio->bi_flags |= (1 << BIO_REFFED);
-		smp_mb__before_atomic();
+		smp_mb();
 	}
 	atomic_set(&bio->__bi_cnt, count);
 }
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
index 05ecdd0..c80a3e9 100644
--- a/include/linux/bluetooth-power.h
+++ b/include/linux/bluetooth-power.h
@@ -69,6 +69,8 @@ struct bluetooth_power_platform_data {
 	struct bt_power_vreg_data *bt_vdd_rfa1;
 	/* VDD RFA2 digital voltage regulator */
 	struct bt_power_vreg_data *bt_vdd_rfa2;
+	/* VDD ASD digital voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_asd;
 	/* Optional: chip power down gpio-regulator
 	 * chip power down data is required when bluetooth module
 	 * and other modules like wifi co-exist in a single chip and
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 523481a..16f6bee 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -34,6 +34,7 @@ struct bpf_map_ops {
 	void (*map_free)(struct bpf_map *map);
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 	void (*map_release_uref)(struct bpf_map *map);
+	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
 
 	/* funcs callable from userspace and from eBPF programs */
 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -400,7 +401,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 		}					\
 _out:							\
 		rcu_read_unlock();			\
-		preempt_enable_no_resched();		\
+		preempt_enable();			\
 		_ret;					\
 	 })
 
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 53cc5b8..6984e7e 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -348,6 +348,11 @@ struct cgroup {
 	 * Dying cgroups are cgroups which were deleted by a user,
 	 * but are still existing because someone else is holding a reference.
 	 * max_descendants is a maximum allowed number of descent cgroups.
+	 *
+	 * nr_descendants and nr_dying_descendants are protected
+	 * by cgroup_mutex and css_set_lock. It's fine to read them holding
+	 * any of cgroup_mutex and css_set_lock; for writing both locks
+	 * should be held.
 	 */
 	int nr_descendants;
 	int nr_dying_descendants;
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 68250a5..b0d530c 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -95,7 +95,8 @@ extern int sysctl_compact_unevictable_allowed;
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
 		unsigned int order, unsigned int alloc_flags,
-		const struct alloc_context *ac, enum compact_priority prio);
+		const struct alloc_context *ac, enum compact_priority prio,
+		struct page **page);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern enum compact_result compaction_suitable(struct zone *zone, int order,
 		unsigned int alloc_flags, int classzone_idx);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 0294807..1d063d9 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -47,3 +47,10 @@
 #ifdef CONFIG_CFI_CLANG
 #define __nocfi		__attribute__((no_sanitize("cfi")))
 #endif
+
+#ifdef CONFIG_LTO_CLANG
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define __norecordmcount \
+	__attribute__((__section__(".text..ftrace")))
+#endif
+#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 5e0aaa5..04cde6f 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -251,6 +251,14 @@ struct ftrace_likely_data {
 # define __gnu_inline
 #endif
 
+#ifndef __norecordmcount
+#define __norecordmcount
+#endif
+
+#ifndef __nocfi
+#define __nocfi
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config.
  * GCC does not warn about unused static inline functions for
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 2e63e73..657a68d 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -317,7 +317,8 @@ static inline int coresight_timeout(void __iomem *addr, u32 offset,
 				     int position, int value) { return 1; }
 static inline void coresight_abort(void) {}
 static inline void coresight_disable_reg_clk(struct coresight_device *csdev) {}
-static inline int coresight_enable_reg_clk(struct coresight_device *csdev) {}
+static inline int coresight_enable_reg_clk(struct coresight_device *csdev)
+{ return -EINVAL; }
 #endif
 
 #if defined(CONFIG_OF) && defined(CONFIG_CORESIGHT)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f172204..bface53 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -57,6 +57,8 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
 					  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_l1tf(struct device *dev,
 			     struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+			    struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -194,4 +196,28 @@ void idle_notifier_register(struct notifier_block *n);
 void idle_notifier_unregister(struct notifier_block *n);
 void idle_notifier_call_chain(unsigned long val);
 
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+	CPU_MITIGATIONS_OFF,
+	CPU_MITIGATIONS_AUTO,
+	CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+extern enum cpu_mitigations cpu_mitigations;
+
+/* mitigations=off */
+static inline bool cpu_mitigations_off(void)
+{
+	return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+
+/* mitigations=auto,nosmt */
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+
 #endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 258c612..37d9611 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -540,15 +540,6 @@ static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
 		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
 }
 
-static inline void cpufreq_policy_apply_limits_fast(struct cpufreq_policy
-						    *policy)
-{
-	if (policy->max < policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->max);
-	else if (policy->min > policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->min);
-}
-
 /* Governor attribute set */
 struct gov_attr_set {
 	struct kobject kobj;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 2dc6915..bd19969 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -176,7 +176,6 @@ struct dentry_operations {
       * typically using d_splice_alias. */
 
 #define DCACHE_REFERENCED		0x00000040 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS		0x00000080 /* Entry has ever been RCU-visible */
 
 #define DCACHE_CANT_MOUNT		0x00000100
 #define DCACHE_GENOCIDE			0x00000200
@@ -217,6 +216,7 @@ struct dentry_operations {
 
 #define DCACHE_PAR_LOOKUP		0x10000000 /* being looked up (with parent locked shared) */
 #define DCACHE_DENTRY_CURSOR		0x20000000
+#define DCACHE_NORCU			0x40000000 /* No RCU delay for freeing */
 
 extern seqlock_t rename_lock;
 
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 8a461f8..3b908e5 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -142,7 +142,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xCA7
+#define APPS_EVENT_LAST_ID		0xCAA
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			130
@@ -177,7 +177,7 @@
 #define MSG_SSID_15			8000
 #define MSG_SSID_15_LAST		8000
 #define MSG_SSID_16			8500
-#define MSG_SSID_16_LAST		8531
+#define MSG_SSID_16_LAST		8532
 #define MSG_SSID_17			9000
 #define MSG_SSID_17_LAST		9008
 #define MSG_SSID_18			9500
@@ -777,7 +777,8 @@ static const uint32_t msg_bld_masks_16[] = {
 	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
 		MSG_LVL_FATAL,
 	MSG_LVL_MED,
-	MSG_LVL_MED
+	MSG_LVL_MED,
+	MSG_LVL_LOW
 };
 
 static const uint32_t msg_bld_masks_17[] =  {
@@ -915,7 +916,7 @@ static const uint32_t msg_bld_masks_25[] = {
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1C94,	/* EQUIP ID 1 */
+	0x1C9A,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 401e4b2..cc33917 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1564,7 +1564,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
 			   struct screen_info *si, efi_guid_t *proto,
 			   unsigned long size);
 
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 
 enum efi_secureboot_mode {
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a02deea..a2bf4a6b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -99,6 +99,7 @@ struct elevator_mq_ops {
 	void (*exit_sched)(struct elevator_queue *);
 	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+	void (*depth_updated)(struct blk_mq_hw_ctx *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
 	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
index 5c3f963..a9a9801 100644
--- a/include/linux/esoc_client.h
+++ b/include/linux/esoc_client.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __ESOC_CLIENT_H_
 #define __ESOC_CLIENT_H_
@@ -9,13 +9,18 @@
 #include <linux/esoc_ctrl.h>
 #include <linux/notifier.h>
 
+/* Flag values used with the power_on and power_off hooks */
+#define ESOC_HOOK_MDM_CRASH	0x0001 /* In crash handling path */
+#define ESOC_HOOK_MDM_DOWN	0x0002 /* MDM about to go down */
+
 struct esoc_client_hook {
 	char *name;
 	void *priv;
 	enum esoc_client_hook_prio prio;
-	int (*esoc_link_power_on)(void *priv, bool mdm_crashed);
-	void (*esoc_link_power_off)(void *priv, bool mdm_crashed);
+	int (*esoc_link_power_on)(void *priv, unsigned int flags);
+	void (*esoc_link_power_off)(void *priv, unsigned int flags);
 	u64 (*esoc_link_get_id)(void *priv);
+	void (*esoc_link_mdm_crash)(void *priv);
 };
 
 /*
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f574042..6555990 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -164,6 +164,10 @@ struct f2fs_checkpoint {
 	unsigned char sit_nat_version_bitmap[1];
 } __packed;
 
+#define CP_CHKSUM_OFFSET	4092	/* default chksum offset in checkpoint */
+#define CP_MIN_CHKSUM_OFFSET						\
+	(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
+
 /*
  * For orphan inode management
  */
@@ -198,11 +202,12 @@ struct f2fs_extent {
 					get_extra_isize(inode))
 #define DEF_NIDS_PER_INODE	5	/* Node IDs in an Inode */
 #define ADDRS_PER_INODE(inode)	addrs_per_inode(inode)
-#define ADDRS_PER_BLOCK		1018	/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK	1018	/* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(inode)	addrs_per_block(inode)
 #define NIDS_PER_BLOCK		1018	/* Node IDs in an Indirect Block */
 
 #define ADDRS_PER_PAGE(page, inode)	\
-	(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+	(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
 
 #define	NODE_DIR1_BLOCK		(DEF_ADDRS_PER_INODE + 1)
 #define	NODE_DIR2_BLOCK		(DEF_ADDRS_PER_INODE + 2)
@@ -267,7 +272,7 @@ struct f2fs_inode {
 } __packed;
 
 struct direct_node {
-	__le32 addr[ADDRS_PER_BLOCK];	/* array of data block address */
+	__le32 addr[DEF_ADDRS_PER_BLOCK];	/* array of data block address */
 } __packed;
 
 struct indirect_node {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0376108..d52a748 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -684,6 +684,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
 	set_memory_ro((unsigned long)hdr, hdr->pages);
+	set_memory_x((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
@@ -836,6 +837,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
+extern int bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 79eed97..c3eee55 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -153,6 +153,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_OPENED		((__force fmode_t)0x80000)
 #define FMODE_CREATED		((__force fmode_t)0x100000)
 
+/* File is stream-like */
+#define FMODE_STREAM		((__force fmode_t)0x200000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x4000000)
 
@@ -304,13 +307,19 @@ enum rw_hint {
 
 struct kiocb {
 	struct file		*ki_filp;
+
+	/* The 'ki_filp' pointer is shared in a union for aio */
+	randomized_struct_fields_start
+
 	loff_t			ki_pos;
 	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
 	void			*private;
 	int			ki_flags;
 	u16			ki_hint;
 	u16			ki_ioprio; /* See linux/ioprio.h */
-} __randomize_layout;
+
+	randomized_struct_fields_end
+};
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
 {
@@ -679,7 +688,7 @@ struct inode {
 	struct fsnotify_mark_connector __rcu	*i_fsnotify_marks;
 #endif
 
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
 	struct fscrypt_info	*i_crypt_info;
 #endif
 
@@ -1379,7 +1388,7 @@ struct super_block {
 	void                    *s_security;
 #endif
 	const struct xattr_handler **s_xattr;
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
 	const struct fscrypt_operations	*s_cop;
 #endif
 	struct hlist_bl_head	s_roots;	/* alternate root dentries for NFS */
@@ -3038,6 +3047,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
 extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 3468d00..cf8838c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -2,9 +2,8 @@
 /*
  * fscrypt.h: declarations for per-file encryption
  *
- * Filesystems that implement per-file encryption include this header
- * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem
- * is being built with encryption support or not.
+ * Filesystems that implement per-file encryption must include this header
+ * file.
  *
  * Copyright (C) 2015, Google, Inc.
  *
@@ -15,6 +14,8 @@
 #define _LINUX_FSCRYPT_H
 
 #include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 
 #define FS_CRYPTO_BLOCK_SIZE		16
 
@@ -47,11 +48,411 @@ struct fscrypt_name {
 /* Maximum value for the third parameter of fscrypt_operations.set_context(). */
 #define FSCRYPT_SET_CONTEXT_MAX_SIZE	28
 
-#if __FS_HAS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
+#ifdef CONFIG_FS_ENCRYPTION
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+	unsigned int flags;
+	const char *key_prefix;
+	int (*get_context)(struct inode *, void *, size_t);
+	int (*set_context)(struct inode *, const void *, size_t, void *);
+	bool (*dummy_context)(struct inode *);
+	bool (*empty_dir)(struct inode *);
+	unsigned int max_namelen;
+	bool (*is_encrypted)(struct inode *inode);
+};
+
+struct fscrypt_ctx {
+	union {
+		struct {
+			struct page *bounce_page;	/* Ciphertext page */
+			struct page *control_page;	/* Original page  */
+		} w;
+		struct {
+			struct bio *bio;
+			struct work_struct work;
+		} r;
+		struct list_head free_list;	/* Free list */
+	};
+	u8 flags;				/* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return inode->i_sb->s_cop->dummy_context &&
+		inode->i_sb->s_cop->dummy_context(inode);
+}
+
+/* crypto.c */
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+						unsigned int, unsigned int,
+						u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+				unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
+extern void fscrypt_restore_control_page(struct page *);
+
+/* policy.c */
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+					void *, bool);
+/* keyinfo.c */
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+				int lookup, struct fscrypt_name *);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+	kfree(fname->crypto_buf.name);
+}
+
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
+				struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+			const struct fscrypt_str *, struct fscrypt_str *);
+
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE	32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len)	\
+	((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+			     FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE	FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded).  This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename.  Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext.  (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".)  This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key.  Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+	u32 hash;
+	u32 minor_hash;
+	u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry.  The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+				      const u8 *de_name, u32 de_name_len)
+{
+	if (unlikely(!fname->disk_name.name)) {
+		const struct fscrypt_digested_name *n =
+			(const void *)fname->crypto_buf.name;
+		if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+			return false;
+		if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+			return false;
+		return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+			       n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+	}
+
+	if (de_name_len != fname->disk_name.len)
+		return false;
+	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					struct bio *bio);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+				 unsigned int);
+
+/* hooks.c */
+extern int fscrypt_file_open(struct inode *inode, struct file *filp);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_rename(struct inode *old_dir,
+				    struct dentry *old_dentry,
+				    struct inode *new_dir,
+				    struct dentry *new_dentry,
+				    unsigned int flags);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+				     unsigned int max_len,
+				     struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+				     unsigned int len,
+				     struct fscrypt_str *disk_link);
+extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+				       unsigned int max_size,
+				       struct delayed_call *done);
+#else  /* !CONFIG_FS_ENCRYPTION */
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return false;
+}
+
+/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
+static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
+						  gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+	return;
+}
+
+static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+						struct page *page,
+						unsigned int len,
+						unsigned int offs,
+						u64 lblk_num, gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_decrypt_page(const struct inode *inode,
+				       struct page *page,
+				       unsigned int len, unsigned int offs,
+				       u64 lblk_num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void fscrypt_restore_control_page(struct page *page)
+{
+	return;
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+					   const void __user *arg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+						struct inode *child)
+{
+	return 0;
+}
+
+static inline int fscrypt_inherit_context(struct inode *parent,
+					  struct inode *child,
+					  void *fs_data, bool preload)
+{
+	return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_get_encryption_info(struct inode *inode)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode)
+{
+	return;
+}
+
+ /* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+					 const struct qstr *iname,
+					 int lookup, struct fscrypt_name *fname)
+{
+	if (IS_ENCRYPTED(dir))
+		return -EOPNOTSUPP;
+
+	memset(fname, 0, sizeof(struct fscrypt_name));
+	fname->usr_fname = iname;
+	fname->disk_name.name = (unsigned char *)iname->name;
+	fname->disk_name.len = iname->len;
+	return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+	return;
+}
+
+static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
+					     u32 max_encrypted_len,
+					     struct fscrypt_str *crypto_str)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+	return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+					    u32 hash, u32 minor_hash,
+					    const struct fscrypt_str *iname,
+					    struct fscrypt_str *oname)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+				      const u8 *de_name, u32 de_name_len)
+{
+	/* Encryption support disabled; use standard comparison */
+	if (de_name_len != fname->disk_name.len)
+		return false;
+	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+static inline void fscrypt_decrypt_bio(struct bio *bio)
+{
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					       struct bio *bio)
+{
+}
+
+static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+	return;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+					sector_t pblk, unsigned int len)
+{
+	return -EOPNOTSUPP;
+}
+
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+	if (IS_ENCRYPTED(inode))
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode,
+					 struct inode *dir)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+					   struct dentry *old_dentry,
+					   struct inode *new_dir,
+					   struct dentry *new_dentry,
+					   unsigned int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+					   struct dentry *dentry)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+					    unsigned int len,
+					    unsigned int max_len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+					    const char *target,
+					    unsigned int len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+					      const void *caddr,
+					      unsigned int max_size,
+					      struct delayed_call *done)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+#endif	/* !CONFIG_FS_ENCRYPTION */
 
 /**
  * fscrypt_require_key - require an inode's encryption key
@@ -94,7 +495,7 @@ static inline int fscrypt_require_key(struct inode *inode)
  * in an encrypted directory tree use the same encryption policy.
  *
  * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
- * -EPERM if the link would result in an inconsistent encryption policy, or
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
  * another -errno code.
  */
 static inline int fscrypt_prepare_link(struct dentry *old_dentry,
@@ -124,7 +525,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
  * We also verify that the rename will not violate the constraint that all files
  * in an encrypted directory tree use the same encryption policy.
  *
- * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
  * rename would cause inconsistent encryption policies, or another -errno code.
  */
 static inline int fscrypt_prepare_rename(struct inode *old_dir,
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
deleted file mode 100644
index ee8b43e..0000000
--- a/include/linux/fscrypt_notsupp.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * fscrypt_notsupp.h
- *
- * This stubs out the fscrypt functions for filesystems configured without
- * encryption support.
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_notsupp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_NOTSUPP_H
-#define _LINUX_FSCRYPT_NOTSUPP_H
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return false;
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
-	return false;
-}
-
-/* crypto.c */
-static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
-{
-}
-
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
-						  gfp_t gfp_flags)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
-	return;
-}
-
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
-						struct page *page,
-						unsigned int len,
-						unsigned int offs,
-						u64 lblk_num, gfp_t gfp_flags)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_decrypt_page(const struct inode *inode,
-				       struct page *page,
-				       unsigned int len, unsigned int offs,
-				       u64 lblk_num)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	WARN_ON_ONCE(1);
-	return ERR_PTR(-EINVAL);
-}
-
-static inline void fscrypt_restore_control_page(struct page *page)
-{
-	return;
-}
-
-/* policy.c */
-static inline int fscrypt_ioctl_set_policy(struct file *filp,
-					   const void __user *arg)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_has_permitted_context(struct inode *parent,
-						struct inode *child)
-{
-	return 0;
-}
-
-static inline int fscrypt_inherit_context(struct inode *parent,
-					  struct inode *child,
-					  void *fs_data, bool preload)
-{
-	return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_put_encryption_info(struct inode *inode)
-{
-	return;
-}
-
- /* fname.c */
-static inline int fscrypt_setup_filename(struct inode *dir,
-					 const struct qstr *iname,
-					 int lookup, struct fscrypt_name *fname)
-{
-	if (IS_ENCRYPTED(dir))
-		return -EOPNOTSUPP;
-
-	memset(fname, 0, sizeof(struct fscrypt_name));
-	fname->usr_fname = iname;
-	fname->disk_name.name = (unsigned char *)iname->name;
-	fname->disk_name.len = iname->len;
-	return 0;
-}
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
-	return;
-}
-
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
-					     u32 max_encrypted_len,
-					     struct fscrypt_str *crypto_str)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
-{
-	return;
-}
-
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
-					    u32 hash, u32 minor_hash,
-					    const struct fscrypt_str *iname,
-					    struct fscrypt_str *oname)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
-				      const u8 *de_name, u32 de_name_len)
-{
-	/* Encryption support disabled; use standard comparison */
-	if (de_name_len != fname->disk_name.len)
-		return false;
-	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
-{
-}
-
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					       struct bio *bio)
-{
-}
-
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	return;
-}
-
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
-					sector_t pblk, unsigned int len)
-{
-	return -EOPNOTSUPP;
-}
-
-/* hooks.c */
-
-static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
-{
-	if (IS_ENCRYPTED(inode))
-		return -EOPNOTSUPP;
-	return 0;
-}
-
-static inline int __fscrypt_prepare_link(struct inode *inode,
-					 struct inode *dir)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_rename(struct inode *old_dir,
-					   struct dentry *old_dentry,
-					   struct inode *new_dir,
-					   struct dentry *new_dentry,
-					   unsigned int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_lookup(struct inode *dir,
-					   struct dentry *dentry)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_symlink(struct inode *dir,
-					    unsigned int len,
-					    unsigned int max_len,
-					    struct fscrypt_str *disk_link)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_encrypt_symlink(struct inode *inode,
-					    const char *target,
-					    unsigned int len,
-					    struct fscrypt_str *disk_link)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline const char *fscrypt_get_symlink(struct inode *inode,
-					      const void *caddr,
-					      unsigned int max_size,
-					      struct delayed_call *done)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-#endif	/* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
deleted file mode 100644
index 070d58e..0000000
--- a/include/linux/fscrypt_supp.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * fscrypt_supp.h
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_supp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_SUPP_H
-#define _LINUX_FSCRYPT_SUPP_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto operations for filesystems
- */
-struct fscrypt_operations {
-	unsigned int flags;
-	const char *key_prefix;
-	int (*get_context)(struct inode *, void *, size_t);
-	int (*set_context)(struct inode *, const void *, size_t, void *);
-	bool (*dummy_context)(struct inode *);
-	bool (*empty_dir)(struct inode *);
-	unsigned int max_namelen;
-	bool (*is_encrypted)(struct inode *inode);
-};
-
-struct fscrypt_ctx {
-	union {
-		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page  */
-		} w;
-		struct {
-			struct bio *bio;
-			struct work_struct work;
-		} r;
-		struct list_head free_list;	/* Free list */
-	};
-	u8 flags;				/* Flags */
-};
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return (inode->i_crypt_info != NULL);
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
-	return inode->i_sb->s_cop->dummy_context &&
-		inode->i_sb->s_cop->dummy_context(inode);
-}
-
-/* crypto.c */
-extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
-						unsigned int, unsigned int,
-						u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
-				unsigned int, u64);
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-extern void fscrypt_restore_control_page(struct page *);
-
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
-					void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
-				int lookup, struct fscrypt_name *);
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
-	kfree(fname->crypto_buf.name);
-}
-
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
-				struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
-			const struct fscrypt_str *, struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE	32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len)	\
-	((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
-			     FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE	FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded).  This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename.  Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext.  (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".)  This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key.  Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
-	u32 hash;
-	u32 minor_hash;
-	u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry.  The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
-				      const u8 *de_name, u32 de_name_len)
-{
-	if (unlikely(!fname->disk_name.name)) {
-		const struct fscrypt_digested_name *n =
-			(const void *)fname->crypto_buf.name;
-		if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
-			return false;
-		if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
-			return false;
-		return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
-			       n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
-	}
-
-	if (de_name_len != fname->disk_name.len)
-		return false;
-	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
-				 unsigned int);
-
-/* hooks.c */
-extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
-extern int __fscrypt_prepare_rename(struct inode *old_dir,
-				    struct dentry *old_dentry,
-				    struct inode *new_dir,
-				    struct dentry *new_dentry,
-				    unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
-extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
-				     unsigned int max_len,
-				     struct fscrypt_str *disk_link);
-extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
-				     unsigned int len,
-				     struct fscrypt_str *disk_link);
-extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
-				       unsigned int max_size,
-				       struct delayed_call *done);
-
-#endif	/* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index f767293..f13272d 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -596,6 +596,7 @@ struct unixware_disklabel {
 
 extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
 extern void blk_free_devt(dev_t devt);
+extern void blk_invalidate_devt(dev_t devt);
 extern dev_t blk_lookup_devt(const char *name, int partno);
 extern char *disk_name (struct gendisk *hd, int partno, char *buf);
 
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d44a783..8b3e5e8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -414,6 +414,7 @@ struct hid_global {
 
 struct hid_local {
 	unsigned usage[HID_MAX_USAGES]; /* usage array */
+	u8 usage_size[HID_MAX_USAGES]; /* usage size array */
 	unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
 	unsigned usage_index;
 	unsigned usage_minimum;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fdcb459..7722722 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-			pmd_t *pmd, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-			pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 087fd5f4..d34112f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -123,9 +123,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-				struct vm_area_struct *vma,
-				struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 				pgoff_t idx, unsigned long address);
 
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 65b4eae..7e74864 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -333,6 +333,7 @@ struct i2c_client {
 	char name[I2C_NAME_SIZE];
 	struct i2c_adapter *adapter;	/* the adapter we sit on	*/
 	struct device dev;		/* the device structure		*/
+	int init_irq;			/* irq set at initialization	*/
 	int irq;			/* irq issued by device		*/
 	struct list_head detected;
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 730ead1..57c122a 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -66,6 +66,7 @@ struct ad_sigma_delta {
 	bool			irq_dis;
 
 	bool			bus_locked;
+	bool			keep_cs_asserted;
 
 	uint8_t			comm;
 
diff --git a/include/linux/init.h b/include/linux/init.h
index 9ef90a3..847e853 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -167,6 +167,20 @@ extern bool initcall_debug;
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_LTO_CLANG
+  /*
+   * Use __COUNTER__ prefix in the variable to help ensure ordering
+   * inside a compilation unit that defines multiple initcalls, and
+   * __LINE__ to help prevent naming collisions.
+   */
+  #define ___initcall_name2(c, l, fn, id) __initcall_##c##_##l##_##fn##id
+  #define ___initcall_name1(c, l, fn, id) ___initcall_name2(c, l, fn, id)
+  #define __initcall_name(fn, id) \
+		___initcall_name1(__COUNTER__, __LINE__, fn, id)
+#else
+  #define __initcall_name(fn, id) 	__initcall_##fn##id
+#endif
+
 /*
  * initcalls are now grouped by functionality into separate
  * subsections. Ordering inside the subsections is determined
@@ -187,12 +201,12 @@ extern bool initcall_debug;
 #define ___define_initcall(fn, id, __sec)			\
 	__ADDRESSABLE(fn)					\
 	asm(".section	\"" #__sec ".init\", \"a\"	\n"	\
-	"__initcall_" #fn #id ":			\n"	\
+	__stringify(__initcall_name(fn, id)) ":		\n"	\
 	    ".long	" #fn " - .			\n"	\
 	    ".previous					\n");
 #else
 #define ___define_initcall(fn, id, __sec) \
-	static initcall_t __initcall_##fn##id __used \
+	static initcall_t __initcall_name(fn, id) __used \
 		__attribute__((__section__(#__sec ".init"))) = fn;
 #endif
 
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
new file mode 100755
index 0000000..fd5ecc0
--- /dev/null
+++ b/include/linux/input/synaptics_dsx.h
@@ -0,0 +1,114 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsx"
+#define STYLUS_DRIVER_NAME "synaptics_dsx_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsx_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsx_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsx_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsx_i2c"
+#define SPI_DRIVER_NAME "synaptics_dsx_spi"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+	unsigned char nbuttons;
+	unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @addr_delay_us: delay time after sending address word
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	int irq_gpio;
+	int irq_on_state;
+	int power_gpio;
+	int power_on_state;
+	int reset_gpio;
+	int reset_on_state;
+	int max_y_for_2d;
+	unsigned long irq_flags;
+	unsigned short i2c_addr;
+	unsigned short ub_i2c_addr;
+	unsigned short device_descriptor_addr;
+	unsigned int panel_x;
+	unsigned int panel_y;
+	unsigned int power_delay_ms;
+	unsigned int reset_delay_ms;
+	unsigned int reset_active_ms;
+	unsigned int byte_delay_us;
+	unsigned int block_delay_us;
+	unsigned int addr_delay_us;
+	const char *pwr_reg_name;
+	const char *bus_reg_name;
+	struct synaptics_dsx_button_map *cap_button_map;
+	struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e34d13c..da56afc2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -99,6 +99,8 @@ struct iommu_pgtbl_info {
 #define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
 				 __IOMMU_DOMAIN_DMA_API)
 
+#define to_msm_iommu_ops(_iommu_ops) \
+	container_of(_iommu_ops, struct msm_iommu_ops, iommu_ops)
 
 #define IOMMU_DOMAIN_NAME_LEN 32
 struct iommu_domain {
@@ -233,8 +235,6 @@ extern struct dentry *iommu_debugfs_top;
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  * @trigger_fault: trigger a fault on the device attached to an iommu domain
- * @reg_read: read an IOMMU register
- * @reg_write: write an IOMMU register
  * @tlbi_domain: Invalidate all TLBs covering an iommu domain
  * @enable_config_clocks: Enable all config clocks for this domain's IOMMU
  * @disable_config_clocks: Disable all config clocks for this domain's IOMMU
@@ -285,10 +285,6 @@ struct iommu_ops {
 	/* Get the number of windows per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
 	void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
-	unsigned long (*reg_read)(struct iommu_domain *domain,
-				  unsigned long offset);
-	void (*reg_write)(struct iommu_domain *domain, unsigned long val,
-			  unsigned long offset);
 	void (*tlbi_domain)(struct iommu_domain *domain);
 	int (*enable_config_clocks)(struct iommu_domain *domain);
 	void (*disable_config_clocks)(struct iommu_domain *domain);
@@ -303,6 +299,34 @@ struct iommu_ops {
 };
 
 /**
+ * struct msm_iommu_ops - standard iommu ops, as well as additional MSM
+ * specific iommu ops
+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ *          to an iommu domain
+ * @iova_to_phys_hard: translate iova to physical address using IOMMU hardware
+ * @is_iova_coherent: checks coherency of the given iova
+ * @trigger_fault: trigger a fault on the device attached to an iommu domain
+ * @tlbi_domain: Invalidate all TLBs covering an iommu domain
+ * @enable_config_clocks: Enable all config clocks for this domain's IOMMU
+ * @disable_config_clocks: Disable all config clocks for this domain's IOMMU
+ * @iova_to_pte: translate iova to Page Table Entry (PTE).
+ * @iommu_ops: the standard iommu ops
+ */
+struct msm_iommu_ops {
+	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
+			 struct scatterlist *sg, unsigned int nents, int prot);
+	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+					 dma_addr_t iova);
+	bool (*is_iova_coherent)(struct iommu_domain *domain, dma_addr_t iova);
+	void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
+	void (*tlbi_domain)(struct iommu_domain *domain);
+	int (*enable_config_clocks)(struct iommu_domain *domain);
+	void (*disable_config_clocks)(struct iommu_domain *domain);
+	uint64_t (*iova_to_pte)(struct iommu_domain *domain, dma_addr_t iova);
+	struct iommu_ops iommu_ops;
+};
+
+/**
  * struct iommu_device - IOMMU core representation of one IOMMU hardware
  *			 instance
  * @list: Used by the iommu-core to keep a list of registered iommus
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index c2a83e2..468079a 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1282,8 +1282,13 @@ int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
  */
 int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
 
+int ipa_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules);
+
 int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only);
 
+int ipa_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules,
+	bool user_only);
+
 int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
 
 int ipa_commit_rt(enum ipa_ip_type ip);
@@ -1298,17 +1303,26 @@ int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
 
 int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
 
+int ipa_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
+
 /*
  * Filtering
  */
 int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
 
+int ipa_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules);
+
 int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only);
 
+int ipa_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2 *rules,
+	bool user_only);
+
 int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
 
 int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
 
+int ipa_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
+
 int ipa_commit_flt(enum ipa_ip_type ip);
 
 int ipa_reset_flt(enum ipa_ip_type ip, bool user_only);
@@ -1776,12 +1790,23 @@ static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
 	return -EPERM;
 }
 
+static inline int ipa_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
+{
+	return -EPERM;
+}
+
 static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
 					bool user_only)
 {
 	return -EPERM;
 }
 
+static inline int ipa_add_rt_rule_usr_v2(
+	struct ipa_ioc_add_rt_rule_v2 *rules, bool user_only)
+{
+	return -EPERM;
+}
+
 static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
 {
 	return -EPERM;
@@ -1817,6 +1842,11 @@ static inline int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules)
 	return -EPERM;
 }
 
+static inline int ipa_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *rules)
+{
+	return -EPERM;
+}
+
 /*
  * Filtering
  */
@@ -1825,12 +1855,23 @@ static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
 	return -EPERM;
 }
 
+static inline int ipa_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
+{
+	return -EPERM;
+}
+
 static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
 					bool user_only)
 {
 	return -EPERM;
 }
 
+static inline int ipa_add_flt_rule_usr_v2(
+	struct ipa_ioc_add_flt_rule_v2 *rules, bool user_only)
+{
+	return -EPERM;
+}
+
 static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
 {
 	return -EPERM;
@@ -1841,6 +1882,13 @@ static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules)
 	return -EPERM;
 }
 
+static inline int ipa_mdfy_flt_rule_v2(
+	struct ipa_ioc_mdfy_flt_rule_v2 *rules)
+{
+	return -EPERM;
+}
+
+
 static inline int ipa_commit_flt(enum ipa_ip_type ip)
 {
 	return -EPERM;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c9bffda..60b5bc9 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -599,6 +599,12 @@ extern int irq_chip_pm_put(struct irq_data *data);
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
 extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
+extern int irq_chip_set_parent_state(struct irq_data *data,
+				     enum irqchip_irq_state which,
+				     bool val);
+extern int irq_chip_get_parent_state(struct irq_data *data,
+				     enum irqchip_irq_state which,
+				     bool *state);
 extern void irq_chip_enable_parent(struct irq_data *data);
 extern void irq_chip_disable_parent(struct irq_data *data);
 extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b708e51..583b82b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1317,7 +1317,7 @@ extern void		__wait_on_journal (journal_t *);
 
 /* Transaction cache support */
 extern void jbd2_journal_destroy_transaction_cache(void);
-extern int  jbd2_journal_init_transaction_cache(void);
+extern int __init jbd2_journal_init_transaction_cache(void);
 extern void jbd2_journal_free_transaction(transaction_t *);
 
 /*
@@ -1445,8 +1445,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int	   jbd2_journal_init_revoke(journal_t *, int);
-extern void	   jbd2_journal_destroy_revoke_caches(void);
-extern int	   jbd2_journal_init_revoke_caches(void);
+extern void	   jbd2_journal_destroy_revoke_record_cache(void);
+extern void	   jbd2_journal_destroy_revoke_table_cache(void);
+extern int __init jbd2_journal_init_revoke_record_cache(void);
+extern int __init jbd2_journal_init_revoke_table_cache(void);
 
 extern void	   jbd2_journal_destroy_revoke(journal_t *);
 extern int	   jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6aac75..3d83ebb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -73,8 +73,8 @@
 
 #define u64_to_user_ptr(x) (		\
 {					\
-	typecheck(u64, x);		\
-	(void __user *)(uintptr_t)x;	\
+	typecheck(u64, (x));		\
+	(void __user *)(uintptr_t)(x);	\
 }					\
 )
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e909413..32cae0f 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,7 @@ struct kretprobe_instance {
 	struct kretprobe *rp;
 	kprobe_opcode_t *ret_addr;
 	struct task_struct *task;
+	void *fp;
 	char data[0];
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 23c242a..30efb36 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -491,10 +492,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-	 * the caller has read kvm->online_vcpus before (as is the case
-	 * for kvm_for_each_vcpu, for example).
-	 */
+	int num_vcpus = atomic_read(&kvm->online_vcpus);
+	i = array_index_nospec(i, num_vcpus);
+
+	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
 	smp_rmb();
 	return kvm->vcpus[i];
 }
@@ -578,6 +579,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
 	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
 			lockdep_is_held(&kvm->slots_lock) ||
 			!refcount_read(&kvm->users_count));
diff --git a/include/linux/list.h b/include/linux/list.h
index de04cc5..7ccd784 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -184,6 +184,17 @@ static inline void list_move_tail(struct list_head *list,
 }
 
 /**
+ * list_is_first -- tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list,
+					const struct list_head *head)
+{
+	return list->prev == head;
+}
+
+/**
  * list_is_last - tests whether @list is the last entry in list @head
  * @list: the entry to test
  * @head: the head of the list
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 0c658a5..1f8be08 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -140,7 +140,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
 
 void __memblock_free_early(phys_addr_t base, phys_addr_t size);
 void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
+void create_pgtable_mapping(phys_addr_t start, phys_addr_t end);
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859..844fc29 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -215,9 +215,9 @@
 
 /* DA9063 Configuration registers */
 /* OTP */
-#define	DA9063_REG_OPT_COUNT		0x101
-#define	DA9063_REG_OPT_ADDR		0x102
-#define	DA9063_REG_OPT_DATA		0x103
+#define	DA9063_REG_OTP_CONT		0x101
+#define	DA9063_REG_OTP_ADDR		0x102
+#define	DA9063_REG_OTP_DATA		0x103
 
 /* Customer Trim and Configuration */
 #define	DA9063_REG_T_OFFSET		0x104
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index ad2a9a8..b4fd5a7 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -136,8 +136,8 @@
 #define MAX77620_FPS_PERIOD_MIN_US		40
 #define MAX20024_FPS_PERIOD_MIN_US		20
 
-#define MAX77620_FPS_PERIOD_MAX_US		2560
-#define MAX20024_FPS_PERIOD_MAX_US		5120
+#define MAX20024_FPS_PERIOD_MAX_US		2560
+#define MAX77620_FPS_PERIOD_MAX_US		5120
 
 #define MAX77620_REG_FPS_GPIO1			0x54
 #define MAX77620_REG_FPS_GPIO2			0x55
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 68c9493..93a4778 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -20,6 +20,7 @@ struct mhi_buf_info;
  * @MHI_CB_LPM_ENTER: MHI host entered low power mode
  * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
  * @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode ee
  * @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
  * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
  * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
@@ -30,6 +31,7 @@ enum MHI_CB {
 	MHI_CB_LPM_ENTER,
 	MHI_CB_LPM_EXIT,
 	MHI_CB_EE_RDDM,
+	MHI_CB_EE_MISSION_MODE,
 	MHI_CB_SYS_ERROR,
 	MHI_CB_FATAL_ERROR,
 	MHI_CB_BW_REQ,
@@ -81,15 +83,16 @@ enum mhi_device_type {
  * @MHI_EE_EDL - device in emergency download mode
  */
 enum mhi_ee {
-	MHI_EE_PBL = 0x0,
-	MHI_EE_SBL = 0x1,
-	MHI_EE_AMSS = 0x2,
-	MHI_EE_RDDM = 0x3,
-	MHI_EE_WFW = 0x4,
-	MHI_EE_PTHRU = 0x5,
-	MHI_EE_EDL = 0x6,
+	MHI_EE_PBL,
+	MHI_EE_SBL,
+	MHI_EE_AMSS,
+	MHI_EE_RDDM,
+	MHI_EE_WFW,
+	MHI_EE_PTHRU,
+	MHI_EE_EDL,
 	MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
 	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+	MHI_EE_NOT_SUPPORTED,
 	MHI_EE_MAX,
 };
 
@@ -254,6 +257,7 @@ struct mhi_controller {
 	u32 saved_pm_state; /* saved state during fast suspend */
 	u32 db_access; /* db access only on these states */
 	enum mhi_ee ee;
+	u32 ee_table[MHI_EE_MAX]; /* ee conversion from dev to host */
 	enum mhi_dev_state dev_state;
 	enum mhi_dev_state saved_dev_state;
 	bool wake_set;
@@ -292,6 +296,7 @@ struct mhi_controller {
 			  struct mhi_buf_info *buf);
 	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
 			     struct mhi_buf_info *buf);
+	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
 
 	/* channel to control DTR messaging */
 	struct mhi_device *dtr_dev;
@@ -303,6 +308,8 @@ struct mhi_controller {
 	/* supports time sync feature */
 	struct mhi_timesync *mhi_tsync;
 	struct mhi_device *tsync_dev;
+	u64 local_timer_freq;
+	u64 remote_timer_freq;
 
 	/* kernel log level */
 	enum MHI_DEBUG_LEVEL klog_lvl;
@@ -324,6 +331,8 @@ struct mhi_controller {
  * @ul_chan_id: MHI channel id for UL transfer
  * @dl_chan_id: MHI channel id for DL transfer
  * @tiocm: Device current terminal settings
+ * @early_notif: This device needs an early notification in case of error
+ * with external modem.
  * @dev_vote: Keep external device in active state
  * @bus_vote: Keep physical bus (pci, spi) in active state
  * @priv: Driver private data
@@ -340,6 +349,7 @@ struct mhi_device {
 	int ul_event_id;
 	int dl_event_id;
 	u32 tiocm;
+	bool early_notif;
 	const struct mhi_device_id *id;
 	const char *chan_name;
 	struct mhi_controller *mhi_cntrl;
@@ -525,6 +535,22 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
 
 /**
+ * mhi_pause_transfer - Pause the current transfer
+ * Moves both UL and DL channels to STOP state to halt
+ * pending transfers.
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_pause_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_resume_transfer - resume current transfer
+ * Moves both UL and DL channels to START state to
+ * resume transfer.
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_resume_transfer(struct mhi_device *mhi_dev);
+
+/**
  * mhi_get_no_free_descriptors - Get transfer ring length
  * Get # of TD available to queue buffers
  * @mhi_dev: Device associated with the channels
@@ -693,6 +719,14 @@ static inline bool mhi_is_active(struct mhi_device *mhi_dev)
 }
 
 /**
+ * mhi_control_error - MHI controller went into unrecoverable error state.
+ * Will transition MHI into Linkdown state. Do not call from atomic
+ * context.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_control_error(struct mhi_controller *mhi_cntrl);
+
+/**
  * mhi_debug_reg_dump - dump MHI registers for debug purpose
  * @mhi_cntrl: MHI controller
  */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a068d62..2d6695f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -937,6 +937,10 @@ static inline bool is_device_public_page(const struct page *page)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+	((unsigned int) page_ref_count(page) + 127u <= 127u)
+
 static inline void get_page(struct page *page)
 {
 	page = compound_head(page);
@@ -944,10 +948,19 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_refcount.
 	 */
-	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
 	page_ref_inc(page);
 }
 
+static inline __must_check bool try_get_page(struct page *page)
+{
+	page = compound_head(page);
+	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+		return false;
+	page_ref_inc(page);
+	return true;
+}
+
 static inline void put_page(struct page *page)
 {
 	page = compound_head(page);
@@ -2295,6 +2308,7 @@ extern void zone_pcp_reset(struct zone *zone);
 
 /* page_alloc.c */
 extern int min_free_kbytes;
+extern int watermark_boost_factor;
 extern int watermark_scale_factor;
 
 /* nommu.c */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 7ebd1f99..3d35f18 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -399,7 +399,6 @@ struct mmc_card {
 	unsigned int		bouncesz;	/* Bounce buffer size */
 	struct notifier_block   reboot_notify;
 	enum mmc_pon_type	pon_type;
-	u8 cached_ext_csd;
 	struct mmc_bkops_info bkops;
 };
 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9367087..51141fc 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -677,8 +677,6 @@ static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
 
 extern int mmc_resume_bus(struct mmc_host *host);
 
-extern int mmc_resume_bus(struct mmc_host *host);
-
 void mmc_detect_change(struct mmc_host *, unsigned long delay);
 void mmc_request_done(struct mmc_host *, struct mmc_request *);
 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4b93de1..03c4cf7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -271,9 +271,10 @@ enum zone_watermarks {
 	NR_WMARK
 };
 
-#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
+#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
+#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
+#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
 
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
@@ -364,7 +365,8 @@ struct zone {
 	/* Read-mostly fields */
 
 	/* zone watermarks, access with *_wmark_pages(zone) macros */
-	unsigned long watermark[NR_WMARK];
+	unsigned long _watermark[NR_WMARK];
+	unsigned long watermark_boost;
 
 	unsigned long nr_reserved_highatomic;
 
@@ -490,6 +492,8 @@ struct zone {
 	unsigned long		compact_cached_free_pfn;
 	/* pfn where async and sync compaction migration scanner should start */
 	unsigned long		compact_cached_migrate_pfn[2];
+	unsigned long		compact_init_migrate_pfn;
+	unsigned long		compact_init_free_pfn;
 #endif
 
 #ifdef CONFIG_COMPACTION
@@ -530,6 +534,12 @@ enum pgdat_flags {
 	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 };
 
+enum zone_flags {
+	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
+					 * Cleared when kswapd is woken.
+					 */
+};
+
 static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
 	return zone->zone_start_pfn + zone->spanned_pages;
@@ -893,6 +903,8 @@ static inline int is_highmem(struct zone *zone)
 struct ctl_table;
 int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
+int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
+					void __user *, size_t *, loff_t *);
 int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
diff --git a/include/linux/module.h b/include/linux/module.h
index 2755a57..435aee9 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -22,6 +22,7 @@
 #include <linux/error-injection.h>
 #include <linux/cfi.h>
 #include <linux/tracepoint-defs.h>
+#include <linux/cfi.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
@@ -688,6 +689,23 @@ static inline bool is_module_text_address(unsigned long addr)
 	return false;
 }
 
+static inline bool within_module_core(unsigned long addr,
+				      const struct module *mod)
+{
+	return false;
+}
+
+static inline bool within_module_init(unsigned long addr,
+				      const struct module *mod)
+{
+	return false;
+}
+
+static inline bool within_module(unsigned long addr, const struct module *mod)
+{
+	return false;
+}
+
 /* Get/put a kernel symbol (calls should be symmetric) */
 #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
 #define symbol_put(x) do { } while (0)
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 1ff21c1..6645650 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -87,6 +87,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt);
 
 struct path;
 extern struct vfsmount *clone_private_mount(const struct path *path);
+extern int __mnt_want_write(struct vfsmount *);
+extern void __mnt_drop_write(struct vfsmount *);
 
 struct file_system_type;
 extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
index 0552599..2b6ea00 100644
--- a/include/linux/msm_adreno_devfreq.h
+++ b/include/linux/msm_adreno_devfreq.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef MSM_ADRENO_DEVFREQ_H
@@ -52,8 +52,8 @@ struct devfreq_msm_adreno_tz_data {
 		u32 width;
 		u32 *up;
 		u32 *down;
-		u32 *p_up;
-		u32 *p_down;
+		s32 *p_up;
+		s32 *p_down;
 		unsigned int *index;
 		uint64_t *ib;
 	} bus;
@@ -82,9 +82,9 @@ struct msm_busmon_extended_profile {
 	struct devfreq_dev_profile profile;
 };
 
-#ifdef CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON
-int devfreq_vbif_update_bw(unsigned long ib, unsigned long ab);
-int devfreq_vbif_register_callback(void *callback);
-#endif
+typedef void(*getbw_func)(unsigned long *, unsigned long *, void *);
+
+int devfreq_vbif_update_bw(void);
+void devfreq_vbif_register_callback(getbw_func func, void *data);
 
 #endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 31948a0..da30c1e 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -97,7 +97,7 @@ enum gsi_intr_type {
  * @rel_clk_cb: callback to release peripheral clock
  * @user_data:  cookie used for notifications
  * @clk_status_cb: callback to update the current msm bus clock vote
- *
+ * @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
  * All the callbacks are in interrupt context
  *
  */
@@ -120,6 +120,7 @@ struct gsi_per_props {
 	int (*rel_clk_cb)(void *user_data);
 	void *user_data;
 	int (*clk_status_cb)(void);
+	bool skip_ieob_mask_wa;
 };
 
 enum gsi_evt_err {
@@ -703,6 +704,74 @@ struct __packed gsi_wdi_channel_scratch {
 };
 
 /**
+ * gsi_wdi2_channel_scratch_lito - WDI protocol SW config area of
+ * channel scratch
+ *
+ * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
+ * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
+ * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
+ *                                  N is the number of packets that IPA will
+ *                                  process before Wifi transfer ring Ri will
+ *                                  be updated.
+ * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
+ *           for MCS. Write for SW.
+ * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
+ *                           of the corresponding endpoint in 4B words from IPA
+ *                           base address. Read only field for MCS.
+ *                           Write for SW.
+ * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
+ *                  and before starting it. Both FW_DESC and VDEV_ID are part
+ *                  of a scratch word that is Read/Write for both MCS and SW.
+ *                  To avoid race conditions, SW should not update this field
+ *                  after starting the channel.
+ * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
+ *                  and before starting it. After Start, this is a Read only
+ *                  field for SW.
+ * @update_ri_moderation_counter: This field is incremented with each TRE
+ *                                processed in MCS.
+ * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
+ *                               status after MCS submitted an inline immediate
+ *                               command to update the metadata. It allows MCS
+ *                               to know that it has to retry sending the TRE
+ *                               to IPA.
+ * @outstanding_tlvs_counter: It is the count of outstanding TLVs submitted to
+ *                           IPA by MCS and waiting for AOS completion from IPA.
+ * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
+ *                     valid and contains the packet length of the TRE that
+ *                     needs to be submitted to IPA.
+ * @resv1: reserved bits.
+ * @pkt_comp_count: It is incremented on each AOS received. When event ring
+ *                  Write index is updated, it is decremented by the same
+ *                  amount.
+ * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
+ *                        the current stage of processing of the stop within MCS
+ * @resv2: reserved bits.
+ * @wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
+ *                          allocated. It is updated to the current value of SW
+ *                          QMAP ID that is being written by MCS to the IPA
+ *                          metadata register.
+ */
+struct __packed gsi_wdi2_channel_scratch_new {
+	uint32_t wifi_rx_ri_addr_low;
+	uint32_t wifi_rx_ri_addr_high;
+	uint32_t update_ri_moderation_threshold:5;
+	uint32_t qmap_id:8;
+	uint32_t resv1:3;
+	uint32_t endp_metadatareg_offset:16;
+	uint32_t wdi_rx_vdev_id:8;
+	uint32_t wdi_rx_fw_desc:8;
+	uint32_t update_ri_moderation_counter:6;
+	uint32_t wdi_rx_tre_proc_in_progress:1;
+	uint32_t resv4:1;
+	uint32_t outstanding_tlvs_counter:8;
+	uint32_t wdi_rx_pkt_length:16;
+	uint32_t resv2:2;
+	uint32_t pkt_comp_count:11;
+	uint32_t stop_in_progress_stm:3;
+	uint32_t resv3:16;
+	uint32_t wdi_rx_qmap_id_internal:16;
+};
+/**
 * gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
 * channel scratch
 * @assert_bit_40: Valid only for non-host channels.
@@ -807,6 +876,7 @@ union __packed gsi_channel_scratch {
 	struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
 	struct __packed gsi_wdi3_channel_scratch wdi3;
 	struct __packed gsi_mhip_channel_scratch mhip;
+	struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -838,6 +908,30 @@ union __packed gsi_wdi_channel_scratch3_reg {
 };
 
 /**
+ * gsi_wdi2_channel_scratch2 - WDI protocol SW config area of
+ * channel scratch2
+ */
+
+struct __packed gsi_wdi2_channel_scratch2 {
+	uint32_t update_ri_moderation_threshold:5;
+	uint32_t qmap_id:8;
+	uint32_t resv1:3;
+	uint32_t endp_metadatareg_offset:16;
+};
+
+/**
+ * gsi_wdi_channel_scratch2_reg - channel scratch2 SW config area
+ *
+ */
+
+union __packed gsi_wdi2_channel_scratch2_reg {
+	struct __packed gsi_wdi2_channel_scratch2 wdi;
+	struct __packed {
+		uint32_t word1;
+	} data;
+};
+
+/**
  * gsi_mhi_evt_scratch - MHI protocol SW config area of
  * event scratch
  */
@@ -1195,6 +1289,19 @@ int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
 		union __packed gsi_wdi_channel_scratch3_reg val);
 
 /**
+ * gsi_write_channel_scratch2_reg - Peripheral should call this function to
+ * write to the scratch2 reg area of the channel context
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @val:       Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi2_channel_scratch2_reg val);
+
+/**
  * gsi_read_channel_scratch - Peripheral should call this function to
  * read to the scratch area of the channel context
  *
@@ -1655,6 +1762,12 @@ static inline int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
+static inline int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi2_channel_scratch2_reg val)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
 static inline int gsi_read_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch *val)
 {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c765496..14678fd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1456,6 +1456,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1488,6 +1489,7 @@ enum netdev_priv_flags {
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
+	IFF_LIVE_RENAME_OK		= 1<<30,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1519,6 +1521,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
 
 /**
  *	struct net_device - The DEVICE structure.
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447..0389fe0 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -55,6 +55,8 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
 int nvmem_device_cell_write(struct nvmem_device *nvmem,
 			    struct nvmem_cell_info *info, void *buf);
 
+const char *nvmem_dev_name(struct nvmem_device *nvmem);
+
 #else
 
 static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
@@ -143,6 +145,12 @@ static inline int nvmem_device_write(struct nvmem_device *nvmem,
 {
 	return -ENOSYS;
 }
+
+static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_NVMEM */
 
 #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6a..c4b8430 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -67,6 +67,25 @@ struct nvmem_config {
 	struct device		*base_dev;
 };
 
+/**
+ * struct nvmem_cell_table - NVMEM cell definitions for given provider
+ *
+ * @nvmem_name:		Provider name.
+ * @cells:		Array of cell definitions.
+ * @ncells:		Number of cell definitions in the array.
+ * @node:		List node.
+ *
+ * This structure together with related helper functions is provided for users
+ * that can't access the nvmem provider structure but wish to register
+ * cell definitions for it e.g. board files registering an EEPROM device.
+ */
+struct nvmem_cell_table {
+	const char		*nvmem_name;
+	const struct nvmem_cell_info	*cells;
+	size_t			ncells;
+	struct list_head	node;
+};
+
 #if IS_ENABLED(CONFIG_NVMEM)
 
 struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
@@ -77,9 +96,9 @@ struct nvmem_device *devm_nvmem_register(struct device *dev,
 
 int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
 
-int nvmem_add_cells(struct nvmem_device *nvmem,
-		    const struct nvmem_cell_info *info,
-		    int ncells);
+void nvmem_add_cell_table(struct nvmem_cell_table *table);
+void nvmem_del_cell_table(struct nvmem_cell_table *table);
+
 #else
 
 static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
@@ -105,12 +124,8 @@ devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
 
 }
 
-static inline int nvmem_add_cells(struct nvmem_device *nvmem,
-				  const struct nvmem_cell_info *info,
-				  int ncells)
-{
-	return -ENOSYS;
-}
+static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
 
 #endif /* CONFIG_NVMEM */
 #endif  /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 40e58b0e..f2c80cc 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -236,8 +236,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
 static inline u64 of_read_number(const __be32 *cell, int size)
 {
 	u64 r = 0;
-	while (size--)
-		r = (r << 32) | be32_to_cpu(*(cell++));
+	for (; size--; cell++)
+		r = (r << 32) | be32_to_cpu(*cell);
 	return r;
 }
 
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 40b48e2..15eb85d 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -36,6 +36,12 @@
 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
 #define type_min(T) ((T)((T)-type_max(T)-(T)1))
 
+/*
+ * Avoids triggering -Wtype-limits compilation warning,
+ * while using unsigned data types to check a < 0.
+ */
+#define is_non_negative(a) ((a) > 0 || (a) == 0)
+#define is_negative(a) (!(is_non_negative(a)))
 
 #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
@@ -227,10 +233,10 @@
 	typeof(d) _d = d;						\
 	u64 _a_full = _a;						\
 	unsigned int _to_shift =					\
-		_s >= 0 && _s < 8 * sizeof(*d) ? _s : 0;		\
+		is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0;	\
 	*_d = (_a_full << _to_shift);					\
-	(_to_shift != _s || *_d < 0 || _a < 0 ||			\
-		(*_d >> _to_shift) != _a);				\
+	(_to_shift != _s || is_negative(*_d) || is_negative(_a) ||	\
+	(*_d >> _to_shift) != _a);					\
 })
 
 /**
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 186edf9..dbd280f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,8 +399,7 @@ extern int read_cache_pages(struct address_space *mapping,
 static inline struct page *read_mapping_page(struct address_space *mapping,
 				pgoff_t index, void *data)
 {
-	filler_t *filler = mapping->a_ops->readpage;
-	return read_cache_page(mapping, index, filler, data);
+	return read_cache_page(mapping, index, NULL, data);
 }
 
 /*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 61bfac3..f00ce74 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -347,6 +347,8 @@ struct pci_dev {
 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
 						      controlled exclusively by
 						      user sysfs */
+	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
+						   bit manually */
 	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
 
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
index 2849a93..f6c0df2 100644
--- a/include/linux/pfk.h
+++ b/include/linux/pfk.h
@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef PFK_H_
 #define PFK_H_
 
 #include <linux/bio.h>
+#include <crypto/ice.h>
 
 struct ice_crypto_setting;
 
@@ -25,15 +26,17 @@ struct blk_encryption_key {
 	u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
 };
 
-int pfk_load_key_start(const struct bio *bio,
+int pfk_load_key_start(const struct bio *bio, struct ice_device *ice_dev,
 			struct ice_crypto_setting *ice_setting,
 				bool *is_pfe, bool async);
-int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
-int pfk_remove_key(const unsigned char *key, size_t key_size);
+int pfk_load_key_end(const struct bio *bio, struct ice_device *ice_dev,
+			bool *is_pfe);
 int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
 		const unsigned char *salt, size_t salt_size);
 bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
-void pfk_clear_on_reset(void);
+void pfk_clear_on_reset(struct ice_device *ice_dev);
+int pfk_initialize_key_table(struct ice_device *ice_dev);
+int pfk_remove(struct ice_device *ice_dev);
 
 #else
 static inline int pfk_load_key_start(const struct bio *bio,
@@ -47,11 +50,6 @@ static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
 	return -ENODEV;
 }
 
-static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
-{
-	return -ENODEV;
-}
-
 static inline bool pfk_allow_merge_bio(const struct bio *bio1,
 		const struct bio *bio2)
 {
@@ -67,6 +65,15 @@ static inline int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
 static inline void pfk_clear_on_reset(void)
 {}
 
+static inline int pfk_initialize_key_table(struct ice_device *ice_dev)
+{
+	return -ENODEV;
+}
+static inline int pfk_remove(struct ice_device *ice_dev)
+{
+	return -ENODEV;
+}
+
 #endif /* CONFIG_PFK */
 
 #endif /* PFK_H */
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 1616dbc..35f92ccc 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -33,7 +33,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
 
 int ufs_qcom_phy_start_serdes(struct phy *generic_phy);
 int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy);
-int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B);
+int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B,
+			       bool is_g4);
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
 int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl);
 void ufs_qcom_phy_save_controller_version(struct phy *phy,
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 3ecd7ea..7897a3c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -108,18 +108,20 @@ struct pipe_buf_operations {
 	/*
 	 * Get a reference to the pipe buffer.
 	 */
-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
 };
 
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe:	the pipe that the buffer belongs to
  * @buf:	the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
  */
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
 				struct pipe_buffer *buf)
 {
-	buf->ops->get(pipe, buf);
+	return buf->ops->get(pipe, buf);
 }
 
 /**
@@ -178,9 +180,10 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
 
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
index 3ab8922..7a37ac2 100644
--- a/include/linux/platform_data/x86/clk-pmc-atom.h
+++ b/include/linux/platform_data/x86/clk-pmc-atom.h
@@ -35,10 +35,13 @@ struct pmc_clk {
  *
  * @base:	PMC clock register base offset
  * @clks:	pointer to set of registered clocks, typically 0..5
+ * @critical:	flag to indicate if firmware enabled pmc_plt_clks
+ *		should be marked as critical or not
  */
 struct pmc_clk_data {
 	void __iomem *base;
 	const struct pmc_clk *clks;
+	bool critical;
 };
 
 #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
diff --git a/include/linux/pmic-voter.h b/include/linux/pmic-voter.h
index 9a783ce..ea39053 100644
--- a/include/linux/pmic-voter.h
+++ b/include/linux/pmic-voter.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __PMIC_VOTER_H
@@ -27,6 +28,8 @@ int get_effective_result_locked(struct votable *votable);
 const char *get_effective_client(struct votable *votable);
 const char *get_effective_client_locked(struct votable *votable);
 int vote(struct votable *votable, const char *client_str, bool state, int val);
+int vote_override(struct votable *votable, const char *override_client,
+		  bool state, int val);
 int rerun_election(struct votable *votable);
 struct votable *find_votable(const char *name);
 struct votable *create_votable(const char *name,
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8709be4..ce7486c 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -336,6 +336,11 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_THERM_ICL_LIMIT,
 	POWER_SUPPLY_PROP_DC_RESET,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_LIMIT,
+	POWER_SUPPLY_PROP_REAL_CAPACITY,
+	POWER_SUPPLY_PROP_FORCE_MAIN_ICL,
+	POWER_SUPPLY_PROP_FORCE_MAIN_FCC,
+	POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL,
+	POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 7ea1096..3bcbaaa 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -72,6 +72,7 @@ struct se_geni_rsc {
 };
 
 #define PINCTRL_DEFAULT	"default"
+#define PINCTRL_ACTIVE	"active"
 #define PINCTRL_SLEEP	"sleep"
 
 #define KHz(freq) (1000 * (freq))
@@ -116,13 +117,13 @@ struct se_geni_rsc {
 #define SE_GENI_IOS			(0x908)
 #define SE_GENI_M_GP_LENGTH		(0x910)
 #define SE_GENI_S_GP_LENGTH		(0x914)
-#define GENI_I3C_IBI_LEGACY		(0xA9c)
 #define SE_GSI_EVENT_EN			(0xE18)
 #define SE_IRQ_EN			(0xE1C)
 #define SE_HW_PARAM_0			(0xE24)
 #define SE_HW_PARAM_1			(0xE28)
 #define SE_DMA_GENERAL_CFG		(0xE30)
 #define SE_DMA_DEBUG_REG0		(0xE40)
+#define QUPV3_HW_VER			(0x4)
 
 /* GENI_OUTPUT_CTRL fields */
 #define DEFAULT_IO_OUTPUT_CTRL_MSK	(GENMASK(6, 0))
@@ -260,10 +261,6 @@ struct se_geni_rsc {
 #define GENI_M_EVENT_EN		(BIT(2))
 #define GENI_S_EVENT_EN		(BIT(3))
 
-/* GENI_I3C_IBI_LEGACY fields */
-#define I3C_IBI_LEGACY_EN	(BIT(0))
-#define I3C_IBI_LEGACY_PORTS_EN	(BIT(1))
-
 /* SE_GENI_IOS fields */
 #define IO2_DATA_IN		(BIT(1))
 #define RX_DATA_IN		(BIT(0))
@@ -279,6 +276,7 @@ struct se_geni_rsc {
 #define TX_FIFO_WIDTH_SHFT	(24)
 #define TX_FIFO_DEPTH_MSK	(GENMASK(21, 16))
 #define TX_FIFO_DEPTH_SHFT	(16)
+#define GEN_I3C_IBI_CTRL	(BIT(7))
 
 /* SE_HW_PARAM_1 fields */
 #define RX_FIFO_WIDTH_MSK	(GENMASK(29, 24))
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index e63799a..3734cd8 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -14,6 +14,7 @@ struct device;
 #define SYS_POWER_OFF	0x0003	/* Notify of system power off */
 
 enum reboot_mode {
+	REBOOT_UNDEFINED = -1,
 	REBOOT_COLD = 0,
 	REBOOT_WARM,
 	REBOOT_HARD,
@@ -21,6 +22,7 @@ enum reboot_mode {
 	REBOOT_GPIO,
 };
 extern enum reboot_mode reboot_mode;
+extern enum reboot_mode panic_reboot_mode;
 
 enum reboot_type {
 	BOOT_TRIPLE	= 't',
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6e..4b3cfa5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -42,6 +42,10 @@ struct rw_semaphore {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+	/* count for waiters preempt to queue in wait list */
+	long m_count;
+#endif
 };
 
 /*
@@ -83,12 +87,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+#define __RWSEM_PRIO_AWARE_INIT(lockname)	.m_count = 0
+#else
+#define __RWSEM_PRIO_AWARE_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name)				\
 	{ __RWSEM_INIT_COUNT(name),				\
 	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
 	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
 	  __RWSEM_OPT_INIT(name)				\
-	  __RWSEM_DEP_MAP_INIT(name) }
+	  __RWSEM_DEP_MAP_INIT(name),				\
+	  __RWSEM_PRIO_AWARE_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d839ea8..c57b77d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -46,6 +46,7 @@ struct pid_namespace;
 struct pipe_inode_info;
 struct rcu_node;
 struct reclaim_state;
+struct capture_control;
 struct robust_list_head;
 struct sched_attr;
 struct sched_param;
@@ -272,6 +273,7 @@ extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
 extern void io_schedule(void);
+extern int set_task_boost(int boost, u64 period);
 
 /**
  * struct prev_cputime - snapshot of system and user cputime
@@ -781,7 +783,11 @@ struct task_struct {
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
 	struct sched_rt_entity		rt;
-	u64 last_sleep_ts;
+	u64				 last_sleep_ts;
+
+	int				boost;
+	u64				boost_period;
+	u64				boost_expires;
 #ifdef CONFIG_SCHED_WALT
 	struct ravg ravg;
 	/*
@@ -1120,6 +1126,9 @@ struct task_struct {
 
 	struct io_context		*io_context;
 
+#ifdef CONFIG_COMPACTION
+	struct capture_control		*capture_control;
+#endif
 	/* Ptrace state: */
 	unsigned long			ptrace_message;
 	siginfo_t			*last_siginfo;
diff --git a/include/linux/sched/core_ctl.h b/include/linux/sched/core_ctl.h
index ae2d5ca..b71b42a 100644
--- a/include/linux/sched/core_ctl.h
+++ b/include/linux/sched/core_ctl.h
@@ -1,14 +1,19 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __CORE_CTL_H
 #define __CORE_CTL_H
 
+#define MAX_CPUS_PER_CLUSTER 6
+#define MAX_CLUSTERS 3
+
 struct core_ctl_notif_data {
 	unsigned int nr_big;
 	unsigned int coloc_load_pct;
+	unsigned int ta_util_pct[MAX_CLUSTERS];
+	unsigned int cur_cap_pct[MAX_CLUSTERS];
 };
 
 #ifdef CONFIG_SCHED_CORE_CTL
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aebb370..cebb79f 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
 		__mmdrop(mm);
 }
 
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally if the mmap_sem is hold for reading
+ * there's no need of this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check can be removed, after
+ * the coredump code will hold the mmap_sem for writing before
+ * invoking the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+	return likely(!mm->core_state);
+}
+
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1be3572..660d78c 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -417,10 +417,20 @@ static inline void set_restore_sigmask(void)
 	set_thread_flag(TIF_RESTORE_SIGMASK);
 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
 static inline void clear_restore_sigmask(void)
 {
 	clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
 static inline bool test_restore_sigmask(void)
 {
 	return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -438,6 +448,10 @@ static inline void set_restore_sigmask(void)
 	current->restore_sigmask = true;
 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	tsk->restore_sigmask = false;
+}
 static inline void clear_restore_sigmask(void)
 {
 	current->restore_sigmask = false;
@@ -446,6 +460,10 @@ static inline bool test_restore_sigmask(void)
 {
 	return current->restore_sigmask;
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	return tsk->restore_sigmask;
+}
 static inline bool test_and_clear_restore_sigmask(void)
 {
 	if (!current->restore_sigmask)
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 3423919..abe28d5 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -13,8 +13,6 @@ extern void generic_sched_clock_init(void);
 
 extern void sched_clock_register(u64 (*read)(void), int bits,
 				 unsigned long rate);
-extern int sched_clock_suspend(void);
-extern void sched_clock_resume(void);
 #else
 static inline void generic_sched_clock_init(void) { }
 
@@ -22,8 +20,6 @@ static inline void sched_clock_register(u64 (*read)(void), int bits,
 					unsigned long rate)
 {
 }
-static inline int sched_clock_suspend(void) { return 0; }
-static inline void sched_clock_resume(void) { }
 #endif
 
 #endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 820903ce..28baccb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1333,10 +1333,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
 	struct ubuf_info *uarg = skb_zcopy(skb);
 
 	if (uarg) {
-		if (uarg->callback == sock_zerocopy_callback) {
+		if (skb_zcopy_is_nouarg(skb)) {
+			/* no notification callback */
+		} else if (uarg->callback == sock_zerocopy_callback) {
 			uarg->zerocopy = uarg->zerocopy && zerocopy;
 			sock_zerocopy_put(uarg);
-		} else if (!skb_zcopy_is_nouarg(skb)) {
+		} else {
 			uarg->callback(uarg, zerocopy);
 		}
 
@@ -2587,7 +2589,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	if (likely(!skb_zcopy(skb)))
 		return 0;
-	if (skb_uarg(skb)->callback == sock_zerocopy_callback)
+	if (!skb_zcopy_is_nouarg(skb) &&
+	    skb_uarg(skb)->callback == sock_zerocopy_callback)
 		return 0;
 	return skb_copy_ubufs(skb, gfp_mask);
 }
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index d0884b5..9d1bc65 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -29,7 +29,7 @@ struct smpboot_thread_data;
  * @thread_comm:	The base name of the thread
  */
 struct smp_hotplug_thread {
-	struct task_struct __percpu	**store;
+	struct task_struct		* __percpu *store;
 	struct list_head		list;
 	int				(*thread_should_run)(unsigned int cpu);
 	void				(*thread_fn)(unsigned int cpu);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 9276cd5..f8444c9 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -129,6 +129,8 @@ struct thermal_zone_device_ops {
 	int (*notify) (struct thermal_zone_device *, int,
 		       enum thermal_trip_type);
 	bool (*is_wakeable)(struct thermal_zone_device *);
+	int (*set_polling_delay)(struct thermal_zone_device *, int);
+	int (*set_passive_delay)(struct thermal_zone_device *, int);
 };
 
 struct thermal_cooling_device_ops {
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 05634af..4a45aea 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -41,6 +41,17 @@ struct itimerspec64 {
 #define KTIME_MAX			((s64)~((u64)1 << 63))
 #define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
 
+/*
+ * Limits for settimeofday():
+ *
+ * To prevent setting the time close to the wraparound point time setting
+ * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
+ * should be really sufficient, which means the cutoff is 2232. At that
+ * point the cutoff is just a small part of the larger problem.
+ */
+#define TIME_UPTIME_SEC_MAX		(30LL * 365 * 24 *3600)
+#define TIME_SETTOD_SEC_MAX		(KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
+
 static inline int timespec64_equal(const struct timespec64 *a,
 				   const struct timespec64 *b)
 {
@@ -108,6 +119,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
 	return true;
 }
 
+static inline bool timespec64_valid_settod(const struct timespec64 *ts)
+{
+	if (!timespec64_valid(ts))
+		return false;
+	/* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
+	if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
+		return false;
+	return true;
+}
+
 /**
  * timespec64_to_ns - Convert timespec64 to nanoseconds
  * @ts:		pointer to the timespec64 variable to be converted
diff --git a/include/linux/usb.h b/include/linux/usb.h
index dbbf563..eb51a27 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
  * @dev: driver model's view of this device
  * @usb_dev: if an interface is bound to the USB major, this will point
  *	to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
  * @reset_ws: Used for scheduling resets from atomic context.
  * @resetting_device: USB core reset the device, so use alt setting 0 as
  *	current; needs bandwidth alloc after reset.
@@ -257,7 +256,6 @@ struct usb_interface {
 
 	struct device dev;		/* interface specific device info */
 	struct device *usb_dev;
-	atomic_t pm_usage_cnt;		/* usage counter for autosuspend */
 	struct work_struct reset_ws;	/* for resets in atomic context */
 };
 #define	to_usb_interface(d) container_of(d, struct usb_interface, dev)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ca85428..cee63ad 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -111,7 +111,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
 		SWAP_RA_HIT,
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-		SPECULATIVE_PGFAULT,
+		SPECULATIVE_PGFAULT_ANON,
+		SPECULATIVE_PGFAULT_FILE,
 #endif
 		NR_VM_EVENT_ITEMS
 };
diff --git a/include/media/msm_cvp_vidc.h b/include/media/msm_cvp_vidc.h
index c3f4a41..b41295c 100644
--- a/include/media/msm_cvp_vidc.h
+++ b/include/media/msm_cvp_vidc.h
@@ -19,6 +19,8 @@
  * @is_downscale:   is downscaling enabled in pipeline
  * @fps:   frame rate
  * @op_rate:   stream operation rate
+ * @colorfmt:   format based on msm_media_info.h
+ * @reserved[16]: for future use
  */
 struct cvp_kmd_usecase_desc {
 	unsigned int fullres_width;
@@ -28,8 +30,13 @@ struct cvp_kmd_usecase_desc {
 	unsigned int is_downscale;
 	unsigned int fps;
 	unsigned int op_rate;
+	unsigned int colorfmt;
+	int reserved[16];
 };
 
+#define VIDEO_NONREALTIME 1
+#define VIDEO_REALTIME 5
+
 #ifdef CONFIG_MSM_CVP_V4L2
 void *msm_cvp_open(int core_id, int session_type);
 int msm_cvp_close(void *instance);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 7a52923..278613a 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -551,6 +551,7 @@ struct vb2_queue {
 	unsigned int			start_streaming_called:1;
 	unsigned int			error:1;
 	unsigned int			waiting_for_buffers:1;
+	unsigned int			waiting_in_dqbuf:1;
 	unsigned int			is_multiplanar:1;
 	unsigned int			is_output:1;
 	unsigned int			copy_timestamp:1;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index cdd9f1fe..845d947 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -270,6 +270,7 @@ enum {
 	HCI_FORCE_BREDR_SMP,
 	HCI_FORCE_STATIC_ADDR,
 	HCI_LL_RPA_RESOLUTION,
+	HCI_CMD_PENDING,
 
 	__HCI_NUM_FLAGS,
 };
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 1dfb750..cc2d0c3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -182,6 +182,9 @@ struct adv_info {
 
 #define HCI_MAX_SHORT_NAME_LENGTH	10
 
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE		7
+
 /* Default LE RPA expiry time, 15 minutes */
 #define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)
 
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 4dbfcbd..892096b 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -17,9 +17,11 @@
 
 enum cnss_bus_width_type {
 	CNSS_BUS_WIDTH_NONE,
+	CNSS_BUS_WIDTH_IDLE,
 	CNSS_BUS_WIDTH_LOW,
 	CNSS_BUS_WIDTH_MEDIUM,
-	CNSS_BUS_WIDTH_HIGH
+	CNSS_BUS_WIDTH_HIGH,
+	CNSS_BUS_WIDTH_VERY_HIGH
 };
 
 enum cnss_platform_cap_flag {
@@ -170,7 +172,6 @@ extern int cnss_get_fw_files_for_target(struct device *dev,
 					u32 target_type, u32 target_version);
 extern int cnss_get_platform_cap(struct device *dev,
 				 struct cnss_platform_cap *cap);
-extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev);
 extern struct iommu_domain *cnss_smmu_get_domain(struct device *dev);
 extern int cnss_smmu_map(struct device *dev,
 			 phys_addr_t paddr, uint32_t *iova_addr, size_t size);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 1662cbc..b02bf73 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -77,8 +77,8 @@ struct inet_frag_queue {
 	struct timer_list	timer;
 	spinlock_t		lock;
 	refcount_t		refcnt;
-	struct sk_buff		*fragments;  /* Used in IPv6. */
-	struct rb_root		rb_fragments; /* Used in IPv4. */
+	struct sk_buff		*fragments;  /* used in 6lowpan IPv6. */
+	struct rb_root		rb_fragments; /* Used in IPv4/IPv6. */
 	struct sk_buff		*fragments_tail;
 	struct sk_buff		*last_run_head;
 	ktime_t			stamp;
@@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 
 extern const u8 ip_frag_ecn_table[16];
 
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK	0
+#define IPFRAG_DUP	1
+#define IPFRAG_OVERLAP	2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
 #endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2d31e22..983f7a1 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -171,7 +171,8 @@ struct fib6_info {
 					dst_nocount:1,
 					dst_nopolicy:1,
 					dst_host:1,
-					unused:3;
+					fib6_destroying:1,
+					unused:2;
 
 	struct fib6_nh			fib6_nh;
 	struct rcu_head			rcu;
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 6ced1e6..28aa9b3 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 
 	/* Don't send error if the first segment did not arrive. */
-	head = fq->q.fragments;
-	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+		goto out;
+
+	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
+	 * pull the head out of the tree in order to be able to
+	 * deal with head->dev.
+	 */
+	head = inet_frag_pull_head(&fq->q);
+	if (!head)
 		goto out;
 
 	head->dev = dev;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 7e01231..f45141b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -313,6 +313,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 				 gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
 static inline void
 nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
 {
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0f39ac4..f2be5d0 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
  * 	@dtype: data type (verdict or numeric type defined by userspace)
  * 	@objtype: object type (see NFT_OBJECT_* definitions)
  * 	@size: maximum set size
+ *	@use: number of rules references to this set
  * 	@nelems: number of elements
  * 	@ndeact: number of deactivated elements queued for removal
  *	@timeout: default timeout value in jiffies
@@ -407,6 +408,7 @@ struct nft_set {
 	u32				dtype;
 	u32				objtype;
 	u32				size;
+	u32				use;
 	atomic_t			nelems;
 	u32				ndeact;
 	u64				timeout;
@@ -416,7 +418,8 @@ struct nft_set {
 	unsigned char			*udata;
 	/* runtime data below here */
 	const struct nft_set_ops	*ops ____cacheline_aligned;
-	u16				flags:14,
+	u16				flags:13,
+					bound:1,
 					genmask:2;
 	u8				klen;
 	u8				dlen;
@@ -466,10 +469,15 @@ struct nft_set_binding {
 	u32				flags;
 };
 
+enum nft_trans_phase;
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase);
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 		       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding);
+			  struct nft_set_binding *binding, bool commit);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
 
 /**
  *	enum nft_set_extensions - set extension type IDs
@@ -689,10 +697,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
 	gcb->elems[gcb->head.cnt++] = elem;
 }
 
+struct nft_expr_ops;
 /**
  *	struct nft_expr_type - nf_tables expression type
  *
  *	@select_ops: function to select nft_expr_ops
+ *	@release_ops: release nft_expr_ops
  *	@ops: default ops, used when no select_ops functions is present
  *	@list: used internally
  *	@name: Identifier
@@ -705,6 +715,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
 struct nft_expr_type {
 	const struct nft_expr_ops	*(*select_ops)(const struct nft_ctx *,
 						       const struct nlattr * const tb[]);
+	void				(*release_ops)(const struct nft_expr_ops *ops);
 	const struct nft_expr_ops	*ops;
 	struct list_head		list;
 	const char			*name;
@@ -718,13 +729,22 @@ struct nft_expr_type {
 #define NFT_EXPR_STATEFUL		0x1
 #define NFT_EXPR_GC			0x2
 
+enum nft_trans_phase {
+	NFT_TRANS_PREPARE,
+	NFT_TRANS_ABORT,
+	NFT_TRANS_COMMIT,
+	NFT_TRANS_RELEASE
+};
+
 /**
  *	struct nft_expr_ops - nf_tables expression operations
  *
  *	@eval: Expression evaluation function
  *	@size: full expression size, including private data size
  *	@init: initialization function
- *	@destroy: destruction function
+ *	@activate: activate expression in the next generation
+ *	@deactivate: deactivate expression in next generation
+ *	@destroy: destruction function, called after synchronize_rcu
  *	@dump: function to dump parameters
  *	@type: expression type
  *	@validate: validate expression, called during loop detection
@@ -745,7 +765,8 @@ struct nft_expr_ops {
 	void				(*activate)(const struct nft_ctx *ctx,
 						    const struct nft_expr *expr);
 	void				(*deactivate)(const struct nft_ctx *ctx,
-						      const struct nft_expr *expr);
+						      const struct nft_expr *expr,
+						      enum nft_trans_phase phase);
 	void				(*destroy)(const struct nft_ctx *ctx,
 						   const struct nft_expr *expr);
 	void				(*destroy_clone)(const struct nft_ctx *ctx,
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 5a0714f..80f15b1 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 87499b6b..df5c69d 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -166,7 +166,7 @@ struct nci_conn_info {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NCI_HCI_MAX_PIPES          127
+#define NCI_HCI_MAX_PIPES          128
 
 struct nci_hci_gate {
 	u8 gate;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6640f84..6d5beac 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -105,7 +105,6 @@ enum sctp_verb {
 	SCTP_CMD_T1_RETRAN,	 /* Mark for retransmission after T1 timeout  */
 	SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
 	SCTP_CMD_SEND_MSG,	 /* Send the whole use message */
-	SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
 	SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
 	SCTP_CMD_SET_ASOC,	 /* Restore association context */
 	SCTP_CMD_LAST
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index ef8dd0d..56935bf 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
 
 static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
 {
-	return a->goto_chain->index;
+	return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
 }
 
 #endif /* __NET_TC_GACT_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index 0a769cf..c423b7d 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -317,7 +317,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-	return sk_fullsock(sk) &
+	return sk_fullsock(sk) &&
 	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
 	       &tls_validate_xmit_skb);
 #else
diff --git a/include/net/udp.h b/include/net/udp.h
index 47a1853..8772bc3 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -458,12 +458,19 @@ void udpv6_encap_enable(void);
 static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 					      struct sk_buff *skb, bool ipv4)
 {
+	netdev_features_t features = NETIF_F_SG;
 	struct sk_buff *segs;
 
+	/* Avoid csum recalculation by skb_segment unless userspace explicitly
+	 * asks for the final checksum values
+	 */
+	if (!inet_get_convert_csum(sk))
+		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
 	/* the GSO CB lays after the UDP one, no need to save and restore any
 	 * CB fragment
 	 */
-	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+	segs = __skb_gso_segment(skb, features, false);
 	if (unlikely(IS_ERR_OR_NULL(segs))) {
 		int segs_nr = skb_shinfo(skb)->gso_segs;
 
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 7161856..c2c10cc 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -34,7 +34,6 @@ struct xdp_umem {
 	u32 headroom;
 	u32 chunk_size_nohr;
 	struct user_struct *user;
-	struct pid *pid;
 	unsigned long address;
 	refcount_t users;
 	struct work_struct work;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 5e3daf5..4ddd2b1 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -295,7 +295,8 @@ struct xfrm_replay {
 };
 
 struct xfrm_if_cb {
-	struct xfrm_if	*(*decode_session)(struct sk_buff *skb);
+	struct xfrm_if	*(*decode_session)(struct sk_buff *skb,
+					   unsigned short family);
 };
 
 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -1430,6 +1431,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
 	return atomic_read(&x->tunnel_users);
 }
 
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+	switch (proto) {
+	case IPPROTO_AH:
+	case IPPROTO_ESP:
+	case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ROUTING:
+	case IPPROTO_DSTOPTS:
+#endif
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
 {
 	return (!userproto || proto == userproto ||
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index bab3c15..d6df17b 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -43,6 +43,8 @@ struct icnss_driver_ops {
 	int (*suspend_noirq)(struct device *dev);
 	int (*resume_noirq)(struct device *dev);
 	int (*uevent)(struct device *dev, struct icnss_uevent_data *uevent);
+	int (*idle_shutdown)(struct device *dev);
+	int (*idle_restart)(struct device *dev);
 };
 
 
@@ -140,4 +142,6 @@ extern bool icnss_is_rejuvenate(void);
 extern int icnss_trigger_recovery(struct device *dev);
 extern void icnss_block_shutdown(bool status);
 extern bool icnss_is_pdr(void);
+extern int icnss_idle_restart(struct device *dev);
+extern int icnss_idle_shutdown(struct device *dev);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index ba0b277..bc84bc8 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2017, 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __MSM_MEMORY_DUMP_H
@@ -77,7 +77,9 @@ enum msm_dump_data_ids {
 	MSM_DUMP_DATA_SCANDUMP = 0xEB,
 	MSM_DUMP_DATA_RPMH = 0xEC,
 	MSM_DUMP_DATA_TMC_ETF = 0xF0,
+	MSM_DUMP_DATA_TMC_ETF_SWAO = 0xF1,
 	MSM_DUMP_DATA_TMC_REG = 0x100,
+	MSM_DUMP_DATA_TMC_ETF_SWAO_REG = 0x102,
 	MSM_DUMP_DATA_LOG_BUF = 0x110,
 	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
 	MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130,
@@ -114,12 +116,19 @@ struct msm_dump_entry {
 #ifdef CONFIG_QCOM_MEMORY_DUMP_V2
 extern int msm_dump_data_register(enum msm_dump_table_ids id,
 				  struct msm_dump_entry *entry);
+extern int msm_dump_data_register_nominidump(enum msm_dump_table_ids id,
+				  struct msm_dump_entry *entry);
 #else
 static inline int msm_dump_data_register(enum msm_dump_table_ids id,
 					 struct msm_dump_entry *entry)
 {
 	return -EINVAL;
 }
+static inline int msm_dump_data_register_nominidump(enum msm_dump_table_ids id,
+					 struct msm_dump_entry *entry)
+{
+	return -EINVAL;
+}
 #endif
 
 #endif
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index f75e538..d7ab66e 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -79,6 +79,7 @@ void qmi_rmnet_work_init(void *port);
 void qmi_rmnet_work_exit(void *port);
 void qmi_rmnet_work_maybe_restart(void *port);
 void qmi_rmnet_set_dl_msg_active(void *port);
+bool qmi_rmnet_ignore_grant(void *port);
 
 int qmi_rmnet_ps_ind_register(void *port,
 			      struct qmi_rmnet_ps_ind *ps_ind);
@@ -105,6 +106,10 @@ static inline void qmi_rmnet_work_maybe_restart(void *port)
 static inline void qmi_rmnet_set_dl_msg_active(void *port)
 {
 }
+static inline bool qmi_rmnet_ignore_grant(void *port)
+{
+	return false;
+}
 
 static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
 				     struct qmi_rmnet_ps_ind *ps_ind)
diff --git a/include/soc/qcom/qtee_shmbridge.h b/include/soc/qcom/qtee_shmbridge.h
index 4cc332d..ee12264 100644
--- a/include/soc/qcom/qtee_shmbridge.h
+++ b/include/soc/qcom/qtee_shmbridge.h
@@ -29,9 +29,18 @@ struct qtee_shm {
 bool qtee_shmbridge_is_enabled(void);
 
 /**
+ * Check whether a bridge starting from paddr exists
+ *
+ * @ [IN] paddr: physical addr of the buffer
+ *
+ * return 0 or -EEXIST
+ */
+int32_t qtee_shmbridge_query(phys_addr_t paddr);
+
+/**
  * Register paddr & size as a bridge, get bridge handle
  *
- * @ [IN] addr: paddr of buffer to be turned into bridge
+ * @ [IN] paddr: physical addr of the buffer to be turned into bridge
  * @ [IN] size: size of the bridge
  * @ [IN] ns_vmid_list: non-secure vmids array
  * @ [IN] ns_vm_perm_list: NS VM permission array
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index 9096b10..745a9d2 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -10,6 +10,7 @@
 #include <linux/skbuff.h>
 
 void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
+void rmnet_set_data_rates(void *port, u64 rx_rate, u64 tx_rate);
 
 #ifdef CONFIG_QCOM_QMI_RMNET
 void *rmnet_get_qmi_pt(void *port);
@@ -22,7 +23,7 @@ void rmnet_enable_all_flows(void *port);
 bool rmnet_all_flows_enabled(void *port);
 void rmnet_set_powersave_format(void *port);
 void rmnet_clear_powersave_format(void *port);
-void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
+void rmnet_get_stats(void *port, u64 *rx, u64 *tx);
 int rmnet_get_powersave_notif(void *port);
 #else
 static inline void *rmnet_get_qmi_pt(void *port)
@@ -67,7 +68,7 @@ static inline void rmnet_set_port_format(void *port)
 {
 }
 
-static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+static inline void rmnet_get_stats(void *port, u64 *rx, u64 *tx)
 {
 }
 
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index ff05a89..b334bcc 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -6,6 +6,8 @@
 #ifndef __MACH_SCM_H
 #define __MACH_SCM_H
 
+#include <soc/qcom/qtee_shmbridge.h>
+
 #define SCM_SVC_BOOT			0x1
 #define SCM_SVC_PIL			0x2
 #define SCM_SVC_UTIL			0x3
@@ -85,7 +87,7 @@ struct scm_desc {
 	u64 ret[MAX_SCM_RETS];
 
 	/* private */
-	void *extra_arg_buf;
+	struct qtee_shm shm;
 	u64 x5;
 };
 
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 602055a..02713d1 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_SECURE_BUFFER_H__
@@ -59,6 +59,8 @@ extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
 bool msm_secure_v2_is_supported(void);
 const char *msm_secure_vmid_to_string(int secure_vmid);
+u32 msm_secure_get_vmid_perms(u32 vmid);
+
 #else
 static inline int msm_secure_table(struct sg_table *table)
 {
@@ -102,5 +104,11 @@ static inline const char *msm_secure_vmid_to_string(int secure_vmid)
 {
 	return "N/A";
 }
+
+static inline u32 msm_secure_get_vmid_perms(u32 vmid)
+{
+	return 0;
+}
+
 #endif
 #endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 2584968..192185a 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
@@ -56,6 +56,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,kona")
 #define early_machine_is_lito()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lito")
+#define early_machine_is_bengal()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengal")
 #define early_machine_is_sdmshrike()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmshrike")
 #define early_machine_is_sm6150()	\
@@ -88,6 +90,7 @@
 #define early_machine_is_sa8150()	0
 #define early_machine_is_kona()		0
 #define early_machine_is_lito()		0
+#define early_machine_is_bengal()	0
 #define early_machine_is_sdmshrike()	0
 #define early_machine_is_sm6150()	0
 #define early_machine_is_qcs405()	0
@@ -117,6 +120,7 @@ enum msm_cpu {
 	MSM_CPU_SA8150,
 	MSM_CPU_KONA,
 	MSM_CPU_LITO,
+	MSM_CPU_BENGAL,
 	MSM_CPU_SDMSHRIKE,
 	MSM_CPU_SM6150,
 	MSM_CPU_QCS405,
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 81716f3..9891127 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -55,7 +55,12 @@ struct subsys_notif_timeout {
  * struct subsys_desc - subsystem descriptor
  * @name: name of subsystem
  * @fw_name: firmware name
- * @depends_on: subsystem this subsystem depends on to operate
+ * @pon_depends_on: subsystem this subsystem wants to power-on first. If the
+ * dependent subsystem is already powered-on, the framework won't try to power
+ * it back up again.
+ * @poff_depends_on: subsystem this subsystem wants to power-off first. If the
+ * dependent subsystem is already powered-off, the framework won't try to power
+ * it off again.
  * @dev: parent device
  * @owner: module the descriptor belongs to
  * @shutdown: Stop a subsystem
@@ -79,7 +84,8 @@ struct subsys_notif_timeout {
 struct subsys_desc {
 	const char *name;
 	char fw_name[256];
-	const char *depends_on;
+	const char *pon_depends_on;
+	const char *poff_depends_on;
 	struct device *dev;
 	struct module *owner;
 
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index cb62767..375a156 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -12,15 +12,14 @@
 
 TRACE_EVENT(dfc_qmi_tc,
 
-	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
+	TP_PROTO(const char *name, u8 bearer_id, u32 grant,
 		 int qlen, u32 tcm_handle, int enable),
 
-	TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),
+	TP_ARGS(name, bearer_id, grant, qlen, tcm_handle, enable),
 
 	TP_STRUCT__entry(
 		__string(dev_name, name)
 		__field(u8, bid)
-		__field(u32, fid)
 		__field(u32, grant)
 		__field(int, qlen)
 		__field(u32, tcm_handle)
@@ -30,16 +29,15 @@ TRACE_EVENT(dfc_qmi_tc,
 	TP_fast_assign(
 		__assign_str(dev_name, name);
 		__entry->bid = bearer_id;
-		__entry->fid = flow_id;
 		__entry->grant = grant;
 		__entry->qlen = qlen;
 		__entry->tcm_handle = tcm_handle;
 		__entry->enable = enable;
 	),
 
-	TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
+	TP_printk("dev=%s bearer_id=%u grant=%u len=%d mq=%u %s",
 		__get_str(dev_name),
-		__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
+		__entry->bid, __entry->grant, __entry->qlen,
 		__entry->tcm_handle,
 		__entry->enable ? "enable" : "disable")
 );
@@ -82,14 +80,16 @@ TRACE_EVENT(dfc_flow_ind,
 
 TRACE_EVENT(dfc_flow_check,
 
-	TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),
+	TP_PROTO(const char *name, u8 bearer_id, unsigned int len,
+		 u32 mark, u32 grant),
 
-	TP_ARGS(name, bearer_id, len, grant),
+	TP_ARGS(name, bearer_id, len, mark, grant),
 
 	TP_STRUCT__entry(
 		__string(dev_name, name)
 		__field(u8, bearer_id)
 		__field(unsigned int, len)
+		__field(u32, mark)
 		__field(u32, grant)
 	),
 
@@ -97,12 +97,13 @@ TRACE_EVENT(dfc_flow_check,
 		__assign_str(dev_name, name)
 		__entry->bearer_id = bearer_id;
 		__entry->len = len;
+		__entry->mark = mark;
 		__entry->grant = grant;
 	),
 
-	TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
-		__get_str(dev_name),
-		__entry->bearer_id, __entry->len, __entry->grant)
+	TP_printk("dev=%s bearer_id=%u skb_len=%u mark=%u current_grant=%u",
+		__get_str(dev_name), __entry->bearer_id,
+		__entry->len, __entry->mark, __entry->grant)
 );
 
 TRACE_EVENT(dfc_flow_info,
@@ -130,7 +131,7 @@ TRACE_EVENT(dfc_flow_info,
 		__entry->action = add;
 	),
 
-	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
+	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d mq=%d",
 		__entry->action ? "add flow" : "delete flow",
 		__get_str(dev_name),
 		__entry->bid, __entry->fid, __entry->ip, __entry->handle)
@@ -208,6 +209,33 @@ TRACE_EVENT(dfc_qmap_cmd,
 		__entry->type, __entry->tran)
 );
 
+TRACE_EVENT(dfc_tx_link_status_ind,
+
+	TP_PROTO(int src, int idx, u8 status, u8 mux_id, u8 bearer_id),
+
+	TP_ARGS(src, idx, status, mux_id, bearer_id),
+
+	TP_STRUCT__entry(
+		__field(int, src)
+		__field(int, idx)
+		__field(u8, status)
+		__field(u8, mid)
+		__field(u8, bid)
+	),
+
+	TP_fast_assign(
+		__entry->src = src;
+		__entry->idx = idx;
+		__entry->status = status;
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+	),
+
+	TP_printk("src=%d [%d]: status=%u mux_id=%u bearer_id=%u",
+		__entry->src, __entry->idx, __entry->status,
+		__entry->mid, __entry->bid)
+);
+
 #endif /* _TRACE_DFC_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8b2e347..bd334ec 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -534,6 +534,37 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 		__entry->err)
 );
 
+TRACE_EVENT(f2fs_file_write_iter,
+
+	TP_PROTO(struct inode *inode, unsigned long offset,
+		unsigned long length, int ret),
+
+	TP_ARGS(inode, offset, length, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(unsigned long, offset)
+		__field(unsigned long, length)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->offset	= offset;
+		__entry->length	= length;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, "
+		"offset = %lu, length = %lu, written(err) = %d",
+		show_dev_ino(__entry),
+		__entry->offset,
+		__entry->length,
+		__entry->ret)
+);
+
 TRACE_EVENT(f2fs_map_blocks,
 	TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
 
@@ -1254,6 +1285,32 @@ DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
 	TP_ARGS(page, type)
 );
 
+TRACE_EVENT(f2fs_filemap_fault,
+
+	TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+
+	TP_ARGS(inode, index, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(pgoff_t, index)
+		__field(unsigned long, ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->index	= index;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->index,
+		__entry->ret)
+);
+
 TRACE_EVENT(f2fs_writepages,
 
 	TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
diff --git a/include/trace/events/msm_vidc_events.h b/include/trace/events/msm_vidc_events.h
index e5f1732..598a016 100644
--- a/include/trace/events/msm_vidc_events.h
+++ b/include/trace/events/msm_vidc_events.h
@@ -137,6 +137,47 @@ DEFINE_EVENT(venus_hfi_var, venus_hfi_var_done,
 	TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size)
 );
 
+DECLARE_EVENT_CLASS(msm_v4l2_vidc_count_events,
+
+	TP_PROTO(char *event_type,
+		 u32 etb, u32 ebd, u32 ftb, u32 fbd),
+
+	TP_ARGS(event_type, etb, ebd, ftb, fbd),
+
+	TP_STRUCT__entry(
+		__field(char *, event_type)
+		__field(u32, etb)
+		__field(u32, ebd)
+		__field(u32, ftb)
+		__field(u32, fbd)
+	),
+
+	TP_fast_assign(
+		__entry->event_type = event_type;
+		__entry->etb = etb;
+		__entry->ebd = ebd;
+		__entry->ftb = ftb;
+		__entry->fbd = fbd;
+	),
+
+	TP_printk(
+		"%s, ETB %u EBD %u FTB %u FBD %u",
+		__entry->event_type,
+		__entry->etb,
+		__entry->ebd,
+		__entry->ftb,
+		__entry->fbd)
+);
+
+DEFINE_EVENT(msm_v4l2_vidc_count_events, msm_v4l2_vidc_buffer_counter,
+
+	TP_PROTO(char *event_type,
+		u32 etb, u32 ebd, u32 ftb, u32 fbd),
+
+	TP_ARGS(event_type,
+		etb, ebd, ftb, fbd)
+);
+
 DECLARE_EVENT_CLASS(msm_v4l2_vidc_buffer_events,
 
 	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
diff --git a/include/trace/events/psi.h b/include/trace/events/psi.h
index 0065977..79b87615 100644
--- a/include/trace/events/psi.h
+++ b/include/trace/events/psi.h
@@ -59,7 +59,7 @@ TRACE_EVENT(psi_event,
 		__entry->threshold = threshold;
 	),
 
-	TP_printk("State: %d Threshold: %#llx",
+	TP_printk("State: %d Threshold: %#llu ns",
 		__entry->state, __entry->threshold
 	)
 );
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f95b521..ff615a1 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1207,18 +1207,21 @@ TRACE_EVENT(sched_compute_energy,
 
 TRACE_EVENT(sched_task_util,
 
-	TP_PROTO(struct task_struct *p, int best_energy_cpu,
-		bool sync, bool need_idle, int fastpath,
-		bool placement_boost, int rtg_cpu, u64 start_t,
-		bool stune_boosted),
+	TP_PROTO(struct task_struct *p, unsigned long candidates,
+		int best_energy_cpu, bool sync, bool need_idle, int fastpath,
+		bool placement_boost, u64 start_t,
+		bool stune_boosted, bool is_rtg, bool rtg_skip_min,
+		int start_cpu),
 
-	TP_ARGS(p, best_energy_cpu, sync, need_idle, fastpath,
-		placement_boost, rtg_cpu, start_t, stune_boosted),
+	TP_ARGS(p, candidates, best_energy_cpu, sync, need_idle, fastpath,
+		placement_boost, start_t, stune_boosted, is_rtg, rtg_skip_min,
+		start_cpu),
 
 	TP_STRUCT__entry(
 		__field(int,		pid)
 		__array(char,		comm, TASK_COMM_LEN)
 		__field(unsigned long,	util)
+		__field(unsigned long,	candidates)
 		__field(int,		prev_cpu)
 		__field(int,		best_energy_cpu)
 		__field(bool,		sync)
@@ -1228,6 +1231,9 @@ TRACE_EVENT(sched_task_util,
 		__field(int,		rtg_cpu)
 		__field(u64,		latency)
 		__field(bool,		stune_boosted)
+		__field(bool,		is_rtg)
+		__field(bool,		rtg_skip_min)
+		__field(int,		start_cpu)
 	),
 
 	TP_fast_assign(
@@ -1235,21 +1241,25 @@ TRACE_EVENT(sched_task_util,
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->util                   = task_util(p);
 		__entry->prev_cpu               = task_cpu(p);
+		__entry->candidates		= candidates;
 		__entry->best_energy_cpu        = best_energy_cpu;
 		__entry->sync                   = sync;
 		__entry->need_idle              = need_idle;
 		__entry->fastpath               = fastpath;
 		__entry->placement_boost        = placement_boost;
-		__entry->rtg_cpu                = rtg_cpu;
 		__entry->latency                = (sched_clock() - start_t);
 		__entry->stune_boosted          = stune_boosted;
+		__entry->is_rtg                 = is_rtg;
+		__entry->rtg_skip_min		= rtg_skip_min;
+		__entry->start_cpu		= start_cpu;
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu stune_boosted=%d",
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d",
 		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
-		__entry->best_energy_cpu, __entry->sync, __entry->need_idle,
-		__entry->fastpath, __entry->placement_boost, __entry->rtg_cpu,
-		__entry->latency, __entry->stune_boosted)
+		__entry->candidates, __entry->best_energy_cpu, __entry->sync,
+		__entry->need_idle, __entry->fastpath, __entry->placement_boost,
+		__entry->latency, __entry->stune_boosted,
+		__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu)
 )
 
 /*
@@ -1417,6 +1427,33 @@ TRACE_EVENT(core_ctl_update_nr_need,
 		__entry->nrrun, __entry->max_nr, __entry->nr_prev_assist)
 );
 
+TRACE_EVENT(core_ctl_notif_data,
+
+	TP_PROTO(u32 nr_big, u32 ta_load, u32 *ta_util, u32 *cur_cap),
+
+	TP_ARGS(nr_big, ta_load, ta_util, cur_cap),
+
+	TP_STRUCT__entry(
+		__field(u32, nr_big)
+		__field(u32, ta_load)
+		__array(u32, ta_util, MAX_CLUSTERS)
+		__array(u32, cur_cap, MAX_CLUSTERS)
+	),
+
+	TP_fast_assign(
+		__entry->nr_big = nr_big;
+		__entry->ta_load = ta_load;
+		memcpy(__entry->ta_util, ta_util, MAX_CLUSTERS * sizeof(u32));
+		memcpy(__entry->cur_cap, cur_cap, MAX_CLUSTERS * sizeof(u32));
+	),
+
+	TP_printk("nr_big=%u ta_load=%u ta_util=(%u %u %u) cur_cap=(%u %u %u)",
+		  __entry->nr_big, __entry->ta_load,
+		  __entry->ta_util[0], __entry->ta_util[1],
+		  __entry->ta_util[2], __entry->cur_cap[0],
+		  __entry->cur_cap[1], __entry->cur_cap[2])
+);
+
 /*
  * Tracepoint for schedtune_tasks_update
  */
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 1b7123f..1cf2bf9 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -364,23 +364,19 @@ TRACE_EVENT(sched_set_preferred_cluster,
 
 	TP_STRUCT__entry(
 		__field(int,		id)
-		__field(u64,		demand)
-		__field(int,		cluster_first_cpu)
-		__array(char,		comm, TASK_COMM_LEN)
-		__field(pid_t,		pid)
-		__field(unsigned int,	task_demand)
+		__field(u64,		total_demand)
+		__field(bool,		skip_min)
 	),
 
 	TP_fast_assign(
 		__entry->id			= grp->id;
-		__entry->demand			= total_demand;
-		__entry->cluster_first_cpu	= grp->preferred_cluster ?
-			cluster_first_cpu(grp->preferred_cluster) : -1;
+		__entry->total_demand		= total_demand;
+		__entry->skip_min		= grp->skip_min;
 	),
 
-	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
-			__entry->id, __entry->demand,
-			__entry->cluster_first_cpu)
+	TP_printk("group_id %d total_demand %llu skip_min %d",
+			__entry->id, __entry->total_demand,
+			__entry->skip_min)
 );
 
 TRACE_EVENT(sched_migration_update_sum,
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 90910aa..78ad139 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -69,6 +69,21 @@ struct drm_msm_timespec {
 };
 
 /*
+ * Colorimetry Data Block values
+ * These bit nums are defined as per the CTA spec
+ * and indicate the colorspaces supported by the sink
+ */
+#define DRM_EDID_CLRMETRY_xvYCC_601   (1 << 0)
+#define DRM_EDID_CLRMETRY_xvYCC_709   (1 << 1)
+#define DRM_EDID_CLRMETRY_sYCC_601    (1 << 2)
+#define DRM_EDID_CLRMETRY_ADOBE_YCC_601  (1 << 3)
+#define DRM_EDID_CLRMETRY_ADOBE_RGB     (1 << 4)
+#define DRM_EDID_CLRMETRY_BT2020_CYCC (1 << 5)
+#define DRM_EDID_CLRMETRY_BT2020_YCC  (1 << 6)
+#define DRM_EDID_CLRMETRY_BT2020_RGB  (1 << 7)
+#define DRM_EDID_CLRMETRY_DCI_P3      (1 << 15)
+
+/*
  * HDR Metadata
  * These are defined as per EDID spec and shall be used by the sink
  * to set the HDR metadata for playback from userspace.
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 9c42e6e..9a8a3e2 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -28,10 +28,23 @@ struct esoc_link_data {
 #define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
 #define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
 #define ESOC_GET_LINK_ID	_IOWR(ESOC_CODE, 9, struct esoc_link_data)
+#define ESOC_SET_BOOT_FAIL_ACT	_IOW(ESOC_CODE, 10, unsigned int)
+#define ESOC_SET_N_PON_TRIES	_IOW(ESOC_CODE, 11, unsigned int)
 
 #define ESOC_REQ_SEND_SHUTDOWN	ESOC_REQ_SEND_SHUTDOWN
 #define ESOC_REQ_CRASH_SHUTDOWN ESOC_REQ_CRASH_SHUTDOWN
 #define ESOC_PON_RETRY		ESOC_PON_RETRY
+#define ESOC_BOOT_FAIL_ACTION
+
+enum esoc_boot_fail_action {
+	BOOT_FAIL_ACTION_RETRY,
+	BOOT_FAIL_ACTION_COLD_RESET,
+	BOOT_FAIL_ACTION_SHUTDOWN,
+	BOOT_FAIL_ACTION_PANIC,
+	BOOT_FAIL_ACTION_NOP,
+	BOOT_FAIL_ACTION_S3_RESET,
+	BOOT_FAIL_ACTION_LAST,
+};
 
 enum esoc_evt {
 	ESOC_RUN_STATE = 0x1,
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index fbb318d..24af4ed 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -219,10 +219,12 @@ struct fuse_file_lock {
  * FOPEN_DIRECT_IO: bypass page cache for this open file
  * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
  * FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_STREAM: the file is stream-like (no file position at all)
  */
 #define FOPEN_DIRECT_IO		(1 << 0)
 #define FOPEN_KEEP_CACHE	(1 << 1)
 #define FOPEN_NONSEEKABLE	(1 << 2)
+#define FOPEN_STREAM		(1 << 4)
 
 /**
  * INIT request/reply flags
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index f773d96..a266e45 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -36,6 +36,7 @@
 
 #define QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 6
 #define QMI_IPA_MAX_FILTERS_EX_V01 128
+#define QMI_IPA_MAX_FILTERS_EX2_V01 256
 #define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
 #define QMI_IPA_MAX_FILTERS_V01 64
 #define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
@@ -56,6 +57,7 @@
  * Indicates presence of newly added member to support HW stats.
  */
 #define IPA_QMI_SUPPORTS_STATS
+#define IPA_QMI_SUPPORT_MHI_DEFAULT
 
 #define IPA_INT_MAX	((int)(~0U>>1))
 #define IPA_INT_MIN	(-IPA_INT_MAX - 1)
@@ -465,6 +467,19 @@ struct ipa_indication_reg_req_msg_v01 {
 	 * If set to TRUE, this field indicates that the client wants to
 	 * receive indications about MHI ready for Channel allocations.
 	 */
+
+	/* Optional */
+	/*  Endpoint Desc Info Indication */
+	uint8_t endpoint_desc_ind_valid;
+	/* Must be set to true if endpoint_desc_ind is being passed */
+	uint8_t endpoint_desc_ind;
+	/*
+	 * If set to TRUE, this field indicates that the client wants to
+	 * receive indications for Endpoint descriptor information via
+	 * QMI_IPA_ENDP_DESC_INDICATION. Setting this field in the request
+	 * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ
+	 * is being originated from the master driver.
+	 */
 };  /* Message */
 
 
@@ -1040,6 +1055,20 @@ struct ipa_install_fltr_rule_req_msg_v01 {
 	/* Must be set to # of elements in filter_spec_ex2_list */
 	struct ipa_filter_spec_ex2_type_v01
 		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+
+	/* Optional */
+	/* List of modem UL Filters in the Spec List which need to be
+	 * replicated with AP UL firewall filters
+	 */
+	uint8_t ul_firewall_indices_list_valid;
+	/* Must be set to # of elements in ul_firewall_indices_list */
+	uint32_t ul_firewall_indices_list_len;
+	uint32_t ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of UL firewall filter indices.
+	 * Filter rules at specified indices must be replicated across
+	 * the firewall filters by the receiver and installed on the
+	 * associated IPA consumer pipe.
+	 */
 };  /* Message */
 
 struct ipa_filter_rule_identifier_to_handle_map_v01 {
@@ -1230,6 +1259,18 @@ struct ipa_fltr_installed_notif_req_msg_v01 {
 	uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
 	/* Provides the list of destination pipe IDs for a source pipe. */
 
+	/* Optional */
+	/*  List of Rule IDs extended */
+	uint8_t rule_id_ex_valid;
+	/* Must be set to true if rule_id_ex is being passed. */
+	uint32_t rule_id_ex_len;
+	/* Must be set to # of elements in rule_id_ex */
+	uint32_t rule_id_ex[QMI_IPA_MAX_FILTERS_EX2_V01];
+	/* Provides the list of Rule IDs of rules added in IPA on the
+	 * given source pipe index. If the install_status TLV indicates
+	 * a failure, the Rule IDs in this list must be set to a
+	 * reserved index (255).
+	 */
 };  /* Message */
 
 /* Response Message; This is the message that is exchanged between the
@@ -1847,6 +1888,19 @@ struct ipa_install_fltr_rule_req_ex_msg_v01 {
 	/* Must be set to # of elements in filter_spec_ex2_list */
 	struct ipa_filter_spec_ex2_type_v01
 		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/* List of modem UL Filters in the Spec List which need to be
+	 * replicated with AP UL firewall filters
+	 */
+	uint8_t ul_firewall_indices_list_valid;
+	/* Must be set to # of elements in ul_firewall_indices_list */
+	uint32_t ul_firewall_indices_list_len;
+	uint32_t ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of UL firewall filter indices.
+	 * Filter rules at specified indices must be replicated across
+	 * the firewall filters by the receiver and installed on the
+	 * associated IPA consumer pipe.
+	 */
 };  /* Message */
 
 /* Response Message; Requests installation of filtering rules in the hardware
@@ -2334,6 +2388,22 @@ struct ipa_mhi_alloc_channel_resp_msg_v01 {
 };
 #define IPA_MHI_ALLOC_CHANNEL_RESP_MSG_V01_MAX_MSG_LEN 23
 
+enum ipa_clock_rate_enum_v01 {
+	IPA_CLOCK_RATE_ENUM_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+
+	QMI_IPA_CLOCK_RATE_INVALID_V01 = 0,
+
+	QMI_IPA_CLOCK_RATE_LOW_SVS_V01 = 1,
+
+	QMI_IPA_CLOCK_RATE_SVS_V01 = 2,
+
+	QMI_IPA_CLOCK_RATE_NOMINAL_V01 = 3,
+
+	QMI_IPA_CLOCK_RATE_TURBO_V01 = 4,
+
+	IPA_CLOCK_RATE_ENUM_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
 struct ipa_mhi_clk_vote_req_msg_v01 {
 	/* Mandatory */
 	uint8_t mhi_vote;
@@ -2342,8 +2412,17 @@ struct ipa_mhi_clk_vote_req_msg_v01 {
 	 * TRUE  - ON
 	 * FALSE - OFF
 	 */
+	/* Optional */
+	/*  Throughput Value */
+	uint8_t tput_value_valid;
+	uint32_t tput_value;
+
+	/* Optional */
+	/*  IPA Clock Rate */
+	uint8_t clk_rate_valid;
+	enum ipa_clock_rate_enum_v01 clk_rate;
 };
-#define IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN 4
+#define IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN 18
 
 struct ipa_mhi_clk_vote_resp_msg_v01 {
 	/* Mandatory */
@@ -2514,8 +2593,20 @@ struct ipa_add_offload_connection_req_msg_v01 {
 	uint32_t filter_spec_ex2_list_len;
 	struct ipa_filter_spec_ex2_type_v01
 		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/*  Mux ID for embedded call */
+	uint8_t embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+	uint32_t embedded_call_mux_id;
+	/* Mux ID for the new embedded call */
+	/* Optional */
+	/*  Default MHI path */
+	uint8_t default_mhi_path_valid;
+	/* Must be set to true if default_mhi_path is being passed */
+	uint8_t default_mhi_path;
+	/* Default MHI path */
 }; /* Message */
-#define IPA_ADD_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 11350
+#define IPA_ADD_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 11361
 
 struct ipa_add_offload_connection_resp_msg_v01 {
 	/*  Result Code */
@@ -2538,8 +2629,14 @@ struct ipa_remove_offload_connection_req_msg_v01 {
 	uint32_t filter_handle_list_len;
 	struct ipa_filter_rule_identifier_to_handle_map_v01
 		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/*  Clean All rules */
+	uint8_t clean_all_rules_valid;
+	/* Must be set to true if clean_all_rules is being passed */
+	uint8_t clean_all_rules;
+	/* Clean All rules */
 }; /* Message */
-#define IPA_REMOVE_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 516
+#define IPA_REMOVE_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 520
 
 struct ipa_remove_offload_connection_resp_msg_v01 {
 	/* optional */
@@ -2609,11 +2706,11 @@ struct ipa_remove_offload_connection_resp_msg_v01 {
 /* add for max length*/
 #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 162
 #define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
-#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 12
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 16
 #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
-#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 33445
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 33705
 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 1899
 #define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
 #define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -2643,7 +2740,7 @@ struct ipa_remove_offload_connection_resp_msg_v01 {
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
 
-#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 33761
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 34021
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
 
 #define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 390450c..5df7e23 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -116,7 +116,16 @@
 #define IPA_IOCTL_GSB_CONNECT                   64
 #define IPA_IOCTL_GSB_DISCONNECT                65
 #define IPA_IOCTL_WIGIG_FST_SWITCH              66
-
+#define IPA_IOCTL_ADD_RT_RULE_V2                67
+#define IPA_IOCTL_ADD_RT_RULE_EXT_V2            68
+#define IPA_IOCTL_ADD_RT_RULE_AFTER_V2          69
+#define IPA_IOCTL_MDFY_RT_RULE_V2               70
+#define IPA_IOCTL_ADD_FLT_RULE_V2               71
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER_V2         72
+#define IPA_IOCTL_MDFY_FLT_RULE_V2              73
+#define IPA_IOCTL_FNR_COUNTER_ALLOC             74
+#define IPA_IOCTL_FNR_COUNTER_DEALLOC           75
+#define IPA_IOCTL_FNR_COUNTER_QUERY             76
 
 /**
  * max size of the header to be inserted
@@ -160,6 +169,14 @@
 #define QMI_IPA_MAX_CLIENT_DST_PIPES 4
 
 /**
+ * MAX number of the FLT_RT stats counter supported.
+ */
+#define IPA_MAX_FLT_RT_CNT_INDEX (128)
+#define IPA_FLT_RT_HW_COUNTER (120)
+#define IPA_FLT_RT_SW_COUNTER \
+	(IPA_MAX_FLT_RT_CNT_INDEX - IPA_FLT_RT_HW_COUNTER)
+
+/**
  * the attributes of the rule (routing or filtering)
  */
 #define IPA_FLT_TOS			(1ul << 0)
@@ -387,6 +404,10 @@ enum ipa_client_type {
 	(client) == IPA_CLIENT_APPS_WAN_CONS || \
 	(client) == IPA_CLIENT_APPS_WAN_COAL_CONS)
 
+#define IPA_CLIENT_IS_APPS_PROD(client) \
+	((client) == IPA_CLIENT_APPS_LAN_PROD || \
+	(client) == IPA_CLIENT_APPS_WAN_PROD)
+
 #define IPA_CLIENT_IS_USB_CONS(client) \
 	((client) == IPA_CLIENT_USB_CONS || \
 	(client) == IPA_CLIENT_USB2_CONS || \
@@ -966,6 +987,55 @@ struct ipa_flt_rule {
 };
 
 /**
+ * struct ipa_flt_rule_v2 - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source\destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ * @enable_stats: is true when we want to enable stats for this
+ * flt rule.
+ * @cnt_idx: if 0 means disable, otherwise use for index.
+ * will be assigned by ipa driver.
+ */
+struct ipa_flt_rule_v2 {
+	uint8_t retain_hdr;
+	uint8_t to_uc;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	uint32_t rt_tbl_idx;
+	uint8_t eq_attrib_type;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint16_t rule_id;
+	uint8_t set_metadata;
+	uint8_t pdn_idx;
+	uint8_t enable_stats;
+	uint8_t cnt_idx;
+};
+
+/**
  * enum ipa_hdr_l2_type - L2 header type
  * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
  * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
@@ -1032,6 +1102,41 @@ struct ipa_rt_rule {
 #define IPA_RT_SUPPORT_COAL
 
 /**
+ * struct ipa_rt_rule_v2 - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @coalesce: bool to decide whether packets should be coalesced or not
+ * @enable_stats: is true when we want to enable stats for this
+ * rt rule.
+ * @cnt_idx: if enable_stats is 1 and cnt_idx is 0, then cnt_idx
+ * will be assigned by ipa driver.
+ */
+struct ipa_rt_rule_v2 {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	uint32_t hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint8_t retain_hdr;
+	uint8_t coalesce;
+	uint8_t enable_stats;
+	uint8_t cnt_idx;
+};
+
+/**
  * struct ipa_hdr_add - header descriptor includes in and out
  * parameters
  * @name: name of the header
@@ -1266,6 +1371,25 @@ struct ipa_rt_rule_add {
 };
 
 /**
+ * struct ipa_rt_rule_add_v2 - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add_v2 {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_v2 rule;
+};
+
+
+/**
  * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
  * multiple rules and commit);
  *
@@ -1285,6 +1409,31 @@ struct ipa_ioc_add_rt_rule {
 };
 
 /**
+ * struct ipa_ioc_add_rt_rule_v2 - routing rule addition
+ * parameters (supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rule_add_size: sizeof(struct ipa_rt_rule_add_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t rule_add_size;
+	uint32_t reserved1;
+	uint8_t reserved2;
+	uint64_t rules;
+};
+
+/**
  * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific
  * rule parameters(supports multiple rules and commit);
  *
@@ -1307,6 +1456,33 @@ struct ipa_ioc_add_rt_rule_after {
 };
 
 /**
+ * struct ipa_ioc_add_rt_rule_after_v2 - routing rule addition
+ * after a specific rule parameters(supports multiple rules and
+ * commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @rule_add_size: sizeof(struct ipa_rt_rule_add_v2)
+ * @reserved: reserved bits for alignment
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ *			   at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	uint32_t rule_add_size;
+	uint8_t reserved;
+	uint64_t rules;
+};
+
+/**
  * struct ipa_rt_rule_mdfy - routing rule descriptor includes
  * in and out parameters
  * @rule: actual rule to be added
@@ -1323,6 +1499,22 @@ struct ipa_rt_rule_mdfy {
 };
 
 /**
+ * struct ipa_rt_rule_mdfy_v2 - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rt_rule_hdl: handle to rule which supposed to modify
+ * @status:	output parameter, status of routing rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy_v2 {
+	uint32_t rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_v2 rule;
+};
+
+/**
  * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports
  * multiple rules and commit)
  * @commit: should rules be written to IPA HW also?
@@ -1338,6 +1530,25 @@ struct ipa_ioc_mdfy_rt_rule {
 };
 
 /**
+ * struct ipa_ioc_mdfy_rt_rule_v2 - routing rule modify
+ * parameters (supports multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rule_mdfy_size: sizeof(struct ipa_rt_rule_mdfy_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	uint32_t rule_mdfy_size;
+	uint8_t reserved;
+	uint64_t rules;
+};
+
+/**
  * struct ipa_rt_rule_del - routing rule descriptor includes in
  * and out parameters
  * @hdl: handle returned from route rule add operation
@@ -1372,8 +1583,29 @@ struct ipa_rt_rule_add_ext {
 };
 
 /**
- * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
- * multiple rules and commit with rule_id);
+ * struct ipa_rt_rule_add_ext_v2 - routing rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext_v2 {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+	struct ipa_rt_rule_v2 rule;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition
+ * parameters (supports multiple rules and commit with rule_id);
  *
  * all rules MUST be added to same table
  * @commit: should rules be written to IPA HW also?
@@ -1391,6 +1623,32 @@ struct ipa_ioc_add_rt_rule_ext {
 	struct ipa_rt_rule_add_ext rules[0];
 };
 
+/**
+ * struct ipa_ioc_add_rt_rule_ext_v2 - routing rule addition
+ * parameters (supports multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rule_add_ext_size: sizeof(struct ipa_rt_rule_add_ext_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t rule_add_ext_size;
+	uint32_t reserved1;
+	uint8_t reserved2;
+	uint64_t rules;
+};
+
 
 /**
  * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
@@ -1439,6 +1697,24 @@ struct ipa_flt_rule_add {
 };
 
 /**
+ * struct ipa_flt_rule_add_v2 - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add   operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add_v2 {
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+	struct ipa_flt_rule_v2 rule;
+};
+
+/**
  * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
  * multiple rules and commit)
  * all rules MUST be added to same table
@@ -1460,6 +1736,36 @@ struct ipa_ioc_add_flt_rule {
 };
 
 /**
+ * struct ipa_ioc_add_flt_rule_v2 - filtering rule addition
+ * parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @flt_rule_size: sizeof(struct ipa_flt_rule_add_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @reserved3: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	uint32_t flt_rule_size;
+	uint32_t reserved1;
+	uint16_t reserved2;
+	uint8_t reserved3;
+	uint64_t rules;
+};
+
+
+/**
  * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific
  * rule parameters (supports multiple rules and commit)
  * all rules MUST be added to same table
@@ -1481,6 +1787,31 @@ struct ipa_ioc_add_flt_rule_after {
 };
 
 /**
+ * struct ipa_ioc_add_flt_rule_after_v2 - filtering rule
+ * addition after specific rule parameters (supports multiple
+ * rules and commit) all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @flt_rule_size: sizeof(struct ipa_flt_rule_add_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers. at rear field
+ *	   is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	uint32_t flt_rule_size;
+	uint32_t reserved;
+	uint64_t rules;
+};
+
+/**
  * struct ipa_flt_rule_mdfy - filtering rule descriptor includes
  * in and out parameters
  * @rule: actual rule to be added
@@ -1497,6 +1828,22 @@ struct ipa_flt_rule_mdfy {
 };
 
 /**
+ * struct ipa_flt_rule_mdfy_v2 - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @rule_hdl: handle to rule
+ * @status:	output parameter, status of filtering rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy_v2 {
+	uint32_t rule_hdl;
+	int status;
+	struct ipa_flt_rule_v2 rule;
+};
+
+/**
  * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports
  * multiple rules and commit)
  * @commit: should rules be written to IPA HW also?
@@ -1512,6 +1859,25 @@ struct ipa_ioc_mdfy_flt_rule {
 };
 
 /**
+ * struct ipa_ioc_mdfy_flt_rule_v2 - filtering rule modify
+ * parameters (supports multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rule_mdfy_size: sizeof(struct ipa_flt_rule_mdfy_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	uint32_t rule_mdfy_size;
+	uint8_t reserved;
+	uint64_t rules;
+};
+
+/**
  * struct ipa_flt_rule_del - filtering rule descriptor includes
  * in and out parameters
  *
@@ -1626,6 +1992,8 @@ struct ipa_ioc_ext_intf_prop {
 	uint8_t is_xlat_rule;
 	uint32_t rule_id;
 	uint8_t is_rule_hashable;
+#define IPA_V6_UL_WL_FIREWALL_HANDLE
+	uint8_t replicate_needed;
 };
 
 /**
@@ -1982,6 +2350,62 @@ struct ipa_ioc_write_qmapid {
 	uint8_t qmap_id;
 };
 
+/**
+ * struct ipa_flt_rt_counter_alloc - flt/rt counter id allocation
+ * @num_counters: input param, num of counters need to be allocated
+ * @allow_less: input param, if true, success even few counter than request
+ * @start_id: output param, allocated start_id, 0 when allocation fails
+ * @end_id: output param, allocated end_id, 0 when allocation fails
+ */
+struct ipa_flt_rt_counter_alloc {
+	uint8_t num_counters;
+	uint8_t allow_less;
+	uint8_t start_id;
+	uint8_t end_id;
+};
+
+/**
+ * struct ipa_ioc_flt_rt_counter_alloc - flt/rt counter id allocation ioctl
+ * @hdl: output param, hdl used for deallocation, negative if allocation fails
+ * @hw_counter: HW counters for HW process
+ * @sw_counter: SW counters for uC / non-HW process
+ */
+struct ipa_ioc_flt_rt_counter_alloc {
+	int hdl;
+	struct ipa_flt_rt_counter_alloc hw_counter;
+	struct ipa_flt_rt_counter_alloc sw_counter;
+};
+
+/**
+ * struct ipa_flt_rt_stats - flt/rt stats info
+ * @num_pkts: number of packets
+ * @num_pkts_hash: number of packets in hash entry
+ * @num_bytes: number of bytes
+ */
+struct ipa_flt_rt_stats {
+	uint32_t num_pkts;
+	uint32_t num_pkts_hash;
+	uint64_t num_bytes;
+};
+
+/**
+ * struct ipa_ioc_flt_rt_query - flt/rt counter id query
+ * @start_id: start counter id for query
+ * @end_id: end counter id for query
+ * @reset: this query need hw counter to be reset or not
+ * @stats_size: sizeof(ipa_flt_rt_stats)
+ * @reserved: reserved bits for alignment
+ * @stats: structure contains the query result
+ */
+struct ipa_ioc_flt_rt_query {
+	uint8_t start_id;
+	uint8_t end_id;
+	uint8_t reset;
+	uint32_t stats_size;
+	uint8_t reserved;
+	uint64_t stats;
+};
+
 enum ipacm_client_enum {
 	IPACM_CLIENT_USB = 1,
 	IPACM_CLIENT_WLAN,
@@ -2108,21 +2532,36 @@ struct ipa_odl_modem_config {
 #define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE, \
 					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_V2, \
+					struct ipa_ioc_add_rt_rule_v2 *)
 #define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE_EXT, \
 					struct ipa_ioc_add_rt_rule_ext *)
+#define IPA_IOC_ADD_RT_RULE_EXT_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT_V2, \
+					struct ipa_ioc_add_rt_rule_ext_v2 *)
 #define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE_AFTER, \
 					struct ipa_ioc_add_rt_rule_after *)
+#define IPA_IOC_ADD_RT_RULE_AFTER_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_AFTER_V2, \
+					struct ipa_ioc_add_rt_rule_after_v2 *)
 #define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_DEL_RT_RULE, \
 					struct ipa_ioc_del_rt_rule *)
 #define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_FLT_RULE, \
 					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_ADD_FLT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_V2, \
+					struct ipa_ioc_add_flt_rule_v2 *)
 #define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_FLT_RULE_AFTER, \
 					struct ipa_ioc_add_flt_rule_after *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_AFTER_V2, \
+					struct ipa_ioc_add_flt_rule_after_v2 *)
 #define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_DEL_FLT_RULE, \
 					struct ipa_ioc_del_flt_rule *)
@@ -2234,9 +2673,15 @@ struct ipa_odl_modem_config {
 #define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_MDFY_FLT_RULE, \
 					struct ipa_ioc_mdfy_flt_rule *)
+#define IPA_IOC_MDFY_FLT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_FLT_RULE_V2, \
+					struct ipa_ioc_mdfy_flt_rule_v2 *)
 #define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_MDFY_RT_RULE, \
 					struct ipa_ioc_mdfy_rt_rule *)
+#define IPA_IOC_MDFY_RT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_RT_RULE_V2, \
+					struct ipa_ioc_mdfy_rt_rule_v2 *)
 
 #define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \
 				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
@@ -2312,6 +2757,18 @@ struct ipa_odl_modem_config {
 				IPA_IOCTL_WIGIG_FST_SWITCH, \
 				struct ipa_ioc_wigig_fst_switch)
 
+#define IPA_IOC_FNR_COUNTER_ALLOC _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_ALLOC, \
+				struct ipa_ioc_flt_rt_counter_alloc)
+
+#define IPA_IOC_FNR_COUNTER_DEALLOC _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_DEALLOC, \
+				int)
+
+#define IPA_IOC_FNR_COUNTER_QUERY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_QUERY, \
+				struct ipa_ioc_flt_rt_query)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index cda0018..7a98eed 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _UAPI_MSM_KGSL_H
@@ -336,6 +336,39 @@ enum kgsl_timestamp_type {
 #define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
 #define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
 #define KGSL_PROP_SPEED_BIN		0x25
+#define KGSL_PROP_GAMING_BIN		0x26
+#define KGSL_PROP_QUERY_CAPABILITIES	0x27
+#define KGSL_PROP_CONTEXT_PROPERTY	0x28
+
+/*
+ * kgsl_capabilities_properties returns a list of supported properties.
+ * If the user passes 0 for 'count' the kernel will set it to the number of
+ * supported properties. The list is expected to be 'count * sizeof(uint32_t)'
+ * bytes long. The kernel will return the actual number of entries copied into
+ * list via 'count'.
+ */
+struct kgsl_capabilities_properties {
+	__u64 list;
+	__u32 count;
+};
+
+/*
+ * KGSL_QUERY_CAPS_PROPERTIES returns a list of the valid properties in the
+ * kernel.  The subtype data should be struct kgsl_capabilities_properties
+ */
+#define KGSL_QUERY_CAPS_PROPERTIES 1
+
+/*
+ * kgsl_capabilities allows the user to query kernel capabilities. The 'data'
+ * type should be set appropriately for the querytype (see above). Pass 0 to
+ * 'size' and the kernel will set it to the expected size of 'data' that is
+ * appropriate for querytype (in bytes).
+ */
+struct kgsl_capabilities {
+	__u64 data;
+	__u64 size;
+	__u32 querytype;
+};
 
 struct kgsl_shadowprop {
 	unsigned long gpuaddr;
@@ -376,6 +409,21 @@ struct kgsl_gpmu_version {
 	unsigned int features;
 };
 
+struct kgsl_context_property {
+	__u64 data;
+	__u32 size;
+	__u32 type;
+	__u32 contextid;
+};
+
+struct kgsl_context_property_fault {
+	__s32 faults;
+	__u32 timestamp;
+};
+
+/* Context property sub types */
+#define KGSL_CONTEXT_PROP_FAULTS 1
+
 /* Performance counter groups */
 
 #define KGSL_PERFCOUNTER_GROUP_CP 0x0
diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h
index 139229c..9309567 100644
--- a/include/uapi/linux/msm_npu.h
+++ b/include/uapi/linux/msm_npu.h
@@ -54,6 +54,14 @@
 #define MSM_NPU_RECEIVE_EVENT \
 	_IOR(MSM_NPU_IOCTL_MAGIC, 9, struct msm_npu_event)
 
+/* set property */
+#define MSM_NPU_SET_PROP \
+	_IOW(MSM_NPU_IOCTL_MAGIC, 10, struct msm_npu_property)
+
+/* get property */
+#define MSM_NPU_GET_PROP \
+	_IOW(MSM_NPU_IOCTL_MAGIC, 11, struct msm_npu_property)
+
 #define MSM_NPU_EVENT_TYPE_START 0x10000000
 #define MSM_NPU_EVENT_TYPE_EXEC_DONE (MSM_NPU_EVENT_TYPE_START + 1)
 #define MSM_NPU_EVENT_TYPE_EXEC_V2_DONE (MSM_NPU_EVENT_TYPE_START + 2)
@@ -64,6 +72,23 @@
 #define MSM_NPU_MAX_PATCH_LAYER_NUM (MSM_NPU_MAX_INPUT_LAYER_NUM +\
 	MSM_NPU_MAX_OUTPUT_LAYER_NUM)
 
+#define MSM_NPU_PROP_ID_START 0x100
+#define MSM_NPU_PROP_ID_FW_STATE (MSM_NPU_PROP_ID_START + 0)
+#define MSM_NPU_PROP_ID_PERF_MODE (MSM_NPU_PROP_ID_START + 1)
+#define MSM_NPU_PROP_ID_PERF_MODE_MAX (MSM_NPU_PROP_ID_START + 2)
+#define MSM_NPU_PROP_ID_DRV_VERSION (MSM_NPU_PROP_ID_START + 3)
+#define MSM_NPU_PROP_ID_HARDWARE_VERSION (MSM_NPU_PROP_ID_START + 4)
+
+#define MSM_NPU_FW_PROP_ID_START 0x1000
+#define MSM_NPU_PROP_ID_DCVS_MODE (MSM_NPU_FW_PROP_ID_START + 0)
+#define MSM_NPU_PROP_ID_DCVS_MODE_MAX (MSM_NPU_FW_PROP_ID_START + 1)
+#define MSM_NPU_PROP_ID_CLK_GATING_MODE (MSM_NPU_FW_PROP_ID_START + 2)
+#define MSM_NPU_PROP_ID_HW_VERSION (MSM_NPU_FW_PROP_ID_START + 3)
+#define MSM_NPU_PROP_ID_FW_VERSION (MSM_NPU_FW_PROP_ID_START + 4)
+
+
+#define PROP_PARAM_MAX_SIZE 8
+
 /* -------------------------------------------------------------------------
  * Data Structures
  * -------------------------------------------------------------------------
@@ -259,4 +284,11 @@ struct msm_npu_event {
 	uint32_t reserved[4];
 };
 
+struct msm_npu_property {
+	uint32_t prop_id;
+	uint32_t num_of_params;
+	uint32_t network_hdl;
+	uint32_t prop_param[PROP_PARAM_MAX_SIZE];
+};
+
 #endif /*_UAPI_MSM_NPU_H_*/
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b816e88..16bf3a5 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -5290,6 +5290,9 @@ enum nl80211_feature_flags {
  *	fairness for transmitted packets and has enabled airtime fairness
  *	scheduling.
  *
+ * @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
+ *	(set/del PMKSA operations) in AP mode.
+ *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
@@ -5328,6 +5331,7 @@ enum nl80211_ext_feature_index {
 	NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
 	NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
 	NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
+	NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index b3bcabe..5b0ba00 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -49,8 +49,10 @@
 
 #define PSCI_1_0_FN_PSCI_FEATURES		PSCI_0_2_FN(10)
 #define PSCI_1_0_FN_SYSTEM_SUSPEND		PSCI_0_2_FN(14)
+#define PSCI_1_1_FN_SYSTEM_RESET2		PSCI_0_2_FN(18)
 
 #define PSCI_1_0_FN64_SYSTEM_SUSPEND		PSCI_0_2_FN64(14)
+#define PSCI_1_1_FN64_SYSTEM_RESET2		PSCI_0_2_FN64(18)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK		0xffff
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
index 2e5b9da..c5b97fd 100644
--- a/include/uapi/linux/spcom.h
+++ b/include/uapi/linux/spcom.h
@@ -85,6 +85,8 @@ struct spcom_send_command {
 struct spcom_user_create_channel_command {
 	enum spcom_cmd_id cmd_id;
 	char ch_name[SPCOM_CHANNEL_NAME_SIZE];
+#define SPCOM_IS_SHARABLE_SUPPORTED
+	bool is_sharable;
 } __packed;
 
 /* Command structure between userspace spcomlib and spcom driver, on write() */
diff --git a/include/uapi/linux/spss_utils.h b/include/uapi/linux/spss_utils.h
new file mode 100644
index 0000000..acb7bceb
--- /dev/null
+++ b/include/uapi/linux/spss_utils.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_SPSS_UTILS_H_
+#define _UAPI_SPSS_UTILS_H_
+
+#include <linux/types.h>    /* uint32_t, bool */
+#include <linux/ioctl.h>    /* ioctl() */
+
+/**
+ * @brief - Secure Processor Utilities interface to user space
+ *
+ * The kernel spss_utils driver interface to user space via IOCTL
+ * and SYSFS (device attributes).
+ */
+
+#define SPSS_IOC_MAGIC  'S'
+
+struct spss_ioc_set_fw_cmac {
+	uint32_t cmac[4];
+} __packed;
+
+#define SPSS_IOC_SET_FW_CMAC \
+	_IOWR(SPSS_IOC_MAGIC, 1, struct spss_ioc_set_fw_cmac)
+
+#endif /* _UAPI_SPSS_UTILS_H_ */
diff --git a/include/uapi/linux/sysstats.h b/include/uapi/linux/sysstats.h
new file mode 100644
index 0000000..0343554
--- /dev/null
+++ b/include/uapi/linux/sysstats.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_SYSSTATS_H
+#define _LINUX_SYSSTATS_H
+
+#include <linux/types.h>
+#include <linux/taskstats.h>
+#include <linux/cgroupstats.h>
+
+#define SYSSTATS_VERSION	1
+
+/*
+ * Data shared between user space and kernel space
+ * Each member is aligned to a 8 byte boundary.
+ * All values in KB.
+ */
+struct sys_memstats {
+	__u64	version;
+	__u64	memtotal;
+	__u64	vmalloc_total;
+	__u64	reclaimable;
+	__u64	zram_compressed;
+	__u64	swap_used;
+	__u64	swap_total;
+	__u64	unreclaimable;
+	__u64	buffer;
+	__u64	slab_reclaimable;
+	__u64	slab_unreclaimable;
+	__u64	free_cma;
+	__u64	file_mapped;
+	__u64	swapcache;
+	__u64	pagetable;
+	__u64	kernelstack;
+	__u64	shmem;
+	__u64	dma_nr_free_pages;
+	__u64	dma_nr_active_anon;
+	__u64	dma_nr_inactive_anon;
+	__u64	dma_nr_active_file;
+	__u64	dma_nr_inactive_file;
+	__u64	normal_nr_free_pages;
+	__u64	normal_nr_active_anon;
+	__u64	normal_nr_inactive_anon;
+	__u64	normal_nr_active_file;
+	__u64	normal_nr_inactive_file;
+	__u64	movable_nr_free_pages;
+	__u64	movable_nr_active_anon;
+	__u64	movable_nr_inactive_anon;
+	__u64	movable_nr_active_file;
+	__u64	movable_nr_inactive_file;
+	__u64	highmem_nr_free_pages;
+	__u64	highmem_nr_active_anon;
+	__u64	highmem_nr_inactive_anon;
+	__u64	highmem_nr_active_file;
+	__u64	highmem_nr_inactive_file;
+	/* version 1 ends here */
+};
+
+/*
+ * Commands sent from userspace
+ * Not versioned. New commands should only be inserted at the enum's end.
+ */
+
+enum {
+	SYSSTATS_CMD_UNSPEC = __CGROUPSTATS_CMD_MAX,	/* Reserved */
+	SYSSTATS_CMD_GET,		/* user->kernel request/get-response */
+	SYSSTATS_CMD_NEW,		/* kernel->user event */
+};
+
+#define SYSSTATS_CMD_UNSPEC SYSSTATS_CMD_UNSPEC
+#define SYSSTATS_CMD_GET SYSSTATS_CMD_GET
+#define SYSSTATS_CMD_NEW SYSSTATS_CMD_NEW
+
+enum {
+	SYSSTATS_TYPE_UNSPEC = 0,	/* Reserved */
+	SYSSTATS_TYPE_SYSMEM_STATS,	/* contains name + memory stats */
+};
+
+#define SYSSTATS_TYPE_UNSPEC SYSSTATS_TYPE_UNSPEC
+#define SYSSTATS_TYPE_SYSMEM_STATS SYSSTATS_TYPE_SYSMEM_STATS
+
+enum {
+	SYSSTATS_CMD_ATTR_UNSPEC = 0,
+	SYSSTATS_CMD_ATTR_SYSMEM_STATS,
+};
+
+#define SYSSTATS_CMD_ATTR_UNSPEC SYSSTATS_CMD_ATTR_UNSPEC
+#define SYSSTATS_CMD_ATTR_SYSMEM_STATS SYSSTATS_CMD_ATTR_SYSMEM_STATS
+
+#endif /* _LINUX_SYSSTATS_H */
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 5e8ca16..125b5a9 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -35,6 +35,7 @@
 
 
 #define TASKSTATS_VERSION	9
+#define TASKSTATS2_VERSION	1
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -170,6 +171,17 @@ struct taskstats {
 	__u64	thrashing_delay_total;
 };
 
+struct taskstats2 {
+	__u16 version;
+	__s16 oom_score;
+	__u32 pid;
+	__u64 anon_rss;	/* KB */
+	__u64 file_rss;	/* KB */
+	__u64 swap_rss;	/* KB */
+	__u64 shmem_rss;	/* KB */
+	__u64 unreclaimable;	/* KB */
+	/* version 1 ends here */
+};
 
 /*
  * Commands sent from userspace
@@ -181,6 +193,7 @@ enum {
 	TASKSTATS_CMD_UNSPEC = 0,	/* Reserved */
 	TASKSTATS_CMD_GET,		/* user->kernel request/get-response */
 	TASKSTATS_CMD_NEW,		/* kernel->user event */
+	TASKSTATS2_CMD_GET,		/* user->kernel request/get-response */
 	__TASKSTATS_CMD_MAX,
 };
 
@@ -194,6 +207,7 @@ enum {
 	TASKSTATS_TYPE_AGGR_PID,	/* contains pid + stats */
 	TASKSTATS_TYPE_AGGR_TGID,	/* contains tgid + stats */
 	TASKSTATS_TYPE_NULL,		/* contains nothing */
+	TASKSTATS_TYPE_FOREACH,		/* contains stats */
 	__TASKSTATS_TYPE_MAX,
 };
 
@@ -205,6 +219,7 @@ enum {
 	TASKSTATS_CMD_ATTR_TGID,
 	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
 	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
+	TASKSTATS_CMD_ATTR_FOREACH,
 	__TASKSTATS_CMD_ATTR_MAX,
 };
 
diff --git a/include/uapi/media/cam_icp.h b/include/uapi/media/cam_icp.h
index 969de27..ac683bf 100644
--- a/include/uapi/media/cam_icp.h
+++ b/include/uapi/media/cam_icp.h
@@ -18,10 +18,13 @@
 #define CAM_ICP_DEV_TYPE_MAX     5
 
 /* definitions needed for icp aquire device */
-#define CAM_ICP_RES_TYPE_BPS        1
-#define CAM_ICP_RES_TYPE_IPE_RT     2
-#define CAM_ICP_RES_TYPE_IPE        3
-#define CAM_ICP_RES_TYPE_MAX        4
+#define CAM_ICP_RES_TYPE_BPS         1
+#define CAM_ICP_RES_TYPE_IPE_RT      2
+#define CAM_ICP_RES_TYPE_IPE         3
+#define CAM_ICP_RES_TYPE_IPE_SEMI_RT 4
+#define CAM_ICP_RES_TYPE_BPS_RT      5
+#define CAM_ICP_RES_TYPE_BPS_SEMI_RT 6
+#define CAM_ICP_RES_TYPE_MAX         7
 
 /* packet opcode types */
 #define CAM_ICP_OPCODE_IPE_UPDATE   0
diff --git a/include/uapi/media/cam_isp_ife.h b/include/uapi/media/cam_isp_ife.h
index 7ce826d..34c1b3b 100644
--- a/include/uapi/media/cam_isp_ife.h
+++ b/include/uapi/media/cam_isp_ife.h
@@ -46,7 +46,9 @@
 #define CAM_ISP_IFE_IN_RES_PHY_1               (CAM_ISP_IFE_IN_RES_BASE + 2)
 #define CAM_ISP_IFE_IN_RES_PHY_2               (CAM_ISP_IFE_IN_RES_BASE + 3)
 #define CAM_ISP_IFE_IN_RES_PHY_3               (CAM_ISP_IFE_IN_RES_BASE + 4)
-#define CAM_ISP_IFE_IN_RES_RD                  (CAM_ISP_IFE_IN_RES_BASE + 5)
-#define CAM_ISP_IFE_IN_RES_MAX                 (CAM_ISP_IFE_IN_RES_BASE + 6)
+#define CAM_ISP_IFE_IN_RES_PHY_4               (CAM_ISP_IFE_IN_RES_BASE + 5)
+#define CAM_ISP_IFE_IN_RES_PHY_5               (CAM_ISP_IFE_IN_RES_BASE + 6)
+#define CAM_ISP_IFE_IN_RES_RD                  (CAM_ISP_IFE_IN_RES_BASE + 7)
+#define CAM_ISP_IFE_IN_RES_MAX                 (CAM_ISP_IFE_IN_RES_BASE + 8)
 
 #endif /* __UAPI_CAM_ISP_IFE_H__ */
diff --git a/include/uapi/media/cam_isp_vfe.h b/include/uapi/media/cam_isp_vfe.h
index 5f9ce7a..e2592ae 100644
--- a/include/uapi/media/cam_isp_vfe.h
+++ b/include/uapi/media/cam_isp_vfe.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __UAPI_CAM_ISP_VFE_H__
@@ -43,7 +43,9 @@
 #define CAM_ISP_VFE_IN_RES_PHY_1               (CAM_ISP_VFE_IN_RES_BASE + 2)
 #define CAM_ISP_VFE_IN_RES_PHY_2               (CAM_ISP_VFE_IN_RES_BASE + 3)
 #define CAM_ISP_VFE_IN_RES_PHY_3               (CAM_ISP_VFE_IN_RES_BASE + 4)
-#define CAM_ISP_VFE_IN_RES_FE                  (CAM_ISP_VFE_IN_RES_BASE + 5)
-#define CAM_ISP_VFE_IN_RES_MAX                 (CAM_ISP_VFE_IN_RES_BASE + 6)
+#define CAM_ISP_VFE_IN_RES_PHY_4               (CAM_ISP_VFE_IN_RES_BASE + 5)
+#define CAM_ISP_VFE_IN_RES_PHY_5               (CAM_ISP_VFE_IN_RES_BASE + 6)
+#define CAM_ISP_VFE_IN_RES_FE                  (CAM_ISP_VFE_IN_RES_BASE + 7)
+#define CAM_ISP_VFE_IN_RES_MAX                 (CAM_ISP_VFE_IN_RES_BASE + 8)
 
 #endif /* __UAPI_CAM_ISP_VFE_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 94a0ce4..a1278dd 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __UAPI_LINUX_CAM_REQ_MGR_H
@@ -40,6 +40,7 @@
  * It includes both session and device handles
  */
 #define CAM_REQ_MGR_MAX_HANDLES           64
+#define CAM_REQ_MGR_MAX_HANDLES_V2        128
 #define MAX_LINKS_PER_SESSION             2
 
 /* V4L event type which user space will subscribe to */
@@ -126,6 +127,20 @@ struct cam_req_mgr_link_info {
 	int32_t link_hdl;
 };
 
+struct cam_req_mgr_link_info_v2 {
+	int32_t session_hdl;
+	uint32_t num_devices;
+	int32_t dev_hdls[CAM_REQ_MGR_MAX_HANDLES_V2];
+	int32_t link_hdl;
+};
+
+struct cam_req_mgr_ver_info {
+	uint32_t version;
+	union {
+		struct cam_req_mgr_link_info link_info_v1;
+		struct cam_req_mgr_link_info_v2 link_info_v2;
+	} u;
+};
 /**
  * struct cam_req_mgr_unlink_info
  * @session_hdl: input param - session handle
@@ -235,6 +250,7 @@ struct cam_req_mgr_link_control {
 #define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
 #define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
 #define CAM_REQ_MGR_LINK_CONTROL                (CAM_COMMON_OPCODE_MAX + 13)
+#define CAM_REQ_MGR_LINK_V2                     (CAM_COMMON_OPCODE_MAX + 14)
 /* end of cam_req_mgr opcodes */
 
 #define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 7a4ea7d..0dff805 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -217,7 +217,12 @@ struct cvp_kmd_hfi_packet {
 	unsigned int pkt_data[MAX_HFI_PKT_SIZE];
 };
 
-#define CVP_HFI_VERSION	1
+#define CVP_KMD_PROP_HFI_VERSION	1
+#define CVP_KMD_PROP_SESSION_TYPE	2
+#define CVP_KMD_PROP_SESSION_KERNELMASK	3
+#define CVP_KMD_PROP_SESSION_PRIORITY	4
+#define CVP_KMD_PROP_SESSION_SECURITY	5
+#define CVP_KMD_PROP_SESSION_DSPMASK	6
 struct cvp_kmd_sys_property {
 	unsigned int prop_type;
 	unsigned int data;
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
index e8efd8b..d347218 100644
--- a/include/uapi/media/msm_media_info.h
+++ b/include/uapi/media/msm_media_info.h
@@ -2,6 +2,11 @@
 #ifndef __MSM_MEDIA_INFO_H__
 #define __MSM_MEDIA_INFO_H__
 
+/* Width and Height should be multiple of 16 */
+#define INTERLACE_WIDTH_MAX 1920
+#define INTERLACE_HEIGHT_MAX 1920
+#define INTERLACE_MB_PER_FRAME_MAX ((1920*1088)/256)
+
 #ifndef MSM_MEDIA_ALIGN
 #define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
 	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
@@ -160,7 +165,7 @@ enum color_fmts {
 	 * Y_Stride = align(Width, 128)
 	 * UV_Stride = align(Width, 128)
 	 * Y_Scanlines = align(Height, 32)
-	 * UV_Scanlines = align(Height/2, 16)
+	 * UV_Scanlines = align(Height/2, 32)
 	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
 	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
 	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
@@ -422,8 +427,8 @@ enum color_fmts {
 	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
 	 *
 	 *
-	 * Y_Stride = align(Width * 4/3, 128)
-	 * UV_Stride = align(Width * 4/3, 128)
+	 * Y_Stride = align(Width * 4/3, 256)
+	 * UV_Stride = align(Width * 4/3, 256)
 	 * Y_Scanlines = align(Height, 32)
 	 * UV_Scanlines = align(Height/2, 16)
 	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
@@ -499,8 +504,8 @@ enum color_fmts {
 	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
 	 * . . . . . . . . . . . . . . . .              V
 	 *
-	 * RGB_Stride = align(Width * 4, 128)
-	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
 	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
 	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
 	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
@@ -589,7 +594,7 @@ enum color_fmts {
 	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
 	 * . . . . . . . . . . . . . . . .              V
 	 *
-	 * RGB_Stride = align(Width * 2, 128)
+	 * RGB_Stride = align(Width * 2, 256)
 	 * RGB_Scanlines = align(Height, 16)
 	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
 	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
@@ -1210,8 +1215,9 @@ static inline unsigned int VENUS_BUFFER_SIZE(unsigned int color_fmt,
 	case COLOR_FMT_NV12_UBWC:
 		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
 		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-		if ((width <= 1920 && height <= 1088) ||
-			(width <= 1088 && height <= 1920)) {
+		if (width <= INTERLACE_WIDTH_MAX &&
+			height <= INTERLACE_HEIGHT_MAX &&
+			(height * width) / 256 <= INTERLACE_MB_PER_FRAME_MAX) {
 			y_sclines =
 				VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
 			y_ubwc_plane =
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 0d06a8b..2517e58 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -54,6 +54,9 @@
 
 #include <linux/types.h>
 
+/* Maximum PCM channels */
+#define MAX_PCM_DECODE_CHANNELS 32
+
 /* AUDIO CODECS SUPPORTED */
 #define MAX_NUM_CODECS 32
 #define MAX_NUM_CODEC_DESCRIPTORS 32
@@ -110,9 +113,9 @@
 #define SND_AUDIOCODEC_DSD                   ((__u32) 0x00000022)
 #define SND_AUDIOCODEC_APTX                  ((__u32) 0x00000023)
 #define SND_AUDIOCODEC_TRUEHD                ((__u32) 0x00000024)
-#define SND_AUDIOCODEC_DLB_MAT               ((__u32) 0x00000025)
-#define SND_AUDIOCODEC_DLB_THD               ((__u32) 0x00000026)
-#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_DLB_THD
+#define SND_AUDIOCODEC_MAT                   ((__u32) 0x00000025)
+#define SND_AUDIOCODEC_THD                   ((__u32) 0x00000026)
+#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_THD
 
 /*
  * Profile and modes are listed with bit masks. This allows for a
@@ -416,6 +419,15 @@ struct snd_dec_aptx {
 	__u32 nap;
 };
 
+/** struct snd_dec_pcm - codec options for PCM format
+ * @num_channels: Number of channels
+ * @ch_map: Channel map for the above corresponding channels
+ */
+struct snd_dec_pcm {
+	__u32 num_channels;
+	__u8 ch_map[MAX_PCM_DECODE_CHANNELS];
+} __attribute__((packed, aligned(4)));
+
 union snd_codec_options {
 	struct snd_enc_wma wma;
 	struct snd_enc_vorbis vorbis;
@@ -428,6 +440,7 @@ union snd_codec_options {
 	struct snd_dec_ape ape;
 	struct snd_dec_aptx aptx_dec;
 	struct snd_dec_thd truehd;
+	struct snd_dec_pcm pcm_dec;
 };
 
 /** struct snd_codec_desc - description of codec capabilities
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 7d09e54..58fb573 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -48,6 +48,13 @@ struct dlfb_data {
 	int base8;
 	u32 pseudo_palette[256];
 	int blank_mode; /*one of FB_BLANK_ */
+	struct mutex render_mutex;
+	int damage_x;
+	int damage_y;
+	int damage_x2;
+	int damage_y2;
+	spinlock_t damage_lock;
+	struct work_struct damage_work;
 	struct fb_ops ops;
 	/* blit-only rendering path metrics, exposed through sysfs */
 	atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
diff --git a/init/Kconfig b/init/Kconfig
index 4417b99..39ae9a5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -19,6 +19,9 @@
 config CC_IS_CLANG
 	def_bool $(success,$(CC) --version | head -n 1 | grep -q clang)
 
+config LD_IS_LLD
+	def_bool $(success,$(LD) -v | head -n 1 | grep -q LLD)
+
 config CLANG_VERSION
 	int
 	default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
diff --git a/init/main.c b/init/main.c
index e083fac..020972f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -568,6 +568,8 @@ asmlinkage __visible void __init start_kernel(void)
 	page_alloc_init();
 
 	pr_notice("Kernel command line: %s\n", boot_command_line);
+	/* parameters may set static keys */
+	jump_label_init();
 	parse_early_param();
 	after_dashes = parse_args("Booting kernel",
 				  static_command_line, __start___param,
@@ -577,8 +579,6 @@ asmlinkage __visible void __init start_kernel(void)
 		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
 			   NULL, set_init_arg);
 
-	jump_label_init();
-
 	/*
 	 * These use large bootmem allocations and must precede
 	 * kmem_cache_init()
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 84d882f..621c296 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -248,3 +248,7 @@
 config QUEUED_RWLOCKS
 	def_bool y if ARCH_USE_QUEUED_RWLOCKS
 	depends on SMP
+
+config RWSEM_PRIO_AWARE
+       def_bool y
+       depends on RWSEM_XCHGADD_ALGORITHM
diff --git a/kernel/acct.c b/kernel/acct.c
index addf773..81f9831 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -227,7 +227,7 @@ static int acct_on(struct filename *pathname)
 		filp_close(file, NULL);
 		return PTR_ERR(internal);
 	}
-	err = mnt_want_write(internal);
+	err = __mnt_want_write(internal);
 	if (err) {
 		mntput(internal);
 		kfree(acct);
@@ -252,7 +252,7 @@ static int acct_on(struct filename *pathname)
 	old = xchg(&ns->bacct, &acct->pin);
 	mutex_unlock(&acct->lock);
 	pin_kill(old);
-	mnt_drop_write(mnt);
+	__mnt_drop_write(mnt);
 	mntput(mnt);
 	return 0;
 }
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index bf309f2..425c67e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1114,22 +1114,24 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
 	int err = 0;
 	struct audit_entry *entry;
 
-	entry = audit_data_to_entry(data, datasz);
-	if (IS_ERR(entry))
-		return PTR_ERR(entry);
-
 	switch (type) {
 	case AUDIT_ADD_RULE:
+		entry = audit_data_to_entry(data, datasz);
+		if (IS_ERR(entry))
+			return PTR_ERR(entry);
 		err = audit_add_rule(entry);
 		audit_log_rule_change("add_rule", &entry->rule, !err);
 		break;
 	case AUDIT_DEL_RULE:
+		entry = audit_data_to_entry(data, datasz);
+		if (IS_ERR(entry))
+			return PTR_ERR(entry);
 		err = audit_del_rule(entry);
 		audit_log_rule_change("remove_rule", &entry->rule, !err);
 		break;
 	default:
-		err = -EINVAL;
 		WARN_ON(1);
+		return -EINVAL;
 	}
 
 	if (err || type == AUDIT_DEL_RULE) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 474525e..bad9985b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -366,10 +366,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
+
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -578,27 +581,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 	return ret;
 }
 
+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+	/* Only used as heuristic here to derive limit. */
+	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+					    PAGE_SIZE), INT_MAX);
+	return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+	if (atomic_long_add_return(pages, &bpf_jit_current) >
+	    (bpf_jit_limit >> PAGE_SHIFT)) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			atomic_long_sub(pages, &bpf_jit_current);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+	atomic_long_sub(pages, &bpf_jit_current);
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_binary_header *hdr;
-	unsigned int size, hole, start;
+	u32 size, hole, start, pages;
 
 	/* Most of BPF filters are really small, but if some of them
 	 * fill a page, allow at least 128 extra bytes to insert a
 	 * random section of illegal instructions.
 	 */
 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
-	hdr = module_alloc(size);
-	if (hdr == NULL)
+	pages = size / PAGE_SIZE;
+
+	if (bpf_jit_charge_modmem(pages))
 		return NULL;
+	hdr = module_alloc(size);
+	if (!hdr) {
+		bpf_jit_uncharge_modmem(pages);
+		return NULL;
+	}
 
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
-	hdr->pages = size / PAGE_SIZE;
+	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -611,7 +651,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
+	u32 pages = hdr->pages;
+
 	module_memfree(hdr);
+	bpf_jit_uncharge_modmem(pages);
 }
 
 /* This symbol is only overridden by archs that have different
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 141710b..2faad03 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
+	/* Make sure prior __dev_map_entry_free() have completed. */
+	rcu_barrier();
+
 	/* To ensure all pending flush operations have completed wait for flush
 	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
 	 * Because the above synchronize_rcu() ensures the map is disconnected
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index cebadd6a..6fe7279 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -518,18 +518,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+							void *key, const bool mark)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l) {
-		bpf_lru_node_set_ref(&l->lru_node);
+		if (mark)
+			bpf_lru_node_set_ref(&l->lru_node);
 		return l->key + round_up(map->key_size, 8);
 	}
 
 	return NULL;
 }
 
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
@@ -1206,6 +1218,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
+	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
 	.map_update_elem = htab_lru_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
@@ -1237,7 +1250,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
@@ -1253,8 +1265,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	l = __htab_map_lookup_elem(map, key);
 	if (!l)
 		goto out;
-	if (htab_is_lru(htab))
-		bpf_lru_node_set_ref(&l->lru_node);
+	/* We do not mark LRU map element here in order to not mess up
+	 * eviction heuristics when user space does a map walk.
+	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 4a8f390..dc9d7ac 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -518,7 +518,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
 static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
 {
 	struct bpf_prog *prog;
-	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+	int ret = inode_permission(inode, MAY_READ);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cc40b8b..ede8238 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -721,7 +721,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 	} else {
 		rcu_read_lock();
-		ptr = map->ops->map_lookup_elem(map, key);
+		if (map->ops->map_lookup_elem_sys_only)
+			ptr = map->ops->map_lookup_elem_sys_only(map, key);
+		else
+			ptr = map->ops->map_lookup_elem(map, key);
 		if (ptr)
 			memcpy(value, ptr, value_size);
 		rcu_read_unlock();
diff --git a/kernel/cfi.c b/kernel/cfi.c
index c32e6b3..9e99922 100644
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -12,7 +12,6 @@
 #include <linux/spinlock.h>
 #include <asm/bug.h>
 #include <asm/cacheflush.h>
-#include <asm/memory.h>
 #include <asm/set_memory.h>
 
 /* Compiler-defined handler names */
@@ -27,9 +26,9 @@
 static inline void handle_cfi_failure(void *ptr)
 {
 #ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
+	WARN_RATELIMIT(1, "CFI failure (target: %pF):\n", ptr);
 #else
-	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
+	pr_err("CFI failure (target: %pF):\n", ptr);
 	BUG();
 #endif
 }
@@ -88,6 +87,14 @@ static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 	return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;
 }
 
+static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
+	int index)
+{
+	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+
+	return (s->r.min_page + index) << PAGE_SHIFT;
+}
+
 static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
 		struct cfi_shadow *next)
 {
@@ -110,7 +117,7 @@ static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
 		if (prev->shadow[i] == SHADOW_INVALID)
 			continue;
 
-		index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
+		index = ptr_to_shadow(next, shadow_to_page(prev, i));
 		if (index < 0)
 			continue;
 
@@ -221,7 +228,6 @@ static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
 	unsigned long ptr)
 {
 	int index;
-	unsigned long check;
 
 	if (unlikely(!s))
 		return NULL; /* No shadow available */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index d009f01..9a66ad3 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4772,9 +4772,11 @@ static void css_release_work_fn(struct work_struct *work)
 		if (cgroup_on_dfl(cgrp))
 			cgroup_rstat_flush(cgrp);
 
+		spin_lock_irq(&css_set_lock);
 		for (tcgrp = cgroup_parent(cgrp); tcgrp;
 		     tcgrp = cgroup_parent(tcgrp))
 			tcgrp->nr_dying_descendants--;
+		spin_unlock_irq(&css_set_lock);
 
 		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
 		cgrp->id = -1;
@@ -4992,12 +4994,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (ret)
 		goto out_psi_free;
 
+	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
 
 		if (tcgrp != cgrp)
 			tcgrp->nr_descendants++;
 	}
+	spin_unlock_irq(&css_set_lock);
 
 	if (notify_on_release(parent))
 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -5282,10 +5286,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	if (parent && cgroup_is_threaded(cgrp))
 		parent->nr_threaded_children--;
 
+	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		tcgrp->nr_descendants--;
 		tcgrp->nr_dying_descendants++;
 	}
+	spin_unlock_irq(&css_set_lock);
 
 	cgroup1_check_for_release(parent);
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aec6805..eb91c7c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1251,6 +1251,13 @@ int freeze_secondary_cpus(int primary)
 	for_each_online_cpu(cpu) {
 		if (cpu == primary)
 			continue;
+
+		if (pm_wakeup_pending()) {
+			pr_info("Wakeup pending. Abort CPU freeze\n");
+			error = -EBUSY;
+			break;
+		}
+
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
 		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
@@ -2382,3 +2389,18 @@ void idle_notifier_call_chain(unsigned long val)
 	atomic_notifier_call_chain(&idle_notifier, val, NULL);
 }
 EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
+
+enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+	if (!strcmp(arg, "off"))
+		cpu_mitigations = CPU_MITIGATIONS_OFF;
+	else if (!strcmp(arg, "auto"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO;
+	else if (!strcmp(arg, "auto,nosmt"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+
+	return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9ad4e30..9e13143 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2044,8 +2044,8 @@ event_sched_out(struct perf_event *event,
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
-	if (event->pending_disable) {
-		event->pending_disable = 0;
+	if (READ_ONCE(event->pending_disable) >= 0) {
+		WRITE_ONCE(event->pending_disable, -1);
 		state = PERF_EVENT_STATE_OFF;
 	}
 	perf_event_set_state(event, state);
@@ -2233,7 +2233,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-	event->pending_disable = 1;
+	WRITE_ONCE(event->pending_disable, smp_processor_id());
+	/* can fail, see perf_pending_event_disable() */
 	irq_work_queue(&event->pending);
 }
 
@@ -5976,10 +5977,45 @@ void perf_event_wakeup(struct perf_event *event)
 	}
 }
 
+static void perf_pending_event_disable(struct perf_event *event)
+{
+	int cpu = READ_ONCE(event->pending_disable);
+
+	if (cpu < 0)
+		return;
+
+	if (cpu == smp_processor_id()) {
+		WRITE_ONCE(event->pending_disable, -1);
+		perf_event_disable_local(event);
+		return;
+	}
+
+	/*
+	 *  CPU-A			CPU-B
+	 *
+	 *  perf_event_disable_inatomic()
+	 *    @pending_disable = CPU-A;
+	 *    irq_work_queue();
+	 *
+	 *  sched-out
+	 *    @pending_disable = -1;
+	 *
+	 *				sched-in
+	 *				perf_event_disable_inatomic()
+	 *				  @pending_disable = CPU-B;
+	 *				  irq_work_queue(); // FAILS
+	 *
+	 *  irq_work_run()
+	 *    perf_pending_event()
+	 *
+	 * But the event runs on CPU-B and wants disabling there.
+	 */
+	irq_work_queue_on(&event->pending, cpu);
+}
+
 static void perf_pending_event(struct irq_work *entry)
 {
-	struct perf_event *event = container_of(entry,
-			struct perf_event, pending);
+	struct perf_event *event = container_of(entry, struct perf_event, pending);
 	int rctx;
 
 	rctx = perf_swevent_get_recursion_context();
@@ -5988,10 +6024,7 @@ static void perf_pending_event(struct irq_work *entry)
 	 * and we won't recurse 'further'.
 	 */
 
-	if (event->pending_disable) {
-		event->pending_disable = 0;
-		perf_event_disable_local(event);
-	}
+	perf_pending_event_disable(event);
 
 	if (event->pending_wakeup) {
 		event->pending_wakeup = 0;
@@ -10264,6 +10297,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
 	init_waitqueue_head(&event->waitq);
+	event->pending_disable = -1;
 	init_irq_work(&event->pending, perf_pending_event);
 
 	mutex_init(&event->mmap_mutex);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 474b2cc..99c7f19 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -393,7 +393,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 		 * store that will be enabled on successful return
 		 */
 		if (!handle->size) { /* A, matches D */
-			event->pending_disable = 1;
+			event->pending_disable = smp_processor_id();
 			perf_output_wakeup(handle);
 			local_set(&rb->aux_nest, 0);
 			goto err_put;
@@ -471,7 +471,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 
 	if (wakeup) {
 		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-			handle->event->pending_disable = 1;
+			handle->event->pending_disable = smp_processor_id();
 		perf_output_wakeup(handle);
 	}
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 0a471239..e7a644e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -627,7 +627,13 @@ static void check_mm(struct mm_struct *mm)
 	int i;
 
 	for (i = 0; i < NR_MM_COUNTERS; i++) {
-		long x = atomic_long_read(&mm->rss_stat.count[i]);
+		long x;
+
+		/* MM_UNRECLAIMABLE could be freed later in exit_files */
+		if (i == MM_UNRECLAIMABLE)
+			continue;
+
+		x = atomic_long_read(&mm->rss_stat.count[i]);
 
 		if (unlikely(x))
 			printk(KERN_ALERT "BUG: Bad rss-counter state "
@@ -936,6 +942,15 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+static __always_inline void mm_clear_owner(struct mm_struct *mm,
+					   struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+	if (mm->owner == p)
+		WRITE_ONCE(mm->owner, NULL);
+#endif
+}
+
 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 #ifdef CONFIG_MEMCG
@@ -1318,6 +1333,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 free_pt:
 	/* don't put binfmt in mmput, we haven't got module yet */
 	mm->binfmt = NULL;
+	mm_init_owner(mm, NULL);
 	mmput(mm);
 
 fail_nomem:
@@ -1649,6 +1665,21 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
+static void __delayed_free_task(struct rcu_head *rhp)
+{
+	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+	free_task(tsk);
+}
+
+static __always_inline void delayed_free_task(struct task_struct *tsk)
+{
+	if (IS_ENABLED(CONFIG_MEMCG))
+		call_rcu(&tsk->rcu, __delayed_free_task);
+	else
+		free_task(tsk);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -2110,8 +2141,10 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-	if (p->mm)
+	if (p->mm) {
+		mm_clear_owner(p->mm, p);
 		mmput(p->mm);
+	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
 		free_signal_struct(p->signal);
@@ -2143,7 +2176,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_free:
 	p->state = TASK_DEAD;
 	put_task_stack(p);
-	free_task(p);
+	delayed_free_task(p);
 fork_out:
 	spin_lock_irq(&current->sighand->siglock);
 	hlist_del_init(&delayed.node);
diff --git a/kernel/futex.c b/kernel/futex.c
index 5a26d84..afdc5ea 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1306,13 +1306,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
 {
+	int err;
 	u32 uninitialized_var(curval);
 
 	if (unlikely(should_fail_futex(true)))
 		return -EFAULT;
 
-	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
-		return -EFAULT;
+	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+	if (unlikely(err))
+		return err;
 
 	/* If user space value changed, let the caller retry */
 	return curval != uval ? -EAGAIN : 0;
@@ -1498,10 +1500,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 	if (unlikely(should_fail_futex(true)))
 		ret = -EFAULT;
 
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
-		ret = -EFAULT;
-
-	} else if (curval != uval) {
+	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+	if (!ret && (curval != uval)) {
 		/*
 		 * If a unconditional UNLOCK_PI operation (user space did not
 		 * try the TID->0 transition) raced with a waiter setting the
@@ -1696,32 +1696,32 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
 	double_lock_hb(hb1, hb2);
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
-
 		double_unlock_hb(hb1, hb2);
 
-#ifndef CONFIG_MMU
-		/*
-		 * we don't get EFAULT from MMU faults if we don't have an MMU,
-		 * but we might get them from range checking
-		 */
-		ret = op_ret;
-		goto out_put_keys;
-#endif
-
-		if (unlikely(op_ret != -EFAULT)) {
+		if (!IS_ENABLED(CONFIG_MMU) ||
+		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
+			/*
+			 * we don't get EFAULT from MMU faults if we don't have
+			 * an MMU, but we might get them from range checking
+			 */
 			ret = op_ret;
 			goto out_put_keys;
 		}
 
-		ret = fault_in_user_writeable(uaddr2);
-		if (ret)
-			goto out_put_keys;
+		if (op_ret == -EFAULT) {
+			ret = fault_in_user_writeable(uaddr2);
+			if (ret)
+				goto out_put_keys;
+		}
 
-		if (!(flags & FLAGS_SHARED))
+		if (!(flags & FLAGS_SHARED)) {
+			cond_resched();
 			goto retry_private;
+		}
 
 		put_futex_key(&key2);
 		put_futex_key(&key1);
+		cond_resched();
 		goto retry;
 	}
 
@@ -2346,7 +2346,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	u32 uval, uninitialized_var(curval), newval;
 	struct task_struct *oldowner, *newowner;
 	u32 newtid;
-	int ret;
+	int ret, err = 0;
 
 	lockdep_assert_held(q->lock_ptr);
 
@@ -2417,14 +2417,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	if (!pi_state->owner)
 		newtid |= FUTEX_OWNER_DIED;
 
-	if (get_futex_value_locked(&uval, uaddr))
-		goto handle_fault;
+	err = get_futex_value_locked(&uval, uaddr);
+	if (err)
+		goto handle_err;
 
 	for (;;) {
 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-			goto handle_fault;
+		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+		if (err)
+			goto handle_err;
+
 		if (curval == uval)
 			break;
 		uval = curval;
@@ -2452,23 +2455,37 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	return 0;
 
 	/*
-	 * To handle the page fault we need to drop the locks here. That gives
-	 * the other task (either the highest priority waiter itself or the
-	 * task which stole the rtmutex) the chance to try the fixup of the
-	 * pi_state. So once we are back from handling the fault we need to
-	 * check the pi_state after reacquiring the locks and before trying to
-	 * do another fixup. When the fixup has been done already we simply
-	 * return.
+	 * In order to reschedule or handle a page fault, we need to drop the
+	 * locks here. In the case of a fault, this gives the other task
+	 * (either the highest priority waiter itself or the task which stole
+	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
+	 * are back from handling the fault we need to check the pi_state after
+	 * reacquiring the locks and before trying to do another fixup. When
+	 * the fixup has been done already we simply return.
 	 *
 	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
 	 * drop hb->lock since the caller owns the hb -> futex_q relation.
 	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
 	 */
-handle_fault:
+handle_err:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(q->lock_ptr);
 
-	ret = fault_in_user_writeable(uaddr);
+	switch (err) {
+	case -EFAULT:
+		ret = fault_in_user_writeable(uaddr);
+		break;
+
+	case -EAGAIN:
+		cond_resched();
+		ret = 0;
+		break;
+
+	default:
+		WARN_ON_ONCE(1);
+		ret = err;
+		break;
+	}
 
 	spin_lock(q->lock_ptr);
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@@ -3037,10 +3054,8 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 		 * A unconditional UNLOCK_PI op raced against a waiter
 		 * setting the FUTEX_WAITERS bit. Try again.
 		 */
-		if (ret == -EAGAIN) {
-			put_futex_key(&key);
-			goto retry;
-		}
+		if (ret == -EAGAIN)
+			goto pi_retry;
 		/*
 		 * wake_futex_pi has detected invalid state. Tell user
 		 * space.
@@ -3055,9 +3070,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 	 * preserve the WAITERS bit not the OWNER_DIED one. We are the
 	 * owner.
 	 */
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
+	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
 		spin_unlock(&hb->lock);
-		goto pi_faulted;
+		switch (ret) {
+		case -EFAULT:
+			goto pi_faulted;
+
+		case -EAGAIN:
+			goto pi_retry;
+
+		default:
+			WARN_ON_ONCE(1);
+			goto out_putkey;
+		}
 	}
 
 	/*
@@ -3071,6 +3096,11 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 	put_futex_key(&key);
 	return ret;
 
+pi_retry:
+	put_futex_key(&key);
+	cond_resched();
+	goto retry;
+
 pi_faulted:
 	put_futex_key(&key);
 
@@ -3431,6 +3461,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
 	u32 uval, uninitialized_var(nval), mval;
+	int err;
 
 	/* Futex address must be 32bit aligned */
 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
@@ -3440,42 +3471,57 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
-		/*
-		 * Ok, this dying thread is truly holding a futex
-		 * of interest. Set the OWNER_DIED bit atomically
-		 * via cmpxchg, and if the value had FUTEX_WAITERS
-		 * set, wake up a waiter (if any). (We have to do a
-		 * futex_wake() even if OWNER_DIED is already set -
-		 * to handle the rare but possible case of recursive
-		 * thread-death.) The rest of the cleanup is done in
-		 * userspace.
-		 */
-		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-		/*
-		 * We are not holding a lock here, but we want to have
-		 * the pagefault_disable/enable() protection because
-		 * we want to handle the fault gracefully. If the
-		 * access fails we try to fault in the futex with R/W
-		 * verification via get_user_pages. get_user() above
-		 * does not guarantee R/W access. If that fails we
-		 * give up and leave the futex locked.
-		 */
-		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+		return 0;
+
+	/*
+	 * Ok, this dying thread is truly holding a futex
+	 * of interest. Set the OWNER_DIED bit atomically
+	 * via cmpxchg, and if the value had FUTEX_WAITERS
+	 * set, wake up a waiter (if any). (We have to do a
+	 * futex_wake() even if OWNER_DIED is already set -
+	 * to handle the rare but possible case of recursive
+	 * thread-death.) The rest of the cleanup is done in
+	 * userspace.
+	 */
+	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+
+	/*
+	 * We are not holding a lock here, but we want to have
+	 * the pagefault_disable/enable() protection because
+	 * we want to handle the fault gracefully. If the
+	 * access fails we try to fault in the futex with R/W
+	 * verification via get_user_pages. get_user() above
+	 * does not guarantee R/W access. If that fails we
+	 * give up and leave the futex locked.
+	 */
+	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
+		switch (err) {
+		case -EFAULT:
 			if (fault_in_user_writeable(uaddr))
 				return -1;
 			goto retry;
-		}
-		if (nval != uval)
+
+		case -EAGAIN:
+			cond_resched();
 			goto retry;
 
-		/*
-		 * Wake robust non-PI futexes here. The wakeup of
-		 * PI futexes happens in exit_pi_state():
-		 */
-		if (!pi && (uval & FUTEX_WAITERS))
-			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+		default:
+			WARN_ON_ONCE(1);
+			return err;
+		}
 	}
+
+	if (nval != uval)
+		goto retry;
+
+	/*
+	 * Wake robust non-PI futexes here. The wakeup of
+	 * PI futexes happens in exit_pi_state():
+	 */
+	if (!pi && (uval & FUTEX_WAITERS))
+		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+
 	return 0;
 }
 
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 1e3823f..f71c1ad 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -53,6 +53,7 @@
 choice
 	prompt "Specify GCOV format"
 	depends on GCOV_KERNEL
+	depends on CC_IS_GCC
 	---help---
 	The gcov format is usually determined by the GCC version, and the
 	default is chosen according to your GCC version. However, there are
@@ -62,7 +63,7 @@
 
 config GCOV_FORMAT_3_4
 	bool "GCC 3.4 format"
-	depends on CC_IS_GCC && GCC_VERSION < 40700
+	depends on GCC_VERSION < 40700
 	---help---
 	Select this option to use the format defined by GCC 3.4.
 
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index ff06d64..d66a74b 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -2,5 +2,6 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
 obj-y := base.o fs.o
-obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
-obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
+obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_base.o gcc_3_4.o
+obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_base.o gcc_4_7.o
+obj-$(CONFIG_CC_IS_CLANG) += clang.o
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 9c7c8d5..0ffe9f1 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -22,88 +22,8 @@
 #include <linux/sched.h>
 #include "gcov.h"
 
-static int gcov_events_enabled;
-static DEFINE_MUTEX(gcov_lock);
-
-/*
- * __gcov_init is called by gcc-generated constructor code for each object
- * file compiled with -fprofile-arcs.
- */
-void __gcov_init(struct gcov_info *info)
-{
-	static unsigned int gcov_version;
-
-	mutex_lock(&gcov_lock);
-	if (gcov_version == 0) {
-		gcov_version = gcov_info_version(info);
-		/*
-		 * Printing gcc's version magic may prove useful for debugging
-		 * incompatibility reports.
-		 */
-		pr_info("version magic: 0x%x\n", gcov_version);
-	}
-	/*
-	 * Add new profiling data structure to list and inform event
-	 * listener.
-	 */
-	gcov_info_link(info);
-	if (gcov_events_enabled)
-		gcov_event(GCOV_ADD, info);
-	mutex_unlock(&gcov_lock);
-}
-EXPORT_SYMBOL(__gcov_init);
-
-/*
- * These functions may be referenced by gcc-generated profiling code but serve
- * no function for kernel profiling.
- */
-void __gcov_flush(void)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_flush);
-
-void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_add);
-
-void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_single);
-
-void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_delta);
-
-void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_ior);
-
-void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_time_profile);
-
-void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_icall_topn);
-
-void __gcov_exit(void)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_exit);
+int gcov_events_enabled;
+DEFINE_MUTEX(gcov_lock);
 
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
@@ -144,7 +64,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
 
 	/* Remove entries located in module from linked list. */
 	while ((info = gcov_info_next(info))) {
-		if (within_module((unsigned long)info, mod)) {
+		if (gcov_info_within_module(info, mod)) {
 			gcov_info_unlink(prev, info);
 			if (gcov_events_enabled)
 				gcov_event(GCOV_REMOVE, info);
diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
new file mode 100644
index 0000000..c94b820
--- /dev/null
+++ b/kernel/gcov/clang.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Google, Inc.
+ * modified from kernel/gcov/gcc_4_7.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * LLVM uses profiling data that's deliberately similar to GCC, but has a
+ * very different way of exporting that data.  LLVM calls llvm_gcov_init() once
+ * per module, and provides a couple of callbacks that we can use to ask for
+ * more data.
+ *
+ * We care about the "writeout" callback, which in turn calls back into
+ * compiler-rt/this module to dump all the gathered coverage data to disk:
+ *
+ *    llvm_gcda_start_file()
+ *      llvm_gcda_emit_function()
+ *      llvm_gcda_emit_arcs()
+ *      llvm_gcda_emit_function()
+ *      llvm_gcda_emit_arcs()
+ *      [... repeats for each function ...]
+ *    llvm_gcda_summary_info()
+ *    llvm_gcda_end_file()
+ *
+ * This design is much more stateless and unstructured than gcc's, and is
+ * intended to run at process exit.  This forces us to keep some local state
+ * about which module we're dealing with at the moment.  On the other hand, it
+ * also means we don't depend as much on how LLVM represents profiling data
+ * internally.
+ *
+ * See LLVM's lib/Transforms/Instrumentation/GCOVProfiling.cpp for more
+ * details on how this works, particularly GCOVProfiler::emitProfileArcs(),
+ * GCOVProfiler::insertCounterWriteout(), and
+ * GCOVProfiler::insertFlush().
+ */
+
+#define pr_fmt(fmt)	"gcov: " fmt
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "gcov.h"
+
+typedef void (*llvm_gcov_callback)(void);
+
+struct gcov_info {
+	struct list_head head;
+
+	const char *filename;
+	unsigned int version;
+	u32 checksum;
+
+	struct list_head functions;
+};
+
+struct gcov_fn_info {
+	struct list_head head;
+
+	u32 ident;
+	u32 checksum;
+	u8 use_extra_checksum;
+	u32 cfg_checksum;
+
+	u32 num_counters;
+	u64 *counters;
+	const char *function_name;
+};
+
+static struct gcov_info *current_info;
+
+static LIST_HEAD(clang_gcov_list);
+
+void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
+{
+	struct gcov_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return;
+
+	INIT_LIST_HEAD(&info->head);
+	INIT_LIST_HEAD(&info->functions);
+
+	mutex_lock(&gcov_lock);
+
+	list_add_tail(&info->head, &clang_gcov_list);
+	current_info = info;
+	writeout();
+	current_info = NULL;
+	if (gcov_events_enabled)
+		gcov_event(GCOV_ADD, info);
+
+	mutex_unlock(&gcov_lock);
+}
+EXPORT_SYMBOL(llvm_gcov_init);
+
+void llvm_gcda_start_file(const char *orig_filename, const char version[4],
+		u32 checksum)
+{
+	current_info->filename = orig_filename;
+	memcpy(&current_info->version, version, sizeof(current_info->version));
+	current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+
+void llvm_gcda_emit_function(u32 ident, const char *function_name,
+		u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
+{
+	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return;
+
+	INIT_LIST_HEAD(&info->head);
+	info->ident = ident;
+	info->checksum = func_checksum;
+	info->use_extra_checksum = use_extra_checksum;
+	info->cfg_checksum = cfg_checksum;
+	if (function_name)
+		info->function_name = kstrdup(function_name, GFP_KERNEL);
+
+	list_add_tail(&info->head, &current_info->functions);
+}
+EXPORT_SYMBOL(llvm_gcda_emit_function);
+
+void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
+{
+	struct gcov_fn_info *info = list_last_entry(&current_info->functions,
+			struct gcov_fn_info, head);
+
+	info->num_counters = num_counters;
+	info->counters = counters;
+}
+EXPORT_SYMBOL(llvm_gcda_emit_arcs);
+
+void llvm_gcda_summary_info(void)
+{
+}
+EXPORT_SYMBOL(llvm_gcda_summary_info);
+
+void llvm_gcda_end_file(void)
+{
+}
+EXPORT_SYMBOL(llvm_gcda_end_file);
+
+/**
+ * gcov_info_filename - return info filename
+ * @info: profiling data set
+ */
+const char *gcov_info_filename(struct gcov_info *info)
+{
+	return info->filename;
+}
+
+/**
+ * gcov_info_version - return info version
+ * @info: profiling data set
+ */
+unsigned int gcov_info_version(struct gcov_info *info)
+{
+	return info->version;
+}
+
+/**
+ * gcov_info_next - return next profiling data set
+ * @info: profiling data set
+ *
+ * Returns next gcov_info following @info or first gcov_info in the chain if
+ * @info is %NULL.
+ */
+struct gcov_info *gcov_info_next(struct gcov_info *info)
+{
+	if (!info)
+		return list_first_entry_or_null(&clang_gcov_list,
+				struct gcov_info, head);
+	if (list_is_last(&info->head, &clang_gcov_list))
+		return NULL;
+	return list_next_entry(info, head);
+}
+
+/**
+ * gcov_info_link - link/add profiling data set to the list
+ * @info: profiling data set
+ */
+void gcov_info_link(struct gcov_info *info)
+{
+	list_add_tail(&info->head, &clang_gcov_list);
+}
+
+/**
+ * gcov_info_unlink - unlink/remove profiling data set from the list
+ * @prev: previous profiling data set
+ * @info: profiling data set
+ */
+void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
+{
+	/* Generic code unlinks while iterating. */
+	__list_del_entry(&info->head);
+}
+
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info->filename, mod);
+}
+
+/* Symbolic links to be created for each profiling data file. */
+const struct gcov_link gcov_link[] = {
+	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
+	{ 0, NULL},
+};
+
+/**
+ * gcov_info_reset - reset profiling data to zero
+ * @info: profiling data set
+ */
+void gcov_info_reset(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn;
+
+	list_for_each_entry(fn, &info->functions, head)
+		memset(fn->counters, 0,
+				sizeof(fn->counters[0]) * fn->num_counters);
+}
+
+/**
+ * gcov_info_is_compatible - check if profiling data can be added
+ * @info1: first profiling data set
+ * @info2: second profiling data set
+ *
+ * Returns non-zero if profiling data can be added, zero otherwise.
+ */
+int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
+{
+	struct gcov_fn_info *fn_ptr1 = list_first_entry_or_null(
+			&info1->functions, struct gcov_fn_info, head);
+	struct gcov_fn_info *fn_ptr2 = list_first_entry_or_null(
+			&info2->functions, struct gcov_fn_info, head);
+
+	if (info1->checksum != info2->checksum)
+		return false;
+	if (!fn_ptr1)
+		return fn_ptr1 == fn_ptr2;
+	while (!list_is_last(&fn_ptr1->head, &info1->functions) &&
+		!list_is_last(&fn_ptr2->head, &info2->functions)) {
+		if (fn_ptr1->checksum != fn_ptr2->checksum)
+			return false;
+		if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
+			return false;
+		if (fn_ptr1->use_extra_checksum &&
+			fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+			return false;
+		fn_ptr1 = list_next_entry(fn_ptr1, head);
+		fn_ptr2 = list_next_entry(fn_ptr2, head);
+	}
+	return list_is_last(&fn_ptr1->head, &info1->functions) &&
+		list_is_last(&fn_ptr2->head, &info2->functions);
+}
+
+/**
+ * gcov_info_add - add up profiling data
+ * @dst: profiling data set to which data is added
+ * @src: profiling data set which is added
+ *
+ * Adds profiling counts of @source to @dest.
+ */
+void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
+{
+	struct gcov_fn_info *dfn_ptr;
+	struct gcov_fn_info *sfn_ptr = list_first_entry_or_null(&src->functions,
+			struct gcov_fn_info, head);
+
+	list_for_each_entry(dfn_ptr, &dst->functions, head) {
+		u32 i;
+
+		for (i = 0; i < sfn_ptr->num_counters; i++)
+			dfn_ptr->counters[i] += sfn_ptr->counters[i];
+	}
+}
+
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+	size_t cv_size; /* counter values size */
+	struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+			GFP_KERNEL);
+	if (!fn_dup)
+		return NULL;
+	INIT_LIST_HEAD(&fn_dup->head);
+
+	fn_dup->function_name = kstrdup(fn->function_name, GFP_KERNEL);
+	if (!fn_dup->function_name)
+		goto err_name;
+
+	cv_size = fn->num_counters * sizeof(fn->counters[0]);
+	fn_dup->counters = vmalloc(cv_size);
+	if (!fn_dup->counters)
+		goto err_counters;
+	memcpy(fn_dup->counters, fn->counters, cv_size);
+
+	return fn_dup;
+
+err_counters:
+	kfree(fn_dup->function_name);
+err_name:
+	kfree(fn_dup);
+	return NULL;
+}
+
+/**
+ * gcov_info_dup - duplicate profiling data set
+ * @info: profiling data set to duplicate
+ *
+ * Return newly allocated duplicate on success, %NULL on error.
+ */
+struct gcov_info *gcov_info_dup(struct gcov_info *info)
+{
+	struct gcov_info *dup;
+	struct gcov_fn_info *fn;
+
+	dup = kmemdup(info, sizeof(*dup), GFP_KERNEL);
+	if (!dup)
+		return NULL;
+	INIT_LIST_HEAD(&dup->head);
+	INIT_LIST_HEAD(&dup->functions);
+	dup->filename = kstrdup(info->filename, GFP_KERNEL);
+	if (!dup->filename)
+		goto err;
+
+	list_for_each_entry(fn, &info->functions, head) {
+		struct gcov_fn_info *fn_dup = gcov_fn_info_dup(fn);
+
+		if (!fn_dup)
+			goto err;
+		list_add_tail(&fn_dup->head, &dup->functions);
+	}
+
+	return dup;
+
+err:
+	gcov_info_free(dup);
+	return NULL;
+}
+
+/**
+ * gcov_info_free - release memory for profiling data set duplicate
+ * @info: profiling data set duplicate to free
+ */
+void gcov_info_free(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn, *tmp;
+
+	list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+		kfree(fn->function_name);
+		vfree(fn->counters);
+		list_del(&fn->head);
+		kfree(fn);
+	}
+	kfree(info->filename);
+	kfree(info);
+}
+
+#define ITER_STRIDE	PAGE_SIZE
+
+/**
+ * struct gcov_iterator - specifies current file position in logical records
+ * @info: associated profiling data
+ * @buffer: buffer containing file data
+ * @size: size of buffer
+ * @pos: current position in file
+ */
+struct gcov_iterator {
+	struct gcov_info *info;
+	void *buffer;
+	size_t size;
+	loff_t pos;
+};
+
+/**
+ * store_gcov_u32 - store 32 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in the 32 bit
+ * unsigned binary form of the endianness of the machine generating the
+ * file. Returns the number of bytes stored. If @buffer is %NULL, doesn't
+ * store anything.
+ */
+static size_t store_gcov_u32(void *buffer, size_t off, u32 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+		*data = v;
+	}
+
+	return sizeof(*data);
+}
+
+/**
+ * store_gcov_u64 - store 64 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in the 32 bit
+ * unsigned binary form of the endianness of the machine generating the
+ * file. 64 bit numbers are stored as two 32 bit numbers, the low part
+ * first. Returns the number of bytes stored. If @buffer is %NULL, doesn't store
+ * anything.
+ */
+static size_t store_gcov_u64(void *buffer, size_t off, u64 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+
+		data[0] = (v & 0xffffffffUL);
+		data[1] = (v >> 32);
+	}
+
+	return sizeof(*data) * 2;
+}
+
+/**
+ * convert_to_gcda - convert profiling data set to gcda file format
+ * @buffer: the buffer to store file data or %NULL if no data should be stored
+ * @info: profiling data set to be converted
+ *
+ * Returns the number of bytes that were/would have been stored into the buffer.
+ */
+static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
+{
+	struct gcov_fn_info *fi_ptr;
+	size_t pos = 0;
+
+	/* File header. */
+	pos += store_gcov_u32(buffer, pos, GCOV_DATA_MAGIC);
+	pos += store_gcov_u32(buffer, pos, info->version);
+	pos += store_gcov_u32(buffer, pos, info->checksum);
+
+	list_for_each_entry(fi_ptr, &info->functions, head) {
+		u32 i;
+		u32 len = 2;
+
+		if (fi_ptr->use_extra_checksum)
+			len++;
+
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
+		pos += store_gcov_u32(buffer, pos, len);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
+		if (fi_ptr->use_extra_checksum)
+			pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
+		for (i = 0; i < fi_ptr->num_counters; i++)
+			pos += store_gcov_u64(buffer, pos, fi_ptr->counters[i]);
+	}
+
+	return pos;
+}
+
+/**
+ * gcov_iter_new - allocate and initialize profiling data iterator
+ * @info: profiling data set to be iterated
+ *
+ * Return file iterator on success, %NULL otherwise.
+ */
+struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
+{
+	struct gcov_iterator *iter;
+
+	iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL);
+	if (!iter)
+		goto err_free;
+
+	iter->info = info;
+	/* Dry-run to get the actual buffer size. */
+	iter->size = convert_to_gcda(NULL, info);
+	iter->buffer = vmalloc(iter->size);
+	if (!iter->buffer)
+		goto err_free;
+
+	convert_to_gcda(iter->buffer, info);
+
+	return iter;
+
+err_free:
+	kfree(iter);
+	return NULL;
+}
+
+
+/**
+ * gcov_iter_free - release memory for file iterator
+ * @iter: file iterator
+ */
+void gcov_iter_free(struct gcov_iterator *iter)
+{
+	vfree(iter->buffer);
+	kfree(iter);
+}
+
+/**
+ * gcov_iter_get_info - return profiling data set for given file iterator
+ * @iter: file iterator
+ */
+struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
+{
+	return iter->info;
+}
+
+/**
+ * gcov_iter_start - reset file iterator to starting position
+ * @iter: file iterator
+ */
+void gcov_iter_start(struct gcov_iterator *iter)
+{
+	iter->pos = 0;
+}
+
+/**
+ * gcov_iter_next - advance file iterator to next logical record
+ * @iter: file iterator
+ *
+ * Return zero if new position is valid, non-zero if iterator has reached end.
+ */
+int gcov_iter_next(struct gcov_iterator *iter)
+{
+	if (iter->pos < iter->size)
+		iter->pos += ITER_STRIDE;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * gcov_iter_write - write data for current pos to seq_file
+ * @iter: file iterator
+ * @seq: seq_file handle
+ *
+ * Return zero on success, non-zero otherwise.
+ */
+int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
+{
+	size_t len;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	len = ITER_STRIDE;
+	if (iter->pos + len > iter->size)
+		len = iter->size - iter->pos;
+
+	seq_write(seq, iter->buffer + iter->pos, len);
+
+	return 0;
+}
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
index 1e32e66..64d2dd9 100644
--- a/kernel/gcov/gcc_3_4.c
+++ b/kernel/gcov/gcc_3_4.c
@@ -137,6 +137,18 @@ void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
 		gcov_info_head = info->next;
 }
 
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info, mod);
+}
+
 /* Symbolic links to be created for each profiling data file. */
 const struct gcov_link gcov_link[] = {
 	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index ca5e5c0..ec37563 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -150,6 +150,18 @@ void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
 		gcov_info_head = info->next;
 }
 
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info, mod);
+}
+
 /* Symbolic links to be created for each profiling data file. */
 const struct gcov_link gcov_link[] = {
 	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
diff --git a/kernel/gcov/gcc_base.c b/kernel/gcov/gcc_base.c
new file mode 100644
index 0000000..3cf736b
--- /dev/null
+++ b/kernel/gcov/gcc_base.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include "gcov.h"
+
+/*
+ * __gcov_init is called by gcc-generated constructor code for each object
+ * file compiled with -fprofile-arcs.
+ */
+void __gcov_init(struct gcov_info *info)
+{
+	static unsigned int gcov_version;
+
+	mutex_lock(&gcov_lock);
+	if (gcov_version == 0) {
+		gcov_version = gcov_info_version(info);
+		/*
+		 * Printing gcc's version magic may prove useful for debugging
+		 * incompatibility reports.
+		 */
+		pr_info("version magic: 0x%x\n", gcov_version);
+	}
+	/*
+	 * Add new profiling data structure to list and inform event
+	 * listener.
+	 */
+	gcov_info_link(info);
+	if (gcov_events_enabled)
+		gcov_event(GCOV_ADD, info);
+	mutex_unlock(&gcov_lock);
+}
+EXPORT_SYMBOL(__gcov_init);
+
+/*
+ * These functions may be referenced by gcc-generated profiling code but serve
+ * no function for kernel profiling.
+ */
+void __gcov_flush(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_flush);
+
+void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_add);
+
+void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_single);
+
+void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_delta);
+
+void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_ior);
+
+void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_time_profile);
+
+void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_icall_topn);
+
+void __gcov_exit(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h
index de118ad..6ab2c18 100644
--- a/kernel/gcov/gcov.h
+++ b/kernel/gcov/gcov.h
@@ -15,6 +15,7 @@
 #ifndef GCOV_H
 #define GCOV_H GCOV_H
 
+#include <linux/module.h>
 #include <linux/types.h>
 
 /*
@@ -46,6 +47,7 @@ unsigned int gcov_info_version(struct gcov_info *info);
 struct gcov_info *gcov_info_next(struct gcov_info *info);
 void gcov_info_link(struct gcov_info *info);
 void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info);
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod);
 
 /* Base interface. */
 enum gcov_action {
@@ -83,4 +85,7 @@ struct gcov_link {
 };
 extern const struct gcov_link gcov_link[];
 
+extern int gcov_events_enabled;
+extern struct mutex gcov_lock;
+
 #endif /* GCOV_H */
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 379e89c..6e34459 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1234,6 +1234,50 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
 
 /**
+ *	irq_chip_set_parent_state - set the state of a parent interrupt.
+ *	@data: Pointer to interrupt specific data
+ *	@which: State to be restored (one of IRQCHIP_STATE_*)
+ *	@val: Value corresponding to @which
+ *
+ */
+int irq_chip_set_parent_state(struct irq_data *data,
+			      enum irqchip_irq_state which,
+			      bool val)
+{
+	data = data->parent_data;
+	if (!data)
+		return 0;
+
+	if (data->chip->irq_set_irqchip_state)
+		return data->chip->irq_set_irqchip_state(data, which, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(irq_chip_set_parent_state);
+
+/**
+ *	irq_chip_get_parent_state - get the state of a parent interrupt.
+ *	@data: Pointer to interrupt specific data
+ *	@which: one of IRQCHIP_STATE_* the caller wants to know
+ *	@state: a pointer to a boolean where the state is to be stored
+ *
+ */
+int irq_chip_get_parent_state(struct irq_data *data,
+			      enum irqchip_irq_state which,
+			      bool *state)
+{
+	data = data->parent_data;
+	if (!data)
+		return 0;
+
+	if (data->chip->irq_get_irqchip_state)
+		return data->chip->irq_get_irqchip_state(data, which, state);
+
+	return 0;
+}
+EXPORT_SYMBOL(irq_chip_get_parent_state);
+
+/**
  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
  * NULL)
  * @data:	Pointer to interrupt specific data
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb584ad..cd4f9f3 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -356,11 +356,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (!notify && old_notify)
+	if (old_notify) {
 		cancel_work_sync(&old_notify->work);
-
-	if (old_notify)
 		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf1..7328891 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,34 +56,18 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
-
-	/* Only queue if not already pending */
-	if (!irq_work_claim(work))
-		return false;
-
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
-
-	return true;
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
@@ -95,23 +79,48 @@ bool irq_work_queue(struct irq_work *work)
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
-	}
-
+	__irq_work_queue_local(work);
 	preempt_enable();
 
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
+{
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return false;
+
+	preempt_disable();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
+	} else {
+		__irq_work_queue_local(work);
+	}
+	preempt_enable();
+
+	return true;
+#endif /* CONFIG_SMP */
+}
+
+
 bool irq_work_needs_cpu(void)
 {
 	struct llist_head *raised, *lazy;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4344381..29ff663 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -703,7 +703,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
 	struct optimized_kprobe *op;
-	int ret;
 
 	BUG_ON(!kprobe_unused(ap));
 	/*
@@ -715,9 +714,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
 	/* Enable the probe again */
 	ap->flags &= ~KPROBE_FLAG_DISABLED;
 	/* Optimize it again (remove from op->list) */
-	ret = kprobe_optready(ap);
-	if (ret)
-		return ret;
+	if (!kprobe_optready(ap))
+		return -EINVAL;
 
 	optimize_kprobe(ap);
 	return 0;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0cbdbbb..26b57e2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3567,9 +3567,6 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	unsigned int depth;
 	int i;
 
-	if (unlikely(!debug_locks))
-		return 0;
-
 	depth = curr->lockdep_depth;
 	/*
 	 * This function is about (re)setting the class of a held lock,
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index ef90935..ddc9cbd 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -90,21 +90,13 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	sem->owner = NULL;
 	osq_lock_init(&sem->osq);
 #endif
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+	sem->m_count = 0;
+#endif
 }
 
 EXPORT_SYMBOL(__init_rwsem);
 
-enum rwsem_waiter_type {
-	RWSEM_WAITING_FOR_WRITE,
-	RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	enum rwsem_waiter_type type;
-};
-
 enum rwsem_wake_type {
 	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
 	RWSEM_WAKE_READERS,	/* Wake readers only */
@@ -130,6 +122,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 {
 	struct rwsem_waiter *waiter, *tmp;
 	long oldcount, woken = 0, adjustment = 0;
+	struct list_head wlist;
 
 	/*
 	 * Take a peek at the queue head waiter such that we can determine
@@ -188,18 +181,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 	 * of the queue. We know that woken will be at least 1 as we accounted
 	 * for above. Note we increment the 'active part' of the count by the
 	 * number of readers before waking any processes up.
+	 *
+	 * We have to do wakeup in 2 passes to prevent the possibility that
+	 * the reader count may be decremented before it is incremented. It
+	 * is because the to-be-woken waiter may not have slept yet. So it
+	 * may see waiter->task got cleared, finish its critical section and
+	 * do an unlock before the reader count increment.
+	 *
+	 * 1) Collect the read-waiters in a separate list, count them and
+	 *    fully increment the reader count in rwsem.
+	 * 2) For each waiters in the new list, clear waiter->task and
+	 *    put them into wake_q to be woken up later.
 	 */
-	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
-		struct task_struct *tsk;
-
+	list_for_each_entry(waiter, &sem->wait_list, list) {
 		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
 			break;
 
 		woken++;
-		tsk = waiter->task;
+	}
+	list_cut_before(&wlist, &sem->wait_list, &waiter->list);
 
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	if (list_empty(&sem->wait_list)) {
+		/* hit end of list above */
+		adjustment -= RWSEM_WAITING_BIAS;
+	}
+
+	if (adjustment)
+		atomic_long_add(adjustment, &sem->count);
+
+	/* 2nd pass */
+	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+		struct task_struct *tsk;
+
+		tsk = waiter->task;
 		get_task_struct(tsk);
-		list_del(&waiter->list);
+
 		/*
 		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
@@ -215,15 +232,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		/* wake_q_add() already take the task ref */
 		put_task_struct(tsk);
 	}
-
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
-	if (list_empty(&sem->wait_list)) {
-		/* hit end of list above */
-		adjustment -= RWSEM_WAITING_BIAS;
-	}
-
-	if (adjustment)
-		atomic_long_add(adjustment, &sem->count);
 }
 
 /*
@@ -235,6 +243,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
 	DEFINE_WAKE_Q(wake_q);
+	bool is_first_waiter = false;
 
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
@@ -242,7 +251,9 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list))
 		adjustment += RWSEM_WAITING_BIAS;
-	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* is_first_waiter == true means we are first in the queue */
+	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = atomic_long_add_return(adjustment, &sem->count);
@@ -255,7 +266,8 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	 */
 	if (count == RWSEM_WAITING_BIAS ||
 	    (count > RWSEM_WAITING_BIAS &&
-	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+	     (adjustment != -RWSEM_ACTIVE_READ_BIAS ||
+	     is_first_waiter)))
 		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
@@ -506,6 +518,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	struct rwsem_waiter waiter;
 	struct rw_semaphore *ret = sem;
 	DEFINE_WAKE_Q(wake_q);
+	bool is_first_waiter = false;
 
 	/* undo write bias from down_write operation, stop active locking */
 	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
@@ -527,7 +540,11 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	if (list_empty(&sem->wait_list))
 		waiting = false;
 
-	list_add_tail(&waiter.list, &sem->wait_list);
+	/*
+	 * is_first_waiter == true means we are first in the queue,
+	 * so there is no read locks that were queued ahead of us.
+	 */
+	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
@@ -538,7 +555,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 		 * no active writers, the lock must be read owned; so we try to
 		 * wake any read locks that were queued ahead of us.
 		 */
-		if (count > RWSEM_WAITING_BIAS) {
+		if (!is_first_waiter && count > RWSEM_WAITING_BIAS) {
 			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
 			/*
 			 * The wakeup is normally called _after_ the wait_lock
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index b9d0e72..ae2fb19 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -26,6 +26,17 @@
 # define DEBUG_RWSEMS_WARN_ON(c)
 #endif
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	enum rwsem_waiter_type type;
+};
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -85,3 +96,60 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 {
 }
 #endif
+
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+
+#define RWSEM_MAX_PREEMPT_ALLOWED 3000
+
+/*
+ * Return true if current waiter is added in the front of the rwsem wait list.
+ */
+static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
+				    struct rw_semaphore *sem)
+{
+	struct list_head *pos;
+	struct list_head *head;
+	struct rwsem_waiter *waiter = NULL;
+
+	pos = head = &sem->wait_list;
+	/*
+	 * Rules for task prio aware rwsem wait list queueing:
+	 * 1:	Only try to preempt waiters with which task priority
+	 *	which is higher than DEFAULT_PRIO.
+	 * 2:	To avoid starvation, add count to record
+	 *	how many high priority waiters preempt to queue in wait
+	 *	list.
+	 *	If preempt count is exceed RWSEM_MAX_PREEMPT_ALLOWED,
+	 *	use simple fifo until wait list is empty.
+	 */
+	if (list_empty(head)) {
+		list_add_tail(&waiter_in->list, head);
+		sem->m_count = 0;
+		return true;
+	}
+
+	if (waiter_in->task->prio < DEFAULT_PRIO
+		&& sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {
+
+		list_for_each(pos, head) {
+			waiter = list_entry(pos, struct rwsem_waiter, list);
+			if (waiter->task->prio > waiter_in->task->prio) {
+				list_add(&waiter_in->list, pos->prev);
+				sem->m_count++;
+				return &waiter_in->list == head->next;
+			}
+		}
+	}
+
+	list_add_tail(&waiter_in->list, head);
+
+	return false;
+}
+#else
+static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
+				    struct rw_semaphore *sem)
+{
+	list_add_tail(&waiter_in->list, &sem->wait_list);
+	return false;
+}
+#endif
diff --git a/kernel/module.c b/kernel/module.c
index e179303..d0ae2b6e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1949,8 +1949,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
 		return;
 
 	frob_text(&mod->core_layout, set_memory_ro);
+	frob_text(&mod->core_layout, set_memory_x);
+
 	frob_rodata(&mod->core_layout, set_memory_ro);
+
 	frob_text(&mod->init_layout, set_memory_ro);
+	frob_text(&mod->init_layout, set_memory_x);
+
 	frob_rodata(&mod->init_layout, set_memory_ro);
 
 	if (after_init)
diff --git a/kernel/panic.c b/kernel/panic.c
index 11d71ac..7241892 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -277,6 +277,8 @@ void panic(const char *fmt, ...)
 		 * shutting down.  But if there is a chance of
 		 * rebooting the system it will be rebooted.
 		 */
+		if (panic_reboot_mode != REBOOT_UNDEFINED)
+			reboot_mode = panic_reboot_mode;
 		emergency_restart();
 	}
 #ifdef __sparc__
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 21fec73..fc0d667 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -925,18 +926,26 @@ int ptrace_request(struct task_struct *child, long request,
 			ret = ptrace_setsiginfo(child, &siginfo);
 		break;
 
-	case PTRACE_GETSIGMASK:
+	case PTRACE_GETSIGMASK: {
+		sigset_t *mask;
+
 		if (addr != sizeof(sigset_t)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+		if (test_tsk_restore_sigmask(child))
+			mask = &child->saved_sigmask;
+		else
+			mask = &child->blocked;
+
+		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
 			ret = -EFAULT;
 		else
 			ret = 0;
 
 		break;
+	}
 
 	case PTRACE_SETSIGMASK: {
 		sigset_t new_set;
@@ -962,6 +971,8 @@ int ptrace_request(struct task_struct *child, long request,
 		child->blocked = new_set;
 		spin_unlock_irq(&child->sighand->siglock);
 
+		clear_tsk_restore_sigmask(child);
+
 		ret = 0;
 		break;
 	}
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 3424452..19249b8 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -561,6 +561,10 @@ rcu_perf_cleanup(void)
 
 	if (torture_cleanup_begin())
 		return;
+	if (!cur_ops) {
+		torture_cleanup_end();
+		return;
+	}
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
@@ -681,6 +685,7 @@ rcu_perf_init(void)
 			pr_cont(" %s", perf_ops[i]->name);
 		pr_cont("\n");
 		firsterr = -EINVAL;
+		cur_ops = NULL;
 		goto unwind;
 	}
 	if (cur_ops->init)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index c596c6f..0b7af7e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1826,6 +1826,10 @@ rcu_torture_cleanup(void)
 			cur_ops->cb_barrier();
 		return;
 	}
+	if (!cur_ops) {
+		torture_cleanup_end();
+		return;
+	}
 
 	rcu_torture_barrier_cleanup();
 	torture_stop_kthread(rcu_torture_stall, stall_task);
@@ -1964,6 +1968,7 @@ rcu_torture_init(void)
 			pr_cont(" %s", torture_ops[i]->name);
 		pr_cont("\n");
 		firsterr = -EINVAL;
+		cur_ops = NULL;
 		goto unwind;
 	}
 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 8fb44de..4fef589 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -31,6 +31,7 @@ EXPORT_SYMBOL(cad_pid);
 #define DEFAULT_REBOOT_MODE
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
+enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
 
 /*
  * This variable is used privately to keep track of whether or not
@@ -518,6 +519,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
 static int __init reboot_setup(char *str)
 {
 	for (;;) {
+		enum reboot_mode *mode;
+
 		/*
 		 * Having anything passed on the command line via
 		 * reboot= will cause us to disable DMI checking
@@ -525,17 +528,24 @@ static int __init reboot_setup(char *str)
 		 */
 		reboot_default = 0;
 
+		if (!strncmp(str, "panic_", 6)) {
+			mode = &panic_reboot_mode;
+			str += 6;
+		} else {
+			mode = &reboot_mode;
+		}
+
 		switch (*str) {
 		case 'w':
-			reboot_mode = REBOOT_WARM;
+			*mode = REBOOT_WARM;
 			break;
 
 		case 'c':
-			reboot_mode = REBOOT_COLD;
+			*mode = REBOOT_COLD;
 			break;
 
 		case 'h':
-			reboot_mode = REBOOT_HARD;
+			*mode = REBOOT_HARD;
 			break;
 
 		case 's':
@@ -552,11 +562,11 @@ static int __init reboot_setup(char *str)
 				if (rc)
 					return rc;
 			} else
-				reboot_mode = REBOOT_SOFT;
+				*mode = REBOOT_SOFT;
 			break;
 		}
 		case 'g':
-			reboot_mode = REBOOT_GPIO;
+			*mode = REBOOT_GPIO;
 			break;
 
 		case 'b':
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 2ee6b06..8a00649 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -78,10 +78,12 @@ static void sched_full_throttle_boost_exit(void)
 static void sched_conservative_boost_enter(void)
 {
 	update_cgroup_boost_settings();
+	sched_task_filter_util = sysctl_sched_min_task_util_for_boost;
 }
 
 static void sched_conservative_boost_exit(void)
 {
+	sched_task_filter_util = sysctl_sched_min_task_util_for_colocation;
 	restore_cgroup_boost_settings();
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 35e43ee..3c60542 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2258,6 +2258,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
 	p->last_sleep_ts		= 0;
+	p->boost			= 0;
+	p->boost_expires		= 0;
+	p->boost_period			= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2284,6 +2287,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
 
+#ifdef CONFIG_COMPACTION
+	p->capture_control = NULL;
+#endif
 	init_numa_balancing(clone_flags, p);
 }
 
@@ -6316,7 +6322,6 @@ int sched_cpu_activate(unsigned int cpu)
 	rq_unlock_irqrestore(rq, &rf);
 
 	update_max_interval();
-	walt_update_min_max_capacity();
 
 	return 0;
 }
@@ -6356,7 +6361,6 @@ int sched_cpu_deactivate(unsigned int cpu)
 		return ret;
 	}
 	sched_domains_numa_masks_clear(cpu);
-	walt_update_min_max_capacity();
 	return 0;
 }
 
@@ -7212,6 +7216,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
 				struct cftype *cftype, u64 shareval)
 {
+	if (shareval > scale_load_down(ULONG_MAX))
+		shareval = MAX_SHARES;
 	return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
@@ -7314,8 +7320,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
 	period = ktime_to_ns(tg->cfs_bandwidth.period);
 	if (cfs_quota_us < 0)
 		quota = RUNTIME_INF;
-	else
+	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
+	else
+		return -EINVAL;
 
 	return tg_set_cfs_bandwidth(tg, period, quota);
 }
@@ -7337,6 +7345,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
 {
 	u64 quota, period;
 
+	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
+		return -EINVAL;
+
 	period = (u64)cfs_period_us * NSEC_PER_USEC;
 	quota = tg->cfs_bandwidth.quota;
 
@@ -7782,6 +7793,26 @@ const u32 sched_prio_to_wmult[40] = {
 
 #undef CREATE_TRACE_POINTS
 
+/*
+ *@boost:should be 0,1,2.
+ *@period:boost time based on ms units.
+ */
+int set_task_boost(int boost, u64 period)
+{
+	if (boost < 0 || boost > 2)
+		return -EINVAL;
+	if (boost) {
+		current->boost = boost;
+		current->boost_period = (u64)period * 1000 * 1000;
+		current->boost_expires = sched_clock() + current->boost_period;
+	} else {
+		current->boost = 0;
+		current->boost_expires = 0;
+		current->boost_period = 0;
+	}
+	return 0;
+}
+
 #ifdef CONFIG_SCHED_WALT
 /*
  * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 0fb1fce..1605829 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"core_ctl: " fmt
@@ -20,9 +20,6 @@
 #include "sched.h"
 #include "walt.h"
 
-#define MAX_CPUS_PER_CLUSTER 6
-#define MAX_CLUSTERS 3
-
 struct cluster_data {
 	bool inited;
 	unsigned int min_cpus;
@@ -915,7 +912,7 @@ void core_ctl_notifier_unregister(struct notifier_block *n)
 
 static void core_ctl_call_notifier(void)
 {
-	struct core_ctl_notif_data ndata;
+	struct core_ctl_notif_data ndata = {0};
 	struct notifier_block *nb;
 
 	/*
@@ -930,7 +927,9 @@ static void core_ctl_call_notifier(void)
 		return;
 
 	ndata.nr_big = last_nr_big;
-	ndata.coloc_load_pct = walt_get_default_coloc_group_load();
+	walt_fill_ta_data(&ndata);
+	trace_core_ctl_notif_data(ndata.nr_big, ndata.coloc_load_pct,
+			ndata.ta_util_pct, ndata.cur_cap_pct);
 
 	atomic_notifier_call_chain(&core_ctl_notifier, 0, &ndata);
 }
diff --git a/kernel/sched/core_ctl.h b/kernel/sched/core_ctl.h
deleted file mode 100644
index c7707ff..0000000
--- a/kernel/sched/core_ctl.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __CORE_CTL_H
-#define __CORE_CTL_H
-
-#ifdef CONFIG_SCHED_CORE_CTL
-void core_ctl_check(u64 wallclock);
-int core_ctl_set_boost(bool boost);
-#else
-static inline void core_ctl_check(u64 wallclock) {}
-static inline int core_ctl_set_boost(bool boost)
-{
-	return 0;
-}
-#endif
-#endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 6572ee2..31decf0 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -181,10 +181,12 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy,
 				u64 upto)
 {
 	u64 delta_ns, cycles;
+	u64 next_ws = sg_policy->last_ws + sched_ravg_window;
 
 	if (use_pelt())
 		return;
 
+	upto = min(upto, next_ws);
 	/* Track cycles in current window */
 	delta_ns = upto - sg_policy->last_cyc_update_time;
 	delta_ns *= prev_freq;
@@ -1231,6 +1233,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 	return 0;
 
 fail:
+	kobject_put(&tunables->attr_set.kobj);
 	policy->governor_data = NULL;
 	sugov_tunables_free(tunables);
 
@@ -1327,7 +1330,8 @@ static void sugov_stop(struct cpufreq_policy *policy)
 static void sugov_limits(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
-	unsigned long flags;
+	unsigned long flags, now;
+	unsigned int freq;
 
 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
@@ -1339,9 +1343,16 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		mutex_unlock(&sg_policy->work_lock);
 	} else {
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
-		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
-				   ktime_get_ns());
-		cpufreq_policy_apply_limits_fast(policy);
+		freq = policy->cur;
+		now = ktime_get_ns();
+
+		/*
+		 * cpufreq_driver_resolve_freq() has a clamp, so we do not need
+		 * to do any sort of additional validation here.
+		 */
+		freq = cpufreq_driver_resolve_freq(policy, freq);
+		sg_policy->cached_raw_freq = freq;
+		sugov_fast_switch(sg_policy, now, freq);
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index c2f1005..b89fa3a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -253,7 +253,6 @@ static void task_non_contending(struct task_struct *p)
 	if (dl_entity_is_special(dl_se))
 		return;
 
-	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
 	WARN_ON(dl_se->dl_non_contending);
 
 	zerolag_time = dl_se->deadline -
@@ -270,7 +269,7 @@ static void task_non_contending(struct task_struct *p)
 	 * If the "0-lag time" already passed, decrease the active
 	 * utilization now, instead of starting a timer
 	 */
-	if (zerolag_time < 0) {
+	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 		if (dl_task(p))
 			sub_running_bw(dl_se, dl_rq);
 		if (!dl_task(p) || p->state == TASK_DEAD) {
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cbc88e0..91b4c6b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -674,8 +674,6 @@ do {									\
 	P(cpu_capacity);
 #endif
 #ifdef CONFIG_SCHED_WALT
-	P(cluster->load_scale_factor);
-	P(cluster->capacity);
 	P(cluster->max_possible_capacity);
 	P(cluster->efficiency);
 	P(cluster->cur_freq);
@@ -765,8 +763,6 @@ static void sched_debug_header(struct seq_file *m)
 	P(sysctl_sched_features);
 #ifdef CONFIG_SCHED_WALT
 	P(sched_init_task_load_windows);
-	P(min_capacity);
-	P(max_capacity);
 	P(sched_ravg_window);
 	P(sched_load_granule);
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c74dd1f..8f81dc1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -27,6 +27,7 @@
 #include "walt.h"
 
 #ifdef CONFIG_SMP
+static inline bool get_rtg_status(struct task_struct *p);
 static inline bool task_fits_max(struct task_struct *p, int cpu);
 #endif /* CONFIG_SMP */
 
@@ -186,6 +187,7 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = {
 unsigned int sysctl_sched_min_task_util_for_boost = 51;
 /* 0.68ms default for 20ms window size scaled to 1024 */
 unsigned int sysctl_sched_min_task_util_for_colocation = 35;
+unsigned int sched_task_filter_util = 35;
 #endif
 unsigned int sched_small_task_threshold = 102;
 
@@ -2074,6 +2076,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 	if (p->last_task_numa_placement) {
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
+
+		/* Avoid time going backwards, prevent potential divide error: */
+		if (unlikely((s64)*period < 0))
+			*period = 0;
 	} else {
 		delta = p->se.avg.load_sum;
 		*period = LOAD_AVG_MAX;
@@ -3113,6 +3119,18 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 	}
 }
 
+static inline int per_task_boost(struct task_struct *p)
+{
+	if (p->boost_period) {
+		if (sched_clock() > p->boost_expires) {
+			p->boost_period = 0;
+			p->boost_expires = 0;
+			p->boost = 0;
+		}
+	}
+	return p->boost;
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /**
@@ -3805,13 +3823,14 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 }
 
 static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
+bias_to_waker_cpu(struct task_struct *p, int cpu, int start_cpu)
 {
 	bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
-			cpu_active(cpu) && task_fits_max(p, cpu);
-	bool rtg_test = rtg_target && cpumask_test_cpu(cpu, rtg_target);
+			cpu_active(cpu);
+	bool start_cap_test = (capacity_orig_of(cpu) >=
+					capacity_orig_of(start_cpu));
 
-	return base_test && (!rtg_target || rtg_test);
+	return base_test && start_cap_test;
 }
 
 static inline bool task_fits_capacity(struct task_struct *p,
@@ -3832,28 +3851,46 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 {
 	unsigned long capacity = capacity_orig_of(cpu);
 	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+	unsigned long task_boost = per_task_boost(p);
 
 	if (capacity == max_capacity)
 		return true;
 
-	if ((task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
-			schedtune_task_boost(p) > 0) &&
-			is_min_capacity_cpu(cpu))
-		return false;
+	if (is_min_capacity_cpu(cpu)) {
+		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
+			task_boost > 0 ||
+			schedtune_task_boost(p) > 0)
+			return false;
+	} else { /* mid cap cpu */
+		if (task_boost > 1)
+			return false;
+	}
+
+	return task_fits_capacity(p, capacity, cpu);
+}
+
+static inline bool task_demand_fits(struct task_struct *p, int cpu)
+{
+	unsigned long capacity = capacity_orig_of(cpu);
+	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+	if (capacity == max_capacity)
+		return true;
 
 	return task_fits_capacity(p, capacity, cpu);
 }
 
 struct find_best_target_env {
-	struct cpumask *rtg_target;
+	bool is_rtg;
 	int placement_boost;
 	bool need_idle;
 	int fastpath;
+	int start_cpu;
 };
 
 static inline void adjust_cpus_for_packing(struct task_struct *p,
 			int *target_cpu, int *best_idle_cpu,
-			int target_cpus_count, int shallowest_idle_cstate,
+			int shallowest_idle_cstate,
 			struct find_best_target_env *fbt_env,
 			bool boosted)
 {
@@ -3863,14 +3900,11 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
 		return;
 
 	if (task_placement_boost_enabled(p) || fbt_env->need_idle || boosted ||
-			shallowest_idle_cstate == -1) {
+			shallowest_idle_cstate <= 0) {
 		*target_cpu = -1;
 		return;
 	}
 
-	if (target_cpus_count > 1)
-		return;
-
 	if (task_in_cum_window_demand(cpu_rq(*target_cpu), p))
 		tutil = 0;
 	else
@@ -3889,7 +3923,7 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
 		return;
 	}
 
-	if (fbt_env->rtg_target)
+	if (fbt_env->is_rtg)
 		*best_idle_cpu = -1;
 }
 
@@ -5044,12 +5078,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
 	int overrun;
 	int idle = 0;
+	int count = 0;
 
 	raw_spin_lock(&cfs_b->lock);
 	for (;;) {
@@ -5057,6 +5094,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (!overrun)
 			break;
 
+		if (++count > 3) {
+			u64 new, old = ktime_to_ns(cfs_b->period);
+
+			new = (old * 147) / 128; /* ~115% */
+			new = min(new, max_cfs_quota_period);
+
+			cfs_b->period = ns_to_ktime(new);
+
+			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+			cfs_b->quota *= new;
+			cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+			pr_warn_ratelimited(
+        "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+	                        smp_processor_id(),
+	                        div_u64(new, NSEC_PER_USEC),
+                                div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+			/* reset count so we don't come right back in here */
+			count = 0;
+		}
+
 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
 	}
 	if (idle)
@@ -6668,36 +6727,35 @@ unsigned long capacity_curr_of(int cpu)
 	return cap_scale(max_cap, scale_freq);
 }
 
-static int get_start_cpu(struct task_struct *p, bool boosted,
-					struct cpumask *rtg_target)
+static int get_start_cpu(struct task_struct *p)
 {
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
-	int start_cpu = -1;
+	int start_cpu = rd->min_cap_orig_cpu;
+	bool boosted = schedtune_task_boost(p) > 0 ||
+			task_boost_policy(p) == SCHED_BOOST_ON_BIG;
+	bool task_skip_min = get_rtg_status(p) &&
+		(task_util(p) > sched_task_filter_util);
 
-	if (boosted) {
-		if (rd->mid_cap_orig_cpu != -1 &&
-		    task_fits_max(p, rd->mid_cap_orig_cpu))
-			return rd->mid_cap_orig_cpu;
-		return rd->max_cap_orig_cpu;
+	/*
+	 * note about min/mid/max_cap_orig_cpu - either all of them will be -ve
+	 * or just mid will be -1, there never be any other combinations of -1s
+	 * beyond these
+	 */
+	if (task_skip_min || boosted) {
+		start_cpu = rd->mid_cap_orig_cpu == -1 ?
+			rd->max_cap_orig_cpu : rd->mid_cap_orig_cpu;
+	}
+	if (start_cpu == -1 || start_cpu == rd->max_cap_orig_cpu)
+		return start_cpu;
+
+	if (start_cpu == rd->min_cap_orig_cpu &&
+			!task_demand_fits(p, start_cpu)) {
+		start_cpu = rd->mid_cap_orig_cpu == -1 ?
+			rd->max_cap_orig_cpu : rd->mid_cap_orig_cpu;
 	}
 
-	/* A task always fits on its rtg_target */
-	if (rtg_target) {
-		int rtg_target_cpu = cpumask_first_and(rtg_target,
-						cpu_online_mask);
-
-		if (rtg_target_cpu < nr_cpu_ids)
-			return rtg_target_cpu;
-	}
-
-	/* Where the task should land based on its demand */
-	if (rd->min_cap_orig_cpu != -1
-			&& task_fits_max(p, rd->min_cap_orig_cpu))
-		start_cpu = rd->min_cap_orig_cpu;
-	else if (rd->mid_cap_orig_cpu != -1
-			&& task_fits_max(p, rd->mid_cap_orig_cpu))
-		start_cpu = rd->mid_cap_orig_cpu;
-	else
+	if (start_cpu == rd->mid_cap_orig_cpu &&
+			!task_demand_fits(p, start_cpu))
 		start_cpu = rd->max_cap_orig_cpu;
 
 	return start_cpu;
@@ -6717,12 +6775,11 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	unsigned long target_capacity = ULONG_MAX;
 	unsigned long min_wake_util = ULONG_MAX;
 	unsigned long target_max_spare_cap = 0;
-	unsigned long target_util = ULONG_MAX;
 	unsigned long best_active_util = ULONG_MAX;
 	unsigned long best_active_cuml_util = ULONG_MAX;
 	unsigned long best_idle_cuml_util = ULONG_MAX;
 	bool prefer_idle = schedtune_prefer_idle(p);
-	bool boosted = schedtune_task_boost(p) > 0;
+	bool boosted = schedtune_task_boost(p) > 0 || per_task_boost(p) > 0;
 	/* Initialise with deepest possible cstate (INT_MAX) */
 	int shallowest_idle_cstate = INT_MAX;
 	struct sched_domain *start_sd;
@@ -6734,7 +6791,6 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	int i, start_cpu;
 	long spare_wake_cap, most_spare_wake_cap = 0;
 	int most_spare_cap_cpu = -1;
-	unsigned int active_cpus_count = 0;
 	int prev_cpu = task_cpu(p);
 	bool next_group_higher_cap = false;
 	int isolated_candidate = -1;
@@ -6752,10 +6808,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		target_capacity = 0;
 
 	/* Find start CPU based on boost value */
-	start_cpu = get_start_cpu(p, boosted, fbt_env->rtg_target);
-	if (start_cpu < 0)
-		goto out;
-
+	start_cpu = fbt_env->start_cpu;
 	/* Find SD for the start CPU */
 	start_sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, start_cpu));
 	if (!start_sd)
@@ -6766,7 +6819,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		!cpu_isolated(prev_cpu) && cpu_online(prev_cpu) &&
 		idle_cpu(prev_cpu)) {
 
-		if (idle_get_state_idx(cpu_rq(prev_cpu)) <= 1) {
+		if (idle_get_state_idx(cpu_rq(prev_cpu)) <=
+			(is_min_capacity_cpu(prev_cpu) ? 1 : 0)) {
+
 			target_cpu = prev_cpu;
 
 			fbt_env->fastpath = PREV_CPU_FASTPATH;
@@ -6810,7 +6865,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 			 */
 			wake_util = cpu_util_without(i, p);
 			new_util = wake_util + task_util_est(p);
-			spare_wake_cap = capacity_orig_of(i) - wake_util;
+			spare_wake_cap = capacity_orig - wake_util;
 
 			if (spare_wake_cap > most_spare_wake_cap) {
 				most_spare_wake_cap = spare_wake_cap;
@@ -6982,17 +7037,16 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 			 */
 			if (idle_cpu(i)) {
 				/*
-				 * Skip CPUs in deeper idle state, but only
-				 * if they are also less energy efficient.
-				 * IOW, prefer a deep IDLE LITTLE CPU vs a
-				 * shallow idle big CPU.
+				 * Prefer shallowest over deeper idle state cpu,
+				 * of same capacity cpus.
 				 */
-				if (capacity_orig >= target_capacity &&
+				if (capacity_orig == target_capacity &&
 				    sysctl_sched_cstate_aware &&
 				    idle_idx > shallowest_idle_cstate)
 					continue;
 
 				if (shallowest_idle_cstate == idle_idx &&
+					target_capacity == capacity_orig &&
 					(best_idle_cpu == prev_cpu ||
 					(i != prev_cpu &&
 					new_util_cuml > best_idle_cuml_util)))
@@ -7031,16 +7085,12 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 			 * capacity.
 			 */
 
-			active_cpus_count++;
-
 			/* Favor CPUs with maximum spare capacity */
-			if (capacity_orig == target_capacity &&
-			    spare_cap < target_max_spare_cap)
+			if (spare_cap < target_max_spare_cap)
 				continue;
 
 			target_max_spare_cap = spare_cap;
 			target_capacity = capacity_orig;
-			target_util = new_util;
 			target_cpu = i;
 		}
 
@@ -7087,7 +7137,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	} while (sg = sg->next, sg != start_sd->groups);
 
 	adjust_cpus_for_packing(p, &target_cpu, &best_idle_cpu,
-				active_cpus_count, shallowest_idle_cstate,
+				shallowest_idle_cstate,
 				fbt_env, boosted);
 
 	/*
@@ -7382,42 +7432,25 @@ static inline int wake_to_idle(struct task_struct *p)
 }
 
 #ifdef CONFIG_SCHED_WALT
-static inline bool is_task_util_above_min_thresh(struct task_struct *p)
-{
-	unsigned int threshold = (sched_boost() == CONSERVATIVE_BOOST) ?
-			sysctl_sched_min_task_util_for_boost :
-			sysctl_sched_min_task_util_for_colocation;
-
-	return task_util(p) > threshold;
-}
-
-static inline struct cpumask *find_rtg_target(struct task_struct *p)
+static inline bool get_rtg_status(struct task_struct *p)
 {
 	struct related_thread_group *grp;
-	struct cpumask *rtg_target;
+	bool ret = false;
 
 	rcu_read_lock();
 
 	grp = task_related_thread_group(p);
-	if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) {
-		rtg_target = &grp->preferred_cluster->cpus;
-
-		if (!task_fits_max(p, cpumask_first(rtg_target)))
-			rtg_target = NULL;
-		else if (cpumask_subset(rtg_target, &asym_cap_sibling_cpus))
-			rtg_target = &asym_cap_sibling_cpus;
-	} else {
-		rtg_target = NULL;
-	}
+	if (grp)
+		ret = grp->skip_min;
 
 	rcu_read_unlock();
 
-	return rtg_target;
+	return ret;
 }
 #else
-static inline struct cpumask *find_rtg_target(struct task_struct *p)
+static inline bool get_rtg_status(struct task_struct *p)
 {
-	return NULL;
+	return false;
 }
 #endif
 
@@ -7491,24 +7524,34 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 	struct perf_domain *pd;
 	struct sched_domain *sd;
 	cpumask_t *candidates;
-	struct cpumask *rtg_target = find_rtg_target(p);
+	bool is_rtg;
 	struct find_best_target_env fbt_env;
 	bool need_idle = wake_to_idle(p);
 	int placement_boost = task_boost_policy(p);
 	u64 start_t = 0;
 	int delta = 0;
 	int boosted = (schedtune_task_boost(p) > 0);
+	int start_cpu = get_start_cpu(p);
+
+	if (start_cpu < 0)
+		goto eas_not_ready;
+
+	is_rtg = task_in_related_thread_group(p);
 
 	fbt_env.fastpath = 0;
 
 	if (trace_sched_task_util_enabled())
 		start_t = sched_clock();
 
+	/* Pre-select a set of candidate CPUs. */
+	candidates = this_cpu_ptr(&energy_cpus);
+	cpumask_clear(candidates);
+
 	if (need_idle)
 		sync = 0;
 
 	if (sysctl_sched_sync_hint_enable && sync &&
-				bias_to_waker_cpu(p, cpu, rtg_target)) {
+				bias_to_waker_cpu(p, cpu, start_cpu)) {
 		best_energy_cpu = cpu;
 		fbt_env.fastpath = SYNC_WAKEUP;
 		goto sync_wakeup;
@@ -7533,14 +7576,11 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 	if (!task_util_est(p))
 		goto unlock;
 
-	/* Pre-select a set of candidate CPUs. */
-	candidates = this_cpu_ptr(&energy_cpus);
-	cpumask_clear(candidates);
-
 	if (sched_feat(FIND_BEST_TARGET)) {
-		fbt_env.rtg_target = rtg_target;
+		fbt_env.is_rtg = is_rtg;
 		fbt_env.placement_boost = placement_boost;
 		fbt_env.need_idle = need_idle;
+		fbt_env.start_cpu = start_cpu;
 
 		find_best_target(NULL, candidates, p, &fbt_env);
 	} else {
@@ -7565,9 +7605,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 		delta = task_util(p);
 #endif
 	if (task_placement_boost_enabled(p) || need_idle || boosted ||
-	    (rtg_target && (!cpumask_test_cpu(prev_cpu, rtg_target) ||
-	    cpumask_test_cpu(cpu, rtg_target))) ||
-	    __cpu_overutilized(prev_cpu, delta) ||
+	    is_rtg || __cpu_overutilized(prev_cpu, delta) ||
 	    !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu)) {
 		best_energy_cpu = cpu;
 		goto unlock;
@@ -7599,27 +7637,25 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 unlock:
 	rcu_read_unlock();
 
-sync_wakeup:
-	trace_sched_task_util(p, best_energy_cpu, sync,
-			need_idle, fbt_env.fastpath, placement_boost,
-			rtg_target ? cpumask_first(rtg_target) : -1, start_t,
-			boosted);
-
 	/*
-	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
-	 * least 6% of the energy used by prev_cpu.
+	 * Pick the prev CPU, if the best energy CPU can't save at least 6%
+	 * of the energy used by prev_cpu.
 	 */
-	if (prev_energy == ULONG_MAX)
-		return best_energy_cpu;
+	if ((prev_energy != ULONG_MAX) && (best_energy_cpu != prev_cpu)  &&
+	    ((prev_energy - best_energy) <= prev_energy >> 4))
+		best_energy_cpu = prev_cpu;
 
-	if ((prev_energy - best_energy) > (prev_energy >> 4))
-		return best_energy_cpu;
+sync_wakeup:
 
-	return prev_cpu;
+	trace_sched_task_util(p, cpumask_bits(candidates)[0], best_energy_cpu,
+			sync, need_idle, fbt_env.fastpath, placement_boost,
+			start_t, boosted, is_rtg, get_rtg_status(p), start_cpu);
+
+	return best_energy_cpu;
 
 fail:
 	rcu_read_unlock();
-
+eas_not_ready:
 	return -1;
 }
 
@@ -7648,6 +7684,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	if (static_branch_unlikely(&sched_energy_present)) {
 		rcu_read_lock();
 		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
+		if (unlikely(new_cpu < 0))
+			new_cpu = prev_cpu;
 		rcu_read_unlock();
 		return new_cpu;
 	}
@@ -8602,7 +8640,7 @@ static int detach_tasks(struct lb_env *env)
 	if (!same_cluster(env->dst_cpu, env->src_cpu))
 		env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
 
-	if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu))
+	if (capacity_orig_of(env->dst_cpu) < capacity_orig_of(env->src_cpu))
 		env->flags |= LBF_IGNORE_BIG_TASKS;
 
 redo:
@@ -10529,6 +10567,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 				busiest->active_balance = 1;
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
+				mark_reserved(this_cpu);
 			}
 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
 
@@ -10761,6 +10800,7 @@ static int active_load_balance_cpu_stop(void *data)
 	busiest_rq->active_balance = 0;
 	push_task = busiest_rq->push_task;
 	target_cpu = busiest_rq->push_cpu;
+	clear_reserved(target_cpu);
 
 	if (push_task)
 		busiest_rq->push_task = NULL;
@@ -10771,7 +10811,6 @@ static int active_load_balance_cpu_stop(void *data)
 		if (push_task_detached)
 			attach_one_task(target_rq, push_task);
 		put_task_struct(push_task);
-		clear_reserved(target_cpu);
 	}
 
 	if (p)
@@ -10920,6 +10959,8 @@ static inline int on_null_domain(struct rq *rq)
  * - When one of the busy CPUs notice that there may be an idle rebalancing
  *   needed, they will kick the idle load balancer, which then does idle
  *   load balancing for all the idle CPUs.
+ * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
+ *   set anywhere yet.
  */
 
 static inline int find_new_ilb(void)
@@ -10952,16 +10993,18 @@ static inline int find_new_ilb(void)
 		}
 	}
 
-	if (ilb < nr_cpu_ids && idle_cpu(ilb))
-		return ilb;
+	for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+			      housekeeping_cpumask(HK_FLAG_MISC)) {
+		if (idle_cpu(ilb))
+			return ilb;
+	}
 
 	return nr_cpu_ids;
 }
 
 /*
- * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
- * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
- * CPU (if there is one).
+ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
+ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
  */
 static void kick_ilb(unsigned int flags)
 {
@@ -11453,10 +11496,11 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 		update_next_balance(sd, &next_balance);
 
 		/*
-		 * Stop searching for tasks to pull if there are
-		 * now runnable tasks on this rq.
+		 * Stop searching for tasks to pull if there are now runnable
+		 * tasks on this rq or if active migration kicked in.
 		 */
-		if (pulled_task || this_rq->nr_running > 0)
+		if (pulled_task || this_rq->nr_running > 0 ||
+		    !continue_balancing)
 			break;
 	}
 	rcu_read_unlock();
@@ -12469,7 +12513,6 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 {
 	int active_balance;
 	int new_cpu = -1;
-	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 
 	if (rq->misfit_task_load) {
@@ -12484,13 +12527,13 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 		rcu_read_lock();
 		new_cpu = find_energy_efficient_cpu(p, prev_cpu, 0);
 		rcu_read_unlock();
-		if ((new_cpu != -1) &&
-			(capacity_orig_of(new_cpu) > capacity_orig_of(cpu))) {
+		if ((new_cpu != -1) && (new_cpu != prev_cpu) &&
+		    (capacity_orig_of(new_cpu) > capacity_orig_of(prev_cpu))) {
 			active_balance = kick_active_balance(rq, p, new_cpu);
 			if (active_balance) {
 				mark_reserved(new_cpu);
 				raw_spin_unlock(&migration_lock);
-				stop_one_cpu_nowait(cpu,
+				stop_one_cpu_nowait(prev_cpu,
 					active_load_balance_cpu_stop, rq,
 					&rq->active_balance_work);
 				return;
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index e3e3db9..328c0d4 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -189,7 +189,7 @@ static void group_init(struct psi_group *group)
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
 	group->avg_next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	INIT_DEFERRABLE_WORK(&group->avgs_work, psi_avgs_work);
 	mutex_init(&group->avgs_lock);
 	/* Init trigger-related members */
 	atomic_set(&group->poll_scheduled, 0);
@@ -455,7 +455,7 @@ static void trace_event_helper(struct psi_group *group)
 	u64 memstall = group->total[PSI_POLL][PSI_MEM_SOME];
 
 	for_each_populated_zone(zone) {
-		wmark = zone->watermark[WMARK_HIGH];
+		wmark = high_wmark_pages(zone);
 		free = zone_page_state(zone, NR_FREE_PAGES);
 		cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
 		file = zone_page_state(zone, NR_ZONE_ACTIVE_FILE) +
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 835a225..3032035 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2787,6 +2787,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us < 0)
 		rt_runtime = RUNTIME_INF;
+	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
+		return -EINVAL;
 
 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
@@ -2807,6 +2809,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
 	u64 rt_runtime, rt_period;
 
+	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
+		return -EINVAL;
+
 	rt_period = rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7261a43..0af4e00 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -91,9 +91,16 @@ extern __read_mostly bool sched_predl;
 extern unsigned int sched_capacity_margin_up[NR_CPUS];
 extern unsigned int sched_capacity_margin_down[NR_CPUS];
 
+struct sched_walt_cpu_load {
+	unsigned long prev_window_util;
+	unsigned long nl;
+	unsigned long pl;
+	bool rtgb_active;
+	u64 ws;
+};
+
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sched_ravg_window;
-extern unsigned int walt_cpu_util_freq_divisor;
 
 struct walt_sched_stats {
 	int nr_big_tasks;
@@ -130,9 +137,7 @@ struct sched_cluster {
 	int max_power_cost;
 	int min_power_cost;
 	int max_possible_capacity;
-	int capacity;
 	int efficiency; /* Differentiate cpus with different IPC capability */
-	int load_scale_factor;
 	unsigned int exec_scale_factor;
 	/*
 	 * max_freq = user maximum
@@ -2042,7 +2047,10 @@ static inline int hrtick_enabled(struct rq *rq)
 
 #ifdef CONFIG_SCHED_WALT
 u64 sched_ktime_clock(void);
+unsigned long
+cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load);
 #else
+#define sched_ravg_window TICK_NSEC
 static inline u64 sched_ktime_clock(void)
 {
 	return 0;
@@ -2144,14 +2152,6 @@ static inline unsigned long cpu_util(int cpu)
 	return min_t(unsigned long, util, capacity_orig_of(cpu));
 }
 
-struct sched_walt_cpu_load {
-	unsigned long prev_window_util;
-	unsigned long nl;
-	unsigned long pl;
-	bool rtgb_active;
-	u64 ws;
-};
-
 static inline unsigned long cpu_util_cum(int cpu, int delta)
 {
 	u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
@@ -2172,103 +2172,17 @@ extern unsigned long boosted_cpu_util(int cpu, unsigned long other_util,
 				      struct sched_walt_cpu_load *walt_load);
 #endif
 
+static inline unsigned long
+cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
+{
+
 #ifdef CONFIG_SCHED_WALT
-u64 freq_policy_load(struct rq *rq);
-
-extern u64 walt_load_reported_window;
-extern bool rtgb_active;
-
-static inline unsigned long
-__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
-{
-	u64 util, util_unboosted;
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long capacity = capacity_orig_of(cpu);
-	int boost;
-
-	boost = per_cpu(sched_load_boost, cpu);
-	util_unboosted = util = freq_policy_load(rq);
-	util = div64_u64(util * (100 + boost),
-			walt_cpu_util_freq_divisor);
-
-	if (walt_load) {
-		u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
-				rq->grp_time.nt_prev_runnable_sum;
-		u64 pl = rq->walt_stats.pred_demands_sum_scaled;
-
-		/* do_pl_notif() needs unboosted signals */
-		rq->old_busy_time = div64_u64(util_unboosted,
-						sched_ravg_window >>
-						SCHED_CAPACITY_SHIFT);
-		rq->old_estimated_time = pl;
-
-		nl = div64_u64(nl * (100 + boost),
-		walt_cpu_util_freq_divisor);
-		pl = div64_u64(pl * (100 + boost), 100);
-
-		walt_load->prev_window_util = util;
-		walt_load->nl = nl;
-		walt_load->pl = pl;
-		walt_load->ws = walt_load_reported_window;
-		walt_load->rtgb_active = rtgb_active;
-	}
-
-	return (util >= capacity) ? capacity : util;
-}
-
-#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x)	\
-			(max(orig, mult_frac(other, x, 100)))
-
-static inline unsigned long
-cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
-{
-	struct sched_walt_cpu_load wl_other = {0};
-	unsigned long util = 0, util_other = 0;
-	unsigned long capacity = capacity_orig_of(cpu);
-	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;
-
-	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
-		return __cpu_util_freq_walt(cpu, walt_load);
-
-	for_each_cpu(i, &asym_cap_sibling_cpus) {
-		if (i == cpu)
-			util = __cpu_util_freq_walt(cpu, walt_load);
-		else
-			util_other = __cpu_util_freq_walt(i, &wl_other);
-	}
-
-	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
-		mpct = 100;
-
-	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
-	walt_load->prev_window_util = util;
-
-	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
-						   mpct);
-	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
-						   mpct);
-
-	return (util >= capacity) ? capacity : util;
-}
-
-static inline unsigned long
-cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
-{
 	return cpu_util_freq_walt(cpu, walt_load);
-}
-
 #else
-
-static inline unsigned long
-cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
-{
 	return cpu_util(cpu);
+#endif
 }
 
-#define sched_ravg_window TICK_NSEC
-
-#endif /* CONFIG_SCHED_WALT */
-
 extern unsigned int capacity_margin_freq;
 
 static inline unsigned long
@@ -2729,12 +2643,6 @@ enum sched_boost_policy {
 	SCHED_BOOST_ON_ALL,
 };
 
-/*
- * Returns the rq capacity of any rq in a group. This does not play
- * well with groups where rq capacity can change independently.
- */
-#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))
-
 #ifdef CONFIG_SCHED_WALT
 
 static inline int cluster_first_cpu(struct sched_cluster *cluster)
@@ -2747,7 +2655,7 @@ struct related_thread_group {
 	raw_spinlock_t lock;
 	struct list_head tasks;
 	struct list_head list;
-	struct sched_cluster *preferred_cluster;
+	bool skip_min;
 	struct rcu_head rcu;
 	u64 last_update;
 	u64 downmigrate_ts;
@@ -2764,8 +2672,6 @@ extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int max_possible_efficiency;
 extern unsigned int min_possible_efficiency;
-extern unsigned int max_capacity;
-extern unsigned int min_capacity;
 extern unsigned int max_possible_capacity;
 extern unsigned int min_max_possible_capacity;
 extern unsigned int max_power_cost;
@@ -2793,21 +2699,11 @@ static inline int asym_cap_siblings(int cpu1, int cpu2)
 		cpumask_test_cpu(cpu2, &asym_cap_sibling_cpus));
 }
 
-static inline int cpu_capacity(int cpu)
-{
-	return cpu_rq(cpu)->cluster->capacity;
-}
-
 static inline int cpu_max_possible_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->max_possible_capacity;
 }
 
-static inline int cpu_load_scale_factor(int cpu)
-{
-	return cpu_rq(cpu)->cluster->load_scale_factor;
-}
-
 static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
 {
 	/*
@@ -2828,65 +2724,6 @@ static inline unsigned int cpu_max_possible_freq(int cpu)
 	return cpu_rq(cpu)->cluster->max_possible_freq;
 }
 
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static inline void __update_min_max_capacity(void)
-{
-	int i;
-	int max_cap = 0, min_cap = INT_MAX;
-
-	for_each_possible_cpu(i) {
-		if (!cpu_active(i))
-			continue;
-
-		max_cap = max(max_cap, cpu_capacity(i));
-		min_cap = min(min_cap, cpu_capacity(i));
-	}
-
-	max_capacity = max_cap;
-	min_capacity = min_cap;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static inline unsigned long
-load_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
-	return DIV_ROUND_UP(1024 * max_possible_efficiency,
-			    cluster->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
-{
-	return DIV_ROUND_UP(1024 * max_possible_freq,
-			   cluster_max_freq(cluster));
-}
-
-static inline int compute_load_scale_factor(struct sched_cluster *cluster)
-{
-	int load_scale = 1024;
-
-	/*
-	 * load_scale_factor accounts for the fact that task load
-	 * is in reference to "best" performing cpu. Task's load will need to be
-	 * scaled (up) by a factor to determine suitability to be placed on a
-	 * (little) cpu.
-	 */
-	load_scale *= load_scale_cpu_efficiency(cluster);
-	load_scale >>= 10;
-
-	load_scale *= load_scale_cpu_freq(cluster);
-	load_scale >>= 10;
-
-	return load_scale;
-}
-
 static inline bool hmp_capable(void)
 {
 	return max_possible_capacity != min_max_possible_capacity;
@@ -2902,55 +2739,6 @@ static inline bool is_min_capacity_cpu(int cpu)
 	return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
 }
 
-/*
- * 'load' is in reference to "best cpu" at its best frequency.
- * Scale that in reference to a given cpu, accounting for how bad it is
- * in reference to "best cpu".
- */
-static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
-{
-	u64 lsf = cpu_load_scale_factor(cpu);
-
-	if (lsf != 1024) {
-		task_load *= lsf;
-		task_load /= 1024;
-	}
-
-	return task_load;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long
-capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
-	return (1024 * cluster->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
-{
-	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
-}
-
-static inline int compute_capacity(struct sched_cluster *cluster)
-{
-	int capacity = 1024;
-
-	capacity *= capacity_scale_cpu_efficiency(cluster);
-	capacity >>= 10;
-
-	capacity *= capacity_scale_cpu_freq(cluster);
-	capacity >>= 10;
-
-	return capacity;
-}
-
 static inline unsigned int task_load(struct task_struct *p)
 {
 	return p->ravg.demand;
@@ -2961,12 +2749,6 @@ static inline unsigned int task_pl(struct task_struct *p)
 	return p->ravg.pred_demand;
 }
 
-#define pct_to_real(tunable)	\
-		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
-
-#define real_to_pct(tunable)	\
-		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
-
 static inline bool task_in_related_thread_group(struct task_struct *p)
 {
 	return !!(rcu_access_pointer(p->grp) != NULL);
@@ -2995,6 +2777,7 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
 #define	CPU_RESERVED	1
 
 extern enum sched_boost_policy boost_policy;
+extern unsigned int sched_task_filter_util;
 static inline enum sched_boost_policy sched_boost_policy(void)
 {
 	return boost_policy;
@@ -3087,8 +2870,6 @@ static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)
 		rq->cum_window_demand_scaled = 0;
 }
 
-extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
-
 extern unsigned long thermal_cap(int cpu);
 
 extern void clear_walt_request(int cpu);
@@ -3124,16 +2905,13 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 		 * under conservative boost.
 		 */
 		if (sched_boost() == CONSERVATIVE_BOOST &&
-				task_util(p) <=
-				sysctl_sched_min_task_util_for_boost)
+				task_util(p) <= sched_task_filter_util)
 			policy = SCHED_BOOST_NONE;
 	}
 
 	return policy;
 }
 
-extern void walt_update_min_max_capacity(void);
-
 static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
 {
 	return is_min_capacity_cpu(cluster_first_cpu(cluster));
@@ -3200,18 +2978,6 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 
 static inline int asym_cap_siblings(int cpu1, int cpu2) { return 0; }
 
-static inline u64 scale_load_to_cpu(u64 load, int cpu)
-{
-	return load;
-}
-
-#ifdef CONFIG_SMP
-static inline int cpu_capacity(int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-#endif
-
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
 
 static inline bool task_in_related_thread_group(struct task_struct *p)
@@ -3247,8 +3013,6 @@ static inline int alloc_related_thread_groups(void) { return 0; }
 static inline void walt_fixup_cum_window_demand(struct rq *rq,
 						s64 scaled_delta) { }
 
-static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
-
 #ifdef CONFIG_SMP
 static inline unsigned long thermal_cap(int cpu)
 {
@@ -3285,7 +3049,6 @@ static inline unsigned int power_cost(int cpu, u64 demand)
 #endif
 
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void walt_update_min_max_capacity(void) { }
 #endif	/* CONFIG_SCHED_WALT */
 
 struct sched_avg_stats {
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 5fb03f9..39ea275 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -22,7 +22,6 @@ static DEFINE_PER_CPU(u64, nr_big_prod_sum);
 static DEFINE_PER_CPU(u64, nr);
 static DEFINE_PER_CPU(u64, nr_max);
 
-static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
@@ -87,7 +86,6 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
 		per_cpu(last_time, cpu) = curr_time;
 		per_cpu(nr_prod_sum, cpu) = 0;
 		per_cpu(nr_big_prod_sum, cpu) = 0;
-		per_cpu(iowait_prod_sum, cpu) = 0;
 		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
@@ -151,7 +149,6 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
 
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += walt_big_tasks(cpu) * diff;
-	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
 	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 }
 EXPORT_SYMBOL(sched_update_nr_prod);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 7fcb57a..df60ad6 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -608,6 +608,22 @@ static void update_top_cache_domain(int cpu)
 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
 
 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
+	/*
+	 * EAS gets disabled when there are no asymmetric capacity
+	 * CPUs in the system. For example, all big CPUs are
+	 * hotplugged out on a b.L system. We want EAS enabled
+	 * all the time to get both power and perf benefits. So,
+	 * lets assign sd_asym_cpucapacity to the only available
+	 * sched domain. This is also important for single cluster
+	 * systems which want to use EAS.
+	 *
+	 * Setting sd_asym_cpucapacity() to a sched domain which
+	 * has all symmetric capacity CPUs is technically incorrect but
+	 * works well for us in getting EAS enabled all the time.
+	 */
+	if (!sd)
+		sd = cpu_rq(cpu)->sd;
+
 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
 }
 
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index b731208..7e24811 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -162,7 +162,7 @@ root_schedtune = {
  *    implementation especially for the computation of the per-CPU boost
  *    value
  */
-#define BOOSTGROUPS_COUNT 5
+#define BOOSTGROUPS_COUNT 6
 
 /* Array of configured boostgroups */
 static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index d7967a9..c329d38 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -7,7 +7,6 @@
 #include <linux/cpufreq.h>
 #include <linux/list_sort.h>
 #include <linux/jiffies.h>
-#include <linux/sched/core_ctl.h>
 #include <linux/sched/stat.h>
 #include <trace/events/sched.h>
 #include "sched.h"
@@ -33,7 +32,7 @@ static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
 static atomic64_t walt_irq_work_lastq_ws;
-u64 walt_load_reported_window;
+static u64 walt_load_reported_window;
 
 static struct irq_work walt_cpufreq_irq_work;
 static struct irq_work walt_migration_irq_work;
@@ -129,7 +128,7 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
  * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
  * boost.
  */
-__read_mostly unsigned int walt_cpu_util_freq_divisor;
+static __read_mostly unsigned int walt_cpu_util_freq_divisor;
 
 /* Initial task load. Newly created tasks are assigned this load. */
 unsigned int __read_mostly sched_init_task_load_windows;
@@ -149,9 +148,6 @@ unsigned int max_possible_freq = 1;
  * capacity (cpu_power) of cpus.
  */
 unsigned int min_max_freq = 1;
-
-unsigned int max_capacity = 1024; /* max(rq->capacity) */
-unsigned int min_capacity = 1024; /* min(rq->capacity) */
 unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
 unsigned int
 min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
@@ -479,12 +475,13 @@ static u32  top_task_load(struct rq *rq)
 	}
 }
 
-u64 freq_policy_load(struct rq *rq)
+static inline u64 freq_policy_load(struct rq *rq)
 {
 	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
 	struct sched_cluster *cluster = rq->cluster;
 	u64 aggr_grp_load = cluster->aggr_grp_load;
 	u64 load, tt_load = 0;
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu_of(rq));
 
 	if (rq->ed_task != NULL) {
 		load = sched_ravg_window;
@@ -496,6 +493,9 @@ u64 freq_policy_load(struct rq *rq)
 	else
 		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
 
+	if (cpu_ksoftirqd && cpu_ksoftirqd->state == TASK_RUNNING)
+		load = max_t(u64, load, task_load(cpu_ksoftirqd));
+
 	tt_load = top_task_load(rq);
 	switch (reporting_policy) {
 	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
@@ -516,6 +516,80 @@ u64 freq_policy_load(struct rq *rq)
 	return load;
 }
 
+static bool rtgb_active;
+
+static inline unsigned long
+__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
+{
+	u64 util, util_unboosted;
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long capacity = capacity_orig_of(cpu);
+	int boost;
+
+	boost = per_cpu(sched_load_boost, cpu);
+	util_unboosted = util = freq_policy_load(rq);
+	util = div64_u64(util * (100 + boost),
+			walt_cpu_util_freq_divisor);
+
+	if (walt_load) {
+		u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
+				rq->grp_time.nt_prev_runnable_sum;
+		u64 pl = rq->walt_stats.pred_demands_sum_scaled;
+
+		/* do_pl_notif() needs unboosted signals */
+		rq->old_busy_time = div64_u64(util_unboosted,
+						sched_ravg_window >>
+						SCHED_CAPACITY_SHIFT);
+		rq->old_estimated_time = pl;
+
+		nl = div64_u64(nl * (100 + boost), walt_cpu_util_freq_divisor);
+		pl = div64_u64(pl * (100 + boost), 100);
+
+		walt_load->prev_window_util = util;
+		walt_load->nl = nl;
+		walt_load->pl = pl;
+		walt_load->ws = walt_load_reported_window;
+		walt_load->rtgb_active = rtgb_active;
+	}
+
+	return (util >= capacity) ? capacity : util;
+}
+
+#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x)	\
+			(max(orig, mult_frac(other, x, 100)))
+
+unsigned long
+cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
+{
+	struct sched_walt_cpu_load wl_other = {0};
+	unsigned long util = 0, util_other = 0;
+	unsigned long capacity = capacity_orig_of(cpu);
+	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;
+
+	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
+		return __cpu_util_freq_walt(cpu, walt_load);
+
+	for_each_cpu(i, &asym_cap_sibling_cpus) {
+		if (i == cpu)
+			util = __cpu_util_freq_walt(cpu, walt_load);
+		else
+			util_other = __cpu_util_freq_walt(i, &wl_other);
+	}
+
+	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
+		mpct = 100;
+
+	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
+	walt_load->prev_window_util = util;
+
+	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
+						   mpct);
+	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
+						   mpct);
+
+	return (util >= capacity) ? capacity : util;
+}
+
 /*
  * In this function we match the accumulated subtractions with the current
  * and previous windows we are operating with. Ignore any entries where
@@ -2074,8 +2148,11 @@ void mark_task_starting(struct task_struct *p)
 }
 
 #define pct_to_min_scaled(tunable) \
-		div64_u64(((u64)sched_ravg_window * tunable) << 10, \
-			   (u64)sched_cluster[0]->load_scale_factor * 100)
+		div64_u64(((u64)sched_ravg_window * tunable *	\
+			  cluster_max_freq(sched_cluster[0]) *	\
+			  sched_cluster[0]->efficiency),	\
+			  ((u64)max_possible_freq *		\
+			  max_possible_efficiency * 100))
 
 static inline void walt_update_group_thresholds(void)
 {
@@ -2085,6 +2162,19 @@ static inline void walt_update_group_thresholds(void)
 			pct_to_min_scaled(sysctl_sched_group_downmigrate_pct);
 }
 
+static void walt_cpus_capacity_changed(const cpumask_t *cpus)
+{
+	unsigned long flags;
+
+	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+	if (cpumask_intersects(cpus, &sched_cluster[0]->cpus))
+		walt_update_group_thresholds();
+
+	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+
 static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
 DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
 struct sched_cluster *sched_cluster[NR_CPUS];
@@ -2121,10 +2211,8 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
 	INIT_LIST_HEAD(&cluster->list);
 	cluster->max_power_cost		=	1;
 	cluster->min_power_cost		=	1;
-	cluster->capacity		=	1024;
 	cluster->max_possible_capacity	=	1024;
 	cluster->efficiency		=	1;
-	cluster->load_scale_factor	=	1024;
 	cluster->cur_freq		=	1;
 	cluster->max_freq		=	1;
 	cluster->max_mitigated_freq	=	UINT_MAX;
@@ -2164,7 +2252,7 @@ static int compute_max_possible_capacity(struct sched_cluster *cluster)
 {
 	int capacity = 1024;
 
-	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity *= (1024 * cluster->efficiency) / min_possible_efficiency;
 	capacity >>= 10;
 
 	capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
@@ -2173,15 +2261,6 @@ static int compute_max_possible_capacity(struct sched_cluster *cluster)
 	return capacity;
 }
 
-void walt_update_min_max_capacity(void)
-{
-	unsigned long flags;
-
-	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
-	__update_min_max_capacity();
-	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
-}
-
 unsigned int max_power_cost = 1;
 
 static int
@@ -2249,10 +2328,8 @@ static void update_all_clusters_stats(void)
 	for_each_sched_cluster(cluster) {
 		u64 mpc;
 
-		cluster->capacity = compute_capacity(cluster);
 		mpc = cluster->max_possible_capacity =
 			compute_max_possible_capacity(cluster);
-		cluster->load_scale_factor = compute_load_scale_factor(cluster);
 
 		cluster->exec_scale_factor =
 			DIV_ROUND_UP(cluster->efficiency * 1024,
@@ -2269,7 +2346,6 @@ static void update_all_clusters_stats(void)
 	min_max_possible_capacity = lowest_mpc;
 	walt_update_group_thresholds();
 
-	__update_min_max_capacity();
 	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
 }
 
@@ -2314,10 +2390,8 @@ struct sched_cluster init_cluster = {
 	.id			=	0,
 	.max_power_cost		=	1,
 	.min_power_cost		=	1,
-	.capacity		=	1024,
 	.max_possible_capacity	=	1024,
 	.efficiency		=	1,
-	.load_scale_factor	=	1024,
 	.cur_freq		=	1,
 	.max_freq		=	1,
 	.max_mitigated_freq	=	UINT_MAX,
@@ -2349,8 +2423,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
 	if (val != CPUFREQ_NOTIFY)
 		return 0;
 
-	walt_update_min_max_capacity();
-
 	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
 	if (min_max_freq == 1)
 		min_max_freq = UINT_MAX;
@@ -2391,7 +2463,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
 	}
 
 	if (update_capacity)
-		update_cpu_cluster_capacity(policy->related_cpus);
+		walt_cpus_capacity_changed(policy->related_cpus);
 
 	return 0;
 }
@@ -2513,18 +2585,23 @@ void update_best_cluster(struct related_thread_group *grp,
 				   u64 demand, bool boost)
 {
 	if (boost) {
-		grp->preferred_cluster = sched_cluster[1];
+		/*
+		 * since we are in boost, we can keep grp on min, the boosts
+		 * will ensure tasks get to bigs
+		 */
+		grp->skip_min = false;
 		return;
 	}
 
-	if (grp->preferred_cluster == sched_cluster[0]) {
-		if (demand >= sched_group_upmigrate)
-			grp->preferred_cluster = sched_cluster[1];
+	if (!grp->skip_min) {
+		if (demand >= sched_group_upmigrate) {
+			grp->skip_min = true;
+		}
 		return;
 	}
 	if (demand < sched_group_downmigrate) {
 		if (!sysctl_sched_coloc_downmigrate_ns) {
-			grp->preferred_cluster = sched_cluster[0];
+			grp->skip_min = false;
 			return;
 		}
 		if (!grp->downmigrate_ts) {
@@ -2533,8 +2610,8 @@ void update_best_cluster(struct related_thread_group *grp,
 		}
 		if (grp->last_update - grp->downmigrate_ts >
 				sysctl_sched_coloc_downmigrate_ns) {
-			grp->preferred_cluster = sched_cluster[0];
 			grp->downmigrate_ts = 0;
+			grp->skip_min = false;
 		}
 	} else if (grp->downmigrate_ts)
 		grp->downmigrate_ts = 0;
@@ -2549,7 +2626,7 @@ int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
 
 	grp = task_related_thread_group(p);
 	if (grp)
-		rc = ((grp->preferred_cluster == cluster) ||
+		rc = (sched_cluster[(int)grp->skip_min] == cluster ||
 		      cpumask_subset(&cluster->cpus, &asym_cap_sibling_cpus));
 
 	rcu_read_unlock();
@@ -2567,7 +2644,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
 		return;
 
 	if (!hmp_capable()) {
-		grp->preferred_cluster = sched_cluster[0];
+		grp->skip_min = false;
 		return;
 	}
 
@@ -2593,6 +2670,10 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
 			continue;
 
 		combined_demand += p->ravg.coloc_demand;
+		if (!trace_sched_set_preferred_cluster_enabled()) {
+			if (combined_demand > sched_group_upmigrate)
+				break;
+		}
 	}
 
 	grp->last_update = wallclock;
@@ -2868,31 +2949,6 @@ int sync_cgroup_colocation(struct task_struct *p, bool insert)
 }
 #endif
 
-void update_cpu_cluster_capacity(const cpumask_t *cpus)
-{
-	int i;
-	struct sched_cluster *cluster;
-	struct cpumask cpumask;
-	unsigned long flags;
-
-	cpumask_copy(&cpumask, cpus);
-	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
-
-	for_each_cpu(i, &cpumask) {
-		cluster = cpu_rq(i)->cluster;
-		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
-		cluster->capacity = compute_capacity(cluster);
-		cluster->load_scale_factor = compute_load_scale_factor(cluster);
-	}
-
-	__update_min_max_capacity();
-	if (cpumask_intersects(cpus, &sched_cluster[0]->cpus))
-		walt_update_group_thresholds();
-
-	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
-}
-
 static unsigned long max_cap[NR_CPUS];
 static unsigned long thermal_cap_cpu[NR_CPUS];
 
@@ -2958,7 +3014,7 @@ void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
 	spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
 
 	if (update_capacity)
-		update_cpu_cluster_capacity(cpus);
+		walt_cpus_capacity_changed(cpus);
 }
 
 void note_task_waking(struct task_struct *p, u64 wallclock)
@@ -3076,21 +3132,12 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
 }
 
-bool rtgb_active;
-
 static bool is_rtgb_active(void)
 {
 	struct related_thread_group *grp;
 
-	if (sched_boost() == CONSERVATIVE_BOOST)
-		return false;
-
 	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
-	if (!grp || !grp->preferred_cluster ||
-			is_min_capacity_cluster(grp->preferred_cluster))
-		return false;
-
-	return true;
+	return grp && grp->skip_min;
 }
 
 /*
@@ -3214,20 +3261,22 @@ void walt_rotation_checkpoint(int nr_big)
 	walt_rotation_enabled = nr_big >= num_possible_cpus();
 }
 
-unsigned int walt_get_default_coloc_group_load(void)
+void walt_fill_ta_data(struct core_ctl_notif_data *data)
 {
 	struct related_thread_group *grp;
 	unsigned long flags;
 	u64 total_demand = 0, wallclock;
 	struct task_struct *p;
 	int min_cap_cpu, scale = 1024;
+	struct sched_cluster *cluster;
+	int i = 0;
 
 	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
 
 	raw_spin_lock_irqsave(&grp->lock, flags);
 	if (list_empty(&grp->tasks)) {
 		raw_spin_unlock_irqrestore(&grp->lock, flags);
-		return 0;
+		goto fill_util;
 	}
 
 	wallclock = sched_ktime_clock();
@@ -3253,8 +3302,24 @@ unsigned int walt_get_default_coloc_group_load(void)
 	if (min_cap_cpu != -1)
 		scale = arch_scale_cpu_capacity(NULL, min_cap_cpu);
 
-	return div64_u64(total_demand * 1024 * 100,
-			(u64)sched_ravg_window * scale);
+	data->coloc_load_pct = div64_u64(total_demand * 1024 * 100,
+			       (u64)sched_ravg_window * scale);
+
+fill_util:
+	for_each_sched_cluster(cluster) {
+		int fcpu = cluster_first_cpu(cluster);
+
+		if (i == MAX_CLUSTERS)
+			break;
+
+		scale = arch_scale_cpu_capacity(NULL, fcpu);
+		data->ta_util_pct[i] = div64_u64(cluster->aggr_grp_load * 1024 *
+				       100, (u64)sched_ravg_window * scale);
+
+		scale = arch_scale_freq_capacity(fcpu);
+		data->cur_cap_pct[i] = (scale * 100)/1024;
+		i++;
+	}
 }
 
 int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index e98b7a3..152a9df 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -9,6 +9,7 @@
 #ifdef CONFIG_SCHED_WALT
 
 #include <linux/sched/sysctl.h>
+#include <linux/sched/core_ctl.h>
 
 #define MAX_NR_CLUSTERS			3
 
@@ -297,7 +298,7 @@ static inline void walt_update_last_enqueue(struct task_struct *p)
 extern void walt_rotate_work_init(void);
 extern void walt_rotation_checkpoint(int nr_big);
 extern unsigned int walt_rotation_enabled;
-extern unsigned int walt_get_default_coloc_group_load(void);
+extern void walt_fill_ta_data(struct core_ctl_notif_data *data);
 
 extern __read_mostly bool sched_freq_aggr_en;
 static inline void walt_enable_frequency_aggregation(bool enable)
@@ -312,10 +313,6 @@ static inline void walt_sched_init_rq(struct rq *rq) { }
 static inline void walt_rotate_work_init(void) { }
 static inline void walt_rotation_checkpoint(int nr_big) { }
 static inline void walt_update_last_enqueue(struct task_struct *p) { }
-static inline unsigned int walt_get_default_coloc_group_load(void)
-{
-	return 0;
-}
 
 static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
 				int event, u64 wallclock, u64 irqtime) { }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9fa6c53..865152d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -128,6 +128,7 @@ static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static int __maybe_unused three = 3;
 static int __maybe_unused four = 4;
+static unsigned long zero_ul;
 static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
@@ -1641,6 +1642,14 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &zero,
 	},
 	{
+		.procname	= "watermark_boost_factor",
+		.data		= &watermark_boost_factor,
+		.maxlen		= sizeof(watermark_boost_factor),
+		.mode		= 0644,
+		.proc_handler	= watermark_boost_factor_sysctl_handler,
+		.extra1		= &zero,
+	},
+	{
 		.procname	= "watermark_scale_factor",
 		.data		= &watermark_scale_factor,
 		.maxlen		= sizeof(watermark_scale_factor),
@@ -1909,7 +1918,7 @@ static struct ctl_table fs_table[] = {
 		.maxlen		= sizeof(files_stat.max_files),
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
-		.extra1		= &zero,
+		.extra1		= &zero_ul,
 		.extra2		= &long_max,
 	},
 	{
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4e62a4a..a578d8f51 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -23,7 +23,10 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
 #include <linux/cgroupstats.h>
+#include <linux/sysstats.h>
 #include <linux/cgroup.h>
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -31,6 +34,7 @@
 #include <net/genetlink.h>
 #include <linux/atomic.h>
 #include <linux/sched/cputime.h>
+#include <linux/oom.h>
 
 /*
  * Maximum length of a cpumask that can be specified in
@@ -48,7 +52,8 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
 	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
 	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
 	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
-	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
+	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
+	[TASKSTATS_CMD_ATTR_FOREACH] = { .type = NLA_U32 },};
 
 /*
  * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
@@ -58,6 +63,11 @@ static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX
 	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
 };
 
+static const struct nla_policy
+		sysstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
+	[SYSSTATS_CMD_ATTR_SYSMEM_STATS] = { .type = NLA_U32 },
+};
+
 struct listener {
 	struct list_head list;
 	pid_t pid;
@@ -70,6 +80,11 @@ struct listener_list {
 };
 static DEFINE_PER_CPU(struct listener_list, listener_array);
 
+struct tgid_iter {
+	unsigned int tgid;
+	struct task_struct *task;
+};
+
 enum actions {
 	REGISTER,
 	DEREGISTER,
@@ -396,6 +411,142 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 	return NULL;
 }
 
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+#ifndef CONFIG_NUMA
+static void sysstats_fill_zoneinfo(struct sys_memstats *stats)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+	struct zone *node_zones;
+	unsigned long zspages = 0;
+
+	pgdat = NODE_DATA(0);
+	node_zones = pgdat->node_zones;
+
+	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+		if (!populated_zone(zone))
+			continue;
+
+		zspages += zone_page_state(zone, NR_ZSPAGES);
+		if (!strcmp(zone->name, "DMA")) {
+			stats->dma_nr_free_pages =
+				K(zone_page_state(zone, NR_FREE_PAGES));
+			stats->dma_nr_active_anon =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
+			stats->dma_nr_inactive_anon =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
+			stats->dma_nr_active_file =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
+			stats->dma_nr_inactive_file =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
+		} else if (!strcmp(zone->name, "Normal")) {
+			stats->normal_nr_free_pages =
+				K(zone_page_state(zone, NR_FREE_PAGES));
+			stats->normal_nr_active_anon =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
+			stats->normal_nr_inactive_anon =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
+			stats->normal_nr_active_file =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
+			stats->normal_nr_inactive_file =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
+		} else if (!strcmp(zone->name, "HighMem")) {
+			stats->highmem_nr_free_pages =
+				K(zone_page_state(zone, NR_FREE_PAGES));
+			stats->highmem_nr_active_anon =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
+			stats->highmem_nr_inactive_anon =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
+			stats->highmem_nr_active_file =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
+			stats->highmem_nr_inactive_file =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
+		} else if (!strcmp(zone->name, "Movable")) {
+			stats->movable_nr_free_pages =
+				K(zone_page_state(zone, NR_FREE_PAGES));
+			stats->movable_nr_active_anon =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
+			stats->movable_nr_inactive_anon =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
+			stats->movable_nr_active_file =
+				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
+			stats->movable_nr_inactive_file =
+				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
+		}
+	}
+	stats->zram_compressed = K(zspages);
+}
+#else
+static void sysstats_fill_zoneinfo(struct sys_memstats *stats)
+{
+}
+#endif
+
+static void sysstats_build(struct sys_memstats *stats)
+{
+	struct sysinfo i;
+
+	si_meminfo(&i);
+	si_swapinfo(&i);
+
+	stats->version = SYSSTATS_VERSION;
+	stats->memtotal = K(i.totalram);
+	stats->reclaimable =
+		global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >> 10;
+	stats->swap_used = K(i.totalswap - i.freeswap);
+	stats->swap_total = K(i.totalswap);
+	stats->vmalloc_total = K(vmalloc_nr_pages());
+	stats->unreclaimable =
+		K(global_node_page_state(NR_UNRECLAIMABLE_PAGES));
+	stats->buffer = K(i.bufferram);
+	stats->swapcache = K(total_swapcache_pages());
+	stats->slab_reclaimable =
+		K(global_node_page_state(NR_SLAB_RECLAIMABLE));
+	stats->slab_unreclaimable =
+		K(global_node_page_state(NR_SLAB_UNRECLAIMABLE));
+	stats->free_cma = K(global_zone_page_state(NR_FREE_CMA_PAGES));
+	stats->file_mapped = K(global_node_page_state(NR_FILE_MAPPED));
+	stats->kernelstack = global_zone_page_state(NR_KERNEL_STACK_KB);
+	stats->pagetable = K(global_zone_page_state(NR_PAGETABLE));
+	stats->shmem = K(i.sharedram);
+	sysstats_fill_zoneinfo(stats);
+}
+#undef K
+
+static int sysstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	int rc = 0;
+	struct sk_buff *rep_skb;
+	struct sys_memstats *stats;
+	struct nlattr *na;
+	size_t size;
+
+	size = nla_total_size(sizeof(struct sys_memstats));
+
+	rc = prepare_reply(info, SYSSTATS_CMD_NEW, &rep_skb,
+				size);
+	if (rc < 0)
+		goto err;
+
+	na = nla_reserve(rep_skb, SYSSTATS_TYPE_SYSMEM_STATS,
+				sizeof(struct sys_memstats));
+	if (na == NULL) {
+		nlmsg_free(rep_skb);
+		rc = -EMSGSIZE;
+		goto err;
+	}
+
+	stats = nla_data(na);
+	memset(stats, 0, sizeof(*stats));
+
+	sysstats_build(stats);
+
+	rc = send_reply(rep_skb, info);
+
+err:
+	return rc;
+}
+
 static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
 	int rc = 0;
@@ -489,6 +640,65 @@ static size_t taskstats_packet_size(void)
 	return size;
 }
 
+static int taskstats2_cmd_attr_pid(struct genl_info *info)
+{
+	struct taskstats2 *stats;
+	struct sk_buff *rep_skb;
+	struct nlattr *ret;
+	struct task_struct *tsk;
+	struct task_struct *p;
+	size_t size;
+	u32 pid;
+	int rc;
+
+	size = nla_total_size_64bit(sizeof(struct taskstats2));
+
+	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
+	if (rc < 0)
+		return rc;
+
+	rc = -EINVAL;
+	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
+
+	ret = nla_reserve_64bit(rep_skb, TASKSTATS_TYPE_STATS,
+				sizeof(struct taskstats2), TASKSTATS_TYPE_NULL);
+	if (!ret)
+		goto err;
+
+	stats = nla_data(ret);
+
+	rcu_read_lock();
+	tsk = find_task_by_vpid(pid);
+	if (tsk)
+		get_task_struct(tsk);
+	rcu_read_unlock();
+	if (!tsk) {
+		rc = -ESRCH;
+		goto err;
+	}
+	memset(stats, 0, sizeof(*stats));
+	stats->version = TASKSTATS2_VERSION;
+	stats->pid = task_pid_nr_ns(tsk, task_active_pid_ns(current));
+	p = find_lock_task_mm(tsk);
+	if (p) {
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+		stats->anon_rss = K(get_mm_counter(p->mm, MM_ANONPAGES));
+		stats->file_rss = K(get_mm_counter(p->mm, MM_FILEPAGES));
+		stats->shmem_rss = K(get_mm_counter(p->mm, MM_SHMEMPAGES));
+		stats->swap_rss = K(get_mm_counter(p->mm, MM_SWAPENTS));
+		stats->unreclaimable =
+				K(get_mm_counter(p->mm, MM_UNRECLAIMABLE));
+#undef K
+		task_unlock(p);
+	}
+	put_task_struct(tsk);
+
+	return send_reply(rep_skb, info);
+err:
+	nlmsg_free(rep_skb);
+	return rc;
+}
+
 static int cmd_attr_pid(struct genl_info *info)
 {
 	struct taskstats *stats;
@@ -547,6 +757,114 @@ static int cmd_attr_tgid(struct genl_info *info)
 	return rc;
 }
 
+static struct tgid_iter next_tgid(struct pid_namespace *ns,
+					struct tgid_iter iter)
+{
+	struct pid *pid;
+
+	if (iter.task)
+		put_task_struct(iter.task);
+	rcu_read_lock();
+retry:
+	iter.task = NULL;
+	pid = find_ge_pid(iter.tgid, ns);
+	if (pid) {
+		iter.tgid = pid_nr_ns(pid, ns);
+		iter.task = pid_task(pid, PIDTYPE_PID);
+		if (!iter.task || !has_group_leader_pid(iter.task)) {
+			iter.tgid += 1;
+			goto retry;
+		}
+		get_task_struct(iter.task);
+	}
+	rcu_read_unlock();
+	return iter;
+}
+
+static int taskstats2_foreach(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct pid_namespace *ns = task_active_pid_ns(current);
+	struct tgid_iter iter;
+	void *reply;
+	struct nlattr *attr;
+	struct nlattr *nla;
+	struct taskstats2 *stats;
+	struct task_struct *p;
+	short oom_score;
+	short oom_score_min;
+	short oom_score_max;
+	u32 buf;
+
+	nla = nla_find(nlmsg_attrdata(cb->nlh, GENL_HDRLEN),
+			nlmsg_attrlen(cb->nlh, GENL_HDRLEN),
+			TASKSTATS_TYPE_FOREACH);
+	buf  = nla_get_u32(nla);
+	oom_score_min = (short) (buf & 0xFFFF);
+	oom_score_max = (short) ((buf >> 16) & 0xFFFF);
+
+	iter.tgid = cb->args[0];
+	iter.task = NULL;
+	for (iter = next_tgid(ns, iter); iter.task;
+			iter.tgid += 1, iter = next_tgid(ns, iter)) {
+
+		if (iter.task->flags & PF_KTHREAD)
+			continue;
+
+		oom_score = iter.task->signal->oom_score_adj;
+		if ((oom_score < oom_score_min)
+			|| (oom_score > oom_score_max))
+			continue;
+
+		reply = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+			cb->nlh->nlmsg_seq, &family, 0, TASKSTATS2_CMD_GET);
+		if (reply == NULL) {
+			put_task_struct(iter.task);
+			break;
+		}
+		attr = nla_reserve(skb, TASKSTATS_TYPE_FOREACH,
+				sizeof(struct taskstats2));
+		if (!attr) {
+			put_task_struct(iter.task);
+			genlmsg_cancel(skb, reply);
+			break;
+		}
+		stats = nla_data(attr);
+		memset(stats, 0, sizeof(struct taskstats2));
+		stats->version = TASKSTATS2_VERSION;
+		rcu_read_lock();
+		stats->pid = task_pid_nr_ns(iter.task,
+						task_active_pid_ns(current));
+		stats->oom_score = iter.task->signal->oom_score_adj;
+		rcu_read_unlock();
+		p = find_lock_task_mm(iter.task);
+		if (p) {
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+			stats->anon_rss =
+				K(get_mm_counter(p->mm, MM_ANONPAGES));
+			stats->file_rss =
+				K(get_mm_counter(p->mm, MM_FILEPAGES));
+			stats->shmem_rss =
+				K(get_mm_counter(p->mm, MM_SHMEMPAGES));
+			stats->swap_rss =
+				K(get_mm_counter(p->mm, MM_SWAPENTS));
+			task_unlock(p);
+#undef K
+		}
+		genlmsg_end(skb, reply);
+	}
+
+	cb->args[0] = iter.tgid;
+	return skb->len;
+}
+
+static int taskstats2_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	if (info->attrs[TASKSTATS_CMD_ATTR_PID])
+		return taskstats2_cmd_attr_pid(info);
+	else
+		return -EINVAL;
+}
+
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
 	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
@@ -654,10 +972,21 @@ static const struct genl_ops taskstats_ops[] = {
 		.flags		= GENL_ADMIN_PERM,
 	},
 	{
+		.cmd		= TASKSTATS2_CMD_GET,
+		.doit		= taskstats2_user_cmd,
+		.dumpit		= taskstats2_foreach,
+		.policy		= taskstats_cmd_get_policy,
+	},
+	{
 		.cmd		= CGROUPSTATS_CMD_GET,
 		.doit		= cgroupstats_user_cmd,
 		.policy		= cgroupstats_cmd_get_policy,
 	},
+	{
+		.cmd		= SYSSTATS_CMD_GET,
+		.doit		= sysstats_user_cmd,
+		.policy		= sysstats_cmd_get_policy,
+	},
 };
 
 static struct genl_family family __ro_after_init = {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index de7ebe5..a02e0f6 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,7 +18,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/sched_clock.h>
 #include <linux/module.h>
 #include <trace/events/power.h>
 
@@ -516,8 +515,8 @@ void tick_unfreeze(void)
 
 	if (tick_freeze_depth == num_online_cpus()) {
 		timekeeping_resume();
-		system_state = SYSTEM_RUNNING;
 		sched_clock_resume();
+		system_state = SYSTEM_RUNNING;
 		trace_suspend_resume(TPS("timekeeping_freeze"),
 				     smp_processor_id(), false);
 	} else {
diff --git a/kernel/time/time.c b/kernel/time/time.c
index ccdb351..be057d6 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -172,7 +172,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
 	static int firsttime = 1;
 	int error = 0;
 
-	if (tv && !timespec64_valid(tv))
+	if (tv && !timespec64_valid_settod(tv))
 		return -EINVAL;
 
 	error = security_settime64(tv, tz);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7846ce2..9a6bfcd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1242,7 +1242,7 @@ int do_settimeofday64(const struct timespec64 *ts)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!timespec64_valid_strict(ts))
+	if (!timespec64_valid_settod(ts))
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1299,7 +1299,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
 	/* Make sure the proposed value is valid */
 	tmp = timespec64_add(tk_xtime(tk), *ts);
 	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
-	    !timespec64_valid_strict(&tmp)) {
+	    !timespec64_valid_settod(&tmp)) {
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1556,7 +1556,7 @@ void __init timekeeping_init(void)
 	unsigned long flags;
 
 	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
-	if (timespec64_valid_strict(&wall_time) &&
+	if (timespec64_valid_settod(&wall_time) &&
 	    timespec64_to_ns(&wall_time) > 0) {
 		persistent_clock_exists = true;
 	} else if (timespec64_to_ns(&wall_time) != 0) {
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 7a9b4eb..141ab3a 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
 extern void timekeeping_warp_clock(void);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
+#else
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
+#endif
 
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5e6884e..a6bff0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,6 +34,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -5500,9 +5501,9 @@ static int ftrace_cmp_ips(const void *a, const void *b)
 	return 0;
 }
 
-static int ftrace_process_locs(struct module *mod,
-			       unsigned long *start,
-			       unsigned long *end)
+static int __norecordmcount ftrace_process_locs(struct module *mod,
+						unsigned long *start,
+						unsigned long *end)
 {
 	struct ftrace_page *start_pg;
 	struct ftrace_page *pg;
@@ -6251,7 +6252,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
 	tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6311,12 +6312,14 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
 			      struct ftrace_ops *op, struct pt_regs *regs)
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6343,6 +6346,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index cdd2eea..381ef97 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -895,6 +895,7 @@ void ipc_log_context_free(struct kref *kref)
 int ipc_log_context_destroy(void *ctxt)
 {
 	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	struct dfunc_info *df_info = NULL, *tmp = NULL;
 	unsigned long flags;
 
 	if (!ilctxt)
@@ -905,6 +906,10 @@ int ipc_log_context_destroy(void *ctxt)
 	spin_lock(&ilctxt->context_lock_lhb1);
 	ilctxt->destroyed = true;
 	complete_all(&ilctxt->read_avail);
+	list_for_each_entry_safe(df_info, tmp, &ilctxt->dfunc_info_list, list) {
+		list_del(&df_info->list);
+		kfree(df_info);
+	}
 	spin_unlock(&ilctxt->context_lock_lhb1);
 
 	write_lock_irqsave(&context_list_lock_lha1, flags);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 34b4c32..805aef8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -730,7 +730,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
 	preempt_disable_notrace();
 	time = rb_time_stamp(buffer);
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return time;
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9b98348..22a621c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -497,8 +497,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	 * not modified.
 	 */
 	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-	if (!pid_list)
+	if (!pid_list) {
+		trace_parser_put(&parser);
 		return -ENOMEM;
+	}
 
 	pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -508,6 +510,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 
 	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
 	if (!pid_list->pids) {
+		trace_parser_put(&parser);
 		kfree(pid_list);
 		return -ENOMEM;
 	}
@@ -6806,28 +6809,36 @@ struct buffer_ref {
 	struct ring_buffer	*buffer;
 	void			*page;
 	int			cpu;
-	int			ref;
+	refcount_t		refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+	if (!refcount_dec_and_test(&ref->refcount))
+		return;
+	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+	kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 				    struct pipe_buffer *buf)
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	buf->private = 0;
 }
 
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 				struct pipe_buffer *buf)
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	ref->ref++;
+	if (refcount_read(&ref->refcount) > INT_MAX/2)
+		return false;
+
+	refcount_inc(&ref->refcount);
+	return true;
 }
 
 /* Pipe buffer operations for a buffer. */
@@ -6835,7 +6846,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= buffer_pipe_buf_release,
-	.steal			= generic_pipe_buf_steal,
+	.steal			= generic_pipe_buf_nosteal,
 	.get			= buffer_pipe_buf_get,
 };
 
@@ -6848,11 +6859,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 	struct buffer_ref *ref =
 		(struct buffer_ref *)spd->partial[i].private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	spd->partial[i].private = 0;
 }
 
@@ -6907,7 +6914,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
 
-		ref->ref = 1;
+		refcount_set(&ref->refcount, 1);
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (IS_ERR(ref->page)) {
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 4ad9674..3ea65cd 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 			  int expect, int is_constant)
 {
+	unsigned long flags = user_access_save();
+
 	/* A constant is always correct */
 	if (is_constant) {
 		f->constant++;
@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 		f->data.correct++;
 	else
 		f->data.incorrect++;
+
+	user_access_restore(flags);
 }
 EXPORT_SYMBOL(ftrace_likely_update);
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8307faf..234725d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	char buf[32];
 	int len;
 
-	if (*ppos)
-		return 0;
-
 	if (unlikely(!id))
 		return -ENODEV;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7d8ae47..5370ade 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2949,6 +2949,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!wq_online))
 		return false;
 
+	if (WARN_ON(!work->func))
+		return false;
+
 	if (!from_cancel) {
 		lock_map_acquire(&work->lockdep_map);
 		lock_map_release(&work->lockdep_map);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3eba254..6c164cd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -146,6 +146,15 @@
 	  See Documentation/admin-guide/dynamic-debug-howto.rst for additional
 	  information.
 
+config DEBUG_CONSOLE_UNHASHED_POINTERS
+	bool "Display unhashed kernel pointers"
+	depends on DEBUG_KERNEL
+	help
+	  Pointers %p and %pK are normally hashed prior to being displayed to
+	  prevent leaking kernel addresses. On debug builds, always print
+	  actual pointer values, ignoring the kptr_restrict setting.
+	  Not to be enabled on production builds.
+
 endmenu # "printk and dmesg options"
 
 menu "Compile-time checks and compiler options"
@@ -2010,6 +2019,7 @@
 	depends on m
 	depends on BLOCK && (64BIT || LBDAF)	  # for XFS, BTRFS
 	depends on NETDEVICES && NET_CORE && INET # for TUN
+	depends on BLOCK
 	select TEST_LKM
 	select XFS_FS
 	select TUN
diff --git a/lib/Makefile b/lib/Makefile
index 2b69a94..f3b3bdb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,6 +17,17 @@
 KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
 
+# Early boot use of cmdline, don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KASAN_SANITIZE_string.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_string.o = -pg
+endif
+
+CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+endif
+
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8be175d..acd7b97 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -817,8 +817,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-	struct page *head = compound_head(page);
-	size_t v = n + offset + page_address(page) - page_address(head);
+	struct page *head;
+	size_t v = n + offset;
+
+	/*
+	 * The general case needs to access the page order in order
+	 * to compute the page size.
+	 * However, we mostly deal with order-0 pages and thus can
+	 * avoid a possible cache line miss for requests that fit all
+	 * page orders.
+	 */
+	if (n <= v && v <= PAGE_SIZE)
+		return true;
+
+	head = compound_head(page);
+	v += (page - head) << PAGE_SHIFT;
 
 	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
 		return true;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 63d0816a..7761f32 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -464,6 +464,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	int i = 0;
 	int retval = 0;
 
+	/*
+	 * Mark "remove" event done regardless of result, because some subsystems
+	 * do not want to re-trigger "remove" event via automatic cleanup.
+	 */
+	if (action == KOBJ_REMOVE)
+		kobj->state_remove_uevent_sent = 1;
+
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
 
@@ -565,10 +572,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		kobj->state_add_uevent_sent = 1;
 		break;
 
-	case KOBJ_REMOVE:
-		kobj->state_remove_uevent_sent = 1;
-		break;
-
 	case KOBJ_UNBIND:
 		zap_modalias_env(env);
 		break;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8a..0572ac3 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -356,7 +356,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
 		 * to ensure that the batch size is updated before the wait
 		 * counts.
 		 */
-		smp_mb__before_atomic();
+		smp_mb();
 		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
 			atomic_set(&sbq->ws[i].wait_cnt, 1);
 	}
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index b53e1b5..e304b54 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -23,10 +23,11 @@
  * hit it), 'max' is the address space maximum (and we return
  * -EFAULT if we hit it).
  */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+					unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long res = 0;
+	unsigned long res = 0;
 
 	/*
 	 * Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 60d0bbd..184f80f 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -28,7 +28,7 @@
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long align, res = 0;
+	unsigned long align, res = 0;
 	unsigned long c;
 
 	/*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	 * Do everything aligned. But that means that we
 	 * need to also expand the maximum..
 	 */
-	align = (sizeof(long) - 1) & (unsigned long)src;
+	align = (sizeof(unsigned long) - 1) & (unsigned long)src;
 	src -= align;
 	max += align;
 
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e4162f5..1e9e2ab 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -86,11 +86,13 @@ static bool is_inline_int(struct type_descriptor *type)
 	return bits <= inline_bits;
 }
 
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
 {
 	if (is_inline_int(type)) {
 		unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
-		return ((s_max)val) << extra_bits >> extra_bits;
+		unsigned long ulong_val = (unsigned long)val;
+
+		return ((s_max)ulong_val) << extra_bits >> extra_bits;
 	}
 
 	if (type_bit_width(type) == 64)
@@ -99,15 +101,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
 	return *(s_max *)val;
 }
 
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
 {
 	return type_is_signed(type) && get_signed_val(type, val) < 0;
 }
 
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
 {
 	if (is_inline_int(type))
-		return val;
+		return (unsigned long)val;
 
 	if (type_bit_width(type) == 64)
 		return *(u64 *)val;
@@ -116,7 +118,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
 }
 
 static void val_to_string(char *str, size_t size, struct type_descriptor *type,
-	unsigned long value)
+			void *value)
 {
 	if (type_is_int(type)) {
 		if (type_bit_width(type) == 128) {
@@ -163,8 +165,8 @@ static void ubsan_epilogue(unsigned long *flags)
 	current->in_ubsan--;
 }
 
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
-			unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+			void *rhs, char op)
 {
 
 	struct type_descriptor *type = data->type;
@@ -191,8 +193,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
 }
 
 void __ubsan_handle_add_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 
 	handle_overflow(data, lhs, rhs, '+');
@@ -200,23 +201,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
 EXPORT_SYMBOL(__ubsan_handle_add_overflow);
 
 void __ubsan_handle_sub_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	handle_overflow(data, lhs, rhs, '-');
 }
 EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
 
 void __ubsan_handle_mul_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	handle_overflow(data, lhs, rhs, '*');
 }
 EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
 
 void __ubsan_handle_negate_overflow(struct overflow_data *data,
-				unsigned long old_val)
+				void *old_val)
 {
 	unsigned long flags;
 	char old_val_str[VALUE_LENGTH];
@@ -237,8 +236,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
 
 
 void __ubsan_handle_divrem_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	unsigned long flags;
 	char rhs_val_str[VALUE_LENGTH];
@@ -323,7 +321,7 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
 }
 
 void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
-				unsigned long ptr)
+				void *ptr)
 {
 	struct type_mismatch_data_common common_data = {
 		.location = &data->location,
@@ -332,12 +330,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
 		.type_check_kind = data->type_check_kind
 	};
 
-	ubsan_type_mismatch_common(&common_data, ptr);
+	ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
 
 void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
-				unsigned long ptr)
+				void *ptr)
 {
 
 	struct type_mismatch_data_common common_data = {
@@ -347,12 +345,12 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
 		.type_check_kind = data->type_check_kind
 	};
 
-	ubsan_type_mismatch_common(&common_data, ptr);
+	ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
 
 void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
-					unsigned long bound)
+					void *bound)
 {
 	unsigned long flags;
 	char bound_str[VALUE_LENGTH];
@@ -369,8 +367,7 @@ void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
 }
 EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
 
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
-				unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
 {
 	unsigned long flags;
 	char index_str[VALUE_LENGTH];
@@ -388,7 +385,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
 
 void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
-					unsigned long lhs, unsigned long rhs)
+					void *lhs, void *rhs)
 {
 	unsigned long flags;
 	struct type_descriptor *rhs_type = data->rhs_type;
@@ -439,7 +436,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
 
 void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
-				unsigned long val)
+				void *val)
 {
 	unsigned long flags;
 	char val_str[VALUE_LENGTH];
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 812e59e..abe67c3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1940,7 +1940,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 			return buf;
 		}
 	case 'K':
-		if (!kptr_restrict)
+		if (!kptr_restrict ||
+		    IS_ENABLED(CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS))
 			break;
 		return restricted_pointer(buf, end, ptr, spec);
 	case 'N':
@@ -1972,6 +1973,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return pointer_string(buf, end, ptr, spec);
 	}
 
+	if (IS_ENABLED(CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS))
+		return pointer_string(buf, end, ptr, spec);
+
 	/* default is to _not_ leak addresses, hash before printing */
 	return ptr_to_id(buf, end, ptr, spec);
 }
diff --git a/mm/compaction.c b/mm/compaction.c
index 7c60747..f7eec61 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -66,7 +66,7 @@ static unsigned long release_freepages(struct list_head *freelist)
 	return high_pfn;
 }
 
-static void map_pages(struct list_head *list)
+static void split_map_pages(struct list_head *list)
 {
 	unsigned int i, order, nr_pages;
 	struct page *page, *next;
@@ -237,6 +237,77 @@ static bool pageblock_skip_persistent(struct page *page)
 	return false;
 }
 
+static bool
+__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
+							bool check_target)
+{
+	struct page *page = pfn_to_online_page(pfn);
+	struct page *block_page;
+	struct page *end_page;
+	unsigned long block_pfn;
+
+	if (!page)
+		return false;
+	if (zone != page_zone(page))
+		return false;
+	if (pageblock_skip_persistent(page))
+		return false;
+
+	/*
+	 * If skip is already cleared do no further checking once the
+	 * restart points have been set.
+	 */
+	if (check_source && check_target && !get_pageblock_skip(page))
+		return true;
+
+	/*
+	 * If clearing skip for the target scanner, do not select a
+	 * non-movable pageblock as the starting point.
+	 */
+	if (!check_source && check_target &&
+	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+		return false;
+
+	/* Ensure the start of the pageblock or zone is online and valid */
+	block_pfn = pageblock_start_pfn(pfn);
+	block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+	if (block_page) {
+		page = block_page;
+		pfn = block_pfn;
+	}
+
+	/* Ensure the end of the pageblock or zone is online and valid */
+	block_pfn += pageblock_nr_pages;
+	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+	end_page = pfn_to_online_page(block_pfn);
+	if (!end_page)
+		return false;
+
+	/*
+	 * Only clear the hint if a sample indicates there is either a
+	 * free page or an LRU page in the block. One or other condition
+	 * is necessary for the block to be a migration source/target.
+	 */
+	do {
+		if (pfn_valid_within(pfn)) {
+			if (check_source && PageLRU(page)) {
+				clear_pageblock_skip(page);
+				return true;
+			}
+
+			if (check_target && PageBuddy(page)) {
+				clear_pageblock_skip(page);
+				return true;
+			}
+		}
+
+		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
+		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
+	} while (page < end_page);
+
+	return false;
+}
+
 /*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
@@ -244,30 +315,54 @@ static bool pageblock_skip_persistent(struct page *page)
  */
 static void __reset_isolation_suitable(struct zone *zone)
 {
-	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone_end_pfn(zone);
-	unsigned long pfn;
+	unsigned long migrate_pfn = zone->zone_start_pfn;
+	unsigned long free_pfn = zone_end_pfn(zone) - 1;
+	unsigned long reset_migrate = free_pfn;
+	unsigned long reset_free = migrate_pfn;
+	bool source_set = false;
+	bool free_set = false;
+
+	if (!zone->compact_blockskip_flush)
+		return;
 
 	zone->compact_blockskip_flush = false;
 
-	/* Walk the zone and mark every pageblock as suitable for isolation */
-	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
-		struct page *page;
-
+	/*
+	 * Walk the zone and update pageblock skip information. Source looks
+	 * for PageLRU while target looks for PageBuddy. When the scanner
+	 * is found, both PageBuddy and PageLRU are checked as the pageblock
+	 * is suitable as both source and target.
+	 */
+	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
+					free_pfn -= pageblock_nr_pages) {
 		cond_resched();
 
-		page = pfn_to_online_page(pfn);
-		if (!page)
-			continue;
-		if (zone != page_zone(page))
-			continue;
-		if (pageblock_skip_persistent(page))
-			continue;
+		/* Update the migrate PFN */
+		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
+		    migrate_pfn < reset_migrate) {
+			source_set = true;
+			reset_migrate = migrate_pfn;
+			zone->compact_init_migrate_pfn = reset_migrate;
+			zone->compact_cached_migrate_pfn[0] = reset_migrate;
+			zone->compact_cached_migrate_pfn[1] = reset_migrate;
+		}
 
-		clear_pageblock_skip(page);
+		/* Update the free PFN */
+		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
+		    free_pfn > reset_free) {
+			free_set = true;
+			reset_free = free_pfn;
+			zone->compact_init_free_pfn = reset_free;
+			zone->compact_cached_free_pfn = reset_free;
+		}
 	}
 
-	reset_cached_positions(zone);
+	/* Leave no distance if no suitable block was reset */
+	if (reset_migrate >= reset_free) {
+		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
+		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
+		zone->compact_cached_free_pfn = free_pfn;
+	}
 }
 
 void reset_isolation_suitable(pg_data_t *pgdat)
@@ -286,15 +381,53 @@ void reset_isolation_suitable(pg_data_t *pgdat)
 }
 
 /*
+ * Sets the pageblock skip bit if it was clear. Note that this is a hint as
+ * locks are not required for read/writers. Returns true if it was already set.
+ */
+static bool test_and_set_skip(struct compact_control *cc, struct page *page,
+							unsigned long pfn)
+{
+	bool skip;
+
+	/* Do not update if skip hint is being ignored */
+	if (cc->ignore_skip_hint)
+		return false;
+
+	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
+		return false;
+
+	skip = get_pageblock_skip(page);
+	if (!skip && !cc->no_set_skip_hint)
+		set_pageblock_skip(page);
+
+	return skip;
+}
+
+static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
+{
+	struct zone *zone = cc->zone;
+
+	pfn = pageblock_end_pfn(pfn);
+
+	/* Set for isolation rather than compaction */
+	if (cc->no_set_skip_hint)
+		return;
+
+	if (pfn > zone->compact_cached_migrate_pfn[0])
+		zone->compact_cached_migrate_pfn[0] = pfn;
+	if (cc->mode != MIGRATE_ASYNC &&
+	    pfn > zone->compact_cached_migrate_pfn[1])
+		zone->compact_cached_migrate_pfn[1] = pfn;
+}
+
+/*
  * If no pages were isolated then mark this pageblock to be skipped in the
  * future. The information is later cleared by __reset_isolation_suitable().
  */
 static void update_pageblock_skip(struct compact_control *cc,
-			struct page *page, unsigned long nr_isolated,
-			bool migrate_scanner)
+			struct page *page, unsigned long pfn)
 {
 	struct zone *zone = cc->zone;
-	unsigned long pfn;
 
 	if (cc->no_set_skip_hint)
 		return;
@@ -302,24 +435,11 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (!page)
 		return;
 
-	if (nr_isolated)
-		return;
-
 	set_pageblock_skip(page);
 
-	pfn = page_to_pfn(page);
-
 	/* Update where async and sync compaction should restart */
-	if (migrate_scanner) {
-		if (pfn > zone->compact_cached_migrate_pfn[0])
-			zone->compact_cached_migrate_pfn[0] = pfn;
-		if (cc->mode != MIGRATE_ASYNC &&
-		    pfn > zone->compact_cached_migrate_pfn[1])
-			zone->compact_cached_migrate_pfn[1] = pfn;
-	} else {
-		if (pfn < zone->compact_cached_free_pfn)
-			zone->compact_cached_free_pfn = pfn;
-	}
+	if (pfn < zone->compact_cached_free_pfn)
+		zone->compact_cached_free_pfn = pfn;
 }
 #else
 static inline bool isolation_suitable(struct compact_control *cc,
@@ -334,32 +454,42 @@ static inline bool pageblock_skip_persistent(struct page *page)
 }
 
 static inline void update_pageblock_skip(struct compact_control *cc,
-			struct page *page, unsigned long nr_isolated,
-			bool migrate_scanner)
+			struct page *page, unsigned long pfn)
 {
 }
+
+static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
+{
+}
+
+static bool test_and_set_skip(struct compact_control *cc, struct page *page,
+							unsigned long pfn)
+{
+	return false;
+}
 #endif /* CONFIG_COMPACTION */
 
 /*
  * Compaction requires the taking of some coarse locks that are potentially
- * very heavily contended. For async compaction, back out if the lock cannot
- * be taken immediately. For sync compaction, spin on the lock if needed.
+ * very heavily contended. For async compaction, trylock and record if the
+ * lock is contended. The lock will still be acquired but compaction will
+ * abort when the current block is finished regardless of success rate.
+ * Sync compaction acquires the lock.
  *
- * Returns true if the lock is held
- * Returns false if the lock is not held and compaction should abort
+ * Always returns true which makes it easier to track lock state in callers.
  */
-static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
+static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 {
-	if (cc->mode == MIGRATE_ASYNC) {
-		if (!spin_trylock_irqsave(lock, *flags)) {
-			cc->contended = true;
-			return false;
-		}
-	} else {
-		spin_lock_irqsave(lock, *flags);
+	/* Track if the lock is contended in async mode */
+	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+		if (spin_trylock_irqsave(lock, *flags))
+			return true;
+
+		cc->contended = true;
 	}
 
+	spin_lock_irqsave(lock, *flags);
 	return true;
 }
 
@@ -391,37 +521,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 		return true;
 	}
 
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
-			cc->contended = true;
-			return true;
-		}
-		cond_resched();
-	}
-
-	return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
-	/* async compaction aborts if contended */
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
-			cc->contended = true;
-			return true;
-		}
-
-		cond_resched();
-	}
+	cond_resched();
 
 	return false;
 }
@@ -435,19 +535,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 				unsigned long *start_pfn,
 				unsigned long end_pfn,
 				struct list_head *freelist,
+				unsigned int stride,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
-	struct page *cursor, *valid_page = NULL;
+	struct page *cursor;
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
 	unsigned int order;
 
+	/* Strict mode is for isolation, speed is secondary */
+	if (strict)
+		stride = 1;
+
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
-	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
 		int isolated;
 		struct page *page = cursor;
 
@@ -465,9 +570,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (!pfn_valid_within(blockpfn))
 			goto isolate_fail;
 
-		if (!valid_page)
-			valid_page = page;
-
 		/*
 		 * For compound pages such as THP and hugetlbfs, we can save
 		 * potentially a lot of iterations if we skip them at once.
@@ -495,18 +597,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * recheck as well.
 		 */
 		if (!locked) {
-			/*
-			 * The zone lock must be held to isolate freepages.
-			 * Unfortunately this is a very coarse lock and can be
-			 * heavily contended if there are parallel allocations
-			 * or parallel compactions. For async compaction do not
-			 * spin on the lock and we acquire the lock as late as
-			 * possible.
-			 */
-			locked = compact_trylock_irqsave(&cc->zone->lock,
+			locked = compact_lock_irqsave(&cc->zone->lock,
 								&flags, cc);
-			if (!locked)
-				break;
 
 			/* Recheck this is a buddy page under lock */
 			if (!PageBuddy(page))
@@ -565,10 +657,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
-	/* Update the pageblock-skip if the whole pageblock was scanned */
-	if (blockpfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, total_isolated, false);
-
 	cc->total_free_scanned += nr_scanned;
 	if (total_isolated)
 		count_compact_events(COMPACTISOLATED, total_isolated);
@@ -626,7 +714,7 @@ isolate_freepages_range(struct compact_control *cc,
 			break;
 
 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-						block_end_pfn, &freelist, true);
+					block_end_pfn, &freelist, 0, true);
 
 		/*
 		 * In strict mode, isolate_freepages_block() returns 0 if
@@ -644,7 +732,7 @@ isolate_freepages_range(struct compact_control *cc,
 	}
 
 	/* __isolate_free_page() does not map the pages */
-	map_pages(&freelist);
+	split_map_pages(&freelist);
 
 	if (pfn < end_pfn) {
 		/* Loop terminated early, cleanup. */
@@ -702,6 +790,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	unsigned long start_pfn = low_pfn;
 	bool skip_on_failure = false;
 	unsigned long next_skip_pfn = 0;
+	bool skip_updated = false;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -719,8 +808,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			return 0;
 	}
 
-	if (compact_should_abort(cc))
-		return 0;
+	cond_resched();
 
 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 		skip_on_failure = true;
@@ -768,8 +856,19 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		page = pfn_to_page(low_pfn);
 
-		if (!valid_page)
+		/*
+		 * Check if the pageblock has already been marked skipped.
+		 * Only the aligned PFN is checked as the caller isolates
+		 * COMPACT_CLUSTER_MAX at a time so the second call must
+		 * not falsely conclude that the block should be skipped.
+		 */
+		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
+			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
+				low_pfn = end_pfn;
+				goto isolate_abort;
+			}
 			valid_page = page;
+		}
 
 		/*
 		 * Skip if free. We read page order here without zone lock
@@ -848,10 +947,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
-			locked = compact_trylock_irqsave(zone_lru_lock(zone),
+			locked = compact_lock_irqsave(zone_lru_lock(zone),
 								&flags, cc);
-			if (!locked)
-				break;
+
+			/* Try get exclusive access under lock */
+			if (!skip_updated) {
+				skip_updated = true;
+				if (test_and_set_skip(cc, page, low_pfn))
+					goto isolate_abort;
+			}
 
 			/* Recheck PageLRU and PageCompound under lock */
 			if (!PageLRU(page))
@@ -887,16 +991,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		nr_isolated++;
 
 		/*
-		 * Record where we could have freed pages by migration and not
-		 * yet flushed them to buddy allocator.
-		 * - this is the lowest page that was isolated and likely be
-		 * then freed by migration.
+		 * Avoid isolating too much unless this block is being
+		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
+		 * or a lock is contended. For contention, isolate quickly to
+		 * potentially remove one source of contention.
 		 */
-		if (!cc->last_migrated_pfn)
-			cc->last_migrated_pfn = low_pfn;
-
-		/* Avoid isolating too much */
-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
+		    !cc->rescan && !cc->contended) {
 			++low_pfn;
 			break;
 		}
@@ -918,7 +1019,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			}
 			putback_movable_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
-			cc->last_migrated_pfn = 0;
 			nr_isolated = 0;
 		}
 
@@ -939,15 +1039,23 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	if (unlikely(low_pfn > end_pfn))
 		low_pfn = end_pfn;
 
+isolate_abort:
 	if (locked)
 		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
 	/*
-	 * Update the pageblock-skip information and cached scanner pfn,
-	 * if the whole pageblock was scanned without isolating any page.
+	 * Update the cached scanner pfn once the pageblock has been scanned.
+	 * Pages will either be migrated in which case there is no point
+	 * scanning in the near future or migration failed in which case the
+	 * failure reason may persist. The block is marked for skipping if
+	 * there were no pages isolated in the block or if the block is
+	 * rescanned twice in a row.
 	 */
-	if (low_pfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, nr_isolated, true);
+	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
+		if (valid_page && !skip_updated)
+			set_pageblock_skip(valid_page);
+		update_cached_migrate(cc, low_pfn);
+	}
 
 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 						nr_scanned, nr_isolated);
@@ -1013,6 +1121,9 @@ static bool suitable_migration_source(struct compact_control *cc,
 {
 	int block_mt;
 
+	if (pageblock_skip_persistent(page))
+		return false;
+
 	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
 		return true;
 
@@ -1050,6 +1161,14 @@ static bool suitable_migration_target(struct compact_control *cc,
 	return false;
 }
 
+static inline unsigned int
+freelist_scan_limit(struct compact_control *cc)
+{
+	return (COMPACT_CLUSTER_MAX >>
+		min((unsigned short)(BITS_PER_LONG - 1), cc->fast_search_fail))
+		+ 1;
+}
+
 /*
  * Test whether the free scanner has reached the same or lower pageblock than
  * the migration scanner, and compaction should thus terminate.
@@ -1061,6 +1180,248 @@ static inline bool compact_scanners_met(struct compact_control *cc)
 }
 
 /*
+ * Used when scanning for a suitable migration target which scans freelists
+ * in reverse. Reorders the list such that the unscanned pages are scanned
+ * first on the next iteration of the free scanner
+ */
+static void
+move_freelist_head(struct list_head *freelist, struct page *freepage)
+{
+	LIST_HEAD(sublist);
+
+	if (!list_is_last(freelist, &freepage->lru)) {
+		list_cut_before(&sublist, freelist, &freepage->lru);
+		if (!list_empty(&sublist))
+			list_splice_tail(&sublist, freelist);
+	}
+}
+
+/*
+ * Similar to move_freelist_head except used by the migration scanner
+ * when scanning forward. It's possible for these list operations to
+ * move against each other if they search the free list exactly in
+ * lockstep.
+ */
+static void
+move_freelist_tail(struct list_head *freelist, struct page *freepage)
+{
+	LIST_HEAD(sublist);
+
+	if (!list_is_first(freelist, &freepage->lru)) {
+		list_cut_position(&sublist, freelist, &freepage->lru);
+		if (!list_empty(&sublist))
+			list_splice_tail(&sublist, freelist);
+	}
+}
+
+static void
+fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
+{
+	unsigned long start_pfn, end_pfn;
+	struct page *page = pfn_to_page(pfn);
+
+	/* Do not search around if there are enough pages already */
+	if (cc->nr_freepages >= cc->nr_migratepages)
+		return;
+
+	/* Minimise scanning during async compaction */
+	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
+		return;
+
+	/* Pageblock boundaries */
+	start_pfn = pageblock_start_pfn(pfn);
+	end_pfn = min(start_pfn + pageblock_nr_pages, zone_end_pfn(cc->zone));
+
+	/* Scan before */
+	if (start_pfn != pfn) {
+		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
+		if (cc->nr_freepages >= cc->nr_migratepages)
+			return;
+	}
+
+	/* Scan after */
+	start_pfn = pfn + nr_isolated;
+	if (start_pfn != end_pfn)
+		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+
+	/* Skip this pageblock in the future as it's full or nearly full */
+	if (cc->nr_freepages < cc->nr_migratepages)
+		set_pageblock_skip(page);
+}
+
+/* Search orders in round-robin fashion */
+static int next_search_order(struct compact_control *cc, int order)
+{
+	order--;
+	if (order < 0)
+		order = cc->order - 1;
+
+	/* Search wrapped around? */
+	if (order == cc->search_order) {
+		cc->search_order--;
+		if (cc->search_order < 0)
+			cc->search_order = cc->order - 1;
+		return -1;
+	}
+
+	return order;
+}
+
+static unsigned long
+fast_isolate_freepages(struct compact_control *cc)
+{
+	unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
+	unsigned int nr_scanned = 0;
+	unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
+	unsigned long nr_isolated = 0;
+	unsigned long distance;
+	struct page *page = NULL;
+	bool scan_start = false;
+	int order;
+
+	/* Full compaction passes in a negative order */
+	if (cc->order <= 0)
+		return cc->free_pfn;
+
+	/*
+	 * If starting the scan, use a deeper search and use the highest
+	 * PFN found if a suitable one is not found.
+	 */
+	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
+		limit = pageblock_nr_pages >> 1;
+		scan_start = true;
+	}
+
+	/*
+	 * Preferred point is in the top quarter of the scan space but take
+	 * a pfn from the top half if the search is problematic.
+	 */
+	distance = (cc->free_pfn - cc->migrate_pfn);
+	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
+	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
+
+	if (WARN_ON_ONCE(min_pfn > low_pfn))
+		low_pfn = min_pfn;
+
+	/*
+	 * Search starts from the last successful isolation order or the next
+	 * order to search after a previous failure
+	 */
+	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
+
+	for (order = cc->search_order;
+	     !page && order >= 0;
+	     order = next_search_order(cc, order)) {
+		struct free_area *area = &cc->zone->free_area[order];
+		struct list_head *freelist;
+		struct page *freepage;
+		unsigned long flags;
+		unsigned int order_scanned = 0;
+
+		if (!area->nr_free)
+			continue;
+
+		spin_lock_irqsave(&cc->zone->lock, flags);
+		freelist = &area->free_list[MIGRATE_MOVABLE];
+		list_for_each_entry_reverse(freepage, freelist, lru) {
+			unsigned long pfn;
+
+			order_scanned++;
+			nr_scanned++;
+			pfn = page_to_pfn(freepage);
+
+			if (pfn >= highest)
+				highest = pageblock_start_pfn(pfn);
+
+			if (pfn >= low_pfn) {
+				cc->fast_search_fail = 0;
+				cc->search_order = order;
+				page = freepage;
+				break;
+			}
+
+			if (pfn >= min_pfn && pfn > high_pfn) {
+				high_pfn = pfn;
+
+				/* Shorten the scan if a candidate is found */
+				limit >>= 1;
+			}
+
+			if (order_scanned >= limit)
+				break;
+		}
+
+		/* Use a minimum pfn if a preferred one was not found */
+		if (!page && high_pfn) {
+			page = pfn_to_page(high_pfn);
+
+			/* Update freepage for the list reorder below */
+			freepage = page;
+		}
+
+		/* Reorder so that a future search skips recent pages */
+		move_freelist_head(freelist, freepage);
+
+		/* Isolate the page if available */
+		if (page) {
+			if (__isolate_free_page(page, order)) {
+				set_page_private(page, order);
+				nr_isolated = 1 << order;
+				cc->nr_freepages += nr_isolated;
+				list_add_tail(&page->lru, &cc->freepages);
+				count_compact_events(COMPACTISOLATED, nr_isolated);
+			} else {
+				/* If isolation fails, abort the search */
+				order = cc->search_order + 1;
+				page = NULL;
+			}
+		}
+
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+
+		/*
+		 * Smaller scan on next order so the total scan is related
+		 * to freelist_scan_limit.
+		 */
+		if (order_scanned >= limit)
+			limit = min(1U, limit >> 1);
+	}
+
+	if (!page) {
+		cc->fast_search_fail++;
+		if (scan_start) {
+			/*
+			 * Use the highest PFN found above min. If one was
+			 * not found, be pessimistic for direct compaction
+			 * and use the min mark.
+			 */
+			if (highest) {
+				page = pfn_to_page(highest);
+				cc->free_pfn = highest;
+			} else {
+				if (cc->direct_compaction && pfn_valid(min_pfn)) {
+					page = pfn_to_page(min_pfn);
+					cc->free_pfn = min_pfn;
+				}
+			}
+		}
+	}
+
+	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
+		highest -= pageblock_nr_pages;
+		cc->zone->compact_cached_free_pfn = highest;
+	}
+
+	cc->total_free_scanned += nr_scanned;
+	if (!page)
+		return cc->free_pfn;
+
+	low_pfn = page_to_pfn(page);
+	fast_isolate_around(cc, low_pfn, nr_isolated);
+	return low_pfn;
+}
+
+/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -1073,6 +1434,12 @@ static void isolate_freepages(struct compact_control *cc)
 	unsigned long block_end_pfn;	/* end of current pageblock */
 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
 	struct list_head *freelist = &cc->freepages;
+	unsigned int stride;
+
+	/* Try a small search of the free lists for a candidate */
+	isolate_start_pfn = fast_isolate_freepages(cc);
+	if (cc->nr_freepages)
+		goto splitmap;
 
 	/*
 	 * Initialise the free scanner. The starting point is where we last
@@ -1086,10 +1453,11 @@ static void isolate_freepages(struct compact_control *cc)
 	 * is using.
 	 */
 	isolate_start_pfn = cc->free_pfn;
-	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
+	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
 						zone_end_pfn(zone));
 	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
+	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -1100,14 +1468,14 @@ static void isolate_freepages(struct compact_control *cc)
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
+		unsigned long nr_isolated;
+
 		/*
 		 * This can iterate a massively long zone without finding any
-		 * suitable migration targets, so periodically check if we need
-		 * to schedule, or even abort async compaction.
+		 * suitable migration targets, so periodically check resched.
 		 */
-		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			cond_resched();
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
@@ -1123,15 +1491,15 @@ static void isolate_freepages(struct compact_control *cc)
 			continue;
 
 		/* Found a block suitable for isolating free pages from. */
-		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
-					freelist, false);
+		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+					block_end_pfn, freelist, stride, false);
 
-		/*
-		 * If we isolated enough freepages, or aborted due to lock
-		 * contention, terminate.
-		 */
-		if ((cc->nr_freepages >= cc->nr_migratepages)
-							|| cc->contended) {
+		/* Update the skip hint if the full pageblock was scanned */
+		if (isolate_start_pfn == block_end_pfn)
+			update_pageblock_skip(cc, page, block_start_pfn);
+
+		/* Are enough freepages isolated? */
+		if (cc->nr_freepages >= cc->nr_migratepages) {
 			if (isolate_start_pfn >= block_end_pfn) {
 				/*
 				 * Restart at previous pageblock if more
@@ -1148,10 +1516,14 @@ static void isolate_freepages(struct compact_control *cc)
 			 */
 			break;
 		}
-	}
 
-	/* __isolate_free_page() does not map the pages */
-	map_pages(freelist);
+		/* Adjust stride depending on isolation */
+		if (nr_isolated) {
+			stride = 1;
+			continue;
+		}
+		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
+	}
 
 	/*
 	 * Record where the free scanner will restart next time. Either we
@@ -1160,6 +1532,10 @@ static void isolate_freepages(struct compact_control *cc)
 	 * and the loop terminated due to isolate_start_pfn < low_pfn
 	 */
 	cc->free_pfn = isolate_start_pfn;
+
+splitmap:
+	/* __isolate_free_page() does not map the pages */
+	split_map_pages(freelist);
 }
 
 /*
@@ -1172,13 +1548,8 @@ static struct page *compaction_alloc(struct page *migratepage,
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
 
-	/*
-	 * Isolate free pages if necessary, and if we are not aborting due to
-	 * contention.
-	 */
 	if (list_empty(&cc->freepages)) {
-		if (!cc->contended)
-			isolate_freepages(cc);
+		isolate_freepages(cc);
 
 		if (list_empty(&cc->freepages))
 			return NULL;
@@ -1217,6 +1588,147 @@ typedef enum {
  */
 int sysctl_compact_unevictable_allowed __read_mostly = 1;
 
+static inline void
+update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
+{
+	if (cc->fast_start_pfn == ULONG_MAX)
+		return;
+
+	if (!cc->fast_start_pfn)
+		cc->fast_start_pfn = pfn;
+
+	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
+}
+
+static inline unsigned long
+reinit_migrate_pfn(struct compact_control *cc)
+{
+	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
+		return cc->migrate_pfn;
+
+	cc->migrate_pfn = cc->fast_start_pfn;
+	cc->fast_start_pfn = ULONG_MAX;
+
+	return cc->migrate_pfn;
+}
+
+/*
+ * Briefly search the free lists for a migration source that already has
+ * some free pages to reduce the number of pages that need migration
+ * before a pageblock is free.
+ */
+static unsigned long fast_find_migrateblock(struct compact_control *cc)
+{
+	unsigned int limit = freelist_scan_limit(cc);
+	unsigned int nr_scanned = 0;
+	unsigned long distance;
+	unsigned long pfn = cc->migrate_pfn;
+	unsigned long high_pfn;
+	int order;
+
+	/* Skip hints are relied on to avoid repeats on the fast search */
+	if (cc->ignore_skip_hint)
+		return pfn;
+
+	/*
+	 * If the migrate_pfn is not at the start of a zone or the start
+	 * of a pageblock then assume this is a continuation of a previous
+	 * scan restarted due to COMPACT_CLUSTER_MAX.
+	 */
+	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
+		return pfn;
+
+	/*
+	 * For smaller orders, just linearly scan as the number of pages
+	 * to migrate should be relatively small and does not necessarily
+	 * justify freeing up a large block for a small allocation.
+	 */
+	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
+		return pfn;
+
+	/*
+	 * Only allow kcompactd and direct requests for movable pages to
+	 * quickly clear out a MOVABLE pageblock for allocation. This
+	 * reduces the risk that a large movable pageblock is freed for
+	 * an unmovable/reclaimable small allocation.
+	 */
+	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
+		return pfn;
+
+	/*
+	 * When starting the migration scanner, pick any pageblock within the
+	 * first half of the search space. Otherwise try and pick a pageblock
+	 * within the first eighth to reduce the chances that a migration
+	 * target later becomes a source.
+	 */
+	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
+	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
+		distance >>= 2;
+	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
+
+	for (order = cc->order - 1;
+	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
+	     order--) {
+		struct free_area *area = &cc->zone->free_area[order];
+		struct list_head *freelist;
+		unsigned long flags;
+		struct page *freepage;
+
+		if (!area->nr_free)
+			continue;
+
+		spin_lock_irqsave(&cc->zone->lock, flags);
+		freelist = &area->free_list[MIGRATE_MOVABLE];
+		list_for_each_entry(freepage, freelist, lru) {
+			unsigned long free_pfn;
+
+			nr_scanned++;
+			free_pfn = page_to_pfn(freepage);
+			if (free_pfn < high_pfn) {
+				/*
+				 * Avoid if skipped recently. Ideally it would
+				 * move to the tail but even safe iteration of
+				 * the list assumes an entry is deleted, not
+				 * reordered.
+				 */
+				if (get_pageblock_skip(freepage)) {
+					if (list_is_last(freelist, &freepage->lru))
+						break;
+
+					continue;
+				}
+
+				/* Reorder so a future search skips recent pages */
+				move_freelist_tail(freelist, freepage);
+
+				update_fast_start_pfn(cc, free_pfn);
+				pfn = pageblock_start_pfn(free_pfn);
+				cc->fast_search_fail = 0;
+				set_pageblock_skip(freepage);
+				break;
+			}
+
+			if (nr_scanned >= limit) {
+				cc->fast_search_fail++;
+				move_freelist_tail(freelist, freepage);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+	}
+
+	cc->total_migrate_scanned += nr_scanned;
+
+	/*
+	 * If fast scanning failed then use a cached entry for a page block
+	 * that had free pages as the basis for starting a linear scan.
+	 */
+	if (pfn == cc->migrate_pfn)
+		pfn = reinit_migrate_pfn(cc);
+
+	return pfn;
+}
+
 /*
  * Isolate all pages that can be migrated from the first suitable block,
  * starting at the block pointed to by the migrate scanner pfn within
@@ -1232,16 +1744,25 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
 		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
+	bool fast_find_block;
 
 	/*
 	 * Start at where we last stopped, or beginning of the zone as
-	 * initialized by compact_zone()
+	 * initialized by compact_zone(). The first failure will use
+	 * the lowest PFN as the starting point for linear scanning.
 	 */
-	low_pfn = cc->migrate_pfn;
+	low_pfn = fast_find_migrateblock(cc);
 	block_start_pfn = pageblock_start_pfn(low_pfn);
 	if (block_start_pfn < zone->zone_start_pfn)
 		block_start_pfn = zone->zone_start_pfn;
 
+	/*
+	 * fast_find_migrateblock marks a pageblock skipped so to avoid
+	 * the isolation_suitable check below, check whether the fast
+	 * search was successful.
+	 */
+	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
+
 	/* Only scan within a pageblock boundary */
 	block_end_pfn = pageblock_end_pfn(low_pfn);
 
@@ -1250,6 +1771,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	 * Do not cross the free scanner.
 	 */
 	for (; block_end_pfn <= cc->free_pfn;
+			fast_find_block = false,
 			low_pfn = block_end_pfn,
 			block_start_pfn = block_end_pfn,
 			block_end_pfn += pageblock_nr_pages) {
@@ -1257,34 +1779,45 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		/*
 		 * This can potentially iterate a massively long zone with
 		 * many pageblocks unsuitable, so periodically check if we
-		 * need to schedule, or even abort async compaction.
+		 * need to schedule.
 		 */
-		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			cond_resched();
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
 		if (!page)
 			continue;
 
-		/* If isolation recently failed, do not retry */
-		if (!isolation_suitable(cc, page))
+		/*
+		 * If isolation recently failed, do not retry. Only check the
+		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
+		 * to be visited multiple times. Assume skip was checked
+		 * before making it "skip" so other compaction instances do
+		 * not scan the same block.
+		 */
+		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
+		    !fast_find_block && !isolation_suitable(cc, page))
 			continue;
 
 		/*
-		 * For async compaction, also only scan in MOVABLE blocks.
-		 * Async compaction is optimistic to see if the minimum amount
-		 * of work satisfies the allocation.
+		 * For async compaction, also only scan in MOVABLE blocks
+		 * without huge pages. Async compaction is optimistic to see
+		 * if the minimum amount of work satisfies the allocation.
+		 * The cached PFN is updated as it's possible that all
+		 * remaining blocks between source and target are unsuitable
+		 * and the compaction scanners fail to meet.
 		 */
-		if (!suitable_migration_source(cc, page))
+		if (!suitable_migration_source(cc, page)) {
+			update_cached_migrate(cc, block_end_pfn);
 			continue;
+		}
 
 		/* Perform the isolation */
 		low_pfn = isolate_migratepages_block(cc, low_pfn,
 						block_end_pfn, isolate_mode);
 
-		if (!low_pfn || cc->contended)
+		if (!low_pfn)
 			return ISOLATE_ABORT;
 
 		/*
@@ -1310,19 +1843,16 @@ static inline bool is_via_compact_memory(int order)
 	return order == -1;
 }
 
-static enum compact_result __compact_finished(struct zone *zone,
-						struct compact_control *cc)
+static enum compact_result __compact_finished(struct compact_control *cc)
 {
 	unsigned int order;
 	const int migratetype = cc->migratetype;
-
-	if (cc->contended || fatal_signal_pending(current))
-		return COMPACT_CONTENDED;
+	int ret;
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (compact_scanners_met(cc)) {
 		/* Let the next compaction start anew. */
-		reset_cached_positions(zone);
+		reset_cached_positions(cc->zone);
 
 		/*
 		 * Mark that the PG_migrate_skip information should be cleared
@@ -1331,7 +1861,7 @@ static enum compact_result __compact_finished(struct zone *zone,
 		 * based on an allocation request.
 		 */
 		if (cc->direct_compaction)
-			zone->compact_blockskip_flush = true;
+			cc->zone->compact_blockskip_flush = true;
 
 		if (cc->whole_zone)
 			return COMPACT_COMPLETE;
@@ -1342,20 +1872,19 @@ static enum compact_result __compact_finished(struct zone *zone,
 	if (is_via_compact_memory(cc->order))
 		return COMPACT_CONTINUE;
 
-	if (cc->finishing_block) {
-		/*
-		 * We have finished the pageblock, but better check again that
-		 * we really succeeded.
-		 */
-		if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
-			cc->finishing_block = false;
-		else
-			return COMPACT_CONTINUE;
-	}
+	/*
+	 * Always finish scanning a pageblock to reduce the possibility of
+	 * fallbacks in the future. This is particularly important when
+	 * migration source is unmovable/reclaimable but it's not worth
+	 * special casing.
+	 */
+	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+		return COMPACT_CONTINUE;
 
 	/* Direct compactor: Is a suitable page free? */
+	ret = COMPACT_NO_SUITABLE_PAGE;
 	for (order = cc->order; order < MAX_ORDER; order++) {
-		struct free_area *area = &zone->free_area[order];
+		struct free_area *area = &cc->zone->free_area[order];
 		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
@@ -1393,21 +1922,23 @@ static enum compact_result __compact_finished(struct zone *zone,
 				return COMPACT_SUCCESS;
 			}
 
-			cc->finishing_block = true;
-			return COMPACT_CONTINUE;
+			ret = COMPACT_CONTINUE;
+			break;
 		}
 	}
 
-	return COMPACT_NO_SUITABLE_PAGE;
+	if (cc->contended || fatal_signal_pending(current))
+		ret = COMPACT_CONTENDED;
+
+	return ret;
 }
 
-static enum compact_result compact_finished(struct zone *zone,
-			struct compact_control *cc)
+static enum compact_result compact_finished(struct compact_control *cc)
 {
 	int ret;
 
-	ret = __compact_finished(zone, cc);
-	trace_mm_compaction_finished(zone, cc->order, ret);
+	ret = __compact_finished(cc);
+	trace_mm_compaction_finished(cc->zone, cc->order, ret);
 	if (ret == COMPACT_NO_SUITABLE_PAGE)
 		ret = COMPACT_CONTINUE;
 
@@ -1431,7 +1962,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	if (is_via_compact_memory(order))
 		return COMPACT_CONTINUE;
 
-	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 	/*
 	 * If watermarks for high-order allocation are already met, there
 	 * should be no need for compaction at all.
@@ -1534,15 +2065,18 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 	return false;
 }
 
-static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
+static enum compact_result
+compact_zone(struct compact_control *cc, struct capture_control *capc)
 {
 	enum compact_result ret;
-	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone_end_pfn(zone);
+	unsigned long start_pfn = cc->zone->zone_start_pfn;
+	unsigned long end_pfn = zone_end_pfn(cc->zone);
+	unsigned long last_migrated_pfn;
 	const bool sync = cc->mode != MIGRATE_ASYNC;
+	bool update_cached;
 
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
-	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
+	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
 	/* Compaction is likely to fail */
 	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
@@ -1555,8 +2089,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 	 * Clear pageblock skip if there were failures recently and compaction
 	 * is about to be retried after being deferred.
 	 */
-	if (compaction_restarting(zone, cc->order))
-		__reset_isolation_suitable(zone);
+	if (compaction_restarting(cc->zone, cc->order))
+		__reset_isolation_suitable(cc->zone);
 
 	/*
 	 * Setup to move all movable pages to the end of the zone. Used cached
@@ -1564,43 +2098,76 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 	 * want to compact the whole zone), but check that it is initialised
 	 * by ensuring the values are within zone boundaries.
 	 */
+	cc->fast_start_pfn = 0;
 	if (cc->whole_zone) {
 		cc->migrate_pfn = start_pfn;
 		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
 	} else {
-		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
-		cc->free_pfn = zone->compact_cached_free_pfn;
+		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
+		cc->free_pfn = cc->zone->compact_cached_free_pfn;
 		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
 			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
-			zone->compact_cached_free_pfn = cc->free_pfn;
+			cc->zone->compact_cached_free_pfn = cc->free_pfn;
 		}
 		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
 			cc->migrate_pfn = start_pfn;
-			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
-			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
 		}
 
-		if (cc->migrate_pfn == start_pfn)
+		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
 			cc->whole_zone = true;
 	}
 
-	cc->last_migrated_pfn = 0;
+	last_migrated_pfn = 0;
+
+	/*
+	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
+	 * the basis that some migrations will fail in ASYNC mode. However,
+	 * if the cached PFNs match and pageblocks are skipped due to having
+	 * no isolation candidates, then the sync state does not matter.
+	 * Until a pageblock with isolation candidates is found, keep the
+	 * cached PFNs in sync to avoid revisiting the same blocks.
+	 */
+	update_cached = !sync &&
+		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
 				cc->free_pfn, end_pfn, sync);
 
 	migrate_prep_local();
 
-	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
 		int err;
+		unsigned long start_pfn = cc->migrate_pfn;
 
-		switch (isolate_migratepages(zone, cc)) {
+		/*
+		 * Avoid multiple rescans which can happen if a page cannot be
+		 * isolated (dirty/writeback in async mode) or if the migrated
+		 * pages are being allocated before the pageblock is cleared.
+		 * The first rescan will capture the entire pageblock for
+		 * migration. If it fails, it'll be marked skip and scanning
+		 * will proceed as normal.
+		 */
+		cc->rescan = false;
+		if (pageblock_start_pfn(last_migrated_pfn) ==
+		    pageblock_start_pfn(start_pfn)) {
+			cc->rescan = true;
+		}
+
+		switch (isolate_migratepages(cc->zone, cc)) {
 		case ISOLATE_ABORT:
 			ret = COMPACT_CONTENDED;
 			putback_movable_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
+			last_migrated_pfn = 0;
 			goto out;
 		case ISOLATE_NONE:
+			if (update_cached) {
+				cc->zone->compact_cached_migrate_pfn[1] =
+					cc->zone->compact_cached_migrate_pfn[0];
+			}
+
 			/*
 			 * We haven't isolated and migrated anything, but
 			 * there might still be unflushed migrations from
@@ -1608,6 +2175,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 			 */
 			goto check_drain;
 		case ISOLATE_SUCCESS:
+			update_cached = false;
+			last_migrated_pfn = start_pfn;
 			;
 		}
 
@@ -1639,8 +2208,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 				cc->migrate_pfn = block_end_pfn(
 						cc->migrate_pfn - 1, cc->order);
 				/* Draining pcplists is useless in this case */
-				cc->last_migrated_pfn = 0;
-
+				last_migrated_pfn = 0;
 			}
 		}
 
@@ -1652,21 +2220,26 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 		 * compact_finished() can detect immediately if allocation
 		 * would succeed.
 		 */
-		if (cc->order > 0 && cc->last_migrated_pfn) {
+		if (cc->order > 0 && last_migrated_pfn) {
 			int cpu;
 			unsigned long current_block_start =
 				block_start_pfn(cc->migrate_pfn, cc->order);
 
-			if (cc->last_migrated_pfn < current_block_start) {
+			if (last_migrated_pfn < current_block_start) {
 				cpu = get_cpu();
 				lru_add_drain_cpu(cpu);
-				drain_local_pages(zone);
+				drain_local_pages(cc->zone);
 				put_cpu();
 				/* No more flushing until we migrate again */
-				cc->last_migrated_pfn = 0;
+				last_migrated_pfn = 0;
 			}
 		}
 
+		/* Stop if a page has been captured */
+		if (capc && capc->page) {
+			ret = COMPACT_SUCCESS;
+			break;
+		}
 	}
 
 out:
@@ -1685,8 +2258,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 		 * Only go back, not forward. The cached pfn might have been
 		 * already reset to zone end in compact_finished()
 		 */
-		if (free_pfn > zone->compact_cached_free_pfn)
-			zone->compact_cached_free_pfn = free_pfn;
+		if (free_pfn > cc->zone->compact_cached_free_pfn)
+			cc->zone->compact_cached_free_pfn = free_pfn;
 	}
 
 	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
@@ -1700,7 +2273,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 
 static enum compact_result compact_zone_order(struct zone *zone, int order,
 		gfp_t gfp_mask, enum compact_priority prio,
-		unsigned int alloc_flags, int classzone_idx)
+		unsigned int alloc_flags, int classzone_idx,
+		struct page **capture)
 {
 	enum compact_result ret;
 	struct compact_control cc = {
@@ -1709,6 +2283,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 		.total_migrate_scanned = 0,
 		.total_free_scanned = 0,
 		.order = order,
+		.search_order = order,
 		.gfp_mask = gfp_mask,
 		.zone = zone,
 		.mode = (prio == COMPACT_PRIO_ASYNC) ?
@@ -1720,14 +2295,24 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
 		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
 	};
+	struct capture_control capc = {
+		.cc = &cc,
+		.page = NULL,
+	};
+
+	if (capture)
+		current->capture_control = &capc;
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
 
-	ret = compact_zone(zone, &cc);
+	ret = compact_zone(&cc, &capc);
 
 	VM_BUG_ON(!list_empty(&cc.freepages));
 	VM_BUG_ON(!list_empty(&cc.migratepages));
 
+	*capture = capc.page;
+	current->capture_control = NULL;
+
 	return ret;
 }
 
@@ -1745,7 +2330,7 @@ int sysctl_extfrag_threshold = 500;
  */
 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum compact_priority prio)
+		enum compact_priority prio, struct page **capture)
 {
 	int may_perform_io = gfp_mask & __GFP_IO;
 	struct zoneref *z;
@@ -1773,7 +2358,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		}
 
 		status = compact_zone_order(zone, order, gfp_mask, prio,
-					alloc_flags, ac_classzone_idx(ac));
+				alloc_flags, ac_classzone_idx(ac), capture);
 		rc = max(status, rc);
 
 		/* The allocation should succeed, stop compacting */
@@ -1841,7 +2426,7 @@ static void compact_node(int nid)
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
 
-		compact_zone(zone, &cc);
+		compact_zone(&cc, NULL);
 
 		VM_BUG_ON(!list_empty(&cc.freepages));
 		VM_BUG_ON(!list_empty(&cc.migratepages));
@@ -1948,6 +2533,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = pgdat->kcompactd_max_order,
+		.search_order = pgdat->kcompactd_max_order,
 		.total_migrate_scanned = 0,
 		.total_free_scanned = 0,
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
@@ -1983,7 +2569,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 
 		if (kthread_should_stop())
 			return;
-		status = compact_zone(zone, &cc);
+		status = compact_zone(&cc, NULL);
 
 		if (status == COMPACT_SUCCESS) {
 			compaction_defer_reset(zone, cc.order, false);
diff --git a/mm/filemap.c b/mm/filemap.c
index 653a9fd..815fec2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2477,12 +2477,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
@@ -2526,7 +2526,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma_flags & VM_RAND_READ)
 		return fpin;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2549,7 +2549,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  *
- * vma->vm_mm->mmap_sem must be held on entry.
+ * vma->vm_mm->mmap_sem must be held on entry (except FAULT_FLAG_SPECULATIVE).
  *
  * If our return value has VM_FAULT_RETRY set, it's because
  * lock_page_or_retry() returned 0.
@@ -2887,7 +2887,11 @@ static struct page *do_read_cache_page(struct address_space *mapping,
 		}
 
 filler:
-		err = filler(data, page);
+		if (filler)
+			err = filler(data, page);
+		else
+			err = mapping->a_ops->readpage(data, page);
+
 		if (err < 0) {
 			put_page(page);
 			return ERR_PTR(err);
@@ -2994,9 +2998,7 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index,
 				gfp_t gfp)
 {
-	filler_t *filler = mapping->a_ops->readpage;
-
-	return do_read_cache_page(mapping, index, filler, NULL, gfp);
+	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
 
diff --git a/mm/gup.c b/mm/gup.c
index 0a5374e..caadd31 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -153,7 +153,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 
 	if (flags & FOLL_GET) {
-		get_page(page);
+		if (unlikely(!try_get_page(page))) {
+			page = ERR_PTR(-ENOMEM);
+			goto out;
+		}
 
 		/* drop the pgmap reference now that we hold the page */
 		if (pgmap) {
@@ -296,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			if (pmd_trans_unstable(pmd))
 				ret = -EBUSY;
 		} else {
-			get_page(page);
+			if (unlikely(!try_get_page(page))) {
+				spin_unlock(ptl);
+				return ERR_PTR(-ENOMEM);
+			}
 			spin_unlock(ptl);
 			lock_page(page);
 			ret = split_huge_page(page);
@@ -480,7 +486,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 		if (is_device_public_page(*page))
 			goto unmap;
 	}
-	get_page(*page);
+	if (unlikely(!try_get_page(*page))) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
 out:
 	ret = 0;
 unmap:
@@ -1368,6 +1377,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
 	}
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+	struct page *head = compound_head(page);
+	if (WARN_ON_ONCE(page_ref_count(head) < 0))
+		return NULL;
+	if (unlikely(!page_cache_add_speculative(head, refs)))
+		return NULL;
+	return head;
+}
+
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 			 int write, struct page **pages, int *nr)
@@ -1402,9 +1425,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
-		head = compound_head(page);
 
-		if (!page_cache_get_speculative(head))
+		head = try_get_compound_head(page, 1);
+		if (!head)
 			goto pte_unmap;
 
 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1543,8 +1566,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pmd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pmd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1581,8 +1604,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pud_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pud_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1618,8 +1641,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pgd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pgd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f707cca..a21b2ca 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -772,11 +772,13 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pte_free(mm, pgtable);
 }
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-			pmd_t *pmd, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+	unsigned long addr = vmf->address & PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t pgprot = vma->vm_page_prot;
 	pgtable_t pgtable = NULL;
+
 	/*
 	 * If we had pmd_special, we could avoid all these restrictions,
 	 * but we need to be consistent with PTEs and architectures that
@@ -799,7 +801,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, pfn);
 
-	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
+	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -848,10 +850,12 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 	spin_unlock(ptl);
 }
 
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-			pud_t *pud, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+	unsigned long addr = vmf->address & PUD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t pgprot = vma->vm_page_prot;
+
 	/*
 	 * If we had pud_special, we could avoid all these restrictions,
 	 * but we need to be consistent with PTEs and architectures that
@@ -868,7 +872,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, pfn);
 
-	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
+	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 518ebfa..4dbdec0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1572,8 +1572,9 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 	 */
 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 		SetPageHugeTemporary(page);
+		spin_unlock(&hugetlb_lock);
 		put_page(page);
-		page = NULL;
+		return NULL;
 	} else {
 		h->surplus_huge_pages++;
 		h->surplus_huge_pages_node[page_to_nid(page)]++;
@@ -3779,8 +3780,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			 * handling userfault.  Reacquire after handling
 			 * fault to make calling code simpler.
 			 */
-			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-							idx, haddr);
+			hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3888,21 +3888,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	unsigned long key[2];
 	u32 hash;
 
-	if (vma->vm_flags & VM_SHARED) {
-		key[0] = (unsigned long) mapping;
-		key[1] = idx;
-	} else {
-		key[0] = (unsigned long) mm;
-		key[1] = address >> huge_page_shift(h);
-	}
+	key[0] = (unsigned long) mapping;
+	key[1] = idx;
 
 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
 
@@ -3913,9 +3906,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	return 0;
@@ -3960,7 +3951,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
@@ -4301,6 +4292,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
 		page = pte_page(huge_ptep_get(pte));
+
+		/*
+		 * Instead of doing 'try_get_page()' below in the same_page
+		 * loop, just check the count once here.
+		 */
+		if (unlikely(page_count(page) <= 0)) {
+			if (pages) {
+				spin_unlock(ptl);
+				remainder = 0;
+				err = -ENOMEM;
+				break;
+			}
+		}
 same_page:
 		if (pages) {
 			pages[i] = mem_map_offset(page, pfn_offset);
diff --git a/mm/internal.h b/mm/internal.h
index 39f3d49..379f88b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -203,14 +203,16 @@ extern int user_min_free_kbytes;
 struct compact_control {
 	struct list_head freepages;	/* List of free pages to migrate to */
 	struct list_head migratepages;	/* List of pages being migrated */
-	struct zone *zone;
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long total_migrate_scanned;
-	unsigned long total_free_scanned;
+	unsigned int nr_freepages;	/* Number of isolated free pages */
+	unsigned int nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
+	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
+	struct zone *zone;
+	unsigned long total_migrate_scanned;
+	unsigned long total_free_scanned;
+	unsigned short fast_search_fail;/* failures to use free list searches */
+	short search_order;		/* order to start a fast search at */
 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* migratetype of direct compactor */
@@ -223,7 +225,16 @@ struct compact_control {
 	bool direct_compaction;		/* False from kcompactd or /proc/... */
 	bool whole_zone;		/* Whole zone should/has been scanned */
 	bool contended;			/* Signal lock or sched contention */
-	bool finishing_block;		/* Finishing current pageblock */
+	bool rescan;			/* Rescanning the same pageblock */
+};
+
+/*
+ * Used in direct compaction when a page should be taken from the freelists
+ * immediately when one is created during the free path.
+ */
+struct capture_control {
+	struct compact_control *cc;
+	struct page *page;
 };
 
 unsigned long
@@ -500,10 +511,16 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
 #endif
 
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
+#define ALLOC_HARDER		 0x10 /* try to alloc harder */
+#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
+#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
+#ifdef CONFIG_ZONE_DMA32
+#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
+#else
+#define ALLOC_NOFRAGMENT	  0x0
+#endif
+#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 7829a18..a606745 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1385,6 +1385,7 @@ static void scan_block(void *_start, void *_end,
 /*
  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
  */
+#ifdef CONFIG_SMP
 static void scan_large_block(void *start, void *end)
 {
 	void *next;
@@ -1396,6 +1397,7 @@ static void scan_large_block(void *start, void *end)
 		cond_resched();
 	}
 }
+#endif
 
 /*
  * Scan a memory block corresponding to a kmemleak_object. A condition is
@@ -1513,11 +1515,6 @@ static void kmemleak_scan(void)
 	}
 	rcu_read_unlock();
 
-	/* data/bss scanning */
-	scan_large_block(_sdata, _edata);
-	scan_large_block(__bss_start, __bss_stop);
-	scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
 	for_each_possible_cpu(i)
@@ -2048,6 +2045,17 @@ void __init kmemleak_init(void)
 	}
 	local_irq_restore(flags);
 
+	/* register the data/bss sections */
+	create_object((unsigned long)_sdata, _edata - _sdata,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	/* only register .data..ro_after_init if not within .data */
+	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+		create_object((unsigned long)__start_ro_after_init,
+			      __end_ro_after_init - __start_ro_after_init,
+			      KMEMLEAK_GREY, GFP_ATOMIC);
+
 	/*
 	 * This is the point where tracking allocations is safe. Automatic
 	 * scanning is started during the late initcall. Add the early logged
diff --git a/mm/memory.c b/mm/memory.c
index 2edb33a..b009e8c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1803,14 +1803,21 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			 * in may not match the PFN we have mapped if the
 			 * mapped PFN is a writeable COW page.  In the mkwrite
 			 * case we are creating a writable PTE for a shared
-			 * mapping and we expect the PFNs to match.
+			 * mapping and we expect the PFNs to match. If they
+			 * don't match, we are likely racing with block
+			 * allocation and mapping invalidation so just skip the
+			 * update.
 			 */
-			if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
+			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
+				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
 				goto out_unlock;
-			entry = *pte;
-			goto out_mkwrite;
-		} else
-			goto out_unlock;
+			}
+			entry = pte_mkyoung(*pte);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags);
+			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+				update_mmu_cache(vma, addr, pte);
+		}
+		goto out_unlock;
 	}
 
 	/* Ok, finally just insert the thing.. */
@@ -1819,7 +1826,6 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	else
 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
 	if (mkwrite) {
 		entry = pte_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags);
@@ -4215,6 +4221,7 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
+	int ret = 0;
 
 	if (unlikely(pmd_none(*vmf->pmd))) {
 		/*
@@ -4267,8 +4274,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	if (!vmf->pte) {
 		if (vma_is_anonymous(vmf->vma))
 			return do_anonymous_page(vmf);
-		else if (vmf->flags & FAULT_FLAG_SPECULATIVE)
-			return VM_FAULT_RETRY;
 		else
 			return do_fault(vmf);
 	}
@@ -4302,10 +4307,12 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		 */
 		if (vmf->flags & FAULT_FLAG_WRITE)
 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			ret = VM_FAULT_RETRY;
 	}
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	return 0;
+	return ret;
 }
 
 /*
@@ -4450,21 +4457,11 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 	/*
-	 * Can't call vm_ops service has we don't know what they would do
-	 * with the VMA.
-	 * This include huge page from hugetlbfs.
-	 */
-	if (vmf.vma->vm_ops) {
-		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
-		return VM_FAULT_RETRY;
-	}
-
-	/*
 	 * __anon_vma_prepare() requires the mmap_sem to be held
 	 * because vm_next and vm_prev must be safe. This can't be guaranteed
 	 * in the speculative path.
 	 */
-	if (unlikely(!vmf.vma->anon_vma)) {
+	if (unlikely(vma_is_anonymous(vmf.vma) && !vmf.vma->anon_vma)) {
 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
 		return VM_FAULT_RETRY;
 	}
@@ -4606,7 +4603,10 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	 * If there is no need to retry, don't return the vma to the caller.
 	 */
 	if (ret != VM_FAULT_RETRY) {
-		count_vm_event(SPECULATIVE_PGFAULT);
+		if (vma_is_anonymous(vmf.vma))
+			count_vm_event(SPECULATIVE_PGFAULT_ANON);
+		else
+			count_vm_event(SPECULATIVE_PGFAULT_FILE);
 		put_vma(vmf.vma);
 		*vma = NULL;
 	}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 48c676c..c32bcba 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -944,6 +944,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	 */
 	mem = find_memory_block(__pfn_to_section(pfn));
 	nid = mem->nid;
+	put_device(&mem->dev);
 
 	/* associate pfn range with the zone */
 	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
diff --git a/mm/migrate.c b/mm/migrate.c
index 00641e8..1c679e0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -899,7 +899,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 */
 	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
-		return -EAGAIN;
+		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
 	return migrate_page(mapping, newpage, page, mode);
 }
diff --git a/mm/mincore.c b/mm/mincore.c
index fc37afe..2732c8c07 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -169,6 +169,22 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return 0;
 }
 
+static inline bool can_do_mincore(struct vm_area_struct *vma)
+{
+	if (vma_is_anonymous(vma))
+		return true;
+	if (!vma->vm_file)
+		return false;
+	/*
+	 * Reveal pagecache information only for non-anonymous mappings that
+	 * correspond to the files the calling process could (if tried) open
+	 * for writing; otherwise we'd be including shared non-exclusive
+	 * mappings, which opens a side channel.
+	 */
+	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
+		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
+}
+
 /*
  * Do a chunk of "sys_mincore()". We've already checked
  * all the arguments, we hold the mmap semaphore: we should
@@ -189,8 +205,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
-	mincore_walk.mm = vma->vm_mm;
 	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
+	if (!can_do_mincore(vma)) {
+		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
+		memset(vec, 1, pages);
+		return pages;
+	}
+	mincore_walk.mm = vma->vm_mm;
 	err = walk_page_range(addr, end, &mincore_walk);
 	if (err < 0)
 		return err;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f3fd85..8f503c1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2602,7 +2603,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	vma = find_vma_prev(mm, addr, &prev);
 	if (vma && (vma->vm_start <= addr))
 		return vma;
-	if (!prev || expand_stack(prev, addr))
+	/* don't alter vm_end if the coredump is running */
+	if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
 		return NULL;
 	if (prev->vm_flags & VM_LOCKED)
 		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2628,6 +2630,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 		return vma;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		return NULL;
+	/* don't alter vm_start if the coredump is running */
+	if (!mmget_still_valid(mm))
+		return NULL;
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3dc699..375f7d9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -270,6 +270,20 @@ compound_page_dtor * const compound_page_dtors[] = {
  */
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
+int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 /*
@@ -786,6 +800,57 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 	return 0;
 }
 
+#ifdef CONFIG_COMPACTION
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+	struct capture_control *capc = current->capture_control;
+
+	return capc &&
+		!(current->flags & PF_KTHREAD) &&
+		!capc->page &&
+		capc->cc->zone == zone &&
+		capc->cc->direct_compaction ? capc : NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+		   int order, int migratetype)
+{
+	if (!capc || order != capc->cc->order)
+		return false;
+
+	/* Do not accidentally pollute CMA or isolated regions */
+	if (is_migrate_cma(migratetype) ||
+	    is_migrate_isolate(migratetype))
+		return false;
+
+	/*
+	 * Do not let lower order allocations pollute a movable pageblock.
+	 * This might let an unmovable request use a reclaimable pageblock
+	 * and vice-versa but no more than normal fallback logic which can
+	 * have trouble finding a high-order free page.
+	 */
+	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
+		return false;
+
+	capc->page = page;
+	return true;
+}
+
+#else
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+	return NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+		   int order, int migratetype)
+{
+	return false;
+}
+#endif /* CONFIG_COMPACTION */
+
 /*
  * Freeing function for a buddy system allocator.
  *
@@ -819,6 +884,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long uninitialized_var(buddy_pfn);
 	struct page *buddy;
 	unsigned int max_order;
+	struct capture_control *capc = task_capc(zone);
 
 	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
@@ -834,6 +900,11 @@ static inline void __free_one_page(struct page *page,
 
 continue_merging:
 	while (order < max_order - 1) {
+		if (compaction_capture(capc, page, order, migratetype)) {
+			__mod_zone_freepage_state(zone, -(1 << order),
+								migratetype);
+			return;
+		}
 		buddy_pfn = __find_buddy_pfn(pfn, order);
 		buddy = page + (buddy_pfn - pfn);
 
@@ -2153,6 +2224,33 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 	return false;
 }
 
+static inline void boost_watermark(struct zone *zone)
+{
+	unsigned long max_boost;
+
+	if (!watermark_boost_factor)
+		return;
+
+	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
+			watermark_boost_factor, 10000);
+
+	/*
+	 * high watermark may be uninitialised if fragmentation occurs
+	 * very early in boot so do not boost. We do not fall
+	 * through and boost by pageblock_nr_pages as failing
+	 * allocations that early means that reclaim is not going
+	 * to help and it may even be impossible to reclaim the
+	 * boosted watermark resulting in a hang.
+	 */
+	if (!max_boost)
+		return;
+
+	max_boost = max(pageblock_nr_pages, max_boost);
+
+	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
+		max_boost);
+}
+
 /*
  * This function implements actual steal behaviour. If order is large enough,
  * we can steal whole pageblock. If not, we first move freepages in this
@@ -2162,7 +2260,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
  * itself, so pages freed in the future will be put on the correct free list.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
-					int start_type, bool whole_block)
+		unsigned int alloc_flags, int start_type, bool whole_block)
 {
 	unsigned int current_order = page_order(page);
 	struct free_area *area;
@@ -2184,6 +2282,15 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 		goto single_page;
 	}
 
+	/*
+	 * Boost watermarks to increase reclaim pressure to reduce the
+	 * likelihood of future fallbacks. Wake kswapd now as the node
+	 * may be balanced overall and kswapd will not wake naturally.
+	 */
+	boost_watermark(zone);
+	if (alloc_flags & ALLOC_KSWAPD)
+		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+
 	/* We are not allowed to try stealing from the whole block */
 	if (!whole_block)
 		goto single_page;
@@ -2399,20 +2506,30 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * condition simpler.
  */
 static __always_inline bool
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+						unsigned int alloc_flags)
 {
 	struct free_area *area;
 	int current_order;
+	int min_order = order;
 	struct page *page;
 	int fallback_mt;
 	bool can_steal;
 
 	/*
+	 * Do not steal pages from freelists belonging to other pageblocks
+	 * i.e. orders < pageblock_order. If there are no local zones free,
+	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
+	 */
+	if (alloc_flags & ALLOC_NOFRAGMENT)
+		min_order = pageblock_order;
+
+	/*
 	 * Find the largest available free page in the other list. This roughly
 	 * approximates finding the pageblock with the most free pages, which
 	 * would be too costly to do exactly.
 	 */
-	for (current_order = MAX_ORDER - 1; current_order >= order;
+	for (current_order = MAX_ORDER - 1; current_order >= min_order;
 				--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
@@ -2457,7 +2574,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 	page = list_first_entry(&area->free_list[fallback_mt],
 							struct page, lru);
 
-	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
+	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
+								can_steal);
 
 	trace_mm_page_alloc_extfrag(page, order, current_order,
 		start_migratetype, fallback_mt);
@@ -2471,14 +2589,16 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
  * Call me with the zone->lock already held.
  */
 static __always_inline struct page *
-__rmqueue(struct zone *zone, unsigned int order, int migratetype)
+__rmqueue(struct zone *zone, unsigned int order, int migratetype,
+						unsigned int alloc_flags)
 {
 	struct page *page;
 
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 
-	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
+	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+								alloc_flags))
 		goto retry;
 
 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
@@ -2510,7 +2630,7 @@ static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype)
+			int migratetype, unsigned int alloc_flags)
 {
 	int i, alloced = 0;
 
@@ -2526,7 +2646,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (is_migrate_cma(migratetype))
 			page = __rmqueue_cma(zone, order);
 		else
-			page = __rmqueue(zone, order, migratetype);
+			page = __rmqueue(zone, order, migratetype, alloc_flags);
 
 		if (unlikely(page == NULL))
 			break;
@@ -2569,14 +2689,14 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
  */
 static struct list_head *get_populated_pcp_list(struct zone *zone,
 			unsigned int order, struct per_cpu_pages *pcp,
-			int migratetype)
+			int migratetype, unsigned int alloc_flags)
 {
 	struct list_head *list = &pcp->lists[migratetype];
 
 	if (list_empty(list)) {
 		pcp->count += rmqueue_bulk(zone, order,
 				pcp->batch, list,
-				migratetype);
+				migratetype, alloc_flags);
 
 		if (list_empty(list))
 			list = NULL;
@@ -2945,7 +3065,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		 * watermark, because we already know our high-order page
 		 * exists.
 		 */
-		watermark = min_wmark_pages(zone) + (1UL << order);
+		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
 			return 0;
 
@@ -3005,6 +3125,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 
 /* Remove page from the per-cpu list, caller must protect the list */
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
+			unsigned int alloc_flags,
 			struct per_cpu_pages *pcp,
 			gfp_t gfp_flags)
 {
@@ -3016,7 +3137,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 		if (migratetype == MIGRATE_MOVABLE &&
 				gfp_flags & __GFP_CMA) {
 			list = get_populated_pcp_list(zone, 0, pcp,
-					get_cma_migrate_type());
+					get_cma_migrate_type(), alloc_flags);
 		}
 
 		if (list == NULL) {
@@ -3025,7 +3146,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 			 * free CMA pages.
 			 */
 			list = get_populated_pcp_list(zone, 0, pcp,
-					migratetype);
+					migratetype, alloc_flags);
 			if (unlikely(list == NULL) ||
 					unlikely(list_empty(list)))
 				return NULL;
@@ -3043,7 +3164,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 /* Lock and remove page from the per-cpu list */
 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 			struct zone *zone, unsigned int order,
-			gfp_t gfp_flags, int migratetype)
+			gfp_t gfp_flags, int migratetype,
+			unsigned int alloc_flags)
 {
 	struct per_cpu_pages *pcp;
 	struct page *page;
@@ -3051,7 +3173,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 
 	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	page = __rmqueue_pcplist(zone,  migratetype, pcp,
+	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp,
 				 gfp_flags);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
@@ -3075,7 +3197,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 
 	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
-				gfp_flags, migratetype);
+				gfp_flags, migratetype, alloc_flags);
 		goto out;
 	}
 
@@ -3100,7 +3222,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 			page = __rmqueue_cma(zone, order);
 
 		if (!page)
-			page = __rmqueue(zone, order, migratetype);
+			page = __rmqueue(zone, order, migratetype, alloc_flags);
 	} while (page && check_new_pages(page, order));
 
 	spin_unlock(&zone->lock);
@@ -3114,6 +3236,12 @@ struct page *rmqueue(struct zone *preferred_zone,
 	local_irq_restore(flags);
 
 out:
+	/* Separate test+clear to avoid unnecessary atomics */
+	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+	}
+
 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
 	return page;
 
@@ -3352,6 +3480,43 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 #endif	/* CONFIG_NUMA */
 
 /*
+ * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
+ * fragmentation is subtle. If the preferred zone was HIGHMEM then
+ * premature use of a lower zone may cause lowmem pressure problems that
+ * are worse than fragmentation. If the next zone is ZONE_DMA then it is
+ * probably too small. It only makes sense to spread allocations to avoid
+ * fragmentation between the Normal and DMA32 zones.
+ */
+static inline unsigned int
+alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
+{
+	unsigned int alloc_flags = 0;
+
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
+#ifdef CONFIG_ZONE_DMA32
+	if (!zone)
+		return alloc_flags;
+
+	if (zone_idx(zone) != ZONE_NORMAL)
+		return alloc_flags;
+
+	/*
+	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
+	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
+	 * on UMA that if Normal is populated then so is DMA32.
+	 */
+	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
+	if (nr_online_nodes > 1 && !populated_zone(--zone))
+		return alloc_flags;
+
+	alloc_flags |= ALLOC_NOFRAGMENT;
+#endif /* CONFIG_ZONE_DMA32 */
+	return alloc_flags;
+}
+
+/*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
@@ -3359,14 +3524,18 @@ static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 						const struct alloc_context *ac)
 {
-	struct zoneref *z = ac->preferred_zoneref;
+	struct zoneref *z;
 	struct zone *zone;
 	struct pglist_data *last_pgdat_dirty_limit = NULL;
+	bool no_fallback;
 
+retry:
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
+	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
+	z = ac->preferred_zoneref;
 	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
 								ac->nodemask) {
 		struct page *page;
@@ -3405,7 +3574,23 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			}
 		}
 
-		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+		if (no_fallback && nr_online_nodes > 1 &&
+		    zone != ac->preferred_zoneref->zone) {
+			int local_nid;
+
+			/*
+			 * If moving to a remote node, retry but allow
+			 * fragmenting fallbacks. Locality is more important
+			 * than fragmentation avoidance.
+			 */
+			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			if (zone_to_nid(zone) != local_nid) {
+				alloc_flags &= ~ALLOC_NOFRAGMENT;
+				goto retry;
+			}
+		}
+
+		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 		if (!zone_watermark_fast(zone, order, mark,
 				       ac_classzone_idx(ac), alloc_flags)) {
 			int ret;
@@ -3472,6 +3657,15 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 	}
 
+	/*
+	 * It's possible on a UMA machine to get through all zones that are
+	 * fragmented. If avoiding fragmentation, reset and try again.
+	 */
+	if (no_fallback) {
+		alloc_flags &= ~ALLOC_NOFRAGMENT;
+		goto retry;
+	}
+
 	return NULL;
 }
 
@@ -3658,7 +3852,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, enum compact_result *compact_result)
 {
-	struct page *page;
+	struct page *page = NULL;
 	unsigned long pflags;
 	unsigned int noreclaim_flag;
 
@@ -3669,21 +3863,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	noreclaim_flag = memalloc_noreclaim_save();
 
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-									prio);
+								prio, &page);
 
 	memalloc_noreclaim_restore(noreclaim_flag);
 	psi_memstall_leave(&pflags);
 
-	if (*compact_result <= COMPACT_INACTIVE)
-		return NULL;
-
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
 	 * count a compaction stall
 	 */
 	count_vm_event(COMPACTSTALL);
 
-	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
+	/* Prep a captured page if available */
+	if (page)
+		prep_new_page(page, order, gfp_mask, alloc_flags);
+
+	/* Try get a page from the freelist if available */
+	if (!page)
+		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 
 	if (page) {
 		struct zone *zone = page_zone(page);
@@ -4017,6 +4214,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
 #ifdef CONFIG_CMA
 	if ((gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) &&
 				(gfp_mask & __GFP_CMA))
@@ -4250,7 +4450,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4308,7 +4508,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4535,6 +4735,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 
 	finalise_ac(gfp_mask, &ac);
 
+	/*
+	 * Forbid the first pass from falling back to types that fragment
+	 * memory until all local zones are considered.
+	 */
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
 	if (likely(page))
@@ -4877,7 +5083,7 @@ long si_mem_available(void)
 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	for_each_zone(zone)
-		wmark_low += zone->watermark[WMARK_LOW];
+		wmark_low += low_wmark_pages(zone);
 
 	/*
 	 * Estimate the amount of memory available for userspace allocations,
@@ -7419,13 +7625,13 @@ static void __setup_per_zone_wmarks(void)
 
 			min_pages = zone->managed_pages / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
-			zone->watermark[WMARK_MIN] = min_pages;
+			zone->_watermark[WMARK_MIN] = min_pages;
 		} else {
 			/*
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->watermark[WMARK_MIN] = min;
+			zone->_watermark[WMARK_MIN] = min;
 		}
 
 		/*
@@ -7437,10 +7643,11 @@ static void __setup_per_zone_wmarks(void)
 			    mult_frac(zone->managed_pages,
 				      watermark_scale_factor, 10000));
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) +
 					low + min;
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) +
 					low + min * 2;
+		zone->watermark_boost = 0;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -7541,6 +7748,18 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
+int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
diff --git a/mm/percpu.c b/mm/percpu.c
index 4b90682..41e58f3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2529,8 +2529,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		ai->groups[group].base_offset = areas[group] - base;
 	}
 
-	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
 		ai->dyn_size, ai->unit_size);
 
 	rc = pcpu_setup_first_chunk(ai, base);
@@ -2651,8 +2651,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	}
 
 	/* we're ready, commit */
-	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
-		unit_pages, psize_str, vm.addr, ai->static_size,
+	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+		unit_pages, psize_str, ai->static_size,
 		ai->reserved_size, ai->dyn_size);
 
 	rc = pcpu_setup_first_chunk(ai, vm.addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index 2311dfe..8ad4588 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2023,10 +2023,10 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 
 	sgp = SGP_CACHE;
 
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	if ((vmf->vma_flags & VM_NOHUGEPAGE) ||
 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
+	else if (vmf->vma_flags & VM_HUGEPAGE)
 		sgp = SGP_HUGE;
 
 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
diff --git a/mm/slab.c b/mm/slab.c
index b8e0ec7..018d324 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4305,7 +4305,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 
 static int leaks_show(struct seq_file *m, void *p)
 {
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+					       root_caches_node);
 	struct page *page;
 	struct kmem_cache_node *n;
 	const char *name;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 5d7f9a6..c8828c3 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -282,7 +282,7 @@ int free_swap_slot(swp_entry_t entry)
 
 	si = swp_swap_info(entry);
 	cache = raw_cpu_ptr(&swp_slots);
-	if (!(si->flags & SWP_SYNCHRONOUS_IO) &&
+	if ((si && !(si->flags & SWP_SYNCHRONOUS_IO)) &&
 				use_swap_slot_cache && cache->slots_ret) {
 		spin_lock_irq(&cache->free_lock);
 		/* Swap slots cache may be deactivated before acquiring lock */
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 458acda..7529d3f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -271,8 +271,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		 */
 		idx = linear_page_index(dst_vma, dst_addr);
 		mapping = dst_vma->vm_file->f_mapping;
-		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
-								idx, dst_addr);
+		hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		err = -ENOMEM;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c3303c5..4589a4b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,9 @@ struct scan_control {
 	/* Can pages be swapped as part of reclaim? */
 	unsigned int may_swap:1;
 
+	/* e.g. boosted watermark reclaim leaves slabs alone */
+	unsigned int may_shrinkslab:1;
+
 	/*
 	 * Cgroups are not reclaimed below their configured memory.low,
 	 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2259,7 +2262,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-				 struct mem_cgroup *memcg,
 				 struct scan_control *sc, bool actual_reclaim)
 {
 	enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
@@ -2280,16 +2282,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 	inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
 	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
-	if (memcg)
-		refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-	else
-		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
 	/*
 	 * When refaults are being observed, it means a new workingset
 	 * is being established. Disable active list protection to get
 	 * rid of the stale workingset quickly.
 	 */
+	refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
 	if (file && actual_reclaim && lruvec->refaults != refaults) {
 		inactive_ratio = 0;
 	} else {
@@ -2310,12 +2308,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-				 struct lruvec *lruvec, struct mem_cgroup *memcg,
-				 struct scan_control *sc)
+				 struct lruvec *lruvec, struct scan_control *sc)
 {
 	if (is_active_lru(lru)) {
-		if (inactive_list_is_low(lruvec, is_file_lru(lru),
-					 memcg, sc, true))
+		if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		return 0;
 	}
@@ -2415,7 +2411,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			 * anonymous pages on the LRU in eligible zones.
 			 * Otherwise, the small LRU gets thrashed.
 			 */
-			if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
+			if (!inactive_list_is_low(lruvec, false, sc, false) &&
 			    lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
 					>> sc->priority) {
 				scan_balance = SCAN_ANON;
@@ -2434,7 +2430,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	 * system is under heavy pressure.
 	 */
 	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
-	    !inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+	    !inactive_list_is_low(lruvec, true, sc, false) &&
 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
@@ -2587,7 +2583,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 				nr[lru] -= nr_to_scan;
 
 				nr_reclaimed += shrink_list(lru, nr_to_scan,
-							    lruvec, memcg, sc);
+							    lruvec, sc);
 			}
 		}
 
@@ -2654,7 +2650,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+	if (inactive_list_is_low(lruvec, false, sc, true))
 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
 				   sc, LRU_ACTIVE_ANON);
 }
@@ -2804,8 +2800,10 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
-			shrink_slab(sc->gfp_mask, pgdat->node_id,
+			if (sc->may_shrinkslab) {
+				shrink_slab(sc->gfp_mask, pgdat->node_id,
 				    memcg, sc->priority);
+			}
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -3059,12 +3057,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
 		unsigned long refaults;
 		struct lruvec *lruvec;
 
-		if (memcg)
-			refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-		else
-			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
 		lruvec = mem_cgroup_lruvec(pgdat, memcg);
+		refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
 		lruvec->refaults = refaults;
 	} while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
 }
@@ -3294,6 +3288,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = 1,
+		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3338,6 +3333,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
+		.may_shrinkslab = 1,
 	};
 	unsigned long lru_pages;
 
@@ -3384,6 +3380,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = may_swap,
+		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3426,7 +3423,7 @@ static void age_active_anon(struct pglist_data *pgdat,
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-		if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+		if (inactive_list_is_low(lruvec, false, sc, true))
 			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
 					   sc, LRU_ACTIVE_ANON);
 
@@ -3434,6 +3431,30 @@ static void age_active_anon(struct pglist_data *pgdat,
 	} while (memcg);
 }
 
+static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
+{
+	int i;
+	struct zone *zone;
+
+	/*
+	 * Check for watermark boosts top-down as the higher zones
+	 * are more likely to be boosted. Both watermarks and boosts
+	 * should not be checked at the same time as reclaim would
+	 * start prematurely when there is no boosting and a lower
+	 * zone is balanced.
+	 */
+	for (i = classzone_idx; i >= 0; i--) {
+		zone = pgdat->node_zones + i;
+		if (!managed_zone(zone))
+			continue;
+
+		if (zone->watermark_boost)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Returns true if there is an eligible zone balanced for the request order
  * and classzone_idx
@@ -3444,6 +3465,10 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 	unsigned long mark = -1;
 	struct zone *zone;
 
+	/*
+	 * Check watermarks bottom-up as lower zones are more likely to
+	 * meet watermarks.
+	 */
 	for (i = 0; i <= classzone_idx; i++) {
 		zone = pgdat->node_zones + i;
 
@@ -3572,14 +3597,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
 	unsigned long pflags;
+	unsigned long nr_boost_reclaim;
+	unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
+	bool boosted;
 	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.order = order,
-		.priority = DEF_PRIORITY,
-		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
-		.may_swap = 1,
 	};
 
 	psi_memstall_enter(&pflags);
@@ -3587,9 +3612,28 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
 	count_vm_event(PAGEOUTRUN);
 
+	/*
+	 * Account for the reclaim boost. Note that the zone boost is left in
+	 * place so that parallel allocations that are near the watermark will
+	 * stall or direct reclaim until kswapd is finished.
+	 */
+	nr_boost_reclaim = 0;
+	for (i = 0; i <= classzone_idx; i++) {
+		zone = pgdat->node_zones + i;
+		if (!managed_zone(zone))
+			continue;
+
+		nr_boost_reclaim += zone->watermark_boost;
+		zone_boosts[i] = zone->watermark_boost;
+	}
+	boosted = nr_boost_reclaim;
+
+restart:
+	sc.priority = DEF_PRIORITY;
 	do {
 		unsigned long nr_reclaimed = sc.nr_reclaimed;
 		bool raise_priority = true;
+		bool balanced;
 		bool ret;
 
 		sc.reclaim_idx = classzone_idx;
@@ -3616,13 +3660,40 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		}
 
 		/*
-		 * Only reclaim if there are no eligible zones. Note that
-		 * sc.reclaim_idx is not used as buffer_heads_over_limit may
-		 * have adjusted it.
+		 * If the pgdat is imbalanced then ignore boosting and preserve
+		 * the watermarks for a later time and restart. Note that the
+		 * zone watermarks will still be reset at the end of balancing
+		 * on the grounds that the normal reclaim should be enough to
+		 * re-evaluate if boosting is required when kswapd next wakes.
 		 */
-		if (pgdat_balanced(pgdat, sc.order, classzone_idx))
+		balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
+		if (!balanced && nr_boost_reclaim) {
+			nr_boost_reclaim = 0;
+			goto restart;
+		}
+
+		/*
+		 * If boosting is not active then only reclaim if there are no
+		 * eligible zones. Note that sc.reclaim_idx is not used as
+		 * buffer_heads_over_limit may have adjusted it.
+		 */
+		if (!nr_boost_reclaim && balanced)
 			goto out;
 
+		/* Limit the priority of boosting to avoid reclaim writeback */
+		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
+			raise_priority = false;
+
+		/*
+		 * Do not writeback or swap pages for boosted reclaim. The
+		 * intent is to relieve pressure not issue sub-optimal IO
+		 * from reclaim context. If no pages are reclaimed, the
+		 * reclaim will be aborted.
+		 */
+		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
+		sc.may_swap = !nr_boost_reclaim;
+		sc.may_shrinkslab = !nr_boost_reclaim;
+
 		/*
 		 * Do some background aging of the anon list, to give
 		 * pages a chance to be referenced before reclaiming. All
@@ -3674,6 +3745,16 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * progress in reclaiming pages
 		 */
 		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
+		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
+
+		/*
+		 * If reclaim made no progress for a boost, stop reclaim as
+		 * IO cannot be queued and it could be an infinite loop in
+		 * extreme circumstances.
+		 */
+		if (nr_boost_reclaim && !nr_reclaimed)
+			break;
+
 		if (raise_priority || !nr_reclaimed)
 			sc.priority--;
 	} while (sc.priority >= 1);
@@ -3682,6 +3763,28 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		pgdat->kswapd_failures++;
 
 out:
+	/* If reclaim was boosted, account for the reclaim done in this pass */
+	if (boosted) {
+		unsigned long flags;
+
+		for (i = 0; i <= classzone_idx; i++) {
+			if (!zone_boosts[i])
+				continue;
+
+			/* Increments are under the zone lock */
+			zone = pgdat->node_zones + i;
+			spin_lock_irqsave(&zone->lock, flags);
+			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
+			spin_unlock_irqrestore(&zone->lock, flags);
+		}
+
+		/*
+	 * As there is now likely space, wakeup kcompactd to defragment
+		 * pageblocks.
+		 */
+		wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
+	}
+
 	snapshot_refaults(NULL, pgdat);
 	__fs_reclaim_release();
 	psi_memstall_leave(&pflags);
@@ -3910,7 +4013,8 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
 
 	/* Hopeless node, leave it to direct reclaim if possible */
 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
-	    pgdat_balanced(pgdat, order, classzone_idx)) {
+	    (pgdat_balanced(pgdat, order, classzone_idx) &&
+	     !pgdat_watermark_boosted(pgdat, classzone_idx))) {
 		/*
 		 * There may be plenty of free memory available, but it's too
 		 * fragmented for high-order allocations.  Wake up kcompactd
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bfec4ac..2cc2ec7 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1275,13 +1275,8 @@ const char * const vmstat_text[] = {
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
-#else
-	"", /* nr_tlb_remote_flush */
-	"", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
@@ -1295,7 +1290,8 @@ const char * const vmstat_text[] = {
 	"swap_ra_hit",
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-	"speculative_pgfault"
+	"speculative_pgfault_anon",
+	"speculative_pgfault_file",
 #endif
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 };
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 546af0e..fce3b7e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -368,10 +368,12 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	ifrr.ifr_ifru = ifr->ifr_ifru;
 
 	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		if (!net_eq(dev_net(dev), &init_net))
+			break;
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-	case SIOCSHWTSTAMP:
 	case SIOCGHWTSTAMP:
 		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
 			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d7f5cf5..ad4f829 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+	if (arg < 0 || arg >= MAX_LEC_ITF)
+		return -EINVAL;
+	arg = array_index_nospec(arg, MAX_LEC_ITF);
+	if (!dev_lec[arg])
 		return -EINVAL;
 	vcc->proto_data = dev_lec[arg];
 	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 		i = arg;
 	if (arg >= MAX_LEC_ITF)
 		return -EINVAL;
+	i = array_index_nospec(arg, MAX_LEC_ITF);
 	if (!dev_lec[i]) {
 		int size;
 
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index ef0dec2..5da183b 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
 		ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
-		/* free the TID stats immediately */
-		cfg80211_sinfo_release_content(&sinfo);
+		if (!ret) {
+			/* free the TID stats immediately */
+			cfg80211_sinfo_release_content(&sinfo);
+		}
 
 		dev_put(real_netdev);
 		if (ret == -ENOENT) {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5f1aeed..85faf25 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 				 const u8 *mac, const unsigned short vid)
 {
 	struct batadv_bla_claim search_claim, *claim;
+	struct batadv_bla_claim *claim_removed_entry;
+	struct hlist_node *claim_removed_node;
 
 	ether_addr_copy(search_claim.addr, mac);
 	search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
 		   mac, batadv_print_vid(vid));
 
-	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-			   batadv_choose_claim, claim);
-	batadv_claim_put(claim); /* reference from the hash is gone */
+	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+						batadv_compare_claim,
+						batadv_choose_claim, claim);
+	if (!claim_removed_node)
+		goto free_claim;
 
+	/* reference from the hash is gone */
+	claim_removed_entry = hlist_entry(claim_removed_node,
+					  struct batadv_bla_claim, hash_entry);
+	batadv_claim_put(claim_removed_entry);
+
+free_claim:
 	/* don't need the reference from hash_find() anymore */
 	batadv_claim_put(claim);
 }
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index a60bacf..2895e3b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1394,7 +1394,6 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 			   hw_src, &ip_src, hw_dst, &ip_dst,
 			   dat_entry->mac_addr,	&dat_entry->ip);
 		dropped = true;
-		goto out;
 	}
 
 	/* Update our internal cache with both the IP addresses the node got
@@ -1403,6 +1402,9 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
+	if (dropped)
+		goto out;
+
 	/* If BLA is enabled, only forward ARP replies if we have claimed the
 	 * source of the ARP reply or if no one else of the same backbone has
 	 * already claimed that client. This prevents that different gateways
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 69c0d85..79b8a2d 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -160,6 +160,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	spin_lock_init(&bat_priv->tt.commit_lock);
 	spin_lock_init(&bat_priv->gw.list_lock);
 #ifdef CONFIG_BATMAN_ADV_MCAST
+	spin_lock_init(&bat_priv->mcast.mla_lock);
 	spin_lock_init(&bat_priv->mcast.want_lists_lock);
 #endif
 	spin_lock_init(&bat_priv->tvlv.container_list_lock);
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 86725d7..b90fe25 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -325,8 +325,6 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
  * translation table except the ones listed in the given mcast_list.
  *
  * If mcast_list is NULL then all are retracted.
- *
- * Do not call outside of the mcast worker! (or cancel mcast worker first)
  */
 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 					struct hlist_head *mcast_list)
@@ -334,8 +332,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
-	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
-
 	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
 				  list) {
 		if (mcast_list &&
@@ -359,8 +355,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
  *
  * Adds multicast listener announcements from the given mcast_list to the
  * translation table if they have not been added yet.
- *
- * Do not call outside of the mcast worker! (or cancel mcast worker first)
  */
 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 				    struct hlist_head *mcast_list)
@@ -368,8 +362,6 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
-	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
-
 	if (!mcast_list)
 		return;
 
@@ -658,7 +650,10 @@ static void batadv_mcast_mla_update(struct work_struct *work)
 	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
 	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
 
+	spin_lock(&bat_priv->mcast.mla_lock);
 	__batadv_mcast_mla_update(bat_priv);
+	spin_unlock(&bat_priv->mcast.mla_lock);
+
 	batadv_mcast_start_timer(bat_priv);
 }
 
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d21624c..359ec1a 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
 				  struct batadv_tt_global_entry *tt_global,
 				  const char *message)
 {
+	struct batadv_tt_global_entry *tt_removed_entry;
+	struct hlist_node *tt_removed_node;
+
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Deleting global tt entry %pM (vid: %d): %s\n",
 		   tt_global->common.addr,
 		   batadv_print_vid(tt_global->common.vid), message);
 
-	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-			   batadv_choose_tt, &tt_global->common);
-	batadv_tt_global_entry_put(tt_global);
+	tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+					     batadv_compare_tt,
+					     batadv_choose_tt,
+					     &tt_global->common);
+	if (!tt_removed_node)
+		return;
+
+	/* drop reference of removed hash entry */
+	tt_removed_entry = hlist_entry(tt_removed_node,
+				       struct batadv_tt_global_entry,
+				       common.hash_entry);
+	batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1332,9 +1344,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 			   unsigned short vid, const char *message,
 			   bool roaming)
 {
+	struct batadv_tt_local_entry *tt_removed_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
-	void *tt_entry_exists;
+	struct hlist_node *tt_removed_node;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1363,15 +1376,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 	 */
 	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+	tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
 					     batadv_compare_tt,
 					     batadv_choose_tt,
 					     &tt_local_entry->common);
-	if (!tt_entry_exists)
+	if (!tt_removed_node)
 		goto out;
 
-	/* extra call to free the local tt entry */
-	batadv_tt_local_entry_put(tt_local_entry);
+	/* drop reference of removed hash entry */
+	tt_removed_entry = hlist_entry(tt_removed_node,
+				       struct batadv_tt_local_entry,
+				       common.hash_entry);
+	batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
 	if (tt_local_entry)
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 343d304..eeee3e6 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1216,6 +1216,11 @@ struct batadv_priv_mcast {
 	unsigned char bridged:1;
 
 	/**
+	 * @mla_lock: a lock protecting mla_list and mla_flags
+	 */
+	spinlock_t mla_lock;
+
+	/**
 	 * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
 	 *  traffic
 	 */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bd4978c..3cf0764 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1276,6 +1276,14 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
 		return 0;
 
+	/* The minimum encryption key size needs to be enforced by the
+	 * host stack before establishing any L2CAP connections. The
+	 * specification in theory allows a minimum of 1, but to align
+	 * BR/EDR and LE transports, a minimum of 7 is chosen.
+	 */
+	if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+		return 0;
+
 	return 1;
 }
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index a06f030..5afd67e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4274,6 +4274,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
 		return;
 	}
 
+	/* If we reach this point this event matches the last command sent */
+	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
+
 	/* If the command succeeded and there's still more commands in
 	 * this request the request is not yet complete.
 	 */
@@ -4384,6 +4387,8 @@ static void hci_cmd_work(struct work_struct *work)
 
 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
 		if (hdev->sent_cmd) {
+			if (hci_req_status_pend(hdev))
+				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
 			atomic_dec(&hdev->cmd_cnt);
 			hci_send_frame(hdev, skb);
 			if (test_bit(HCI_RESET, &hdev->flags))
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7f800c3..3e7badb 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3357,6 +3357,12 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
 			     req_complete_skb);
 
+	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+		bt_dev_err(hdev,
+			   "unexpected event for opcode 0x%4.4x", *opcode);
+		return;
+	}
+
 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
 		queue_work(hdev->workqueue, &hdev->cmd_work);
 }
@@ -3464,6 +3470,12 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
 				     req_complete_skb);
 
+	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+		bt_dev_err(hdev,
+			   "unexpected event for opcode 0x%4.4x", *opcode);
+		return;
+	}
+
 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
 		queue_work(hdev->workqueue, &hdev->cmd_work);
 }
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e8c9ef1..9448ebd 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -46,6 +46,11 @@ void hci_req_purge(struct hci_request *req)
 	skb_queue_purge(&req->cmd_q);
 }
 
+bool hci_req_status_pend(struct hci_dev *hdev)
+{
+	return hdev->req_status == HCI_REQ_PEND;
+}
+
 static int req_run(struct hci_request *req, hci_req_complete_t complete,
 		   hci_req_complete_skb_t complete_skb)
 {
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 692cc8b..55b2050 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -37,6 +37,7 @@ struct hci_request {
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
 void hci_req_purge(struct hci_request *req);
+bool hci_req_status_pend(struct hci_dev *hdev);
 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 1eaac01..7f36fa7 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -76,6 +76,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 			sockfd_put(csock);
 			return err;
 		}
+		ca.name[sizeof(ca.name)-1] = 0;
 
 		err = hidp_connection_add(&ca, csock, isock);
 		if (!err && copy_to_user(argp, &ca, sizeof(ca)))
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0363f1b..ed2b600 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -603,13 +603,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 	call_netdevice_notifiers(NETDEV_JOIN, dev);
 
 	err = dev_set_allmulti(dev, 1);
-	if (err)
-		goto put_back;
+	if (err) {
+		kfree(p);	/* kobject not yet init'd, manually free */
+		goto err1;
+	}
 
 	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
 				   SYSFS_BRIDGE_PORT_ATTR);
 	if (err)
-		goto err1;
+		goto err2;
 
 	err = br_sysfs_addif(p);
 	if (err)
@@ -692,12 +694,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
 	kobject_put(&p->kobj);
-	p = NULL; /* kobject_put frees */
-err1:
 	dev_set_allmulti(dev, -1);
-put_back:
+err1:
 	dev_put(dev);
-	kfree(p);
 	return err;
 }
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 7207427..fed0ff4 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -195,13 +195,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
 	__br_handle_local_finish(skb);
 
-	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-	br_pass_frame_up(skb);
-	return 0;
+	/* return 1 to signal the okfn() was called so it's ok to use the skb */
+	return 1;
 }
 
 /*
@@ -278,10 +275,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 				goto forward;
 		}
 
-		/* Deliver packet to local host only */
-		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-			NULL, skb, skb->dev, NULL, br_handle_local_finish);
-		return RX_HANDLER_CONSUMED;
+		/* The else clause should be hit when nf_hook():
+		 *   - returns < 0 (drop/error)
+		 *   - returns = 0 (stolen/nf_queue)
+		 * Thus return 1 from the okfn() to signal the skb is ok to pass
+		 */
+		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+			    br_handle_local_finish) == 1) {
+			return RX_HANDLER_PASS;
+		} else {
+			return RX_HANDLER_CONSUMED;
+		}
 	}
 
 forward:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 20ed7ad..75901c4 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2152,7 +2152,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 	__br_multicast_open(br, query);
 
-	list_for_each_entry(port, &br->port_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
@@ -2164,6 +2165,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
 			br_multicast_enable(&port->ip6_own_query);
 #endif
 	}
+	rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 3b0a03b..212c184 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -515,6 +515,7 @@ static unsigned int br_nf_pre_routing(void *priv,
 	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
 
 	skb->protocol = htons(ETH_P_IP);
+	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
 
 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
 		skb->dev, NULL,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 5811208..09d5e0c 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
 	nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
 	skb->protocol = htons(ETH_P_IPV6);
+	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
 		skb->dev, NULL,
 		br_nf_pre_routing_finish_ipv6);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f77888e..0bb4d71 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 		if (match_kern)
 			match_kern->match_size = ret;
 
-		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+		/* rule should have no remaining data after target */
+		if (type == EBT_COMPAT_TARGET && size_left)
 			return -EINVAL;
 
 		match32 = (struct compat_ebt_entry_mwt *) buf;
diff --git a/net/core/dev.c b/net/core/dev.c
index 54dd11e..6851fa6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1182,7 +1182,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
 	BUG_ON(!dev_net(dev));
 
 	net = dev_net(dev);
-	if (dev->flags & IFF_UP)
+
+	/* Some auto-enslaved devices e.g. failover slaves are
+	 * special, as userspace might rename the device after
+	 * the interface had been brought up and running since
+	 * the point kernel initiated auto-enslavement. Allow
+	 * live name change even when these slave devices are
+	 * up and running.
+	 *
+	 * Typically, users of these auto-enslaving devices
+	 * don't actually care about slave name change, as
+	 * they are supposed to operate on master interface
+	 * directly.
+	 */
+	if (dev->flags & IFF_UP &&
+	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
 		return -EBUSY;
 
 	write_seqcount_begin(&devnet_rename_seq);
@@ -8732,7 +8746,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
 		refcnt = netdev_refcnt_read(dev);
 
-		if (time_after(jiffies, warning_time + 10 * HZ)) {
+		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
 				 dev->name, refcnt);
 			warning_time = jiffies;
diff --git a/net/core/failover.c b/net/core/failover.c
index 4a92a98..b5cd3c7 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
 		goto err_upper_link;
 	}
 
-	slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
 	if (fops && fops->slave_register &&
 	    !fops->slave_register(slave_dev, failover_dev))
 		return NOTIFY_OK;
 
 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
 	netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
 	netdev_rx_handler_unregister(slave_dev);
 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
 	if (fops && fops->slave_unregister &&
 	    !fops->slave_unregister(slave_dev, failover_dev))
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 0ff3953..338147b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -756,9 +756,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (err)
 		goto errout;
 
-	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-	    rule_exists(ops, frh, tb, rule)) {
-		err = -EEXIST;
+	if (rule_exists(ops, frh, tb, rule)) {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			err = -EEXIST;
 		goto errout_free;
 	}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ebde98b..3932eed 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1496,14 +1496,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
 	return ret;
 }
 
-static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
+static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
+			  bool force)
 {
 	int ifindex = dev_get_iflink(dev);
 
-	if (dev->ifindex == ifindex)
-		return 0;
+	if (force || dev->ifindex != ifindex)
+		return nla_put_u32(skb, IFLA_LINK, ifindex);
 
-	return nla_put_u32(skb, IFLA_LINK, ifindex);
+	return 0;
 }
 
 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
@@ -1520,6 +1521,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 				  const struct net_device *dev,
 				  struct net *src_net)
 {
+	bool put_iflink = false;
+
 	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
 		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
@@ -1528,10 +1531,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 
 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 				return -EMSGSIZE;
+
+			put_iflink = true;
 		}
 	}
 
-	return 0;
+	return nla_put_iflink(skb, dev, put_iflink);
 }
 
 static int rtnl_fill_link_af(struct sk_buff *skb,
@@ -1617,7 +1622,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 #ifdef CONFIG_RPS
 	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 #endif
-	    nla_put_iflink(skb, dev) ||
 	    put_master_ifindex(skb, dev) ||
 	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
 	    (dev->qdisc &&
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ceee28e..8b57681 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5071,7 +5071,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-	int mac_len;
+	int mac_len, meta_len;
+	void *meta;
 
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);
@@ -5083,6 +5084,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
 			mac_len - VLAN_HLEN - ETH_TLEN);
 	}
+
+	meta_len = skb_metadata_len(skb);
+	if (meta_len) {
+		meta = skb_metadata_end(skb) - meta_len;
+		memmove(meta + VLAN_HLEN, meta, meta_len);
+	}
+
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b1a2c5e..37b4667 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	return ret;
 }
 
-# ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 				    void __user *buffer, size_t *lenp,
@@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-# endif
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
 		.extra2		= &one,
 	},
 # endif
+	{
+		.procname	= "bpf_jit_limit",
+		.data		= &bpf_jit_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &one,
+	},
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 9f3209f..601534a 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -293,15 +293,22 @@ static int __init dsa_init_module(void)
 
 	rc = dsa_slave_register_notifier();
 	if (rc)
-		return rc;
+		goto register_notifier_fail;
 
 	rc = dsa_legacy_register();
 	if (rc)
-		return rc;
+		goto legacy_register_fail;
 
 	dev_add_pack(&dsa_pack_type);
 
 	return 0;
+
+legacy_register_fail:
+	dsa_slave_unregister_notifier();
+register_notifier_fail:
+	destroy_workqueue(dsa_owq);
+
+	return rc;
 }
 module_init(dsa_init_module);
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 12a43a5..114f9de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -223,7 +223,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
 	tail[plen - 1] = proto;
 }
 
-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
 	int encap_type;
 	struct udphdr *uh;
@@ -231,6 +231,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
 	__be16 sport, dport;
 	struct xfrm_encap_tmpl *encap = x->encap;
 	struct ip_esp_hdr *esph = esp->esph;
+	unsigned int len;
 
 	spin_lock_bh(&x->lock);
 	sport = encap->encap_sport;
@@ -238,11 +239,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
 	encap_type = encap->encap_type;
 	spin_unlock_bh(&x->lock);
 
+	len = skb->len + esp->tailen - skb_transport_offset(skb);
+	if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
+		return -EMSGSIZE;
+
 	uh = (struct udphdr *)esph;
 	uh->source = sport;
 	uh->dest = dport;
-	uh->len = htons(skb->len + esp->tailen
-		  - skb_transport_offset(skb));
+	uh->len = htons(len);
 	uh->check = 0;
 
 	switch (encap_type) {
@@ -259,6 +263,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
 
 	*skb_mac_header(skb) = IPPROTO_UDP;
 	esp->esph = esph;
+
+	return 0;
 }
 
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
@@ -272,8 +278,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 	int tailen = esp->tailen;
 
 	/* this is non-NULL only with UDP Encapsulation */
-	if (x->encap)
-		esp_output_udp_encap(x, skb, esp);
+	if (x->encap) {
+		int err = esp_output_udp_encap(x, skb, esp);
+
+		if (err < 0)
+			return err;
+	}
 
 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_tailroom(skb)) {
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 500a599..854ff1e 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -120,6 +120,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	struct guehdr *guehdr;
 	void *data;
 	u16 doffset = 0;
+	u8 proto_ctype;
 
 	if (!fou)
 		return 1;
@@ -211,13 +212,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(guehdr->control))
 		return gue_control_message(skb, guehdr);
 
+	proto_ctype = guehdr->proto_ctype;
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);
 
 	if (iptunnel_pull_offloads(skb))
 		goto drop;
 
-	return -guehdr->proto_ctype;
+	return -proto_ctype;
 
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 760a9e5..9f69411 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,6 +25,62 @@
 #include <net/sock.h>
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+	union {
+		struct inet_skb_parm	h4;
+		struct inet6_skb_parm	h6;
+	};
+	struct sk_buff		*next_frag;
+	int			frag_run_len;
+};
+
+#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+	RB_CLEAR_NODE(&skb->rbnode);
+	FRAG_CB(skb)->next_frag = NULL;
+	FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+				   struct sk_buff *skb)
+{
+	fragcb_clear(skb);
+
+	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+	FRAG_CB(q->fragments_tail)->next_frag = skb;
+	q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+	fragcb_clear(skb);
+
+	if (q->last_run_head)
+		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+			     &q->last_run_head->rbnode.rb_right);
+	else
+		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+	rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+	q->fragments_tail = skb;
+	q->last_run_head = skb;
+}
 
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
@@ -123,6 +179,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
 	kmem_cache_free(f->frags_cachep, q);
 }
 
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
+
+	while (p) {
+		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+		p = rb_next(p);
+		rb_erase(&skb->rbnode, root);
+		while (skb) {
+			struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+			sum += skb->truesize;
+			kfree_skb(skb);
+			skb = next;
+		}
+	}
+	return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
 void inet_frag_destroy(struct inet_frag_queue *q)
 {
 	struct sk_buff *fp;
@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 	return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end)
+{
+	struct sk_buff *last = q->fragments_tail;
+
+	/* RFC5722, Section 4, amended by Errata ID : 3089
+	 *                          When reassembling an IPv6 datagram, if
+	 *   one or more its constituent fragments is determined to be an
+	 *   overlapping fragment, the entire datagram (and any constituent
+	 *   fragments) MUST be silently discarded.
+	 *
+	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
+	 * queue/fragments kept for later reassembly).
+	 */
+	if (!last)
+		fragrun_create(q, skb);  /* First fragment. */
+	else if (last->ip_defrag_offset + last->len < end) {
+		/* This is the common case: skb goes to the end. */
+		/* Detect and discard overlaps. */
+		if (offset < last->ip_defrag_offset + last->len)
+			return IPFRAG_OVERLAP;
+		if (offset == last->ip_defrag_offset + last->len)
+			fragrun_append_to_last(q, skb);
+		else
+			fragrun_create(q, skb);
+	} else {
+		/* Binary search. Note that skb can become the first fragment,
+		 * but not the last (covered above).
+		 */
+		struct rb_node **rbn, *parent;
+
+		rbn = &q->rb_fragments.rb_node;
+		do {
+			struct sk_buff *curr;
+			int curr_run_end;
+
+			parent = *rbn;
+			curr = rb_to_skb(parent);
+			curr_run_end = curr->ip_defrag_offset +
+					FRAG_CB(curr)->frag_run_len;
+			if (end <= curr->ip_defrag_offset)
+				rbn = &parent->rb_left;
+			else if (offset >= curr_run_end)
+				rbn = &parent->rb_right;
+			else if (offset >= curr->ip_defrag_offset &&
+				 end <= curr_run_end)
+				return IPFRAG_DUP;
+			else
+				return IPFRAG_OVERLAP;
+		} while (*rbn);
+		/* Here we have parent properly set, and rbn pointing to
+		 * one of its NULL left/right children. Insert skb.
+		 */
+		fragcb_clear(skb);
+		rb_link_node(&skb->rbnode, parent, rbn);
+		rb_insert_color(&skb->rbnode, &q->rb_fragments);
+	}
+
+	skb->ip_defrag_offset = offset;
+
+	return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent)
+{
+	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+	struct sk_buff **nextp;
+	int delta;
+
+	if (head != skb) {
+		fp = skb_clone(skb, GFP_ATOMIC);
+		if (!fp)
+			return NULL;
+		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+		if (RB_EMPTY_NODE(&skb->rbnode))
+			FRAG_CB(parent)->next_frag = fp;
+		else
+			rb_replace_node(&skb->rbnode, &fp->rbnode,
+					&q->rb_fragments);
+		if (q->fragments_tail == skb)
+			q->fragments_tail = fp;
+		skb_morph(skb, head);
+		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+		rb_replace_node(&head->rbnode, &skb->rbnode,
+				&q->rb_fragments);
+		consume_skb(head);
+		head = skb;
+	}
+	WARN_ON(head->ip_defrag_offset != 0);
+
+	delta = -head->truesize;
+
+	/* Head of list must not be cloned. */
+	if (skb_unclone(head, GFP_ATOMIC))
+		return NULL;
+
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(q->net, delta);
+
+	/* If the first fragment is fragmented itself, we split
+	 * it to two chunks: the first with data and paged part
+	 * and the second, holding only fragments.
+	 */
+	if (skb_has_frag_list(head)) {
+		struct sk_buff *clone;
+		int i, plen = 0;
+
+		clone = alloc_skb(0, GFP_ATOMIC);
+		if (!clone)
+			return NULL;
+		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+		skb_frag_list_init(head);
+		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+		clone->data_len = head->data_len - plen;
+		clone->len = clone->data_len;
+		head->truesize += clone->truesize;
+		clone->csum = 0;
+		clone->ip_summed = head->ip_summed;
+		add_frag_mem_limit(q->net, clone->truesize);
+		skb_shinfo(head)->frag_list = clone;
+		nextp = &clone->next;
+	} else {
+		nextp = &skb_shinfo(head)->frag_list;
+	}
+
+	return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data)
+{
+	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+	struct rb_node *rbn;
+	struct sk_buff *fp;
+
+	skb_push(head, head->data - skb_network_header(head));
+
+	/* Traverse the tree in order, to build frag_list. */
+	fp = FRAG_CB(head)->next_frag;
+	rbn = rb_next(&head->rbnode);
+	rb_erase(&head->rbnode, &q->rb_fragments);
+	while (rbn || fp) {
+		/* fp points to the next sk_buff in the current run;
+		 * rbn points to the next run.
+		 */
+		/* Go through the current run. */
+		while (fp) {
+			*nextp = fp;
+			nextp = &fp->next;
+			fp->prev = NULL;
+			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+			fp->sk = NULL;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			if (head->ip_summed != fp->ip_summed)
+				head->ip_summed = CHECKSUM_NONE;
+			else if (head->ip_summed == CHECKSUM_COMPLETE)
+				head->csum = csum_add(head->csum, fp->csum);
+			head->truesize += fp->truesize;
+			fp = FRAG_CB(fp)->next_frag;
+		}
+		/* Move to the next run. */
+		if (rbn) {
+			struct rb_node *rbnext = rb_next(rbn);
+
+			fp = rb_to_skb(rbn);
+			rb_erase(rbn, &q->rb_fragments);
+			rbn = rbnext;
+		}
+	}
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	*nextp = NULL;
+	skb_mark_not_on_list(head);
+	head->prev = NULL;
+	head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+	struct sk_buff *head;
+
+	if (q->fragments) {
+		head = q->fragments;
+		q->fragments = head->next;
+	} else {
+		struct sk_buff *skb;
+
+		head = skb_rb_first(&q->rb_fragments);
+		if (!head)
+			return NULL;
+		skb = FRAG_CB(head)->next_frag;
+		if (skb)
+			rb_replace_node(&head->rbnode, &skb->rbnode,
+					&q->rb_fragments);
+		else
+			rb_erase(&head->rbnode, &q->rb_fragments);
+		memset(&head->rbnode, 0, sizeof(head->rbnode));
+		barrier();
+	}
+	if (head == q->fragments_tail)
+		q->fragments_tail = NULL;
+
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d95b32af..5a1d39e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -57,57 +57,6 @@
  */
 static const char ip_frag_cache_name[] = "ip4-frags";
 
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
-	struct inet_skb_parm	h;
-	struct sk_buff		*next_frag;
-	int			frag_run_len;
-};
-
-#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
-	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
-	FRAG_CB(skb)->next_frag = NULL;
-	FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
-					struct sk_buff *skb)
-{
-	RB_CLEAR_NODE(&skb->rbnode);
-	FRAG_CB(skb)->next_frag = NULL;
-
-	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
-	FRAG_CB(q->fragments_tail)->next_frag = skb;
-	q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
-	if (q->last_run_head)
-		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
-			     &q->last_run_head->rbnode.rb_right);
-	else
-		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
-	rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
-	ip4_frag_init_run(skb);
-	q->fragments_tail = skb;
-	q->last_run_head = skb;
-}
-
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
 	struct inet_frag_queue q;
@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
 	 * pull the head out of the tree in order to be able to
 	 * deal with head->dev.
 	 */
-	if (qp->q.fragments) {
-		head = qp->q.fragments;
-		qp->q.fragments = head->next;
-	} else {
-		head = skb_rb_first(&qp->q.rb_fragments);
-		if (!head)
-			goto out;
-		if (FRAG_CB(head)->next_frag)
-			rb_replace_node(&head->rbnode,
-					&FRAG_CB(head)->next_frag->rbnode,
-					&qp->q.rb_fragments);
-		else
-			rb_erase(&head->rbnode, &qp->q.rb_fragments);
-		memset(&head->rbnode, 0, sizeof(head->rbnode));
-		barrier();
-	}
-	if (head == qp->q.fragments_tail)
-		qp->q.fragments_tail = NULL;
-
-	sub_frag_mem_limit(qp->q.net, head->truesize);
-
+	head = inet_frag_pull_head(&qp->q);
+	if (!head)
+		goto out;
 	head->dev = dev_get_by_index_rcu(net, qp->iif);
 	if (!head->dev)
 		goto out;
@@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
-	struct rb_node **rbn, *parent;
-	struct sk_buff *skb1, *prev_tail;
-	int ihl, end, skb1_run_end;
+	int ihl, end, flags, offset;
+	struct sk_buff *prev_tail;
 	struct net_device *dev;
 	unsigned int fragsize;
-	int flags, offset;
 	int err = -ENOENT;
 	u8 ecn;
 
@@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		 */
 		if (end < qp->q.len ||
 		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
-			goto err;
+			goto discard_qp;
 		qp->q.flags |= INET_FRAG_LAST_IN;
 		qp->q.len = end;
 	} else {
@@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		if (end > qp->q.len) {
 			/* Some bits beyond end -> corruption. */
 			if (qp->q.flags & INET_FRAG_LAST_IN)
-				goto err;
+				goto discard_qp;
 			qp->q.len = end;
 		}
 	}
 	if (end == offset)
-		goto err;
+		goto discard_qp;
 
 	err = -ENOMEM;
 	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
-		goto err;
+		goto discard_qp;
 
 	err = pskb_trim_rcsum(skb, end - offset);
 	if (err)
-		goto err;
+		goto discard_qp;
 
 	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
 
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 *                          When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments) MUST be silently discarded.
-	 *
-	 * We do the same here for IPv4 (and increment an snmp counter) but
-	 * we do not want to drop the whole queue in response to a duplicate
-	 * fragment.
-	 */
-
-	err = -EINVAL;
-	/* Find out where to put this fragment.  */
 	prev_tail = qp->q.fragments_tail;
-	if (!prev_tail)
-		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
-	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
-		/* This is the common case: skb goes to the end. */
-		/* Detect and discard overlaps. */
-		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
-			goto discard_qp;
-		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
-			ip4_frag_append_to_last_run(&qp->q, skb);
-		else
-			ip4_frag_create_run(&qp->q, skb);
-	} else {
-		/* Binary search. Note that skb can become the first fragment,
-		 * but not the last (covered above).
-		 */
-		rbn = &qp->q.rb_fragments.rb_node;
-		do {
-			parent = *rbn;
-			skb1 = rb_to_skb(parent);
-			skb1_run_end = skb1->ip_defrag_offset +
-				       FRAG_CB(skb1)->frag_run_len;
-			if (end <= skb1->ip_defrag_offset)
-				rbn = &parent->rb_left;
-			else if (offset >= skb1_run_end)
-				rbn = &parent->rb_right;
-			else if (offset >= skb1->ip_defrag_offset &&
-				 end <= skb1_run_end)
-				goto err; /* No new data, potential duplicate */
-			else
-				goto discard_qp; /* Found an overlap */
-		} while (*rbn);
-		/* Here we have parent properly set, and rbn pointing to
-		 * one of its NULL left/right children. Insert skb.
-		 */
-		ip4_frag_init_run(skb);
-		rb_link_node(&skb->rbnode, parent, rbn);
-		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
-	}
+	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+	if (err)
+		goto insert_error;
 
 	if (dev)
 		qp->iif = dev->ifindex;
-	skb->ip_defrag_offset = offset;
 
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
@@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		skb->_skb_refdst = 0UL;
 		err = ip_frag_reasm(qp, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
+		if (err)
+			inet_frag_kill(&qp->q);
 		return err;
 	}
 
 	skb_dst_drop(skb);
 	return -EINPROGRESS;
 
+insert_error:
+	if (err == IPFRAG_DUP) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	err = -EINVAL;
+	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 discard_qp:
 	inet_frag_kill(&qp->q);
-	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 err:
 	kfree_skb(skb);
 	return err;
@@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
-	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
-	struct sk_buff **nextp; /* To build frag_list. */
-	struct rb_node *rbn;
-	int len;
-	int ihlen;
-	int delta;
-	int err;
+	void *reasm_data;
+	int len, err;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 		err = -EINVAL;
 		goto out_fail;
 	}
+
 	/* Make the one we just received the head. */
-	if (head != skb) {
-		fp = skb_clone(skb, GFP_ATOMIC);
-		if (!fp)
-			goto out_nomem;
-		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
-		if (RB_EMPTY_NODE(&skb->rbnode))
-			FRAG_CB(prev_tail)->next_frag = fp;
-		else
-			rb_replace_node(&skb->rbnode, &fp->rbnode,
-					&qp->q.rb_fragments);
-		if (qp->q.fragments_tail == skb)
-			qp->q.fragments_tail = fp;
-		skb_morph(skb, head);
-		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
-		rb_replace_node(&head->rbnode, &skb->rbnode,
-				&qp->q.rb_fragments);
-		consume_skb(head);
-		head = skb;
-	}
+	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+	if (!reasm_data)
+		goto out_nomem;
 
-	WARN_ON(head->ip_defrag_offset != 0);
-
-	/* Allocate a new buffer for the datagram. */
-	ihlen = ip_hdrlen(head);
-	len = ihlen + qp->q.len;
-
+	len = ip_hdrlen(skb) + qp->q.len;
 	err = -E2BIG;
 	if (len > 65535)
 		goto out_oversize;
 
-	delta = - head->truesize;
+	inet_frag_reasm_finish(&qp->q, skb, reasm_data);
 
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		goto out_nomem;
+	skb->dev = dev;
+	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
-	delta += head->truesize;
-	if (delta)
-		add_frag_mem_limit(qp->q.net, delta);
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (!clone)
-			goto out_nomem;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->truesize += clone->truesize;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(qp->q.net, clone->truesize);
-		skb_shinfo(head)->frag_list = clone;
-		nextp = &clone->next;
-	} else {
-		nextp = &skb_shinfo(head)->frag_list;
-	}
-
-	skb_push(head, head->data - skb_network_header(head));
-
-	/* Traverse the tree in order, to build frag_list. */
-	fp = FRAG_CB(head)->next_frag;
-	rbn = rb_next(&head->rbnode);
-	rb_erase(&head->rbnode, &qp->q.rb_fragments);
-	while (rbn || fp) {
-		/* fp points to the next sk_buff in the current run;
-		 * rbn points to the next run.
-		 */
-		/* Go through the current run. */
-		while (fp) {
-			*nextp = fp;
-			nextp = &fp->next;
-			fp->prev = NULL;
-			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-			fp->sk = NULL;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			if (head->ip_summed != fp->ip_summed)
-				head->ip_summed = CHECKSUM_NONE;
-			else if (head->ip_summed == CHECKSUM_COMPLETE)
-				head->csum = csum_add(head->csum, fp->csum);
-			head->truesize += fp->truesize;
-			fp = FRAG_CB(fp)->next_frag;
-		}
-		/* Move to the next run. */
-		if (rbn) {
-			struct rb_node *rbnext = rb_next(rbn);
-
-			fp = rb_to_skb(rbn);
-			rb_erase(rbn, &qp->q.rb_fragments);
-			rbn = rbnext;
-		}
-	}
-	sub_frag_mem_limit(qp->q.net, head->truesize);
-
-	*nextp = NULL;
-	head->next = NULL;
-	head->prev = NULL;
-	head->dev = dev;
-	head->tstamp = qp->q.stamp;
-	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
-
-	iph = ip_hdr(head);
+	iph = ip_hdr(skb);
 	iph->tot_len = htons(len);
 	iph->tos |= ecn;
 
@@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	 * from one very small df-fragment and one large non-df frag.
 	 */
 	if (qp->max_df_size == qp->q.max_size) {
-		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
 		iph->frag_off = htons(IP_DF);
 	} else {
 		iph->frag_off = 0;
@@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 }
 EXPORT_SYMBOL(ip_check_defrag);
 
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
-	struct rb_node *p = rb_first(root);
-	unsigned int sum = 0;
-
-	while (p) {
-		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
-		p = rb_next(p);
-		rb_erase(&skb->rbnode, root);
-		while (skb) {
-			struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
-			sum += skb->truesize;
-			kfree_skb(skb);
-			skb = next;
-		}
-	}
-	return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
-
 #ifdef CONFIG_SYSCTL
 static int dist_min;
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9c4e72e..73894ed 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->pkt_type = from->pkt_type;
 	to->priority = from->priority;
 	to->protocol = from->protocol;
+	to->skb_iif = from->skb_iif;
 	skb_dst_drop(to);
 	skb_dst_copy(to, from);
 	to->dev = from->dev;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 40a7cd5..808f8d1 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -659,9 +659,9 @@ static int __init vti_init(void)
 	return err;
 
 rtnl_link_failed:
-	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
 	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
@@ -676,6 +676,7 @@ static int __init vti_init(void)
 static void __exit vti_fini(void)
 {
 	rtnl_link_unregister(&vti_link_ops);
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 33df4d7..711a5c7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -174,6 +174,7 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
 static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
 {
 	int sdif = inet_sdif(skb);
+	int dif = inet_iif(skb);
 	struct sock *sk;
 	struct hlist_head *head;
 	int delivered = 0;
@@ -186,8 +187,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
 
 	net = dev_net(skb->dev);
 	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
-			     iph->saddr, iph->daddr,
-			     skb->dev->ifindex, sdif);
+			     iph->saddr, iph->daddr, dif, sdif);
 
 	while (sk) {
 		delivered = 1;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7a556e4..8bacbcd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1185,11 +1185,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 	return dst;
 }
 
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+	struct ip_options opt;
+	int res;
+
+	/* Recompile ip options since IPCB may not be valid anymore.
+	 * Also check we have a reasonable ipv4 header.
+	 */
+	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+		return;
+
+	memset(&opt, 0, sizeof(opt));
+	if (ip_hdr(skb)->ihl > 5) {
+		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+			return;
+		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+		rcu_read_lock();
+		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+		rcu_read_unlock();
+
+		if (res)
+			return;
+	}
+	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
 static void ipv4_link_failure(struct sk_buff *skb)
 {
 	struct rtable *rt;
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+	ipv4_send_dest_unreach(skb);
 
 	rt = skb_rtable(skb);
 	if (rt)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 8ee8ad0..6d7ba21 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -1162,7 +1163,9 @@ static struct ctl_table ipv4_net_table[] = {
 		.data		= &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one_day_secs
 	},
 	{
 		.procname	= "tcp_autocorking",
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 16f2c84..448400f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	int room;
+
+	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
 	/* Check #1 */
-	if (tp->rcv_ssthresh < tp->window_clamp &&
-	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_under_memory_pressure(sk)) {
+	if (room > 0 && !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
 		if (incr) {
 			incr = max_t(int, incr, 2 * skb->len);
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-					       tp->window_clamp);
+			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
 	}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index d73a6d6..2b144b9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -111,7 +111,8 @@ static void
 _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+	int ihl = iph->ihl;
+	u8 *xprth = skb_network_header(skb) + ihl * 4;
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	fl4->flowi4_mark = skb->mark;
 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
+	fl4->flowi4_proto = iph->protocol;
+	fl4->daddr = reverse ? iph->saddr : iph->daddr;
+	fl4->saddr = reverse ? iph->daddr : iph->saddr;
+	fl4->flowi4_tos = iph->tos;
+
 	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be16 *ports;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ports = (__be16 *)xprth;
 
 				fl4->fl4_sport = ports[!!reverse];
@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
 				u8 *icmp;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				icmp = xprth;
 
 				fl4->fl4_icmp_type = icmp[0];
@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be32 *ehdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ehdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ehdr[0];
@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
 				__be32 *ah_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ah_hdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ah_hdr[1];
@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be16 *ipcomp_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ipcomp_hdr = (__be16 *)xprth;
 
 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 				__be16 *greflags;
 				__be32 *gre_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				greflags = (__be16 *)xprth;
 				gre_hdr = (__be32 *)xprth;
 
@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			break;
 		}
 	}
-	fl4->flowi4_proto = iph->protocol;
-	fl4->daddr = reverse ? iph->saddr : iph->daddr;
-	fl4->saddr = reverse ? iph->daddr : iph->saddr;
-	fl4->flowi4_tos = iph->tos;
 }
 
 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index cbe4617..a6c0479 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -877,6 +877,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
 {
 	int cpu;
 
+	/* Make sure rt6_make_pcpu_route() wont add other percpu routes
+	 * while we are cleaning them here.
+	 */
+	f6i->fib6_destroying = 1;
+	mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
+
 	/* release the reference to this fib entry from
 	 * all of its cached pcpu routes
 	 */
@@ -889,9 +895,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
 		if (pcpu_rt) {
 			struct fib6_info *from;
 
-			from = rcu_dereference_protected(pcpu_rt->from,
-					     lockdep_is_held(&table->tb6_lock));
-			rcu_assign_pointer(pcpu_rt->from, NULL);
+			from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
 			fib6_info_release(from);
 		}
 	}
@@ -902,6 +906,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 {
 	struct fib6_table *table = rt->fib6_table;
 
+	if (rt->rt6i_pcpu)
+		fib6_drop_pcpu_from(rt, table);
+
 	if (atomic_read(&rt->fib6_ref) != 1) {
 		/* This route is used as dummy address holder in some split
 		 * nodes. It is not leaked, but it still holds other resources,
@@ -923,9 +930,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 			fn = rcu_dereference_protected(fn->parent,
 				    lockdep_is_held(&table->tb6_lock));
 		}
-
-		if (rt->rt6i_pcpu)
-			fib6_drop_pcpu_from(rt, table);
 	}
 }
 
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index cb54a8a..be5f3d7c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
 	return fl;
 }
 
+static void fl_free_rcu(struct rcu_head *head)
+{
+	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+	if (fl->share == IPV6_FL_S_PROCESS)
+		put_pid(fl->owner.pid);
+	kfree(fl->opt);
+	kfree(fl);
+}
+
 
 static void fl_free(struct ip6_flowlabel *fl)
 {
-	if (fl) {
-		if (fl->share == IPV6_FL_S_PROCESS)
-			put_pid(fl->owner.pid);
-		kfree(fl->opt);
-		kfree_rcu(fl, rcu);
-	}
+	if (fl)
+		call_rcu(&fl->rcu, fl_free_rcu);
 }
 
 static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 				if (fl1->share == IPV6_FL_S_EXCL ||
 				    fl1->share != fl->share ||
 				    ((fl1->share == IPV6_FL_S_PROCESS) &&
-				     (fl1->owner.pid == fl->owner.pid)) ||
+				     (fl1->owner.pid != fl->owner.pid)) ||
 				    ((fl1->share == IPV6_FL_S_USER) &&
-				     uid_eq(fl1->owner.uid, fl->owner.uid)))
+				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
 					goto release;
 
 				err = -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 1059894..4cb83fb 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 		psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
 			  ((srh->segments_left + 1) * sizeof(struct in6_addr));
 		psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+		if (!psid)
+			return false;
 		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
 				ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
 						     &srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 		nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
 			  ((srh->segments_left - 1) * sizeof(struct in6_addr));
 		nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+		if (!nsid)
+			return false;
 		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
 				ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
 						     &srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 	if (srhinfo->mt_flags & IP6T_SRH_LSID) {
 		lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
 		lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+		if (!lsid)
+			return false;
 		if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
 				ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
 						     &srhinfo->lsid_addr)))
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 043ed8e..cb1b477 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 }
 #endif
 
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev);
+
 static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 {
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			     const struct frag_hdr *fhdr, int nhoff)
 {
-	struct sk_buff *prev, *next;
 	unsigned int payload_len;
-	int offset, end;
+	struct net_device *dev;
+	struct sk_buff *prev;
+	int offset, end, err;
 	u8 ecn;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		goto err;
 	}
 
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
-	prev = fq->q.fragments_tail;
-	if (!prev || prev->ip_defrag_offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (next->ip_defrag_offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
-
-found:
-	/* RFC5722, Section 4:
-	 *                                  When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments, including those not yet received) MUST be silently
-	 *   discarded.
-	 */
-
-	/* Check for overlap with preceding fragment. */
-	if (prev &&
-	    (prev->ip_defrag_offset + prev->len) > offset)
-		goto discard_fq;
-
-	/* Look for overlap with succeeding segment. */
-	if (next && next->ip_defrag_offset < end)
-		goto discard_fq;
-
-	/* Note : skb->ip_defrag_offset and skb->dev share the same location */
-	if (skb->dev)
-		fq->iif = skb->dev->ifindex;
+	/* Note : skb->rbnode and skb->dev share the same location. */
+	dev = skb->dev;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
-	skb->ip_defrag_offset = offset;
 
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		fq->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		fq->q.fragments = skb;
+	prev = fq->q.fragments_tail;
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+	if (err)
+		goto insert_error;
+
+	if (dev)
+		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
@@ -319,11 +286,25 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->q.flags |= INET_FRAG_FIRST_IN;
 	}
 
-	return 0;
+	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+	    fq->q.meat == fq->q.len) {
+		unsigned long orefdst = skb->_skb_refdst;
 
-discard_fq:
+		skb->_skb_refdst = 0UL;
+		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return err;
+	}
+
+	skb_dst_drop(skb);
+	return -EINPROGRESS;
+
+insert_error:
+	if (err == IPFRAG_DUP)
+		goto err;
 	inet_frag_kill(&fq->q);
 err:
+	skb_dst_drop(skb);
 	return -EINVAL;
 }
 
@@ -333,147 +314,67 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
  *	It is called with locked fq, and caller must check that
  *	queue is eligible for reassembly i.e. it is not COMPLETE,
  *	the last and the first frames arrived and all the bits are here.
- *
- *	returns true if *prev skb has been transformed into the reassembled
- *	skb, false otherwise.
  */
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev)
 {
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len, delta;
+	void *reasm_data;
+	int payload_len;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
 
-	WARN_ON(head == NULL);
-	WARN_ON(head->ip_defrag_offset != 0);
-
 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
-		return false;
+		goto err;
 
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+	if (!reasm_data)
+		goto err;
+
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN) {
 		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
 				    payload_len);
-		return false;
-	}
-
-	delta = - head->truesize;
-
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		return false;
-
-	delta += head->truesize;
-	if (delta)
-		add_frag_mem_limit(fq->q.net, delta);
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (clone == NULL)
-			return false;
-
-		clone->next = head->next;
-		head->next = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-
-		add_frag_mem_limit(fq->q.net, clone->truesize);
-	}
-
-	/* morph head into last received skb: prev.
-	 *
-	 * This allows callers of ipv6 conntrack defrag to continue
-	 * to use the last skb(frag) passed into the reasm engine.
-	 * The last skb frag 'silently' turns into the full reassembled skb.
-	 *
-	 * Since prev is also part of q->fragments we have to clone it first.
-	 */
-	if (head != prev) {
-		struct sk_buff *iter;
-
-		fp = skb_clone(prev, GFP_ATOMIC);
-		if (!fp)
-			return false;
-
-		fp->next = prev->next;
-
-		iter = head;
-		while (iter) {
-			if (iter->next == prev) {
-				iter->next = fp;
-				break;
-			}
-			iter = iter->next;
-		}
-
-		skb_morph(prev, head);
-		prev->next = head->next;
-		consume_skb(head);
-		head = prev;
+		goto err;
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
-	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
+	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
 
-	skb_shinfo(head)->frag_list = head->next;
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
+	skb_reset_transport_header(skb);
 
-	for (fp = head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
-		fp->sk = NULL;
-	}
-	sub_frag_mem_limit(fq->q.net, head->truesize);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-	head->ignore_df = 1;
-	head->next = NULL;
-	head->dev = dev;
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+	skb->ignore_df = 1;
+	skb->dev = dev;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
 
 	/* Yes, and fold redundant checksum back. 8) */
-	if (head->ip_summed == CHECKSUM_COMPLETE)
-		head->csum = csum_partial(skb_network_header(head),
-					  skb_network_header_len(head),
-					  head->csum);
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_partial(skb_network_header(skb),
+					 skb_network_header_len(skb),
+					 skb->csum);
 
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
 
-	return true;
+	return 0;
+
+err:
+	inet_frag_kill(&fq->q);
+	return -EINVAL;
 }
 
 /*
@@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	u16 savethdr = skb->transport_header;
-	struct net_device *dev = skb->dev;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
@@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
-		return -EINVAL;
-
 	skb_orphan(skb);
 	fq = fq_find(net, fhdr->identification, user, hdr,
 		     skb->dev ? skb->dev->ifindex : 0);
@@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	spin_lock_bh(&fq->q.lock);
 
 	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
-	if (ret < 0) {
-		if (ret == -EPROTO) {
-			skb->transport_header = savethdr;
-			ret = 0;
-		}
-		goto out_unlock;
+	if (ret == -EPROTO) {
+		skb->transport_header = savethdr;
+		ret = 0;
 	}
 
 	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
 	 * must be returned.
 	 */
-	ret = -EINPROGRESS;
-	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    fq->q.meat == fq->q.len) {
-		unsigned long orefdst = skb->_skb_refdst;
+	if (ret)
+		ret = -EINPROGRESS;
 
-		skb->_skb_refdst = 0UL;
-		if (nf_ct_frag6_reasm(fq, skb, dev))
-			ret = 0;
-		skb->_skb_refdst = orefdst;
-	} else {
-		skb_dst_drop(skb);
-	}
-
-out_unlock:
 	spin_unlock_bh(&fq->q.lock);
 	inet_frag_put(&fq->q);
 	return ret;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7c94339..095825f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 
 static struct inet_frags ip6_frags;
 
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev);
 
 static void ip6_frag_expire(struct timer_list *t)
 {
@@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			  struct frag_hdr *fhdr, int nhoff,
 			  u32 *prob_offset)
 {
-	struct sk_buff *prev, *next;
-	struct net_device *dev;
-	int offset, end, fragsize;
 	struct net *net = dev_net(skb_dst(skb)->dev);
+	int offset, end, fragsize;
+	struct sk_buff *prev_tail;
+	struct net_device *dev;
+	int err = -ENOENT;
 	u8 ecn;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto err;
 
+	err = -EINVAL;
 	offset = ntohs(fhdr->frag_off) & ~0x7;
 	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+		/* note that if prob_offset is set, the skb is freed elsewhere,
+		 * we do not free it here.
+		 */
 		return -1;
 	}
 
@@ -145,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		 */
 		if (end < fq->q.len ||
 		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
-			goto err;
+			goto discard_fq;
 		fq->q.flags |= INET_FRAG_LAST_IN;
 		fq->q.len = end;
 	} else {
@@ -162,70 +167,35 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
 			if (fq->q.flags & INET_FRAG_LAST_IN)
-				goto err;
+				goto discard_fq;
 			fq->q.len = end;
 		}
 	}
 
 	if (end == offset)
-		goto err;
+		goto discard_fq;
 
+	err = -ENOMEM;
 	/* Point into the IP datagram 'data' part. */
 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
-		goto err;
-
-	if (pskb_trim_rcsum(skb, end - offset))
-		goto err;
-
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
-	prev = fq->q.fragments_tail;
-	if (!prev || prev->ip_defrag_offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (next->ip_defrag_offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
-
-found:
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 *                          When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments) MUST be silently discarded.
-	 */
-
-	/* Check for overlap with preceding fragment. */
-	if (prev &&
-	    (prev->ip_defrag_offset + prev->len) > offset)
 		goto discard_fq;
 
-	/* Look for overlap with succeeding segment. */
-	if (next && next->ip_defrag_offset < end)
+	err = pskb_trim_rcsum(skb, end - offset);
+	if (err)
 		goto discard_fq;
 
-	/* Note : skb->ip_defrag_offset and skb->dev share the same location */
+	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
-	if (dev)
-		fq->iif = dev->ifindex;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
-	skb->ip_defrag_offset = offset;
 
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		fq->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		fq->q.fragments = skb;
+	prev_tail = fq->q.fragments_tail;
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+	if (err)
+		goto insert_error;
+
+	if (dev)
+		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
@@ -246,44 +216,48 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    fq->q.meat == fq->q.len) {
-		int res;
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		res = ip6_frag_reasm(fq, prev, dev);
+		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
-		return res;
+		return err;
 	}
 
 	skb_dst_drop(skb);
-	return -1;
+	return -EINPROGRESS;
 
+insert_error:
+	if (err == IPFRAG_DUP) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	err = -EINVAL;
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_REASM_OVERLAPS);
 discard_fq:
 	inet_frag_kill(&fq->q);
-err:
 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			IPSTATS_MIB_REASMFAILS);
+err:
 	kfree_skb(skb);
-	return -1;
+	return err;
 }
 
 /*
  *	Check if this packet is complete.
- *	Returns NULL on failure by any reason, and pointer
- *	to current nexthdr field in reassembled frame.
  *
  *	It is called with locked fq, and caller must check that
  *	queue is eligible for reassembly i.e. it is not COMPLETE,
  *	the last and the first frames arrived and all the bits are here.
  */
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev)
 {
 	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len, delta;
 	unsigned int nhoff;
-	int sum_truesize;
+	void *reasm_data;
+	int payload_len;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
@@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	if (unlikely(ecn == 0xff))
 		goto out_fail;
 
-	/* Make the one we just received the head. */
-	if (prev) {
-		head = prev->next;
-		fp = skb_clone(head, GFP_ATOMIC);
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+	if (!reasm_data)
+		goto out_oom;
 
-		if (!fp)
-			goto out_oom;
-
-		fp->next = head->next;
-		if (!fp->next)
-			fq->q.fragments_tail = fp;
-		prev->next = fp;
-
-		skb_morph(head, fq->q.fragments);
-		head->next = fq->q.fragments->next;
-
-		consume_skb(fq->q.fragments);
-		fq->q.fragments = head;
-	}
-
-	WARN_ON(head == NULL);
-	WARN_ON(head->ip_defrag_offset != 0);
-
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN)
 		goto out_oversize;
 
-	delta = - head->truesize;
-
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		goto out_oom;
-
-	delta += head->truesize;
-	if (delta)
-		add_frag_mem_limit(fq->q.net, delta);
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (!clone)
-			goto out_oom;
-		clone->next = head->next;
-		head->next = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(fq->q.net, clone->truesize);
-	}
-
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
 	nhoff = fq->nhoffset;
-	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	if (skb_mac_header_was_set(head))
-		head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
+	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	if (skb_mac_header_was_set(skb))
+		skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
 
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
+	skb_reset_transport_header(skb);
 
-	sum_truesize = head->truesize;
-	for (fp = head->next; fp;) {
-		bool headstolen;
-		int delta;
-		struct sk_buff *next = fp->next;
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-		sum_truesize += fp->truesize;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-
-		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-			kfree_skb_partial(fp, headstolen);
-		} else {
-			fp->sk = NULL;
-			if (!skb_shinfo(head)->frag_list)
-				skb_shinfo(head)->frag_list = fp;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			head->truesize += fp->truesize;
-		}
-		fp = next;
-	}
-	sub_frag_mem_limit(fq->q.net, sum_truesize);
-
-	head->next = NULL;
-	head->dev = dev;
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->nhoff = nhoff;
-	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
-	IP6CB(head)->frag_max_size = fq->q.max_size;
+	skb->dev = dev;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->nhoff = nhoff;
+	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+	IP6CB(skb)->frag_max_size = fq->q.max_size;
 
 	/* Yes, and fold redundant checksum back. 8) */
-	skb_postpush_rcsum(head, skb_network_header(head),
-			   skb_network_header_len(head));
+	skb_postpush_rcsum(skb, skb_network_header(skb),
+			   skb_network_header_len(skb));
 
 	rcu_read_lock();
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
 	return 1;
 
 out_oversize:
@@ -425,6 +319,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	rcu_read_lock();
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	rcu_read_unlock();
+	inet_frag_kill(&fq->q);
 	return -1;
 }
 
@@ -463,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return 1;
 	}
 
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
-		goto fail_hdr;
-
 	iif = skb->dev ? skb->dev->ifindex : 0;
 	fq = fq_find(net, fhdr->identification, hdr, iif);
 	if (fq) {
@@ -484,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		if (prob_offset) {
 			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
 					IPSTATS_MIB_INHDRERRORS);
+			/* icmpv6_param_prob() calls kfree_skb(skb) */
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
 		}
 		return ret;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7a9e9d8..8000357 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 			 int iif, int type, u32 portid, u32 seq,
 			 unsigned int flags);
 static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
-					   struct in6_addr *daddr,
-					   struct in6_addr *saddr);
+					   const struct in6_addr *daddr,
+					   const struct in6_addr *saddr);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct fib6_info *rt6_add_route_info(struct net *net,
@@ -382,11 +382,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 		in6_dev_put(idev);
 	}
 
-	rcu_read_lock();
-	from = rcu_dereference(rt->from);
-	rcu_assign_pointer(rt->from, NULL);
+	from = xchg((__force struct fib6_info **)&rt->from, NULL);
 	fib6_info_release(from);
-	rcu_read_unlock();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1271,6 +1268,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
 	prev = cmpxchg(p, NULL, pcpu_rt);
 	BUG_ON(prev);
 
+	if (rt->fib6_destroying) {
+		struct fib6_info *from;
+
+		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+		fib6_info_release(from);
+	}
+
 	return pcpu_rt;
 }
 
@@ -1296,9 +1300,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
 	/* purge completely the exception to allow releasing the held resources:
 	 * some [sk] cache may keep the dst around for unlimited time
 	 */
-	from = rcu_dereference_protected(rt6_ex->rt6i->from,
-					 lockdep_is_held(&rt6_exception_lock));
-	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
 	fib6_info_release(from);
 	dst_dev_put(&rt6_ex->rt6i->dst);
 
@@ -1547,31 +1549,44 @@ void rt6_flush_exceptions(struct fib6_info *rt)
  * Caller has to hold rcu_read_lock()
  */
 static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
-					   struct in6_addr *daddr,
-					   struct in6_addr *saddr)
+					   const struct in6_addr *daddr,
+					   const struct in6_addr *saddr)
 {
+	const struct in6_addr *src_key = NULL;
 	struct rt6_exception_bucket *bucket;
-	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
 	struct rt6_info *res = NULL;
 
-	bucket = rcu_dereference(rt->rt6i_exception_bucket);
-
 #ifdef CONFIG_IPV6_SUBTREES
 	/* rt6i_src.plen != 0 indicates rt is in subtree
 	 * and exception table is indexed by a hash of
 	 * both rt6i_dst and rt6i_src.
-	 * Otherwise, the exception table is indexed by
-	 * a hash of only rt6i_dst.
+	 * However, the src addr used to create the hash
+	 * might not be exactly the passed in saddr which
+	 * is a /128 addr from the flow.
+	 * So we need to use f6i->fib6_src to redo lookup
+	 * if the passed in saddr does not find anything.
+	 * (See the logic in ip6_rt_cache_alloc() on how
+	 * rt->rt6i_src is updated.)
 	 */
 	if (rt->fib6_src.plen)
 		src_key = saddr;
+find_ex:
 #endif
+	bucket = rcu_dereference(rt->rt6i_exception_bucket);
 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
 
 	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
 		res = rt6_ex->rt6i;
 
+#ifdef CONFIG_IPV6_SUBTREES
+	/* Use fib6_src as src_key and redo lookup */
+	if (!res && src_key && src_key != &rt->fib6_src.addr) {
+		src_key = &rt->fib6_src.addr;
+		goto find_ex;
+	}
+#endif
+
 	return res;
 }
 
@@ -2367,6 +2382,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
 		rcu_read_lock();
 		from = rcu_dereference(rt6->from);
+		if (!from) {
+			rcu_read_unlock();
+			return;
+		}
 		nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
 		if (nrt6) {
 			rt6_do_update_pmtu(nrt6, mtu);
@@ -2651,10 +2670,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
 u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
 		      struct in6_addr *saddr)
 {
-	struct rt6_exception_bucket *bucket;
-	struct rt6_exception *rt6_ex;
-	struct in6_addr *src_key;
 	struct inet6_dev *idev;
+	struct rt6_info *rt;
 	u32 mtu = 0;
 
 	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
@@ -2663,18 +2680,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
 			goto out;
 	}
 
-	src_key = NULL;
-#ifdef CONFIG_IPV6_SUBTREES
-	if (f6i->fib6_src.plen)
-		src_key = saddr;
-#endif
-
-	bucket = rcu_dereference(f6i->rt6i_exception_bucket);
-	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
-	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
-		mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
-
-	if (likely(!mtu)) {
+	rt = rt6_find_cached_rt(f6i, daddr, saddr);
+	if (unlikely(rt)) {
+		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
+	} else {
 		struct net_device *dev = fib6_info_nh_dev(f6i);
 
 		mtu = IPV6_MIN_MTU;
@@ -3450,11 +3459,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
 	rcu_read_lock();
 	from = rcu_dereference(rt->from);
-	/* This fib6_info_hold() is safe here because we hold reference to rt
-	 * and rt already holds reference to fib6_info.
-	 */
-	fib6_info_hold(from);
-	rcu_read_unlock();
+	if (!from)
+		goto out;
 
 	nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
 	if (!nrt)
@@ -3466,10 +3472,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
-	/* No need to remove rt from the exception table if rt is
-	 * a cached route because rt6_insert_exception() will
-	 * takes care of it
-	 */
+	/* rt6_insert_exception() will take care of duplicated exceptions */
 	if (rt6_insert_exception(nrt, from)) {
 		dst_release_immediate(&nrt->dst);
 		goto out;
@@ -3482,7 +3485,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
 out:
-	fib6_info_release(from);
+	rcu_read_unlock();
 	neigh_release(neigh);
 }
 
@@ -4963,16 +4966,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
 	rcu_read_lock();
 	from = rcu_dereference(rt->from);
-
-	if (fibmatch)
-		err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
-				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
-				    nlh->nlmsg_seq, 0);
-	else
-		err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
-				    &fl6.saddr, iif, RTM_NEWROUTE,
-				    NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
-				    0);
+	if (from) {
+		if (fibmatch)
+			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
+					    iif, RTM_NEWROUTE,
+					    NETLINK_CB(in_skb).portid,
+					    nlh->nlmsg_seq, 0);
+		else
+			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+					    &fl6.saddr, iif, RTM_NEWROUTE,
+					    NETLINK_CB(in_skb).portid,
+					    nlh->nlmsg_seq, 0);
+	} else {
+		err = -ENETUNREACH;
+	}
 	rcu_read_unlock();
 
 	if (err < 0) {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8f6cf8e..41b3fe8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1084,7 +1084,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 	if (!tdev && tunnel->parms.link)
 		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 
-	if (tdev) {
+	if (tdev && !netif_is_l3_master(tdev)) {
 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index bc65db7..d9e5f68 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
 	unsigned int i;
 
 	xfrm_flush_gc();
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+	xfrm_state_flush(net, 0, false, true);
 
 	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
 		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
 	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
 	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
 	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+	/* Someone maybe has gotten the xfrm6_tunnel_spi.
+	 * So need to wait it.
+	 */
+	rcu_barrier();
 	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
 }
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7d4bed9..0b79c9a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
 	if (rq->sadb_x_ipsecrequest_mode == 0)
 		return -EINVAL;
+	if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+		return -EINVAL;
 
-	t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+	t->id.proto = rq->sadb_x_ipsecrequest_proto;
 	if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
 		return -EINVAL;
 	t->mode = mode;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index fed6bec..52b5a27 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (tunnel->tunnel_id == tunnel_id) {
-			l2tp_tunnel_inc_refcount(tunnel);
+		if (tunnel->tunnel_id == tunnel_id &&
+		    refcount_inc_not_zero(&tunnel->ref_count)) {
 			rcu_read_unlock_bh();
 
 			return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (++count > nth) {
-			l2tp_tunnel_inc_refcount(tunnel);
+		if (++count > nth &&
+		    refcount_inc_not_zero(&tunnel->ref_count)) {
 			rcu_read_unlock_bh();
 			return tunnel;
 		}
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct l2tp_tunnel *tunnel;
 
-	tunnel = l2tp_tunnel(sk);
+	tunnel = rcu_dereference_sk_user_data(sk);
 	if (tunnel == NULL)
 		goto pass_up;
 
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c813207..d37d4ac 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -838,7 +838,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
 
 	dir = sdata->vif.debugfs_dir;
 
-	if (!dir)
+	if (IS_ERR_OR_NULL(dir))
 		return;
 
 	sprintf(buf, "netdev:%s", sdata->name);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 8f69980..2123f6e 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+	if (local->in_reconfig)
+		return;
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3a0171a..152d436 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1910,6 +1910,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
+	if (sdata->vif.txq)
+		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
 	synchronize_rcu();
 
 	if (sdata->dev) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index c054ac8..f20bb39 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		 * The driver doesn't know anything about VLAN interfaces.
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
-		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			ret = 1;
 			goto out_unsupported;
+		}
 	}
 
 	ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		/* all of these we can do in software - if driver can */
 		if (ret == 1)
 			return 0;
-		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-				return 0;
+		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
 			return -EINVAL;
-		}
 		return 0;
 	default:
 		return -EINVAL;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index c3a7396..49a9021 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
 	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), seed);
+	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3dbecae..2ac749c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1156,9 +1156,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
 		goto out;
 	}
 
-	/* XXX: shouldn't really modify cfg80211-owned data! */
-	ifmgd->associated->channel = sdata->csa_chandef.chan;
-
 	ifmgd->csa_waiting_bcn = true;
 
 	ieee80211_sta_reset_beacon_monitor(sdata);
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index 366b9e6..40141df 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN	100
+#define MAX_MSG_LEN	120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
 	TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 743cde6..2f726cd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3185,6 +3185,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	u8 max_subframes = sta->sta.max_amsdu_subframes;
 	int max_frags = local->hw.max_tx_fragments;
 	int max_amsdu_len = sta->sta.max_amsdu_len;
+	int orig_truesize;
 	__be16 len;
 	void *data;
 	bool ret = false;
@@ -3218,6 +3219,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	if (!head)
 		goto out;
 
+	orig_truesize = head->truesize;
 	orig_len = head->len;
 
 	if (skb->len + head->len > max_amsdu_len)
@@ -3272,6 +3274,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	*frag_tail = skb;
 
 out_recalc:
+	fq->memory_usage += head->truesize - orig_truesize;
 	if (head->len != orig_len) {
 		flow->backlog += head->len - orig_len;
 		tin->backlog_bytes += head->len - orig_len;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 4dba997..76b5041 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1017,6 +1017,7 @@
 	depends on NETFILTER_ADVANCED
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
+	depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
 	select NF_DUP_IPV4
 	select NF_DUP_IPV6 if IP6_NF_IPTABLES
 	---help---
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 3f963ea..a42c1bc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1647,7 +1647,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 	if (!cp) {
 		int v;
 
-		if (!sysctl_schedule_icmp(ipvs))
+		if (ipip || !sysctl_schedule_icmp(ipvs))
 			return NF_ACCEPT;
 
 		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 8fd8d06..2d4e048 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
 	struct ip_vs_dest *dest;
 	unsigned int atype, i;
-	int ret = 0;
 
 	EnterFunction(2);
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (udest->af == AF_INET6) {
+		int ret;
+
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9a24947..27eff89 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
 #include <linux/moduleparam.h>
@@ -424,6 +425,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 }
 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
+/* Generate a almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn after it has been committed to main hash table:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->ext address
+ * 3. nf_conn->master address (normally NULL)
+ * 4. tuple
+ * 5. the associated net namespace
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+	static __read_mostly siphash_key_t ct_id_seed;
+	unsigned long a, b, c, d;
+
+	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+	a = (unsigned long)ct;
+	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+	c = (unsigned long)ct->ext;
+	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+				   &ct_id_seed);
+#ifdef CONFIG_64BIT
+	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
 static void
 clean_from_lists(struct nf_conn *ct)
 {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 036207e..47e5a07 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/siphash.h>
 
 #include <linux/netfilter.h>
 #include <net/netlink.h>
@@ -487,7 +488,9 @@ static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
 
 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+	__be32 id = (__force __be32)nf_ct_get_id(ct);
+
+	if (nla_put_be32(skb, CTA_ID, id))
 		goto nla_put_failure;
 	return 0;
 
@@ -1275,8 +1278,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 	}
 
 	if (cda[CTA_ID]) {
-		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
-		if (id != (u32)(unsigned long)ct) {
+		__be32 id = nla_get_be32(cda[CTA_ID]);
+
+		if (id != (__force __be32)nf_ct_get_id(ct)) {
 			nf_ct_put(ct);
 			return -ENOENT;
 		}
@@ -2675,6 +2679,25 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
 
 static const union nf_inet_addr any_addr;
 
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+	static __read_mostly siphash_key_t exp_id_seed;
+	unsigned long a, b, c, d;
+
+	net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+	a = (unsigned long)exp;
+	b = (unsigned long)exp->helper;
+	c = (unsigned long)exp->master;
+	d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+	return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+	return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
 			  const struct nf_conntrack_expect *exp)
@@ -2722,7 +2745,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
 	}
 #endif
 	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
-	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+	    nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
 	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
 	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
 		goto nla_put_failure;
@@ -3027,7 +3050,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
 
 	if (cda[CTA_EXPECT_ID]) {
 		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
-		if (ntohl(id) != (u32)(unsigned long)exp) {
+
+		if (id != nf_expect_get_id(exp)) {
 			nf_ct_expect_put(exp);
 			return -ENOENT;
 		}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 51c5d7e..e903ef9 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -86,7 +86,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
 	struct va_format vaf;
 	va_list args;
 
-	if (net->ct.sysctl_log_invalid != protonum ||
+	if (net->ct.sysctl_log_invalid != protonum &&
 	    net->ct.sysctl_log_invalid != IPPROTO_RAW)
 		return;
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c06393f..ebfcfe1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -112,6 +112,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
 	kfree(trans);
 }
 
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	struct net *net = ctx->net;
+	struct nft_trans *trans;
+
+	if (!nft_set_is_anonymous(set))
+		return;
+
+	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+		if (trans->msg_type == NFT_MSG_NEWSET &&
+		    nft_trans_set(trans) == set) {
+			set->bound = true;
+			break;
+		}
+	}
+}
+
 static int nf_tables_register_hook(struct net *net,
 				   const struct nft_table *table,
 				   struct nft_chain *chain)
@@ -222,14 +239,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
-				     struct nft_rule *rule)
+				     struct nft_rule *rule,
+				     enum nft_trans_phase phase)
 {
 	struct nft_expr *expr;
 
 	expr = nft_expr_first(rule);
 	while (expr != nft_expr_last(rule) && expr->ops) {
 		if (expr->ops->deactivate)
-			expr->ops->deactivate(ctx, expr);
+			expr->ops->deactivate(ctx, expr, phase);
 
 		expr = nft_expr_next(expr);
 	}
@@ -280,7 +298,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
 		nft_trans_destroy(trans);
 		return err;
 	}
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
 
 	return 0;
 }
@@ -301,7 +319,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
 	return 0;
 }
 
-static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
 			     struct nft_set *set)
 {
 	struct nft_trans *trans;
@@ -321,7 +339,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
 	return 0;
 }
 
-static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
+static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	int err;
 
@@ -1478,7 +1496,7 @@ static int nft_chain_parse_hook(struct net *net,
 		if (IS_ERR(type))
 			return PTR_ERR(type);
 	}
-	if (!(type->hook_mask & (1 << hook->num)))
+	if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
 		return -EOPNOTSUPP;
 
 	if (type->type == NFT_CHAIN_T_NAT &&
@@ -2095,9 +2113,11 @@ static int nf_tables_newexpr(const struct nft_ctx *ctx,
 static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
 				   struct nft_expr *expr)
 {
+	const struct nft_expr_type *type = expr->ops->type;
+
 	if (expr->ops->destroy)
 		expr->ops->destroy(ctx, expr);
-	module_put(expr->ops->type->owner);
+	module_put(type->owner);
 }
 
 struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
@@ -2105,6 +2125,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 {
 	struct nft_expr_info info;
 	struct nft_expr *expr;
+	struct module *owner;
 	int err;
 
 	err = nf_tables_expr_parse(ctx, nla, &info);
@@ -2124,7 +2145,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 err3:
 	kfree(expr);
 err2:
-	module_put(info.ops->type->owner);
+	owner = info.ops->type->owner;
+	if (info.ops->type->release_ops)
+		info.ops->type->release_ops(info.ops);
+
+	module_put(owner);
 err1:
 	return ERR_PTR(err);
 }
@@ -2458,7 +2483,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 static void nf_tables_rule_release(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
 	nf_tables_rule_destroy(ctx, rule);
 }
 
@@ -2694,8 +2719,11 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 	nf_tables_rule_release(&ctx, rule);
 err1:
 	for (i = 0; i < n; i++) {
-		if (info[i].ops != NULL)
+		if (info[i].ops) {
 			module_put(info[i].ops->type->owner);
+			if (info[i].ops->type->release_ops)
+				info[i].ops->type->release_ops(info[i].ops);
+		}
 	}
 	kvfree(info);
 	return err;
@@ -3562,19 +3590,15 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 static void nft_set_destroy(struct nft_set *set)
 {
+	if (WARN_ON(set->use > 0))
+		return;
+
 	set->ops->destroy(set);
 	module_put(to_set_type(set->ops)->owner);
 	kfree(set->name);
 	kvfree(set);
 }
 
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
-{
-	list_del_rcu(&set->list);
-	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
-	nft_set_destroy(set);
-}
-
 static int nf_tables_delset(struct net *net, struct sock *nlsk,
 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
 			    const struct nlattr * const nla[],
@@ -3609,7 +3633,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 		NL_SET_BAD_ATTR(extack, attr);
 		return PTR_ERR(set);
 	}
-	if (!list_empty(&set->bindings) ||
+	if (set->use ||
 	    (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
 		NL_SET_BAD_ATTR(extack, attr);
 		return -EBUSY;
@@ -3639,6 +3663,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_binding *i;
 	struct nft_set_iter iter;
 
+	if (set->use == UINT_MAX)
+		return -EOVERFLOW;
+
 	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		return -EBUSY;
 
@@ -3665,21 +3692,53 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 bind:
 	binding->chain = ctx->chain;
 	list_add_tail_rcu(&binding->list, &set->bindings);
+	nft_set_trans_bind(ctx, set);
+	set->use++;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding)
+			  struct nft_set_binding *binding, bool event)
 {
 	list_del_rcu(&binding->list);
 
-	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-	    nft_is_active(ctx->net, set))
-		nf_tables_set_destroy(ctx, set);
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+		list_del_rcu(&set->list);
+		if (event)
+			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+					     GFP_KERNEL);
+	}
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase)
+{
+	switch (phase) {
+	case NFT_TRANS_PREPARE:
+		set->use--;
+		return;
+	case NFT_TRANS_ABORT:
+	case NFT_TRANS_RELEASE:
+		set->use--;
+		/* fall through */
+	default:
+		nf_tables_unbind_set(ctx, set, binding,
+				     phase == NFT_TRANS_COMMIT);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
+		nft_set_destroy(set);
+}
+EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
+
 const struct nft_set_ext_type nft_set_ext_types[] = {
 	[NFT_SET_EXT_KEY]		= {
 		.align	= __alignof__(u32),
@@ -6429,6 +6488,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			nf_tables_rule_notify(&trans->ctx,
 					      nft_trans_rule(trans),
 					      NFT_MSG_DELRULE);
+			nft_rule_expr_deactivate(&trans->ctx,
+						 nft_trans_rule(trans),
+						 NFT_TRANS_COMMIT);
 			break;
 		case NFT_MSG_NEWSET:
 			nft_clear(net, nft_trans_set(trans));
@@ -6577,7 +6639,9 @@ static int __nf_tables_abort(struct net *net)
 		case NFT_MSG_NEWRULE:
 			trans->ctx.chain->use--;
 			list_del_rcu(&nft_trans_rule(trans)->list);
-			nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+			nft_rule_expr_deactivate(&trans->ctx,
+						 nft_trans_rule(trans),
+						 NFT_TRANS_ABORT);
 			break;
 		case NFT_MSG_DELRULE:
 			trans->ctx.chain->use++;
@@ -6587,6 +6651,10 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
+			if (nft_trans_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
 			list_del_rcu(&nft_trans_set(trans)->list);
 			break;
 		case NFT_MSG_DELSET:
@@ -6595,8 +6663,11 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
 			te = (struct nft_trans_elem *)trans->data;
-
 			te->set->ops->remove(net, te->set, &te->elem);
 			atomic_dec(&te->set->nelems);
 			break;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 38da1f5..1245e02 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -23,19 +23,6 @@
 #include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-struct nft_xt {
-	struct list_head	head;
-	struct nft_expr_ops	ops;
-	unsigned int		refcnt;
-
-	/* Unlike other expressions, ops doesn't have static storage duration.
-	 * nft core assumes they do.  We use kfree_rcu so that nft core can
-	 * can check expr->ops->size even after nft_compat->destroy() frees
-	 * the nft_xt struct that holds the ops structure.
-	 */
-	struct rcu_head		rcu_head;
-};
-
 /* Used for matches where *info is larger than X byte */
 #define NFT_MATCH_LARGE_THRESH	192
 
@@ -43,17 +30,6 @@ struct nft_xt_match_priv {
 	void *info;
 };
 
-static bool nft_xt_put(struct nft_xt *xt)
-{
-	if (--xt->refcnt == 0) {
-		list_del(&xt->head);
-		kfree_rcu(xt, rcu_head);
-		return true;
-	}
-
-	return false;
-}
-
 static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
 						const char *tablename)
 {
@@ -248,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_target *target = expr->ops->data;
 	struct xt_tgchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
-	struct nft_xt *nft_xt;
 	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
@@ -272,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	if (!target->target)
 		return -EINVAL;
 
-	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	nft_xt->refcnt++;
 	return 0;
 }
 
@@ -292,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 
-	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(me);
+	module_put(me);
+	kfree(expr->ops);
 }
 
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -447,7 +420,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_match *match = expr->ops->data;
 	struct xt_mtchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
-	struct nft_xt *nft_xt;
 	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
@@ -463,13 +435,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
 	nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
 
-	ret = xt_check_match(&par, size, proto, inv);
-	if (ret < 0)
-		return ret;
-
-	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	nft_xt->refcnt++;
-	return 0;
+	return xt_check_match(&par, size, proto, inv);
 }
 
 static int
@@ -512,8 +478,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	if (par.match->destroy != NULL)
 		par.match->destroy(&par);
 
-	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(me);
+	module_put(me);
+	kfree(expr->ops);
 }
 
 static void
@@ -715,22 +681,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
 	.cb		= nfnl_nft_compat_cb,
 };
 
-static LIST_HEAD(nft_match_list);
-
 static struct nft_expr_type nft_match_type;
 
-static bool nft_match_cmp(const struct xt_match *match,
-			  const char *name, u32 rev, u32 family)
-{
-	return strcmp(match->name, name) == 0 && match->revision == rev &&
-	       (match->family == NFPROTO_UNSPEC || match->family == family);
-}
-
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
 		     const struct nlattr * const tb[])
 {
-	struct nft_xt *nft_match;
+	struct nft_expr_ops *ops;
 	struct xt_match *match;
 	unsigned int matchsize;
 	char *mt_name;
@@ -746,14 +703,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 	rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
 	family = ctx->family;
 
-	/* Re-use the existing match if it's already loaded. */
-	list_for_each_entry(nft_match, &nft_match_list, head) {
-		struct xt_match *match = nft_match->ops.data;
-
-		if (nft_match_cmp(match, mt_name, rev, family))
-			return &nft_match->ops;
-	}
-
 	match = xt_request_find_match(family, mt_name, rev);
 	if (IS_ERR(match))
 		return ERR_PTR(-ENOENT);
@@ -763,66 +712,62 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	/* This is the first time we use this match, allocate operations */
-	nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
-	if (nft_match == NULL) {
+	ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+	if (!ops) {
 		err = -ENOMEM;
 		goto err;
 	}
 
-	nft_match->refcnt = 0;
-	nft_match->ops.type = &nft_match_type;
-	nft_match->ops.eval = nft_match_eval;
-	nft_match->ops.init = nft_match_init;
-	nft_match->ops.destroy = nft_match_destroy;
-	nft_match->ops.dump = nft_match_dump;
-	nft_match->ops.validate = nft_match_validate;
-	nft_match->ops.data = match;
+	ops->type = &nft_match_type;
+	ops->eval = nft_match_eval;
+	ops->init = nft_match_init;
+	ops->destroy = nft_match_destroy;
+	ops->dump = nft_match_dump;
+	ops->validate = nft_match_validate;
+	ops->data = match;
 
 	matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
 	if (matchsize > NFT_MATCH_LARGE_THRESH) {
 		matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
 
-		nft_match->ops.eval = nft_match_large_eval;
-		nft_match->ops.init = nft_match_large_init;
-		nft_match->ops.destroy = nft_match_large_destroy;
-		nft_match->ops.dump = nft_match_large_dump;
+		ops->eval = nft_match_large_eval;
+		ops->init = nft_match_large_init;
+		ops->destroy = nft_match_large_destroy;
+		ops->dump = nft_match_large_dump;
 	}
 
-	nft_match->ops.size = matchsize;
+	ops->size = matchsize;
 
-	list_add(&nft_match->head, &nft_match_list);
-
-	return &nft_match->ops;
+	return ops;
 err:
 	module_put(match->me);
 	return ERR_PTR(err);
 }
 
+static void nft_match_release_ops(const struct nft_expr_ops *ops)
+{
+	struct xt_match *match = ops->data;
+
+	module_put(match->me);
+	kfree(ops);
+}
+
 static struct nft_expr_type nft_match_type __read_mostly = {
 	.name		= "match",
 	.select_ops	= nft_match_select_ops,
+	.release_ops	= nft_match_release_ops,
 	.policy		= nft_match_policy,
 	.maxattr	= NFTA_MATCH_MAX,
 	.owner		= THIS_MODULE,
 };
 
-static LIST_HEAD(nft_target_list);
-
 static struct nft_expr_type nft_target_type;
 
-static bool nft_target_cmp(const struct xt_target *tg,
-			   const char *name, u32 rev, u32 family)
-{
-	return strcmp(tg->name, name) == 0 && tg->revision == rev &&
-	       (tg->family == NFPROTO_UNSPEC || tg->family == family);
-}
-
 static const struct nft_expr_ops *
 nft_target_select_ops(const struct nft_ctx *ctx,
 		      const struct nlattr * const tb[])
 {
-	struct nft_xt *nft_target;
+	struct nft_expr_ops *ops;
 	struct xt_target *target;
 	char *tg_name;
 	u32 rev, family;
@@ -842,17 +787,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 	    strcmp(tg_name, "standard") == 0)
 		return ERR_PTR(-EINVAL);
 
-	/* Re-use the existing target if it's already loaded. */
-	list_for_each_entry(nft_target, &nft_target_list, head) {
-		struct xt_target *target = nft_target->ops.data;
-
-		if (!target->target)
-			continue;
-
-		if (nft_target_cmp(target, tg_name, rev, family))
-			return &nft_target->ops;
-	}
-
 	target = xt_request_find_target(family, tg_name, rev);
 	if (IS_ERR(target))
 		return ERR_PTR(-ENOENT);
@@ -867,38 +801,43 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	/* This is the first time we use this target, allocate operations */
-	nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
-	if (nft_target == NULL) {
+	ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+	if (!ops) {
 		err = -ENOMEM;
 		goto err;
 	}
 
-	nft_target->refcnt = 0;
-	nft_target->ops.type = &nft_target_type;
-	nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
-	nft_target->ops.init = nft_target_init;
-	nft_target->ops.destroy = nft_target_destroy;
-	nft_target->ops.dump = nft_target_dump;
-	nft_target->ops.validate = nft_target_validate;
-	nft_target->ops.data = target;
+	ops->type = &nft_target_type;
+	ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
+	ops->init = nft_target_init;
+	ops->destroy = nft_target_destroy;
+	ops->dump = nft_target_dump;
+	ops->validate = nft_target_validate;
+	ops->data = target;
 
 	if (family == NFPROTO_BRIDGE)
-		nft_target->ops.eval = nft_target_eval_bridge;
+		ops->eval = nft_target_eval_bridge;
 	else
-		nft_target->ops.eval = nft_target_eval_xt;
+		ops->eval = nft_target_eval_xt;
 
-	list_add(&nft_target->head, &nft_target_list);
-
-	return &nft_target->ops;
+	return ops;
 err:
 	module_put(target->me);
 	return ERR_PTR(err);
 }
 
+static void nft_target_release_ops(const struct nft_expr_ops *ops)
+{
+	struct xt_target *target = ops->data;
+
+	module_put(target->me);
+	kfree(ops);
+}
+
 static struct nft_expr_type nft_target_type __read_mostly = {
 	.name		= "target",
 	.select_ops	= nft_target_select_ops,
+	.release_ops	= nft_target_release_ops,
 	.policy		= nft_target_policy,
 	.maxattr	= NFTA_TARGET_MAX,
 	.owner		= THIS_MODULE,
@@ -923,7 +862,6 @@ static int __init nft_compat_module_init(void)
 	}
 
 	return ret;
-
 err_target:
 	nft_unregister_expr(&nft_target_type);
 err_match:
@@ -933,32 +871,6 @@ static int __init nft_compat_module_init(void)
 
 static void __exit nft_compat_module_exit(void)
 {
-	struct nft_xt *xt, *next;
-
-	/* list should be empty here, it can be non-empty only in case there
-	 * was an error that caused nft_xt expr to not be initialized fully
-	 * and noone else requested the same expression later.
-	 *
-	 * In this case, the lists contain 0-refcount entries that still
-	 * hold module reference.
-	 */
-	list_for_each_entry_safe(xt, next, &nft_target_list, head) {
-		struct xt_target *target = xt->ops.data;
-
-		if (WARN_ON_ONCE(xt->refcnt))
-			continue;
-		module_put(target->me);
-		kfree(xt);
-	}
-
-	list_for_each_entry_safe(xt, next, &nft_match_list, head) {
-		struct xt_match *match = xt->ops.data;
-
-		if (WARN_ON_ONCE(xt->refcnt))
-			continue;
-		module_put(match->me);
-		kfree(xt);
-	}
 	nfnetlink_subsys_unregister(&nfnl_compat_subsys);
 	nft_unregister_expr(&nft_target_type);
 	nft_unregister_expr(&nft_match_type);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6e91a37..eb7f9a5 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -235,14 +235,32 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	return err;
 }
 
+static void nft_dynset_deactivate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
+{
+	struct nft_dynset *priv = nft_expr_priv(expr);
+
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_dynset *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
+}
+
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
 	if (priv->expr != NULL)
 		nft_expr_destroy(ctx, priv->expr);
+
+	nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -279,6 +297,8 @@ static const struct nft_expr_ops nft_dynset_ops = {
 	.eval		= nft_dynset_eval,
 	.init		= nft_dynset_init,
 	.destroy	= nft_dynset_destroy,
+	.activate	= nft_dynset_activate,
+	.deactivate	= nft_dynset_deactivate,
 	.dump		= nft_dynset_dump,
 };
 
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 0777a93..3f6d1d2 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
-				     const struct nft_expr *expr)
+				     const struct nft_expr *expr,
+				     enum nft_trans_phase phase)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 
+	if (phase == NFT_TRANS_COMMIT)
+		return;
+
 	return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index ad13e86..161c345 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,12 +121,29 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
 	return 0;
 }
 
+static void nft_lookup_deactivate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
+}
+
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -209,6 +226,8 @@ static const struct nft_expr_ops nft_lookup_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
 	.eval		= nft_lookup_eval,
 	.init		= nft_lookup_init,
+	.activate	= nft_lookup_activate,
+	.deactivate	= nft_lookup_deactivate,
 	.destroy	= nft_lookup_destroy,
 	.dump		= nft_lookup_dump,
 	.validate	= nft_lookup_validate,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index cdf348f..bf92a40d 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -64,21 +64,34 @@ static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	return -1;
 }
 
-static void nft_objref_destroy(const struct nft_ctx *ctx,
-			       const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
 {
 	struct nft_object *obj = nft_objref_priv(expr);
 
+	if (phase == NFT_TRANS_COMMIT)
+		return;
+
 	obj->use--;
 }
 
+static void nft_objref_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_object *obj = nft_objref_priv(expr);
+
+	obj->use++;
+}
+
 static struct nft_expr_type nft_objref_type;
 static const struct nft_expr_ops nft_objref_ops = {
 	.type		= &nft_objref_type,
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_object *)),
 	.eval		= nft_objref_eval,
 	.init		= nft_objref_init,
-	.destroy	= nft_objref_destroy,
+	.activate	= nft_objref_activate,
+	.deactivate	= nft_objref_deactivate,
 	.dump		= nft_objref_dump,
 };
 
@@ -155,12 +168,29 @@ static int nft_objref_map_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	return -1;
 }
 
+static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
+				      const struct nft_expr *expr,
+				      enum nft_trans_phase phase)
+{
+	struct nft_objref_map *priv = nft_expr_priv(expr);
+
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+				    const struct nft_expr *expr)
+{
+	struct nft_objref_map *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
+}
+
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
 				   const struct nft_expr *expr)
 {
 	struct nft_objref_map *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	nf_tables_destroy_set(ctx, priv->set);
 }
 
 static struct nft_expr_type nft_objref_type;
@@ -169,6 +199,8 @@ static const struct nft_expr_ops nft_objref_map_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
 	.eval		= nft_objref_map_eval,
 	.init		= nft_objref_map_init,
+	.activate	= nft_objref_map_activate,
+	.deactivate	= nft_objref_map_deactivate,
 	.destroy	= nft_objref_map_destroy,
 	.dump		= nft_objref_map_dump,
 };
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 0e5ec12..b3e75f9 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -302,10 +302,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
 		else if (d > 0)
 			parent = parent->rb_right;
 		else {
-			if (!nft_set_elem_active(&rbe->ext, genmask)) {
-				parent = parent->rb_left;
-				continue;
-			}
 			if (nft_rbtree_interval_end(rbe) &&
 			    !nft_rbtree_interval_end(this)) {
 				parent = parent->rb_left;
@@ -314,6 +310,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
 				   nft_rbtree_interval_end(this)) {
 				parent = parent->rb_right;
 				continue;
+			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+				parent = parent->rb_left;
+				continue;
 			}
 			nft_rbtree_flush(net, set, rbe);
 			return rbe;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 3a1deec..fc599ce 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -51,19 +51,13 @@
 #include <net/sock.h>
 #include <net/inet_sock.h>
 
-struct idletimer_tg_attr {
-	struct attribute attr;
-	ssize_t	(*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-};
-
 struct idletimer_tg {
 	struct list_head entry;
 	struct timer_list timer;
 	struct work_struct work;
 
 	struct kobject *kobj;
-	struct idletimer_tg_attr attr;
+	struct device_attribute attr;
 
 	struct timespec delayed_timer_trigger;
 	struct timespec last_modified_timer;
@@ -185,8 +179,8 @@ struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 	return NULL;
 }
 
-static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
-				 char *buf)
+static ssize_t idletimer_tg_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct idletimer_tg *timer;
 	unsigned long expires = 0;
@@ -194,7 +188,7 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 
 	mutex_lock(&list_mutex);
 
-	timer =	__idletimer_tg_find_by_label(attr->name);
+	timer =	__idletimer_tg_find_by_label(attr->attr.name);
 	if (timer)
 		expires = timer->timer.expires;
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d3144d..71ffd1a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
 	int i;
 	int rc = proto_register(&nr_proto, 0);
 
-	if (rc != 0)
-		goto out;
+	if (rc)
+		return rc;
 
 	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-		return -1;
+		pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+		       __func__);
+		rc = -EINVAL;
+		goto unregister_proto;
 	}
 
 	dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-	if (dev_nr == NULL) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-		return -1;
+	if (!dev_nr) {
+		pr_err("NET/ROM: %s - unable to allocate device array\n",
+		       __func__);
+		rc = -ENOMEM;
+		goto unregister_proto;
 	}
 
 	for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
 		sprintf(name, "nr%d", i);
 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
 		if (!dev) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+			rc = -ENOMEM;
 			goto fail;
 		}
 
 		dev->base_addr = i;
-		if (register_netdev(dev)) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+		rc = register_netdev(dev);
+		if (rc) {
 			free_netdev(dev);
 			goto fail;
 		}
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
 		dev_nr[i] = dev;
 	}
 
-	if (sock_register(&nr_family_ops)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+	rc = sock_register(&nr_family_ops);
+	if (rc)
 		goto fail;
-	}
 
-	register_netdevice_notifier(&nr_dev_notifier);
+	rc = register_netdevice_notifier(&nr_dev_notifier);
+	if (rc)
+		goto out_sock;
 
 	ax25_register_pid(&nr_pid);
 	ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-	nr_register_sysctl();
+	rc = nr_register_sysctl();
+	if (rc)
+		goto out_sysctl;
 #endif
 
 	nr_loopback_init();
 
-	proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-	proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-	proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-	return rc;
+	rc = -ENOMEM;
+	if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+		goto proc_remove1;
+	if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+			     &nr_neigh_seqops))
+		goto proc_remove2;
+	if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+			     &nr_node_seqops))
+		goto proc_remove3;
+
+	return 0;
+
+proc_remove3:
+	remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+	remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+	nr_loopback_clear();
+	nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+	nr_unregister_sysctl();
+out_sysctl:
+#endif
+	ax25_linkfail_release(&nr_linkfail_notifier);
+	ax25_protocol_release(AX25_P_NETROM);
+	unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+	sock_unregister(PF_NETROM);
 fail:
 	while (--i >= 0) {
 		unregister_netdev(dev_nr[i]);
 		free_netdev(dev_nr[i]);
 	}
 	kfree(dev_nr);
+unregister_proto:
 	proto_unregister(&nr_proto);
-	rc = -1;
-	goto out;
+	return rc;
 }
 
 module_init(nr_proto_init);
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 215ad22..93d13f0 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
 	}
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
 	del_timer_sync(&loopback_timer);
 	skb_queue_purge(&loopback_queue);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 6485f59..b76aa66 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *	Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
 	struct nr_neigh *s = NULL;
 	struct nr_node  *t = NULL;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index ba1c368..771011b 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
 	{ }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
 	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+	if (!nr_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void nr_unregister_sysctl(void)
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index ddfc52a..c0d323b 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
 		create_info = (struct nci_hci_create_pipe_resp *)skb->data;
 		dest_gate = create_info->dest_gate;
 		new_pipe = create_info->pipe;
+		if (new_pipe >= NCI_HCI_MAX_PIPES) {
+			status = NCI_HCI_ANY_E_NOK;
+			goto exit;
+		}
 
 		/* Save the new created pipe and bind with local gate,
 		 * the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
 			goto exit;
 		}
 		delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+		if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+			status = NCI_HCI_ANY_E_NOK;
+			goto exit;
+		}
 
 		ndev->hci_dev->pipes[delete_info->pipe].gate =
 						NCI_HCI_INVALID_GATE;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7e727131..2fec256 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2603,8 +2603,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	void *ph;
 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+	unsigned char *addr = NULL;
 	int tp_len, size_max;
-	unsigned char *addr;
 	void *data;
 	int len_sum = 0;
 	int status = TP_STATUS_AVAILABLE;
@@ -2615,7 +2615,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2625,10 +2624,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 						sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_put;
+		if (po->sk.sk_socket->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_put;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -2800,7 +2802,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
-	unsigned char *addr;
+	unsigned char *addr = NULL;
 	int err, reserve = 0;
 	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2817,7 +2819,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2825,10 +2826,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_unlock;
+		if (sock->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_unlock;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -4574,14 +4578,29 @@ static void __exit packet_exit(void)
 
 static int __init packet_init(void)
 {
-	int rc = proto_register(&packet_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&packet_proto, 0);
+	if (rc)
 		goto out;
+	rc = sock_register(&packet_family_ops);
+	if (rc)
+		goto out_proto;
+	rc = register_pernet_subsys(&packet_net_ops);
+	if (rc)
+		goto out_sock;
+	rc = register_netdevice_notifier(&packet_netdev_notifier);
+	if (rc)
+		goto out_pernet;
 
-	sock_register(&packet_family_ops);
-	register_pernet_subsys(&packet_net_ops);
-	register_netdevice_notifier(&packet_netdev_notifier);
+	return 0;
+
+out_pernet:
+	unregister_pernet_subsys(&packet_net_ops);
+out_sock:
+	sock_unregister(PF_PACKET);
+out_proto:
+	proto_unregister(&packet_proto);
 out:
 	return rc;
 }
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index 661b770..b864b6b 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -16,6 +16,7 @@ struct qrtr_mhi_dev {
 	struct device *dev;
 	spinlock_t ul_lock;		/* lock to protect ul_pkts */
 	struct list_head ul_pkts;
+	atomic_t in_reset;
 };
 
 struct qrtr_mhi_pkt {
@@ -59,14 +60,33 @@ static void qcom_mhi_qrtr_ul_callback(struct mhi_device *mhi_dev,
 {
 	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
 	struct qrtr_mhi_pkt *pkt;
+	unsigned long flags;
 
-	spin_lock_bh(&qdev->ul_lock);
+	spin_lock_irqsave(&qdev->ul_lock, flags);
 	pkt = list_first_entry(&qdev->ul_pkts, struct qrtr_mhi_pkt, node);
 	list_del(&pkt->node);
 	complete_all(&pkt->done);
 
 	kref_put(&pkt->refcount, qrtr_mhi_pkt_release);
-	spin_unlock_bh(&qdev->ul_lock);
+	spin_unlock_irqrestore(&qdev->ul_lock, flags);
+}
+
+/* fatal error */
+static void qcom_mhi_qrtr_status_callback(struct mhi_device *mhi_dev,
+					  enum MHI_CB mhi_cb)
+{
+	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+	struct qrtr_mhi_pkt *pkt;
+	unsigned long flags;
+
+	if (mhi_cb != MHI_CB_FATAL_ERROR)
+		return;
+
+	atomic_inc(&qdev->in_reset);
+	spin_lock_irqsave(&qdev->ul_lock, flags);
+	list_for_each_entry(pkt, &qdev->ul_pkts, node)
+		complete_all(&pkt->done);
+	spin_unlock_irqrestore(&qdev->ul_lock, flags);
 }
 
 /* from qrtr to mhi */
@@ -109,10 +129,12 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
 		sock_hold(skb->sk);
 
 	rc = wait_for_completion_interruptible_timeout(&pkt->done, HZ * 5);
-	if (rc > 0)
-		rc = 0;
+	if (atomic_read(&qdev->in_reset))
+		rc = -ECONNRESET;
 	else if (rc == 0)
 		rc = -ETIMEDOUT;
+	else if (rc > 0)
+		rc = 0;
 
 	kref_put(&pkt->refcount, qrtr_mhi_pkt_release);
 	return rc;
@@ -132,6 +154,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 	qdev->mhi_dev = mhi_dev;
 	qdev->dev = &mhi_dev->dev;
 	qdev->ep.xmit = qcom_mhi_qrtr_send;
+	atomic_set(&qdev->in_reset, 0);
 
 	rc = of_property_read_u32(mhi_dev->dev.of_node, "qcom,net-id", &net_id);
 	if (rc < 0)
@@ -169,6 +192,7 @@ static struct mhi_driver qcom_mhi_qrtr_driver = {
 	.remove = qcom_mhi_qrtr_remove,
 	.dl_xfer_cb = qcom_mhi_qrtr_dl_callback,
 	.ul_xfer_cb = qcom_mhi_qrtr_ul_callback,
+	.status_cb = qcom_mhi_qrtr_status_callback,
 	.id_table = qcom_mhi_qrtr_mhi_match,
 	.driver = {
 		.name = "qcom_mhi_qrtr",
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5c3d455..d994a90 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -674,6 +674,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	struct sk_buff *skb;
 	struct qrtr_cb *cb;
 	unsigned int size;
+	int err = -ENOMEM;
+	int frag = false;
 	unsigned int ver;
 	size_t hdrlen;
 
@@ -681,8 +683,14 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 		return -EINVAL;
 
 	skb = netdev_alloc_skb(NULL, len);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		skb = alloc_skb_with_frags(0, len, 0, &err, GFP_ATOMIC);
+		if (!skb) {
+			pr_err("%s memory allocation failed\n", __func__);
+			return -ENOMEM;
+		}
+		frag = true;
+	}
 
 	cb = (struct qrtr_cb *)skb->cb;
 
@@ -736,7 +744,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	    cb->type != QRTR_TYPE_RESUME_TX)
 		goto err;
 
-	skb_put_data(skb, data + hdrlen, size);
+	if (frag) {
+		skb->data_len = size;
+		skb->len = size;
+		skb_store_bits(skb, 0, data + hdrlen, size);
+	} else {
+		skb_put_data(skb, data + hdrlen, size);
+	}
 	qrtr_log_rx_msg(node, skb);
 
 	skb_queue_tail(&node->rx_queue, skb);
@@ -891,8 +905,11 @@ static void qrtr_node_rx_work(struct kthread_work *work)
 			if (!ipc) {
 				kfree_skb(skb);
 			} else {
-				if (sock_queue_rcv_skb(&ipc->sk, skb))
+				if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+					pr_err("%s qrtr pkt dropped flow[%d]\n",
+					       __func__, cb->confirm_rx);
 					kfree_skb(skb);
+				}
 
 				qrtr_port_put(ipc);
 			}
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 65387e1..cd7e01e 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct rds_sock *rs = rds_sk_to_rs(sk);
 	int ret = 0;
 
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	switch (uaddr->sa_family) {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 17c9d9f..0f4398e 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* We allow an RDS socket to be bound to either IPv4 or IPv6
 	 * address.
 	 */
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
 	if (uaddr->sa_family == AF_INET) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index e0f70c4..01e764f 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
 	else
 		pool = rds_ibdev->mr_1m_pool;
 
+	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+	/* Switch pools if one of the pool is reaching upper limit */
+	if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			pool = rds_ibdev->mr_1m_pool;
+		else
+			pool = rds_ibdev->mr_8k_pool;
+	}
+
 	ibmr = rds_ib_try_reuse_ibmr(pool);
 	if (ibmr)
 		return ibmr;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 63c8d10..d664e9a 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
 	struct rds_ib_mr *ibmr = NULL;
 	int iter = 0;
 
-	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
-		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
 	while (1) {
 		ibmr = rds_ib_reuse_mr(pool);
 		if (ibmr)
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 7af4f99..094a662 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 
 static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
 static struct timer_list loopback_timer;
 
 static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
 
 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
 {
-	struct sk_buff *skbn;
+	struct sk_buff *skbn = NULL;
 
-	skbn = skb_clone(skb, GFP_ATOMIC);
+	if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+		skbn = skb_clone(skb, GFP_ATOMIC);
 
-	kfree_skb(skb);
-
-	if (skbn != NULL) {
+	if (skbn) {
+		consume_skb(skb);
 		skb_queue_tail(&loopback_queue, skbn);
 
 		if (!rose_loopback_running())
 			rose_set_loopback_timer();
+	} else {
+		kfree_skb(skb);
 	}
 
 	return 1;
 }
 
-
 static void rose_set_loopback_timer(void)
 {
-	del_timer(&loopback_timer);
-
-	loopback_timer.expires  = jiffies + 10;
-	add_timer(&loopback_timer);
+	mod_timer(&loopback_timer, jiffies + 10);
 }
 
 static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
 	struct sock *sk;
 	unsigned short frametype;
 	unsigned int lci_i, lci_o;
+	int count;
 
-	while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+	for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+		skb = skb_dequeue(&loopback_queue);
+		if (!skb)
+			return;
 		if (skb->len < ROSE_MIN_LEN) {
 			kfree_skb(skb);
 			continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
 			kfree_skb(skb);
 		}
 	}
+	if (!skb_queue_empty(&loopback_queue))
+		mod_timer(&loopback_timer, jiffies + 1);
 }
 
 void __exit rose_loopback_clear(void)
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8f1a8f8..215f4d9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -701,30 +701,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
 	_enter("");
 
-	if (list_empty(&rxnet->calls))
-		return;
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);
 
-	write_lock(&rxnet->call_lock);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);
 
-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
+			rxrpc_see_call(call);
+			list_del_init(&call->link);
 
-		rxrpc_see_call(call);
-		list_del_init(&call->link);
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);
 
-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}
 
 		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
 	}
 
-	write_unlock(&rxnet->call_lock);
-
 	atomic_dec(&rxnet->nr_calls);
 	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 570b49d..d591f54 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * handle data received on the local endpoint
  * - may be called in interrupt context
  *
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP from
+ * being cleared in the middle of processing this function.
  *
  * Called with the RCU read lock held from the IP layer via UDP.
  */
 int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
+	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
 	struct rxrpc_connection *conn;
 	struct rxrpc_channel *chan;
 	struct rxrpc_call *call = NULL;
 	struct rxrpc_skb_priv *sp;
-	struct rxrpc_local *local = udp_sk->sk_user_data;
 	struct rxrpc_peer *peer = NULL;
 	struct rxrpc_sock *rx = NULL;
 	unsigned int channel;
@@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 
 	_enter("%p", udp_sk);
 
+	if (unlikely(!local)) {
+		kfree_skb(skb);
+		return 0;
+	}
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
 
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 0906e51..10317db 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -304,7 +304,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	ret = -ENOMEM;
 sock_error:
 	mutex_unlock(&rxnet->local_mutex);
-	kfree(local);
+	if (local)
+		call_rcu(&local->rcu, rxrpc_local_rcu);
 	_leave(" = %d", ret);
 	return ERR_PTR(ret);
 
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 793016d..9fd37d9 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	return idx + (tin << 16);
 }
 
-static void cake_wash_diffserv(struct sk_buff *skb)
-{
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
-		break;
-	case htons(ETH_P_IPV6):
-		ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
-		break;
-	default:
-		break;
-	}
-}
-
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+	int wlen = skb_network_offset(skb);
 	u8 dscp;
 
-	switch (skb->protocol) {
+	switch (tc_skb_protocol(skb)) {
 	case htons(ETH_P_IP):
+		wlen += sizeof(struct iphdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
 		return dscp;
 
 	case htons(ETH_P_IPV6):
+		wlen += sizeof(struct ipv6hdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
@@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	u32 tin;
+	u8 dscp;
 
-	if (TC_H_MAJ(skb->priority) == sch->handle &&
-	    TC_H_MIN(skb->priority) > 0 &&
-	    TC_H_MIN(skb->priority) <= q->tin_cnt) {
+	/* Tin selection: Default to diffserv-based selection, allow overriding
+	 * using firewall marks or skb->priority.
+	 */
+	dscp = cake_handle_diffserv(skb,
+				    q->rate_flags & CAKE_FLAG_WASH);
+
+	if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
+		tin = 0;
+
+	else if (TC_H_MAJ(skb->priority) == sch->handle &&
+		 TC_H_MIN(skb->priority) > 0 &&
+		 TC_H_MIN(skb->priority) <= q->tin_cnt)
 		tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
 
-		if (q->rate_flags & CAKE_FLAG_WASH)
-			cake_wash_diffserv(skb);
-	} else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
-		/* extract the Diffserv Precedence field, if it exists */
-		/* and clear DSCP bits if washing */
-		tin = q->tin_index[cake_handle_diffserv(skb,
-				q->rate_flags & CAKE_FLAG_WASH)];
+	else {
+		tin = q->tin_index[dscp];
+
 		if (unlikely(tin >= q->tin_cnt))
 			tin = 0;
-	} else {
-		tin = 0;
-		if (q->rate_flags & CAKE_FLAG_WASH)
-			cake_wash_diffserv(skb);
 	}
 
 	return &q->tins[tin];
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 85d3930..48fe8f0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
 }
 
 
-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
-	struct net *net = sock_net(asoc->base.sk);
-
-	/* Send the next asconf chunk from the addip chunk
-	 * queue.
-	 */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		struct sctp_chunk *asconf = list_entry(entry,
-						struct sctp_chunk, list);
-		list_del_init(entry);
-
-		/* Hold the chunk until an ASCONF_ACK is received. */
-		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(net, asoc, asconf))
-			sctp_chunk_free(asconf);
-		else
-			asoc->addip_last_asconf = asconf;
-	}
-}
-
-
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			}
 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
 			break;
-		case SCTP_CMD_SEND_NEXT_ASCONF:
-			sctp_cmd_send_asconf(asoc);
-			break;
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae340..713a669 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
 	return SCTP_DISPOSITION_CONSUME;
 }
 
+static enum sctp_disposition sctp_send_next_asconf(
+					struct net *net,
+					const struct sctp_endpoint *ep,
+					struct sctp_association *asoc,
+					const union sctp_subtype type,
+					struct sctp_cmd_seq *commands)
+{
+	struct sctp_chunk *asconf;
+	struct list_head *entry;
+
+	if (list_empty(&asoc->addip_chunk_list))
+		return SCTP_DISPOSITION_CONSUME;
+
+	entry = asoc->addip_chunk_list.next;
+	asconf = list_entry(entry, struct sctp_chunk, list);
+
+	list_del_init(entry);
+	sctp_chunk_hold(asconf);
+	asoc->addip_last_asconf = asconf;
+
+	return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
 /*
  * ADDIP Section 4.3 General rules for address manipulation
  * When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
 		if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-					     asconf_ack)) {
-			/* Successfully processed ASCONF_ACK.  We can
-			 * release the next asconf if we have one.
-			 */
-			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
-					SCTP_NULL());
-			return SCTP_DISPOSITION_CONSUME;
-		}
+					     asconf_ack))
+			return sctp_send_next_asconf(net, ep,
+					(struct sctp_association *)asoc,
+							type, commands);
 
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(struct sctp_errhdr));
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index da1a676..0f4e427 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
 			/* We are going to append to the frags_list of head.
 			 * Need to unshare the frag_list.
 			 */
-			if (skb_has_frag_list(head)) {
-				err = skb_unclone(head, GFP_ATOMIC);
-				if (err) {
-					STRP_STATS_INCR(strp->stats.mem_fail);
-					desc->error = err;
-					return 0;
-				}
+			err = skb_unclone(head, GFP_ATOMIC);
+			if (err) {
+				STRP_STATS_INCR(strp->stats.mem_fail);
+				desc->error = err;
+				return 0;
 			}
 
 			if (unlikely(skb_shinfo(head)->frag_list)) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index b6e8ecc..214440c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
 	h->last_refresh = now;
 }
 
+static inline int cache_is_valid(struct cache_head *h);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
 				struct cache_detail *detail);
 static void cache_fresh_unlocked(struct cache_head *head,
@@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 			if (cache_is_expired(detail, tmp)) {
 				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
+				if (cache_is_valid(tmp) == -EAGAIN)
+					set_bit(CACHE_NEGATIVE, &tmp->flags);
 				cache_fresh_locked(tmp, 0, detail);
 				freeme = tmp;
 				break;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 5b38f51..d7b0688 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -66,6 +66,10 @@ static int __net_init tipc_init_net(struct net *net)
 	INIT_LIST_HEAD(&tn->node_list);
 	spin_lock_init(&tn->node_list_lock);
 
+	err = tipc_socket_init();
+	if (err)
+		goto out_socket;
+
 	err = tipc_sk_rht_init(net);
 	if (err)
 		goto out_sk_rht;
@@ -92,6 +96,8 @@ static int __net_init tipc_init_net(struct net *net)
 out_nametbl:
 	tipc_sk_rht_destroy(net);
 out_sk_rht:
+	tipc_socket_stop();
+out_socket:
 	return err;
 }
 
@@ -102,6 +108,7 @@ static void __net_exit tipc_exit_net(struct net *net)
 	tipc_bcast_stop(net);
 	tipc_nametbl_stop(net);
 	tipc_sk_rht_destroy(net);
+	tipc_socket_stop();
 }
 
 static struct pernet_operations tipc_net_ops = {
@@ -129,10 +136,6 @@ static int __init tipc_init(void)
 	if (err)
 		goto out_netlink_compat;
 
-	err = tipc_socket_init();
-	if (err)
-		goto out_socket;
-
 	err = tipc_register_sysctl();
 	if (err)
 		goto out_sysctl;
@@ -152,8 +155,6 @@ static int __init tipc_init(void)
 out_pernet:
 	tipc_unregister_sysctl();
 out_sysctl:
-	tipc_socket_stop();
-out_socket:
 	tipc_netlink_compat_stop();
 out_netlink_compat:
 	tipc_netlink_stop();
@@ -168,7 +169,6 @@ static void __exit tipc_exit(void)
 	unregister_pernet_subsys(&tipc_net_ops);
 	tipc_netlink_stop();
 	tipc_netlink_compat_stop();
-	tipc_socket_stop();
 	tipc_unregister_sysctl();
 
 	pr_info("Deactivated\n");
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 66d5b2c..d72985c 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -908,7 +908,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
 	for (; i < TIPC_NAMETBL_SIZE; i++) {
 		head = &tn->nametbl->services[i];
 
-		if (*last_type) {
+		if (*last_type ||
+		    (!i && *last_key && (*last_lower == *last_key))) {
 			service = tipc_service_find(net, *last_type);
 			if (!service)
 				return -EPIPE;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 0b21187..588d5aa 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 	if (msg->rep_type)
 		tipc_tlv_init(msg->rep, msg->rep_type);
 
-	if (cmd->header)
-		(*cmd->header)(msg);
+	if (cmd->header) {
+		err = (*cmd->header)(msg);
+		if (err) {
+			kfree_skb(msg->rep);
+			msg->rep = NULL;
+			return err;
+		}
+	}
 
 	arg = nlmsg_new(0, GFP_KERNEL);
 	if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!bearer)
 		return -EMSGSIZE;
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_bearer_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
 	if (!string_is_valid(b->name, len))
 		return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_link_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(lc->name, len))
 		return -EINVAL;
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 67a7b31..6c91f12 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -726,11 +726,11 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 
 	switch (sk->sk_state) {
 	case TIPC_ESTABLISHED:
-	case TIPC_CONNECTING:
 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 			revents |= EPOLLOUT;
 		/* fall thru' */
 	case TIPC_LISTEN:
+	case TIPC_CONNECTING:
 		if (!skb_queue_empty(&sk->sk_receive_queue))
 			revents |= EPOLLIN | EPOLLRDNORM;
 		break;
@@ -2039,7 +2039,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 			return true;
 
 		/* If empty 'ACK-' message, wake up sleeping connect() */
-		sk->sk_data_ready(sk);
+		sk->sk_state_change(sk);
 
 		/* 'ACK-' message is neither accepted nor rejected: */
 		msg_set_dest_droppable(hdr, 1);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 961b07d..fdf22cb 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-	if (ctx->tx_conf == TLS_HW)
+	if (ctx->tx_conf == TLS_HW) {
 		kfree(tls_offload_ctx_tx(ctx));
+		kfree(ctx->tx.rec_seq);
+		kfree(ctx->tx.iv);
+	}
 
 	if (ctx->rx_conf == TLS_HW)
 		kfree(tls_offload_ctx_rx(ctx));
@@ -569,7 +572,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
 	struct strp_msg *rxm = strp_msg(skb);
-	int err = 0, offset = rxm->offset, copy, nsg;
+	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
 	struct sk_buff *skb_iter, *unused;
 	struct scatterlist sg[1];
 	char *orig_buf, *buf;
@@ -600,27 +603,44 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;
 
-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 
-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
-	offset += copy;
-	buf += copy;
-
-	skb_walk_frags(skb, skb_iter) {
-		copy = min_t(int, skb_iter->len,
-			     rxm->full_len - offset + rxm->offset -
-			     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
-
-		if (skb_iter->decrypted)
-			skb_store_bits(skb_iter, offset, buf, copy);
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);
 
 		offset += copy;
 		buf += copy;
 	}
 
+	pos = skb_pagelen(skb);
+	skb_walk_frags(skb, skb_iter) {
+		int frag_pos;
+
+		/* Practically all frags must belong to msg if reencrypt
+		 * is needed with current strparser and coalescing logic,
+		 * but strparser may "get optimized", so let's be safe.
+		 */
+		if (pos + skb_iter->len <= offset)
+			goto done_with_frag;
+		if (pos >= data_len + rxm->offset)
+			break;
+
+		frag_pos = offset - pos;
+		copy = min_t(int, skb_iter->len - frag_pos,
+			     data_len + rxm->offset - offset);
+
+		if (skb_iter->decrypted)
+			skb_store_bits(skb_iter, frag_pos, buf, copy);
+
+		offset += copy;
+		buf += copy;
+done_with_frag:
+		pos += skb_iter->len;
+	}
+
 free_buf:
 	kfree(orig_buf);
 	return err;
@@ -874,7 +894,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	goto release_netdev;
 
 free_sw_resources:
+	up_read(&device_offload_lock);
 	tls_sw_free_resources_rx(sk);
+	down_read(&device_offload_lock);
 release_ctx:
 	ctx->priv_ctx_rx = NULL;
 release_netdev:
@@ -909,8 +931,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
 	}
 out:
 	up_read(&device_offload_lock);
-	kfree(tls_ctx->rx.rec_seq);
-	kfree(tls_ctx->rx.iv);
 	tls_sw_release_resources_rx(sk);
 }
 
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6db..426dd97 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -193,18 +193,26 @@ static void update_chksum(struct sk_buff *skb, int headln)
 
 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 {
+	struct sock *sk = skb->sk;
+	int delta;
+
 	skb_copy_header(nskb, skb);
 
 	skb_put(nskb, skb->len);
 	memcpy(nskb->data, skb->data, headln);
-	update_chksum(nskb, headln);
 
 	nskb->destructor = skb->destructor;
-	nskb->sk = skb->sk;
+	nskb->sk = sk;
 	skb->destructor = NULL;
 	skb->sk = NULL;
-	refcount_add(nskb->truesize - skb->truesize,
-		     &nskb->sk->sk_wmem_alloc);
+
+	update_chksum(nskb, headln);
+
+	delta = nskb->truesize - skb->truesize;
+	if (likely(delta < 0))
+		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+	else if (delta)
+		refcount_add(delta, &sk->sk_wmem_alloc);
 }
 
 /* This function may be called after the user socket is already
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index a091c03..25b3fb58 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -290,11 +290,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 		tls_sw_free_resources_tx(sk);
 	}
 
-	if (ctx->rx_conf == TLS_SW) {
-		kfree(ctx->rx.rec_seq);
-		kfree(ctx->rx.iv);
+	if (ctx->rx_conf == TLS_SW)
 		tls_sw_free_resources_rx(sk);
-	}
 
 #ifdef CONFIG_TLS_DEVICE
 	if (ctx->rx_conf == TLS_HW)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index b9c6ecf..6848a81 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1118,6 +1118,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 
+	kfree(tls_ctx->rx.rec_seq);
+	kfree(tls_ctx->rx.iv);
+
 	if (ctx->aead_recv) {
 		kfree_skb(ctx->recv_pkt);
 		ctx->recv_pkt = NULL;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 15eb5d3..96ab344 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
 	if (!virtio_vsock_workqueue)
 		return -ENOMEM;
 
-	ret = register_virtio_driver(&virtio_vsock_driver);
+	ret = vsock_core_init(&virtio_transport.transport);
 	if (ret)
 		goto out_wq;
 
-	ret = vsock_core_init(&virtio_transport.transport);
+	ret = register_virtio_driver(&virtio_vsock_driver);
 	if (ret)
-		goto out_vdr;
+		goto out_vci;
 
 	return 0;
 
-out_vdr:
-	unregister_virtio_driver(&virtio_vsock_driver);
+out_vci:
+	vsock_core_exit();
 out_wq:
 	destroy_workqueue(virtio_vsock_workqueue);
 	return ret;
-
 }
 
 static void __exit virtio_vsock_exit(void)
 {
-	vsock_core_exit();
 	unregister_virtio_driver(&virtio_vsock_driver);
+	vsock_core_exit();
 	destroy_workqueue(virtio_vsock_workqueue);
 }
 
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 3ae3a33..f3f3d06 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
  */
 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 {
+	const struct virtio_transport *t;
+	struct virtio_vsock_pkt *reply;
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RST,
 		.type = le16_to_cpu(pkt->hdr.type),
@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 		return 0;
 
-	pkt = virtio_transport_alloc_pkt(&info, 0,
-					 le64_to_cpu(pkt->hdr.dst_cid),
-					 le32_to_cpu(pkt->hdr.dst_port),
-					 le64_to_cpu(pkt->hdr.src_cid),
-					 le32_to_cpu(pkt->hdr.src_port));
-	if (!pkt)
+	reply = virtio_transport_alloc_pkt(&info, 0,
+					   le64_to_cpu(pkt->hdr.dst_cid),
+					   le32_to_cpu(pkt->hdr.dst_port),
+					   le64_to_cpu(pkt->hdr.src_cid),
+					   le32_to_cpu(pkt->hdr.src_port));
+	if (!reply)
 		return -ENOMEM;
 
-	return virtio_transport_get_ops()->send_pkt(pkt);
+	t = virtio_transport_get_ops();
+	if (!t) {
+		virtio_transport_free_pkt(reply);
+		return -ENOTCONN;
+	}
+
+	return t->send_pkt(reply);
 }
 
 static void virtio_transport_wait_close(struct sock *sk, long timeout)
@@ -778,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
 
 void virtio_transport_release(struct vsock_sock *vsk)
 {
+	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt, *tmp;
 	struct sock *sk = &vsk->sk;
 	bool remove_sock = true;
 
 	lock_sock(sk);
 	if (sk->sk_type == SOCK_STREAM)
 		remove_sock = virtio_transport_close(vsk);
+
+	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
 	release_sock(sk);
 
 	if (remove_sock)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index eab7561..840cfc6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9714,7 +9714,10 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
+	    !(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP &&
+	      wiphy_ext_feature_isset(&rdev->wiphy,
+				      NL80211_EXT_FEATURE_AP_PMKSA_CACHING)))
 		return -EOPNOTSUPP;
 
 	switch (info->genlhdr->cmd) {
@@ -11457,9 +11460,6 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		return err;
 
-	if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
-	    !tb[NL80211_REKEY_DATA_KCK])
-		return -EINVAL;
 	if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] ||
 	    (!wiphy_ext_feature_isset(&rdev->wiphy,
 				      NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
@@ -13443,7 +13443,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13494,7 +13495,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13502,7 +13504,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DISCONNECT,
@@ -13531,7 +13534,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEL_PMKSA,
@@ -13883,7 +13887,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_SET_QOS_MAP,
@@ -13938,7 +13943,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_set_pmk,
 		.policy = nl80211_policy,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEL_PMK,
@@ -15493,6 +15499,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
 
 	wdev->chandef = *chandef;
 	wdev->preset_chandef = *chandef;
+
+	if (wdev->iftype == NL80211_IFTYPE_STATION &&
+	    !WARN_ON(!wdev->current_bss))
+		wdev->current_bss->pub.channel = chandef->chan;
+
 	nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
 				 NL80211_CMD_CH_SWITCH_NOTIFY, 0);
 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1c2d500..f432178 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1329,6 +1329,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
 	return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+				    const struct ieee80211_wmm_ac *wmm_ac2,
+				    struct ieee80211_wmm_ac *intersect)
+{
+	intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+	intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+	intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+	intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
@@ -1343,6 +1353,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	struct ieee80211_freq_range *freq_range;
 	const struct ieee80211_power_rule *power_rule1, *power_rule2;
 	struct ieee80211_power_rule *power_rule;
+	const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+	struct ieee80211_wmm_rule *wmm_rule;
 	u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
 	freq_range1 = &rule1->freq_range;
@@ -1353,6 +1365,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	power_rule2 = &rule2->power_rule;
 	power_rule = &intersected_rule->power_rule;
 
+	wmm_rule1 = &rule1->wmm_rule;
+	wmm_rule2 = &rule2->wmm_rule;
+	wmm_rule = &intersected_rule->wmm_rule;
+
 	freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
 					 freq_range2->start_freq_khz);
 	freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1396,6 +1412,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
 					   rule2->dfs_cac_ms);
 
+	if (rule1->has_wmm && rule2->has_wmm) {
+		u8 ac;
+
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+			reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+						&wmm_rule2->client[ac],
+						&wmm_rule->client[ac]);
+			reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+						&wmm_rule2->ap[ac],
+						&wmm_rule->ap[ac]);
+		}
+
+		intersected_rule->has_wmm = true;
+	} else if (rule1->has_wmm) {
+		*wmm_rule = *wmm_rule1;
+		intersected_rule->has_wmm = true;
+	} else if (rule2->has_wmm) {
+		*wmm_rule = *wmm_rule2;
+		intersected_rule->has_wmm = true;
+	} else {
+		intersected_rule->has_wmm = false;
+	}
+
 	if (!is_valid_reg_rule(intersected_rule))
 		return -EINVAL;
 
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index bfe2dbe..a3b037f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -152,9 +152,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-	struct task_struct *task;
-	struct mm_struct *mm;
-
 	xdp_umem_clear_dev(umem);
 
 	if (umem->fq) {
@@ -169,21 +166,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
 
 	xdp_umem_unpin_pages(umem);
 
-	task = get_pid_task(umem->pid, PIDTYPE_PID);
-	put_pid(umem->pid);
-	if (!task)
-		goto out;
-	mm = get_task_mm(task);
-	put_task_struct(task);
-	if (!mm)
-		goto out;
-
-	mmput(mm);
 	kfree(umem->pages);
 	umem->pages = NULL;
 
 	xdp_umem_unaccount_pages(umem);
-out:
 	kfree(umem);
 }
 
@@ -312,7 +298,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (size_chk < 0)
 		return -EINVAL;
 
-	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	umem->address = (unsigned long)addr;
 	umem->props.chunk_mask = ~((u64)chunk_size - 1);
 	umem->props.size = size;
@@ -328,7 +313,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 	err = xdp_umem_account_pages(umem);
 	if (err)
-		goto out;
+		return err;
 
 	err = xdp_umem_pin_pages(umem);
 	if (err)
@@ -347,8 +332,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 out_account:
 	xdp_umem_unaccount_pages(umem);
-out:
-	put_pid(umem->pid);
 	return err;
 }
 
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 82723ef..555ee2a 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
 	return NULL;
 }
 
-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+					    unsigned short family)
 {
 	struct xfrmi_net *xfrmn;
-	int ifindex;
 	struct xfrm_if *xi;
+	int ifindex = 0;
 
 	if (!secpath_exists(skb) || !skb->dev)
 		return NULL;
 
+	switch (family) {
+	case AF_INET6:
+		ifindex = inet6_sdif(skb);
+		break;
+	case AF_INET:
+		ifindex = inet_sdif(skb);
+		break;
+	}
+	if (!ifindex)
+		ifindex = skb->dev->ifindex;
+
 	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
-	ifindex = skb->dev->ifindex;
 
 	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
 		if (ifindex == xi->dev->ifindex &&
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index bf5d592..ce1b262 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2339,7 +2339,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 	ifcb = xfrm_if_get_cb();
 
 	if (ifcb) {
-		xi = ifcb->decode_session(skb);
+		xi = ifcb->decode_session(skb, family);
 		if (xi) {
 			if_id = xi->p.if_id;
 			net = xi->net;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 3f729cd..11e09eb 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2386,7 +2386,7 @@ void xfrm_state_fini(struct net *net)
 
 	flush_work(&net->xfrm.state_hash_work);
 	flush_work(&xfrm_state_gc_work);
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+	xfrm_state_flush(net, 0, false, true);
 
 	WARN_ON(!list_empty(&net->xfrm.state_all));
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7e4904b..2122f89 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 	ret = verify_policy_dir(p->dir);
 	if (ret)
 		return ret;
-	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
 		return -EINVAL;
 
 	return 0;
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 			return -EINVAL;
 		}
 
-		switch (ut[i].id.proto) {
-		case IPPROTO_AH:
-		case IPPROTO_ESP:
-		case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
-		case IPPROTO_ROUTING:
-		case IPPROTO_DSTOPTS:
-#endif
-		case IPSEC_PROTO_ANY:
-			break;
-		default:
+		if (!xfrm_id_proto_valid(ut[i].id.proto))
 			return -EINVAL;
-		}
-
 	}
 
 	return 0;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 1e7af0a..0ad9974 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -190,6 +190,23 @@
 
 cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
 
+ifdef CONFIG_LTO_CLANG
+# Generate .o.symversions files for each .o with exported symbols, and link these
+# to the kernel and/or modules at the end.
+cmd_modversions_c =								\
+	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) >/dev/null 2>/dev/null; then		\
+		if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then	\
+			$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+			    > $(@D)/$(@F).symversions;				\
+		fi;								\
+	else									\
+		if $(LLVM_NM) $(@D)/.tmp_$(@F) | grep -q __ksymtab; then	\
+			$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+			    > $(@D)/$(@F).symversions;				\
+		fi;								\
+	fi;									\
+	mv -f $(@D)/.tmp_$(@F) $@;
+else
 cmd_modversions_c =								\
 	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then		\
 		$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
@@ -202,6 +219,7 @@
 		mv -f $(@D)/.tmp_$(@F) $@;					\
 	fi;
 endif
+endif
 
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
 ifndef CC_USING_RECORD_MCOUNT
@@ -210,6 +228,12 @@
 ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
   RECORDMCOUNT_FLAGS = -w
 endif
+
+ifdef CONFIG_LTO_CLANG
+# With LTO, we postpone running recordmcount until after the LTO link step, so
+# let's export the parameters for the link script.
+export RECORDMCOUNT_FLAGS
+else
 # Due to recursion, we must skip empty.o.
 # The empty.o file is created in the make process in order to determine
 # the target endianness and word size. It is made before all other C
@@ -218,22 +242,28 @@
 	if [ $(@) != "scripts/mod/empty.o" ]; then	\
 		$(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)";	\
 	fi;
+endif
+
 recordmcount_source := $(srctree)/scripts/recordmcount.c \
 		    $(srctree)/scripts/recordmcount.h
-else
+else # !BUILD_C_RECORDMCOUNT
 sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
 	"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
 	"$(if $(CONFIG_64BIT),64,32)" \
 	"$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
 	"$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
 	"$(if $(part-of-module),1,0)" "$(@)";
+
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
+
+ifndef CONFIG_LTO_CLANG
 cmd_record_mcount =						\
 	if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =	\
 	     "$(CC_FLAGS_FTRACE)" ]; then			\
 		$(sub_cmd_record_mcount)			\
 	fi;
+endif # CONFIG_LTO_CLANG
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
@@ -434,6 +464,26 @@
 # To build objects in subdirs, we need to descend into the directories
 $(sort $(subdir-obj-y)): $(subdir-ym) ;
 
+ifdef CONFIG_LTO_CLANG
+  ifdef CONFIG_MODVERSIONS
+    # combine symversions for later processing
+    update_lto_symversions =				\
+	rm -f $@.symversions; 				\
+	for i in $(filter-out FORCE,$^); do		\
+		if [ -f $$i.symversions ]; then		\
+			cat $$i.symversions 		\
+				>> $@.symversions;	\
+		fi;					\
+	done;
+  endif
+  # rebuild the symbol table with llvm-ar to include IR files
+  update_lto_symtable = ;				\
+	mv -f $@ $@.tmp;				\
+	$(LLVM_AR) rcsT$(KBUILD_ARFLAGS) $@ 		\
+		$$($(AR) t $@.tmp); 			\
+	rm -f $@.tmp
+endif
+
 #
 # Rule to compile a set of .o files into one .o file
 #
@@ -444,7 +494,8 @@
 # scripts/link-vmlinux.sh builds an aggregate built-in.a with a symbol
 # table and index.
 quiet_cmd_ar_builtin = AR      $@
-      cmd_ar_builtin = rm -f $@; \
+      cmd_ar_builtin = $(update_lto_symversions) \
+		     rm -f $@; \
                      $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(filter $(real-obj-y), $^)
 
 $(builtin-target): $(real-obj-y) FORCE
@@ -473,7 +524,11 @@
 quiet_cmd_link_l_target = AR      $@
 
 # lib target archives do get a symbol table and index
-cmd_link_l_target = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)
+cmd_link_l_target = 					\
+	$(update_lto_symversions)			\
+	rm -f $@; 					\
+	$(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)	\
+	$(update_lto_symtable)
 
 $(lib-target): $(lib-y) FORCE
 	$(call if_changed,link_l_target)
@@ -484,13 +539,34 @@
 ksyms-lds = $(dot-target).lds
 
 quiet_cmd_export_list = EXPORTS $@
-cmd_export_list = $(OBJDUMP) -h $< | \
-	sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/EXTERN(\1)/p' >$(ksyms-lds);\
-	rm -f $(dummy-object);\
+filter_export_list = sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/EXTERN(\1)/p'
+link_export_list = rm -f $(dummy-object); \
 	echo | $(CC) $(a_flags) -c -o $(dummy-object) -x assembler -;\
 	$(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
 	rm $(dummy-object) $(ksyms-lds)
 
+ifdef CONFIG_LTO_CLANG
+# objdump doesn't understand IR files and llvm-nm doesn't support archives,
+# so we'll walk through each file in the archive separately
+cmd_export_list = 					\
+	rm -f $(ksyms-lds);				\
+	for o in $$($(AR) t $<); do			\
+		if $(OBJDUMP) -h $$o >/dev/null 2>/dev/null; then \
+			$(OBJDUMP) -h $$o | 		\
+				$(filter_export_list) 	\
+				>>$(ksyms-lds);		\
+		else					\
+			$(LLVM_NM) $$o |		\
+				$(filter_export_list) 	\
+				>>$(ksyms-lds);		\
+		fi; 					\
+	done;						\
+	$(link_export_list)
+else
+cmd_export_list = $(OBJDUMP) -h $< | $(filter_export_list) >$(ksyms-lds); \
+	$(link_export_list)
+endif
+
 $(obj)/lib-ksyms.o: $(lib-target) FORCE
 	$(call if_changed,export_list)
 
@@ -514,13 +590,31 @@
 $($(subst $(obj)/,,$(@:.o=-y)))       \
 $($(subst $(obj)/,,$(@:.o=-m)))), $^)
 
+cmd_link_multi-link = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secanalysis)
+
+quiet_cmd_link_multi-y = AR      $@
+cmd_link_multi-y = $(update_lto_symversions) \
+	rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(link_multi_deps) \
+	$(update_lto_symtable)
+
 quiet_cmd_link_multi-m = LD [M]  $@
-cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secanalysis)
+
+ifdef CONFIG_LTO_CLANG
+  # don't compile IR until needed
+  cmd_link_multi-m = $(cmd_link_multi-y)
+else
+  cmd_link_multi-m = $(cmd_link_multi-link)
+endif
+
+$(multi-used-y): FORCE
+	$(call if_changed,link_multi-y)
 
 $(multi-used-m): FORCE
 	$(call if_changed,link_multi-m)
 	@{ echo $(@:.o=.ko); echo $(link_multi_deps); \
 	   $(cmd_undef_syms); } > $(MODVERDIR)/$(@F:.o=.mod)
+
+$(call multi_depend, $(multi-used-y), .o, -objs -y)
 $(call multi_depend, $(multi-used-m), .o, -objs -y -m)
 
 targets += $(multi-used-m)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 7d4af0d..1771a31 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -83,12 +83,28 @@
 
 MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
 
+# If CONFIG_LTO_CLANG is enabled, .o files are either LLVM IR, or empty, so we
+# need to link them into actual objects before passing them to modpost
+modpost-ext = $(if $(CONFIG_LTO_CLANG),.lto,)
+
+ifdef CONFIG_LTO_CLANG
+quiet_cmd_cc_lto_link_modules = LD [M]  $@
+cmd_cc_lto_link_modules =						\
+	$(LD) $(ld_flags) -r -o $(@)					\
+		$(shell [ -s $(@:$(modpost-ext).o=.o.symversions) ] &&	\
+			echo -T $(@:$(modpost-ext).o=.o.symversions))	\
+		--whole-archive $(filter-out FORCE,$^)
+
+$(modules:.ko=$(modpost-ext).o): %$(modpost-ext).o: %.o FORCE
+	$(call if_changed,cc_lto_link_modules)
+endif
+
 # We can go over command line length here, so be careful.
 quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
-      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) $(MODPOST_OPT) -s -T -
+      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/$(modpost-ext)\.o/' | $(modpost) $(MODPOST_OPT) -s -T -
 
 PHONY += __modpost
-__modpost: $(modules:.ko=.o) FORCE
+__modpost: $(modules:.ko=$(modpost-ext).o) FORCE
 	$(call cmd,modpost) $(wildcard vmlinux)
 
 quiet_cmd_kernel-mod = MODPOST $@
@@ -100,7 +116,6 @@
 # Declare generated files as targets for modpost
 $(modules:.ko=.mod.c): __modpost ;
 
-
 # Step 5), compile all *.mod.c files
 
 # modname is set to make c_flags define KBUILD_MODNAME
@@ -119,13 +134,28 @@
 
 # Step 6), final link of the modules with optional arch pass after final link
 quiet_cmd_ld_ko_o = LD [M]  $@
+
+ifdef CONFIG_LTO_CLANG
+      cmd_ld_ko_o = 							\
+	$(LD) -r $(LDFLAGS)                                 		\
+		 $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) 		\
+		 $(shell [ -s $(@:.ko=.o.symversions) ] &&		\
+			echo -T $(@:.ko=.o.symversions))  		\
+		 -o $@ --whole-archive					\
+		 $(filter-out FORCE,$(^:$(modpost-ext).o=.o))
+
+  ifdef CONFIG_FTRACE_MCOUNT_RECORD
+      cmd_ld_ko_o += ; $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) $@
+  endif
+else
       cmd_ld_ko_o =                                                     \
 	$(LD) -r $(KBUILD_LDFLAGS)                                      \
                  $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE)             \
                  -o $@ $(filter-out FORCE,$^) ;                         \
 	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
+endif
 
-$(modules): %.ko :%.o %.mod.o FORCE
+$(modules): %.ko: %$(modpost-ext).o %.mod.o FORCE
 	+$(call if_changed,ld_ko_o)
 
 targets += $(modules)
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644
index 0000000..350145d
--- /dev/null
+++ b/scripts/coccinelle/api/stream_open.cocci
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that are using nonseekable_open and convert
+// them to stream_open. A stream-like file is a file that does not use ppos in
+// its read and write. Rationale for the conversion is to avoid deadlock in
+// between read and write.
+
+virtual report
+virtual patch
+virtual explain  // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  block_f(...) {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait())
+// XXX currently reader_blocks supports only direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  readstream(...)
+  {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+  readstream(...)
+  {
+    ... when exists
+    block_f(...)
+    ... when exists
+  }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+  readstream(...) {
+    ...
+  }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ...	(sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+  struct file_operations fops = {
+    ...
+  };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+  struct file_operations fops = {
+    .read = read_f,
+  };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+  struct file_operations fops = {
+    .read_iter = read_iter_f,
+  };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+  struct file_operations fops = {
+    .write = write_f,
+  };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+  struct file_operations fops = {
+    .write_iter = write_iter_f,
+  };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+  struct file_operations fops = {
+    .llseek = llseek_f,
+  };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+    .llseek = no_llseek,
+  };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+  struct file_operations fops = {
+    .mmap = mmap_f,
+  };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+  struct file_operations fops = {
+    .copy_file_range = copy_file_range_f,
+  };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+  struct file_operations fops = {
+    .remap_file_range = remap_file_range_f,
+  };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+  struct file_operations fops = {
+    .splice_read = splice_read_f,
+  };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+  struct file_operations fops = {
+    .splice_write = splice_write_f,
+  };
+
+
+// file_operations that is candidate for stream_open conversion - it does not
+// use mmap and other methods that assume @offset access to file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_steam.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+  };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener -
+// under that conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+      .write = writestream,
+  };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+     nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (was deadlock) */
+    ...>
+  }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (no direct deadlock) */
+    ...>
+  }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+  };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* read only */
+    ...>
+  }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .write = writestream,
+  };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* write only */
+    ...>
+  }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// no read, no write - don't change anything
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index fe82ff6..b15c8d1 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -126,7 +126,8 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
 			case KEY_DOWN:
 				break;
 			case KEY_BACKSPACE:
-			case 127:
+			case 8:   /* ^H */
+			case 127: /* ^? */
 				if (pos) {
 					wattrset(dialog, dlg.inputbox.atr);
 					if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 1ef232a..c8ff1c9 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1049,7 +1049,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 		state->match_direction = FIND_NEXT_MATCH_UP;
 		*ans = get_mext_match(state->pattern,
 				state->match_direction);
-	} else if (key == KEY_BACKSPACE || key == 127) {
+	} else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
 		state->pattern[strlen(state->pattern)-1] = '\0';
 		adj_match_dir(&state->match_direction);
 	} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 88874ac..820fc92 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -440,7 +440,8 @@ int dialog_inputbox(WINDOW *main_window,
 		case KEY_F(F_EXIT):
 		case KEY_F(F_BACK):
 			break;
-		case 127:
+		case 8:   /* ^H */
+		case 127: /* ^? */
 		case KEY_BACKSPACE:
 			if (cursor_position > 0) {
 				memmove(&result[cursor_position-1],
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 8f5c95c..09b9fa7 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -60,6 +60,38 @@
 	${AR} rcsTP${KBUILD_ARFLAGS} built-in.a			\
 				${KBUILD_VMLINUX_INIT}		\
 				${KBUILD_VMLINUX_MAIN}
+
+	# rebuild with llvm-ar to update the symbol table
+	if [ -n "${CONFIG_LTO_CLANG}" ]; then
+		mv -f built-in.a built-in.a.tmp
+		${LLVM_AR} rcsT${KBUILD_ARFLAGS} built-in.a $(${AR} t built-in.a.tmp)
+		rm -f built-in.a.tmp
+	fi
+}
+
+# If CONFIG_LTO_CLANG is selected, collect generated symbol versions into
+# .tmp_symversions
+modversions()
+{
+	if [ -z "${CONFIG_LTO_CLANG}" ]; then
+		return
+	fi
+
+	if [ -z "${CONFIG_MODVERSIONS}" ]; then
+		return
+	fi
+
+	rm -f .tmp_symversions
+
+	for a in built-in.a ${KBUILD_VMLINUX_LIBS}; do
+		for o in $(${AR} t $a); do
+			if [ -f ${o}.symversions ]; then
+				cat ${o}.symversions >> .tmp_symversions
+			fi
+		done
+	done
+
+	echo "-T .tmp_symversions"
 }
 
 # Link of vmlinux.o used for section mismatch analysis
@@ -75,7 +107,26 @@
 		${KBUILD_VMLINUX_LIBS}				\
 		--end-group"
 
-	${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${objects}
+	if [ -n "${CONFIG_LTO_CLANG}" ]; then
+		# This might take a while, so indicate that we're doing
+		# an LTO link
+		info LTO vmlinux.o
+	fi
+
+	${LD} ${KBUILD_LDFLAGS} -r -o ${1} $(modversions) ${objects}
+}
+
+# If CONFIG_LTO_CLANG is selected, we postpone running recordmcount until
+# we have compiled LLVM IR to an object file.
+recordmcount()
+{
+	if [ -z "${CONFIG_LTO_CLANG}" ]; then
+		return
+	fi
+
+	if [ -n "${CONFIG_FTRACE_MCOUNT_RECORD}" ]; then
+		scripts/recordmcount ${RECORDMCOUNT_FLAGS} $*
+	fi
 }
 
 # Link of vmlinux
@@ -87,13 +138,20 @@
 	local objects
 
 	if [ "${SRCARCH}" != "um" ]; then
-		objects="--whole-archive			\
-			built-in.a				\
-			--no-whole-archive			\
-			--start-group				\
-			${KBUILD_VMLINUX_LIBS}			\
-			--end-group				\
-			${1}"
+		if [ -z "${CONFIG_LTO_CLANG}" ]; then
+			objects="--whole-archive		\
+				built-in.a			\
+				--no-whole-archive		\
+				--start-group			\
+				${KBUILD_VMLINUX_LIBS}		\
+				--end-group			\
+				${1}"
+		else
+			objects="--start-group			\
+				vmlinux.o			\
+				--end-group			\
+				${1}"
+		fi
 
 		${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}	\
 			-T ${lds} ${objects}
@@ -114,7 +172,6 @@
 	fi
 }
 
-
 # Create ${2} .o file with all symbols from the ${1} object file
 kallsyms()
 {
@@ -184,6 +241,7 @@
 {
 	rm -f .tmp_System.map
 	rm -f .tmp_kallsyms*
+	rm -f .tmp_symversions
 	rm -f .tmp_vmlinux*
 	rm -f built-in.a
 	rm -f System.map
@@ -247,12 +305,16 @@
 archive_builtin
 
 #link vmlinux.o
-info LD vmlinux.o
 modpost_link vmlinux.o
 
 # modpost vmlinux.o to check for section mismatches
 ${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
 
+if [ -n "${CONFIG_LTO_CLANG}" ]; then
+	# Call recordmcount if needed
+	recordmcount vmlinux.o
+fi
+
 # Generate RTIC MP placeholder compile unit of the correct size
 # and add it to the list of link objects
 # this needs to be done before generating kallsyms
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index 42c5d50..e014b2f 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 OBJECT_FILES_NON_STANDARD := y
+CFLAGS_empty.o += $(DISABLE_LTO)
 
 hostprogs-y	:= modpost mk_elfconfig
 always		:= $(hostprogs-y) empty.o
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 7be4369..7f40b6a 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -47,49 +47,9 @@ typedef struct {
 struct devtable {
 	const char *device_id; /* name of table, __mod_<name>__*_device_table. */
 	unsigned long id_size;
-	void *function;
+	int (*do_entry)(const char *filename, void *symval, char *alias);
 };
 
-#define ___cat(a,b) a ## b
-#define __cat(a,b) ___cat(a,b)
-
-/* we need some special handling for this host tool running eventually on
- * Darwin. The Mach-O section handling is a bit different than ELF section
- * handling. The differnces in detail are:
- *  a) we have segments which have sections
- *  b) we need a API call to get the respective section symbols */
-#if defined(__MACH__)
-#include <mach-o/getsect.h>
-
-#define INIT_SECTION(name)  do {					\
-		unsigned long name ## _len;				\
-		char *__cat(pstart_,name) = getsectdata("__TEXT",	\
-			#name, &__cat(name,_len));			\
-		char *__cat(pstop_,name) = __cat(pstart_,name) +	\
-			__cat(name, _len);				\
-		__cat(__start_,name) = (void *)__cat(pstart_,name);	\
-		__cat(__stop_,name) = (void *)__cat(pstop_,name);	\
-	} while (0)
-#define SECTION(name)   __attribute__((section("__TEXT, " #name)))
-
-struct devtable **__start___devtable, **__stop___devtable;
-#else
-#define INIT_SECTION(name) /* no-op for ELF */
-#define SECTION(name)   __attribute__((section(#name)))
-
-/* We construct a table of pointers in an ELF section (pointers generally
- * go unpadded by gcc).  ld creates boundary syms for us. */
-extern struct devtable *__start___devtable[], *__stop___devtable[];
-#endif /* __MACH__ */
-
-#if !defined(__used)
-# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
-#  define __used			__attribute__((__unused__))
-# else
-#  define __used			__attribute__((__used__))
-# endif
-#endif
-
 /* Define a variable f that holds the value of field f of struct devid
  * based at address m.
  */
@@ -102,16 +62,6 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
 #define DEF_FIELD_ADDR(m, devid, f) \
 	typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
 
-/* Add a table entry.  We test function type matches while we're here. */
-#define ADD_TO_DEVTABLE(device_id, type, function) \
-	static struct devtable __cat(devtable,__LINE__) = {	\
-		device_id + 0*sizeof((function)((const char *)NULL,	\
-						(void *)NULL,		\
-						(char *)NULL)),		\
-		SIZE_##type, (function) };				\
-	static struct devtable *SECTION(__devtable) __used \
-		__cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
-
 #define ADD(str, sep, cond, field)                              \
 do {                                                            \
         strcat(str, sep);                                       \
@@ -431,7 +381,6 @@ static int do_hid_entry(const char *filename,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
 
 /* Looks like: ieee1394:venNmoNspNverN */
 static int do_ieee1394_entry(const char *filename,
@@ -456,7 +405,6 @@ static int do_ieee1394_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
 
 /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
 static int do_pci_entry(const char *filename,
@@ -500,7 +448,6 @@ static int do_pci_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
 
 /* looks like: "ccw:tNmNdtNdmN" */
 static int do_ccw_entry(const char *filename,
@@ -524,7 +471,6 @@ static int do_ccw_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
 
 /* looks like: "ap:tN" */
 static int do_ap_entry(const char *filename,
@@ -535,7 +481,6 @@ static int do_ap_entry(const char *filename,
 	sprintf(alias, "ap:t%02X*", dev_type);
 	return 1;
 }
-ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
 
 /* looks like: "css:tN" */
 static int do_css_entry(const char *filename,
@@ -546,7 +491,6 @@ static int do_css_entry(const char *filename,
 	sprintf(alias, "css:t%01X", type);
 	return 1;
 }
-ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
 
 /* Looks like: "serio:tyNprNidNexN" */
 static int do_serio_entry(const char *filename,
@@ -566,7 +510,6 @@ static int do_serio_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
 
 /* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
  *             "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
@@ -604,7 +547,6 @@ static int do_acpi_entry(const char *filename,
 	}
 	return 1;
 }
-ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
 
 /* looks like: "pnp:dD" */
 static void do_pnp_device_entry(void *symval, unsigned long size,
@@ -725,7 +667,6 @@ static int do_pcmcia_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
 static int do_vio_entry(const char *filename, void *symval,
 		char *alias)
@@ -745,7 +686,6 @@ static int do_vio_entry(const char *filename, void *symval,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -818,7 +758,6 @@ static int do_input_entry(const char *filename, void *symval,
 		do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
 	return 1;
 }
-ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
 
 static int do_eisa_entry(const char *filename, void *symval,
 		char *alias)
@@ -830,7 +769,6 @@ static int do_eisa_entry(const char *filename, void *symval,
 		strcat(alias, "*");
 	return 1;
 }
-ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
 
 /* Looks like: parisc:tNhvNrevNsvN */
 static int do_parisc_entry(const char *filename, void *symval,
@@ -850,7 +788,6 @@ static int do_parisc_entry(const char *filename, void *symval,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
 
 /* Looks like: sdio:cNvNdN. */
 static int do_sdio_entry(const char *filename,
@@ -867,7 +804,6 @@ static int do_sdio_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
 
 /* Looks like: ssb:vNidNrevN. */
 static int do_ssb_entry(const char *filename,
@@ -884,7 +820,6 @@ static int do_ssb_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
 
 /* Looks like: bcma:mNidNrevNclN. */
 static int do_bcma_entry(const char *filename,
@@ -903,7 +838,6 @@ static int do_bcma_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
 
 /* Looks like: virtio:dNvN */
 static int do_virtio_entry(const char *filename, void *symval,
@@ -919,7 +853,6 @@ static int do_virtio_entry(const char *filename, void *symval,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
 
 /*
  * Looks like: vmbus:guid
@@ -942,7 +875,6 @@ static int do_vmbus_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
 
 /* Looks like: rpmsg:S */
 static int do_rpmsg_entry(const char *filename, void *symval,
@@ -953,7 +885,6 @@ static int do_rpmsg_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("rpmsg", rpmsg_device_id, do_rpmsg_entry);
 
 /* Looks like: i2c:S */
 static int do_i2c_entry(const char *filename, void *symval,
@@ -964,7 +895,6 @@ static int do_i2c_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
 
 /* Looks like: spi:S */
 static int do_spi_entry(const char *filename, void *symval,
@@ -975,7 +905,6 @@ static int do_spi_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
 
 static const struct dmifield {
 	const char *prefix;
@@ -1030,7 +959,6 @@ static int do_dmi_entry(const char *filename, void *symval,
 	strcat(alias, ":");
 	return 1;
 }
-ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
 
 static int do_platform_entry(const char *filename,
 			     void *symval, char *alias)
@@ -1039,7 +967,6 @@ static int do_platform_entry(const char *filename,
 	sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
 	return 1;
 }
-ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
 
 static int do_mdio_entry(const char *filename,
 			 void *symval, char *alias)
@@ -1064,7 +991,6 @@ static int do_mdio_entry(const char *filename,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
 
 /* Looks like: zorro:iN. */
 static int do_zorro_entry(const char *filename, void *symval,
@@ -1075,7 +1001,6 @@ static int do_zorro_entry(const char *filename, void *symval,
 	ADD(alias, "i", id != ZORRO_WILDCARD, id);
 	return 1;
 }
-ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
 
 /* looks like: "pnp:dD" */
 static int do_isapnp_entry(const char *filename,
@@ -1091,7 +1016,6 @@ static int do_isapnp_entry(const char *filename,
 		(function >> 12) & 0x0f, (function >> 8) & 0x0f);
 	return 1;
 }
-ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
 
 /* Looks like: "ipack:fNvNdN". */
 static int do_ipack_entry(const char *filename,
@@ -1107,7 +1031,6 @@ static int do_ipack_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
 
 /*
  * Append a match expression for a single masked hex digit.
@@ -1178,7 +1101,6 @@ static int do_amba_entry(const char *filename,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
 
 /*
  * looks like: "mipscdmm:tN"
@@ -1194,7 +1116,6 @@ static int do_mips_cdmm_entry(const char *filename,
 	sprintf(alias, "mipscdmm:t%02X*", type);
 	return 1;
 }
-ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
 
 /* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
  * All fields are numbers. It would be nicer to use strings for vendor
@@ -1219,7 +1140,6 @@ static int do_x86cpu_entry(const char *filename, void *symval,
 		sprintf(alias + strlen(alias), "%04X*", feature);
 	return 1;
 }
-ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
 
 /* LOOKS like cpu:type:*:feature:*FEAT* */
 static int do_cpu_entry(const char *filename, void *symval, char *alias)
@@ -1229,7 +1149,6 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
 	sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
 	return 1;
 }
-ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
 
 /* Looks like: mei:S:uuid:N:* */
 static int do_mei_entry(const char *filename, void *symval,
@@ -1248,7 +1167,6 @@ static int do_mei_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
 
 /* Looks like: rapidio:vNdNavNadN */
 static int do_rio_entry(const char *filename,
@@ -1268,7 +1186,6 @@ static int do_rio_entry(const char *filename,
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
 
 /* Looks like: ulpi:vNpN */
 static int do_ulpi_entry(const char *filename, void *symval,
@@ -1281,7 +1198,6 @@ static int do_ulpi_entry(const char *filename, void *symval,
 
 	return 1;
 }
-ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
 
 /* Looks like: hdaudio:vNrNaN */
 static int do_hda_entry(const char *filename, void *symval, char *alias)
@@ -1298,7 +1214,6 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
 
 /* Looks like: sdw:mNpN */
 static int do_sdw_entry(const char *filename, void *symval, char *alias)
@@ -1313,7 +1228,6 @@ static int do_sdw_entry(const char *filename, void *symval, char *alias)
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("sdw", sdw_device_id, do_sdw_entry);
 
 /* Looks like: fsl-mc:vNdN */
 static int do_fsl_mc_entry(const char *filename, void *symval,
@@ -1325,7 +1239,6 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
 	sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
 	return 1;
 }
-ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
 
 /* Looks like: tbsvc:kSpNvNrN */
 static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
@@ -1350,7 +1263,6 @@ static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("tbsvc", tb_service_id, do_tbsvc_entry);
 
 /* Looks like: typec:idNmN */
 static int do_typec_entry(const char *filename, void *symval, char *alias)
@@ -1363,7 +1275,6 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
 
 	return 1;
 }
-ADD_TO_DEVTABLE("typec", typec_device_id, do_typec_entry);
 
 /* Does namelen bytes of name exactly match the symbol? */
 static bool sym_is(const char *name, unsigned namelen, const char *symbol)
@@ -1377,12 +1288,11 @@ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
 static void do_table(void *symval, unsigned long size,
 		     unsigned long id_size,
 		     const char *device_id,
-		     void *function,
+		     int (*do_entry)(const char *filename, void *symval, char *alias),
 		     struct module *mod)
 {
 	unsigned int i;
 	char alias[500];
-	int (*do_entry)(const char *, void *entry, char *alias) = function;
 
 	device_id_check(mod->name, device_id, size, id_size, symval);
 	/* Leave last one: it's the terminator. */
@@ -1396,6 +1306,48 @@ static void do_table(void *symval, unsigned long size,
 	}
 }
 
+static const struct devtable devtable[] = {
+	{"hid", SIZE_hid_device_id, do_hid_entry},
+	{"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
+	{"pci", SIZE_pci_device_id, do_pci_entry},
+	{"ccw", SIZE_ccw_device_id, do_ccw_entry},
+	{"ap", SIZE_ap_device_id, do_ap_entry},
+	{"css", SIZE_css_device_id, do_css_entry},
+	{"serio", SIZE_serio_device_id, do_serio_entry},
+	{"acpi", SIZE_acpi_device_id, do_acpi_entry},
+	{"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
+	{"vio", SIZE_vio_device_id, do_vio_entry},
+	{"input", SIZE_input_device_id, do_input_entry},
+	{"eisa", SIZE_eisa_device_id, do_eisa_entry},
+	{"parisc", SIZE_parisc_device_id, do_parisc_entry},
+	{"sdio", SIZE_sdio_device_id, do_sdio_entry},
+	{"ssb", SIZE_ssb_device_id, do_ssb_entry},
+	{"bcma", SIZE_bcma_device_id, do_bcma_entry},
+	{"virtio", SIZE_virtio_device_id, do_virtio_entry},
+	{"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
+	{"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
+	{"i2c", SIZE_i2c_device_id, do_i2c_entry},
+	{"spi", SIZE_spi_device_id, do_spi_entry},
+	{"dmi", SIZE_dmi_system_id, do_dmi_entry},
+	{"platform", SIZE_platform_device_id, do_platform_entry},
+	{"mdio", SIZE_mdio_device_id, do_mdio_entry},
+	{"zorro", SIZE_zorro_device_id, do_zorro_entry},
+	{"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
+	{"ipack", SIZE_ipack_device_id, do_ipack_entry},
+	{"amba", SIZE_amba_id, do_amba_entry},
+	{"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
+	{"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
+	{"cpu", SIZE_cpu_feature, do_cpu_entry},
+	{"mei", SIZE_mei_cl_device_id, do_mei_entry},
+	{"rapidio", SIZE_rio_device_id, do_rio_entry},
+	{"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
+	{"hdaudio", SIZE_hda_device_id, do_hda_entry},
+	{"sdw", SIZE_sdw_device_id, do_sdw_entry},
+	{"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
+	{"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
+	{"typec", SIZE_typec_device_id, do_typec_entry},
+};
+
 /* Create MODULE_ALIAS() statements.
  * At this time, we cannot write the actual output C source yet,
  * so we write into the mod->dev_table_buf buffer. */
@@ -1450,13 +1402,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
 	else if (sym_is(name, namelen, "pnp_card"))
 		do_pnp_card_entries(symval, sym->st_size, mod);
 	else {
-		struct devtable **p;
-		INIT_SECTION(__devtable);
+		int i;
 
-		for (p = __start___devtable; p < __stop___devtable; p++) {
-			if (sym_is(name, namelen, (*p)->device_id)) {
-				do_table(symval, sym->st_size, (*p)->id_size,
-					 (*p)->device_id, (*p)->function, mod);
+		for (i = 0; i < ARRAY_SIZE(devtable); i++) {
+			const struct devtable *p = &devtable[i];
+
+			if (sym_is(name, namelen, p->device_id)) {
+				do_table(symval, sym->st_size, p->id_size,
+					 p->device_id, p->do_entry, mod);
 				break;
 			}
 		}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 858cbe5..b16c893e 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -145,6 +145,9 @@ static struct module *new_module(const char *modname)
 		p[strlen(p) - 2] = '\0';
 		mod->is_dot_o = 1;
 	}
+	/* strip trailing .lto */
+	if (strends(p, ".lto"))
+		p[strlen(p) - 4] = '\0';
 
 	/* add to list */
 	mod->name = p;
@@ -942,6 +945,7 @@ static const char *const head_sections[] = { ".head.text*", NULL };
 static const char *const linker_symbols[] =
 	{ "__init_begin", "_sinittext", "_einittext", NULL };
 static const char *const optim_symbols[] = { "*.constprop.*", NULL };
+static const char *const cfi_symbols[] = { "*.cfi", NULL };
 
 enum mismatch {
 	TEXT_TO_ANY_INIT,
@@ -1163,6 +1167,16 @@ static const struct sectioncheck *section_mismatch(
  *   fromsec = text section
  *   refsymname = *.constprop.*
  *
+ * Pattern 6:
+ *   With CONFIG_CFI_CLANG, clang appends .cfi to all indirectly called
+ *   functions and creates a function stub with the original name. This
+ *   stub is always placed in .text, even if the actual function with the
+ *   .cfi postfix is in .init.text or .exit.text.
+ *   This pattern is identified by
+ *   tosec   = init or exit section
+ *   fromsec = text section
+ *   tosym   = *.cfi
+ *
  **/
 static int secref_whitelist(const struct sectioncheck *mismatch,
 			    const char *fromsec, const char *fromsym,
@@ -1201,6 +1215,12 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
 	    match(fromsym, optim_symbols))
 		return 0;
 
+	/* Check for pattern 6 */
+	if (match(fromsec, text_sections) &&
+	    match(tosec, init_exit_sections) &&
+	    match(tosym, cfi_symbols))
+		return 0;
+
 	return 1;
 }
 
@@ -1927,6 +1947,10 @@ static char *remove_dot(char *s)
 		size_t m = strspn(s + n + 1, "0123456789");
 		if (m && (s[n + m] == '.' || s[n + m] == 0))
 			s[n] = 0;
+
+		/* strip trailing .lto */
+		if (strends(s, ".lto"))
+			s[strlen(s) - 4] = '\0';
 	}
 	return s;
 }
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 895c40e..7a9ad23 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -406,7 +406,8 @@ is_mcounted_section_name(char const *const txtname)
 		strcmp(".softirqentry.text", txtname) == 0 ||
 		strcmp(".kprobes.text", txtname) == 0 ||
 		strcmp(".cpuidle.text", txtname) == 0 ||
-		strcmp(".text.unlikely", txtname) == 0;
+		(strncmp(".text.",       txtname, 6) == 0 &&
+		 strcmp(".text..ftrace", txtname) != 0);
 }
 
 /* 32 bit and 64 bit are very similar */
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
index fa48fab..3cc4893 100644
--- a/scripts/selinux/genheaders/genheaders.c
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -9,7 +9,6 @@
 #include <string.h>
 #include <errno.h>
 #include <ctype.h>
-#include <sys/socket.h>
 
 struct security_class_mapping {
 	const char *name;
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index 073fe75..6d51b74 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -32,7 +32,6 @@
 #include <stdlib.h>
 #include <unistd.h>
 #include <string.h>
-#include <sys/socket.h>
 
 static void usage(char *name)
 {
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index e09fe4d..40e3a09 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
 	return 0;
 }
 
-static void aafs_evict_inode(struct inode *inode)
+static void aafs_i_callback(struct rcu_head *head)
 {
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	if (S_ISLNK(inode->i_mode))
 		kfree(inode->i_link);
+	free_inode_nonrcu(inode);
+}
+
+static void aafs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, aafs_i_callback);
 }
 
 static const struct super_operations aafs_super_ops = {
 	.statfs = simple_statfs,
-	.evict_inode = aafs_evict_inode,
+	.destroy_inode = aafs_destroy_inode,
 	.show_path = aafs_show_path,
 };
 
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd97929..dc28914 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
 		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
 			rc = dev_exception_add(devcg, ex);
 			if (rc)
-				break;
+				return rc;
 		} else {
 			/*
 			 * in the other possible cases:
diff --git a/security/inode.c b/security/inode.c
index bf28109..0107dc7 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -26,17 +26,22 @@
 static struct vfsmount *mount;
 static int mount_count;
 
-static void securityfs_evict_inode(struct inode *inode)
+static void securityfs_i_callback(struct rcu_head *head)
 {
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	if (S_ISLNK(inode->i_mode))
 		kfree(inode->i_link);
+	free_inode_nonrcu(inode);
+}
+
+static void securityfs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, securityfs_i_callback);
 }
 
 static const struct super_operations securityfs_super_operations = {
 	.statfs		= simple_statfs,
-	.evict_inode	= securityfs_evict_inode,
+	.destroy_inode	= securityfs_destroy_inode,
 };
 
 static int fill_super(struct super_block *sb, void *data, int silent)
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
index f6370b6..a46c39d 100644
--- a/security/pfe/pfk.c
+++ b/security/pfe/pfk.c
@@ -99,14 +99,6 @@ static int __init pfk_init(void)
 	if (ret != 0)
 		goto fail;
 
-	ret = pfk_kc_init();
-	if (ret != 0) {
-		pr_err("could init pfk key cache, error %d\n", ret);
-		pfk_ext4_deinit();
-		pfk_f2fs_deinit();
-		goto fail;
-	}
-
 	pfk_ready = true;
 	pr_debug("Driver initialized successfully\n");
 
@@ -343,7 +335,7 @@ static int pfk_get_key_for_bio(const struct bio *bio,
  * Must be followed by pfk_load_key_end when key is no longer used by ice
  *
  */
-int pfk_load_key_start(const struct bio *bio,
+int pfk_load_key_start(const struct bio *bio, struct ice_device *ice_dev,
 		struct ice_crypto_setting *ice_setting, bool *is_pfe,
 		bool async)
 {
@@ -386,7 +378,7 @@ int pfk_load_key_start(const struct bio *bio,
 
 	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
 			key_info.salt, key_info.salt_size, &key_index, async,
-			data_unit);
+			data_unit, ice_dev);
 	if (ret) {
 		if (ret != -EBUSY && ret != -EAGAIN)
 			pr_err("start: could not load key into pfk key cache, error %d\n",
@@ -418,7 +410,8 @@ int pfk_load_key_start(const struct bio *bio,
  * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
  *			from PFE context
  */
-int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+int pfk_load_key_end(const struct bio *bio, struct ice_device *ice_dev,
+			bool *is_pfe)
 {
 	int ret = 0;
 	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
@@ -442,7 +435,7 @@ int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
 		return ret;
 
 	pfk_kc_load_key_end(key_info.key, key_info.key_size,
-		key_info.salt, key_info.salt_size);
+		key_info.salt, key_info.salt_size, ice_dev);
 
 	pr_debug("finished using key for file %s\n",
 		inode_to_filename(pfk_bio_get_inode(bio)));
@@ -536,12 +529,22 @@ int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
  * is lost in ICE. We need to flash the cache, so that the keys will be
  * reconfigured again for every subsequent transaction
  */
-void pfk_clear_on_reset(void)
+void pfk_clear_on_reset(struct ice_device *ice_dev)
 {
 	if (!pfk_is_ready())
 		return;
 
-	pfk_kc_clear_on_reset();
+	pfk_kc_clear_on_reset(ice_dev);
+}
+
+int pfk_remove(struct ice_device *ice_dev)
+{
+	return pfk_kc_clear(ice_dev);
+}
+
+int pfk_initialize_key_table(struct ice_device *ice_dev)
+{
+	return pfk_kc_initialize_key_table(ice_dev);
 }
 
 module_init(pfk_init);
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
index cf08d8c..8d3928f9 100644
--- a/security/pfe/pfk_ice.c
+++ b/security/pfe/pfk_ice.c
@@ -23,36 +23,45 @@
 /** global definitions		 **/
 /**********************************/
 
-#define TZ_ES_INVALIDATE_ICE_KEY 0x3
-#define TZ_ES_CONFIG_SET_ICE_KEY 0x4
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE 0x5
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE 0x6
 
 /* index 0 and 1 is reserved for FDE */
 #define MIN_ICE_KEY_INDEX 2
 
 #define MAX_ICE_KEY_INDEX 31
 
-#define TZ_ES_CONFIG_SET_ICE_KEY_ID \
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID \
 	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
-	TZ_ES_CONFIG_SET_ICE_KEY)
+	TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE)
 
-#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID \
 		TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
-			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE)
 
-#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
-	TZ_SYSCALL_CREATE_PARAM_ID_1( \
-	TZ_SYSCALL_PARAM_TYPE_VAL)
+#define TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
 
-#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \
-	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+#define TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_6( \
 	TZ_SYSCALL_PARAM_TYPE_VAL, \
 	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
-	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
 
 #define CONTEXT_SIZE 0x1000
 
 #define ICE_BUFFER_SIZE 64
 
+#define PFK_UFS "ufs"
+#define PFK_SDCC "sdcc"
+#define PFK_UFS_CARD "ufscard"
+
+#define UFS_CE 10
+#define SDCC_CE 20
+#define UFS_CARD_CE 30
+
 enum {
 	ICE_CIPHER_MODE_XTS_128 = 0,
 	ICE_CIPHER_MODE_CBC_128 = 1,
@@ -61,7 +70,7 @@ enum {
 };
 
 static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
-		unsigned int data_unit)
+		unsigned int data_unit, struct ice_device *ice_dev)
 {
 	struct scm_desc desc = {0};
 	int ret = 0;
@@ -70,8 +79,6 @@ static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
 	uint32_t key_size = ICE_BUFFER_SIZE / 2;
 	struct qtee_shm shm;
 
-	smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
-
 	ret = qtee_shmbridge_allocate_shm(ICE_BUFFER_SIZE, &shm);
 	if (ret)
 		return -ENOMEM;
@@ -82,13 +89,22 @@ static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
 	memcpy(tzbuf+key_size, salt, key_size);
 	dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
 
-	desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
+	smc_id = TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_ID;
+
+	desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_CE_TYPE_PARAM_ID;
 	desc.args[0] = index;
 	desc.args[1] = shm.paddr;
 	desc.args[2] = shm.size;
 	desc.args[3] = ICE_CIPHER_MODE_XTS_256;
 	desc.args[4] = data_unit;
 
+	if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_UFS_CARD))
+		desc.args[5] = UFS_CARD_CE;
+	else if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_SDCC))
+		desc.args[5] = SDCC_CE;
+	else if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_UFS))
+		desc.args[5] = UFS_CE;
+
 	ret = scm_call2_noretry(smc_id, &desc);
 	if (ret)
 		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
@@ -97,17 +113,24 @@ static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
 	return ret;
 }
 
-static int clear_key(uint32_t index)
+static int clear_key(uint32_t index, struct ice_device *ice_dev)
 {
 	struct scm_desc desc = {0};
 	int ret = 0;
 	uint32_t smc_id = 0;
 
-	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_ID;
 
-	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_CE_TYPE_PARAM_ID;
 	desc.args[0] = index;
 
+	if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_UFS_CARD))
+		desc.args[1] = UFS_CARD_CE;
+	else if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_SDCC))
+		desc.args[1] = SDCC_CE;
+	else if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_UFS))
+		desc.args[1] = UFS_CE;
+
 	ret = scm_call2_noretry(smc_id, &desc);
 	if (ret)
 		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
@@ -115,10 +138,9 @@ static int clear_key(uint32_t index)
 }
 
 int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
-			char *storage_type, unsigned int data_unit)
+			struct ice_device *ice_dev, unsigned int data_unit)
 {
 	int ret = 0, ret1 = 0;
-	char *s_type = storage_type;
 
 	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
 		pr_err("%s Invalid index %d\n", __func__, index);
@@ -129,32 +151,27 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
 		return -EINVAL;
 	}
 
-	if (s_type == NULL) {
-		pr_err("%s Invalid Storage type\n", __func__);
-		return -EINVAL;
-	}
-
-	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+	ret = enable_ice_setup(ice_dev);
 	if (ret) {
 		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
 		goto out;
 	}
 
-	ret = set_key(index, key, salt, data_unit);
+	ret = set_key(index, key, salt, data_unit, ice_dev);
 	if (ret) {
 		pr_err("%s: Set Key Error: %d\n", __func__, ret);
 		if (ret == -EBUSY) {
-			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+			if (disable_ice_setup(ice_dev))
 				pr_err("%s: clock disable failed\n", __func__);
 			goto out;
 		}
 		/* Try to invalidate the key to keep ICE in proper state */
-		ret1 = clear_key(index);
+		ret1 = clear_key(index, ice_dev);
 		if (ret1)
 			pr_err("%s: Invalidate key error: %d\n", __func__, ret);
 	}
 
-	ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+	ret1 = disable_ice_setup(ice_dev);
 	if (ret)
 		pr_err("%s: Error %d disabling clocks\n", __func__, ret);
 
@@ -162,7 +179,7 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
 	return ret;
 }
 
-int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+int qti_pfk_ice_invalidate_key(uint32_t index, struct ice_device *ice_dev)
 {
 	int ret = 0;
 
@@ -171,22 +188,17 @@ int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
 		return -EINVAL;
 	}
 
-	if (storage_type == NULL) {
-		pr_err("%s Invalid Storage type\n", __func__);
-		return -EINVAL;
-	}
-
-	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+	ret = enable_ice_setup(ice_dev);
 	if (ret) {
 		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
 		return ret;
 	}
 
-	ret = clear_key(index);
+	ret = clear_key(index, ice_dev);
 	if (ret)
 		pr_err("%s: Invalidate key error: %d\n", __func__, ret);
 
-	if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+	if (disable_ice_setup(ice_dev))
 		pr_err("%s: could not disable clocks\n", __func__);
 
 	return ret;
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
index 0331439..527fb61 100644
--- a/security/pfe/pfk_ice.h
+++ b/security/pfe/pfk_ice.h
@@ -14,12 +14,10 @@
  */
 
 #include <linux/types.h>
-
-int pfk_ice_init(void);
-int pfk_ice_deinit(void);
+#include <crypto/ice.h>
 
 int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
-			char *storage_type, unsigned int data_unit);
-int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+			struct ice_device *ice_dev, unsigned int data_unit);
+int qti_pfk_ice_invalidate_key(uint32_t index, struct ice_device *ice_dev);
 
 #endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
index 64c168d..5a0a557 100644
--- a/security/pfe/pfk_kc.c
+++ b/security/pfe/pfk_kc.c
@@ -18,7 +18,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <crypto/ice.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/jiffies.h>
@@ -44,6 +43,7 @@
 #define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
 #define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
 #define PFK_UFS "ufs"
+#define PFK_UFS_CARD "ufscard"
 
 static DEFINE_SPINLOCK(kc_lock);
 static unsigned long flags;
@@ -100,8 +100,6 @@ struct kc_entry {
 	int scm_error;
 };
 
-static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
-
 /**
  * kc_is_ready() - driver is initialized and ready.
  *
@@ -247,9 +245,10 @@ static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
  * Return entry
  * Should be invoked under spinlock
  */
-static struct kc_entry *kc_entry_at_index(int index)
+static struct kc_entry *kc_entry_at_index(int index,
+		struct ice_device *ice_dev)
 {
-	return &(kc_table[index]);
+	return (struct kc_entry *)(ice_dev->key_table) + index;
 }
 
 /**
@@ -266,13 +265,13 @@ static struct kc_entry *kc_entry_at_index(int index)
  */
 static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
 	size_t key_size, const unsigned char *salt, size_t salt_size,
-	int *starting_index)
+	struct ice_device *ice_dev, int *starting_index)
 {
 	struct kc_entry *entry = NULL;
 	int i = 0;
 
 	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
-		entry = kc_entry_at_index(i);
+		entry = kc_entry_at_index(i, ice_dev);
 
 		if (salt != NULL) {
 			if (entry->salt_size != salt_size)
@@ -305,11 +304,13 @@ static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
  * Should be invoked under spinlock
  */
 static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
-		const unsigned char *salt, size_t salt_size)
+		const unsigned char *salt, size_t salt_size,
+		struct ice_device *ice_dev)
 {
 	int index = 0;
 
-	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+	return kc_find_key_at_index(key, key_size, salt, salt_size,
+				ice_dev, &index);
 }
 
 /**
@@ -321,14 +322,15 @@ static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
  * If all the entries are locked, will return NULL
  * Should be invoked under spin lock
  */
-static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+static struct kc_entry *kc_find_oldest_entry_non_locked(
+		struct ice_device *ice_dev)
 {
 	struct kc_entry *curr_min_entry = NULL;
 	struct kc_entry *entry = NULL;
 	int i = 0;
 
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
-		entry = kc_entry_at_index(i);
+		entry = kc_entry_at_index(i, ice_dev);
 
 		if (entry->state == FREE)
 			return entry;
@@ -399,7 +401,7 @@ static void kc_clear_entry(struct kc_entry *entry)
  */
 static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
 	size_t key_size, const unsigned char *salt, size_t salt_size,
-	unsigned int data_unit)
+	unsigned int data_unit, struct ice_device *ice_dev)
 {
 	int ret;
 
@@ -416,7 +418,7 @@ static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
 	kc_spin_unlock();
 
 	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
-			entry->salt, s_type, data_unit);
+			entry->salt, ice_dev, data_unit);
 
 	kc_spin_lock();
 	return ret;
@@ -427,14 +429,14 @@ static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
  *
  * Return 0 in case of success, error otherwise
  */
-int pfk_kc_init(void)
+static int pfk_kc_init(struct ice_device *ice_dev)
 {
 	int i = 0;
 	struct kc_entry *entry = NULL;
 
 	kc_spin_lock();
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
-		entry = kc_entry_at_index(i);
+		entry = kc_entry_at_index(i, ice_dev);
 		entry->key_index = PFK_KC_STARTING_INDEX + i;
 	}
 	kc_ready = true;
@@ -450,11 +452,9 @@ int pfk_kc_init(void)
  */
 int pfk_kc_deinit(void)
 {
-	int res = pfk_kc_clear();
-
 	kc_ready = false;
 
-	return res;
+	return 0;
 }
 
 /**
@@ -483,7 +483,7 @@ int pfk_kc_deinit(void)
  */
 int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 		const unsigned char *salt, size_t salt_size, u32 *key_index,
-		bool async, unsigned int data_unit)
+		bool async, unsigned int data_unit, struct ice_device *ice_dev)
 {
 	int ret = 0;
 	struct kc_entry *entry = NULL;
@@ -509,7 +509,7 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 
 	kc_spin_lock();
 
-	entry = kc_find_key(key, key_size, salt, salt_size);
+	entry = kc_find_key(key, key_size, salt, salt_size, ice_dev);
 	if (!entry) {
 		if (async) {
 			pr_debug("%s task will populate entry\n", __func__);
@@ -517,7 +517,7 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 			return -EAGAIN;
 		}
 
-		entry = kc_find_oldest_entry_non_locked();
+		entry = kc_find_oldest_entry_non_locked(ice_dev);
 		if (!entry) {
 			/* could not find a single non locked entry,
 			 * return EBUSY to upper layers so that the
@@ -539,7 +539,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 			kc_update_timestamp(entry);
 			entry->state = ACTIVE_ICE_LOADED;
 
-			if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (!strcmp(ice_dev->ice_instance_type,
+				(char *)PFK_UFS) ||
+					!strcmp(ice_dev->ice_instance_type,
+						(char *)PFK_UFS_CARD)) {
 				if (async)
 					entry->loaded_ref_cnt++;
 			} else {
@@ -549,7 +552,7 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 		}
 	case (FREE):
 		ret = kc_update_entry(entry, key, key_size, salt, salt_size,
-					data_unit);
+					data_unit, ice_dev);
 		if (ret) {
 			entry->state = SCM_ERROR;
 			entry->scm_error = ret;
@@ -563,7 +566,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 			 * sync calls from within work thread do not pass
 			 * requests further to HW
 			 */
-			if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (!strcmp(ice_dev->ice_instance_type,
+				(char *)PFK_UFS) ||
+					!strcmp(ice_dev->ice_instance_type,
+						(char *)PFK_UFS_CARD)) {
 				if (async)
 					entry->loaded_ref_cnt++;
 			} else {
@@ -578,7 +584,9 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 	case (ACTIVE_ICE_LOADED):
 		kc_update_timestamp(entry);
 
-		if (!strcmp(s_type, (char *)PFK_UFS)) {
+		if (!strcmp(ice_dev->ice_instance_type, (char *)PFK_UFS) ||
+			!strcmp(ice_dev->ice_instance_type,
+				(char *)PFK_UFS_CARD)) {
 			if (async)
 				entry->loaded_ref_cnt++;
 		} else {
@@ -614,7 +622,8 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
  *
  */
 void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
-		const unsigned char *salt, size_t salt_size)
+		const unsigned char *salt, size_t salt_size,
+		struct ice_device *ice_dev)
 {
 	struct kc_entry *entry = NULL;
 	struct task_struct *tmp_pending = NULL;
@@ -634,7 +643,7 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
 
 	kc_spin_lock();
 
-	entry = kc_find_key(key, key_size, salt, salt_size);
+	entry = kc_find_key(key, key_size, salt, salt_size, ice_dev);
 	if (!entry) {
 		kc_spin_unlock();
 		pr_err("internal error, there should an entry to unlock\n");
@@ -666,7 +675,8 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
 }
 
 /**
- * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * pfk_kc_remove_key_with_salt() - remove the key and salt from cache
+ * and from ICE engine.
  * @key: pointer to the key
  * @key_size: the size of the key
  * @salt: pointer to the key
@@ -679,6 +689,8 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
 		const unsigned char *salt, size_t salt_size)
 {
 	struct kc_entry *entry = NULL;
+	struct list_head *ice_dev_list = NULL;
+	struct ice_device *ice_dev;
 	int res = 0;
 
 	if (!kc_is_ready())
@@ -698,9 +710,26 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
 
 	kc_spin_lock();
 
-	entry = kc_find_key(key, key_size, salt, salt_size);
+	ice_dev_list = get_ice_dev_list();
+	if (!ice_dev_list) {
+		pr_err("%s: Did not find ICE device head\n", __func__);
+		return -ENODEV;
+	}
+	list_for_each_entry(ice_dev, ice_dev_list, list) {
+		entry = kc_find_key(key, key_size, salt, salt_size, ice_dev);
+		if (entry) {
+			pr_debug("%s: Found entry for ice_dev number %d\n",
+					__func__, ice_dev->device_no);
+
+			break;
+		}
+		pr_debug("%s: Can't find  entry for ice_dev number %d\n",
+					__func__, ice_dev->device_no);
+	}
+
 	if (!entry) {
-		pr_debug("%s: key does not exist\n", __func__);
+		pr_debug("%s: Cannot find entry for any ice device\n",
+				__func__);
 		kc_spin_unlock();
 		return -EINVAL;
 	}
@@ -714,7 +743,7 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
 
 	kc_spin_unlock();
 
-	qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+	qti_pfk_ice_invalidate_key(entry->key_index, ice_dev);
 
 	kc_spin_lock();
 	kc_entry_finish_invalidating(entry);
@@ -724,101 +753,12 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
 }
 
 /**
- * pfk_kc_remove_key() - remove the key from cache and from ICE engine
- * when no salt is available. Will only search key part, if there are several,
- * all will be removed
- *
- * @key: pointer to the key
- * @key_size: the size of the key
- *
- * Return 0 in case of success, error otherwise (also for non-existing key)
- */
-int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
-{
-	struct kc_entry *entry = NULL;
-	int index = 0;
-	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
-	int temp_indexes_size = 0;
-	int i = 0;
-	int res = 0;
-
-	if (!kc_is_ready())
-		return -ENODEV;
-
-	if (!key)
-		return -EINVAL;
-
-	if (key_size != PFK_KC_KEY_SIZE)
-		return -EINVAL;
-
-	memset(temp_indexes, -1, sizeof(temp_indexes));
-
-	kc_spin_lock();
-
-	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
-	if (!entry) {
-		pr_err("%s: key does not exist\n", __func__);
-		kc_spin_unlock();
-		return -EINVAL;
-	}
-
-	res = kc_entry_start_invalidating(entry);
-	if (res != 0) {
-		kc_spin_unlock();
-		return res;
-	}
-
-	temp_indexes[temp_indexes_size++] = index;
-	kc_clear_entry(entry);
-
-	/* let's clean additional entries with the same key if there are any */
-	do {
-		index++;
-		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
-		if (!entry)
-			break;
-
-		res = kc_entry_start_invalidating(entry);
-		if (res != 0) {
-			kc_spin_unlock();
-			goto out;
-		}
-
-		temp_indexes[temp_indexes_size++] = index;
-
-		kc_clear_entry(entry);
-
-
-	} while (true);
-
-	kc_spin_unlock();
-
-	temp_indexes_size--;
-	for (i = temp_indexes_size; i >= 0 ; i--)
-		qti_pfk_ice_invalidate_key(
-			kc_entry_at_index(temp_indexes[i])->key_index,
-					s_type);
-
-	/* fall through */
-	res = 0;
-
-out:
-	kc_spin_lock();
-	for (i = temp_indexes_size; i >= 0 ; i--)
-		kc_entry_finish_invalidating(
-				kc_entry_at_index(temp_indexes[i]));
-	kc_spin_unlock();
-
-	return res;
-}
-
-/**
  * pfk_kc_clear() - clear the table and remove all keys from ICE
  *
  * Return 0 on success, error otherwise
  *
  */
-int pfk_kc_clear(void)
+int pfk_kc_clear(struct ice_device *ice_dev)
 {
 	struct kc_entry *entry = NULL;
 	int i = 0;
@@ -829,7 +769,7 @@ int pfk_kc_clear(void)
 
 	kc_spin_lock();
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
-		entry = kc_entry_at_index(i);
+		entry = kc_entry_at_index(i, ice_dev);
 		res = kc_entry_start_invalidating(entry);
 		if (res != 0) {
 			kc_spin_unlock();
@@ -840,15 +780,15 @@ int pfk_kc_clear(void)
 	kc_spin_unlock();
 
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
-		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
-					s_type);
+		qti_pfk_ice_invalidate_key(
+			kc_entry_at_index(i, ice_dev)->key_index, ice_dev);
 
 	/* fall through */
 	res = 0;
 out:
 	kc_spin_lock();
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
-		kc_entry_finish_invalidating(kc_entry_at_index(i));
+		kc_entry_finish_invalidating(kc_entry_at_index(i, ice_dev));
 	kc_spin_unlock();
 
 	return res;
@@ -862,7 +802,7 @@ int pfk_kc_clear(void)
  * Return 0 on success, error otherwise
  *
  */
-void pfk_kc_clear_on_reset(void)
+void pfk_kc_clear_on_reset(struct ice_device *ice_dev)
 {
 	struct kc_entry *entry = NULL;
 	int i = 0;
@@ -872,7 +812,7 @@ void pfk_kc_clear_on_reset(void)
 
 	kc_spin_lock();
 	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
-		entry = kc_entry_at_index(i);
+		entry = kc_entry_at_index(i, ice_dev);
 		kc_clear_entry(entry);
 	}
 	kc_spin_unlock();
@@ -895,6 +835,24 @@ static int pfk_kc_find_storage_type(char **device)
 	return -EINVAL;
 }
 
+int pfk_kc_initialize_key_table(struct ice_device *ice_dev)
+{
+	int res = 0;
+	struct kc_entry *kc_table;
+
+	kc_table = kzalloc(PFK_KC_TABLE_SIZE*sizeof(struct kc_entry),
+			GFP_KERNEL);
+	if (!kc_table) {
+		res = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for key table\n",
+			__func__, res);
+	}
+	ice_dev->key_table = kc_table;
+	pfk_kc_init(ice_dev);
+
+	return res;
+}
+
 static int __init pfk_kc_pre_init(void)
 {
 	return pfk_kc_find_storage_type(&s_type);
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
index 30765bf..cc89827 100644
--- a/security/pfe/pfk_kc.h
+++ b/security/pfe/pfk_kc.h
@@ -7,19 +7,21 @@
 #define PFK_KC_H_
 
 #include <linux/types.h>
+#include <crypto/ice.h>
 
-int pfk_kc_init(void);
+
 int pfk_kc_deinit(void);
 int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
 		const unsigned char *salt, size_t salt_size, u32 *key_index,
-		bool async, unsigned int data_unit);
+		bool async, unsigned int data_unit, struct ice_device *ice_dev);
 void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
-		const unsigned char *salt, size_t salt_size);
+		const unsigned char *salt, size_t salt_size,
+		struct ice_device *ice_dev);
 int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
 		const unsigned char *salt, size_t salt_size);
-int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
-int pfk_kc_clear(void);
-void pfk_kc_clear_on_reset(void);
+int pfk_kc_clear(struct ice_device *ice_dev);
+void pfk_kc_clear_on_reset(struct ice_device *ice_dev);
+int pfk_kc_initialize_key_table(struct ice_device *ice_dev);
 const char *pfk_kc_get_storage_type(void);
 extern char *saved_command_line;
 
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 2dff571..44816ee 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -840,6 +840,7 @@ int __init avc_add_callback(int (*callback)(u32 event), u32 events)
  * @ssid,@tsid,@tclass : identifier of an AVC entry
  * @seqno : sequence number when decision was made
  * @xpd: extended_perms_decision to be added to the node
+ * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
  *
  * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
@@ -858,6 +859,23 @@ static int avc_update_node(struct selinux_avc *avc,
 	struct hlist_head *head;
 	spinlock_t *lock;
 
+	/*
+	 * If we are in a non-blocking code path, e.g. VFS RCU walk,
+	 * then we must not add permissions to a cache entry
+	 * because we cannot safely audit the denial.  Otherwise,
+	 * during the subsequent blocking retry (e.g. VFS ref walk), we
+	 * will find the permissions already granted in the cache entry
+	 * and won't audit anything at all, leading to silent denials in
+	 * permissive mode that only appear when in enforcing mode.
+	 *
+	 * See the corresponding handling in slow_avc_audit(), and the
+	 * logic in selinux_inode_follow_link and selinux_inode_permission
+	 * for the VFS MAY_NOT_BLOCK flag, which is transliterated into
+	 * AVC_NONBLOCKING for avc_has_perm_noaudit().
+	 */
+	if (flags & AVC_NONBLOCKING)
+		return 0;
+
 	node = avc_alloc_node(avc);
 	if (!node) {
 		rc = -ENOMEM;
@@ -1117,7 +1135,7 @@ int avc_has_extended_perms(struct selinux_state *state,
  * @tsid: target security identifier
  * @tclass: target security class
  * @requested: requested permissions, interpreted based on @tclass
- * @flags:  AVC_STRICT or 0
+ * @flags:  AVC_STRICT, AVC_NONBLOCKING, or 0
  * @avd: access vector decisions
  *
  * Check the AVC to determine whether the @requested permissions are granted
@@ -1201,7 +1219,8 @@ int avc_has_perm_flags(struct selinux_state *state,
 	struct av_decision avd;
 	int rc, rc2;
 
-	rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
+	rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
+				  (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
 				  &avd);
 
 	rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4337b6d..70bad15 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -497,16 +497,10 @@ static int may_context_mount_inode_relabel(u32 sid,
 	return rc;
 }
 
-static int selinux_is_sblabel_mnt(struct super_block *sb)
+static int selinux_is_genfs_special_handling(struct super_block *sb)
 {
-	struct superblock_security_struct *sbsec = sb->s_security;
-
-	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
-		sbsec->behavior == SECURITY_FS_USE_TRANS ||
-		sbsec->behavior == SECURITY_FS_USE_TASK ||
-		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
-		/* Special handling. Genfs but also in-core setxattr handler */
-		!strcmp(sb->s_type->name, "sysfs") ||
+	/* Special handling. Genfs but also in-core setxattr handler */
+	return	!strcmp(sb->s_type->name, "sysfs") ||
 		!strcmp(sb->s_type->name, "pstore") ||
 		!strcmp(sb->s_type->name, "debugfs") ||
 		!strcmp(sb->s_type->name, "tracefs") ||
@@ -516,6 +510,34 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
 		  !strcmp(sb->s_type->name, "cgroup2")));
 }
 
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec = sb->s_security;
+
+	/*
+	 * IMPORTANT: Double-check logic in this function when adding a new
+	 * SECURITY_FS_USE_* definition!
+	 */
+	BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
+
+	switch (sbsec->behavior) {
+	case SECURITY_FS_USE_XATTR:
+	case SECURITY_FS_USE_TRANS:
+	case SECURITY_FS_USE_TASK:
+	case SECURITY_FS_USE_NATIVE:
+		return 1;
+
+	case SECURITY_FS_USE_GENFS:
+		return selinux_is_genfs_special_handling(sb);
+
+	/* Never allow relabeling on context mounts */
+	case SECURITY_FS_USE_MNTPOINT:
+	case SECURITY_FS_USE_NONE:
+	default:
+		return 0;
+	}
+}
+
 static int sb_finish_set_opts(struct super_block *sb)
 {
 	struct superblock_security_struct *sbsec = sb->s_security;
@@ -3199,7 +3221,9 @@ static int selinux_inode_permission(struct inode *inode, int mask)
 		return PTR_ERR(isec);
 
 	rc = avc_has_perm_noaudit(&selinux_state,
-				  sid, isec->sid, isec->sclass, perms, 0, &avd);
+				  sid, isec->sid, isec->sclass, perms,
+				  (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+				  &avd);
 	audited = avc_audit_required(perms, &avd, rc,
 				     from_access ? FILE__AUDIT_ACCESS : 0,
 				     &denied);
@@ -4776,7 +4800,7 @@ static int selinux_socket_connect_helper(struct socket *sock,
 		struct lsm_network_audit net = {0,};
 		struct sockaddr_in *addr4 = NULL;
 		struct sockaddr_in6 *addr6 = NULL;
-		unsigned short snum;
+		unsigned short snum = 0;
 		u32 sid, perm;
 
 		/* sctp_connectx(3) calls via selinux_sctp_bind_connect()
@@ -4799,12 +4823,12 @@ static int selinux_socket_connect_helper(struct socket *sock,
 			break;
 		default:
 			/* Note that SCTP services expect -EINVAL, whereas
-			 * others expect -EAFNOSUPPORT.
+			 * others must handle this at the protocol level:
+			 * connect(AF_UNSPEC) on a connected socket is
+			 * a documented way to disconnect the socket.
 			 */
 			if (sksec->sclass == SECCLASS_SCTP_SOCKET)
 				return -EINVAL;
-			else
-				return -EAFNOSUPPORT;
 		}
 
 		err = sel_netport_sid(sk->sk_protocol, snum, &sid);
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index ef899bc..74ea509 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -142,6 +142,7 @@ static inline int avc_audit(struct selinux_state *state,
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
 #define AVC_EXTENDED_PERMS 2	/* update extended permissions */
+#define AVC_NONBLOCKING    4	/* non blocking */
 int avc_has_perm_noaudit(struct selinux_state *state,
 			 u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index bd5fe0d..201f7e5 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/capability.h>
+#include <linux/socket.h>
 
 #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
     "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 186e727..6fd9954 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -288,11 +288,8 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
 	int rc;
 	struct netlbl_lsm_secattr secattr;
 	struct sk_security_struct *sksec = ep->base.sk->sk_security;
-	struct sockaddr *addr;
 	struct sockaddr_in addr4;
-#if IS_ENABLED(CONFIG_IPV6)
 	struct sockaddr_in6 addr6;
-#endif
 
 	if (ep->base.sk->sk_family != PF_INET &&
 				ep->base.sk->sk_family != PF_INET6)
@@ -310,16 +307,15 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
 	if (ip_hdr(skb)->version == 4) {
 		addr4.sin_family = AF_INET;
 		addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
-		addr = (struct sockaddr *)&addr4;
-#if IS_ENABLED(CONFIG_IPV6)
-	} else {
+		rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
+	} else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
 		addr6.sin6_family = AF_INET6;
 		addr6.sin6_addr = ipv6_hdr(skb)->saddr;
-		addr = (struct sockaddr *)&addr6;
-#endif
+		rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
+	} else {
+		rc = -EAFNOSUPPORT;
 	}
 
-	rc = netlbl_conn_setattr(ep->base.sk, addr, &secattr);
 	if (rc == 0)
 		sksec->nlbl_state = NLBL_LABELED;
 
diff --git a/sound/core/info.c b/sound/core/info.c
index ff735cd..fefcb4e 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -733,8 +733,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
 	INIT_LIST_HEAD(&entry->children);
 	INIT_LIST_HEAD(&entry->list);
 	entry->parent = parent;
-	if (parent)
+	if (parent) {
+		mutex_lock(&parent->access);
 		list_add_tail(&entry->list, &parent->children);
+		mutex_unlock(&parent->access);
+	}
 	return entry;
 }
 
@@ -816,7 +819,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
 	list_for_each_entry_safe(p, n, &entry->children, list)
 		snd_info_free_entry(p);
 
-	list_del(&entry->list);
+	p = entry->parent;
+	if (p) {
+		mutex_lock(&p->access);
+		list_del(&entry->list);
+		mutex_unlock(&p->access);
+	}
 	kfree(entry->name);
 	if (entry->private_free)
 		entry->private_free(entry);
diff --git a/sound/core/init.c b/sound/core/init.c
index 438c6b2..9457577 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -451,14 +451,7 @@ int snd_card_disconnect(struct snd_card *card)
 	card->shutdown = 1;
 	spin_unlock(&card->files_lock);
 
-	/* phase 1: disable fops (user space) operations for ALSA API */
-	mutex_lock(&snd_card_mutex);
-	snd_cards[card->number] = NULL;
-	clear_bit(card->number, snd_cards_lock);
-	mutex_unlock(&snd_card_mutex);
-	
-	/* phase 2: replace file->f_op with special dummy operations */
-	
+	/* replace file->f_op with special dummy operations */
 	spin_lock(&card->files_lock);
 	list_for_each_entry(mfile, &card->files_list, list) {
 		/* it's critical part, use endless loop */
@@ -474,7 +467,7 @@ int snd_card_disconnect(struct snd_card *card)
 	}
 	spin_unlock(&card->files_lock);	
 
-	/* phase 3: notify all connected devices about disconnection */
+	/* notify all connected devices about disconnection */
 	/* at this point, they cannot respond to any calls except release() */
 
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -490,6 +483,13 @@ int snd_card_disconnect(struct snd_card *card)
 		device_del(&card->card_dev);
 		card->registered = false;
 	}
+
+	/* disable fops (user space) operations for ALSA API */
+	mutex_lock(&snd_card_mutex);
+	snd_cards[card->number] = NULL;
+	clear_bit(card->number, snd_cards_lock);
+	mutex_unlock(&snd_card_mutex);
+
 #ifdef CONFIG_PM
 	wake_up(&card->power_sleep);
 #endif
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 80f7381..0436789 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -7394,8 +7394,10 @@ static void ca0132_free(struct hda_codec *codec)
 	ca0132_exit_chip(codec);
 
 	snd_hda_power_down(codec);
-	if (IS_ENABLED(CONFIG_PCI) && spec->mem_base)
+#ifdef CONFIG_PCI
+	if (spec->mem_base)
 		pci_iounmap(codec->bus->pci, spec->mem_base);
+#endif
 	kfree(spec->spec_init_verbs);
 	kfree(codec->spec);
 }
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index cb587dc..35931a1 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1548,9 +1548,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 	ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
 	jack = snd_hda_jack_tbl_get(codec, pin_nid);
-	if (jack)
+	if (jack) {
 		jack->block_report = !ret;
-
+		jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
+			AC_PINSENSE_PRESENCE : 0;
+	}
 	mutex_unlock(&per_pin->lock);
 	return ret;
 }
@@ -1660,6 +1662,11 @@ static void hdmi_repoll_eld(struct work_struct *work)
 	container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
 	struct hda_codec *codec = per_pin->codec;
 	struct hdmi_spec *spec = codec->spec;
+	struct hda_jack_tbl *jack;
+
+	jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+	if (jack)
+		jack->jack_dirty = 1;
 
 	if (per_pin->repoll_count++ > 6)
 		per_pin->repoll_count = 0;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bd60eb7..adce5b6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -477,12 +477,45 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 		set_eapd(codec, *p, on);
 }
 
+static int find_ext_mic_pin(struct hda_codec *codec);
+
+static void alc_headset_mic_no_shutup(struct hda_codec *codec)
+{
+	const struct hda_pincfg *pin;
+	int mic_pin = find_ext_mic_pin(codec);
+	int i;
+
+	/* don't shut up pins when unloading the driver; otherwise it breaks
+	 * the default pin setup at the next load of the driver
+	 */
+	if (codec->bus->shutdown)
+		return;
+
+	snd_array_for_each(&codec->init_pins, i, pin) {
+		/* use read here for syncing after issuing each verb */
+		if (pin->nid != mic_pin)
+			snd_hda_codec_read(codec, pin->nid, 0,
+					AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+	}
+
+	codec->pins_shutup = 1;
+}
+
 static void alc_shutup_pins(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
-	if (!spec->no_shutup_pins)
-		snd_hda_shutup_pins(codec);
+	switch (codec->core.vendor_id) {
+	case 0x10ec0286:
+	case 0x10ec0288:
+	case 0x10ec0298:
+		alc_headset_mic_no_shutup(codec);
+		break;
+	default:
+		if (!spec->no_shutup_pins)
+			snd_hda_shutup_pins(codec);
+		break;
+	}
 }
 
 /* generic shutup callback;
@@ -803,11 +836,10 @@ static int alc_init(struct hda_codec *codec)
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
+	snd_hda_gen_init(codec);
 	alc_fix_pll(codec);
 	alc_auto_init_amp(codec, spec->init_amp);
 
-	snd_hda_gen_init(codec);
-
 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
 	return 0;
@@ -2924,27 +2956,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
 	return alc_parse_auto_config(codec, alc269_ignore, ssids);
 }
 
-static int find_ext_mic_pin(struct hda_codec *codec);
-
-static void alc286_shutup(struct hda_codec *codec)
-{
-	const struct hda_pincfg *pin;
-	int i;
-	int mic_pin = find_ext_mic_pin(codec);
-	/* don't shut up pins when unloading the driver; otherwise it breaks
-	 * the default pin setup at the next load of the driver
-	 */
-	if (codec->bus->shutdown)
-		return;
-	snd_array_for_each(&codec->init_pins, i, pin) {
-		/* use read here for syncing after issuing each verb */
-		if (pin->nid != mic_pin)
-			snd_hda_codec_read(codec, pin->nid, 0,
-					AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
-	}
-	codec->pins_shutup = 1;
-}
-
 static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
 {
 	alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
@@ -5448,6 +5459,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
 		return;
 
 	spec->gen.preferred_dacs = preferred_pairs;
+	spec->gen.auto_mute_via_amp = 1;
+	codec->power_save_node = 0;
 }
 
 /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
@@ -6839,6 +6852,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6881,7 +6898,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
-	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
@@ -7170,6 +7187,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0x90a60140},
 		{0x14, 0x90170150},
 		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x40000000},
+		{0x14, 0x90170110},
+		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -7280,6 +7303,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC256_STANDARD_PINS),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x14, 0x90170110},
+		{0x1b, 0x01011020},
+		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
 		{0x14, 0x90170110},
 		{0x1b, 0x90a70130},
@@ -7439,6 +7466,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0x90a60130},
 		{0x17, 0x90170110},
 		{0x21, 0x04211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+		{0x12, 0x90a60130},
+		{0x17, 0x90170110},
+		{0x21, 0x03211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x14, 0x90170110},
+		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC295_STANDARD_PINS,
 		{0x17, 0x21014020},
@@ -7589,7 +7623,6 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0286:
 	case 0x10ec0288:
 		spec->codec_variant = ALC269_TYPE_ALC286;
-		spec->shutup = alc286_shutup;
 		break;
 	case 0x10ec0298:
 		spec->codec_variant = ALC269_TYPE_ALC298;
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index bd6226b..17e0101 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -1634,6 +1634,16 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
 	return ret;
 }
 
+static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
+{
+	struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
+
+	regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
+	gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+
+	return 0;
+}
+
 static const struct of_device_id cs35l35_of_match[] = {
 	{.compatible = "cirrus,cs35l35"},
 	{},
@@ -1654,6 +1664,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
 	},
 	.id_table = cs35l35_id,
 	.probe = cs35l35_i2c_probe,
+	.remove = cs35l35_i2c_remove,
 };
 
 module_i2c_driver(cs35l35_i2c_driver);
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 3c266ee..007ce9f 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = {
 	.reg_defaults =		cs4270_reg_defaults,
 	.num_reg_defaults =	ARRAY_SIZE(cs4270_reg_defaults),
 	.cache_type =		REGCACHE_RBTREE,
+	.write_flag_mask =	CS4270_I2C_INCR,
 
 	.readable_reg =		cs4270_reg_is_readable,
 	.volatile_reg =		cs4270_reg_is_volatile,
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index b61d518..6348724 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1829,6 +1829,17 @@ static int hdmi_codec_probe(struct snd_soc_component *component)
 	hdmi->card = dapm->card->snd_card;
 
 	/*
+	 * Setup a device_link between card device and HDMI codec device.
+	 * The card device is the consumer and the HDMI codec device is
+	 * the supplier. With this setting, we can make sure that the audio
+	 * domain in display power will be always turned on before operating
+	 * on the HDMI audio codec registers.
+	 * Let's use the flag DL_FLAG_AUTOREMOVE_CONSUMER. This can make
+	 * sure the device link is freed when the machine driver is removed.
+	 */
+	device_link_add(component->card->dev, &hdev->dev, DL_FLAG_RPM_ACTIVE |
+			DL_FLAG_AUTOREMOVE_CONSUMER);
+	/*
 	 * hdac_device core already sets the state to active and calls
 	 * get_noresume. So enable runtime and set the device to suspend.
 	 */
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index e5b6769..7994e8d 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -439,8 +439,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
 		if (!ret) {
 			ret = snd_pcm_hw_constraint_eld(substream->runtime,
 							hcp->eld);
-			if (ret)
+			if (ret) {
+				mutex_lock(&hcp->current_stream_lock);
+				hcp->current_stream = NULL;
+				mutex_unlock(&hcp->current_stream_lock);
 				return ret;
+			}
 		}
 		/* Select chmap supported */
 		hdmi_codec_eld_chmap(hcp);
@@ -529,73 +533,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
 {
 	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
 	struct hdmi_codec_daifmt cf = { 0 };
-	int ret = 0;
 
 	dev_dbg(dai->dev, "%s()\n", __func__);
 
-	if (dai->id == DAI_ID_SPDIF) {
-		cf.fmt = HDMI_SPDIF;
-	} else {
-		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-		case SND_SOC_DAIFMT_CBM_CFM:
-			cf.bit_clk_master = 1;
-			cf.frame_clk_master = 1;
-			break;
-		case SND_SOC_DAIFMT_CBS_CFM:
-			cf.frame_clk_master = 1;
-			break;
-		case SND_SOC_DAIFMT_CBM_CFS:
-			cf.bit_clk_master = 1;
-			break;
-		case SND_SOC_DAIFMT_CBS_CFS:
-			break;
-		default:
-			return -EINVAL;
-		}
+	if (dai->id == DAI_ID_SPDIF)
+		return 0;
 
-		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-		case SND_SOC_DAIFMT_NB_NF:
-			break;
-		case SND_SOC_DAIFMT_NB_IF:
-			cf.frame_clk_inv = 1;
-			break;
-		case SND_SOC_DAIFMT_IB_NF:
-			cf.bit_clk_inv = 1;
-			break;
-		case SND_SOC_DAIFMT_IB_IF:
-			cf.frame_clk_inv = 1;
-			cf.bit_clk_inv = 1;
-			break;
-		}
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		cf.bit_clk_master = 1;
+		cf.frame_clk_master = 1;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFM:
+		cf.frame_clk_master = 1;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFS:
+		cf.bit_clk_master = 1;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		return -EINVAL;
+	}
 
-		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-		case SND_SOC_DAIFMT_I2S:
-			cf.fmt = HDMI_I2S;
-			break;
-		case SND_SOC_DAIFMT_DSP_A:
-			cf.fmt = HDMI_DSP_A;
-			break;
-		case SND_SOC_DAIFMT_DSP_B:
-			cf.fmt = HDMI_DSP_B;
-			break;
-		case SND_SOC_DAIFMT_RIGHT_J:
-			cf.fmt = HDMI_RIGHT_J;
-			break;
-		case SND_SOC_DAIFMT_LEFT_J:
-			cf.fmt = HDMI_LEFT_J;
-			break;
-		case SND_SOC_DAIFMT_AC97:
-			cf.fmt = HDMI_AC97;
-			break;
-		default:
-			dev_err(dai->dev, "Invalid DAI interface format\n");
-			return -EINVAL;
-		}
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		cf.frame_clk_inv = 1;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		cf.bit_clk_inv = 1;
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		cf.frame_clk_inv = 1;
+		cf.bit_clk_inv = 1;
+		break;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		cf.fmt = HDMI_I2S;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		cf.fmt = HDMI_DSP_A;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		cf.fmt = HDMI_DSP_B;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		cf.fmt = HDMI_RIGHT_J;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		cf.fmt = HDMI_LEFT_J;
+		break;
+	case SND_SOC_DAIFMT_AC97:
+		cf.fmt = HDMI_AC97;
+		break;
+	default:
+		dev_err(dai->dev, "Invalid DAI interface format\n");
+		return -EINVAL;
 	}
 
 	hcp->daifmt[dai->id] = cf;
 
-	return ret;
+	return 0;
 }
 
 static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -792,8 +794,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
 		i++;
 	}
 
-	if (hcd->spdif)
+	if (hcd->spdif) {
 		hcp->daidrv[i] = hdmi_spdif_dai;
+		hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+	}
 
 	dev_set_drvdata(dev, hcp);
 
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index c97f218..f06ae43 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
 		&max98090_right_rcv_mixer_controls[0],
 		ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
 
-	SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
-		M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
+	SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_linmod_mux),
 
-	SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
-		M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
+	SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_mixhplsel_mux),
 
-	SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
-		M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
+	SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_mixhprsel_mux),
 
 	SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
 		M98090_HPLEN_SHIFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index bfd74b8..645aa07 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
 	SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
 		NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
 		ARRAY_SIZE(nau8810_mono_mixer_controls)),
-	SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+	SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
 		NAU8810_DAC_EN_SFT, 0),
-	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+	SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
 		NAU8810_ADC_EN_SFT, 0),
 	SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
 		NAU8810_NSPK_EN_SFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 468d514..663a208 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
 	SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
 		NAU8824_ADCR_EN_SFT, 0),
 
-	SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
-	SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
 
 	SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
 		NAU8824_DACL_EN_SFT, 0),
@@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
 	}
 }
 
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+	struct snd_soc_dapm_context *dapm = nau8824->dapm;
+	const char *prefix = dapm->component->name_prefix;
+	char prefixed_pin[80];
+
+	if (prefix) {
+		snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+			 prefix, pin);
+		snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+	} else {
+		snd_soc_dapm_disable_pin(dapm, pin);
+	}
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+	struct snd_soc_dapm_context *dapm = nau8824->dapm;
+	const char *prefix = dapm->component->name_prefix;
+	char prefixed_pin[80];
+
+	if (prefix) {
+		snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+			 prefix, pin);
+		snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+	} else {
+		snd_soc_dapm_force_enable_pin(dapm, pin);
+	}
+}
+
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
 	struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
 	/* Clear all interruption status */
 	nau8824_int_status_clear_all(regmap);
 
-	snd_soc_dapm_disable_pin(dapm, "SAR");
-	snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+	nau8824_dapm_disable_pin(nau8824, "SAR");
+	nau8824_dapm_disable_pin(nau8824, "MICBIAS");
 	snd_soc_dapm_sync(dapm);
 
 	/* Enable the insertion interruption, disable the ejection
@@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work)
 	struct regmap *regmap = nau8824->regmap;
 	int adc_value, event = 0, event_mask = 0;
 
-	snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
-	snd_soc_dapm_force_enable_pin(dapm, "SAR");
+	nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+	nau8824_dapm_enable_pin(nau8824, "SAR");
 	snd_soc_dapm_sync(dapm);
 
 	msleep(100);
@@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work)
 	if (adc_value < HEADSET_SARADC_THD) {
 		event |= SND_JACK_HEADPHONE;
 
-		snd_soc_dapm_disable_pin(dapm, "SAR");
-		snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+		nau8824_dapm_disable_pin(nau8824, "SAR");
+		nau8824_dapm_disable_pin(nau8824, "MICBIAS");
 		snd_soc_dapm_sync(dapm);
 	} else {
 		event |= SND_JACK_HEADSET;
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index bd51f36..06abcd0 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -58,13 +58,15 @@ static DEFINE_MUTEX(spi_mutex);
  * RT5677_SPI_READ/WRITE_32:	Transfer 4 bytes
  * RT5677_SPI_READ/WRITE_BURST:	Transfer any multiples of 8 bytes
  *
- * For example, reading 260 bytes at 0x60030002 uses the following commands:
- * 0x60030002 RT5677_SPI_READ_16	2 bytes
+ * Note:
+ * 16 Bit writes and reads are restricted to the address range
+ * 0x18020000 ~ 0x18021000
+ *
+ * For example, reading 256 bytes at 0x60030004 uses the following commands:
  * 0x60030004 RT5677_SPI_READ_32	4 bytes
  * 0x60030008 RT5677_SPI_READ_BURST	240 bytes
  * 0x600300F8 RT5677_SPI_READ_BURST	8 bytes
  * 0x60030100 RT5677_SPI_READ_32	4 bytes
- * 0x60030104 RT5677_SPI_READ_16	2 bytes
  *
  * Input:
  * @read: true for read commands; false for write commands
@@ -79,15 +81,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
 {
 	u8 cmd;
 
-	if (align == 2 || align == 6 || remain == 2) {
-		cmd = RT5677_SPI_READ_16;
-		*len = 2;
-	} else if (align == 4 || remain <= 6) {
+	if (align == 4 || remain <= 4) {
 		cmd = RT5677_SPI_READ_32;
 		*len = 4;
 	} else {
 		cmd = RT5677_SPI_READ_BURST;
-		*len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+		*len = (((remain - 1) >> 3) + 1) << 3;
+		*len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
 	}
 	return read ? cmd : cmd + 1;
 }
@@ -108,7 +108,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
 	}
 }
 
-/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
 int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
 {
 	u32 offset;
@@ -124,7 +124,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
 	if (!g_spi)
 		return -ENODEV;
 
-	if ((addr & 1) || (len & 1)) {
+	if ((addr & 3) || (len & 3)) {
 		dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
 		return -EACCES;
 	}
@@ -159,13 +159,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
 }
 EXPORT_SYMBOL_GPL(rt5677_spi_read);
 
-/* Write DSP address space using SPI. addr has to be 2-byte aligned.
- * If len is not 2-byte aligned, an extra byte of zero is written at the end
+/* Write DSP address space using SPI. addr has to be 4-byte aligned.
+ * If len is not 4-byte aligned, then extra zeros are written at the end
  * as padding.
  */
 int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
 {
-	u32 offset, len_with_pad = len;
+	u32 offset;
 	int status = 0;
 	struct spi_transfer t;
 	struct spi_message m;
@@ -178,22 +178,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
 	if (!g_spi)
 		return -ENODEV;
 
-	if (addr & 1) {
+	if (addr & 3) {
 		dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
 		return -EACCES;
 	}
 
-	if (len & 1)
-		len_with_pad = len + 1;
-
 	memset(&t, 0, sizeof(t));
 	t.tx_buf = buf;
 	t.speed_hz = RT5677_SPI_FREQ;
 	spi_message_init_with_transfers(&m, &t, 1);
 
-	for (offset = 0; offset < len_with_pad;) {
+	for (offset = 0; offset < len;) {
 		spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
-				len_with_pad - offset, &t.len);
+				len - offset, &t.len);
 
 		/* Construct SPI message header */
 		buf[0] = spi_cmd;
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 79ebcc2..6f5dac0 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1196,7 +1196,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 	struct snd_soc_component *component =
 		snd_soc_dapm_to_component(w->dapm);
 	struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-	int ref, val, reg, sft, mask, idx = -EINVAL;
+	int ref, val, reg, idx = -EINVAL;
 	static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
 	static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
 
@@ -1210,15 +1210,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 
 	idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
 
-	if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+	if (w->shift == RT5682_PWR_ADC_S1F_BIT)
 		reg = RT5682_PLL_TRACK_3;
-		sft = RT5682_ADC_OSR_SFT;
-		mask = RT5682_ADC_OSR_MASK;
-	} else {
+	else
 		reg = RT5682_PLL_TRACK_2;
-		sft = RT5682_DAC_OSR_SFT;
-		mask = RT5682_DAC_OSR_MASK;
-	}
 
 	snd_soc_component_update_bits(component, reg,
 		RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
@@ -1230,7 +1225,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 	}
 
 	snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
-		mask, idx << sft);
+		RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK,
+		(idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index f03195d..45d9f4a 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -462,6 +462,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
 	SND_SOC_DAPM_INPUT("IN2_R"),
 	SND_SOC_DAPM_INPUT("IN3_L"),
 	SND_SOC_DAPM_INPUT("IN3_R"),
+	SND_SOC_DAPM_INPUT("CM_L"),
+	SND_SOC_DAPM_INPUT("CM_R"),
 };
 
 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6a271e6..6c0a3da 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1599,7 +1599,6 @@ static int aic3x_probe(struct snd_soc_component *component)
 	struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
 	int ret, i;
 
-	INIT_LIST_HEAD(&aic3x->list);
 	aic3x->component = component;
 
 	for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
@@ -1682,7 +1681,6 @@ static void aic3x_remove(struct snd_soc_component *component)
 	struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
 	int i;
 
-	list_del(&aic3x->list);
 	for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
 		regulator_unregister_notifier(aic3x->supplies[i].consumer,
 					      &aic3x->disable_nb[i].nb);
@@ -1880,6 +1878,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 	if (ret != 0)
 		goto err_gpio;
 
+	INIT_LIST_HEAD(&aic3x->list);
 	list_add(&aic3x->list, &reset_list);
 
 	return 0;
@@ -1896,6 +1895,8 @@ static int aic3x_i2c_remove(struct i2c_client *client)
 {
 	struct aic3x_priv *aic3x = i2c_get_clientdata(client);
 
+	list_del(&aic3x->list);
+
 	if (gpio_is_valid(aic3x->gpio_reset) &&
 	    !aic3x_is_shared_reset(aic3x)) {
 		gpio_set_value(aic3x->gpio_reset, 0);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 4d3ec29..ee85056 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -3441,8 +3441,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
 			}
 		}
 
-		wm_adsp_buffer_clear(compr->buf);
-
 		/* Trigger the IRQ at one fragment of data */
 		ret = wm_adsp_buffer_write(compr->buf,
 					   HOST_BUFFER_FIELD(high_water_mark),
@@ -3454,6 +3452,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
 		}
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
+		if (wm_adsp_compr_attached(compr))
+			wm_adsp_buffer_clear(compr->buf);
 		break;
 	default:
 		ret = -EINVAL;
@@ -3819,11 +3819,13 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 	struct regmap *regmap = dsp->regmap;
 	int ret = 0;
 
+	mutex_lock(&dsp->pwr_lock);
+
 	ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
 	if (ret) {
 		adsp_err(dsp,
 			"Failed to read Region Lock Ctrl register: %d\n", ret);
-		return IRQ_HANDLED;
+		goto error;
 	}
 
 	if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
@@ -3842,7 +3844,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 			adsp_err(dsp,
 				 "Failed to read Bus Err Addr register: %d\n",
 				 ret);
-			return IRQ_HANDLED;
+			goto error;
 		}
 
 		adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3855,7 +3857,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 			adsp_err(dsp,
 				 "Failed to read Pmem Xmem Err Addr register: %d\n",
 				 ret);
-			return IRQ_HANDLED;
+			goto error;
 		}
 
 		adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3868,6 +3870,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 	regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
 			   ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
 
+error:
+	mutex_unlock(&dsp->pwr_lock);
+
 	return IRQ_HANDLED;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index f70db84..160b276 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -43,6 +43,7 @@
 
 #define MCASP_MAX_AFIFO_DEPTH	64
 
+#ifdef CONFIG_PM
 static u32 context_regs[] = {
 	DAVINCI_MCASP_TXFMCTL_REG,
 	DAVINCI_MCASP_RXFMCTL_REG,
@@ -65,6 +66,7 @@ struct davinci_mcasp_context {
 	u32	*xrsr_regs; /* for serializer configuration */
 	bool	pm_state;
 };
+#endif
 
 struct davinci_mcasp_ruledata {
 	struct davinci_mcasp *mcasp;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 2e75b5b..f721cd4 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -173,16 +173,17 @@
 
 endif # SND_POWERPC_SOC
 
+config SND_SOC_IMX_PCM_FIQ
+	tristate
+	default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC)
+	select FIQ
+
 if SND_IMX_SOC
 
 config SND_SOC_IMX_SSI
 	tristate
 	select SND_SOC_FSL_UTILS
 
-config SND_SOC_IMX_PCM_FIQ
-	tristate
-	select FIQ
-
 comment "SoC Audio support for Freescale i.MX boards:"
 
 config SND_MXC_SOC_WM1133_EV1
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 191426a..30a3d68 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -118,13 +118,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(&pdev->dev,
 				"fsl,mux-int-port node missing or invalid.\n");
-			return ret;
+			goto err;
 		}
 		ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port);
 		if (ret) {
 			dev_err(&pdev->dev,
 				"fsl,mux-ext-port node missing or invalid.\n");
-			return ret;
+			goto err;
 		}
 
 		/*
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38fd32a..ff96db9 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -251,7 +251,7 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
 		break;
 	case ESAI_HCKT_EXTAL:
 		ecr |= ESAI_ECR_ETI;
-		/* fall through */
+		break;
 	case ESAI_HCKR_EXTAL:
 		ecr |= ESAI_ECR_ERI;
 		break;
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 4163f2c..bfc5b21 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -268,12 +268,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
 	case SND_SOC_DAIFMT_CBS_CFS:
 		val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
 		val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
+		sai->is_slave_mode = false;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		sai->is_slave_mode = true;
 		break;
 	case SND_SOC_DAIFMT_CBS_CFM:
 		val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
+		sai->is_slave_mode = false;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFS:
 		val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 7f0fa4b..cca33ab 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -71,6 +71,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
 	iprop = of_get_property(dma_np, "cell-index", NULL);
 	if (!iprop) {
 		of_node_put(dma_np);
+		of_node_put(dma_channel_np);
 		return -EINVAL;
 	}
 	*dma_id = be32_to_cpup(iprop);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index f8a68bd..c6c8d20 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -267,7 +267,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
 static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
 	{"DMIC L1", NULL, "Internal Mic"},
 	{"DMIC R1", NULL, "Internal Mic"},
-	{"IN3P", NULL, "Headset Mic"},
+	{"IN2P", NULL, "Headset Mic"},
 };
 
 static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_map[] = {
@@ -787,7 +787,7 @@ static struct snd_soc_card byt_rt5651_card = {
 };
 
 static const struct x86_cpu_id baytrail_cpu_ids[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, /* Valleyview */
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, /* Valleyview */
 	{}
 };
 
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index 38f6ab7..07491a0 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -188,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
 
 	jack = &ctx->kabylake_headset;
 
-	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index a892b37..b8a03f5 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -406,7 +406,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
 };
 
 static const unsigned int dmic_2ch[] = {
-	4,
+	2,
 };
 
 static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 11041aed..6cfcc10 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -1251,11 +1251,15 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
 		goto irq_err;
 
 	err = sst_dma_new(sst);
-	if (err)
-		dev_warn(dev, "sst_dma_new failed %d\n", err);
+	if (err)  {
+		dev_err(dev, "sst_dma_new failed %d\n", err);
+		goto dma_err;
+	}
 
 	return sst;
 
+dma_err:
+	free_irq(sst->irq, sst);
 irq_err:
 	if (sst->ops->free)
 		sst->ops->free(sst);
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 823e391..6b2c8c6 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -180,6 +180,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
 	struct hdac_stream *hstream;
 	struct hdac_ext_stream *stream;
 	struct hdac_ext_link *link;
+	unsigned char stream_tag;
 
 	hstream = snd_hdac_get_stream(bus, params->stream,
 					params->link_dma_id + 1);
@@ -198,10 +199,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
 
 	snd_hdac_ext_link_stream_setup(stream, format_val);
 
-	list_for_each_entry(link, &bus->hlink_list, list) {
-		if (link->index == params->link_index)
-			snd_hdac_ext_link_set_stream_id(link,
-					hstream->stream_tag);
+	stream_tag = hstream->stream_tag;
+	if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+		list_for_each_entry(link, &bus->hlink_list, list) {
+			if (link->index == params->link_index)
+				snd_hdac_ext_link_set_stream_id(link,
+								stream_tag);
+		}
 	}
 
 	stream->link_prepared = 1;
@@ -640,6 +644,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
 	struct hdac_ext_stream *link_dev =
 				snd_soc_dai_get_dma_data(dai, substream);
 	struct hdac_ext_link *link;
+	unsigned char stream_tag;
 
 	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -649,7 +654,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
 	if (!link)
 		return -EINVAL;
 
-	snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		stream_tag = hdac_stream(link_dev)->stream_tag;
+		snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+	}
+
 	snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
 	return 0;
 }
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index 7029e0b..4ac78d7 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -21,7 +21,8 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
 	.info			= SNDRV_PCM_INFO_MMAP |
 				  SNDRV_PCM_INFO_MMAP_VALID |
 				  SNDRV_PCM_INFO_PAUSE |
-				  SNDRV_PCM_INFO_RESUME,
+				  SNDRV_PCM_INFO_RESUME |
+				  SNDRV_PCM_INFO_INTERLEAVED,
 	.period_bytes_min	= 32,
 	.period_bytes_max	= 8192,
 	.periods_min		= 1,
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 400e29e..8a2e3bb 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(cpu_dai->dev);
 	regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+	pm_runtime_put(cpu_dai->dev);
 
 	return 0;
 }
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index e7b371b..45c6d73 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -64,11 +64,11 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
 		return ret;
 
 	/*
-	 *  We add 1 to the rclk_freq value in order to avoid too low clock
+	 *  We add 2 to the rclk_freq value in order to avoid too low clock
 	 *  frequency values due to the EPLL output frequency not being exact
 	 *  multiple of the audio sampling rate.
 	 */
-	rclk_freq = params_rate(params) * rfs + 1;
+	rclk_freq = params_rate(params) * rfs + 2;
 
 	ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
 	if (ret < 0)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 721716f..cb0b106 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3857,6 +3857,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
 	int count;
 
 	devm_kfree(card->dev, (void *)*private_value);
+
+	if (!w_param_text)
+		return;
+
 	for (count = 0 ; count < num_params; count++)
 		devm_kfree(card->dev, (void *)w_param_text[count]);
 	devm_kfree(card->dev, w_param_text);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 11a12485..dc0c00a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1018,10 +1018,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
 		codec_params = *params;
 
 		/* fixup params based on TDM slot masks */
-		if (codec_dai->tx_mask)
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+		    codec_dai->tx_mask)
 			soc_pcm_codec_params_fixup(&codec_params,
 						   codec_dai->tx_mask);
-		if (codec_dai->rx_mask)
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+		    codec_dai->rx_mask)
 			soc_pcm_codec_params_fixup(&codec_params,
 						   codec_dai->rx_mask);
 
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 706ff00..24948b95 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -37,6 +38,8 @@ struct stm32_adfsdm_priv {
 	/* PCM buffer */
 	unsigned char *pcm_buff;
 	unsigned int pos;
+
+	struct mutex lock; /* protect against race condition on iio state */
 };
 
 static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
@@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
 {
 	struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
 
+	mutex_lock(&priv->lock);
 	if (priv->iio_active) {
 		iio_channel_stop_all_cb(priv->iio_cb);
 		priv->iio_active = false;
 	}
+	mutex_unlock(&priv->lock);
 }
 
 static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
@@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
 	struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
 	int ret;
 
+	mutex_lock(&priv->lock);
+	if (priv->iio_active) {
+		iio_channel_stop_all_cb(priv->iio_cb);
+		priv->iio_active = false;
+	}
+
 	ret = iio_write_channel_attribute(priv->iio_ch,
 					  substream->runtime->rate, 0,
 					  IIO_CHAN_INFO_SAMP_FREQ);
 	if (ret < 0) {
 		dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
 			__func__, substream->runtime->rate);
-		return ret;
+		goto out;
 	}
 
 	if (!priv->iio_active) {
@@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
 				__func__, ret);
 	}
 
+out:
+	mutex_unlock(&priv->lock);
+
 	return ret;
 }
 
@@ -290,6 +304,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
 static int stm32_adfsdm_probe(struct platform_device *pdev)
 {
 	struct stm32_adfsdm_priv *priv;
+	struct snd_soc_component *component;
 	int ret;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -298,6 +313,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 
 	priv->dev = &pdev->dev;
 	priv->dai_drv = stm32_adfsdm_dai;
+	mutex_init(&priv->lock);
 
 	dev_set_drvdata(&pdev->dev, priv);
 
@@ -316,9 +332,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->iio_cb))
 		return PTR_ERR(priv->iio_cb);
 
-	ret = devm_snd_soc_register_component(&pdev->dev,
-					      &stm32_adfsdm_soc_platform,
-					      NULL, 0);
+	component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
+	if (!component)
+		return -ENOMEM;
+#ifdef CONFIG_DEBUG_FS
+	component->debugfs_prefix = "pcm";
+#endif
+
+	ret = snd_soc_add_component(&pdev->dev, component,
+				    &stm32_adfsdm_soc_platform, NULL, 0);
 	if (ret < 0)
 		dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
 			__func__);
@@ -326,12 +348,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 	return ret;
 }
 
+static int stm32_adfsdm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+
+	return 0;
+}
+
 static struct platform_driver stm32_adfsdm_driver = {
 	.driver = {
 		   .name = STM32_ADFSDM_DRV_NAME,
 		   .of_match_table = stm32_adfsdm_of_match,
 		   },
 	.probe = stm32_adfsdm_probe,
+	.remove = stm32_adfsdm_remove,
 };
 
 module_platform_driver(stm32_adfsdm_driver);
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 06fba96..2fb2b914 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -96,7 +96,7 @@
  * @slot_mask: rx or tx active slots mask. set at init or at runtime
  * @data_size: PCM data width. corresponds to PCM substream width.
  * @spdif_frm_cnt: S/PDIF playback frame counter
- * @snd_aes_iec958: iec958 data
+ * @iec958: iec958 data
  * @ctrl_lock: control lock
  */
 struct stm32_sai_sub_data {
@@ -498,6 +498,14 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
 
 	sai->substream = substream;
 
+	if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+		snd_pcm_hw_constraint_mask64(substream->runtime,
+					     SNDRV_PCM_HW_PARAM_FORMAT,
+					     SNDRV_PCM_FMTBIT_S32_LE);
+		snd_pcm_hw_constraint_single(substream->runtime,
+					     SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+	}
+
 	ret = clk_prepare_enable(sai->sai_ck);
 	if (ret < 0) {
 		dev_err(cpu_dai->dev, "Failed to enable clock: %d\n", ret);
@@ -888,11 +896,12 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
 			     struct snd_soc_dai *cpu_dai)
 {
 	struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+	struct snd_kcontrol_new knew = iec958_ctls;
 
 	if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
 		dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__);
-		return snd_ctl_add(rtd->pcm->card,
-				   snd_ctl_new1(&iec958_ctls, sai));
+		knew.device = rtd->pcm->device;
+		return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai));
 	}
 
 	return 0;
@@ -1194,7 +1203,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
 	if (!sai->cpu_dai_drv)
 		return -ENOMEM;
 
-	sai->cpu_dai_drv->name = dev_name(&pdev->dev);
 	if (STM_SAI_IS_PLAYBACK(sai)) {
 		memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
 		       sizeof(stm32_sai_playback_dai));
@@ -1204,6 +1212,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
 		       sizeof(stm32_sai_capture_dai));
 		sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
 	}
+	sai->cpu_dai_drv->name = dev_name(&pdev->dev);
 
 	return 0;
 }
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index c1376bf..aa28510 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
-	unsigned char len;
+	unsigned char *len;
 	unsigned count;
 
 	if (address > 0xffff || datalen > 0xff)
 		return -EINVAL;
 
+	len = kmalloc(sizeof(*len), GFP_KERNEL);
+	if (!len)
+		return -ENOMEM;
+
 	/* query the serial number: */
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 
 	if (ret < 0) {
 		dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* Wait for data length. We'll get 0xff until length arrives. */
@@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 		ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
 				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
 				      USB_DIR_IN,
-				      0x0012, 0x0000, &len, 1,
+				      0x0012, 0x0000, len, 1,
 				      LINE6_TIMEOUT * HZ);
 		if (ret < 0) {
 			dev_err(line6->ifcdev,
 				"receive length failed (error %d)\n", ret);
-			return ret;
+			goto exit;
 		}
 
-		if (len != 0xff)
+		if (*len != 0xff)
 			break;
 	}
 
-	if (len == 0xff) {
+	ret = -EIO;
+	if (*len == 0xff) {
 		dev_err(line6->ifcdev, "read failed after %d retries\n",
 			count);
-		return -EIO;
-	} else if (len != datalen) {
+		goto exit;
+	} else if (*len != datalen) {
 		/* should be equal or something went wrong */
 		dev_err(line6->ifcdev,
 			"length mismatch (expected %d, got %d)\n",
-			(int)datalen, (int)len);
-		return -EIO;
+			(int)datalen, (int)*len);
+		goto exit;
 	}
 
 	/* receive the result: */
@@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 			      0x0013, 0x0000, data, datalen,
 			      LINE6_TIMEOUT * HZ);
 
-	if (ret < 0) {
+	if (ret < 0)
 		dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
-		return ret;
-	}
 
-	return 0;
+exit:
+	kfree(len);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(line6_read_data);
 
@@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
-	unsigned char status;
+	unsigned char *status;
 	int count;
 
 	if (address > 0xffff || datalen > 0xffff)
 		return -EINVAL;
 
+	status = kmalloc(sizeof(*status), GFP_KERNEL);
+	if (!status)
+		return -ENOMEM;
+
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			      0x0022, address, data, datalen,
@@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 	if (ret < 0) {
 		dev_err(line6->ifcdev,
 			"write request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
 				      USB_DIR_IN,
 				      0x0012, 0x0000,
-				      &status, 1, LINE6_TIMEOUT * HZ);
+				      status, 1, LINE6_TIMEOUT * HZ);
 
 		if (ret < 0) {
 			dev_err(line6->ifcdev,
 				"receiving status failed (error %d)\n", ret);
-			return ret;
+			goto exit;
 		}
 
-		if (status != 0xff)
+		if (*status != 0xff)
 			break;
 	}
 
-	if (status == 0xff) {
+	if (*status == 0xff) {
 		dev_err(line6->ifcdev, "write failed after %d retries\n",
 			count);
-		return -EIO;
-	} else if (status != 0) {
+		ret = -EIO;
+	} else if (*status != 0) {
 		dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
-		return -EIO;
+		ret = -EIO;
 	}
-
-	return 0;
+exit:
+	kfree(status);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(line6_write_data);
 
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 36ed9c8..5f3c872 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t)
 static int podhd_dev_start(struct usb_line6_podhd *pod)
 {
 	int ret;
-	u8 init_bytes[8];
+	u8 *init_bytes;
 	int i;
 	struct usb_device *usbdev = pod->line6.usbdev;
 
+	init_bytes = kmalloc(8, GFP_KERNEL);
+	if (!init_bytes)
+		return -ENOMEM;
+
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
 					0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 					0x11, 0,
 					NULL, 0, LINE6_TIMEOUT * HZ);
 	if (ret < 0) {
 		dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* NOTE: looks like some kind of ping message */
 	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
 					USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 					0x11, 0x0,
-					&init_bytes, 3, LINE6_TIMEOUT * HZ);
+					init_bytes, 3, LINE6_TIMEOUT * HZ);
 	if (ret < 0) {
 		dev_err(pod->line6.ifcdev,
 			"receive length failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	pod->firmware_version =
@@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
 	for (i = 0; i <= 16; i++) {
 		ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
 		if (ret < 0)
-			return ret;
+			goto exit;
 	}
 
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
 					USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
 					1, 0,
 					NULL, 0, LINE6_TIMEOUT * HZ);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+exit:
+	kfree(init_bytes);
+	return ret;
 }
 
 static void podhd_startup_workqueue(struct work_struct *work)
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index f47ba94..325b07b 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -54,8 +54,8 @@ struct usb_line6_toneport {
 	/* Firmware version (x 100) */
 	u8 firmware_version;
 
-	/* Timer for delayed PCM startup */
-	struct timer_list timer;
+	/* Work for delayed PCM startup */
+	struct delayed_work pcm_work;
 
 	/* Device type */
 	enum line6_device_type type;
@@ -241,9 +241,10 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
 	return 1;
 }
 
-static void toneport_start_pcm(struct timer_list *t)
+static void toneport_start_pcm(struct work_struct *work)
 {
-	struct usb_line6_toneport *toneport = from_timer(toneport, t, timer);
+	struct usb_line6_toneport *toneport =
+		container_of(work, struct usb_line6_toneport, pcm_work.work);
 	struct usb_line6 *line6 = &toneport->line6;
 
 	line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
@@ -365,16 +366,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
 /*
 	Setup Toneport device.
 */
-static void toneport_setup(struct usb_line6_toneport *toneport)
+static int toneport_setup(struct usb_line6_toneport *toneport)
 {
-	u32 ticks;
+	u32 *ticks;
 	struct usb_line6 *line6 = &toneport->line6;
 	struct usb_device *usbdev = line6->usbdev;
 
+	ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
+	if (!ticks)
+		return -ENOMEM;
+
 	/* sync time on device with host: */
 	/* note: 32-bit timestamps overflow in year 2106 */
-	ticks = (u32)ktime_get_real_seconds();
-	line6_write_data(line6, 0x80c6, &ticks, 4);
+	*ticks = (u32)ktime_get_real_seconds();
+	line6_write_data(line6, 0x80c6, ticks, 4);
+	kfree(ticks);
 
 	/* enable device: */
 	toneport_send_cmd(usbdev, 0x0301, 0x0000);
@@ -388,7 +394,9 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
 	if (toneport_has_led(toneport))
 		toneport_update_led(toneport);
 
-	mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
+	schedule_delayed_work(&toneport->pcm_work,
+			      msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000));
+	return 0;
 }
 
 /*
@@ -399,7 +407,7 @@ static void line6_toneport_disconnect(struct usb_line6 *line6)
 	struct usb_line6_toneport *toneport =
 		(struct usb_line6_toneport *)line6;
 
-	del_timer_sync(&toneport->timer);
+	cancel_delayed_work_sync(&toneport->pcm_work);
 
 	if (toneport_has_led(toneport))
 		toneport_remove_leds(toneport);
@@ -416,7 +424,7 @@ static int toneport_init(struct usb_line6 *line6,
 	struct usb_line6_toneport *toneport =  (struct usb_line6_toneport *) line6;
 
 	toneport->type = id->driver_info;
-	timer_setup(&toneport->timer, toneport_start_pcm, 0);
+	INIT_DELAYED_WORK(&toneport->pcm_work, toneport_start_pcm);
 
 	line6->disconnect = line6_toneport_disconnect;
 
@@ -451,7 +459,9 @@ static int toneport_init(struct usb_line6 *line6,
 			return err;
 	}
 
-	toneport_setup(toneport);
+	err = toneport_setup(toneport);
+	if (err)
+		return err;
 
 	/* register audio system: */
 	return snd_card_register(line6->card);
@@ -463,7 +473,11 @@ static int toneport_init(struct usb_line6 *line6,
 */
 static int toneport_reset_resume(struct usb_interface *interface)
 {
-	toneport_setup(usb_get_intfdata(interface));
+	int err;
+
+	err = toneport_setup(usb_get_intfdata(interface));
+	if (err)
+		return err;
 	return line6_resume(interface);
 }
 #endif
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2ea0519..1056628 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2681,6 +2681,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
 	if (! kctl) {
 		usb_audio_err(state->chip, "cannot malloc kcontrol\n");
+		for (i = 0; i < desc->bNrInPins; i++)
+			kfree(namelist[i]);
 		kfree(namelist);
 		kfree(cval);
 		return -ENOMEM;
diff --git a/techpack/Kbuild b/techpack/Kbuild
index aef74e6..1d4274d 100644
--- a/techpack/Kbuild
+++ b/techpack/Kbuild
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 TECHPACK?=y
 
-techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -xtype d -not -name ".*")
 obj-${TECHPACK} += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
 
 techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 67167e4..8248b8d 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,5 +1,5 @@
 *.d
-bpftool
+/bpftool
 bpftool*.8
 bpf-helpers.*
 FEATURE-DUMP.bpftool
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index acc704b..0b0ef3a 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -3,8 +3,6 @@
 #define _TOOLS_LINUX_BITOPS_H_
 
 #include <asm/types.h>
-#include <linux/compiler.h>
-
 #ifndef __WORDSIZE
 #define __WORDSIZE (__SIZEOF_LONG__ * 8)
 #endif
@@ -12,10 +10,9 @@
 #ifndef BITS_PER_LONG
 # define BITS_PER_LONG __WORDSIZE
 #endif
+#include <linux/bits.h>
+#include <linux/compiler.h>
 
-#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
 #define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
new file mode 100644
index 0000000..2b7b532c1
--- /dev/null
+++ b/tools/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE		8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif	/* __LINUX_BITS_H */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 7a00147..dd0b68d 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -53,6 +53,8 @@
 #  define __NR_bpf 349
 # elif defined(__s390__)
 #  define __NR_bpf 351
+# elif defined(__arc__)
+#  define __NR_bpf 280
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6f38164..c3145ab 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -26,6 +26,7 @@
 #include <linux/bpf.h>
 #include <stdbool.h>
 #include <stddef.h>
+#include <stdint.h>
 
 struct bpf_create_map_attr {
 	const char *name;
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 10985d9..6ccfd13 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2192,7 +2192,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
 		return val & 0xffffffff;
 
 	if (strcmp(type, "u64") == 0 ||
-	    strcmp(type, "s64"))
+	    strcmp(type, "s64") == 0)
 		return val;
 
 	if (strcmp(type, "s8") == 0)
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 53f8be0..8815823 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,11 +7,12 @@
 endif
 
 # always use the host compiler
+HOSTAR	?= ar
 HOSTCC	?= gcc
 HOSTLD	?= ld
+AR	 = $(HOSTAR)
 CC	 = $(HOSTCC)
 LD	 = $(HOSTLD)
-AR	 = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 550f176..46be345 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -165,6 +165,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
 		"fortify_panic",
 		"usercopy_abort",
 		"machine_real_restart",
+		"rewind_stack_do_exit",
 	};
 
 	if (func->bind == STB_WEAK)
@@ -1804,7 +1805,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			return 1;
 		}
 
-		func = insn->func ? insn->func->pfunc : NULL;
+		if (insn->func)
+			func = insn->func->pfunc;
 
 		if (func && insn->ignore) {
 			WARN_FUNC("BUG: why am I validating an ignored function?",
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 4419551..fa56fde 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -38,6 +38,10 @@
 #include <numa.h>
 #include <numaif.h>
 
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
 /*
  * Regular printout to the terminal, supressed if -q is specified:
  */
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 466540e..c72cc73 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -14,6 +14,7 @@
 include/uapi/linux/stat.h
 include/uapi/linux/vhost.h
 include/uapi/sound/asound.h
+include/linux/bits.h
 include/linux/hash.h
 include/uapi/linux/hw_breakpoint.h
 arch/x86/include/asm/disabled-features.h
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 0bc3e6e..4357141 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
 	INTEL_PT_STATE_NO_IP,
 	INTEL_PT_STATE_ERR_RESYNC,
 	INTEL_PT_STATE_IN_SYNC,
+	INTEL_PT_STATE_TNT_CONT,
 	INTEL_PT_STATE_TNT,
 	INTEL_PT_STATE_TIP,
 	INTEL_PT_STATE_TIP_PGD,
@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
 	case INTEL_PT_STATE_NO_IP:
 	case INTEL_PT_STATE_ERR_RESYNC:
 	case INTEL_PT_STATE_IN_SYNC:
-	case INTEL_PT_STATE_TNT:
+	case INTEL_PT_STATE_TNT_CONT:
 		return true;
+	case INTEL_PT_STATE_TNT:
 	case INTEL_PT_STATE_TIP:
 	case INTEL_PT_STATE_TIP_PGD:
 	case INTEL_PT_STATE_FUP:
@@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
 	timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
 	masked_timestamp = timestamp & decoder->period_mask;
 	if (decoder->continuous_period) {
-		if (masked_timestamp != decoder->last_masked_timestamp)
+		if (masked_timestamp > decoder->last_masked_timestamp)
 			return 1;
 	} else {
 		timestamp += 1;
 		masked_timestamp = timestamp & decoder->period_mask;
-		if (masked_timestamp != decoder->last_masked_timestamp) {
+		if (masked_timestamp > decoder->last_masked_timestamp) {
 			decoder->last_masked_timestamp = masked_timestamp;
 			decoder->continuous_period = true;
 		}
 	}
+
+	if (masked_timestamp < decoder->last_masked_timestamp)
+		return decoder->period_ticks;
+
 	return decoder->period_ticks - (timestamp - masked_timestamp);
 }
 
@@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
 	case INTEL_PT_PERIOD_TICKS:
 		timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
 		masked_timestamp = timestamp & decoder->period_mask;
-		decoder->last_masked_timestamp = masked_timestamp;
+		if (masked_timestamp > decoder->last_masked_timestamp)
+			decoder->last_masked_timestamp = masked_timestamp;
+		else
+			decoder->last_masked_timestamp += decoder->period_ticks;
 		break;
 	case INTEL_PT_PERIOD_NONE:
 	case INTEL_PT_PERIOD_MTC:
@@ -1249,7 +1258,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
 				return -ENOENT;
 			}
 			decoder->tnt.count -= 1;
-			if (!decoder->tnt.count)
+			if (decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+			else
 				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 			decoder->tnt.payload <<= 1;
 			decoder->state.from_ip = decoder->ip;
@@ -1280,7 +1291,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
 
 		if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
 			decoder->tnt.count -= 1;
-			if (!decoder->tnt.count)
+			if (decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+			else
 				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 			if (decoder->tnt.payload & BIT63) {
 				decoder->tnt.payload <<= 1;
@@ -1300,8 +1313,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
 				return 0;
 			}
 			decoder->ip += intel_pt_insn.length;
-			if (!decoder->tnt.count)
+			if (!decoder->tnt.count) {
+				decoder->sample_timestamp = decoder->timestamp;
+				decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
 				return -EAGAIN;
+			}
 			decoder->tnt.payload <<= 1;
 			continue;
 		}
@@ -2349,6 +2365,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 			err = intel_pt_walk_trace(decoder);
 			break;
 		case INTEL_PT_STATE_TNT:
+		case INTEL_PT_STATE_TNT_CONT:
 			err = intel_pt_walk_tnt(decoder);
 			if (err == -EAGAIN)
 				err = intel_pt_walk_trace(decoder);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b1508ce..076718a 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1358,6 +1358,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
 		machine->vmlinux_map->end = ~0ULL;
 }
 
+static void machine__update_kernel_mmap(struct machine *machine,
+				     u64 start, u64 end)
+{
+	struct map *map = machine__kernel_map(machine);
+
+	map__get(map);
+	map_groups__remove(&machine->kmaps, map);
+
+	machine__set_kernel_mmap(machine, start, end);
+
+	map_groups__insert(&machine->kmaps, map);
+	map__put(map);
+}
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
@@ -1390,17 +1404,11 @@ int machine__create_kernel_maps(struct machine *machine)
 			goto out_put;
 		}
 
-		/* we have a real start address now, so re-order the kmaps */
-		map = machine__kernel_map(machine);
-
-		map__get(map);
-		map_groups__remove(&machine->kmaps, map);
-
-		/* assume it's the last in the kmaps */
-		machine__set_kernel_mmap(machine, addr, ~0ULL);
-
-		map_groups__insert(&machine->kmaps, map);
-		map__put(map);
+		/*
+		 * we have a real start address now, so re-order the kmaps
+		 * assume it's the last in the kmaps
+		 */
+		machine__update_kernel_mmap(machine, addr, ~0ULL);
 	}
 
 	if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1536,7 +1544,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 		if (strstr(kernel->long_name, "vmlinux"))
 			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
-		machine__set_kernel_mmap(machine, event->mmap.start,
+		machine__update_kernel_mmap(machine, event->mmap.start,
 					 event->mmap.start + event->mmap.len);
 
 		/*
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 2ab25aa..ff058bf 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,7 +9,7 @@
 endif
 
 turbostat : turbostat.c
-CFLAGS +=	-Wall
+CFLAGS +=	-Wall -I../../../include
 CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 CFLAGS +=	-DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
 
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 83964f7..fbb53c9 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2082,7 +2082,7 @@ int has_turbo_ratio_group_limits(int family, int model)
 	switch (model) {
 	case INTEL_FAM6_ATOM_GOLDMONT:
 	case INTEL_FAM6_SKYLAKE_X:
-	case INTEL_FAM6_ATOM_DENVERTON:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
 		return 1;
 	}
 	return 0;
@@ -3149,9 +3149,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 		pkg_cstate_limits = skx_pkg_cstate_limits;
 		has_misc_feature_control = 1;
 		break;
-	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
+	case INTEL_FAM6_ATOM_SILVERMONT:	/* BYT */
 		no_MSR_MISC_PWR_MGMT = 1;
-	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
+	case INTEL_FAM6_ATOM_SILVERMONT_X:	/* AVN */
 		pkg_cstate_limits = slv_pkg_cstate_limits;
 		break;
 	case INTEL_FAM6_ATOM_AIRMONT:	/* AMT */
@@ -3163,8 +3163,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 		pkg_cstate_limits = phi_pkg_cstate_limits;
 		break;
 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
-	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:	/* DNV */
 		pkg_cstate_limits = bxt_pkg_cstate_limits;
 		break;
 	default:
@@ -3193,9 +3193,9 @@ int has_slv_msrs(unsigned int family, unsigned int model)
 		return 0;
 
 	switch (model) {
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_MERRIFIELD:
-	case INTEL_FAM6_ATOM_MOOREFIELD:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_MID:
+	case INTEL_FAM6_ATOM_AIRMONT_MID:
 		return 1;
 	}
 	return 0;
@@ -3207,7 +3207,7 @@ int is_dnv(unsigned int family, unsigned int model)
 		return 0;
 
 	switch (model) {
-	case INTEL_FAM6_ATOM_DENVERTON:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
 		return 1;
 	}
 	return 0;
@@ -3724,8 +3724,8 @@ double get_tdp(unsigned int model)
 			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
 	switch (model) {
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
 		return 30.0;
 	default:
 		return 135.0;
@@ -3791,7 +3791,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 		}
 		break;
 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
 		if (rapl_joules)
 			BIC_PRESENT(BIC_Pkg_J);
@@ -3850,8 +3850,8 @@ void rapl_probe(unsigned int family, unsigned int model)
 			BIC_PRESENT(BIC_RAMWatt);
 		}
 		break;
-	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
-	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
+	case INTEL_FAM6_ATOM_SILVERMONT:	/* BYT */
+	case INTEL_FAM6_ATOM_SILVERMONT_X:	/* AVN */
 		do_rapl = RAPL_PKG | RAPL_CORES;
 		if (rapl_joules) {
 			BIC_PRESENT(BIC_Pkg_J);
@@ -3861,7 +3861,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 			BIC_PRESENT(BIC_CorWatt);
 		}
 		break;
-	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
+	case INTEL_FAM6_ATOM_GOLDMONT_X:	/* DNV */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
 		BIC_PRESENT(BIC_PKG__);
 		BIC_PRESENT(BIC_RAM__);
@@ -3884,7 +3884,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 		return;
 
 	rapl_power_units = 1.0 / (1 << (msr & 0xF));
-	if (model == INTEL_FAM6_ATOM_SILVERMONT1)
+	if (model == INTEL_FAM6_ATOM_SILVERMONT)
 		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
 	else
 		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
@@ -4141,8 +4141,8 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 	case INTEL_FAM6_CANNONLAKE_MOBILE:	/* CNL */
 	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
-	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:	/* DNV */
 		return 1;
 	}
 	return 0;
@@ -4174,7 +4174,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
 	case INTEL_FAM6_CANNONLAKE_MOBILE:	/* CNL */
 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
-	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 		return 1;
 	}
 	return 0;
@@ -4209,8 +4209,8 @@ int is_slm(unsigned int family, unsigned int model)
 	if (!genuine_intel)
 		return 0;
 	switch (model) {
-	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
-	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
+	case INTEL_FAM6_ATOM_SILVERMONT:	/* BYT */
+	case INTEL_FAM6_ATOM_SILVERMONT_X:	/* AVN */
 		return 1;
 	}
 	return 0;
@@ -4581,11 +4581,11 @@ void process_cpuid()
 				case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
 					crystal_hz = 24000000;	/* 24.0 MHz */
 					break;
-				case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
+				case INTEL_FAM6_ATOM_GOLDMONT_X:	/* DNV */
 					crystal_hz = 25000000;	/* 25.0 MHz */
 					break;
 				case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
-				case INTEL_FAM6_ATOM_GEMINI_LAKE:
+				case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 					crystal_hz = 19200000;	/* 19.2 MHz */
 					break;
 				default:
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index f4534fb..da781b4 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,7 +9,7 @@
 endif
 
 x86_energy_perf_policy : x86_energy_perf_policy.c
-CFLAGS +=	-Wall
+CFLAGS +=	-Wall -I../../../include
 CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 
 %: %.c
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
index 8fcd1c0..cbd55f5 100644
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -11,6 +11,8 @@ static const char *__doc__ =
 #include <bpf/libbpf.h>
 #include <getopt.h>
 
+#include "bpf_rlimit.h"
+
 static const struct option long_options[] = {
 	{"help",	no_argument,		NULL, 'h' },
 	{"debug",	no_argument,		NULL, 'D' },
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 294fc18..9db5a73 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -32,7 +32,6 @@
 #include <linux/if_ether.h>
 
 #include <bpf/bpf.h>
-#include <bpf/libbpf.h>
 
 #ifdef HAVE_GENHDR
 # include "autoconf.h"
@@ -57,7 +56,6 @@
 
 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
 static bool unpriv_disabled = false;
-static int skips;
 
 struct bpf_test {
 	const char *descr;
@@ -12772,11 +12770,6 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
 				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
-	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
-		printf("SKIP (unsupported program type %d)\n", prog_type);
-		skips++;
-		goto close_fds;
-	}
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
 		       test->result_unpriv : test->result;
@@ -12912,7 +12905,7 @@ static void get_unpriv_disabled()
 
 static int do_test(bool unpriv, unsigned int from, unsigned int to)
 {
-	int i, passes = 0, errors = 0;
+	int i, passes = 0, errors = 0, skips = 0;
 
 	for (i = from; i < to; i++) {
 		struct bpf_test *test = &tests[i];
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index cabe2a3..cf156b3 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -51,6 +51,10 @@ struct ksym *ksym_search(long key)
 	int start = 0, end = sym_cnt;
 	int result;
 
+	/* kallsyms not loaded. return NULL */
+	if (sym_cnt <= 0)
+		return NULL;
+
 	while (start < end) {
 		size_t mid = start + (end - start) / 2;
 
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 28d321b..6f33988 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -26,7 +26,7 @@
  */
 static int test_memcg_subtree_control(const char *root)
 {
-	char *parent, *child, *parent2, *child2;
+	char *parent, *child, *parent2 = NULL, *child2 = NULL;
 	int ret = KSFT_FAIL;
 	char buf[PAGE_SIZE];
 
@@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
 	parent = cg_name(root, "memcg_test_0");
 	child = cg_name(root, "memcg_test_0/memcg_test_1");
 	if (!parent || !child)
-		goto cleanup;
+		goto cleanup_free;
 
 	if (cg_create(parent))
-		goto cleanup;
+		goto cleanup_free;
 
 	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
-		goto cleanup;
+		goto cleanup_parent;
 
 	if (cg_create(child))
-		goto cleanup;
+		goto cleanup_parent;
 
 	if (cg_read_strstr(child, "cgroup.controllers", "memory"))
-		goto cleanup;
+		goto cleanup_child;
 
 	/* Create two nested cgroups without enabling memory controller */
 	parent2 = cg_name(root, "memcg_test_1");
 	child2 = cg_name(root, "memcg_test_1/memcg_test_1");
 	if (!parent2 || !child2)
-		goto cleanup;
+		goto cleanup_free2;
 
 	if (cg_create(parent2))
-		goto cleanup;
+		goto cleanup_free2;
 
 	if (cg_create(child2))
-		goto cleanup;
+		goto cleanup_parent2;
 
 	if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
-		goto cleanup;
+		goto cleanup_all;
 
 	if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
-		goto cleanup;
+		goto cleanup_all;
 
 	ret = KSFT_PASS;
 
-cleanup:
-	cg_destroy(child);
-	cg_destroy(parent);
-	free(parent);
-	free(child);
-
+cleanup_all:
 	cg_destroy(child2);
+cleanup_parent2:
 	cg_destroy(parent2);
+cleanup_free2:
 	free(parent2);
 	free(child2);
+cleanup_child:
+	cg_destroy(child);
+cleanup_parent:
+	cg_destroy(parent);
+cleanup_free:
+	free(parent);
+	free(child);
 
 	return ret;
 }
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index d4cfb6a..d84193b 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -27,6 +27,7 @@
 		nsuccess=$((nsuccess+1))
 		printf "\n    TEST: %-50s  [ OK ]\n" "${msg}"
 	else
+		ret=1
 		nfail=$((nfail+1))
 		printf "\n    TEST: %-50s  [FAIL]\n" "${msg}"
 		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -245,4 +246,9 @@
 run_fibrule_tests
 cleanup
 
+if [ "$TESTS" != "none" ]; then
+	printf "\nTests passed: %3d\n" ${nsuccess}
+	printf "Tests failed: %3d\n"   ${nfail}
+fi
+
 exit $ret
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index a4ccde0..2f190aa 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -602,6 +602,39 @@
 	return $rc
 }
 
+check_expected()
+{
+	local out="$1"
+	local expected="$2"
+	local rc=0
+
+	[ "${out}" = "${expected}" ] && return 0
+
+	if [ -z "${out}" ]; then
+		if [ "$VERBOSE" = "1" ]; then
+			printf "\nNo route entry found\n"
+			printf "Expected:\n"
+			printf "    ${expected}\n"
+		fi
+		return 1
+	fi
+
+	# tricky way to convert output to 1-line without ip's
+	# messy '\'; this drops all extra white space
+	out=$(echo ${out})
+	if [ "${out}" != "${expected}" ]; then
+		rc=1
+		if [ "${VERBOSE}" = "1" ]; then
+			printf "    Unexpected route entry. Have:\n"
+			printf "        ${out}\n"
+			printf "    Expected:\n"
+			printf "        ${expected}\n\n"
+		fi
+	fi
+
+	return $rc
+}
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route6()
@@ -646,31 +679,7 @@
 	local rc=0
 
 	out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
-	[ "${out}" = "${expected}" ] && return 0
-
-	if [ -z "${out}" ]; then
-		if [ "$VERBOSE" = "1" ]; then
-			printf "\nNo route entry found\n"
-			printf "Expected:\n"
-			printf "    ${expected}\n"
-		fi
-		return 1
-	fi
-
-	# tricky way to convert output to 1-line without ip's
-	# messy '\'; this drops all extra white space
-	out=$(echo ${out})
-	if [ "${out}" != "${expected}" ]; then
-		rc=1
-		if [ "${VERBOSE}" = "1" ]; then
-			printf "    Unexpected route entry. Have:\n"
-			printf "        ${out}\n"
-			printf "    Expected:\n"
-			printf "        ${expected}\n\n"
-		fi
-	fi
-
-	return $rc
+	check_expected "${out}" "${expected}"
 }
 
 route_cleanup()
@@ -714,7 +723,7 @@
 	$IP addr add 172.16.103.2/24 dev veth4
 	$IP addr add 172.16.104.1/24 dev dummy1
 
-	set +ex
+	set +e
 }
 
 # assumption is that basic add of a single path route works
@@ -949,7 +958,8 @@
 	run_cmd "$IP li set dev dummy2 down"
 	rc=$?
 	if [ $rc -eq 0 ]; then
-		check_route6 ""
+		out=$($IP -6 ro ls match 2001:db8:104::/64)
+		check_expected "${out}" ""
 		rc=$?
 	fi
 	log_test $rc 0 "Prefix route removed on link down"
@@ -1009,34 +1019,9 @@
 	local pfx="172.16.104.0/24"
 	local expected="$1"
 	local out
-	local rc=0
 
 	out=$($IP ro ls match ${pfx})
-	[ "${out}" = "${expected}" ] && return 0
-
-	if [ -z "${out}" ]; then
-		if [ "$VERBOSE" = "1" ]; then
-			printf "\nNo route entry found\n"
-			printf "Expected:\n"
-			printf "    ${expected}\n"
-		fi
-		return 1
-	fi
-
-	# tricky way to convert output to 1-line without ip's
-	# messy '\'; this drops all extra white space
-	out=$(echo ${out})
-	if [ "${out}" != "${expected}" ]; then
-		rc=1
-		if [ "${VERBOSE}" = "1" ]; then
-			printf "    Unexpected route entry. Have:\n"
-			printf "        ${out}\n"
-			printf "    Expected:\n"
-			printf "        ${expected}\n\n"
-		fi
-	fi
-
-	return $rc
+	check_expected "${out}" "${expected}"
 }
 
 # assumption is that basic add of a single path route works
@@ -1301,7 +1286,8 @@
 	run_cmd "$IP li set dev dummy2 down"
 	rc=$?
 	if [ $rc -eq 0 ]; then
-		check_route ""
+		out=$($IP ro ls match 172.16.104.0/24)
+		check_expected "${out}" ""
 		rc=$?
 	fi
 	log_test $rc 0 "Prefix route removed on link down"
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index b093f39c2..14e41fa 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -7,7 +7,7 @@
 ./socket
 if [ $? -ne 0 ]; then
 	echo "[FAIL]"
+	exit 1
 else
 	echo "[PASS]"
 fi
-
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index c9ff2b4..a37cb11 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755
index 0000000..b48e183
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig icmp errors are set as related
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack did set RELATED
+# 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 will perform IP masquerading, i.e. also
+# check the icmp errors are propagated to the correct host as per
+# nat of "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+cleanup() {
+	for i in 1 2;do ip netns del nsclient$i;done
+	for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+    echo -n 192.168.$1.2
+}
+
+ipv6 () {
+    echo -n dead:$1::2
+}
+
+check_counter()
+{
+	ns=$1
+	name=$2
+	expect=$3
+	local lret=0
+
+	cnt=$(ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+		ip netns exec $ns nft list counter inet filter "$name" 1>&2
+		lret=1
+	fi
+
+	return $lret
+}
+
+check_unknown()
+{
+	expect="packets 0 bytes 0"
+	for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+		check_counter $n "unknown" "$expect"
+		if [ $? -ne 0 ] ;then
+			return 1
+		fi
+	done
+
+	return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+  ip netns add $n
+  ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+    ip -net nsclient$i link set $DEV up
+    ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+    ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 -6 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 -6 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+	ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+	ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter related { }
+	chain forward {
+		type filter hook forward priority 0; policy accept;
+		meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+		meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+		meta l4proto { icmp, icmpv6 } ct state new,established accept
+		counter name "unknown" drop
+	}
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter related { }
+	chain input {
+		type filter hook input priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+		counter name "unknown" drop
+	}
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter new { }
+	counter established { }
+
+	chain input {
+		type filter hook input priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+		meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+		counter name "unknown" drop
+	}
+	chain output {
+		type filter hook output priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+		meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+		counter name "unknown" drop
+	}
+}
+EOF
+
+
+# make sure NAT core rewrites address of icmp error if nat is used according to
+# conntrack nat information (icmp error will be directed at nsrouter1 address,
+# but it needs to be routed to nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		ip protocol icmp oifname "veth0" counter masquerade
+	}
+}
+table ip6 nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+	}
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1  mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+	echo "ERROR: netns ip routing/connectivity broken" 1>&2
+	cleanup
+	exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+	echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+	cleanup
+	exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+	echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+	ret=1
+fi
+
+# nsrouter2 should have generated the icmp error, so
+# related counter should be 0 (it's in forward).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+	echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+	ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+if [ $ret -eq 0 ];then
+	echo "PASS: icmp mtu error had RELATED state"
+else
+	echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 1415e36..fef3527 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -949,7 +949,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			       const struct kvm_vcpu_init *init)
 {
-	unsigned int i;
+	unsigned int i, ret;
 	int phys_target = kvm_target_cpu();
 
 	if (init->target != phys_target)
@@ -984,9 +984,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	vcpu->arch.target = phys_target;
 
 	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
+	ret = kvm_reset_vcpu(vcpu);
+	if (ret) {
+		vcpu->arch.target = -1;
+		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+	}
 
+	return ret;
+}
 
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index f376c82..621bb00 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -760,8 +760,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
 	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	int esz = GITS_BASER_ENTRY_SIZE(baser);
-	int index;
+	int index, idx;
 	gfn_t gfn;
+	bool ret;
 
 	switch (type) {
 	case GITS_BASER_TYPE_DEVICE:
@@ -788,7 +789,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
 		if (eaddr)
 			*eaddr = addr;
-		return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+		goto out;
 	}
 
 	/* calculate and check the index into the 1st level */
@@ -822,7 +824,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
 	if (eaddr)
 		*eaddr = indirect_ptr;
-	return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+	idx = srcu_read_lock(&its->dev->kvm->srcu);
+	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+	srcu_read_unlock(&its->dev->kvm->srcu, idx);
+	return ret;
 }
 
 static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1935,7 +1942,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
-	return kvm_write_guest(kvm, gpa, &val, ite_esz);
+	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2082,7 +2089,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
 	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
 		(dev->num_eventid_bits - 1));
 	val = cpu_to_le64(val);
-	return kvm_write_guest(kvm, ptr, &val, dte_esz);
+	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2262,7 +2269,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
 	       collection->collection_id);
 	val = cpu_to_le64(val);
-	return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2333,7 +2340,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	 */
 	val = 0;
 	BUG_ON(cte_esz > sizeof(val));
-	ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
 	return ret;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd23..3f2350a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -358,7 +358,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
 	if (status) {
 		/* clear consumed data */
 		val &= ~(1 << bit_nr);
-		ret = kvm_write_guest(kvm, ptr, &val, 1);
+		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 	}
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		else
 			val &= ~(1 << bit_nr);
 
-		ret = kvm_write_guest(kvm, ptr, &val, 1);
+		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 	}
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index b1286c4..0bd0683 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
 {
 	struct kvm_kernel_irq_routing_entry *ei;
 	int r;
+	u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
 
 	/*
 	 * Do not allow GSI to be mapped to the same irqchip more than once.
 	 * Allow only one to one mapping between GSI and non-irqchip routing.
 	 */
-	hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+	hlist_for_each_entry(ei, &rt->map[gsi], link)
 		if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
 		    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
 		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
 			return -EINVAL;
 
-	e->gsi = ue->gsi;
+	e->gsi = gsi;
 	e->type = ue->type;
 	r = kvm_set_routing_entry(kvm, e, ue);
 	if (r)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6a79df8..e909d99 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2887,12 +2887,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	struct kvm_device_ops *ops = NULL;
 	struct kvm_device *dev;
 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+	int type;
 	int ret;
 
 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
 		return -ENODEV;
 
-	ops = kvm_device_ops_table[cd->type];
+	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+	ops = kvm_device_ops_table[type];
 	if (ops == NULL)
 		return -ENODEV;
 
@@ -2907,7 +2909,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	dev->kvm = kvm;
 
 	mutex_lock(&kvm->lock);
-	ret = ops->create(dev, cd->type);
+	ret = ops->create(dev, type);
 	if (ret < 0) {
 		mutex_unlock(&kvm->lock);
 		kfree(dev);